diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 027db8a010..c5458f068f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
@@ -2490,4 +2491,22 @@ public Quota getQuotaModule() {
   public FederationRPCMetrics getRPCMetrics() {
     return this.rpcMonitor.getRPCMetrics();
   }
+
+  @Override
+  public void satisfyStoragePolicy(String path) throws IOException {
+    checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public boolean isStoragePolicySatisfierRunning() throws IOException {
+    checkOperation(OperationCategory.READ, false);
+    return false;
+  }
+
+  @Override
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+      String path) throws IOException {
+    checkOperation(OperationCategory.READ, false);
+    return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index f42d91141c..edd1aca288 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Assert;
@@ -912,8 +913,6 @@ public void testSPSShouldNotLeakXattrIfSatisfyStoragePolicyCallOnECFiles()
     int defaultStripedBlockSize =
         StripedFileTestUtil.getDefaultECPolicy().getCellSize() * 4;
-    config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        StripedFileTestUtil.getDefaultECPolicy().getName());
     config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
     config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
@@ -925,6 +924,9 @@ public void testSPSShouldNotLeakXattrIfSatisfyStoragePolicyCallOnECFiles()
     try {
       hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
          storagesPerDatanode, capacity);
+      dfs = hdfsCluster.getFileSystem();
+      dfs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
       // set "/foo" directory with ONE_SSD storage policy.
       ClientProtocol client = NameNodeProxies.createProxy(config,
@@ -939,7 +941,6 @@ public void testSPSShouldNotLeakXattrIfSatisfyStoragePolicyCallOnECFiles()
       // write file to fooDir
       final String testFile = "/foo/bar";
       long fileLen = 20 * defaultStripedBlockSize;
-      dfs = hdfsCluster.getFileSystem();
       DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short) 3, 0);
 
       // ONESSD is unsuitable storage policy on EC files
@@ -1632,7 +1633,7 @@ private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
         LOG.info("Simulate block pinning in datanode {}",
             locations[favoredNodesCount]);
         DataNode dn = hdfsCluster.getDataNode(dnInfo.getIpcPort());
-        DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
+        InternalDataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
         favoredNodesCount--;
         if (favoredNodesCount <= 0) {
           break; // marked favoredNodesCount number of pinned block location
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index 154ddae02d..6991ad2181 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -105,8 +105,6 @@ public void testMoverWithFullStripe() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         true);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        StripedFileTestUtil.getDefaultECPolicy().getName());
     initConfWithStripe(conf, defaultStripeBlockSize);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numOfDatanodes)
@@ -128,6 +126,9 @@ public void testMoverWithFullStripe() throws Exception {
     HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
     try {
       cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      dfs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
 
       // set "/bar" directory with HOT storage policy.
       ClientProtocol client = NameNodeProxies.createProxy(conf,
@@ -215,8 +216,6 @@ public void testWhenOnlyFewTargetNodesAreAvailableToSatisfyStoragePolicy()
     }
     final Configuration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        StripedFileTestUtil.getDefaultECPolicy().getName());
     conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         true);
     initConfWithStripe(conf, defaultStripeBlockSize);
@@ -240,7 +239,9 @@ public void testWhenOnlyFewTargetNodesAreAvailableToSatisfyStoragePolicy()
     HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
     try {
       cluster.waitActive();
-
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      dfs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
       // set "/bar" directory with HOT storage policy.
       ClientProtocol client = NameNodeProxies.createProxy(conf,
           cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
@@ -327,8 +328,6 @@ public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
     conf.set(DFSConfigKeys
         .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
         "3000");
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        StripedFileTestUtil.getDefaultECPolicy().getName());
     conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         true);
     initConfWithStripe(conf, defaultStripeBlockSize);
@@ -350,6 +349,8 @@ public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
     try {
       cluster.waitActive();
       DistributedFileSystem fs = cluster.getFileSystem();
+      fs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
       Path barDir = new Path("/bar");
       fs.mkdirs(barDir);
       // set an EC policy on "/bar" directory
@@ -419,8 +420,6 @@ public void testWhenNoTargetDatanodeToSatisfyStoragePolicy()
     }
     final Configuration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        StripedFileTestUtil.getDefaultECPolicy().getName());
     conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
         true);
     initConfWithStripe(conf, defaultStripeBlockSize);
@@ -444,7 +443,9 @@ public void testWhenNoTargetDatanodeToSatisfyStoragePolicy()
     HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
     try {
       cluster.waitActive();
-
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      dfs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
       // set "/bar" directory with HOT storage policy.
       ClientProtocol client = NameNodeProxies.createProxy(conf,
           cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index 0644a83590..7e0663dff5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -165,61 +165,4 @@ public void testSetAndGetStoragePolicy() throws Exception {
     DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
         "File/Directory does not exist: /fooz");
   }
-
-  @Test(timeout = 30000)
-  public void testStoragePolicySatisfierCommand() throws Exception {
-    final String file = "/testStoragePolicySatisfierCommand";
-    DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
-
-    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
-    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0,
-        "The storage policy of " + file + " is unspecified");
-
-    DFSTestUtil.toolRun(admin,
-        "-setStoragePolicy -path " + file + " -policy COLD", 0,
-        "Set storage policy COLD on " + file.toString());
-
-    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + file, 0,
-        "Scheduled blocks to move based on the current storage policy on "
-            + file.toString());
-
-    DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
-        fs);
-  }
-
-  @Test(timeout = 30000)
-  public void testIsSatisfierRunningCommand() throws Exception {
-    final String file = "/testIsSatisfierRunningCommand";
-    DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
-    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
-    DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "yes");
-
-    cluster.getNameNode().reconfigureProperty(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
-    cluster.waitActive();
-
-    DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "no");
-
-    // Test with unnecessary args
-    DFSTestUtil.toolRun(admin, "-isSatisfierRunning status", 1,
-        "Can't understand arguments: ");
-  }
-
-  @Test(timeout = 90000)
-  public void testSatisfyStoragePolicyCommandWithWaitOption()
-      throws Exception {
-    final String file = "/testSatisfyStoragePolicyCommandWithWaitOption";
-    DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
-
-    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
-
-    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + file
-        + " -policy COLD", 0, "Set storage policy COLD on " + file.toString());
-
-    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -w -path " + file, 0,
-        "Waiting for satisfy the policy");
-
-    DFSTestUtil
-        .waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, fs);
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
new file mode 100644
index 0000000000..856c3ec056
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test StoragePolicySatisfy admin commands.
+ */
+public class TestStoragePolicySatisfyAdminCommands {
+  private static final short REPL = 1;
+  private static final int SIZE = 128;
+
+  private Configuration conf = null;
+  private MiniDFSCluster cluster = null;
+  private DistributedFileSystem dfs = null;
+
+  @Before
+  public void clusterSetUp() throws IOException, URISyntaxException {
+    conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
+    StorageType[][] newtypes = new StorageType[][] {
+        {StorageType.ARCHIVE, StorageType.DISK}};
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL)
+        .storageTypes(newtypes).build();
+    cluster.waitActive();
+    dfs = cluster.getFileSystem();
+  }
+
+  @After
+  public void clusterShutdown() throws IOException{
+    if(dfs != null) {
+      dfs.close();
+      dfs = null;
+    }
+    if(cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test(timeout = 30000)
+  public void testStoragePolicySatisfierCommand() throws Exception {
+    final String file = "/testStoragePolicySatisfierCommand";
+    DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
+
+    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0,
+        "The storage policy of " + file + " is unspecified");
+
+    DFSTestUtil.toolRun(admin,
+        "-setStoragePolicy -path " + file + " -policy COLD", 0,
+        "Set storage policy COLD on " + file.toString());
+
+    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + file, 0,
+        "Scheduled blocks to move based on the current storage policy on "
+            + file.toString());
+
+    DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
+        dfs);
+  }
+
+  @Test(timeout = 30000)
+  public void testIsSatisfierRunningCommand() throws Exception {
+    final String file = "/testIsSatisfierRunningCommand";
+    DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
+    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+    DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "yes");
+
+    cluster.getNameNode().reconfigureProperty(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
+    cluster.waitActive();
+
+    DFSTestUtil.toolRun(admin, "-isSatisfierRunning", 0, "no");
+
+    // Test with unnecessary args
+    DFSTestUtil.toolRun(admin, "-isSatisfierRunning status", 1,
+        "Can't understand arguments: ");
+  }
+
+  @Test(timeout = 90000)
+  public void testSatisfyStoragePolicyCommandWithWaitOption()
+      throws Exception {
+    final String file = "/testSatisfyStoragePolicyCommandWithWaitOption";
+    DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
+
+    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+
+    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + file
+        + " -policy COLD", 0, "Set storage policy COLD on " + file.toString());
+
+    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -w -path " + file, 0,
+        "Waiting for satisfy the policy");
+
+    DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
+        dfs);
+  }
+}