diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0524cf0e22..5e1d7176fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -512,6 +512,10 @@ Release 2.6.0 - UNRELEASED
     HDFS-6956. Allow dynamically changing the tracing level in Hadoop servers
     (cmccabe)
 
+    HDFS-7156. Update fsck documentation. (Masahiro Yamaguchi via shv)
+
+    HDFS-7093. Add config key to restrict setStoragePolicy. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 8aa244f2bf..4391578474 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -575,6 +575,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
   public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
 
+  public static final String DFS_STORAGE_POLICY_ENABLED_KEY = "dfs.storage.policy.enabled";
+  public static final boolean DFS_STORAGE_POLICY_ENABLED_DEFAULT = true;
+
   // HA related configuration
   public static final String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
   public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 4a26420920..54b7acdb32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -86,6 +86,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 import static org.apache.hadoop.util.Time.now;
 
@@ -423,6 +425,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   private final CacheManager cacheManager;
   private final DatanodeStatistics datanodeStatistics;
 
+  // whether setStoragePolicy is allowed.
+  private final boolean isStoragePolicyEnabled;
+
   private String nameserviceId;
 
   private RollingUpgradeInfo rollingUpgradeInfo = null;
@@ -794,6 +799,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
     this.blockIdGenerator = new SequentialBlockIdGenerator(this.blockManager);
 
+    this.isStoragePolicyEnabled =
+        conf.getBoolean(DFS_STORAGE_POLICY_ENABLED_KEY,
+                        DFS_STORAGE_POLICY_ENABLED_DEFAULT);
+
     this.fsOwner = UserGroupInformation.getCurrentUser();
     this.fsOwnerShortUserName = fsOwner.getShortUserName();
     this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
@@ -2305,8 +2314,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   private void setStoragePolicyInt(String src, final String policyName)
-      throws IOException {
-    checkSuperuserPrivilege();
+      throws IOException, UnresolvedLinkException, AccessControlException {
+
+    if (!isStoragePolicyEnabled) {
+      throw new IOException("Failed to set storage policy since "
+          + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
+    }
+    FSPermissionChecker pc = null;
+    if (isPermissionEnabled) {
+      pc = getPermissionChecker();
+    }
+    checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     waitForLoadingFSImage();
@@ -2315,6 +2333,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set storage policy for " + src);
+
+      if (pc != null) {
+        checkPermission(pc, src, false, null, null, FsAction.WRITE, null,
+            false, true);
+      }
+
       src = FSDirectory.resolvePath(src, pathComponents, dir);
 
       // get the corresponding policy and make sure the policy name is valid
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index b91090d167..31ab596c6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -77,7 +77,8 @@ public class DFSck extends Configured implements Tool {
   private static final String USAGE = "Usage: DFSck <path> "
       + "[-list-corruptfileblocks | "
       + "[-move | -delete | -openforwrite] "
-      + "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
+      + "[-files [-blocks [-locations | -racks]]]] "
+      + "[-includeSnapshots] [-showprogress]\n"
       + "\t<path>\tstart checking from this path\n"
       + "\t-move\tmove corrupted files to /lost+found\n"
       + "\t-delete\tdelete corrupted files\n"
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 3498f0ec69..9f5355545e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2142,4 +2142,12 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.storage.policy.enabled</name>
+  <value>true</value>
+  <description>
+    Allow users to change the storage policy on files and directories.
+  </description>
+</property>
+
 </configuration>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
index 170f352890..121f0ba5b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
@@ -82,32 +82,40 @@ HDFS Commands Guide
    See {{{./HdfsUserGuide.html#fsck}fsck}} for more info.
 
    Usage: <<<hdfs fsck <path>
+          [-list-corruptfileblocks |
           [-move | -delete | -openforwrite]
           [-files [-blocks [-locations | -racks]]]
-          [-showprogress]>>>
+          [-includeSnapshots] [-showprogress]>>>
 
-*------------------+---------------------------------------------+
-|| COMMAND_OPTION  || Description
-*------------------+---------------------------------------------+
-| <path>           | Start checking from this path.
-*------------------+---------------------------------------------+
-| -move            | Move corrupted files to /lost+found
-*------------------+---------------------------------------------+
-| -delete          | Delete corrupted files.
-*------------------+---------------------------------------------+
-| -openforwrite    | Print out files opened for write.
-*------------------+---------------------------------------------+
-| -files           | Print out files being checked.
-*------------------+---------------------------------------------+
-| -blocks          | Print out block report.
-*------------------+---------------------------------------------+
-| -locations       | Print out locations for every block.
-*------------------+---------------------------------------------+
-| -racks           | Print out network topology for data-node locations.
-*------------------+---------------------------------------------+
-| -showprogress    | Print out dots for progress in output. Default is OFF
-|                  | (no progress).
-*------------------+---------------------------------------------+
+*------------------------+---------------------------------------------+
+|| COMMAND_OPTION        || Description
+*------------------------+---------------------------------------------+
+| <path>                 | Start checking from this path.
+*------------------------+---------------------------------------------+
+| -move                  | Move corrupted files to /lost+found.
+*------------------------+---------------------------------------------+
+| -delete                | Delete corrupted files.
+*------------------------+---------------------------------------------+
+| -files                 | Print out files being checked.
+*------------------------+---------------------------------------------+
+| -openforwrite          | Print out files opened for write.
+*------------------------+---------------------------------------------+
+|                        | Include snapshot data if the given path
+| -includeSnapshots      | indicates a snapshottable directory or
+|                        | there are snapshottable directories under it.
+*------------------------+---------------------------------------------+
+| -list-corruptfileblocks| Print out list of missing blocks and
+|                        | files they belong to.
+*------------------------+---------------------------------------------+
+| -blocks                | Print out block report.
+*------------------------+---------------------------------------------+
+| -locations             | Print out locations for every block.
+*------------------------+---------------------------------------------+
+| -racks                 | Print out network topology for data-node locations.
+*------------------------+---------------------------------------------+
+| -showprogress          | Print out dots for progress in output. Default is OFF
+|                        | (no progress).
+*------------------------+---------------------------------------------+ * Administration Commands diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index 39d143946f..771b7bd5b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySu import java.io.File; import java.io.FileNotFoundException; +import java.io.IOException; import java.util.*; import com.google.common.collect.Lists; @@ -69,6 +70,40 @@ public class TestBlockStoragePolicy { static final byte WARM = (byte) 8; static final byte HOT = (byte) 12; + + @Test (timeout=300000) + public void testConfigKeyEnabled() throws IOException { + Configuration conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(1).build(); + try { + cluster.waitActive(); + cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD"); + } finally { + cluster.shutdown(); + } + } + + /** + * Ensure that setStoragePolicy throws IOException when + * dfs.storage.policy.enabled is set to false. + * @throws IOException + */ + @Test (timeout=300000, expected=IOException.class) + public void testConfigKeyDisabled() throws IOException { + Configuration conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(1).build(); + try { + cluster.waitActive(); + cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD"); + } finally { + cluster.shutdown(); + } + } + @Test public void testDefaultPolicies() { final Map expectedPolicyStrings = new HashMap();