diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 95806defc6..e6e0bae39a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -562,6 +562,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTPS_PORT_DEFAULT; public static final String DFS_NAMENODE_NAME_DIR_KEY = HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_NAME_DIR_KEY; + public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_KEY = + "dfs.namenode.storage.dir.perm"; + public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT = + "700"; public static final String DFS_NAMENODE_EDITS_DIR_KEY = HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_EDITS_DIR_KEY; public static final String DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir"; @@ -1109,6 +1113,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485; public static final String DFS_JOURNALNODE_RPC_BIND_HOST_KEY = "dfs.journalnode.rpc-bind-host"; public static final String DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_RPC_PORT_DEFAULT; + public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY = + "dfs.journalnode.edits.dir.perm"; + public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT = + "700"; public static final String DFS_JOURNALNODE_HTTP_ADDRESS_KEY = "dfs.journalnode.http-address"; public static final int DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java index 305f1e87ef..0ef54a87c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java @@ -26,6 +26,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; @@ -70,7 +72,9 @@ protected JNStorage(Configuration conf, File logDir, StartupOption startOpt, StorageErrorReporter errorReporter) throws IOException { super(NodeType.JOURNAL_NODE); - sd = new StorageDirectory(logDir); + sd = new StorageDirectory(logDir, null, false, new FsPermission(conf.get( + DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY, + DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT))); this.addStorageDir(sd); this.fjm = new FileJournalManager(conf, sd, errorReporter); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 3dd43c7d70..2ba943a077 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -27,11 +27,14 @@ import java.nio.channels.OverlappingFileLockException; import java.nio.file.DirectoryStream; import java.nio.file.Files; +import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.PosixFilePermissions; import java.util.ArrayList; 
import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Properties; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import org.apache.commons.io.FileUtils; @@ -39,6 +42,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; @@ -276,6 +280,7 @@ public static class StorageDirectory implements FormatConfirmable { final boolean isShared; final StorageDirType dirType; // storage dir type FileLock lock; // storage lock + private final FsPermission permission; private String storageUuid = null; // Storage directory identifier. @@ -311,6 +316,11 @@ public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) { this(dir, dirType, isShared, null); } + public StorageDirectory(File dir, StorageDirType dirType, + boolean isShared, FsPermission permission) { + this(dir, dirType, isShared, null, permission); + } + /** * Constructor * @param dirType storage directory type @@ -320,7 +330,7 @@ public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) { */ public StorageDirectory(StorageDirType dirType, boolean isShared, StorageLocation location) { - this(getStorageLocationFile(location), dirType, isShared, location); + this(getStorageLocationFile(location), dirType, isShared, location, null); } /** @@ -334,7 +344,7 @@ public StorageDirectory(StorageDirType dirType, boolean isShared, public StorageDirectory(String bpid, StorageDirType dirType, boolean isShared, StorageLocation location) { this(getBlockPoolCurrentDir(bpid, location), dirType, - isShared, location); + isShared, location, null); } private static File getBlockPoolCurrentDir(String bpid, @@ -348,13 
+358,14 @@ private static File getBlockPoolCurrentDir(String bpid, } private StorageDirectory(File dir, StorageDirType dirType, - boolean isShared, StorageLocation location) { + boolean isShared, StorageLocation location, FsPermission permission) { this.root = dir; this.lock = null; // default dirType is UNDEFINED this.dirType = (dirType == null ? NameNodeDirType.UNDEFINED : dirType); this.isShared = isShared; this.location = location; + this.permission = permission; assert location == null || dir == null || dir.getAbsolutePath().startsWith( new File(location.getUri()).getAbsolutePath()): @@ -432,8 +443,14 @@ public void clearDirectory() throws IOException { if (!(FileUtil.fullyDelete(curDir))) throw new IOException("Cannot remove current directory: " + curDir); } - if (!curDir.mkdirs()) + if (!curDir.mkdirs()) { throw new IOException("Cannot create directory " + curDir); + } + if (permission != null) { + Set<PosixFilePermission> permissions = + PosixFilePermissions.fromString(permission.toString()); + Files.setPosixFilePermissions(curDir.toPath(), permissions); + } } /** @@ -655,8 +672,9 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage, return StorageState.NON_EXISTENT; } LOG.info("{} does not exist. Creating ...", rootPath); - if (!root.mkdirs()) + if (!root.mkdirs()) { throw new IOException("Cannot create directory " + rootPath); + } hadMkdirs = true; } // or is inaccessible diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index b6ddd22d76..5681807565 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -300,7 +300,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, // triggered.
LOG.info("Storage directory " + sd.getRoot() + " is not formatted."); LOG.info("Formatting ..."); - sd.clearDirectory(); // create empty currrent dir + sd.clearDirectory(); // create empty current dir // For non-HA, no further action is needed here, as saveNamespace will // take care of the rest. if (!target.isHaEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index ca0d41094b..98ae44ede9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -38,6 +38,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @@ -128,7 +130,7 @@ public boolean isOfType(StorageDirType type) { private boolean restoreFailedStorage = false; private final Object restorationLock = new Object(); private boolean disablePreUpgradableLayoutCheck = false; - + private final Configuration conf; /** * TxId of the last transaction that was included in the most @@ -171,6 +173,7 @@ public NNStorage(Configuration conf, Collection imageDirs, Collection editsDirs) throws IOException { super(NodeType.NAME_NODE); + this.conf = conf; // this may modify the editsDirs, so copy before passing in setStorageDirectories(imageDirs, @@ -312,10 +315,16 @@ synchronized void setStorageDirectories(Collection fsNameDirs, NameNodeDirType.IMAGE; // Add to the list of storage directories, only if the // URI is of type file:// - 
if(dirName.getScheme().compareTo("file") == 0) { - this.addStorageDir(new StorageDirectory(new File(dirName.getPath()), + if (dirName.getScheme().compareTo("file") == 0) { + // Don't lock the dir if it's shared. + StorageDirectory sd = new StorageDirectory(new File(dirName.getPath()), dirType, - sharedEditsDirs.contains(dirName))); // Don't lock the dir if it's shared. + sharedEditsDirs.contains(dirName), + new FsPermission(conf.get( + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY, + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT))); + + this.addStorageDir(sd); } } @@ -324,9 +333,12 @@ synchronized void setStorageDirectories(Collection fsNameDirs, checkSchemeConsistency(dirName); // Add to the list of storage directories, only if the // URI is of type file:// - if(dirName.getScheme().compareTo("file") == 0) { + if (dirName.getScheme().compareTo("file") == 0) { this.addStorageDir(new StorageDirectory(new File(dirName.getPath()), - NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName), + NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName), + new FsPermission(conf.get( + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY, + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT)))); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 74c4f40938..40583d0965 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -5446,4 +5446,24 @@ The queue size of BlockReportProcessingThread in BlockManager. + + + dfs.namenode.storage.dir.perm + 700 + + Permissions for the directories on the local filesystem where + the DFS namenode stores the fsImage. The permissions can either be + octal or symbolic.
+ + + + + dfs.journalnode.edits.dir.perm + 700 + + Permissions for the directories on the local filesystem where + the DFS journal node stores the edits. The permissions can either be + octal or symbolic. + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 64cb16e458..77a5b061c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -54,6 +54,9 @@ import java.util.regex.Pattern; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.hdfs.server.common.Storage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -1259,6 +1262,17 @@ public static NNStorage setupEdits(List editUris, int numrolls, Collections.emptyList(), editUris); storage.format(new NamespaceInfo()); + // Verify permissions + LocalFileSystem fs = LocalFileSystem.getLocal(getConf()); + for (URI uri : editUris) { + String currDir = uri.getPath() + Path.SEPARATOR + + Storage.STORAGE_DIR_CURRENT; + FileStatus fileStatus = fs.getFileLinkStatus(new Path(currDir)); + FsPermission permission = fileStatus.getPermission(); + assertEquals(getConf().getInt( + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY, 700), + permission.toOctal()); + } FSEditLog editlog = getFSEditLog(storage); // open the edit log and add two transactions // logGenerationStamp is used, simply because it doesn't diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 
3e5fe75640..67c8f3c18f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -37,7 +37,9 @@ import java.util.Collection; import java.util.Iterator; import java.util.List; +import java.util.stream.Collectors; +import org.apache.hadoop.fs.LocalFileSystem; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -103,7 +105,7 @@ public void setUp() throws Exception { config = new HdfsConfiguration(); hdfsDir = new File(MiniDFSCluster.getBaseDirectory()); - if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) { + if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) { throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'"); } LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath()); @@ -789,4 +791,27 @@ public void testStorageBlockContentsStaleAfterNNRestart() throws Exception { return; } + @Test(timeout = 60000) + public void testDirectoryPermissions() throws Exception { + Configuration conf = new Configuration(); + try (MiniDFSCluster dfsCluster + = new MiniDFSCluster.Builder(conf).build()) { + dfsCluster.waitActive(); + // name and edits + List nameDirs = + dfsCluster.getNameNode().getFSImage().getStorage().getStorageDirs(); + Collection nameDirUris = nameDirs.stream().map(d -> d + .getCurrentDir().toURI()).collect(Collectors.toList()); + assertNotNull(nameDirUris); + LocalFileSystem fs = LocalFileSystem.getLocal(config); + FsPermission permission = new FsPermission(conf.get( + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY, + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT)); + for (URI uri : nameDirUris) { + FileStatus fileStatus = fs.getFileLinkStatus(new Path(uri)); + assertEquals(permission.toOctal(), + fileStatus.getPermission().toOctal()); + } + } + } }