HDFS-2470. NN should automatically set permissions on dfs.namenode.*.dir. Contributed by Siddharth Wagle.
This commit is contained in:
parent
8660e48ca1
commit
07e3cf952e
@ -562,6 +562,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
||||
public static final String DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTPS_PORT_DEFAULT;
|
||||
public static final String DFS_NAMENODE_NAME_DIR_KEY =
|
||||
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_NAME_DIR_KEY;
|
||||
public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_KEY =
|
||||
"dfs.namenode.storage.dir.perm";
|
||||
public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT =
|
||||
"700";
|
||||
public static final String DFS_NAMENODE_EDITS_DIR_KEY =
|
||||
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_EDITS_DIR_KEY;
|
||||
public static final String DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir";
|
||||
@ -1109,6 +1113,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
||||
public static final int DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485;
|
||||
public static final String DFS_JOURNALNODE_RPC_BIND_HOST_KEY = "dfs.journalnode.rpc-bind-host";
|
||||
public static final String DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_RPC_PORT_DEFAULT;
|
||||
public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY =
|
||||
"dfs.journalnode.edits.dir.perm";
|
||||
public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT =
|
||||
"700";
|
||||
|
||||
public static final String DFS_JOURNALNODE_HTTP_ADDRESS_KEY = "dfs.journalnode.http-address";
|
||||
public static final int DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
|
||||
|
@ -26,6 +26,8 @@
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
|
||||
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
|
||||
@ -70,7 +72,9 @@ protected JNStorage(Configuration conf, File logDir, StartupOption startOpt,
|
||||
StorageErrorReporter errorReporter) throws IOException {
|
||||
super(NodeType.JOURNAL_NODE);
|
||||
|
||||
sd = new StorageDirectory(logDir);
|
||||
sd = new StorageDirectory(logDir, null, false, new FsPermission(conf.get(
|
||||
DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY,
|
||||
DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT)));
|
||||
this.addStorageDir(sd);
|
||||
this.fjm = new FileJournalManager(conf, sd, errorReporter);
|
||||
|
||||
|
@ -27,11 +27,14 @@
|
||||
import java.nio.channels.OverlappingFileLockException;
|
||||
import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.attribute.PosixFilePermission;
|
||||
import java.nio.file.attribute.PosixFilePermissions;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
@ -39,6 +42,7 @@
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.StorageType;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
|
||||
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
|
||||
@ -276,6 +280,7 @@ public static class StorageDirectory implements FormatConfirmable {
|
||||
final boolean isShared;
|
||||
final StorageDirType dirType; // storage dir type
|
||||
FileLock lock; // storage lock
|
||||
private final FsPermission permission;
|
||||
|
||||
private String storageUuid = null; // Storage directory identifier.
|
||||
|
||||
@ -311,6 +316,11 @@ public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) {
|
||||
this(dir, dirType, isShared, null);
|
||||
}
|
||||
|
||||
public StorageDirectory(File dir, StorageDirType dirType,
|
||||
boolean isShared, FsPermission permission) {
|
||||
this(dir, dirType, isShared, null, permission);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
* @param dirType storage directory type
|
||||
@ -320,7 +330,7 @@ public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) {
|
||||
*/
|
||||
public StorageDirectory(StorageDirType dirType, boolean isShared,
|
||||
StorageLocation location) {
|
||||
this(getStorageLocationFile(location), dirType, isShared, location);
|
||||
this(getStorageLocationFile(location), dirType, isShared, location, null);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -334,7 +344,7 @@ public StorageDirectory(StorageDirType dirType, boolean isShared,
|
||||
public StorageDirectory(String bpid, StorageDirType dirType,
|
||||
boolean isShared, StorageLocation location) {
|
||||
this(getBlockPoolCurrentDir(bpid, location), dirType,
|
||||
isShared, location);
|
||||
isShared, location, null);
|
||||
}
|
||||
|
||||
private static File getBlockPoolCurrentDir(String bpid,
|
||||
@ -348,13 +358,14 @@ private static File getBlockPoolCurrentDir(String bpid,
|
||||
}
|
||||
|
||||
private StorageDirectory(File dir, StorageDirType dirType,
|
||||
boolean isShared, StorageLocation location) {
|
||||
boolean isShared, StorageLocation location, FsPermission permission) {
|
||||
this.root = dir;
|
||||
this.lock = null;
|
||||
// default dirType is UNDEFINED
|
||||
this.dirType = (dirType == null ? NameNodeDirType.UNDEFINED : dirType);
|
||||
this.isShared = isShared;
|
||||
this.location = location;
|
||||
this.permission = permission;
|
||||
assert location == null || dir == null ||
|
||||
dir.getAbsolutePath().startsWith(
|
||||
new File(location.getUri()).getAbsolutePath()):
|
||||
@ -432,8 +443,14 @@ public void clearDirectory() throws IOException {
|
||||
if (!(FileUtil.fullyDelete(curDir)))
|
||||
throw new IOException("Cannot remove current directory: " + curDir);
|
||||
}
|
||||
if (!curDir.mkdirs())
|
||||
if (!curDir.mkdirs()) {
|
||||
throw new IOException("Cannot create directory " + curDir);
|
||||
}
|
||||
if (permission != null) {
|
||||
Set<PosixFilePermission> permissions =
|
||||
PosixFilePermissions.fromString(permission.toString());
|
||||
Files.setPosixFilePermissions(curDir.toPath(), permissions);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -655,8 +672,9 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage,
|
||||
return StorageState.NON_EXISTENT;
|
||||
}
|
||||
LOG.info("{} does not exist. Creating ...", rootPath);
|
||||
if (!root.mkdirs())
|
||||
if (!root.mkdirs()) {
|
||||
throw new IOException("Cannot create directory " + rootPath);
|
||||
}
|
||||
hadMkdirs = true;
|
||||
}
|
||||
// or is inaccessible
|
||||
|
@ -300,7 +300,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target,
|
||||
// triggered.
|
||||
LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
|
||||
LOG.info("Formatting ...");
|
||||
sd.clearDirectory(); // create empty currrent dir
|
||||
sd.clearDirectory(); // create empty current dir
|
||||
// For non-HA, no further action is needed here, as saveNamespace will
|
||||
// take care of the rest.
|
||||
if (!target.isHaEnabled()) {
|
||||
|
@ -38,6 +38,8 @@
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
@ -128,7 +130,7 @@ public boolean isOfType(StorageDirType type) {
|
||||
private boolean restoreFailedStorage = false;
|
||||
private final Object restorationLock = new Object();
|
||||
private boolean disablePreUpgradableLayoutCheck = false;
|
||||
|
||||
private final Configuration conf;
|
||||
|
||||
/**
|
||||
* TxId of the last transaction that was included in the most
|
||||
@ -171,6 +173,7 @@ public NNStorage(Configuration conf,
|
||||
Collection<URI> imageDirs, Collection<URI> editsDirs)
|
||||
throws IOException {
|
||||
super(NodeType.NAME_NODE);
|
||||
this.conf = conf;
|
||||
|
||||
// this may modify the editsDirs, so copy before passing in
|
||||
setStorageDirectories(imageDirs,
|
||||
@ -312,10 +315,16 @@ synchronized void setStorageDirectories(Collection<URI> fsNameDirs,
|
||||
NameNodeDirType.IMAGE;
|
||||
// Add to the list of storage directories, only if the
|
||||
// URI is of type file://
|
||||
if(dirName.getScheme().compareTo("file") == 0) {
|
||||
this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
|
||||
if (dirName.getScheme().compareTo("file") == 0) {
|
||||
// Don't lock the dir if it's shared.
|
||||
StorageDirectory sd = new StorageDirectory(new File(dirName.getPath()),
|
||||
dirType,
|
||||
sharedEditsDirs.contains(dirName))); // Don't lock the dir if it's shared.
|
||||
sharedEditsDirs.contains(dirName),
|
||||
new FsPermission(conf.get(
|
||||
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT)));
|
||||
|
||||
this.addStorageDir(sd);
|
||||
}
|
||||
}
|
||||
|
||||
@ -324,9 +333,12 @@ synchronized void setStorageDirectories(Collection<URI> fsNameDirs,
|
||||
checkSchemeConsistency(dirName);
|
||||
// Add to the list of storage directories, only if the
|
||||
// URI is of type file://
|
||||
if(dirName.getScheme().compareTo("file") == 0) {
|
||||
if (dirName.getScheme().compareTo("file") == 0) {
|
||||
this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
|
||||
NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName)));
|
||||
NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName),
|
||||
new FsPermission(conf.get(
|
||||
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT))));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -5446,4 +5446,24 @@
|
||||
The queue size of BlockReportProcessingThread in BlockManager.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.storage.dir.perm</name>
|
||||
<value>700</value>
|
||||
<description>
|
||||
Permissions for the directories on the local filesystem where
|
||||
the DFS namenode stores the fsImage. The permissions can either be
|
||||
octal or symbolic.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.journalnode.edits.dir.perm</name>
|
||||
<value>700</value>
|
||||
<description>
|
||||
Permissions for the directories on the local filesystem where
|
||||
the DFS journal node stores the edits. The permissions can either be
|
||||
octal or symbolic.
|
||||
</description>
|
||||
</property>
|
||||
</configuration>
|
||||
|
@ -54,6 +54,9 @@
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.LocalFileSystem;
|
||||
import org.apache.hadoop.hdfs.server.common.Storage;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
@ -1259,6 +1262,17 @@ public static NNStorage setupEdits(List<URI> editUris, int numrolls,
|
||||
Collections.<URI>emptyList(),
|
||||
editUris);
|
||||
storage.format(new NamespaceInfo());
|
||||
// Verify permissions
|
||||
LocalFileSystem fs = LocalFileSystem.getLocal(getConf());
|
||||
for (URI uri : editUris) {
|
||||
String currDir = uri.getPath() + Path.SEPARATOR +
|
||||
Storage.STORAGE_DIR_CURRENT;
|
||||
FileStatus fileStatus = fs.getFileLinkStatus(new Path(currDir));
|
||||
FsPermission permission = fileStatus.getPermission();
|
||||
assertEquals(getConf().getInt(
|
||||
DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY, 700),
|
||||
permission.toOctal());
|
||||
}
|
||||
FSEditLog editlog = getFSEditLog(storage);
|
||||
// open the edit log and add two transactions
|
||||
// logGenerationStamp is used, simply because it doesn't
|
||||
|
@ -37,7 +37,9 @@
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.hadoop.fs.LocalFileSystem;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
@ -103,7 +105,7 @@ public void setUp() throws Exception {
|
||||
config = new HdfsConfiguration();
|
||||
hdfsDir = new File(MiniDFSCluster.getBaseDirectory());
|
||||
|
||||
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
|
||||
if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
|
||||
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
|
||||
}
|
||||
LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
|
||||
@ -789,4 +791,27 @@ public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
|
||||
return;
|
||||
}
|
||||
|
||||
@Test(timeout = 60000)
|
||||
public void testDirectoryPermissions() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
try (MiniDFSCluster dfsCluster
|
||||
= new MiniDFSCluster.Builder(conf).build()) {
|
||||
dfsCluster.waitActive();
|
||||
// name and edits
|
||||
List<StorageDirectory> nameDirs =
|
||||
dfsCluster.getNameNode().getFSImage().getStorage().getStorageDirs();
|
||||
Collection<URI> nameDirUris = nameDirs.stream().map(d -> d
|
||||
.getCurrentDir().toURI()).collect(Collectors.toList());
|
||||
assertNotNull(nameDirUris);
|
||||
LocalFileSystem fs = LocalFileSystem.getLocal(config);
|
||||
FsPermission permission = new FsPermission(conf.get(
|
||||
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT));
|
||||
for (URI uri : nameDirUris) {
|
||||
FileStatus fileStatus = fs.getFileLinkStatus(new Path(uri));
|
||||
assertEquals(permission.toOctal(),
|
||||
fileStatus.getPermission().toOctal());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user