HDFS-2470. NN should automatically set permissions on dfs.namenode.*.dir. Contributed by Siddharth Wagle.
parent 8660e48ca1
commit 07e3cf952e
@@ -562,6 +562,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   public static final String DFS_NAMENODE_NAME_DIR_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_NAME_DIR_KEY;
+  public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_KEY =
+      "dfs.namenode.storage.dir.perm";
+  public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT =
+      "700";
   public static final String DFS_NAMENODE_EDITS_DIR_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_EDITS_DIR_KEY;
   public static final String DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir";
@@ -1109,6 +1113,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485;
   public static final String DFS_JOURNALNODE_RPC_BIND_HOST_KEY = "dfs.journalnode.rpc-bind-host";
   public static final String DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_RPC_PORT_DEFAULT;
+  public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY =
+      "dfs.journalnode.edits.dir.perm";
+  public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT =
+      "700";

   public static final String DFS_JOURNALNODE_HTTP_ADDRESS_KEY = "dfs.journalnode.http-address";
   public static final int DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
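The four new constants above only name the configuration keys and their "700" defaults; the code that applies them comes further down in Storage, NNStorage, and JNStorage. As a minimal sketch of how a site override would flow through these keys, assuming only a stock Hadoop Configuration (the "750" value and the PermKeySketch class are hypothetical, not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class PermKeySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical override; when unset, the "700" default applies.
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY, "750");
    FsPermission perm = new FsPermission(conf.get(
        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT));
    System.out.println(perm); // prints rwxr-x--- for "750"
  }
}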
@@ -26,6 +26,8 @@

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -70,7 +72,9 @@ protected JNStorage(Configuration conf, File logDir, StartupOption startOpt,
       StorageErrorReporter errorReporter) throws IOException {
     super(NodeType.JOURNAL_NODE);

-    sd = new StorageDirectory(logDir);
+    sd = new StorageDirectory(logDir, null, false, new FsPermission(conf.get(
+        DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY,
+        DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT)));
     this.addStorageDir(sd);
     this.fjm = new FileJournalManager(conf, sd, errorReporter);

@@ -27,11 +27,14 @@
 import java.nio.channels.OverlappingFileLockException;
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
+import java.util.Set;
 import java.util.concurrent.CopyOnWriteArrayList;

 import org.apache.commons.io.FileUtils;
@@ -39,6 +42,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -276,6 +280,7 @@ public static class StorageDirectory implements FormatConfirmable {
     final boolean isShared;
     final StorageDirType dirType; // storage dir type
     FileLock lock;                // storage lock
+    private final FsPermission permission;

     private String storageUuid = null;      // Storage directory identifier.

@@ -311,6 +316,11 @@ public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) {
       this(dir, dirType, isShared, null);
     }

+    public StorageDirectory(File dir, StorageDirType dirType,
+        boolean isShared, FsPermission permission) {
+      this(dir, dirType, isShared, null, permission);
+    }
+
     /**
      * Constructor
      * @param dirType storage directory type
@@ -320,7 +330,7 @@ public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) {
      */
     public StorageDirectory(StorageDirType dirType, boolean isShared,
         StorageLocation location) {
-      this(getStorageLocationFile(location), dirType, isShared, location);
+      this(getStorageLocationFile(location), dirType, isShared, location, null);
     }

     /**
@@ -334,7 +344,7 @@ public StorageDirectory(StorageDirType dirType, boolean isShared,
     public StorageDirectory(String bpid, StorageDirType dirType,
         boolean isShared, StorageLocation location) {
       this(getBlockPoolCurrentDir(bpid, location), dirType,
-          isShared, location);
+          isShared, location, null);
     }

     private static File getBlockPoolCurrentDir(String bpid,
@@ -348,13 +358,14 @@ private static File getBlockPoolCurrentDir(String bpid,
     }

     private StorageDirectory(File dir, StorageDirType dirType,
-        boolean isShared, StorageLocation location) {
+        boolean isShared, StorageLocation location, FsPermission permission) {
       this.root = dir;
       this.lock = null;
       // default dirType is UNDEFINED
       this.dirType = (dirType == null ? NameNodeDirType.UNDEFINED : dirType);
       this.isShared = isShared;
       this.location = location;
+      this.permission = permission;
       assert location == null || dir == null ||
           dir.getAbsolutePath().startsWith(
               new File(location.getUri()).getAbsolutePath()):
@@ -432,9 +443,15 @@ public void clearDirectory() throws IOException {
         if (!(FileUtil.fullyDelete(curDir)))
           throw new IOException("Cannot remove current directory: " + curDir);
       }
-      if (!curDir.mkdirs())
+      if (!curDir.mkdirs()) {
         throw new IOException("Cannot create directory " + curDir);
+      }
+      if (permission != null) {
+        Set<PosixFilePermission> permissions =
+            PosixFilePermissions.fromString(permission.toString());
+        Files.setPosixFilePermissions(curDir.toPath(), permissions);
+      }
     }

     /**
      * Directory {@code current} contains latest files defining
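The new block above is the heart of the change: the FsPermission carried by a StorageDirectory is converted to java.nio POSIX permissions and applied whenever the current/ directory is recreated. A self-contained sketch of that conversion, assuming a POSIX filesystem (the directory path and class name are hypothetical):

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;

import org.apache.hadoop.fs.permission.FsPermission;

public class PosixBridgeSketch {
  public static void main(String[] args) throws Exception {
    // FsPermission accepts the same strings the new config keys carry.
    FsPermission perm = new FsPermission("700");
    // toString() yields "rwx------", the nine-character form that
    // PosixFilePermissions.fromString() expects.
    Set<PosixFilePermission> posix =
        PosixFilePermissions.fromString(perm.toString());
    // Hypothetical path; setPosixFilePermissions throws
    // UnsupportedOperationException on non-POSIX filesystems.
    Path dir = Files.createDirectories(Paths.get("/tmp/storage-perm-demo"));
    Files.setPosixFilePermissions(dir, posix);
  }
}

In the commit itself the call is guarded by permission != null because most StorageDirectory constructors still pass a null permission, leaving existing behavior untouched.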
@@ -655,8 +672,9 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage,
           return StorageState.NON_EXISTENT;
         }
         LOG.info("{} does not exist. Creating ...", rootPath);
-        if (!root.mkdirs())
+        if (!root.mkdirs()) {
           throw new IOException("Cannot create directory " + rootPath);
+        }
         hadMkdirs = true;
       }
       // or is inaccessible
@@ -300,7 +300,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target,
       // triggered.
       LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
       LOG.info("Formatting ...");
-      sd.clearDirectory(); // create empty currrent dir
+      sd.clearDirectory(); // create empty current dir
       // For non-HA, no further action is needed here, as saveNamespace will
       // take care of the rest.
       if (!target.isHaEnabled()) {
@@ -38,6 +38,8 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -128,7 +130,7 @@ public boolean isOfType(StorageDirType type) {
   private boolean restoreFailedStorage = false;
   private final Object restorationLock = new Object();
   private boolean disablePreUpgradableLayoutCheck = false;
+  private final Configuration conf;

   /**
    * TxId of the last transaction that was included in the most
@@ -171,6 +173,7 @@ public NNStorage(Configuration conf,
       Collection<URI> imageDirs, Collection<URI> editsDirs)
       throws IOException {
     super(NodeType.NAME_NODE);
+    this.conf = conf;

     // this may modify the editsDirs, so copy before passing in
     setStorageDirectories(imageDirs,
@@ -313,9 +316,15 @@ synchronized void setStorageDirectories(Collection<URI> fsNameDirs,
       // Add to the list of storage directories, only if the
       // URI is of type file://
       if (dirName.getScheme().compareTo("file") == 0) {
-        this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
-            dirType,
-            sharedEditsDirs.contains(dirName))); // Don't lock the dir if it's shared.
+        // Don't lock the dir if it's shared.
+        StorageDirectory sd = new StorageDirectory(new File(dirName.getPath()),
+            dirType,
+            sharedEditsDirs.contains(dirName),
+            new FsPermission(conf.get(
+                DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
+                DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT)));
+
+        this.addStorageDir(sd);
       }
     }

@@ -326,7 +335,10 @@ synchronized void setStorageDirectories(Collection<URI> fsNameDirs,
       // URI is of type file://
       if (dirName.getScheme().compareTo("file") == 0) {
         this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
-            NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName)));
+            NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName),
+            new FsPermission(conf.get(
+                DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
+                DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT))));
       }
     }
   }
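One detail worth making explicit (an observation on the diff, not text from the commit): the edits-directory branch above reuses DFS_NAMENODE_NAME_DIR_PERMISSION_KEY rather than introducing a separate edits key, so the single dfs.namenode.storage.dir.perm setting governs both fsimage and edits directory modes on the NameNode. A hypothetical helper capturing the lookup both branches share:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;

final class NameDirPermissions {
  private NameDirPermissions() {}

  // The same key resolves for both name and edits storage directories.
  static FsPermission resolve(Configuration conf) {
    return new FsPermission(conf.get(
        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT));
  }
}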
@@ -5446,4 +5446,24 @@
     The queue size of BlockReportProcessingThread in BlockManager.
   </description>
 </property>
+
+<property>
+  <name>dfs.namenode.storage.dir.perm</name>
+  <value>700</value>
+  <description>
+    Permissions for the directories on the local filesystem where
+    the DFS namenode stores the fsImage. The permissions can either be
+    octal or symbolic.
+  </description>
+</property>
+
+<property>
+  <name>dfs.journalnode.edits.dir.perm</name>
+  <value>700</value>
+  <description>
+    Permissions for the directories on the local filesystem where
+    the DFS journal node stores the edits. The permissions can either be
+    octal or symbolic.
+  </description>
+</property>
 </configuration>
@@ -54,6 +54,9 @@
 import java.util.regex.Pattern;

 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -1259,6 +1262,17 @@ public static NNStorage setupEdits(List<URI> editUris, int numrolls,
         Collections.<URI>emptyList(),
         editUris);
     storage.format(new NamespaceInfo());
+    // Verify permissions
+    LocalFileSystem fs = LocalFileSystem.getLocal(getConf());
+    for (URI uri : editUris) {
+      String currDir = uri.getPath() + Path.SEPARATOR +
+          Storage.STORAGE_DIR_CURRENT;
+      FileStatus fileStatus = fs.getFileLinkStatus(new Path(currDir));
+      FsPermission permission = fileStatus.getPermission();
+      assertEquals(getConf().getInt(
+          DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY, 700),
+          permission.toOctal());
+    }
     FSEditLog editlog = getFSEditLog(storage);
     // open the edit log and add two transactions
     // logGenerationStamp is used, simply because it doesn't
@@ -37,7 +37,9 @@
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.stream.Collectors;

+import org.apache.hadoop.fs.LocalFileSystem;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -789,4 +791,27 @@ public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
     return;
   }

+  @Test(timeout = 60000)
+  public void testDirectoryPermissions() throws Exception {
+    Configuration conf = new Configuration();
+    try (MiniDFSCluster dfsCluster
+        = new MiniDFSCluster.Builder(conf).build()) {
+      dfsCluster.waitActive();
+      // name and edits
+      List<StorageDirectory> nameDirs =
+          dfsCluster.getNameNode().getFSImage().getStorage().getStorageDirs();
+      Collection<URI> nameDirUris = nameDirs.stream().map(d -> d
+          .getCurrentDir().toURI()).collect(Collectors.toList());
+      assertNotNull(nameDirUris);
+      LocalFileSystem fs = LocalFileSystem.getLocal(config);
+      FsPermission permission = new FsPermission(conf.get(
+          DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
+          DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT));
+      for (URI uri : nameDirUris) {
+        FileStatus fileStatus = fs.getFileLinkStatus(new Path(uri));
+        assertEquals(permission.toOctal(),
+            fileStatus.getPermission().toOctal());
+      }
+    }
+  }
 }