HDFS-3172. dfs.upgrade.permission is dead code. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1307888 13f79535-47bb-0310-9956-ffa450edef68

commit eeb687daa7
parent ff897e75c9
@@ -86,8 +86,6 @@ Deprecated Properties
 *---+---+
 |dfs.socket.timeout | dfs.client.socket-timeout
 *---+---+
-|dfs.upgrade.permission | dfs.namenode.upgrade.permission
-*---+---+
 |dfs.write.packet.size | dfs.client-write-packet-size
 *---+---+
 |fs.checkpoint.dir | dfs.namenode.checkpoint.dir
@@ -286,6 +286,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3158. LiveNodes member of NameNodeMXBean should list non-DFS used
     space and capacity per DN. (atm)
 
+    HDFS-3172. dfs.upgrade.permission is dead code. (eli)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -239,11 +239,6 @@ to the web server.</p>
       <br />The name of the group of super-users.
     </li>
 
-    <li><code>dfs.namenode.upgrade.permission = 0777</code>
-      <br />The choice of initial mode during upgrade. The <em>x</em> permission is <em>never</em> set for files.
-      For configuration files, the decimal value <em>511<sub>10</sub></em> may be used.
-    </li>
-
     <li><code>fs.permissions.umask-mode = 022</code>
      <br />The <code>umask</code> used when creating files and directories. For configuration files, the decimal
      value <em>18<sub>10</sub></em> may be used.
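Note (not part of the commit): the decimal equivalents cited in the guide above (511 for 0777, 18 for 022) are simply the octal permission strings read as base-8 numbers. A minimal, hypothetical Java sketch of the conversion:

    // Hypothetical sketch, not HDFS code: octal permission strings vs. the decimal
    // values a configuration file may use, per the guide text above.
    public class OctalPermissionDemo {
      public static void main(String[] args) {
        System.out.println(Integer.parseInt("777", 8)); // 511, the old dfs.namenode.upgrade.permission default
        System.out.println(Integer.parseInt("022", 8)); // 18, i.e. fs.permissions.umask-mode = 022
      }
    }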
@@ -107,8 +107,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long   DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600;
   public static final String DFS_NAMENODE_CHECKPOINT_TXNS_KEY = "dfs.namenode.checkpoint.txns";
   public static final long   DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 40000;
-  public static final String DFS_NAMENODE_UPGRADE_PERMISSION_KEY = "dfs.namenode.upgrade.permission";
-  public static final int    DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT = 00777;
   public static final String DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
   public static final int    DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
   public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
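Aside (an illustrative sketch, not from this change): DFSConfigKeys pairs each key string with a default, and callers typically read the pair through a Hadoop Configuration, for example with conf.getLong(key, default). A minimal example using one of the unchanged constants above:

    // Illustrative only; assumes hadoop-common's org.apache.hadoop.conf.Configuration is on the classpath.
    import org.apache.hadoop.conf.Configuration;

    public class CheckpointTxnsDemo {
      static final String DFS_NAMENODE_CHECKPOINT_TXNS_KEY = "dfs.namenode.checkpoint.txns";
      static final long   DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 40000;

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        long txns = conf.getLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
                                 DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
        System.out.println(txns); // 40000 unless overridden in a *-site.xml
      }
    }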
@@ -86,7 +86,6 @@ private static void addDeprecatedKeys() {
     deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
     deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
-    deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY);
     deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
     deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
     deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);
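Aside (an assumption about the private helper, not the actual HdfsConfiguration source): the deprecate(...) calls above presumably register old-to-new key aliases via Configuration.addDeprecation, so a value set under a deprecated name still resolves under the new one. A minimal sketch under that assumption:

    // Sketch assuming deprecate() wraps Configuration.addDeprecation.
    import org.apache.hadoop.conf.Configuration;

    public class DeprecationDemo {
      private static void deprecate(String oldKey, String newKey) {
        Configuration.addDeprecation(oldKey, new String[] { newKey });
      }

      public static void main(String[] args) {
        deprecate("fs.checkpoint.period", "dfs.namenode.checkpoint.period");
        Configuration conf = new Configuration(false);
        conf.set("fs.checkpoint.period", "7200");
        // A read of the new key resolves the value set under the deprecated key.
        System.out.println(conf.get("dfs.namenode.checkpoint.period")); // 7200
      }
    }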
@@ -52,8 +52,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
@@ -118,7 +116,6 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.ServiceFailedException;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -267,7 +264,6 @@ private static final void logAuditEvent(UserGroupInformation ugi,
   private boolean persistBlocks;
   private UserGroupInformation fsOwner;
   private String supergroup;
-  private PermissionStatus defaultPermission;
   private boolean standbyShouldCheckpoint;
 
   // Scan interval is not configurable.
@@ -846,11 +842,6 @@ private void setConfigurationParameters(Configuration conf)
           "must not be specified if HA is not enabled.");
     }
 
-    short filePermission = (short)conf.getInt(DFS_NAMENODE_UPGRADE_PERMISSION_KEY,
-                                              DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT);
-    this.defaultPermission = PermissionStatus.createImmutable(
-        fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
-
     this.serverDefaults = new FsServerDefaults(
         conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
         conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),