HDFS-6246. Remove 'dfs.support.append' flag from trunk code. Contributed by Uma Maheswara Rao G.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1589927 13f79535-47bb-0310-9956-ffa450edef68
Uma Maheswara Rao G 2014-04-25 05:19:50 +00:00
parent b34b7db215
commit 0931bd94be
6 changed files with 2 additions and 22 deletions


@@ -125,6 +125,8 @@ Trunk (Unreleased)
     HDFS-6228. comments typo fix for FsDatasetImpl.java (zhaoyunjiong via umamahesh)
 
+    HDFS-6246. Remove 'dfs.support.append' flag from trunk code. (umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES


@@ -381,8 +381,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";
   public static final int DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT = 10;
-  public static final String DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
-  public static final boolean DFS_SUPPORT_APPEND_DEFAULT = true;
   public static final String DFS_HTTP_POLICY_KEY = "dfs.http.policy";
   public static final String DFS_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY.name();
   public static final String DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";
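Note: with these two constants gone, any out-of-tree code that still probes the flag has to fall back to the raw key string. A minimal sketch of the pre-HDFS-6246 lookup pattern, shown only for illustration (after this commit the lookup is meaningless, since append is always enabled):

    Configuration conf = new HdfsConfiguration();
    // Old opt-in check; DFS_SUPPORT_APPEND_KEY / DFS_SUPPORT_APPEND_DEFAULT
    // no longer exist, so the literal key is the only way to spell this.
    boolean supportAppends = conf.getBoolean("dfs.support.append", true);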


@@ -95,12 +95,6 @@ class BlockPoolSlice {
       FileUtil.fullyDelete(tmpDir);
     }
     this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
-    final boolean supportAppends = conf.getBoolean(
-        DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
-        DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
-    if (rbwDir.exists() && !supportAppends) {
-      FileUtil.fullyDelete(rbwDir);
-    }
     final int maxBlocksPerDir = conf.getInt(
         DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
         DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);


@@ -83,8 +83,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_KEY;
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.BufferedWriter;
@@ -446,7 +444,6 @@ private void logAuditEvent(boolean succeeded,
   NameNodeResourceChecker nnResourceChecker;
 
   private final FsServerDefaults serverDefaults;
-  private final boolean supportAppends;
   private final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
 
   private volatile SafeModeInfo safeMode; // safe mode information
@@ -754,8 +751,6 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
         DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT);
     this.accessTimePrecision = conf.getLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT);
-    this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY, DFS_SUPPORT_APPEND_DEFAULT);
-    LOG.info("Append Enabled: " + supportAppends);
     this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
@@ -2597,12 +2592,6 @@ private LocatedBlock appendFileInt(String src, String holder,
           + ", clientMachine=" + clientMachine);
     }
     boolean skipSync = false;
-    if (!supportAppends) {
-      throw new UnsupportedOperationException(
-          "Append is not enabled on this NameNode. Use the " +
-          DFS_SUPPORT_APPEND_KEY + " configuration option to enable it.");
-    }
     LocatedBlock lb = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
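With the guard removed, appendFileInt() falls straight through to the permission check, so clients need no opt-in configuration at all. A minimal client-side sketch, assuming a reachable HDFS cluster and an existing file (the path and class name are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class AppendSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        FileSystem fs = FileSystem.get(conf);
        // No dfs.support.append setting anywhere; append is simply available.
        try (FSDataOutputStream out = fs.append(new Path("/tmp/existing-file"))) {
          out.writeBytes("more data\n");
        }
      }
    }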


@@ -345,7 +345,6 @@ public void testAppendAfterSoftLimit()
       throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     //Set small soft-limit for lease
     final long softLimit = 1L;
     final long hardLimit = 9999999L;


@@ -201,7 +201,6 @@ private static Path getDataDir(Configuration conf) {
   @BeforeClass
   public static void beforeClass() throws Exception {
     bench = new TestDFSIO();
-    bench.getConf().setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     cluster = new MiniDFSCluster.Builder(bench.getConf())
         .numDataNodes(2)
         .format(true)
@@ -733,7 +732,6 @@ else if(testType == TestType.TEST_TYPE_READ_SKIP && skipSize == 0)
     config.setInt("test.io.file.buffer.size", bufferSize);
     config.setLong("test.io.skip.size", skipSize);
-    config.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
     FileSystem fs = FileSystem.get(config);
     if (isSequential) {
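The same simplification applies to test setup: a MiniDFSCluster exercises append paths with no extra configuration. A condensed sketch of the trimmed beforeClass() flow, assuming the hadoop-hdfs test artifact is on the classpath:

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // The setBoolean(DFS_SUPPORT_APPEND_KEY, true) call is gone; nothing replaces it.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .numDataNodes(2)
        .format(true)
        .build();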