HDFS-6191. Disable quota checks when replaying edit log.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1585544 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Kihwal Lee 2014-04-07 18:25:12 +00:00
parent aea35d93e3
commit fe328621d4
6 changed files with 64 additions and 4 deletions

View File

@ -283,6 +283,8 @@ Release 2.5.0 - UNRELEASED
HDFS-6167. Relocate the non-public API classes in the hdfs.client package.
(szetszwo)
HDFS-6191. Disable quota checks when replaying edit log. (kihwal)
OPTIMIZATIONS
BUG FIXES

View File

@ -134,6 +134,7 @@ protected void loadNamesystem(Configuration conf) throws IOException {
BN_SAFEMODE_EXTENSION_DEFAULT);
BackupImage bnImage = new BackupImage(conf);
this.namesystem = new FSNamesystem(conf, bnImage);
namesystem.dir.disableQuotaChecks();
bnImage.setNamesystem(namesystem);
bnImage.recoverCreateRead();
}

View File

@ -117,6 +117,7 @@ private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) {
FSImage fsImage;
private final FSNamesystem namesystem;
private volatile boolean ready = false;
private volatile boolean skipQuotaCheck = false; //skip while consuming edits
private final int maxComponentLength;
private final int maxDirItems;
private final int lsLimit; // max list limit
@ -283,6 +284,16 @@ void waitForReady() {
}
}
/** Re-enable quota verification (used once edit log replay has finished). */
void enableQuotaChecks() {
skipQuotaCheck = false;
}
/** Disable quota verification (used while consuming edits, e.g. backup/standby NN). */
void disableQuotaChecks() {
skipQuotaCheck = true;
}
/**
* Add the given filename to the fs.
* @throws FileAlreadyExistsException
@ -1825,7 +1836,7 @@ private void updateCount(INodesInPath iip, int numOfINodes,
if (numOfINodes > inodes.length) {
numOfINodes = inodes.length;
}
if (checkQuota) {
if (checkQuota && !skipQuotaCheck) {
verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
}
unprotectedUpdateCount(iip, numOfINodes, nsDelta, dsDelta);
@ -2117,7 +2128,7 @@ private static void verifyQuota(INode[] inodes, int pos, long nsDelta,
*/
private void verifyQuotaForRename(INode[] src, INode[] dst)
throws QuotaExceededException {
if (!ready) {
if (!ready || skipQuotaCheck) {
// Do not check quota if edits log is still being processed
return;
}

View File

@ -1033,7 +1033,9 @@ void startActiveServices() throws IOException {
dir.fsImage.editLog.openForWrite();
}
// Enable quota checks.
dir.enableQuotaChecks();
if (haEnabled) {
// Renew all of the leases before becoming active.
// This is because, while we were in standby mode,
@ -1140,6 +1142,8 @@ void startStandbyServices(final Configuration conf) throws IOException {
blockManager.setPostponeBlocksFromFuture(true);
// Disable quota checks while in standby.
dir.disableQuotaChecks();
editLogTailer = new EditLogTailer(this, conf);
editLogTailer.start();
if (standbyShouldCheckpoint) {

View File

@ -248,6 +248,9 @@ private void initialize(final Configuration conf,
namesystem = new FSNamesystem(conf, checkpointImage, true);
// Disable quota checks
namesystem.dir.disableQuotaChecks();
// Initialize other scheduling parameters from the configuration
checkpointConf = new CheckpointConf(conf);
@ -850,7 +853,7 @@ public void selectInputStreams(Collection<EditLogInputStream> streams,
Collection<URI> imageDirs,
List<URI> editsDirs) throws IOException {
super(conf, imageDirs, editsDirs);
// the 2NN never writes edits -- it only downloads them. So
// we shouldn't have any editLog instance. Setting to null
// makes sure we don't accidentally depend on it.

View File

@ -20,12 +20,15 @@
import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -55,6 +58,7 @@ public class TestFSDirectory {
private final Path file5 = new Path(sub1, "z_file5");
private final Path sub2 = new Path(dir, "sub2");
private final Path file6 = new Path(sub2, "file6");
private Configuration conf;
private MiniDFSCluster cluster;
@ -124,6 +128,41 @@ public void testReset() throws Exception {
fsdir.imageLoadComplete();
Assert.assertTrue(fsdir.isReady());
}
/**
 * Verify that {@link FSDirectory#disableQuotaChecks()} makes quota
 * verification a no-op and that {@link FSDirectory#enableQuotaChecks()}
 * restores enforcement.
 */
@Test
public void testSkipQuotaCheck() throws Exception {
  try {
    // set quota. nsQuota of 1 means no files can be created
    // under this directory.
    hdfs.setQuota(sub2, 1, Long.MAX_VALUE);

    // create a file
    try {
      // this should fail
      DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
      // use Assert.fail (not a thrown IOException) so a missed quota
      // violation is reported as an assertion failure, not a test error
      Assert.fail("The create should have failed.");
    } catch (NSQuotaExceededException qe) {
      // expected: nsQuota of 1 forbids any children
    }
    // disable the quota check and retry. this should succeed.
    fsdir.disableQuotaChecks();
    DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);

    // trying again after re-enabling the check.
    hdfs.delete(file6, false); // cleanup
    fsdir.enableQuotaChecks();
    try {
      // this should fail
      DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
      Assert.fail("The create should have failed.");
    } catch (NSQuotaExceededException qe) {
      // expected: enforcement is active again
    }
  } finally {
    hdfs.delete(file6, false); // cleanup, in case the test failed in the middle.
    hdfs.setQuota(sub2, Long.MAX_VALUE, Long.MAX_VALUE);
  }
}
static void checkClassName(String line) {
int i = line.lastIndexOf('(');