HDFS-4767. If a directory is snapshottable, do not replace it when clearing quota. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1476454 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-04-27 00:05:49 +00:00
parent 60341dae19
commit 5276f4e04d
3 changed files with 81 additions and 5 deletions


@@ -315,3 +315,6 @@ Branch-2802 Snapshot (Unreleased)
  HDFS-4650. When passing two non-existing snapshot names to snapshotDiff, it
  returns success if the names are the same. (Jing Zhao via szetszwo)

  HDFS-4767. If a directory is snapshottable, do not replace it when clearing
  quota. (Jing Zhao via szetszwo)
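For reference, a quota is cleared through the same DistributedFileSystem.setQuota call that sets one: HdfsConstants.QUOTA_RESET clears the quota, while HdfsConstants.QUOTA_DONT_SET leaves the current value unchanged. The snippet below is a minimal sketch of the user-visible scenario behind this entry, run against an in-process MiniDFSCluster; the class name and path are illustrative and not part of the patch.

// Sketch only (not part of the patch): with this fix, clearing the quota of a
// snapshottable directory no longer replaces its inode, so the directory keeps
// its snapshottable status.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public class ClearQuotaOnSnapshottableDirExample {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      Path dir = new Path("/snapshottableDir");  // illustrative path
      hdfs.mkdirs(dir);
      hdfs.allowSnapshot(dir);
      // set a namespace quota only, then clear both quotas again
      hdfs.setQuota(dir, 100, HdfsConstants.QUOTA_DONT_SET);
      hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
      // the directory is still reported as snapshottable after the quota reset
      SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
      System.out.println("snapshottable dirs: " + status.length);  // expect 1
    } finally {
      cluster.shutdown();
    }
  }
}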


@@ -2349,12 +2349,17 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
         quotaNode.setSpaceConsumed(counts.get(Quota.NAMESPACE),
             counts.get(Quota.DISKSPACE));
       } else if (!quotaNode.isQuotaSet() && latest == null) {
-        // will not come here for root because root's nsQuota is always set
-        INodeDirectory newNode = quotaNode.replaceSelf4INodeDirectory();
-        // update the inodeMap
-        inodeMap.put(newNode);
-        return newNode;
+        // do not replace the node if the node is a snapshottable directory
+        // without snapshots
+        if (!(quotaNode instanceof INodeDirectoryWithSnapshot)) {
+          // will not come here for root because root is snapshottable and
+          // root's nsQuota is always set
+          INodeDirectory newNode = quotaNode.replaceSelf4INodeDirectory();
+          // update the inodeMap
+          inodeMap.put(newNode);
+          return newNode;
+        }
       }
     } else {
       // a non-quota directory; so replace it with a directory with quota
       INodeDirectory newNode = dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota);
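Read as a whole, the change gates the existing replaceSelf4INodeDirectory path behind an instanceof check, so a directory that carries snapshot state is never swapped out for a plain INodeDirectory when its quota is cleared. Below is a simplified restatement of that decision; the class and method names are made up, the INodeDirectoryWithQuota parameter type is an assumption, and unlike the real code in unprotectedSetQuota it does not update the inodeMap.

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;

// Sketch only, not part of the patch: restates the keep-vs-replace decision.
class ClearQuotaSketch {
  static INodeDirectory clearQuota(INodeDirectoryWithQuota quotaNode) {
    if (quotaNode instanceof INodeDirectoryWithSnapshot) {
      // snapshottable dir, or dir with snapshots: keep the node so its
      // snapshot state survives the quota reset
      return quotaNode;
    }
    // ordinary quota directory: safe to fall back to a plain INodeDirectory
    return quotaNode.replaceSelf4INodeDirectory();
  }
}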


@@ -17,18 +17,27 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@@ -90,4 +99,63 @@ public void testSetQuota() throws Exception {
    assertTrue(subNode.isQuotaSet());
    assertFalse(subNode instanceof INodeDirectoryWithSnapshot);
  }

  /**
   * Test clear quota of a snapshottable dir or a dir with snapshot.
   */
  @Test
  public void testClearQuota() throws Exception {
    final Path dir = new Path("/TestSnapshot");
    hdfs.mkdirs(dir);
    hdfs.allowSnapshot(dir);

    hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
        HdfsConstants.QUOTA_DONT_SET);
    INode dirNode = fsdir.getINode4Write(dir.toString());
    assertTrue(dirNode instanceof INodeDirectorySnapshottable);
    assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
        .size());

    hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1,
        HdfsConstants.QUOTA_DONT_SET - 1);
    dirNode = fsdir.getINode4Write(dir.toString());
    assertTrue(dirNode instanceof INodeDirectorySnapshottable);
    assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
        .size());

    hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    dirNode = fsdir.getINode4Write(dir.toString());
    assertTrue(dirNode instanceof INodeDirectorySnapshottable);
    assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
        .size());

    // allow snapshot on dir and create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");

    // clear quota of dir
    hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    // dir should still be a snapshottable directory
    dirNode = fsdir.getINode4Write(dir.toString());
    assertTrue(dirNode instanceof INodeDirectorySnapshottable);
    assertEquals(1, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
        .size());
    SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
    assertEquals(1, status.length);
    assertEquals(dir, status[0].getFullPath());

    final Path subDir = new Path(dir, "sub");
    hdfs.mkdirs(subDir);
    hdfs.createSnapshot(dir, "s2");
    final Path file = new Path(subDir, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    INode subNode = fsdir.getINode4Write(subDir.toString());
    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
    List<DirectoryDiff> diffList =
        ((INodeDirectoryWithSnapshot) subNode).getDiffs().asList();
    assertEquals(1, diffList.size());
    assertEquals("s2", Snapshot.getSnapshotName(diffList.get(0).snapshot));
    List<INode> createdList =
        diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
    assertEquals(1, createdList.size());
    assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));
  }
}