HDFS-4637. INodeDirectory#replaceSelf4Quota may incorrectly convert a newly created directory to an INodeDirectoryWithSnapshot. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1463075 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-04-01 01:53:21 +00:00
parent 9602869d19
commit 8ee6ecaea4
4 changed files with 97 additions and 2 deletions

View File

@@ -220,3 +220,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4648. For snapshot deletion, when merging the diff from to-delete
snapshot to the prior snapshot, make sure files/directories created after
the prior snapshot get deleted. (Jing Zhao via szetszwo)
+HDFS-4637. INodeDirectory#replaceSelf4Quota may incorrectly convert a newly
+created directory to an INodeDirectoryWithSnapshot. (Jing Zhao via szetszwo)
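
The bug this entry fixes shows up when a directory is created after the latest snapshot of its snapshottable ancestor: such a directory is recorded in no snapshot, so setting a quota on it should only wrap it in an INodeDirectoryWithQuota, never in an INodeDirectoryWithSnapshot. The sketch below walks through that client-side sequence; it is illustrative only (the DistributedFileSystem handle is assumed to be supplied by the caller, the paths are made up, and allowSnapshot/createSnapshot/setQuota are written against the current client API, which may differ slightly from this branch). The new TestSetQuotaWithSnapshot test added later in this commit drives the same sequence against a MiniDFSCluster.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetQuotaAfterSnapshotSketch {
  // Hedged sketch only: reproduces the sequence behind HDFS-4637, not the fix.
  static void reproduce(DistributedFileSystem hdfs) throws Exception {
    Path dir = new Path("/TestSnapshot");
    hdfs.mkdirs(dir);
    hdfs.allowSnapshot(dir);          // make dir snapshottable
    hdfs.createSnapshot(dir, "s1");   // s1 is taken before "sub" exists
    Path sub = new Path(dir, "sub");
    hdfs.mkdirs(sub);                 // created after s1, so present in no snapshot
    hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    // Before the fix, the NameNode could convert "sub" to an
    // INodeDirectoryWithSnapshot at this point; with the fix it becomes a
    // plain INodeDirectoryWithQuota, which the new test below asserts.
  }
}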

View File

@@ -159,7 +159,7 @@ INodeDirectoryWithQuota replaceSelf4Quota(final Snapshot latest,
    Preconditions.checkState(!(this instanceof INodeDirectoryWithQuota),
        "this is already an INodeDirectoryWithQuota, this=%s", this);
-    if (latest == null) {
+    if (!this.isInLatestSnapshot(latest)) {
      final INodeDirectoryWithQuota q = new INodeDirectoryWithQuota(
          this, true, nsQuota, dsQuota);
      replaceSelf(q);
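
For contrast, when the directory does exist in the latest snapshot, isInLatestSnapshot(latest) is true and the method intentionally takes the other branch, so the quota conversion keeps the directory's snapshot bookkeeping. A hedged sketch of that case, under the same assumptions as above (caller-supplied DistributedFileSystem handle, illustrative paths, current client API):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetQuotaOnSnapshottedDirSketch {
  // Hedged contrast sketch, not part of this commit.
  static void run(DistributedFileSystem hdfs) throws Exception {
    Path dir = new Path("/TestSnapshot");
    Path sub = new Path(dir, "sub");
    hdfs.mkdirs(sub);                 // "sub" exists before any snapshot
    hdfs.allowSnapshot(dir);
    hdfs.createSnapshot(dir, "s1");   // s1 captures "sub"
    hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    // "sub" is in the latest snapshot, so the old check (latest == null) and
    // the fixed check agree here; the snapshot-aware conversion is intended.
  }
}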

View File

@@ -0,0 +1,93 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

public class TestSetQuotaWithSnapshot {
  protected static final long seed = 0;
  protected static final short REPLICATION = 3;
  protected static final long BLOCKSIZE = 1024;

  protected Configuration conf;
  protected MiniDFSCluster cluster;
  protected FSNamesystem fsn;
  protected FSDirectory fsdir;
  protected DistributedFileSystem hdfs;

  @Rule
  public ExpectedException exception = ExpectedException.none();

  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
        .format(true).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    fsdir = fsn.getFSDirectory();
    hdfs = cluster.getFileSystem();
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test (timeout=60000)
  public void testSetQuota() throws Exception {
    final Path dir = new Path("/TestSnapshot");
    hdfs.mkdirs(dir);
    // allow snapshot on dir and create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");

    Path sub = new Path(dir, "sub");
    hdfs.mkdirs(sub);
    Path fileInSub = new Path(sub, "file");
    DFSTestUtil.createFile(hdfs, fileInSub, BLOCKSIZE, REPLICATION, seed);
    INodeDirectory subNode = INodeDirectory.valueOf(
        fsdir.getINode(sub.toString()), sub);
    // subNode should be a INodeDirectory, but not an INodeDirectoryWithSnapshot
    assertFalse(subNode instanceof INodeDirectoryWithSnapshot);

    hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    subNode = INodeDirectory.valueOf(fsdir.getINode(sub.toString()), sub);
    assertTrue(subNode.isQuotaSet());
    assertFalse(subNode instanceof INodeDirectoryWithSnapshot);
  }
}

View File

@@ -61,7 +61,6 @@ public class TestSnapshotDeletion {
  protected static final short REPLICATION = 3;
  protected static final short REPLICATION_1 = 2;
  protected static final long BLOCKSIZE = 1024;
-  public static final int SNAPSHOTNUMBER = 10;
  private final Path dir = new Path("/TestSnapshot");
  private final Path sub = new Path(dir, "sub1");