HDFS-15313. Ensure inodes in active filesystem are not deleted during snapshot delete. Contributed by Shashikant Banerjee.
parent 263c76b678
commit 82343790ee
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -17,27 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.PrintStream;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.List;
-import java.util.Map;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
@@ -46,9 +40,14 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.List;
+import java.util.Map;
 
 /**
  * We keep an in-memory representation of the file/block hierarchy.
@@ -225,6 +224,27 @@ public INodeAttributes getSnapshotINode(final int snapshotId) {
     return this;
   }
 
+  /** Is this inode in the current state? */
+  public boolean isInCurrentState() {
+    if (isRoot()) {
+      return true;
+    }
+    final INodeDirectory parentDir = getParent();
+    if (parentDir == null) {
+      return false; // this inode is only referenced in snapshots
+    }
+    if (!parentDir.isInCurrentState()) {
+      return false;
+    }
+    final INode child = parentDir.getChild(getLocalNameBytes(),
+        Snapshot.CURRENT_STATE_ID);
+    if (this == child) {
+      return true;
+    }
+    return child != null && child.isReference() &&
+        this.equals(child.asReference().getReferredINode());
+  }
+
   /** Is this inode in the latest snapshot? */
   public final boolean isInLatestSnapshot(final int latestSnapshotId) {
     if (latestSnapshotId == Snapshot.CURRENT_STATE_ID ||
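The new isInCurrentState() walks up the parent chain and asks, level by level, whether each ancestor still resolves this inode under Snapshot.CURRENT_STATE_ID, either directly or through the referred inode of a rename reference. The toy model below (plain Java, not NameNode code; all names are illustrative and it omits the reference-node case) captures the reachability idea: an inode is "in the current state" only if every ancestor in the live tree still lists it by name.

import java.util.HashMap;
import java.util.Map;

/** Toy model of the reachability check, not part of HDFS. */
class ToyInode {
  final String name;
  ToyInode parent;  // stays set even after deletion, like a snapshot reference
  final Map<String, ToyInode> children = new HashMap<>();

  ToyInode(String name) { this.name = name; }

  boolean isInCurrentState() {
    if (parent == null) {
      return name.equals("/");  // only the root has no parent and is current
    }
    if (!parent.isInCurrentState()) {
      return false;             // an ancestor was deleted or renamed away
    }
    return parent.children.get(name) == this;  // parent must still list us
  }

  public static void main(String[] args) {
    ToyInode root = new ToyInode("/");
    ToyInode d = new ToyInode("d");
    d.parent = root;
    root.children.put("d", d);
    ToyInode f = new ToyInode("f");
    f.parent = d;
    d.children.put("f", f);
    System.out.println(f.isInCurrentState());  // true: reachable from root
    d.children.remove("f");                    // delete f from the live tree;
                                               // a snapshot could still hold it
    System.out.println(f.isInCurrentState());  // false: snapshot-only now
  }
}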
@@ -234,6 +254,8 @@ public final boolean isInLatestSnapshot(final int latestSnapshotId) {
     // if parent is a reference node, parent must be a renamed node. We can
     // stop the check at the reference node.
     if (parent != null && parent.isReference()) {
+      // TODO: Is it a bug to return true?
+      //       Some ancestor nodes may not be in the latest snapshot.
       return true;
     }
     final INodeDirectory parentDir = getParent();
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java

@@ -739,19 +739,22 @@ public void cleanDirectory(INode.ReclaimContext reclaimContext,
       // were created before "prior" will be covered by the later
       // cleanSubtreeRecursively call.
       if (priorCreated != null) {
-        if (currentINode.isLastReference() &&
-            currentINode.getDiffs().getLastSnapshotId() == prior) {
-          // If this is the last reference of the directory inode and it
-          // can not be accessed in any of the subsequent snapshots i.e,
-          // this is the latest snapshot diff and if this is the last
-          // reference, the created list can be
-          // destroyed.
-          priorDiff.getChildrenDiff().destroyCreatedList(
-              reclaimContext, currentINode);
-        } else {
-          // we only check the node originally in prior's created list
-          for (INode cNode : priorDiff.diff.getCreatedUnmodifiable()) {
-            if (priorCreated.containsKey(cNode)) {
+        // The nodes in priorCreated must be destroyed if
+        // (1) this is the last reference, and
+        // (2) prior is the last snapshot, and
+        // (3) currentINode is not in the current state.
+        final boolean destroy = currentINode.isLastReference()
+            && currentINode.getDiffs().getLastSnapshotId() == prior
+            && !currentINode.isInCurrentState();
+        // we only check the node originally in prior's created list
+        for (INode cNode : new ArrayList<>(priorDiff.
+            diff.getCreatedUnmodifiable())) {
+          if (priorCreated.containsKey(cNode)) {
+            if (destroy) {
+              cNode.destroyAndCollectBlocks(reclaimContext);
+              currentINode.removeChild(cNode);
+              priorDiff.diff.removeCreated(cNode);
+            } else {
               cNode.cleanSubtree(reclaimContext, snapshot, NO_SNAPSHOT_ID);
             }
           }
         }
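This hunk is the substance of the fix. The old code destroyed prior's entire created list wholesale whenever this was the last reference and prior was the last snapshot, even if some of those inodes had since been renamed back under the active namespace. The new code adds condition (3), !currentINode.isInCurrentState(), and walks the created list node by node, so a child still reachable from the active filesystem is cleaned rather than destroyed. A condensed sketch of the failing sequence, adapted from the testFSImageWithDoubleRename test added below (the class name is hypothetical, and the append, fsimage save, and restart steps of the real test are omitted):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

/** Condensed repro sketch; not part of the patch itself. */
public class DoubleRenameRepro {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      hdfs.mkdirs(new Path("/dir1/dira/dirb"));
      hdfs.mkdirs(new Path("/dir1/dirx"));
      hdfs.allowSnapshot(new Path("/dir1"));
      hdfs.createSnapshot(new Path("/dir1"), "s0");
      // file1 is created after s0, so it lands in a later diff's created list
      DFSTestUtil.createFile(hdfs, new Path("/dir1/dira/dirb/file1"),
          1024L, (short) 1, 0L);
      hdfs.rename(new Path("/dir1/dira/dirb"), new Path("/dir1/dirx/dirb"));
      hdfs.createSnapshot(new Path("/dir1"), "s1");
      hdfs.mkdirs(new Path("/dir2/dira"));
      // renaming onto an existing directory moves dirb into it: /dir2/dira/dirb
      hdfs.rename(new Path("/dir1/dirx/dirb"), new Path("/dir2/dira"));
      hdfs.delete(new Path("/dir2/dira"), true);
      // before the patch this could reclaim inodes the active tree still used
      hdfs.deleteSnapshot(new Path("/dir1"), "s1");
    } finally {
      cluster.shutdown();
    }
  }
}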
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java

@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import com.google.common.base.Preconditions;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
-import com.google.common.base.Preconditions;
-
 /**
  * The difference between the current state and a previous state of a list.
  *
@@ -166,6 +166,17 @@ public E setCreated(int index, E element) {
     return old;
   }
 
+  public boolean removeCreated(final E element) {
+    if (created != null) {
+      final int i = search(created, element.getKey());
+      if (i >= 0 && created.get(i) == element) {
+        created.remove(i);
+        return true;
+      }
+    }
+    return false;
+  }
+
   public void clearCreated() {
     if (created != null) {
       created.clear();
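removeCreated() complements the per-node destruction above: after destroyAndCollectBlocks(), the child must also be unlinked from the diff's created list. It binary-searches the sorted list by key but removes only on an identity match, so a different inode that merely reuses the name is preserved. A standalone model of that semantics (plain Java, not the Hadoop Diff<K, E> class; all names here are illustrative):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

/** Toy model of removeCreated(): remove only on key + identity match. */
public class CreatedListDemo {
  static final class Entry {
    final String key;
    Entry(String key) { this.key = key; }
  }

  static final Comparator<Entry> BY_KEY =
      Comparator.comparing((Entry e) -> e.key);

  static boolean removeCreated(List<Entry> created, Entry element) {
    // mirrors search(created, element.getKey()) in Diff: a binary search
    final int i = Collections.binarySearch(created, element, BY_KEY);
    if (i >= 0 && created.get(i) == element) {  // identity, not equals()
      created.remove(i);
      return true;
    }
    return false;
  }

  public static void main(String[] args) {
    List<Entry> created = new ArrayList<>();
    Entry dirb = new Entry("dirb");
    created.add(dirb);
    Entry sameName = new Entry("dirb");  // same key, different object
    System.out.println(removeCreated(created, sameName));  // false: kept
    System.out.println(removeCreated(created, dirb));      // true: removed
  }
}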
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java

@@ -17,17 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Random;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -48,12 +37,22 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.slf4j.event.Level;
-
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test FSImage save/load when Snapshot is supported
@@ -610,4 +609,44 @@ String printTree(String label) throws Exception {
     output.println(b);
     return b;
   }
+
+  @Test (timeout=60000)
+  public void testFSImageWithDoubleRename() throws Exception {
+    final Path dir1 = new Path("/dir1");
+    final Path dir2 = new Path("/dir2");
+    hdfs.mkdirs(dir1);
+    hdfs.mkdirs(dir2);
+    Path dira = new Path(dir1, "dira");
+    Path dirx = new Path(dir1, "dirx");
+    Path dirb = new Path(dira, "dirb");
+    hdfs.mkdirs(dira);
+    hdfs.mkdirs(dirb);
+    hdfs.mkdirs(dirx);
+    hdfs.allowSnapshot(dir1);
+    hdfs.createSnapshot(dir1, "s0");
+    Path file1 = new Path(dirb, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, (short) 1, seed);
+    Path rennamePath = new Path(dirx, "dirb");
+    // mv /dir1/dira/dirb to /dir1/dirx/dirb
+    hdfs.rename(dirb, rennamePath);
+    hdfs.createSnapshot(dir1, "s1");
+    DFSTestUtil.appendFile(hdfs, new Path("/dir1/dirx/dirb/file1"),
+        "more data");
+    Path renamePath1 = new Path(dir2, "dira");
+    hdfs.mkdirs(renamePath1);
+    // mv dirx/dirb to /dir2/dira/dirb
+    hdfs.rename(rennamePath, renamePath1);
+    hdfs.delete(renamePath1, true);
+    hdfs.deleteSnapshot(dir1, "s1");
+    // save namespace and restart cluster
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    cluster.shutdown();
+    cluster = new MiniDFSCluster.Builder(conf).format(false)
+        .numDataNodes(NUM_DATANODES).build();
+    cluster.waitActive();
+    fsn = cluster.getNamesystem();
+    hdfs = cluster.getFileSystem();
+  }
 }
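The test drives the double-rename sequence sketched earlier, then saves the namespace and restarts the cluster: reloading the fsimage would fail, or load a corrupt tree, if the snapshot delete had destroyed inodes the active namespace still references. Assuming a standard Hadoop source checkout with Maven, it can be run on its own with:

    mvn -pl hadoop-hdfs-project/hadoop-hdfs test -Dtest=TestFSImageWithSnapshot#testFSImageWithDoubleRename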
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java

@@ -311,6 +311,45 @@ public void testRenameDirectoryInSnapshot() throws Exception {
         sub3.getName()));
   }
 
+  @Test (timeout=60000)
+  public void testRenameDirectoryAndFileInSnapshot() throws Exception {
+    final Path sub2 = new Path(sub1, "sub2");
+    final Path sub3 = new Path(sub1, "sub3");
+    final Path sub2file1 = new Path(sub2, "file1");
+    final Path sub2file2 = new Path(sub2, "file2");
+    final Path sub3file2 = new Path(sub3, "file2");
+    final Path sub3file3 = new Path(sub3, "file3");
+    final String sub1snap1 = "sub1snap1";
+    final String sub1snap2 = "sub1snap2";
+    final String sub1snap3 = "sub1snap3";
+    final String sub1snap4 = "sub1snap4";
+    hdfs.mkdirs(sub1);
+    hdfs.mkdirs(sub2);
+    DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPL, SEED);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap1);
+    hdfs.rename(sub2file1, sub2file2);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap2);
+
+    // First rename the sub-directory.
+    hdfs.rename(sub2, sub3);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap3);
+    hdfs.rename(sub3file2, sub3file3);
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap4);
+    hdfs.deleteSnapshot(sub1, sub1snap1);
+    hdfs.deleteSnapshot(sub1, sub1snap2);
+    hdfs.deleteSnapshot(sub1, sub1snap3);
+    // check the internal details
+    INode sub3file3Inode = fsdir.getINode4Write(sub3file3.toString());
+    INodeReference ref = sub3file3Inode
+        .asReference();
+    INodeReference.WithCount withCount = (WithCount) ref
+        .getReferredINode();
+    Assert.assertEquals(withCount.getReferenceCount(), 1);
+    // Ensure name list is empty for the reference sub3file3Inode
+    Assert.assertNull(withCount.getLastWithName());
+    Assert.assertTrue(sub3file3Inode.isInCurrentState());
+  }
+
   /**
    * After the following steps:
    * <pre>
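The closing assertions are the heart of this regression check. After the first three snapshots are deleted, only sub1snap4 still records any rename history, and file3 lives in the active tree. An informal reading of what each assertion pins down, using the names from the test above:

// One live reference to the twice-renamed inode: the one held by the
// active filesystem. A leaked snapshot reference would make this > 1.
Assert.assertEquals(withCount.getReferenceCount(), 1);
// No WithName nodes remain, i.e. no deleted snapshot still names the
// old paths of the renamed file.
Assert.assertNull(withCount.getLastWithName());
// The inode is reachable from the root in the current state -- exactly
// the property the new INode.isInCurrentState() method checks.
Assert.assertTrue(sub3file3Inode.isInCurrentState());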
|
Loading…
Reference in New Issue
Block a user