HDFS-4141. Support directory diff - the difference between the current state and a previous snapshot of an INodeDirectory.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1405250 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-11-03 01:31:51 +00:00
parent 77fe43ac14
commit e5a7b3d430
12 changed files with 516 additions and 49 deletions

View File

@@ -44,3 +44,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4116. Add auditlog for some snapshot operations. (Jing Zhao via suresh)
HDFS-4095. Add some snapshot related metrics. (Jing Zhao via suresh)
+ HDFS-4141. Support directory diff - the difference between the current state
+ and a previous snapshot of an INodeDirectory. (szetszwo)

View File

@@ -57,8 +57,6 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
- import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
- import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.util.ByteArray;

View File

@@ -244,7 +244,7 @@ String getLocalParentDir() {
* @return null if the local name is null;
* otherwise, return the local name byte array.
*/
- byte[] getLocalNameBytes() {
+ public byte[] getLocalNameBytes() {
return name;
}

View File

@@ -30,7 +30,7 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
- import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshotRoot;
+ import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import com.google.common.annotations.VisibleForTesting;
@@ -56,7 +56,7 @@ public static INodeDirectory valueOf(INode inode, String path
private List<INode> children;
- protected INodeDirectory(String name, PermissionStatus permissions) {
+ public INodeDirectory(String name, PermissionStatus permissions) {
super(name, permissions);
this.children = null;
}
@@ -514,7 +514,7 @@ static class INodesInPath {
*/
private boolean isSnapshot;
/**
- * Index of {@link INodeDirectorySnapshotRoot} for snapshot path, else -1
+ * Index of {@link INodeDirectoryWithSnapshot} for snapshot path, else -1
*/
private int snapshotRootIndex;
@@ -542,7 +542,7 @@ INode[] getINodes() {
}
/**
- * @return index of the {@link INodeDirectorySnapshotRoot} in
+ * @return index of the {@link INodeDirectoryWithSnapshot} in
* {@link #inodes} for snapshot path, else -1.
*/
int getSnapshotRootIndex() {

View File

@@ -69,6 +69,7 @@ public static INodeFile valueOf(INode inode, String path) throws IOException {
protected INodeFile(INodeFile f) {
this(f.getPermissionStatus(), f.getBlocks(), f.getFileReplication(),
f.getModificationTime(), f.getAccessTime(), f.getPreferredBlockSize());
+ this.name = f.getLocalNameBytes();
}
/**

View File

@@ -1,29 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
/** The root directory of a snapshot. */
public class INodeDirectorySnapshotRoot extends INodeDirectory {
INodeDirectorySnapshotRoot(String name, INodeDirectory dir) {
super(name, dir.getPermissionStatus());
setLocalName(name);
parent = dir;
}
}

View File

@@ -57,8 +57,8 @@ static public INodeDirectorySnapshottable valueOf(
}
/** A list of snapshots of this directory. */
- private final List<INodeDirectorySnapshotRoot> snapshots
-     = new ArrayList<INodeDirectorySnapshotRoot>();
+ private final List<INodeDirectoryWithSnapshot> snapshots
+     = new ArrayList<INodeDirectoryWithSnapshot>();
public INode getSnapshotINode(byte[] name) {
if (snapshots == null || snapshots.size() == 0) {
@@ -98,7 +98,7 @@ public boolean isSnapshottable() {
}
/** Add a snapshot root under this directory. */
- INodeDirectorySnapshotRoot addSnapshotRoot(final String name
+ INodeDirectoryWithSnapshot addSnapshotRoot(final String name
) throws SnapshotException {
//check snapshot quota
if (snapshots.size() + 1 > snapshotQuota) {
@@ -107,7 +107,7 @@ INodeDirectorySnapshotRoot addSnapshotRoot(final String name
+ snapshotQuota);
}
- final INodeDirectorySnapshotRoot r = new INodeDirectorySnapshotRoot(name, this);
+ final INodeDirectoryWithSnapshot r = new INodeDirectoryWithSnapshot(name, this);
snapshots.add(r);
//set modification time

View File

@@ -0,0 +1,264 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
/** The directory with snapshots. */
public class INodeDirectoryWithSnapshot extends INodeDirectory {
/**
* The difference between the current state and a previous snapshot
* of an INodeDirectory.
*
* Two lists are maintained in the algorithm:
* - c-list for newly created inodes
* - d-list for the deleted inodes
*
* Denote the state of an inode by the following
* (0, 0): neither in c-list nor d-list
* (c, 0): in c-list but not in d-list
* (0, d): in d-list but not in c-list
* (c, d): in both c-list and d-list
*
* For each case below, ( , ) at the end shows the result state of the inode.
*
* Case 1. Suppose the inode i is NOT in the previous snapshot. (0, 0)
* 1.1. create i in current: add it to c-list (c, 0)
* 1.1.1. create i in current and then create: impossible
* 1.1.2. create i in current and then delete: remove it from c-list (0, 0)
* 1.1.3. create i in current and then modify: replace it in c-list (c, 0)
*
* 1.2. delete i from current: impossible
*
* 1.3. modify i in current: impossible
*
* Case 2. Suppose the inode i is ALREADY in the previous snapshot. (0, 0)
* 2.1. create i in current: impossible
*
* 2.2. delete i from current: add it to d-list (0, d)
* 2.2.1. delete i from current and then create: add it to c-list (c, d)
* 2.2.2. delete i from current and then delete: impossible
* 2.2.3. delete i from current and then modify: impossible
*
* 2.3. modify i in current: put it in both c-list and d-list (c, d)
* 2.3.1. modify i in current and then create: impossible
* 2.3.2. modify i in current and then delete: remove it from c-list (0, d)
* 2.3.3. modify i in current and then modify: replace it in c-list (c, d)
*/
static class Diff {
/**
* Search the inode from the list.
* @return -1 if the list is null; otherwise, return the insertion point
* defined in {@link Collections#binarySearch(List, Object)}.
* Note that, when the list is null, -1 is the correct insertion point.
*/
static int search(final List<INode> inodes, final INode i) {
return search(inodes, i.getLocalNameBytes());
}
private static int search(final List<INode> inodes, final byte[] name) {
return inodes == null? -1: Collections.binarySearch(inodes, name);
}
/** The ID (e.g. snapshot ID) of this object. */
final int id;
/** c-list: inode(s) created in current. */
private List<INode> created;
/** d-list: inode(s) deleted from current. */
private List<INode> deleted;
Diff(int id) {
this.id = id;
}
/**
* Insert the inode to created.
* @param i the insertion point defined
* in {@link Collections#binarySearch(List, Object)}
*/
private void insertCreated(final INode inode, final int i) {
if (i >= 0) {
throw new AssertionError("Inode already exists: inode=" + inode
+ ", created=" + created);
}
if (created == null) {
created = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
}
created.add(-i - 1, inode);
}
/**
* Insert the inode to deleted.
* @param i the insertion point defined
* in {@link Collections#binarySearch(List, Object)}
*/
private void insertDeleted(final INode inode, final int i) {
if (i >= 0) {
throw new AssertionError("Inode already exists: inode=" + inode
+ ", deleted=" + deleted);
}
if (deleted == null) {
deleted = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
}
deleted.add(-i - 1, inode);
}
/** Create an inode in current state. */
void create(final INode inode) {
final int c = search(created, inode);
insertCreated(inode, c);
}
/** Delete an inode from current state. */
void delete(final INode inode) {
final int c = search(created, inode);
if (c >= 0) {
// remove a newly created inode
created.remove(c);
} else {
// not in c-list, it must be in previous
final int d = search(deleted, inode);
insertDeleted(inode, d);
}
}
/** Modify an inode in current state. */
void modify(final INode oldinode, final INode newinode) {
if (!oldinode.equals(newinode)) {
throw new AssertionError("The names do not match: oldinode="
+ oldinode + ", newinode=" + newinode);
}
final int c = search(created, newinode);
if (c >= 0) {
// inode is already in c-list,
created.set(c, newinode);
} else {
final int d = search(deleted, oldinode);
if (d < 0) {
// neither in c-list nor d-list
insertCreated(newinode, c);
insertDeleted(oldinode, d);
}
}
}
/**
* Given an inode in current state, find the corresponding inode in previous
* snapshot. The inodes in current state and previous snapshot can possibly
* be the same.
*
* @param inodeInCurrent The inode, possibly null, in current state.
* @return null if the inode is not found in previous snapshot;
* otherwise, return the corresponding inode in previous snapshot.
*/
INode accessPrevious(byte[] name, INode inodeInCurrent) {
return accessPrevious(name, inodeInCurrent, created, deleted);
}
private static INode accessPrevious(byte[] name, INode inodeInCurrent,
final List<INode> clist, final List<INode> dlist) {
final int d = search(dlist, name);
if (d >= 0) {
// the inode was in previous and was once deleted in current.
return dlist.get(d);
} else {
final int c = search(clist, name);
// When c >= 0, the inode in current is a newly created inode.
return c >= 0? null: inodeInCurrent;
}
}
/**
* Given an inode in previous snapshot, find the corresponding inode in
* current state. The inodes in current state and previous snapshot can
* possibly be the same.
*
* @param inodeInPrevious The inode, possibly null, in previous snapshot.
* @return null if the inode is not found in current state;
* otherwise, return the corresponding inode in current state.
*/
INode accessCurrent(byte[] name, INode inodeInPrevious) {
return accessPrevious(name, inodeInPrevious, deleted, created);
}
/**
* Apply this diff to previous snapshot in order to obtain current state.
* @return the current state of the list.
*/
List<INode> apply2Previous(final List<INode> previous) {
return apply2Previous(previous, created, deleted);
}
private static List<INode> apply2Previous(final List<INode> previous,
final List<INode> clist, final List<INode> dlist) {
final List<INode> current = new ArrayList<INode>(previous);
if (dlist != null) {
for(INode d : dlist) {
current.remove(d);
}
}
if (clist != null) {
for(INode c : clist) {
final int i = search(current, c);
current.add(-i - 1, c);
}
}
return current;
}
/**
* Apply the reverse of this diff to current state in order
* to obtain the previous snapshot.
* @return the previous state of the list.
*/
List<INode> apply2Current(final List<INode> current) {
return apply2Previous(current, deleted, created);
}
/** Convert the inode list to a compact string. */
static String toString(List<INode> inodes) {
if (inodes == null) {
return null;
} else if (inodes.isEmpty()) {
return "[]";
}
final StringBuilder b = new StringBuilder("[")
.append(inodes.get(0).getLocalName());
for(int i = 1; i < inodes.size(); i++) {
b.append(", ").append(inodes.get(i).getLocalName());
}
return b.append("]").toString();
}
@Override
public String toString() {
return getClass().getSimpleName() + "_" + id
+ ":\n created=" + toString(created)
+ "\n deleted=" + toString(deleted);
}
}
INodeDirectoryWithSnapshot(String name, INodeDirectory dir) {
super(name, dir.getPermissionStatus());
parent = dir;
}
}
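
To make the c-list/d-list bookkeeping above concrete, here is a minimal walk-through sketch of the new Diff class. It is illustrative only and not part of the patch: the wrapper class DiffWalkThrough, the directory names and the snapshot id are made up, and the sketch assumes it is compiled in the org.apache.hadoop.hdfs.server.namenode.snapshot package with this patch applied, since Diff and its methods are package-private.

package org.apache.hadoop.hdfs.server.namenode.snapshot;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.Diff;

/** Hypothetical walk-through of the c-list/d-list bookkeeping in Diff. */
public class DiffWalkThrough {
  private static final PermissionStatus PERM = PermissionStatus.createImmutable(
      "user", "group", FsPermission.createImmutable((short)0));

  public static void main(String[] args) {
    // The previous snapshot contains directories "a" and "b".
    final INode a = new INodeDirectory("a", PERM);
    final INode b = new INodeDirectory("b", PERM);
    final List<INode> previous = new ArrayList<INode>();
    previous.add(a);
    previous.add(b);

    final Diff diff = new Diff(0);                // made-up snapshot id
    diff.create(new INodeDirectory("c", PERM));   // case 1.1: new inode goes to c-list
    diff.delete(b);                               // case 2.2: existing inode goes to d-list

    // current == previous + diff, i.e. {a, c}
    final List<INode> current = diff.apply2Previous(previous);
    System.out.println("current = " + Diff.toString(current));

    // "b" is gone from current but still reachable through the previous snapshot.
    final INode inPrevious = diff.accessPrevious(b.getLocalNameBytes(), null);
    System.out.println("previous b = " + inPrevious.getLocalName());
  }
}

Note that apply2Current and accessCurrent are the same routines with the created and deleted lists swapped, which is why reversing a diff needs no extra code.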

View File

@@ -37,7 +37,6 @@ public class INodeFileWithLink extends INodeFile {
public INodeFileWithLink(INodeFile f) {
super(f);
- setLocalName(f.getLocalName());
next = this;
}

View File

@@ -104,7 +104,7 @@ private SnapshotCreation(final String path) throws IOException {
}
void run(final String name) throws IOException {
- final INodeDirectorySnapshotRoot root = srcRoot.addSnapshotRoot(name);
+ final INodeDirectoryWithSnapshot root = srcRoot.addSnapshotRoot(name);
processRecursively(srcRoot, root);
}

View File

@@ -28,7 +28,7 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
- import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshotRoot;
+ import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
import org.junit.After;
@@ -163,13 +163,13 @@ public void testSnapshotPathINodes() throws Exception {
// SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s1, file1}
assertEquals(nodesInPath.getSnapshotRootIndex(), 3);
assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof
-     INodeDirectorySnapshotRoot);
+     INodeDirectoryWithSnapshot);
// Check the INode for file1 (snapshot file)
INode snapshotFileNode = inodes[inodes.length - 1];
assertEquals(snapshotFileNode.getLocalName(), file1.getName());
assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
assertTrue(snapshotFileNode.getParent() instanceof
-     INodeDirectorySnapshotRoot);
+     INodeDirectoryWithSnapshot);
// Call getExistingPathINodes and request only one INode.
nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 1, false);
@@ -266,13 +266,13 @@ public void testSnapshotPathINodesAfterDeletion() throws Exception {
// SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s1, file1}
assertEquals(nodesInPath.getSnapshotRootIndex(), 3);
assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof
-     INodeDirectorySnapshotRoot);
+     INodeDirectoryWithSnapshot);
// Check the INode for file1 (snapshot file)
INode snapshotFileNode = inodes[inodes.length - 1];
assertEquals(snapshotFileNode.getLocalName(), file1.getName());
assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
assertTrue(snapshotFileNode.getParent() instanceof
-     INodeDirectorySnapshotRoot);
+     INodeDirectoryWithSnapshot);
}
/**
@@ -326,11 +326,11 @@ public void testSnapshotPathINodesWithAddedFile() throws Exception {
// SnapshotRootIndex should still be 3: {root, Testsnapshot, sub1, s1, null}
assertEquals(nodesInPath.getSnapshotRootIndex(), 3);
assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof
-     INodeDirectorySnapshotRoot);
+     INodeDirectoryWithSnapshot);
// Check the last INode in inodes, which should be null
assertNull(inodes[inodes.length - 1]);
assertTrue(inodes[inodes.length - 2] instanceof
-     INodeDirectorySnapshotRoot);
+     INodeDirectoryWithSnapshot);
}
/**

View File

@@ -0,0 +1,231 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.Diff;
import org.junit.Assert;
import org.junit.Test;
/**
* Test {@link INodeDirectoryWithSnapshot}, especially, {@link Diff}.
*/
public class TestINodeDirectoryWithSnapshot {
private static Random RANDOM = new Random();
private static final PermissionStatus PERM = PermissionStatus.createImmutable(
"user", "group", FsPermission.createImmutable((short)0));
static int nextStep(int n) {
return n == 0? 1: 10*n;
}
/** Test directory diff. */
@Test
public void testDiff() throws Exception {
for(int startSize = 0; startSize <= 1000; startSize = nextStep(startSize)) {
for(int m = 0; m <= 10000; m = nextStep(m)) {
runDiffTest(startSize, m, true);
}
}
}
/**
* The following are the steps of the diff test:
* 1) Initialize the previous list and add s elements to it,
* where s = startSize.
* 2) Initialize the current list by copying all elements from the previous list.
* 3) Initialize an empty diff object.
* 4) Make m modifications to the current list, where m = numModifications.
* Record the modifications in diff at the same time.
* 5) Test if current == previous + diff and previous == current - diff.
* 6) Test accessPrevious and accessCurrent.
*
* @param startSize
* @param numModifications
* @param computeDiff
*/
void runDiffTest(int startSize, int numModifications, boolean computeDiff) {
final int width = findWidth(startSize + numModifications);
System.out.println("\nstartSize=" + startSize
+ ", numModifications=" + numModifications
+ ", width=" + width);
// initialize previous
final List<INode> previous = new ArrayList<INode>();
int n = 0;
for(; n < startSize; n++) {
previous.add(newINode(n, width));
}
// make modifications to current and record the diff
final List<INode> current = new ArrayList<INode>(previous);
final Diff diff = computeDiff? new Diff(0): null;
for(int m = 0; m < numModifications; m++) {
// if current is empty, the next operation must be create;
// otherwise, randomly pick an operation.
final int nextOperation = current.isEmpty()? 1: RANDOM.nextInt(3) + 1;
switch(nextOperation) {
case 1: // create
{
final INode i = newINode(n++, width);
create(i, current, diff);
break;
}
case 2: // delete
{
final INode i = current.get(RANDOM.nextInt(current.size()));
delete(i, current, diff);
break;
}
case 3: // modify
{
final INode i = current.get(RANDOM.nextInt(current.size()));
modify(i, current, diff);
break;
}
}
}
if (computeDiff) {
// check if current == previous + diff
final List<INode> c = diff.apply2Previous(previous);
if (!hasIdenticalElements(current, c)) {
System.out.println("previous = " + Diff.toString(previous));
System.out.println();
System.out.println("current = " + Diff.toString(current));
System.out.println("c = " + Diff.toString(c));
System.out.println();
System.out.println("diff = " + diff);
throw new AssertionError("current and c are not identical.");
}
// check if previous == current - diff
final List<INode> p = diff.apply2Current(current);
if (!hasIdenticalElements(previous, p)) {
System.out.println("previous = " + Diff.toString(previous));
System.out.println("p = " + Diff.toString(p));
System.out.println();
System.out.println("current = " + Diff.toString(current));
System.out.println();
System.out.println("diff = " + diff);
throw new AssertionError("previous and p are not identical.");
}
}
if (computeDiff) {
for(int m = 0; m < n; m++) {
final INode inode = newINode(m, width);
{// test accessPrevious
final int i = Diff.search(current, inode);
final INode inodeInCurrent = i < 0? null: current.get(i);
final INode computed = diff.accessPrevious(
inode.getLocalNameBytes(), inodeInCurrent);
final int j = Diff.search(previous, inode);
final INode expected = j < 0? null: previous.get(j);
// must be the same object (equals is not enough)
Assert.assertTrue(computed == expected);
}
{// test accessCurrent
final int i = Diff.search(previous, inode);
final INode inodeInPrevious = i < 0? null: previous.get(i);
final INode computed = diff.accessCurrent(
inode.getLocalNameBytes(), inodeInPrevious);
final int j = Diff.search(current, inode);
final INode expected = j < 0? null: current.get(j);
// must be the same object (equals is not enough)
Assert.assertTrue(computed == expected);
}
}
}
}
static boolean hasIdenticalElements(final List<INode> expected,
final List<INode> computed) {
if (expected == null) {
return computed == null;
}
if (expected.size() != computed.size()) {
return false;
}
for(int i = 0; i < expected.size(); i++) {
// must be the same object (equals is not enough)
if (expected.get(i) != computed.get(i)) {
return false;
}
}
return true;
}
static String toString(INode inode) {
return inode == null? null
: inode.getLocalName() + ":" + inode.getModificationTime();
}
static int findWidth(int max) {
int w = 1;
for(long n = 10; n < max; n *= 10, w++);
return w;
}
static INode newINode(int n, int width) {
return new INodeDirectory(String.format("n%0" + width + "d", n), PERM);
}
static void create(INode inode, final List<INode> current, Diff diff) {
final int i = Diff.search(current, inode);
Assert.assertTrue(i < 0);
current.add(-i - 1, inode);
if (diff != null) {
diff.create(inode);
}
}
static void delete(INode inode, final List<INode> current, Diff diff) {
final int i = Diff.search(current, inode);
Assert.assertTrue("i=" + i + ", inode=" + inode + "\ncurrent=" + current,
i >= 0);
current.remove(i);
if (diff != null) {
diff.delete(inode);
}
}
static void modify(INode inode, final List<INode> current, Diff diff) {
final int i = Diff.search(current, inode);
Assert.assertTrue(i >= 0);
final INodeDirectory oldinode = (INodeDirectory)current.get(i);
final INodeDirectory newinode = new INodeDirectory(oldinode);
newinode.setModificationTime(oldinode.getModificationTime() + 1);
current.set(i, newinode);
if (diff != null) {
diff.modify(oldinode, newinode);
}
}
}
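
Diff.search(...) and the create/delete/modify helpers above all lean on the java.util.Collections.binarySearch contract: when the key is absent, the return value is (-(insertion point) - 1), so the expression -i - 1 recovers the index at which the element must be inserted to keep the list sorted (and Diff treats a null list as insertion point -1, i.e. index 0 of a freshly allocated list). A tiny self-contained illustration, using plain strings instead of INodes purely for clarity:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class BinarySearchInsertionPoint {
  public static void main(String[] args) {
    final List<String> names = new ArrayList<String>();
    Collections.addAll(names, "a", "c", "e");

    final int i = Collections.binarySearch(names, "d");
    // "d" is absent, so binarySearch returns -(insertion point) - 1 = -3.
    System.out.println(i);              // -3
    names.add(-i - 1, "d");             // insert at index 2, keeping the order
    System.out.println(names);          // [a, c, d, e]
  }
}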