svn merge -c -1432788 for reverting HDFS-4098. Add FileWithLink, INodeFileUnderConstructionWithLink and INodeFileUnderConstructionSnapshot in order to support append to snapshotted files.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1433284 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-01-15 04:33:14 +00:00
parent 686e13db2f
commit 397835acdf
12 changed files with 161 additions and 379 deletions

View File

@@ -101,9 +101,5 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4245. Include snapshot related operations in TestOfflineEditsViewer.
(Jing Zhao via szetszwo)
HDFS-4098. Add FileWithLink, INodeFileUnderConstructionWithLink and
INodeFileUnderConstructionSnapshot in order to support append to snapshotted
files. (szetszwo)
HDFS-4395. In INodeDirectorySnapshottable's constructor, the passed-in dir
could be an INodeDirectoryWithSnapshot. (Jing Zhao via szetszwo)

View File

@@ -324,7 +324,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
if (oldFile.isUnderConstruction()) {
INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
INodeFile newFile = ucFile.toINodeFile(ucFile.getModificationTime());
INodeFile newFile = ucFile.convertToInodeFile(ucFile.getModificationTime());
fsDir.unprotectedReplaceINodeFile(addCloseOp.path, ucFile, newFile,
iip.getLatestSnapshot());
}

View File

@@ -1357,11 +1357,10 @@ private LocatedBlocks getBlockLocationsUpdateTimes(String src,
doAccessTime = false;
}
final INodesInPath iip = dir.getINodesInPath(src);
long now = now();
final INodesInPath iip = dir.getMutableINodesInPath(src);
final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
if (!iip.isSnapshot() //snapshots are readonly, so don't update atime.
&& doAccessTime && isAccessTimeSupported()) {
final long now = now();
if (doAccessTime && isAccessTimeSupported()) {
if (now <= inode.getAccessTime() + getAccessTimePrecision()) {
// if we have to set access time but we only have the readlock, then
// restart this entire operation with the writeLock.
@@ -1982,13 +1981,18 @@ private LocatedBlock startFileInternal(String src,
LocatedBlock prepareFileForWrite(String src, INodeFile file,
String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
boolean writeToEditLog, Snapshot latestSnapshot) throws IOException {
if (latestSnapshot != null) {
file = (INodeFile)file.recordModification(latestSnapshot).left;
}
final INodeFileUnderConstruction cons
= INodeFileUnderConstruction.toINodeFileUnderConstruction(
file, leaseHolder, clientMachine, clientNode);
//TODO SNAPSHOT: INodeFileUnderConstruction with link
INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
file.getId(),
file.getLocalNameBytes(),
file.getFileReplication(),
file.getModificationTime(),
file.getPreferredBlockSize(),
file.getBlocks(),
file.getPermissionStatus(),
leaseHolder,
clientMachine,
clientNode);
dir.replaceINodeFile(src, file, cons, latestSnapshot);
leaseManager.addLease(cons.getClientName(), src);
@@ -3297,7 +3301,7 @@ private void finalizeINodeFileUnderConstruction(String src,
// The file is no longer pending.
// Create permanent INode, update blocks
INodeFile newFile = pendingFile.toINodeFile(now());
INodeFile newFile = pendingFile.convertToInodeFile(now());
dir.replaceINodeFile(src, pendingFile, newFile, latestSnapshot);
// close file and persist block allocations for this file

View File

@@ -509,7 +509,7 @@ public long getAccessTime() {
/**
* Set last access time of inode.
*/
public INode setAccessTime(long atime, Snapshot latest) {
INode setAccessTime(long atime, Snapshot latest) {
Pair<? extends INode, ? extends INode> pair = recordModification(latest);
INode nodeToUpdate = pair != null ? pair.left : this;
nodeToUpdate.accessTime = atime;

View File

@@ -28,6 +28,8 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
/** I-node for closed file. */
@@ -94,7 +96,7 @@ static long combinePreferredBlockSize(long header, long blockSize) {
preferredBlockSize);
}
protected INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime, long atime,
INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime, long atime,
BlockInfo[] blklist, short replication, long preferredBlockSize) {
super(id, name, permissions, null, mtime, atime);
header = HeaderFormat.combineReplication(header, replication);
@@ -109,7 +111,7 @@ protected INodeFile(INodeFile that) {
}
@Override
public Pair<? extends INodeFile, ? extends INodeFile> createSnapshotCopy() {
public Pair<INodeFileWithLink, INodeFileSnapshot> createSnapshotCopy() {
return parent.replaceINodeFile(this).createSnapshotCopy();
}
@@ -139,7 +141,7 @@ public short getBlockReplication() {
return getFileReplication();
}
public void setFileReplication(short replication, Snapshot latest) {
protected void setFileReplication(short replication, Snapshot latest) {
if (latest != null) {
final Pair<? extends INode, ? extends INode> p = recordModification(latest);
if (p != null) {

View File

@@ -29,10 +29,6 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithLink;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
import com.google.common.base.Preconditions;
/**
* I-node for file being written.
@@ -49,29 +45,6 @@ public static INodeFileUnderConstruction valueOf(INode inode, String path
return (INodeFileUnderConstruction)file;
}
/** Convert the given file to an {@link INodeFileUnderConstruction}. */
public static INodeFileUnderConstruction toINodeFileUnderConstruction(
INodeFile file,
String clientName,
String clientMachine,
DatanodeDescriptor clientNode) {
Preconditions.checkArgument(!(file instanceof INodeFileUnderConstruction),
"file is already an INodeFileUnderConstruction");
final INodeFileUnderConstruction uc = new INodeFileUnderConstruction(
file.getId(),
file.getLocalNameBytes(),
file.getFileReplication(),
file.getModificationTime(),
file.getPreferredBlockSize(),
file.getBlocks(),
file.getPermissionStatus(),
clientName,
clientMachine,
clientNode);
return file instanceof INodeFileWithLink?
new INodeFileUnderConstructionWithLink(uc): uc;
}
private String clientName; // lease holder
private final String clientMachine;
private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@@ -104,13 +77,6 @@ public static INodeFileUnderConstruction toINodeFileUnderConstruction(
this.clientMachine = clientMachine;
this.clientNode = clientNode;
}
protected INodeFileUnderConstruction(INodeFileUnderConstruction that) {
super(that);
this.clientName = that.clientName;
this.clientMachine = that.clientMachine;
this.clientNode = that.clientNode;
}
String getClientName() {
return clientName;
@@ -136,26 +102,30 @@ public boolean isUnderConstruction() {
return true;
}
/**
* Converts an INodeFileUnderConstruction to an INodeFile.
* The original modification time is used as the access time.
* The new modification is the specified mtime.
*/
protected INodeFile toINodeFile(long mtime) {
assertAllBlocksComplete();
//
// converts a INodeFileUnderConstruction into a INodeFile
// use the modification time as the access time
//
INodeFile convertToInodeFile(long mtime) {
assert allBlocksComplete() : "Can't finalize inode " + this
+ " since it contains non-complete blocks! Blocks are "
+ Arrays.asList(getBlocks());
//TODO SNAPSHOT: may convert to INodeFileWithLink
return new INodeFile(getId(), getLocalNameBytes(), getPermissionStatus(),
mtime, getModificationTime(),
getBlocks(), getFileReplication(), getPreferredBlockSize());
}
/** Assert all blocks are complete. */
protected void assertAllBlocksComplete() {
final BlockInfo[] blocks = getBlocks();
for (int i = 0; i < blocks.length; i++) {
Preconditions.checkState(blocks[i].isComplete(), "Failed to finalize"
+ " %s %s since blocks[%s] is non-complete, where blocks=%s.",
getClass().getSimpleName(), this, i, Arrays.asList(getBlocks()));
/**
* @return true if all of the blocks in this file are marked as completed.
*/
private boolean allBlocksComplete() {
for (BlockInfo b : getBlocks()) {
if (!b.isComplete()) {
return false;
}
}
return true;
}
/**

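Note: the conversion restored in this file turns an under-construction inode back into a closed INodeFile; the old modification time becomes the access time, the caller-supplied mtime becomes the new modification time, and all blocks must be complete first. Below is a minimal standalone sketch of that pattern only; FileUnderConstruction, FinalizedFile and Block are hypothetical stand-ins, not the actual HDFS classes.

import java.util.List;

class Block {
  private final boolean complete;
  Block(boolean complete) { this.complete = complete; }
  boolean isComplete() { return complete; }
}

class FinalizedFile {
  final long mtime;  // new modification time supplied by the caller
  final long atime;  // the old modification time is reused as the access time
  FinalizedFile(long mtime, long atime) { this.mtime = mtime; this.atime = atime; }
}

class FileUnderConstruction {
  private final List<Block> blocks;
  private final long modificationTime;

  FileUnderConstruction(List<Block> blocks, long modificationTime) {
    this.blocks = blocks;
    this.modificationTime = modificationTime;
  }

  // All blocks must be complete before the file can be finalized.
  private boolean allBlocksComplete() {
    for (Block b : blocks) {
      if (!b.isComplete()) {
        return false;
      }
    }
    return true;
  }

  // Mirrors the convertToInodeFile pattern: old mtime becomes atime,
  // the supplied mtime is the close time. Illustrative sketch only.
  FinalizedFile convertToFinalized(long mtime) {
    assert allBlocksComplete() : "cannot finalize a file with non-complete blocks";
    return new FinalizedFile(mtime, modificationTime);
  }
}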
View File

@@ -1,154 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapINodeUpdateEntry;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import com.google.common.base.Preconditions;
/**
* {@link INodeFile} with a link to the next element.
* The link of all the snapshot files and the original file form a circular
* linked list so that all elements are accessible by any of the elements.
*/
@InterfaceAudience.Private
public interface FileWithLink {
/** @return the next element. */
public <N extends INodeFile & FileWithLink> N getNext();
/** Set the next element. */
public <N extends INodeFile & FileWithLink> void setNext(N next);
/** Insert inode to the circular linked list. */
public <N extends INodeFile & FileWithLink> void insert(N inode);
/** Utility methods for the classes which implement the interface. */
static class Util {
/**
* @return the max file replication of the elements
* in the circular linked list.
*/
static <F extends INodeFile & FileWithLink,
N extends INodeFile & FileWithLink> short getBlockReplication(
final F file) {
short max = file.getFileReplication();
// i may be null since next will be set to null when the INode is deleted
for(N i = file.getNext(); i != file && i != null; i = i.getNext()) {
final short replication = i.getFileReplication();
if (replication > max) {
max = replication;
}
}
return max;
}
/**
* Remove the current inode from the circular linked list.
* If some blocks at the end of the block list no longer belongs to
* any other inode, collect them and update the block list.
*/
static <F extends INodeFile & FileWithLink,
N extends INodeFile & FileWithLink>
int collectSubtreeBlocksAndClear(F file, BlocksMapUpdateInfo info) {
final N next = file.getNext();
Preconditions.checkState(next != file, "this is the only remaining inode.");
// There are other inode(s) using the blocks.
// Compute max file size excluding this and find the last inode.
long max = next.computeFileSize(true);
short maxReplication = next.getFileReplication();
FileWithLink last = next;
for(N i = next.getNext(); i != file; i = i.getNext()) {
final long size = i.computeFileSize(true);
if (size > max) {
max = size;
}
final short rep = i.getFileReplication();
if (rep > maxReplication) {
maxReplication = rep;
}
last = i;
}
collectBlocksBeyondMaxAndClear(file, max, info);
// remove this from the circular linked list.
last.setNext(next);
// Set the replication of the current INode to the max of all the other
// linked INodes, so that in case the current INode is retrieved from the
// blocksMap before it is removed or updated, the correct replication
// number can be retrieved.
file.setFileReplication(maxReplication, null);
file.setNext(null);
// clear parent
file.setParent(null);
return 1;
}
static <F extends INodeFile & FileWithLink,
N extends INodeFile & FileWithLink>
void collectBlocksBeyondMaxAndClear(final F file,
final long max, final BlocksMapUpdateInfo info) {
final BlockInfo[] oldBlocks = file.getBlocks();
if (oldBlocks != null) {
//find the minimum n such that the size of the first n blocks > max
int n = 0;
for(long size = 0; n < oldBlocks.length && max > size; n++) {
size += oldBlocks[n].getNumBytes();
}
// Replace the INode for all the remaining blocks in blocksMap
final N next = file.getNext();
final BlocksMapINodeUpdateEntry entry = new BlocksMapINodeUpdateEntry(
file, next);
if (info != null) {
for (int i = 0; i < n; i++) {
info.addUpdateBlock(oldBlocks[i], entry);
}
}
// starting from block n, the data is beyond max.
if (n < oldBlocks.length) {
// resize the array.
final BlockInfo[] newBlocks;
if (n == 0) {
newBlocks = null;
} else {
newBlocks = new BlockInfo[n];
System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
}
for(N i = next; i != file; i = i.getNext()) {
i.setBlocks(newBlocks);
}
// collect the blocks beyond max.
if (info != null) {
for(; n < oldBlocks.length; n++) {
info.addDeleteBlock(oldBlocks[n]);
}
}
}
file.setBlocks(null);
}
}
}
}
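Note: the FileWithLink interface deleted above keeps the original file and its snapshot copies on a circular linked list, so any element can reach all the others; quantities such as block replication are then the maximum over the whole circle. The following is a minimal standalone sketch of that insert/traverse pattern under simplified assumptions; Node and CircularListDemo are hypothetical names, not HDFS classes.

class Node {
  final short replication;
  Node next;

  Node(short replication) {
    this.replication = replication;
    this.next = this;  // a single node forms a one-element circle
  }

  // Insert a node right after this one, keeping the list circular.
  void insert(Node node) {
    node.next = this.next;
    this.next = node;
  }

  // Max replication over the whole circle; next may be null after deletion.
  short maxReplication() {
    short max = replication;
    for (Node i = next; i != this && i != null; i = i.next) {
      if (i.replication > max) {
        max = i.replication;
      }
    }
    return max;
  }
}

class CircularListDemo {
  public static void main(String[] args) {
    Node original = new Node((short) 3);
    original.insert(new Node((short) 5));  // a snapshot copy with higher replication
    original.insert(new Node((short) 2));
    System.out.println(original.maxReplication());  // prints 5
  }
}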

View File

@@ -1,44 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
/**
* INode representing a snapshot of an {@link INodeFileUnderConstruction}.
*/
@InterfaceAudience.Private
public class INodeFileUnderConstructionSnapshot
extends INodeFileUnderConstructionWithLink {
/** The file size at snapshot creation time. */
final long size;
INodeFileUnderConstructionSnapshot(INodeFileUnderConstructionWithLink f) {
super(f);
this.size = f.computeFileSize(true);
f.insert(this);
}
@Override
public long computeFileSize(boolean includesBlockInfoUnderConstruction) {
//ignore includesBlockInfoUnderConstruction
//since files in a snapshot are considered as closed.
return size;
}
}

View File

@@ -1,88 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
/**
* Represent an {@link INodeFileUnderConstruction} that is snapshotted.
* Note that snapshot files are represented by
* {@link INodeFileUnderConstructionSnapshot}.
*/
@InterfaceAudience.Private
public class INodeFileUnderConstructionWithLink
extends INodeFileUnderConstruction implements FileWithLink {
private FileWithLink next;
public INodeFileUnderConstructionWithLink(INodeFileUnderConstruction f) {
super(f);
setNext(this);
}
@Override
protected INodeFileWithLink toINodeFile(final long mtime) {
assertAllBlocksComplete();
final long atime = getModificationTime();
final INodeFileWithLink f = new INodeFileWithLink(this);
f.setModificationTime(mtime, null);
f.setAccessTime(atime, null);
return f;
}
@Override
public Pair<? extends INodeFileUnderConstruction,
INodeFileUnderConstructionSnapshot> createSnapshotCopy() {
return new Pair<INodeFileUnderConstructionWithLink,
INodeFileUnderConstructionSnapshot>(
this, new INodeFileUnderConstructionSnapshot(this));
}
@SuppressWarnings("unchecked")
@Override
public <N extends INodeFile & FileWithLink> N getNext() {
return (N)next;
}
@Override
public <N extends INodeFile & FileWithLink> void setNext(N next) {
this.next = next;
}
@Override
public <N extends INodeFile & FileWithLink> void insert(N inode) {
inode.setNext(this.getNext());
this.setNext(inode);
}
@Override
public short getBlockReplication() {
return Util.getBlockReplication(this);
}
@Override
protected int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
if (next == null || next == this) {
// this is the only remaining inode.
return super.collectSubtreeBlocksAndClear(info);
} else {
return Util.collectSubtreeBlocksAndClear(this, info);
}
}
}

View File

@@ -18,19 +18,23 @@
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
/**
* Represent an {@link INodeFile} that is snapshotted.
* Note that snapshot files are represented by {@link INodeFileSnapshot}.
* INodeFile with a link to the next element.
* This class is used to represent the original file that is snapshotted.
* The snapshot files are represented by {@link INodeFileSnapshot}.
* The link of all the snapshot files and the original file form a circular
* linked list so that all elements are accessible by any of the elements.
*/
@InterfaceAudience.Private
public class INodeFileWithLink extends INodeFile implements FileWithLink {
private FileWithLink next;
public class INodeFileWithLink extends INodeFile {
private INodeFileWithLink next;
public INodeFileWithLink(INodeFile f) {
super(f);
setNext(this);
next = this;
}
@Override
@@ -39,35 +43,124 @@ public Pair<INodeFileWithLink, INodeFileSnapshot> createSnapshotCopy() {
new INodeFileSnapshot(this));
}
@SuppressWarnings("unchecked")
@Override
public <N extends INodeFile & FileWithLink> N getNext() {
return (N)next;
}
@Override
public <N extends INodeFile & FileWithLink> void setNext(N next) {
void setNext(INodeFileWithLink next) {
this.next = next;
}
@Override
public <N extends INodeFile & FileWithLink> void insert(N inode) {
INodeFileWithLink getNext() {
return next;
}
/** Insert inode to the circular linked list. */
void insert(INodeFileWithLink inode) {
inode.setNext(this.getNext());
this.setNext(inode);
}
/**
* @return the max file replication of the elements
* in the circular linked list.
*/
@Override
public short getBlockReplication() {
return Util.getBlockReplication(this);
short max = getFileReplication();
// i may be null since next will be set to null when the INode is deleted
for(INodeFileWithLink i = next; i != this && i != null; i = i.getNext()) {
final short replication = i.getFileReplication();
if (replication > max) {
max = replication;
}
}
return max;
}
/**
* {@inheritDoc}
*
* Remove the current inode from the circular linked list.
* If some blocks at the end of the block list no longer belongs to
* any other inode, collect them and update the block list.
*/
@Override
public int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
if (next == null || next == this) {
if (next == this) {
// this is the only remaining inode.
return super.collectSubtreeBlocksAndClear(info);
super.collectSubtreeBlocksAndClear(info);
} else {
return Util.collectSubtreeBlocksAndClear(this, info);
// There are other inode(s) using the blocks.
// Compute max file size excluding this and find the last inode.
long max = next.computeFileSize(true);
short maxReplication = next.getFileReplication();
INodeFileWithLink last = next;
for(INodeFileWithLink i = next.getNext(); i != this; i = i.getNext()) {
final long size = i.computeFileSize(true);
if (size > max) {
max = size;
}
final short rep = i.getFileReplication();
if (rep > maxReplication) {
maxReplication = rep;
}
last = i;
}
collectBlocksBeyondMaxAndClear(max, info);
// remove this from the circular linked list.
last.next = this.next;
// Set the replication of the current INode to the max of all the other
// linked INodes, so that in case the current INode is retrieved from the
// blocksMap before it is removed or updated, the correct replication
// number can be retrieved.
this.setFileReplication(maxReplication, null);
this.next = null;
// clear parent
setParent(null);
}
return 1;
}
private void collectBlocksBeyondMaxAndClear(final long max,
final BlocksMapUpdateInfo info) {
final BlockInfo[] oldBlocks = getBlocks();
if (oldBlocks != null) {
//find the minimum n such that the size of the first n blocks > max
int n = 0;
for(long size = 0; n < oldBlocks.length && max > size; n++) {
size += oldBlocks[n].getNumBytes();
}
// Replace the INode for all the remaining blocks in blocksMap
BlocksMapINodeUpdateEntry entry = new BlocksMapINodeUpdateEntry(this,
next);
if (info != null) {
for (int i = 0; i < n; i++) {
info.addUpdateBlock(oldBlocks[i], entry);
}
}
// starting from block n, the data is beyond max.
if (n < oldBlocks.length) {
// resize the array.
final BlockInfo[] newBlocks;
if (n == 0) {
newBlocks = null;
} else {
newBlocks = new BlockInfo[n];
System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
}
for(INodeFileWithLink i = next; i != this; i = i.getNext()) {
i.setBlocks(newBlocks);
}
// collect the blocks beyond max.
if (info != null) {
for(; n < oldBlocks.length; n++) {
info.addDeleteBlock(oldBlocks[n]);
}
}
}
setBlocks(null);
}
}
}
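Note: the collectBlocksBeyondMaxAndClear logic restored above keeps only the minimum prefix of blocks whose total size exceeds the largest file size still referenced by the other inodes on the circular list, and collects the remaining blocks for deletion. The following is a standalone sketch of just that arithmetic under simplified assumptions; BlockSketch and BlockTruncation are hypothetical stand-ins for the HDFS types.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class BlockSketch {
  final long numBytes;
  BlockSketch(long numBytes) { this.numBytes = numBytes; }
}

final class BlockTruncation {
  // Returns the blocks beyond max, i.e. those no remaining inode still needs.
  static List<BlockSketch> blocksBeyondMax(BlockSketch[] oldBlocks, long max) {
    // find the minimum n such that the size of the first n blocks > max
    int n = 0;
    for (long size = 0; n < oldBlocks.length && max > size; n++) {
      size += oldBlocks[n].numBytes;
    }
    // everything from block n onwards is beyond max and can be collected
    return new ArrayList<>(Arrays.asList(oldBlocks).subList(n, oldBlocks.length));
  }

  public static void main(String[] args) {
    BlockSketch[] blocks = {
        new BlockSketch(64), new BlockSketch(64), new BlockSketch(64)
    };
    // max = 100: the first two blocks (128 bytes) already cover it,
    // so only the third block is collected for deletion.
    System.out.println(blocksBeyondMax(blocks, 100).size());  // prints 1
  }
}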

View File

@@ -364,7 +364,8 @@ public void testSnapshotPathINodesWithAddedFile() throws Exception {
* Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)}
* for snapshot file while modifying file after snapshot.
*/
@Test
// TODO: disable it temporarily since it uses append.
// @Test
public void testSnapshotPathINodesAfterModification() throws Exception {
//file1 was deleted, create it again.
DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);

View File

@@ -270,9 +270,10 @@ private Modification[] prepareModifications(TestDirectoryTree.Node[] nodes)
Modification delete = new FileDeletion(
node.fileList.get((node.nullFileIndex + 1) % node.fileList.size()),
hdfs);
Modification append = new FileAppend(
node.fileList.get((node.nullFileIndex + 2) % node.fileList.size()),
hdfs, (int) BLOCKSIZE);
// TODO: fix append for snapshots
// Modification append = new FileAppend(
// node.fileList.get((node.nullFileIndex + 2) % node.fileList.size()),
// hdfs, (int) BLOCKSIZE);
Modification chmod = new FileChangePermission(
node.fileList.get((node.nullFileIndex + 3) % node.fileList.size()),
hdfs, genRandomPermission());
@@ -289,7 +290,8 @@ private Modification[] prepareModifications(TestDirectoryTree.Node[] nodes)
mList.add(create);
mList.add(delete);
mList.add(append);
// TODO: fix append for snapshots
// mList.add(append);
mList.add(chmod);
mList.add(chown);
mList.add(replication);