HDFS-4076. Support snapshot of single files. (szetszwo)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1400245 13f79535-47bb-0310-9956-ffa450edef68
parent d5d2628e44
commit 5c1a7b9d5d
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,5 +1,11 @@
 Hadoop HDFS Change Log
 
+Branch-2802 Snapshot (Unreleased)
+
+  NEW FEATURES
+
+  HDFS-4076. Support snapshot of single files. (szetszwo)
+
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -44,10 +44,10 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -57,6 +57,8 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
 import org.apache.hadoop.hdfs.util.ByteArray;
 
 import com.google.common.base.Preconditions;
@@ -300,6 +302,40 @@ INode unprotectedAddFile(String path,
     return newNode;
   }
 
+  /** Add an INodeFileSnapshot to the source file. */
+  INodeFileSnapshot addFileSnapshot(String srcPath, String dstPath
+      ) throws IOException, QuotaExceededException {
+    waitForReady();
+
+    final INodeFile src = rootDir.getINodeFile(srcPath);
+    INodeFileSnapshot snapshot = new INodeFileSnapshot(src, src.computeFileSize(true));
+
+    writeLock();
+    try {
+      //add destination snaplink
+      snapshot = addNode(dstPath, snapshot, UNKNOWN_DISK_SPACE);
+
+      if (snapshot != null && src.getClass() == INodeFile.class) {
+        //created a snapshot and the source is an INodeFile, replace the source.
+        replaceNode(srcPath, src, new INodeFileWithLink(src));
+      }
+    } finally {
+      writeUnlock();
+
+      if (snapshot == null) {
+        NameNode.stateChangeLog.info(
+            "DIR* FSDirectory.addFileSnapshot: failed to add " + dstPath);
+        return null;
+      }
+    }
+
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.addFileSnapshot: "
+          + dstPath + " is added to the file system");
+    }
+    return snapshot;
+  }
+
   INodeDirectory addToParent(byte[] src, INodeDirectory parentINode,
       INode newNode, boolean propagateModTime) {
     // NOTE: This does not update space counts for parents
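For readers following the patch, here is a minimal, self-contained sketch of the flow addFileSnapshot() implements: look up the source file, create a snapshot node that records the current size, add it under the destination path, and swap the plain source node for its linked variant. This is plain Java, not HDFS code; SnapshotFlowSketch, File, LinkedFile, FileSnapshot, and the namespace map are all hypothetical stand-ins for the real INode tree.

import java.util.HashMap;
import java.util.Map;

public class SnapshotFlowSketch {
  static class File { long size; }                     // stand-in for INodeFile
  static class LinkedFile extends File {               // stand-in for INodeFileWithLink
    LinkedFile(File f) { size = f.size; }
  }
  static class FileSnapshot extends LinkedFile {       // stand-in for INodeFileSnapshot
    final long frozenSize;                             // size at snapshot creation time
    FileSnapshot(File f, long size) { super(f); frozenSize = size; }
  }

  // A toy "namespace": path -> node, standing in for rootDir.
  static final Map<String, File> namespace = new HashMap<>();

  static FileSnapshot addFileSnapshot(String srcPath, String dstPath) {
    File src = namespace.get(srcPath);                 // rootDir.getINodeFile(srcPath)
    if (src == null) return null;
    FileSnapshot snapshot = new FileSnapshot(src, src.size);
    namespace.put(dstPath, snapshot);                  // addNode(dstPath, snapshot, ...)
    if (src.getClass() == File.class) {
      // First snapshot of this file: replace the plain node with the linked variant.
      namespace.put(srcPath, new LinkedFile(src));
    }
    return snapshot;
  }

  public static void main(String[] args) {
    File f = new File();
    f.size = 1024;
    namespace.put("/user/a", f);
    System.out.println(addFileSnapshot("/user/a", "/snap/a").frozenSize); // 1024
  }
}

Note the guard in the real patch does the same thing as the getClass() check here: the source is replaced by an INodeFileWithLink only on the first snapshot, since later snapshots find a linked node already in place.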
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -130,6 +130,21 @@ INode getNode(String path, boolean resolveLink)
     return getNode(getPathComponents(path), resolveLink);
   }
 
+  /** @return the INodeFile corresponding to the path. */
+  INodeFile getINodeFile(String path) throws FileNotFoundException,
+      UnresolvedLinkException {
+    final INode inode = getNode(path, false);
+    if (inode == null) {
+      throw new FileNotFoundException("File \"" + path
+          + "\" not found");
+    }
+    if (!(inode instanceof INodeFile)) {
+      throw new FileNotFoundException("Path \"" + path
+          + "\" is not a file");
+    }
+    return (INodeFile)inode;
+  }
+
 /**
  * Retrieve existing INodes from a path. If existing is big enough to store
  * all path components (existing and non-existing), then existing INodes
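As a usage note, the new helper folds both failure modes, no inode at the path and a non-file inode, into FileNotFoundException, which callers such as addFileSnapshot simply let propagate as IOException. A toy illustration, with GetFileSketch and its INode/FileNode/DirNode types as hypothetical stand-ins rather than HDFS code:

import java.io.FileNotFoundException;
import java.util.HashMap;
import java.util.Map;

public class GetFileSketch {
  interface INode {}
  static class FileNode implements INode {}
  static class DirNode implements INode {}

  // Mirrors the helper's two checks: missing path first, then wrong node type.
  static FileNode getFile(Map<String, INode> tree, String path)
      throws FileNotFoundException {
    INode inode = tree.get(path);
    if (inode == null) {
      throw new FileNotFoundException("File \"" + path + "\" not found");
    }
    if (!(inode instanceof FileNode)) {
      throw new FileNotFoundException("Path \"" + path + "\" is not a file");
    }
    return (FileNode) inode;
  }

  public static void main(String[] args) throws Exception {
    Map<String, INode> tree = new HashMap<>();
    tree.put("/user/a", new FileNode());
    tree.put("/user", new DirNode());
    System.out.println(getFile(tree, "/user/a") != null); // true
    // getFile(tree, "/user")   throws: Path "/user" is not a file
    // getFile(tree, "/user/b") throws: File "/user/b" not found
  }
}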
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -54,6 +54,11 @@ public class INodeFile extends INode implements BlockCollection {
     blocks = blklist;
   }
 
+  protected INodeFile(INodeFile f) {
+    this(f.getPermissionStatus(), f.getBlocks(), f.getBlockReplication(),
+        f.getModificationTime(), f.getAccessTime(), f.getPreferredBlockSize());
+  }
+
   /**
    * Set the {@link FsPermission} of this {@link INodeFile}.
    * Since this is a file,
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java (new file)
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+
+/**
+ * INode representing a snapshot of a file.
+ */
+@InterfaceAudience.Private
+public class INodeFileSnapshot extends INodeFileWithLink {
+  /** The file size at snapshot creation time. */
+  final long size;
+
+  public INodeFileSnapshot(INodeFile f, long size) {
+    super(f);
+    this.size = size;
+  }
+}
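One note on the size field: the live file can keep changing after the snapshot is taken, so addFileSnapshot captures computeFileSize(true) eagerly at creation and the snapshot reports that frozen value from then on. A toy model of this design choice, with FrozenSizeSketch, LiveFile, and Snapshot as hypothetical stand-ins rather than HDFS code:

public class FrozenSizeSketch {
  static class LiveFile {
    long length;                        // changes as the file is written to
    long computeFileSize() { return length; }
  }

  static class Snapshot {
    final long size;                    // frozen at snapshot creation time
    Snapshot(LiveFile f) { this.size = f.computeFileSize(); }
  }

  public static void main(String[] args) {
    LiveFile f = new LiveFile();
    f.length = 100;
    Snapshot s = new Snapshot(f);       // snapshot taken at length 100
    f.length = 500;                     // the live file grows afterwards
    System.out.println(f.computeFileSize()); // 500
    System.out.println(s.size);              // still 100
  }
}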
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java (new file)
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+
+/**
+ * INodeFile with a link to the next element.
+ * This class is used to represent the original file that is snapshotted.
+ * The snapshot files are represented by {@link INodeFileSnapshot}.
+ * The link of all the snapshot files and the original file form a circular
+ * linked list so that all elements are accessible by any of the elements.
+ */
+@InterfaceAudience.Private
+public class INodeFileWithLink extends INodeFile {
+  private INodeFileWithLink next;
+
+  public INodeFileWithLink(INodeFile f) {
+    super(f);
+  }
+
+  void setNext(INodeFileWithLink next) {
+    this.next = next;
+  }
+
+  INodeFileWithLink getNext() {
+    return next;
+  }
+}
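The class javadoc describes a circular linked list: the original file and all of its snapshots are chained through next, with the last element pointing back to the first, so a traversal starting from any element reaches all the others. A self-contained sketch of that invariant, where CircularLinkSketch, Node, and insertAfter are hypothetical stand-ins rather than HDFS code:

public class CircularLinkSketch {
  static class Node {
    final String name;
    Node next;
    Node(String name) { this.name = name; this.next = this; } // one-element circle
  }

  /** Insert a new node right after head, keeping the circle closed. */
  static void insertAfter(Node head, Node added) {
    added.next = head.next;
    head.next = added;
  }

  public static void main(String[] args) {
    Node original = new Node("original");
    insertAfter(original, new Node("snapshot1"));
    insertAfter(original, new Node("snapshot2"));

    // Starting anywhere, a full traversal visits every element exactly once.
    Node n = original;
    do {
      System.out.println(n.name);     // original, snapshot2, snapshot1
      n = n.next;
    } while (n != original);
  }
}

Note that the committed INodeFileWithLink leaves the actual wiring to its callers: the constructor only copies the file's state, and setNext/getNext expose the pointer for later linking.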