HDFS-4079. Add SnapshotManager which maintains a list for all the snapshottable directories and supports snapshot methods such as setting a directory to snapshottable and creating a snapshot.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1400728 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date: 2012-10-21 21:35:13 +00:00
parent 625d7cf20b
commit 820b5495ca
9 changed files with 184 additions and 30 deletions
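For orientation, a minimal usage sketch of the API this commit introduces. The wrapper class, the example path, and the snapshot name are illustrative assumptions; only the SnapshotManager constructor and its two public methods come from the patch below.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;

/** Hypothetical usage sketch; not part of this commit. */
class SnapshotManagerUsageSketch {
  static void example(final Namesystem ns, final FSDirectory fsdir)
      throws IOException {
    final SnapshotManager sm = new SnapshotManager(ns);

    // Replace the INodeDirectory at the path with an INodeDirectorySnapshottable
    // and record it in the manager's list (a no-op if it is already snapshottable).
    sm.setSnapshottable("/user/alice", fsdir);

    // Add a snapshot root named "s0" under the snapshottable directory; as of
    // this patch only the root is created (see the TODOs in createSnapshot).
    sm.createSnapshot("s0", "/user/alice", fsdir);
  }
}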

CHANGES.HDFS-2802.txt

@@ -14,3 +14,7 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4087. Protocol changes for listSnapshots functionality.
(Brandon Li via suresh)
HDFS-4079. Add SnapshotManager which maintains a list for all the
snapshottable directories and supports snapshot methods such as setting a
directory to snapshottable and creating a snapshot. (szetszwo)

FSDirectory.java

@@ -1304,7 +1304,7 @@ INodeFile getFileINode(String src) throws UnresolvedLinkException {
/**
* Get {@link INode} associated with the file / directory.
*/
INode getINode(String src) throws UnresolvedLinkException {
public INode getINode(String src) throws UnresolvedLinkException {
readLock();
try {
INode iNode = rootDir.getNode(src, true);

FSNamesystem.java

@@ -169,7 +169,7 @@
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -307,6 +307,7 @@ private static final void logAuditEvent(boolean succeeded,
/** The namespace tree. */
FSDirectory dir;
private final BlockManager blockManager;
private final SnapshotManager snapshotManager;
private final DatanodeStatistics datanodeStatistics;
// Block pool ID used by this namenode
@@ -464,6 +465,7 @@ public static FSNamesystem loadFromDisk(Configuration conf,
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
this.blockManager = new BlockManager(this, this, conf);
this.snapshotManager = new SnapshotManager(this);
this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
this.fsOwner = UserGroupInformation.getCurrentUser();
@@ -2963,29 +2965,6 @@ void setQuota(String path, long nsQuota, long dsQuota)
getEditLog().logSync();
}
/**
* Set the given directory as a snapshottable directory.
* If the path is already a snapshottable directory, this is a no-op.
* Otherwise, the {@link INodeDirectory} of the path is replaced by an
* {@link INodeDirectorySnapshottable}.
*/
void setSnapshottable(final String path) throws IOException {
writeLock();
try {
final INodeDirectory d = INodeDirectory.valueOf(dir.getINode(path), path);
if (d.isSnapshottable()) {
//The directory is already a snapshottable directory.
return;
}
final INodeDirectorySnapshottable s
= INodeDirectorySnapshottable.newInstance(d);
dir.replaceINodeDirectory(path, d, s);
} finally {
writeUnlock();
}
}
/** Persist all metadata about this file.
* @param src The string representation of the path
* @param clientName The string representation of the client

INode.java

@@ -38,7 +38,7 @@
* directory inodes.
*/
@InterfaceAudience.Private
abstract class INode implements Comparable<byte[]> {
public abstract class INode implements Comparable<byte[]> {
/*
* The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this.
@@ -135,7 +135,7 @@ protected void setPermissionStatus(PermissionStatus ps) {
setPermission(ps.getPermission());
}
/** Get the {@link PermissionStatus} */
protected PermissionStatus getPermissionStatus() {
public PermissionStatus getPermissionStatus() {
return new PermissionStatus(getUserName(),getGroupName(),getFsPermission());
}
private void updatePermissionStatus(PermissionStatusFormat f, long n) {
@@ -246,7 +246,7 @@ byte[] getLocalNameBytes() {
/**
* Set local file name
*/
void setLocalName(String name) {
protected void setLocalName(String name) {
this.name = DFSUtil.string2Bytes(name);
}
@@ -288,7 +288,7 @@ public long getModificationTime() {
/**
* Set last modification time of inode.
*/
void setModificationTime(long modtime) {
public void setModificationTime(long modtime) {
assert isDirectory();
if (this.modificationTime <= modtime) {
this.modificationTime = modtime;

INodeDirectory.java

@@ -50,7 +50,7 @@ public static INodeDirectory valueOf(INode inode, String src
private List<INode> children;
INodeDirectory(String name, PermissionStatus permissions) {
protected INodeDirectory(String name, PermissionStatus permissions) {
super(name, permissions);
this.children = null;
}

INodeDirectorySnapshotRoot.java (new)

@@ -0,0 +1,29 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
/** The root directory of a snapshot. */
public class INodeDirectorySnapshotRoot extends INodeDirectory {
INodeDirectorySnapshotRoot(String name, INodeDirectory dir) {
super(name, dir.getPermissionStatus());
setLocalName(name);
parent = dir;
}
}

INodeDirectorySnapshottable.java

@@ -17,9 +17,15 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
import org.apache.hadoop.util.Time;
/** Directories where taking snapshots is allowed. */
@InterfaceAudience.Private
@@ -36,6 +42,20 @@ static public INodeDirectorySnapshottable newInstance(final INodeDirectory dir)
return new INodeDirectorySnapshottable(nsq, dsq, dir);
}
/** Cast INode to INodeDirectorySnapshottable. */
static public INodeDirectorySnapshottable valueOf(
INode inode, String src) throws IOException {
final INodeDirectory dir = INodeDirectory.valueOf(inode, src);
if (!dir.isSnapshottable()) {
throw new SnapshotException(src + " is not a snapshottable directory.");
}
return (INodeDirectorySnapshottable)dir;
}
/** A list of snapshots of this directory. */
private final List<INodeDirectorySnapshotRoot> snapshots
= new ArrayList<INodeDirectorySnapshotRoot>();
private INodeDirectorySnapshottable(long nsQuota, long dsQuota,
INodeDirectory dir) {
super(nsQuota, dsQuota, dir);
@@ -45,4 +65,16 @@ private INodeDirectorySnapshottable(long nsQuota, long dsQuota,
public boolean isSnapshottable() {
return true;
}
/** Add a snapshot root under this directory. */
INodeDirectorySnapshotRoot addSnapshotRoot(final String name) {
final INodeDirectorySnapshotRoot r = new INodeDirectorySnapshotRoot(name, this);
snapshots.add(r);
//set modification time
final long timestamp = Time.now();
r.setModificationTime(timestamp);
setModificationTime(timestamp);
return r;
}
}
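To make the new snapshots list concrete, a hypothetical caller in the same snapshot package (the helper class and the names "s0"/"s1" are illustrative, not from the patch) would use the package-private addSnapshotRoot() like this:

package org.apache.hadoop.hdfs.server.namenode.snapshot;

/** Hypothetical helper; not part of this patch. */
class SnapshotRootSketch {
  static void takeTwoSnapshots(final INodeDirectorySnapshottable dir) {
    // Each call appends a new snapshot root to the directory's internal list
    // and stamps both the new root and the directory itself with the same
    // Time.now() modification time.
    final INodeDirectorySnapshotRoot s0 = dir.addSnapshotRoot("s0");
    final INodeDirectorySnapshotRoot s1 = dir.addSnapshotRoot("s1");
  }
}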

SnapshotException.java (new)

@@ -0,0 +1,33 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.IOException;
/** Snapshot related exception. */
public class SnapshotException extends IOException {
private static final long serialVersionUID = 1L;
SnapshotException(final String message) {
super(message);
}
SnapshotException(final Throwable cause) {
super(cause);
}
}
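As an illustration of how this exception is meant to surface, a hypothetical caller (not in this patch) can rely on INodeDirectorySnapshottable.valueOf() to reject paths that were never made snapshottable:

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotException;

/** Hypothetical check; not part of this commit. */
class SnapshottableCheckSketch {
  static boolean isSnapshottable(final INode inode, final String src)
      throws IOException {
    try {
      // valueOf() first casts to INodeDirectory, then throws SnapshotException
      // if that directory is not an INodeDirectorySnapshottable.
      INodeDirectorySnapshottable.valueOf(inode, src);
      return true;
    } catch (SnapshotException e) {
      return false;
    }
  }
}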

SnapshotManager.java (new)

@@ -0,0 +1,77 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
/** Manage snapshottable directories and their snapshots. */
public class SnapshotManager {
private final Namesystem namesystem;
/** All snapshottable directories in the namesystem. */
private final List<INodeDirectorySnapshottable> snapshottables
= new ArrayList<INodeDirectorySnapshottable>();
public SnapshotManager(final Namesystem namesystem) {
this.namesystem = namesystem;
}
/**
* Set the given directory as a snapshottable directory.
* If the path is already a snapshottable directory, this is a no-op.
* Otherwise, the {@link INodeDirectory} of the path is replaced by an
* {@link INodeDirectorySnapshottable}.
*/
public void setSnapshottable(final String path,
final FSDirectory fsdir) throws IOException {
namesystem.writeLock();
try {
final INodeDirectory d = INodeDirectory.valueOf(fsdir.getINode(path), path);
if (d.isSnapshottable()) {
//The directory is already a snapshottable directory.
return;
}
final INodeDirectorySnapshottable s
= INodeDirectorySnapshottable.newInstance(d);
fsdir.replaceINodeDirectory(path, d, s);
snapshottables.add(s);
} finally {
namesystem.writeUnlock();
}
}
/** Create a snapshot of given path. */
public void createSnapshot(final String snapshotName, final String path,
final FSDirectory fsdir) throws IOException {
final INodeDirectorySnapshottable d = INodeDirectorySnapshottable.valueOf(
fsdir.getINode(path), path);
//TODO: check ns quota
final INodeDirectorySnapshotRoot root = d.addSnapshotRoot(snapshotName);
//TODO: create the remaining subtree
}
}
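FSNamesystem.setSnapshottable() is deleted above, while this commit only adds and constructs the snapshotManager field, so the two are not yet wired together; a thin delegation along the following lines is one plausible follow-up (an assumption for illustration, not code from this patch):

// Hypothetical methods inside FSNamesystem; not part of this commit. They use
// the 'snapshotManager' and 'dir' fields shown in the diff above.
void setSnapshottable(final String path) throws IOException {
  // SnapshotManager.setSnapshottable() takes the namesystem write lock itself.
  snapshotManager.setSnapshottable(path, dir);
}

void createSnapshot(final String snapshotName, final String path)
    throws IOException {
  // Note: SnapshotManager.createSnapshot() does not acquire any lock yet.
  snapshotManager.createSnapshot(snapshotName, path, dir);
}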