HDFS-4097. Provide CLI support for createSnapshot. Contributed by Brandon Li.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1401971 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2012-10-25 04:08:36 +00:00
parent 14d8d0a670
commit 2d5334931e
13 changed files with 129 additions and 137 deletions

View File

@@ -2219,6 +2219,17 @@ public void setTimes(Path p, long mtime, long atime
) throws IOException {
}
/**
* Create a snapshot
* @param snapshotName The name of the snapshot
* @param snapshotRoot The directory where the snapshot will be taken
*/
public void createSnapshot(String snapshotName, String snapshotRoot)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support createSnapshot");
}
// making it volatile to be able to do a double checked locking
private volatile static boolean FILE_SYSTEMS_LOADED = false;
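
The default implementation above throws, so callers must be prepared for file systems that do not support snapshots. A minimal caller sketch (the snapshot name and path are illustrative, not from this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SnapshotCallerSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try {
      // "s1" and "/user/alice/data" are hypothetical example values
      fs.createSnapshot("s1", "/user/alice/data");
    } catch (UnsupportedOperationException e) {
      // thrown by the new default for file systems without snapshot support
      System.err.println(e.getMessage());
    }
  }
}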

View File

@@ -57,6 +57,7 @@ public static void registerCommands(CommandFactory factory) {
factory.registerCommands(Tail.class);
factory.registerCommands(Test.class);
factory.registerCommands(Touch.class);
factory.registerCommands(SnapshotCommands.class);
}
protected FsCommand() {}

View File

@@ -0,0 +1,82 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import java.io.IOException;
import java.util.LinkedList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.shell.PathExceptions.PathIsNotDirectoryException;
/**
* Snapshot related operations
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
class SnapshotCommands extends FsCommand {
private final static String CREATE_SNAPSHOT = "createSnapshot";
public static void registerCommands(CommandFactory factory) {
factory.addClass(CreateSnapshot.class, "-" + CREATE_SNAPSHOT);
}
/**
* Create a snapshot
*/
public static class CreateSnapshot extends FsCommand {
public static final String NAME = CREATE_SNAPSHOT;
public static final String USAGE = "<snapshotName> <snapshotRoot>";
public static final String DESCRIPTION = "Create a snapshot on a directory";
private static String snapshotName;
@Override
protected void processPath(PathData item) throws IOException {
if (!item.stat.isDirectory()) {
throw new PathIsNotDirectoryException(item.toString());
}
}
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
if (args.size() != 2) {
throw new IOException("args number not 2:" + args.size());
}
snapshotName = args.removeFirst();
// TODO: name length check
}
@Override
protected void processArguments(LinkedList<PathData> items)
throws IOException {
super.processArguments(items);
if (exitCode != 0) { // check for error collecting paths
return;
}
assert(items.size() == 1);
PathData sroot = items.getFirst();
String snapshotRoot = sroot.path.toString();
sroot.fs.createSnapshot(snapshotName, snapshotRoot);
}
}
}
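
The command registered above becomes reachable through the shell; a sketch of invoking it programmatically, equivalent to running "hadoop fs -createSnapshot <snapshotName> <snapshotRoot>" (the argument values are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class CreateSnapshotShellSketch {
  public static void main(String[] args) throws Exception {
    // FsShell dispatches "-createSnapshot" to SnapshotCommands.CreateSnapshot
    int rc = ToolRunner.run(new Configuration(), new FsShell(),
        new String[] { "-createSnapshot", "s1", "/user/alice/data" });
    System.exit(rc); // non-zero when, e.g., snapshotRoot is not a directory
  }
}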

View File

@@ -26,3 +26,5 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4091. Add snapshot quota to limit the number of snapshots allowed.
(szetszwo)
HDFS-4097. Provide CLI support for createSnapshot. (Brandon Li via suresh)

View File

@@ -79,7 +79,6 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
@@ -1880,6 +1879,17 @@ public boolean setSafeMode(SafeModeAction action) throws IOException {
return namenode.setSafeMode(action);
}
/**
* Create one snapshot.
*
* @see ClientProtocol#createSnapshot(String snapshotName, String
* snapshotRoot)
*/
public void createSnapshot(String snapshotName, String snapshotRoot)
throws IOException {
namenode.createSnapshot(snapshotName, snapshotRoot);
}
/**
* Allow snapshot on a directory.
*

View File

@@ -890,4 +890,10 @@ public void disallowSnapshot(String snapshotRoot)
throws IOException {
dfs.disallowSnapshot(snapshotRoot);
}
@Override
public void createSnapshot(String snapshotName, String snapshotRoot)
throws IOException {
dfs.createSnapshot(snapshotName, snapshotRoot);
}
}
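
Together with the DFSClient change above, this completes the client-side chain: FileSystem#createSnapshot resolves to DistributedFileSystem, which delegates to DFSClient, which issues the createSnapshot RPC to the NameNode. A sketch of exercising that chain against an HDFS deployment (assumes fs.defaultFS points at an hdfs:// URI; names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HdfsSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes fs.defaultFS=hdfs://...
    FileSystem fs = FileSystem.get(conf);     // a DistributedFileSystem here
    // Forwards to DFSClient#createSnapshot, i.e. the createSnapshot RPC
    fs.createSnapshot("s1", "/user/alice/data");
  }
}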

View File

@@ -958,21 +958,6 @@ public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
public void createSnapshot(String snapshotName, String snapshotRoot)
throws IOException;
/**
* Delete a snapshot
* @param snapshotName name of the snapshot to be deleted
* @param snapshotRoot the path where the snapshot exists
*/
public void deleteSnapshot(String snapshotName, String snapshotRoot)
throws IOException;
/**
* List snapshots of one directory
* @param snapshotRoot the path where the snapshot exists
*/
public SnapshotInfo[] listSnapshots(String snapshotRoot)
throws IOException;
/**
* Allow snapshot on a directory.
* @param snapshotRoot the directory to be snapped

View File

@@ -22,7 +22,7 @@
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
/**
* Interface that represents the over the wire information for a file.
* SnapshotInfo maintains information for a snapshot
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving

View File

@@ -32,7 +32,6 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -55,8 +54,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
@@ -92,8 +89,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
@@ -141,7 +136,6 @@
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.io.Text;
@@ -160,8 +154,6 @@
public class ClientNamenodeProtocolServerSideTranslatorPB implements
ClientNamenodeProtocolPB {
final private ClientProtocol server;
static final DeleteSnapshotResponseProto VOID_DELETE_SNAPSHOT_RESPONSE =
DeleteSnapshotResponseProto.newBuilder().build();
static final CreateSnapshotResponseProto VOID_CREATE_SNAPSHOT_RESPONSE =
CreateSnapshotResponseProto.newBuilder().build();
static final AllowSnapshotResponseProto VOID_ALLOW_SNAPSHOT_RESPONSE =
@@ -876,43 +868,6 @@ public CreateSnapshotResponseProto createSnapshot(RpcController controller,
return VOID_CREATE_SNAPSHOT_RESPONSE;
}
@Override
public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller,
DeleteSnapshotRequestProto request) throws ServiceException {
try {
server.deleteSnapshot(request.getSnapshotName(),
request.getSnapshotRoot());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_DELETE_SNAPSHOT_RESPONSE;
}
@Override
public ListSnapshotsResponseProto listSnapshots(RpcController controller,
ListSnapshotsRequestProto request) throws ServiceException {
SnapshotInfo[] result;
try {
result = server.listSnapshots(request.getSnapshotRoot());
ListSnapshotsResponseProto.Builder builder = ListSnapshotsResponseProto
.newBuilder();
for (SnapshotInfo si : result) {
SnapshotInfoProto.Builder infobuilder = SnapshotInfoProto.newBuilder();
infobuilder.setSnapshotName(si.getSnapshotName());
infobuilder.setSnapshotRoot(si.getSnapshotRoot());
infobuilder.setCreateTime(si.getCreateTime());
infobuilder.setPermission(si.getPermission());
infobuilder.setOwner(si.getOwner());
infobuilder.setGroup(si.getGroup());
builder.addSnapshots(infobuilder);
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public AllowSnapshotResponseProto allowSnapshot(RpcController controller,
AllowSnapshotRequestProto req) throws ServiceException {
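
The createSnapshot handler itself is only partially visible in the hunk above; a hedged sketch of its likely shape, mirroring the removed deleteSnapshot handler (the body is inferred, not quoted from the commit):

@Override
public CreateSnapshotResponseProto createSnapshot(RpcController controller,
    CreateSnapshotRequestProto request) throws ServiceException {
  try {
    // unwrap the proto fields and forward to the ClientProtocol server
    server.createSnapshot(request.getSnapshotName(),
        request.getSnapshotRoot());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_CREATE_SNAPSHOT_RESPONSE;
}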

View File

@@ -42,7 +42,6 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -60,7 +59,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DisallowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
@@ -82,8 +80,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
@@ -106,7 +102,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -848,40 +843,6 @@ public void createSnapshot(String snapshotName, String snapshotRoot)
}
}
@Override
public void deleteSnapshot(String snapshotName, String snapshotRoot)
throws IOException {
DeleteSnapshotRequestProto req = DeleteSnapshotRequestProto.newBuilder()
.setSnapshotName(snapshotName).setSnapshotRoot(snapshotRoot).build();
try {
rpcProxy.deleteSnapshot(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public SnapshotInfo[] listSnapshots(String snapshotRoot) throws IOException {
SnapshotInfo[] sinfo = null;
ListSnapshotsRequestProto req = null;
req = ListSnapshotsRequestProto.newBuilder().setSnapshotRoot(snapshotRoot)
.build();
try {
ListSnapshotsResponseProto resp = rpcProxy.listSnapshots(null, req);
sinfo = new SnapshotInfo[resp.getSnapshotsCount()];
for (int i = 0; i < resp.getSnapshotsCount(); i++) {
SnapshotInfoProto siProto = resp.getSnapshots(i);
sinfo[i] = new SnapshotInfo(siProto.getSnapshotName(), resp
.getSnapshots(i).getSnapshotRoot(), siProto.getCreateTime(),
siProto.getPermission(), siProto.getOwner(), siProto.getGroup());
}
return sinfo;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void allowSnapshot(String snapshotRoot) throws IOException {
AllowSnapshotRequestProto req = AllowSnapshotRequestProto.newBuilder()

View File

@@ -5556,4 +5556,14 @@ public void disallowSnapshot(String snapshotRoot)
throws SafeModeException, IOException {
// TODO: implement
}
/**
* Create a snapshot
* @param snapshotName The name of the snapshot
* @param snapshotRoot The directory where the snapshot will be taken
*/
public void createSnapshot(String snapshotName, String snapshotRoot)
throws SafeModeException, IOException {
// TODO: implement
}
}

View File

@@ -62,7 +62,6 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
@@ -1077,21 +1076,11 @@ public DataEncryptionKey getDataEncryptionKey() throws IOException {
@Override
public void createSnapshot(String snapshotName, String snapshotRoot)
throws IOException {
// TODO Auto-generated method stub
if (!checkPathLength(snapshotRoot)) {
throw new IOException("createSnapshot: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.createSnapshot(snapshotName, snapshotRoot);
@Override
public void deleteSnapshot(String snapshotName, String snapshotRoot)
throws IOException {
// TODO Auto-generated method stub
}
@Override
public SnapshotInfo[] listSnapshots(String snapshotRoot) throws IOException {
// TODO Auto-generated method stub
SnapshotInfo[] si = new SnapshotInfo[1];
si[0] = new SnapshotInfo(null, null, null, null, null, null);
return si;
}
@Override
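
The new guard relies on NameNodeRpcServer's existing checkPathLength helper; a self-contained sketch of that check under the usual limits (the 8000-character and 1000-level constants are assumed here, not shown in this diff):

import org.apache.hadoop.fs.Path;

public class PathLengthCheckSketch {
  // assumed values mirroring MAX_PATH_LENGTH / MAX_PATH_DEPTH
  static final int MAX_PATH_LENGTH = 8000;
  static final int MAX_PATH_DEPTH = 1000;

  // a path passes only if both its total character length and its
  // component depth stay within bounds
  static boolean checkPathLength(String src) {
    Path srcPath = new Path(src);
    return src.length() <= MAX_PATH_LENGTH
        && srcPath.depth() <= MAX_PATH_DEPTH;
  }

  public static void main(String[] args) {
    System.out.println(checkPathLength("/user/alice/data")); // true
  }
}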

View File

@@ -450,22 +450,6 @@ message CreateSnapshotRequestProto {
message CreateSnapshotResponseProto { // void response
}
message DeleteSnapshotRequestProto {
required string snapshotName = 1;
required string snapshotRoot = 2;
}
message DeleteSnapshotResponseProto { // void response
}
message ListSnapshotsRequestProto {
required string snapshotRoot = 1;
}
message ListSnapshotsResponseProto {
repeated SnapshotInfoProto snapshots = 1;
}
message AllowSnapshotRequestProto {
required string snapshotRoot = 1;
}
@@ -555,10 +539,6 @@ service ClientNamenodeProtocol {
returns(GetDataEncryptionKeyResponseProto);
rpc createSnapshot(CreateSnapshotRequestProto)
returns(CreateSnapshotResponseProto);
rpc deleteSnapshot(DeleteSnapshotRequestProto)
returns(DeleteSnapshotResponseProto);
rpc listSnapshots(ListSnapshotsRequestProto)
returns(ListSnapshotsResponseProto);
rpc allowSnapshot(AllowSnapshotRequestProto)
returns(AllowSnapshotResponseProto);
rpc disallowSnapshot(DisallowSnapshotRequestProto)
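
On the wire, createSnapshot now follows the same request/response pattern as the remaining snapshot RPCs; a sketch of the client-side translation, modeled on the deleteSnapshot translator code removed above (the helper class is illustrative):

import java.io.IOException;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.ipc.ProtobufHelper;

class CreateSnapshotRpcSketch {
  // rpcProxy would be the translator's NameNode proxy; passed in here to
  // keep the sketch self-contained
  static void createSnapshot(ClientNamenodeProtocolPB rpcProxy,
      String snapshotName, String snapshotRoot) throws IOException {
    CreateSnapshotRequestProto req = CreateSnapshotRequestProto.newBuilder()
        .setSnapshotName(snapshotName)
        .setSnapshotRoot(snapshotRoot)
        .build();
    try {
      rpcProxy.createSnapshot(null, req); // void response on success
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }
}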