diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index 820f7d4451..7e417fd122 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -11,3 +11,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4083. Protocol changes for snapshots. (suresh)
 
   HDFS-4077. Add support for Snapshottable Directory. (Nicholas via suresh)
+
+  HDFS-4087. Protocol changes for listSnapshots functionality.
+  (Brandon Li via suresh)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 8219287971..9a159a48d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -965,4 +965,13 @@ public void createSnapshot(String snapshotName, String snapshotRoot)
    */
   public void deleteSnapshot(String snapshotName, String snapshotRoot)
       throws IOException;
+
+  /**
+   * List snapshots of one directory
+   * @param snapshotRoot the path where the snapshot exists
+   * @return information about each snapshot under snapshotRoot
+   */
+  public SnapshotInfo[] listSnapshots(String snapshotRoot)
+      throws IOException;
 }
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
new file mode 100644
index 0000000000..5fe192466d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
+
+/**
+ * Over-the-wire information about a snapshot.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class SnapshotInfo {
+  private final String snapshotName;
+  private final String snapshotRoot;
+  private final String createTime;
+  private final FsPermissionProto permission;
+  private final String owner;
+  private final String group;
+
+  public SnapshotInfo(String sname, String sroot, String ctime,
+      FsPermissionProto permission, String owner, String group) {
+    this.snapshotName = sname;
+    this.snapshotRoot = sroot;
+    this.createTime = ctime;
+    this.permission = permission;
+    this.owner = owner;
+    this.group = group;
+  }
+
+  final public String getSnapshotName() {
+    return snapshotName;
+  }
+
+  final public String getSnapshotRoot() {
+    return snapshotRoot;
+  }
+
+  final public String getCreateTime() {
+    return createTime;
+  }
+
+  final public FsPermissionProto getPermission() {
+    return permission;
+  }
+
+  final public String getOwner() {
+    return owner;
+  }
+
+  final public String getGroup() {
+    return group;
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() +
+        "{snapshotName=" + snapshotName +
+        "; snapshotRoot=" + snapshotRoot +
+        "; createTime=" + createTime +
+        "; permission=" + permission +
+        "; owner=" + owner +
+        "; group=" + group +
+        "}";
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index cd02dcefc5..f77994c974 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -135,6 +136,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.io.Text;
 
@@ -881,7 +883,25 @@ public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller,
   @Override
   public ListSnapshotsResponseProto listSnapshots(RpcController controller,
       ListSnapshotsRequestProto request) throws ServiceException {
-    // TODO Auto-generated method stub
-    return null;
+    SnapshotInfo[] result;
+
+    try {
+      result = server.listSnapshots(request.getSnapshotRoot());
+      ListSnapshotsResponseProto.Builder builder = ListSnapshotsResponseProto
+          .newBuilder();
+      for (SnapshotInfo si : result) {
+        SnapshotInfoProto.Builder infobuilder = SnapshotInfoProto.newBuilder();
+        infobuilder.setSnapshotName(si.getSnapshotName());
+        infobuilder.setSnapshotRoot(si.getSnapshotRoot());
+        infobuilder.setCreateTime(si.getCreateTime());
+        infobuilder.setPermission(si.getPermission());
+        infobuilder.setOwner(si.getOwner());
+        infobuilder.setGroup(si.getGroup());
+        builder.addSnapshots(infobuilder);
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 36f0ce8a11..0bf0235e56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -42,6 +42,7 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -79,6 +80,8 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListSnapshotsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
@@ -101,6 +104,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -852,6 +856,27 @@ public void deleteSnapshot(String snapshotName, String snapshotRoot)
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
-    
+  }
+
+  @Override
+  public SnapshotInfo[] listSnapshots(String snapshotRoot) throws IOException {
+    SnapshotInfo[] sinfo = null;
+    ListSnapshotsRequestProto req = null;
+
+    req = ListSnapshotsRequestProto.newBuilder().setSnapshotRoot(snapshotRoot)
+        .build();
+    try {
+      ListSnapshotsResponseProto resp = rpcProxy.listSnapshots(null, req);
+      sinfo = new SnapshotInfo[resp.getSnapshotsCount()];
+      for (int i = 0; i < resp.getSnapshotsCount(); i++) {
+        SnapshotInfoProto siProto = resp.getSnapshots(i);
+        sinfo[i] = new SnapshotInfo(siProto.getSnapshotName(), resp
+            .getSnapshots(i).getSnapshotRoot(), siProto.getCreateTime(),
+            siProto.getPermission(), siProto.getOwner(), siProto.getGroup());
+      }
+      return sinfo;
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index ee3cdbcf9a..2bc5b4c079 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -62,6 +62,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
@@ -1084,4 +1085,12 @@ public void deleteSnapshot(String snapshotName, String snapshotRoot)
       throws IOException {
     // TODO Auto-generated method stub
   }
+
+  @Override
+  public SnapshotInfo[] listSnapshots(String snapshotRoot) throws IOException {
+    // TODO Auto-generated method stub
+    SnapshotInfo[] si = new SnapshotInfo[1];
+    si[0] = new SnapshotInfo(null, null, null, null, null, null);
+    return si;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index d01a0c93c9..b705a52b47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -381,9 +381,11 @@ message VersionResponseProto {
  */
 message SnapshotInfoProto {
   required string snapshotName = 1;
-  required FsPermissionProto permission = 2;
-  required string owner = 3;
-  required string group = 4;
+  required string snapshotRoot = 2;
+  required FsPermissionProto permission = 3;
+  required string owner = 4;
+  required string group = 5;
+  required string createTime = 6;
   // TODO: do we need access time?
 }
 