diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 10041f57dd..e2be9c6226 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -165,6 +165,11 @@ void triggerBlockReport(BlockReportOptions options)
    */
   long getBalancerBandwidth() throws IOException;
 
+  /**
+   * Get volume report of datanode.
+   */
+  List<DatanodeVolumeInfo> getVolumeReport() throws IOException;
+
   /**
    * Submit a disk balancer plan for execution.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
new file mode 100644
index 0000000000..40e091843e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeVolumeInfo.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Locally available datanode volume information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DatanodeVolumeInfo {
+  private long usedSpace;
+  private long freeSpace;
+  private long reservedSpace;
+  private long reservedSpaceForReplicas;
+  private long numBlocks;
+  private StorageType storageType;
+  private String path;
+
+  public DatanodeVolumeInfo(String path, long usedSpace, long freeSpace,
+      long reservedSpace, long reservedSpaceForReplicas, long numBlocks,
+      StorageType type) {
+    this.usedSpace = usedSpace;
+    this.freeSpace = freeSpace;
+    this.reservedSpace = reservedSpace;
+    this.reservedSpaceForReplicas = reservedSpaceForReplicas;
+    this.numBlocks = numBlocks;
+    this.storageType = type;
+    this.path = path;
+  }
+
+  /** get used space. */
+  public long getUsedSpace() {
+    return usedSpace;
+  }
+
+  /**
+   * get free space.
+   */
+  public long getFreeSpace() {
+    return freeSpace;
+  }
+
+  /**
+   * get reserved space.
+   */
+  public long getReservedSpace() {
+    return reservedSpace;
+  }
+
+  /**
+   * get reserved space for replicas.
+   */
+  public long getReservedSpaceForReplicas() {
+    return reservedSpaceForReplicas;
+  }
+
+  /**
+   * get number of blocks.
+   */
+  public long getNumBlocks() {
+    return numBlocks;
+  }
+
+  /**
+   * get storage type.
+   */
+  public StorageType getStorageType() {
+    return storageType;
+  }
+
+  /**
+   * get volume path.
+   */
+  public String getPath() {
+    return path;
+  }
+
+  /**
+   * get volume report.
+   */
+  public String getDatanodeVolumeReport() {
+    StringBuilder report = new StringBuilder();
+    report
+        .append("Directory: " + path)
+        .append("\nStorageType: " + storageType)
+        .append(
+            "\nCapacity Used: " + usedSpace + "("
+                + StringUtils.byteDesc(usedSpace) + ")")
+        .append(
+            "\nCapacity Left: " + freeSpace + "("
+                + StringUtils.byteDesc(freeSpace) + ")")
+        .append(
+            "\nCapacity Reserved: " + reservedSpace + "("
+                + StringUtils.byteDesc(reservedSpace) + ")")
+        .append(
+            "\nReserved Space for Replicas: " + reservedSpaceForReplicas + "("
+                + StringUtils.byteDesc(reservedSpaceForReplicas) + ")")
+        .append("\nBlocks: " + numBlocks);
+    return report.toString();
+  }
+}
\ No newline at end of file
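As a quick sanity check on the new class — not part of the patch — here is a minimal sketch of how `getDatanodeVolumeReport()` renders; the path and all sizes are made-up sample values:

```java
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo;

public class VolumeInfoDemo {
  public static void main(String[] args) {
    // Hypothetical volume: 1 GiB used, 9 GiB free, 100 MiB reserved,
    // 50 MiB reserved for replicas, 42 blocks, on a DISK-type volume.
    DatanodeVolumeInfo info = new DatanodeVolumeInfo(
        "/data/1/dfs/dn", 1L << 30, 9L << 30, 100L << 20, 50L << 20, 42,
        StorageType.DISK);
    // Prints the same multi-line block that dfsadmin later emits per volume:
    //   Directory: /data/1/dfs/dn
    //   StorageType: DISK
    //   Capacity Used: 1073741824(1 GB)
    //   ...
    System.out.println(info.getDatanodeVolumeReport());
  }
}
```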
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 0cf006c34f..084c594d11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -20,6 +20,7 @@
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 import java.util.List;
 
 import javax.net.SocketFactory;
@@ -34,6 +35,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
@@ -45,6 +47,9 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
@@ -73,6 +78,7 @@
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -96,6 +102,9 @@ public class ClientDatanodeProtocolTranslatorPB implements
       RefreshNamenodesRequestProto.newBuilder().build();
   private final static GetDatanodeInfoRequestProto VOID_GET_DATANODE_INFO =
       GetDatanodeInfoRequestProto.newBuilder().build();
+  private final static GetVolumeReportRequestProto
+      VOID_GET_DATANODE_STORAGE_INFO =
+      GetVolumeReportRequestProto.newBuilder().build();
   private final static GetReconfigurationStatusRequestProto
       VOID_GET_RECONFIG_STATUS = GetReconfigurationStatusRequestProto.newBuilder().build();
   private final static StartReconfigurationRequestProto VOID_START_RECONFIG =
@@ -421,4 +430,24 @@ public String getDiskBalancerSetting(String key) throws IOException {
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public List<DatanodeVolumeInfo> getVolumeReport() throws IOException {
+    try {
+      List<DatanodeVolumeInfo> volumeInfoList = new ArrayList<>();
+      GetVolumeReportResponseProto volumeReport = rpcProxy.getVolumeReport(
+          NULL_CONTROLLER, VOID_GET_DATANODE_STORAGE_INFO);
+      List<DatanodeVolumeInfoProto> volumeProtoList = volumeReport
+          .getVolumeInfoList();
+      for (DatanodeVolumeInfoProto proto : volumeProtoList) {
+        volumeInfoList.add(new DatanodeVolumeInfo(proto.getPath(), proto
+            .getUsedSpace(), proto.getFreeSpace(), proto.getReservedSpace(),
+            proto.getReservedSpaceForReplicas(), proto.getNumBlocks(),
+            PBHelperClient.convertStorageType(proto.getStorageType())));
+      }
+      return volumeInfoList;
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
index e4333cd26a..8b26a560d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
@@ -131,6 +131,12 @@ message GetDatanodeInfoResponseProto {
   required DatanodeLocalInfoProto localInfo = 1;
 }
 
+message GetVolumeReportRequestProto {
+}
+
+message GetVolumeReportResponseProto {
+  repeated DatanodeVolumeInfoProto volumeInfo = 1;
+}
 
 message TriggerBlockReportRequestProto {
   required bool incremental = 1;
@@ -255,6 +261,9 @@ service ClientDatanodeProtocolService {
   rpc getDatanodeInfo(GetDatanodeInfoRequestProto)
       returns(GetDatanodeInfoResponseProto);
 
+  rpc getVolumeReport(GetVolumeReportRequestProto)
+      returns(GetVolumeReportResponseProto);
+
   rpc getReconfigurationStatus(GetReconfigurationStatusRequestProto)
       returns(GetReconfigurationStatusResponseProto);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index a34e512a8b..1be92d8c11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -69,6 +69,19 @@ message DatanodeLocalInfoProto {
   required uint64 uptime = 3;
 }
 
+/**
+ * Datanode volume information
+ */
+message DatanodeVolumeInfoProto {
+  required string path = 1;
+  required StorageTypeProto storageType = 2;
+  required uint64 usedSpace = 3;
+  required uint64 freeSpace = 4;
+  required uint64 reservedSpace = 5;
+  required uint64 reservedSpaceForReplicas = 6;
+  required uint64 numBlocks = 7;
+}
+
 /**
  * DatanodeInfo array
  */
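Since every field of the new message is `required`, a `DatanodeVolumeInfoProto` must be fully populated before `build()` succeeds. A small round-trip sketch against the generated protobuf classes (the `DISK` enum value and all sample sizes are illustrative):

```java
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;

public class VolumeProtoRoundTrip {
  public static void main(String[] args) throws Exception {
    // All seven fields are required; build() throws if any is left unset.
    DatanodeVolumeInfoProto proto = DatanodeVolumeInfoProto.newBuilder()
        .setPath("/data/1/dfs/dn")
        .setStorageType(StorageTypeProto.DISK)
        .setUsedSpace(1L << 30)
        .setFreeSpace(9L << 30)
        .setReservedSpace(100L << 20)
        .setReservedSpaceForReplicas(50L << 20)
        .setNumBlocks(42)
        .build();
    // Wire round-trip, the same serialization the RPC layer performs.
    byte[] wire = proto.toByteArray();
    DatanodeVolumeInfoProto parsed = DatanodeVolumeInfoProto.parseFrom(wire);
    System.out.println(parsed.getPath() + " blocks=" + parsed.getNumBlocks());
  }
}
```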
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
index 4f9ef3fe31..09ca2747b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
@@ -18,11 +18,13 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.EvictWritersRequestProto;
@@ -37,6 +39,9 @@
 import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetVolumeReportResponseProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
@@ -47,6 +52,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto;
@@ -323,4 +329,26 @@ public DiskBalancerSettingResponseProto getDiskBalancerSetting(
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public GetVolumeReportResponseProto getVolumeReport(RpcController controller,
+      GetVolumeReportRequestProto request) throws ServiceException {
+    try {
+      Builder builder = GetVolumeReportResponseProto.newBuilder();
+      List<DatanodeVolumeInfo> volumeReport = impl.getVolumeReport();
+      for (DatanodeVolumeInfo info : volumeReport) {
+        builder.addVolumeInfo(DatanodeVolumeInfoProto.newBuilder()
+            .setPath(info.getPath()).setFreeSpace(info.getFreeSpace())
+            .setNumBlocks(info.getNumBlocks())
+            .setReservedSpace(info.getReservedSpace())
+            .setReservedSpaceForReplicas(info.getReservedSpaceForReplicas())
+            .setStorageType(
+                PBHelperClient.convertStorageType(info.getStorageType()))
+            .setUsedSpace(info.getUsedSpace()));
+      }
+      return builder.build();
+    } catch (Exception e) {
+      throw new ServiceException(e);
+    }
+  }
 }
.setUsedSpace(info.getUsedSpace())); + } + return builder.build(); + } catch (Exception e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index fbed59559b..66ef89ac36 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -78,6 +78,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; @@ -122,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol; @@ -3539,4 +3541,29 @@ public String getSlowDisks() { Set slowDisks = diskMetrics.getDiskOutliersStats().keySet(); return JSON.toString(slowDisks); } + + + @Override + public List getVolumeReport() throws IOException { + checkSuperuserPrivilege(); + Map volumeInfoMap = data.getVolumeInfoMap(); + if (volumeInfoMap == null) { + LOG.warn("DataNode volume info not available."); + return new ArrayList<>(0); + } + List volumeInfoList = new ArrayList<>(); + for (Entry volume : volumeInfoMap.entrySet()) { + @SuppressWarnings("unchecked") + Map volumeInfo = (Map) volume.getValue(); + DatanodeVolumeInfo dnStorageInfo = new DatanodeVolumeInfo( + volume.getKey(), (Long) volumeInfo.get("usedSpace"), + (Long) volumeInfo.get("freeSpace"), + (Long) volumeInfo.get("reservedSpace"), + (Long) volumeInfo.get("reservedSpaceForReplicas"), + (Long) volumeInfo.get("numBlocks"), + (StorageType) volumeInfo.get("storageType")); + volumeInfoList.add(dnStorageInfo); + } + return volumeInfoList; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index c1f79e0973..6d67089369 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; @@ -440,7 +441,8 @@ static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOExcep "\t[-reconfig " + "]\n" + "\t[-printTopology]\n" + - "\t[-refreshNamenodes datanode_host:ipc_port]\n"+ + "\t[-refreshNamenodes datanode_host:ipc_port]\n" + + "\t[-getVolumeReport datanode_host:ipc_port]\n" + "\t[-deleteBlockPool datanode_host:ipc_port blockpoolId [force]]\n"+ "\t[-setBalancerBandwidth ]\n" + "\t[-getBalancerBandwidth ]\n" + @@ -1073,6 +1075,9 @@ 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index c1f79e0973..6d67089369 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -68,6 +68,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -440,7 +441,8 @@ static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOExcep
     "\t[-reconfig <namenode|datanode> <host:ipc_port> " +
     "<start|status|properties>]\n" +
     "\t[-printTopology]\n" +
-    "\t[-refreshNamenodes datanode_host:ipc_port]\n"+
+    "\t[-refreshNamenodes datanode_host:ipc_port]\n" +
+    "\t[-getVolumeReport datanode_host:ipc_port]\n" +
     "\t[-deleteBlockPool datanode_host:ipc_port blockpoolId [force]]\n"+
     "\t[-setBalancerBandwidth <bandwidth in bytes per second>]\n" +
     "\t[-getBalancerBandwidth <datanode_host:ipc_port>]\n" +
@@ -1073,6 +1075,9 @@ private void printHelp(String cmd) {
       "\t\tstops serving the removed block-pools\n"+
       "\t\tand starts serving new block-pools\n";
 
+    String getVolumeReport = "-getVolumeReport: Takes a datanodehost:port as "
+        + "argument,\n\t\tand gets the volume report for the given datanode.\n";
+
     String deleteBlockPool = "-deleteBlockPool: Arguments are datanodehost:port, blockpool id\n"+
       "\t\t and an optional argument \"force\". If force is passed,\n"+
       "\t\t block pool directory for the given blockpool id on the given\n"+
@@ -1173,6 +1178,8 @@ private void printHelp(String cmd) {
       System.out.println(printTopology);
     } else if ("refreshNamenodes".equals(cmd)) {
       System.out.println(refreshNamenodes);
+    } else if ("getVolumeReport".equals(cmd)) {
+      System.out.println(getVolumeReport);
     } else if ("deleteBlockPool".equals(cmd)) {
       System.out.println(deleteBlockPool);
     } else if ("setBalancerBandwidth".equals(cmd)) {
@@ -1839,6 +1846,9 @@ private static void printUsage(String cmd) {
     } else if ("-refreshNamenodes".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-refreshNamenodes datanode-host:port]");
+    } else if ("-getVolumeReport".equals(cmd)) {
+      System.err.println("Usage: hdfs dfsadmin"
+          + " [-getVolumeReport datanode-host:port]");
     } else if ("-deleteBlockPool".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-deleteBlockPool datanode-host:port blockpoolId [force]]");
@@ -1971,6 +1981,11 @@ public int run(String[] argv) throws Exception {
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-getVolumeReport".equals(cmd)) {
+      if (argv.length != 2) {
+        printUsage(cmd);
+        return exitCode;
+      }
     } else if ("-reconfig".equals(cmd)) {
       if (argv.length != 4) {
         printUsage(cmd);
@@ -2072,6 +2087,8 @@
       exitCode = printTopology();
     } else if ("-refreshNamenodes".equals(cmd)) {
       exitCode = refreshNamenodes(argv, i);
+    } else if ("-getVolumeReport".equals(cmd)) {
+      exitCode = getVolumeReport(argv, i);
     } else if ("-deleteBlockPool".equals(cmd)) {
       exitCode = deleteBlockPool(argv, i);
     } else if ("-setBalancerBandwidth".equals(cmd)) {
@@ -2134,6 +2151,17 @@ public int run(String[] argv) throws Exception {
     return exitCode;
   }
 
+  private int getVolumeReport(String[] argv, int i) throws IOException {
+    ClientDatanodeProtocol datanode = getDataNodeProxy(argv[i]);
+    List<DatanodeVolumeInfo> volumeReport = datanode.getVolumeReport();
+    System.out.println("Active Volumes : " + volumeReport.size());
+    for (DatanodeVolumeInfo info : volumeReport) {
+      System.out.println("\n" + info.getDatanodeVolumeReport());
+    }
+    return 0;
+  }
+
   private ClientDatanodeProtocol getDataNodeProxy(String datanode)
       throws IOException {
     InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
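Tying it together, an invocation of the new subcommand would look roughly like this; the hostname, IPC port, and all figures are illustrative, while the per-volume block format comes straight from `getDatanodeVolumeReport()`:

```
$ hdfs dfsadmin -getVolumeReport dn1.example.com:9867
Active Volumes : 2

Directory: /data/1/dfs/dn
StorageType: DISK
Capacity Used: 1073741824(1 GB)
Capacity Left: 9663676416(9 GB)
Capacity Reserved: 104857600(100 MB)
Reserved Space for Replicas: 52428800(50 MB)
Blocks: 42

Directory: /data/2/dfs/dn
...
```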
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 1ee700316e..b8d13623f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -358,6 +358,7 @@ Usage:
     hdfs dfsadmin [-reconfig <namenode|datanode> <host:ipc_port> <start|status|properties>]
     hdfs dfsadmin [-printTopology]
     hdfs dfsadmin [-refreshNamenodes datanodehost:port]
+    hdfs dfsadmin [-getVolumeReport datanodehost:port]
     hdfs dfsadmin [-deleteBlockPool datanode-host:port blockpoolId [force]]
     hdfs dfsadmin [-setBalancerBandwidth <bandwidth in bytes per second>]
    hdfs dfsadmin [-getBalancerBandwidth <datanode_host:ipc_port>]
@@ -393,6 +394,7 @@ Usage:
 | `-reconfig` \<namenode\|datanode\> \<host:ipc_port\> \<start\|status\|properties\> | Starts reconfiguration or gets the status of an ongoing reconfiguration, or gets a list of reconfigurable properties. The second parameter specifies the node type. |
 | `-printTopology` | Print a tree of the racks and their nodes as reported by the Namenode |
 | `-refreshNamenodes` datanodehost:port | For the given datanode, reloads the configuration files, stops serving the removed block-pools and starts serving new block-pools. |
+| `-getVolumeReport` datanodehost:port | For the given datanode, get the volume report. |
 | `-deleteBlockPool` datanode-host:port blockpoolId [force] | If force is passed, block pool directory for the given blockpool id on the given datanode is deleted along with its contents, otherwise the directory is deleted only if it is empty. The command will fail if datanode is still serving the block pool. Refer to refreshNamenodes to shutdown a block pool service on a datanode. |
 | `-setBalancerBandwidth` \<bandwidth in bytes per second\> | Changes the network bandwidth used by each datanode during HDFS block balancing. \<bandwidth\> is the maximum number of bytes per second that will be used by each datanode. This value overrides the dfs.datanode.balance.bandwidthPerSec parameter. NOTE: The new value is not persistent on the DataNode. |
 | `-getBalancerBandwidth` \<datanode\_host:ipc\_port\> | Get the network bandwidth(in bytes per second) for the given datanode. This is the maximum network bandwidth used by the datanode during HDFS block balancing.|
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 177c5f417a..a23fe81ada 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -216,6 +216,27 @@ public void testGetDatanodeInfo() throws Exception {
     }
   }
 
+  @Test(timeout = 30000)
+  public void testGetVolumeReport() throws Exception {
+    redirectStream();
+    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
+
+    for (int i = 0; i < cluster.getDataNodes().size(); i++) {
+      resetStream();
+      final DataNode dn = cluster.getDataNodes().get(i);
+      final String addr = String.format("%s:%d", dn.getXferAddress()
+          .getHostString(), dn.getIpcPort());
+      final int ret = ToolRunner.run(dfsAdmin, new String[] {
+          "-getVolumeReport", addr });
+      assertEquals(0, ret);
+
+      /* collect outputs */
+      final List<String> outs = Lists.newArrayList();
+      scanIntoList(out, outs);
+      // MiniDFSCluster configures two storage locations per datanode by default.
+      assertEquals("Active Volumes : 2", outs.get(0));
+    }
+  }
   /**
    * Test that if datanode is not reachable, some DFSAdmin commands will fail
    * elegantly with non-zero ret error code along with exception error message.