From 66f0bb646d040a80bde75b5b3f7eacafd0034fe4 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Fri, 29 Jan 2016 11:05:53 -0800
Subject: [PATCH] HDFS-9647. DiskBalancer: Add getRuntimeSettings. (Contributed by Anu Engineer)

---
 .../hdfs/protocol/ClientDatanodeProtocol.java     | 10 ++++++
 .../ClientDatanodeProtocolTranslatorPB.java       | 17 ++++++++-
 .../main/proto/ClientDatanodeProtocol.proto       | 19 ++++++++++
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt             |  3 ++
 ...atanodeProtocolServerSideTranslatorPB.java     | 24 +++++++++++--
 .../hadoop/hdfs/server/datanode/DataNode.java     | 14 ++++++++
 .../diskbalancer/DiskBalancerConstants.java       | 35 +++++++++++++++++++
 .../diskbalancer/TestDiskBalancerRPC.java         | 16 ++++++---
 8 files changed, 130 insertions(+), 8 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerConstants.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 705c98f48d..dede89e7c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -183,4 +183,14 @@ void submitDiskBalancerPlan(String planID, long planVersion, long bandwidth,
    * Gets the status of an executing diskbalancer Plan.
    */
   WorkStatus queryDiskBalancerPlan() throws IOException;
+
+  /**
+   * Gets a run-time configuration value from a running diskbalancer instance.
+   * For example: the bandwidth of a running Disk Balancer.
+   *
+   * @param key runtime configuration key
+   * @return value of the key as a string.
+   * @throws IOException if there is no such key
+   */
+  String getDiskBalancerSetting(String key) throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 59f2fd238e..e7e0d945bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -56,6 +56,8 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
 import org.apache.hadoop.ipc.ProtobufHelper;
@@ -365,8 +367,8 @@ public void submitDiskBalancerPlan(String planID, long planVersion,
 
   /**
    * Cancels an executing disk balancer plan.
-   * @param planID - A SHA512 hash of the plan string.
    *
+   * @param planID - A SHA512 hash of the plan string.
    * @throws IOException on error
    */
   @Override
@@ -399,4 +401,17 @@ public WorkStatus queryDiskBalancerPlan() throws IOException {
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public String getDiskBalancerSetting(String key) throws IOException {
+    try {
+      DiskBalancerSettingRequestProto request =
+          DiskBalancerSettingRequestProto.newBuilder().setKey(key).build();
+      DiskBalancerSettingResponseProto response =
+          rpcProxy.getDiskBalancerSetting(NULL_CONTROLLER, request);
+      return response.hasValue() ? response.getValue() : null;
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
index 28c8681a96..c61c7008cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
@@ -197,7 +197,21 @@
   optional string status = 2;
   optional string planID = 3;
   optional string currentStatus = 4;
+}
 
+/**
+ * This message sends a request to the datanode to get a specific setting
+ * that is used by the disk balancer.
+ */
+message DiskBalancerSettingRequestProto {
+  required string key = 1;
+}
+
+/**
+ * Response that describes the value of the requested disk balancer setting.
+ */
+message DiskBalancerSettingResponseProto {
+  required string value = 1;
 }
 
 /**
@@ -275,4 +289,9 @@ service ClientDatanodeProtocolService {
    */
   rpc queryDiskBalancerPlan(QueryPlanStatusRequestProto)
       returns (QueryPlanStatusResponseProto);
+  /**
+   * Gets run-time settings of the Disk Balancer.
+   */
+  rpc getDiskBalancerSetting(DiskBalancerSettingRequestProto)
+      returns (DiskBalancerSettingResponseProto);
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
index c6a5554a63..d3bdedffc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
@@ -21,3 +21,6 @@ HDFS-1312 Change Log
 
     HDFS-9645. DiskBalancer: Add Query RPC. (Anu Engineer via Arpit Agarwal)
 
+    HDFS-9647. DiskBalancer: Add getRuntimeSettings. (Anu Engineer
+    via Arpit Agarwal)
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
index 03717c5e4d..32466333b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
@@ -53,7 +53,8 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto;
-
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
@@ -279,7 +280,7 @@ public CancelPlanResponseProto cancelDiskBalancerPlan(
     try {
       impl.cancelDiskBalancePlan(request.getPlanID());
       return CancelPlanResponseProto.newBuilder().build();
-    }catch (Exception e) {
+    } catch (Exception e) {
       throw new ServiceException(e);
     }
   }
@@ -289,7 +290,7 @@ public CancelPlanResponseProto cancelDiskBalancerPlan(
    */
   @Override
   public QueryPlanStatusResponseProto queryDiskBalancerPlan(
-    RpcController controller, QueryPlanStatusRequestProto request)
+      RpcController controller, QueryPlanStatusRequestProto request)
       throws ServiceException {
     try {
       WorkStatus result = impl.queryDiskBalancerPlan();
@@ -304,4 +305,21 @@ public QueryPlanStatusResponseProto queryDiskBalancerPlan(
       throw new ServiceException(e);
     }
   }
+
+  /**
+   * Returns a run-time setting from the diskbalancer, such as bandwidth.
+   */
+  @Override
+  public DiskBalancerSettingResponseProto getDiskBalancerSetting(
+      RpcController controller, DiskBalancerSettingRequestProto request)
+      throws ServiceException {
+    try {
+      String val = impl.getDiskBalancerSetting(request.getKey());
+      return DiskBalancerSettingResponseProto.newBuilder()
+          .setValue(val)
+          .build();
+    } catch (Exception e) {
+      throw new ServiceException(e);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index b5bd7b9b2a..836dc811be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3322,4 +3322,18 @@ public WorkStatus queryDiskBalancerPlan() throws IOException {
     checkSuperuserPrivilege();
     throw new DiskbalancerException("Not Implemented", 0);
   }
+
+  /**
+   * Gets a run-time configuration value from a running diskbalancer instance.
+   * For example: the bandwidth of a running Disk Balancer.
+   *
+   * @param key - String that represents the run-time key value.
+   * @return value of the key as a string.
+   * @throws IOException if there is no such key
+   */
+  @Override
+  public String getDiskBalancerSetting(String key) throws IOException {
+    checkSuperuserPrivilege();
+    throw new DiskbalancerException("Not Implemented", 0);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerConstants.java
new file mode 100644
index 0000000000..553827ef15
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerConstants.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdfs.server.diskbalancer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Constants used by Disk Balancer.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class DiskBalancerConstants {
+  public static final String DISKBALANCER_BANDWIDTH = "DiskBalancerBandwidth";
+  public static final String DISKBALANCER_VOLUME_NAME =
+      "DiskBalancerVolumeName";
+
+  // never constructed.
+  private DiskBalancerConstants() {
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
index a1278160f5..143b776a1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
@@ -60,7 +60,7 @@ public void tearDown() throws Exception {
   }
 
   @Test
-  public void TestSubmitTestRpc() throws Exception {
+  public void testSubmitTestRpc() throws Exception {
     final int dnIndex = 0;
     cluster.restartDataNode(dnIndex);
     cluster.waitActive();
@@ -91,7 +91,7 @@ public void TestSubmitTestRpc() throws Exception {
   }
 
   @Test
-  public void TestCancelTestRpc() throws Exception {
+  public void testCancelTestRpc() throws Exception {
     final int dnIndex = 0;
     cluster.restartDataNode(dnIndex);
     cluster.waitActive();
@@ -122,11 +122,10 @@ public void TestCancelTestRpc() throws Exception {
     }
     thrown.expect(DiskbalancerException.class);
     dataNode.cancelDiskBalancePlan(planHash);
-
   }
 
   @Test
-  public void TestQueryTestRpc() throws Exception {
+  public void testQueryTestRpc() throws Exception {
     final int dnIndex = 0;
     cluster.restartDataNode(dnIndex);
     cluster.waitActive();
@@ -162,4 +161,13 @@ public void TestQueryTestRpc() throws Exception {
     thrown.expect(DiskbalancerException.class);
     dataNode.queryDiskBalancerPlan();
   }
+
+  @Test
+  public void testGetDiskBalancerSetting() throws Exception {
+    final int dnIndex = 0;
+    DataNode dataNode = cluster.getDataNodes().get(dnIndex);
+    thrown.expect(DiskbalancerException.class);
+    dataNode.getDiskBalancerSetting(
+        DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
+  }
 }
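
Note (not part of the diff): a minimal sketch of how a caller might use the new RPC once the DataNode stub above is implemented; the stub currently throws "Not Implemented". It assumes the caller has already obtained a ClientDatanodeProtocol proxy for the target datanode, since proxy creation depends on the caller's configuration and is omitted here. The class and method names below are illustrative only.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;

/** Illustrative caller of the getDiskBalancerSetting RPC added by this patch. */
public class DiskBalancerSettingExample {

  /**
   * Reads the disk balancer bandwidth setting from a datanode.
   * The datanode checks superuser privilege before answering.
   */
  static String readBandwidth(ClientDatanodeProtocol datanode)
      throws IOException {
    // DISKBALANCER_BANDWIDTH is one of the keys defined in
    // DiskBalancerConstants by this patch.
    return datanode.getDiskBalancerSetting(
        DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
  }
}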