HDFS-9647. DiskBalancer: Add getRuntimeSettings. (Contributed by Anu Engineer)

This commit is contained in:
Arpit Agarwal 2016-01-29 11:05:53 -08:00
parent 96fe685b7a
commit 66f0bb646d
8 changed files with 130 additions and 8 deletions

View File

@ -183,4 +183,14 @@ void submitDiskBalancerPlan(String planID, long planVersion, long bandwidth,
* Gets the status of an executing diskbalancer Plan. * Gets the status of an executing diskbalancer Plan.
*/ */
WorkStatus queryDiskBalancerPlan() throws IOException; WorkStatus queryDiskBalancerPlan() throws IOException;
/**
 * Gets a run-time configuration value from a running diskbalancer instance.
 * For example: the bandwidth of the currently running disk balancer plan.
 *
 * @param key run-time configuration key (see DiskBalancerConstants for the
 *            well-known keys, e.g. DISKBALANCER_BANDWIDTH)
 * @return value of the key as a string.
 * @throws IOException - Throws if there is no such key
 */
String getDiskBalancerSetting(String key) throws IOException;
} }

View File

@ -56,6 +56,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.WorkStatus; import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtobufHelper;
@ -365,8 +367,8 @@ public void submitDiskBalancerPlan(String planID, long planVersion,
/** /**
* Cancels an executing disk balancer plan. * Cancels an executing disk balancer plan.
* @param planID - A SHA512 hash of the plan string.
* *
* @param planID - A SHA512 hash of the plan string.
* @throws IOException on error * @throws IOException on error
*/ */
@Override @Override
@ -399,4 +401,17 @@ public WorkStatus queryDiskBalancerPlan() throws IOException {
throw ProtobufHelper.getRemoteException(e); throw ProtobufHelper.getRemoteException(e);
} }
} }
/**
 * Fetches a run-time setting (for example, bandwidth) from the remote
 * diskbalancer instance over RPC.
 *
 * @param key run-time configuration key to look up
 * @return the setting's value, or null if the response carried no value
 * @throws IOException wrapping any remote failure
 */
@Override
public String getDiskBalancerSetting(String key) throws IOException {
  DiskBalancerSettingRequestProto req =
      DiskBalancerSettingRequestProto.newBuilder().setKey(key).build();
  try {
    DiskBalancerSettingResponseProto resp =
        rpcProxy.getDiskBalancerSetting(NULL_CONTROLLER, req);
    if (resp.hasValue()) {
      return resp.getValue();
    }
    return null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
} }

View File

@ -197,7 +197,21 @@ message QueryPlanStatusResponseProto {
optional string status = 2; optional string status = 2;
optional string planID = 3; optional string planID = 3;
optional string currentStatus = 4; optional string currentStatus = 4;
}
/**
 * Request asking the datanode for a specific run-time setting
 * used by the disk balancer (identified by key).
 */
message DiskBalancerSettingRequestProto {
required string key = 1;
}
/**
* Response that describes the value of requested disk balancer setting.
*/
message DiskBalancerSettingResponseProto {
required string value = 1;
} }
/** /**
@ -275,4 +289,9 @@ service ClientDatanodeProtocolService {
*/ */
rpc queryDiskBalancerPlan(QueryPlanStatusRequestProto) rpc queryDiskBalancerPlan(QueryPlanStatusRequestProto)
returns (QueryPlanStatusResponseProto); returns (QueryPlanStatusResponseProto);
/**
 * Gets a run-time setting of the Disk Balancer, identified by key.
 */
rpc getDiskBalancerSetting(DiskBalancerSettingRequestProto)
returns(DiskBalancerSettingResponseProto);
} }

View File

@ -21,3 +21,6 @@ HDFS-1312 Change Log
HDFS-9645. DiskBalancer: Add Query RPC. (Anu Engineer via Arpit Agarwal) HDFS-9645. DiskBalancer: Add Query RPC. (Anu Engineer via Arpit Agarwal)
HDFS-9647. DiskBalancer: Add getRuntimeSettings. (Anu Engineer
via Arpit Agarwal)

View File

@ -53,7 +53,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto;
import com.google.protobuf.RpcController; import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException; import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdfs.server.datanode.WorkStatus; import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
@ -279,7 +280,7 @@ public CancelPlanResponseProto cancelDiskBalancerPlan(
try { try {
impl.cancelDiskBalancePlan(request.getPlanID()); impl.cancelDiskBalancePlan(request.getPlanID());
return CancelPlanResponseProto.newBuilder().build(); return CancelPlanResponseProto.newBuilder().build();
}catch (Exception e) { } catch (Exception e) {
throw new ServiceException(e); throw new ServiceException(e);
} }
} }
@ -304,4 +305,21 @@ public QueryPlanStatusResponseProto queryDiskBalancerPlan(
throw new ServiceException(e); throw new ServiceException(e);
} }
} }
/**
 * Translates a getDiskBalancerSetting RPC into a call on the underlying
 * implementation and wraps the result into the protobuf response.
 * Any failure is rethrown as a ServiceException for the RPC layer.
 */
@Override
public DiskBalancerSettingResponseProto getDiskBalancerSetting(
    RpcController controller, DiskBalancerSettingRequestProto request)
    throws ServiceException {
  try {
    final String settingValue =
        impl.getDiskBalancerSetting(request.getKey());
    DiskBalancerSettingResponseProto.Builder builder =
        DiskBalancerSettingResponseProto.newBuilder();
    builder.setValue(settingValue);
    return builder.build();
  } catch (Exception e) {
    throw new ServiceException(e);
  }
}
} }

View File

@ -3322,4 +3322,18 @@ public WorkStatus queryDiskBalancerPlan() throws IOException {
checkSuperuserPrivilege(); checkSuperuserPrivilege();
throw new DiskbalancerException("Not Implemented", 0); throw new DiskbalancerException("Not Implemented", 0);
} }
/**
 * Returns a run-time configuration value from the running diskbalancer
 * instance, e.g. its current bandwidth. Requires superuser privilege.
 *
 * <p>Not implemented yet: after the privilege check this always throws.
 *
 * @param key - String that represents the run time key value.
 * @return value of the key as a string (once implemented).
 * @throws IOException - Throws if there is no such key
 */
@Override
public String getDiskBalancerSetting(String key) throws IOException {
  checkSuperuserPrivilege();
  throw new DiskbalancerException("Not Implemented", 0);
}
} }

View File

@ -0,0 +1,35 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.diskbalancer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Constants used by Disk Balancer.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class DiskBalancerConstants {
// Run-time setting key for querying the disk balancer's bandwidth.
public static final String DISKBALANCER_BANDWIDTH = "DiskBalancerBandwidth";
// Run-time setting key for querying the volume name in use.
public static final String DISKBALANCER_VOLUME_NAME =
"DiskBalancerVolumeName";
// never constructed.
private DiskBalancerConstants() {
}
}

View File

@ -60,7 +60,7 @@ public void tearDown() throws Exception {
} }
@Test @Test
public void TestSubmitTestRpc() throws Exception { public void testSubmitTestRpc() throws Exception {
final int dnIndex = 0; final int dnIndex = 0;
cluster.restartDataNode(dnIndex); cluster.restartDataNode(dnIndex);
cluster.waitActive(); cluster.waitActive();
@ -91,7 +91,7 @@ public void TestSubmitTestRpc() throws Exception {
} }
@Test @Test
public void TestCancelTestRpc() throws Exception { public void testCancelTestRpc() throws Exception {
final int dnIndex = 0; final int dnIndex = 0;
cluster.restartDataNode(dnIndex); cluster.restartDataNode(dnIndex);
cluster.waitActive(); cluster.waitActive();
@ -122,11 +122,10 @@ public void TestCancelTestRpc() throws Exception {
} }
thrown.expect(DiskbalancerException.class); thrown.expect(DiskbalancerException.class);
dataNode.cancelDiskBalancePlan(planHash); dataNode.cancelDiskBalancePlan(planHash);
} }
@Test @Test
public void TestQueryTestRpc() throws Exception { public void testQueryTestRpc() throws Exception {
final int dnIndex = 0; final int dnIndex = 0;
cluster.restartDataNode(dnIndex); cluster.restartDataNode(dnIndex);
cluster.waitActive(); cluster.waitActive();
@ -162,4 +161,13 @@ public void TestQueryTestRpc() throws Exception {
thrown.expect(DiskbalancerException.class); thrown.expect(DiskbalancerException.class);
dataNode.queryDiskBalancerPlan(); dataNode.queryDiskBalancerPlan();
} }
/**
 * Verifies that getDiskBalancerSetting currently rejects the call
 * (the DataNode side is not implemented yet and must throw).
 */
@Test
public void testGetDiskBalancerSetting() throws Exception {
  // Renamed from testgetDiskBalancerSetting for lowerCamelCase,
  // consistent with testSubmitTestRpc / testCancelTestRpc / testQueryTestRpc.
  final int dnIndex = 0;
  DataNode dataNode = cluster.getDataNodes().get(dnIndex);
  thrown.expect(DiskbalancerException.class);
  dataNode.getDiskBalancerSetting(
      DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
}
} }