diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 477d308ecd..10041f57dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -175,7 +175,7 @@ void submitDiskBalancerPlan(String planID, long planVersion, String planFile,
 
   /**
    * Cancel an executing plan.
    *
-   * @param planID - A SHA512 hash of the plan string.
+   * @param planID - A SHA-1 hash of the plan string.
    */
   void cancelDiskBalancePlan(String planID) throws IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 045ccd56e0..0cf006c34f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -369,7 +369,7 @@ public void submitDiskBalancerPlan(String planID, long planVersion,
   /**
    * Cancels an executing disk balancer plan.
    *
-   * @param planID - A SHA512 hash of the plan string.
+   * @param planID - A SHA-1 hash of the plan string.
    * @throws IOException on error
    */
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
index 11d04afef7..e4333cd26a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
@@ -154,7 +154,7 @@ message GetBalancerBandwidthResponseProto {
  * balancer plan to a data node.
  */
 message SubmitDiskBalancerPlanRequestProto {
-  required string planID = 1;         // A hash of the plan like SHA-512
+  required string planID = 1;         // A hash of the plan like SHA-1
   required string plan = 2;           // Plan file data in Json format
   optional uint64 planVersion = 3;    // Plan version number
   optional bool ignoreDateCheck = 4;  // Ignore date checks on this plan.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 2d126a0b8c..523c0a674d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -157,7 +157,7 @@ private void shutdownExecutor() {
    * Takes a client submitted plan and converts into a set of work items that
    * can be executed by the blockMover.
    *
-   * @param planId - A SHA512 of the plan string
+   * @param planId - A SHA-1 of the plan string
    * @param planVersion - version of the plan string - for future use.
    * @param planFileName - Plan file name
    * @param planData - Plan data in json format
@@ -308,7 +308,7 @@ private void checkDiskBalancerEnabled()
   /**
    * Verifies that user provided plan is valid.
    *
-   * @param planID - SHA 512 of the plan.
+   * @param planID - SHA-1 of the plan.
    * @param planVersion - Version of the plan, for future use.
    * @param plan - Plan String in Json.
    * @param force - Skip verifying when the plan was generated.
@@ -345,15 +345,15 @@ private void verifyPlanVersion(long planVersion)
   }
 
   /**
-   * Verifies that plan matches the SHA512 provided by the client.
+   * Verifies that plan matches the SHA-1 provided by the client.
    *
-   * @param planID - Sha512 Hex Bytes
+   * @param planID - SHA-1 Hex Bytes
    * @param plan - Plan String
    * @throws DiskBalancerException
    */
   private NodePlan verifyPlanHash(String planID, String plan)
       throws DiskBalancerException {
-    final long sha512Length = 128;
+    final long sha1Length = 40;
     if (plan == null || plan.length() == 0) {
       LOG.error("Disk Balancer - Invalid plan.");
       throw new DiskBalancerException("Invalid plan.",
@@ -361,8 +361,8 @@ private NodePlan verifyPlanHash(String planID, String plan)
     }
 
     if ((planID == null) ||
-        (planID.length() != sha512Length) ||
-        !DigestUtils.sha512Hex(plan.getBytes(Charset.forName("UTF-8")))
+        (planID.length() != sha1Length) ||
+        !DigestUtils.shaHex(plan.getBytes(Charset.forName("UTF-8")))
         .equalsIgnoreCase(planID)) {
       LOG.error("Disk Balancer - Invalid plan hash.");
       throw new DiskBalancerException("Invalid or mis-matched hash.",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
index 740292d6d5..8b83e270f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
@@ -93,7 +93,7 @@ private void cancelPlan(String planData) throws IOException {
     String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
     Preconditions.checkNotNull(dataNodeAddress);
     ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
-    String planHash = DigestUtils.sha512Hex(planData);
+    String planHash = DigestUtils.shaHex(planData);
     try {
       dataNode.cancelDiskBalancePlan(planHash);
     } catch (DiskBalancerException ex) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
index ae9b092e17..f363c340fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
@@ -85,7 +85,7 @@ private void submitPlan(final String planFile, final String planData)
     String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
     Preconditions.checkNotNull(dataNodeAddress);
     ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
-    String planHash = DigestUtils.sha512Hex(planData);
+    String planHash = DigestUtils.shaHex(planData);
     try {
       // TODO : Support skipping date check.
       dataNode.submitDiskBalancerPlan(planHash, DiskBalancer.PLAN_VERSION,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index b51d72f7e0..dc177fddd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -191,7 +191,7 @@ public void testDiskBalancerEndToEnd() throws Exception {
       plan.setNodeUUID(dnNode.getDatanodeUuid());
       plan.setTimeStamp(Time.now());
       String planJson = plan.toJson();
-      String planID = DigestUtils.sha512Hex(planJson);
+      String planID = DigestUtils.shaHex(planJson);
       assertNotNull(plan.getVolumeSetPlans());
       assertTrue(plan.getVolumeSetPlans().size() > 0);
       plan.getVolumeSetPlans().get(0).setTolerancePercent(10);
@@ -307,7 +307,7 @@ public void testBalanceDataBetweenMultiplePairsOfVolumes()
       plan.setNodeUUID(dnNode.getDatanodeUuid());
       plan.setTimeStamp(Time.now());
       String planJson = plan.toJson();
-      String planID = DigestUtils.sha512Hex(planJson);
+      String planID = DigestUtils.shaHex(planJson);
 
       dataNode.submitDiskBalancerPlan(planID, 1, PLAN_FILE, planJson,
           false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
index aa73016cf7..12ce160fc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
@@ -317,7 +317,7 @@ public RpcTestHelper invoke() throws Exception {
       plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
       planner.balanceVolumeSet(node, node.getVolumeSets().get("DISK"), plan);
       planVersion = 1;
-      planHash = DigestUtils.sha512Hex(plan.toJson());
+      planHash = DigestUtils.shaHex(plan.toJson());
       return this;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
index f54bb32902..c362f49706 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
@@ -125,7 +125,7 @@ public void testDiskBalancerEnabled() throws DiskBalancerException {
 
   private void executeSubmitPlan(NodePlan plan, DiskBalancer balancer,
                                  int version) throws IOException {
     String planJson = plan.toJson();
-    String planID = DigestUtils.sha512Hex(planJson);
+    String planID = DigestUtils.shaHex(planJson);
     balancer.submitPlan(planID, version, PLAN_FILE, planJson, false);
   }
@@ -214,7 +214,7 @@ public void testSubmitWithNullPlan() throws Exception {
     NodePlan plan = mockMoverHelper.getPlan();
     DiskBalancer balancer = mockMoverHelper.getBalancer();
     String planJson = plan.toJson();
-    String planID = DigestUtils.sha512Hex(planJson);
+    String planID = DigestUtils.shaHex(planJson);
     thrown.expect(DiskBalancerException.class);
     thrown.expect(new
         DiskBalancerResultVerifier(DiskBalancerException
@@ -231,7 +231,7 @@ public void testSubmitWithInvalidHash() throws Exception {
 
     String planJson = plan.toJson();
-    String planID = DigestUtils.sha512Hex(planJson);
+    String planID = DigestUtils.shaHex(planJson);
 
     char repChar = planID.charAt(0);
     repChar++;
@@ -261,7 +261,7 @@ public void testCancelDiskBalancerPlan() throws Exception {
 
    String planJson = plan.toJson();
-    String planID = DigestUtils.sha512Hex(planJson);
+    String planID = DigestUtils.shaHex(planJson);
     balancer.cancelPlan(planID);
 
     DiskBalancerWorkStatus status = balancer.queryWorkStatus();
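
Note (not part of the patch): the sha512Length = 128 to sha1Length = 40 change in verifyPlanHash follows from the hex digest sizes, since SHA-512 is 64 bytes (128 hex characters) and SHA-1 is 20 bytes (40 hex characters). A minimal, illustrative sketch against Apache Commons Codec, with a made-up plan JSON string, showing the lengths the verification code now expects:

import java.nio.charset.StandardCharsets;
import org.apache.commons.codec.digest.DigestUtils;

public class PlanHashLengthCheck {
  public static void main(String[] args) {
    // Any plan JSON would do; this string is only illustrative.
    String planJson = "{\"volumeSetPlans\":[]}";
    byte[] bytes = planJson.getBytes(StandardCharsets.UTF_8);

    String sha1Hex = DigestUtils.shaHex(bytes);      // SHA-1, what the patch switches to
    String sha512Hex = DigestUtils.sha512Hex(bytes); // SHA-512, what was used before

    System.out.println(sha1Hex.length());   // 40  -> matches the new sha1Length
    System.out.println(sha512Hex.length()); // 128 -> the old sha512Length
  }
}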