From 6ff957be88d48a8b41e9fcbe4cf466d672cd7bc1 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Tue, 14 Jul 2015 10:55:58 -0700 Subject: [PATCH] HDFS-8702. Erasure coding: update BlockManager.blockHasEnoughRacks(..) logic for striped block. Contributed by Kai Sasaki. --- .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 3 ++ .../server/blockmanagement/BlockManager.java | 43 ++++++++++++++++++- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt index cd9e19d847..2b91295989 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt @@ -347,3 +347,6 @@ HDFS-8669. Erasure Coding: handle missing internal block locations in DFSStripedInputStream. (jing9) + + HDFS-8702. Erasure coding: update BlockManager.blockHasEnoughRacks(..) logic + for striped block. (Kai Sasaki via jing9) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 968dc0c9d4..1aaf22569c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -3819,14 +3819,53 @@ private int invalidateWorkForOneNode(DatanodeInfo dn) { return toInvalidate.size(); } - // TODO: update the enough rack logic for striped blocks boolean blockHasEnoughRacks(BlockInfo storedBlock, int expectedStorageNum) { if (!this.shouldCheckForEnoughRacks) { return true; } - boolean enoughRacks = false; Collection corruptNodes = corruptReplicas.getNodes(storedBlock); + + if (storedBlock.isStriped()) { + return blockHasEnoughRacksStriped(storedBlock, corruptNodes); + } else { + return 
blockHasEnoughRacksContiguous(storedBlock, expectedStorageNum, + corruptNodes); + } + } + + /** + * Verify whether given striped block is distributed through enough racks. + * As discussed in HDFS-7613, an EC file requires at least as many racks + * as the number of data blocks. + */ + boolean blockHasEnoughRacksStriped(BlockInfo storedBlock, + Collection corruptNodes) { + if (!datanodeManager.hasClusterEverBeenMultiRack()) { + return true; + } + boolean enoughRacks = false; + Set rackNameSet = new HashSet<>(); + int dataBlockNum = ((BlockInfoStriped)storedBlock).getRealDataBlockNum(); + for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) { + final DatanodeDescriptor cur = storage.getDatanodeDescriptor(); + if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { + if ((corruptNodes == null) || !corruptNodes.contains(cur)) { + String rackNameNew = cur.getNetworkLocation(); + rackNameSet.add(rackNameNew); + if (rackNameSet.size() >= dataBlockNum) { + enoughRacks = true; + break; + } + } + } + } + return enoughRacks; + } + + boolean blockHasEnoughRacksContiguous(BlockInfo storedBlock, + int expectedStorageNum, Collection corruptNodes) { + boolean enoughRacks = false; String rackName = null; for(DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) { final DatanodeDescriptor cur = storage.getDatanodeDescriptor();