diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
index a90b1e9c16..c3bc72538b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
@@ -201,7 +201,7 @@ public BackgroundTaskResult call() throws Exception {
           File chunkFile = dataDir.toPath()
               .resolve(chunkInfo.getChunkName()).toFile();
           if (FileUtils.deleteQuietly(chunkFile)) {
-            LOG.info("block {} chunk {} deleted", blockName,
+            LOG.debug("block {} chunk {} deleted", blockName,
                 chunkFile.getAbsolutePath());
           }
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
index 59cf0e458f..f8f7801de4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.BackgroundService;
 import org.apache.hadoop.utils.BackgroundTask;
 import org.apache.hadoop.utils.BackgroundTaskQueue;
@@ -91,6 +92,7 @@ public int getPriority() {
     @Override
     public BackgroundTaskResult call() throws Exception {
       try {
+        long startTime = Time.monotonicNow();
         List<BlockGroup> keyBlocksList = manager
             .getPendingDeletionKeys(keyLimitPerTask);
         if (keyBlocksList.size() > 0) {
@@ -102,7 +104,7 @@ public BackgroundTaskResult call() throws Exception {
             try {
               // Purge key from KSM DB.
               manager.deletePendingDeletionKey(result.getObjectKey());
-              LOG.info("Key {} deleted from KSM DB", result.getObjectKey());
+              LOG.debug("Key {} deleted from KSM DB", result.getObjectKey());
             } catch (IOException e) {
               // if a pending deletion key is failed to delete,
               // print a warning here and retain it in this state,
@@ -118,6 +120,13 @@ public BackgroundTaskResult call() throws Exception {
                   String.join(",", result.getFailedBlocks()));
             }
           }
+
+          if (!results.isEmpty()) {
+            LOG.info("Number of keys deleted from KSM DB: {},"
+                + " task elapsed time: {}ms",
+                results.size(), Time.monotonicNow() - startTime);
+          }
+
           return results::size;
         } else {
           LOG.debug("No pending deletion key found in KSM");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/block/SCMBlockDeletingService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/block/SCMBlockDeletingService.java
index a4bb4ffe17..3058b1ee90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/block/SCMBlockDeletingService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/block/SCMBlockDeletingService.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.ozone.scm.container.Mapping;
 import org.apache.hadoop.ozone.scm.node.NodeManager;
 import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.BackgroundService;
 import org.apache.hadoop.utils.BackgroundTask;
 import org.apache.hadoop.utils.BackgroundTaskQueue;
@@ -98,28 +99,39 @@ public int getPriority() {
     @Override
     public EmptyTaskResult call() throws Exception {
+      int dnTxCount = 0;
+      long startTime = Time.monotonicNow();
       // Scan SCM DB in HB interval and collect a throttled list of
       // to delete blocks.
       LOG.debug("Running DeletedBlockTransactionScanner");
       DatanodeDeletedBlockTransactions transactions =
           getToDeleteContainerBlocks();
       if (transactions != null && !transactions.isEmpty()) {
-        transactions.getDatanodes().forEach(datanodeID -> {
-          List<DeletedBlocksTransaction> dnTXs =
-              transactions.getDatanodeTransactions(datanodeID);
+        for (DatanodeID datanodeID : transactions.getDatanodes()) {
+          List<DeletedBlocksTransaction> dnTXs = transactions
+              .getDatanodeTransactions(datanodeID);
+          dnTxCount += dnTXs.size();
           // TODO commandQueue needs a cap.
           // We should stop caching new commands if num of un-processed
           // command is bigger than a limit, e.g 50. In case datanode goes
           // offline for sometime, the cached commands be flooded.
           nodeManager.addDatanodeCommand(datanodeID,
               new DeleteBlocksCommand(dnTXs));
-          LOG.info("Added delete block command for datanode {} in the queue,"
+          LOG.debug(
+              "Added delete block command for datanode {} in the queue,"
                   + " number of delete block transactions: {}, TxID list: {}",
               datanodeID, dnTXs.size(), String.join(",",
                   transactions.getTransactionIDList(datanodeID)));
-
-        });
+        }
       }
+
+      if (dnTxCount > 0) {
+        LOG.info("Added {} delete block commands in total for"
+            + " {} datanodes, task elapsed time: {}ms",
+            dnTxCount, transactions.getDatanodes().size(),
+            Time.monotonicNow() - startTime);
+      }
+
       return EmptyTaskResult.newResult();
     }