diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 6b1ee3d5c3..2d0c6ea137 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -84,6 +84,7 @@ class BlockPoolSlice {
   private final int ioFileBufferSize;
   @VisibleForTesting
   public static final String DU_CACHE_FILE = "dfsUsed";
+  private final Runnable shutdownHook;
   private volatile boolean dfsUsedSaved = false;
   private static final int SHUTDOWN_HOOK_PRIORITY = 30;
   private final boolean deleteDuplicateReplicas;
@@ -162,15 +163,16 @@ class BlockPoolSlice {
         .build();
 
     // Make the dfs usage to be saved during shutdown.
-    ShutdownHookManager.get().addShutdownHook(
-      new Runnable() {
-        @Override
-        public void run() {
-          if (!dfsUsedSaved) {
-            saveDfsUsed();
-          }
+    shutdownHook = new Runnable() {
+      @Override
+      public void run() {
+        if (!dfsUsedSaved) {
+          saveDfsUsed();
         }
-      }, SHUTDOWN_HOOK_PRIORITY);
+      }
+    };
+    ShutdownHookManager.get().addShutdownHook(shutdownHook,
+        SHUTDOWN_HOOK_PRIORITY);
   }
 
   File getDirectory() {
@@ -756,6 +758,11 @@ void shutdown(BlockListAsLongs blocksListToPersist) {
     saveDfsUsed();
     dfsUsedSaved = true;
 
+    // Remove the shutdown hook to avoid any memory leak
+    if (shutdownHook != null) {
+      ShutdownHookManager.get().removeShutdownHook(shutdownHook);
+    }
+
    if (dfsUsage instanceof CachingGetSpaceUsed) {
      IOUtils.cleanup(LOG, ((CachingGetSpaceUsed) dfsUsage));
    }
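
For context, the pattern the patch applies is: keep a field reference to the hook Runnable so the very same object can later be passed to ShutdownHookManager#removeShutdownHook. An anonymous Runnable registered inline can never be deregistered, so every BlockPoolSlice it captures stays reachable from the JVM-global hook registry for the life of the DataNode process, even after the slice is shut down. Below is a minimal sketch of that register-then-deregister pattern; ShutdownHookManager.get(), addShutdownHook(Runnable, int), and removeShutdownHook(Runnable) are the real Hadoop API, while DfsUsageCache and its members are hypothetical stand-ins for BlockPoolSlice, not code from the patch.

    import org.apache.hadoop.util.ShutdownHookManager;

    class DfsUsageCache {
      private static final int SHUTDOWN_HOOK_PRIORITY = 30;

      // Held in a field so shutdown() can hand the identical object
      // back to removeShutdownHook(); an inline anonymous Runnable
      // would be unreachable after registration.
      private final Runnable shutdownHook;
      private volatile boolean saved = false;

      DfsUsageCache() {
        // The hook captures 'this' through the field/method references,
        // which is exactly why it must be removed when no longer needed.
        shutdownHook = () -> {
          if (!saved) {
            save();
          }
        };
        ShutdownHookManager.get()
            .addShutdownHook(shutdownHook, SHUTDOWN_HOOK_PRIORITY);
      }

      void save() {
        // Persist the cached usage figure to disk (elided in this sketch).
        saved = true;
      }

      void shutdown() {
        save();
        // Deregister so this instance, and everything the hook captures,
        // becomes eligible for garbage collection; returns false if the
        // hook was never registered or was already removed.
        ShutdownHookManager.get().removeShutdownHook(shutdownHook);
      }
    }

The same reasoning explains the null guard in the patched shutdown(): if construction failed before the hook field was assigned, there is nothing to deregister.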