From c12e994eda0f7e0c34fb0c0ff208789586c7142c Mon Sep 17 00:00:00 2001
From: Eli Collins
Date: Tue, 7 Aug 2012 20:17:59 +0000
Subject: [PATCH] HDFS-3754. BlockSender doesn't shutdown ReadaheadPool
 threads. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1370495 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt               | 2 ++
 .../apache/hadoop/hdfs/server/datanode/BlockSender.java   | 8 ++------
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java  | 6 ++++++
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3730ba002f..6cf550d74e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -564,6 +564,8 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3579. libhdfs: fix exception handling. (Colin Patrick McCabe via atm)
 
+    HDFS-3754. BlockSender doesn't shutdown ReadaheadPool threads. (eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 133a550bd9..d8cba72003 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -20,7 +20,6 @@
 import java.io.BufferedInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
-import java.io.EOFException;
 import java.io.FileDescriptor;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -38,7 +37,6 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
-import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.LongWritable;
@@ -163,8 +161,6 @@ class BlockSender implements java.io.Closeable {
    */
   private static final long LONG_READ_THRESHOLD_BYTES = 256 * 1024;
 
-  private static ReadaheadPool readaheadPool =
-      ReadaheadPool.getInstance();
 
   /**
    * Constructor
@@ -691,8 +687,8 @@ private void manageOsCache() throws IOException {
     }
 
     // Perform readahead if necessary
-    if (readaheadLength > 0 && readaheadPool != null) {
-      curReadahead = readaheadPool.readaheadStream(
+    if (readaheadLength > 0 && datanode.readaheadPool != null) {
+      curReadahead = datanode.readaheadPool.readaheadStream(
           clientTraceFmt, blockInFd, offset, readaheadLength, Long.MAX_VALUE,
           curReadahead);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6569fd19f6..dc6e3bc08c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -144,6 +144,7 @@
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -275,6 +276,7 @@ public static InetSocketAddress createSocketAddr(String target) {
   private Configuration conf;
 
   private final String userWithLocalPathAccess;
+  ReadaheadPool readaheadPool;
 
   /**
    * Create the DataNode given a configuration and an array of dataDirs.
@@ -669,6 +671,10 @@ void startDataNode(Configuration conf,
 
     blockPoolManager = new BlockPoolManager(this);
     blockPoolManager.refreshNamenodes(conf);
+
+    // Create the ReadaheadPool from the DataNode context so we can
+    // exit without having to explicitly shutdown its thread pool.
+    readaheadPool = ReadaheadPool.getInstance();
   }
 
   /**
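
For readers following the change: the fix replaces a static ReadaheadPool field in BlockSender (created as a side effect of class loading) with an instance field owned by the DataNode, created in startDataNode() and reached by BlockSender through its datanode reference. Below is a minimal sketch of that ownership pattern only; the names Service and Worker are made up, and a plain JDK ExecutorService stands in for Hadoop's ReadaheadPool, whose actual API and shutdown behavior this sketch does not reproduce.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch only: Service stands in for the long-lived daemon (cf. DataNode),
// Worker for the per-request object (cf. BlockSender). The shared pool is
// created once from the service's startup path and reached through the
// service, instead of living in a static field of the worker class.
public class Service {
  ExecutorService pool;                    // shared pool owned by the service

  void start() {
    // Created alongside the service's other resources, so its lifetime is
    // tied to the service rather than to whichever worker class loads first.
    pool = Executors.newFixedThreadPool(4);
  }

  class Worker {
    void handle(Runnable readahead) {
      if (pool != null) {                  // mirrors the null check in manageOsCache()
        pool.submit(readahead);
      }
    }
  }

  public static void main(String[] args) {
    Service s = new Service();
    s.start();
    s.new Worker().handle(() -> System.out.println("readahead issued"));
    s.pool.shutdown();  // needed here only because this sketch uses a plain,
                        // non-daemon executor; the patch itself avoids an
                        // explicit shutdown by creating the pool in the
                        // DataNode context
  }
}

With the pool owned by the service, per-request Worker instances carry no pool lifecycle responsibility of their own, which is the same property the patch gives BlockSender.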