diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
similarity index 99%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
index 015e154974..7509da52f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -50,6 +50,8 @@
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
@@ -60,6 +62,8 @@
 @InterfaceAudience.Private
 @Deprecated
 public class RemoteBlockReader extends FSInputChecker implements BlockReader {
+  static final Logger LOG = LoggerFactory.getLogger(FSInputChecker.class);
+
   private final Peer peer;
   private final DatanodeID datanodeID;
   private final DataInputStream in;
@@ -488,7 +492,7 @@ public int read(ByteBuffer buf) throws IOException {
   public int available() throws IOException {
     // An optimistic estimate of how much data is available
     // to us without doing network I/O.
-    return DFSClient.TCP_WINDOW_SIZE;
+    return RemoteBlockReader2.TCP_WINDOW_SIZE;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
index 2a77cb6a84..5541e6d7d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
@@ -28,8 +28,6 @@
 import java.util.EnumSet;
 import java.util.UUID;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -56,6 +54,9 @@
 import com.google.common.annotations.VisibleForTesting;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * This is a wrapper around connection to datanode
  * and understands checksum, offset etc.
@@ -85,16 +86,18 @@
 @InterfaceAudience.Private
 public class RemoteBlockReader2 implements BlockReader {
 
-  static final Log LOG = LogFactory.getLog(RemoteBlockReader2.class);
-
+  static final Logger LOG = LoggerFactory.getLogger(RemoteBlockReader2.class);
+  static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB;
+
   final private Peer peer;
   final private DatanodeID datanodeID;
   final private PeerCache peerCache;
   final private long blockId;
   private final ReadableByteChannel in;
+
   private DataChecksum checksum;
-
   private final PacketReceiver packetReceiver = new PacketReceiver(true);
+
   private ByteBuffer curDataSlice = null;
 
   /** offset in block of the last chunk received */
@@ -457,7 +460,7 @@ static void checkSuccess(
   public int available() throws IOException {
     // An optimistic estimate of how much data is available
     // to us without doing network I/O.
-    return DFSClient.TCP_WINDOW_SIZE;
+    return TCP_WINDOW_SIZE;
   }
 
   @Override
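One detail worth pausing on: the new LOG field in RemoteBlockReader is created with FSInputChecker.class rather than RemoteBlockReader.class. Because RemoteBlockReader extends FSInputChecker, the field shadows the superclass's commons-logging LOG while keeping the same logger category, presumably so existing log4j configuration for that category still applies. A minimal sketch of the shadowing idiom, using hypothetical Parent/Child classes in place of FSInputChecker/RemoteBlockReader (only the shadowing mechanics are taken from the patch):

    // Hypothetical stand-ins: Parent plays FSInputChecker, Child plays
    // RemoteBlockReader.
    class Parent {
      // Superclass logger, declared against the commons-logging API.
      static final org.apache.commons.logging.Log LOG =
          org.apache.commons.logging.LogFactory.getLog(Parent.class);
    }

    class Child extends Parent {
      // A field of the same name shadows Parent.LOG, so unqualified LOG
      // references in Child resolve to the slf4j Logger. Passing Parent.class
      // keeps the logger category identical to the one the superclass
      // logs under.
      static final org.slf4j.Logger LOG =
          org.slf4j.LoggerFactory.getLogger(Parent.class);
    }
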
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
index 3045a13b20..c4093b1bd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
@@ -24,14 +24,14 @@
 import java.nio.ByteBuffer;
 import java.nio.channels.ReadableByteChannel;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Ints;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Class to handle reading packets one-at-a-time from the wire.
@@ -47,7 +47,7 @@ public class PacketReceiver implements Closeable {
    */
   private static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;
 
-  static final Log LOG = LogFactory.getLog(PacketReceiver.class);
+  static final Logger LOG = LoggerFactory.getLogger(PacketReceiver.class);
 
   private static final DirectBufferPool bufferPool = new DirectBufferPool();
   private final boolean useDirectBuffers;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7b5979eae0..ef8fac58fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -867,6 +867,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8980. Remove unnecessary block replacement in INodeFile. (jing9)
 
+    HDFS-8990. Move RemoteBlockReader to hdfs-client module.
+    (Mingliang via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
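The Log-to-Logger swaps above (PacketReceiver and the two block readers) are declaration-level changes; existing call sites keep compiling because the method names overlap. The practical gain is slf4j's parameterized logging, which skips message formatting when the level is disabled. A sketch of the two idioms, using a hypothetical LoggingIdiomSketch class and message text that are illustrative, not from this patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggingIdiomSketch {
      static final Logger LOG =
          LoggerFactory.getLogger(LoggingIdiomSketch.class);

      void tracePacket(long offsetInBlock, int dataLen) {
        // commons-logging style: the string is concatenated even when DEBUG
        // is off, so call sites guard with isDebugEnabled().
        if (LOG.isDebugEnabled()) {
          LOG.debug("receiving packet at offset " + offsetInBlock +
              " with " + dataLen + " data bytes");
        }
        // slf4j style: {} placeholders are formatted only if DEBUG is
        // enabled, so no guard is needed.
        LOG.debug("receiving packet at offset {} with {} data bytes",
            offsetInBlock, dataLen);
      }
    }
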
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 3c49ef7977..268a5b9cd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -203,7 +203,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     DataEncryptionKeyFactory {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
-  static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
 
   private final Configuration conf;
   private final DfsClientConf dfsClientConf;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
index 8dd3d6fd38..5ff343ab0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
@@ -24,10 +24,10 @@
 import java.util.List;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -41,7 +41,7 @@ public class TestClientBlockVerification {
   static LocatedBlock testBlock = null;
 
   static {
-    ((Log4JLogger)RemoteBlockReader2.LOG).getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(RemoteBlockReader2.LOG, Level.ALL);
   }
   @BeforeClass
   public static void setupCluster() throws Exception {
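The test change is forced by the logging migration: the old static initializer cast RemoteBlockReader2.LOG to Log4JLogger, which works only while the runtime object is a commons-logging logger backed by log4j. Once LOG is an org.slf4j.Logger, that cast would fail at runtime with a ClassCastException, since the slf4j binding object is not a Log4JLogger. GenericTestUtils.setLogLevel hides the facade behind an overload (the patch compiling implies an overload accepting org.slf4j.Logger exists at this revision). A sketch, using a hypothetical SetLogLevelSketch class placed in the same org.apache.hadoop.hdfs package so the package-private LOG field is visible:

    package org.apache.hadoop.hdfs; // same package as the test class

    import org.apache.hadoop.test.GenericTestUtils;
    import org.apache.log4j.Level;

    class SetLogLevelSketch {
      static {
        // Pre-patch idiom: compiles, but once LOG is an slf4j Logger the
        // cast fails at runtime with ClassCastException:
        //   ((Log4JLogger) RemoteBlockReader2.LOG)
        //       .getLogger().setLevel(Level.ALL);
        //
        // Post-patch idiom: GenericTestUtils resolves the underlying log4j
        // logger itself, so the test no longer cares which logging facade
        // the LOG field is declared with.
        GenericTestUtils.setLogLevel(RemoteBlockReader2.LOG, Level.ALL);
      }
    }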