diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 9e67ff27a1..2ed0abd798 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -56,6 +56,7 @@ import org.apache.hadoop.fs.ReadOption; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks; +import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory; import org.apache.hadoop.hdfs.client.impl.DfsClientConf; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java similarity index 97% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java index 7af46097e7..f4b62d9120 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION; @@ -34,7 +34,15 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdfs.client.impl.DfsClientConf; +import org.apache.hadoop.hdfs.BlockReader; +import org.apache.hadoop.hdfs.ClientContext; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.DFSInputStream; +import org.apache.hadoop.hdfs.DFSUtilClient; +import org.apache.hadoop.hdfs.ExtendedBlockId; +import org.apache.hadoop.hdfs.RemotePeerFactory; +import org.apache.hadoop.hdfs.ReplicaAccessor; +import org.apache.hadoop.hdfs.ReplicaAccessorBuilder; import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf; import org.apache.hadoop.hdfs.net.DomainPeer; import org.apache.hadoop.hdfs.net.Peer; @@ -646,7 +654,7 @@ private ShortCircuitReplicaInfo requestFileDescriptors(DomainPeer peer, } /** - * Get a RemoteBlockReader that communicates over a UNIX domain socket. + * Get a BlockReaderRemote that communicates over a UNIX domain socket. * * @return The new BlockReader, or null if we failed to create the block * reader. @@ -709,7 +717,7 @@ private BlockReader getRemoteBlockReaderFromDomain() throws IOException { } /** - * Get a RemoteBlockReader that communicates over a TCP socket. + * Get a BlockReaderRemote that communicates over a TCP socket. * * @return The new BlockReader. We will not return null, but instead throw * an exception if this fails. 
@@ -837,13 +845,13 @@ private static boolean isSecurityException(IOException ioe) { private BlockReader getRemoteBlockReader(Peer peer) throws IOException { int networkDistance = clientContext.getNetworkDistance(datanode); if (conf.getShortCircuitConf().isUseLegacyBlockReader()) { - return RemoteBlockReader.newBlockReader(fileName, + return BlockReaderRemote.newBlockReader(fileName, block, token, startOffset, length, conf.getIoBufferSize(), verifyChecksum, clientName, peer, datanode, clientContext.getPeerCache(), cachingStrategy, tracer, networkDistance); } else { - return RemoteBlockReader2.newBlockReader( + return BlockReaderRemote2.newBlockReader( fileName, block, token, startOffset, length, verifyChecksum, clientName, peer, datanode, clientContext.getPeerCache(), cachingStrategy, tracer, diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java similarity index 99% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java index 68630c785b..1b389960a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import java.io.IOException; import java.nio.ByteBuffer; @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ReadOption; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java similarity index 99% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java index 65a837394d..7d20a83b2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import java.io.DataInputStream; import java.io.File; @@ -33,8 +33,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ReadOption; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.BlockReader; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; -import org.apache.hadoop.hdfs.client.impl.DfsClientConf; import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf; import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java similarity index 95% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java index 7e094f54d2..22d4e23d5e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; @@ -29,6 +29,8 @@ import org.apache.hadoop.fs.FSInputChecker; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.ReadOption; +import org.apache.hadoop.hdfs.BlockReader; +import org.apache.hadoop.hdfs.PeerCache; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -54,13 +56,13 @@ /** * @deprecated this is an old implementation that is being left around - * in case any issues spring up with the new {@link RemoteBlockReader2} + * in case any issues spring up with the new {@link BlockReaderRemote2} * implementation. * It will be removed in the next release. 
*/ @InterfaceAudience.Private @Deprecated -public class RemoteBlockReader extends FSInputChecker implements BlockReader { +public class BlockReaderRemote extends FSInputChecker implements BlockReader { static final Logger LOG = LoggerFactory.getLogger(FSInputChecker.class); private final Peer peer; @@ -209,7 +211,7 @@ protected synchronized int readChunk(long pos, byte[] buf, int offset, int len, byte[] checksumBuf) throws IOException { try (TraceScope ignored = tracer.newScope( - "RemoteBlockReader#readChunk(" + blockId + ")")) { + "BlockReaderRemote#readChunk(" + blockId + ")")) { return readChunkImpl(pos, buf, offset, len, checksumBuf); } } @@ -335,7 +337,7 @@ private synchronized int readChunkImpl(long pos, byte[] buf, int offset, return bytesToRead; } - private RemoteBlockReader(String file, String bpid, long blockId, + private BlockReaderRemote(String file, String bpid, long blockId, DataInputStream in, DataChecksum checksum, boolean verifyChecksum, long startOffset, long firstChunkOffset, long bytesToRead, Peer peer, DatanodeID datanodeID, PeerCache peerCache, Tracer tracer, @@ -386,7 +388,7 @@ private RemoteBlockReader(String file, String bpid, long blockId, * @param clientName Client name * @return New BlockReader instance, or null on error. */ - public static RemoteBlockReader newBlockReader(String file, + public static BlockReaderRemote newBlockReader(String file, ExtendedBlock block, Token blockToken, long startOffset, long len, @@ -412,7 +414,7 @@ public static RemoteBlockReader newBlockReader(String file, BlockOpResponseProto status = BlockOpResponseProto.parseFrom( PBHelperClient.vintPrefixed(in)); - RemoteBlockReader2.checkSuccess(status, peer, block, file); + BlockReaderRemote2.checkSuccess(status, peer, block, file); ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo(); DataChecksum checksum = DataTransferProtoUtil.fromProto( @@ -429,7 +431,7 @@ public static RemoteBlockReader newBlockReader(String file, startOffset + " for file " + file); } - return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(), + return new BlockReaderRemote(file, block.getBlockPoolId(), block.getBlockId(), in, checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID, peerCache, tracer, networkDistance); } @@ -467,7 +469,7 @@ public int readAll(byte[] buf, int offset, int len) throws IOException { void sendReadResult(Peer peer, Status statusCode) { assert !sentStatusCode : "already sent status code to " + peer; try { - RemoteBlockReader2.writeReadResult(peer.getOutputStream(), statusCode); + BlockReaderRemote2.writeReadResult(peer.getOutputStream(), statusCode); sentStatusCode = true; } catch (IOException e) { // It's ok not to be able to send this. But something is probably wrong. @@ -478,14 +480,14 @@ void sendReadResult(Peer peer, Status statusCode) { @Override public int read(ByteBuffer buf) throws IOException { - throw new UnsupportedOperationException("readDirect unsupported in RemoteBlockReader"); + throw new UnsupportedOperationException("readDirect unsupported in BlockReaderRemote"); } @Override public int available() { // An optimistic estimate of how much data is available // to us without doing network I/O. 
- return RemoteBlockReader2.TCP_WINDOW_SIZE; + return BlockReaderRemote2.TCP_WINDOW_SIZE; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote2.java similarity index 96% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote2.java index 9437353921..ebdc3fd653 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote2.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import java.io.BufferedOutputStream; import java.io.DataInputStream; @@ -30,6 +30,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ReadOption; +import org.apache.hadoop.hdfs.BlockReader; +import org.apache.hadoop.hdfs.PeerCache; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -78,13 +80,13 @@ * * This is a new implementation introduced in Hadoop 0.23 which * is more efficient and simpler than the older BlockReader - * implementation. It should be renamed to RemoteBlockReader + * implementation. It should be renamed to BlockReaderRemote * once we are confident in it. */ @InterfaceAudience.Private -public class RemoteBlockReader2 implements BlockReader { +public class BlockReaderRemote2 implements BlockReader { - static final Logger LOG = LoggerFactory.getLogger(RemoteBlockReader2.class); + static final Logger LOG = LoggerFactory.getLogger(BlockReaderRemote2.class); static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB; final private Peer peer; @@ -138,7 +140,7 @@ public synchronized int read(byte[] buf, int off, int len) if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) { try (TraceScope ignored = tracer.newScope( - "RemoteBlockReader2#readNextPacket(" + blockId + ")")) { + "BlockReaderRemote2#readNextPacket(" + blockId + ")")) { readNextPacket(); } } @@ -162,7 +164,7 @@ public synchronized int read(ByteBuffer buf) throws IOException { if (curDataSlice == null || (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) { try (TraceScope ignored = tracer.newScope( - "RemoteBlockReader2#readNextPacket(" + blockId + ")")) { + "BlockReaderRemote2#readNextPacket(" + blockId + ")")) { readNextPacket(); } } @@ -273,7 +275,7 @@ private void readTrailingEmptyPacket() throws IOException { } } - protected RemoteBlockReader2(String file, long blockId, + protected BlockReaderRemote2(String file, long blockId, DataChecksum checksum, boolean verifyChecksum, long startOffset, long firstChunkOffset, long bytesToRead, Peer peer, DatanodeID datanodeID, PeerCache peerCache, Tracer tracer, @@ -425,7 +427,7 @@ public static BlockReader newBlockReader(String file, startOffset + " for file " + file); } - return new RemoteBlockReader2(file, block.getBlockId(), checksum, + return new BlockReaderRemote2(file, block.getBlockId(), checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID, 
peerCache, tracer, networkDistance); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderUtil.java similarity index 95% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderUtil.java index 85f925f30c..57fbe47632 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderUtil.java @@ -15,9 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.BlockReader; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/ExternalBlockReader.java similarity index 96% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/ExternalBlockReader.java index 707a56a8a0..3d6d9a47c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExternalBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/ExternalBlockReader.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import java.io.IOException; import java.nio.ByteBuffer; @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ReadOption; +import org.apache.hadoop.hdfs.BlockReader; +import org.apache.hadoop.hdfs.ReplicaAccessor; import org.apache.hadoop.hdfs.shortcircuit.ClientMmap; import org.apache.hadoop.util.DataChecksum; diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 3ae8b59d2b..426fb72d71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -133,7 +133,7 @@ - + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java index 7f71bf76dd..0379946e1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks; -import org.apache.hadoop.hdfs.RemoteBlockReader2; +import org.apache.hadoop.hdfs.client.impl.BlockReaderRemote2; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -112,7 +112,7 @@ private BlockReader createBlockReader(long offsetInBlock) { * * TODO: add proper tracer */ - return RemoteBlockReader2.newBlockReader( + return BlockReaderRemote2.newBlockReader( "dummy", block, blockToken, offsetInBlock, block.getNumBytes() - offsetInBlock, true, "", newConnectedPeer(block, dnAddr, blockToken, source), source, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 80f510cfa1..2451df3129 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -43,7 +43,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.hdfs.BlockReader; -import org.apache.hadoop.hdfs.BlockReaderFactory; +import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtilClient; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java index a1af1fc5bb..32a34e8a68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java @@ -38,7 +38,7 @@ import org.apache.commons.lang.mutable.MutableBoolean; import 
org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdfs.BlockReaderTestUtil; +import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.ClientContext; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -48,7 +48,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; -import org.apache.hadoop.hdfs.client.impl.DfsClientConf; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java index ccc94a461e..3c58133947 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java index 8d2398d5fa..3e0ad6daf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java @@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java index f2043fbddb..c9d831a29a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java index 05698ec8d9..d54164fc3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import 
org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java similarity index 95% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java index 1ca1ca5f61..c5979210e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -32,6 +32,15 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.BlockReader; +import org.apache.hadoop.hdfs.ClientContext; +import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtilClient; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.RemotePeerFactory; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -224,7 +233,7 @@ public DataNode getDataNode(LocatedBlock testBlock) { int ipcport = nodes[0].getIpcPort(); return cluster.getDataNode(ipcport); } - + public static void enableHdfsCachingTracing() { LogManager.getLogger(CacheReplicationMonitor.class.getName()).setLevel( Level.TRACE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderBase.java similarity index 93% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderBase.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderBase.java index 3d916a79fc..70ed428332 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderBase.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import static org.junit.Assert.assertEquals; @@ -24,6 +24,9 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.BlockReader; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.junit.After; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java similarity index 97% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java index a392c6c6c5..ca498c7882 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT; @@ -41,6 +41,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.BlockReader; +import org.apache.hadoop.hdfs.DFSInputStream; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -125,7 +130,7 @@ public void testFallbackFromShortCircuitToUnixDomainTraffic() cluster.shutdown(); sockDir.close(); } - + /** * Test the case where we have multiple threads waiting on the * ShortCircuitCache delivering a certain ShortCircuitReplica. @@ -200,7 +205,7 @@ public void run() { * Test the case where we have a failure to complete a short circuit read * that occurs, and then later on, we have a success. * Any thread waiting on a cache load should receive the failure (if it - * occurs); however, the failure result should not be cached. We want + * occurs); however, the failure result should not be cached. We want * to be able to retry later and succeed. */ @Test(timeout=60000) @@ -244,7 +249,7 @@ public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() { public void run() { try { // First time should fail. 
- List locatedBlocks = + List locatedBlocks = cluster.getNameNode().getRpcServer().getBlockLocations( TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks(); LocatedBlock lblock = locatedBlocks.get(0); // first block @@ -253,7 +258,7 @@ public void run() { blockReader = BlockReaderTestUtil.getBlockReader( cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN); Assert.fail("expected getBlockReader to fail the first time."); - } catch (Throwable t) { + } catch (Throwable t) { Assert.assertTrue("expected to see 'TCP reads were disabled " + "for testing' in exception " + t, t.getMessage().contains( "TCP reads were disabled for testing")); @@ -267,7 +272,7 @@ public void run() { try { blockReader = BlockReaderTestUtil.getBlockReader( cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN); - } catch (Throwable t) { + } catch (Throwable t) { LOG.error("error trying to retrieve a block reader " + "the second time.", t); throw t; @@ -327,7 +332,7 @@ public void testShortCircuitReadFromServerWithoutShm() throws Exception { calculateFileContentsFromSeed(SEED, TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents, expected)); final ShortCircuitCache cache = - fs.dfs.getClientContext().getShortCircuitCache(); + fs.getClient().getClientContext().getShortCircuitCache(); final DatanodeInfo datanode = new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId()); cache.getDfsClientShmManager().visit(new Visitor() { @@ -344,7 +349,7 @@ public void visit(HashMap info) cluster.shutdown(); sockDir.close(); } - + /** * Test that a client which does not support short-circuit reads using * shared memory can talk with a server which supports it. @@ -375,12 +380,12 @@ public void testShortCircuitReadFromClientWithoutShm() throws Exception { calculateFileContentsFromSeed(SEED, TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents, expected)); final ShortCircuitCache cache = - fs.dfs.getClientContext().getShortCircuitCache(); + fs.getClient().getClientContext().getShortCircuitCache(); Assert.assertEquals(null, cache.getDfsClientShmManager()); cluster.shutdown(); sockDir.close(); } - + /** * Test shutting down the ShortCircuitCache while there are things in it. */ @@ -407,7 +412,7 @@ public void testShortCircuitCacheShutdown() throws Exception { calculateFileContentsFromSeed(SEED, TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents, expected)); final ShortCircuitCache cache = - fs.dfs.getClientContext().getShortCircuitCache(); + fs.getClient().getClientContext().getShortCircuitCache(); cache.close(); Assert.assertTrue(cache.getDfsClientShmManager(). getDomainSocketWatcher().isClosed()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java similarity index 96% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java index 0048d2a7f6..bd0c7c32c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import static org.hamcrest.CoreMatchers.equalTo; @@ -32,9 +32,15 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.ClientContext; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSInputStream; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.ExtendedBlockId; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; -import org.apache.hadoop.hdfs.client.impl.DfsClientConf; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache; @@ -55,24 +61,24 @@ public class TestBlockReaderLocal { private static TemporarySocketDirectory sockDir; - + @BeforeClass public static void init() { sockDir = new TemporarySocketDirectory(); DomainSocket.disableBindPathValidation(); } - + @AfterClass public static void shutdown() throws IOException { sockDir.close(); } - + public static void assertArrayRegionsEqual(byte []buf1, int off1, byte []buf2, int off2, int len) { for (int i = 0; i < len; i++) { if (buf1[off1 + i] != buf2[off2 + i]) { - Assert.fail("arrays differ at byte " + i + ". " + - "The first array has " + (int)buf1[off1 + i] + + Assert.fail("arrays differ at byte " + i + ". " + + "The first array has " + (int)buf1[off1 + i] + ", but the second array has " + (int)buf2[off2 + i]); } } @@ -85,7 +91,7 @@ public static void assertArrayRegionsEqual(byte []buf1, int off1, byte []buf2, * @param buf The ByteBuffer to read into * @param off The offset in the buffer to read into * @param len The number of bytes to read. 
- * + * * @throws IOException If it could not read the requested number of bytes */ private static void readFully(BlockReaderLocal reader, @@ -120,7 +126,7 @@ public void doTest(BlockReaderLocal reader, byte original[]) // default: no-op } } - + public void runBlockReaderLocalTest(BlockReaderLocalTest test, boolean checksum, long readahead) throws IOException { Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null)); @@ -139,7 +145,7 @@ public void runBlockReaderLocalTest(BlockReaderLocalTest test, BlockReaderLocal blockReaderLocal = null; FSDataInputStream fsIn = null; byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; - + FileSystem fs = null; ShortCircuitShm shm = null; RandomAccessFile raf = null; @@ -186,7 +192,7 @@ public void runBlockReaderLocalTest(BlockReaderLocalTest test, raf.setLength(8192); FileInputStream shmStream = new FileInputStream(raf.getFD()); shm = new ShortCircuitShm(ShmId.createRandom(), shmStream); - ShortCircuitReplica replica = + ShortCircuitReplica replica = new ShortCircuitReplica(key, dataIn, metaIn, shortCircuitCache, Time.now(), shm.allocAndRegisterSlot( ExtendedBlockId.fromExtendedBlock(block))); @@ -216,21 +222,21 @@ public void runBlockReaderLocalTest(BlockReaderLocalTest test, if (raf != null) raf.close(); } } - - private static class TestBlockReaderLocalImmediateClose + + private static class TestBlockReaderLocalImmediateClose extends BlockReaderLocalTest { } - + @Test public void testBlockReaderLocalImmediateClose() throws IOException { runBlockReaderLocalTest(new TestBlockReaderLocalImmediateClose(), true, 0); runBlockReaderLocalTest(new TestBlockReaderLocalImmediateClose(), false, 0); } - - private static class TestBlockReaderSimpleReads + + private static class TestBlockReaderSimpleReads extends BlockReaderLocalTest { @Override - public void doTest(BlockReaderLocal reader, byte original[]) + public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { byte buf[] = new byte[TEST_LENGTH]; reader.readFully(buf, 0, 512); @@ -246,7 +252,7 @@ public void doTest(BlockReaderLocal reader, byte original[]) BlockReaderLocalTest.BYTES_PER_CHECKSUM); } } - + @Test public void testBlockReaderSimpleReads() throws IOException { runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), true, @@ -275,11 +281,11 @@ public void testBlockReaderSimpleReadsNoChecksumNoReadahead() throws IOException { runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), false, 0); } - - private static class TestBlockReaderLocalArrayReads2 + + private static class TestBlockReaderLocalArrayReads2 extends BlockReaderLocalTest { @Override - public void doTest(BlockReaderLocal reader, byte original[]) + public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { byte buf[] = new byte[TEST_LENGTH]; reader.readFully(buf, 0, 10); @@ -296,7 +302,7 @@ public void doTest(BlockReaderLocal reader, byte original[]) assertArrayRegionsEqual(original, 1716, buf, 1716, 5); } } - + @Test public void testBlockReaderLocalArrayReads2() throws IOException { runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(), @@ -322,10 +328,10 @@ public void testBlockReaderLocalArrayReads2NoChecksumNoReadahead() runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(), false, 0); } - private static class TestBlockReaderLocalByteBufferReads + private static class TestBlockReaderLocalByteBufferReads extends BlockReaderLocalTest { @Override - public void doTest(BlockReaderLocal reader, byte original[]) + public void doTest(BlockReaderLocal 
reader, byte original[]) throws IOException { ByteBuffer buf = ByteBuffer.wrap(new byte[TEST_LENGTH]); readFully(reader, buf, 0, 10); @@ -339,7 +345,7 @@ public void doTest(BlockReaderLocal reader, byte original[]) assertArrayRegionsEqual(original, 811, buf.array(), 811, 5); } } - + @Test public void testBlockReaderLocalByteBufferReads() throws IOException { @@ -354,7 +360,7 @@ public void testBlockReaderLocalByteBufferReadsNoChecksum() new TestBlockReaderLocalByteBufferReads(), false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT); } - + @Test public void testBlockReaderLocalByteBufferReadsNoReadahead() throws IOException { @@ -373,7 +379,7 @@ public void testBlockReaderLocalByteBufferReadsNoChecksumNoReadahead() * Test reads that bypass the bounce buffer (because they are aligned * and bigger than the readahead). */ - private static class TestBlockReaderLocalByteBufferFastLaneReads + private static class TestBlockReaderLocalByteBufferFastLaneReads extends BlockReaderLocalTest { @Override public void doTest(BlockReaderLocal reader, byte original[]) @@ -410,7 +416,7 @@ public void doTest(BlockReaderLocal reader, byte original[]) 50); } } - + @Test public void testBlockReaderLocalByteBufferFastLaneReads() throws IOException { @@ -456,7 +462,7 @@ public void setup(File blockFile, boolean usingChecksums) } } - public void doTest(BlockReaderLocal reader, byte original[]) + public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { byte buf[] = new byte[TEST_LENGTH]; if (usingChecksums) { @@ -471,19 +477,19 @@ public void doTest(BlockReaderLocal reader, byte original[]) } } } - + @Test public void testBlockReaderLocalReadCorruptStart() throws IOException { runBlockReaderLocalTest(new TestBlockReaderLocalReadCorruptStart(), true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT); } - + private static class TestBlockReaderLocalReadCorrupt extends BlockReaderLocalTest { boolean usingChecksums = false; @Override - public void setup(File blockFile, boolean usingChecksums) + public void setup(File blockFile, boolean usingChecksums) throws IOException { RandomAccessFile bf = null; this.usingChecksums = usingChecksums; @@ -496,7 +502,7 @@ public void setup(File blockFile, boolean usingChecksums) } } - public void doTest(BlockReaderLocal reader, byte original[]) + public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { byte buf[] = new byte[TEST_LENGTH]; try { @@ -522,7 +528,7 @@ public void doTest(BlockReaderLocal reader, byte original[]) } } } - + @Test public void testBlockReaderLocalReadCorrupt() throws IOException { @@ -555,7 +561,7 @@ private static class TestBlockReaderLocalWithMlockChanges public void setup(File blockFile, boolean usingChecksums) throws IOException { } - + @Override public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { @@ -675,7 +681,7 @@ public void testBlockReaderLocalOnFileWithoutChecksumNoChecksumNoReadahead() runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(), false, 0); } - + @Test public void testBlockReaderLocalReadZeroBytes() throws IOException { @@ -703,7 +709,7 @@ public void testBlockReaderLocalReadZeroBytesNoChecksumNoReadahead() runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(), false, 0); } - + @Test(timeout=60000) public void TestStatisticsForShortCircuitLocalRead() throws Exception { @@ -714,7 +720,7 @@ public void TestStatisticsForShortCircuitLocalRead() throws Exception { public void TestStatisticsForLocalRead() throws Exception 
{ testStatistics(false); } - + private void testStatistics(boolean isShortCircuit) throws Exception { Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null); HdfsConfiguration conf = new HdfsConfiguration(); @@ -756,12 +762,12 @@ private void testStatistics(boolean isShortCircuit) throws Exception { IOUtils.readFully(fsIn, original, 0, BlockReaderLocalTest.TEST_LENGTH); HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; - Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, + Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalBytesRead()); - Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, + Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalLocalBytesRead()); if (isShortCircuit) { - Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, + Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalShortCircuitBytesRead()); } else { Assert.assertEquals(0, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java similarity index 95% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java index af28bd314f..273619c10b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -30,6 +30,13 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSInputStream; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DFSUtilClient; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; @@ -52,7 +59,7 @@ public static void setupCluster() throws IOException { DFSInputStream.tcpReadsDisabledForTesting = true; DomainSocket.disableBindPathValidation(); } - + private static HdfsConfiguration getConfiguration( TemporarySocketDirectory socketDir) throws IOException { HdfsConfiguration conf = new HdfsConfiguration(); @@ -84,7 +91,7 @@ private static HdfsConfiguration getConfiguration( public void testStablePositionAfterCorruptRead() throws Exception { final short REPL_FACTOR = 1; final long FILE_LENGTH = 512L; - + HdfsConfiguration conf = getConfiguration(null); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java similarity index 87% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java index cef1d6d852..c30aac81b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java @@ -15,11 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; -public class TestRemoteBlockReader extends TestBlockReaderBase { +public class TestBlockReaderRemote extends TestBlockReaderBase { HdfsConfiguration createConf() { HdfsConfiguration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote2.java similarity index 85% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader2.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote2.java index c23b4b78f5..34a153908d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote2.java @@ -15,9 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; -public class TestRemoteBlockReader2 extends TestBlockReaderBase { +import org.apache.hadoop.hdfs.HdfsConfiguration; + +public class TestBlockReaderRemote2 extends TestBlockReaderBase { HdfsConfiguration createConf() { HdfsConfiguration conf = new HdfsConfiguration(); return conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestClientBlockVerification.java similarity index 91% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestClientBlockVerification.java index 5ff343ab0c..b6ec1bf0cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestClientBlockVerification.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.hdfs.client.impl; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -25,6 +25,7 @@ import java.util.List; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.test.GenericTestUtils; @@ -41,7 +42,7 @@ public class TestClientBlockVerification { static LocatedBlock testBlock = null; static { - GenericTestUtils.setLogLevel(RemoteBlockReader2.LOG, Level.ALL); + GenericTestUtils.setLogLevel(BlockReaderRemote2.LOG, Level.ALL); } @BeforeClass public static void setupCluster() throws Exception { @@ -57,7 +58,7 @@ public static void setupCluster() throws Exception { */ @Test public void testBlockVerification() throws Exception { - RemoteBlockReader2 reader = (RemoteBlockReader2)spy( + BlockReaderRemote2 reader = (BlockReaderRemote2)spy( util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024)); util.readAndCheckEOS(reader, FILE_SIZE_K * 1024, true); verify(reader).sendReadResult(Status.CHECKSUM_OK); @@ -69,7 +70,7 @@ public void testBlockVerification() throws Exception { */ @Test public void testIncompleteRead() throws Exception { - RemoteBlockReader2 reader = (RemoteBlockReader2)spy( + BlockReaderRemote2 reader = (BlockReaderRemote2)spy( util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024)); util.readAndCheckEOS(reader, FILE_SIZE_K / 2 * 1024, false); @@ -87,7 +88,7 @@ public void testIncompleteRead() throws Exception { @Test public void testCompletePartialRead() throws Exception { // Ask for half the file - RemoteBlockReader2 reader = (RemoteBlockReader2)spy( + BlockReaderRemote2 reader = (BlockReaderRemote2)spy( util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024 / 2)); // And read half the file util.readAndCheckEOS(reader, FILE_SIZE_K * 1024 / 2, true); @@ -107,7 +108,7 @@ public void testUnalignedReads() throws Exception { for (int length : lengths) { DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " + " len=" + length); - RemoteBlockReader2 reader = (RemoteBlockReader2)spy( + BlockReaderRemote2 reader = (BlockReaderRemote2)spy( util.getBlockReader(testBlock, startOffset, length)); util.readAndCheckEOS(reader, length, true); verify(reader).sendReadResult(Status.CHECKSUM_OK); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java index aa46de2542..e7e7739899 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.BlockReader; -import org.apache.hadoop.hdfs.BlockReaderFactory; +import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory; import org.apache.hadoop.hdfs.ClientContext; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index 73f02bd886..00c2f6279c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -42,7 +42,7 @@ import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.BlockReader; -import org.apache.hadoop.hdfs.BlockReaderFactory; +import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory; import org.apache.hadoop.hdfs.ClientContext; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java index d82e383d8c..10d974c0c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java @@ -43,7 +43,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; -import org.apache.hadoop.hdfs.BlockReaderTestUtil; +import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -59,7 +59,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder; -import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; import org.apache.hadoop.hdfs.server.namenode.FSImage; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java index a6d972a36b..ce37abdfdd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.ReadOption; -import org.apache.hadoop.hdfs.BlockReaderTestUtil; +import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java index 3793cae766..efb5cf8faf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java @@ -55,7 +55,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.BlockReaderTestUtil; +import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java index e395fcaef4..f78861373c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java @@ -39,8 +39,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.BlockReaderFactory; -import org.apache.hadoop.hdfs.BlockReaderTestUtil; +import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory; +import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.DFSInputStream; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java index 116dc88ba0..299865b146 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.TestBlockReaderLocal; +import org.apache.hadoop.hdfs.client.impl.TestBlockReaderLocal; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; @@ -54,7 +54,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; @@ -593,7 +592,7 @@ public void testReadWithRemoteBlockReader() /** * Test that file data can be read by reading the block - * through RemoteBlockReader + * through BlockReaderRemote * @throws IOException */ public void doTestShortCircuitReadWithRemoteBlockReader( @@ -623,9 +622,9 @@ public void doTestShortCircuitReadWithRemoteBlockReader( try { checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, conf, shortCircuitFails); - //RemoteBlockReader have unsupported method read(ByteBuffer bf) + //BlockReaderRemote have unsupported method read(ByteBuffer bf) assertTrue( - "RemoteBlockReader 
unsupported method read(ByteBuffer bf) error", + "BlockReaderRemote unsupported method read(ByteBuffer bf) error", checkUnsupportedMethod(fs, file1, fileData, readOffset)); } catch(IOException e) { throw new IOException(
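
For reference, a minimal illustrative sketch (not part of the patch) of how code outside this change adapts to it: the block-reader implementations now live in org.apache.hadoop.hdfs.client.impl, RemoteBlockReader/RemoteBlockReader2 become BlockReaderRemote/BlockReaderRemote2, the BlockReader interface itself stays in org.apache.hadoop.hdfs, and the relocated tests reach the client context through the public fs.getClient() accessor instead of the package-private fs.dfs field. The class below is hypothetical; the imports and accessors it uses are the ones that appear in the hunks above.

// Illustrative only; not part of this patch. Hypothetical class name.
// Old imports (before this patch):
//   import org.apache.hadoop.hdfs.BlockReaderFactory;
//   import org.apache.hadoop.hdfs.RemoteBlockReader2;
// New imports (after this patch):
import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory;
import org.apache.hadoop.hdfs.client.impl.BlockReaderRemote2;
// The BlockReader interface is untouched by the move:
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;

import java.io.IOException;

public class BlockReaderRenameSketch {
  /** Tests no longer touch the package-private fs.dfs field; they go through getClient(). */
  static ShortCircuitCache shortCircuitCacheOf(DistributedFileSystem fs) {
    return fs.getClient().getClientContext().getShortCircuitCache();
  }

  /** The renamed readers still implement the unchanged BlockReader interface. */
  static int optimisticAvailable(BlockReader reader) throws IOException {
    // For BlockReaderRemote this is still the TCP_WINDOW_SIZE estimate from BlockReaderRemote2.
    return reader.available();
  }
}

Keeping the BlockReader interface in org.apache.hadoop.hdfs while the @InterfaceAudience.Private implementations move under client.impl means callers such as NamenodeFsck, StripedBlockReader, and the block-token tests only need import and class-name updates, which is all the hunks above change for those files.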