diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java
index e5361a80ef..21c25bd4c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java
@@ -42,10 +42,7 @@ public InputStreamEntity(InputStream is) {
 
   @Override
   public void write(OutputStream os) throws IOException {
-    long skipped = is.skip(offset);
-    if (skipped < offset) {
-      throw new IOException("Requested offset beyond stream size");
-    }
+    IOUtils.skipFully(is, offset);
     if (len == -1) {
       IOUtils.copyBytes(is, os, 4096, true);
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3c195c51d3..c743139bd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -480,6 +480,9 @@ Branch-2 ( Unreleased changes )
     HDFS-470. libhdfs should handle 0-length reads from FSInputStream
     correctly. (Colin Patrick McCabe via eli)
 
+    HDFS-3492. fix some misuses of InputStream#skip.
+    (Colin Patrick McCabe via eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index cd85ebae05..40b40c0841 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 
@@ -315,23 +316,10 @@ private BlockReaderLocal(Configuration conf, String hdfsfile,
     boolean success = false;
     try {
       // Skip both input streams to beginning of the chunk containing startOffset
-      long toSkip = firstChunkOffset;
-      while (toSkip > 0) {
-        long skipped = dataIn.skip(toSkip);
-        if (skipped == 0) {
-          throw new IOException("Couldn't initialize input stream");
-        }
-        toSkip -= skipped;
-      }
+      IOUtils.skipFully(dataIn, firstChunkOffset);
       if (checksumIn != null) {
         long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
-        while (checkSumOffset > 0) {
-          long skipped = checksumIn.skip(checkSumOffset);
-          if (skipped == 0) {
-            throw new IOException("Couldn't initialize checksum input stream");
-          }
-          checkSumOffset -= skipped;
-        }
+        IOUtils.skipFully(checksumIn, checkSumOffset);
       }
       success = true;
     } finally {
@@ -636,17 +624,9 @@ public synchronized long skip(long n) throws IOException {
     slowReadBuff.position(slowReadBuff.limit());
     checksumBuff.position(checksumBuff.limit());
 
-    long dataSkipped = dataIn.skip(toskip);
-    if (dataSkipped != toskip) {
-      throw new IOException("skip error in data input stream");
-    }
-    long checkSumOffset = (dataSkipped / bytesPerChecksum) * checksumSize;
-    if (checkSumOffset > 0) {
-      long skipped = checksumIn.skip(checkSumOffset);
-      if (skipped != checkSumOffset) {
-        throw new IOException("skip error in checksum input stream");
-      }
-    }
+    IOUtils.skipFully(dataIn, toskip);
+    long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;
+    IOUtils.skipFully(checksumIn, checkSumOffset);
 
     // read into the middle of the chunk
     if (skipBuf == null) {
@@ -701,4 +681,4 @@ public Socket takeSocket() {
   public boolean hasSentStatusCode() {
     return false;
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
index 37382a0eac..378969adf9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Time;
@@ -95,8 +96,7 @@ static void checkFileContent(FileSystem fs, Path name, byte[] expected,
     // Now read using a different API.
     actual = new byte[expected.length-readOffset];
     stm = fs.open(name);
-    long skipped = stm.skip(readOffset);
-    Assert.assertEquals(skipped, readOffset);
+    IOUtils.skipFully(stm, readOffset);
     //Read a small number of bytes first.
     int nread = stm.read(actual, 0, 3);
     nread += stm.read(actual, nread, 2);
@@ -124,8 +124,7 @@ static void checkFileContentDirect(FileSystem fs, Path name, byte[] expected,
 
     ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
 
-    long skipped = stm.skip(readOffset);
-    Assert.assertEquals(skipped, readOffset);
+    IOUtils.skipFully(stm, readOffset);
 
     actual.limit(3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index e69b1c3021..d4a7b3a257 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -686,7 +687,7 @@ synchronized InputStream getBlockInputStream(ExtendedBlock b
   public synchronized InputStream getBlockInputStream(ExtendedBlock b,
       long seekOffset) throws IOException {
     InputStream result = getBlockInputStream(b);
-    result.skip(seekOffset);
+    IOUtils.skipFully(result, seekOffset);
     return result;
   }
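
Note on the pattern: InputStream#skip may legally skip fewer bytes than requested, or none at all, even before end of stream, so the call sites above that checked the return value once (or ignored it) could silently end up reading from the wrong offset. IOUtils.skipFully instead keeps skipping until the full length is consumed and fails loudly otherwise. The sketch below illustrates that skip-fully idiom only; it is a simplified stand-in, not the actual org.apache.hadoop.io.IOUtils source, and the class name SkipUtil and the exception message are made up for the example.

// Illustrative sketch of the "skip fully" idiom relied on by this patch.
// Uses only java.io; not the real Hadoop IOUtils implementation.
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

class SkipUtil {
  /** Skip exactly len bytes of in, or throw if the stream ends first. */
  static void skipFully(InputStream in, long len) throws IOException {
    while (len > 0) {
      long skipped = in.skip(len);
      if (skipped <= 0) {
        // skip() may return 0 even when bytes remain; fall back to read()
        // to distinguish "no progress yet" from "end of stream".
        if (in.read() == -1) {
          throw new EOFException("Premature EOF with " + len
              + " byte(s) left to skip");
        }
        len--;          // one byte consumed by the probing read()
      } else {
        len -= skipped; // normal case: account for what skip() consumed
      }
    }
  }
}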