From 05a73a3a1ea064b0d819d1851bce820e7d1f3f65 Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Thu, 7 Jun 2012 21:06:13 +0000
Subject: [PATCH] Revert HDFS-3492 from r1347192: patch broke TestShortCircuitLocalRead

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1347796 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop/lib/wsrs/InputStreamEntity.java    |  5 ++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 --
 .../apache/hadoop/hdfs/BlockReaderLocal.java  | 34 +++++++++++++++----
 .../hdfs/server/namenode/FSEditLogOp.java     |  7 ++--
 .../hdfs/TestShortCircuitLocalRead.java       |  7 ++--
 .../server/datanode/SimulatedFSDataset.java   |  3 +-
 6 files changed, 38 insertions(+), 21 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java
index 21c25bd4c5..e5361a80ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java
@@ -42,7 +42,10 @@ public InputStreamEntity(InputStream is) {
 
   @Override
   public void write(OutputStream os) throws IOException {
-    IOUtils.skipFully(is, offset);
+    long skipped = is.skip(offset);
+    if (skipped < offset) {
+      throw new IOException("Requested offset beyond stream size");
+    }
     if (len == -1) {
       IOUtils.copyBytes(is, os, 4096, true);
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f0bc858f8f..2fad18ecc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -310,9 +310,6 @@ Branch-2 ( Unreleased changes )
     HDFS-3505. DirectoryScanner does not join all threads in shutdown.
     (Colin Patrick McCabe via eli)
 
-    HDFS-3492. Fix some misuses of InputStream#skip (Colin Patrick McCabe
-    via todd)
-
     HDFS-3485. DataTransferThrottler will over-throttle when currentTimeMillis
     jumps (Andy Isaacson via todd)
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index a51b710e29..cd85ebae05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -39,7 +39,6 @@
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 
@@ -316,10 +315,23 @@ private BlockReaderLocal(Configuration conf, String hdfsfile,
     boolean success = false;
     try {
       // Skip both input streams to beginning of the chunk containing startOffset
-      IOUtils.skipFully(dataIn, firstChunkOffset);
+      long toSkip = firstChunkOffset;
+      while (toSkip > 0) {
+        long skipped = dataIn.skip(toSkip);
+        if (skipped == 0) {
+          throw new IOException("Couldn't initialize input stream");
+        }
+        toSkip -= skipped;
+      }
       if (checksumIn != null) {
         long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
-        IOUtils.skipFully(dataIn, checkSumOffset);
+        while (checkSumOffset > 0) {
+          long skipped = checksumIn.skip(checkSumOffset);
+          if (skipped == 0) {
+            throw new IOException("Couldn't initialize checksum input stream");
+          }
+          checkSumOffset -= skipped;
+        }
       }
       success = true;
     } finally {
@@ -624,9 +636,17 @@ public synchronized long skip(long n) throws IOException {
       slowReadBuff.position(slowReadBuff.limit());
       checksumBuff.position(checksumBuff.limit());
 
-      IOUtils.skipFully(dataIn, toskip);
-      long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;
-      IOUtils.skipFully(checksumIn, checkSumOffset);
+      long dataSkipped = dataIn.skip(toskip);
+      if (dataSkipped != toskip) {
+        throw new IOException("skip error in data input stream");
+      }
+      long checkSumOffset = (dataSkipped / bytesPerChecksum) * checksumSize;
+      if (checkSumOffset > 0) {
+        long skipped = checksumIn.skip(checkSumOffset);
+        if (skipped != checkSumOffset) {
+          throw new IOException("skip error in checksum input stream");
+        }
+      }
 
       // read into the middle of the chunk
       if (skipBuf == null) {
@@ -681,4 +701,4 @@ public Socket takeSocket() {
   public boolean hasSentStatusCode() {
     return false;
   }
-}
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 2aa8f736c1..489f030e13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -44,7 +44,6 @@
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.ArrayWritable;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
@@ -2290,11 +2289,9 @@ public FSEditLogOp readOp(boolean skipBrokenEdits) throws IOException {
           // 0xff, we want to skip over that region, because there's nothing
           // interesting there.
           long numSkip = e.getNumAfterTerminator();
-          try {
-            IOUtils.skipFully(in, numSkip);
-          } catch (IOException t) {
+          if (in.skip(numSkip) < numSkip) {
             FSImage.LOG.error("Failed to skip " + numSkip + " bytes of " +
-              "garbage after an OP_INVALID. Unexpected early EOF.", t);
+              "garbage after an OP_INVALID. Unexpected early EOF.");
             return null;
           }
         } catch (IOException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
index 9c37b7a2fb..169756759e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
@@ -40,7 +40,6 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
@@ -95,7 +94,8 @@ static void checkFileContent(FileSystem fs, Path name, byte[] expected,
     // Now read using a different API.
     actual = new byte[expected.length-readOffset];
     stm = fs.open(name);
-    IOUtils.skipFully(stm, readOffset);
+    long skipped = stm.skip(readOffset);
+    Assert.assertEquals(skipped, readOffset);
     //Read a small number of bytes first.
     int nread = stm.read(actual, 0, 3);
     nread += stm.read(actual, nread, 2);
@@ -123,7 +123,8 @@ static void checkFileContentDirect(FileSystem fs, Path name, byte[] expected,
 
     ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
 
-    IOUtils.skipFully(stm, readOffset);
+    long skipped = stm.skip(readOffset);
+    Assert.assertEquals(skipped, readOffset);
 
     actual.limit(3);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index d4a7b3a257..e69b1c3021 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -47,7 +47,6 @@
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -687,7 +686,7 @@ synchronized InputStream getBlockInputStream(ExtendedBlock b
   public synchronized InputStream getBlockInputStream(ExtendedBlock b,
       long seekOffset) throws IOException {
     InputStream result = getBlockInputStream(b);
-    IOUtils.skipFully(result, seekOffset);
+    result.skip(seekOffset);
     return result;
   }
 
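
Note (not part of the patch): the hunks above replace HDFS-3492's IOUtils.skipFully() calls with bare InputStream#skip() plus ad-hoc checks. The contract at issue is that skip() may skip fewer bytes than requested, even zero, without having reached end of stream, so a robust caller loops until the requested count is consumed. Below is a minimal, hypothetical skip-in-a-loop helper for illustration only; it is not the Hadoop IOUtils implementation.

import java.io.IOException;
import java.io.InputStream;

public class SkipFullyExample {

  // Skip exactly 'len' bytes from 'in'. InputStream#skip() may make partial
  // progress (or none) on any single call, so loop until done and use a
  // one-byte read() to distinguish genuine EOF from a skip() that declines
  // to advance.
  public static void skipFully(InputStream in, long len) throws IOException {
    while (len > 0) {
      long skipped = in.skip(len);
      if (skipped <= 0) {
        if (in.read() == -1) {
          throw new IOException("Premature EOF with " + len + " bytes left to skip");
        }
        skipped = 1; // read() consumed one byte, so count it as skipped
      }
      len -= skipped;
    }
  }
}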