From ab96a0838dafbfea77382135914feadbfd03cf53 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Thu, 6 Feb 2014 15:45:47 +0000
Subject: [PATCH] HDFS-5881. Fix skip() of the short-circuit local
 reader(legacy). Contributed by Kihwal Lee.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1565310 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                | 2 ++
 .../org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java     | 6 +++---
 .../main/java/org/apache/hadoop/hdfs/DFSInputStream.java   | 8 ++++++++
 3 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0ad51b7569..2720c7ddd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -337,6 +337,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-5709. Improve NameNode upgrade with existing reserved paths and path
     components. (Andrew Wang via atm)
 
+    HDFS-5881. Fix skip() of the short-circuit local reader(legacy). (kihwal)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
index 2f66193361..ffc4eb9f8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
@@ -629,7 +629,7 @@ public synchronized long skip(long n) throws IOException {
         skipBuf = new byte[bytesPerChecksum];
       }
       int ret = read(skipBuf, 0, (int)(n - remaining));
-      return ret;
+      return (remaining + ret);
     }
 
     // optimize for big gap: discard the current buffer, skip to
@@ -660,9 +660,9 @@ public synchronized long skip(long n) throws IOException {
     int ret = read(skipBuf, 0, myOffsetFromChunkBoundary);
 
     if (ret == -1) {  // EOS
-      return toskip;
+      return (toskip + remaining);
     } else {
-      return (toskip + ret);
+      return (toskip + remaining + ret);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 73861bc8ad..438030eaa9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1345,6 +1345,14 @@ public synchronized void seek(long targetPos) throws IOException {
           pos += blockReader.skip(diff);
           if (pos == targetPos) {
             done = true;
+          } else {
+            // The range was already checked. If the block reader returns
+            // something unexpected instead of throwing an exception, it is
+            // most likely a bug.
+            String errMsg = "BlockReader failed to seek to " +
+                targetPos + ". Instead, it seeked to " + pos + ".";
+            DFSClient.LOG.warn(errMsg);
+            throw new IOException(errMsg);
           }
         } catch (IOException e) {//make following read to retry
           if(DFSClient.LOG.isDebugEnabled()) {
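
Note (not part of the patch): the BlockReaderLocalLegacy change above corrects an accounting error in skip(). The legacy reader discards the bytes still sitting in its internal buffer ("remaining") and then reads forward to the target, but it reported only the newly read bytes, so DFSInputStream.seek() saw pos land short of targetPos. The minimal, self-contained sketch below illustrates the rule the fix restores; the class and field names (SkipAccountingSketch, BufferedToyReader, bufPos, bufLimit) are hypothetical and are not taken from the Hadoop tree.

// Sketch, assuming a toy buffered reader: skip(n) must report every byte the
// caller has moved past, i.e. the buffered bytes plus the bytes newly read.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class SkipAccountingSketch {

  /** Toy reader with a small look-ahead buffer, loosely analogous to the legacy local reader. */
  static class BufferedToyReader {
    private final InputStream in;
    private final byte[] buf;
    private int bufPos;    // next unread position in buf
    private int bufLimit;  // number of valid bytes in buf

    BufferedToyReader(InputStream in, int bufSize) throws IOException {
      this.in = in;
      this.buf = new byte[bufSize];
      this.bufLimit = in.read(buf);  // prefill the buffer
      this.bufPos = 0;
    }

    /** Returns the total number of bytes skipped, buffered bytes included. */
    long skip(long n) throws IOException {
      int remaining = bufLimit - bufPos;
      if (n <= remaining) {
        bufPos += (int) n;           // the target is already buffered
        return n;
      }
      // Discard the buffered bytes, then move past them in the underlying stream.
      bufPos = bufLimit;
      long ret = in.skip(n - remaining);
      // The legacy bug: returning only 'ret' here would make the caller think it
      // stopped 'remaining' bytes short of where it actually is.
      return remaining + ret;
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] data = new byte[100];
    BufferedToyReader r = new BufferedToyReader(new ByteArrayInputStream(data), 10);
    long skipped = r.skip(25);                   // 10 buffered bytes + 15 from the stream
    System.out.println("skipped = " + skipped);  // prints 25, not 15
  }
}

With the undercounted return value, a caller like DFSInputStream.seek() adds the result to pos and never reaches targetPos; the DFSInputStream hunk above turns that silent mismatch into a logged IOException instead of leaving the stream position wrong.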