HDFS-3492. fix some misuses of InputStream#skip. Contributed by Colin Patrick McCabe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1361449 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-07-14 00:18:56 +00:00
parent 8c2a2f909c
commit 06d635cd2c
5 changed files with 16 additions and 36 deletions
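
InputStream#skip makes no promise to advance the full requested distance: it may skip fewer bytes than asked, or none at all, without raising an error, so a single unchecked call can leave the stream short of the intended position. The patch therefore replaces bare skip() calls with org.apache.hadoop.io.IOUtils.skipFully, which keeps skipping until the requested count is consumed and fails if the stream ends first. A minimal sketch of that skip-fully pattern (illustrative only, not the Hadoop implementation):

    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    class SkipFullySketch {
      // Keep calling skip() until `len` bytes are consumed. When skip()
      // reports no progress, fall back to read() to distinguish a slow
      // stream from end-of-stream, and fail loudly on premature EOF.
      static void skipFully(InputStream in, long len) throws IOException {
        while (len > 0) {
          long skipped = in.skip(len);
          if (skipped > 0) {
            len -= skipped;
          } else if (in.read() == -1) {
            throw new EOFException("end of stream with " + len + " bytes left to skip");
          } else {
            len--; // the probing read() consumed one byte
          }
        }
      }
    }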

View File

@@ -42,10 +42,7 @@ public InputStreamEntity(InputStream is) {
   @Override
   public void write(OutputStream os) throws IOException {
-    long skipped = is.skip(offset);
-    if (skipped < offset) {
-      throw new IOException("Requested offset beyond stream size");
-    }
+    IOUtils.skipFully(is, offset);
     if (len == -1) {
       IOUtils.copyBytes(is, os, 4096, true);
     } else {

View File

@@ -480,6 +480,9 @@ Branch-2 ( Unreleased changes )
     HDFS-470. libhdfs should handle 0-length reads from FSInputStream
     correctly. (Colin Patrick McCabe via eli)
 
+    HDFS-3492. fix some misuses of InputStream#skip.
+    (Colin Patrick McCabe via eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)

View File

@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
@@ -315,23 +316,10 @@ private BlockReaderLocal(Configuration conf, String hdfsfile,
     boolean success = false;
     try {
       // Skip both input streams to beginning of the chunk containing startOffset
-      long toSkip = firstChunkOffset;
-      while (toSkip > 0) {
-        long skipped = dataIn.skip(toSkip);
-        if (skipped == 0) {
-          throw new IOException("Couldn't initialize input stream");
-        }
-        toSkip -= skipped;
-      }
+      IOUtils.skipFully(dataIn, firstChunkOffset);
       if (checksumIn != null) {
         long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
-        while (checkSumOffset > 0) {
-          long skipped = checksumIn.skip(checkSumOffset);
-          if (skipped == 0) {
-            throw new IOException("Couldn't initialize checksum input stream");
-          }
-          checkSumOffset -= skipped;
-        }
+        IOUtils.skipFully(checksumIn, checkSumOffset);
       }
       success = true;
     } finally {
@@ -636,17 +624,9 @@ public synchronized long skip(long n) throws IOException {
     slowReadBuff.position(slowReadBuff.limit());
     checksumBuff.position(checksumBuff.limit());
 
-    long dataSkipped = dataIn.skip(toskip);
-    if (dataSkipped != toskip) {
-      throw new IOException("skip error in data input stream");
-    }
-    long checkSumOffset = (dataSkipped / bytesPerChecksum) * checksumSize;
-    if (checkSumOffset > 0) {
-      long skipped = checksumIn.skip(checkSumOffset);
-      if (skipped != checkSumOffset) {
-        throw new IOException("skip error in checksum input stream");
-      }
-    }
+    IOUtils.skipFully(dataIn, toskip);
+    long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;
+    IOUtils.skipFully(checksumIn, checkSumOffset);
 
     // read into the middle of the chunk
     if (skipBuf == null) {
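
In the skip() hunk above, advancing the data stream by toskip bytes also requires advancing the checksum stream past the checksums of every whole chunk skipped over, hence checkSumOffset = (toskip / bytesPerChecksum) * checksumSize. A worked example with illustrative values (bytesPerChecksum = 512 and checksumSize = 4 are assumptions for the demo, not a statement about any particular configuration):

    // Illustration of the offset arithmetic above; parameter values are examples.
    public class ChecksumOffsetExample {
      public static void main(String[] args) {
        int bytesPerChecksum = 512; // one checksum per 512 data bytes (assumed)
        int checksumSize = 4;       // a 4-byte CRC (assumed)
        long toskip = 1300;         // data bytes to skip

        // 1300 / 512 = 2 whole chunks passed over, so skip 2 * 4 = 8 checksum bytes.
        long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;
        System.out.println(checkSumOffset); // prints 8

        // The remaining 1300 - 2*512 = 276 bytes fall inside the next chunk,
        // which the surrounding code then handles via the
        // "read into the middle of the chunk" step shown in the hunk above.
      }
    }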

View File

@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Time;
@@ -95,8 +96,7 @@ static void checkFileContent(FileSystem fs, Path name, byte[] expected,
     // Now read using a different API.
     actual = new byte[expected.length-readOffset];
     stm = fs.open(name);
-    long skipped = stm.skip(readOffset);
-    Assert.assertEquals(skipped, readOffset);
+    IOUtils.skipFully(stm, readOffset);
     //Read a small number of bytes first.
     int nread = stm.read(actual, 0, 3);
     nread += stm.read(actual, nread, 2);
@@ -124,8 +124,7 @@ static void checkFileContentDirect(FileSystem fs, Path name, byte[] expected,
     ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
-    long skipped = stm.skip(readOffset);
-    Assert.assertEquals(skipped, readOffset);
+    IOUtils.skipFully(stm, readOffset);
     actual.limit(3);

View File

@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -686,7 +687,7 @@ synchronized InputStream getBlockInputStream(ExtendedBlock b
   public synchronized InputStream getBlockInputStream(ExtendedBlock b,
       long seekOffset) throws IOException {
     InputStream result = getBlockInputStream(b);
-    result.skip(seekOffset);
+    IOUtils.skipFully(result, seekOffset);
     return result;
   }
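
The pre-patch SimulatedFSDataset code ignored skip()'s return value altogether, the quietest form of the bug: the stream can end up short of seekOffset and later reads silently return bytes from the wrong position. A self-contained illustration (contrived stream, not Hadoop code) of the difference between an ignored skip() and a skip-fully loop:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class IgnoredSkipDemo {
      // A stream whose skip() never advances more than 10 bytes per call,
      // which the InputStream contract permits.
      static InputStream cappedSkipStream(byte[] data) {
        return new ByteArrayInputStream(data) {
          @Override
          public synchronized long skip(long n) {
            return super.skip(Math.min(n, 10));
          }
        };
      }

      public static void main(String[] args) throws IOException {
        byte[] data = new byte[100];
        for (int i = 0; i < data.length; i++) {
          data[i] = (byte) i;
        }

        InputStream a = cappedSkipStream(data);
        a.skip(50);                   // return value (10) silently ignored
        System.out.println(a.read()); // prints 10: wrong position, no error raised

        InputStream b = cappedSkipStream(data);
        long remaining = 50;
        while (remaining > 0) {       // the skip-fully pattern the patch adopts
          long skipped = b.skip(remaining);
          if (skipped <= 0) {
            throw new IOException("could not skip " + remaining + " more bytes");
          }
          remaining -= skipped;
        }
        System.out.println(b.read()); // prints 50: the byte actually at the offset
      }
    }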