diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 674b090e97..c7a2423948 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2129,6 +2129,9 @@ Release 2.7.2 - UNRELEASED
     HDFS-8676. Delayed rolling upgrade finalization can cause heartbeat
     expiration. (Walter Su via kihwal)
 
+    HDFS-9220. Reading small file (< 512 bytes) that is open for append fails
+    due to incorrect checksum (Jing Zhao via kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 4c40e83b04..99cdbea644 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -753,11 +753,10 @@ private int receivePacket() throws IOException {
           final int offset = checksumBuf.arrayOffset() +
               checksumBuf.position() + skip;
           final int end = offset + checksumLen - skip;
-          // If offset > end, there is no more checksum to write.
+          // If offset >= end, there is no more checksum to write.
           // I.e. a partial chunk checksum rewrite happened and there is no
           // more to write after that.
-          if (offset > end) {
-            assert crcBytes != null;
+          if (offset >= end && doCrcRecalc) {
             lastCrc = crcBytes;
           } else {
             final int remainingBytes = checksumLen - skip;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index 8a950272b9..3c72db35bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -30,6 +30,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -549,4 +550,40 @@ public void testComplexAppend() throws IOException {
   public void testComplexAppend2() throws IOException {
     testComplexAppend(true);
   }
+
+  /**
+   * Make sure when the block length after appending is less than 512 bytes, the
+   * checksum re-calculation and overwrite are performed correctly.
+   */
+  @Test
+  public void testAppendLessThanChecksumChunk() throws Exception {
+    final byte[] buf = new byte[1024];
+    final MiniDFSCluster cluster = new MiniDFSCluster
+        .Builder(new HdfsConfiguration()).numDataNodes(1).build();
+    cluster.waitActive();
+
+    try (DistributedFileSystem fs = cluster.getFileSystem()) {
+      final int len1 = 200;
+      final int len2 = 300;
+      final Path p = new Path("/foo");
+
+      FSDataOutputStream out = fs.create(p);
+      out.write(buf, 0, len1);
+      out.close();
+
+      out = fs.append(p);
+      out.write(buf, 0, len2);
+      // flush but leave open
+      out.hflush();
+
+      // read data to verify the replica's content and checksum are correct
+      FSDataInputStream in = fs.open(p);
+      final int length = in.read(0, buf, 0, len1 + len2);
+      assertTrue(length > 0);
+      in.close();
+      out.close();
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }