From f4098dfae44b6dd287a3054f2c04658773b4f466 Mon Sep 17 00:00:00 2001
From: boli2
Date: Mon, 13 Jul 2015 00:41:36 -0400
Subject: [PATCH] HDFS-8744. Erasure Coding: the number of chunks in packet is
 not updated when writing parity data. Contributed by Li Bo

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt     | 3 +++
 .../main/java/org/apache/hadoop/hdfs/DFSOutputStream.java    | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/DFSPacket.java      | 6 +++---
 .../java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java  | 4 +++-
 4 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 90f6732da3..7b6d165195 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -341,3 +341,6 @@
 
     HDFS-8484. Erasure coding: Two contiguous blocks occupy IDs belong to same
     striped group. (Walter Su via jing9)
+
+    HDFS-8744. Erasure Coding: the number of chunks in packet is not updated
+    when writing parity data. (Li Bo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 9e201ad5d9..f41044b299 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -419,7 +419,7 @@ protected synchronized void writeChunk(byte[] b, int offset, int len,
 
     currentPacket.writeChecksum(checksum, ckoff, cklen);
     currentPacket.writeData(b, offset, len);
-    currentPacket.incNumChunks();
+    currentPacket.incNumChunks(1);
     streamer.incBytesCurBlock(len);
 
     // If packet is full, enqueue it for transmission
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
index a26e35e7c5..2698de39c2 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
@@ -259,10 +259,10 @@ synchronized int getNumChunks() {
   }
 
   /**
-   * increase the number of chunks by one
+   * increase the number of chunks by n
    */
-  synchronized void incNumChunks() {
-    numChunks++;
+  synchronized void incNumChunks(int n) {
+    numChunks += n;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 4234351d36..e6de714aba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -389,11 +389,13 @@ private List<DFSPacket> generatePackets(
       int maxBytesToPacket = p.getMaxChunks() * bytesPerChecksum;
       int toWrite = byteBuffer.remaining() > maxBytesToPacket ?
           maxBytesToPacket: byteBuffer.remaining();
-      int ckLen = ((toWrite - 1) / bytesPerChecksum + 1) * getChecksumSize();
+      int chunks = (toWrite - 1) / bytesPerChecksum + 1;
+      int ckLen = chunks * getChecksumSize();
       p.writeChecksum(checksumBuf, ckOff, ckLen);
       ckOff += ckLen;
       p.writeData(byteBuffer, toWrite);
       getCurrentStreamer().incBytesCurBlock(toWrite);
+      p.incNumChunks(chunks);
       packets.add(p);
     }
     return packets;
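
Reviewer note (illustrative, not part of the patch): the striped writer sizes the
checksum region with a ceiling division, chunks = (toWrite - 1) / bytesPerChecksum + 1,
and after this change advances the packet's chunk counter by that same amount via
p.incNumChunks(chunks), while the contiguous write path keeps its old behavior through
incNumChunks(1). A minimal standalone sketch of that arithmetic follows; the class name
and the sample values (bytesPerChecksum = 512) are assumptions for illustration only.

    // ChunkCountSketch.java -- hypothetical, standalone; mirrors the ceiling-division
    // expression used in DFSStripedOutputStream#generatePackets above.
    public class ChunkCountSketch {
      public static void main(String[] args) {
        final int bytesPerChecksum = 512;              // assumed checksum chunk size
        final int[] toWriteSamples = {1, 512, 513, 4096};
        for (int toWrite : toWriteSamples) {
          // Number of checksum chunks needed to cover toWrite bytes.
          int chunks = (toWrite - 1) / bytesPerChecksum + 1;
          System.out.println("toWrite=" + toWrite + " -> chunks=" + chunks);
        }
        // Prints: toWrite=1 -> chunks=1, 512 -> 1, 513 -> 2, 4096 -> 8
      }
    }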