HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than DFSOutputStream#writeChunk (cmccabe)
parent 8366a36ad3
commit c94d594a57
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -21,6 +21,8 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.htrace.NullScope;
+import org.apache.htrace.TraceScope;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -194,16 +196,26 @@ protected int getChecksumSize() {
     return sum.getChecksumSize();
   }
 
+  protected TraceScope createWriteTraceScope() {
+    return NullScope.INSTANCE;
+  }
+
   /** Generate checksums for the given data chunks and output chunks & checksums
    * to the underlying output stream.
    */
   private void writeChecksumChunks(byte b[], int off, int len)
   throws IOException {
     sum.calculateChunkedSums(b, off, len, checksum, 0);
-    for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
-      int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
-      int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
-      writeChunk(b, off + i, chunkLen, checksum, ckOffset, getChecksumSize());
+    TraceScope scope = createWriteTraceScope();
+    try {
+      for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
+        int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
+        int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
+        writeChunk(b, off + i, chunkLen, checksum, ckOffset,
+            getChecksumSize());
+      }
+    } finally {
+      scope.close();
+    }
   }
 
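The hunk above is a template-method hook: the base class wraps its chunk loop in a scope obtained from an overridable factory, and the default returns a no-op scope (NullScope.INSTANCE) so untraced subclasses pay nothing. A minimal standalone sketch of the same pattern, with illustrative names rather than Hadoop source:

// Base class: one trace scope per write call, not per chunk. The default
// factory returns a no-op scope, analogous to NullScope.INSTANCE above.
abstract class SummingStream {
  protected AutoCloseable createWriteTraceScope() {
    return () -> { };  // no-op scope, closed silently
  }

  void writeChunks(byte[] b) throws Exception {
    AutoCloseable scope = createWriteTraceScope();  // opened once per call
    try {
      for (int i = 0; i < b.length; i += 512) {
        writeChunk(b, i, Math.min(512, b.length - i));
      }
    } finally {
      scope.close();
    }
  }

  protected abstract void writeChunk(byte[] b, int off, int len) throws Exception;
}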
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -378,6 +378,9 @@ Release 2.8.0 - UNRELEASED
 
   OPTIMIZATIONS
 
+    HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
+    DFSOutputStream#writeChunk (cmccabe)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
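The entry lands under OPTIMIZATIONS because the change cuts span volume, not behavior: one span now covers every chunk a single writeChecksumChunks call emits, where each chunk previously opened its own. Rough numbers, assuming the stock dfs.bytes-per-checksum of 512 and an assumed chunks-per-call batch size:

// Back-of-envelope span counts; bytesPerChecksum is the HDFS default,
// chunksPerCall is an assumed batch size (FSOutputSummer hands several
// buffered chunks to writeChecksumChunks per invocation).
int bytesPerChecksum = 512;                          // dfs.bytes-per-checksum default
long bytesWritten = 64L * 1024 * 1024;               // a 64 MB write workload
long spansBefore = bytesWritten / bytesPerChecksum;  // 131072: one per writeChunk
int chunksPerCall = 9;                               // assumed buffering factor
long spansAfter = spansBefore / chunksPerCall;       // 14563: one per writeChecksumChunks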
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -372,21 +372,14 @@ private void computePacketChunkSize(int psize, int csize) {
     }
   }
 
+  protected TraceScope createWriteTraceScope() {
+    return dfsClient.getPathTraceScope("DFSOutputStream#write", src);
+  }
+
   // @see FSOutputSummer#writeChunk()
   @Override
   protected synchronized void writeChunk(byte[] b, int offset, int len,
       byte[] checksum, int ckoff, int cklen) throws IOException {
-    TraceScope scope =
-        dfsClient.getPathTraceScope("DFSOutputStream#writeChunk", src);
-    try {
-      writeChunkImpl(b, offset, len, checksum, ckoff, cklen);
-    } finally {
-      scope.close();
-    }
-  }
-
-  private synchronized void writeChunkImpl(byte[] b, int offset, int len,
-          byte[] checksum, int ckoff, int cklen) throws IOException {
     dfsClient.checkOpen();
     checkClosed();
 
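On the DFSOutputStream side the override supplies the real scope, so the loop FSOutputSummer runs is traced end to end under one "DFSOutputStream#write" span, and the writeChunk/writeChunkImpl split is no longer needed. Continuing the hypothetical sketch from above, the overriding subclass looks like:

// Hypothetical subclass mirroring the override in the hunk above: the base
// class loop stays untouched; only the scope factory changes.
class TracedStream extends SummingStream {
  @Override
  protected AutoCloseable createWriteTraceScope() {
    // stand-in for dfsClient.getPathTraceScope("DFSOutputStream#write", src)
    long start = System.nanoTime();
    return () -> System.out.println("write span: " + (System.nanoTime() - start) + " ns");
  }

  @Override
  protected void writeChunk(byte[] b, int off, int len) {
    // send the chunk and its checksum downstream (omitted)
  }
}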
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -89,7 +89,7 @@ public void testWriteTraceHooks() throws Exception {
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
-      "DFSOutputStream#writeChunk",
+      "DFSOutputStream#write",
       "DFSOutputStream#close",
       "dataStreamer",
       "OpWriteBlockProto",
@@ -117,7 +117,7 @@ public void testWriteTraceHooks() throws Exception {
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
-      "DFSOutputStream#writeChunk",
+      "DFSOutputStream#write",
       "DFSOutputStream#close",
     };
     for (String desc : spansInTopTrace) {