HDFS-7963. Fix expected tracing spans in TestTracing. Contributed by Masatake Iwasaki.

Kihwal Lee 2015-03-26 08:42:45 -05:00
parent b4b4fe9056
commit 222845632b
2 changed files with 26 additions and 5 deletions


@@ -1277,6 +1277,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7977. NFS couldn't take percentile intervals (brandonli)
 
+    HDFS-7963. Fix expected tracing spans in TestTracing along with HDFS-7054.
+    (Masatake Iwasaki via kihwal)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode


@@ -88,7 +88,10 @@ public void testWriteTraceHooks() throws Exception {
       "ClientNamenodeProtocol#fsync",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
       "ClientNamenodeProtocol#complete",
-      "DFSOutputStream",
+      "newStreamForCreate",
+      "DFSOutputStream#writeChunk",
+      "DFSOutputStream#close",
+      "dataStreamer",
       "OpWriteBlockProto",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
       "ClientNamenodeProtocol#addBlock"
@@ -102,12 +105,27 @@ public void testWriteTraceHooks() throws Exception {
     long spanStart = s.getStartTimeMillis();
     long spanEnd = s.getStopTimeMillis();
 
-    // There should only be one trace id as it should all be homed in the
-    // top trace.
-    for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
-      Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+    // Spans homed in the top trace should have the same trace id.
+    // Spans with multiple parents (e.g. "dataStreamer", added by HDFS-7054)
+    // and their children are the exception.
+    String[] spansInTopTrace = {
+      "testWriteTraceHooks",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientNamenodeProtocol#create",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientNamenodeProtocol#fsync",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientNamenodeProtocol#complete",
+      "newStreamForCreate",
+      "DFSOutputStream#writeChunk",
+      "DFSOutputStream#close",
+    };
+    for (String desc : spansInTopTrace) {
+      for (Span span : map.get(desc)) {
+        Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+      }
     }
   }
 
   @Test
   public void testWriteWithoutTraceHooks() throws Exception {