[Fuego] [PATCH RFC 1/2] parser: add a generic HTML table generator for Benchmark tests

Liu Wenlong liuwl.fnst at cn.fujitsu.com
Mon Dec 17 01:53:17 UTC 2018


It might be helpful to display the test results in an HTML table
for Benchmark tests.
Test results for different boards, different specs, and different
kernel versions will be shown in separate HTML tables.
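
Concretely, the generator groups results by a key built from those
three fields; the snippet below is only an illustration of that
grouping (the board/spec/kernel values are made up):

    # one table is generated per unique (board, spec, kernel) key
    board, spec, kernel = "beaglebone", "default", "4.9.37"
    bsp_key = board + "." + spec + "." + kernel
    # e.g. "beaglebone.default.4.9.37" and "beaglebone.default.4.14.71"
    # would end up in two separate tables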

In the current Fuego, only the 3 chart types below are supported:
- "measure_plot"             (for Benchmark tests)
- "testcase_table"           (for Functional tests)
- "testset_summary_table"    (for Functional tests)

So, add a new chart type, an HTML table designed for Benchmark tests:
- "measure_table"

Signed-off-by: Liu Wenlong <liuwl.fnst at cn.fujitsu.com>
---
 engine/scripts/mod.js                       |   2 +-
 engine/scripts/parser/common.py             |  56 +++++-----
 engine/scripts/parser/prepare_chart_data.py | 161 +++++++++++++++++++++++++++-
 3 files changed, 193 insertions(+), 26 deletions(-)

diff --git a/engine/scripts/mod.js b/engine/scripts/mod.js
index d9074cc..96d35de 100644
--- a/engine/scripts/mod.js
+++ b/engine/scripts/mod.js
@@ -359,7 +359,7 @@ function do_all_charts(series) {
             new_plot = plot_one_chart(chart, i);
             plots.push(new_plot);
         }
-        else if (chart_type == "testcase_table" || chart_type == "testset_summary_table" ) {
+        else if (chart_type == "testcase_table" || chart_type == "testset_summary_table" || chart_type == "measure_table") {
             // create all html elements
             jQuery('.plots').append(
                 '<div class="container">' +
diff --git a/engine/scripts/parser/common.py b/engine/scripts/parser/common.py
index 99bacfc..8ef9568 100644
--- a/engine/scripts/parser/common.py
+++ b/engine/scripts/parser/common.py
@@ -209,6 +209,37 @@ def get_criterion(tguid, criteria_data, default_criterion=None):
             criterion = crit
     return criterion
 
+def data_compare(value, ref_value, op):
+    try:
+        if op == 'lt':
+            result = float(value) < float(ref_value)
+        elif op == 'le':
+            result = float(value) <= float(ref_value)
+        elif op == 'gt':
+            result = float(value) > float(ref_value)
+        elif op == 'ge':
+            result = float(value) >= float(ref_value)
+        elif op == 'eq':
+            result = float(value) == float(ref_value)
+        elif op == 'ne':
+            result = float(value) != float(ref_value)
+        elif op == 'bt':
+            ref_low, ref_high = ref_value.split(',', 1)
+            result = float(value) >= float(ref_low) and float(value) <= float(ref_high)
+        else:
+            return "ERROR"
+    except:
+        return "SKIP"
+
+    if result:
+        status = "PASS"
+    else:
+        status = "FAIL"
+
+    dprint("  result=%s" % result)
+    dprint("  status=%s" % status)
+    return status
+
 def check_measure(tguid, criteria_data, measure):
     dprint("in check_measure")
     value = measure.get('measure', None)
@@ -247,30 +278,7 @@ def check_measure(tguid, criteria_data, measure):
         eprint("criteria (%s) missing reference operator - returning SKIP" % criterion)
         return 'SKIP'
 
-    if op == 'lt':
-        result = value < float(ref_value)
-    elif op == 'le':
-        result = value <= float(ref_value)
-    elif op == 'gt':
-        result = value > float(ref_value)
-    elif op == 'ge':
-        result = value >= float(ref_value)
-    elif op == 'eq':
-        result = value == float(ref_value)
-    elif op == 'ne':
-        result = value != float(ref_value)
-    elif op == 'bt':
-        ref_low, ref_high = ref_value.split(',', 1)
-        result = value >= float(ref_low) and value <= float(ref_high)
-
-    if result:
-        status = "PASS"
-    else:
-        status = "FAIL"
-
-    dprint("  result=%s" % result)
-    dprint("  status=%s" % status)
-    return status
+    return data_compare(value, ref_value, op)
 
 def decide_status(tguid, criteria_data, child_pass_list, child_fail_list):
     dprint("in decide_status:")
diff --git a/engine/scripts/parser/prepare_chart_data.py b/engine/scripts/parser/prepare_chart_data.py
index 00fdc80..b6dd50e 100644
--- a/engine/scripts/parser/prepare_chart_data.py
+++ b/engine/scripts/parser/prepare_chart_data.py
@@ -36,7 +36,7 @@ import sys, os, re, json, collections
 from filelock import FileLock
 from operator import itemgetter
 from fuego_parser_utils import split_test_id, get_test_case
-from common import dprint, vprint, iprint, wprint, eprint
+from common import dprint, vprint, iprint, wprint, eprint, data_compare
 
 # board testname spec build_number timestamp kernel tguid ref result
 #  0       1       2     3            4        5      6    7    8
@@ -413,6 +413,163 @@ def make_measure_plots(test_name, chart_config, entries):
         chart_list.append(chart)
     return chart_list
 
+def make_measure_tables(test_name, chart_config, entries):
+    # make a table of measure results for each (board, spec, kernel) combination
+    chart_list = []
+    # the value of 'JENKINS_URL' is "http://localhost:8080/fuego/", which is not what we want.
+    jenkins_url_prefix = "/fuego"
+
+    # get a list of (board, spec, kernel) combinations in the data
+    # FIXTHIS - use list of test sets in chart_config, if present
+    bsp_map = {}
+    for entry in entries:
+        bsp_key = entry.board + "." + entry.spec + "." + entry.kernel
+        bsp_map[bsp_key] = (entry.board, entry.spec, entry.kernel)
+    bsp_list = bsp_map.values()
+
+    # now make a chart for each one:
+    for board, spec, kver in bsp_list:
+        # create a series for each combination of board,spec,test,kernel,tguid
+        dprint("Making a chart for board: %s, test spec: %s, kernel: %s" \
+               % (board, spec, kver))
+        series_list = []
+        title = "%s-%s-%s (%s)" % (board, test_name, spec, kver)
+
+        # get list of test case entries for this board, test spec and kernel
+        tc_entries = []
+        for entry in entries:
+            if entry.board == board and entry.spec == spec and \
+               entry.kernel == kver and entry.op != "ERROR-undefined":
+                tc_entries.append(entry)
+
+        # determine how many build numbers are represented in the data
+        # and prepare to count the values in each one
+        # offsets into the count array are:
+        #   0 = PASS, 1 = FAIL, 2 = SKIP, 3 = ERR
+        build_num_map = {}
+        for entry in tc_entries:
+            build_num_map[entry.build_number] = [0,0,0,0]
+
+        # gather the data for each row
+        result_map = {}
+        for entry in tc_entries:
+            row_key = entry.tguid
+
+            dprint("row_key=%s" % row_key)
+            if row_key not in result_map:
+                dprint("making a new row for '%s'" % row_key)
+                result_map[row_key] = {}
+
+            # add a data point (result) for this entry
+            result_map[row_key][entry.build_number] = (entry.result, entry.op, entry.ref)
+            # count the result
+            result = data_compare(entry.result, entry.ref, entry.op)
+            if result=="PASS":
+                build_num_map[entry.build_number][0] += 1
+            elif result=="FAIL":
+                build_num_map[entry.build_number][1] += 1
+            elif result=="SKIP":
+                build_num_map[entry.build_number][2] += 1
+            else:
+                build_num_map[entry.build_number][3] += 1
+
+        bn_list = build_num_map.keys()
+        bn_list.sort()
+
+        # FIXTHIS - should read col_limit from chart_config
+        col_limit = 10
+        col_list = bn_list[-col_limit:]
+        bn_col_count = len(col_list)
+
+        # OK, now build the table
+        html = '<table border="1" cellspacing="0">' + \
+            '<tr style="background-color:#cccccc">' + \
+            '<th colspan="' + str(bn_col_count+2) + '" align="left">' + \
+            'board: ' + board + '<br>' + \
+            'test spec: ' + spec + '<br>' + \
+            'kernel: ' + kver + '<br>' + \
+            '</th></tr>' + \
+            '<tr style="background-color:#cccccc">' + \
+            '<th rowspan="3" align="left">measure item</th>' + \
+            '<th rowspan="3" align="left">test set</th>' + \
+            '<th align="center" colspan="' + str(bn_col_count) + '">results</th>' + \
+            '</tr>' + \
+            '<tr style="background-color:#cccccc">' + \
+            '<th align="center" colspan="' + str(bn_col_count) + '">build_number</th>' + \
+            '</tr>'
+
+
+        row = '<tr style="background-color:#cccccc">'
+        for bn in col_list:
+            row += '<th>' + str(bn) + '</th>'
+        row += '</tr>'
+        html += row
+
+        # two rows per measure: one for values, one for the reference criteria
+        tg_list = result_map.keys()
+        tg_list.sort(cmp_alpha_num)
+
+        for tg in tg_list:
+            # break apart the tguid and divide it into test set and test case
+            parts = tg.split(".")
+            ts = parts[0]
+            tc = ".".join(parts[1:])
+
+            # FIXTHIS: add a column for the unit of each measure item
+            row_tc_head = '<tr><td>' + tc + '</td><td>' + ts + '</td>'
+            row_ref_head = '<tr><td>' + tc + '(ref)</td><td>' + ts + '</td>'
+            result = ""
+            row_tc = ""
+            row_ref = ""
+            for bn in col_list:
+                try:
+                    value,op,ref = result_map[tg][bn]
+                except:
+                    value, op, ref = "", "", ""
+                result = data_compare(value, ref, op)
+                if result=="PASS":
+                    cell_attr = 'style="background-color:#ccffcc" align="center"'
+                elif result=="FAIL":
+                    cell_attr = 'style="background-color:#ffcccc" align="center"'
+                else:
+                    cell_attr = 'align="center"'
+                    value = '-'
+
+                row_tc += ("<td %s>" % cell_attr) + value + "</td>"
+                row_ref += '<td align="center">' + op + " " + ref + "</td>"
+            row_tail = '</tr>'
+
+            # add a new line for each testcase
+            html += row_tc_head + row_tc + row_tail
+            # add a new line for the reference data of each testcase
+            html += row_ref_head + row_ref + row_tail
+
+        # now add the totals to the bottom of the table
+        row = '<tr style="background-color:#cccccc"><th colspan="' + str(bn_col_count+2) + '" align="center">Totals</th></tr>'
+        html += row
+
+        summary_str = ["pass","fail","skip","error"]
+        for i in range(4):
+            row = '<tr><th colspan="2" align="left">' + summary_str[i] + '</th>'
+            for bn in col_list:
+                try:
+                    result = build_num_map[bn][i]
+                except:
+                    result = ""
+                row += "<td>" + str(result) + "</td>"
+            row += '</tr>'
+            html += row
+        html += '</table>'
+        dprint("HTML for this table is: '%s'" % html)
+
+        chart = {
+                    "title": title,
+                    "chart_type": "measure_table",
+                    "data": html
+                }
+        chart_list.append(chart)
+    return chart_list
+
 # define a comparison function for strings that might end with numbers
 # like "test1, test2, ... test10"
 # if items end in digits, and the leading strings are the same, then
@@ -769,6 +926,8 @@ def make_chart_data(test_logdir, TESTDIR, chart_config_filename, data_lines):
     # make the requested charts
     if chart_type=="measure_plot":
         chart_list = make_measure_plots(test_name, chart_config, entries)
+    elif chart_type=="measure_table":
+        chart_list = make_measure_tables(test_name, chart_config, entries)
     elif chart_type=="testcase_table":
         chart_list = make_testcase_table(test_name, chart_config, entries)
     elif chart_type=="testset_summary_table":
-- 
2.7.4




