[Fuego] [PATCH 3/3] cyclictest: modify parsing and test measures

Daniel Sangorrin daniel.sangorrin at toshiba.co.jp
Tue Oct 31 08:31:55 UTC 2017


cyclictest can have more or fewer result lines depending on
how many threads are executed.

PROBLEM: Fuego currently does not support arrays of arbitrary
size that can be populated dynamically. The reason
is that we split reference.json from parser.py, which
could have easily solved that.

For now, I have decided to calculate the global minimum,
average and maximum latency variables for all threads (1
thread per core in the new default spec).

TODO: I tried to write chart_config.json so that it only displays the
maximum values, but it didn't work as expected (all measures
are shown).

Signed-off-by: Daniel Sangorrin <daniel.sangorrin at toshiba.co.jp>
---
 .../tests/Benchmark.cyclictest/chart_config.json   |  4 ++-
 engine/tests/Benchmark.cyclictest/criteria.json    | 26 ++++++++++++++++
 engine/tests/Benchmark.cyclictest/parser.py        | 35 ++++++++++------------
 engine/tests/Benchmark.cyclictest/reference.json   | 26 ++++++++++++++++
 engine/tests/Benchmark.cyclictest/reference.log    | 17 -----------
 5 files changed, 71 insertions(+), 37 deletions(-)
 create mode 100644 engine/tests/Benchmark.cyclictest/criteria.json
 create mode 100644 engine/tests/Benchmark.cyclictest/reference.json
 delete mode 100644 engine/tests/Benchmark.cyclictest/reference.log

diff --git a/engine/tests/Benchmark.cyclictest/chart_config.json b/engine/tests/Benchmark.cyclictest/chart_config.json
index 101d5ac..31b0d37 100644
--- a/engine/tests/Benchmark.cyclictest/chart_config.json
+++ b/engine/tests/Benchmark.cyclictest/chart_config.json
@@ -1,3 +1,5 @@
 {
-        "cyclictest":["Thread0", "Thread1"]
+	"chart_type": "measure_plot",
+	"measures": ["latencies.max_latency"],
+	"test_sets": ["default"]
 }
diff --git a/engine/tests/Benchmark.cyclictest/criteria.json b/engine/tests/Benchmark.cyclictest/criteria.json
new file mode 100644
index 0000000..f137d55
--- /dev/null
+++ b/engine/tests/Benchmark.cyclictest/criteria.json
@@ -0,0 +1,26 @@
+{
+    "schema_version":"1.0",
+    "criteria":[
+        {
+            "tguid":"default.latencies.max_latency",
+            "reference":{
+                "value":1000,
+                "operator":"le"
+            }
+        },
+        {
+            "tguid":"default.latencies.min_latency",
+            "reference":{
+                "value":1000,
+                "operator":"le"
+            }
+        },
+        {
+            "tguid":"default.latencies.avg_latency",
+            "reference":{
+                "value":1000,
+                "operator":"le"
+            }
+        }
+    ]
+}
diff --git a/engine/tests/Benchmark.cyclictest/parser.py b/engine/tests/Benchmark.cyclictest/parser.py
index 44425ea..3f4f05d 100755
--- a/engine/tests/Benchmark.cyclictest/parser.py
+++ b/engine/tests/Benchmark.cyclictest/parser.py
@@ -1,26 +1,23 @@
 #!/usr/bin/python
-# See common.py for description of command-line arguments
-
 import os, re, sys
 sys.path.insert(0, os.environ['FUEGO_CORE'] + '/engine/scripts/parser')
 import common as plib
 
-ref_section_pat = "^\[[\w\d_ .]+.[gle]{2}\]"
-cur_search_pat = re.compile("^T:([\s\d]+)(.*)P:(.*)C:(.*)Min:([\s\d]+)Act:([\s\d]+)Avg:([\s\d]+)Max:([\s\d]+)",re.MULTILINE)
-
-res_dict = {}
-cur_dict = {}
-
-pat_result = plib.parse(cur_search_pat)
-if pat_result:
-	cur_dict["Thread0.Min"] = '%d' % int(pat_result[0][4])
-	cur_dict["Thread0.Act"] = '%d' % int(pat_result[0][5])
-	cur_dict["Thread0.Avg"] = '%d' % int(pat_result[0][6])
-	cur_dict["Thread0.Max"] = '%d' % int(pat_result[0][7])
-	cur_dict["Thread1.Min"] = '%d' % int(pat_result[1][4])
-	cur_dict["Thread1.Act"] = '%d' % int(pat_result[1][5])
-	cur_dict["Thread1.Avg"] = '%d' % int(pat_result[1][6])
-	cur_dict["Thread1.Max"] = '%d' % int(pat_result[1][7])
+regex_string = "^T:.*Min:\s+(\d+).*Avg:\s+(\d+) Max:\s+(\d+)"
+measurements = {}
+matches = plib.parse_log(regex_string)
 
+if matches:
+	min_latencies = []
+	avg_latencies = []
+	max_latencies = []
+	for thread in matches:
+		min_latencies.append(float(thread[0]))
+		avg_latencies.append(float(thread[1]))
+		max_latencies.append(float(thread[2]))
+	measurements['default.latencies'] = [
+		{"name": "max_latency", "measure" : max(max_latencies)},
+		{"name": "min_latency", "measure" : min(min_latencies)},
+		{"name": "avg_latency", "measure" : sum(avg_latencies)/len(avg_latencies)}]
 
-sys.exit(plib.process_data(ref_section_pat, cur_dict, 'm', 'usec'))
+sys.exit(plib.process(measurements))
diff --git a/engine/tests/Benchmark.cyclictest/reference.json b/engine/tests/Benchmark.cyclictest/reference.json
new file mode 100644
index 0000000..415a8dd
--- /dev/null
+++ b/engine/tests/Benchmark.cyclictest/reference.json
@@ -0,0 +1,26 @@
+{
+    "test_sets":[
+        {
+            "name":"default",
+            "test_cases":[
+                {
+                    "name":"latencies",
+                    "measurements":[
+                        {
+                            "name":"max_latency",
+                            "unit":"us"
+                        },
+                        {
+                            "name":"min_latency",
+                            "unit":"us"
+                        },
+                        {
+                            "name":"avg_latency",
+                            "unit":"us"
+                        }
+                    ]
+                }
+            ]
+        }
+    ]
+}
diff --git a/engine/tests/Benchmark.cyclictest/reference.log b/engine/tests/Benchmark.cyclictest/reference.log
deleted file mode 100644
index 83a959d..0000000
--- a/engine/tests/Benchmark.cyclictest/reference.log
+++ /dev/null
@@ -1,17 +0,0 @@
-#sdfdsf
-[Thread0.Min|le]
-10000000
-[Thread0.Act|le]
-10000000
-[Thread0.Avg|le]
-10000000
-[Thread0.Max|le]
-10000000
-[Thread1.Min|le]
-10000000
-[Thread1.Act|le]
-10000000
-[Thread1.Avg|le]
-10000000
-[Thread1.Max|le]
-10000000
-- 
2.7.4




More information about the Fuego mailing list