[Fuego] [PATCH] rm-jobs: fix bug and update results.json

Daniel Sangorrin daniel.sangorrin at toshiba.co.jp
Mon Aug 7 03:02:10 UTC 2017


Updating results.json required making the update function
independent of the environment variables used by
common.py.

Signed-off-by: Daniel Sangorrin <daniel.sangorrin at toshiba.co.jp>
---
 engine/scripts/ftc                            |  33 ++++---
 engine/scripts/parser/common.py               | 130 +------------------------
 engine/scripts/parser/fuego_parser_results.py | 131 ++++++++++++++++++++++++++
 engine/scripts/parser/fuego_parser_utils.py   |  60 ++++++++++++
 4 files changed, 217 insertions(+), 137 deletions(-)
 create mode 100644 engine/scripts/parser/fuego_parser_results.py
 create mode 100644 engine/scripts/parser/fuego_parser_utils.py

diff --git a/engine/scripts/ftc b/engine/scripts/ftc
index ee4becf..1985591 100755
--- a/engine/scripts/ftc
+++ b/engine/scripts/ftc
@@ -1120,18 +1120,27 @@ def do_rm_jobs(conf, options):
                 for match in matches:
                     server.delete_job(match)
                     if remove_logs:
-                        # remove the per-build_number log folders for the job
-                        path = conf.FUEGO_RW + '/logs/' + '.'.join(match.split('.')[2:]) + '/' + '.'.join(match.split('.')[:2]) + '*'
-                        folders = glob.glob(path)
-                        for folder in folders:
-                            print 'Removing folder ' + folder
-                            shutil.rmtree(folder, ignore_errors=True)
-                        # remove the log folder itself in case it became empty
-                        folders = glob.glob(path)
-                        if not folders:
-                            path = conf.FUEGO_RW + '/logs/' + '.'.join(match.split('.')[2:])
-                            print 'Removing folder ' + path
-                            shutil.rmtree(path, ignore_errors=True)
+                        # remove the per-build_number log directories for the job
+                        TESTDIR = '.'.join(match.split('.')[2:])
+                        test_logdir = conf.FUEGO_RW + '/logs/' + TESTDIR
+                        build_dirs = glob.glob(test_logdir + '/' + '.'.join(match.split('.')[:2]) + '*')
+                        for d in build_dirs:
+                            if not os.path.isdir(d):
+                                continue
+                            print 'Removing folder ' + d
+                            shutil.rmtree(d)
+                        # check if the test log directory became empty
+                        remaining_build_dirs = glob.glob(test_logdir + '/*')
+                        build_dirs = [d for d in remaining_build_dirs if os.path.isdir(d)]
+                        if not build_dirs:
+                            print 'Removing test logdir: ' + test_logdir
+                            shutil.rmtree(test_logdir, ignore_errors=True)
+                        else:
+                            print 'updating results.json'
+                            sys.path.insert(0, conf.FUEGO_CORE + '/engine/scripts/parser')
+                            from fuego_parser_results import update_results_json
+                            REF_JSON = '%s/engine/tests/%s/reference.json' % (conf.FUEGO_CORE, TESTDIR)
+                            update_results_json(test_logdir, TESTDIR, REF_JSON)
                     count = count + 1
     if not quiet:
         print "Deleted %d jobs." % count
diff --git a/engine/scripts/parser/common.py b/engine/scripts/parser/common.py
index 3be5d32..738a128 100644
--- a/engine/scripts/parser/common.py
+++ b/engine/scripts/parser/common.py
@@ -27,7 +27,7 @@ By Daniel Sangorrin (July 2017)
 """
 
 import sys, os, re, json, time, collections
-from filelock import FileLock
+from fuego_parser_utils import hls, split_test_id, get_test_case
 
 debug=0
 
@@ -76,23 +76,11 @@ except:
 REF_JSON  = '%s/engine/tests/%s/reference.json' % (FUEGO_CORE, TESTDIR)
 TEST_LOG = '%s/logs/%s/%s.%s.%s.%s/testlog.txt' % (FUEGO_RW, TESTDIR, NODE_NAME, TESTSPEC, BUILD_NUMBER, BUILD_ID)
 RUN_JSON = LOGDIR + '/run.json'
-RESULTS_JSON = LOGDIR + '/../results.json'
 
 #Here are some pre-packaged regex strings
 # m[0] = numer, m(1) = status
 REGEX_TEST_NUMBER_STATUS = '"^TEST-(\d+) (.*)$'
 
-# helper function for printing warnings and errors
-def hls(string,type_char):
-    if type_char == "w":
-        type_string = "WARNING"
-    else:
-        type_string = "ERROR"
-
-    print "########################### " + type_string + " ###############################"
-    print string
-    print "-----"
-
 # used by test's parser.py
 def parse_log(regex_string):
     print "Parsing " + TEST_LOG + " with regex: " + regex_string
@@ -112,26 +100,6 @@ def parse(regex):
     print "matches: " + str(matches)
     return matches
 
-def split_test_id(test_case_id):
-    try:
-        test_set_name, test_case_name = test_case_id.split('.')
-    except:
-        test_set_name = "default"
-        test_case_name = test_case_id
-
-    return test_set_name, test_case_name
-
-def get_test_case(test_case_id, run_data):
-    test_set_name, test_case_name = split_test_id(test_case_id)
-
-    for test_set in run_data['test_sets']:
-        if test_set['name'] == test_set_name:
-            for test_case in test_set['test_cases']:
-                if test_case['name'] == test_case_name:
-                    return test_case
-    hls("Unable to get test case " + test_case_id, "e")
-    return None
-
 def add_results(results, run_data):
     dprint("in add_results")
     if not results:
@@ -512,97 +480,6 @@ def prepare_run_data(results):
 
     return run_data
 
-def extract_test_case_ids(ref):
-    test_case_ids = []
-    for test_set in ref['test_sets']:
-        test_set_id = test_set['name']
-        for test_case in test_set['test_cases']:
-            test_case_id = test_set_id + '.' + test_case['name']
-            test_case_ids.append(test_case_id)
-    return test_case_ids
-
-def update_results_json():
-    dprint("in update_results_json")
-    testdir = LOGDIR + '/../'
-    results_data = { "test_name" : TESTDIR }
-
-    try:
-        with open(REF_JSON) as ref_file:
-            ref = json.load(ref_file, object_pairs_hook=collections.OrderedDict)
-        ref_test_case_ids = extract_test_case_ids(ref)
-    except:
-        print "reference.json not available"
-        ref_test_case_ids = None
-
-    # look for run.json files on each build folder
-    for folder in sorted(os.listdir(testdir)):
-        path = testdir + folder
-        if not os.path.isdir(path):
-            continue
-        run_file = path + '/run.json'
-        try:
-            with open(run_file, 'r') as f:
-                run_data = json.load(f)
-        except:
-            hls('Unable to open ' + run_file, 'w')
-            continue
-
-        try:
-            if not ref_test_case_ids:
-                test_case_ids = extract_test_case_ids(run_data)
-                print "extracted: " + str(test_case_ids)
-            else:
-                test_case_ids = ref_test_case_ids
-
-            for test_case_id in test_case_ids:
-                test_set_name, test_case_name = split_test_id(test_case_id)
-                test_case = get_test_case(test_case_id, run_data)
-
-                key = '%s-%s-%s-%s-%s' % (run_data['metadata']['board'],
-                                        run_data['metadata']['test_spec'],
-                                        run_data['metadata']['kernel_version'],
-                                        test_set_name,
-                                        test_case_name)
-
-                if key not in results_data:
-                    results_data[key] = {
-                        "test_plan": run_data['metadata']['test_plan'],
-                        "test_spec": run_data['metadata']['test_spec'],
-                        'board' : run_data['metadata']['board'],
-                        'kernel_version' : run_data['metadata']['kernel_version'],
-                        'toolchain' : run_data['metadata']['toolchain'],
-                        'test_set' : test_set_name,
-                        'test_case' : test_case_name
-                    }
-                    results_data[key]['status'] = []
-                    results_data[key]['measurements'] = {}
-                    results_data[key]['build_number'] =[]
-                    results_data[key]['start_time'] = []
-                    results_data[key]['duration_ms'] = []
-
-                results_data[key]['status'].append(test_case['status'])
-                results_data[key]['build_number'].append(run_data['metadata']['build_number'])
-                results_data[key]['start_time'].append(run_data['metadata']['start_time'])
-                results_data[key]['duration_ms'].append(run_data['duration_ms'])
-
-                measurements = test_case.get('measurements', [])
-                for measure in measurements:
-                    value = measure.get('measure', 0)
-                    status = measure.get('status', "SKIP")
-                    if measure['name'] not in results_data[key]['measurements']:
-                        results_data[key]['measurements'][measure['name']] = {'measure': [value], 'status': [status]}
-                    else:
-                        results_data[key]['measurements'][measure['name']]['measure'].append(value)
-                        results_data[key]['measurements'][measure['name']]['status'].append(status)
-        except:
-            hls('Unable to parse ' + run_file, 'w')
-            continue
-
-    print "Writing merged results to ", RESULTS_JSON
-    with FileLock(RESULTS_JSON + '.lock'):
-        with open(RESULTS_JSON, 'w') as f:
-            f.write(json.dumps(results_data, sort_keys=True, indent=4, separators=(',', ': ')))
-
 def delete(data, key):
     if key in data:
         del(data[key])
@@ -623,10 +500,13 @@ def process(results={}):
         e.g.: results['test_set1.test_casea'] = "PASS"
         e.g.: results['Sequential_Output.Block'] = [{'name':'speed', 'measure':123}, {'name':'cpu', 'measure':78}]
     """
+    from fuego_parser_results import update_results_json
+
     print "parsed results: " + str(results)
     run_data = prepare_run_data(results)
     save_run_json(run_data)
-    update_results_json()
+    test_logdir = FUEGO_RW + '/logs/' + TESTDIR
+    update_results_json(test_logdir, TESTDIR, REF_JSON)
     status = run_data.get('status', 'FAIL')
     return 0 if status == 'PASS' else 1
 
diff --git a/engine/scripts/parser/fuego_parser_results.py b/engine/scripts/parser/fuego_parser_results.py
new file mode 100644
index 0000000..4e64a06
--- /dev/null
+++ b/engine/scripts/parser/fuego_parser_results.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Toshiba corp.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+"""
+fuego_parser_results.py - This library is used to merge run.json files into a single
+             results.json file
+By Daniel Sangorrin (August 2017)
+"""
+
+import os, json, collections
+from filelock import FileLock
+from fuego_parser_utils import split_test_id, get_test_case
+
+debug = 0
+
+def dprint(msg):
+    if debug:
+        print "DEBUG:", msg
+
+def extract_test_case_ids(ref):
+    test_case_ids = []
+    for test_set in ref['test_sets']:
+        test_set_id = test_set['name']
+        for test_case in test_set['test_cases']:
+            test_case_id = test_set_id + '.' + test_case['name']
+            test_case_ids.append(test_case_id)
+    return test_case_ids
+
+def update_results_json(test_logdir, TESTDIR, REF_JSON):
+    dprint("In update_results_json")
+    results_data = { "test_name" : TESTDIR }
+
+    try:
+        with open(REF_JSON) as ref_file:
+            ref = json.load(ref_file, object_pairs_hook=collections.OrderedDict)
+        ref_test_case_ids = extract_test_case_ids(ref)
+    except:
+        print "reference.json not available"
+        ref_test_case_ids = None
+
+    # look for run.json files on each build folder
+    for folder in sorted(os.listdir(test_logdir)):
+        path = test_logdir + '/' + folder
+        if not os.path.isdir(path):
+            continue
+        run_file = path + '/run.json'
+        try:
+            with open(run_file, 'r') as f:
+                run_data = json.load(f)
+        except:
+            print('WARNING: Unable to open ' + run_file)
+            continue
+
+        try:
+            if not ref_test_case_ids:
+                test_case_ids = extract_test_case_ids(run_data)
+                dprint("Test cases extracted from run data: " + str(test_case_ids))
+            else:
+                test_case_ids = ref_test_case_ids
+
+            for test_case_id in test_case_ids:
+                dprint("Processing test_case_id: " + str(test_case_id))
+                test_set_name, test_case_name = split_test_id(test_case_id)
+                test_case = get_test_case(test_case_id, run_data)
+
+                key = '%s-%s-%s-%s-%s' % (run_data['metadata']['board'],
+                                        run_data['metadata']['test_spec'],
+                                        run_data['metadata']['kernel_version'],
+                                        test_set_name,
+                                        test_case_name)
+
+                dprint("Key: " + key)
+                if key not in results_data:
+                    results_data[key] = {
+                        "test_plan": run_data['metadata']['test_plan'],
+                        "test_spec": run_data['metadata']['test_spec'],
+                        'board' : run_data['metadata']['board'],
+                        'kernel_version' : run_data['metadata']['kernel_version'],
+                        'toolchain' : run_data['metadata']['toolchain'],
+                        'test_set' : test_set_name,
+                        'test_case' : test_case_name
+                    }
+                    results_data[key]['status'] = []
+                    results_data[key]['measurements'] = {}
+                    results_data[key]['build_number'] =[]
+                    results_data[key]['start_time'] = []
+                    results_data[key]['duration_ms'] = []
+
+                results_data[key]['status'].append(test_case['status'])
+                results_data[key]['build_number'].append(run_data['metadata']['build_number'])
+                results_data[key]['start_time'].append(run_data['metadata']['start_time'])
+                results_data[key]['duration_ms'].append(run_data['duration_ms'])
+
+                measurements = test_case.get('measurements', [])
+                for measure in measurements:
+                    value = measure.get('measure', 0)
+                    status = measure.get('status', "SKIP")
+                    if measure['name'] not in results_data[key]['measurements']:
+                        results_data[key]['measurements'][measure['name']] = {'measure': [value], 'status': [status]}
+                    else:
+                        results_data[key]['measurements'][measure['name']]['measure'].append(value)
+                        results_data[key]['measurements'][measure['name']]['status'].append(status)
+        except:
+            print('WARNING: Unable to parse ' + run_file)
+            continue
+
+    RESULTS_JSON = test_logdir + '/results.json'
+    print "Writing merged results to ", RESULTS_JSON
+    with FileLock(RESULTS_JSON + '.lock'):
+        with open(RESULTS_JSON, 'w') as f:
+            f.write(json.dumps(results_data, sort_keys=True, indent=4, separators=(',', ': ')))
diff --git a/engine/scripts/parser/fuego_parser_utils.py b/engine/scripts/parser/fuego_parser_utils.py
new file mode 100644
index 0000000..bcddb7b
--- /dev/null
+++ b/engine/scripts/parser/fuego_parser_utils.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Toshiba corp.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+"""
+fuego_parser_utils.py - This library contains common helper functions that are independent of environment variables
+By Daniel Sangorrin (August 2017)
+"""
+
+# helper function for printing warnings and errors
+def hls(string,type_char):
+    if type_char == "w":
+        type_string = "WARNING"
+    else:
+        type_string = "ERROR"
+
+    print "########################### " + type_string + " ###############################"
+    print string
+    print "-----"
+
+def split_test_id(test_case_id):
+    try:
+        test_set_name, test_case_name = test_case_id.split('.')
+    except:
+        test_set_name = "default"
+        test_case_name = test_case_id
+
+    return test_set_name, test_case_name
+
+def get_test_case(test_case_id, run_data):
+    test_set_name, test_case_name = split_test_id(test_case_id)
+
+    for test_set in run_data['test_sets']:
+        if test_set['name'] == test_set_name:
+            for test_case in test_set['test_cases']:
+                if test_case['name'] == test_case_name:
+                    return test_case
+
+    hls("Unable to get test case " + test_case_id, "e")
+    return None
+
-- 
2.7.4




More information about the Fuego mailing list