
Some improvements

Branch: master
Author: Dirk Alders, 3 years ago
Commit: 5e0b7e3e96
3 changed files with 22 additions and 24 deletions
  1. module_status.py (+6 −3)
  2. run.py (+15 −20)
  3. scripts/Makefile (+1 −1)

module_status.py (+6 −3)

@@ -4,10 +4,11 @@
 
 import os
 import json
-import fstools
-import re
 import subprocess
 
+from unittest.run import module_uid
+from unittest.run import UNITTEST_KEY_TESTOBJECT_INFO
+
 
 class termcolors:
     HEADER = '\033[95m'
@@ -92,7 +93,9 @@
         except IOError:
             return STATUS_UNKNOWN
         else:
-            if ut_ut['testobject_information'] != ut_lib['testobject_information'] or ut_ut['unittest_information'] != ut_lib['unittest_information']:
+            tested_version = ut_lib.get(UNITTEST_KEY_TESTOBJECT_INFO, {}).get('Version')
+            current_version = module_uid(os.path.join(module_folder, 'pylibs', os.path.basename(module_folder)))
+            if ut_ut['testobject_information'] != ut_lib['testobject_information'] or ut_ut['unittest_information'] != ut_lib['unittest_information'] or tested_version != current_version:
                 return STATUS_OLD
             else:
                 ut_status = ut_lib.get('testobject_information', {}).get('State', 'unknown')
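
The check no longer trusts the stored metadata alone: it also compares the version recorded in the library's saved test results against a UID computed from the current module sources, so results are reported as STATUS_OLD as soon as the code changes. A minimal sketch of the extended check, assuming module_uid() derives a version identifier from the module sources under <module_folder>/pylibs/<module_name>:

# Sketch only: reproduces the extended staleness check from the hunk above.
# "unittest" here is the project-local package (not the stdlib module).
import os
from unittest.run import module_uid, UNITTEST_KEY_TESTOBJECT_INFO

def results_are_stale(ut_ut, ut_lib, module_folder):
    # Version recorded when the tests were last run against this module
    tested_version = ut_lib.get(UNITTEST_KEY_TESTOBJECT_INFO, {}).get('Version')
    # Version of the module sources as they are now
    current_version = module_uid(os.path.join(module_folder, 'pylibs', os.path.basename(module_folder)))
    return (ut_ut['testobject_information'] != ut_lib['testobject_information']
            or ut_ut['unittest_information'] != ut_lib['unittest_information']
            or tested_version != current_version)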

run.py (+15 −20)

@@ -333,11 +333,11 @@
     #
     print_action("Adding Lost Requirement Soul")
     data_collection['lost_souls']['item_list'] = []
-    for req_id in data_collection['specification'].get('item_dict', {}):
-        item = data_collection['specification']['item_dict'][req_id]
+    for req_id in data_collection[UNITTEST_KEY_SPECIFICATION].get('item_dict', {}):
+        item = data_collection[UNITTEST_KEY_SPECIFICATION]['item_dict'][req_id]
         if item['system_type_uid'] == '_MR7eNHYYEem_kd-7nxt1sg':
             testcase_available = False
-            for testrun in data_collection['testrun_list']:
+            for testrun in data_collection[UNITTEST_KEY_TESTRUNS]:
                 if req_id in testrun['testcases']:
                     testcase_available = True
                     break
@@ -347,9 +347,9 @@
     #
     print_action("Adding Lost Testcase Soul")
     data_collection['lost_souls']['testcase_list'] = []
-    for testrun in data_collection['testrun_list']:
+    for testrun in data_collection[UNITTEST_KEY_TESTRUNS]:
         for tc_id in testrun.get('testcases', {}):
-            if tc_id not in data_collection['specification'].get('item_dict', {}) and tc_id not in data_collection['lost_souls']['testcase_list']:
+            if tc_id not in data_collection[UNITTEST_KEY_SPECIFICATION].get('item_dict', {}) and tc_id not in data_collection['lost_souls']['testcase_list']:
                 data_collection['lost_souls']['testcase_list'].append(tc_id)
                 print_info('"%s" has no corresponding testcase' % tc_id, FAIL)
     #
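
The two hunks above swap hard-coded dictionary keys for constants exported by the project-local unittest.run module. The diff does not show their definitions; assuming the values simply mirror the string literals they replace, they would look roughly like this:

# Hypothetical definitions (not shown in the diff) - the values are inferred
# from the string keys the constants replace in this commit.
UNITTEST_KEY_SPECIFICATION = 'specification'
UNITTEST_KEY_TESTRUNS = 'testrun_list'
UNITTEST_KEY_TESTOBJECT_INFO = 'testobject_information'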
@@ -398,8 +398,6 @@
 
 
 def unittest_status(unittest_folder):
-    config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
-    #
     print_header('Checking status of all submodules')
     print_action('Updating all submodules (fetch)')
     process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch", cwd=os.path.dirname(unittest_folder), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -427,7 +425,7 @@
                 data[m] += line
         for key in data:
             if "working tree clean" not in data[key] and "working directory clean" not in data[key]:
-                data[key] = ("local changes", WARNING)
+                data[key] = ("local changes", FAIL)
             elif "Your branch is behind" in data[key]:
                 data[key] = ("no up to date (try git pull)", FAIL)
             elif "HEAD detached at" in data[key]:
@@ -447,19 +445,16 @@
     with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
         data_collection = json.loads(fh.read())
     print_action('Checking release state of this testrun... ')
-    if data_collection['testobject_information']['State'] != 'Released':
+    if data_collection[UNITTEST_KEY_TESTOBJECT_INFO]['State'] != 'Released':
         print_info("FAILED", FAIL)
     else:
         print_info("SUCCESS", OKGREEN)
     #
-    print_action('Checking up to dateness of testrults in library...')
-    try:
-        with open(os.path.join(unittest_folder, '..', 'pylibs', config.lib.__name__, '_testresults_', FILES['data-collection']), 'r') as fh:
-            lib_result = json.loads(fh.read())
-    except FileNotFoundError:
-        print_info("FAILED: Testresults not in library", FAIL)
-    else:
-        if data_collection['testobject_information'] != lib_result['testobject_information'] or data_collection['unittest_information'] != lib_result['unittest_information']:
-            print_info("FAILED", FAIL)
-        else:
-            print_info("SUCCESS", OKGREEN)
+    from unittest.module_status import module_unittest_status
+    print_action('Checking status of testrults in library...')
+    st = module_unittest_status(os.path.abspath(os.path.join(unittest_folder, '..')))
+    stc = {
+        'RELEASED': OKGREEN,
+        'IN_WORK': OKBLUE,
+    }.get(st, FAIL)
+    print_info(st, stc)
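
The ad-hoc comparison against the library's stored test results is replaced by a call to module_unittest_status() from module_status.py (extended in the first hunk of this commit). A minimal usage sketch, assuming the function takes the module folder one level above the unittest folder and returns a short status string such as 'RELEASED' or 'IN_WORK' (anything else is rendered with the FAIL colour in run.py):

# Sketch only: standalone version of the new library check.  "unittest" is the
# project-local package, not the stdlib; the status values are an assumption
# based on the colour mapping in the hunk above.
import os
from unittest.module_status import module_unittest_status

def library_result_status(unittest_folder):
    """Return (status, is_ok) for the test results stored in the library."""
    status = module_unittest_status(os.path.abspath(os.path.join(unittest_folder, '..')))
    return status, status in ('RELEASED', 'IN_WORK')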

scripts/Makefile (+1 −1)

@@ -27,7 +27,7 @@
 	@echo "    - testrun_smoke: Run some testcases"
 	@echo "    - testrun_single: Run one testcases"
 
-release: release_testcases full publish
+release: clean prepare testrun_full coverage_analysis finalise compile publish status
 full: clean prepare testrun_full coverage_analysis finalise compile status
 short: clean prepare testrun_short coverage_analysis finalise compile status
 smoke: clean prepare testrun_smoke coverage_analysis finalise compile status
