Unittest improvements (output)

Dirk Alders 2021-03-04 18:36:23 +01:00
parent 5e0b7e3e96
commit ecec65be06
6 changed files with 578 additions and 359 deletions

jsonlog.py (new file, 297 lines)

@@ -0,0 +1,297 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import json
import os
import subprocess
import fstools
from unittest.output import STATUS_AVAILABLE, STATUS_CHANGED, STATUS_CLEAN, STATUS_EXISTS, STATUS_IN_WORK, STATUS_MISSING, STATUS_OLD, STATUS_RELEASED, STATUS_UNKNOWN
JSONLOG_FNAME = 'unittest.json'
#
# MAIN KEYS
#
MAIN_KEY_COVERAGE_INFO = 'coverage_information'
MAIN_KEY_LOST_SOULS = "lost_souls"
MAIN_KEY_SPECIFICATION = 'specification'
MAIN_KEY_SYSTEM_INFO = 'system_information'
MAIN_KEY_TESTOBJECT_INFO = 'testobject_information'
MAIN_KEY_TESTRUNS = 'testrun_list'
MAIN_KEY_UNITTEST_INFO = 'unittest_information'
#
# SUBKEYS FOR MAIN_KEY_COVERAGE_INFO
#
COVI_KEY_NAME = 'name'
COVI_KEY_FILEPATH = 'filepath'
COVI_KEY_LINE_COVERAGE = 'line_coverage'
COVI_KEY_BRANCH_COVERAGE = 'branch_coverage'
COVI_KEY_FILES = 'files'
#
# SUBKEYS FOR MAIN_KEY_LOST_SOULS
#
LOST_ITEMLIST = "item_list"
LOST_TESTCASELIST = "testcase_list"
#
# SUBKEYS FOR MAIN_KEY_SPECIFICATION
#
SPEC_ITEM_DICT = 'item_dict'
#
# SUBKEYS FOR MAIN_KEY_SYSTEM_INFO
#
SYSI_ARCHITECTURE = "Architecture"
SYSI_DISTRIBUTION = "Distribution"
SYSI_HOSTNAME = "Hostname"
SYSI_KERNEL = "Kernel"
SYSI_MACHINE = "Machine"
SYSI_PATH = "Path"
SYSI_SYSTEM = "System"
SYSI_USERNAME = "Username"
#
# SUBKEYS FOR MAIN_KEY_TESTOBJECT_INFO
#
TOBI_DEPENDENCIES = "Dependencies"
TOBI_DESCRIPTION = "Description"
TOBI_NAME = "Name"
TOBI_STATE = "State"
TOBI_STATE_RELESED = 'Released'
TOBI_STATE_IN_DEVELOPMENT = 'In development'
TOBI_SUPP_INTERP = "Supported Interpreters"
TOBI_VERSION = "Version"
#
# SUBKEYS FOR MAIN_KEY_TESTRUNS
#
TRUN_TESTCASES = 'testcases'
#
# SUBKEYS FOR MAIN_KEY_UNITTEST_INFO
#
UTEI_VERSION = TOBI_VERSION
def get_lib_folder(ut_folder):
return os.path.join(ut_folder, 'pylibs', os.path.basename(ut_folder))
def get_ut_config(ut_folder):
return os.path.join(get_ut_src_folder(ut_folder), 'config.py')
def get_ut_testcase_folder(ut_folder):
return os.path.join(get_ut_src_folder(ut_folder), 'tests')
def get_ut_testresult_folder(ut_folder):
return os.path.join(get_ut_subfolder(ut_folder), 'testresults')
def get_lib_testresult_folder(ut_folder):
return os.path.join(get_lib_folder(ut_folder), '_testresults_')
def get_lib_jsonlog(ut_folder):
return os.path.join(get_lib_testresult_folder(ut_folder), JSONLOG_FNAME)
def get_ut_jsonlog(ut_folder):
return os.path.join(get_ut_testresult_folder(ut_folder), JSONLOG_FNAME)
def get_ut_src_folder(ut_folder):
return os.path.join(get_ut_subfolder(ut_folder), 'src')
def get_ut_subfolder(ut_folder):
return os.path.join(ut_folder, 'unittest')
def module_uid(path):
return fstools.uid_filelist(path, '*.py', rekursive=True)
def __get_release_state__(ut_folder, lib):
if lib:
fn = get_lib_jsonlog(ut_folder)
else:
fn = get_ut_jsonlog(ut_folder)
try:
with open(fn, 'r') as fh:
ut_data = json.loads(fh.read())
except IOError:
return STATUS_MISSING
else:
ut_status = ut_data.get(MAIN_KEY_TESTOBJECT_INFO, {}).get(TOBI_STATE, 'unknown')
if 'released' in ut_status.lower():
return STATUS_RELEASED
elif 'work' in ut_status.lower():
return STATUS_IN_WORK
else:
return STATUS_UNKNOWN
def get_lib_release_state(ut_folder):
return __get_release_state__(ut_folder, True)
def get_ut_release_state(ut_folder):
return __get_release_state__(ut_folder, False)
def __get_testcase_integrity__(ut_folder, lib):
if lib:
fn = get_lib_jsonlog(ut_folder)
else:
fn = get_ut_jsonlog(ut_folder)
try:
with open(fn, 'r') as fh:
ut_data = json.loads(fh.read())
except IOError:
return STATUS_MISSING
else:
tc_version = ut_data.get(MAIN_KEY_UNITTEST_INFO, {}).get(UTEI_VERSION)
current_version = module_uid(get_ut_testcase_folder(ut_folder))
if tc_version == current_version:
return STATUS_CLEAN
else:
return STATUS_CHANGED
def get_lib_testcase_integrity(ut_folder):
return __get_testcase_integrity__(ut_folder, True)
def get_ut_testcase_integrity(ut_folder):
return __get_testcase_integrity__(ut_folder, False)
def __get_src_integrity__(ut_folder, lib):
if lib:
fn = get_lib_jsonlog(ut_folder)
else:
fn = get_ut_jsonlog(ut_folder)
try:
with open(fn, 'r') as fh:
ut_data = json.loads(fh.read())
except IOError:
return STATUS_MISSING
else:
tested_version = ut_data.get(MAIN_KEY_TESTOBJECT_INFO, {}).get(TOBI_VERSION)
current_version = module_uid(get_lib_folder(ut_folder))
if tested_version == current_version:
return STATUS_CLEAN
else:
return STATUS_CHANGED
def get_lib_src_integrity(ut_folder):
return __get_src_integrity__(ut_folder, True)
def get_ut_src_integrity(ut_folder):
return __get_src_integrity__(ut_folder, False)
def status_module(ut_folder):
try:
with open(get_lib_jsonlog(ut_folder), 'r') as fh:
ut_lib = json.loads(fh.read())
except IOError:
return STATUS_MISSING
else:
try:
with open(get_ut_jsonlog(ut_folder), 'r') as fh:
ut_ut = json.loads(fh.read())
except IOError:
return STATUS_UNKNOWN
else:
tested_version = ut_lib.get(MAIN_KEY_TESTOBJECT_INFO, {}).get(TOBI_VERSION)
current_version = module_uid(get_lib_folder(ut_folder))
if ut_ut[MAIN_KEY_TESTOBJECT_INFO] != ut_lib[MAIN_KEY_TESTOBJECT_INFO] or ut_ut[MAIN_KEY_UNITTEST_INFO] != ut_lib[MAIN_KEY_UNITTEST_INFO] or tested_version != current_version:
return STATUS_OLD
else:
ut_status = ut_lib.get(MAIN_KEY_TESTOBJECT_INFO, {}).get(TOBI_STATE, 'unknown')
if 'released' in ut_status.lower():
return STATUS_RELEASED
elif 'work' in ut_status.lower():
return STATUS_IN_WORK
else:
return STATUS_UNKNOWN
def versions_module(ut_folder):
try:
with open(get_ut_jsonlog(ut_folder), 'r') as fh:
ut = json.loads(fh.read())
except IOError:
return STATUS_UNKNOWN
else:
interpreters = ut.get(MAIN_KEY_TESTOBJECT_INFO, {}).get(TOBI_SUPP_INTERP, '')
interpreters = interpreters.split(',')
for i in range(len(interpreters)):
interpreters[i] = interpreters[i].strip()
interpreters[i] = interpreters[i][6:]
return ', '.join(interpreters)
def __get_coverage__(ut_folder, lib):
if lib:
fn = get_lib_jsonlog(ut_folder)
else:
fn = get_ut_jsonlog(ut_folder)
try:
with open(fn, 'r') as fh:
ut = json.loads(fh.read())
except IOError:
return None, None
else:
lcov = ut.get(MAIN_KEY_COVERAGE_INFO, [{}])[0].get(COVI_KEY_LINE_COVERAGE)
bcov = ut.get(MAIN_KEY_COVERAGE_INFO, [{}])[0].get(COVI_KEY_BRANCH_COVERAGE)
return lcov, bcov
def lib_coverage(ut_folder):
return __get_coverage__(ut_folder, True)
def ut_coverage(ut_folder):
return __get_coverage__(ut_folder, False)
def status_git(ut_folder):
p = subprocess.Popen("git -C %s status 2> /dev/null" % ut_folder, stdout=subprocess.PIPE, shell=True)
output = p.communicate()[0]
p_status = p.wait()
if p_status == 0:
if b"nichts zu committen" in output and b"um lokale Commits zu publizieren" not in output:
return STATUS_CLEAN
else:
return STATUS_CHANGED
else:
return STATUS_UNKNOWN
def status_doc(ut_folder):
if os.path.exists(os.path.join(get_lib_folder(ut_folder), '_docs_', 'index.html')):
return STATUS_AVAILABLE
else:
if os.path.exists(os.path.join(ut_folder, 'docs', 'index.rst')):
return STATUS_IN_WORK
else:
return STATUS_MISSING
def status_spec(ut_folder):
if os.path.exists(os.path.join(ut_folder, 'requirements', 'specification.reqif')):
try:
with open(get_ut_jsonlog(ut_folder), 'r') as fh:
ut = json.loads(fh.read())
if len(ut[MAIN_KEY_LOST_SOULS][LOST_ITEMLIST]) > 0 or len(ut[MAIN_KEY_LOST_SOULS][LOST_TESTCASELIST]) > 0:
return STATUS_IN_WORK
else:
return STATUS_CLEAN
except IOError:
return STATUS_EXISTS
else:
return STATUS_MISSING
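
Usage sketch (illustration only, not part of the diff): assuming the project's own unittest package is importable (it shadows the stdlib module of the same name) and a hypothetical module checkout at ~/work/fstools, the helpers above combine like this:

import os
from unittest import jsonlog   # this project's unittest package, not the stdlib module

ut_folder = os.path.expanduser('~/work/fstools')   # hypothetical module folder
print(jsonlog.get_ut_jsonlog(ut_folder))    # .../fstools/unittest/testresults/unittest.json
print(jsonlog.get_lib_jsonlog(ut_folder))   # .../fstools/pylibs/fstools/_testresults_/unittest.json
print(jsonlog.status_module(ut_folder))     # e.g. RELEASED, IN_WORK, OLD, MISSING or UNKNOWN
print(jsonlog.lib_coverage(ut_folder))      # (line_coverage, branch_coverage) or (None, None)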


@@ -3,52 +3,19 @@
#
import os
import json
import subprocess
from unittest.run import module_uid
from unittest.run import UNITTEST_KEY_TESTOBJECT_INFO
class termcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
STATUS_RELEASED = 'RELEASED'
STATUS_AVAILABLE = 'AVAILABLE'
STATUS_IN_WORK = 'IN_WORK'
STATUS_EXISTS = 'EXISTS'
STATUS_OLD = 'OLD'
STATUS_MISSING = 'MISSING'
#
STATUS_CLEAN = 'CLEAN'
STATUS_CHANGED = 'CHANGED'
#
STATUS_UNKNOWN = 'UNKNOWN'
from unittest.output import termcolors, coverage_output
from unittest.output import STATUS_COLORS, STATUS_UNKNOWN
from unittest.jsonlog import lib_coverage, status_doc, status_git, status_module, status_spec, versions_module
STATUS_LENGTH = 13
STR_STATUS = {
STATUS_RELEASED: termcolors.OKGREEN + (STATUS_LENGTH - len(STATUS_RELEASED)) * ' ' + STATUS_RELEASED + termcolors.ENDC,
STATUS_AVAILABLE: termcolors.OKGREEN + (STATUS_LENGTH - len(STATUS_AVAILABLE)) * ' ' + STATUS_AVAILABLE + termcolors.ENDC,
STATUS_IN_WORK: termcolors.WARNING + (STATUS_LENGTH - len(STATUS_IN_WORK)) * ' ' + STATUS_IN_WORK + termcolors.ENDC,
STATUS_OLD: termcolors.WARNING + (STATUS_LENGTH - len(STATUS_OLD)) * ' ' + STATUS_OLD + termcolors.ENDC,
STATUS_EXISTS: termcolors.WARNING + (STATUS_LENGTH - len(STATUS_EXISTS)) * ' ' + STATUS_EXISTS + termcolors.ENDC,
STATUS_MISSING: termcolors.FAIL + (STATUS_LENGTH - len(STATUS_MISSING)) * ' ' + STATUS_MISSING + termcolors.ENDC,
#
STATUS_CLEAN: termcolors.OKGREEN + (STATUS_LENGTH - len(STATUS_CLEAN)) * ' ' + STATUS_CLEAN + termcolors.ENDC,
STATUS_CHANGED: termcolors.WARNING + (STATUS_LENGTH - len(STATUS_CHANGED)) * ' ' + STATUS_CHANGED + termcolors.ENDC,
#
STATUS_UNKNOWN: termcolors.FAIL + (STATUS_LENGTH - len(STATUS_UNKNOWN)) * ' ' + STATUS_UNKNOWN + termcolors.ENDC,
}
def status_output(status_or_text, default_color=STATUS_COLORS[STATUS_UNKNOWN]):
if status_or_text in STATUS_COLORS:
default_color = STATUS_COLORS[status_or_text]
return default_color + (STATUS_LENGTH - len(status_or_text[:STATUS_LENGTH])) * ' ' + status_or_text[:STATUS_LENGTH] + termcolors.ENDC
def module_status_head():
@@ -67,113 +34,14 @@ def module_status_head():
return rv
def module_status_line(module_folder):
def module_status_line(ut_folder):
rv = '%25s%s%s%s%s%s%s\n' % (
os.path.basename(module_folder) + ':',
STR_STATUS.get(module_unittest_status(module_folder), STATUS_UNKNOWN),
STR_STATUS.get(module_doc_status(module_folder), STATUS_UNKNOWN),
module_unittest_versions(module_folder),
module_unittest_coverage(module_folder),
STR_STATUS.get(module_spec_status(module_folder), STATUS_UNKNOWN),
STR_STATUS.get(module_git_status(module_folder), STATUS_UNKNOWN),
os.path.basename(ut_folder) + ':',
status_output(status_module(ut_folder)),
status_output(status_doc(ut_folder)),
status_output(versions_module(ut_folder), termcolors.BOLD),
coverage_output(*lib_coverage(ut_folder), length=STATUS_LENGTH),
status_output(status_spec(ut_folder)),
status_output(status_git(ut_folder)),
)
return rv
def module_unittest_status(module_folder):
try:
with open(os.path.join(module_folder, 'pylibs', os.path.basename(module_folder), '_testresults_', 'unittest.json'), 'r') as fh:
ut_lib = json.loads(fh.read())
except IOError:
return STATUS_MISSING
else:
try:
with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
ut_ut = json.loads(fh.read())
except IOError:
return STATUS_UNKNOWN
else:
tested_version = ut_lib.get(UNITTEST_KEY_TESTOBJECT_INFO, {}).get('Version')
current_version = module_uid(os.path.join(module_folder, 'pylibs', os.path.basename(module_folder)))
if ut_ut['testobject_information'] != ut_lib['testobject_information'] or ut_ut['unittest_information'] != ut_lib['unittest_information'] or tested_version != current_version:
return STATUS_OLD
else:
ut_status = ut_lib.get('testobject_information', {}).get('State', 'unknown')
if 'released' in ut_status.lower():
return STATUS_RELEASED
elif 'work' in ut_status.lower():
return STATUS_IN_WORK
else:
return STATUS_UNKNOWN
def module_unittest_versions(module_folder):
try:
with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
ut = json.loads(fh.read())
except IOError:
return STR_STATUS[STATUS_UNKNOWN]
else:
interpreters = ut.get('testobject_information', '').get('Supported Interpreters')
interpreters = interpreters.split(',')
for i in range(len(interpreters)):
interpreters[i] = interpreters[i].strip()
interpreters[i] = interpreters[i][6:]
rv = ', '.join(interpreters)
return (STATUS_LENGTH - len(rv)) * ' ' + rv
def module_unittest_coverage(module_folder):
try:
with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
ut = json.loads(fh.read())
except IOError:
return STR_STATUS[STATUS_UNKNOWN]
else:
lcov = ut.get('coverage_information', [{}])[0].get('line_coverage')
bcov = ut.get('coverage_information', [{}])[0].get('branch_coverage')
if lcov is None or bcov is None:
return STR_STATUS[STATUS_UNKNOWN]
elif lcov > 90:
rv = termcolors.OKGREEN + '%3d%% (%3d%%)' % (lcov, bcov) + termcolors.ENDC
else:
rv = termcolors.WARNING + '%3d%% (%3d%%)' % (lcov, bcov) + termcolors.ENDC
return (STATUS_LENGTH - 11) * ' ' + rv
def module_git_status(module_folder):
p = subprocess.Popen("git -C %s status 2> /dev/null" % module_folder, stdout=subprocess.PIPE, shell=True)
output = p.communicate()[0]
p_status = p.wait()
if p_status == 0:
if b"nichts zu committen" in output and b"um lokale Commits zu publizieren" not in output:
return STATUS_CLEAN
else:
return STATUS_CHANGED
else:
return STATUS_UNKNOWN
def module_doc_status(module_folder):
if os.path.exists(os.path.join(module_folder, 'pylibs', os.path.basename(module_folder), '_docs_', 'index.html')):
return STATUS_AVAILABLE
else:
if os.path.exists(os.path.join(module_folder, 'docs', 'index.rst')):
return STATUS_IN_WORK
else:
return STATUS_MISSING
def module_spec_status(module_folder):
if os.path.exists(os.path.join(module_folder, 'requirements', 'specification.reqif')):
try:
with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
ut = json.loads(fh.read())
if len(ut['lost_souls']['item_list']) > 0 or len(ut['lost_souls']['testcase_list']) > 0:
return STATUS_IN_WORK
else:
return STATUS_CLEAN
except IOError:
return STATUS_EXISTS
else:
return STATUS_MISSING
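
Usage sketch (illustration only): assuming this file is unittest/module_status.py, a status table for a few hypothetical module checkouts could be printed like this:

from unittest.module_status import module_status_head, module_status_line

table = module_status_head()
for folder in ('/path/to/fstools', '/path/to/report'):   # hypothetical module folders
    table += module_status_line(folder)
print(table, end='')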

output.py (new file, 81 lines)

@@ -0,0 +1,81 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# STATUS_KEYS
#
STATUS_AVAILABLE = 'AVAILABLE'
STATUS_CLEAN = 'CLEAN'
STATUS_RELEASED = 'RELEASED'
STATUS_SUCCESS = 'SUCCESS'
#
STATUS_CHANGED = 'CHANGED'
STATUS_EXISTS = 'EXISTS'
STATUS_IN_WORK = 'IN_WORK'
STATUS_OLD = 'OLD'
#
STATUS_FAILED = 'FAILED'
STATUS_MISSING = 'MISSING'
STATUS_UNKNOWN = 'UNKNOWN'
class termcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
STATUS_COLORS = {
STATUS_AVAILABLE: termcolors.OKGREEN,
STATUS_CLEAN: termcolors.OKGREEN,
STATUS_RELEASED: termcolors.OKGREEN,
STATUS_SUCCESS: termcolors.OKGREEN,
#
STATUS_CHANGED: termcolors.WARNING,
STATUS_EXISTS: termcolors.WARNING,
STATUS_IN_WORK: termcolors.WARNING,
STATUS_OLD: termcolors.WARNING,
#
STATUS_FAILED: termcolors.FAIL,
STATUS_MISSING: termcolors.FAIL,
STATUS_UNKNOWN: termcolors.FAIL,
}
def print_header(txt):
print(termcolors.BOLD + termcolors.WARNING + txt + termcolors.ENDC)
def print_action(txt):
print(termcolors.BOLD + ' * ' + txt + termcolors.ENDC)
def status_output(txt, default_color):
return STATUS_COLORS.get(txt, default_color) + txt + termcolors.ENDC
def print_info(txt, default_color=termcolors.ENDC):
print(' ' + status_output(txt, default_color))
def coverage_output(lcov, bcov, length=None):
if lcov is None or bcov is None:
return (length - len(STATUS_UNKNOWN)) * ' ' + status_output(STATUS_UNKNOWN, termcolors.FAIL)
elif lcov > 90:
rv = termcolors.OKGREEN + '%3d%% (%3d%%)' % (lcov, bcov) + termcolors.ENDC
else:
rv = termcolors.WARNING + '%3d%% (%3d%%)' % (lcov, bcov) + termcolors.ENDC
if length is None:
return rv
else:
return (length - 11) * ' ' + rv
def print_coverage(lcov, bcov):
print(' ' + coverage_output(lcov, bcov))
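
Usage sketch (illustration only), assuming the project's unittest package is on the path:

from unittest.output import STATUS_SUCCESS, print_action, print_coverage, print_header, print_info

print_header('Example check')        # bold, yellow headline
print_action('Running some step')    # bold ' * ' action line
print_info(STATUS_SUCCESS)           # green, colour taken from STATUS_COLORS
print_info('arbitrary text')         # default colour, since the text is no known status
print_coverage(93.4, 78.1)           # green, line coverage above 90%
print_coverage(65.0, 40.0)           # yellow, line coverage below the 90% threshold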

run.py (372 changed lines)

@@ -2,6 +2,8 @@
# -*- coding: utf-8 -*-
#
import fstools
from unittest import jsonlog
from unittest import output
import report
import reqif
@@ -23,14 +25,6 @@ except ImportError:
jinja2 = None
import shutil
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
ARG_CLEAN = 'clean'
ARG_RUN = 'run'
@@ -40,28 +34,29 @@ ARG_STATUS = 'status'
ARG_COPY = 'copy'
ARG_RELEASE = 'release'
UNITTEST_KEY_SYSTEM_INFO = 'system_information'
UNITTEST_KEY_UNITTEST_INFO = 'unittest_information'
UNITTEST_KEY_TESTOBJECT_INFO = 'testobject_information'
UNITTEST_KEY_TESTRUNS = 'testrun_list'
UNITTEST_KEY_COVERAGE_INFO = 'coverage_information'
UNITTEST_KEY_SPECIFICATION = 'specification'
FN_DATA_COLLECTION = 'unittest.json'
FN_TEX_REPORT = 'unittest.tex'
FN_PDF_REPORT = 'unittest.pdf'
FN_COVERAGE = 'coverage.xml'
FILES = {
'data-collection': 'unittest.json',
'tex-report': 'unittest.tex',
'coverage-xml': 'coverage.xml'
}
REPORT_FILES = [FILES['data-collection'], FILES['coverage-xml'], 'unittest.pdf']
REPORT_FILES = [FN_DATA_COLLECTION, FN_COVERAGE, FN_PDF_REPORT]
def testresults_filename(ut_folder, filename):
return os.path.join(jsonlog.get_ut_testresult_folder(ut_folder), filename)
def remove_file(filename):
if os.path.exists(filename) and not filename.endswith('.gitkeep'):
try:
output.print_info('Removing %s' % filename)
os.remove(filename)
except OSError:
pass
class coverage_info(list):
KEY_NAME = 'name'
KEY_FILEPATH = 'filepath'
KEY_LINE_COVERAGE = 'line_coverage'
KEY_BRANCH_COVERAGE = 'branch_coverage'
KEY_FILES = 'files'
KEY_FRAGMENTS = 'fragments'
KEY_START_LINE = 'start'
KEY_END_LINE = 'end'
@@ -77,23 +72,23 @@ class coverage_info(list):
itemlist = xmldoc.getElementsByTagName('package')
for p in itemlist:
module = {}
module[self.KEY_NAME] = p.attributes['name'].value[len(module_basepath) + 1:]
module[self.KEY_FILEPATH] = p.attributes['name'].value.replace('.', os.path.sep)
module[self.KEY_LINE_COVERAGE] = float(p.attributes['line-rate'].value) * 100.
module[jsonlog.COVI_KEY_NAME] = p.attributes['name'].value[len(module_basepath) + 1:]
module[jsonlog.COVI_KEY_FILEPATH] = p.attributes['name'].value.replace('.', os.path.sep)
module[jsonlog.COVI_KEY_LINE_COVERAGE] = float(p.attributes['line-rate'].value) * 100.
try:
module[self.KEY_BRANCH_COVERAGE] = float(p.attributes['branch-rate'].value) * 100.
module[jsonlog.COVI_KEY_BRANCH_COVERAGE] = float(p.attributes['branch-rate'].value) * 100.
except AttributeError:
module[self.KEY_BRANCH_COVERAGE] = None
module[self.KEY_FILES] = []
module[jsonlog.COVI_KEY_BRANCH_COVERAGE] = None
module[jsonlog.COVI_KEY_FILES] = []
for c in p.getElementsByTagName('class'):
f = {}
f[self.KEY_NAME] = c.attributes['filename'].value[len(module_basepath) + 1:].replace(os.path.sep, '.')
f[self.KEY_FILEPATH] = c.attributes['filename'].value
f[self.KEY_LINE_COVERAGE] = float(c.attributes['line-rate'].value) * 100.
f[jsonlog.COVI_KEY_NAME] = c.attributes['filename'].value[len(module_basepath) + 1:].replace(os.path.sep, '.')
f[jsonlog.COVI_KEY_FILEPATH] = c.attributes['filename'].value
f[jsonlog.COVI_KEY_LINE_COVERAGE] = float(c.attributes['line-rate'].value) * 100.
try:
f[self.KEY_BRANCH_COVERAGE] = float(p.attributes['branch-rate'].value) * 100.
f[jsonlog.COVI_KEY_BRANCH_COVERAGE] = float(p.attributes['branch-rate'].value) * 100.
except Exception:
f[self.KEY_BRANCH_COVERAGE] = None
f[jsonlog.COVI_KEY_BRANCH_COVERAGE] = None
f[self.KEY_FRAGMENTS] = []
last_hit = None
start_line = 1
@@ -146,52 +141,23 @@ class coverage_info(list):
line[self.KEY_END_LINE] = None
line[self.KEY_COVERAGE_STATE] = self.CLEAN
f[self.KEY_FRAGMENTS].append(line)
module[self.KEY_FILES].append(f)
module[jsonlog.COVI_KEY_FILES].append(f)
self.append(module)
def __str__(self):
rv = ''
for module in self:
rv += '%s (%.1f%% - %s)\n' % (module.get(self.KEY_NAME), module.get(self.KEY_LINE_COVERAGE), module.get(self.KEY_FILEPATH))
for py_file in module.get(self.KEY_FILES):
rv += ' %s (%.1f%% - %s)\n' % (py_file.get(self.KEY_NAME), py_file.get(self.KEY_LINE_COVERAGE), py_file.get(self.KEY_FILEPATH))
rv += '%s (%.1f%% - %s)\n' % (module.get(jsonlog.COVI_KEY_NAME), module.get(jsonlog.COVI_KEY_LINE_COVERAGE), module.get(jsonlog.COVI_KEY_FILEPATH))
for py_file in module.get(jsonlog.COVI_KEY_FILES):
rv += ' %s (%.1f%% - %s)\n' % (py_file.get(jsonlog.COVI_KEY_NAME), py_file.get(jsonlog.COVI_KEY_LINE_COVERAGE), py_file.get(jsonlog.COVI_KEY_FILEPATH))
for fragment in py_file.get(self.KEY_FRAGMENTS):
if fragment.get(self.KEY_END_LINE) is not None:
rv += ' %d - %d: %s\n' % (fragment.get(self.KEY_START_LINE), fragment.get(self.KEY_END_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
else:
rv += ' %d - : %s\n' % (fragment.get(self.KEY_START_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
rv += ' %d - : %s\n' % (fragment.get(self.KEY_START_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
return rv
def unittest_filename(base_folder, filename):
return os.path.join(base_folder, 'testresults', filename)
def print_header(txt, color=BOLD + WARNING):
print(color + txt + ENDC)
def print_action(txt, color=BOLD):
print(color + ' * ' + txt + ENDC)
def print_info(txt, color=ENDC):
print(' ' + color + txt + ENDC)
def remove_file(filename):
if os.path.exists(filename) and not filename.endswith('.gitkeep'):
try:
print_info('Removing %s' % filename)
os.remove(filename)
except OSError:
pass
def module_uid(path):
return fstools.uid_filelist(path, '*.py', rekursive=True)
def unittest(options, args, unittest_folder):
if 'release_testcases' in args:
unittest_release_testcases(unittest_folder)
@@ -209,13 +175,13 @@ def unittest(options, args, unittest_folder):
unittest_publish(unittest_folder)
def unittest_release_testcases(unittest_folder):
unittest_uid = module_uid(os.path.join(unittest_folder, 'src', 'tests'))
config_file = os.path.join(unittest_folder, 'src', 'config.py')
print_header('Releasing unittest')
def unittest_release_testcases(ut_folder):
unittest_uid = jsonlog.module_uid(jsonlog.get_ut_testcase_folder(ut_folder))
output.print_header('Releasing unittest')
config_file = jsonlog.get_ut_config(ut_folder)
with open(config_file, 'r') as fh:
conf_file = fh.read()
print_action('Setting release_unittest_version = %s in %s' % (unittest_uid, config_file))
output.print_action('Setting release_unittest_version = %s in %s' % (unittest_uid, config_file))
with open(config_file, 'w') as fh:
for line in conf_file.splitlines():
if line.startswith('release_unittest_version'):
@@ -224,85 +190,83 @@ def unittest_release_testcases(unittest_folder):
fh.write(line + '\n')
def unittest_clean(unittest_folder):
print_header('Cleaning up...')
print_action('Testresults from last testrun')
for fn in os.listdir(unittest_filename(unittest_folder, '')):
remove_file(unittest_filename(unittest_folder, fn))
remove_file(unittest_filename(unittest_folder, FILES['coverage-xml']))
def unittest_clean(ut_folder):
output.print_header('Cleaning up...')
output.print_action('Testresults from last testrun')
for fn in os.listdir(testresults_filename(ut_folder, '')):
remove_file(testresults_filename(ut_folder, fn))
def unittest_prepare(unittest_folder):
config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
def unittest_prepare(ut_folder):
config = imp.load_source('', jsonlog.get_ut_config(ut_folder))
#
print_header("Initiating unittest for first testrun...")
if not os.path.exists(unittest_filename(unittest_folder, '')):
print_action('Creating output folder %s' % unittest_filename(unittest_folder, ''))
fstools.mkdir(unittest_filename(unittest_folder, ''))
output.print_header("Initiating unittest for first testrun...")
if not os.path.exists(testresults_filename(ut_folder, '')):
output.print_action('Creating output folder %s' % testresults_filename(ut_folder, ''))
fstools.mkdir(testresults_filename(ut_folder, ''))
#
print_action('Creating unittest data-collection: %s' % unittest_filename(unittest_folder, FILES['data-collection']))
output.print_action('Creating unittest data-collection: %s' % testresults_filename(ut_folder, FN_DATA_COLLECTION))
#
system_info = {}
system_info['Architecture'] = platform.architecture()[0]
system_info['Machine'] = platform.machine()
system_info['Hostname'] = platform.node()
system_info['Distribution'] = ' '.join(dist())
system_info['System'] = platform.system()
system_info['Kernel'] = platform.release() + ' (%s)' % platform.version()
system_info['Username'] = getpass.getuser()
system_info['Path'] = unittest_folder
system_info[jsonlog.SYSI_ARCHITECTURE] = platform.architecture()[0]
system_info[jsonlog.SYSI_MACHINE] = platform.machine()
system_info[jsonlog.SYSI_HOSTNAME] = platform.node()
system_info[jsonlog.SYSI_DISTRIBUTION] = ' '.join(dist())
system_info[jsonlog.SYSI_SYSTEM] = platform.system()
system_info[jsonlog.SYSI_KERNEL] = platform.release() + ' (%s)' % platform.version()
system_info[jsonlog.SYSI_USERNAME] = getpass.getuser()
system_info[jsonlog.SYSI_PATH] = ut_folder
#
unittest_info = {}
unittest_info['Version'] = module_uid(os.path.join(unittest_folder, 'src', 'tests'))
unittest_info[jsonlog.UTEI_VERSION] = jsonlog.module_uid(jsonlog.get_ut_testcase_folder(ut_folder))
#
testobject_info = {}
testobject_info['Name'] = config.lib.__name__
testobject_info['Version'] = module_uid(config.lib.__path__[0])
testobject_info['Description'] = config.lib.__DESCRIPTION__
testobject_info['Supported Interpreters'] = ', '.join(['python%d' % vers for vers in config.lib.__INTERPRETER__])
testobject_info['State'] = 'Released' if config.release_unittest_version == module_uid(os.path.join(unittest_folder, 'src', 'tests')) else 'In development'
testobject_info['Dependencies'] = []
testobject_info[jsonlog.TOBI_NAME] = config.lib.__name__
testobject_info[jsonlog.TOBI_VERSION] = jsonlog.module_uid(config.lib.__path__[0])
testobject_info[jsonlog.TOBI_DESCRIPTION] = config.lib.__DESCRIPTION__
testobject_info[jsonlog.TOBI_SUPP_INTERP] = ', '.join(['python%d' % vers for vers in config.lib.__INTERPRETER__])
testobject_info[jsonlog.TOBI_STATE] = jsonlog.TOBI_STATE_RELESED if config.release_unittest_version == unittest_info[jsonlog.UTEI_VERSION] else jsonlog.TOBI_STATE_IN_DEVELOPMENT
testobject_info[jsonlog.TOBI_DEPENDENCIES] = []
for dependency in config.lib.__DEPENDENCIES__:
testobject_info['Dependencies'].append((dependency, module_uid(os.path.join(unittest_folder, 'src', dependency))))
testobject_info[jsonlog.TOBI_DEPENDENCIES].append((dependency, jsonlog.module_uid(os.path.join(jsonlog.get_ut_src_folder(ut_folder), dependency))))
#
spec_filename = os.path.join(unittest_folder, '..', 'requirements', 'specification.reqif')
print_action("Adding Requirement Specification from %s" % spec_filename)
spec_filename = os.path.join(ut_folder, 'requirements', 'specification.reqif')
output.print_action("Adding Requirement Specification from %s" % spec_filename)
try:
spec = reqif.reqif_dict(spec_filename, 'Heading', 'Software Specification')
except FileNotFoundError:
print_info('FAILED', FAIL)
output.print_info(output.STATUS_FAILED)
spec = {}
else:
print_info('SUCCESS', OKGREEN)
output.print_info(output.STATUS_SUCCESS)
#
data_collection = {
UNITTEST_KEY_SYSTEM_INFO: system_info,
UNITTEST_KEY_UNITTEST_INFO: unittest_info,
UNITTEST_KEY_TESTOBJECT_INFO: testobject_info,
UNITTEST_KEY_SPECIFICATION: spec,
UNITTEST_KEY_TESTRUNS: [],
jsonlog.MAIN_KEY_SYSTEM_INFO: system_info,
jsonlog.MAIN_KEY_UNITTEST_INFO: unittest_info,
jsonlog.MAIN_KEY_TESTOBJECT_INFO: testobject_info,
jsonlog.MAIN_KEY_SPECIFICATION: spec,
jsonlog.MAIN_KEY_TESTRUNS: [],
}
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'w') as fh:
with open(testresults_filename(ut_folder, FN_DATA_COLLECTION), 'w') as fh:
fh.write(json.dumps(data_collection, indent=4, sort_keys=True))
def unittest_testrun(unittest_folder, options):
tests = imp.load_source('', os.path.join(unittest_folder, 'src', 'tests', '__init__.py'))
config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
def unittest_testrun(ut_folder, options):
tests = imp.load_source('', os.path.join(jsonlog.get_ut_testcase_folder(ut_folder), '__init__.py'))
config = imp.load_source('', jsonlog.get_ut_config(ut_folder))
#
interpreter_version = 'python ' + '.'.join(['%d' % n for n in sys.version_info[:3]]) + ' (%s)' % sys.version_info[3]
#
execution_level = report.TCEL_REVERSE_NAMED.get(options.execution_level, report.TCEL_FULL)
#
if sys.version_info.major in config.lib.__INTERPRETER__:
print_header("Running \"%s\" Unittest with %s" % (options.execution_level, interpreter_version))
print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
output.print_header("Running \"%s\" Unittest with %s" % (options.execution_level, interpreter_version))
with open(testresults_filename(ut_folder, FN_DATA_COLLECTION), 'r') as fh:
data_collection = json.loads(fh.read())
print_action('Executing Testcases')
output.print_action('Executing Testcases')
heading_dict = {}
for key in data_collection[UNITTEST_KEY_SPECIFICATION].get('item_dict', {}):
heading_dict[key] = data_collection[UNITTEST_KEY_SPECIFICATION]['item_dict'][key]['Heading']
for key in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}):
heading_dict[key] = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][jsonlog.SPEC_ITEM_DICT][key]['Heading']
test_session = report.testSession(
['__unittest__', 'root'],
interpreter=interpreter_version,
@@ -312,63 +276,61 @@ def unittest_testrun(unittest_folder, options):
)
tests.testrun(test_session)
#
print_action('Adding Testrun data to %s' % unittest_filename(unittest_folder, FILES['data-collection']))
data_collection[UNITTEST_KEY_TESTRUNS].append(test_session)
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'w') as fh:
output.print_action('Adding Testrun data to %s' % testresults_filename(ut_folder, FN_DATA_COLLECTION))
data_collection[jsonlog.MAIN_KEY_TESTRUNS].append(test_session)
with open(testresults_filename(ut_folder, FN_DATA_COLLECTION), 'w') as fh:
fh.write(json.dumps(data_collection, indent=4, sort_keys=True))
else:
print_header("Library does not support %s." % interpreter_version)
output.print_header("Library does not support %s." % interpreter_version)
def unittest_finalise(unittest_folder):
config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
def unittest_finalise(ut_folder):
config = imp.load_source('', jsonlog.get_ut_config(ut_folder))
#
print_header("Adding Requirement information")
output.print_header("Adding Requirement information")
#
print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
with open(testresults_filename(ut_folder, FN_DATA_COLLECTION), 'r') as fh:
data_collection = json.loads(fh.read())
#
data_collection['lost_souls'] = {}
data_collection[jsonlog.MAIN_KEY_LOST_SOULS] = {}
#
print_action("Adding Lost Requirement Soul")
data_collection['lost_souls']['item_list'] = []
for req_id in data_collection[UNITTEST_KEY_SPECIFICATION].get('item_dict', {}):
item = data_collection[UNITTEST_KEY_SPECIFICATION]['item_dict'][req_id]
output.print_action("Adding Lost Requirement Soul")
data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_ITEMLIST] = []
for req_id in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}):
item = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][jsonlog.SPEC_ITEM_DICT][req_id]
if item['system_type_uid'] == '_MR7eNHYYEem_kd-7nxt1sg':
testcase_available = False
for testrun in data_collection[UNITTEST_KEY_TESTRUNS]:
if req_id in testrun['testcases']:
for testrun in data_collection[jsonlog.MAIN_KEY_TESTRUNS]:
if req_id in testrun[jsonlog.TRUN_TESTCASES]:
testcase_available = True
break
if not testcase_available:
data_collection['lost_souls']['item_list'].append(req_id)
print_info('%s - "%s" has no corresponding testcase' % (item['system_uid'], item['Heading']), FAIL)
data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_ITEMLIST].append(req_id)
output.print_info('%s - "%s" has no corresponding testcase' % (item['system_uid'], item['Heading']), output.termcolors.FAIL)
#
print_action("Adding Lost Testcase Soul")
data_collection['lost_souls']['testcase_list'] = []
for testrun in data_collection[UNITTEST_KEY_TESTRUNS]:
for tc_id in testrun.get('testcases', {}):
if tc_id not in data_collection[UNITTEST_KEY_SPECIFICATION].get('item_dict', {}) and tc_id not in data_collection['lost_souls']['testcase_list']:
data_collection['lost_souls']['testcase_list'].append(tc_id)
print_info('"%s" has no corresponding testcase' % tc_id, FAIL)
output.print_action("Adding Lost Testcase Soul")
data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST] = []
for testrun in data_collection[jsonlog.MAIN_KEY_TESTRUNS]:
for tc_id in testrun.get(jsonlog.TRUN_TESTCASES, {}):
if tc_id not in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}) and tc_id not in data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST]:
data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST].append(tc_id)
output.print_info('"%s" has no corresponding testcase' % tc_id, output.termcolors.FAIL)
#
print_header("Adding Coverage information")
print_action('Adding Coverage Information to %s' % unittest_filename(unittest_folder, FILES['data-collection']))
data_collection[UNITTEST_KEY_COVERAGE_INFO] = coverage_info(unittest_filename(unittest_folder, 'coverage.xml'), os.path.dirname(config.lib_path))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'w') as fh:
output.print_header("Adding Coverage information")
output.print_action('Adding Coverage Information to %s' % testresults_filename(ut_folder, FN_DATA_COLLECTION))
data_collection[jsonlog.MAIN_KEY_COVERAGE_INFO] = coverage_info(testresults_filename(ut_folder, FN_COVERAGE), os.path.dirname(config.lib_path))
with open(testresults_filename(ut_folder, FN_DATA_COLLECTION), 'w') as fh:
fh.write(json.dumps(data_collection, indent=4, sort_keys=True))
#
print_header("Creating LaTeX-Report of Unittest")
print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
output.print_header("Creating LaTeX-Report of Unittest")
with open(testresults_filename(ut_folder, FN_DATA_COLLECTION), 'r') as fh:
data_collection = json.loads(fh.read())
if jinja2 is None:
print_action('You need to install jinja2 to create a LaTeX-Report!', FAIL)
output.print_action('You need to install jinja2 to create a LaTeX-Report!', output.termcolors.FAIL)
else:
fn = unittest_filename(unittest_folder, FILES['tex-report'])
print_action('Creating LaTeX-File %s' % fn)
fn = testresults_filename(ut_folder, FN_TEX_REPORT)
output.print_action('Creating LaTeX-File %s' % fn)
with open(fn, 'w') as fh:
#
template_path = os.path.join(os.path.dirname(__file__), 'templates')
@@ -378,37 +340,44 @@ def unittest_finalise(unittest_folder):
fh.write(template.render(data=data_collection))
def unittest_publish(unittest_folder):
config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
def unittest_publish(ut_folder):
config = imp.load_source('', jsonlog.get_ut_config(ut_folder))
#
print_header('Copy unittest files to library')
output.print_header('Copy unittest files to library')
target_folder = os.path.join(config.lib_path, '_testresults_')
print_action('Copying Unittest Files to %s' % target_folder)
output.print_action('Copying Unittest Files to %s' % target_folder)
if not os.path.exists(target_folder):
print_info('Creating folder %s' % target_folder)
output.print_info('Creating folder %s' % target_folder)
fstools.mkdir(target_folder)
else:
for fn in os.listdir(target_folder):
remove_file(os.path.join(target_folder, fn))
for fn in REPORT_FILES:
src = unittest_filename(unittest_folder, fn)
src = testresults_filename(ut_folder, fn)
dst = os.path.join(target_folder, fn)
print_info('copying %s -> %s' % (src, dst))
output.print_info('copying %s -> %s' % (src, dst))
shutil.copyfile(src, dst)
def unittest_status(unittest_folder):
print_header('Checking status of all submodules')
print_action('Updating all submodules (fetch)')
process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch", cwd=os.path.dirname(unittest_folder), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def unittest_status(ut_folder):
#
# GIT STATUS
#
output.print_header('Checking GIT repository status')
# GIT FETCH
output.print_action('Fetching repository from server...')
process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch", cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderroutput = process.communicate()[1]
if stderroutput == b'':
print_info('SUCCESS', color=OKGREEN)
output.print_info(output.STATUS_SUCCESS)
else:
print_info('FAILED', color=FAIL)
print_action('Checking status...')
process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status", cwd=os.path.dirname(unittest_folder), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output.print_info(output.STATUS_FAILED)
# GIT_REPO
output.print_action('Analysing repository status...')
output.print_info(jsonlog.status_git(ut_folder))
# SUBMODULES
output.print_action('Analysing submodule status...')
process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status", cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutput, stderroutput = process.communicate()
if stderroutput == b'':
module = None
@@ -425,36 +394,41 @@ def unittest_status(unittest_folder):
data[m] += line
for key in data:
if "working tree clean" not in data[key] and "working directory clean" not in data[key]:
data[key] = ("local changes", FAIL)
data[key] = ("LOCAL CHANGES", output.termcolors.WARNING)
elif "Your branch is behind" in data[key]:
data[key] = ("no up to date (try git pull)", FAIL)
data[key] = ("OUTDATED (try git pull)", output.termcolors.WARNING)
elif "HEAD detached at" in data[key]:
data[key] = ("no up to date (try git checkout master)", FAIL)
data[key] = ("OUTDATED (try git checkout master)", output.termcolors.WARNING)
elif "Your branch is ahead of" in data[key]:
data[key] = ("push required", WARNING)
data[key] = ("CHANGED (try git push)", output.termcolors.WARNING)
elif "nothing to commit" in data[key]:
data[key] = ("clean", OKGREEN)
data[key] = ("CLEAN", output.termcolors.OKGREEN)
else:
data[key] = ("unknown", FAIL)
print_info('Submodule %s... %s' % (key, data[key][1] + data[key][0]))
data[key] = ("UNKNOWN", output.termcolors.FAIL)
output.print_info('Submodule %s... %s' % (key, data[key][1] + data[key][0]))
else:
print_info('FAILED', color=FAIL)
output.print_info(output.STATUS_FAILED)
#
print_header('Checking status of unittest and testresults in the library')
print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
data_collection = json.loads(fh.read())
print_action('Checking release state of this testrun... ')
if data_collection[UNITTEST_KEY_TESTOBJECT_INFO]['State'] != 'Released':
print_info("FAILED", FAIL)
else:
print_info("SUCCESS", OKGREEN)
# TESTRUN STATUS
#
from unittest.module_status import module_unittest_status
print_action('Checking status of testresults in library...')
st = module_unittest_status(os.path.abspath(os.path.join(unittest_folder, '..')))
stc = {
'RELEASED': OKGREEN,
'IN_WORK': OKBLUE,
}.get(st, FAIL)
print_info(st, stc)
output.print_header('Checking status of unittest in the library')
for txt, fcn in (
('Checking release state... ', jsonlog.get_lib_release_state),
('Checking testcase integrity... ', jsonlog.get_lib_testcase_integrity),
('Checking source integrity... ', jsonlog.get_lib_src_integrity)
):
output.print_action(txt)
output.print_info(fcn(ut_folder))
output.print_action('Checking code coverage... ')
output.print_coverage(*jsonlog.lib_coverage(ut_folder))
#
output.print_header('Checking status of unittest for this testrun')
for txt, fcn in (
('Checking release state... ', jsonlog.get_ut_release_state),
('Checking testcase integrity... ', jsonlog.get_ut_testcase_integrity),
('Checking source integrity... ', jsonlog.get_ut_src_integrity)
):
output.print_action(txt)
output.print_info(fcn(ut_folder))
output.print_action('Checking code coverage... ')
output.print_coverage(*jsonlog.ut_coverage(ut_folder))
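
For orientation (values invented): the data collection that unittest_prepare writes and unittest_testrun / unittest_finalise extend roughly has this shape, expressed with the jsonlog key constants:

from unittest import jsonlog

data_collection = {
    jsonlog.MAIN_KEY_SYSTEM_INFO: {jsonlog.SYSI_HOSTNAME: 'build-host'},
    jsonlog.MAIN_KEY_UNITTEST_INFO: {jsonlog.UTEI_VERSION: 'a1b2c3d4'},
    jsonlog.MAIN_KEY_TESTOBJECT_INFO: {jsonlog.TOBI_NAME: 'fstools',
                                       jsonlog.TOBI_STATE: jsonlog.TOBI_STATE_IN_DEVELOPMENT},
    jsonlog.MAIN_KEY_SPECIFICATION: {jsonlog.SPEC_ITEM_DICT: {}},
    jsonlog.MAIN_KEY_TESTRUNS: [],          # one testSession entry per testrun
    jsonlog.MAIN_KEY_COVERAGE_INFO: [{jsonlog.COVI_KEY_LINE_COVERAGE: 93.4,
                                      jsonlog.COVI_KEY_BRANCH_COVERAGE: 78.1}],
    jsonlog.MAIN_KEY_LOST_SOULS: {jsonlog.LOST_ITEMLIST: [], jsonlog.LOST_TESTCASELIST: []},
}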


@@ -27,11 +27,11 @@ help:
@echo " - testrun_smoke: Run some testcases"
@echo " - testrun_single: Run one testcases"
release: clean prepare testrun_full coverage_analysis finalise compile publish status
full: clean prepare testrun_full coverage_analysis finalise compile status
short: clean prepare testrun_short coverage_analysis finalise compile status
smoke: clean prepare testrun_smoke coverage_analysis finalise compile status
single: clean prepare testrun_single coverage_analysis finalise compile status
release: testrun_full coverage_analysis finalise compile publish status
full: testrun_full finalise compile status
short: testrun_short finalise compile status
smoke: testrun_smoke finalise compile status
single: testrun_single finalise compile status
clean:
@$(PYT3_CMD) src/unittest/scripts/unittest.py clean
@@ -44,20 +44,19 @@ release_testcases:
prepare:
@$(PYT3_CMD) src/unittest/scripts/unittest.py prepare
testrun_full:
testrun_full: clean prepare
@$(COV2_CMD) run -a --branch --source=`$(PYT3_CMD) src/config.py -p` src/unittest/scripts/unittest.py testrun -e full
@$(COV3_CMD) run -a --branch --source=`$(PYT3_CMD) src/config.py -p` src/unittest/scripts/unittest.py testrun -e full
testrun_short:
testrun_short: clean prepare
@$(COV2_CMD) run -a --branch --source=`$(PYT3_CMD) src/config.py -p` src/unittest/scripts/unittest.py testrun -e short
@$(COV3_CMD) run -a --branch --source=`$(PYT3_CMD) src/config.py -p` src/unittest/scripts/unittest.py testrun -e short
testrun_smoke:
testrun_smoke: clean prepare
@$(COV2_CMD) run -a --branch --source=`$(PYT3_CMD) src/config.py -p` src/unittest/scripts/unittest.py testrun -e smoke
@$(COV3_CMD) run -a --branch --source=`$(PYT3_CMD) src/config.py -p` src/unittest/scripts/unittest.py testrun -e smoke
testrun_single:
testrun_single: clean prepare
@$(COV2_CMD) run -a --branch --source=`$(PYT3_CMD) src/config.py -p` src/unittest/scripts/unittest.py testrun -e single
@$(COV3_CMD) run -a --branch --source=`$(PYT3_CMD) src/config.py -p` src/unittest/scripts/unittest.py testrun -e single
@@ -65,7 +64,7 @@ coverage_analysis:
@echo "\e[1m\e[93mCreating Coverage-XML-File: $(pwd)/testresults/coverage.xml\e[0m"
@$(COV3_CMD) xml -o testresults/coverage.xml
finalise:
finalise: coverage_analysis
@$(PYT3_CMD) src/unittest/scripts/unittest.py finalise
compile:


@@ -19,4 +19,4 @@ parser.add_option("-e", "--execution-level", dest="execution_level", default="fu
if report.TCEL_REVERSE_NAMED.get(tests.execution_level, report.TCEL_FULL) < report.TCEL_REVERSE_NAMED.get(options.execution_level, report.TCEL_FULL):
options.execution_level = tests.execution_level
unittest.run.unittest(options, args, BASEPATH)
unittest.run.unittest(options, args, os.path.dirname(BASEPATH))