Initial unittest implementation

This commit is contained in:
Dirk Alders 2020-01-26 16:19:29 +01:00
parent 74a793bd9b
commit 547fb39ec5
21 changed files with 1317 additions and 0 deletions

8
__init__.py Normal file
View File

@@ -0,0 +1,8 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from unittest import module_status
from unittest import run
from unittest import test
__DEPENDENCIES__ = ['fstools', 'report']

176
module_status.py Normal file
View File

@@ -0,0 +1,176 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os
import json
import fstools
import re
import subprocess
class termcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
STATUS_RELEASED = 'RELEASED'
STATUS_AVAILABLE = 'AVAILABLE'
STATUS_IN_WORK = 'IN_WORK'
STATUS_EXISTS = 'EXISTS'
STATUS_OLD = 'OLD'
STATUS_MISSING = 'MISSING'
#
STATUS_CLEAN = 'CLEAN'
STATUS_CHANGED = 'CHANGED'
#
STATUS_UNKNOWN = 'UNKNOWN'
STATUS_LENGTH = 13
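# Colorized status strings, right-justified to STATUS_LENGTH characters for column-aligned output.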
STR_STATUS = {
STATUS_RELEASED: termcolors.OKGREEN + (STATUS_LENGTH - len(STATUS_RELEASED)) * ' ' + STATUS_RELEASED + termcolors.ENDC,
STATUS_AVAILABLE: termcolors.OKGREEN + (STATUS_LENGTH - len(STATUS_AVAILABLE)) * ' ' + STATUS_AVAILABLE + termcolors.ENDC,
STATUS_IN_WORK: termcolors.WARNING + (STATUS_LENGTH - len(STATUS_IN_WORK)) * ' ' + STATUS_IN_WORK + termcolors.ENDC,
STATUS_OLD: termcolors.WARNING + (STATUS_LENGTH - len(STATUS_OLD)) * ' ' + STATUS_OLD + termcolors.ENDC,
STATUS_EXISTS: termcolors.WARNING + (STATUS_LENGTH - len(STATUS_EXISTS)) * ' ' + STATUS_EXISTS + termcolors.ENDC,
STATUS_MISSING: termcolors.FAIL + (STATUS_LENGTH - len(STATUS_MISSING)) * ' ' + STATUS_MISSING + termcolors.ENDC,
#
STATUS_CLEAN: termcolors.OKGREEN + (STATUS_LENGTH - len(STATUS_CLEAN)) * ' ' + STATUS_CLEAN + termcolors.ENDC,
STATUS_CHANGED: termcolors.WARNING + (STATUS_LENGTH - len(STATUS_CHANGED)) * ' ' + STATUS_CHANGED + termcolors.ENDC,
#
STATUS_UNKNOWN: termcolors.FAIL + (STATUS_LENGTH - len(STATUS_UNKNOWN)) * ' ' + STATUS_UNKNOWN + termcolors.ENDC,
}
def module_status_head():
rv = termcolors.BOLD + termcolors.UNDERLINE + 'Status of the unittests for pylibs:\n' + termcolors.ENDC
LINE_FORMAT = '%25s%' + str(STATUS_LENGTH) + 's%' + str(STATUS_LENGTH) + 's%' + str(STATUS_LENGTH) + 's%' + str(STATUS_LENGTH) + 's%' + str(STATUS_LENGTH) + 's%' + str(STATUS_LENGTH) + 's\n'
rv += termcolors.BOLD + termcolors.HEADER + LINE_FORMAT % (
'Library',
'UT-Status',
'DOC-Status',
'Versions',
'UT-Coverage',
'SPEC-Status',
'GIT-Status',
)
rv += (25 + 6 * STATUS_LENGTH) * '-' + '\n' + termcolors.ENDC
return rv
def module_status_line(module_folder):
rv = '%25s%s%s%s%s%s%s\n' % (
os.path.basename(module_folder) + ':',
STR_STATUS.get(module_unittest_status(module_folder), STATUS_UNKNOWN),
STR_STATUS.get(module_doc_status(module_folder), STATUS_UNKNOWN),
module_unittest_versions(module_folder),
module_unittest_coverage(module_folder),
STR_STATUS.get(module_spec_status(module_folder), STATUS_UNKNOWN),
STR_STATUS.get(module_git_status(module_folder), STATUS_UNKNOWN),
)
return rv
def module_unittest_status(module_folder):
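# Compare the unittest.json stored inside the library (_testresults_) with the one produced in the
# unittest folder: a mismatch means the stored results are outdated (STATUS_OLD); otherwise the
# status is derived from the 'State' field of the testobject information.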
try:
with open(os.path.join(module_folder, 'pylibs', os.path.basename(module_folder), '_testresults_', 'unittest.json'), 'r') as fh:
ut_lib = json.loads(fh.read())
except IOError:
return STATUS_MISSING
else:
try:
with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
ut_ut = json.loads(fh.read())
except IOError:
return STATUS_UNKNOWN
else:
if ut_ut['testobject_information'] != ut_lib['testobject_information'] or ut_ut['unittest_information'] != ut_lib['unittest_information']:
return STATUS_OLD
else:
ut_status = ut_lib.get('testobject_information', {}).get('State', 'unknown')
if 'released' in ut_status.lower():
return STATUS_RELEASED
elif 'work' in ut_status.lower():
return STATUS_IN_WORK
else:
return STATUS_UNKNOWN
def module_unittest_versions(module_folder):
try:
with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
ut = json.loads(fh.read())
except IOError:
return STR_STATUS[STATUS_UNKNOWN]
else:
interpreters = ut.get('testobject_information', {}).get('Supported Interpreters')
interpreters = interpreters.split(',')
for i in range(len(interpreters)):
interpreters[i] = interpreters[i].strip()
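# strip the 'python' prefix, keeping only the version digits (e.g. 'python3' -> '3')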
interpreters[i] = interpreters[i][6:]
rv = ', '.join(interpreters)
return (STATUS_LENGTH - len(rv)) * ' ' + rv
def module_unittest_coverage(module_folder):
try:
with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
ut = json.loads(fh.read())
except IOError:
return STR_STATUS[STATUS_UNKNOWN]
else:
lcov = ut.get('coverage_information', [{}])[0].get('line_coverage')
bcov = ut.get('coverage_information', [{}])[0].get('branch_coverage')
if lcov is None or bcov is None:
return STR_STATUS[STATUS_UNKNOWN]
elif lcov > 90:
rv = termcolors.OKGREEN + '%3d%% (%3d%%)' % (lcov, bcov) + termcolors.ENDC
else:
rv = termcolors.WARNING + '%3d%% (%3d%%)' % (lcov, bcov) + termcolors.ENDC
return (STATUS_LENGTH - 11) * ' ' + rv
def module_git_status(module_folder):
p = subprocess.Popen("git -C %s status" % module_folder, stdout=subprocess.PIPE, shell=True)
output = p.communicate()[0]
p_status = p.wait()
if p_status == 0:
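# NOTE: the literals below match German localized git output
# ("nichts zu committen" = "nothing to commit", "um lokale Commits zu publizieren" = "to publish your local commits")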
if b"nichts zu committen" in output and b"um lokale Commits zu publizieren" not in output:
return STATUS_CLEAN
else:
return STATUS_CHANGED
else:
return STATUS_UNKNOWN
def module_doc_status(module_folder):
if os.path.exists(os.path.join(module_folder, 'pylibs', os.path.basename(module_folder), '_docs_', 'index.html')):
return STATUS_AVAILABLE
else:
if os.path.exists(os.path.join(module_folder, 'docs', 'index.rst')):
return STATUS_IN_WORK
else:
return STATUS_MISSING
def module_spec_status(module_folder):
if os.path.exists(os.path.join(module_folder, 'requirements', 'specification.reqif')):
try:
with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
ut = json.loads(fh.read())
if len(ut['lost_souls']['item_list']) > 0 or len(ut['lost_souls']['testcase_list']) > 0:
return STATUS_IN_WORK
else:
return STATUS_CLEAN
except IOError:
return STATUS_EXISTS
else:
return STATUS_MISSING

466
run.py Normal file
View File

@@ -0,0 +1,466 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import fstools
import report
import reqif
import json
import os
import platform
import getpass
import sys
import subprocess
import imp
import xml.dom.minidom
try:
import jinja2
except ImportError:
jinja2 = None
import shutil
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
ARG_CLEAN = 'clean'
ARG_RUN = 'run'
ARG_FINALISE = 'finalise'
ARG_PDF = 'pdf'
ARG_STATUS = 'status'
ARG_COPY = 'copy'
ARG_RELEASE = 'release'
UNITTEST_KEY_SYSTEM_INFO = 'system_information'
UNITTEST_KEY_UNITTEST_INFO = 'unittest_information'
UNITTEST_KEY_TESTOBJECT_INFO = 'testobject_information'
UNITTEST_KEY_TESTRUNS = 'testrun_list'
UNITTEST_KEY_COVERAGE_INFO = 'coverage_information'
UNITTEST_KEY_SPECIFICATION = 'specification'
FILES = {
'data-collection': 'unittest.json',
'tex-report': 'unittest.tex',
'coverage-xml': 'coverage.xml'
}
REPORT_FILES = [FILES['data-collection'], FILES['coverage-xml'], 'unittest.pdf']
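# Artifacts written to <unittest_folder>/testresults/; REPORT_FILES are the ones copied into the
# library's _testresults_ folder by the 'copy' step.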
class coverage_info(list):
KEY_NAME = 'name'
KEY_FILEPATH = 'filepath'
KEY_LINE_COVERAGE = 'line_coverage'
KEY_BRANCH_COVERAGE = 'branch_coverage'
KEY_FILES = 'files'
KEY_FRAGMENTS = 'fragments'
KEY_START_LINE = 'start'
KEY_END_LINE = 'end'
KEY_COVERAGE_STATE = 'coverage_state'
COVERED = 'covered'
UNCOVERED = 'uncovered'
CLEAN = 'clean'
PARTIALLY_COVERED = 'partially-covered'
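# Parses a coverage XML report (as written by 'coverage xml') into a list of modules, each holding
# its files and per-file line fragments with one coverage state each.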
def __init__(self, xml_filename, module_basepath):
list.__init__(self)
xmldoc = xml.dom.minidom.parse(xml_filename)
itemlist = xmldoc.getElementsByTagName('package')
for p in itemlist:
module = {}
module[self.KEY_NAME] = p.attributes['name'].value[len(module_basepath) + 1:]
module[self.KEY_FILEPATH] = p.attributes['name'].value.replace('.', os.path.sep)
module[self.KEY_LINE_COVERAGE] = float(p.attributes['line-rate'].value) * 100.
try:
module[self.KEY_BRANCH_COVERAGE] = float(p.attributes['branch-rate'].value) * 100.
except AttributeError:
module[self.KEY_BRANCH_COVERAGE] = None
module[self.KEY_FILES] = []
for c in p.getElementsByTagName('class'):
f = {}
f[self.KEY_NAME] = c.attributes['filename'].value[len(module_basepath) + 1:].replace(os.path.sep, '.')
f[self.KEY_FILEPATH] = c.attributes['filename'].value
f[self.KEY_LINE_COVERAGE] = float(c.attributes['line-rate'].value) * 100.
try:
f[self.KEY_BRANCH_COVERAGE] = float(c.attributes['branch-rate'].value) * 100.
except (KeyError, AttributeError):
f[self.KEY_BRANCH_COVERAGE] = None
f[self.KEY_FRAGMENTS] = []
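# Group consecutive source lines that share the same coverage state into fragments; gaps between
# reported line numbers are emitted as 'clean' fragments so the report can render a contiguous listing.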
last_hit = None
start_line = 1
end_line = 1
for line in c.getElementsByTagName('line'):
line_no = int(line.attributes['number'].value)
hit = bool(int(line.attributes['hits'].value))
if hit:
cc = line.attributes.get('condition-coverage')
if cc is not None and not cc.value.startswith('100%'):
hit = self.PARTIALLY_COVERED
else:
hit = self.COVERED
else:
hit = self.UNCOVERED
if line_no == 1:
last_hit = hit
elif last_hit != hit or line_no > end_line + 1:
if last_hit is not None:
line = {}
line[self.KEY_START_LINE] = start_line
line[self.KEY_END_LINE] = end_line
line[self.KEY_COVERAGE_STATE] = last_hit
f[self.KEY_FRAGMENTS].append(line)
if line_no > end_line + 1:
line = {}
if last_hit is not None:
line[self.KEY_START_LINE] = end_line + 1
else:
line[self.KEY_START_LINE] = start_line
line[self.KEY_END_LINE] = line_no - 1
line[self.KEY_COVERAGE_STATE] = self.CLEAN
f[self.KEY_FRAGMENTS].append(line)
start_line = line_no
end_line = line_no
last_hit = hit
elif line_no == end_line + 1:
end_line = line_no
if last_hit is not None:
line = {}
line[self.KEY_START_LINE] = start_line
line[self.KEY_END_LINE] = end_line
line[self.KEY_COVERAGE_STATE] = last_hit
f[self.KEY_FRAGMENTS].append(line)
line = {}
if last_hit is not None:
line[self.KEY_START_LINE] = end_line + 1
else:
line[self.KEY_START_LINE] = start_line
line[self.KEY_END_LINE] = None
line[self.KEY_COVERAGE_STATE] = self.CLEAN
f[self.KEY_FRAGMENTS].append(line)
module[self.KEY_FILES].append(f)
self.append(module)
def __str__(self):
rv = ''
for module in self:
rv += '%s (%.1f%% - %s)\n' % (module.get(self.KEY_NAME), module.get(self.KEY_LINE_COVERAGE), module.get(self.KEY_FILEPATH))
for py_file in module.get(self.KEY_FILES):
rv += ' %s (%.1f%% - %s)\n' % (py_file.get(self.KEY_NAME), py_file.get(self.KEY_LINE_COVERAGE), py_file.get(self.KEY_FILEPATH))
for fragment in py_file.get(self.KEY_FRAGMENTS):
if fragment.get(self.KEY_END_LINE) is not None:
rv += ' %d - %d: %s\n' % (fragment.get(self.KEY_START_LINE), fragment.get(self.KEY_END_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
else:
rv += ' %d - : %s\n' % (fragment.get(self.KEY_START_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
return rv
def unittest_filename(base_folder, filename):
return os.path.join(base_folder, 'testresults', filename)
def print_header(txt, color=BOLD + WARNING):
print(color + txt + ENDC)
def print_action(txt, color=BOLD):
print(color + ' * ' + txt + ENDC)
def print_info(txt, color=ENDC):
print(' ' + color + txt + ENDC)
def remove_file(filename):
if os.path.exists(filename) and not filename.endswith('.gitkeep'):
try:
print_info('Removing %s' % filename)
os.remove(filename)
except OSError:
pass
def module_uid(path):
return fstools.uid_filelist(path, '*.py', rekursive=True)
def unittest(options, args, unittest_folder):
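# Dispatch exactly one command (clean, run, finalise, pdf, status, copy or release) to its handler.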
if ARG_CLEAN in args:
unittest_init(unittest_folder)
elif ARG_RUN in args:
unittest_run(unittest_folder, options)
elif ARG_FINALISE in args:
unittest_finalise(unittest_folder)
elif ARG_PDF in args:
unittest_pdf(unittest_folder)
elif ARG_STATUS in args:
unittest_status(unittest_folder)
elif ARG_COPY in args:
unittest_copy(unittest_folder)
elif ARG_RELEASE in args:
unittest_release(unittest_folder)
def unittest_init(unittest_folder):
config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
#
print_header("Initiating unittest for first testrun...")
if not os.path.exists(unittest_filename(unittest_folder, '')):
print_action('Creating output folder %s' % unittest_filename(unittest_folder, ''))
fstools.mkdir(unittest_filename(unittest_folder, ''))
#
print_action('Cleaning up data from last testrun')
for fn in os.listdir(unittest_filename(unittest_folder, '')):
remove_file(unittest_filename(unittest_folder, fn))
remove_file(unittest_filename(unittest_folder, FILES['coverage-xml']))
#
print_action('Creating unittest data-collection: %s' % unittest_filename(unittest_folder, FILES['data-collection']))
#
system_info = {}
system_info['Architecture'] = platform.architecture()[0]
system_info['Machine'] = platform.machine()
system_info['Hostname'] = platform.node()
system_info['Distribution'] = ' '.join(platform.dist())
system_info['System'] = platform.system()
system_info['Kernel'] = platform.release() + ' (%s)' % platform.version()
system_info['Username'] = getpass.getuser()
system_info['Path'] = unittest_folder
#
unittest_info = {}
unittest_info['Version'] = module_uid(os.path.join(unittest_folder, 'src', 'tests'))
#
testobject_info = {}
testobject_info['Name'] = config.lib.__name__
testobject_info['Version'] = module_uid(config.lib.__path__[0])
testobject_info['Description'] = config.lib.__DESCRIPTION__
testobject_info['Supported Interpreters'] = ', '.join(['python%d' % vers for vers in config.lib.__INTERPRETER__])
testobject_info['State'] = 'Released' if config.release_unittest_version == module_uid(os.path.join(unittest_folder, 'src', 'tests')) else 'In development'
testobject_info['Dependencies'] = []
for dependency in config.lib.__DEPENDENCIES__:
testobject_info['Dependencies'].append((dependency, module_uid(os.path.join(unittest_folder, 'src', dependency))))
#
spec_filename = os.path.join(unittest_folder, '..', 'requirements', 'specification.reqif')
print_action("Adding Requirement Specification from %s" % spec_filename)
try:
spec = reqif.reqif_dict(spec_filename, 'Heading', 'Software Specification')
except FileNotFoundError:
print_info('FAILED', FAIL)
spec = {}
else:
print_info('SUCCESS', OKGREEN)
#
data_collection = {
UNITTEST_KEY_SYSTEM_INFO: system_info,
UNITTEST_KEY_UNITTEST_INFO: unittest_info,
UNITTEST_KEY_TESTOBJECT_INFO: testobject_info,
UNITTEST_KEY_SPECIFICATION: spec,
UNITTEST_KEY_TESTRUNS: [],
}
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'w') as fh:
fh.write(json.dumps(data_collection, indent=4, sort_keys=True))
def unittest_run(unittest_folder, options):
tests = imp.load_source('', os.path.join(unittest_folder, 'src', 'tests', '__init__.py'))
config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
#
interpreter_version = 'python ' + '.'.join(['%d' % n for n in sys.version_info[:3]]) + ' (%s)' % sys.version_info[3]
#
execution_level = report.TCEL_REVERSE_NAMED.get(options.execution_level, report.TCEL_FULL)
#
if sys.version_info.major in config.lib.__INTERPRETER__:
print_header("Running Unittest with %s" % interpreter_version)
print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
data_collection = json.loads(fh.read())
print_action('Executing Testcases')
heading_dict = {}
for key in data_collection[UNITTEST_KEY_SPECIFICATION].get('item_dict', {}):
heading_dict[key] = data_collection[UNITTEST_KEY_SPECIFICATION]['item_dict'][key]['Heading']
test_session = report.testSession(
['__unittest__', config.lib.logger_name] + config.additional_loggers_to_catch,
interpreter=interpreter_version,
testcase_execution_level=execution_level,
testrun_id='p%d' % sys.version_info[0],
heading_dict=heading_dict
)
tests.testrun(test_session)
#
print_action('Adding Testrun data to %s' % unittest_filename(unittest_folder, FILES['data-collection']))
data_collection[UNITTEST_KEY_TESTRUNS].append(test_session)
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'w') as fh:
fh.write(json.dumps(data_collection, indent=4, sort_keys=True))
else:
print_header("Library does not support %s." % interpreter_version)
def unittest_finalise(unittest_folder):
config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
#
print_action('Adding Testrun data to %s' % unittest_filename(unittest_folder, FILES['data-collection']))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
data_collection = json.loads(fh.read())
#
print_header("Adding Requirement information")
#
data_collection['lost_souls'] = {}
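# 'Lost souls' are specification requirements without a corresponding testcase and testcases
# that do not appear in the specification.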
#
print_action("Adding Lost Requirement Soul")
data_collection['lost_souls']['item_list'] = []
for req_id in data_collection['specification'].get('item_dict', {}):
item = data_collection['specification']['item_dict'][req_id]
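# this system type uid marks requirement items (headings use a different type uid, see templates/unittest.tex)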
if item['system_type_uid'] == '_MR7eNHYYEem_kd-7nxt1sg':
testcase_available = False
for testrun in data_collection['testrun_list']:
if req_id in testrun['testcases']:
testcase_available = True
break
if not testcase_available:
data_collection['lost_souls']['item_list'].append(req_id)
print_info('%s - "%s" has no corresponding testcase' % (item['system_uid'], item['Heading']), FAIL)
#
print_action("Adding Lost Testcase Soul")
data_collection['lost_souls']['testcase_list'] = []
for testrun in data_collection['testrun_list']:
for tc_id in testrun.get('testcases', {}):
if tc_id not in data_collection['specification'].get('item_dict', {}) and tc_id not in data_collection['lost_souls']['testcase_list']:
data_collection['lost_souls']['testcase_list'].append(tc_id)
print_info('"%s" has no corresponding testcase' % tc_id, FAIL)
#
print_header("Adding Coverage information")
print_action('Adding Coverage Information to %s' % unittest_filename(unittest_folder, FILES['data-collection']))
data_collection[UNITTEST_KEY_COVERAGE_INFO] = coverage_info(unittest_filename(unittest_folder, 'coverage.xml'), os.path.dirname(config.lib_path))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'w') as fh:
fh.write(json.dumps(data_collection, indent=4, sort_keys=True))
def unittest_pdf(unittest_folder):
print_header("Creating PDF-Report of Unittest")
print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
data_collection = json.loads(fh.read())
if jinja2 is None:
print_action('You need to install jinja2 to create a PDF-Report!', FAIL)
else:
fn = unittest_filename(unittest_folder, FILES['tex-report'])
print_action('Creating LaTeX-File %s' % fn)
with open(fn, 'w') as fh:
#
template_path = os.path.join(os.path.dirname(__file__), 'templates')
template_filename = 'unittest.tex'
jenv = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path))
template = jenv.get_template(template_filename)
fh.write(template.render(data=data_collection))
print_action('Creating PDF %s' % unittest_filename(unittest_folder, 'unittest.pdf'))
for i in range(3):
sys.stdout.write(' Starting run %d/3 of pdflatex... ' % (i + 1))
sys.stdout.flush()
exit_value = os.system("pdflatex -interaction nonstopmode --output-directory %(path)s %(path)s/unittest.tex 1> /dev/null" % {'path': unittest_filename(unittest_folder, '')})
if exit_value != 0:
print(FAIL + 'FAILED' + ENDC)
break
else:
print(OKGREEN + 'SUCCESS' + ENDC)
def unittest_status(unittest_folder):
config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
#
print_header('Checking status of all submodules')
print_action('Updating all submodules (fetch)')
process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch", cwd=os.path.dirname(unittest_folder), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderroutput = process.communicate()[1]
if stderroutput == b'':
print_info('SUCCESS', color=OKGREEN)
else:
print_info('FAILED', color=FAIL)
print_action('Checking status...')
process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status", cwd=os.path.dirname(unittest_folder), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutput, stderroutput = process.communicate()
if stderroutput == b'':
module = None
data = {}
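# Group the status output per submodule: each block starts with a line like "Entering '<submodule>'".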
for line in stdoutput.splitlines():
line = str(line)
if 'Entering' in line:
m = line[line.index("'") + 1:]
m = str(m[:m.index("'")])
if m != module:
data[m] = ''
module = m
else:
data[m] += line
for key in data:
if "working tree clean" not in data[key] and "working directory clean" not in data[key]:
data[key] = ("local changes", WARNING)
elif "Your branch is behind" in data[key]:
data[key] = ("no up to date (try git pull)", FAIL)
elif "HEAD detached at" in data[key]:
data[key] = ("no up to date (try git checkout master)", FAIL)
elif "Your branch is ahead of" in data[key]:
data[key] = ("push required", WARNING)
elif "nothing to commit" in data[key]:
data[key] = ("clean", OKGREEN)
else:
data[key] = ("unknown", FAIL)
print_info('Submodule %s... %s' % (key, data[key][1] + data[key][0]))
else:
print_info('FAILED', color=FAIL)
#
print_header('Checking status of unittest and testresults in the library')
print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
data_collection = json.loads(fh.read())
print_action('Checking release state of this testrun... ')
if data_collection['testobject_information']['State'] != 'Released':
print_info("FAILED", FAIL)
else:
print_info("SUCCESS", OKGREEN)
#
print_action('Checking whether the testresults in the library are up to date...')
try:
with open(os.path.join(unittest_folder, '..', 'pylibs', config.lib.__name__, '_testresults_', FILES['data-collection']), 'r') as fh:
lib_result = json.loads(fh.read())
except FileNotFoundError:
print_info("FAILED: Testresults not in library", FAIL)
else:
if data_collection['testobject_information'] != lib_result['testobject_information'] or data_collection['unittest_information'] != lib_result['unittest_information']:
print_info("FAILED", FAIL)
else:
print_info("SUCCESS", OKGREEN)
def unittest_copy(unittest_folder):
config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
#
print_header('Copy unittest files to library')
target_folder = os.path.join(config.lib_path, '_testresults_')
print_action('Copying Unittest Files to %s' % target_folder)
if not os.path.exists(target_folder):
print_info('Creating folder %s' % target_folder)
fstools.mkdir(target_folder)
else:
for fn in os.listdir(target_folder):
remove_file(os.path.join(target_folder, fn))
for fn in REPORT_FILES:
src = unittest_filename(unittest_folder, fn)
dst = os.path.join(target_folder, fn)
print_info('copying %s -> %s' % (src, dst))
shutil.copyfile(src, dst)
def unittest_release(unittest_folder):
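# Pin the current uid of src/tests as the released unittest version by rewriting the
# 'release_unittest_version' line in src/config.py.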
with open(os.path.join(unittest_folder, 'src', 'config.py'), 'r') as fh:
conf_file = fh.read()
with open(os.path.join(unittest_folder, 'src', 'config.py'), 'w') as fh:
for line in conf_file.splitlines():
if line.startswith('release_unittest_version'):
fh.write("release_unittest_version = '%s'\n" % module_uid(os.path.join(unittest_folder, 'src', 'tests')))
else:
fh.write(line + '\n')

View File

@@ -0,0 +1 @@
dirk@here.28603

View File

@@ -0,0 +1,21 @@
{%- import 'macros.tex' as macros %}
\section{Test-Coverage}
{% for module in coverage_information %}
\subsection{ {\tt {{macros.latex_filter(module.name)}} }}
The line coverage for {\tt {{macros.latex_filter(module.name)}} } was {{macros.latex_filter("%.1f" % module.line_coverage)}}\% {% if module.branch_coverage %}\\
The branch coverage for {\tt {{macros.latex_filter(module.name)}} } was {{macros.latex_filter("%.1f" % module.branch_coverage)}}\%{% endif %}
{% for py_file in module.files %}
\subsubsection{ {\tt {{macros.latex_filter(py_file.name)}} }}
The line coverage for {\tt {{macros.latex_filter(py_file.name)}} } was {{macros.latex_filter("%.1f" % py_file.line_coverage)}}\% {% if py_file.branch_coverage %}\\
The branch coverage for {\tt {{macros.latex_filter(py_file.name)}} } was {{macros.latex_filter("%.1f" % py_file.branch_coverage)}}\%{% endif %}
\vspace*{2.7ex}
{%- for fragment in py_file.fragments %}
\lstset{backgroundcolor=\color{bg-{{fragment.coverage_state}}}}\vspace*{-2.7ex}
{%- if fragment.end is not none %}
\lstinputlisting[language=Python, linerange={{fragment.start}}-{{fragment.end}}, firstnumber={{fragment.start}}]{ {{py_file.filepath}} }
{%- else %}
\lstinputlisting[language=Python, firstline={{fragment.start}}, firstnumber={{fragment.start}}]{ {{py_file.filepath}} }
{%- endif %}
{%- endfor %}
{% endfor %}
{% endfor %}

View File

@@ -0,0 +1,13 @@
{%- import 'macros.tex' as macros %}
\begin{tabu} to \linewidth {lll}
\toprule
\textbf{Module- or Filename} & \textbf{Line-Coverage} & \textbf{Branch-Coverage}\\
{%- for module in coverage_information %}
\midrule
{\tt {{macros.latex_filter(module.name)}} } & {{macros.latex_filter("%.1f" % module.line_coverage)}}\% & {% if module.branch_coverage %}{{macros.latex_filter("%.1f" % module.branch_coverage)}}\%{% endif %} \\
{%- for py_file in module.files %}
\hspace*{2ex}{\tt {{macros.latex_filter(py_file.name)}} } & {{macros.latex_filter("%.1f" % py_file.line_coverage)}}\% & \\
{%- endfor %}
{%- endfor %}
\bottomrule
\end{tabu}

View File

@@ -0,0 +1,22 @@
{%- import 'macros.tex' as macros %}
{{ testobject_information.Description }}
\begin{tabu} to \linewidth {lX}
\toprule
{\bf Library Information} & \\
\midrule
{%- for key in testobject_information %}
{%- if key != "Description" and key != 'Dependencies' %}
{{macros.latex_filter(key)}} & {{macros.latex_filter(testobject_information[key])}} \\
{%- endif %}
{%- endfor %}
{%- if 'Dependencies' in data.testobject_information %}
\midrule
{\bf Dependencies} & \\
\midrule
{%- for module, version in testobject_information.Dependencies %}
{{macros.latex_filter(module)}} & {{macros.latex_filter(version)}}\\
{%- endfor %}
{%- endif %}
\bottomrule
\end{tabu}

View File

@@ -0,0 +1,12 @@
{%- import 'macros.tex' as macros %}
\begin{tabu} to \linewidth {lX}
\toprule
{\bf System Information} & \\
\midrule
{%- for key in system_information %}
{%- if key != "Description" %}
{{macros.latex_filter(key)}} & {{macros.latex_filter(data.system_information[key])}} \\
{%- endif %}
{%- endfor %}
\bottomrule
\end{tabu}

View File

@@ -0,0 +1,11 @@
{%- import 'macros.tex' as macros %}
\begin{tabu} to \linewidth {lX}
\toprule
{\bf Unittest Information} & \\
\midrule
{%- for key in unittest_information %}
{{macros.latex_filter(key)}} & {{macros.latex_filter(data.unittest_information[key])}} \\
{%- endfor %}
Testruns with & {% for testrun in data.testrun_list %}{{testrun.interpreter}}{% if not loop.last %}, {% endif %}{% endfor %}\\
\bottomrule
\end{tabu}

11
templates/macros.tex Normal file
View File

@@ -0,0 +1,11 @@
{%- macro latex_filter(text) -%}{{ text.replace('\\', '/').replace('%', '\\%').replace('/xc2/xb0', '$^\circ$').replace('"', '\'').replace('/', '/\\allowbreak ').replace('&', '\\allowbreak \\&').replace('_', '\\_').replace('->', '$\\rightarrow$').replace('<-', '$\\leftarrow$').replace('=>', '$\\Rightarrow$').replace('<=', '$\\leq$').replace('>=', '$\\geq$').replace('<', '$<$').replace('>', '$>$').replace('{', '\{').replace('}', '\}').replace('#', '\\#')}}
{%- endmacro -%}
{%- macro color_by_level(level) -%}{% if level <= 10 %}black{% else %}{% if level <= 20 %}green{% else %}{% if level <= 30 %}orange{% else %}red{% endif %}{% endif %}{% endif %}
{%- endmacro -%}
{%- macro bg_by_levelno(level) -%}{% if level <= 10 %}0.8 0.8 0.8{% else %}{% if level <= 20 %}0.8 0.95 0.8{% else %}{% if level <= 30 %}1 0.75 0.45{% else %}0.95 0.8 0.8{% endif %}{% endif %}{% endif %}
{%- endmacro -%}
{%- macro result(level) -%}{% if level <= 10 %}Info{% else %}{% if level <= 20 %}\textcolor{green}{Success}{% else %}{% if level <= 30 %}\textcolor{orange}{Warning}{% else %}\textcolor{red}{Failed}{% endif %}{% endif %}{% endif %}
{%- endmacro -%}

View File

@@ -0,0 +1,5 @@
{%- import 'macros.tex' as macros %}
{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}}
{%- if 'Description' in item and item.Description != '' %}
{{ item.Description }}
{%- endif %}

View File

@@ -0,0 +1,16 @@
{%- import 'macros.tex' as macros %}
{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}}
{{ '\\label{%s%s}' % (labeltype, item.system_uid)}}
{%- if 'Description' in item and item.Description != '' %}
\paragraph{Description}\mbox{}\\
{{ item.Description }}
{%- endif %}
{%- if 'ReasonForImplementation' in item and item.ReasonForImplementation != '' %}
\paragraph{Reason for the implementation}\mbox{}\\
{{ item.ReasonForImplementation }}
{%- endif %}
{%- if 'Fitcriterion' in item and item.Fitcriterion != '' %}
\paragraph{Fitcriterion}\mbox{}\\
{{ item.Fitcriterion }}
{%- endif %}

View File

@@ -0,0 +1,18 @@
{%- import 'macros.tex' as macros %}
\paragraph{Testresult}\mbox{}\\
This test finished with the state: {\bf {{ macros.result(testcase.levelno) }}}.
{%- for tLogger in testcase.testcaseLogger %}
\vspace*{2.5ex}
\begin{tabu} to \linewidth {lX}
\toprule
{\bf {{ macros.result(tLogger.levelno) }} } & {{ macros.latex_filter(tLogger.message) }}\\
\bottomrule
\end{tabu}
{%- for mLogger in tLogger.moduleLogger %}
\definecolor{shadecolor}{rgb}{ {{macros.bg_by_levelno(mLogger.levelno) }} }\begin{modulelog}[breaklines=true, breakanywhere=true]
{{ mLogger.message }}
\end{modulelog}
\vspace*{-0.225cm}
{%- endfor %}
{%- endfor %}

View File

@@ -0,0 +1,19 @@
{%- import 'macros.tex' as macros %}
\paragraph{Testresult}\mbox{}\\
This test finished with the state: {\bf {{ macros.result(testcase.levelno) }}}. See also full trace
in section \ref{testcase:{{testrun.testrun_id}}__{{testcase.message}}}!
\begin{longtabu} to \linewidth {lX}
\toprule
Testrun: & {{ testrun.interpreter }}\\
Caller: & {{ macros.latex_filter(testcase.pathname) }} ({{ "%d" % testcase.lineno }})\\
Start-Time: & {{ macros.latex_filter(testcase.time_start) }}\\
Finished-Time: & {{ macros.latex_filter(testcase.time_finished) }}\\
Time-Consumption: & {{ '%.3fs' % (testcase.time_consumption) }}\\
\midrule
\multicolumn{2}{l}{\bf{Testsummary:}}\\
\midrule
{%- for tLogger in testcase.testcaseLogger %}
\bf{\,{{ macros.result(tLogger.levelno) }} } & {{ macros.latex_filter(tLogger.message) }}\\
{%- endfor %}
\bottomrule
\end{longtabu}

View File

@@ -0,0 +1,13 @@
{%- import 'macros.tex' as macros %}
\begin{tabu} to \linewidth {lX}
\toprule
Number of tests & {{ "{\\bf %d}" % testrun.number_of_tests }}\\
Number of successful tests & {{ "{\\bf %d}" % testrun.number_of_successfull_tests }}\\
Number of possibly failed tests & \textcolor{% if testrun.number_of_possibly_failed_tests > 0%}{orange}{% else %}{black}{% endif %}{{ "{\\bf %d}" % testrun.number_of_possibly_failed_tests }}\\
Number of failed tests & \textcolor{% if testrun.number_of_failed_tests > 0%}{red}{% else %}{black}{% endif %}{{ "{\\bf %d}" % testrun.number_of_failed_tests }}\\
\midrule
Execution level & {{ macros.latex_filter(testrun.testcase_names.get('%d' % testrun.testcase_execution_level, 'unknown')) }}\\
Time consumption & {{ '%.3fs' % testrun.time_consumption }}\\
\bottomrule
\end{tabu}

120
templates/unittest.tex Normal file
View File

@@ -0,0 +1,120 @@
{%- import 'macros.tex' as macros %}
{%- include 'unittest_head.tex' %}
{%- include 'unittest_titlepage.tex' %}
\tableofcontents
\newpage
\section{Test Information}
\subsection{Test Candidate Information}
{%- with testobject_information = data.testobject_information %}
{%- include 'information/candidate.tex' %}
{%- endwith %}
\subsection{Unittest Information}
{%- with unittest_information = data.unittest_information %}
{%- include 'information/unittest.tex' %}
{%- endwith %}
\subsection{Test System Information}
{%- with system_information = data.system_information %}
{%- include 'information/system.tex' %}
{%- endwith %}
\section{Statistic}
{%- for testrun in data.testrun_list %}
\subsection{\textcolor{% if testrun.number_of_failed_tests > 0%}{red}{% else %}{% if testrun.number_of_possibly_failed_tests > 0%}{orange}{% else %}{green}{% endif %}{% endif %}{Test-Statistic for testrun with {{testrun.interpreter}}}}
{%- include 'test/run_statistic.tex' %}
{%- endfor %}
\subsection{Coverage Statistic}
{%- with coverage_information = data.coverage_information %}
{%- include 'coverage/statistic.tex' %}
{%- endwith %}
\newpage
{%- if data.specification.get('item_dict', {})|length >0 %}
\section{Tested Requirements}
{%- for item_id in data.specification.uid_list_sorted %}
{%- if item_id not in data.lost_souls.item_list %}
{%- with item = data.specification.item_dict[item_id] %}
{%- if item.system_type_uid == '_4-K5EHYYEem_kd-7nxt1sg' %}
{%- with sectype = 'subsection' %}
{%- include 'reqif/heading.tex' %}
{%- endwith %}
{%- elif item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' %}
{%- with sectype = 'subsubsection', labeltype = 'item:' %}
{%- include 'reqif/requirement.tex' %}
{%- endwith %}
{%- if item_id not in data.lost_souls.item_list %}
{%- for testrun in data.testrun_list %}
{%- if item.system_uid in testrun.testcases %}
{%- with testcase = testrun.testcases[item.system_uid] %}
{%- include 'test/case_short.tex' %}
{%- endwith %}
{%- else %}
\textcolor{orange}{\bf No testresults available!}
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endif %}
{%- endwith %}
{%- endif %}
{%- endfor %}
{%- endif %}
{% if data.lost_souls.item_list|length > 0 %}
\newpage
\section{\textcolor{red}{Requirements with no corresponding Testcase}}
{% for item_id in data.specification.uid_list_sorted %}
{% with item = data.specification.item_dict[item_id] %}
{% if item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' and item_id in data.lost_souls.item_list %}
{%- with sectype = 'subsection', labeltype = 'lost_soul:' %}
{% include 'reqif/requirement.tex' %}
{%- endwith %}
{% endif %}
{% endwith %}
{% endfor %}
{% endif %}
{% if data.lost_souls.testcase_list|length > 0 %}
\newpage
\section{\textcolor{orange}{Testcases with no corresponding Requirement}}
{%- for testrun in data.testrun_list %}
\subsection{Summary for testrun with {{ testrun.interpreter }}}
{% for lost_soul_id in data.lost_souls.testcase_list %}
{% if lost_soul_id in testrun.testcases %}
{% with testcase = testrun.testcases[lost_soul_id] %}
\subsubsection{ {{macros.latex_filter(testcase.message)}} }
{% include 'test/case_short.tex' %}
{% endwith %}
{% endif %}
{% endfor %}
{% endfor %}
{% endif %}
\newpage
\appendix
{%- for testrun in data.testrun_list %}
\section{Trace for testrun with {{ testrun.interpreter }}}
{%- for max_level, absmax_level, num_tests in ( (40, 1000, testrun.number_of_failed_tests), (30, 39, testrun.number_of_possibly_failed_tests), (0, 29, testrun.number_of_successfull_tests) ) %}
{% if num_tests > 0 %}
\subsection{Tests with status {{ macros.result(max_level) }} ({{num_tests}})}
{%- for testcase_id in testrun.uid_list_sorted %}
{% with testcase = testrun.testcases[testcase_id], item = data.specification.get('item_dict', {}).get(testrun.testcases[testcase_id].message) %}
{%- if testcase.levelno >= max_level and testcase.levelno <= absmax_level%}
{%- if item %}
{%- with sectype = 'subsubsection', labeltype = 'testcase:' + testrun.testrun_id + '__' %}
{%- include 'reqif/requirement.tex' %}
{%- endwith %}
{%- else %}
\subsubsection{ {{ macros.latex_filter(testcase.message) }} }\label{testcase:{{testrun.testrun_id}}__{{testcase.message}}}
{%- endif %}
{% include "test/case_long.tex" %}
{%- endif %}
{%- endwith %}
{%- endfor %}
{%- endif %}
{% endfor %}
{%- endfor %}
{% with coverage_information = data.coverage_information %}
{% include 'coverage/report.tex' %}
{% endwith %}
{% include 'unittest_foot.tex' %}

View File

@@ -0,0 +1 @@
\end{document}

View File

@@ -0,0 +1,89 @@
{%- import 'macros.tex' as macros %}
\documentclass[a4paper]{article}
%\documentclass[a4paper,landscape]{article}
\renewcommand{\familydefault}{\sfdefault}
\usepackage[table]{xcolor}
\definecolor{orange}{rgb}{1, 0.7, 0}
\definecolor{lightgrey}{rgb}{0.925, 0.925, 0.925}
\setlength{\topmargin}{-3cm}
\setlength{\oddsidemargin}{-0.5cm}
\setlength{\evensidemargin}{0cm}
\setlength{\textwidth}{17.5cm}
\setlength{\textheight}{24.5cm}
%\setlength{\textwidth}{25cm}
%\setlength{\textheight}{15cm}
\setlength{\headheight}{84pt}
\usepackage{fancyvrb}
\usepackage{fvextra}
%\usepackage{framed,color}
%\newenvironment{modulelog}{\snugshade\Verbatim}{\endVerbatim\endsnugshade}
\usepackage{adjustbox}
\newenvironment{modulelog}%
{\par\noindent\adjustbox{margin=0ex,bgcolor=shadecolor,margin=0ex}\bgroup\varwidth\linewidth\Verbatim}%
{\endVerbatim\endvarwidth\egroup}
%\usepackage{xcolor}
\renewcommand{\baselinestretch}{1.2}
\setlength{\parindent}{0pt}
\setlength{\parskip}{9pt plus3pt minus3pt}
\usepackage{listings}
\usepackage{color}
\definecolor{bg-partially-covered}{rgb}{1,1,0.6} % light-yellow
\definecolor{bg-uncovered}{rgb}{1,0.8,0.8} % light-red
\definecolor{bg-covered}{rgb}{0.95,1,0.95} % very light-green
\definecolor{bg-clean}{rgb}{1,1,1} % white
\definecolor{mygreen}{rgb}{0,0.6,0}
\definecolor{mygray}{rgb}{0.5,0.5,0.5}
\definecolor{mymauve}{rgb}{0.58,0,0.82}
\lstset{ %
backgroundcolor=\color{white}, % choose the background color; you must add \usepackage{color} or \usepackage{xcolor}; should come as last argument
basicstyle=\footnotesize, % the size of the fonts that are used for the code
breakatwhitespace=false, % sets if automatic breaks should only happen at whitespace
breaklines=true, % sets automatic line breaking
captionpos=b, % sets the caption-position to bottom
commentstyle=\color{mygreen}, % comment style
deletekeywords={...}, % if you want to delete keywords from the given language
escapeinside={\%*}{*)}, % if you want to add LaTeX within your code
extendedchars=true, % lets you use non-ASCII characters; for 8-bits encodings only, does not work with UTF-8
frame=none, % adds a frame around the code
keepspaces=true, % keeps spaces in text, useful for keeping indentation of code (possibly needs columns=flexible)
keywordstyle=\color{blue}, % keyword style
language=Octave, % the language of the code
morekeywords={*,...}, % if you want to add more keywords to the set
numbers=left, % where to put the line-numbers; possible values are (none, left, right)
numbersep=5pt, % how far the line-numbers are from the code
numberstyle=\tiny\color{mygray}, % the style that is used for the line-numbers
rulecolor=\color{black}, % if not set, the frame-color may be changed on line-breaks within not-black text (e.g. comments (green here))
showlines=true,
showspaces=false, % show spaces everywhere adding particular underscores; it overrides 'showstringspaces'
showstringspaces=false, % underline spaces within strings only
showtabs=false, % show tabs within strings adding particular underscores
stepnumber=1, % the step between two line-numbers. If it's 1, each line will be numbered
stringstyle=\color{mymauve}, % string literal style
tabsize=2, % sets default tabsize to 2 spaces
}
\usepackage{hyperref}
\usepackage{longtable}
\usepackage{tabu}
\usepackage{multicol}
\usepackage{booktabs}
\usepackage{graphicx}
\usepackage{lastpage} % for the number of the last page in the document
\usepackage{fancyhdr}
\fancyhf{}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
\lhead{\textcolor{gray}{}}
\chead{\textcolor{gray}{ Unittest for {\tt {{ macros.latex_filter(data.testobject_information.Name) }} }}}
\rhead{\textcolor{gray}{}}
\lfoot{\textcolor{gray}{}}
\cfoot{\textcolor{gray}{}}
\rfoot{\textcolor{gray}{\thepage\,/ \pageref{LastPage}}}
\begin{document}

View File

@@ -0,0 +1,15 @@
{%- import 'macros.tex' as macros %}
\begin{titlepage}
\date{\today}
\title{
Unittest for {\tt {{ macros.latex_filter(data.testobject_information.Name) }} }
}
\date{\today}
\maketitle
\thispagestyle{empty}
\newpage
\end{titlepage}
\setcounter{page}{1}
\pagestyle{fancy}

241
test.py Normal file
View File

@@ -0,0 +1,241 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import logging
logger = logging.getLogger('__unittest__')
REPORT_LEVEL_FAIL = logging.ERROR
REPORT_LEVEL_INSPECT = logging.WARNING
REPORT_LEVEL_PASS = logging.INFO
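# The check helpers below return one of these logging levels: PASS (content and type match),
# INSPECT (content matches but the type differs), FAIL (content mismatch or missing/odd elements).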
def __get_repr__(value, data_filter=repr):
if type(value) == dict:
return '{ ' + ', '.join(['%s: %s' % (repr(key), __get_repr__(value.get(key))) for key in value.keys()]) + ' }'
elif type(value) == list:
return '[ ' + ', '.join(['%s' % (__get_repr__(v)) for v in value]) + ' ]'
else:
return data_filter(value)
def __report_result__(result, description, data_filter=repr):
logger.debug('Result (%s): %s (%s)', description, __get_repr__(result, data_filter), repr(type(result)))
def __report_expectation_equivalency__(expectation, description, data_filter=repr):
logger.debug('Expectation (%s): result = %s (%s)', description, __get_repr__(expectation, data_filter), repr(type(expectation)))
def __report_expectation_range__(min_expectation, max_expectation, description):
logger.debug('Expectation (%s): %s <= result <= %s', description, __get_repr__(min_expectation), __get_repr__(max_expectation))
def __equivalent_dict__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
result_keys = set(result.keys())
expect_keys = set(expectation.keys())
#
log_lvl = REPORT_LEVEL_PASS
#
# missing elements
#
missing_keys = expect_keys - result_keys
if len(missing_keys) > 0:
logger.error('Missing key(s) in dict (%s): %s.' + report_comment_fail, dict_key, ', '.join(['%s' % repr(key) for key in missing_keys]))
log_lvl = REPORT_LEVEL_FAIL
#
# odd elements
#
odd_keys = result_keys - expect_keys
if len(odd_keys) > 0:
logger.error('Odd key(s) in dict (%s): %s.' + report_comment_fail, dict_key, ', '.join(['%s' % repr(key) for key in odd_keys]))
log_lvl = REPORT_LEVEL_FAIL
#
# differences
#
common_keys = result_keys - missing_keys - odd_keys
for key in common_keys:
ll = __equivalent__(result[key], expectation[key], report_comment_fail=report_comment_fail, dict_key=dict_key + ('.' if dict_key != '' else '') + str(key))
if log_lvl < ll:
log_lvl = ll
return log_lvl
def __equivalent_list__(result, expectation, report_comment_fail=None, list_key='test_variable'):
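# Order-independent list comparison: each result value is matched against the remaining expectation
# entries; a type-only difference still counts as a match but is reported at INSPECT level.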
_odd_ = []
_result_ = result[:]
e_index = list(range(0, len(expectation)))
log_lvl = REPORT_LEVEL_PASS
r = 0
while len(_result_) > 0:
value = _result_.pop(0)
just_type_diff = None
for e in e_index:
ll = __equivalent__(value, expectation[e], None)
if ll == REPORT_LEVEL_PASS:
e_index.pop(e_index.index(e))
break
elif ll == REPORT_LEVEL_INSPECT:
just_type_diff = e
else:
if just_type_diff is None:
_odd_.append(value)
else:
log_lvl = __equivalent__(value, expectation[just_type_diff], report_comment_fail, dict_key='%s[%d]' % (list_key, r))
e_index.pop(e_index.index(just_type_diff))
r += 1
#
# missing elements
#
if len(e_index) > 0:
logger.error('Missing value(s) in list (%s): %s.' + report_comment_fail, list_key, ', '.join(['%s' % repr(expectation[e]) for e in e_index]))
log_lvl = REPORT_LEVEL_FAIL
#
# odd elements
#
if len(_odd_) > 0:
logger.error('Odd value(s) in list (%s): %s.' + report_comment_fail, list_key, ', '.join(['%s' % repr(v) for v in _odd_]))
log_lvl = REPORT_LEVEL_FAIL
return log_lvl
def __equivalent__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
log_lvl = REPORT_LEVEL_PASS
if type(result) == dict and type(expectation) == dict:
ll = __equivalent_dict__(result, expectation, report_comment_fail, dict_key)
if log_lvl < ll:
log_lvl = ll
elif type(result) == list and type(expectation) == list:
ll = __equivalent_list__(result, expectation, report_comment_fail, dict_key)
if log_lvl < ll:
log_lvl = ll
else:
if result != expectation:
log_lvl = REPORT_LEVEL_FAIL
logger.error('Content %s is incorrect' + (' for %s' % dict_key if dict_key != '' else '') + '.' + report_comment_fail, __get_repr__(result))
if type(result) != type(expectation):
if log_lvl < REPORT_LEVEL_INSPECT:
log_lvl = REPORT_LEVEL_INSPECT
logger.warning('Type %s is NOT %s%s (%s). ' + report_comment_fail.strip() or '', __get_repr__(type(result)), __get_repr__(type(expectation)), (' for %s' % dict_key if dict_key != '' else ''), __get_repr__(result))
return log_lvl
def equivalency_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
"""
Routine to check values for equivalency inside a test run and report to a testCaseLogger.
:param result: The result of a test execution of a module
:type result: All types are supported
:param expectation: The expected value (shall be equivalent to result)
:type expectation: All types are supported
:param tcl: The testcase logger to report the check result to
:param description: A description of the result. It will be reported like "xxx is correct." Example: description="string representation created by modulename"
:type description: str
:param data_filter: Function used to render the values in the log output (default: repr)
:param report_comment_fail: Comment for a failed test execution. Will be added in brackets after the result text.
:type report_comment_fail: str
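Example (tcl is any logger-like object providing log(level, msg, *args), e.g. a testcase logger)::

    equivalency_chk({'answer': 42}, {'answer': 42}, tcl, description='parsed result')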
"""
__report_result__(result, description, data_filter=data_filter)
__report_expectation_equivalency__(expectation, description, data_filter=data_filter)
report_level = __equivalent__(result, expectation, report_comment_fail=report_comment_fail, dict_key='result')
if report_level == REPORT_LEVEL_PASS:
tcl.log(report_level, description + ' is correct (Content %s and Type is %s).', data_filter(result), repr(type(result)))
else:
tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
return report_level
class equivalency_order_chk(object):
def __init__(self, ordered_values, tcl, description='Variable', report_comment_fail=None):
self._expected_values = ordered_values
self._tcl = tcl
self._description = description
self._report_comment_fail = report_comment_fail
self._reported_values = []
def report_value(self, value):
self._reported_values.append(value)
def report(self):
__report_result__(self._reported_values, self._description)
__report_expectation_equivalency__(self._expected_values, self._description)
report_lvl = REPORT_LEVEL_PASS
for i in range(0, min(len(self._expected_values), len(self._reported_values))):
report_lvl = max(report_lvl, equivalency_chk(self._reported_values[i], self._expected_values[i], logger, 'Submitted value number %d' % (i + 1), self._report_comment_fail))
if report_lvl <= REPORT_LEVEL_PASS:
self._tcl.log(report_lvl, self._description + ': Values and number of submitted values are correct. See detailed log for more information.')
else:
self._tcl.log(report_lvl, self._description + ': Values and number of submitted values are NOT correct. See detailed log for more information.')
return report_lvl
def report_range_check(self, minus_tollerance, plus_tollerance):
__report_result__(self._reported_values, self._description)
report_lvl = REPORT_LEVEL_PASS
report_lvl = max(report_lvl, equivalency_chk(len(self._reported_values), len(self._reported_values), self._tcl, 'Number of submitted values', self._report_comment_fail))
for i in range(0, min(len(self._expected_values), len(self._reported_values))):
report_lvl = max(report_lvl, range_chk(self._reported_values[i], self._expected_values[i] - minus_tollerance, self._expected_values[i] + plus_tollerance, logger, 'Submitted value number %d' % (i + 1), self._report_comment_fail))
if report_lvl <= REPORT_LEVEL_PASS:
self._tcl.log(report_lvl, self._description + ': Value accuracy and number of submitted values are correct. See detailed log for more information.')
else:
self._tcl.log(report_lvl, self._description + ': Value accuracy and number of submitted values are NOT correct. See detailed log for more information.')
return report_lvl
def __range__(result, min_expectation, max_expectation, report_comment_fail):
report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
log_lvl = REPORT_LEVEL_PASS
if result < min_expectation or result > max_expectation:
log_lvl = REPORT_LEVEL_FAIL
logger.error('Content %s is incorrect.' + report_comment_fail, __get_repr__(result))
if type(result) != type(min_expectation) or type(result) != type(max_expectation):
if log_lvl < REPORT_LEVEL_INSPECT:
log_lvl = REPORT_LEVEL_INSPECT
logger.warning('Type %s is incorrect.' + report_comment_fail, __get_repr__(type(result)))
return log_lvl
def range_chk(result, min_expectation, max_expectation, tcl, description='Value', report_comment_fail=None):
"""
Routine to check values to be in a range inside a test run and report to a testCaseLogger.
:param result: The result of a test execution of a module
:type result: All numeric types are supported
:param min_expectation: The result shall be greater than or equal to this value
:type min_expectation: All numeric types are supported
:param max_expectation: The result shall be less than or equal to this value
:type max_expectation: All numeric types are supported
:param tcl: The testcase logger to report the check result to
:param description: A description of the result. It will be reported like "xxx is correct." Example: description="string representation created by modulename"
:type description: str
:param report_comment_fail: Comment for a failed test execution. Will be added in brackets after the result text.
:type report_comment_fail: str
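Example (passes, since 0.9 <= 0.95 <= 1.0 and all values are of the same type)::

    range_chk(0.95, 0.9, 1.0, tcl, description='measured value')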
"""
__report_result__(result, description)
__report_expectation_range__(min_expectation, max_expectation, description)
report_level = __range__(result, min_expectation, max_expectation, report_comment_fail=report_comment_fail)
if report_level == REPORT_LEVEL_PASS:
tcl.log(report_level, description + ' is correct (Content %s in [%s ... %s] and Type is %s).', repr(result), repr(min_expectation), repr(max_expectation), repr(type(result)))
else:
tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
return report_level
def in_list_chk(result, expectation_list, tcl, description='Value', report_level_pass=logging.INFO, report_level_fail=logging.ERROR, report_comment_fail=None):
"""
Routine to check that a value is contained in a list of allowed values inside a test run and report to a testCaseLogger.
:param result: The result of a test execution of a module
:type result: All types are supported
:param expectation_list: The list of allowed values
:type expectation_list: A list of all types is supported
:param description: A description of the result. It will be reported like "xxx is correct." Example: description="string representation created by modulename"
:type description: str
:param report_level_pass: The reporting level as defined in :class:`logging` (e.g.: logging.INFO)
:param report_level_fail: The reporting level as defined in :class:`logging` (e.g.: logging.ERROR)
:param report_comment_fail: Comment for a failed test execution. Will be added in brackets after the result text.
:type report_comment_fail: str
"""
__report_result__(result, description)
__report_expectation_equivalency__(expectation_list, description)
tcl.log(REPORT_LEVEL_FAIL, 'in_list check not yet implemented')
return REPORT_LEVEL_FAIL

39
unittest_flow.sh Executable file
View File

@@ -0,0 +1,39 @@
#!/bin/bash
#
# Set commands depending on distribution
#
. /etc/os-release
# python2
if [[ "$ID" == "arch" || "$ID" == "manjaro" || "$ID_LIKE" == *"opensuse"* || "$ID" == "solus" ]]; then
COV2_CMD="coverage2"
PYT2_CMD="python2"
else
COV2_CMD="python2-coverage"
PYT2_CMD="python2"
fi
# python3
if [[ "$ID" == "arch" || "$ID" == "manjaro" || "$ID_LIKE" == *"opensuse"* || "$ID" == "solus" ]]; then
COV3_CMD="coverage3"
PYT3_CMD="python3"
else
COV3_CMD="python3-coverage"
PYT3_CMD="python3"
fi
# pdf viewer
PDF_CMD="xdg-open"
#
# Unittest Flow
#
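# Flow: clean previous results -> run the tests under python2 and python3 with coverage
#       -> export coverage.xml -> finalise the data collection -> print the status -> build and open the PDF report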
$PYT3_CMD src/unittest.py clean
echo -e "\e[1m * Erasing collected coverage information\e[0m"
$COV2_CMD erase
$COV2_CMD run -a --branch --source=$($PYT3_CMD src/config.py -p) src/unittest.py run "$@"
$COV3_CMD run -a --branch --source=$($PYT3_CMD src/config.py -p) src/unittest.py run "$@"
echo -e "\e[1m\e[93mCreating Coverage-XML-File: $(pwd)/testresults/coverage.xml\e[0m"
$COV3_CMD xml -o testresults/coverage.xml
$PYT3_CMD src/unittest.py finalise
$PYT3_CMD src/unittest.py status
$PYT3_CMD src/unittest.py pdf
$PDF_CMD testresults/unittest.pdf