Merge branch 'master' of https://git.mount-mockery.de/pylib/unittest
commit bc0672bec7
run.py (36 lines changed)
@@ -5,7 +5,7 @@ import fstools
 from unittest import jsonlog
 from unittest import output
 import report
-import reqif
+import rspec

 import json
 import os
@@ -139,12 +139,15 @@ class coverage_info(list):
     def __str__(self):
         rv = ''
         for module in self:
-            rv += '%s (%.1f%% - %s)\n' % (module.get(jsonlog.COVI_KEY_NAME), module.get(jsonlog.COVI_KEY_LINE_COVERAGE), module.get(jsonlog.COVI_KEY_FILEPATH))
+            rv += '%s (%.1f%% - %s)\n' % (module.get(jsonlog.COVI_KEY_NAME),
+                                          module.get(jsonlog.COVI_KEY_LINE_COVERAGE), module.get(jsonlog.COVI_KEY_FILEPATH))
             for py_file in module.get(jsonlog.COVI_KEY_FILES):
-                rv += '  %s (%.1f%% - %s)\n' % (py_file.get(jsonlog.COVI_KEY_NAME), py_file.get(jsonlog.COVI_KEY_LINE_COVERAGE), py_file.get(jsonlog.COVI_KEY_FILEPATH))
+                rv += '  %s (%.1f%% - %s)\n' % (py_file.get(jsonlog.COVI_KEY_NAME),
+                                                py_file.get(jsonlog.COVI_KEY_LINE_COVERAGE), py_file.get(jsonlog.COVI_KEY_FILEPATH))
                 for fragment in py_file.get(self.KEY_FRAGMENTS):
                     if fragment.get(self.KEY_END_LINE) is not None:
-                        rv += '    %d - %d: %s\n' % (fragment.get(self.KEY_START_LINE), fragment.get(self.KEY_END_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
+                        rv += '    %d - %d: %s\n' % (fragment.get(self.KEY_START_LINE),
+                                                     fragment.get(self.KEY_END_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
                     else:
                         rv += '    %d - : %s\n' % (fragment.get(self.KEY_START_LINE), repr(fragment.get(self.COVERAGE_STATE)))
         return rv
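The changed lines in this hunk only re-wrap long %-format calls; the resulting strings are identical. A minimal standalone sketch of that equivalence, using made-up coverage values instead of the real jsonlog keys:

# Hypothetical values; run.py reads them via module.get(jsonlog.COVI_KEY_*).
name, line_coverage, filepath = 'unittest', 97.3, '/path/to/unittest'

single = '%s (%.1f%% - %s)\n' % (name, line_coverage, filepath)
wrapped = '%s (%.1f%% - %s)\n' % (name,
                                  line_coverage, filepath)
assert single == wrapped == 'unittest (97.3% - /path/to/unittest)\n'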
@@ -211,12 +214,13 @@ def unittest_prepare(ut_folder):
     testobject_info[jsonlog.TOBI_STATE] = jsonlog.TOBI_STATE_RELEASED if config.release_unittest_version == unittest_info[jsonlog.UTEI_VERSION] else jsonlog.TOBI_STATE_IN_DEVELOPMENT
     testobject_info[jsonlog.TOBI_DEPENDENCIES] = []
     for dependency in config.lib.__DEPENDENCIES__:
-        testobject_info[jsonlog.TOBI_DEPENDENCIES].append((dependency, jsonlog.module_uid(os.path.join(jsonlog.get_ut_src_folder(ut_folder), dependency))))
+        testobject_info[jsonlog.TOBI_DEPENDENCIES].append(
+            (dependency, jsonlog.module_uid(os.path.join(jsonlog.get_ut_src_folder(ut_folder), dependency))))
     #
-    spec_filename = os.path.join(ut_folder, 'requirements', 'specification.reqif')
+    spec_filename = os.path.join(config.lib_path, '_requirements_', 'specification.py')
     output.print_action("Adding Requirement Specification from %s" % spec_filename)
     try:
-        spec = reqif.reqif_dict(spec_filename, 'Heading', 'Software Specification')
+        spec = rspec.rs_by_spec_file(spec_filename)
     except FileNotFoundError:
         output.print_info(output.STATUS_FAILED)
         spec = {}
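Besides the line wrapping, this hunk switches the specification source from a reqif file to a Python spec file loaded with rspec.rs_by_spec_file. A minimal sketch of the resulting load-or-fallback pattern; the wrapper function and the path argument are hypothetical, only the rspec call itself is taken from the diff:

import os

import rspec  # used by the commit; no API beyond rs_by_spec_file is assumed here


def load_spec_or_empty(lib_path):
    """Return the parsed specification, or an empty dict if the spec file is missing."""
    spec_filename = os.path.join(lib_path, '_requirements_', 'specification.py')
    try:
        return rspec.rs_by_spec_file(spec_filename)
    except FileNotFoundError:
        return {}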
@@ -251,7 +255,7 @@ def unittest_testrun(ut_folder, options):
     for key in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}):
         heading_dict[key] = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][jsonlog.SPEC_ITEM_DICT][key]['Heading']
     test_session = report.testSession(
-        ['__unittest__', 'root'],
+        ['__unittest__', 'root', config.lib.__name__],
         interpreter=interpreter_version,
         testcase_execution_level=execution_level,
         testrun_id='p%d' % sys.version_info[0],
@@ -279,9 +283,9 @@ def unittest_finalise(ut_folder):
     #
     output.print_action("Adding Lost Requirement Soul")
     data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_ITEMLIST] = []
-    for req_id in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}):
-        item = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][jsonlog.SPEC_ITEM_DICT][req_id]
-        if item['system_type_uid'] == '_MR7eNHYYEem_kd-7nxt1sg':
+    for req_id in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(rspec.rspec.KEY_MAIN_ENTRIES, {}):
+        item = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][rspec.rspec.KEY_MAIN_ENTRIES][req_id]
+        if not req_id.lower().startswith("sec-"):
             testcase_available = False
             for testrun in data_collection[jsonlog.MAIN_KEY_TESTRUNS]:
                 if req_id in testrun[jsonlog.TRUN_TESTCASES]:
@@ -289,13 +293,13 @@ def unittest_finalise(ut_folder):
                     break
             if not testcase_available:
                 data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_ITEMLIST].append(req_id)
-                output.print_info('%s - "%s" has no corresponding testcase' % (item['system_uid'], item['Heading']), output.termcolors.FAIL)
+                output.print_info('%s - "%s" has no corresponding testcase' % (req_id, item['heading']), output.termcolors.FAIL)
     #
     output.print_action("Adding Lost Testcase Soul")
     data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST] = []
     for testrun in data_collection[jsonlog.MAIN_KEY_TESTRUNS]:
         for tc_id in testrun.get(jsonlog.TRUN_TESTCASES, {}):
-            if tc_id not in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}) and tc_id not in data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST]:
+            if tc_id not in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(rspec.rspec.KEY_MAIN_ENTRIES, {}) and tc_id not in data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST]:
                 data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST].append(tc_id)
                 output.print_info('"%s" has no corresponding testcase' % tc_id, output.termcolors.FAIL)
     #
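These loops cross-check requirement IDs against executed testcases (and testcase IDs against the specification) to collect the "lost souls". A self-contained sketch of the same bookkeeping over plain dictionaries; all names and keys here are hypothetical stand-ins rather than the jsonlog/rspec constants:

def find_lost_souls(spec_entries, testruns):
    """Return (requirements without a testcase, testcase IDs without a requirement)."""
    lost_requirements = [
        req_id for req_id in spec_entries
        if not req_id.lower().startswith("sec-")
        and not any(req_id in testrun.get('testcases', {}) for testrun in testruns)
    ]
    lost_testcases = []
    for testrun in testruns:
        for tc_id in testrun.get('testcases', {}):
            if tc_id not in spec_entries and tc_id not in lost_testcases:
                lost_testcases.append(tc_id)
    return lost_requirements, lost_testcases


# One requirement without a test, one test without a requirement:
spec = {'REQ-1': {}, 'REQ-2': {}, 'SEC-intro': {}}
runs = [{'testcases': {'REQ-1': {}, 'REQ-99': {}}}]
assert find_lost_souls(spec, runs) == (['REQ-2'], ['REQ-99'])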
@@ -363,7 +367,8 @@ def unittest_status(ut_folder):
     output.print_header('Checking GIT repository status')
     # GIT FETCH
     output.print_action('Fetching repository from server...')
-    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch", cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch",
+                               cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     stderroutput = process.communicate()[1]
     if stderroutput == b'':
         output.print_info(output.STATUS_SUCCESS)
@@ -374,7 +379,8 @@ def unittest_status(ut_folder):
     output.print_info(jsonlog.status_git(ut_folder))
     # SUBMODULES
     output.print_action('Analysing submodule status...')
-    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status", cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status",
+                               cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     stdoutput, stderroutput = process.communicate()
     if stderroutput == b'':
         module = None
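Both subprocess hunks above only re-wrap the long Popen calls; the commands themselves are unchanged. For reference, the LANGUAGE prefix embedded in the command string could also be passed through the environment. A sketch of that alternative, not what the commit does:

import os
import subprocess


def git_submodule_foreach(ut_folder, git_command):
    """Run 'git submodule foreach git <git_command>' with untranslated output, return (stdout, stderr)."""
    env = dict(os.environ, LANGUAGE='en_US.UTF-8')  # force English git messages
    process = subprocess.Popen('git submodule foreach git %s' % git_command,
                               cwd=ut_folder, shell=True, env=env,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return process.communicate()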
@@ -37,11 +37,11 @@ venv3:
	venv3/bin/pip install -r requirements.txt

clean: venv3
-	@echo "\033[1;33mCleaning up unittest...\e[0m"
-	@echo "\e[1m * Testresults from last testrun\e[0m"
+	@echo -e "\033[1;33mCleaning up unittest...\e[0m"
+	@echo -e "\e[1m * Testresults from last testrun\e[0m"
	@ls testresults | xargs -i echo "  testresults/{}"
	@ls testresults | xargs -i rm -f "testresults/{}"
-	@echo "\e[1m * Collected coverage information\e[0m"
+	@echo -e "\e[1m * Collected coverage information\e[0m"
	@$(COV3_CMD) erase

cleanall: clean
templates/reqif/heading.tex (deleted, 5 lines)
@@ -1,5 +0,0 @@
-{%- import 'macros.tex' as macros %}
-{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}}
-{%- if 'Description' in item and item.Description != '' %}
-{{ item.Description }}
-{%- endif %}
templates/reqif/requirement.tex (deleted, 16 lines)
@@ -1,16 +0,0 @@
-{%- import 'macros.tex' as macros %}
-{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}}
-{{ '\\label{%s%s}' % (labeltype, item.system_uid)}}
-{%- if 'Description' in item and item.Description != '' %}
-\paragraph{Description}\mbox{}\\
-{{ item.Description }}
-{%- endif %}
-{%- if 'ReasonForImplementation' in item and item.ReasonForImplementation != '' %}
-\paragraph{Reason for the implementation}\mbox{}\\
-{{ item.ReasonForImplementation }}
-{%- endif %}
-{%- if 'Fitcriterion' in item and item.Fitcriterion != '' %}
-\paragraph{Fitcriterion}\mbox{}\\
-{{ item.Fitcriterion }}
-{%- endif %}
-
templates/rspec/heading.tex (new file, 5 lines)
@@ -0,0 +1,5 @@
+{%- import 'macros.tex' as macros %}
+{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.heading))}}
+{%- if 'description' in item and item.description != '' %}
+{{ item.description }}
+{%- endif %}
templates/rspec/requirement.tex (new file, 16 lines)
@@ -0,0 +1,16 @@
+{%- import 'macros.tex' as macros %}
+{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.heading))}}
+{{ '\\label{%s%s}' % (labeltype, item.system_uid)}}
+{%- if 'description' in item and item.description != '' %}
+\paragraph{Description}\mbox{}\\
+{{ item.description }}
+{%- endif %}
+{%- if 'reason' in item and item.reason != '' %}
+\paragraph{Reason for the implementation}\mbox{}\\
+{{ item.reason }}
+{%- endif %}
+{%- if 'fitcriterion' in item and item.fitcriterion != '' %}
+\paragraph{Fitcriterion}\mbox{}\\
+{{ item.fitcriterion }}
+{%- endif %}
+
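The new rspec templates expect lower-case item keys (heading, description, reason, fitcriterion) instead of the CamelCase reqif attributes. A minimal rendering sketch with jinja2; the item dict, the template directory and the calling code are hypothetical, only the template itself comes from this commit:

import jinja2

item = {
    'heading': 'Parser accepts empty input',          # hypothetical requirement
    'system_uid': 'REQ-0001',
    'description': 'An empty string is parsed without raising an exception.',
}

env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
tex = env.get_template('rspec/requirement.tex').render(
    item=item, sectype='subsubsection', labeltype='item:')
print(tex)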
@@ -29,44 +29,43 @@
{%- endwith %}

\newpage
-{%- if data.specification.get('item_dict', {})|length >0 %}
+{%- if data.specification.get('entries', {})|length >0 %}
\section{Tested Requirements}
-{%- for item_id in data.specification.uid_list_sorted %}
-{%- if item_id not in data.lost_souls.item_list %}
-{%- with item = data.specification.item_dict[item_id] %}
-{%- if item.system_type_uid == '_4-K5EHYYEem_kd-7nxt1sg' %}
-{%- with sectype = 'subsection' %}
-{%- include 'reqif/heading.tex' %}
-{%- endwith %}
-{%- elif item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' %}
-{%- with sectype = 'subsubsection', labeltype = 'item:' %}
-{%- include 'reqif/requirement.tex' %}
-{%- endwith %}
-{%- if item_id not in data.lost_souls.item_list %}
-{%- for testrun in data.testrun_list %}
-{%- if item.system_uid in testrun.testcases %}
-{%- with testcase = testrun.testcases[item.system_uid] %}
-{%- include 'test/case_short.tex' %}
-{%- endwith %}
-{%- else %}
+{%- for item_id in data.specification.sections %}
+{%- with item = data.specification.entries[item_id] %}
+{%- with sectype = 'subsection' %}
+{%- include 'rspec/heading.tex' %}
+{%- endwith %}
+{%- endwith %}
+{%- for req_id in data.specification.entries[item_id].childs %}
+{%- with item = data.specification.entries[req_id] %}
+{%- if req_id not in data.lost_souls.item_list %}
+{%- for testrun in data.testrun_list %}
+{%- if req_id in testrun.testcases %}
+{%- with sectype = 'subsubsection' %}
+{%- include 'rspec/requirement.tex' %}
+{%- endwith %}
+{%- with testcase = testrun.testcases[req_id] %}
+{%- include 'test/case_short.tex' %}
+{%- endwith %}
+{%- else %}
\textcolor{orange}{\bf No testresults available!}
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endwith %}
{%- endif %}
{%- endfor %}
{%- endfor %}
{%- endif %}

{% if data.lost_souls.item_list|length > 0 %}
\newpage
\section{\textcolor{red}{Requirements with no corresponding Testcase}}
-{% for item_id in data.specification.uid_list_sorted %}
-{% with item = data.specification.item_dict[item_id] %}
-{% if item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' and item_id in data.lost_souls.item_list %}
+{% for item_id in data.specification.entries %}
+{% with item = data.specification.entries[item_id] %}
+{% if item_id in data.lost_souls.item_list %}
{%- with sectype = 'subsection', labeltype = 'lost_soul:' %}
-{% include 'reqif/requirement.tex' %}
+{% include 'rspec/requirement.tex' %}
{%- endwith %}
{% endif %}
{% endwith %}
@@ -101,7 +100,7 @@
{%- if testcase.levelno >= max_level and testcase.levelno <= absmax_level%}
{%- if item %}
{%- with sectype = 'subsubsection', labeltype = 'testcase:' + testrun.testrun_id + '__' %}
-{%- include 'reqif/requirement.tex' %}
+{%- include 'rspec/requirement.tex' %}
{%- endwith %}
{%- else %}
\subsubsection{ {{ macros.latex_filter(testcase.message) }} }\label{testcase:{{testrun.testrun_id}}__{{testcase.message}}}
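The rewritten report loop walks a two-level specification: data.specification.sections lists the top-level section IDs, each entry can name requirement IDs in childs, and every ID resolves through data.specification.entries. A rough sketch of that assumed shape with hypothetical IDs:

# Field names beyond 'sections', 'entries' and 'childs' are not taken from the diff.
specification = {
    'sections': ['SEC-parser'],
    'entries': {
        'SEC-parser': {'heading': 'Parser', 'childs': ['REQ-0001']},
        'REQ-0001': {'heading': 'Parser accepts empty input', 'childs': []},
    },
}

for section_id in specification['sections']:
    section = specification['entries'][section_id]
    print(section['heading'])
    for req_id in section['childs']:
        print('    ' + specification['entries'][req_id]['heading'])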
test.py (88 lines changed)
@@ -22,8 +22,20 @@ def __report_result__(result, description, data_filter=repr):
     logger.debug('Result (%s): %s (%s)', description, __get_repr__(result, data_filter), repr(type(result)))


+def __report_expectation__(compare, expectation, description, data_filter=repr):
+    logger.debug('Expectation (%s): result %s %s (%s)', description, compare, __get_repr__(expectation, data_filter), repr(type(expectation)))
+
+
 def __report_expectation_equivalency__(expectation, description, data_filter=repr):
-    logger.debug('Expectation (%s): result = %s (%s)', description, __get_repr__(expectation, data_filter), repr(type(expectation)))
+    __report_expectation__("=", expectation, description, data_filter=repr)
+
+
+def __report_expectation_greater__(expectation, description, data_filter=repr):
+    __report_expectation__(">", expectation, description, data_filter=repr)
+
+
+def __report_expectation_less__(expectation, description, data_filter=repr):
+    __report_expectation__("<", expectation, description, data_filter=repr)


 def __report_expectation_inlist__(expectation, description, data_filter=repr):
@@ -124,6 +136,34 @@ def __equivalent__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
     return log_lvl


+def __less__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
+    report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
+    log_lvl = REPORT_LEVEL_PASS
+    if result >= expectation:
+        log_lvl = REPORT_LEVEL_FAIL
+        logger.error('Content %s is incorrect' + (' for %s' % dict_key if dict_key != '' else '') + '.' + report_comment_fail, __get_repr__(result))
+    if type(result) != type(expectation):
+        if log_lvl < REPORT_LEVEL_INSPECT:
+            log_lvl = REPORT_LEVEL_INSPECT
+        logger.warning('Type %s is NOT %s%s (%s). ' + report_comment_fail.strip() or '', __get_repr__(type(result)),
+                       __get_repr__(type(expectation)), (' for %s' % dict_key if dict_key != '' else ''), __get_repr__(result))
+    return log_lvl
+
+
+def __greater__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
+    report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
+    log_lvl = REPORT_LEVEL_PASS
+    if result <= expectation:
+        log_lvl = REPORT_LEVEL_FAIL
+        logger.error('Content %s is incorrect' + (' for %s' % dict_key if dict_key != '' else '') + '.' + report_comment_fail, __get_repr__(result))
+    if type(result) != type(expectation):
+        if log_lvl < REPORT_LEVEL_INSPECT:
+            log_lvl = REPORT_LEVEL_INSPECT
+        logger.warning('Type %s is NOT %s%s (%s). ' + report_comment_fail.strip() or '', __get_repr__(type(result)),
+                       __get_repr__(type(expectation)), (' for %s' % dict_key if dict_key != '' else ''), __get_repr__(result))
+    return log_lvl
+
+
 def equivalency_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
     """
     Routine to check values for equivalency inside a test run and report to a testCaseLogger.
@@ -147,6 +187,52 @@ def equivalency_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
     return report_level


+def less_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
+    """
+    Routine to check result > expectation inside a test run and report to a testCaseLogger.
+
+    :param result: The result of a test execution of a module
+    :type result: All types are supported
+    :param expectation: The expected value (shall be equivalent to result)
+    :type expectation: All types are supported
+    :param description: A descrition of the result. It will be reported like "xxx is correct." Example: descrition="stringrepresentation created by modulename"
+    :type description: str
+    :param report_comment_fail: Comment for a failed Testexecution. Will be added in brakets after the Result-Text.
+    :type report_comment_fail: str
+    """
+    __report_result__(result, description, data_filter=data_filter)
+    __report_expectation_less__(expectation, description, data_filter=data_filter)
+    report_level = __less__(result, expectation, report_comment_fail=report_comment_fail, dict_key='result')
+    if report_level == REPORT_LEVEL_PASS:
+        tcl.log(report_level, description + ' is greater expectation (Content %s and Type is %s).', data_filter(result), repr(type(result)))
+    else:
+        tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
+    return report_level
+
+
+def greater_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
+    """
+    Routine to check result > expectation inside a test run and report to a testCaseLogger.
+
+    :param result: The result of a test execution of a module
+    :type result: All types are supported
+    :param expectation: The expected value (shall be equivalent to result)
+    :type expectation: All types are supported
+    :param description: A descrition of the result. It will be reported like "xxx is correct." Example: descrition="stringrepresentation created by modulename"
+    :type description: str
+    :param report_comment_fail: Comment for a failed Testexecution. Will be added in brakets after the Result-Text.
+    :type report_comment_fail: str
+    """
+    __report_result__(result, description, data_filter=data_filter)
+    __report_expectation_greater__(expectation, description, data_filter=data_filter)
+    report_level = __greater__(result, expectation, report_comment_fail=report_comment_fail, dict_key='result')
+    if report_level == REPORT_LEVEL_PASS:
+        tcl.log(report_level, description + ' is greater expectation (Content %s and Type is %s).', data_filter(result), repr(type(result)))
+    else:
+        tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
+    return report_level
+
+
 class equivalency_order_chk(object):
     def __init__(self, ordered_values, tcl, description='Variable', report_comment_fail=None):
         self._expected_values = ordered_values
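A rough usage sketch for the new less_chk and greater_chk helpers, assuming the module is importable as test and substituting a plain logging.Logger for the testCaseLogger normally passed as tcl:

import logging

import test  # the module changed above; assumed importable under this name

logging.basicConfig(level=logging.DEBUG)
tcl = logging.getLogger('testcase')  # stand-in for the framework's testCaseLogger

# 5 > 3 holds, so greater_chk reports REPORT_LEVEL_PASS via tcl.log(...)
level = test.greater_chk(5, 3, tcl, description='Number of retries')

# 5 < 3 does not hold, so less_chk reports a failure level instead
level = test.less_chk(5, 3, tcl, description='Number of retries')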