commit bc0672bec7
Author: Dirk Alders
Date:   2025-03-23 16:18:26 +01:00
8 changed files with 159 additions and 68 deletions

run.py

@@ -5,7 +5,7 @@ import fstools
 from unittest import jsonlog
 from unittest import output
 import report
-import reqif
+import rspec
 import json
 import os
@@ -139,12 +139,15 @@ class coverage_info(list):
     def __str__(self):
         rv = ''
         for module in self:
-            rv += '%s (%.1f%% - %s)\n' % (module.get(jsonlog.COVI_KEY_NAME), module.get(jsonlog.COVI_KEY_LINE_COVERAGE), module.get(jsonlog.COVI_KEY_FILEPATH))
+            rv += '%s (%.1f%% - %s)\n' % (module.get(jsonlog.COVI_KEY_NAME),
+                                          module.get(jsonlog.COVI_KEY_LINE_COVERAGE), module.get(jsonlog.COVI_KEY_FILEPATH))
             for py_file in module.get(jsonlog.COVI_KEY_FILES):
-                rv += ' %s (%.1f%% - %s)\n' % (py_file.get(jsonlog.COVI_KEY_NAME), py_file.get(jsonlog.COVI_KEY_LINE_COVERAGE), py_file.get(jsonlog.COVI_KEY_FILEPATH))
+                rv += ' %s (%.1f%% - %s)\n' % (py_file.get(jsonlog.COVI_KEY_NAME),
+                                               py_file.get(jsonlog.COVI_KEY_LINE_COVERAGE), py_file.get(jsonlog.COVI_KEY_FILEPATH))
                 for fragment in py_file.get(self.KEY_FRAGMENTS):
                     if fragment.get(self.KEY_END_LINE) is not None:
-                        rv += ' %d - %d: %s\n' % (fragment.get(self.KEY_START_LINE), fragment.get(self.KEY_END_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
+                        rv += ' %d - %d: %s\n' % (fragment.get(self.KEY_START_LINE),
+                                                  fragment.get(self.KEY_END_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
                     else:
                         rv += ' %d - : %s\n' % (fragment.get(self.KEY_START_LINE), repr(fragment.get(self.COVERAGE_STATE)))
         return rv
@@ -211,12 +214,13 @@ def unittest_prepare(ut_folder):
     testobject_info[jsonlog.TOBI_STATE] = jsonlog.TOBI_STATE_RELEASED if config.release_unittest_version == unittest_info[jsonlog.UTEI_VERSION] else jsonlog.TOBI_STATE_IN_DEVELOPMENT
     testobject_info[jsonlog.TOBI_DEPENDENCIES] = []
     for dependency in config.lib.__DEPENDENCIES__:
-        testobject_info[jsonlog.TOBI_DEPENDENCIES].append((dependency, jsonlog.module_uid(os.path.join(jsonlog.get_ut_src_folder(ut_folder), dependency))))
+        testobject_info[jsonlog.TOBI_DEPENDENCIES].append(
+            (dependency, jsonlog.module_uid(os.path.join(jsonlog.get_ut_src_folder(ut_folder), dependency))))
     #
-    spec_filename = os.path.join(ut_folder, 'requirements', 'specification.reqif')
+    spec_filename = os.path.join(config.lib_path, '_requirements_', 'specification.py')
     output.print_action("Adding Requirement Specification from %s" % spec_filename)
     try:
-        spec = reqif.reqif_dict(spec_filename, 'Heading', 'Software Specification')
+        spec = rspec.rs_by_spec_file(spec_filename)
     except FileNotFoundError:
         output.print_info(output.STATUS_FAILED)
         spec = {}
@@ -251,7 +255,7 @@ def unittest_testrun(ut_folder, options):
     for key in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}):
         heading_dict[key] = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][jsonlog.SPEC_ITEM_DICT][key]['Heading']
     test_session = report.testSession(
-        ['__unittest__', 'root'],
+        ['__unittest__', 'root', config.lib.__name__],
         interpreter=interpreter_version,
         testcase_execution_level=execution_level,
         testrun_id='p%d' % sys.version_info[0],
@@ -279,9 +283,9 @@ def unittest_finalise(ut_folder):
     #
     output.print_action("Adding Lost Requirement Soul")
     data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_ITEMLIST] = []
-    for req_id in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}):
-        item = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][jsonlog.SPEC_ITEM_DICT][req_id]
-        if item['system_type_uid'] == '_MR7eNHYYEem_kd-7nxt1sg':
+    for req_id in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(rspec.rspec.KEY_MAIN_ENTRIES, {}):
+        item = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][rspec.rspec.KEY_MAIN_ENTRIES][req_id]
+        if not req_id.lower().startswith("sec-"):
             testcase_available = False
             for testrun in data_collection[jsonlog.MAIN_KEY_TESTRUNS]:
                 if req_id in testrun[jsonlog.TRUN_TESTCASES]:
@@ -289,13 +293,13 @@ def unittest_finalise(ut_folder):
                     break
             if not testcase_available:
                 data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_ITEMLIST].append(req_id)
-                output.print_info('%s - "%s" has no corresponding testcase' % (item['system_uid'], item['Heading']), output.termcolors.FAIL)
+                output.print_info('%s - "%s" has no corresponding testcase' % (req_id, item['heading']), output.termcolors.FAIL)
     #
     output.print_action("Adding Lost Testcase Soul")
     data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST] = []
     for testrun in data_collection[jsonlog.MAIN_KEY_TESTRUNS]:
         for tc_id in testrun.get(jsonlog.TRUN_TESTCASES, {}):
-            if tc_id not in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}) and tc_id not in data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST]:
+            if tc_id not in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(rspec.rspec.KEY_MAIN_ENTRIES, {}) and tc_id not in data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST]:
                 data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST].append(tc_id)
                 output.print_info('"%s" has no corresponding testcase' % tc_id, output.termcolors.FAIL)
     #
@@ -363,7 +367,8 @@ def unittest_status(ut_folder):
     output.print_header('Checking GIT repository status')
     # GIT FETCH
     output.print_action('Fetching repository from server...')
-    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch", cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch",
+                               cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     stderroutput = process.communicate()[1]
     if stderroutput == b'':
         output.print_info(output.STATUS_SUCCESS)
@@ -374,7 +379,8 @@ def unittest_status(ut_folder):
     output.print_info(jsonlog.status_git(ut_folder))
     # SUBMODULES
     output.print_action('Analysing submodule status...')
-    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status", cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status",
+                               cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     stdoutput, stderroutput = process.communicate()
     if stderroutput == b'':
         module = None


@@ -37,11 +37,11 @@ venv3:
 	venv3/bin/pip install -r requirements.txt
 
 clean: venv3
-	@echo "\033[1;33mCleaning up unittest...\e[0m"
-	@echo "\e[1m * Testresults from last testrun\e[0m"
+	@echo -e "\033[1;33mCleaning up unittest...\e[0m"
+	@echo -e "\e[1m * Testresults from last testrun\e[0m"
 	@ls testresults | xargs -i echo " testresults/{}"
 	@ls testresults | xargs -i rm -f "testresults/{}"
-	@echo "\e[1m * Collected coverage information\e[0m"
+	@echo -e "\e[1m * Collected coverage information\e[0m"
 	@$(COV3_CMD) erase
 
 cleanall: clean


@@ -1,5 +0,0 @@
-{%- import 'macros.tex' as macros %}
-{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}}
-{%- if 'Description' in item and item.Description != '' %}
-{{ item.Description }}
-{%- endif %}


@@ -1,16 +0,0 @@
-{%- import 'macros.tex' as macros %}
-{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}}
-{{ '\\label{%s%s}' % (labeltype, item.system_uid)}}
-{%- if 'Description' in item and item.Description != '' %}
-\paragraph{Description}\mbox{}\\
-{{ item.Description }}
-{%- endif %}
-{%- if 'ReasonForImplementation' in item and item.ReasonForImplementation != '' %}
-\paragraph{Reason for the implementation}\mbox{}\\
-{{ item.ReasonForImplementation }}
-{%- endif %}
-{%- if 'Fitcriterion' in item and item.Fitcriterion != '' %}
-\paragraph{Fitcriterion}\mbox{}\\
-{{ item.Fitcriterion }}
-{%- endif %}


@@ -0,0 +1,5 @@
+{%- import 'macros.tex' as macros %}
+{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.heading))}}
+{%- if 'description' in item and item.description != '' %}
+{{ item.description }}
+{%- endif %}


@@ -0,0 +1,16 @@
+{%- import 'macros.tex' as macros %}
+{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.heading))}}
+{{ '\\label{%s%s}' % (labeltype, item.system_uid)}}
+{%- if 'description' in item and item.description != '' %}
+\paragraph{Description}\mbox{}\\
+{{ item.description }}
+{%- endif %}
+{%- if 'reason' in item and item.reason != '' %}
+\paragraph{Reason for the implementation}\mbox{}\\
+{{ item.reason }}
+{%- endif %}
+{%- if 'fitcriterion' in item and item.fitcriterion != '' %}
+\paragraph{Fitcriterion}\mbox{}\\
+{{ item.fitcriterion }}
+{%- endif %}


@@ -29,44 +29,43 @@
 {%- endwith %}
 \newpage
-{%- if data.specification.get('item_dict', {})|length >0 %}
+{%- if data.specification.get('entries', {})|length >0 %}
 \section{Tested Requirements}
-{%- for item_id in data.specification.uid_list_sorted %}
-{%- if item_id not in data.lost_souls.item_list %}
-{%- with item = data.specification.item_dict[item_id] %}
-{%- if item.system_type_uid == '_4-K5EHYYEem_kd-7nxt1sg' %}
-{%- with sectype = 'subsection' %}
-{%- include 'reqif/heading.tex' %}
-{%- endwith %}
-{%- elif item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' %}
-{%- with sectype = 'subsubsection', labeltype = 'item:' %}
-{%- include 'reqif/requirement.tex' %}
-{%- endwith %}
-{%- if item_id not in data.lost_souls.item_list %}
-{%- for testrun in data.testrun_list %}
-{%- if item.system_uid in testrun.testcases %}
-{%- with testcase = testrun.testcases[item.system_uid] %}
+{%- for item_id in data.specification.sections %}
+{%- with item = data.specification.entries[item_id] %}
+{%- with sectype = 'subsection' %}
+{%- include 'rspec/heading.tex' %}
+{%- endwith %}
+{%- endwith %}
+{%- for req_id in data.specification.entries[item_id].childs %}
+{%- with item = data.specification.entries[req_id] %}
+{%- if req_id not in data.lost_souls.item_list %}
+{%- for testrun in data.testrun_list %}
+{%- if req_id in testrun.testcases %}
+{%- with sectype = 'subsubsection' %}
+{%- include 'rspec/requirement.tex' %}
+{%- endwith %}
+{%- with testcase = testrun.testcases[req_id] %}
 {%- include 'test/case_short.tex' %}
 {%- endwith %}
 {%- else %}
 \textcolor{orange}{\bf No testresults available!}
 {%- endif %}
 {%- endfor %}
-{%- endif %}
 {%- endif %}
 {%- endwith %}
-{%- endif %}
+{%- endfor %}
 {%- endfor %}
 {%- endif %}
 {% if data.lost_souls.item_list|length > 0 %}
 \newpage
 \section{\textcolor{red}{Requirements with no corresponding Testcase}}
-{% for item_id in data.specification.uid_list_sorted %}
-{% with item = data.specification.item_dict[item_id] %}
-{% if item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' and item_id in data.lost_souls.item_list %}
+{% for item_id in data.specification.entries %}
+{% with item = data.specification.entries[item_id] %}
+{% if item_id in data.lost_souls.item_list %}
 {%- with sectype = 'subsection', labeltype = 'lost_soul:' %}
-{% include 'reqif/requirement.tex' %}
+{% include 'rspec/requirement.tex' %}
 {%- endwith %}
 {% endif %}
 {% endwith %}
@@ -101,7 +100,7 @@
 {%- if testcase.levelno >= max_level and testcase.levelno <= absmax_level%}
 {%- if item %}
 {%- with sectype = 'subsubsection', labeltype = 'testcase:' + testrun.testrun_id + '__' %}
-{%- include 'reqif/requirement.tex' %}
+{%- include 'rspec/requirement.tex' %}
 {%- endwith %}
 {%- else %}
 \subsubsection{ {{ macros.latex_filter(testcase.message) }} }\label{testcase:{{testrun.testrun_id}}__{{testcase.message}}}
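
Read together, the new rspec templates and the run.py changes above imply a specification data structure roughly like the sketch below. The key names ('entries', 'sections', 'childs', 'heading', 'description', 'reason', 'fitcriterion') and the 'sec-' id prefix for sections are taken from this diff; the concrete ids and texts are invented for illustration and are not the actual rspec file format.

# Inferred sketch only, not part of the commit.
specification = {
    'sections': ['sec-interface'],            # ids of the top-level sections
    'entries': {
        'sec-interface': {                    # a section entry
            'heading': 'Interface',
            'description': 'Requirements on the public interface.',
            'childs': ['REQ-0001'],           # requirement ids below this section
        },
        'REQ-0001': {                         # a requirement entry
            'heading': 'Return value',
            'description': 'The method shall return a dictionary.',
            'reason': 'Callers rely on key based access.',
            'fitcriterion': 'A unittest checks the returned type.',
        },
    },
}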

test.py

@@ -22,8 +22,20 @@ def __report_result__(result, description, data_filter=repr):
     logger.debug('Result (%s): %s (%s)', description, __get_repr__(result, data_filter), repr(type(result)))
 
 
+def __report_expectation__(compare, expectation, description, data_filter=repr):
+    logger.debug('Expectation (%s): result %s %s (%s)', description, compare, __get_repr__(expectation, data_filter), repr(type(expectation)))
+
+
 def __report_expectation_equivalency__(expectation, description, data_filter=repr):
-    logger.debug('Expectation (%s): result = %s (%s)', description, __get_repr__(expectation, data_filter), repr(type(expectation)))
+    __report_expectation__("=", expectation, description, data_filter=data_filter)
+
+
+def __report_expectation_greater__(expectation, description, data_filter=repr):
+    __report_expectation__(">", expectation, description, data_filter=data_filter)
+
+
+def __report_expectation_less__(expectation, description, data_filter=repr):
+    __report_expectation__("<", expectation, description, data_filter=data_filter)
 
 
 def __report_expectation_inlist__(expectation, description, data_filter=repr):
@@ -124,6 +136,34 @@ def __equivalent__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
     return log_lvl
 
 
+def __less__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
+    report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
+    log_lvl = REPORT_LEVEL_PASS
+    if result >= expectation:
+        log_lvl = REPORT_LEVEL_FAIL
+        logger.error('Content %s is incorrect' + (' for %s' % dict_key if dict_key != '' else '') + '.' + report_comment_fail, __get_repr__(result))
+    if type(result) != type(expectation):
+        if log_lvl < REPORT_LEVEL_INSPECT:
+            log_lvl = REPORT_LEVEL_INSPECT
+        logger.warning('Type %s is NOT %s%s (%s). ' + report_comment_fail.strip() or '', __get_repr__(type(result)),
+                       __get_repr__(type(expectation)), (' for %s' % dict_key if dict_key != '' else ''), __get_repr__(result))
+    return log_lvl
+
+
+def __greater__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
+    report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
+    log_lvl = REPORT_LEVEL_PASS
+    if result <= expectation:
+        log_lvl = REPORT_LEVEL_FAIL
+        logger.error('Content %s is incorrect' + (' for %s' % dict_key if dict_key != '' else '') + '.' + report_comment_fail, __get_repr__(result))
+    if type(result) != type(expectation):
+        if log_lvl < REPORT_LEVEL_INSPECT:
+            log_lvl = REPORT_LEVEL_INSPECT
+        logger.warning('Type %s is NOT %s%s (%s). ' + report_comment_fail.strip() or '', __get_repr__(type(result)),
+                       __get_repr__(type(expectation)), (' for %s' % dict_key if dict_key != '' else ''), __get_repr__(result))
+    return log_lvl
+
+
 def equivalency_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
     """
     Routine to check values for equivalency inside a test run and report to a testCaseLogger.
@@ -147,6 +187,52 @@ def equivalency_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
     return report_level
 
 
+def less_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
+    """
+    Routine to check result < expectation inside a test run and report to a testCaseLogger.
+
+    :param result: The result of a test execution of a module
+    :type result: All types are supported
+    :param expectation: The expected value (result shall be less than this value)
+    :type expectation: All types are supported
+    :param description: A description of the result. It will be reported like "xxx is correct." Example: description="stringrepresentation created by modulename"
+    :type description: str
+    :param report_comment_fail: Comment for a failed test execution. Will be added in brackets after the result text.
+    :type report_comment_fail: str
+    """
+    __report_result__(result, description, data_filter=data_filter)
+    __report_expectation_less__(expectation, description, data_filter=data_filter)
+    report_level = __less__(result, expectation, report_comment_fail=report_comment_fail, dict_key='result')
+    if report_level == REPORT_LEVEL_PASS:
+        tcl.log(report_level, description + ' is less than expectation (Content %s and Type is %s).', data_filter(result), repr(type(result)))
+    else:
+        tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
+    return report_level
+
+
+def greater_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
+    """
+    Routine to check result > expectation inside a test run and report to a testCaseLogger.
+
+    :param result: The result of a test execution of a module
+    :type result: All types are supported
+    :param expectation: The expected value (result shall be greater than this value)
+    :type expectation: All types are supported
+    :param description: A description of the result. It will be reported like "xxx is correct." Example: description="stringrepresentation created by modulename"
+    :type description: str
+    :param report_comment_fail: Comment for a failed test execution. Will be added in brackets after the result text.
+    :type report_comment_fail: str
+    """
+    __report_result__(result, description, data_filter=data_filter)
+    __report_expectation_greater__(expectation, description, data_filter=data_filter)
+    report_level = __greater__(result, expectation, report_comment_fail=report_comment_fail, dict_key='result')
+    if report_level == REPORT_LEVEL_PASS:
+        tcl.log(report_level, description + ' is greater than expectation (Content %s and Type is %s).', data_filter(result), repr(type(result)))
+    else:
+        tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
+    return report_level
+
+
 class equivalency_order_chk(object):
     def __init__(self, ordered_values, tcl, description='Variable', report_comment_fail=None):
         self._expected_values = ordered_values
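
For orientation, a minimal usage sketch of the new less_chk()/greater_chk() helpers from the test.py hunk above. It only assumes that the object passed as tcl exposes a log(level, message, *args) method, as the tcl.log() calls in the diff do; the stand-in logging.Logger and the example values are hypothetical.

# Hypothetical usage sketch, not part of the commit.
import logging

import test  # the test.py module extended in this commit

logging.basicConfig(level=logging.DEBUG)
tcl = logging.getLogger('tcl')  # stand-in for the framework's testCaseLogger

runtime = 0.42
test.less_chk(runtime, 1.0, tcl, description='Runtime', report_comment_fail='Slower than the 1 s budget')
test.greater_chk(runtime, 0.0, tcl, description='Runtime')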