From a73c8f8c3523df17eeddad6297d1454c16fc4552 Mon Sep 17 00:00:00 2001 From: Dirk Alders Date: Thu, 9 Feb 2023 07:56:47 +0100 Subject: [PATCH 1/5] greater_chk and less_chk added --- test.py | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 87 insertions(+), 1 deletion(-) diff --git a/test.py b/test.py index 6ee4977..68e7bda 100644 --- a/test.py +++ b/test.py @@ -22,8 +22,20 @@ def __report_result__(result, description, data_filter=repr): logger.debug('Result (%s): %s (%s)', description, __get_repr__(result, data_filter), repr(type(result))) +def __report_expectation__(compare, expectation, description, data_filter=repr): + logger.debug('Expectation (%s): result %s %s (%s)', description, compare, __get_repr__(expectation, data_filter), repr(type(expectation))) + + def __report_expectation_equivalency__(expectation, description, data_filter=repr): - logger.debug('Expectation (%s): result = %s (%s)', description, __get_repr__(expectation, data_filter), repr(type(expectation))) + __report_expectation__("=", expectation, description, data_filter=data_filter) + + +def __report_expectation_greater__(expectation, description, data_filter=repr): + __report_expectation__(">", expectation, description, data_filter=data_filter) + + +def __report_expectation_less__(expectation, description, data_filter=repr): + __report_expectation__("<", expectation, description, data_filter=data_filter) def __report_expectation_inlist__(expectation, description, data_filter=repr): @@ -124,6 +136,34 @@ def __equivalent__(result, expectation, report_comment_fail=None, dict_key='test return log_lvl +def __less__(result, expectation, report_comment_fail=None, dict_key='test_variable'): + report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else '' + log_lvl = REPORT_LEVEL_PASS + if result >= expectation: + log_lvl = REPORT_LEVEL_FAIL + logger.error('Content %s is incorrect' + (' for %s' % dict_key if dict_key != '' else '') + '.' 
+ report_comment_fail, __get_repr__(result)) + if type(result) != type(expectation): + if log_lvl < REPORT_LEVEL_INSPECT: + log_lvl = REPORT_LEVEL_INSPECT + logger.warning('Type %s is NOT %s%s (%s). ' + report_comment_fail.strip() or '', __get_repr__(type(result)), + __get_repr__(type(expectation)), (' for %s' % dict_key if dict_key != '' else ''), __get_repr__(result)) + return log_lvl + + +def __greater__(result, expectation, report_comment_fail=None, dict_key='test_variable'): + report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else '' + log_lvl = REPORT_LEVEL_PASS + if result <= expectation: + log_lvl = REPORT_LEVEL_FAIL + logger.error('Content %s is incorrect' + (' for %s' % dict_key if dict_key != '' else '') + '.' + report_comment_fail, __get_repr__(result)) + if type(result) != type(expectation): + if log_lvl < REPORT_LEVEL_INSPECT: + log_lvl = REPORT_LEVEL_INSPECT + logger.warning('Type %s is NOT %s%s (%s). ' + report_comment_fail.strip() or '', __get_repr__(type(result)), + __get_repr__(type(expectation)), (' for %s' % dict_key if dict_key != '' else ''), __get_repr__(result)) + return log_lvl + + def equivalency_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr): """ Routine to check values for equivalency inside a test run and report to a testCaseLogger. @@ -147,6 +187,52 @@ def equivalency_chk(result, expectation, tcl, description='Variable', report_com return report_level + +def less_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr): + """ + Routine to check result < expectation inside a test run and report to a testCaseLogger. + + :param result: The result of a test execution of a module + :type result: All types are supported + :param expectation: The expected value (shall be equivalent to result) + :type expectation: All types are supported + :param description: A descrition of the result. 
It will be reported like "xxx is correct." Example: descrition="stringrepresentation created by modulename" + :type description: str + :param report_comment_fail: Comment for a failed Testexecution. Will be added in brakets after the Result-Text. + :type report_comment_fail: str + """ + __report_result__(result, description, data_filter=data_filter) + __report_expectation_less__(expectation, description, data_filter=data_filter) + report_level = __less__(result, expectation, report_comment_fail=report_comment_fail, dict_key='result') + if report_level == REPORT_LEVEL_PASS: + tcl.log(report_level, description + ' is less than expectation (Content %s and Type is %s).', data_filter(result), repr(type(result))) + else: + tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.') + return report_level + + +def greater_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr): + """ + Routine to check result > expectation inside a test run and report to a testCaseLogger. + + :param result: The result of a test execution of a module + :type result: All types are supported + :param expectation: The expected value (shall be equivalent to result) + :type expectation: All types are supported + :param description: A descrition of the result. It will be reported like "xxx is correct." Example: descrition="stringrepresentation created by modulename" + :type description: str + :param report_comment_fail: Comment for a failed Testexecution. Will be added in brakets after the Result-Text. 
+ :type report_comment_fail: str + """ + __report_result__(result, description, data_filter=data_filter) + __report_expectation_greater__(expectation, description, data_filter=data_filter) + report_level = __greater__(result, expectation, report_comment_fail=report_comment_fail, dict_key='result') + if report_level == REPORT_LEVEL_PASS: + tcl.log(report_level, description + ' is greater expectation (Content %s and Type is %s).', data_filter(result), repr(type(result))) + else: + tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.') + return report_level + + class equivalency_order_chk(object): def __init__(self, ordered_values, tcl, description='Variable', report_comment_fail=None): self._expected_values = ordered_values From 9fc738e99ec0bb1cb3e081ebca4dcfd3049b5ad7 Mon Sep 17 00:00:00 2001 From: Dirk Alders Date: Thu, 8 Jun 2023 08:18:39 +0200 Subject: [PATCH 2/5] Makefileadaption for proper status output --- scripts/Makefile | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/scripts/Makefile b/scripts/Makefile index 9d8c07f..5544708 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -37,12 +37,13 @@ venv3: venv3/bin/pip install -r requirements.txt clean: venv3 - @echo "\033[1;33mCleaning up unittest...\e[0m" - @echo "\e[1m * Testresults from last testrun\e[0m" + @echo -e "\033[1;33mCleaning up unittest...\e[0m" + @echo -e "\e[1m * Testresults from last testrun\e[0m" @ls testresults | xargs -i echo " testresults/{}" @ls testresults | xargs -i rm -f "testresults/{}" - @echo "\e[1m * Collected coverage information\e[0m" + @echo -e "\e[1m * Collected coverage information\e[0m" @$(COV3_CMD) erase + @rm -r venv3 cleanall: clean @echo "\e[1m * Virtualenv\e[0m" From cb9b2ec388ea81afe9251da7cbec21cf9849d580 Mon Sep 17 00:00:00 2001 From: Dirk Alders Date: Thu, 8 Jun 2023 08:25:52 +0200 Subject: [PATCH 3/5] BugFix: clean rule in Makefile --- scripts/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git 
a/scripts/Makefile b/scripts/Makefile index 5544708..03eb888 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -43,7 +43,6 @@ clean: venv3 @ls testresults | xargs -i rm -f "testresults/{}" @echo -e "\e[1m * Collected coverage information\e[0m" @$(COV3_CMD) erase - @rm -r venv3 cleanall: clean @echo "\e[1m * Virtualenv\e[0m" From 5ec5cda68aa65b9f7f85ac7f8147dc7dd9140e1b Mon Sep 17 00:00:00 2001 From: Dirk Alders Date: Sun, 29 Sep 2024 22:18:14 +0200 Subject: [PATCH 4/5] Added tested library logs to unittest --- run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run.py b/run.py index c929a72..e70e17f 100644 --- a/run.py +++ b/run.py @@ -251,7 +251,7 @@ def unittest_testrun(ut_folder, options): for key in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}): heading_dict[key] = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][jsonlog.SPEC_ITEM_DICT][key]['Heading'] test_session = report.testSession( - ['__unittest__', 'root'], + ['__unittest__', 'root', config.lib.__name__], interpreter=interpreter_version, testcase_execution_level=execution_level, testrun_id='p%d' % sys.version_info[0], From 50f0174176b1b6f2e5da6f23f65e48d353466537 Mon Sep 17 00:00:00 2001 From: Dirk Alders Date: Mon, 17 Mar 2025 12:47:12 +0100 Subject: [PATCH 5/5] adaptions for change to rspec instead of reqif --- run.py | 34 +++++++++++--------- templates/reqif/heading.tex | 5 --- templates/reqif/requirement.tex | 16 ---------- templates/rspec/heading.tex | 5 +++ templates/rspec/requirement.tex | 16 ++++++++++ templates/unittest.tex | 55 ++++++++++++++++----------------- 6 files changed, 68 insertions(+), 63 deletions(-) delete mode 100644 templates/reqif/heading.tex delete mode 100644 templates/reqif/requirement.tex create mode 100644 templates/rspec/heading.tex create mode 100644 templates/rspec/requirement.tex diff --git a/run.py b/run.py index e70e17f..6374ff5 100644 --- a/run.py +++ b/run.py @@ -5,7 +5,7 @@ import fstools from unittest 
import jsonlog from unittest import output import report -import reqif +import rspec import json import os @@ -139,12 +139,15 @@ class coverage_info(list): def __str__(self): rv = '' for module in self: - rv += '%s (%.1f%% - %s)\n' % (module.get(jsonlog.COVI_KEY_NAME), module.get(jsonlog.COVI_KEY_LINE_COVERAGE), module.get(jsonlog.COVI_KEY_FILEPATH)) + rv += '%s (%.1f%% - %s)\n' % (module.get(jsonlog.COVI_KEY_NAME), + module.get(jsonlog.COVI_KEY_LINE_COVERAGE), module.get(jsonlog.COVI_KEY_FILEPATH)) for py_file in module.get(jsonlog.COVI_KEY_FILES): - rv += ' %s (%.1f%% - %s)\n' % (py_file.get(jsonlog.COVI_KEY_NAME), py_file.get(jsonlog.COVI_KEY_LINE_COVERAGE), py_file.get(jsonlog.COVI_KEY_FILEPATH)) + rv += ' %s (%.1f%% - %s)\n' % (py_file.get(jsonlog.COVI_KEY_NAME), + py_file.get(jsonlog.COVI_KEY_LINE_COVERAGE), py_file.get(jsonlog.COVI_KEY_FILEPATH)) for fragment in py_file.get(self.KEY_FRAGMENTS): if fragment.get(self.KEY_END_LINE) is not None: - rv += ' %d - %d: %s\n' % (fragment.get(self.KEY_START_LINE), fragment.get(self.KEY_END_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE))) + rv += ' %d - %d: %s\n' % (fragment.get(self.KEY_START_LINE), + fragment.get(self.KEY_END_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE))) else: rv += ' %d - : %s\n' % (fragment.get(self.KEY_START_LINE), repr(fragment.get(self.COVERAGE_STATE))) return rv @@ -211,12 +214,13 @@ def unittest_prepare(ut_folder): testobject_info[jsonlog.TOBI_STATE] = jsonlog.TOBI_STATE_RELEASED if config.release_unittest_version == unittest_info[jsonlog.UTEI_VERSION] else jsonlog.TOBI_STATE_IN_DEVELOPMENT testobject_info[jsonlog.TOBI_DEPENDENCIES] = [] for dependency in config.lib.__DEPENDENCIES__: - testobject_info[jsonlog.TOBI_DEPENDENCIES].append((dependency, jsonlog.module_uid(os.path.join(jsonlog.get_ut_src_folder(ut_folder), dependency)))) + testobject_info[jsonlog.TOBI_DEPENDENCIES].append( + (dependency, jsonlog.module_uid(os.path.join(jsonlog.get_ut_src_folder(ut_folder), dependency)))) # - 
spec_filename = os.path.join(ut_folder, 'requirements', 'specification.reqif') + spec_filename = os.path.join(config.lib_path, '_requirements_', 'specification.py') output.print_action("Adding Requirement Specification from %s" % spec_filename) try: - spec = reqif.reqif_dict(spec_filename, 'Heading', 'Software Specification') + spec = rspec.rs_by_spec_file(spec_filename) except FileNotFoundError: output.print_info(output.STATUS_FAILED) spec = {} @@ -279,9 +283,9 @@ def unittest_finalise(ut_folder): # output.print_action("Adding Lost Requirement Soul") data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_ITEMLIST] = [] - for req_id in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, {}): - item = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][jsonlog.SPEC_ITEM_DICT][req_id] - if item['system_type_uid'] == '_MR7eNHYYEem_kd-7nxt1sg': + for req_id in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(rspec.rspec.KEY_MAIN_ENTRIES, {}): + item = data_collection[jsonlog.MAIN_KEY_SPECIFICATION][rspec.rspec.KEY_MAIN_ENTRIES][req_id] + if not req_id.lower().startswith("sec-"): testcase_available = False for testrun in data_collection[jsonlog.MAIN_KEY_TESTRUNS]: if req_id in testrun[jsonlog.TRUN_TESTCASES]: @@ -289,13 +293,13 @@ def unittest_finalise(ut_folder): break if not testcase_available: data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_ITEMLIST].append(req_id) - output.print_info('%s - "%s" has no corresponding testcase' % (item['system_uid'], item['Heading']), output.termcolors.FAIL) + output.print_info('%s - "%s" has no corresponding testcase' % (req_id, item['heading']), output.termcolors.FAIL) # output.print_action("Adding Lost Testcase Soul") data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST] = [] for testrun in data_collection[jsonlog.MAIN_KEY_TESTRUNS]: for tc_id in testrun.get(jsonlog.TRUN_TESTCASES, {}): - if tc_id not in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(jsonlog.SPEC_ITEM_DICT, 
{}) and tc_id not in data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST]: + if tc_id not in data_collection[jsonlog.MAIN_KEY_SPECIFICATION].get(rspec.rspec.KEY_MAIN_ENTRIES, {}) and tc_id not in data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST]: data_collection[jsonlog.MAIN_KEY_LOST_SOULS][jsonlog.LOST_TESTCASELIST].append(tc_id) output.print_info('"%s" has no corresponding testcase' % tc_id, output.termcolors.FAIL) # @@ -363,7 +367,8 @@ def unittest_status(ut_folder): output.print_header('Checking GIT repository status') # GIT FETCH output.print_action('Fetching repository from server...') - process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch", cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch", + cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stderroutput = process.communicate()[1] if stderroutput == b'': output.print_info(output.STATUS_SUCCESS) @@ -374,7 +379,8 @@ def unittest_status(ut_folder): output.print_info(jsonlog.status_git(ut_folder)) # SUBMODULES output.print_action('Analysing submodule status...') - process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status", cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status", + cwd=ut_folder, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdoutput, stderroutput = process.communicate() if stderroutput == b'': module = None diff --git a/templates/reqif/heading.tex b/templates/reqif/heading.tex deleted file mode 100644 index a467805..0000000 --- a/templates/reqif/heading.tex +++ /dev/null @@ -1,5 +0,0 @@ -{%- import 'macros.tex' as macros %} -{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}} -{%- if 'Description' in item and 
item.Description != '' %} - {{ item.Description }} -{%- endif %} diff --git a/templates/reqif/requirement.tex b/templates/reqif/requirement.tex deleted file mode 100644 index 9c91340..0000000 --- a/templates/reqif/requirement.tex +++ /dev/null @@ -1,16 +0,0 @@ -{%- import 'macros.tex' as macros %} -{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}} -{{ '\\label{%s%s}' % (labeltype, item.system_uid)}} -{%- if 'Description' in item and item.Description != '' %} -\paragraph{Description}\mbox{}\\ -{{ item.Description }} -{%- endif %} -{%- if 'ReasonForImplementation' in item and item.ReasonForImplementation != '' %} -\paragraph{Reason for the implementation}\mbox{}\\ -{{ item.ReasonForImplementation }} -{%- endif %} -{%- if 'Fitcriterion' in item and item.Fitcriterion != '' %} -\paragraph{Fitcriterion}\mbox{}\\ -{{ item.Fitcriterion }} -{%- endif %} - diff --git a/templates/rspec/heading.tex b/templates/rspec/heading.tex new file mode 100644 index 0000000..0c2cc54 --- /dev/null +++ b/templates/rspec/heading.tex @@ -0,0 +1,5 @@ +{%- import 'macros.tex' as macros %} +{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.heading))}} +{%- if 'description' in item and item.description != '' %} + {{ item.description }} +{%- endif %} diff --git a/templates/rspec/requirement.tex b/templates/rspec/requirement.tex new file mode 100644 index 0000000..1b51a80 --- /dev/null +++ b/templates/rspec/requirement.tex @@ -0,0 +1,16 @@ +{%- import 'macros.tex' as macros %} +{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.heading))}} +{{ '\\label{%s%s}' % (labeltype, item.system_uid)}} +{%- if 'description' in item and item.description != '' %} +\paragraph{Description}\mbox{}\\ +{{ item.description }} +{%- endif %} +{%- if 'reason' in item and item.reason != '' %} +\paragraph{Reason for the implementation}\mbox{}\\ +{{ item.reason }} +{%- endif %} +{%- if 'fitcriterion' in item and item.fitcriterion != '' %} +\paragraph{Fitcriterion}\mbox{}\\ +{{ item.fitcriterion }} +{%- endif 
%} + diff --git a/templates/unittest.tex b/templates/unittest.tex index 4d1bc63..a104ecc 100644 --- a/templates/unittest.tex +++ b/templates/unittest.tex @@ -29,44 +29,43 @@ {%- endwith %} \newpage -{%- if data.specification.get('item_dict', {})|length >0 %} +{%- if data.specification.get('entries', {})|length >0 %} \section{Tested Requirements} - {%- for item_id in data.specification.uid_list_sorted %} - {%- if item_id not in data.lost_souls.item_list %} - {%- with item = data.specification.item_dict[item_id] %} - {%- if item.system_type_uid == '_4-K5EHYYEem_kd-7nxt1sg' %} - {%- with sectype = 'subsection' %} - {%- include 'reqif/heading.tex' %} - {%- endwith %} - {%- elif item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' %} - {%- with sectype = 'subsubsection', labeltype = 'item:' %} - {%- include 'reqif/requirement.tex' %} - {%- endwith %} - {%- if item_id not in data.lost_souls.item_list %} - {%- for testrun in data.testrun_list %} - {%- if item.system_uid in testrun.testcases %} - {%- with testcase = testrun.testcases[item.system_uid] %} - {%- include 'test/case_short.tex' %} - {%- endwith %} - {%- else %} + {%- for item_id in data.specification.sections %} + {%- with item = data.specification.entries[item_id] %} + {%- with sectype = 'subsection' %} + {%- include 'rspec/heading.tex' %} + {%- endwith %} + {%- endwith %} + {%- for req_id in data.specification.entries[item_id].childs %} + {%- with item = data.specification.entries[req_id] %} + {%- if req_id not in data.lost_souls.item_list %} + {%- for testrun in data.testrun_list %} + {%- if req_id in testrun.testcases %} + {%- with sectype = 'subsubsection' %} + {%- include 'rspec/requirement.tex' %} + {%- endwith %} + {%- with testcase = testrun.testcases[req_id] %} + {%- include 'test/case_short.tex' %} + {%- endwith %} + {%- else %} \textcolor{orange}{\bf No testresults available!} - {%- endif %} - {%- endfor %} - {%- endif %} + {%- endif %} + {%- endfor %} {%- endif %} {%- endwith %} - {%- endif %} + {%- 
endfor %} {%- endfor %} {%- endif %} {% if data.lost_souls.item_list|length > 0 %} \newpage \section{\textcolor{red}{Requirements with no corresponding Testcase}} - {% for item_id in data.specification.uid_list_sorted %} - {% with item = data.specification.item_dict[item_id] %} - {% if item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' and item_id in data.lost_souls.item_list %} + {% for item_id in data.specification.entries %} + {% with item = data.specification.entries[item_id] %} + {% if item_id in data.lost_souls.item_list %} {%- with sectype = 'subsection', labeltype = 'lost_soul:' %} - {% include 'reqif/requirement.tex' %} + {% include 'rspec/requirement.tex' %} {%- endwith %} {% endif %} {% endwith %} @@ -101,7 +100,7 @@ {%- if testcase.levelno >= max_level and testcase.levelno <= absmax_level%} {%- if item %} {%- with sectype = 'subsubsection', labeltype = 'testcase:' + testrun.testrun_id + '__' %} - {%- include 'reqif/requirement.tex' %} + {%- include 'rspec/requirement.tex' %} {%- endwith %} {%- else %} \subsubsection{ {{ macros.latex_filter(testcase.message) }} }\label{testcase:{{testrun.testrun_id}}__{{testcase.message}}}