Browse Source

Initial unittest implementation

master
Dirk Alders 4 years ago
parent
commit
547fb39ec5

+ 8
- 0
__init__.py View File

@@ -0,0 +1,8 @@
1
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Package init for the "unittest" pylibs module.
#
# NOTE(review): these absolute imports name this package itself
# ("unittest"), which shadows the stdlib unittest module — presumably
# this relies on Python 2 implicit relative imports; confirm behavior
# under Python 3 (would need "from . import module_status" etc.).
from unittest import module_status
from unittest import run
from unittest import test

# Sibling pylibs packages this module depends on.
__DEPENDENCIES__ = ['fstools', 'report']

+ 176
- 0
module_status.py View File

@@ -0,0 +1,176 @@
1
+#!/usr/bin/env python
2
+# -*- coding: utf-8 -*-
3
+#
4
+
5
+import os
6
+import json
7
+import fstools
8
+import re
9
+import subprocess
10
+
11
+
12
class termcolors:
    # ANSI escape sequences used to colorize terminal output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


# Unittest / documentation / specification states
STATUS_RELEASED = 'RELEASED'
STATUS_AVAILABLE = 'AVAILABLE'
STATUS_IN_WORK = 'IN_WORK'
STATUS_EXISTS = 'EXISTS'
STATUS_OLD = 'OLD'
STATUS_MISSING = 'MISSING'
# Git states
STATUS_CLEAN = 'CLEAN'
STATUS_CHANGED = 'CHANGED'
# Fallback state
STATUS_UNKNOWN = 'UNKNOWN'


# Width (in characters) of one status column of the report.
STATUS_LENGTH = 13

# Colored, right-aligned table cell per status: the status name padded
# to STATUS_LENGTH and wrapped in the matching ANSI color code.
STR_STATUS = {
    status: color + status.rjust(STATUS_LENGTH) + termcolors.ENDC
    for status, color in (
        (STATUS_RELEASED, termcolors.OKGREEN),
        (STATUS_AVAILABLE, termcolors.OKGREEN),
        (STATUS_IN_WORK, termcolors.WARNING),
        (STATUS_OLD, termcolors.WARNING),
        (STATUS_EXISTS, termcolors.WARNING),
        (STATUS_MISSING, termcolors.FAIL),
        (STATUS_CLEAN, termcolors.OKGREEN),
        (STATUS_CHANGED, termcolors.WARNING),
        (STATUS_UNKNOWN, termcolors.FAIL),
    )
}
51
+
52
+
53
def module_status_head():
    """Return the colored two-line header of the module status table."""
    column = '%' + str(STATUS_LENGTH) + 's'
    line_format = '%25s' + 6 * column + '\n'
    head = termcolors.BOLD + termcolors.UNDERLINE + 'Status of the unittests for pylibs:\n' + termcolors.ENDC
    head += termcolors.BOLD + termcolors.HEADER + line_format % (
        'Library',
        'UT-Status',
        'DOC-Status',
        'Versions',
        'UT-Coverage',
        'SPEC-Status',
        'GIT-Status',
    )
    # separator line spanning the name column plus the six status columns
    head += (25 + 6 * STATUS_LENGTH) * '-' + '\n' + termcolors.ENDC
    return head
67
+
68
+
69
def module_status_line(module_folder):
    """Return one formatted status table line for *module_folder*.

    The line contains the module name followed by the colored cells for
    unittest-, doc-, interpreter-, coverage-, spec- and git-status.
    """
    # bugfix: the .get() fallback used to be the plain STATUS_UNKNOWN
    # string (uncolored, unpadded), which would break column alignment;
    # fall back to the formatted UNKNOWN cell instead.
    unknown = STR_STATUS[STATUS_UNKNOWN]
    return '%25s%s%s%s%s%s%s\n' % (
        os.path.basename(module_folder) + ':',
        STR_STATUS.get(module_unittest_status(module_folder), unknown),
        STR_STATUS.get(module_doc_status(module_folder), unknown),
        module_unittest_versions(module_folder),
        module_unittest_coverage(module_folder),
        STR_STATUS.get(module_spec_status(module_folder), unknown),
        STR_STATUS.get(module_git_status(module_folder), unknown),
    )
80
+
81
+
82
def module_unittest_status(module_folder):
    """Return the unittest status of the module in *module_folder*.

    MISSING  - no testresults stored in the library
    UNKNOWN  - no testresults in the unittest folder (or unknown state)
    OLD      - library results differ from the unittest results
    RELEASED / IN_WORK - taken from the stored testobject state
    """
    lib_json = os.path.join(module_folder, 'pylibs', os.path.basename(module_folder), '_testresults_', 'unittest.json')
    ut_json = os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json')
    try:
        with open(lib_json, 'r') as fh:
            ut_lib = json.loads(fh.read())
    except IOError:
        return STATUS_MISSING
    try:
        with open(ut_json, 'r') as fh:
            ut_ut = json.loads(fh.read())
    except IOError:
        return STATUS_UNKNOWN
    if ut_ut['testobject_information'] != ut_lib['testobject_information'] or ut_ut['unittest_information'] != ut_lib['unittest_information']:
        return STATUS_OLD
    state = ut_lib.get('testobject_information', {}).get('State', 'unknown').lower()
    if 'released' in state:
        return STATUS_RELEASED
    if 'work' in state:
        return STATUS_IN_WORK
    return STATUS_UNKNOWN
105
+
106
+
107
def module_unittest_versions(module_folder):
    """Return the right-aligned list of interpreter versions the unittest ran with.

    Reads 'Supported Interpreters' (e.g. "python2, python3") from the
    unittest results and strips the "python" prefix of each entry.
    Returns the formatted UNKNOWN cell if no data is available.
    """
    try:
        with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
            ut = json.loads(fh.read())
    except IOError:
        return STR_STATUS[STATUS_UNKNOWN]
    # bugfix: the default used to be '' (a str, which has no .get), and a
    # missing 'Supported Interpreters' key crashed .split() on None.
    interpreters = ut.get('testobject_information', {}).get('Supported Interpreters')
    if not interpreters:
        return STR_STATUS[STATUS_UNKNOWN]
    # "python2.7" -> "2.7": drop the 6-character "python" prefix
    versions = [entry.strip()[6:] for entry in interpreters.split(',')]
    rv = ', '.join(versions)
    return rv.rjust(STATUS_LENGTH)
121
+
122
+
123
def module_unittest_coverage(module_folder):
    """Return the colored "line%% (branch%%)" coverage cell for the module.

    Green above 90%% line coverage, warning color otherwise; the
    formatted UNKNOWN cell when no coverage data is available.
    """
    try:
        with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
            ut = json.loads(fh.read())
    except IOError:
        return STR_STATUS[STATUS_UNKNOWN]
    # bugfix: a present-but-empty 'coverage_information' list used to
    # raise IndexError on [0]
    cov_list = ut.get('coverage_information') or [{}]
    lcov = cov_list[0].get('line_coverage')
    bcov = cov_list[0].get('branch_coverage')
    if lcov is None or bcov is None:
        return STR_STATUS[STATUS_UNKNOWN]
    color = termcolors.OKGREEN if lcov > 90 else termcolors.WARNING
    rv = color + '%3d%% (%3d%%)' % (lcov, bcov) + termcolors.ENDC
    # 11 == visible width of "%3d%% (%3d%%)"; pad to the column width
    return (STATUS_LENGTH - 11) * ' ' + rv
139
+
140
+
141
def module_git_status(module_folder):
    """Return the git working-tree state of *module_folder*.

    CLEAN    - nothing to commit and nothing to push
    CHANGED  - local modifications or unpublished commits
    UNKNOWN  - git failed (not a repository, git missing, ...)

    bugfix: the output parsing used to match the German localized git
    messages only ("nichts zu committen"); force an English locale and
    match the English phrases instead.
    """
    p = subprocess.Popen("LANG=en_US.UTF-8 LANGUAGE=en_US git -C %s status" % module_folder, stdout=subprocess.PIPE, shell=True)
    output = p.communicate()[0]
    if p.wait() != 0:
        return STATUS_UNKNOWN
    if b"nothing to commit" in output and b"to publish your local commits" not in output:
        return STATUS_CLEAN
    return STATUS_CHANGED
152
+
153
+
154
def module_doc_status(module_folder):
    """Return the documentation status of the module in *module_folder*.

    AVAILABLE - rendered docs stored in the library (_docs_/index.html)
    IN_WORK   - doc sources exist (docs/index.rst) but are not rendered
    MISSING   - neither exists
    """
    rendered = os.path.join(module_folder, 'pylibs', os.path.basename(module_folder), '_docs_', 'index.html')
    source = os.path.join(module_folder, 'docs', 'index.rst')
    if os.path.exists(rendered):
        return STATUS_AVAILABLE
    if os.path.exists(source):
        return STATUS_IN_WORK
    return STATUS_MISSING
162
+
163
+
164
def module_spec_status(module_folder):
    """Return the requirement specification status of the module.

    MISSING - no specification.reqif
    EXISTS  - specification exists but no unittest results to check it against
    IN_WORK - specification and testcases do not fully cover each other
    CLEAN   - no lost requirements and no lost testcases
    """
    if not os.path.exists(os.path.join(module_folder, 'requirements', 'specification.reqif')):
        return STATUS_MISSING
    try:
        with open(os.path.join(module_folder, 'unittest', 'testresults', 'unittest.json'), 'r') as fh:
            ut = json.loads(fh.read())
    except IOError:
        return STATUS_EXISTS
    if ut['lost_souls']['item_list'] or ut['lost_souls']['testcase_list']:
        return STATUS_IN_WORK
    return STATUS_CLEAN

+ 466
- 0
run.py View File

@@ -0,0 +1,466 @@
1
+#!/usr/bin/env python
2
+# -*- coding: utf-8 -*-
3
+#
4
+import fstools
5
+import report
6
+import reqif
7
+
8
+import json
9
+import os
10
+import platform
11
+import getpass
12
+import sys
13
+import subprocess
14
+import imp
15
+import xml.dom.minidom
16
+try:
17
+    import jinja2
18
+except ImportError:
19
+    jinja2 = None
20
+import shutil
21
+
22
# ANSI escape sequences for colored console output.
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'  # reset all attributes
BOLD = '\033[1m'
UNDERLINE = '\033[4m'

# Command line arguments understood by unittest() (priority order).
ARG_CLEAN = 'clean'
ARG_RUN = 'run'
ARG_FINALISE = 'finalise'
ARG_PDF = 'pdf'
ARG_STATUS = 'status'
ARG_COPY = 'copy'
ARG_RELEASE = 'release'

# Top-level keys of the unittest data-collection (unittest.json).
UNITTEST_KEY_SYSTEM_INFO = 'system_information'
UNITTEST_KEY_UNITTEST_INFO = 'unittest_information'
UNITTEST_KEY_TESTOBJECT_INFO = 'testobject_information'
UNITTEST_KEY_TESTRUNS = 'testrun_list'
UNITTEST_KEY_COVERAGE_INFO = 'coverage_information'
UNITTEST_KEY_SPECIFICATION = 'specification'

# Well-known file names inside the testresults folder.
FILES = {
    'data-collection': 'unittest.json',
    'tex-report': 'unittest.tex',
    'coverage-xml': 'coverage.xml'
}

# Files copied into the library by unittest_copy().
REPORT_FILES = [FILES['data-collection'], FILES['coverage-xml'], 'unittest.pdf']
53
+
54
+
55
class coverage_info(list):
    """Per-module coverage data parsed from a Cobertura style coverage.xml.

    Each element is a dict describing one package/module (name, filepath,
    line- and branch-coverage plus its files); every file carries a list
    of line-range fragments tagged covered / partially-covered /
    uncovered / clean for the listing in the report.
    """
    KEY_NAME = 'name'
    KEY_FILEPATH = 'filepath'
    KEY_LINE_COVERAGE = 'line_coverage'
    KEY_BRANCH_COVERAGE = 'branch_coverage'
    KEY_FILES = 'files'
    KEY_FRAGMENTS = 'fragments'
    KEY_START_LINE = 'start'
    KEY_END_LINE = 'end'
    KEY_COVERAGE_STATE = 'coverage_state'
    COVERED = 'covered'
    UNCOVERED = 'uncovered'
    CLEAN = 'clean'
    PARTIALLY_COVERED = 'partially-covered'

    def __init__(self, xml_filename, module_basepath):
        """Parse *xml_filename*; *module_basepath* is stripped from all names."""
        list.__init__(self)
        xmldoc = xml.dom.minidom.parse(xml_filename)
        itemlist = xmldoc.getElementsByTagName('package')
        for p in itemlist:
            module = {}
            module[self.KEY_NAME] = p.attributes['name'].value[len(module_basepath) + 1:]
            module[self.KEY_FILEPATH] = p.attributes['name'].value.replace('.', os.path.sep)
            module[self.KEY_LINE_COVERAGE] = float(p.attributes['line-rate'].value) * 100.
            try:
                module[self.KEY_BRANCH_COVERAGE] = float(p.attributes['branch-rate'].value) * 100.
            except (AttributeError, KeyError):
                # bugfix: a missing attribute raises KeyError from minidom's
                # NamedNodeMap, which the previous AttributeError-only
                # handler did not catch
                module[self.KEY_BRANCH_COVERAGE] = None
            module[self.KEY_FILES] = []
            for c in p.getElementsByTagName('class'):
                f = {}
                f[self.KEY_NAME] = c.attributes['filename'].value[len(module_basepath) + 1:].replace(os.path.sep, '.')
                f[self.KEY_FILEPATH] = c.attributes['filename'].value
                f[self.KEY_LINE_COVERAGE] = float(c.attributes['line-rate'].value) * 100.
                try:
                    # bugfix: this used to read the *package* node (p)
                    # branch-rate instead of the class/file node (c)
                    f[self.KEY_BRANCH_COVERAGE] = float(c.attributes['branch-rate'].value) * 100.
                except (AttributeError, KeyError):
                    # narrowed from a bare except: only missing-attribute
                    # situations fall back to None
                    f[self.KEY_BRANCH_COVERAGE] = None
                f[self.KEY_FRAGMENTS] = []
                # Fold consecutive reported lines with the same coverage
                # state into fragments; unreported gaps become 'clean'.
                last_hit = None
                start_line = 1
                end_line = 1
                for line in c.getElementsByTagName('line'):
                    line_no = int(line.attributes['number'].value)
                    hit = bool(int(line.attributes['hits'].value))
                    if hit:
                        # a hit line with incomplete condition coverage
                        # counts as partially covered only
                        cc = line.attributes.get('condition-coverage')
                        if cc is not None and not cc.value.startswith('100%'):
                            hit = self.PARTIALLY_COVERED
                        else:
                            hit = self.COVERED
                    else:
                        hit = self.UNCOVERED
                    if line_no == 1:
                        last_hit = hit
                    elif last_hit != hit or line_no > end_line + 1:
                        if last_hit is not None:
                            # close the fragment collected so far
                            line = {}
                            line[self.KEY_START_LINE] = start_line
                            line[self.KEY_END_LINE] = end_line
                            line[self.KEY_COVERAGE_STATE] = last_hit
                            f[self.KEY_FRAGMENTS].append(line)
                        if line_no > end_line + 1:
                            # lines not reported by coverage are 'clean'
                            line = {}
                            if last_hit is not None:
                                line[self.KEY_START_LINE] = end_line + 1
                            else:
                                line[self.KEY_START_LINE] = start_line
                            line[self.KEY_END_LINE] = line_no - 1
                            line[self.KEY_COVERAGE_STATE] = self.CLEAN
                            f[self.KEY_FRAGMENTS].append(line)
                        start_line = line_no
                        end_line = line_no
                        last_hit = hit
                    elif line_no == end_line + 1:
                        end_line = line_no
                if last_hit is not None:
                    # trailing fragment of the last reported state
                    line = {}
                    line[self.KEY_START_LINE] = start_line
                    line[self.KEY_END_LINE] = end_line
                    line[self.KEY_COVERAGE_STATE] = last_hit
                    f[self.KEY_FRAGMENTS].append(line)
                # everything after the last reported line is 'clean';
                # end=None means "to the end of the file"
                line = {}
                if last_hit is not None:
                    line[self.KEY_START_LINE] = end_line + 1
                else:
                    line[self.KEY_START_LINE] = start_line
                line[self.KEY_END_LINE] = None
                line[self.KEY_COVERAGE_STATE] = self.CLEAN
                f[self.KEY_FRAGMENTS].append(line)
                module[self.KEY_FILES].append(f)
            self.append(module)

    def __str__(self):
        """Return a human readable, indented coverage summary."""
        rv = ''
        for module in self:
            rv += '%s (%.1f%% - %s)\n' % (module.get(self.KEY_NAME), module.get(self.KEY_LINE_COVERAGE), module.get(self.KEY_FILEPATH))
            for py_file in module.get(self.KEY_FILES):
                rv += '    %s (%.1f%% - %s)\n' % (py_file.get(self.KEY_NAME), py_file.get(self.KEY_LINE_COVERAGE), py_file.get(self.KEY_FILEPATH))
                for fragment in py_file.get(self.KEY_FRAGMENTS):
                    if fragment.get(self.KEY_END_LINE) is not None:
                        rv += '        %d - %d: %s\n' % (fragment.get(self.KEY_START_LINE), fragment.get(self.KEY_END_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
                    else:
                        rv += '        %d -  : %s\n' % (fragment.get(self.KEY_START_LINE), repr(fragment.get(self.KEY_COVERAGE_STATE)))
        return rv
160
+
161
+
162
def unittest_filename(base_folder, filename):
    """Return the path of *filename* inside the testresults folder below *base_folder*."""
    parts = (base_folder, 'testresults', filename)
    return os.path.join(*parts)
164
+
165
+
166
def print_header(txt, color=BOLD + WARNING):
    """Print *txt* as a section header (bold warning color by default)."""
    print('%s%s%s' % (color, txt, ENDC))
168
+
169
+
170
def print_action(txt, color=BOLD):
    """Print *txt* as an action bullet ('  * ...', bold by default)."""
    print('%s  * %s%s' % (color, txt, ENDC))
172
+
173
+
174
def print_info(txt, color=ENDC):
    """Print *txt* as indented detail information (six leading spaces)."""
    print('      %s%s%s' % (color, txt, ENDC))
176
+
177
+
178
def remove_file(filename):
    """Delete *filename* unless it is a .gitkeep marker; OS errors are ignored."""
    if not os.path.exists(filename) or filename.endswith('.gitkeep'):
        return
    try:
        print_info('Removing %s' % filename)
        os.remove(filename)
    except OSError:
        # best effort cleanup -- a file we cannot remove is not fatal
        pass
185
+
186
+
187
def module_uid(path):
    """Return a uid for all python files below *path*.

    NOTE(review): the semantics come from fstools.uid_filelist —
    presumably a hash over the '*.py' files found recursively; confirm
    in the fstools package.
    """
    return fstools.uid_filelist(path, '*.py', rekursive=True)
189
+
190
+
191
def unittest(options, args, unittest_folder):
    """Dispatch to the unittest action named in *args*.

    Exactly one action is executed; the priority order is:
    clean, run, finalise, pdf, status, copy, release.
    """
    dispatch = (
        (ARG_CLEAN, lambda: unittest_init(unittest_folder)),
        (ARG_RUN, lambda: unittest_run(unittest_folder, options)),
        (ARG_FINALISE, lambda: unittest_finalise(unittest_folder)),
        (ARG_PDF, lambda: unittest_pdf(unittest_folder)),
        (ARG_STATUS, lambda: unittest_status(unittest_folder)),
        (ARG_COPY, lambda: unittest_copy(unittest_folder)),
        (ARG_RELEASE, lambda: unittest_release(unittest_folder)),
    )
    for name, action in dispatch:
        if name in args:
            action()
            break
206
+
207
+
208
def unittest_init(unittest_folder):
    """Prepare the testresults folder and write the initial unittest.json.

    Cleans the testresults folder, then collects system-, unittest- and
    testobject-information plus the requirement specification and stores
    everything with an empty testrun list in the data-collection file.
    """
    config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
    #
    print_header("Initiating unittest for first testrun...")
    if not os.path.exists(unittest_filename(unittest_folder, '')):
        # bugfix: message used to read "outpout"
        print_action('Creating output folder %s' % unittest_filename(unittest_folder, ''))
        fstools.mkdir(unittest_filename(unittest_folder, ''))
    #
    print_action('Cleaning up data from last testrun')
    for fn in os.listdir(unittest_filename(unittest_folder, '')):
        remove_file(unittest_filename(unittest_folder, fn))
    remove_file(unittest_filename(unittest_folder, FILES['coverage-xml']))
    #
    print_action('Creating unittest data-collection: %s' % unittest_filename(unittest_folder, FILES['data-collection']))
    #
    # Information about the executing system.
    system_info = {}
    system_info['Architecture'] = platform.architecture()[0]
    system_info['Machine'] = platform.machine()
    system_info['Hostname'] = platform.node()
    # NOTE(review): platform.dist() is deprecated and removed in
    # Python 3.8 -- needs a replacement for newer interpreters.
    system_info['Distribution'] = ' '.join(platform.dist())
    system_info['System'] = platform.system()
    system_info['Kernel'] = platform.release() + ' (%s)' % platform.version()
    system_info['Username'] = getpass.getuser()
    system_info['Path'] = unittest_folder
    #
    # Version of the unittest sources themselves.
    unittest_info = {}
    unittest_info['Version'] = module_uid(os.path.join(unittest_folder, 'src', 'tests'))
    #
    # Information about the library under test.
    testobject_info = {}
    testobject_info['Name'] = config.lib.__name__
    testobject_info['Version'] = module_uid(config.lib.__path__[0])
    testobject_info['Description'] = config.lib.__DESCRIPTION__
    testobject_info['Supported Interpreters'] = ', '.join(['python%d' % vers for vers in config.lib.__INTERPRETER__])
    # The test sources matching the pinned release version means "Released".
    testobject_info['State'] = 'Released' if config.release_unittest_version == module_uid(os.path.join(unittest_folder, 'src', 'tests')) else 'In development'
    testobject_info['Dependencies'] = []
    for dependency in config.lib.__DEPENDENCIES__:
        testobject_info['Dependencies'].append((dependency, module_uid(os.path.join(unittest_folder, 'src', dependency))))
    #
    spec_filename = os.path.join(unittest_folder, '..', 'requirements', 'specification.reqif')
    print_action("Adding Requirement Specification from %s" % spec_filename)
    try:
        spec = reqif.reqif_dict(spec_filename, 'Heading', 'Software Specification')
    except FileNotFoundError:
        print_info('FAILED', FAIL)
        spec = {}
    else:
        print_info('SUCCESS', OKGREEN)
    #
    data_collection = {
        UNITTEST_KEY_SYSTEM_INFO: system_info,
        UNITTEST_KEY_UNITTEST_INFO: unittest_info,
        UNITTEST_KEY_TESTOBJECT_INFO: testobject_info,
        UNITTEST_KEY_SPECIFICATION: spec,
        UNITTEST_KEY_TESTRUNS: [],
    }
    with open(unittest_filename(unittest_folder, FILES['data-collection']), 'w') as fh:
        fh.write(json.dumps(data_collection, indent=4, sort_keys=True))
265
+
266
+
267
def unittest_run(unittest_folder, options):
    """Execute all testcases with the current interpreter and store the results.

    Skips execution (with a message) if the running python major version
    is not listed in the library's __INTERPRETER__ collection.
    """
    tests = imp.load_source('', os.path.join(unittest_folder, 'src', 'tests', '__init__.py'))
    config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
    #
    # e.g. "python 3.8.10 (final)" -- sys.version_info[3] is the releaselevel
    interpreter_version = 'python ' + '.'.join(['%d' % n for n in sys.version_info[:3]]) + ' (%s)' % sys.version_info[3]
    #
    # Translate the command line option into a testcase execution level
    # (full execution as the fallback).
    execution_level = report.TCEL_REVERSE_NAMED.get(options.execution_level, report.TCEL_FULL)
    #
    if sys.version_info.major in config.lib.__INTERPRETER__:
        print_header("Running Unittest with %s" % interpreter_version)
        print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
        with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
            data_collection = json.loads(fh.read())
        print_action('Executing Testcases')
        # Map requirement ids to their headings for the report.
        heading_dict = {}
        for key in data_collection[UNITTEST_KEY_SPECIFICATION].get('item_dict', {}):
            heading_dict[key] = data_collection[UNITTEST_KEY_SPECIFICATION]['item_dict'][key]['Heading']
        test_session = report.testSession(
            ['__unittest__', config.lib.logger_name] + config.additional_loggers_to_catch,
            interpreter=interpreter_version,
            testcase_execution_level=execution_level,
            testrun_id='p%d' % sys.version_info[0],
            heading_dict=heading_dict
        )
        tests.testrun(test_session)
        #
        print_action('Adding Testrun data to %s' % unittest_filename(unittest_folder, FILES['data-collection']))
        data_collection[UNITTEST_KEY_TESTRUNS].append(test_session)
        with open(unittest_filename(unittest_folder, FILES['data-collection']), 'w') as fh:
            fh.write(json.dumps(data_collection, indent=4, sort_keys=True))
    else:
        print_header("Library does not support %s." % interpreter_version)
299
+
300
+
301
def unittest_finalise(unittest_folder):
    """Cross-check testcases against the specification and add coverage data.

    Adds a 'lost_souls' section (requirements without testcases and
    testcases without requirements) and the parsed coverage.xml to the
    data-collection, then rewrites the json file.
    """
    config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
    #
    print_action('Adding Testrun data to %s' % unittest_filename(unittest_folder, FILES['data-collection']))
    with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
        data_collection = json.loads(fh.read())
    #
    print_header("Adding Requirement information")
    #
    data_collection['lost_souls'] = {}
    #
    print_action("Adding Lost Requirement Soul")
    data_collection['lost_souls']['item_list'] = []
    for req_id in data_collection['specification'].get('item_dict', {}):
        item = data_collection['specification']['item_dict'][req_id]
        # NOTE(review): the uid below looks like the ReqIF type-uid that
        # marks requirement items -- confirm against the specification file.
        if item['system_type_uid'] == '_MR7eNHYYEem_kd-7nxt1sg':
            testcase_available = False
            for testrun in data_collection['testrun_list']:
                if req_id in testrun['testcases']:
                    testcase_available = True
                    break
            if not testcase_available:
                data_collection['lost_souls']['item_list'].append(req_id)
                print_info('%s - "%s" has no corresponding testcase' % (item['system_uid'], item['Heading']), FAIL)
    #
    print_action("Adding Lost Testcase Soul")
    data_collection['lost_souls']['testcase_list'] = []
    for testrun in data_collection['testrun_list']:
        for tc_id in testrun.get('testcases', {}):
            # testcases whose id does not appear in the specification
            if tc_id not in data_collection['specification'].get('item_dict', {}) and tc_id not in data_collection['lost_souls']['testcase_list']:
                data_collection['lost_souls']['testcase_list'].append(tc_id)
                print_info('"%s" has no corresponding testcase' % tc_id, FAIL)
    #
    print_header("Adding Coverage information")
    print_action('Adding Coverage Information to %s' % unittest_filename(unittest_folder, FILES['data-collection']))
    data_collection[UNITTEST_KEY_COVERAGE_INFO] = coverage_info(unittest_filename(unittest_folder, 'coverage.xml'), os.path.dirname(config.lib_path))
    with open(unittest_filename(unittest_folder, FILES['data-collection']), 'w') as fh:
        fh.write(json.dumps(data_collection, indent=4, sort_keys=True))
339
+
340
+
341
def unittest_pdf(unittest_folder):
    """Render the LaTeX report from the data-collection and run pdflatex.

    pdflatex is executed up to three times so that table of contents and
    references settle; the loop stops early on the first failing run.
    """
    print_header("Creating PDF-Report of Unittest")
    print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
    with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
        data_collection = json.loads(fh.read())

    if jinja2 is None:
        # NOTE(review): without jinja2 no .tex file is (re-)written, but
        # pdflatex below still runs on whatever unittest.tex may exist.
        print_action('You need to install jinja2 to create a PDF-Report!', FAIL)
    else:
        fn = unittest_filename(unittest_folder, FILES['tex-report'])
        print_action('Creating LaTeX-File %s' % fn)
        with open(fn, 'w') as fh:
            #
            template_path = os.path.join(os.path.dirname(__file__), 'templates')
            template_filename = 'unittest.tex'
            jenv = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path))
            template = jenv.get_template(template_filename)
            fh.write(template.render(data=data_collection))
    print_action('Creating PDF %s' % unittest_filename(unittest_folder, 'unittest.pdf'))
    for i in range(3):
        sys.stdout.write('      Starting run %d/3 of pdflatex... ' % (i + 1))
        sys.stdout.flush()
        # NOTE(review): the folder path is interpolated unquoted into a
        # shell command -- breaks (or worse) for paths containing spaces.
        exit_value = os.system("pdflatex -interaction nonstopmode --output-directory %(path)s %(path)s/unittest.tex 1> /dev/null" % {'path': unittest_filename(unittest_folder, '')})
        if exit_value != 0:
            print(FAIL + 'FAILED' + ENDC)
            break
        else:
            print(OKGREEN + 'SUCCESS' + ENDC)
369
+
370
+
371
def unittest_status(unittest_folder):
    """Report the git state of all submodules and the release state of the testresults.

    Fetches all submodules, classifies each submodule's `git status`
    output, then checks that this testrun is released and that the
    results stored in the library are up to date.
    """
    config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
    #
    print_header('Checking status of all submodules')
    print_action('Updating all submodules (fetch)')
    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git fetch", cwd=os.path.dirname(unittest_folder), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stderroutput = process.communicate()[1]
    if stderroutput == b'':
        print_info('SUCCESS', color=OKGREEN)
    else:
        print_info('FAILED', color=FAIL)

    print_action('Checking status...')
    process = subprocess.Popen("LANGUAGE='en_US.UTF-8 git' git submodule foreach git status", cwd=os.path.dirname(unittest_folder), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdoutput, stderroutput = process.communicate()
    if stderroutput == b'':
        # Collect the status text per submodule ("Entering '<name>'"
        # starts a new submodule section).
        module = None
        data = {}
        for line in stdoutput.splitlines():
            # bugfix: bytes were converted with str() (yielding "b'...'"
            # wrappers on python3); decode instead
            line = line.decode('utf-8', 'replace')
            if 'Entering' in line:
                tail = line[line.index("'") + 1:]
                module = str(tail[:tail.index("'")])
                if module not in data:
                    data[module] = ''
            elif module is not None:
                # bugfix: this used to append via the loop-local name 'm'
                # and crashed on any output before the first 'Entering'
                data[module] += line
        for key in data:
            if "working tree clean" not in data[key] and "working directory clean" not in data[key]:
                data[key] = ("local changes", WARNING)
            elif "Your branch is behind" in data[key]:
                data[key] = ("no up to date (try git pull)", FAIL)
            elif "HEAD detached at" in data[key]:
                data[key] = ("no up to date (try git checkout master)", FAIL)
            elif "Your branch is ahead of" in data[key]:
                data[key] = ("push required", WARNING)
            elif "nothing to commit" in data[key]:
                data[key] = ("clean", OKGREEN)
            else:
                data[key] = ("unknown", FAIL)
            print_info('Submodule %s... %s' % (key, data[key][1] + data[key][0]))
    else:
        print_info('FAILED', color=FAIL)
    #
    print_header('Checking status of unittest and testresults in the library')
    print_action('Loading Testrun data from %s' % unittest_filename(unittest_folder, FILES['data-collection']))
    with open(unittest_filename(unittest_folder, FILES['data-collection']), 'r') as fh:
        data_collection = json.loads(fh.read())
    print_action('Checking release state of this testrun... ')
    if data_collection['testobject_information']['State'] != 'Released':
        print_info("FAILED", FAIL)
    else:
        print_info("SUCCESS", OKGREEN)
    #
    print_action('Checking up to dateness of testrults in library...')
    try:
        with open(os.path.join(unittest_folder, '..', 'pylibs', config.lib.__name__, '_testresults_', FILES['data-collection']), 'r') as fh:
            lib_result = json.loads(fh.read())
    except FileNotFoundError:
        print_info("FAILED: Testresults not in library", FAIL)
    else:
        if data_collection['testobject_information'] != lib_result['testobject_information'] or data_collection['unittest_information'] != lib_result['unittest_information']:
            print_info("FAILED", FAIL)
        else:
            print_info("SUCCESS", OKGREEN)
437
+
438
+
439
def unittest_copy(unittest_folder):
    """Copy the report files (json, coverage xml, pdf) into the library's _testresults_ folder."""
    config = imp.load_source('', os.path.join(unittest_folder, 'src', 'config.py'))
    #
    print_header('Copy unittest files to library')
    target_folder = os.path.join(config.lib_path, '_testresults_')
    print_action('Copying Unittest Files to  %s' % target_folder)
    if os.path.exists(target_folder):
        # clean out any previous results first
        for entry in os.listdir(target_folder):
            remove_file(os.path.join(target_folder, entry))
    else:
        print_info('Creating folder %s' % target_folder)
        fstools.mkdir(target_folder)
    for report_file in REPORT_FILES:
        source_path = unittest_filename(unittest_folder, report_file)
        target_path = os.path.join(target_folder, report_file)
        print_info('copying %s -> %s' % (source_path, target_path))
        shutil.copyfile(source_path, target_path)
456
+
457
+
458
def unittest_release(unittest_folder):
    """Pin the current unittest version in src/config.py (marks the tests released)."""
    config_filename = os.path.join(unittest_folder, 'src', 'config.py')
    with open(config_filename, 'r') as fh:
        config_lines = fh.read().splitlines()
    release_line = "release_unittest_version = '%s'" % module_uid(os.path.join(unittest_folder, 'src', 'tests'))
    with open(config_filename, 'w') as fh:
        for config_line in config_lines:
            if config_line.startswith('release_unittest_version'):
                fh.write(release_line + '\n')
            else:
                fh.write(config_line + '\n')

+ 1
- 0
templates/.#unittest_titlepage.tex View File

@@ -0,0 +1 @@
1
+dirk@here.28603

+ 21
- 0
templates/coverage/report.tex View File

@@ -0,0 +1,21 @@
1
+{%- import 'macros.tex' as macros %}
2
+\section{Test-Coverage}
3
+{% for module in coverage_information %}
4
+    \subsection{ {\tt {{macros.latex_filter(module.name)}} }}
5
+    The line coverage for {\tt {{macros.latex_filter(module.name)}} } was {{macros.latex_filter("%.1f" % module.line_coverage)}}\% {% if module.branch_coverage %}\\
6
+    The branch coverage for {\tt {{macros.latex_filter(module.name)}} } was {{macros.latex_filter("%.1f" % module.branch_coverage)}}\%{% endif %}
7
+{% for py_file in module.files %}
8
+    \subsubsection{ {\tt {{macros.latex_filter(py_file.name)}} }}
9
+    The line coverage for {\tt {{macros.latex_filter(py_file.name)}} } was {{macros.latex_filter("%.1f" % py_file.line_coverage)}}\% {% if py_file.branch_coverage %}\\
10
+    The branch coverage for {\tt {{macros.latex_filter(py_file.name)}} } was {{macros.latex_filter("%.1f" % py_file.branch_coverage)}}\%{% endif %}
11
+    \vspace*{2.7ex}
12
+{%- for fragment in py_file.fragments %}
13
+    \lstset{backgroundcolor=\color{bg-{{fragment.coverage_state}}}}\vspace*{-2.7ex}
14
+{%- if fragment.end is not none %}
15
+    \lstinputlisting[language=Python, linerange={{fragment.start}}-{{fragment.end}}, firstnumber={{fragment.start}}]{ {{py_file.filepath}} }
16
+{%- else %}
17
+    \lstinputlisting[language=Python, firstline={{fragment.start}}, firstnumber={{fragment.start}}]{ {{py_file.filepath}} }
18
+{%- endif %}
19
+{%- endfor %}
20
+{% endfor %}
21
+{% endfor %}

+ 13
- 0
templates/coverage/statistic.tex View File

@@ -0,0 +1,13 @@
1
+{%- import 'macros.tex' as macros %}
2
+\begin{tabu} to \linewidth {lll}
3
+\toprule
4
+\textbf{Module- or Filename} & \textbf{Line-Coverage} & \textbf{Branch-Coverage}\\
5
+{%- for module in coverage_information %}
6
+\midrule
7
+{\tt {{macros.latex_filter(module.name)}} } & {{macros.latex_filter("%.1f" % module.line_coverage)}}\% & {% if module.branch_coverage %}{{macros.latex_filter("%.1f" % module.branch_coverage)}}\%{% endif %} \\
8
+{%- for py_file in module.files %}
9
+\hspace*{2ex}{\tt {{macros.latex_filter(py_file.name)}} } & {{macros.latex_filter("%.1f" % py_file.line_coverage)}}\% &  \\
10
+{%- endfor %}
11
+{%- endfor %}
12
+\bottomrule
13
+\end{tabu}

+ 22
- 0
templates/information/candidate.tex View File

@@ -0,0 +1,22 @@
1
+{%- import 'macros.tex' as macros %}
2
+{{ testobject_information.Description }}
3
+
4
+\begin{tabu} to \linewidth {lX}
5
+\toprule
6
+{\bf Library Information}   & \\
7
+\midrule
8
+{%- for key in testobject_information %}
9
+{%- if key != "Description" and key != 'Dependencies' %}
10
+{{macros.latex_filter(key)}} & {{macros.latex_filter(testobject_information[key])}} \\
11
+{%- endif %}
12
+{%- endfor %}
13
+{%- if 'Dependencies' in data.testobject_information %}
14
+\midrule
15
+{\bf Dependencies} & \\
16
+\midrule
17
+{%- for module, version in testobject_information.Dependencies %}
18
+{{macros.latex_filter(module)}} & {{macros.latex_filter(version)}}\\
19
+{%- endfor %}
20
+{%- endif %}
21
+\bottomrule
22
+\end{tabu}

+ 12
- 0
templates/information/system.tex View File

@@ -0,0 +1,12 @@
1
+{%- import 'macros.tex' as macros %}
2
+\begin{tabu} to \linewidth {lX}
3
+\toprule
4
+{\bf System Information}   & \\
5
+\midrule
6
+{%- for key in system_information %}
7
+{%- if key != "Description" %}
8
+{{macros.latex_filter(key)}} & {{macros.latex_filter(data.system_information[key])}} \\
9
+{%- endif %}
10
+{%- endfor %}
11
+\bottomrule
12
+\end{tabu}

+ 11
- 0
templates/information/unittest.tex View File

@@ -0,0 +1,11 @@
1
+{%- import 'macros.tex' as macros %}
2
+\begin{tabu} to \linewidth {lX}
3
+\toprule
4
+{\bf Unittest Information}   & \\
5
+\midrule
6
+{%- for key in unittest_information %}
7
+{{macros.latex_filter(key)}} & {{macros.latex_filter(data.unittest_information[key])}} \\
8
+Testruns with & {% for testrun in data.testrun_list %}{{testrun.interpreter}}{% if not loop.last %}, {% endif %}{% endfor %}\\
9
+{%- endfor %}
10
+\bottomrule
11
+\end{tabu}

+ 11
- 0
templates/macros.tex View File

@@ -0,0 +1,11 @@
1
+{%- macro latex_filter(text) -%}{{ text.replace('\\', '/').replace('%', '\\%').replace('/xc2/xb0', '$^\circ$').replace('"', '\'').replace('/', '/\\allowbreak ').replace('&', '\\allowbreak \\&').replace('_', '\\_').replace('->', '$\\rightarrow$').replace('<-', '$\\leftarrow$').replace('=>', '$\\Rightarrow$').replace('<=', '$\\leq$').replace('>=', '$\\geq$').replace('<', '$<$').replace('>', '$>$').replace('{', '\{').replace('}', '\}').replace('#', '\\#')}}
2
+{%- endmacro -%}
3
+
4
+{%- macro color_by_level(level) -%}{% if level <= 10 %}black{% else %}{% if level <= 20 %}green{% else %}{% if level <= 30 %}orange{% else %}red{% endif %}{% endif %}{% endif %}
5
+{%- endmacro -%}
6
+
7
+{%- macro bg_by_levelno(level) -%}{% if level <= 10 %}0.8 0.8 0.8{% else %}{% if level <= 20 %}0.8 0.95 0.8{% else %}{% if level <= 30 %}1 0.75 0.45{% else %}0.95 0.8 0.8{% endif %}{% endif %}{% endif %}
8
+{%- endmacro -%}
9
+
10
+{%- macro result(level) -%}{% if level <= 10 %}Info{% else %}{% if level <= 20 %}\textcolor{green}{Success}{% else %}{% if level <= 30 %}\textcolor{orange}{Warning}{% else %}\textcolor{red}{Failed}{% endif %}{% endif %}{% endif %}
11
+{%- endmacro -%}

+ 5
- 0
templates/reqif/heading.tex View File

@@ -0,0 +1,5 @@
1
+{%- import 'macros.tex' as macros %}
2
+{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}}
3
+{%- if 'Description' in item and item.Description != '' %}
4
+	{{ item.Description }}
5
+{%- endif %}

+ 16
- 0
templates/reqif/requirement.tex View File

@@ -0,0 +1,16 @@
1
+{%- import 'macros.tex' as macros %}
2
+{{ '\\%s{%s}' % (sectype, macros.latex_filter(item.Heading))}}
3
+{{ '\\label{%s%s}' % (labeltype, item.system_uid)}}
4
+{%- if 'Description' in item and item.Description != '' %}
5
+\paragraph{Description}\mbox{}\\
6
+{{ item.Description }}
7
+{%- endif %}
8
+{%- if 'ReasonForImplementation' in item and item.ReasonForImplementation != '' %}
9
+\paragraph{Reason for the implementation}\mbox{}\\
10
+{{ item.ReasonForImplementation }}
11
+{%- endif %}
12
+{%- if 'Fitcriterion' in item and item.Fitcriterion != '' %}
13
+\paragraph{Fitcriterion}\mbox{}\\
14
+{{ item.Fitcriterion }}
15
+{%- endif %}
16
+

+ 18
- 0
templates/test/case_long.tex View File

@@ -0,0 +1,18 @@
1
+\paragraph{Testresult}\mbox{}\\
2
+This test was passed with the state: {\bf {{ macros.result(testcase.levelno) }}}.
3
+
4
+{%- for tLogger in testcase.testcaseLogger %}
5
+
6
+\vspace*{2.5ex}
7
+\begin{tabu} to \linewidth {lX}
8
+\toprule
9
+{\bf {{ macros.result(tLogger.levelno) }} } & {{ macros.latex_filter(tLogger.message) }}\\
10
+\bottomrule
11
+\end{tabu}
12
+{%- for mLogger in tLogger.moduleLogger %}
13
+\definecolor{shadecolor}{rgb}{ {{macros.bg_by_levelno(mLogger.levelno) }} }\begin{modulelog}[breaklines=true, breakanywhere=true]
14
+{{ mLogger.message }}
15
+\end{modulelog}
16
+\vspace*{-0.225cm}
17
+{%- endfor %}
18
+{%- endfor %}

+ 19
- 0
templates/test/case_short.tex View File

@@ -0,0 +1,19 @@
1
+{%- import 'macros.tex' as macros %}
2
+\paragraph{Testresult}\mbox{}\\
3
+This test was passed with the state: {\bf {{ macros.result(testcase.levelno) }}}. See also full trace
4
+in section \ref{testcase:{{testrun.testrun_id}}__{{testcase.message}}}!
5
+\begin{longtabu} to \linewidth {lX}
6
+\toprule
7
+Testrun: & {{ testrun.interpreter }}\\
8
+Caller: & {{ macros.latex_filter(testcase.pathname) }} ({{ "%d" % testcase.lineno }})\\
9
+Start-Time: & {{ macros.latex_filter(testcase.time_start) }}\\
10
+Finished-Time: & {{ macros.latex_filter(testcase.time_finished) }}\\
11
+Time-Consumption & {{ '%.3fs' % (testcase.time_consumption) }}\\
12
+\midrule
13
+\multicolumn{2}{l}{\bf{Testsummary:}}\\
14
+\midrule
15
+{%- for tLogger in testcase.testcaseLogger %}
16
+\bf{\,{{ macros.result(tLogger.levelno) }} } & {{ macros.latex_filter(tLogger.message) }}\\
17
+{%- endfor %}
18
+\bottomrule
19
+\end{longtabu}

+ 13
- 0
templates/test/run_statistic.tex View File

@@ -0,0 +1,13 @@
1
+{%- import 'macros.tex' as macros %}
2
+\begin{tabu} to \linewidth {lX}
3
+	\toprule
4
+	Number of tests & {{ "{\\bf %d}" % testrun.number_of_tests }}\\
5
+	Number of successfull tests & {{ "{\\bf %d}" % testrun.number_of_successfull_tests }}\\
6
+	Number of possibly failed tests & \textcolor{% if testrun.number_of_possibly_failed_tests > 0%}{orange}{% else %}{black}{% endif %}{{ "{\\bf %d}" % testrun.number_of_possibly_failed_tests }}\\
7
+	Number of failed tests & \textcolor{% if testrun.number_of_failed_tests > 0%}{red}{% else %}{black}{% endif %}{{ "{\\bf %d}" % testrun.number_of_failed_tests }}\\
8
+	\midrule
9
+	Executionlevel    & {{ macros.latex_filter(testrun.testcase_names.get('%d' % testrun.testcase_execution_level, 'unknown')) }}\\
10
+	Time consumption  & {{ '%.3fs' % testrun.time_consumption }}\\
11
+	\bottomrule
12
+\end{tabu}
13
+

+ 120
- 0
templates/unittest.tex View File

@@ -0,0 +1,120 @@
1
+{%- import 'macros.tex' as macros %}
2
+{%- include 'unittest_head.tex' %}
3
+{%- include 'unittest_titlepage.tex' %}
4
+\tableofcontents
5
+\newpage
6
+
7
+\section{Test Information}
8
+\subsection{Test Candidate Information}
9
+{%- with testobject_information = data.testobject_information %}
10
+  {%- include 'information/candidate.tex' %}
11
+{%- endwith %}
12
+\subsection{Unittest Information}
13
+{%- with unittest_information = data.unittest_information %}
14
+  {%- include 'information/unittest.tex' %}
15
+{%- endwith %}
16
+\subsection{Test System Information}
17
+{%- with system_information = data.system_information %}
18
+  {%- include 'information/system.tex' %}
19
+{%- endwith %}
20
+
21
+\section{Statistic}
22
+{%- for testrun in data.testrun_list %}
23
+  \subsection{\textcolor{% if testrun.number_of_failed_tests > 0%}{red}{% else %}{% if testrun.number_of_possibly_failed_tests > 0%}{orange}{% else %}{green}{% endif %}{% endif %}{Test-Statistic for testrun with {{testrun.interpreter}}}}
24
+  {%- include 'test/run_statistic.tex' %}
25
+{%- endfor %}
26
+\subsection{Coverage Statistic}
27
+{%- with coverage_information = data.coverage_information %}
28
+  {%- include 'coverage/statistic.tex' %}
29
+{%- endwith %}
30
+
31
+\newpage
32
+{%- if data.specification.get('item_dict', {})|length >0 %}
33
+  \section{Tested Requirements}
34
+  {%- for item_id in data.specification.uid_list_sorted %}
35
+    {%- if item_id not in data.lost_souls.item_list %}
36
+      {%- with item = data.specification.item_dict[item_id] %}
37
+        {%- if item.system_type_uid == '_4-K5EHYYEem_kd-7nxt1sg' %}
38
+          {%- with sectype = 'subsection' %}
39
+            {%- include 'reqif/heading.tex' %}
40
+          {%- endwith %}
41
+        {%- elif item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' %}
42
+          {%- with sectype = 'subsubsection', labeltype = 'item:' %}
43
+            {%- include 'reqif/requirement.tex' %}
44
+          {%- endwith %}
45
+          {%- if item_id not in data.lost_souls.item_list %}
46
+            {%- for testrun in data.testrun_list %}
47
+              {%- if item.system_uid in testrun.testcases %}
48
+                {%- with testcase = testrun.testcases[item.system_uid] %}
49
+                  {%- include 'test/case_short.tex' %}
50
+                {%- endwith %}
51
+              {%- else %}
52
+                \textcolor{orange}{\bf No testresults available!}
53
+              {%- endif %}
54
+            {%- endfor %}
55
+          {%- endif %}
56
+        {%- endif %}
57
+      {%- endwith %}
58
+    {%- endif %}
59
+  {%- endfor %}
60
+{%- endif %}
61
+
62
+{% if data.lost_souls.item_list|length > 0 %}
63
+  \newpage
64
+  \section{\textcolor{red}{Requirements with no corresponding Testcase}}
65
+  {% for item_id in data.specification.uid_list_sorted %}
66
+    {% with item = data.specification.item_dict[item_id] %}
67
+      {% if item.system_type_uid == '_MR7eNHYYEem_kd-7nxt1sg' and item_id in data.lost_souls.item_list %}
68
+        {%- with sectype = 'subsection', labeltype = 'lost_soul:' %}
69
+          {% include 'reqif/requirement.tex' %}
70
+        {%- endwith %}
71
+      {% endif %}
72
+    {% endwith %}
73
+  {% endfor %}
74
+{% endif %}
75
+
76
+{% if data.lost_souls.testcase_list|length > 0 %}
77
+  \newpage
78
+  \section{\textcolor{orange}{Testcases with no corresponding Requirement}}
79
+  {%- for testrun in data.testrun_list %}
80
+    \subsection{Summary for testrun with {{ testrun.interpreter }}}
81
+    {% for lost_soul_id in data.lost_souls.testcase_list %}
82
+      {% if lost_soul_id in testrun.testcases %}
83
+        {% with testcase = testrun.testcases[lost_soul_id] %}
84
+          \subsubsection{ {{macros.latex_filter(testcase.message)}} }
85
+          {% include 'test/case_short.tex' %}
86
+        {% endwith %}
87
+      {% endif %}
88
+    {% endfor %}
89
+  {% endfor %}
90
+{% endif %}
91
+
92
+\newpage
93
+\appendix
94
+{%- for testrun in data.testrun_list %}
95
+  \section{Trace for testrun with {{ testrun.interpreter }}}
96
+  {%- for max_level, absmax_level, num_tests in ( (40, 1000, testrun.number_of_failed_tests), (30, 39, testrun.number_of_possibly_failed_tests), (0, 29, testrun.number_of_successfull_tests) ) %}
97
+    {% if num_tests > 0 %}
98
+      \subsection{Tests with status {{ macros.result(max_level) }} ({{num_tests}})}
99
+      {%- for testcase_id in testrun.uid_list_sorted %}
100
+        {% with testcase = testrun.testcases[testcase_id], item = data.specification.get('item_dict', {}).get(testrun.testcases[testcase_id].message) %}
101
+          {%- if testcase.levelno >= max_level and testcase.levelno <= absmax_level%}
102
+            {%- if item %}
103
+              {%- with sectype = 'subsubsection', labeltype = 'testcase:' + testrun.testrun_id + '__' %}
104
+                {%- include 'reqif/requirement.tex' %}
105
+              {%- endwith %}
106
+            {%- else %}
107
+              \subsubsection{ {{ macros.latex_filter(testcase.message) }} }\label{testcase:{{testrun.testrun_id}}__{{testcase.message}}}
108
+            {%- endif %}
109
+            {% include "test/case_long.tex" %}
110
+          {%- endif %}
111
+        {%- endwith %}
112
+      {%- endfor %}
113
+    {%- endif %}
114
+  {% endfor %}
115
+{%- endfor %}
116
+
117
+{% with coverage_information = data.coverage_information %}
118
+  {% include 'coverage/report.tex' %}
119
+{% endwith %}
120
+{% include 'unittest_foot.tex' %}

+ 1
- 0
templates/unittest_foot.tex View File

@@ -0,0 +1 @@
1
+\end{document}

+ 89
- 0
templates/unittest_head.tex View File

@@ -0,0 +1,89 @@
1
+{%- import 'macros.tex' as macros %}
2
+\documentclass[a4paper]{article}
3
+%\documentclass[a4paper,landscape]{article}
4
+
5
+\renewcommand{\familydefault}{\sfdefault}
6
+\usepackage[table]{xcolor}
7
+\definecolor{orange}{rgb}{1, 0.7, 0}
8
+\definecolor{lightgrey}{rgb}{0.925, 0.925, 0.925}
9
+
10
+\setlength{\topmargin}{-3cm}
11
+\setlength{\oddsidemargin}{-0.5cm}
12
+\setlength{\evensidemargin}{0cm}
13
+\setlength{\textwidth}{17.5cm}
14
+\setlength{\textheight}{24.5cm}
15
+%\setlength{\textwidth}{25cm}
16
+%\setlength{\textheight}{15cm}
17
+\setlength{\headheight}{84pt}
18
+
19
+\usepackage{fancyvrb}
20
+\usepackage{fvextra}
21
+%\usepackage{framed,color}
22
+%\newenvironment{modulelog}{\snugshade\Verbatim}{\endVerbatim\endsnugshade}
23
+\usepackage{adjustbox}
24
+\newenvironment{modulelog}%
25
+{\par\noindent\adjustbox{margin=0ex,bgcolor=shadecolor,margin=0ex}\bgroup\varwidth\linewidth\Verbatim}%
26
+{\endVerbatim\endvarwidth\egroup}
27
+%\usepackage{xcolor}
28
+
29
+\renewcommand{\baselinestretch}{1,2}
30
+\setlength{\parindent}{0pt}
31
+\setlength{\parskip}{9pt plus3pt minus3pt}
32
+
33
+\usepackage{listings}
34
+\usepackage{color}
35
+\definecolor{bg-partially-covered}{rgb}{1,1,0.6}    % light-yellow
36
+\definecolor{bg-uncovered}{rgb}{1,0.8,0.8}          % light-red
37
+\definecolor{bg-covered}{rgb}{0.95,1,0.95}          % very light-green
38
+\definecolor{bg-clean}{rgb}{1,1,1}                  % white
39
+\definecolor{mygreen}{rgb}{0,0.6,0}
40
+\definecolor{mygray}{rgb}{0.5,0.5,0.5}
41
+\definecolor{mymauve}{rgb}{0.58,0,0.82}
42
+\lstset{ %
43
+  backgroundcolor=\color{white},   % choose the background color; you must add \usepackage{color} or \usepackage{xcolor}; should come as last argument
44
+  basicstyle=\footnotesize,        % the size of the fonts that are used for the code
45
+  breakatwhitespace=false,         % sets if automatic breaks should only happen at whitespace
46
+  breaklines=true,                 % sets automatic line breaking
47
+  captionpos=b,                    % sets the caption-position to bottom
48
+  commentstyle=\color{mygreen},    % comment style
49
+  deletekeywords={...},            % if you want to delete keywords from the given language
50
+  escapeinside={\%*}{*)},          % if you want to add LaTeX within your code
51
+  extendedchars=true,              % lets you use non-ASCII characters; for 8-bits encodings only, does not work with UTF-8
52
+  frame=none,	                   % adds a frame around the code
53
+  keepspaces=true,                 % keeps spaces in text, useful for keeping indentation of code (possibly needs columns=flexible)
54
+  keywordstyle=\color{blue},       % keyword style
55
+  language=Octave,                 % the language of the code
56
+  morekeywords={*,...},            % if you want to add more keywords to the set
57
+  numbers=left,                    % where to put the line-numbers; possible values are (none, left, right)
58
+  numbersep=5pt,                   % how far the line-numbers are from the code
59
+  numberstyle=\tiny\color{mygray}, % the style that is used for the line-numbers
60
+  rulecolor=\color{black},         % if not set, the frame-color may be changed on line-breaks within not-black text (e.g. comments (green here))
61
+  showlines=true,
62
+  showspaces=false,                % show spaces everywhere adding particular underscores; it overrides 'showstringspaces'
63
+  showstringspaces=false,          % underline spaces within strings only
64
+  showtabs=false,                  % show tabs within strings adding particular underscores
65
+  stepnumber=1,                    % the step between two line-numbers. If it's 1, each line will be numbered
66
+  stringstyle=\color{mymauve},     % string literal style
67
+  tabsize=2,	                   % sets default tabsize to 2 spaces
68
+}
69
+\usepackage{hyperref}
70
+\usepackage{longtable}
71
+\usepackage{tabu}
72
+\usepackage{multicol}
73
+\usepackage{booktabs}
74
+\usepackage{graphicx}
75
+\usepackage{lastpage} % for the number of the last page in the document
76
+\usepackage{fancyhdr}
77
+
78
+\fancyhf{}
79
+\renewcommand{\headrulewidth}{0pt}
80
+\renewcommand{\footrulewidth}{0pt}
81
+\lhead{\textcolor{gray}{}}
82
+\chead{\textcolor{gray}{ Unittest for {\tt {{ macros.latex_filter(data.testobject_information.Name) }} }}}
83
+\rhead{\textcolor{gray}{}}
84
+\lfoot{\textcolor{gray}{}}
85
+\cfoot{\textcolor{gray}{}}
86
+\rfoot{\textcolor{gray}{\thepage\,/ \pageref{LastPage}}}
87
+
88
+\begin{document}
89
+

+ 15
- 0
templates/unittest_titlepage.tex View File

@@ -0,0 +1,15 @@
1
+{%- import 'macros.tex' as macros %}
2
+\begin{titlepage}
3
+\date{\today}
4
+\title{
5
+	Unittest for {\tt {{ macros.latex_filter(data.testobject_information.Name) }} }
6
+}
7
+\date{\today} 
8
+\maketitle
9
+\thispagestyle{empty}
10
+\newpage
11
+\end{titlepage}
12
+
13
+\setcounter{page}{1}
14
+\pagestyle{fancy}
15
+

+ 241
- 0
test.py View File

@@ -0,0 +1,241 @@
1
+#!/usr/bin/env python
2
+# -*- coding: utf-8 -*-
3
+#
4
+import logging
5
# Module logger shared by all check helpers below.
logger = logging.getLogger('__unittest__')

# Report levels reuse the numeric logging severities, so they can be
# compared/ordered directly: PASS (INFO) < INSPECT (WARNING) < FAIL (ERROR).
REPORT_LEVEL_FAIL = logging.ERROR
REPORT_LEVEL_INSPECT = logging.WARNING
REPORT_LEVEL_PASS = logging.INFO
10
+
11
+
12
def __get_repr__(value, data_filter=repr):
    """Render *value* as a string for log output.

    Dicts and lists are rendered recursively (with spaces just inside the
    braces/brackets); every other value is passed through *data_filter*,
    which defaults to ``repr``.
    """
    # exact type checks (not isinstance) keep dict/list subclasses on the
    # data_filter path, matching the original dispatch
    if type(value) == dict:
        rendered_items = ['%s: %s' % (repr(key), __get_repr__(value.get(key))) for key in value.keys()]
        return '{ ' + ', '.join(rendered_items) + ' }'
    elif type(value) == list:
        return '[ ' + ', '.join([__get_repr__(entry) for entry in value]) + ' ]'
    else:
        return data_filter(value)
19
+
20
+
21
def __report_result__(result, description, data_filter=repr):
    """Write the obtained result and its type to the detailed debug log."""
    rendered = __get_repr__(result, data_filter)
    logger.debug('Result (%s): %s (%s)', description, rendered, repr(type(result)))
23
+
24
+
25
def __report_expectation_equivalency__(expectation, description, data_filter=repr):
    """Write the expected value and its type to the detailed debug log."""
    rendered = __get_repr__(expectation, data_filter)
    logger.debug('Expectation (%s): result = %s (%s)', description, rendered, repr(type(expectation)))
27
+
28
+
29
def __report_expectation_range__(min_expectation, max_expectation, description):
    """Write the expected value range to the detailed debug log."""
    lower = __get_repr__(min_expectation)
    upper = __get_repr__(max_expectation)
    logger.debug('Expectation (%s): %s <= result <= %s', description, lower, upper)
31
+
32
+
33
def __equivalent_dict__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
    """Compare two dicts recursively and log every difference.

    Missing and odd keys are reported as failures; the values stored under
    the keys common to both dicts are compared recursively via
    ``__equivalent__``.

    :param result: the dict produced by the test run
    :param expectation: the dict of expected key/value pairs
    :param report_comment_fail: optional text appended to every failure log line
    :param dict_key: dotted path of this dict inside the compared structure
    :return: highest report level produced (REPORT_LEVEL_PASS/INSPECT/FAIL)
    """
    # Fix: the comment is concatenated into the log format strings below;
    # leaving it at its None default raised TypeError on the failure paths.
    if report_comment_fail is None:
        report_comment_fail = ''
    result_keys = set(result.keys())
    expect_keys = set(expectation.keys())
    #
    log_lvl = REPORT_LEVEL_PASS
    #
    # missing elements
    #
    missing_keys = expect_keys - result_keys
    if len(missing_keys) > 0:
        logger.error('Missing key(s) in dict (%s): %s.' + report_comment_fail, dict_key, ', '.join(['%s' % repr(key) for key in missing_keys]))
        log_lvl = REPORT_LEVEL_FAIL
    #
    # odd elements
    #
    odd_keys = result_keys - expect_keys
    if len(odd_keys) > 0:
        logger.error('Odd key(s) in dict (%s): %s.' + report_comment_fail, dict_key, ', '.join(['%s' % repr(key) for key in odd_keys]))
        log_lvl = REPORT_LEVEL_FAIL
    #
    # differences in the common keys (recursive value comparison)
    #
    common_keys = result_keys - missing_keys - odd_keys
    for key in common_keys:
        ll = __equivalent__(result[key], expectation[key], report_comment_fail=report_comment_fail, dict_key=dict_key + ('.' if dict_key != '' else '') + str(key))
        # report levels are logging severities, so numeric max keeps the worst
        log_lvl = max(log_lvl, ll)
    return log_lvl
61
+
62
+
63
def __equivalent_list__(result, expectation, report_comment_fail=None, list_key='test_variable'):
    """Compare two lists for equivalent content, ignoring element order.

    Each value from *result* is matched against the not-yet-consumed entries
    of *expectation*: an exact match (content and type) consumes the entry;
    a content match with a type difference only is remembered and reported
    in detail if no exact match follows. Values left over on either side are
    logged as odd resp. missing values.

    :param result: the list produced by the test run
    :param expectation: the list of expected values
    :param report_comment_fail: optional text appended to every failure log
        line. NOTE(review): if a failure occurs while this is still None, the
        string concatenations below raise TypeError — confirm callers always
        pass a string (``__equivalent__`` does).
    :param list_key: dotted path of this list inside the compared structure
    :return: highest report level produced (REPORT_LEVEL_PASS/INSPECT/FAIL)
    """
    _odd_ = []                                  # result values with no counterpart in expectation
    _result_ = result[:]                        # work on a copy; the caller's list stays untouched
    e_index = list(range(0, len(expectation)))  # indices of expectation entries not yet matched
    log_lvl = REPORT_LEVEL_PASS
    r = 0                                       # position of `value` in the original result list
    while len(_result_) > 0:
        value = _result_.pop(0)
        just_type_diff = None
        for e in e_index:
            # silent comparison (no comment) just to find a matching entry
            ll = __equivalent__(value, expectation[e], None)
            if ll == REPORT_LEVEL_PASS:
                # exact match: consume this expectation entry
                e_index.pop(e_index.index(e))
                break
            elif ll == REPORT_LEVEL_INSPECT:
                # same content, different type: candidate match, keep looking
                just_type_diff = e
        else:
            # for/else: loop finished without break -> no exact match found
            if just_type_diff is None:
                _odd_.append(value)
            else:
                # re-run the comparison with reporting enabled so the type
                # difference appears in the log.
                # NOTE(review): this assignment overwrites log_lvl instead of
                # keeping the maximum of earlier iterations — confirm intended.
                log_lvl = __equivalent__(value, expectation[just_type_diff], report_comment_fail, dict_key='%s[%d]' % (list_key, r))
                e_index.pop(e_index.index(just_type_diff))
        r += 1
    #
    # missing elements
    #
    if len(e_index) > 0:
        logger.error('Missing value(s) in list (%s): %s.' + report_comment_fail, list_key, ', '.join(['%s' % repr(expectation[e]) for e in e_index]))
        log_lvl = REPORT_LEVEL_FAIL
    #
    # odd elements
    #
    if len(_odd_) > 0:
        logger.error('Odd value(s) in list (%s): %s.' + report_comment_fail, list_key, ', '.join(['%s' % repr(v) for v in _odd_]))
        log_lvl = REPORT_LEVEL_FAIL
    return log_lvl
99
+
100
+
101
def __equivalent__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
    """Compare *result* against *expectation*, dispatching on type.

    Dicts and lists are delegated to their specialised helpers; every other
    pairing is compared for equal content (FAIL on mismatch) and equal type
    (at least INSPECT on mismatch).

    :return: highest report level produced (REPORT_LEVEL_PASS/INSPECT/FAIL)
    """
    report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
    # structured containers: recurse via the dedicated helpers
    if type(result) == dict and type(expectation) == dict:
        return max(REPORT_LEVEL_PASS, __equivalent_dict__(result, expectation, report_comment_fail, dict_key))
    if type(result) == list and type(expectation) == list:
        return max(REPORT_LEVEL_PASS, __equivalent_list__(result, expectation, report_comment_fail, dict_key))
    # scalar (or mixed-type) comparison
    log_lvl = REPORT_LEVEL_PASS
    if result != expectation:
        log_lvl = REPORT_LEVEL_FAIL
        logger.error('Content %s is incorrect' + (' for %s' % dict_key if dict_key != '' else '') + '.' + report_comment_fail, __get_repr__(result))
    if type(result) != type(expectation):
        log_lvl = max(log_lvl, REPORT_LEVEL_INSPECT)
        logger.warning('Type %s is NOT %s%s (%s). ' + report_comment_fail.strip() or '', __get_repr__(type(result)), __get_repr__(type(expectation)), (' for %s' % dict_key if dict_key != '' else ''), __get_repr__(result))
    return log_lvl
121
+
122
+
123
def equivalency_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
    """
    Check values for equivalency inside a test run and report to a testCaseLogger.

    :param result: The result of a test execution of a module
    :type result: All types are supported
    :param expectation: The expected value (shall be equivalent to result)
    :type expectation: All types are supported
    :param tcl: The testCaseLogger the summary line is reported to
    :param description: A description of the result. It will be reported like
                        "xxx is correct." Example: description="stringrepresentation created by modulename"
    :type description: str
    :param report_comment_fail: Comment for a failed test execution; appended to the failure log lines.
    :type report_comment_fail: str
    :param data_filter: Callable used to render values in the logs (default: ``repr``)
    :return: The report level of the comparison
    """
    # full trace of result and expectation goes to the detailed log
    __report_result__(result, description, data_filter=data_filter)
    __report_expectation_equivalency__(expectation, description, data_filter=data_filter)
    report_level = __equivalent__(result, expectation, report_comment_fail=report_comment_fail, dict_key='result')
    if report_level == REPORT_LEVEL_PASS:
        tcl.log(report_level, description + ' is correct (Content %s and Type is %s).', data_filter(result), repr(type(result)))
    else:
        tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
    return report_level
146
+
147
+
148
class equivalency_order_chk(object):
    """Collects values reported over time and checks them, order-sensitive,
    against a list of expected values.

    Feed observed values in via :meth:`report_value`; then either
    :meth:`report` checks them for exact equivalency or
    :meth:`report_range_check` checks them against a tolerance band.
    """

    def __init__(self, ordered_values, tcl, description='Variable', report_comment_fail=None):
        """
        :param ordered_values: expected values in the order they shall be reported
        :param tcl: the testCaseLogger the summary lines are reported to
        :param description: description prefix for the summary lines
        :param report_comment_fail: comment appended to failure log lines
        """
        self._expected_values = ordered_values
        self._tcl = tcl
        self._description = description
        self._report_comment_fail = report_comment_fail
        # values submitted so far via report_value()
        self._reported_values = []

    def report_value(self, value):
        """Append the next observed value."""
        self._reported_values.append(value)

    def report(self):
        """Check all reported values for equivalency with the expectation.

        :return: the highest report level produced
        """
        __report_result__(self._reported_values, self._description)
        __report_expectation_equivalency__(self._expected_values, self._description)
        report_lvl = REPORT_LEVEL_PASS
        for i in range(0, min(len(self._expected_values), len(self._reported_values))):
            report_lvl = max(report_lvl, equivalency_chk(self._reported_values[i], self._expected_values[i], logger, 'Submitted value number %d' % (i + 1), self._report_comment_fail))
        if report_lvl <= REPORT_LEVEL_PASS:
            self._tcl.log(report_lvl, self._description + ': Values and number of submitted values is correct. See detailed log for more information.')
        else:
            self._tcl.log(report_lvl, self._description + ': Values and number of submitted values is NOT correct. See detailed log for more information.')
        return report_lvl

    def report_range_check(self, minus_tollerance, plus_tollerance):
        """Check all reported values to be within a tolerance band around the
        corresponding expected value.

        :param minus_tollerance: allowed deviation below the expected value
        :param plus_tollerance: allowed deviation above the expected value
        :return: the highest report level produced
        """
        __report_result__(self._reported_values, self._description)
        report_lvl = REPORT_LEVEL_PASS
        # Fix: compare the reported count against the EXPECTED count. The
        # original compared len(self._reported_values) with itself, so the
        # "Number of submitted values" check could never fail.
        report_lvl = max(report_lvl, equivalency_chk(len(self._reported_values), len(self._expected_values), self._tcl, 'Number of submitted values', self._report_comment_fail))
        for i in range(0, min(len(self._expected_values), len(self._reported_values))):
            report_lvl = max(report_lvl, range_chk(self._reported_values[i], self._expected_values[i] - minus_tollerance, self._expected_values[i] + plus_tollerance, logger, 'Submitted value number %d' % (i + 1), self._report_comment_fail))
        if report_lvl <= REPORT_LEVEL_PASS:
            self._tcl.log(report_lvl, self._description + ': Valueaccuracy and number of submitted values is correct. See detailed log for more information.')
        else:
            self._tcl.log(report_lvl, self._description + ': Valueaccuracy and number of submitted values is NOT correct. See detailed log for more information.')
        return report_lvl
182
+
183
+
184
def __range__(result, min_expectation, max_expectation, report_comment_fail):
    """Check that *result* lies within [min_expectation, max_expectation] and
    matches the boundary types; log any deviation.

    :return: REPORT_LEVEL_PASS, REPORT_LEVEL_INSPECT or REPORT_LEVEL_FAIL
    """
    report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
    log_lvl = REPORT_LEVEL_PASS
    out_of_range = result < min_expectation or result > max_expectation
    if out_of_range:
        log_lvl = REPORT_LEVEL_FAIL
        logger.error('Content %s is incorrect.' + report_comment_fail, __get_repr__(result))
    type_mismatch = type(result) != type(min_expectation) or type(result) != type(max_expectation)
    if type_mismatch:
        # type differences alone only raise the level to INSPECT
        log_lvl = max(log_lvl, REPORT_LEVEL_INSPECT)
        logger.warning('Type %s is incorrect.' + report_comment_fail, __get_repr__(type(result)))
    return log_lvl
195
+
196
+
197
def range_chk(result, min_expectation, max_expectation, tcl, description='Value', report_comment_fail=None):
    """
    Check values to be in a range inside a test run and report to a testCaseLogger.

    :param result: The result of a test execution of a module
    :type result: All numeric types are supported
    :param min_expectation: The result shall be more or equal
    :type min_expectation: All numeric types are supported
    :param max_expectation: The result shall be less or equal
    :type max_expectation: All numeric types are supported
    :param tcl: The testCaseLogger the summary line is reported to
    :param description: A description of the result. It will be reported like
                        "xxx is correct." Example: description="stringrepresentation created by modulename"
    :type description: str
    :param report_comment_fail: Comment for a failed test execution; appended to the failure log lines.
    :type report_comment_fail: str
    :return: The report level of the comparison
    """
    # full trace of result and expected range goes to the detailed log
    __report_result__(result, description)
    __report_expectation_range__(min_expectation, max_expectation, description)
    report_level = __range__(result, min_expectation, max_expectation, report_comment_fail=report_comment_fail)
    if report_level == REPORT_LEVEL_PASS:
        tcl.log(report_level, description + ' is correct (Content %s in [%s ... %s] and Type is %s).', repr(result), repr(min_expectation), repr(max_expectation), repr(type(result)))
    else:
        tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
    return report_level
222
+
223
+
224
def in_list_chk(result, expectation_list, tcl, description='Value', report_level_pass=logging.INFO, report_level_fail=logging.ERROR, report_comment_fail=None):
    """
    Check a value to be contained in a list of allowed values and report to a testCaseLogger.

    .. note:: The actual membership check is not implemented yet; the method
              reports a failure so the missing implementation cannot go
              unnoticed in a test run.

    :param result: The result of a test execution of a module
    :type result: All types are supported
    :param expectation_list: The list of allowed values
    :type expectation_list: A list of all types is supported
    :param tcl: The testCaseLogger the summary line is reported to
    :param description: A description of the result. It will be reported like "xxx is correct."
    :type description: str
    :param report_level_pass: The reporting level as defined in :class:`logging` (e.g.: logging.INFO)
    :param report_level_fail: The reporting level as defined in :class:`logging` (e.g.: logging.ERROR)
    :param report_comment_fail: Comment for a failed test execution; appended to the failure log lines.
    :type report_comment_fail: str
    :return: REPORT_LEVEL_FAIL (until the check is implemented)
    """
    # Fix: the original called the undefined __report_values__(result, expectation)
    # (both the helper and the name `expectation` do not exist), which raised
    # NameError on every call. Use the existing report helpers instead.
    __report_result__(result, description)
    __report_expectation_equivalency__(expectation_list, description)
    tcl.log(REPORT_LEVEL_FAIL, 'in_list check not yet implemented')
    return REPORT_LEVEL_FAIL

+ 39
- 0
unittest_flow.sh View File

@@ -0,0 +1,39 @@
1
#!/bin/bash
#
# Runs the complete unittest flow: clean, coverage runs with python2 and
# python3, coverage XML export, report finalisation, status and PDF display.
# All script arguments are forwarded to "src/unittest.py run".

#
# Set commands depending on distribution
#
. /etc/os-release
# python2 (coverage tool name differs between distribution families)
if [[ "$ID" == "arch" || "$ID" == "manjaro" || "$ID_LIKE" == *"opensuse"*  || "$ID" == "solus" ]]; then
    COV2_CMD="coverage2"
    PYT2_CMD="python2"
else
    COV2_CMD="python2-coverage"
    PYT2_CMD="python2"
fi
# python3
if [[ "$ID" == "arch" || "$ID" == "manjaro" || "$ID_LIKE" == *"opensuse"* || "$ID" == "solus" ]]; then
    COV3_CMD="coverage3"
    PYT3_CMD="python3"
else
    COV3_CMD="python3-coverage"
    PYT3_CMD="python3"
fi
# pdf viewer
PDF_CMD="xdg-open"

#
# Unittest Flow
#
$PYT3_CMD src/unittest.py clean
echo -e "\e[1m  * Erasing collected coverage information\e[0m"
$COV2_CMD erase
# Fix: forward arguments with "$@" — the unquoted $* of the original re-split
# arguments containing whitespace into multiple words.
$COV2_CMD run -a --branch --source=$($PYT3_CMD src/config.py -p) src/unittest.py run "$@"
$COV3_CMD run -a --branch --source=$($PYT3_CMD src/config.py -p) src/unittest.py run "$@"
echo -e "\e[1m\e[93mCreating Coverage-XML-File: $(pwd)/testresults/coverage.xml\e[0m"
$COV3_CMD xml -o testresults/coverage.xml
$PYT3_CMD src/unittest.py finalise
$PYT3_CMD src/unittest.py status
$PYT3_CMD src/unittest.py pdf
$PDF_CMD testresults/unittest.pdf

Loading…
Cancel
Save