#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import logging
logger = logging.getLogger('__unittest__')

REPORT_LEVEL_FAIL = logging.ERROR
REPORT_LEVEL_INSPECT = logging.WARNING
REPORT_LEVEL_PASS = logging.INFO


def __get_repr__(value, data_filter=repr):
    # Render dicts and lists recursively; leaf values are rendered with data_filter.
    if type(value) == dict:
        return '{' + ', '.join(['%s: %s' % (repr(key), __get_repr__(value.get(key), data_filter)) for key in value.keys()]) + '}'
    elif type(value) == list:
        return '[' + ', '.join(['%s' % __get_repr__(v, data_filter) for v in value]) + ']'
    else:
        return data_filter(value)


def __report_result__(result, description, data_filter=repr):
    logger.debug('Result (%s): %s (%s)', description, __get_repr__(result, data_filter), repr(type(result)))


def __report_expectation_equivalency__(expectation, description, data_filter=repr):
    logger.debug('Expectation (%s): result = %s (%s)', description, __get_repr__(expectation, data_filter), repr(type(expectation)))


def __report_expectation_inlist__(expectation, description, data_filter=repr):
    logger.debug('Expectation (%s): %s in result', description, __get_repr__(expectation, data_filter))


def __report_expectation_range__(min_expectation, max_expectation, description):
    logger.debug('Expectation (%s): %s <= result <= %s', description, __get_repr__(min_expectation), __get_repr__(max_expectation))


def __equivalent_dict__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
    result_keys = set(result.keys())
    expect_keys = set(expectation.keys())
    #
    log_lvl = REPORT_LEVEL_PASS
    #
    # missing elements
    #
    missing_keys = expect_keys - result_keys
    if len(missing_keys) > 0:
        logger.error('Missing key(s) in dict (%s): %s.' + report_comment_fail, dict_key, ', '.join(['%s' % repr(key) for key in missing_keys]))
        log_lvl = REPORT_LEVEL_FAIL
    #
    # odd elements
    #
    odd_keys = result_keys - expect_keys
    if len(odd_keys) > 0:
        logger.error('Odd key(s) in dict (%s): %s.' + report_comment_fail, dict_key, ', '.join(['%s' % repr(key) for key in odd_keys]))
        log_lvl = REPORT_LEVEL_FAIL
    #
    # differences
    #
    common_keys = result_keys - missing_keys - odd_keys
    for key in common_keys:
        ll = __equivalent__(result[key], expectation[key], report_comment_fail=report_comment_fail, dict_key=dict_key + ('.' if dict_key != '' else '') + str(key))
        if log_lvl < ll:
            log_lvl = ll
    return log_lvl


def __equivalent_list__(result, expectation, report_comment_fail=None, list_key='test_variable'):
    _odd_ = []
    _result_ = result[:]
    e_index = list(range(0, len(expectation)))
    log_lvl = REPORT_LEVEL_PASS
    r = 0
    while len(_result_) > 0:
        value = _result_.pop(0)
        just_type_diff = None
        # look for an exactly equivalent expectation value; remember a candidate
        # that differs in type only
        for e in e_index:
            ll = __equivalent__(value, expectation[e], None)
            if ll == REPORT_LEVEL_PASS:
                e_index.pop(e_index.index(e))
                break
            elif ll == REPORT_LEVEL_INSPECT:
                just_type_diff = e
        else:
            # loop finished without break: no exact match for this value
            if just_type_diff is None:
                _odd_.append(value)
            else:
                ll = __equivalent__(value, expectation[just_type_diff], report_comment_fail, dict_key='%s[%d]' % (list_key, r))
                if log_lvl < ll:
                    log_lvl = ll
                e_index.pop(e_index.index(just_type_diff))
        r += 1
    #
    # missing elements
    #
    if len(e_index) > 0:
        logger.error('Missing value(s) in list (%s): %s.' + report_comment_fail, list_key, ', '.join(['%s' % repr(expectation[e]) for e in e_index]))
        log_lvl = REPORT_LEVEL_FAIL
    #
    # odd elements
    #
    if len(_odd_) > 0:
        logger.error('Odd value(s) in list (%s): %s.' + report_comment_fail, list_key, ', '.join(['%s' % repr(v) for v in _odd_]))
        log_lvl = REPORT_LEVEL_FAIL
    return log_lvl


def __equivalent__(result, expectation, report_comment_fail=None, dict_key='test_variable'):
    report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
    log_lvl = REPORT_LEVEL_PASS
    if type(result) == dict and type(expectation) == dict:
        ll = __equivalent_dict__(result, expectation, report_comment_fail, dict_key)
        if log_lvl < ll:
            log_lvl = ll
    elif type(result) == list and type(expectation) == list:
        ll = __equivalent_list__(result, expectation, report_comment_fail, dict_key)
        if log_lvl < ll:
            log_lvl = ll
    else:
        if result != expectation:
            log_lvl = REPORT_LEVEL_FAIL
            logger.error('Content %s is incorrect' + (' for %s' % dict_key if dict_key != '' else '') + '.' + report_comment_fail, __get_repr__(result))
        if type(result) != type(expectation):
            if log_lvl < REPORT_LEVEL_INSPECT:
                log_lvl = REPORT_LEVEL_INSPECT
            logger.warning('Type %s is NOT %s%s (%s).' + report_comment_fail, __get_repr__(type(result)), __get_repr__(type(expectation)), ' for %s' % dict_key if dict_key != '' else '', __get_repr__(result))
    return log_lvl


def equivalency_chk(result, expectation, tcl, description='Variable', report_comment_fail=None, data_filter=repr):
    """
    Routine to check values for equivalency inside a test run and report to a testCaseLogger.

    :param result: The result of a test execution of a module
    :type result: All types are supported
    :param expectation: The expected value (shall be equivalent to result)
    :type expectation: All types are supported
    :param tcl: The testCaseLogger to report the verdict to
    :param description: A description of the result. It will be reported like "xxx is correct." Example: description="string representation created by modulename"
    :type description: str
    :param report_comment_fail: Comment for a failed test execution. Will be added in brackets after the result text.
    :type report_comment_fail: str
    :param data_filter: Callable used to render values in the log output (default: repr)
    """
    __report_result__(result, description, data_filter=data_filter)
    __report_expectation_equivalency__(expectation, description, data_filter=data_filter)
    report_level = __equivalent__(result, expectation, report_comment_fail=report_comment_fail, dict_key='result')
    if report_level == REPORT_LEVEL_PASS:
        tcl.log(report_level, description + ' is correct (Content %s and Type is %s).', data_filter(result), repr(type(result)))
    else:
        tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
    return report_level
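

# Hedged usage sketch (not part of the original module): equivalency_chk reports to any
# logger-like object exposing log(level, msg, *args); the logger name and the sample data
# below are illustrative assumptions only.
def _example_equivalency_chk():
    tcl = logging.getLogger('example_testcase')           # assumed test case logger
    result = {'name': 'demo', 'values': [1, 2, 3]}        # value produced by the code under test
    expectation = {'name': 'demo', 'values': [1, 2, 3]}   # expected equivalent structure
    # returns REPORT_LEVEL_PASS (logging.INFO) on equivalence, a higher level otherwise
    return equivalency_chk(result, expectation, tcl, description='demo structure')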


class equivalency_order_chk(object):
    def __init__(self, ordered_values, tcl, description='Variable', report_comment_fail=None):
        self._expected_values = ordered_values
        self._tcl = tcl
        self._description = description
        self._report_comment_fail = report_comment_fail
        self._reported_values = []

    def report_value(self, value):
        self._reported_values.append(value)

    def report(self):
        __report_result__(self._reported_values, self._description)
        __report_expectation_equivalency__(self._expected_values, self._description)
        report_lvl = REPORT_LEVEL_PASS
        report_lvl = max(report_lvl, equivalency_chk(len(self._reported_values), len(self._expected_values), self._tcl, 'Number of submitted values', self._report_comment_fail))
        for i in range(0, min(len(self._expected_values), len(self._reported_values))):
            report_lvl = max(report_lvl, equivalency_chk(self._reported_values[i], self._expected_values[i], logger, 'Submitted value number %d' % (i + 1), self._report_comment_fail))
        if report_lvl <= REPORT_LEVEL_PASS:
            self._tcl.log(report_lvl, self._description + ': Values and number of submitted values are correct. See detailed log for more information.')
        else:
            self._tcl.log(report_lvl, self._description + ': Values and number of submitted values are NOT correct. See detailed log for more information.')
        return report_lvl

    def report_range_check(self, minus_tollerance, plus_tollerance):
        __report_result__(self._reported_values, self._description)
        report_lvl = REPORT_LEVEL_PASS
        report_lvl = max(report_lvl, equivalency_chk(len(self._reported_values), len(self._expected_values), self._tcl, 'Number of submitted values', self._report_comment_fail))
        for i in range(0, min(len(self._expected_values), len(self._reported_values))):
            report_lvl = max(report_lvl, range_chk(self._reported_values[i], self._expected_values[i] - minus_tollerance, self._expected_values[i] + plus_tollerance, logger, 'Submitted value number %d' % (i + 1), self._report_comment_fail))
        if report_lvl <= REPORT_LEVEL_PASS:
            self._tcl.log(report_lvl, self._description + ': Value accuracy and number of submitted values are correct. See detailed log for more information.')
        else:
            self._tcl.log(report_lvl, self._description + ': Value accuracy and number of submitted values are NOT correct. See detailed log for more information.')
        return report_lvl
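

# Hedged usage sketch (illustrative only, not part of the original API): collect values in
# the order they are produced, then compare them against the expected order.
def _example_equivalency_order_chk():
    tcl = logging.getLogger('example_testcase')       # assumed test case logger
    chk = equivalency_order_chk([1, 2, 3], tcl, description='callback arguments')
    for value in (1, 2, 3):                           # values as observed during the test
        chk.report_value(value)
    return chk.report()                               # REPORT_LEVEL_PASS if order and content match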


def __range__(result, min_expectation, max_expectation, report_comment_fail):
    report_comment_fail = (' ' + report_comment_fail) if report_comment_fail is not None else ''
    log_lvl = REPORT_LEVEL_PASS
    if result < min_expectation or result > max_expectation:
        log_lvl = REPORT_LEVEL_FAIL
        logger.error('Content %s is incorrect.' + report_comment_fail, __get_repr__(result))
    if type(result) != type(min_expectation) or type(result) != type(max_expectation):
        if log_lvl < REPORT_LEVEL_INSPECT:
            log_lvl = REPORT_LEVEL_INSPECT
        logger.warning('Type %s is incorrect.' + report_comment_fail, __get_repr__(type(result)))
    return log_lvl


def range_chk(result, min_expectation, max_expectation, tcl, description='Value', report_comment_fail=None):
    """
    Routine to check that a value is inside a range within a test run and report to a testCaseLogger.

    :param result: The result of a test execution of a module
    :type result: All numeric types are supported
    :param min_expectation: The result shall be greater than or equal to this value
    :type min_expectation: All numeric types are supported
    :param max_expectation: The result shall be less than or equal to this value
    :type max_expectation: All numeric types are supported
    :param tcl: The testCaseLogger to report the verdict to
    :param description: A description of the result. It will be reported like "xxx is correct." Example: description="string representation created by modulename"
    :type description: str
    :param report_comment_fail: Comment for a failed test execution. Will be added in brackets after the result text.
    :type report_comment_fail: str
    """
    __report_result__(result, description)
    __report_expectation_range__(min_expectation, max_expectation, description)
    report_level = __range__(result, min_expectation, max_expectation, report_comment_fail=report_comment_fail)
    if report_level == REPORT_LEVEL_PASS:
        tcl.log(report_level, description + ' is correct (Content %s in [%s ... %s] and Type is %s).', repr(result), repr(min_expectation), repr(max_expectation), repr(type(result)))
    else:
        tcl.log(report_level, description + ' is NOT correct. See detailed log for more information.')
    return report_level
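

# Hedged usage sketch (illustrative only): range_chk flags values outside
# [min_expectation, max_expectation] and additionally warns on type mismatches.
def _example_range_chk():
    tcl = logging.getLogger('example_testcase')   # assumed test case logger
    measured = 4.98                               # hypothetical measurement from the code under test
    return range_chk(measured, 4.5, 5.5, tcl, description='supply voltage')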


def in_list_dict_chk(result_list, expectation_key, tcl, description='Value', report_comment_fail=None, data_filter=repr):
"""
Routine to check values to be in a range inside a test run and report to a testCaseLogger .

    :param result_list: The list or dict produced by the test execution of a module
    :type result_list: list or dict
    :param expectation_key: The key or value expected to be contained in result_list
    :type expectation_key: All types are supported
    :param description: A description of the result. It will be reported like "xxx is correct." Example: description="string representation created by modulename"
    :type description: str
    :param report_comment_fail: Comment for a failed test execution. Will be added in brackets after the result text.
    :type report_comment_fail: str
    """
    __report_result__(result_list, description, data_filter=data_filter)
    __report_expectation_inlist__(expectation_key, description, data_filter=data_filter)
    if expectation_key in result_list:
        tcl.log(REPORT_LEVEL_PASS, description + ' is correct (%s is in the list or dict).', data_filter(expectation_key))
        return REPORT_LEVEL_PASS
    else:
        tcl.log(REPORT_LEVEL_FAIL, description + ' is NOT correct (%s is NOT in the list or dict).', data_filter(expectation_key))
        return REPORT_LEVEL_FAIL
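

# Hedged usage sketch (illustrative only): in_list_dict_chk passes when expectation_key is
# contained in result_list (list membership or dict key lookup).
def _example_in_list_dict_chk():
    tcl = logging.getLogger('example_testcase')       # assumed test case logger
    produced = {'status': 'ok', 'payload': None}      # hypothetical result of the code under test
    return in_list_dict_chk(produced, 'status', tcl, description='response keys')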