SankeyExcelParser 1.0.0b0__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- SankeyExcelParser/__init__.py +0 -0
- SankeyExcelParser/io_excel.py +1867 -0
- SankeyExcelParser/io_excel_constants.py +811 -0
- SankeyExcelParser/sankey.py +3138 -0
- SankeyExcelParser/sankey_utils/__init__.py +0 -0
- SankeyExcelParser/sankey_utils/data.py +1118 -0
- SankeyExcelParser/sankey_utils/excel_source.py +31 -0
- SankeyExcelParser/sankey_utils/flux.py +344 -0
- SankeyExcelParser/sankey_utils/functions.py +278 -0
- SankeyExcelParser/sankey_utils/node.py +340 -0
- SankeyExcelParser/sankey_utils/protos/__init__.py +0 -0
- SankeyExcelParser/sankey_utils/protos/flux.py +84 -0
- SankeyExcelParser/sankey_utils/protos/node.py +386 -0
- SankeyExcelParser/sankey_utils/protos/sankey_object.py +135 -0
- SankeyExcelParser/sankey_utils/protos/tag_group.py +95 -0
- SankeyExcelParser/sankey_utils/sankey_object.py +165 -0
- SankeyExcelParser/sankey_utils/table_object.py +37 -0
- SankeyExcelParser/sankey_utils/tag.py +95 -0
- SankeyExcelParser/sankey_utils/tag_group.py +206 -0
- SankeyExcelParser/su_trace.py +239 -0
- SankeyExcelParser/tests/integration/__init__.py +0 -0
- SankeyExcelParser/tests/integration/test_base.py +356 -0
- SankeyExcelParser/tests/integration/test_run_check_input.py +100 -0
- SankeyExcelParser/tests/integration/test_run_conversions.py +96 -0
- SankeyExcelParser/tests/integration/test_run_load_input.py +94 -0
- SankeyExcelParser/tests/unit/__init__.py +0 -0
- SankeyExcelParser-1.0.0b0.data/scripts/run_parse_and_write_excel.py +155 -0
- SankeyExcelParser-1.0.0b0.data/scripts/run_parse_excel.py +115 -0
- SankeyExcelParser-1.0.0b0.dist-info/METADATA +113 -0
- SankeyExcelParser-1.0.0b0.dist-info/RECORD +32 -0
- SankeyExcelParser-1.0.0b0.dist-info/WHEEL +5 -0
- SankeyExcelParser-1.0.0b0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,239 @@
|
|
1
|
+
import os
|
2
|
+
import time
|
3
|
+
import logging
|
4
|
+
import logging.handlers
|
5
|
+
import psutil
|
6
|
+
|
7
|
+
import pandas as pd
|
8
|
+
|
9
|
+
# Module-wide logger handle. Defaults to the root logger; rebound to the
# "sumoptimisation" logger by logger_init().
logger = logging.getLogger()
|
10
|
+
|
11
|
+
|
12
|
+
def logger_init(
    logname,
    mode
):
    """
    Initialize the global module logger ("sumoptimisation").

    Parameters
    ----------
    logname : string
        Log file path.
    mode : string
        File open mode (see https://www.manpagez.com/man/3/fopen/)
    """
    global logger
    logger = logging.getLogger("sumoptimisation")
    # Drop the previously attached handler (if any) before re-initializing,
    # so repeated calls do not accumulate file handlers.
    if logger.handlers:
        previous_handler = logger.handlers[0]
        previous_handler.close()
        logger.removeHandler(previous_handler)
    logger.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler(logname, mode)
    file_handler.setFormatter(
        logging.Formatter("%(levelname)-5s %(message)s", "%x %X"))
    logger.addHandler(file_handler)
|
36
|
+
|
37
|
+
|
38
|
+
def base_filename():
    """
    Return the filename the "sumoptimisation" logger writes to.

    Returns
    -------
    string
        Base filename of the logger's first handler.
    """
    first_handler = logging.getLogger("sumoptimisation").handlers[0]
    return first_handler.baseFilename
|
48
|
+
|
49
|
+
|
50
|
+
def run_log(myfile):
    """
    Start logging into *myfile* and mirror messages on the console.

    Parameters
    ----------
    myfile : string
        Log file path.
    """
    # Remember whether the log file is about to be created for the first time
    is_new_file = not os.path.isfile(myfile)
    # Configure the root logger to write into the file
    logging.basicConfig(
        filename=myfile,
        format='%(asctime)s,%(msecs)03d - %(levelname)-8s - %(funcName)-20s (%(lineno)04d): %(message)s',
        datefmt='%H:%M:%S',
        level=logging.INFO)
    # Mirror INFO (or higher) messages on sys.stderr with a simpler format
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(
        logging.Formatter('%(funcName)-20s (%(lineno)04d): %(message)s'))
    logging.getLogger().addHandler(console_handler)
    # First banner depends on whether the file already existed
    if is_new_file:
        creation_date = time.strftime('%Y-%m-%d')
        logging.info(f'Log file just created. Date of creation : {creation_date}')
    else:
        for banner_line in ('*****************************',
                            '********** New run **********',
                            '*****************************'):
            logging.info(banner_line)
|
82
|
+
|
83
|
+
|
84
|
+
def log_level(StrLevel="INFO"):
    '''
    Change the level of the current (root) logger.

    Parameters
    ----------
    StrLevel : string
        Level name. Possible values (all calls with a higher value than the
        selected one are logged):
        - "NOTSET" (0)
        - "DEBUG" (10)
        - "INFO" (20)
        - "WARNING" (30)
        - "ERROR" (40)
        - "CRITICAL" (50)
        Unknown names fall back to INFO (20).
    '''
    name_to_level = {
        "NOTSET": 0,
        "DEBUG": 10,
        "INFO": 20,
        "WARNING": 30,
        "ERROR": 40,
        "CRITICAL": 50,
    }
    logging.getLogger().setLevel(name_to_level.get(StrLevel, 20))
|
111
|
+
|
112
|
+
|
113
|
+
def check_log(nbmax=20):
    '''
    Create a new filename for logs.
    Remove older files, keeping only the 'nbmax' latest files.

    Side effect: creates the 'logs' directory (relative to the current
    working directory) when it does not exist yet.

    Parameters
    ----------
    nbmax : int, optional (default=20)
        Maximum number of latest log files to keep.

    Returns
    -------
    string
        Log filename as 'logs/log_YYYYMMDD.log'.
    '''
    log_def = 'log_' + time.strftime('%Y%m%d') + '.log'
    dir_fi = 'logs' + os.path.sep
    if not os.path.isdir('logs'):
        os.makedirs('logs')
    else:
        # Sort existing log files, newest first (by creation time)
        li_file = sorted(
            os.listdir('logs'),
            key=lambda fi: os.path.getctime(dir_fi + fi),
            reverse=True)
        # BUG FIX: the original condition was `len(df_sort['date'] > nbmax)`,
        # i.e. the length of a boolean pandas Series (= total file count),
        # which is truthy whenever the folder is non-empty. Compare the file
        # count against nbmax instead. (The pandas round-trip was unneeded.)
        if len(li_file) > nbmax:
            for fi in li_file[nbmax:]:
                os.remove(dir_fi + fi)
    return dir_fi + log_def
|
143
|
+
|
144
|
+
|
145
|
+
def timems(
    t_input: float,
    f_out='',
    b_full=False,
):
    '''
    Convert an input time (in seconds) to millisecs / microsecs, as an int
    or as a full timestamp string.

    Parameters
    ----------
    t_input : float
        Time in secs to convert.
    f_out : string {'milli', 'micro', ''}, optional (default='')
        Output unit:
        - 'milli' : convert input time in millisecs
        - 'micro' : convert input time in microsecs
        - other : keep as secs
    b_full : bool, optional (default=False)
        Choose type of output:
        - True : return full timestamp with date as string
        - False : return only the converted value as int

    Returns
    -------
    string or int
        Full timestamp with date as string / converted value as int.
    '''
    if b_full:
        # Fractional part of the second, expressed in the requested unit
        fractional = t_input - int(t_input)
        if f_out == 'milli':
            comp = int(round(fractional * 1000))
        elif f_out == 'micro':
            comp = int(round(fractional * 1000000))
        else:
            comp = 0
        return time.strftime(f'%Y-%m-%d %H:%M:%S,{comp}', time.localtime(t_input))
    # Integer output: scale by the requested unit (default: plain seconds)
    unit_factors = {'milli': 1000, 'micro': 1000000}
    return int(round(t_input * unit_factors.get(f_out, 1)))
|
188
|
+
|
189
|
+
|
190
|
+
def perf_process(procname='python'):
    '''
    Gives process and system infos.

    Parameters
    ----------
    procname : string, optional (default='python')
        Case-insensitive substring of the process name to inspect.

    Returns
    -------
    array-like[3]
        Process infos [PID, Process Info, System info].
        - PID : id of process (string message)
        - Process info : CPU charge of process, MEM consumption,
          MEM dedicated to sub-process, MEM dedicated to main process
        - System info : CPU charge, MEM used, MEM available
    '''
    first_pid_found = False
    procname = procname.lower()
    pyid = None
    stproc_id = ''
    # Scan every running process; keep the first match as the main PID and
    # append any further matching PIDs to the id message.
    for proc in psutil.process_iter():
        pname = proc.name().lower()
        if procname in pname:
            if not first_pid_found:
                first_pid_found = True
                pyid = proc.pid
                stproc_id = f'process id is : {pyid}'
            else:
                stproc_id += f', id_add: {proc.pid}'
    # BUG FIX: `pyid` and `stproc_id` were unbound (NameError) when no process
    # matched `procname`. Fall back to the current process in that case.
    if not first_pid_found:
        pyid = psutil.Process().pid
        stproc_id = f'process id is : {pyid}'
    # PROCESS INFOS
    ppy = psutil.Process(pyid)
    stproc_inf = f'PROCESS - cpu_percent: {ppy.cpu_percent()}, '
    mem_val = str(round(ppy.memory_percent(), 2))
    stproc_inf += f'mem_percent: {mem_val}, '
    ppym = ppy.memory_full_info()
    mem_val = str(round(ppym.uss/1024/1024))
    stproc_inf += f'mem_dedic_PID: {mem_val} Mo, '
    ppym = psutil.Process().memory_full_info()
    mem_val = str(round(ppym.uss/1024/1024))
    stproc_inf += f'mem_dedic: {mem_val} Mo'
    # SYSTEM INFOS
    sysm = psutil.virtual_memory()
    stsys_inf = f'SYSTEM - cpu_percent: {psutil.cpu_percent()}, '
    mem_val = str(round(sysm.percent, 2))
    stsys_inf += f'mem_percent: {mem_val}, '
    mem_val = str(round(sysm.used/1024/1024))
    stsys_inf += f'mem_used: {mem_val} Mo, '
    mem_val = str(round(sysm.available/1024/1024))
    stsys_inf += f'mem_avail: {mem_val} Mo'
    return [stproc_id, stproc_inf, stsys_inf]
|
File without changes
|
@@ -0,0 +1,356 @@
|
|
1
|
+
"""
|
2
|
+
Auteur : Vincent LE DOZE
|
3
|
+
Date : 07/12/23
|
4
|
+
"""
|
5
|
+
|
6
|
+
# External libs ---------------------------------------------------------------
|
7
|
+
import unittest
|
8
|
+
import os
|
9
|
+
import json
|
10
|
+
import tempfile
|
11
|
+
|
12
|
+
# External modules ------------------------------------------------------------
|
13
|
+
from os import listdir
|
14
|
+
from shutil import copyfile
|
15
|
+
|
16
|
+
# Local modules ---------------------------------------------------------------
|
17
|
+
from SankeyExcelParser.io_excel import load_sankey_from_excel_file
|
18
|
+
from SankeyExcelParser.sankey import Sankey
|
19
|
+
from SankeyExcelParser import su_trace as su_trace
|
20
|
+
|
21
|
+
# Constants -------------------------------------------------------------------
# Root directory holding all the test projects (read from the environment;
# None when TESTS_DIR is not set — TODO confirm the variable is required)
TESTS_DIR = os.environ.get('TESTS_DIR')
# Names of per-project reference-results sub-directories
XLPARSER_TESTS_REFS_DIR = 'ref_tests__SankeyExcelParser'
SCMFA_TESTS_REFS_DIR = 'ref_tests__SCMFA'
OPENSANKEY_TESTS_REFS_DIR = 'ref_tests__OpenSankey'
MFASANKEY_TESTS_REFS_DIR = 'ref_tests__MFASankey'

# File / folder name fragments that the folder scan must skip
DIRS_TO_EXCLUDE = [
    '.git',
    '.md',
    XLPARSER_TESTS_REFS_DIR,
    SCMFA_TESTS_REFS_DIR,
    OPENSANKEY_TESTS_REFS_DIR,
    MFASANKEY_TESTS_REFS_DIR
]

# Maximum input file size (bytes) above which a workbook is not tested
MAXSIZE = 200000

# Naming convention of the reference JSON files
REF_TEST_PREFIX = 'expected_'
REF_SANKEY_SUFFIX = '_sankey-dict'
REF_LOG_SUFFIX = '_parsing_logs'

# Pre-seeded expected results; filled further by parse_folder()
EXPECTED_RESULTS = {}
EXPECTED_RESULTS[os.path.relpath('Tests/create_empty_ter/pommes_poires') + ' create empty ter'] = None
EXPECTED_RESULTS[os.path.relpath('Tests/create_empty_ter/simplified_example_fr') + ' create empty ter'] = None
EXPECTED_RESULTS[os.path.relpath('Projets/AlimentationAnimale/orge check input')] = None

# Keys used inside each EXPECTED_RESULTS entry
LOGS_KEY = 'Logs'
SANKEY_KEY = 'Sankey'

# (test_name, file_relpath, expected_results) tuples; filled by parse_folder()
TEST_PARAMETERS = []
|
52
|
+
|
53
|
+
|
54
|
+
# Functions -------------------------------------------------------------------
|
55
|
+
def keep_exploring_file_or_folder(file_or_folder):
    """Return False when the given name matches any excluded pattern
    from DIRS_TO_EXCLUDE, True otherwise."""
    return not any(pattern in file_or_folder for pattern in DIRS_TO_EXCLUDE)
|
61
|
+
|
62
|
+
|
63
|
+
def parse_folder(current_dir):
    """
    Recursively scan *current_dir* for testable Excel workbooks.

    For every eligible ``.xlsx`` file found, loads the matching reference
    JSON files (parsing logs and sankey dict) when present, then fills the
    module-level ``EXPECTED_RESULTS`` dict and appends an entry to
    ``TEST_PARAMETERS``. Also creates the per-folder reference output
    directory when missing (side effect on disk).

    Parameters
    ----------
    current_dir : str
        Directory to scan.
    """
    folder_content = listdir(current_dir)
    for file_or_folder in folder_content:
        # Folder exclusion conditions (see DIRS_TO_EXCLUDE)
        if not keep_exploring_file_or_folder(file_or_folder):
            continue
        # Does test file exist ?
        if os.path.isfile(os.path.join(current_dir, file_or_folder)):
            # Is it something like <file>.xlsx ? Generated variants
            # (reconciled / converted / old / solution files) are skipped.
            if ('xlsx' in file_or_folder) and \
               ('reconciled' not in file_or_folder) and \
               ('converted' not in file_or_folder) and \
               ('old' not in file_or_folder) and \
               ('solution' not in file_or_folder):
                # If file is too large, don't test
                file_stats = os.stat(os.path.join(current_dir, file_or_folder))
                if file_stats.st_size > MAXSIZE:
                    continue
                # Get file name, relative to the tests root directory
                file_relpath = os.path.relpath(
                    os.path.join(current_dir, file_or_folder),
                    TESTS_DIR)
                test_name = os.path.splitext(file_relpath)[0]
                test_dir, test_subname = os.path.split(test_name)
                # Update expected results
                if test_name not in EXPECTED_RESULTS:
                    EXPECTED_RESULTS[test_name] = {}
                # Create results output dir (side effect on disk)
                test_refs_dir = os.path.join(current_dir, XLPARSER_TESTS_REFS_DIR)
                if XLPARSER_TESTS_REFS_DIR not in listdir(current_dir):
                    os.mkdir(test_refs_dir)
                # Get related reference logs (empty dict when no ref file yet)
                test_ref_log_path = os.path.join(
                    test_refs_dir,
                    REF_TEST_PREFIX + test_subname + REF_LOG_SUFFIX + '.json')
                EXPECTED_RESULTS[test_name][LOGS_KEY] = {}
                if os.path.isfile(test_ref_log_path):
                    with open(test_ref_log_path, "r") as test_ref_log_file:
                        test_ref_log = json.load(test_ref_log_file)
                        EXPECTED_RESULTS[test_name][LOGS_KEY] = test_ref_log
                # Get related sankey-as-dict expected result (same convention)
                test_ref_sankey_path = os.path.join(
                    test_refs_dir,
                    REF_TEST_PREFIX + test_subname + REF_SANKEY_SUFFIX + '.json')
                EXPECTED_RESULTS[test_name][SANKEY_KEY] = {}
                if os.path.isfile(test_ref_sankey_path):
                    with open(test_ref_sankey_path, "r") as test_ref_sankey_file:
                        test_ref_sankey = json.load(test_ref_sankey_file)
                        EXPECTED_RESULTS[test_name][SANKEY_KEY] = test_ref_sankey
                # Finish updating test parameters list
                TEST_PARAMETERS.append((
                    test_name,
                    file_relpath,
                    EXPECTED_RESULTS[test_name]))
            # Files never recurse
            continue
        # Recursively parse sub-directories
        parse_folder(os.path.join(current_dir, file_or_folder))
|
120
|
+
|
121
|
+
|
122
|
+
# Fill constants values
# NOTE(review): executed at import time — requires the TESTS_DIR environment
# variable to point to an existing directory; import fails otherwise.
parse_folder(TESTS_DIR)
|
124
|
+
|
125
|
+
|
126
|
+
# Class -----------------------------------------------------------------------
|
127
|
+
class MFAProblemsTests(unittest.TestCase):
    """
    Base class for SankeyExcelParser integration tests.

    Provides helpers to load a test workbook (`prepare_test`), compare
    produced logs and sankey dicts against JSON references, and optionally
    regenerate those references (`set_generate_results` / `tearDownClass`).
    """

    # When True, tests record new reference files instead of comparing
    generate_results = False

    @classmethod
    def set_generate_results(cls):
        # Switch the whole test base to "reference generation" mode;
        # new_results accumulates per-test data written out in tearDownClass
        cls.generate_results = True
        cls.new_results = {}

    def prepare_test(
        self,
        file_name: str,
        use_reconciled: bool = False
    ):
        """
        Read and check input excel file to use for tests.

        Parameters
        ----------
        :param file_name: Input excel file to read
        :type file_name: str

        Optional parameters
        -------------------
        :param use_reconciled: Use reconciled version of given file
        :type use_reconciled: bool, optional (defaults to False)

        Returns
        -------
        :return: sankey, sheet_to_remove_names, excel_filepath, tmp_dir
        :rtype: tuple as (Sankey, list, str, str)
        """
        # Make temp directory
        tmp_dir = tempfile.mkdtemp()
        # Init log file
        logname = tmp_dir + os.path.sep + "rollover.log"
        su_trace.logger_init(logname, "w")
        # Get path of file to test
        tests_dir = os.environ.get('TESTS_DIR')
        excel_filepath = os.path.join(tests_dir, file_name)
        # Get reconciled version of file to test
        excel_rec_filename = os.path.splitext(file_name)[0]+'_reconciled.xlsx'
        excel_rec_filepath = os.path.join(tests_dir, excel_rec_filename)
        # Can we test reconciled file ? (skip it when over the size limit)
        if os.path.isfile(excel_rec_filepath):
            file_stats = os.stat(excel_rec_filepath)
            if file_stats.st_size > MAXSIZE:
                use_reconciled = False
        # Read files
        sheet_to_remove_names = []
        sankey = Sankey()
        if use_reconciled and os.path.isfile(excel_rec_filepath):
            # Work on a temp copy so the reconciled reference stays untouched
            excel_rectmp_filepath = os.path.join(tmp_dir, os.path.basename(excel_rec_filename))
            excel_filepath = excel_rec_filepath
            copyfile(excel_rec_filepath, excel_rectmp_filepath)
            ok, msg = load_sankey_from_excel_file(
                excel_rectmp_filepath,
                sankey,
                sheet_to_remove_names)
            if not ok:
                su_trace.logger.error(msg)
                return (sankey, sheet_to_remove_names, excel_filepath, tmp_dir)
            # Remove results if needed
            if sankey.has_at_least_one_result():
                sankey.reset_all_results()
        else:
            sankey = Sankey()
            ok, msg = load_sankey_from_excel_file(
                excel_filepath,
                sankey,
                sheet_to_remove_names)
            if not ok:
                su_trace.logger.error(msg)
                return (sankey, sheet_to_remove_names, excel_filepath, tmp_dir)
        # Verify structure that we got
        ok, msg = sankey.check_overall_sankey_structure()
        if not ok:
            su_trace.logger.error(msg)
            return (sankey, sheet_to_remove_names, excel_filepath, tmp_dir)
        # Return
        return (sankey, sheet_to_remove_names, excel_filepath, tmp_dir)

    def _compare_recursively(
        self,
        test_entry: dict,
        ref_entry: dict
    ):
        """
        Compare two structures recursively with unittest assertions.

        Dicts are compared key by key (skipping the logs key, which is
        checked separately by `check_logs`), lists item by item, floats
        approximately (2 decimal places), everything else exactly.

        Parameters
        ----------
        :param test_entry: Structure produced by the current run
        :type test_entry: dict

        :param ref_entry: Reference structure to compare against
        :type ref_entry: dict
        """
        # Dict -> Recurse
        if isinstance(test_entry, dict):
            for key in test_entry.keys():
                if key != LOGS_KEY:
                    self._compare_recursively(
                        test_entry[key],
                        ref_entry[key])
            return
        # List -> loop on all items
        if isinstance(test_entry, list):
            for item_to_test, item_as_ref in zip(test_entry, ref_entry):
                self._compare_recursively(
                    item_to_test,
                    item_as_ref)
            return
        # Float -> test approx
        if isinstance(test_entry, float):
            self.assertAlmostEqual(
                test_entry,
                ref_entry, 2)
            return
        # Normal test
        self.assertEqual(
            test_entry,
            ref_entry)

    def check_logs(
        self,
        expected_results: dict,
        test_name: str
    ):
        """
        Compare logs with expected logs.

        Timing / performance / solver-progress lines are filtered out before
        comparison since they are not reproducible between runs.

        Parameters
        ----------
        :param expected_results: Dict that contains results to check with
        :type expected_results: dict

        :param test_name: Name of current test
        :type test_name: str
        """
        # Read current logs
        base_filename = su_trace.base_filename()
        with open(base_filename, "r") as f:
            results = f.read()
        # Split lines
        results_array = results.split('\n')
        # Split lines but filtered (drop non-deterministic entries)
        filter_result_array = []
        for row in results_array:
            if ('Main Problem' in row) or \
               ('DEBUG' in row) or \
               ('PERF' in row) or \
               ('SOLVED in' in row):
                continue
            if 'Entering variables classification at' in row:
                continue
            if ('took' in row) or \
               ('Took' in row) or \
               ('done in' in row) or \
               ('matrix reduction done' in row) or \
               ('Output (matrix_reduction)' in row) or \
               ('Interval' in row):
                continue
            filter_result_array.append(row)
        # Check if logs are the same (or record them in generation mode)
        if not self.generate_results:
            self.assertEqual(
                filter_result_array,
                expected_results[LOGS_KEY])
        else:
            if test_name not in self.new_results:
                self.new_results[test_name] = {}
            self.new_results[test_name][LOGS_KEY] = filter_result_array

    def compare_sankey(
        self,
        test_name: str,
        sankey: Sankey,
        expected_results: dict
    ):
        """
        Compare a Sankey struct to expected results.

        Parameters
        ----------
        :param test_name: name of current test
        :type test_name: str

        :param sankey: current sankey struct to check
        :type sankey: Sankey

        :param expected_results: Dict that contains results to check with
        :type expected_results: dict
        """
        # Get sankey equivalent dict
        sankey_dict = sankey.get_as_dict()
        # Run tests
        if not self.generate_results:
            # Compare with ref
            self._compare_recursively(
                sankey_dict,
                expected_results[SANKEY_KEY])
            return
        # Create test (reference generation mode)
        if test_name not in self.new_results:
            self.new_results[test_name] = {}
        self.new_results[test_name][SANKEY_KEY] = sankey_dict

    @classmethod
    def tearDownClass(cls):
        # In generation mode, write every recorded result back to the
        # per-folder reference directories as pretty-printed JSON.
        if cls.generate_results:
            for test_name in cls.new_results:
                test_dir, test_subname = os.path.split(test_name)
                test_refs_dir = os.path.join(TESTS_DIR, test_dir, XLPARSER_TESTS_REFS_DIR)
                # Save logs
                if LOGS_KEY in cls.new_results[test_name].keys():
                    test_logs_filename = REF_TEST_PREFIX + test_subname + REF_LOG_SUFFIX + '.json'
                    test_logs_filepath = os.path.join(test_refs_dir, test_logs_filename)
                    logs_as_json = json.dumps(cls.new_results[test_name][LOGS_KEY], indent=2)
                    with open(test_logs_filepath, "w") as test_logs_file:
                        test_logs_file.write(logs_as_json)
                # Save sankey struct as json if present (if reconciliation did not fail)
                if SANKEY_KEY in cls.new_results[test_name].keys():
                    test_sankey_filename = REF_TEST_PREFIX + test_subname + REF_SANKEY_SUFFIX + '.json'
                    test_sankey_filepath = os.path.join(test_refs_dir, test_sankey_filename)
                    sankey_as_json = json.dumps(cls.new_results[test_name][SANKEY_KEY], indent=2)
                    with open(test_sankey_filepath, "w") as test_sankey_file:
                        test_sankey_file.write(sankey_as_json)
|
@@ -0,0 +1,100 @@
|
|
1
|
+
"""
|
2
|
+
Auteur : Vincent LE DOZE
|
3
|
+
Date : 07/12/23
|
4
|
+
"""
|
5
|
+
|
6
|
+
# External libs ---------------------------------------------------------------
|
7
|
+
import argparse
|
8
|
+
# import os
|
9
|
+
import unittest
|
10
|
+
from parameterized import parameterized
|
11
|
+
|
12
|
+
# Local modules ---------------------------------------------------------------
|
13
|
+
from SankeyExcelParser.tests.integration.test_base import MFAProblemsTests
|
14
|
+
from SankeyExcelParser.tests.integration.test_base import TEST_PARAMETERS
|
15
|
+
from SankeyExcelParser.tests.integration.test_base import SANKEY_KEY
|
16
|
+
|
17
|
+
|
18
|
+
# Class -----------------------------------------------------------------------
|
19
|
+
class MFAProblemTestCheckInput(MFAProblemsTests):

    @parameterized.expand(TEST_PARAMETERS, skip_on_empty=True)
    def test_check_input(
        self,
        test_name: str,
        file_name: str,
        expected_results: dict
    ):
        """
        Check that Sankey extraction from an Excel file produces the
        expected parsing logs.

        Parameters
        ----------
        :param test_name: Name of current test
        :type test_name: str

        :param file_name: Name of current excel file to test
        :type file_name: str

        :param expected_results: Expected logs for sankey extraction and checks
        :type expected_results: dict
        """
        # For Debug
        print('\n{}'.format(self._testMethodName), end=' -> ', flush=True)
        # Read and check file; only the parsing side effects (logs) matter here
        prepared = self.prepare_test(file_name)
        sankey = prepared[0]
        sheet_to_remove_names = prepared[1]
        excel_filepath = prepared[2]
        # Check logs
        self.check_logs(expected_results, test_name)
        # Delete useless entries to avoid erasing previous results
        if self.generate_results:
            expected_results.pop(SANKEY_KEY)
|
64
|
+
|
65
|
+
|
66
|
+
# Main ------------------------------------------------------------------------
if __name__ == '__main__':
    # Get args
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--generate_results",
        action='store_true',
        required=False,
        help="Option to regenerate tests results")
    parser.add_argument(
        'filenames',
        metavar='F',
        type=str,
        nargs='*',
        help='Specific files to test')
    args = parser.parse_args()
    # Switch the whole test base to reference-generation mode if requested
    if args.generate_results:
        MFAProblemsTests.set_generate_results()
    # Get tests names to run: all discovered tests, or only the given ones
    if len(args.filenames) == 0:
        loader = unittest.TestLoader()
        names = loader.getTestCaseNames(MFAProblemTestCheckInput)
    else:
        names = args.filenames
    # Append tests to test suite
    suite = unittest.TestSuite()
    for name in names:
        try:
            suite.addTest(MFAProblemTestCheckInput(name))
        except Exception:
            print("Error when adding {} to test base".format(name))
    # Run tests
    runner = unittest.TextTestRunner()
    runner.run(suite)
|