SankeyExcelParser 1.0.0b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. SankeyExcelParser/__init__.py +0 -0
  2. SankeyExcelParser/io_excel.py +1867 -0
  3. SankeyExcelParser/io_excel_constants.py +811 -0
  4. SankeyExcelParser/sankey.py +3138 -0
  5. SankeyExcelParser/sankey_utils/__init__.py +0 -0
  6. SankeyExcelParser/sankey_utils/data.py +1118 -0
  7. SankeyExcelParser/sankey_utils/excel_source.py +31 -0
  8. SankeyExcelParser/sankey_utils/flux.py +344 -0
  9. SankeyExcelParser/sankey_utils/functions.py +278 -0
  10. SankeyExcelParser/sankey_utils/node.py +340 -0
  11. SankeyExcelParser/sankey_utils/protos/__init__.py +0 -0
  12. SankeyExcelParser/sankey_utils/protos/flux.py +84 -0
  13. SankeyExcelParser/sankey_utils/protos/node.py +386 -0
  14. SankeyExcelParser/sankey_utils/protos/sankey_object.py +135 -0
  15. SankeyExcelParser/sankey_utils/protos/tag_group.py +95 -0
  16. SankeyExcelParser/sankey_utils/sankey_object.py +165 -0
  17. SankeyExcelParser/sankey_utils/table_object.py +37 -0
  18. SankeyExcelParser/sankey_utils/tag.py +95 -0
  19. SankeyExcelParser/sankey_utils/tag_group.py +206 -0
  20. SankeyExcelParser/su_trace.py +239 -0
  21. SankeyExcelParser/tests/integration/__init__.py +0 -0
  22. SankeyExcelParser/tests/integration/test_base.py +356 -0
  23. SankeyExcelParser/tests/integration/test_run_check_input.py +100 -0
  24. SankeyExcelParser/tests/integration/test_run_conversions.py +96 -0
  25. SankeyExcelParser/tests/integration/test_run_load_input.py +94 -0
  26. SankeyExcelParser/tests/unit/__init__.py +0 -0
  27. SankeyExcelParser-1.0.0b0.data/scripts/run_parse_and_write_excel.py +155 -0
  28. SankeyExcelParser-1.0.0b0.data/scripts/run_parse_excel.py +115 -0
  29. SankeyExcelParser-1.0.0b0.dist-info/METADATA +113 -0
  30. SankeyExcelParser-1.0.0b0.dist-info/RECORD +32 -0
  31. SankeyExcelParser-1.0.0b0.dist-info/WHEEL +5 -0
  32. SankeyExcelParser-1.0.0b0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,3138 @@
+ """
+ Author: Vincent LE DOZE
+ Date: 21/04/23
+ """
+ 
+ # External libs ---------------------------------------------------------------
+ import copy
+ import numpy as np
+ import pandas as pd
+ import seaborn as sns
+ 
+ # Local libs ------------------------------------------------------------------
+ import SankeyExcelParser.io_excel_constants as CONST
+ import SankeyExcelParser.su_trace as su_trace
+ 
+ # External modules ------------------------------------------------------------
+ from openpyxl.styles import PatternFill, Font, Alignment, Border, Side
+ 
+ # Local modules ---------------------------------------------------------------
+ from SankeyExcelParser.sankey_utils.sankey_object import SankeyObject
+ from SankeyExcelParser.sankey_utils.node import Node
+ from SankeyExcelParser.sankey_utils.flux import Flux
+ from SankeyExcelParser.sankey_utils.data import Data
+ from SankeyExcelParser.sankey_utils.tag_group import TagGroup
+ from SankeyExcelParser.sankey_utils.functions import _stdStr
+ from SankeyExcelParser.sankey_utils.functions import _getValueIfPresent
+ from SankeyExcelParser.sankey_utils.functions import _extractFluxFromMatrix
+ from SankeyExcelParser.sankey_utils.functions import _createMatrixFromFlux
+ from SankeyExcelParser.sankey_utils.functions import _reorderTable
+ 
+ # Constants ----------------------------------------------------------
+ COLOR_WHITE = 'FFFFFF'
+ COLOR_BLACK = '000000'
+ COLOR_GREY = 'CFCFCF'
+ HEADER_ROW_ID = 1  # tables rows start from 1
+ INDEX_COL_ID = 0  # tables cols start from 0
+ SHEET_BY_DEFAULT = {}
+ SHEET_FORMATING_BY_DEFAULT = {}
+ 
+ # Default excel sheet attributes
+ SHEET_BY_DEFAULT['name'] = 'Default'  # TODO: reuse the user-provided name
+ SHEET_BY_DEFAULT['color'] = COLOR_WHITE
+ SHEET_BY_DEFAULT['table'] = pd.DataFrame()
+ SHEET_BY_DEFAULT['write_header'] = True
+ SHEET_BY_DEFAULT['write_index'] = False
+ 
+ # Default excel sheet header's cells formatting
+ SHEET_FORMATING_BY_DEFAULT['header'] = {}
+ SHEET_FORMATING_BY_DEFAULT['header']['alignement'] = Alignment(
+     horizontal='left',
+     vertical='center',
+     text_rotation=0,
+     wrap_text=True,
+     shrink_to_fit=False,
+     indent=0)
+ SHEET_FORMATING_BY_DEFAULT['header']['border'] = Border(
+     left=Side(border_style="thin", color=COLOR_BLACK),
+     right=Side(border_style="thin", color=COLOR_BLACK),
+     bottom=Side(border_style="thick", color=COLOR_BLACK))
+ SHEET_FORMATING_BY_DEFAULT['header']['font'] = Font(
+     bold=True,
+     color=COLOR_BLACK)
+ SHEET_FORMATING_BY_DEFAULT['header']['fill'] = PatternFill(
+     'solid', fgColor=COLOR_WHITE)
+ SHEET_FORMATING_BY_DEFAULT['header']['default_height'] = 15
+ SHEET_FORMATING_BY_DEFAULT['no_header'] = copy.deepcopy(
+     SHEET_FORMATING_BY_DEFAULT['header'])
+ 
+ # Default excel sheet index's cells formatting
+ SHEET_FORMATING_BY_DEFAULT['index'] = copy.deepcopy(
+     SHEET_FORMATING_BY_DEFAULT['header'])
+ SHEET_FORMATING_BY_DEFAULT['index']['default_width'] = 15
+ 
+ # Default excel sheet content's cells formatting
+ SHEET_FORMATING_BY_DEFAULT['content'] = {}
+ SHEET_FORMATING_BY_DEFAULT['content']['alignement'] = Alignment(
+     horizontal='left',
+     vertical='center',
+     text_rotation=0,
+     wrap_text=True,
+     shrink_to_fit=False,
+     indent=0)
+ SHEET_FORMATING_BY_DEFAULT['content']['border'] = Border(
+     left=Side(border_style="thin", color=COLOR_BLACK),
+     right=Side(border_style="thin", color=COLOR_BLACK),
+     bottom=Side(border_style="dashed", color=COLOR_BLACK))
+ SHEET_FORMATING_BY_DEFAULT['content']['font'] = Font(
+     bold=False,
+     color=COLOR_BLACK)
+ SHEET_FORMATING_BY_DEFAULT['content']['fill'] = PatternFill(
+     'solid', fgColor=COLOR_WHITE)
+ SHEET_FORMATING_BY_DEFAULT['content']['compute_width'] = True
+ SHEET_FORMATING_BY_DEFAULT['content']['default_width'] = 15  # Columns default width
+ SHEET_FORMATING_BY_DEFAULT['content']['default_height'] = 15  # Lines default heights
+ SHEET_FORMATING_BY_DEFAULT['no_content'] = copy.deepcopy(
+     SHEET_FORMATING_BY_DEFAULT['content'])
+ 
+ # If necessary on special cells
+ # Dict with keys :
+ # - cols numbers as tuple
+ # - then rows numbers as tuples
+ SHEET_FORMATING_BY_DEFAULT['spe_content'] = {}
+ SHEET_FORMATING_BY_DEFAULT['spe_header'] = {}
+ 
+ 
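The dictionaries above are plain openpyxl style objects grouped by role ('header', 'index', 'content'). As a rough sketch (illustration only, not part of the packaged code; the column titles are invented), this is how the header entry can be applied to a worksheet's first row:

    # Hedged sketch: apply the default header style with openpyxl.
    from openpyxl import Workbook

    wb = Workbook()
    ws = wb.active
    fmt = SHEET_FORMATING_BY_DEFAULT['header']
    for col, title in enumerate(['origin', 'destination', 'value'], start=1):
        cell = ws.cell(row=HEADER_ROW_ID, column=col, value=title)
        cell.alignment = fmt['alignement']  # note: key is spelled 'alignement' in this module
        cell.border = fmt['border']
        cell.font = fmt['font']
        cell.fill = fmt['fill']
    ws.row_dimensions[HEADER_ROW_ID].height = fmt['default_height']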
+ # Class ----------------------------------------------------------------------------
+ class UserExcelConverter(object):
+     """
+     Allows conversion between the user's sheet names / column names and the
+     standard names used when parsing the excel file.
+ 
+     Attributes
+     ----------
+     :param sheets_names: TODO
+     :type sheets_names: dict
+ 
+     :param cols_names: TODO
+     :type cols_names: dict
+     """
+ 
+     def __init__(self, language=CONST.LANG_FR):
+         """ Create & Initialize. """
+         self._sheets_names = {}
+         self._cols_names = {}
+         self._language = language
+ 
+     @property
+     def sheets_names(self):
+         return self._sheets_names
+ 
+     def add_new_sheet(self, std_name, user_name):
+         if std_name not in self._sheets_names.keys():
+             self._sheets_names[std_name] = user_name
+             self._cols_names[std_name] = {}
+ 
+     def add_new_col(self, std_sheet_name, std_col_name, user_col_name):
+         if std_sheet_name in self._sheets_names.keys():
+             # Check in the given sheet's columns dict (not the top-level dict)
+             if std_col_name not in self._cols_names[std_sheet_name].keys():
+                 self._cols_names[std_sheet_name][std_col_name] = user_col_name
+ 
+     def get_user_sheet_name(self, std_sheet_name):
+         std_sheet_name = self._deal_with_specific_sheet_names(std_sheet_name)
+         if std_sheet_name in self._sheets_names.keys():
+             return self._sheets_names[std_sheet_name]
+         return CONST.DICT_OF_SHEET_NAMES[std_sheet_name][self._language]
+ 
+     def get_user_col_name(self, std_sheet_name, std_col_name):
+         # Standard sheet name
+         std_sheet_name = self._deal_with_specific_sheet_names(std_sheet_name)
+         # Check if user specified another name for this col of given sheet
+         if std_sheet_name in self._sheets_names.keys():
+             if std_col_name in self._cols_names[std_sheet_name].keys():
+                 return self._cols_names[std_sheet_name][std_col_name]
+         # Otherwise, try to give standard col name
+         try:
+             return CONST.DICT_OF_COLS_NAMES[std_sheet_name][std_col_name][self._language]
+         except Exception:
+             # Useful for taggroup cols which are not standard cols
+             return std_col_name
+ 
+     def _deal_with_specific_sheet_names(self, sheet_name):
+         if sheet_name == CONST.NODE_TYPE_PRODUCT:
+             return CONST.PRODUCTS_SHEET
+         if sheet_name == CONST.NODE_TYPE_SECTOR:
+             return CONST.SECTORS_SHEET
+         if sheet_name == CONST.NODE_TYPE_EXCHANGE:
+             return CONST.EXCHANGES_SHEET
+         return sheet_name
+ 
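A minimal usage sketch of the converter (illustration only, not from the package; 'flux' and 'origin' stand in for whatever standard names the parser uses):

    conv = UserExcelConverter()
    conv.add_new_sheet('flux', 'Flux 2020')        # std sheet name -> user's sheet title
    conv.add_new_col('flux', 'origin', 'Origine')  # std col name -> user's col title
    conv.get_user_sheet_name('flux')               # -> 'Flux 2020'
    conv.get_user_col_name('flux', 'origin')       # -> 'Origine'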
+ 
+ class Sankey(object):
+     """
+     Contains all Sankey diagram information.
+ 
+     Attributes
+     ----------
+     :param nodes: Dictionary of all nodes, accessible via their standardized name.
+     :type nodes: dict{node_name=str, node=:class:`sankey.Node`}
+ 
+     :param flux: Dictionary of all flux, accessible via their standardized name.
+     :type flux: dict(flux_name=str: flux=:class:`sankey.Flux`)
+ 
+     :param taggs: Dictionary of all tag groups, accessible via their type, then their
+         standardized name.
+     :type taggs: dict(tagg_type=str: taggs=dict(tagg_name=str: tagg=:class:`sankey.TagGroup`))
+ 
+     :param tooltips: All possible tooltips for Sankey objects, with their respective descriptions
+     :type tooltips: dict(name=str: description=str)
+ 
+     :param units: All possible units for Sankey data, with their respective descriptions
+     :type units: dict(name=str: description=str)
+     """
+ 
+     def __init__(self):
+         """ Create & Initialize a Sankey object. """
+         # Default attributes
+         self.nodes = {}
+         self.flux = {}
+         self.taggs = {
+             CONST.TAG_TYPE_DATA: {},
+             CONST.TAG_TYPE_FLUX: {},
+             CONST.TAG_TYPE_LEVEL: {},
+             CONST.TAG_TYPE_NODE: {}
+         }
+         self.tooltips = {}
+         # Attributes linked to reconciliation
+         self.units = {}
+         self.constraints = {}
+         # Allow retrieving user naming convention
+         self.xl_user_converter = UserExcelConverter()
+         # Other attributes
+         self._max_nodes_level = 1
+         self.reset_msgs()
+ 
+     @property
+     def max_nodes_level(self):
+         return self._max_nodes_level
+ 
+     @property
+     def info_msg(self):
+         return self._info_msg
+ 
+     def add_info_msg(self, msg):
+         if isinstance(msg, str) and msg:
+             self._info_msg += msg
+             # Check last character to be EOL
+             if self._info_msg[-1] != '\n':
+                 self._info_msg += '\n'
+ 
+     @property
+     def warn_msg(self):
+         return self._warn_msg
+ 
+     def add_warn_msg(self, msg):
+         if isinstance(msg, str) and msg:
+             self._warn_msg += msg
+             # Check last character to be EOL
+             if self._warn_msg[-1] != '\n':
+                 self._warn_msg += '\n'
+ 
+     @property
+     def err_msg(self):
+         return self._err_msg
+ 
+     def add_err_msg(self, msg):
+         if isinstance(msg, str) and msg:
+             self._err_msg += msg
+             # Check last character to be EOL
+             if self._err_msg[-1] != '\n':
+                 self._err_msg += '\n'
+ 
+     def add_msg_on_new_node(self, msg):
+         if self._msg_on_new_nodes is None:
+             return
+         if self._msg_on_new_nodes == 'warn':
+             msg += " Not sure if it was intended."
+             self.add_warn_msg(msg)
+         else:
+             self.add_info_msg(msg)
+ 
+     def add_msg_on_new_flux(self, msg):
+         if self._msg_on_new_flux is None:
+             return
+         if self._msg_on_new_flux == 'warn':
+             msg += " Not sure if it was intended."
+             self.add_warn_msg(msg)
+         else:
+             self.add_info_msg(msg)
+ 
+     def reset_msgs(
+         self,
+         msg_on_new_nodes=None,
+         msg_on_new_flux=None,
+     ):
+         self._info_msg = ''
+         self._warn_msg = ''
+         self._err_msg = ''
+         self._msg_on_new_nodes = msg_on_new_nodes
+         self._msg_on_new_flux = msg_on_new_flux
+ 
+     def send_msgs(
+         self,
+         name_of_operation
+     ):
+         # Send info msgs
+         if len(self._info_msg) > 0:
+             _ = 'Info(s) on {} :'.format(name_of_operation)
+             su_trace.logger.debug(_)
+             for _ in self._info_msg.split('\n'):
+                 if len(_) > 0:
+                     su_trace.logger.debug(' - {}'.format(_))
+         # Send warning msgs
+         if len(self._warn_msg) > 0:
+             _ = 'Warning(s) on {} :'.format(name_of_operation)
+             su_trace.logger.info(_)
+             for _ in self._warn_msg.split('\n'):
+                 if len(_) > 0:
+                     su_trace.logger.info(' - {}'.format(_))
+         # Send error msgs
+         if len(self._err_msg) > 0:
+             _ = 'Error(s) on {} :'.format(name_of_operation)
+             su_trace.logger.error(_)
+             for _ in self._err_msg.split('\n'):
+                 if len(_) > 0:
+                     su_trace.logger.error(' - {}'.format(_))
+ 
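The three message buffers follow a reset / fill / flush cycle around each parsing step. A hedged sketch of that lifecycle (illustrative strings, not from the package):

    sankey = Sankey()
    sankey.reset_msgs(msg_on_new_nodes='warn')            # start of a parsing step
    sankey.add_msg_on_new_node('Created node "steel".')   # routed to warn_msg here
    sankey.add_err_msg('Missing sheet "flux".')
    sankey.send_msgs('nodes sheet parsing')               # flushed through su_trace.logger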
+     @property
+     def data_taggs(self):
+         return list(self.taggs[CONST.TAG_TYPE_DATA].values())
+ 
+     @property
+     def flux_taggs(self):
+         return list(self.taggs[CONST.TAG_TYPE_FLUX].values())
+ 
+     @property
+     def level_taggs(self):
+         return list(self.taggs[CONST.TAG_TYPE_LEVEL].values())
+ 
+     @property
+     def node_taggs(self):
+         return list(self.taggs[CONST.TAG_TYPE_NODE].values())
+ 
+     @property
+     def taggs_extra_infos_names(self):
+         extra_infos_names = set()
+         for taggs_types in self.taggs.values():
+             for tagg in taggs_types.values():
+                 extra_infos_names |= set(tagg.extra_infos_name)
+         return sorted(extra_infos_names)
+ 
+     @property
+     def nodes_extra_infos_names(self):
+         extra_infos_names = set()
+         for node in self.nodes.values():
+             extra_infos_names |= set(node.extra_infos_name)
+         return sorted(extra_infos_names)
+ 
+     @property
+     def data_extra_infos_names(self):
+         extra_infos_names = set()
+         for flux in self.flux.values():
+             if flux.has_data():
+                 for data in flux.datas:
+                     extra_infos_names |= set(data.extra_infos_name)
+         return sorted(extra_infos_names)
+ 
+     def reset_all_results(self):
+         """
+         Remove all result data from Sankey.
+         Useful when recomputing MFA.
+         """
+         for flux in self.flux.values():
+             flux.reset_results()
+ 
+     def get_tagg_from_name_and_type(self, tagg_name, tagg_type):
+         """
+         Gives a tag group from a name and a type.
+ 
+         Attributes
+         ----------
+         :param tagg_name: Name of the tag group
+         :type tagg_name: str
+ 
+         :param tagg_type: Type of the tag group
+         :type tagg_type: str
+ 
+         Returns
+         -------
+         :return: Tag group found, or None
+         :rtype: TagGroup | None
+         """
+         try:
+             tagg_name_ref = _stdStr(tagg_name)
+             return self.taggs[tagg_type][tagg_name_ref]
+         except Exception:
+             return None
+ 
+     def __repr__(self):
+         """
+         Gives a string representation of Sankey object.
+ 
+         Returns
+         -------
+         :return: String format of self.
+         :rtype: str
+         """
+         s = ''
+         # Add nodes
+         s += 'Nodes \n'
+         s += '-'*40 + '\n'
+         for node in self.nodes.values():
+             s += '{}\n'.format(node)
+         # Add flux
+         s += 'Flux \n'
+         s += '-'*40 + '\n'
+         for flux in self.flux.values():
+             s += '{}\n'.format(flux)
+         return s
+ 
+     def update_from_tags_table(
+         self,
+         table: pd.DataFrame
+     ):
+         """
+         Update self from a tag groups table.
+ 
+         Example of possible tables
+ 
+         +-----------------+----------+---------------------+------------------+-------------+--------+
+         | TAG_NAME        | TAG_TYPE | TAG_TAGS            | TAG_COLORS       | INFO 1      | INFO 2 |
+         +=================+==========+=====================+==================+=============+========+
+         | tag g0          | nodeTag  | tag01:tag02:tag03   | hex1:hex2:hex3   |             |        |
+         +-----------------+----------+---------------------+------------------+-------------+--------+
+         | tag g1          | levelTag | tag11:tag12         |                  |             |        |
+         +-----------------+----------+---------------------+------------------+-------------+--------+
+         | tag g1          | levelTag | tag12               | hex1             | special tag |        |
+         +-----------------+----------+---------------------+------------------+-------------+--------+
+         | tag g3 / tag g4 | nodeTag  | tag31:tag32 / tag41 | hex1:hex2 / hex3 |             |        |
+         +-----------------+----------+---------------------+------------------+-------------+--------+
+ 
+         Parameters
+         ----------
+         :param table: Table to parse.
+         :type table: pandas.DataFrame
+ 
+         Returns
+         -------
+         :return: Tuple with boolean at True if everything went ok, False otherwise,
+             and an error message if necessary
+         :rtype: (bool, str)
+         """
+         # Init warning message
+         self.reset_msgs()
+         # Extract columns names that contain extra infos
+         taggs_extra_infos = [_ for _ in table.columns if _ not in CONST.TAG_SHEET_COLS]
+         # Create new tags from table
+         for index in table.index:
+             line = table.iloc[index]
+             # The / is specific to level tags - it creates antagonist tag groups
+             taggs_names = line[CONST.TAG_NAME].split('/')  # List of tag groups' names
+             taggs_tags_names = line[CONST.TAG_TAGS].split('/')  # List of tag groups' tags names
+             taggs_tags_colors = _getValueIfPresent(line, CONST.TAG_COLOR, None)  # List of tag groups' tags colors
+             # We can have no colors
+             if (taggs_tags_colors == '') or (taggs_tags_colors is None):
+                 taggs_tags_colors = [None]*len(taggs_names)
+             else:
+                 taggs_tags_colors = taggs_tags_colors.split('/')
+             # If we have antagonist tag grps, do we have the correct number of '/' between tag grps attributes
+             if len(taggs_names) != len(taggs_tags_names):
+                 err = 'At line {} : '.format(index)
+                 err += 'Not the same number of "/" separations '
+                 err += 'for tags for antagonist tag groups "{}" '.format(
+                     line[CONST.TAG_NAME])
+                 return False, err
+             # If we have antagonist tag grps, check coherence on number of colors attributes
+             if len(taggs_names) < len(taggs_tags_colors):
+                 warn = 'At line {} : '.format(index)
+                 warn += 'Not the same number of "/" separations '
+                 warn += 'for colors for antagonist tag groups "{}" '.format(
+                     line[CONST.TAG_NAME])
+                 self.add_warn_msg(warn)
+                 # Remove surplus colors
+                 nb_to_pop = len(taggs_tags_colors) - len(taggs_names)
+                 for _ in range(nb_to_pop):
+                     taggs_tags_colors.pop(-1)
+             if len(taggs_names) > len(taggs_tags_colors):
+                 warn = 'At line {} : '.format(index)
+                 warn += 'Not the same number of "/" separations '
+                 warn += 'for colors for antagonist tag groups "{}" '.format(
+                     line[CONST.TAG_NAME])
+                 self.add_warn_msg(warn)
+                 # Complete missing colors
+                 nb_to_complete = len(taggs_names) - len(taggs_tags_colors)
+                 for _ in range(nb_to_complete):
+                     taggs_tags_colors.append(None)
+             # Create tag groups with their respective tags
+             prev_taggs = []
+             for tagg_name, tagg_tags_names, tagg_tags_colors in zip(taggs_names, taggs_tags_names, taggs_tags_colors):
+                 # Old tag groups
+                 if (tagg_name == 'Dimensions'):
+                     continue
+                 # Create new tag group
+                 tagg = self.get_or_create_tagg(tagg_name, line[CONST.TAG_TYPE])
+                 if tagg is None:
+                     err = 'At line {2} : Could not create tag group "{0}" : bad type "{1}"'.format(
+                         line[CONST.TAG_NAME], line[CONST.TAG_TYPE], index)
+                     return False, err
+                 # Add tags and their respective colors to tag groups
+                 tags_names = tagg_tags_names.split(':')
+                 if tagg_tags_colors is not None:
+                     tags_colors = tagg_tags_colors.split(':')
+                     if len(tags_names) > len(tags_colors):
+                         tags_colors += [None]*(len(tags_names) - len(tags_colors))
+                 else:
+                     tags_colors = [None]*len(tags_names)
+                 for tag_name, tag_color in zip(tags_names, tags_colors):
+                     tag = tagg.get_or_create_tag(tag_name)
+                     tag.color = tag_color
+                 # Update tag group attributes
+                 tagg.update(
+                     is_palette=_getValueIfPresent(line, CONST.TAG_IS_PALETTE, None),
+                     colormap=_getValueIfPresent(line, CONST.TAG_COLORMAP, None))
+                 # Add tag group extra infos
+                 for extra_info in taggs_extra_infos:
+                     tagg.add_extra_info(extra_info, line[extra_info])
+                 # If we have antagonist tags, we need to make it explicit
+                 for prev_tagg in prev_taggs:
+                     tagg.add_antagonist_tagg(prev_tagg)
+                 prev_taggs.append(tagg)
+         return True, self.warn_msg
+ 
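A hedged illustration of the table layout this method expects (the header labels stand in for the CONST.TAG_* column names; values invented; `sankey` is a Sankey instance):

    taggs = pd.DataFrame({
        'TAG_NAME':   ['year', 'scope / antiscope'],   # '/' builds antagonist groups
        'TAG_TYPE':   ['dataTag', 'levelTag'],
        'TAG_TAGS':   ['2020:2021', 'in:out / rest'],  # ':' separates tags in a group
        'TAG_COLORS': [None, None],
    })
    ok, warn = sankey.update_from_tags_table(taggs)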
+     def update_from_data_table(
+         self,
+         input_table: pd.DataFrame,
+         warn_on_new_nodes: bool = False,
+         warn_on_new_flux: bool = False
+     ):
+         # Init warning message
+         self.reset_msgs(
+             msg_on_new_nodes=('warn' if warn_on_new_nodes else None),
+             msg_on_new_flux=('warn' if warn_on_new_flux else None))
+         # Copy table to avoid modification on reference
+         table = input_table.copy()
+         # Extra columns in table (more than needed)
+         data_extra_infos = \
+             set(table.columns) \
+             - set(CONST.DATA_SHEET_COLS) \
+             - set(self.taggs[CONST.TAG_TYPE_FLUX].keys()) \
+             - set(self.taggs[CONST.TAG_TYPE_DATA].keys())
+         # Create new flux & data from table
+         for index in table.index:
+             # Read line
+             line = table.iloc[index]
+             # Create Flux
+             flux = self.get_or_create_flux(
+                 line[CONST.DATA_ORIGIN],
+                 line[CONST.DATA_DESTINATION])
+             if flux is None:
+                 self.add_warn_msg(
+                     'At line {} : Could not find or create specified flux.\n'.
+                     format(index))
+                 continue
+             # Get dataTag
+             ok_data_tags, data_tags = self._get_tags_from_line(CONST.TAG_TYPE_DATA, line)
+             # Get fluxTag
+             _, flux_tags = self._get_tags_from_line(CONST.TAG_TYPE_FLUX, line)
+             # Do not process line if data tags retrieving failed somehow
+             if not ok_data_tags:
+                 self.add_warn_msg(
+                     'At line {} : There are problems with the given tags so the line cannot be processed.\n'
+                     .format(index))
+                 continue
+             # Corresponding datas in given flux / tags
+             datas = flux.get_corresponding_datas_from_tags(data_tags)
+             # Read datas attributes
+             data_attrs = {
+                 'value': _getValueIfPresent(line, CONST.DATA_VALUE, None),
+                 'quantity': _getValueIfPresent(line, CONST.DATA_QUANTITY, None),
+                 'natural_unit': _getValueIfPresent(line, CONST.DATA_NATURAL_UNIT, None),
+                 'factor': _getValueIfPresent(line, CONST.DATA_FACTOR, None),
+                 'sigma_relative': _getValueIfPresent(line, CONST.DATA_UNCERT, CONST.DEFAULT_SIGMA_RELATIVE),
+                 'source': _getValueIfPresent(line, CONST.DATA_SOURCE, None),
+                 'hypothesis': _getValueIfPresent(line, CONST.DATA_HYPOTHESIS, None)}
+             # Update datas with read attributes & tags infos
+             for data in datas:
+                 # Associated flux tags
+                 for flux_tag in flux_tags:
+                     data.add_tag(flux_tag)
+                 # Data attributes
+                 data.update(**data_attrs)
+                 # Extra infos
+                 for extra_info in data_extra_infos:
+                     data.add_extra_info(
+                         extra_info,
+                         _getValueIfPresent(line, extra_info, None))
+         return True, self.warn_msg
+ 
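A hedged sketch of a matching data table (column labels stand in for the CONST.DATA_* names; 'year' assumes a data-tag group of that name was declared beforehand):

    datas = pd.DataFrame({
        'origin':      ['mine', 'plant'],
        'destination': ['plant', 'market'],
        'value':       [120.0, 95.0],
        'year':        ['2020', '2020'],   # optional data-tag column
    })
    ok, warn = sankey.update_from_data_table(datas, warn_on_new_nodes=True)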
+     def update_from_min_max_table(
+         self,
+         input_table: pd.DataFrame,
+         warn_on_new_nodes: bool = False,
+         warn_on_new_flux: bool = False
+     ):
+         # Init warning message
+         self.reset_msgs(
+             msg_on_new_nodes=('warn' if warn_on_new_nodes else None),
+             msg_on_new_flux=('warn' if warn_on_new_flux else None))
+         # Copy table to avoid modification on reference
+         table = input_table.copy()
+         # Create new flux & data from table
+         for index in table.index:
+             # Read line
+             line = table.iloc[index]
+             # Get min max attributes
+             min_max_attributes = {
+                 "min_val": _getValueIfPresent(line, CONST.MIN_MAX_MIN, None),
+                 "min_quantity": _getValueIfPresent(line, CONST.MIN_MAX_MIN_QUANTITY, None),
+                 "max_val": _getValueIfPresent(line, CONST.MIN_MAX_MAX, None),
+                 "max_quantity": _getValueIfPresent(line, CONST.MIN_MAX_MAX_QUANTITY, None)}
+             min_max_optional_attributes = {
+                 "natural_unit": _getValueIfPresent(line, CONST.MIN_MAX_NATURAL_UNIT, None),
+                 "factor": _getValueIfPresent(line, CONST.MIN_MAX_FACTOR, None),
+                 "hypothesis": _getValueIfPresent(line, CONST.MIN_MAX_HYPOTHESIS, None),
+                 "source": _getValueIfPresent(line, CONST.MIN_MAX_SOURCE, None)}
+             # We create min/max only if we have attributes
+             ok_to_parse_min_max = False
+             for _ in min_max_attributes.values():
+                 if (_ is not None):
+                     ok_to_parse_min_max = True
+                     break
+             # We have the necessary attributes to parse
+             if ok_to_parse_min_max:
+                 # First we get or create corresponding flux
+                 node_orig = line[CONST.MIN_MAX_ORIGIN]
+                 node_dest = line[CONST.MIN_MAX_DESTINATION]
+                 corresp_flux = []
+                 if str(node_orig) == '*':
+                     node_dest = _stdStr(node_dest)
+                     for flux in self.flux.values():
+                         if flux.dest.name == node_dest:
+                             corresp_flux.append(flux)
+                 elif str(node_dest) == '*':
+                     node_orig = _stdStr(node_orig)
+                     for flux in self.flux.values():
+                         if flux.orig.name == node_orig:
+                             corresp_flux.append(flux)
+                 else:
+                     _ = self.get_or_create_flux(node_orig, node_dest)
+                     if _ is None:
+                         self.add_warn_msg(
+                             'At line {} : Could not find or create specified flux.\n'.
+                             format(index))
+                         continue
+                     corresp_flux.append(_)
+                 # Get tags if they exist
+                 _, flux_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_FLUX, line)
+                 ok_data_tags, data_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_DATA, line)
+                 # Do not process line if tags retrieving failed somehow
+                 if (not ok_data_tags):
+                     self.add_warn_msg(
+                         'At line {} : There are problems with the given tags so the line cannot be processed.\n'
+                         .format(index))
+                     continue
+                 # Update flux or data related to flux
+                 for flux in corresp_flux:
+                     # Corresponding datas
+                     datas = flux.get_corresponding_datas_from_tags(data_tags_present)
+                     # Update min max for each data
+                     for data in datas:
+                         for flux_tag in flux_tags_present:
+                             data.add_tag(flux_tag)
+                         data.min_max.update(**min_max_attributes)
+                         data.min_max.update(**min_max_optional_attributes)
+         return True, self.warn_msg
+ 
+     def update_from_constraints_table(
+         self,
+         input_table: pd.DataFrame,
+         warn_on_new_nodes: bool = False,
+         warn_on_new_flux: bool = False
+     ):
+         # Init warning message
+         self.reset_msgs(
+             msg_on_new_nodes=('warn' if warn_on_new_nodes else None),
+             msg_on_new_flux=('warn' if warn_on_new_flux else None))
+         # Copy table to avoid modification on reference
+         table = input_table.copy()
+         # Create new flux & data from table
+         for index in table.index:
+             # Read line
+             line = table.iloc[index]
+             # Get or create corresponding flux
+             node_orig = line[CONST.CONSTRAINT_ORIGIN]
+             node_dest = line[CONST.CONSTRAINT_DESTINATION]
+             corresp_flux = []
+             if str(node_orig) == '*':
+                 # TODO
+                 # To be discussed - In 'add_other_constraints' from 'SCFMA',
+                 # having '*' means all existing product nodes...
+                 node_dest = _stdStr(node_dest)
+                 for flux in self.flux.values():
+                     if flux.dest.name == node_dest:
+                         corresp_flux.append(flux)
+             elif str(node_dest) == '*':
+                 # TODO
+                 # To be discussed - In 'add_other_constraints' from 'SCFMA',
+                 # having '*' means all existing product nodes...
+                 node_orig = _stdStr(node_orig)
+                 for flux in self.flux.values():
+                     if flux.orig.name == node_orig:
+                         corresp_flux.append(flux)
+             else:
+                 _ = self.get_or_create_flux(node_orig, node_dest)
+                 if _ is None:
+                     self.add_warn_msg(
+                         'At line {} : Could not find or create specified flux.\n'
+                         .format(index))
+                     continue
+                 corresp_flux.append(_)
+             # Get corresponding data if it exists
+             # ok_flux_tags, flux_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_FLUX, line)
+             ok_data_tags, data_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_DATA, line)
+             # Do not process line if tags retrieving failed somehow
+             if (not ok_data_tags):
+                 self.add_warn_msg(
+                     'At line {} : There are problems with the given tags so the line cannot be processed.\n'
+                     .format(index))
+                 continue
+             # Get constraint id
+             try:
+                 constraint_id = str(_getValueIfPresent(line, CONST.CONSTRAINT_ID, None))
+             except Exception:
+                 err = 'At line {} : Unable to get a valid constraint id.\n'.format(index)
+                 return False, err
+             constraint_attributes = {
+                 "eq": _getValueIfPresent(line, CONST.CONSTRAINT_EQ, None),
+                 "ineq_inf": _getValueIfPresent(line, CONST.CONSTRAINT_INEQ_INF, None),
+                 "ineq_sup": _getValueIfPresent(line, CONST.CONSTRAINT_INEQ_SUP, None),
+                 "source": _getValueIfPresent(line, CONST.CONSTRAINT_SOURCE, None),
+                 "hypothesis": _getValueIfPresent(line, CONST.CONSTRAINT_HYPOTHESIS, None),
+                 "traduction": _getValueIfPresent(line, CONST.CONSTRAINT_TRADUCTION, None)}
+             # Update flux or data related to given constraint
+             for flux in corresp_flux:
+                 # Create and add constraint to data
+                 if len(data_tags_present) > 0:
+                     # Find datas that correspond to given tags
+                     datas = flux.get_corresponding_datas_from_tags(
+                         data_tags_present)
+                     # If multiple datas
+                     split_constraint = (len(datas) > 1)
+                     # Update constraints for each data
+                     for id, data in enumerate(datas):
+                         self.add_constraint(
+                             '{0}0{1}'.format(constraint_id, id) if split_constraint else constraint_id,
+                             data,
+                             **constraint_attributes)
+                 else:  # Create and add constraint to flux
+                     self.add_constraint(constraint_id, flux, **constraint_attributes)
+         return True, self.warn_msg
+ 
+     def update_from_result_table(
+         self,
+         input_table: pd.DataFrame
+     ):
+         # Init warning message
+         self.reset_msgs(
+             msg_on_new_nodes='warn',
+             msg_on_new_flux='warn')
+         # Copy table to avoid modification on reference
+         table = input_table.copy()
+         # Extra columns in table (more than needed)
+         extra_cols = \
+             set(table.columns) \
+             - set(CONST.RESULTS_SHEET_COLS) \
+             - set(self.taggs[CONST.TAG_TYPE_FLUX].keys()) \
+             - set(self.taggs[CONST.TAG_TYPE_DATA].keys())
+         # Create new flux & data from table
+         for index in table.index:
+             # Read line
+             line = table.iloc[index]
+             # Create Flux
+             flux = self.get_or_create_flux(line[CONST.RESULTS_ORIGIN], line[CONST.RESULTS_DESTINATION])
+             if flux is None:
+                 self.add_warn_msg(
+                     'At line {} : Could not find or create specified flux.\n'
+                     .format(index))
+                 continue
+             # Datatags
+             ok_data_tags, data_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_DATA, line)
+             # Do not process line if tags retrieving failed somehow
+             if not ok_data_tags:
+                 self.add_warn_msg(
+                     'At line {} : There are problems with the given DataTags so the line cannot be processed.\n'
+                     .format(index))
+                 continue
+             # Corresponding data - Must find only one or None
+             datas = flux.get_corresponding_datas_from_tags(data_tags_present)
+             if len(datas) > 1:
+                 self.add_warn_msg(
+                     "At line {} : ".format(index) +
+                     "Too many existing & corresponding datas " +
+                     "to result with the tags ({}) ".format(
+                         ','.join([_.name_unformatted for _ in data_tags_present])) +
+                     "We cannot match result with an existing data.\n")
+             if len(datas) == 0:
+                 self.add_warn_msg(
+                     "At line {} : ".format(index) +
+                     "No data matching with the given result tagged by ({}) ".format(
+                         ','.join([_.name_unformatted for _ in data_tags_present])) +
+                     "We cannot match result with an existing data.\n")
+                 continue
+             # FluxTags
+             _, flux_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_FLUX, line)
+             # Read results attributes
+             result_attributes = {
+                 "value": _getValueIfPresent(line, CONST.RESULTS_VALUE, None)}
+             # If we have at least one of the columns listed in result_attributes,
+             # ... ok to read data
+             for _ in result_attributes.values():
+                 if (_ is not None):
+                     result = Data(**result_attributes)
+                     result.min_val = _getValueIfPresent(line, CONST.RESULTS_FREE_MIN, None)
+                     result.max_val = _getValueIfPresent(line, CONST.RESULTS_FREE_MAX, None)
+                     # Link to flux
+                     flux.add_result(result)
+                     # Apply tags
+                     for tag in (data_tags_present + flux_tags_present):
+                         result.add_tag(tag)
+                     # Set result alterego
+                     result.alterego = datas[0]
+                     # Save data extra infos
+                     for extra_info in extra_cols:
+                         result.add_extra_info(extra_info, _getValueIfPresent(line, extra_info, None))
+                     # We break the loop
+                     break
+         return True, self.warn_msg
+ 
+     def update_from_analysis_table(
+         self,
+         input_table: pd.DataFrame
+     ):
+         # Init warning message
+         self.reset_msgs(
+             msg_on_new_nodes='warn',
+             msg_on_new_flux='warn')
+         # Copy table to avoid modification on reference
+         table = input_table.copy()
+         # Extra columns in table (more than needed)
+         extra_cols = \
+             set(table.columns) \
+             - set(CONST.ANALYSIS_SHEET_COLS) \
+             - set(self.taggs[CONST.TAG_TYPE_FLUX].keys()) \
+             - set(self.taggs[CONST.TAG_TYPE_DATA].keys())
+         # Create new flux & data from table
+         for index in table.index:
+             # Read line
+             line = table.iloc[index]
+             # Create Flux
+             flux = self.get_or_create_flux(
+                 line[CONST.RESULTS_ORIGIN],
+                 line[CONST.RESULTS_DESTINATION])
+             if flux is None:
+                 self.add_warn_msg(
+                     'At line {} : Could not find or create specified flux.\n'
+                     .format(index))
+                 continue
+             # Data input attributes
+             data_attributes = {
+                 "value": _getValueIfPresent(line, CONST.ANALYSIS_VALUE_IN, None),
+                 "sigma_relative": _getValueIfPresent(
+                     line, CONST.ANALYSIS_VALUE_IN_SIGMA, CONST.DEFAULT_SIGMA_RELATIVE
+                 ),
+                 "sigma_percent": _getValueIfPresent(
+                     line, CONST.ANALYSIS_VALUE_IN_SIGMA_PRCT, CONST.DEFAULT_SIGMA_PERCENT
+                 ),
+                 "min_val": _getValueIfPresent(line, CONST.ANALYSIS_VALUE_MIN_IN, None),
+                 "max_val": _getValueIfPresent(line, CONST.ANALYSIS_VALUE_MAX_IN, None)}
+             # Results attributes
+             result_attributes = {
+                 "value": _getValueIfPresent(line, CONST.RESULTS_VALUE, None)}
+             # Analysis attributes
+             analysis_attributes = {
+                 CONST.ANALYSIS_NB_SIGMAS: _getValueIfPresent(line, CONST.ANALYSIS_NB_SIGMAS, None),
+                 CONST.ANALYSIS_AI: _getValueIfPresent(line, CONST.ANALYSIS_AI, None),
+                 CONST.ANALYSIS_CLASSIF: _getValueIfPresent(line, CONST.ANALYSIS_CLASSIF, None)}
+             # DataTags
+             ok_data_tags, data_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_DATA, line)
+             # Do not process line if retrieving failed somehow
+             if not ok_data_tags:
+                 self.add_warn_msg(
+                     'At line {} : There are problems with the given DataTags so the line cannot be processed.\n'
+                     .format(index))
+                 continue
+             # Corresponding data
+             datas = flux.get_corresponding_datas_from_tags(data_tags_present)
+             # Update data - Must find only one or None
+             if len(datas) == 1:
+                 # Data was read before, we update it
+                 data = datas[0]
+                 data.update_unknown_only(**data_attributes)
+             else:
+                 # Too many matching datas, error
+                 self.add_warn_msg(
+                     "At line {} : ".format(index) +
+                     "Too many existing & corresponding datas " +
+                     "with the tags ({}). ".format(
+                         ','.join([_.name_unformatted for _ in data_tags_present])) +
+                     "We will do our best for matching.\n")
+                 # Try to match data
+                 try:
+                     # Min dist algo - init
+                     value_to_match = float(data_attributes["value"])
+                     data = datas[0]
+                     min_dist = abs(data.value - value_to_match)
+                     # Min dist algo - run
+                     for _ in datas[1:]:
+                         dist = abs(_.value - value_to_match)
+                         if min_dist > dist:
+                             min_dist = dist
+                             data = _
+                     # Update data attributes
+                     data.update_unknown_only(**data_attributes)
+                 # If we have an error, we pass to next line
+                 except Exception:
+                     self.add_warn_msg(
+                         " - Couldn't find a matching data.\n")
+                     data = None
+             # Corresponding result
+             results = flux.get_corresponding_results_from_tags(data_tags_present)
+             # FluxTags
+             _, flux_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_FLUX, line)
+             # Update results - Must find only one or None
+             result = Data()
+             if len(results) == 0:
+                 # Result was not read before, so we update flux
+                 result = Data(**result_attributes)
+                 for tag in (data_tags_present + flux_tags_present):
+                     result.add_tag(tag)
+                 flux.add_result(result)
+             elif len(results) == 1:
+                 # Result was read before, we update it
+                 result = results[0]
+                 result.update_unknown_only(**result_attributes)
+                 for tag in (flux_tags_present):
+                     result.add_tag(tag)
+             else:  # Too many matching results, error
+                 return False, \
+                     'At line {} : '.format(index) + \
+                     'Result tagged with ({}) appears more than once'.format(
+                         ','.join([_.name_unformatted for _ in data_tags_present]))
+             # Link result and data together
+             result.alterego = data
+             # Add analysis attributes
+             result.add_extra_infos(analysis_attributes)
+             # Add extra info to both
+             for extra_info in extra_cols:
+                 extra_info_value = _getValueIfPresent(line, extra_info, None)
+                 result.add_extra_info(extra_info, extra_info_value)
+         return True, self.warn_msg
+ 
+     def update_from_uncertainty_table(
+         self,
+         input_table: pd.DataFrame
+     ):
+         # Init warning message
+         self.reset_msgs(
+             msg_on_new_nodes='warn',
+             msg_on_new_flux='warn')
+         # Copy table to avoid modification on reference
+         table = input_table.copy()
+         # # Extra columns in table (more than needed)
+         # extra_cols = \
+         #     set(table.columns) \
+         #     - set(CONST.UNCERTAINTY_SHEET_COLS) \
+         #     - set(self.taggs[CONST.TAG_TYPE_FLUX].keys()) \
+         #     - set(self.taggs[CONST.TAG_TYPE_DATA].keys())
+         # Create new flux & data from table
+         for index in table.index:
+             # Read line
+             line = table.iloc[index]
+             # Create Flux
+             flux = self.get_or_create_flux(line[CONST.RESULTS_ORIGIN], line[CONST.RESULTS_DESTINATION])
+             if flux is None:
+                 self.add_warn_msg(
+                     'At line {} : Could not find or create specified flux.\n'
+                     .format(index))
+                 continue
+             # Read monte carlo attributes
+             flux.add_monte_carlo(
+                 _getValueIfPresent(line, CONST.UNCERTAINTY_MC_MU_IN, None),
+                 _getValueIfPresent(line, CONST.UNCERTAINTY_MC_STD_IN, None),
+                 _getValueIfPresent(line, CONST.UNCERTAINTY_MC_MU, None),
+                 _getValueIfPresent(line, CONST.UNCERTAINTY_MC_STD, None),
+                 _getValueIfPresent(line, CONST.UNCERTAINTY_MC_MIN, None),
+                 _getValueIfPresent(line, CONST.UNCERTAINTY_MC_MAX, None))
+             # Update probas
+             for _ in CONST.UNCERTAINTY_PCOLS:
+                 flux.monte_carlo.add_proba(_, _getValueIfPresent(line, _, None))
+             # Update hists
+             for _ in CONST.UNCERTAINTY_HCOLS:
+                 flux.monte_carlo.add_hist(_, _getValueIfPresent(line, _, None))
+             # Tags
+             ok_flux_tags, flux_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_FLUX, line)
+             ok_data_tags, data_tags_present = self._get_tags_from_line(CONST.TAG_TYPE_DATA, line)
+             # Do not process line if tags retrieving failed somehow
+             if (not ok_data_tags) or (not ok_flux_tags):
+                 self.add_warn_msg(
+                     'At line {} : There are problems with the given tags so the line cannot be processed.\n'
+                     .format(index))
+                 continue
+             # Apply tags
+             for tag in (flux_tags_present + data_tags_present):
+                 flux.monte_carlo.add_tag(tag)
+         return True, self.warn_msg
+ 
+     def update_from_conversions_table(
+         self,
+         input_table: pd.DataFrame,
+         node_list: list
+     ):
+         # Init warning message
+         self.reset_msgs(
+             msg_on_new_nodes='warn',
+             msg_on_new_flux='warn')
+         # Copy table to avoid modification on reference
+         table = input_table.copy()
+         # Extra columns in table (more than needed)
+         extra_cols = set(table.columns) - set(CONST.CONVERSIONS_SHEET_COLS)
+         # Here, extra infos are tooltips or units descriptions
+         # Get tooltip or units descriptions info
+         line = table.iloc[0]  # Always the first line
+         units = {}
+         tooltips = {}
+         for extra_info in extra_cols:
+             extra_info_value = _getValueIfPresent(line, extra_info, None)
+             # If we have " / " in the col name, then the col is about a unit
+             is_extra_info_about_unit = (len(extra_info.split(' / ')) > 1)
+             if is_extra_info_about_unit:  # Units
+                 if 'unité naturelle' in extra_info:
+                     extra_info = extra_info.split('/')[1].strip().upper()
+                 units[extra_info] = extra_info_value
+             else:  # Tooltips
+                 tooltips[extra_info] = extra_info_value
+         self.units.update(units)
+         self.tooltips.update(tooltips)
+         # Create new flux & data from table
+         for index in table.index[1:]:  # We skip the first line
+             # Read line
+             line = table.iloc[index]
+             # Find node
+             node = self.get_or_create_node(line[CONST.CONVERSIONS_PRODUCT])
+             if node is None:
+                 self.add_warn_msg(
+                     'At line {} : Could not find or create specified node.\n'
+                     .format(index))
+                 continue
+             node_list.append(node)
+             # Unit input attributes
+             unit_localisation = node.match_localisation(
+                 _getValueIfPresent(line, CONST.CONVERSIONS_LOCATION, None))
+             node.add_natural_unit(
+                 _getValueIfPresent(line, CONST.CONVERSIONS_NATURAL_UNIT, None),
+                 unit_localisation)
+             node.add_factor(
+                 _getValueIfPresent(line, CONST.CONVERSIONS_FACTOR, None),
+                 unit_localisation)
+             # Add tooltips for node
+             for tooltip_name, tooltip_description in tooltips.items():
+                 node.add_tooltip(
+                     tooltip_name,
+                     tooltip_description,
+                     _getValueIfPresent(line, tooltip_name, None))
+             # Add other conversions for node
+             for unit_name in units.keys():
+                 factor = _getValueIfPresent(line, unit_name, None)
+                 if factor is not None:
+                     node.add_other_factor(unit_name, factor, unit_localisation)
+         return True, self.warn_msg
+ 
+     def update_from_nodes_table(
+         self,
+         input_table: pd.DataFrame,
+         warn_on_new_nodes: bool = False
+     ):
+         # Init warning message
+         self.reset_msgs(
+             msg_on_new_nodes=('warn' if warn_on_new_nodes else None))
+         # Copy table to avoid modification on reference table
+         table = input_table.copy()
+         # Extra columns in table (more than needed)
+         extra_cols = \
+             set(table.columns) \
+             - set(CONST.NODES_SHEET_COLS) \
+             - set(self.taggs[CONST.TAG_TYPE_NODE].keys()) \
+             - set(self.taggs[CONST.TAG_TYPE_LEVEL].keys())
+         # Create new nodes from data available in table
+         for index in table.index:
+             # Get line
+             line = table.iloc[index]
+             name = line[CONST.NODES_NODE]
+             level = line[CONST.NODES_LEVEL]
+             # Do we know this node
+             known_node = self.is_node_registered(name)
+             # Create node if needed
+             node = self.get_or_create_node(name, level)
+             if node is None:
+                 self.add_warn_msg(
+                     'At line {} : Could not find or create specified node.\n'
+                     .format(index))
+                 continue
+             # If we already knew this node, then next children will be in a new group
+             if known_node:
+                 node.create_new_children_group()
+             # Update node
+             node.update(
+                 mat_balance=_getValueIfPresent(line, CONST.NODES_MAT_BALANCE, 1),
+                 color=_getValueIfPresent(line, CONST.NODES_COLOR, None),
+                 definition=_getValueIfPresent(line, CONST.NODES_DEFINITIONS, None))
+             # Apply node tags
+             self._read_tags_from_line(node, CONST.TAG_TYPE_NODE, line)
+             # Apply level tags
+             self._read_tags_from_line(node, CONST.TAG_TYPE_LEVEL, line)
+             # Save extra infos
+             for extra_info in extra_cols:
+                 node.add_extra_info(extra_info, _getValueIfPresent(line, extra_info, None))
+             # Does the node have parents?
+             if (level > 1):
+                 prev_level = level - 1
+                 for prev_index in reversed(range(index)):
+                     prev_line = table.iloc[prev_index]
+                     if prev_line[CONST.NODES_LEVEL] == prev_level:
+                         parent_name = _stdStr(prev_line[CONST.NODES_NODE])
+                         self.nodes[parent_name].add_child(node)
+                         break
+         return True, self.warn_msg
+ 
+     def _read_tags_from_line(
+         self,
+         sankey_object: SankeyObject,
+         taggs_type: str,
+         line: pd.DataFrame
+     ):
+         """
+         TODO: document
+         """
+         for tagg in self.taggs[taggs_type].keys():
+             tags_name = _getValueIfPresent(line, tagg, None)
+             if tags_name is not None:
+                 tags_name = str(tags_name).split(':')
+                 for tag_name in tags_name:
+                     tag = self.taggs[taggs_type][tagg].get_tag_from_name(tag_name)
+                     if tag is not None:
+                         sankey_object.add_tag(tag)
+                     else:
+                         if (tag_name == '0.') or (tag_name == '0.0'):
+                             self.add_warn_msg(
+                                 'For tag group "{0}", tag "{1}" does not exist, so it is not taken into account. '.format(
+                                     self.taggs[taggs_type][tagg].name_unformatted,
+                                     tag_name
+                                 )+'Did you mean "0" ?\n')
+                         else:
+                             self.add_warn_msg(
+                                 'For tag group "{0}", tag "{1}" does not exist, so it is not taken into account.\n'.format(
+                                     self.taggs[taggs_type][tagg].name_unformatted,
+                                     tag_name
+                                 ))
+ 
+     def _get_tags_from_line(
+         self,
+         taggs_type: str,
+         line: pd.DataFrame
+     ):
+         """
+         Get all tags related to a given tag type for a given table line.
+ 
+         Verifications
+         -------------
+         - Parsing of tags : lone tag as str "tag name" or group of tags as str "tag1:tag2:..."
+         - Check if tag group (line header) exists
+         - Check if tag exists for given tag group. If not :
+           - Warn msg specifying that given tag will not be taken into account
+           - Specific warn msg if tags are 'numbers', only integers are accepted :
+             if tag is "1.0" -> trigger a warning msg as it must be "1"
+ 
+         Parameters
+         ----------
+         :param taggs_type: Type of tags to find
+         :type taggs_type: str
+ 
+         :param line: Table's line to parse
+         :type line: pd.DataFrame
+ 
+         Returns
+         -------
+         :return: Whether the tag search succeeded, and the list of tags that have been found
+         :rtype: (bool, list)
+         """
+         # Tag search is valid
+         # 1. If there is no existing tag group for given tag group type
+         # 2. If there are tag groups for given tag group type
+         #    AND at least one valid tag is given / tag group, considering that no written tag = all tags
+         all_tags_search_is_ok = True
+         tags_present = []
+         for tagg_name in self.taggs[taggs_type].keys():
+             # Read value in given line and for tag group's column
+             tags_name = _getValueIfPresent(line, tagg_name, None)
+             # If we found something for given tag group
+             if tags_name is not None:
+                 tags_search_is_ok = False
+                 tags_name = str(tags_name).split(':')
+                 for tag_name in tags_name:
+                     tag = self.taggs[taggs_type][tagg_name].get_tag_from_name(tag_name)
+                     if tag is not None:
+                         tags_present.append(tag)
+                         tags_search_is_ok = True
+                     else:
+                         if (tag_name == '0.') or (tag_name == '0.0'):
+                             self.add_warn_msg(
+                                 'For tag group "{0}", tag "{1}" is unknown. '.format(
+                                     tagg_name, tag_name
+                                 )+'Did you mean "0" ?\n')
+                         else:
+                             self.add_warn_msg(
+                                 'For tag group "{0}", tag "{1}" is unknown.\n'.format(
+                                     tagg_name, tag_name
+                                 ))
+                 # Search is not ok if no acceptable tag has been found for given tag group
+                 all_tags_search_is_ok &= tags_search_is_ok
+         # Return list of found tags
+         return all_tags_search_is_ok, tags_present
+ 
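A hedged example of the parsing rule above for this internal helper, assuming a data-tag group 'year' with tags '2020' and '2021' was declared beforehand:

    row = pd.Series({'origin': 'mine', 'destination': 'plant', 'year': '2020:2021'})
    ok, tags = sankey._get_tags_from_line(CONST.TAG_TYPE_DATA, row)
    # ok is True and tags holds both Tag objects; a value like '2020.0'
    # would instead be reported as unknown (only integer spellings match).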
+     def update_from_matrix_table(
+         self,
+         table: pd.DataFrame,
+         **kwargs
+     ):
+         # Init options
+         warn_on_new_nodes = False  # Warning message on new node creation
+         warn_on_new_flux = False  # Warning message on new flux creation
+         data_in_matrix = False  # Read data from input matrix
+         if 'warn_on_new_nodes' in kwargs.keys():
+             warn_on_new_nodes = kwargs['warn_on_new_nodes']
+         if 'warn_on_new_flux' in kwargs.keys():
+             warn_on_new_flux = kwargs['warn_on_new_flux']
+         if 'data_in_matrix' in kwargs.keys():
+             data_in_matrix = kwargs['data_in_matrix']
+         self.reset_msgs(
+             msg_on_new_nodes=('warn' if warn_on_new_nodes else None),
+             msg_on_new_flux=('warn' if warn_on_new_flux else None))
+         # Check if we will set tags for nodes in rows and in cols
+         tagg_name, tagg_type, tag_name_col, tag_name_row = \
+             None, None, None, None
+         if 'tag_group' in kwargs.keys():
+             try:
+                 tagg_name = kwargs['tag_group']
+                 tagg_type = kwargs['tags_type']
+                 tag_name_col = kwargs['tag_col']
+                 tag_name_row = kwargs['tag_row']
+             except Exception:
+                 err = 'Unable to extract tag group info.'
+                 return False, err
+         # Get or create tag groups if needed, then get corresponding tags
+         # for columns and rows
+         tagg, tag_col, tag_row = None, None, None
+         if tagg_name is not None:
+             tagg = self.get_or_create_tagg(tagg_name, tagg_type)
+             tag_col = tagg.get_or_create_tag(tag_name_col)
+             tag_row = tagg.get_or_create_tag(tag_name_row)
+         # Extract list of origins, and destinations
+         origins, destinations, values = [], [], []
+         _extractFluxFromMatrix(table, origins, destinations, values)
+         # Iterate on flux list to create nodes
+         for (origin, destination, value) in zip(origins, destinations, values):
+             # Will create flux and nodes if they don't exist
+             flux = self.get_or_create_flux(origin, destination)
+             if flux is None:  # Continue if flux creation has failed
+                 continue
+             # Apply value if needed
+             if data_in_matrix and (value is not None):
+                 for data in flux.datas:
+                     data.value = value
+             # If we have tags to set for cols and rows, we add them to the nodes
+             if tag_col is not None:
+                 self.get_or_create_node(origin).add_tag(tag_col)
+             if tag_row is not None:
+                 self.get_or_create_node(destination).add_tag(tag_row)
+         return True, ''
+ 
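A hedged sketch of the matrix layout (the tag handling above applies tag_col to origins, which suggests origins sit on the column axis; the exact orientation is defined by _extractFluxFromMatrix, so take this as an assumption):

    matrix = pd.DataFrame(
        [[None, 10.0],
         [5.0, None]],
        index=['plant', 'market'],    # one axis of nodes
        columns=['mine', 'plant'],    # the other axis of nodes
    )
    ok, err = sankey.update_from_matrix_table(matrix, data_in_matrix=True)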
+     def autocompute_missing_flux(self):
+         """
+         Auto compute missing flux.
+         """
+         # Init output indicators
+         all_ok = True
+         # Init warning message
+         self.reset_msgs(msg_on_new_flux='info')
+         # Loop on nodes
+         for node in self.nodes.values():
+             if node.level == 1:
+                 self._complete_parenthood_flux(node)
+                 self._complete_single_child_flux(node)
+         # Send msgs
+         self.send_msgs('Flux completions (based on nodes parenthood relations)')
+         # End
+         return all_ok
+ 
+     def _complete_parenthood_flux(self, parent_node):
+         """
+         Check if flux are consistent through node parenthood.
+ 
+         Here : Flux from/to a child must also exist for the parent
+         """
+         for children_grp in parent_node.children_grps:
+             for child_node in children_grp:
+                 # Firstly, we need to recurse on all children
+                 # because here we create missing flux for parents
+                 if child_node.has_at_least_one_child():
+                     self._complete_parenthood_flux(child_node)
+                 # Secondly,
+                 # If there is a flux from child to another node, then the flux also exists for the parent
+                 for child_output_flux in child_node.output_flux:
+                     child_dest_node = child_output_flux.dest
+                     parent_dest_nodes = [parent_output_flux.dest for parent_output_flux in parent_node.output_flux]
+                     if child_dest_node not in parent_dest_nodes:
+                         self.get_or_create_flux(parent_node.name, child_dest_node.name)
+                 # Thirdly,
+                 # If there is a flux to child from another node, then the flux also exists for the parent
+                 for child_input_flux in child_node.input_flux:
+                     child_orig_node = child_input_flux.orig
+                     parent_orig_nodes = [parent_input_flux.orig for parent_input_flux in parent_node.input_flux]
+                     if child_orig_node not in parent_orig_nodes:
+                         self.get_or_create_flux(child_orig_node.name, parent_node.name)
+ 
+     def _complete_single_child_flux(self, parent_node):
+         """
+         Check if flux are consistent through node parenthood.
+ 
+         Here : Flux from/to a parent with a single child must also exist for the child
+         """
+         for children_grp in parent_node.children_grps:
+             # Firstly, we create missing flux for all children
+             # groups with only one child
+             if len(children_grp) == 1:
+                 # Get the unique child
+                 child_node = children_grp[0]
+                 # If there is a flux from parent to another node, then the flux also exists for the unique child
+                 for parent_output_flux in parent_node.output_flux:
+                     parent_dest_node = parent_output_flux.dest
+                     child_dest_nodes = [child_output_flux.dest for child_output_flux in child_node.output_flux]
+                     if parent_dest_node not in child_dest_nodes:
+                         self.get_or_create_flux(child_node.name, parent_dest_node.name)
+                 # If there is a flux to parent from another node, then the flux also exists for the unique child
+                 for parent_input_flux in parent_node.input_flux:
+                     parent_orig_node = parent_input_flux.orig
+                     child_orig_nodes = [child_input_flux.orig for child_input_flux in child_node.input_flux]
+                     if parent_orig_node not in child_orig_nodes:
+                         self.get_or_create_flux(parent_orig_node.name, child_node.name)
+             # Secondly, we need to recurse on children
+             # because here flux are defined by parents
+             for child_node in children_grp:
+                 if child_node.has_at_least_one_child():
+                     self._complete_single_child_flux(child_node)
+ 
1344
+ def _detect_parenthood_missing_flux(self, parent_node):
1345
+ """
1346
+ Check if flux are consistants throught node parenthood
1347
+
1348
+ Here : Raise an error if there is a flux from/to given parent
1349
+ that does not exist for at least one of its children
1350
+ """
1351
+ # Init output indicator
1352
+ all_ok = True
1353
+ # Run
1354
+ for children_grp in parent_node.children_grps:
1355
+ # If children grp is empty, pass
1356
+ if len(children_grp) == 0:
1357
+ continue
1358
+ # First,
1359
+ # Create sets of all incoming & outgoing flux for all children
1360
+ # in given children group
1361
+ children_input_flux_origs = set()
1362
+ children_output_flux_dests = set()
1363
+ for child in children_grp:
1364
+ for child_input_flux in child.input_flux:
1365
+ children_input_flux_origs.add(child_input_flux.orig)
1366
+ for child_output_flux in child.output_flux:
1367
+ children_output_flux_dests.add(child_output_flux.dest)
1368
+ # Then,
1369
+ # Check all incoming flux for parent
1370
+ for parent_input_flux in parent_node.input_flux:
1371
+ # Error if any of the incoming flux, does not exist for at least one child
1372
+ # in current children group
1373
+ if parent_input_flux.orig not in children_input_flux_origs:
1374
+ err_msg = 'Flux inconsistency. '
1375
+ err_msg += 'For node "{}", flux from "{}" does not exists '.format(
1376
+ parent_node.name, parent_input_flux.orig.name)
1377
+ err_msg += 'for at least one of these children nodes ({})'.format(
1378
+ ','.join(['"{}"'.format(_.name) for _ in children_grp]))
1379
+ self.add_err_msg(err_msg)
1380
+ all_ok = False
1381
+ # Then,
1382
+ # Check all outgoing flux for parent
1383
+ for parent_output_flux in parent_node.output_flux:
1384
+ # Error if any of the outgoing flux, does not exist for at least one child
1385
+ # in current children group
1386
+ if parent_output_flux.dest not in children_output_flux_dests:
1387
+ err_msg = 'Flux inconsistency. '
1388
+ err_msg += 'For node "{}", flux to "{}" does not exist '.format(
1389
+ parent_node.name, parent_output_flux.dest.name)
1390
+ err_msg += 'for at least one of these children nodes ({})'.format(
1391
+ ','.join(['"{}"'.format(_.name) for _ in children_grp]))
1392
+ self.add_err_msg(err_msg)
1393
+ all_ok = False
1394
+ # Finally, recursion on children
1395
+ # because here flux are defined by parents
1396
+ for child_node in children_grp:
1397
+ if child_node.has_at_least_one_child():
1398
+ all_ok &= self._detect_parenthood_missing_flux(child_node)
1399
+ return all_ok
1400
+
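+ # Standalone toy sketch of the detection above: the origins feeding a
+ # parent must be covered by the union of the origins feeding its children,
+ # otherwise an error is raised.
+ parent_input_origs = {"A", "B"}
+ children_input_origs = {"A"}
+ missing = parent_input_origs - children_input_origs
+ assert missing == {"B"}  # flux from "B" would trigger the error message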
1401
+ def autocompute_mat_balance(self):
1402
+ """
1403
+ Compute matter balance for all nodes. This computation is executed only
1404
+ if it was specified by the user in the input excel file (i.e. mat_balance col present)
1405
+ """
1406
+ # Init logging message
1407
+ self.reset_msgs()
1408
+ # Get all nodes name for nodes set as origin or destination
1409
+ orig = set()
1410
+ dest = set()
1411
+ for flux in self.flux.values():
1412
+ orig.add(flux.orig.name)
1413
+ dest.add(flux.dest.name)
1414
+ # Find which nodes are set as destination AND origin
1415
+ ok_mat_balance = orig & dest
1416
+ # Exclude exchange
1417
+ try:
1418
+ tag_exchange = \
1419
+ self.taggs[CONST.TAG_TYPE_NODE][_stdStr(CONST.NODE_TYPE)] \
1420
+ .get_tag_from_name(CONST.NODE_TYPE_EXCHANGE)
1421
+ for node in tag_exchange.references:
1422
+ node.mat_balance = 0
1423
+ # remove node from mat_balance set
1424
+ ok_mat_balance -= {node.name}
1425
+ except Exception:
1426
+ # TODO: error on exchange tag that disappears
1427
+ pass
1428
+ # Update values if needed
1429
+ for node in self.nodes.values():
1430
+ # If the user's mat_balance value is inconsistent with the flux, we correct it and set it to 0
1431
+ if node.name in ok_mat_balance:
1432
+ if node.mat_balance is None:
1433
+ node.mat_balance = 1
1434
+ else:
1435
+ if node.mat_balance is not None:
1436
+ if node.mat_balance != 0:
1437
+ msg = 'Node {} : '.format(node.name)
1438
+ msg += 'Matter balance has been changed because it was inconsistent '
1439
+ msg += '(before = {}, now = 0).'.format(node.mat_balance)
1440
+ self.add_info_msg(msg)
1441
+ node.mat_balance = 0
1442
+ # Send msgs
1443
+ self.send_msgs('matter balance autocomputing')
1444
+
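+ # Standalone toy sketch of the rule above: a node gets mat_balance = 1
+ # only if it appears both as origin and destination of some flux
+ # (exchange-tagged nodes excluded).
+ flux = [("mine", "plant"), ("plant", "market")]
+ origs = {o for o, _ in flux}
+ dests = {d for _, d in flux}
+ mat_balance = {n: int(n in (origs & dests)) for n in origs | dests}
+ assert mat_balance == {"mine": 0, "plant": 1, "market": 0}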
1445
+ def has_at_least_one_mat_balance(self):
1446
+ for node in self.nodes.values():
1447
+ if node.mat_balance is not None:
1448
+ return True
1449
+ return False
1450
+
1451
+ def autocompute_nodes_levels(self):
1452
+ """
1453
+ Recompute the primary level for all nodes according to the global leveling
1454
+ """
1455
+ for node in self.nodes.values():
1456
+ if not node.has_parents():
1457
+ node.level = 1
1458
+ max_node_level = node.autocompute_level_and_children_levels()
1459
+ self._max_nodes_level = max(self._max_nodes_level, max_node_level)
1460
+
1461
+ def check_overall_sankey_structure(self):
1462
+ """
1463
+ Check if everything is all right regarding the sankey structure
1464
+
1465
+ Returns
1466
+ -------
1467
+ :return: True if everything is ok, False with error message otherwise.
1468
+ :rtype: (bool, str)
1469
+ """
1470
+ # Keep track of errors
1471
+ err_msgs = []
1472
+ # Check coherence between product, sector and exchanges
1473
+ tagg_type_node = self.get_tagg_from_name_and_type(
1474
+ CONST.NODE_TYPE,
1475
+ CONST.TAG_TYPE_NODE)
1476
+ if tagg_type_node is not None:
1477
+ # Get Tags
1478
+ tag_product = tagg_type_node.get_tag_from_name(CONST.NODE_TYPE_PRODUCT)
1479
+ tag_sector = tagg_type_node.get_tag_from_name(CONST.NODE_TYPE_SECTOR)
1480
+ tag_exchange = tagg_type_node.get_tag_from_name(CONST.NODE_TYPE_EXCHANGE)
1481
+ # Must have product -> sector / exchange
1482
+ if tag_product is not None:
1483
+ products = tag_product.references
1484
+ for product in products:
1485
+ for output_flux in product.output_flux:
1486
+ if output_flux.dest in products:
1487
+ err_msgs.append(
1488
+ 'We cannot have this flux {} : reason Product->Product'.format(output_flux))
1489
+ # Must have sector -> product / exchange
1490
+ if tag_sector is not None:
1491
+ sectors = tag_sector.references
1492
+ for sector in sectors:
1493
+ for output_flux in sector.output_flux:
1494
+ if output_flux.dest in sectors:
1495
+ err_msgs.append(
1496
+ 'We cannot have this flux {} : reason Sector->Sector'.format(output_flux))
1497
+ # Must have exchange -> product / sector
1498
+ if tag_exchange is not None:
1499
+ exchanges = tag_exchange.references
1500
+ for exchange in exchanges:
1501
+ for output_flux in exchange.output_flux:
1502
+ if output_flux.dest in exchanges:
1503
+ err_msgs.append(
1504
+ 'We cannot have this flux {} : reason Exchange->Exchange'.format(output_flux))
1505
+ return len(err_msgs) == 0, '\n'.join(err_msgs)
1506
+
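+ # Standalone toy sketch of the structure rule above: node types must
+ # alternate along a flux, so Product -> Product (or Sector -> Sector)
+ # is reported as an error.
+ node_type = {"wheat": "product", "mill": "sector", "bread": "product"}
+ flux = [("wheat", "mill"), ("mill", "bread"), ("wheat", "bread")]
+ errors = [f for f in flux if node_type[f[0]] == node_type[f[1]]]
+ assert errors == [("wheat", "bread")]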
1507
+ def check_overall_sankey_coherence(self):
1508
+ """
1509
+ Check if everything in sankey is coherent
1510
+ """
1511
+ # Init logging message
1512
+ self.reset_msgs()
1513
+ # Init indicator
1514
+ all_ok = True
1515
+ # Check if there are missing flux in parenthood relations
1516
+ for node in self.nodes.values():
1517
+ if node.level == 1:
1518
+ all_ok &= self._detect_parenthood_missing_flux(node)
1519
+ # Check matter balance coherence
1520
+ all_ok &= self._check_parenthood_mat_balance_coherence()
1521
+ # Check constraints coherence
1522
+ all_ok &= self._check_constraints_coherence()
1523
+ # Send msgs
1524
+ self.send_msgs("Sankey coherence checks")
1525
+ # Return
1526
+ return all_ok
1527
+
1528
+ def _check_parenthood_mat_balance_coherence(self):
1529
+ """
1530
+ Check if matter balances are coherent relative to node parenthood.
1531
+ 1. If a parent has mat_balance at 1, then its children cannot have mat_balance at 0
1532
+ 2. If all children have mat_balance at 1, then the parent cannot have mat_balance at 0
1533
+ """
1534
+ # Loop on all nodes
1535
+ for node in self.nodes.values():
1536
+ # Loop on all children grp for given node
1537
+ for children_grp in node.children_grps:
1538
+ # Protection
1539
+ if len(children_grp) == 0:
1540
+ continue
1541
+ # Init indicators
1542
+ all_children_at_1 = True
1543
+ children_at_0 = []
1544
+ # Loop on all children
1545
+ for child in children_grp:
1546
+ all_children_at_1 &= (child.mat_balance == 1)
1547
+ if child.mat_balance == 0:
1548
+ children_at_0.append(child)
1549
+ # Check coherence
1550
+ if (node.mat_balance == 1) and (not all_children_at_1):
1551
+ msg = 'Matter balance incoherence. '
1552
+ msg += 'For node "{}", matter balance has been set to be respected (=1) '.format(
1553
+ node.name)
1554
+ msg += 'but not for these following children nodes ({})'.format(
1555
+ ','.join(['"{}"'.format(_.name) for _ in children_at_0]))
1556
+ self.add_warn_msg(msg)
1557
+ if (node.mat_balance == 0) and (all_children_at_1):
1558
+ msg = 'Matter balance incoherence. '
1559
+ msg += 'For node "{}", matter balance has been set to be free (=0) '.format(
1560
+ node.name)
1561
+ msg += 'but matter balance has been set to be respected (=1) '
1562
+ msg += 'for all its children nodes ({}) '.format(
1563
+ ','.join(['"{}"'.format(_.name) for _ in children_grp]))
1564
+ self.add_warn_msg(msg)
1565
+ return True
1566
+
1567
+ def _check_constraints_coherence(self):
1568
+ """
1569
+ Check if constraints are coherent and respect writing conventions.
1570
+ """
1571
+ ok = True
1572
+ for id, constraints in self.constraints.items():
1573
+ # List to check data / flux redundancy
1574
+ all_refs = []
1575
+ for constraint in constraints:
1576
+ ref = constraint.reference
1577
+ # Error if ref already present in constraint
1578
+ if ref in all_refs:
1579
+ msg = 'Constraint reference repetition. '
1580
+ msg += 'For constraint with id={} '.format(id)
1581
+ if isinstance(ref, Data):
1582
+ msg += 'Data "{0} -> {1} : {2}", appear more than once. '.format(
1583
+ ref.flux.orig.name,
1584
+ ref.flux.dest.name,
1585
+ [tag.name_unformatted for tag in ref.tags if tag.group.type == CONST.TAG_TYPE_DATA])
1586
+ if isinstance(ref, Flux):
1587
+ msg += 'Flux "{0} -> {1}", appear more than once. '.format(
1588
+ ref.orig.name,
1589
+ ref.dest.name)
1590
+ msg += 'In the solving process, it is not possible to process constraints with data redundancies.'
1591
+ self.add_err_msg(msg)
1592
+ # Must send back failure indicator
1593
+ ok = False
1594
+ else:
1595
+ all_refs.append(ref)
1596
+ # End
1597
+ return ok
1598
+
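+ # Standalone toy sketch of the redundancy check above: within a single
+ # constraint id, the same reference (flux or data) may only appear once.
+ refs = ["flux:a-b", "flux:b-c", "flux:a-b"]
+ seen, ok = set(), True
+ for ref in refs:
+     if ref in seen:
+         ok = False  # would produce the error message above
+     seen.add(ref)
+ assert ok is False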
1599
+ def is_node_registered(
1600
+ self,
1601
+ node
1602
+ ):
1603
+ """
1604
+ Return True if the node is already in our list.
1605
+
1606
+ Parameters
1607
+ ----------
1608
+ :param node: Name of the node to find, or the node object itself.
1609
+ :type node: str | Node
1610
+
1611
+ Returns
1612
+ -------
1613
+ :return: True if the node is in the list, False otherwise.
1614
+ :rtype: bool
1615
+ """
1616
+ if type(node) is str:
1617
+ ref_node = _stdStr(node)
1618
+ return (ref_node in self.nodes.keys())
1619
+ if type(node) is Node:
1620
+ return (node in self.nodes.values())
+ # Fallback for unsupported types: the docstring promises a bool
+ return False
1621
+
1622
+ def get_or_create_node(
1623
+ self,
1624
+ name: str,
1625
+ level: int = 1
1626
+ ):
1627
+ """
1628
+ Return the node with given name.
1629
+ - If the node does not exist, we create it.
1630
+
1631
+ Parameters
1632
+ ----------
1633
+ :param name: Name of the node to find or create.
1634
+ :type name: str
1635
+
1636
+ Optional parameters
1637
+ -------------------
1638
+ :param level: Set level for created node, defaults to 1
1639
+ :type level: int, optional
1640
+
1641
+ Returns
1642
+ -------
1643
+ :return: The node with given name.
1644
+ :rtype: :class:`sankey.Node`
1645
+ """
1646
+ if type(name) is not str:
1647
+ return None
1648
+ ref_name = _stdStr(name)
1649
+ if ref_name not in self.nodes.keys():
1650
+ self.nodes[ref_name] = Node(name, level)
1651
+ self.add_msg_on_new_node("Created a new node \"{}\".".format(name))
1652
+ # Info on maximum of existing levels for nodes
1653
+ self._max_nodes_level = max(level, self._max_nodes_level)
1654
+ return self.nodes[ref_name]
1655
+
1656
+ def get_or_create_flux(self, orig: str, dest: str):
1657
+ """
1658
+ Return the flux with given origin and destination nodes.
1659
+ - If the flux does not exist, create it.
1660
+ - If the nodes do not exist, we create them.
1661
+
1662
+ Parameters
1663
+ ----------
1664
+ :param orig: Origin node name.
1665
+ :type orig: str
1666
+
1667
+ :param dest: Destination node name
1668
+ :type dest: str
1669
+
1670
+ Returns
1671
+ -------
1672
+ :return: The flux between the given origin and destination
1673
+ :rtype: :class:`sankey.Flux`
1674
+ """
1675
+ if (type(orig) is not str) or (type(dest) is not str):
1676
+ return None
1677
+ ref_name = '{0} - {1}'.format(_stdStr(orig), _stdStr(dest))
1678
+ if ref_name not in self.flux.keys():
1679
+ # Create nodes if they don't already exist
1680
+ node_orig = self.get_or_create_node(orig)
1681
+ node_dest = self.get_or_create_node(dest)
1682
+ # Create flux
1683
+ flux = Flux(node_orig, node_dest)
1684
+ # We must instantiate data for all existing datatag configs
1685
+ flux.instanciate_all_datas(
1686
+ data_taggs=self.taggs[CONST.TAG_TYPE_DATA])
1687
+ # Create reference point in sankey struct
1688
+ self.flux[ref_name] = flux
1689
+ # Logging message
1690
+ msg = "Created a new flux [\"{0}\" -> \"{1}\"].".format(orig, dest)
1691
+ self.add_msg_on_new_flux(msg)
1692
+ return self.flux[ref_name]
1693
+
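+ # Hedged sketch of the flux registry key convention used above, assuming
+ # _stdStr lowercases and strips names (the real helper lives in
+ # sankey_utils.functions; this stand-in only illustrates the key format).
+ def _std(name):  # hypothetical stand-in for _stdStr
+     return name.strip().lower()
+ ref_name = '{0} - {1}'.format(_std("Mine "), _std("Plant"))
+ assert ref_name == "mine - plant"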
1694
+ def get_or_create_tagg(
1695
+ self,
1696
+ tagg_name: str,
1697
+ tagg_type: str,
1698
+ tags=''
1699
+ ):
1700
+ """
1701
+ Get tag group related to given name and type.
1702
+ Create a new tag group if necessary.
1703
+
1704
+ Parameters
1705
+ ----------
1706
+ :param tagg_name: Tag group name
1707
+ :type tagg_name: str
1708
+
1709
+ :param tagg_type: Tag group type
1710
+ :type tagg_type: str
1711
+
1712
+ :param tags: tags to add to tag group if newly created
1713
+ :type tags: str
1714
+
1715
+ Returns
1716
+ -------
1717
+ :return: The requested tag group if everything was ok, else None
1718
+ :rtype: TagGroup | None
1719
+ """
1720
+ # Check if we have the correct type
1721
+ if tagg_type not in self.taggs.keys():
1722
+ return None
1723
+ # Find tag group
1724
+ ref_tagg_name = _stdStr(tagg_name)
1725
+ if ref_tagg_name not in self.taggs[tagg_type].keys():
1726
+ self.taggs[tagg_type][ref_tagg_name] = TagGroup(tagg_name, tagg_type, tags=tags)
1727
+ return self.taggs[tagg_type][ref_tagg_name]
1728
+
1729
+ def add_constraint(self, id_constraint, reference, **kwargs):
1730
+ if isinstance(reference, Flux) or isinstance(reference, Data):
1731
+ # Create a piece of the constraint
1732
+ constraint = reference.add_constraint(id_constraint, **kwargs)
1733
+ # Update constraint for given id
1734
+ if id_constraint in self.constraints.keys():
1735
+ self.constraints[id_constraint].append(constraint)
1736
+ else:
1737
+ self.constraints[id_constraint] = [constraint]
1738
+
1739
+ def has_at_least_one_flux(self):
1740
+ return len(self.flux) > 0
1741
+
1742
+ def has_at_least_one_data(self):
1743
+ if self.has_at_least_one_flux():
1744
+ for flux in self.flux.values():
1745
+ if flux.has_data():
1746
+ return True
1747
+ return False
1748
+
1749
+ def has_at_least_one_result(self):
1750
+ if self.has_at_least_one_flux():
1751
+ for flux in self.flux.values():
1752
+ if flux.has_result():
1753
+ return True
1754
+ return False
1755
+
1756
+ def has_at_least_one_tagg(self):
1757
+ has_elems = False
1758
+ for tagg_dict in self.taggs.values():
1759
+ has_elems |= (len(tagg_dict) > 0)
1760
+ return has_elems
1761
+
1762
+ def has_at_least_one_constraint(self):
1763
+ return len(self.constraints) > 0
1764
+
1765
+ def write_in_excel_file(
1766
+ self,
1767
+ excel_file,
1768
+ **kwargs
1769
+ ):
1770
+ """
1771
+ Write the full sankey structure (tags, nodes, flux, data) in the given excel file.
1772
+
1773
+ Parameters
1774
+ ----------
1775
+ :param excel_file: Output excel file name
1776
+ :type excel_file: file object
1777
+
1778
+ Hidden parameters
1779
+ -----------------
1780
+ :param additional_sheets:
1781
+ Dict of tables as {sheet name as str: table as DataFrame} to add in Excel file
1782
+ :type additional_sheets: Dict{str: pandas.DataFrame}
1783
+ """
1784
+ # Dedicated function to find empty cells
1785
+ def is_empty(cell):
1786
+ if cell.value is None:
1787
+ return True
1788
+ if isinstance(cell.value, str):
1789
+ if len(cell.value) == 0:
1790
+ return True
1791
+ if isinstance(cell.value, int):
1792
+ if cell.value == 0:
1793
+ return True
1794
+ return False
1795
+ # First create sheets as pandas tables
1796
+ sheets = {}
1797
+ nodes_entries = []
1798
+ nodes_entries__levels = []
1799
+ self.write_tags_sheet(sheets)
1800
+ self.write_nodes_sheets(sheets, nodes_entries, nodes_entries__levels)
1801
+ self.write_flux_sheets(nodes_entries, nodes_entries__levels, sheets)
1802
+ self.write_data_sheets(nodes_entries, sheets)
1803
+ # Then write tables in excel file
1804
+ for sheet in sheets.values():
1805
+ # Don't process empty tables
1806
+ if sheet['table'].empty:
1807
+ continue
1808
+ # Create sheet with data
1809
+ sheet['table'].to_excel(
1810
+ excel_file,
1811
+ sheet_name=sheet['name'],
1812
+ index=sheet['write_index'],
1813
+ header=sheet['write_header'],
1814
+ startrow=0,
1815
+ startcol=0)
1816
+ # Add formatting to sheet
1817
+ excel_sheet = excel_file.sheets[sheet['name']]
1818
+ excel_sheet.sheet_properties.tabColor = sheet['color']
1819
+ # Rows iterator
1820
+ rows = excel_sheet.rows
1821
+ cols_max_size = []
1822
+ # Apply default height for header
1823
+ excel_sheet.row_dimensions[HEADER_ROW_ID].height = \
1824
+ sheet['header']['default_height']
1825
+ # Add special formatting to header
1826
+ header = next(rows)
1827
+ for (_, cell) in enumerate(header):
1828
+ # Col index
1829
+ col_id = INDEX_COL_ID + _  # Because enumerate starts from 0, as do table cols
1830
+ # Apply different formatting depending on whether the cell contains a value or not
1831
+ if not is_empty(cell):
1832
+ cell.alignment = sheet['header']['alignement']
1833
+ cell.border = sheet['header']['border']
1834
+ cell.fill = sheet['header']['fill']
1835
+ cell.font = sheet['header']['font']
1836
+ if sheet['content']['compute_width']:
1837
+ cols_max_size.append(len(str(cell.value)) + 8)
1838
+ else:
1839
+ cols_max_size.append(sheet['content']['default_width'])
1840
+ else:
1841
+ cell.alignment = sheet['no_header']['alignement']
1842
+ cell.border = sheet['no_header']['border']
1843
+ cell.fill = sheet['no_header']['fill']
1844
+ cell.font = sheet['no_header']['font']
1845
+ cols_max_size.append(sheet['content']['default_width'])
1846
+ # Apply special formatting if needed
1847
+ for spe_cols_ids in sheet['spe_header'].keys():
1848
+ if (col_id - INDEX_COL_ID) in spe_cols_ids: # In special_col_ids, we start from 0
1849
+ cell.alignment = sheet['spe_header'][spe_cols_ids]['alignement']
1850
+ cell.border = sheet['spe_header'][spe_cols_ids]['border']
1851
+ cell.fill = sheet['spe_header'][spe_cols_ids]['fill']
1852
+ cell.font = sheet['spe_header'][spe_cols_ids]['font']
1853
+ # Add special formatting to the rest of the table
1854
+ for (_, row) in enumerate(rows):
1855
+ # Row index in table
1856
+ row_id = HEADER_ROW_ID + _ + 1 # enumerate starts from 0, but sheet table rows from 1
1857
+ # Apply default height
1858
+ excel_sheet.row_dimensions[row_id].height = \
1859
+ sheet['content']['default_height']
1860
+ # Apply formatting to each cell
1861
+ for (_, cell) in enumerate(row):
1862
+ # Col index in table
1863
+ col_id = INDEX_COL_ID + _  # Because enumerate starts from 0, as do table cols
1864
+ # Apply different formatting depending on whether the cell contains a value or not
1865
+ if not is_empty(cell):
1866
+ # Update cell width from max content
1867
+ if sheet['content']['compute_width']:
1868
+ cols_max_size[col_id] = max(
1869
+ cols_max_size[col_id],
1870
+ len(str(cell.value)) + 8)
1871
+ # Apply content index format if necessary
1872
+ if sheet['write_index'] and (col_id == INDEX_COL_ID):
1873
+ cell.alignment = sheet['index']['alignement']
1874
+ cell.border = sheet['index']['border']
1875
+ cell.fill = sheet['index']['fill']
1876
+ cell.font = sheet['index']['font']
1877
+ cols_max_size[col_id] = sheet['index']['default_width']
1878
+ else:
1879
+ # Apply default content format
1880
+ cell.alignment = sheet['content']['alignement']
1881
+ cell.border = sheet['content']['border']
1882
+ cell.fill = sheet['content']['fill']
1883
+ cell.font = sheet['content']['font']
1884
+ else:
1885
+ cell.alignment = sheet['no_content']['alignement']
1886
+ cell.border = sheet['no_content']['border']
1887
+ cell.fill = sheet['no_content']['fill']
1888
+ cell.font = sheet['no_content']['font']
1889
+ # Apply special formatting if needed
1890
+ for spe_cols_ids in sheet['spe_content'].keys():
1891
+ if (col_id - INDEX_COL_ID) in spe_cols_ids: # In special_col_ids, we start from 0
1892
+ for spe_row_ids in sheet['spe_content'][spe_cols_ids].keys():
1893
+ # /!\ In spe_row_ids, we start from 0
1894
+ if (row_id - HEADER_ROW_ID - 1) in spe_row_ids:
1895
+ # Force cell formatting
1896
+ cell.alignment = \
1897
+ sheet['spe_content'][spe_cols_ids][spe_row_ids]['alignement']
1898
+ cell.border = \
1899
+ sheet['spe_content'][spe_cols_ids][spe_row_ids]['border']
1900
+ cell.fill = \
1901
+ sheet['spe_content'][spe_cols_ids][spe_row_ids]['fill']
1902
+ cell.font = \
1903
+ sheet['spe_content'][spe_cols_ids][spe_row_ids]['font']
1904
+ # Force cell dimension
1905
+ if 'default_height' in sheet['spe_content'][spe_cols_ids][spe_row_ids].keys():
1906
+ excel_sheet.row_dimensions[row_id].height = \
1907
+ sheet['spe_content'][spe_cols_ids][spe_row_ids]['default_height']
1908
+ # Apply column widths
1909
+ for col_id, col in enumerate(excel_sheet.columns):
1910
+ column_letter = col[0].column_letter
1911
+ excel_sheet.column_dimensions[column_letter].width = \
1912
+ cols_max_size[col_id]
1913
+ # Additional sheets
1914
+ # Will only work if 'additional_sheets' was passed as a kwarg to this function
1915
+ try:
1916
+ for sheet_name, sheet in kwargs['additional_sheets'].items():
1917
+ sheet.to_excel(
1918
+ excel_file,
1919
+ sheet_name=sheet_name,
1920
+ index=False,
1921
+ header=True,
1922
+ startrow=0,
1923
+ startcol=0)
1924
+ except Exception:
1925
+ pass
1926
+
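+ # Hedged usage sketch: `excel_file` is expected to behave like a pandas
+ # ExcelWriter backed by openpyxl, since both to_excel() and .sheets are
+ # used above (`sankey` is an assumed, already populated instance):
+ # import pandas as pd
+ # with pd.ExcelWriter("sankey_out.xlsx", engine="openpyxl") as writer:
+ #     sankey.write_in_excel_file(
+ #         writer,
+ #         additional_sheets={"Notes": pd.DataFrame({"info": ["draft"]})})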
1927
+ def write_tags_sheet(self, sheets: dict):
1928
+ """
1929
+ Rewrite tags and taggroups in an excel sheet.
1930
+
1931
+ Parameters
1932
+ ----------
1933
+ :param sheets: Contains the excel sheets
1934
+ :type sheets: dict (output, modified)
1935
+ """
1936
+ # ----------------------------------------------------
1937
+ # Check if we have tags to save
1938
+ if not self.has_at_least_one_tagg():
1939
+ return
1940
+ # ----------------------------------------------------
1941
+ # Sheet color
1942
+ SHEET_MAIN_COLOR = '9BBB59'
1943
+ # Sheet formatting infos
1944
+ SHEET_FORMATING = copy.deepcopy(SHEET_FORMATING_BY_DEFAULT)
1945
+ SHEET_FORMATING['header']['fill'] = PatternFill(
1946
+ 'solid', fgColor=SHEET_MAIN_COLOR)
1947
+ # ----------------------------------------------------
1948
+ # Specify columns for table
1949
+ taggs_extra_infos_names = self.taggs_extra_infos_names
1950
+ table_columns = CONST.TAG_SHEET_COLS + taggs_extra_infos_names
1951
+ table_columns = [self.xl_user_converter.get_user_col_name(CONST.TAG_SHEET, _) for _ in table_columns]
1952
+ # ----------------------------------------------------
1953
+ # Fill tag table with types in specific order
1954
+ table_taggs = []
1955
+ for tagg_type in [CONST.TAG_TYPE_LEVEL, CONST.TAG_TYPE_NODE, CONST.TAG_TYPE_DATA, CONST.TAG_TYPE_FLUX]:
1956
+ antagonists_checked = []
1957
+ for tagg in self.taggs[tagg_type].values():
1958
+ # Already taken into account as antagonist tagg?
1959
+ if tagg in antagonists_checked:
1960
+ continue
1961
+ # Tag group infos
1962
+ name = tagg.name_unformatted
1963
+ tags = tagg.tags_str
1964
+ # Specific case with antagonist
1965
+ if tagg.has_antagonists():
1966
+ for antagonist_tagg in tagg.antagonists_taggs:
1967
+ # One line per pair of antagonist tags
1968
+ antagonist_name = name + '/' + antagonist_tagg.name_unformatted
1969
+ antagonist_tags = tags + '/' + antagonist_tagg.tags_str
1970
+ antagonists_checked.append(antagonist_tagg)
1971
+ # Create table line with corresponding data
1972
+ line_tagg = [
1973
+ antagonist_name,
1974
+ tagg_type,
1975
+ antagonist_tags,
1976
+ tagg.is_palette,
1977
+ tagg.colormap,
1978
+ tagg.colors]
1979
+ # Add extra info cols if needed
1980
+ for extra_info_name in taggs_extra_infos_names:
1981
+ if extra_info_name in tagg.extra_infos.keys():
1982
+ line_tagg.append(tagg.extra_infos[extra_info_name])
1983
+ else:
1984
+ line_tagg.append(None)
1985
+ # We can add it directly in the table
1986
+ table_taggs.append(line_tagg)
1987
+ else:
1988
+ # Create table line with corresponding data
1989
+ line_tagg = [
1990
+ name,
1991
+ tagg_type,
1992
+ tags,
1993
+ tagg.is_palette,
1994
+ tagg.colormap,
1995
+ tagg.colors]
1996
+ # Add extra info cols if needed
1997
+ for extra_info_name in taggs_extra_infos_names:
1998
+ if extra_info_name in tagg.extra_infos.keys():
1999
+ line_tagg.append(tagg.extra_infos[extra_info_name])
2000
+ else:
2001
+ line_tagg.append(None)
2002
+ # We can add it directly in the table
2003
+ table_taggs.append(line_tagg)
2004
+ table_taggs = pd.DataFrame(table_taggs, columns=table_columns)
2005
+ # Drop columns that have no values
2006
+ table_taggs.dropna(axis=1, how='all', inplace=True)
2007
+ # Cast NaN as None because if you have None in a float column,
2008
+ # pandas transforms it into NaN -> can't compare in tests afterwards
2009
+ table_taggs.replace({np.nan: None}, inplace=True)
2010
+ # Update excel sheet attributes
2011
+ sheets[CONST.TAG_SHEET] = copy.deepcopy(SHEET_BY_DEFAULT)
2012
+ sheets[CONST.TAG_SHEET]['name'] = self.xl_user_converter.get_user_sheet_name(CONST.TAG_SHEET)
2013
+ sheets[CONST.TAG_SHEET]['color'] = SHEET_MAIN_COLOR
2014
+ sheets[CONST.TAG_SHEET]['table'] = table_taggs
2015
+ sheets[CONST.TAG_SHEET].update(SHEET_FORMATING)
2016
+
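+ # Standalone toy sketch of the antagonist pairing above: each antagonist
+ # pair collapses into a single table line, names and tag strings joined
+ # by "/" (values here are purely illustrative).
+ name, tags = "input", "imports:local"
+ anta_name, anta_tags = "output", "exports:distant"
+ line = [name + '/' + anta_name, tags + '/' + anta_tags]
+ assert line == ["input/output", "imports:local/exports:distant"]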
2017
+ def write_nodes_sheets(
2018
+ self,
2019
+ sheets: dict,
2020
+ nodes_entries: list,
2021
+ nodes_entries__levels: list
2022
+ ):
2023
+ """
2024
+ Rewrite nodes and their respective attributes and infos
2025
+ in one or several excel sheets.
2026
+
2027
+ Parameters
2028
+ ----------
2029
+ :param sheets: Contains the excel sheets
2030
+ :type sheets: dict (output, modified)
2031
+
2032
+ :param nodes_entries: List of nodes sorted as they appear in table
2033
+ :type nodes_entries: list (output, modified)
2034
+
2035
+ :param nodes_entries__levels: List of levels related to nodes sorted as they appear in table
2036
+ :type nodes_entries__levels: list (output, modified)
2037
+ """
2038
+ # ----------------------------------------------------
2039
+ # Sheet color
2040
+ SHEET_MAIN_COLOR = '4F81BD'
2041
+ # Sheet formatting infos
2042
+ SHEET_FORMATING = copy.deepcopy(SHEET_FORMATING_BY_DEFAULT)
2043
+ SHEET_FORMATING['header']['fill'] = PatternFill(
2044
+ 'solid', fgColor=SHEET_MAIN_COLOR)
2045
+ # Possible types of sheets
2046
+ NODES_IN_NODES_SHEET = 1
2047
+ NODES_IN_PRODUCTS_SECTORS_EXCHANGES_SHEETS = 2
2048
+ # ----------------------------------------------------
2049
+ # Default type of sheets
2050
+ sheets_type = NODES_IN_NODES_SHEET
2051
+ # Columns for tags
2052
+ columns_taggs_names = [tagg.name_unformatted for tagg in self.node_taggs]
2053
+ columns_taggs_names += [tagg.name_unformatted for tagg in self.level_taggs]
2054
+ # If we have node type tag (product:sector:exchange),
2055
+ # we remove it from column tags, because
2056
+ # we will create 3 tables (product:sector:exchange)
2057
+ # instead of only one (nodes)
2058
+ if CONST.NODE_TYPE in columns_taggs_names:
2059
+ columns_taggs_names.remove(CONST.NODE_TYPE)
2060
+ sheets_type = NODES_IN_PRODUCTS_SECTORS_EXCHANGES_SHEETS
2061
+ # Specify columns for node table
2062
+ nodes_extra_infos_names = self.nodes_extra_infos_names
2063
+ table_columns = \
2064
+ [CONST.NODES_LEVEL, CONST.NODES_NODE, CONST.NODES_MAT_BALANCE, CONST.NODES_COLOR] + \
2065
+ columns_taggs_names + \
2066
+ [CONST.NODES_DEFINITIONS] + \
2067
+ nodes_extra_infos_names
2068
+ # ----------------------------------------------------
2069
+ # Fill table node depending on which sheet type
2070
+ # - Case 1 : Everything in only one node sheet
2071
+ if sheets_type == NODES_IN_NODES_SHEET:
2072
+ # Fetch table content line by line
2073
+ lineages_tables = []
2074
+ lineages_processed = []
2075
+ lineages_entries = []
2076
+ lineages_entries__levels = []
2077
+ for node in self.nodes.values():
2078
+ if not node.has_parents():
2079
+ current_lineage_table = []
2080
+ current_lineage_entries = []
2081
+ current_lineage_entries__levels = []
2082
+ lineages_tables.append(current_lineage_table)
2083
+ lineages_entries.append(current_lineage_entries)
2084
+ lineages_entries__levels.append(current_lineage_entries__levels)
2085
+ node.update_table(
2086
+ 1,
2087
+ columns_taggs_names,
2088
+ nodes_extra_infos_names,
2089
+ lineages_processed,
2090
+ lineages_tables,
2091
+ current_lineage_table,
2092
+ lineages_entries,
2093
+ current_lineage_entries,
2094
+ lineages_entries__levels,
2095
+ current_lineage_entries__levels)
2096
+ # Stack-up all lineage tables
2097
+ table_node = sum(lineages_tables, [])
2098
+ nodes_entries += sum(lineages_entries, [])
2099
+ nodes_entries__levels += sum(lineages_entries__levels, [])
2100
+ # Fill table
2101
+ table_node = pd.DataFrame(
2102
+ table_node,
2103
+ columns=[self.xl_user_converter.get_user_col_name(CONST.NODES_SHEET, _) for _ in table_columns])
2104
+ # TODO: remove columns that are empty or only contain default values
2105
+ # Example: if all colors are set to grey
2106
+ # Drop columns that have no values
2107
+ table_node.dropna(axis=1, how='all', inplace=True)
2108
+ # Cast NaN as None because if you have None in a float column,
2109
+ # pandas transforms it into NaN -> can't compare in tests afterwards
2110
+ table_node.replace({np.nan: None}, inplace=True)
2111
+ # Save in sheets dictionary
2112
+ sheets[CONST.NODES_SHEET] = copy.deepcopy(SHEET_BY_DEFAULT)
2113
+ sheets[CONST.NODES_SHEET]['table'] = table_node
2114
+ sheets[CONST.NODES_SHEET]['name'] = self.xl_user_converter.get_user_sheet_name(CONST.NODES_SHEET)
2115
+ sheets[CONST.NODES_SHEET]['color'] = SHEET_MAIN_COLOR
2116
+ sheets[CONST.NODES_SHEET].update(SHEET_FORMATING)
2117
+ # ----------------------------------------------------
2118
+ # Special formatting - Colors for levels
2119
+ # Only if we have more than one level
2120
+ if self._max_nodes_level <= 1:
2121
+ return
2122
+ # Create a palette with n colors depending on the number of
2123
+ # aggregation levels, going from light blue to white
2124
+ level_palette = sns \
2125
+ .color_palette(
2126
+ "blend:#ffffff,#{}".format(SHEET_MAIN_COLOR),
2127
+ self._max_nodes_level+1) \
2128
+ .as_hex()
2129
+ # Get colors based on nodes levels
2130
+ colors__nodes_indexs = {}
2131
+ for index_node, (node, node_level) in enumerate(zip(nodes_entries, nodes_entries__levels)):
2132
+ # Color for node level
2133
+ color = level_palette[node_level-1].replace('#', '')
2134
+ # Update Dict
2135
+ if not (color in colors__nodes_indexs.keys()):
2136
+ colors__nodes_indexs[color] = []
2137
+ colors__nodes_indexs[color].append(index_node)
2138
+ # Save as special formatting for excel sheet
2139
+ sheets[CONST.NODES_SHEET]['spe_content'][(0, 1)] = {}
2140
+ for (color, nodes_indexs) in colors__nodes_indexs.items():
2141
+ rows = tuple(nodes_indexs)
2142
+ sheets[CONST.NODES_SHEET]['spe_content'][(0, 1)][rows] = \
2143
+ copy.deepcopy(SHEET_FORMATING['content'])
2144
+ sheets[CONST.NODES_SHEET]['spe_content'][(0, 1)][rows]['fill'] = \
2145
+ PatternFill('solid', fgColor=color)
2146
+ # Case 2 : Nodes are separated in three sheets (Sector, Product, Exchange)
2147
+ if sheets_type == NODES_IN_PRODUCTS_SECTORS_EXCHANGES_SHEETS:
2148
+ tagg_type_node = self.get_tagg_from_name_and_type(CONST.NODE_TYPE, CONST.TAG_TYPE_NODE)
2149
+ for tag in tagg_type_node.tags.values():
2150
+ # Fetch content for table
2151
+ lineages_tables = []
2152
+ lineages_processed = []
2153
+ lineages_entries = []
2154
+ lineages_entries__levels = []
2155
+ for node in tag.references:
2156
+ if not node.has_parents():
2157
+ # Create a new lineage table
2158
+ current_lineage_table = []
2159
+ current_lineage_entries = []
2160
+ current_lineage_entries__levels = []
2161
+ lineages_tables.append(current_lineage_table)
2162
+ lineages_entries.append(current_lineage_entries)
2163
+ lineages_entries__levels.append(current_lineage_entries__levels)
2164
+ # Update given tables
2165
+ node.update_table(
2166
+ 1,
2167
+ columns_taggs_names,
2168
+ nodes_extra_infos_names,
2169
+ lineages_processed,
2170
+ lineages_tables,
2171
+ current_lineage_table,
2172
+ lineages_entries,
2173
+ current_lineage_entries,
2174
+ lineages_entries__levels,
2175
+ current_lineage_entries__levels)
2176
+ # Stack up tables
2177
+ table_node_type = sum(lineages_tables, [])
2178
+ nodes_entries__for_tag = sum(lineages_entries, [])
2179
+ nodes_entries__levels__for_tag = sum(lineages_entries__levels, [])
2180
+ # Fill table
2181
+ table_node_type = pd.DataFrame(
2182
+ table_node_type,
2183
+ columns=[self.xl_user_converter.get_user_col_name(tag.name, _) for _ in table_columns])
2184
+ # Update node entries
2185
+ nodes_entries.extend(nodes_entries__for_tag)
2186
+ nodes_entries__levels.extend(nodes_entries__levels__for_tag)
2187
+ # TODO: remove columns that are empty or only contain default values
2188
+ # Drop columns that have no values
2189
+ table_node_type.dropna(axis=1, how='all', inplace=True)
2190
+ # Cast NaN as None because if you have None in a float column,
2191
+ # pandas transforms it into NaN -> can't compare in tests afterwards
2192
+ table_node_type.replace({np.nan: None}, inplace=True)
2193
+ # Save in sheets dictionary
2194
+ sheets[tag.name] = copy.deepcopy(SHEET_BY_DEFAULT)
2195
+ sheets[tag.name]['table'] = table_node_type
2196
+ sheets[tag.name]['name'] = self.xl_user_converter.get_user_sheet_name(tag.name)
2197
+ sheets[tag.name]['color'] = SHEET_MAIN_COLOR
2198
+ # Update formatting with a copy, otherwise we get shared references & interference
2199
+ # between node type sheets
2200
+ sheets[tag.name].update(copy.deepcopy(SHEET_FORMATING))
2201
+ # ----------------------------------------------------
2202
+ # Special formatting - Colors for levels
2203
+ # Only if we have more than one level
2204
+ if self._max_nodes_level <= 1:
2205
+ continue
2206
+ # Create a palette with n colors depending on the number of
2207
+ # aggregation levels, going from light blue to white
2208
+ level_palette = sns \
2209
+ .color_palette(
2210
+ "blend:#ffffff,#{}".format(SHEET_MAIN_COLOR),
2211
+ self._max_nodes_level+1) \
2212
+ .as_hex()
2213
+ # Get colors based on nodes levels
2214
+ colors__nodes_indexs = {}
2215
+ loop_iterator = enumerate(zip(
2216
+ nodes_entries__for_tag,
2217
+ nodes_entries__levels__for_tag))
2218
+ for index_node, (node, node_level) in loop_iterator:
2219
+ # Color for node level
2220
+ color = level_palette[node_level-1].replace('#', '')
2221
+ # Update Dict
2222
+ if not (color in colors__nodes_indexs.keys()):
2223
+ colors__nodes_indexs[color] = []
2224
+ colors__nodes_indexs[color].append(index_node)
2225
+ # Save as special formatting for excel sheet
2226
+ sheets[tag.name]['spe_content'][(0, 1)] = {}
2227
+ for (color, nodes_indexs) in colors__nodes_indexs.items():
2228
+ rows = tuple(nodes_indexs)
2229
+ sheets[tag.name]['spe_content'][(0, 1)][rows] = \
2230
+ copy.deepcopy(SHEET_FORMATING['content'])
2231
+ sheets[tag.name]['spe_content'][(0, 1)][rows]['fill'] = \
2232
+ PatternFill('solid', fgColor=color)
2233
+
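+ # Minimal sketch of the level-color gradient used above: seaborn's
+ # "blend:" palette string interpolates n colors between two anchors,
+ # here from white to the sheet main color. A node of level L is filled
+ # with palette[L - 1], since levels start at 1.
+ import seaborn as sns
+ level_palette = sns.color_palette("blend:#ffffff,#4F81BD", 4).as_hex()
+ # level_palette[0] is the white end, level_palette[-1] the main color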
2234
+ def write_flux_sheets(
2235
+ self,
2236
+ nodes_entries: list,
2237
+ nodes_entries__levels: list,
2238
+ sheets: dict
2239
+ ):
2240
+ """
2241
+ Rewrite flux and their respective attributes and infos
2242
+ in one or some excel sheets.
2243
+
2244
+ Parameters
2245
+ ----------
2246
+ :param nodes_entries: List of nodes sorted as they appear in nodes tables
2247
+ :type nodes_entries: list
2248
+
2249
+ :param nodes_entries__levels: List of levels related to nodes sorted as they appear in nodes tables
2250
+ :type nodes_entries__levels: list
2251
+
2252
+ :param sheets: Contains the excel sheets
2253
+ :type sheets: dict (output, modified)
2254
+ """
2255
+ # ----------------------------------------------------
2256
+ # Sheet color
2257
+ SHEET_MAIN_COLOR = '4F81BD'
2258
+ SHEET_CELL_COLOR = '87A9D2'
2259
+ # Sheet formatting infos
2260
+ SHEET_FORMATING = copy.deepcopy(SHEET_FORMATING_BY_DEFAULT)
2261
+ SHEET_FORMATING['header']['alignement'] = Alignment(
2262
+ horizontal='left',
2263
+ vertical='bottom',
2264
+ text_rotation=90,
2265
+ wrap_text=False,
2266
+ shrink_to_fit=False,
2267
+ indent=0)
2268
+ SHEET_FORMATING['header']['fill'] = PatternFill(
2269
+ 'solid', fgColor=SHEET_MAIN_COLOR)
2270
+ SHEET_FORMATING['header']['border'] = Border(
2271
+ right=Side(border_style="dashed", color=COLOR_BLACK),
2272
+ bottom=Side(border_style="thick", color=COLOR_BLACK))
2273
+ SHEET_FORMATING['no_header']['border'] = Border(
2274
+ right=Side(border_style="thin", color=COLOR_BLACK),
2275
+ bottom=Side(border_style="thin", color=COLOR_BLACK))
2276
+ SHEET_FORMATING['index']['alignement'] = Alignment(
2277
+ horizontal='right',
2278
+ vertical='center',
2279
+ text_rotation=0,
2280
+ wrap_text=False,
2281
+ shrink_to_fit=False,
2282
+ indent=1)
2283
+ SHEET_FORMATING['index']['fill'] = PatternFill(
2284
+ 'solid', fgColor=SHEET_MAIN_COLOR)
2285
+ SHEET_FORMATING['index']['border'] = Border(
2286
+ right=Side(border_style="thick", color=COLOR_BLACK),
2287
+ bottom=Side(border_style="dashed", color=COLOR_BLACK))
2288
+ SHEET_FORMATING['content']['alignement'] = Alignment(
2289
+ horizontal='center',
2290
+ vertical='center',
2291
+ text_rotation=0,
2292
+ wrap_text=False,
2293
+ shrink_to_fit=False,
2294
+ indent=0)
2295
+ SHEET_FORMATING['content']['fill'] = PatternFill(
2296
+ 'solid', fgColor=SHEET_CELL_COLOR)
2297
+ SHEET_FORMATING['content']['border'] = Border(
2298
+ left=Side(border_style="thin", color=COLOR_BLACK),
2299
+ right=Side(border_style="thin", color=COLOR_BLACK),
2300
+ top=Side(border_style="thin", color=COLOR_BLACK),
2301
+ bottom=Side(border_style="thin", color=COLOR_BLACK))
2302
+ SHEET_FORMATING['no_content']['fill'] = PatternFill(
2303
+ 'solid', fgColor=COLOR_GREY)
2304
+ SHEET_FORMATING['no_content']['border'] = Border(
2305
+ left=Side(border_style="none"),
2306
+ right=Side(border_style="none"),
2307
+ bottom=Side(border_style="none"))
2308
+ # Possible types of sheets
2309
+ FLUX_IN_IO_SHEET = 1
2310
+ FLUX_IN_TER_SHEETS = 2
2311
+ # ----------------------------------------------------
2312
+ # Default type of sheets
2313
+ sheets_type = FLUX_IN_IO_SHEET
2314
+ tagg_type_node = self.get_tagg_from_name_and_type(
2315
+ CONST.NODE_TYPE,
2316
+ CONST.TAG_TYPE_NODE)
2317
+ if tagg_type_node is not None:
2318
+ has_product_tagged_nodes = len(tagg_type_node.tags[CONST.NODE_TYPE_PRODUCT].references) > 0
2319
+ has_sector_tagged_nodes = len(tagg_type_node.tags[CONST.NODE_TYPE_SECTOR].references) > 0
2320
+ has_exchange_tagged_nodes = len(tagg_type_node.tags[CONST.NODE_TYPE_EXCHANGE].references) > 0
2321
+ ok_for_ter_matrix = \
2322
+ (has_product_tagged_nodes or has_exchange_tagged_nodes) and \
2323
+ (has_sector_tagged_nodes or has_exchange_tagged_nodes)
2324
+ if ok_for_ter_matrix:
2325
+ sheets_type = FLUX_IN_TER_SHEETS
2326
+ # ----------------------------------------------------
2327
+ # Fill flux table depending on which sheet type
2328
+ # - Case 1 : Everything in only one IO sheet
2329
+ if sheets_type == FLUX_IN_IO_SHEET:
2330
+ # Create matrix
2331
+ matrix = _createMatrixFromFlux(
2332
+ nodes_entries,
2333
+ nodes_entries)
2334
+ # From matrix create table with correct header & index names
2335
+ nodes_entries_names = [_.name for _ in nodes_entries]
2336
+ table = pd.DataFrame(
2337
+ matrix,
2338
+ index=nodes_entries_names,
2339
+ columns=nodes_entries_names)
2340
+ # Save in sheets table and parameters
2341
+ sheets[CONST.IO_SHEET] = copy.deepcopy(SHEET_BY_DEFAULT)
2342
+ sheets[CONST.IO_SHEET]['table'] = table
2343
+ sheets[CONST.IO_SHEET]['name'] = self.xl_user_converter.get_user_sheet_name(CONST.IO_SHEET)
2344
+ sheets[CONST.IO_SHEET]['color'] = SHEET_MAIN_COLOR
2345
+ sheets[CONST.IO_SHEET]['write_index'] = True
2346
+ # Initialize default values for sheet formatting
2347
+ sheets[CONST.IO_SHEET].update(copy.deepcopy(SHEET_FORMATING))
2348
+ # For index and header, the height and width depend on nodes names
2349
+ max_size = max(len(_) for _ in nodes_entries_names) + 10  # longest node name
2350
+ sheets[CONST.IO_SHEET]['header']['default_height'] = max_size*10
2351
+ sheets[CONST.IO_SHEET]['index']['default_width'] = max_size*2
2352
+ # For content, use fixed width and height
2353
+ sheets[CONST.IO_SHEET]['content']['compute_width'] = False
2354
+ sheets[CONST.IO_SHEET]['content']['default_width'] = 5
2355
+ sheets[CONST.IO_SHEET]['content']['default_height'] = 15
2356
+ # ----------------------------------------------------
2357
+ # Special formatting - Colors for levels
2358
+ # Only if we have more than one level
2359
+ if self._max_nodes_level <= 1:
2360
+ return
2361
+ # Create a color gradient (from white to main_color) to fill nodes
2362
+ # cells depending on their respective level
2363
+ level_palette = sns \
2364
+ .color_palette(
2365
+ "blend:#ffffff,#{}".format(SHEET_MAIN_COLOR),
2366
+ self._max_nodes_level+1) \
2367
+ .as_hex()
2368
+ # Get colors based on nodes levels
2369
+ colors__nodes_indexs = {}
2370
+ for index_node, (node, node_level) in enumerate(zip(nodes_entries, nodes_entries__levels)):
2371
+ # Color for node level
2372
+ color = level_palette[node_level-1].replace('#', '')
2373
+ # Update Dict
2374
+ if not (color in colors__nodes_indexs.keys()):
2375
+ colors__nodes_indexs[color] = []
2376
+ colors__nodes_indexs[color].append(index_node)
2377
+ # Save as special formatting for excel sheet
2378
+ sheets[CONST.IO_SHEET]['spe_content'][(0,)] = {}
2379
+ for (color, nodes_indexs) in colors__nodes_indexs.items():
2380
+ # For header, shift one col to the right, because we have the index col in first col
2381
+ cols = tuple(_+1 for _ in nodes_indexs)
2382
+ # For index no shift
2383
+ rows = tuple(nodes_indexs)
2384
+ # First row (Header)
2385
+ sheets[CONST.IO_SHEET]['spe_header'][cols] = \
2386
+ copy.deepcopy(sheets[CONST.IO_SHEET]['header'])  # Keep other header's formatting attributes
2387
+ sheets[CONST.IO_SHEET]['spe_header'][cols]['fill'] = \
2388
+ PatternFill('solid', fgColor=color) # Apply color filling based on node's level
2389
+ # First col (Index)
2390
+ sheets[CONST.IO_SHEET]['spe_content'][(0,)][rows] = \
2391
+ copy.deepcopy(sheets[CONST.IO_SHEET]['index'])  # Keep other index's formatting attributes
2392
+ sheets[CONST.IO_SHEET]['spe_content'][(0,)][rows]['fill'] = \
2393
+ PatternFill('solid', fgColor=color) # Apply color filling based on node's level
2394
+ # - Case 2 : Everything in only one TER sheet
2395
+ if sheets_type == FLUX_IN_TER_SHEETS:
2396
+ # Number of rows between the two matrices
2397
+ NB_ROWS_BETWEEN_MATRIXS = 2 # /!\ must be > 1
2398
+ # Extract tags
2399
+ tag_product = tagg_type_node.get_tag_from_name(CONST.NODE_TYPE_PRODUCT)
2400
+ tag_sector = tagg_type_node.get_tag_from_name(CONST.NODE_TYPE_SECTOR)
2401
+ tag_exchange = tagg_type_node.get_tag_from_name(CONST.NODE_TYPE_EXCHANGE)
2402
+ # Extract nodes from tags
2403
+ nodes_tagged_as_products = tag_product.references
2404
+ nodes_tagged_as_sectors = tag_sector.references + tag_exchange.references
2405
+ # Use nodes entries to sort nodes from their parenthood relations and levels
2406
+ nodes_entries_tagged_as_products = []
2407
+ nodes_entries_tagged_as_products__levels = []
2408
+ nodes_entries_tagged_as_sectors = []
2409
+ nodes_entries_tagged_as_sectors__levels = []
2410
+ for (node, node_level) in zip(nodes_entries, nodes_entries__levels):
2411
+ if node in nodes_tagged_as_products:
2412
+ nodes_entries_tagged_as_products.append(node)
2413
+ nodes_entries_tagged_as_products__levels.append(node_level)
2414
+ if node in nodes_tagged_as_sectors:
2415
+ nodes_entries_tagged_as_sectors.append(node)
2416
+ nodes_entries_tagged_as_sectors__levels.append(node_level)
2417
+ # Create the two matrices
2418
+ # 1 : sectors -> products
2419
+ # 2 : products -> sectors
2420
+ matrix_1 = _createMatrixFromFlux(
2421
+ nodes_entries_tagged_as_sectors,
2422
+ nodes_entries_tagged_as_products,
2423
+ transpose=True)
2424
+ matrix_2 = _createMatrixFromFlux(
2425
+ nodes_entries_tagged_as_products,
2426
+ nodes_entries_tagged_as_sectors)
2427
+ # Fuse the two matrices
2428
+ # Header and indexes
2429
+ header = [_.name for _ in nodes_entries_tagged_as_sectors]
2430
+ index = [_.name for _ in nodes_entries_tagged_as_products]
2431
+ # Leave a white line between the two matrices
2432
+ matrix = \
2433
+ matrix_1 + \
2434
+ [[None]*len(nodes_entries_tagged_as_sectors)]*(NB_ROWS_BETWEEN_MATRIXS - 1) + \
2435
+ [header] + \
2436
+ matrix_2
2437
+ # Pandas table
2438
+ table = pd.DataFrame(
2439
+ matrix,
2440
+ index=(index + [None]*NB_ROWS_BETWEEN_MATRIXS + index),  # White lines between matrices
2441
+ columns=header)
2442
+ # Save in sheets table and parameters
2443
+ sheets[CONST.TER_SHEET] = {}
2444
+ sheets[CONST.TER_SHEET].update(copy.deepcopy(SHEET_BY_DEFAULT))
2445
+ sheets[CONST.TER_SHEET]['table'] = table
2446
+ sheets[CONST.TER_SHEET]['name'] = self.xl_user_converter.get_user_sheet_name(CONST.TER_SHEET)
2447
+ sheets[CONST.TER_SHEET]['color'] = SHEET_MAIN_COLOR
2448
+ sheets[CONST.TER_SHEET]['write_index'] = True
2449
+ # Initialize default values for formatting
2450
+ sheets[CONST.TER_SHEET].update(copy.deepcopy(SHEET_FORMATING))
2451
+ # For index and header, the height and width depend on nodes names
2452
+ max_size_header = max(len(_) for _ in header) + 10  # longest header name
2453
+ max_size_index = max(len(_) for _ in index) + 10  # longest index name
2454
+ sheets[CONST.TER_SHEET]['header']['default_height'] = max_size_header*10
2455
+ sheets[CONST.TER_SHEET]['index']['default_width'] = max_size_index*2
2456
+ # For content use fixed width and height
2457
+ sheets[CONST.TER_SHEET]['content']['compute_width'] = False
2458
+ sheets[CONST.TER_SHEET]['content']['default_width'] = 3
2459
+ sheets[CONST.TER_SHEET]['content']['default_height'] = 15
2460
+ # ----------------------------------------------------
2461
+ # Special formating - Colors for levels
2462
+ # Only if we have more than one level
2463
+ if self._max_nodes_level <= 1:
2464
+ return
2465
+ # Create a color gradient (from white to main_color) to fill nodes
2466
+ # cells depending on their respective level
2467
+ level_palette = sns \
2468
+ .color_palette(
2469
+ "blend:#ffffff,#{}".format(SHEET_MAIN_COLOR),
2470
+ self._max_nodes_level+1) \
2471
+ .as_hex()
2472
+ # Header - Get colors based on nodes levels
2473
+ colors__nodes_indexs = {}
2474
+ loop_iterator = enumerate(zip(
2475
+ nodes_entries_tagged_as_sectors,
2476
+ nodes_entries_tagged_as_sectors__levels))
2477
+ for index_node, (node, node_level) in loop_iterator:
2478
+ # Color for node level
2479
+ # /!\ Levels start from 1, but the level palette starts from 0
2480
+ color = level_palette[node_level-1].replace('#', '')
2481
+ # Update Dict
2482
+ if not (color in colors__nodes_indexs.keys()):
2483
+ colors__nodes_indexs[color] = []
2484
+ colors__nodes_indexs[color].append(index_node)
2485
+ # Header - Save special formatting (colors based on nodes levels)
2486
+ second_matrix_starting_row = len(index) + (NB_ROWS_BETWEEN_MATRIXS - 1)
2487
+ for (color, nodes_indexs) in colors__nodes_indexs.items():
2488
+ # Convert as tuple to be used as dict key
2489
+ # Shift one col to the right, because we have the index col in first col
2490
+ cols = tuple(_+1 for _ in nodes_indexs)
2491
+ # Special formatting for first matrix header
2492
+ sheets[CONST.TER_SHEET]['spe_header'][cols] = \
2493
+ copy.deepcopy(sheets[CONST.TER_SHEET]['header'])  # Keep other header's formatting attributes
2494
+ sheets[CONST.TER_SHEET]['spe_header'][cols]['fill'] = \
2495
+ PatternFill('solid', fgColor=color)
2496
+ # Special formatting for second matrix header
2497
+ sheets[CONST.TER_SHEET]['spe_content'][cols] = {}  # Init entry for this column tuple
2498
+ sheets[CONST.TER_SHEET]['spe_content'][cols][(second_matrix_starting_row,)] = \
2499
+ sheets[CONST.TER_SHEET]['spe_header'][cols]  # Copy of first matrix's header's formatting attributes
2500
+ # Upper left corner of second matrix
2501
+ sheets[CONST.TER_SHEET]['spe_content'][(0,)] = {}
2502
+ sheets[CONST.TER_SHEET]['spe_content'][(0,)][(second_matrix_starting_row,)] = \
2503
+ sheets[CONST.TER_SHEET]['no_header']
2504
+ # Index - Get colors based on nodes levels
2505
+ colors__nodes_indexs = {}
2506
+ loop_iterator = enumerate(zip(
2507
+ nodes_entries_tagged_as_products,
2508
+ nodes_entries_tagged_as_products__levels))
2509
+ for index_node, (node, node_level) in loop_iterator:
2510
+ # Color for node level
2511
+ # /!\ Nodes levels start from 1, but the level_palette table starts from 0
2512
+ color = level_palette[node_level-1].replace('#', '')
2513
+ # Update Dict
2514
+ if not (color in colors__nodes_indexs.keys()):
2515
+ colors__nodes_indexs[color] = []
2516
+ colors__nodes_indexs[color].append(index_node)
2517
+ colors__nodes_indexs[color].append(
2518
+ index_node + len(nodes_entries_tagged_as_products) + NB_ROWS_BETWEEN_MATRIXS)
2519
+ # Index - Save special formatting (colors based on nodes levels)
2520
+ sheets[CONST.TER_SHEET]['spe_content'].setdefault((0,), {})  # Keep the corner formatting set above
2521
+ for (color, nodes_indexs) in colors__nodes_indexs.items():
2522
+ # Convert as tuple to be used as dict key
2523
+ row_id = tuple(nodes_indexs)
2524
+ # Special formatting for indexes
2525
+ sheets[CONST.TER_SHEET]['spe_content'][(0,)][row_id] = \
2526
+ copy.deepcopy(sheets[CONST.TER_SHEET]['index'])
2527
+ sheets[CONST.TER_SHEET]['spe_content'][(0,)][row_id]['fill'] = \
2528
+ PatternFill('solid', fgColor=color)
2529
+
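+ # Standalone toy sketch of the TER layout built above: the two matrices
+ # are stacked with separator rows and a repeated header line in between.
+ header = ["s1", "s2"]
+ matrix_1 = [[1, 0]]  # sectors -> products (transposed)
+ matrix_2 = [[0, 2]]  # products -> sectors
+ NB_ROWS_BETWEEN_MATRIXS = 2
+ stacked = \
+     matrix_1 + \
+     [[None] * len(header)] * (NB_ROWS_BETWEEN_MATRIXS - 1) + \
+     [header] + \
+     matrix_2
+ assert len(stacked) == len(matrix_1) + NB_ROWS_BETWEEN_MATRIXS + len(matrix_2)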
2530
+ def write_data_sheets(
2531
+ self,
2532
+ nodes_entries: list,
2533
+ sheets: dict
2534
+ ):
2535
+ """
2536
+ Rewrite all data-related sheets and their respective attributes and infos
2537
+ in one or some excel sheets. That includes:
2538
+ - Data
2539
+ - Min Max
2540
+ - Constraints
2541
+ - Results
2542
+ - Analysis
2543
+
2544
+ Parameters
2545
+ ----------
2546
+ :param nodes_entries: List of nodes sorted as they appear in nodes tables
2547
+ :type nodes_entries: list
2548
+
2549
+ :param sheets: Contains the excel sheets
2550
+ :type sheets: dict (output, modified)
2551
+ """
2552
+ # ----------------------------------------------------
2553
+ if not self.has_at_least_one_data():
2554
+ return
2555
+ # ----------------------------------------------------
2556
+ # Sheet color
2557
+ SHEET_MAIN_DATA_COLOR = '8064A2'  # Violet
2558
+ SHEET_MAIN_RESULTS_COLOR = '8064A2' # Violet
2559
+ # Sheet formatting infos
2560
+ SHEET_FORMATING_FOR_DATA = copy.deepcopy(SHEET_FORMATING_BY_DEFAULT)
2561
+ SHEET_FORMATING_FOR_DATA['header']['fill'] = PatternFill(
2562
+ 'solid', fgColor=SHEET_MAIN_DATA_COLOR)
2563
+ SHEET_FORMATING_FOR_RESULTS = copy.deepcopy(SHEET_FORMATING_BY_DEFAULT)
2564
+ SHEET_FORMATING_FOR_RESULTS['header']['fill'] = PatternFill(
2565
+ 'solid', fgColor=SHEET_MAIN_RESULTS_COLOR)
2566
+ # ----------------------------------------------------
2567
+ # Create tables
2568
+ table_data, table_min_max, table_constraints, table_results, table_analysis = \
2569
+ self._create_all_data_and_result_tables(
2570
+ default_header=False,
2571
+ reorder_tables=True,
2572
+ nodes_entries=nodes_entries)
2573
+ # ----------------------------------------------------
2574
+ # DATA_SHEET : Update excel sheet attributes
2575
+ sheets[CONST.DATA_SHEET] = copy.deepcopy(SHEET_BY_DEFAULT)
2576
+ sheets[CONST.DATA_SHEET]['name'] = \
2577
+ self.xl_user_converter.get_user_sheet_name(CONST.DATA_SHEET)
2578
+ sheets[CONST.DATA_SHEET]['color'] = SHEET_MAIN_DATA_COLOR
2579
+ sheets[CONST.DATA_SHEET]['table'] = table_data
2580
+ sheets[CONST.DATA_SHEET].update(copy.deepcopy(SHEET_FORMATING_FOR_DATA))
2581
+ # MIN_MAX_SHEET : Update excel sheet attributes
2582
+ if not table_min_max.empty:
2583
+ sheets[CONST.MIN_MAX_SHEET] = copy.deepcopy(SHEET_BY_DEFAULT)
2584
+ sheets[CONST.MIN_MAX_SHEET]['name'] = \
2585
+ self.xl_user_converter.get_user_sheet_name(CONST.MIN_MAX_SHEET)
2586
+ sheets[CONST.MIN_MAX_SHEET]['color'] = SHEET_MAIN_DATA_COLOR
2587
+ sheets[CONST.MIN_MAX_SHEET]['table'] = table_min_max
2588
+ sheets[CONST.MIN_MAX_SHEET].update(copy.deepcopy(SHEET_FORMATING_FOR_DATA))
2589
+ # CONSTRAINTS_SHEET : Update excel sheet attributes
2590
+ if not table_constraints.empty:
2591
+ sheets[CONST.CONSTRAINTS_SHEET] = copy.deepcopy(SHEET_BY_DEFAULT)
2592
+ sheets[CONST.CONSTRAINTS_SHEET]['name'] = \
2593
+ self.xl_user_converter.get_user_sheet_name(CONST.CONSTRAINTS_SHEET)
2594
+ sheets[CONST.CONSTRAINTS_SHEET]['color'] = SHEET_MAIN_DATA_COLOR
2595
+ sheets[CONST.CONSTRAINTS_SHEET]['table'] = table_constraints
2596
+ sheets[CONST.CONSTRAINTS_SHEET].update(copy.deepcopy(SHEET_FORMATING_FOR_DATA))
2597
+ # Visual demarcation between constraint ids
2598
+ col_name = self.xl_user_converter.get_user_col_name(
2599
+ CONST.CONSTRAINTS_SHEET, CONST.CONSTRAINT_ID)
2600
+ cols = tuple(range(len(table_constraints.columns)))
2601
+ prev_id = None
2602
+ rows = []
2603
+ for _, id in enumerate(table_constraints[col_name]):
2604
+ row = _ - 1
2605
+ if prev_id is None:
2606
+ prev_id = id
2607
+ else:
2608
+ if prev_id == id:
2609
+ rows.append(row)
2610
+ else:
2611
+ prev_id = id
2612
+ if len(rows) > 0:
2613
+ rows = tuple(rows)
2614
+ sheets[CONST.CONSTRAINTS_SHEET]['spe_content'][cols] = {}
2615
+ sheets[CONST.CONSTRAINTS_SHEET]['spe_content'][cols][rows] = \
2616
+ copy.deepcopy(sheets[CONST.CONSTRAINTS_SHEET]['content'])
2617
+ sheets[CONST.CONSTRAINTS_SHEET]['spe_content'][cols][rows]['border'] = \
2618
+ Border(
2619
+ left=Side(border_style="thin", color=COLOR_BLACK),
2620
+ right=Side(border_style="thin", color=COLOR_BLACK),
2621
+ top=Side(border_style="none"))
2622
+ # ----------------------------------------------------
2623
+ # RESULTS_SHEET : Update excel sheet attributes
2624
+ if not table_results.empty:
2625
+ sheets[CONST.RESULTS_SHEET] = copy.deepcopy(SHEET_BY_DEFAULT)
2626
+ sheets[CONST.RESULTS_SHEET]['name'] = \
2627
+ self.xl_user_converter.get_user_sheet_name(CONST.RESULTS_SHEET)
2628
+ sheets[CONST.RESULTS_SHEET]['color'] = SHEET_MAIN_RESULTS_COLOR
2629
+ sheets[CONST.RESULTS_SHEET]['table'] = table_results
2630
+ sheets[CONST.RESULTS_SHEET].update(SHEET_FORMATING_FOR_RESULTS)
2631
+ # ----------------------------------------------------
2632
+ # ANALYSIS_SHEET : Update excel sheet attributes
2633
+ if not table_analysis.empty:
2634
+ sheets[CONST.ANALYSIS_SHEET] = copy.deepcopy(SHEET_BY_DEFAULT)
2635
+ sheets[CONST.ANALYSIS_SHEET]['name'] = \
2636
+ self.xl_user_converter.get_user_sheet_name(CONST.ANALYSIS_SHEET)
2637
+ sheets[CONST.ANALYSIS_SHEET]['color'] = SHEET_MAIN_RESULTS_COLOR
2638
+ sheets[CONST.ANALYSIS_SHEET]['table'] = table_analysis
2639
+ sheets[CONST.ANALYSIS_SHEET].update(SHEET_FORMATING_FOR_RESULTS)
2640
+
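+ # Standalone toy sketch of the id demarcation above: rows repeating the
+ # previous constraint id are collected (shifted by -1, mirroring the
+ # `row = _ - 1` offset) so their top border can be removed.
+ ids = ["c1", "c1", "c2", "c2", "c2"]
+ rows = [i - 1 for i in range(1, len(ids)) if ids[i] == ids[i - 1]]
+ assert rows == [0, 2, 3]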
2641
+ def get_as_dict(self):
2642
+ """
2643
+ """
2644
+ # Init output structure
2645
+ output_dict = {}
2646
+ # Parse tags
2647
+ output_dict["taggs"] = {}
2648
+ for tag_type in self.taggs.keys():
2649
+ output_dict["taggs"][tag_type] = {}
2650
+ tagg_names_sorted = sorted(self.taggs[tag_type].keys())
2651
+ for tagg_name in tagg_names_sorted:
2652
+ tagg = self.taggs[tag_type][tagg_name]
2653
+ output_dict["taggs"][tag_type][tagg_name] = tagg.get_as_dict()
2654
+ # Parse nodes
2655
+ nodes_names_sorted = sorted(self.nodes.keys())
2656
+ output_dict["nodes"] = {}
2657
+ for node_name in nodes_names_sorted:
2658
+ node = self.nodes[node_name]
2659
+ output_dict["nodes"][node_name] = node.get_as_dict()
2660
+ # Parse flux
2661
+ flux_names_sorted = sorted(self.flux.keys())
2662
+ output_dict["flux"] = {}
2663
+ for flux_name in flux_names_sorted:
2664
+ flux = self.flux[flux_name]
2665
+ output_dict["flux"][flux_name] = flux.get_as_dict()
2666
+ # Parse constraints
2667
+ constraints_ids_sorted = sorted(self.constraints.keys())
2668
+ output_dict["constraints"] = {}
2669
+ for constraint_id in constraints_ids_sorted:
2670
+ constraint = self.constraints[constraint_id]
2671
+ output_dict["constraints"][constraint_id] = [_.get_as_dict() for _ in constraint]
2672
+ # End
2673
+ return output_dict
2674
+
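+ # Hedged usage sketch: the returned structure is plain data, so it can be
+ # serialized for snapshots or regression tests (`sankey` assumed populated):
+ # import json
+ # snapshot = json.dumps(sankey.get_as_dict(), indent=2, sort_keys=True)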
2675
+ def update_mfa_dict(self, mfa_dict: dict):
2676
+ """
2677
+ Update the given MFA dict with the whole sankey content (tags, nodes, flux, data).
2678
+
2679
+ Parameters
2680
+ ----------
2681
+ :param mfa_dict: Data dictionary
2682
+ :type mfa_dict: dict
2683
+ """
2684
+ self.update_mfa_dict_taggs(mfa_dict)
2685
+ self.update_mfa_dict_nodes(mfa_dict)
2686
+ self.update_mfa_dict_flux(mfa_dict)
2687
+ self.update_mfa_dict_data_and_result(mfa_dict)
2688
+
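+ # Hedged usage sketch: the MFA dict is filled in place, one pandas
+ # DataFrame per sheet id (`sankey` assumed populated):
+ # mfa_dict = {}
+ # sankey.update_mfa_dict(mfa_dict)
+ # mfa_dict[CONST.NODES_SHEET]  # -> nodes table as a DataFrame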
2689
+ def update_mfa_dict_taggs(self, mfa_dict: dict):
2690
+ """
2691
+ Update the given MFA dict with the tag groups table.
2692
+
2693
+ Parameters
2694
+ ----------
2695
+ :param mfa_dict: Data dictionary
2696
+ :type mfa_dict: dict
2697
+ """
2698
+ # Check if we have tags to save
2699
+ if not self.has_at_least_one_tagg():
2700
+ return
2701
+ # Specify columns for the tags table
2702
+ taggs_extra_infos_names = self.taggs_extra_infos_names
2703
+ table_taggs__columns = CONST.TAG_SHEET_COLS + taggs_extra_infos_names
2704
+ # Fill tag table with types in specific order
2705
+ table_taggs = []
2706
+ for tagg_type in [CONST.TAG_TYPE_LEVEL, CONST.TAG_TYPE_NODE, CONST.TAG_TYPE_DATA, CONST.TAG_TYPE_FLUX]:
2707
+ antagonists_checked = []
2708
+ for tagg in self.taggs[tagg_type].values():
2709
+ # Already taken into account as antagonist tagg?
2710
+ if tagg in antagonists_checked:
2711
+ continue
2712
+ # Tag group infos
2713
+ name = tagg.name_unformatted
2714
+ tags = tagg.tags_str
2715
+ # Specific case with antagonist
2716
+ if tagg.has_antagonists():
2717
+ for antagonist_tagg in tagg.antagonists_taggs:
2718
+ name += '/' + antagonist_tagg.name_unformatted
2719
+ tags += '/' + antagonist_tagg.tags_str
2720
+ antagonists_checked.append(antagonist_tagg)
2721
+ # Create table line with corresponding data
2722
+ line_tagg = [
2723
+ name,
2724
+ tagg_type,
2725
+ tags,
2726
+ tagg.is_palette,
2727
+ tagg.colormap,
2728
+ tagg.colors]
2729
+ # Add extra info cols if needed
2730
+ for extra_info_name in taggs_extra_infos_names:
2731
+ if extra_info_name in tagg.extra_infos.keys():
2732
+ line_tagg.append(tagg.extra_infos[extra_info_name])
2733
+ else:
2734
+ line_tagg.append(None)
2735
+ # We can add it directly in the table
2736
+ table_taggs.append(line_tagg)
2737
+ table_taggs = pd.DataFrame(table_taggs, columns=table_taggs__columns)
2738
+ # Drop columns that have no values
2739
+ table_taggs.dropna(axis=1, how='all', inplace=True)
2740
+ # Cast NaN as None because if you have None in a float column,
2741
+ # panda transform it as NaN -> cant compare tests after
2742
+ table_taggs.replace({np.nan: None}, inplace=True)
2743
+ # Update MFA dict
2744
+ mfa_dict[CONST.TAG_SHEET] = table_taggs
2745
+
2746
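The antagonist branch above folds a tag group and its antagonists into a single table row, joining names and tag strings with '/'. A schematic sketch (the group names are invented for illustration):

# Tag group "Inputs" with antagonist group "Outputs" yields one line where:
#   name -> "Inputs/Outputs"
#   tags -> "<Inputs.tags_str>/<Outputs.tags_str>"
# and "Outputs" is skipped when its own turn comes in the loop.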
+     def update_mfa_dict_nodes(self, mfa_dict: dict):
+         """
+         Update the given MFA dict with the nodes sheet.
+
+         Parameters
+         ----------
+         :param mfa_dict: Data dictionary to update
+         :type mfa_dict: dict
+         """
+         # Columns for tags
+         columns_taggs_names = [tagg.name_unformatted for tagg in self.node_taggs]
+         columns_taggs_names += [tagg.name_unformatted for tagg in self.level_taggs]
+         # If we have the node type tag (product:sector:exchange),
+         # then it must come first among the tag columns
+         if CONST.NODE_TYPE in columns_taggs_names:
+             columns_taggs_names.remove(CONST.NODE_TYPE)
+             columns_taggs_names.insert(0, CONST.NODE_TYPE)
+         # Specify columns for node table
+         nodes_extra_infos_names = self.nodes_extra_infos_names
+         table_node__columns = \
+             [CONST.NODES_LEVEL, CONST.NODES_NODE, CONST.NODES_MAT_BALANCE, CONST.NODES_COLOR] + \
+             columns_taggs_names + \
+             [CONST.NODES_DEFINITIONS] + \
+             nodes_extra_infos_names
+         # Fill node table
+         table_node = []
+         nodes_processed = []
+         for node in self.nodes.values():
+             if node not in nodes_processed:
+                 self._create_node_line(
+                     node,
+                     node.level,
+                     columns_taggs_names,
+                     nodes_extra_infos_names,
+                     table_node,
+                     nodes_processed)
+         table_node = pd.DataFrame(table_node, columns=table_node__columns)
+         # Drop columns that have no values
+         table_node.dropna(axis=1, how='all', inplace=True)
+         # Cast NaN to None: pandas stores None as NaN in float columns,
+         # which breaks comparisons in tests afterwards
+         table_node.replace({np.nan: None}, inplace=True)
+         # Update MFA dict
+         mfa_dict[CONST.NODES_SHEET] = table_node
+
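The resulting node sheet therefore has the following column layout, with the tag columns sandwiched between the fixed blocks (constant names as used above):

# table_node columns, left to right:
#   NODES_LEVEL | NODES_NODE | NODES_MAT_BALANCE | NODES_COLOR
#   | node/level tag-group columns (NODE_TYPE first, when present)
#   | NODES_DEFINITIONS
#   | extra-info columns (dropped afterwards if entirely empty)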
+     def _create_node_line(
+         self,
+         node: Node,
+         node_level: int,
+         columns_taggs_names: list,
+         nodes_extra_infos_names: list,
+         table_node: list,
+         nodes_processed: list,
+         process_children: bool = True
+     ):
+         """
+         Append a table line for the given node, then recursively append
+         lines for its children.
+
+         Parameters
+         ----------
+         :param node: Node to process
+         :type node: Node
+
+         :param node_level: Hierarchy level to write for this node
+         :type node_level: int
+
+         :param columns_taggs_names: Names of the tag columns
+         :type columns_taggs_names: list
+
+         :param nodes_extra_infos_names: Names of the extra info columns
+         :type nodes_extra_infos_names: list
+
+         :param table_node: Node table to fill (list of lines)
+         :type table_node: list
+
+         :param nodes_processed: Nodes already written to the table
+         :type nodes_processed: list
+
+         :param process_children: If True, recurse on the node's children
+         :type process_children: bool (default=True)
+         """
+         # Create table line with corresponding data
+         line_node = [
+             node_level,
+             node.name,
+             node.mat_balance,
+             node.color]
+         # Add tags
+         line_node += node.get_tags_from_taggroups(
+             columns_taggs_names, return_names_instead_of_refs=True)
+         # Add definition
+         line_node.append(node.definition)
+         # Add extra info cols if needed
+         for extra_info_name in nodes_extra_infos_names:
+             if extra_info_name in node.extra_infos.keys():
+                 line_node.append(node.extra_infos[extra_info_name])
+             else:
+                 line_node.append(None)
+         # Add line to the table
+         table_node.append(line_node)
+         # If this node has children, we add them directly under it
+         if node.has_at_least_one_child() and process_children:
+             for childrengrp_id, childgroup in enumerate(node.children_grps):
+                 # Multiple child groups: repeat the current node's line for each extra group
+                 if childrengrp_id > 0:
+                     table_node.append(line_node)
+                 # Recursively process children
+                 for child in childgroup:
+                     self._create_node_line(
+                         child,
+                         node_level+1,
+                         columns_taggs_names,
+                         nodes_extra_infos_names,
+                         table_node,
+                         nodes_processed,
+                         process_children=(child not in nodes_processed))
+         # Node processed
+         nodes_processed.append(node)
+
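A sketch of the row ordering this recursion produces for a small hierarchy (node names and levels are invented): each parent line is immediately followed by its children one level deeper, and a child already processed elsewhere is re-listed without re-expanding its own children.

# A -> [B, C], B -> [D], one child group each, starting at level 1:
#   [1, "A", ...]
#   [2, "B", ...]
#   [3, "D", ...]
#   [2, "C", ...]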
+     def update_mfa_dict_flux(self, mfa_dict: dict):
+         """
+         Update the given MFA dict with the flux sheet.
+
+         Parameters
+         ----------
+         :param mfa_dict: Data dictionary to update
+         :type mfa_dict: dict
+         """
+         # Check if we have flux to save
+         if not self.has_at_least_one_flux():
+             return
+         # Init flux table
+         table_flux = []
+         # Fill flux table
+         for flux in self.flux.values():
+             # Create table line with corresponding data
+             line_flux = [
+                 flux.orig.name,
+                 flux.dest.name]
+             # We can add it directly to the table
+             table_flux.append(line_flux)
+         # Cast to DataFrame
+         table_flux = pd.DataFrame(table_flux, columns=CONST.FLUX_SHEET_COLS)
+         # Update MFA dict
+         mfa_dict[CONST.FLUX_SHEET] = table_flux
+
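Each flux contributes a single origin/destination pair, so the flux sheet is just a two-column table; the actual column titles come from CONST.FLUX_SHEET_COLS, and the node names below are illustrative:

#   <origin column>   <destination column>
#   "sector_a"        "product_x"
#   "sector_a"        "product_y"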
+     def update_mfa_dict_data_and_result(
+         self,
+         mfa_dict: dict
+     ):
+         """
+         Update the given MFA dict with the data, min/max, constraints,
+         results and analysis sheets.
+
+         Parameters
+         ----------
+         :param mfa_dict: Data dictionary to update
+         :type mfa_dict: dict
+         """
+         # Check if we have data to save
+         # TODO: probably also need to check for results data
+         # if not self.has_at_least_one_data():
+         #     return
+         # Create tables
+         table_data, table_min_max, table_constraints, table_results, table_analysis = \
+             self._create_all_data_and_result_tables()
+         # Update MFA dict, keeping only non-empty tables
+         if not table_data.empty:
+             mfa_dict[CONST.DATA_SHEET] = table_data
+         if not table_min_max.empty:
+             mfa_dict[CONST.MIN_MAX_SHEET] = table_min_max
+         if not table_constraints.empty:
+             mfa_dict[CONST.CONSTRAINTS_SHEET] = table_constraints
+         if not table_results.empty:
+             mfa_dict[CONST.RESULTS_SHEET] = table_results
+         if not table_analysis.empty:
+             mfa_dict[CONST.ANALYSIS_SHEET] = table_analysis
+
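Since only non-empty tables are written, downstream code should treat these sheets as optional. A minimal sketch, assuming `mfa_dict` was filled as above:

data_table = mfa_dict.get(CONST.DATA_SHEET)  # None when no data rows were produced
if data_table is not None:
    print(data_table.columns.tolist())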
+     def _create_all_data_and_result_tables(
+         self,
+         default_header=True,
+         reorder_tables=False,
+         nodes_entries=None
+     ):
+         """
+         Create the data, min/max, constraints, results and analysis tables.
+
+         Parameters
+         ----------
+         :param default_header: Use the default column headers; if False, use
+             the user's column names from the Excel converter
+         :type default_header: bool (default=True)
+
+         :param reorder_tables: Reorder the tables' rows according to the data tags
+         :type reorder_tables: bool (default=False)
+
+         :param nodes_entries: Node ordering to follow when writing tables. If None, no specific order is followed.
+         :type nodes_entries: None | list (default=None)
+         """
+         # Columns for tags
+         columns_datataggs_names = [tagg.name_unformatted for tagg in self.data_taggs]
+         columns_fluxtaggs_names = [tagg.name_unformatted for tagg in self.flux_taggs]
+         # ----------------------------------------------------
+         # Specify all columns for data table
+         data_extra_infos_names = []  # = self.data_extra_infos_names
+         table_data__cols = \
+             CONST.DATA_SHEET_COLS_1 + \
+             columns_datataggs_names + \
+             columns_fluxtaggs_names + \
+             CONST.DATA_SHEET_COLS_2 + \
+             data_extra_infos_names
+         if not default_header:
+             table_data__cols = \
+                 [self.xl_user_converter.get_user_col_name(CONST.DATA_SHEET, _) for _ in table_data__cols]
+         # Specify all columns for min_max table
+         table_min_max__cols = \
+             CONST.MIN_MAX_SHEET_COLS_1 + \
+             columns_datataggs_names + \
+             columns_fluxtaggs_names + \
+             CONST.MIN_MAX_SHEET_COLS_2
+         if not default_header:
+             table_min_max__cols = \
+                 [self.xl_user_converter.get_user_col_name(CONST.MIN_MAX_SHEET, _) for _ in table_min_max__cols]
+         # Specify all columns for constraints table
+         table_constraints__cols = \
+             CONST.CONSTRAINT_SHEET_COLS_1 + \
+             columns_datataggs_names + \
+             columns_fluxtaggs_names + \
+             CONST.CONSTRAINT_SHEET_COLS_2
+         if not default_header:
+             table_constraints__cols = \
+                 [self.xl_user_converter.get_user_col_name(CONST.CONSTRAINTS_SHEET, _) for _ in table_constraints__cols]
+         # ----------------------------------------------------
+         # Specify all columns for results table
+         table_results__cols = \
+             CONST.RESULTS_SHEET_COLS_1 + \
+             columns_datataggs_names + \
+             columns_fluxtaggs_names + \
+             CONST.RESULTS_SHEET_COLS_2
+         if not default_header:
+             table_results__cols = \
+                 [self.xl_user_converter.get_user_col_name(CONST.RESULTS_SHEET, _) for _ in table_results__cols]
+         # Specify all columns for analysis table
+         table_analysis__cols = \
+             CONST.ANALYSIS_SHEET_COLS_1 + \
+             columns_datataggs_names + \
+             columns_fluxtaggs_names + \
+             CONST.ANALYSIS_SHEET_COLS_2
+         # ----------------------------------------------------
+         # Init empty tables
+         table_data = []
+         table_min_max = []
+         table_constraints = []
+         table_results = []
+         table_analysis = []
+         # ----------------------------------------------------
+         # Write data sheets
+         # Fill data table: loop on flux, because data are related to flux
+         if nodes_entries is None:
+             nodes_entries = self.nodes.values()
+         # Keep in memory the list of already processed flux
+         flux_processed = []
+         for node in nodes_entries:
+             for flux in node.output_flux:
+                 # Check that the flux has not already been processed
+                 if flux in flux_processed:
+                     continue
+                 # ----------------------------------------------------
+                 # Fill tables from data-related information
+                 for data in flux.datas:
+                     # Update data sheet
+                     data.update_table(
+                         columns_datataggs_names,
+                         columns_fluxtaggs_names,
+                         data_extra_infos_names,
+                         table_data)
+                     # Update min_max sheet
+                     data.min_max.update_table(
+                         columns_datataggs_names,
+                         columns_fluxtaggs_names,
+                         [],  # No extra info for min_max
+                         table_min_max)
+                     # Update constraints sheet
+                     for constraints in data.constraints.values():
+                         for constraint in constraints:
+                             constraint.update_table(
+                                 columns_datataggs_names,
+                                 columns_fluxtaggs_names,
+                                 [],  # No extra info for constraints
+                                 table_constraints)
+                 # ----------------------------------------------------
+                 # Fill remaining data info that is related to the flux itself
+                 # Update min_max sheet
+                 flux.min_max.update_table(
+                     columns_datataggs_names,
+                     columns_fluxtaggs_names,
+                     [],  # No extra info for min_max
+                     table_min_max)
+                 # Update constraints sheet
+                 for constraints in flux.constraints.values():
+                     for constraint in constraints:
+                         constraint.update_table(
+                             columns_datataggs_names,
+                             columns_fluxtaggs_names,
+                             [],  # No extra info for constraints
+                             table_constraints)
+                 # ----------------------------------------------------
+                 # Fill tables from results-related information
+                 for result in flux.results:
+                     # Update results sheet
+                     result.update_table(
+                         columns_datataggs_names,
+                         columns_fluxtaggs_names,
+                         data_extra_infos_names,
+                         table_results,
+                         as_result=True,
+                         table_for_analysis=table_analysis)
+                 # ----------------------------------------------------
+                 # Keep track of processed flux
+                 flux_processed.append(flux)
+         # ----------------------------------------------------
+         # Create pandas tables with correct headers
+         table_data = pd.DataFrame(
+             table_data,
+             columns=table_data__cols)
+         table_min_max = pd.DataFrame(
+             table_min_max,
+             columns=table_min_max__cols)
+         table_constraints = pd.DataFrame(
+             table_constraints,
+             columns=table_constraints__cols)
+         table_results = pd.DataFrame(
+             table_results,
+             columns=table_results__cols)
+         # Special case for analysis sheet: an extra column may have been produced
+         try:
+             if len(table_analysis[0]) > len(table_analysis__cols):
+                 table_analysis__cols.append('Ai constraints ids')
+         except Exception:
+             pass
+         table_analysis = pd.DataFrame(
+             table_analysis,
+             columns=table_analysis__cols)
+         # ----------------------------------------------------
+         # Constraints table must be sorted by ids
+         id_col = CONST.CONSTRAINT_ID
+         if not default_header:
+             id_col = self.xl_user_converter.get_user_col_name(
+                 CONST.CONSTRAINTS_SHEET, CONST.CONSTRAINT_ID)
+         table_constraints.sort_values(
+             id_col,
+             axis=0,
+             ascending=True,
+             inplace=True)
+         # ----------------------------------------------------
+         # Sort all tables according to data tags
+         if reorder_tables:
+             tables_reordered = []
+             # Ordering priority for col header / data tag groups
+             ordering_priority_for_datataggs = [
+                 tagg.name_unformatted
+                 for tagg in reversed(self.data_taggs)]
+             # Ordering priority of contents for each col header / data tags
+             ordering_priority_for_datatags = [
+                 [tag.name_unformatted for tag in tagg.tags.values()]
+                 for tagg in reversed(self.data_taggs)]
+             for table in (table_data, table_min_max, table_constraints, table_results, table_analysis):
+                 # Reorder table
+                 tables_reordered.append(
+                     _reorderTable(
+                         table,
+                         ordering_priority_for_datataggs.copy(),
+                         ordering_priority_for_datatags.copy()))
+             table_data, table_min_max, table_constraints, table_results, table_analysis = tables_reordered
+         # ----------------------------------------------------
+         # Drop columns that have no values
+         table_data.dropna(axis=1, how='all', inplace=True)
+         table_min_max.dropna(axis=1, how='all', inplace=True)
+         table_constraints.dropna(axis=1, how='all', inplace=True)
+         table_results.dropna(axis=1, how='all', inplace=True)
+         table_analysis.dropna(axis=1, how='all', inplace=True)
+         # Cast NaN to None: pandas stores None as NaN in float columns,
+         # which breaks comparisons in tests afterwards
+         table_data.replace({np.nan: None}, inplace=True)
+         table_min_max.replace({np.nan: None}, inplace=True)
+         table_constraints.replace({np.nan: None}, inplace=True)
+         table_results.replace({np.nan: None}, inplace=True)
+         table_analysis.replace({np.nan: None}, inplace=True)
+         # Outputs
+         return table_data, table_min_max, table_constraints, table_results, table_analysis
+
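To summarize the construction above, the five returned tables share the same middle section (data-tag columns, then flux-tag columns) wrapped in sheet-specific fixed blocks:

# table_data:        DATA_SHEET_COLS_1       + data tags + flux tags + DATA_SHEET_COLS_2 + extra infos
# table_min_max:     MIN_MAX_SHEET_COLS_1    + data tags + flux tags + MIN_MAX_SHEET_COLS_2
# table_constraints: CONSTRAINT_SHEET_COLS_1 + data tags + flux tags + CONSTRAINT_SHEET_COLS_2
# table_results:     RESULTS_SHEET_COLS_1    + data tags + flux tags + RESULTS_SHEET_COLS_2
# table_analysis:    ANALYSIS_SHEET_COLS_1   + data tags + flux tags + ANALYSIS_SHEET_COLS_2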
+     def autocompute_nodes_types(self):
+         """
+         Compute all nodes' types. A node's type can be:
+
+         - SR = Single-Root: node with no child and no parent
+         - PR = Parent-Root: node with at least one child but no parent
+         - PC = Parent-Child: node with at least one child and at least one parent
+         - BC = Base-Child: node with no child and at least one parent
+         """
+         # Loop on all nodes
+         for node in self.nodes.values():
+             # Avoid numerous calls to parents / children existence checks
+             has_parents = node.has_parents()
+             has_at_least_one_child = node.has_at_least_one_child()
+             # Assign node type:
+             # 'SR': single root, 'PR': parent root, 'BC': base child, 'PC': parent child
+             if has_parents and has_at_least_one_child:
+                 node.type = 'PC'
+             elif has_parents:
+                 node.type = 'BC'
+             elif has_at_least_one_child:
+                 node.type = 'PR'
+             else:
+                 node.type = 'SR'
+             # Special case - net imports ("importation nette"): force parent-root type
+             if ' nette' in node.name:
+                 node.type = 'PR'
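The assignment above reduces to a truth table over the two checks, with one name-based override:

# has_parents  has_at_least_one_child  -> node.type
# True         True                    -> 'PC' (parent child)
# True         False                   -> 'BC' (base child)
# False        True                    -> 'PR' (parent root)
# False        False                   -> 'SR' (single root)
# Nodes whose name contains ' nette' are then forced to 'PR'.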