aiphoria-0.0.1-py3-none-any.whl → aiphoria-0.8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiphoria/__init__.py +59 -0
- aiphoria/core/__init__.py +55 -0
- aiphoria/core/builder.py +305 -0
- aiphoria/core/datachecker.py +1808 -0
- aiphoria/core/dataprovider.py +806 -0
- aiphoria/core/datastructures.py +1686 -0
- aiphoria/core/datavisualizer.py +431 -0
- aiphoria/core/datavisualizer_data/LICENSE +21 -0
- aiphoria/core/datavisualizer_data/datavisualizer_plotly.html +5561 -0
- aiphoria/core/datavisualizer_data/pako.min.js +2 -0
- aiphoria/core/datavisualizer_data/plotly-3.0.0.min.js +3879 -0
- aiphoria/core/flowmodifiersolver.py +1754 -0
- aiphoria/core/flowsolver.py +1472 -0
- aiphoria/core/logger.py +113 -0
- aiphoria/core/network_graph.py +136 -0
- aiphoria/core/network_graph_data/ECHARTS_LICENSE +202 -0
- aiphoria/core/network_graph_data/echarts_min.js +45 -0
- aiphoria/core/network_graph_data/network_graph.html +76 -0
- aiphoria/core/network_graph_data/network_graph.js +1391 -0
- aiphoria/core/parameters.py +269 -0
- aiphoria/core/types.py +20 -0
- aiphoria/core/utils.py +362 -0
- aiphoria/core/visualizer_parameters.py +7 -0
- aiphoria/data/example_scenario.xlsx +0 -0
- aiphoria/example.py +66 -0
- aiphoria/lib/docs/dynamic_stock.py +124 -0
- aiphoria/lib/odym/modules/ODYM_Classes.py +362 -0
- aiphoria/lib/odym/modules/ODYM_Functions.py +1299 -0
- aiphoria/lib/odym/modules/__init__.py +1 -0
- aiphoria/lib/odym/modules/dynamic_stock_model.py +808 -0
- aiphoria/lib/odym/modules/test/DSM_test_known_results.py +762 -0
- aiphoria/lib/odym/modules/test/ODYM_Classes_test_known_results.py +107 -0
- aiphoria/lib/odym/modules/test/ODYM_Functions_test_known_results.py +136 -0
- aiphoria/lib/odym/modules/test/__init__.py +2 -0
- aiphoria/runner.py +678 -0
- aiphoria-0.8.0.dist-info/METADATA +119 -0
- aiphoria-0.8.0.dist-info/RECORD +40 -0
- {aiphoria-0.0.1.dist-info → aiphoria-0.8.0.dist-info}/WHEEL +1 -1
- aiphoria-0.8.0.dist-info/licenses/LICENSE +21 -0
- aiphoria-0.0.1.dist-info/METADATA +0 -5
- aiphoria-0.0.1.dist-info/RECORD +0 -5
- {aiphoria-0.0.1.dist-info → aiphoria-0.8.0.dist-info}/top_level.txt +0 -0
aiphoria/core/datavisualizer.py
@@ -0,0 +1,431 @@
+import os
+import json
+import zlib
+import base64
+import webbrowser
+from typing import List, Dict, Any
+import plotly.graph_objects as go
+from PIL import Image
+from .datastructures import Scenario, Color
+from .parameters import ParameterName
+from importlib.resources import files
+
+class DataVisualizer(object):
+    def __init__(self):
+        pass
+
+    def build_and_show(self, scenarios: List[Scenario],
+                       visualizer_params: dict,
+                       model_params: dict,
+                       combine_to_one_file: bool = True) -> None:
+        """
+        Build and show the scenarios in the browser.
+
+        :param scenarios: List of Scenario-objects
+        :param visualizer_params: Dictionary of visualizer parameters
+        :param model_params: Dictionary of model parameters (refer to Builder.py / build_results)
+        :param combine_to_one_file: Combine multiple scenarios to one output file (default: True)
+        :return: None
+        """
+
+        scenario_name_to_info = {}  # Scenario name to info
+        scenario_name_to_data = {}  # Scenario name to year to data
+        for scenario in scenarios:
+            scenario_name_to_info[scenario.name] = self._build_scenario_info(scenario)
+            scenario_name_to_data[scenario.name] = self._build_scenario_year_to_data(scenario, visualizer_params)
+
+        if combine_to_one_file:
+            # Build combined output file that contains all scenarios
+            for scenario in scenarios:
+                scenario_name_to_info[scenario.name] = self._build_scenario_info(scenario)
+                scenario_name_to_data[scenario.name] = self._build_scenario_year_to_data(scenario, visualizer_params)
+
+            # Generate HTML file for scenarios
+            html = self._build_combined_scenario_graph(scenario_name_to_info, scenario_name_to_data, visualizer_params)
+
+            output_path = model_params[ParameterName.OutputPath]
+            output_filename = "combined_sankey.html"
+            abs_path_to_file = os.path.join(output_path, output_filename)
+
+            with open(abs_path_to_file, "w", encoding="utf-8") as fs:
+                fs.write(html)
+
+            if model_params[ParameterName.ShowPlots]:
+                webbrowser.open("file://" + os.path.realpath(abs_path_to_file))
+
+        else:
+            # Build separate files for each scenario
+            for scenario_name in scenario_name_to_data:
+                # Generate HTML file for scenario
+                scenario_info = scenario_name_to_info[scenario_name]
+                scenario_year_to_data = scenario_name_to_data[scenario_name]
+
+                html = self._build_scenario_graph(scenario_name,
+                                                  scenario_name_to_info,
+                                                  scenario_name_to_data,
+                                                  visualizer_params)
+
+                output_path = os.path.join(model_params[ParameterName.OutputPath], scenario_name)
+                output_filename = "{}_sankey.html".format(scenario_name)
+                abs_path_to_file = os.path.join(output_path, output_filename)
+
+                with open(abs_path_to_file, "w", encoding="utf-8") as fs:
+                    fs.write(html)
+
+                if model_params[ParameterName.ShowPlots]:
+                    webbrowser.open("file://" + os.path.realpath(abs_path_to_file))
+
+    def _build_scenario_year_to_data(self, scenario: Scenario, params: Dict):
+        flow_solver = scenario.flow_solver
+
+        small_node_threshold = params["small_node_threshold"]
+        process_transformation_stage_colors = params["process_transformation_stage_colors"]
+        virtual_process_graph_labels = params["virtual_process_graph_labels"]
+        flow_alpha = params["flow_alpha"]
+        virtual_process_color = params["virtual_process_color"]
+        virtual_flow_color = params["virtual_flow_color"]
+
+        # Baseline value name and unit names are used for flow data
+        baseline_value_name = scenario.model_params[ParameterName.BaselineValueName]
+        baseline_unit_name = scenario.model_params[ParameterName.BaselineUnitName]
+
+        # Check if all transformation stages have defined color
+        unique_transformation_stages = set()
+        year_to_process_to_flows = flow_solver.get_year_to_process_to_flows()
+        first_year = list(year_to_process_to_flows.keys())[0]
+        for process in year_to_process_to_flows[first_year]:
+            unique_transformation_stages.add(process.transformation_stage)
+
+        # Build colors for missing transformation stages or create default color palette
+        self._build_default_transformation_stage_colors(unique_transformation_stages,
+                                                        process_transformation_stage_colors)
+
+        year_to_data = {}
+        year_to_process_to_flows = flow_solver.get_year_to_process_to_flows()
+        for year, process_to_flows in year_to_process_to_flows.items():
+            year_to_data[year] = {}
+
+            # Per year data
+            process_id_to_index = {}
+            for index, process in enumerate(process_to_flows):
+                process_id_to_index[process.id] = index
+
+            # Per year data of nodes and links for graph
+            year_node_labels = []
+            year_sources = []
+            year_targets = []
+            year_node_colors = []
+            year_node_positions_x = []
+            year_node_positions_y = []
+            year_node_custom_data = []
+            year_link_values = []
+            year_link_colors = []
+            year_link_custom_data = []
+
+            for index, process in enumerate(process_to_flows):
+                node_label = process.id + "({})".format(process.transformation_stage)
+                if process.label_in_graph:
+                    node_label = process.label_in_graph
+
+                # Use virtual process color by default
+                node_color = virtual_process_color
+                if not process.is_virtual:
+                    node_color = process_transformation_stage_colors[process.transformation_stage]
+                else:
+                    # Check if there is a new label for virtual process
+                    if process.id in virtual_process_graph_labels:
+                        node_label = virtual_process_graph_labels[process.id]
+
+                year_node_labels.append(node_label)
+                year_node_colors.append(node_color)
+                year_node_positions_x.append(process.position_x)
+                year_node_positions_y.append(process.position_y)
+
+                inflows = process_to_flows[process]["in"]
+                outflows = process_to_flows[process]["out"]
+
+                # Calculate total inflows and total outflows for Process
+                total_inflows = sum([flow.evaluated_value for flow in inflows])
+                total_outflows = sum([flow.evaluated_value for flow in outflows])
+                for flow in outflows:
+                    if flow.source_process_id not in process_id_to_index:
+                        print("Source {} not found in process_id_to_index!".format(flow.source_process_id))
+                        continue
+
+                    if flow.target_process_id not in process_id_to_index:
+                        print("Target {} not found in process_id_to_index!".format(flow.target_process_id))
+                        continue
+
+                    source_index = process_id_to_index[flow.source_process_id]
+                    target_index = process_id_to_index[flow.target_process_id]
+                    year_sources.append(source_index)
+                    year_targets.append(target_index)
+                    year_link_values.append(flow.evaluated_value)
+
+                    link_color = ""
+                    if flow.is_virtual:
+                        link_color = virtual_flow_color.lstrip("#")
+                        r, g, b = tuple(int(link_color[i:i+2], 16) for i in (0, 2, 4))
+                        link_color = "rgba({},{},{},{})".format(r, g, b, flow_alpha)
+                    else:
+                        link_color = process_transformation_stage_colors[process.transformation_stage]
+                        link_color = link_color.lstrip("#")
+                        r, g, b = tuple(int(link_color[i:i+2], 16) for i in (0, 2, 4))
+                        link_color = "rgba({},{},{},{})".format(r / 255, g / 255, b / 255, flow_alpha)
+
+                    year_link_colors.append(link_color)
+
+                    # Custom data for link
+                    year_link_custom_data.append(
+                        dict(
+                            source_process_id=flow.source_process_id,
+                            target_process_id=flow.target_process_id,
+                            is_visible=True,
+                            is_virtual=flow.is_virtual,
+                            evaluated_value=flow.evaluated_value,
+                            evaluated_share=flow.evaluated_share,
+                            baseline_unit_name=baseline_unit_name,
+                            baseline_value_name=baseline_value_name,
+                            unit=flow.unit,
+                            indicator_names=flow.get_indicator_names(),
+                            indicator_units=flow.get_indicator_units(),
+                            evaluated_indicator_values=flow.get_all_evaluated_values()
+                        )
+                    )
+
+                # Custom data for node
+                year_node_custom_data.append(
+                    dict(
+                        node_id=process.id,
+                        is_visible=True,
+                        is_virtual=process.is_virtual,
+                        total_inflows=total_inflows,
+                        total_outflows=total_outflows,
+                        has_stock=process.stock_lifetime > 0,
+                        transformation_stage=process.transformation_stage,
+                        stock=dict(
+                            distribution_type=process.stock_distribution_type,
+                            distribution_params=process.stock_distribution_params,
+                            lifetime=process.stock_lifetime,
+                        ),
+                        x=process.position_x,
+                        y=process.position_y,
+                    ))
+
+            year_to_data[year] = {
+                "labels": year_node_labels,
+                "sources": year_sources,
+                "targets": year_targets,
+                "values": year_link_values,
+                "node_colors": year_node_colors,
+                "link_colors": year_link_colors,
+                "link_custom_data": year_link_custom_data,
+                "node_positions_x": year_node_positions_x,
+                "node_positions_y": year_node_positions_y,
+                "node_custom_data": year_node_custom_data
+            }
+
+        # Make list of indicator names
+        stock_indicator_names = [baseline_value_name]
+        stock_indicator_units = [baseline_unit_name]
+        for indicator_name, indicator_entry in scenario.scenario_data.indicator_name_to_indicator.items():
+            stock_indicator_names.append(indicator_name)
+            stock_indicator_units.append(indicator_entry.unit)
+
+        # Unpack stock data to yearly values
+        dsm_baselines = flow_solver.get_baseline_dynamic_stocks()
+        dsm_indicators = flow_solver.get_indicator_dynamic_stocks()
+        for year_index, year in enumerate(year_to_data.keys()):
+            stock_id_to_stock_inflow = {}
+            stock_id_to_stock_outflow = {}
+            stock_id_to_stock_total = {}
+            stock_ids = [stock_id for stock_id in dsm_baselines.keys()]
+            for stock_id in stock_ids:
+                stock_id_to_stock_inflow[stock_id] = []
+                stock_id_to_stock_outflow[stock_id] = []
+                stock_id_to_stock_total[stock_id] = []
+
+                process_dsm_baseline = dsm_baselines[stock_id]
+                dsm_inflows = process_dsm_baseline.i
+                dsm_outflows = process_dsm_baseline.o
+                dsm_total = process_dsm_baseline.s
+                stock_id_to_stock_inflow[stock_id].append(dsm_inflows[year_index])
+                stock_id_to_stock_outflow[stock_id].append(dsm_outflows[year_index])
+                stock_id_to_stock_total[stock_id].append(dsm_total[year_index])
+                for indicator_name, process_dsm_indicator in dsm_indicators[stock_id].items():
+                    dsm_inflows = process_dsm_indicator.i
+                    dsm_outflows = process_dsm_indicator.o
+                    dsm_total = process_dsm_indicator.s
+                    stock_id_to_stock_inflow[stock_id].append(dsm_inflows[year_index])
+                    stock_id_to_stock_outflow[stock_id].append(dsm_outflows[year_index])
+                    stock_id_to_stock_total[stock_id].append(dsm_total[year_index])
+
+            stock_data = {
+                "stock_ids": stock_ids,
+                "stock_indicator_names": stock_indicator_names,
+                "stock_indicator_units": stock_indicator_units,
+                "stock_inflows": stock_id_to_stock_inflow,
+                "stock_outflows": stock_id_to_stock_outflow,
+                "stock_totals": stock_id_to_stock_total,
+            }
+            for key in stock_data.keys():
+                year_to_data[year][key] = stock_data[key]
+
+        return year_to_data
+
+    def _build_scenario_info(self, scenario: Scenario) -> Dict[str, Any]:
+        """
+        Build info from Scenario-object.
+
+        :param scenario: Target Scenario-object
+        :return: Dictionary (key, value)
+        """
+        scenario_info = {}
+        scenario_info["scenario_name"] = scenario.name
+        scenario_info["baseline_value_name"] = scenario.scenario_data.baseline_value_name
+        scenario_info["baseline_unit_name"] = scenario.scenario_data.baseline_unit_name
+        return scenario_info
+
+    def _build_scenario_graph(self,
+                              scenario_name: str,
+                              scenario_name_to_info: Dict[str, Any],
+                              scenario_name_to_data: Dict[str, Dict] = None,
+                              params: Dict = None):
+
+        # Leave only key with target scenario name
+        target_scenario_name_to_info = {scenario_name: scenario_name_to_info[scenario_name]}
+        target_scenario_name_to_data = {scenario_name: scenario_name_to_data[scenario_name]}
+
+        # Add JS script that is run after the Plotly has loaded
+        filename_plotly = os.path.join(os.path.abspath("."), "core", "datavisualizer_data/plotly-3.0.0.min.js")
+        filename_pako = os.path.join(os.path.abspath("."), "core", "datavisualizer_data/pako.min.js")
+        filename_html = os.path.join(os.path.abspath("."), "core", "datavisualizer_data/datavisualizer_plotly.html")
+
+        # Read HTML file contents
+        html = ""
+        with open(filename_html, "r", encoding="utf-8") as fs:
+            html = fs.read()
+
+        # Read PlotlyJS file contents
+        plotly_js = ""
+        with open(filename_plotly, "r", encoding="utf-8") as fs:
+            plotly_js = fs.read()
+
+        # Read Pako file contents
+        pako_js = ""
+        with open(filename_pako, "r", encoding="utf-8") as fs:
+            pako_js = fs.read()
+
+        # Replace contents with data
+        # Plotly
+        html = html.replace(
+            '<script src="./plotly-3.0.0.min.js"></script>',
+            f'<script type="text/javascript">{plotly_js}</script>')
+
+        # Pako
+        html = html.replace(
+            '<script src="./pako.min.js"></script>',
+            f'<script type="text/javascript">{pako_js}</script>')
+
+        # Encode scenario info data as base64-encoded zlib data
+        info_json = json.dumps(target_scenario_name_to_info)
+        info_compressed = zlib.compress(info_json.encode("utf-8"))
+        info_base64 = base64.b64encode(info_compressed).decode("utf-8")
+        html = html.replace("// rawScenarioInfo:", "rawScenarioInfo:")
+        html = html.replace("{rawScenarioInfo}", json.dumps(info_base64))
+
+        # Encode scenario data as base64-encoded zlib data
+        data_json = json.dumps(target_scenario_name_to_data)
+        data_compressed = zlib.compress(data_json.encode("utf-8"))
+        data_base64 = base64.b64encode(data_compressed).decode("utf-8")
+        html = html.replace("// rawScenarioData:", "rawScenarioData:")
+        html = html.replace("{rawScenarioData}", json.dumps(data_base64))
+
+        return html
+
+    def _build_combined_scenario_graph(self,
+                                       scenario_name_to_info: Dict[str, Any],
+                                       scenario_name_to_data: Dict[str, Any] = None,
+                                       params: Dict = None):
+
+        # Add JS script that is run after the Plotly has loaded
+        # filename_plotly = os.path.join(os.path.abspath("."), "core", "datavisualizer_data/plotly-3.0.0.min.js")
+        # filename_pako = os.path.join(os.path.abspath("."), "core", "datavisualizer_data/pako.min.js")
+        # filename_html = os.path.join(os.path.abspath("."), "core", "datavisualizer_data/datavisualizer_plotly.html")
+        filename_plotly = files("aiphoria.core").joinpath("datavisualizer_data/plotly-3.0.0.min.js")
+        filename_pako = files("aiphoria.core").joinpath("datavisualizer_data/pako.min.js")
+        filename_html = files("aiphoria.core").joinpath("datavisualizer_data/datavisualizer_plotly.html")
+
+        # Read HTML file contents
+        html = ""
+        with open(filename_html, "r", encoding="utf-8") as fs:
+            html = fs.read()
+
+        # Read PlotlyJS file contents
+        plotly_js = ""
+        with open(filename_plotly, "r", encoding="utf-8") as fs:
+            plotly_js = fs.read()
+
+        # Read Pako file contents
+        pako_js = ""
+        with open(filename_pako, "r", encoding="utf-8") as fs:
+            pako_js = fs.read()
+
+        # Replace contents with data
+        # Plotly
+        html = html.replace(
+            '<script src="./plotly-3.0.0.min.js"></script>',
+            f'<script type="text/javascript">{plotly_js}</script>')
+
+        # Pako
+        html = html.replace(
+            '<script src="./pako.min.js"></script>',
+            f'<script type="text/javascript">{pako_js}</script>')
+
+        # Encode scenario info data as base64-encoded zlib data
+        info_json = json.dumps(scenario_name_to_info)
+        info_compressed = zlib.compress(info_json.encode("utf-8"))
+        info_base64 = base64.b64encode(info_compressed).decode("utf-8")
+        html = html.replace("// rawScenarioInfo:", "rawScenarioInfo:")
+        html = html.replace("{rawScenarioInfo}", json.dumps(info_base64))
+
+        # Encode scenario data as base64-encoded zlib data
+        data_json = json.dumps(scenario_name_to_data)
+        data_compressed = zlib.compress(data_json.encode("utf-8"))
+        data_base64 = base64.b64encode(data_compressed).decode("utf-8")
+        html = html.replace("// rawScenarioData:", "rawScenarioData:")
+        html = html.replace("{rawScenarioData}", json.dumps(data_base64))
+
+        return html
+
+    def _build_default_transformation_stage_colors(self,
+                                                   unique_transformation_stages: set,
+                                                   process_transformation_stage_colors: Dict[str, str]):
+        """
+        Build and fill missing transformation stage colors with default color palette.
+
+        :param unique_transformation_stages: Set of unique transformation stages
+        :param process_transformation_stage_colors: Dictionary (transformation stage, Color)
+        """
+
+        # Default color palette for 8 transformation stages
+        default_color_palette = [
+            "#7dda60",
+            "#eb5e34",
+            "#8c76cf",
+            "#5baa11",
+            "#3281db",
+            "#61b053",
+            "#efc3ca",
+            "#dfc57b",
+        ]
+
+        # Find missing transformation stage names
+        defined_transformation_stage_names = set(list(process_transformation_stage_colors.keys()))
+        missing_transformation_stage_names = unique_transformation_stages.difference(defined_transformation_stage_names)
+
+        # Fill process_transformation_stage_colors with default palette colors
+        for index, transformation_stage in enumerate(missing_transformation_stage_names):
+            color = default_color_palette[index % len(default_color_palette)]
+            new_color = Color(params=[transformation_stage, color])
+            process_transformation_stage_colors[transformation_stage] = new_color.value
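The new DataVisualizer is the part of this release most likely to be called directly, so a short usage sketch may help when reviewing the diff. It is inferred only from the code above: the visualizer_params keys are the ones read in _build_scenario_year_to_data, and the model_params keys are the ParameterName entries looked up in build_and_show. The helper function name, the concrete parameter values, and the way Scenario objects are obtained (presumably via the package's runner/builder, which is not shown in this hunk) are assumptions, not documented API.

```python
# Hypothetical usage sketch for aiphoria.core.datavisualizer.DataVisualizer.
# Parameter keys are taken from this diff; the values and the helper name are assumed.
from typing import List

from aiphoria.core.datavisualizer import DataVisualizer
from aiphoria.core.datastructures import Scenario
from aiphoria.core.parameters import ParameterName


def show_sankeys(scenarios: List[Scenario], output_path: str = "output") -> None:
    """Render all scenarios into a single combined_sankey.html and open it in a browser."""
    visualizer_params = {
        "small_node_threshold": 0.01,               # assumed threshold, not defined in this diff
        "process_transformation_stage_colors": {},  # empty -> default palette is filled in
        "virtual_process_graph_labels": {},         # optional relabels for virtual processes
        "flow_alpha": 0.5,                          # link transparency (assumed value)
        "virtual_process_color": "#cccccc",         # assumed hex color
        "virtual_flow_color": "#999999",            # assumed hex color
    }
    model_params = {
        ParameterName.OutputPath: output_path,      # directory is expected to exist
        ParameterName.ShowPlots: True,              # open the generated HTML afterwards
    }
    DataVisualizer().build_and_show(scenarios,
                                    visualizer_params=visualizer_params,
                                    model_params=model_params,
                                    combine_to_one_file=True)
```

With combine_to_one_file=True the visualizer writes a single combined_sankey.html under the output path and inlines Plotly, pako, and the zlib-compressed, base64-encoded scenario payload into that file, so the output is viewable offline.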
aiphoria/core/datavisualizer_data/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016-2024 Plotly Technologies Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.