aiphoria 0.0.1__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiphoria/__init__.py +59 -0
- aiphoria/core/__init__.py +55 -0
- aiphoria/core/builder.py +305 -0
- aiphoria/core/datachecker.py +1808 -0
- aiphoria/core/dataprovider.py +806 -0
- aiphoria/core/datastructures.py +1686 -0
- aiphoria/core/datavisualizer.py +431 -0
- aiphoria/core/datavisualizer_data/LICENSE +21 -0
- aiphoria/core/datavisualizer_data/datavisualizer_plotly.html +5561 -0
- aiphoria/core/datavisualizer_data/pako.min.js +2 -0
- aiphoria/core/datavisualizer_data/plotly-3.0.0.min.js +3879 -0
- aiphoria/core/flowmodifiersolver.py +1754 -0
- aiphoria/core/flowsolver.py +1472 -0
- aiphoria/core/logger.py +113 -0
- aiphoria/core/network_graph.py +136 -0
- aiphoria/core/network_graph_data/ECHARTS_LICENSE +202 -0
- aiphoria/core/network_graph_data/echarts_min.js +45 -0
- aiphoria/core/network_graph_data/network_graph.html +76 -0
- aiphoria/core/network_graph_data/network_graph.js +1391 -0
- aiphoria/core/parameters.py +269 -0
- aiphoria/core/types.py +20 -0
- aiphoria/core/utils.py +362 -0
- aiphoria/core/visualizer_parameters.py +7 -0
- aiphoria/data/example_scenario.xlsx +0 -0
- aiphoria/example.py +66 -0
- aiphoria/lib/docs/dynamic_stock.py +124 -0
- aiphoria/lib/odym/modules/ODYM_Classes.py +362 -0
- aiphoria/lib/odym/modules/ODYM_Functions.py +1299 -0
- aiphoria/lib/odym/modules/__init__.py +1 -0
- aiphoria/lib/odym/modules/dynamic_stock_model.py +808 -0
- aiphoria/lib/odym/modules/test/DSM_test_known_results.py +762 -0
- aiphoria/lib/odym/modules/test/ODYM_Classes_test_known_results.py +107 -0
- aiphoria/lib/odym/modules/test/ODYM_Functions_test_known_results.py +136 -0
- aiphoria/lib/odym/modules/test/__init__.py +2 -0
- aiphoria/runner.py +678 -0
- aiphoria-0.8.0.dist-info/METADATA +119 -0
- aiphoria-0.8.0.dist-info/RECORD +40 -0
- {aiphoria-0.0.1.dist-info → aiphoria-0.8.0.dist-info}/WHEEL +1 -1
- aiphoria-0.8.0.dist-info/licenses/LICENSE +21 -0
- aiphoria-0.0.1.dist-info/METADATA +0 -5
- aiphoria-0.0.1.dist-info/RECORD +0 -5
- {aiphoria-0.0.1.dist-info → aiphoria-0.8.0.dist-info}/top_level.txt +0 -0
aiphoria/runner.py
ADDED
|
@@ -0,0 +1,678 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import time
|
|
4
|
+
import shutil
|
|
5
|
+
import pandas as pd
|
|
6
|
+
import numpy as np
|
|
7
|
+
from tqdm import tqdm
|
|
8
|
+
from typing import Union
|
|
9
|
+
import matplotlib.pyplot as plt
|
|
10
|
+
from .core.builder import init_builder, build_results
|
|
11
|
+
from .core.utils import (
|
|
12
|
+
setup_scenario_output_directories,
|
|
13
|
+
calculate_scenario_mass_balance,
|
|
14
|
+
shorten_sheet_name,
|
|
15
|
+
)
|
|
16
|
+
from .core.logger import log
|
|
17
|
+
from .core.parameters import ParameterName
|
|
18
|
+
from .core.network_graph import NetworkGraph
|
|
19
|
+
from .core.datavisualizer import DataVisualizer
|
|
20
|
+
|
|
21
|
+
# Default name of the output directory, created under the current working
# directory when run_scenarios() is called without an explicit output path.
_default_output_dir_name = "output"

# Default name of the builder cache subdirectory, created inside the
# output directory and passed to init_builder() as path_to_cache.
_default_cache_dir_name = "cache"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def run_scenarios(path_to_settings_file: str = None,
|
|
26
|
+
path_to_output_dir: Union[str, None] = None,
|
|
27
|
+
remove_existing_output_dir: bool = False,
|
|
28
|
+
) -> bool:
|
|
29
|
+
"""
|
|
30
|
+
Run scenarios using the settings file.
|
|
31
|
+
If path_to_output_dir is set then overrides the setting from Excel file.
|
|
32
|
+
|
|
33
|
+
:param path_to_settings_file: Path to target settings Excel file
|
|
34
|
+
:param path_to_output_dir: Path to output directory
|
|
35
|
+
:param remove_existing_output_dir: Remove existing directory (default: False)
|
|
36
|
+
"""
|
|
37
|
+
|
|
38
|
+
if path_to_settings_file is None:
|
|
39
|
+
sys.stderr.write("ERROR: No path to settings file\n")
|
|
40
|
+
sys.stderr.flush()
|
|
41
|
+
return False
|
|
42
|
+
|
|
43
|
+
abs_output_dir = os.path.realpath(os.path.expanduser(path_to_output_dir))
|
|
44
|
+
output_dir_exists = os.path.isdir(abs_output_dir)
|
|
45
|
+
if not remove_existing_output_dir and output_dir_exists:
|
|
46
|
+
sys.stderr.write("ERROR: Output directory already exists\n")
|
|
47
|
+
sys.stderr.flush()
|
|
48
|
+
return False
|
|
49
|
+
|
|
50
|
+
# Use the output path from settings file
|
|
51
|
+
if path_to_output_dir is None:
|
|
52
|
+
cwd = os.path.realpath(os.getcwd())
|
|
53
|
+
path_to_output_dir = os.path.join(cwd, _default_output_dir_name)
|
|
54
|
+
|
|
55
|
+
if os.path.isdir(path_to_output_dir):
|
|
56
|
+
if not remove_existing_output_dir:
|
|
57
|
+
log("Directory {} already exists.".format(path_to_output_dir))
|
|
58
|
+
return False
|
|
59
|
+
else:
|
|
60
|
+
shutil.rmtree(path_to_output_dir, ignore_errors=True)
|
|
61
|
+
|
|
62
|
+
if not os.path.isdir(path_to_output_dir):
|
|
63
|
+
os.mkdir(path_to_output_dir)
|
|
64
|
+
|
|
65
|
+
time_total_in_secs: float = time.perf_counter()
|
|
66
|
+
path_to_cache = os.path.join(path_to_output_dir, _default_cache_dir_name)
|
|
67
|
+
init_builder(path_to_cache=path_to_cache,
|
|
68
|
+
use_cache=False,
|
|
69
|
+
use_timing=False,
|
|
70
|
+
clear_cache=False)
|
|
71
|
+
|
|
72
|
+
# Build results
|
|
73
|
+
model_params, scenarios, color_definitions = build_results(path_to_settings_file, path_to_output_dir)
|
|
74
|
+
|
|
75
|
+
scenario_name_to_output_path = setup_scenario_output_directories(
|
|
76
|
+
model_params[ParameterName.OutputPath],
|
|
77
|
+
[scenario.name for scenario in scenarios]
|
|
78
|
+
)
|
|
79
|
+
|
|
80
|
+
if model_params[ParameterName.CreateNetworkGraphs]:
|
|
81
|
+
progress_bar = tqdm(total=len(scenarios),
|
|
82
|
+
desc="Building network graphs for solved scenarios")
|
|
83
|
+
for scenario_index, scenario in enumerate(scenarios):
|
|
84
|
+
# Extra options that are used when building network graphs
|
|
85
|
+
options = {
|
|
86
|
+
"transformation_stage_name_to_color": color_definitions,
|
|
87
|
+
"scenario_name": scenario.name
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
output_filename = os.path.join(
|
|
91
|
+
scenario_name_to_output_path.get(scenario.name), "network_graph.html")
|
|
92
|
+
network_visualizer = NetworkGraph()
|
|
93
|
+
network_visualizer.build(scenario.scenario_data, options)
|
|
94
|
+
|
|
95
|
+
if model_params[ParameterName.ShowPlots]:
|
|
96
|
+
network_visualizer.show(output_filename)
|
|
97
|
+
|
|
98
|
+
progress_bar.update()
|
|
99
|
+
progress_bar.close()
|
|
100
|
+
sys.stdout.flush()
|
|
101
|
+
sys.stderr.flush()
|
|
102
|
+
|
|
103
|
+
# %%
|
|
104
|
+
# ***************************************************************************
|
|
105
|
+
# * Step 3: Export scenario results to files (processes, flows, and stocks) *
|
|
106
|
+
# ***************************************************************************
|
|
107
|
+
|
|
108
|
+
# Sheet names to what are written to file. Note that the order is important.
|
|
109
|
+
sheet_names = ["Processes", "Flows",
|
|
110
|
+
"Flow values (baseline value)", "Mass balance"]
|
|
111
|
+
sheet_name_to_list_of_dfs = {name: [] for name in sheet_names}
|
|
112
|
+
|
|
113
|
+
progress_bar = tqdm(total=len(scenarios))
|
|
114
|
+
for scenario_index, scenario in enumerate(scenarios):
|
|
115
|
+
progress_bar.set_description("Exporting scenario processes, flows, and stocks data (scenario {}/{})".format(
|
|
116
|
+
scenario_index + 1, len(scenarios)))
|
|
117
|
+
|
|
118
|
+
# Processes Sheet
|
|
119
|
+
df_processes = scenario.flow_solver.get_processes_as_dataframe()
|
|
120
|
+
df_processes.insert(0, "Scenario", scenario.name)
|
|
121
|
+
sheet_name_to_list_of_dfs[sheet_names[0]].append(df_processes)
|
|
122
|
+
|
|
123
|
+
# Flows Sheet
|
|
124
|
+
df_flows = scenario.flow_solver.get_flows_as_dataframe()
|
|
125
|
+
df_flows.insert(0, "Scenario", scenario.name)
|
|
126
|
+
sheet_name_to_list_of_dfs[sheet_names[1]].append(df_flows)
|
|
127
|
+
|
|
128
|
+
# Flow values Sheet
|
|
129
|
+
df_flow_values = scenario.flow_solver.get_evaluated_flow_values_as_dataframe()
|
|
130
|
+
df_flow_values.insert(0, "Scenario", scenario.name)
|
|
131
|
+
sheet_name_to_list_of_dfs[sheet_names[2]].append(df_flow_values)
|
|
132
|
+
|
|
133
|
+
# Mass balance Sheet
|
|
134
|
+
df_scenario_mass_balance = calculate_scenario_mass_balance(
|
|
135
|
+
scenario.mfa_system)
|
|
136
|
+
df_scenario_mass_balance.insert(0, "Scenario", scenario.name)
|
|
137
|
+
sheet_name_to_list_of_dfs[sheet_names[3]].append(
|
|
138
|
+
df_scenario_mass_balance)
|
|
139
|
+
|
|
140
|
+
progress_bar.update(1)
|
|
141
|
+
progress_bar.close()
|
|
142
|
+
sys.stderr.flush()
|
|
143
|
+
sys.stdout.flush()
|
|
144
|
+
|
|
145
|
+
# Combine all scenario data to one Excel file
|
|
146
|
+
# by concatenating all sheet-specific list of DataFrames as one DataFrame
|
|
147
|
+
combined_excel_filename = os.path.join(
|
|
148
|
+
model_params[ParameterName.OutputPath], "combined_scenario_data.xlsx")
|
|
149
|
+
log(f"Exporting all scenarios to {combined_excel_filename}...")
|
|
150
|
+
with pd.ExcelWriter(combined_excel_filename, engine='xlsxwriter') as writer:
|
|
151
|
+
for sheet_name, list_of_dfs in sheet_name_to_list_of_dfs.items():
|
|
152
|
+
df = pd.concat(list_of_dfs, ignore_index=True)
|
|
153
|
+
df.to_excel(writer, sheet_name=sheet_name, index=False)
|
|
154
|
+
|
|
155
|
+
log(f"All scenario data exported to {combined_excel_filename}")
|
|
156
|
+
sys.stdout.flush()
|
|
157
|
+
|
|
158
|
+
# %%
|
|
159
|
+
# ***********************************************************************
|
|
160
|
+
# * Step 4: Build dynamic stock results for each Scenario and visualize *
|
|
161
|
+
# ***********************************************************************
|
|
162
|
+
progress_bar = tqdm(total=len(scenarios),
|
|
163
|
+
desc="Building dynamic stock results")
|
|
164
|
+
sys.stderr.flush()
|
|
165
|
+
for scenario_index, scenario in enumerate(scenarios):
|
|
166
|
+
progress_bar.set_description("Building dynamic stock results (scenario {}/{})".format(
|
|
167
|
+
scenario_index + 1, len(scenarios)))
|
|
168
|
+
|
|
169
|
+
flow_solver = scenario.flow_solver
|
|
170
|
+
years = scenario.scenario_data.years
|
|
171
|
+
scenario_output_path = scenario_name_to_output_path[scenario.name]
|
|
172
|
+
|
|
173
|
+
# Full name of the baseline, e.g. "Solid wood equivalent"
|
|
174
|
+
baseline_value_name = scenario.scenario_data.baseline_value_name
|
|
175
|
+
baseline_unit_name = scenario.scenario_data.baseline_unit_name
|
|
176
|
+
|
|
177
|
+
# Total number of indicators
|
|
178
|
+
indicators = flow_solver.get_indicator_name_to_indicator()
|
|
179
|
+
num_indicators = len(indicators.keys())
|
|
180
|
+
|
|
181
|
+
# Baseline DSM
|
|
182
|
+
stock_id_to_baseline_dsm = flow_solver.get_baseline_dynamic_stocks()
|
|
183
|
+
stock_id_to_indicator_name_to_dsm = flow_solver.get_indicator_dynamic_stocks()
|
|
184
|
+
|
|
185
|
+
if not len(stock_id_to_baseline_dsm.keys()):
|
|
186
|
+
log("Scenario '{}': no dynamic stocks in the defined system".format(
|
|
187
|
+
scenario.name))
|
|
188
|
+
progress_bar.update(1)
|
|
189
|
+
continue
|
|
190
|
+
|
|
191
|
+
# Each baseline/indicator needs 3 plots
|
|
192
|
+
# so total = baseline (3) + (number of indicators * 3)
|
|
193
|
+
num_subplots = 3 + (num_indicators * 3)
|
|
194
|
+
fig, axes = plt.subplots(
|
|
195
|
+
num_subplots, 1, sharex='all', sharey='none', figsize=(12, 20))
|
|
196
|
+
|
|
197
|
+
# Create an Excel writer for exporting data
|
|
198
|
+
excel_filename = os.path.join(scenario_output_path, "{}_dynamic_stocks.xlsx".format(scenario.name))
|
|
199
|
+
with pd.ExcelWriter(excel_filename, engine="xlsxwriter") as writer:
|
|
200
|
+
all_stock_total_dfs = []
|
|
201
|
+
all_stock_change_dfs = []
|
|
202
|
+
all_stock_outflow_dfs = []
|
|
203
|
+
for stock_id, baseline_dsm in stock_id_to_baseline_dsm.items():
|
|
204
|
+
plot_index = 0
|
|
205
|
+
|
|
206
|
+
# Truncate the stock ID to 20 characters (or any suitable length) to fit within the 31 character limit
|
|
207
|
+
# Truncate to the first 20 characters
|
|
208
|
+
stock_id_for_filename = stock_id[:20]
|
|
209
|
+
stock_id_for_filename = stock_id_for_filename.replace(
|
|
210
|
+
":", "_") # Replace ":" with "_"
|
|
211
|
+
|
|
212
|
+
# ******************
|
|
213
|
+
# * Baseline stock *
|
|
214
|
+
# ******************
|
|
215
|
+
baseline_stock_by_cohort = baseline_dsm.compute_s_c_inflow_driven()
|
|
216
|
+
baseline_outflow_by_cohort = baseline_dsm.compute_o_c_from_s_c()
|
|
217
|
+
baseline_stock_total = baseline_dsm.compute_stock_total()
|
|
218
|
+
baseline_stock_change = baseline_dsm.compute_stock_change()
|
|
219
|
+
baseline_stock_outflow = baseline_dsm.compute_outflow_total()
|
|
220
|
+
|
|
221
|
+
# Export stock by cohort
|
|
222
|
+
sheet_name = shorten_sheet_name(f'{stock_id_for_filename}_s_by_c_{baseline_value_name}')
|
|
223
|
+
df_baseline_stock_by_cohort = pd.DataFrame(
|
|
224
|
+
baseline_stock_by_cohort, columns=years, index=years)
|
|
225
|
+
df_baseline_stock_by_cohort.to_excel(
|
|
226
|
+
writer, sheet_name=sheet_name)
|
|
227
|
+
|
|
228
|
+
# Export outflow by cohort
|
|
229
|
+
sheet_name = shorten_sheet_name(f'{stock_id_for_filename}_o_by_c_{baseline_value_name}')
|
|
230
|
+
df_baseline_outflow_by_cohort = pd.DataFrame(
|
|
231
|
+
baseline_outflow_by_cohort, columns=years, index=years)
|
|
232
|
+
df_baseline_outflow_by_cohort.to_excel(
|
|
233
|
+
writer, sheet_name=sheet_name)
|
|
234
|
+
|
|
235
|
+
# Export stock total
|
|
236
|
+
df_baseline_stock_total = pd.DataFrame(baseline_stock_total, index=years)
|
|
237
|
+
df_baseline_stock_total.reset_index(inplace=True)
|
|
238
|
+
df_baseline_stock_total.columns = ["Year", "Stock total"]
|
|
239
|
+
df_baseline_stock_total["Scenario"] = scenario.name
|
|
240
|
+
df_baseline_stock_total["Stock ID"] = stock_id
|
|
241
|
+
df_baseline_stock_total["Indicator"] = baseline_unit_name
|
|
242
|
+
all_stock_total_dfs.append(df_baseline_stock_total)
|
|
243
|
+
|
|
244
|
+
# Export stock change
|
|
245
|
+
df_baseline_stock_change = pd.DataFrame(baseline_stock_change, index=years)
|
|
246
|
+
df_baseline_stock_change.reset_index(inplace=True)
|
|
247
|
+
df_baseline_stock_change.columns = ["Year", "Stock change"]
|
|
248
|
+
df_baseline_stock_change["Scenario"] = scenario.name
|
|
249
|
+
df_baseline_stock_change["Stock ID"] = stock_id
|
|
250
|
+
df_baseline_stock_change["Indicator"] = baseline_unit_name
|
|
251
|
+
all_stock_change_dfs.append(df_baseline_stock_change)
|
|
252
|
+
|
|
253
|
+
# Export stock outflow total
|
|
254
|
+
df_baseline_stock_outflow = pd.DataFrame(baseline_stock_outflow, index=years)
|
|
255
|
+
df_baseline_stock_outflow.reset_index(inplace=True)
|
|
256
|
+
df_baseline_stock_outflow.columns = ["Year", "Stock outflow total"]
|
|
257
|
+
df_baseline_stock_outflow["Scenario"] = scenario.name
|
|
258
|
+
df_baseline_stock_outflow["Stock ID"] = stock_id
|
|
259
|
+
df_baseline_stock_outflow["Indicator"] = baseline_unit_name
|
|
260
|
+
all_stock_outflow_dfs.append(df_baseline_stock_outflow)
|
|
261
|
+
|
|
262
|
+
# Plot baseline stock total (in-use stocks)
|
|
263
|
+
axes[plot_index + 0].plot(years, baseline_stock_total, marker='o', label="{}".format(stock_id))
|
|
264
|
+
axes[plot_index + 0].set_ylabel("In-use stock ({})".format(baseline_unit_name))
|
|
265
|
+
axes[plot_index + 0].set_title("In-use stock per year by product type")
|
|
266
|
+
|
|
267
|
+
# Plot baseline stock change
|
|
268
|
+
axes[plot_index + 1].plot(years, baseline_stock_change, marker='o', label=f'{stock_id}')
|
|
269
|
+
axes[plot_index + 1].set_ylabel("Stock change ({})".format(baseline_unit_name))
|
|
270
|
+
axes[plot_index + 1].set_title("Stock change per year by product type")
|
|
271
|
+
|
|
272
|
+
# Plot baseline outflow by cohort
|
|
273
|
+
axes[plot_index + 2].plot(years, baseline_stock_outflow, marker='o', label=f'{stock_id}')
|
|
274
|
+
axes[plot_index + 2].set_ylabel("Stock outflow ({})".format(baseline_unit_name))
|
|
275
|
+
axes[plot_index + 2].set_title("Stock outflow per year by product type")
|
|
276
|
+
|
|
277
|
+
plot_index += 3
|
|
278
|
+
for indicator_name, indicator_dsm in stock_id_to_indicator_name_to_dsm[stock_id].items():
|
|
279
|
+
# **************
|
|
280
|
+
# * Indicators *
|
|
281
|
+
# **************
|
|
282
|
+
indicator_unit = indicators[indicator_name].unit
|
|
283
|
+
indicator_stock_by_cohort = indicator_dsm.compute_s_c_inflow_driven()
|
|
284
|
+
indicator_outflow_by_cohort = indicator_dsm.compute_o_c_from_s_c()
|
|
285
|
+
indicator_stock_total = indicator_dsm.compute_stock_total()
|
|
286
|
+
indicator_stock_change = indicator_dsm.compute_stock_change()
|
|
287
|
+
indicator_stock_outflow = indicator_dsm.compute_outflow_total()
|
|
288
|
+
|
|
289
|
+
# Export indicator stock by cohort
|
|
290
|
+
sheet_name = shorten_sheet_name(f"{stock_id_for_filename}_s_by_c_{indicator_name}")
|
|
291
|
+
df_indicator_stock_by_cohort = pd.DataFrame(
|
|
292
|
+
indicator_stock_by_cohort, columns=years, index=years)
|
|
293
|
+
df_indicator_stock_by_cohort.to_excel(writer, sheet_name=sheet_name)
|
|
294
|
+
|
|
295
|
+
# Export indicator outflow by cohort
|
|
296
|
+
sheet_name = shorten_sheet_name(f"{stock_id_for_filename}_o_by_c_{indicator_name}")
|
|
297
|
+
df_indicator_oc = pd.DataFrame(
|
|
298
|
+
indicator_outflow_by_cohort, columns=years, index=years)
|
|
299
|
+
df_indicator_oc.to_excel(writer, sheet_name=sheet_name)
|
|
300
|
+
|
|
301
|
+
# Export indicator stock total
|
|
302
|
+
df_indicator_stock_total = pd.DataFrame(
|
|
303
|
+
indicator_stock_total, index=years)
|
|
304
|
+
df_indicator_stock_total.reset_index(inplace=True)
|
|
305
|
+
df_indicator_stock_total.columns = ["Year", "Stock total"]
|
|
306
|
+
df_indicator_stock_total["Scenario"] = scenario.name
|
|
307
|
+
df_indicator_stock_total["Stock ID"] = stock_id
|
|
308
|
+
df_indicator_stock_total["Indicator"] = indicator_name
|
|
309
|
+
all_stock_total_dfs.append(df_indicator_stock_total)
|
|
310
|
+
|
|
311
|
+
# Export indicator stock change
|
|
312
|
+
df_indicator_stock_change = pd.DataFrame(
|
|
313
|
+
indicator_stock_change, index=years)
|
|
314
|
+
df_indicator_stock_change.reset_index(inplace=True)
|
|
315
|
+
df_indicator_stock_change.columns = [
|
|
316
|
+
"Year", "Stock change"]
|
|
317
|
+
df_indicator_stock_change["Scenario"] = scenario.name
|
|
318
|
+
df_indicator_stock_change["Stock ID"] = stock_id
|
|
319
|
+
df_indicator_stock_change["Indicator"] = indicator_name
|
|
320
|
+
all_stock_change_dfs.append(df_indicator_stock_change)
|
|
321
|
+
|
|
322
|
+
# Export indicator stock outflow total
|
|
323
|
+
df_indicator_stock_outflow = pd.DataFrame(
|
|
324
|
+
indicator_stock_outflow, index=years)
|
|
325
|
+
df_indicator_stock_outflow.reset_index(inplace=True)
|
|
326
|
+
df_indicator_stock_outflow.columns = [
|
|
327
|
+
"Year", "Stock outflow total"]
|
|
328
|
+
df_indicator_stock_outflow["Scenario"] = scenario.name
|
|
329
|
+
df_indicator_stock_outflow["Stock ID"] = stock_id
|
|
330
|
+
df_indicator_stock_outflow["Indicator"] = indicator_name
|
|
331
|
+
all_stock_outflow_dfs.append(df_indicator_stock_outflow)
|
|
332
|
+
|
|
333
|
+
# Plot indicator stock total (in-use stocks)
|
|
334
|
+
axes[plot_index + 0].plot(years, indicator_stock_total, marker='o', label='{} ({}) {}'.format(
|
|
335
|
+
indicator_name, indicator_unit, stock_id))
|
|
336
|
+
axes[plot_index +
|
|
337
|
+
0].set_ylabel("In-use stock ({})".format(indicator_unit))
|
|
338
|
+
axes[plot_index + 0].set_title(
|
|
339
|
+
"{} stock in-use per year by product type".format(indicator_name))
|
|
340
|
+
|
|
341
|
+
# Plot indicator stock change
|
|
342
|
+
axes[plot_index + 1].plot(years, indicator_stock_change, marker='o', label="{} ({}) {}".format(
|
|
343
|
+
indicator_name, indicator_unit, stock_id))
|
|
344
|
+
axes[plot_index +
|
|
345
|
+
1].set_ylabel("Stock change ({})".format(indicator_unit))
|
|
346
|
+
axes[plot_index +
|
|
347
|
+
1].set_title("{} stock change per year".format(indicator_name))
|
|
348
|
+
|
|
349
|
+
# Plot indicator outflow by cohort
|
|
350
|
+
axes[plot_index + 2].plot(years, indicator_stock_outflow, marker='o', label="{} ({}) {}".format(
|
|
351
|
+
indicator_name, indicator_unit, stock_id
|
|
352
|
+
))
|
|
353
|
+
axes[plot_index +
|
|
354
|
+
2].set_ylabel("Stock outflow ({})".format(indicator_unit))
|
|
355
|
+
axes[plot_index + 2].set_title(
|
|
356
|
+
"{} outflow per year by product type".format(indicator_name))
|
|
357
|
+
|
|
358
|
+
plot_index += 3
|
|
359
|
+
|
|
360
|
+
if all_stock_total_dfs:
|
|
361
|
+
combined_stock_total_df = pd.concat(
|
|
362
|
+
all_stock_total_dfs, ignore_index=True)
|
|
363
|
+
combined_sheet_name = "Total_stock"
|
|
364
|
+
combined_stock_total_df.to_excel(
|
|
365
|
+
writer, sheet_name=combined_sheet_name, index=False)
|
|
366
|
+
|
|
367
|
+
if all_stock_change_dfs:
|
|
368
|
+
combined_stock_change_df = pd.concat(
|
|
369
|
+
all_stock_change_dfs, ignore_index=True)
|
|
370
|
+
combined_sheet_name = "Total_stock_change"
|
|
371
|
+
combined_stock_change_df.to_excel(
|
|
372
|
+
writer, sheet_name=combined_sheet_name, index=False)
|
|
373
|
+
|
|
374
|
+
if all_stock_outflow_dfs:
|
|
375
|
+
all_stock_outflow_dfs = pd.concat(
|
|
376
|
+
all_stock_outflow_dfs, ignore_index=True)
|
|
377
|
+
combined_sheet_name = "Total_stock_outflow"
|
|
378
|
+
all_stock_outflow_dfs.to_excel(
|
|
379
|
+
writer, sheet_name=combined_sheet_name, index=False)
|
|
380
|
+
|
|
381
|
+
# Set common properties to axes
|
|
382
|
+
range_x_ticks = range(min(years), max(years) + 1)
|
|
383
|
+
for axis in axes:
|
|
384
|
+
axis.set_xlabel("Year")
|
|
385
|
+
axis.title.set_size(12)
|
|
386
|
+
axis.legend()
|
|
387
|
+
|
|
388
|
+
# Adjust layout to prevent overlap
|
|
389
|
+
plt.tight_layout()
|
|
390
|
+
tick_gap = 1 if len(years) < 15 else 10
|
|
391
|
+
plt.xticks(years[::tick_gap])
|
|
392
|
+
|
|
393
|
+
# # Save the figure as an SVG file
|
|
394
|
+
# filename = os.path.join(scenario_output_path, "{}_stock_plots_by_product.svg".format(scenario.name))
|
|
395
|
+
# plt.savefig(filename, format='svg')
|
|
396
|
+
#
|
|
397
|
+
# if model_params[ParameterName.ShowPlots]:
|
|
398
|
+
# plt.show()
|
|
399
|
+
|
|
400
|
+
progress_bar.update(1)
|
|
401
|
+
progress_bar.refresh()
|
|
402
|
+
progress_bar.close()
|
|
403
|
+
sys.stderr.flush()
|
|
404
|
+
|
|
405
|
+
# *****************************************************
|
|
406
|
+
# * Step 5: Convert the carbon stocks to CO2 removals *
|
|
407
|
+
# *****************************************************
|
|
408
|
+
log("Calculating annual CO2 stock emissions / removals results...")
|
|
409
|
+
|
|
410
|
+
# Storage for comparison
|
|
411
|
+
all_scenario_results = {}
|
|
412
|
+
all_emitter_years = {}
|
|
413
|
+
|
|
414
|
+
show_steady_state_overlay = False # Toggle to enable/disable overlay
|
|
415
|
+
steady_state_threshold_ratio = 0.05 # Relative threshold for stability
|
|
416
|
+
min_steady_state_years = 5 # Minimum consecutive years for valid steady state
|
|
417
|
+
|
|
418
|
+
for scenario in scenarios:
|
|
419
|
+
scenario_output_path = scenario_name_to_output_path[scenario.name]
|
|
420
|
+
flow_solver = scenario.flow_solver
|
|
421
|
+
years = scenario.scenario_data.years
|
|
422
|
+
year_start = scenario.scenario_data.start_year
|
|
423
|
+
|
|
424
|
+
stock_id_to_indicator_name_to_dsm = flow_solver.get_indicator_dynamic_stocks()
|
|
425
|
+
if not len(stock_id_to_indicator_name_to_dsm.keys()):
|
|
426
|
+
log("Scenario '{}': no dynamic stocks in the defined system".format(
|
|
427
|
+
scenario.name))
|
|
428
|
+
continue
|
|
429
|
+
|
|
430
|
+
results_co2_removals = pd.DataFrame({'Year': years})
|
|
431
|
+
results_net_emitters = pd.DataFrame({'Year': years})
|
|
432
|
+
conversion_factor_c_to_co2 = model_params[ParameterName.ConversionFactorCToCO2]
|
|
433
|
+
|
|
434
|
+
# Define line styles, markers, and colors for differentiation
|
|
435
|
+
line_styles = ['-', '--', '-.', ':']
|
|
436
|
+
markers = ['o', 's', '^', 'D']
|
|
437
|
+
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
|
|
438
|
+
|
|
439
|
+
target_indicator_name = "Carbon"
|
|
440
|
+
plt.figure(figsize=(10, 6))
|
|
441
|
+
|
|
442
|
+
scenario_results = {}
|
|
443
|
+
steady_state_info = {}
|
|
444
|
+
net_emitter_info = {}
|
|
445
|
+
|
|
446
|
+
for index, (stock_id, indicator_name_to_dsm) in enumerate(stock_id_to_indicator_name_to_dsm.items()):
|
|
447
|
+
if target_indicator_name not in indicator_name_to_dsm:
|
|
448
|
+
continue
|
|
449
|
+
|
|
450
|
+
dsm = indicator_name_to_dsm[target_indicator_name]
|
|
451
|
+
total_inflows_carbon = dsm.i
|
|
452
|
+
total_outflows_carbon = dsm.o
|
|
453
|
+
annual_co2_removal = (
|
|
454
|
+
total_inflows_carbon - total_outflows_carbon) * conversion_factor_c_to_co2
|
|
455
|
+
results_co2_removals[stock_id] = annual_co2_removal
|
|
456
|
+
scenario_results[stock_id] = annual_co2_removal
|
|
457
|
+
|
|
458
|
+
# Detect steady-state years with rolling window approach
|
|
459
|
+
threshold = steady_state_threshold_ratio * \
|
|
460
|
+
max(abs(annual_co2_removal))
|
|
461
|
+
rolling_mean = pd.Series(annual_co2_removal).rolling(
|
|
462
|
+
window=min_steady_state_years, center=True).mean()
|
|
463
|
+
is_steady = abs(pd.Series(annual_co2_removal) -
|
|
464
|
+
rolling_mean) < threshold
|
|
465
|
+
|
|
466
|
+
# Extract consecutive steady years
|
|
467
|
+
steady_years = []
|
|
468
|
+
current_run = []
|
|
469
|
+
for year, steady in zip(years, is_steady):
|
|
470
|
+
if steady:
|
|
471
|
+
current_run.append(year)
|
|
472
|
+
else:
|
|
473
|
+
if len(current_run) >= min_steady_state_years:
|
|
474
|
+
steady_years.extend(current_run)
|
|
475
|
+
current_run = []
|
|
476
|
+
if len(current_run) >= min_steady_state_years:
|
|
477
|
+
steady_years.extend(current_run)
|
|
478
|
+
|
|
479
|
+
steady_state_info[stock_id] = sorted(set(steady_years))
|
|
480
|
+
|
|
481
|
+
# Detect net emitter years (negative removals)
|
|
482
|
+
emitter_years = [year for year, value in zip(
|
|
483
|
+
years, annual_co2_removal) if value < 0]
|
|
484
|
+
results_net_emitters[stock_id] = [
|
|
485
|
+
"Emitter" if value < 0 else "" for value in annual_co2_removal]
|
|
486
|
+
net_emitter_info[stock_id] = emitter_years
|
|
487
|
+
|
|
488
|
+
# Plot CO2 removals with steady state overlay
|
|
489
|
+
line_style = line_styles[index % len(line_styles)]
|
|
490
|
+
marker = markers[index % len(markers)]
|
|
491
|
+
color = colors[index % len(colors)]
|
|
492
|
+
plt.plot(years, annual_co2_removal, marker=marker, linestyle=line_style, color=color,
|
|
493
|
+
label=f'{stock_id}')
|
|
494
|
+
if show_steady_state_overlay and steady_years:
|
|
495
|
+
plt.axvspan(steady_years[0],
|
|
496
|
+
steady_years[-1], color=color, alpha=0.1)
|
|
497
|
+
|
|
498
|
+
all_scenario_results[scenario.name] = scenario_results
|
|
499
|
+
all_emitter_years[scenario.name] = net_emitter_info
|
|
500
|
+
|
|
501
|
+
plt.xlabel('Year')
|
|
502
|
+
plt.ylabel('CO2 Emissions / Removals (Mt CO2)')
|
|
503
|
+
plt.title('Annual CO2 Emissions / Removals by Product')
|
|
504
|
+
plt.grid(True)
|
|
505
|
+
tick_gap = 1 if len(years) < 15 else 10
|
|
506
|
+
plt.xticks(years[::tick_gap])
|
|
507
|
+
plt.legend()
|
|
508
|
+
plt.tight_layout()
|
|
509
|
+
|
|
510
|
+
# Export CO2 removal data to CSV
|
|
511
|
+
log("Exporting annual CO2 emissions / removal (Mt) by stock results...")
|
|
512
|
+
filename = os.path.join(scenario_output_path, f"{scenario.name}_annual_co2_removal_by_stock.csv")
|
|
513
|
+
results_co2_removals.to_csv(path_or_buf=filename, index=False, mode="w")
|
|
514
|
+
|
|
515
|
+
# Export net emitter flag table
|
|
516
|
+
log("Exporting annual CO2 net emitter years (where removals < 0)...")
|
|
517
|
+
filename = os.path.join(scenario_output_path, f"{scenario.name}_annual_net_emitter_flags.csv")
|
|
518
|
+
results_net_emitters.to_csv(path_or_buf=filename, index=False, mode="w")
|
|
519
|
+
|
|
520
|
+
# Export CO2 removal plot as SVG
|
|
521
|
+
filename = os.path.join(scenario_output_path, f"{scenario.name}_annual_co2_removal_by_product.svg")
|
|
522
|
+
plt.savefig(filename, format='svg')
|
|
523
|
+
|
|
524
|
+
# if model_params[ParameterName.ShowPlots]:
|
|
525
|
+
# plt.show()
|
|
526
|
+
|
|
527
|
+
# Print and export steady-state info
|
|
528
|
+
print(f"\nSteady-state periods for scenario '{scenario.name}':")
|
|
529
|
+
for stock_id, years_list in steady_state_info.items():
|
|
530
|
+
if years_list:
|
|
531
|
+
print(f" {stock_id}: {years_list[0]} to {years_list[-1]} ({len(years_list)} years)")
|
|
532
|
+
else:
|
|
533
|
+
print(f" {stock_id}: No steady-state period detected.")
|
|
534
|
+
|
|
535
|
+
steady_state_df = pd.DataFrame([
|
|
536
|
+
{'Stock': stock_id, 'StartYear': years_list[0] if years_list else None,
|
|
537
|
+
'EndYear': years_list[-1] if years_list else None, 'DurationYears': len(years_list)}
|
|
538
|
+
for stock_id, years_list in steady_state_info.items()
|
|
539
|
+
])
|
|
540
|
+
filename = os.path.join(scenario_output_path, f"{scenario.name}_steady_state_periods.csv")
|
|
541
|
+
steady_state_df.to_csv(filename, index=False)
|
|
542
|
+
|
|
543
|
+
# ************************************************************
|
|
544
|
+
# * Step 6: Visualize inflows per year to selected processes *
|
|
545
|
+
# ************************************************************
|
|
546
|
+
|
|
547
|
+
# This is only done when there is multiple years
|
|
548
|
+
# Visualize inflows per year to processes
|
|
549
|
+
visualize_inflows_to_process_ids = model_params[ParameterName.VisualizeInflowsToProcesses]
|
|
550
|
+
for scenario in scenarios:
|
|
551
|
+
scenario_output_path = scenario_name_to_output_path[scenario.name]
|
|
552
|
+
flow_solver = scenario.flow_solver
|
|
553
|
+
years = scenario.scenario_data.years
|
|
554
|
+
|
|
555
|
+
# Dictionary: Process ID to process
|
|
556
|
+
# Map of unique process IDs -> Process objects
# (not used in this section; presumably used further down the script — verify)
unique_processes = flow_solver.get_unique_processes()
for process_id in visualize_inflows_to_process_ids:
    # Process metadata (e.g. display name) is taken from the first simulated year
    process = flow_solver.get_process(process_id, min(years))

    # Find all source processes of all incoming flows to this process in all years.
    # This is needed to create a stable set of process names so that the relative
    # position of the processes stays the same in the stacked chart between the
    # years, because aiphoria allows the connections between the flows to change
    # between the years.
    source_process_ids = set()
    for year in years:
        inflows = flow_solver.get_process_flows(process_id, year)["Inflows"]
        for flow in inflows:
            source_process_ids.add(flow.source_process_id)
    source_process_ids = list(source_process_ids)

    # Create a 2D array with shape (number of source process IDs, number of years)
    # and fill with the value of the inflow from each source process for each year.
    # The same values are also collected row-by-row into a DataFrame for CSV export.
    df_inflows_to_process = pd.DataFrame(columns=[
        'Year', 'Source Process ID',
        'Value ({})'.format(model_params[ParameterName.BaselineUnitName])])
    source_process_by_flow_values = np.zeros((len(source_process_ids), len(years)))
    for year_index, year in enumerate(years):
        inflows = flow_solver.get_process_flows(process_id, year)["Inflows"]
        for flow in inflows:
            source_process_id_index = source_process_ids.index(
                flow.source_process_id)
            # NOTE(review): this assigns instead of accumulating, so if two
            # inflows in the same year share a source process only the last one
            # ends up in the chart — confirm flows are unique per
            # (source process, year) or change '=' to '+='.
            source_process_by_flow_values[source_process_id_index,
                                          year_index] = flow.evaluated_value
            df_inflows_to_process.loc[len(df_inflows_to_process)] = [
                year, flow.source_process_id, flow.evaluated_value]

    df_inflows_to_process = df_inflows_to_process.round(5)

    # Export inflows to process to CSV file
    # NOTE: Replace character ':' in Process ID with underscore because
    # Windows systems are not able to handle that character in a filename
    process_id_for_filename = process_id.replace(":", "_")
    filename = os.path.join(
        scenario_output_path,
        "{}_inflows_to_{}.csv".format(scenario.name, process_id_for_filename))
    df_inflows_to_process.to_csv(path_or_buf=filename, index=False, mode="w")

    # Draw the stacked area chart: one band per source process
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.stackplot(years, source_process_by_flow_values,
                 labels=list(source_process_ids))
    # TODO(review): unit is hard-coded here while the CSV column above uses
    # model_params[ParameterName.BaselineUnitName] — consider unifying.
    ax.set_ylabel("Mm3 SWE")
    ax.set_title("Inputs to {}".format(process.name))
    ax.legend(loc='upper left')
    # Label every year on the x-axis for short runs, every 10th year otherwise
    tick_gap = 1 if len(years) < 15 else 10
    plt.xticks(years[::tick_gap])

    # Save the figure as an SVG file
    filename = os.path.join(
        scenario_output_path,
        "{}_inflows_to_{}.svg".format(scenario.name, process_id_for_filename))
    plt.savefig(filename, format='svg')
    # Close the figure so one open figure is not leaked per visualized process
    # (matplotlib keeps figures alive until they are explicitly closed)
    plt.close(fig)

    # NOTE: Causes when running from PyCharm
    # if model_params[ParameterName.ShowPlots]:
    #     plt.show()
# %%
# ***********************************************************
# * Step 7: Visualize the scenario results as Sankey graphs *
# ***********************************************************

# Overrides for the labels of virtual processes in the Sankey graph.
# When no override is present for a virtual process, its ID is shown instead.
# TODO: Move also this to settings file?
virtual_process_graph_labels = {
    "VP_P2:EU": "Unreported flow from P2",
    "VP_P3:EU": "Unreported flow from P3",
}

# Parameters for the Sankey visualization: node filtering threshold,
# label overrides, and virtual process / virtual flow colors
visualizer_params = {
    # Processes whose total inflows are below this value can be hidden in the
    # Sankey graph. The threshold cannot currently be changed from within the
    # Sankey graph itself.
    # TODO: Move this to settings file?
    "small_node_threshold": 5,

    # Labels for virtual flows: if the dictionary contains a label for the
    # virtual process then that label is used, otherwise the virtual
    # process ID is used
    "virtual_process_graph_labels": virtual_process_graph_labels,

    # Process color keyed by the process transformation stage name.
    # All entries must be RGB hex strings prefixed with '#',
    # e.g. { "Source": "#707070" }
    "process_transformation_stage_colors": color_definitions,

    # Flow transparency (0.0 = invisible, 1.0 = fully opaque)
    "flow_alpha": 0.75,

    # Color for virtual processes
    "virtual_process_color": "rgba(0.3, 0.3, 0.3, 0.6)",

    # Color for virtual flows
    "virtual_flow_color": "#808080",
}
# Build the Sankey charts for all scenarios when enabled in the model settings
if model_params[ParameterName.CreateSankeyCharts]:
    log("Creating Sankey charts for scenarios...")
    visualizer = DataVisualizer()
    # combine_to_one_file=True: all scenario charts go into a single output file
    visualizer.build_and_show(
        scenarios, visualizer_params, model_params, combine_to_one_file=True)

# `time_total_in_secs` presumably holds the time.perf_counter() timestamp taken
# at the start of the run (set before this chunk — TODO confirm); subtracting it
# from the current counter converts it to total elapsed seconds.
time_total_in_secs = time.perf_counter() - time_total_in_secs
log("Finished in {:.2f}s".format(time_total_in_secs))