flixopt 1.0.12__py3-none-any.whl → 2.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of flixopt might be problematic.
- docs/examples/00-Minimal Example.md +5 -0
- docs/examples/01-Basic Example.md +5 -0
- docs/examples/02-Complex Example.md +10 -0
- docs/examples/03-Calculation Modes.md +5 -0
- docs/examples/index.md +5 -0
- docs/faq/contribute.md +49 -0
- docs/faq/index.md +3 -0
- docs/images/architecture_flixOpt-pre2.0.0.png +0 -0
- docs/images/architecture_flixOpt.png +0 -0
- docs/images/flixopt-icon.svg +1 -0
- docs/javascripts/mathjax.js +18 -0
- docs/release-notes/_template.txt +32 -0
- docs/release-notes/index.md +7 -0
- docs/release-notes/v2.0.0.md +93 -0
- docs/release-notes/v2.0.1.md +12 -0
- docs/user-guide/Mathematical Notation/Bus.md +33 -0
- docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md +132 -0
- docs/user-guide/Mathematical Notation/Flow.md +26 -0
- docs/user-guide/Mathematical Notation/LinearConverter.md +21 -0
- docs/user-guide/Mathematical Notation/Piecewise.md +49 -0
- docs/user-guide/Mathematical Notation/Storage.md +44 -0
- docs/user-guide/Mathematical Notation/index.md +22 -0
- docs/user-guide/Mathematical Notation/others.md +3 -0
- docs/user-guide/index.md +124 -0
- {flixOpt → flixopt}/__init__.py +5 -2
- {flixOpt → flixopt}/aggregation.py +113 -140
- flixopt/calculation.py +455 -0
- {flixOpt → flixopt}/commons.py +7 -4
- flixopt/components.py +630 -0
- {flixOpt → flixopt}/config.py +9 -8
- {flixOpt → flixopt}/config.yaml +3 -3
- flixopt/core.py +970 -0
- flixopt/effects.py +386 -0
- flixopt/elements.py +534 -0
- flixopt/features.py +1042 -0
- flixopt/flow_system.py +409 -0
- flixopt/interface.py +265 -0
- flixopt/io.py +308 -0
- flixopt/linear_converters.py +331 -0
- flixopt/plotting.py +1340 -0
- flixopt/results.py +898 -0
- flixopt/solvers.py +77 -0
- flixopt/structure.py +630 -0
- flixopt/utils.py +62 -0
- flixopt-2.0.1.dist-info/METADATA +145 -0
- flixopt-2.0.1.dist-info/RECORD +57 -0
- {flixopt-1.0.12.dist-info → flixopt-2.0.1.dist-info}/WHEEL +1 -1
- flixopt-2.0.1.dist-info/top_level.txt +6 -0
- pics/architecture_flixOpt-pre2.0.0.png +0 -0
- pics/architecture_flixOpt.png +0 -0
- pics/flixopt-icon.svg +1 -0
- pics/pics.pptx +0 -0
- scripts/gen_ref_pages.py +54 -0
- site/release-notes/_template.txt +32 -0
- flixOpt/calculation.py +0 -629
- flixOpt/components.py +0 -614
- flixOpt/core.py +0 -182
- flixOpt/effects.py +0 -410
- flixOpt/elements.py +0 -489
- flixOpt/features.py +0 -942
- flixOpt/flow_system.py +0 -351
- flixOpt/interface.py +0 -203
- flixOpt/linear_converters.py +0 -325
- flixOpt/math_modeling.py +0 -1145
- flixOpt/plotting.py +0 -712
- flixOpt/results.py +0 -563
- flixOpt/solvers.py +0 -21
- flixOpt/structure.py +0 -733
- flixOpt/utils.py +0 -134
- flixopt-1.0.12.dist-info/METADATA +0 -174
- flixopt-1.0.12.dist-info/RECORD +0 -29
- flixopt-1.0.12.dist-info/top_level.txt +0 -3
- {flixopt-1.0.12.dist-info → flixopt-2.0.1.dist-info/licenses}/LICENSE +0 -0
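Note that the package directory is renamed from flixOpt to flixopt in this release (see the moved files above), so downstream imports change with it. A minimal sketch of the rename's effect, assuming the top-level package name follows the directory name (the alias fx is arbitrary):

# flixopt < 2.0.0 used the camel-case package directory:
#   import flixOpt as fx
# flixopt >= 2.0.0 uses the lower-case directory:
import flixopt as fx

print(fx.__name__)  # 'flixopt'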
flixOpt/calculation.py
DELETED
@@ -1,629 +0,0 @@
-"""
-This module contains the Calculation functionality for the flixOpt framework.
-It is used to calculate a SystemModel for a given FlowSystem through a solver.
-There are three different Calculation types:
-1. FullCalculation: Calculates the SystemModel for the full FlowSystem
-2. AggregatedCalculation: Calculates the SystemModel for the full FlowSystem, but aggregates the TimeSeriesData.
-   This simplifies the mathematical model and usually speeds up the solving process.
-3. SegmentedCalculation: Solves a SystemModel for each individual Segment of the FlowSystem.
-"""
-
-import datetime
-import json
-import logging
-import math
-import pathlib
-import timeit
-from typing import Any, Dict, List, Literal, Optional, Union
-
-import numpy as np
-import yaml
-
-from . import utils as utils
-from .aggregation import AggregationModel, AggregationParameters, TimeSeriesCollection
-from .components import Storage
-from .core import Numeric, Skalar
-from .elements import Component
-from .features import InvestmentModel
-from .flow_system import FlowSystem
-from .solvers import Solver
-from .structure import SystemModel, copy_and_convert_datatypes, get_compact_representation
-
-logger = logging.getLogger('flixOpt')
-
-
-class Calculation:
-    """
-    Class for a defined way of solving a flow_system optimization.
-    """
-
-    def __init__(
-        self,
-        name,
-        flow_system: FlowSystem,
-        modeling_language: Literal['pyomo', 'cvxpy'] = 'pyomo',
-        time_indices: Optional[Union[range, List[int]]] = None,
-    ):
-        """
-        Parameters
-        ----------
-        name : str
-            name of the calculation
-        flow_system : FlowSystem
-            flow_system which should be calculated
-        modeling_language : 'pyomo', 'cvxpy' (not implemented yet)
-            choice of optimization modeling language
-        time_indices : List[int] or None
-            list of indices to use for the calculation. If None, all timesteps are used.
-        """
-        self.name = name
-        self.flow_system = flow_system
-        self.modeling_language = modeling_language
-        self.time_indices = time_indices
-
-        self.system_model: Optional[SystemModel] = None
-        self.durations = {'modeling': 0.0, 'solving': 0.0, 'saving': 0.0}  # durations of the individual steps
-
-        self._paths: Dict[str, Optional[Union[pathlib.Path, List[pathlib.Path]]]] = {
-            'log': None,
-            'data': None,
-            'info': None,
-        }
-        self._results = None
-
-    def _define_path_names(self, save_results: Union[bool, str, pathlib.Path], include_timestamp: bool = False):
-        """
-        Creates the paths for saving results and optionally prefixes the calculation name with a timestamp.
-        """
-        if include_timestamp:
-            timestamp = datetime.datetime.now()
-            self.name = f'{timestamp.strftime("%Y-%m-%d")}_{self.name.replace(" ", "")}'
-
-        if save_results:
-            if not isinstance(save_results, (str, pathlib.Path)):
-                save_results = 'results/'  # default path for results
-            path = pathlib.Path.cwd() / save_results  # absolute path
-
-            path.mkdir(parents=True, exist_ok=True)  # create the path if it does not exist yet
-
-            self._paths['log'] = path / f'{self.name}_solver.log'
-            self._paths['data'] = path / f'{self.name}_data.json'
-            self._paths['results'] = path / f'{self.name}_results.json'
-            self._paths['infos'] = path / f'{self.name}_infos.yaml'
-
-    def _save_solve_infos(self):
-        t_start = timeit.default_timer()
-        indent = 4 if len(self.flow_system.time_series) < 50 else None
-        with open(self._paths['results'], 'w', encoding='utf-8') as f:
-            results = copy_and_convert_datatypes(self.results(), use_numpy=False, use_element_label=False)
-            json.dump(results, f, indent=indent)
-
-        with open(self._paths['data'], 'w', encoding='utf-8') as f:
-            data = copy_and_convert_datatypes(self.flow_system.infos(), use_numpy=False, use_element_label=False)
-            json.dump(data, f, indent=indent)
-
-        self.durations['saving'] = round(timeit.default_timer() - t_start, 2)
-
-        t_start = timeit.default_timer()
-        nodes_info, edges_info = self.flow_system.network_infos()
-        infos = {
-            'Calculation': self.infos,
-            'Model': self.system_model.infos,
-            'FlowSystem': get_compact_representation(self.flow_system.infos(use_numpy=True, use_element_label=True)),
-            'Network': {'Nodes': nodes_info, 'Edges': edges_info},
-        }
-
-        with open(self._paths['infos'], 'w', encoding='utf-8') as f:
-            yaml.dump(
-                infos,
-                f,
-                width=1000,  # prevent line breaks for long equations
-                allow_unicode=True,
-                sort_keys=False,
-            )
-
-        message = f' Saved Calculation: {self.name} '
-        logger.info(f'{"":#^80}\n{message:#^80}\n{"":#^80}')
-        logger.info(f'Saving calculation to .json took {self.durations["saving"]:>8.2f} seconds')
-        logger.info(f'Saving calculation to .yaml took {(timeit.default_timer() - t_start):>8.2f} seconds')
-
-    def results(self):
-        if self._results is None:
-            self._results = self.system_model.results()
-        return self._results
-
-    @property
-    def infos(self):
-        return {
-            'Name': self.name,
-            'Number of indices': len(self.time_indices) if self.time_indices else 'all',
-            'Calculation Type': self.__class__.__name__,
-            'Durations': self.durations,
-        }
-
-
-class FullCalculation(Calculation):
-    """
-    Class for a defined way of solving a flow_system optimization.
-    """
-
-    def do_modeling(self) -> SystemModel:
-        t_start = timeit.default_timer()
-
-        self.flow_system.transform_data()
-        for time_series in self.flow_system.all_time_series:
-            time_series.activate_indices(self.time_indices)
-
-        self.system_model = SystemModel(self.name, self.modeling_language, self.flow_system, self.time_indices)
-        self.system_model.do_modeling()
-        self.system_model.translate_to_modeling_language()
-
-        self.durations['modeling'] = round(timeit.default_timer() - t_start, 2)
-        return self.system_model
-
-    def solve(self, solver: Solver, save_results: Union[bool, str, pathlib.Path] = False):
-        self._define_path_names(save_results)
-        t_start = timeit.default_timer()
-        solver.logfile_name = self._paths['log']
-        self.system_model.solve(solver)
-        self.durations['solving'] = round(timeit.default_timer() - t_start, 2)
-
-        if save_results:
-            self._save_solve_infos()
-
-
-class AggregatedCalculation(Calculation):
-    """
-    Class for a defined way of solving a flow_system optimization.
-    """
-
-    def __init__(
-        self,
-        name,
-        flow_system: FlowSystem,
-        aggregation_parameters: AggregationParameters,
-        components_to_clusterize: Optional[List[Component]] = None,
-        modeling_language: Literal['pyomo', 'cvxpy'] = 'pyomo',
-        time_indices: Optional[Union[range, List[int]]] = None,
-    ):
-        """
-        Class for optimizing the FlowSystem including:
-        1. Aggregating TimeSeriesData via typical periods using tsam.
-        2. Equalizing variables of typical periods.
-        Parameters
-        ----------
-        name : str
-            name of the calculation
-        aggregation_parameters : AggregationParameters
-            Parameters for aggregation. See documentation of the AggregationParameters class.
-        components_to_clusterize : List[Component] or None
-            List of Components to perform aggregation on. If None, all components are aggregated.
-            This means the variables in the components are equalized to each other, according to the typical periods
-            computed in the DataAggregation.
-        flow_system : FlowSystem
-            flow_system which should be calculated
-        modeling_language : 'pyomo', 'cvxpy' (not implemented yet)
-            choice of optimization modeling language
-        time_indices : List[int] or None
-            list of indices to use for the calculation. If None, all timesteps are used.
-        """
-        super().__init__(name, flow_system, modeling_language, time_indices)
-        self.aggregation_parameters = aggregation_parameters
-        self.components_to_clusterize = components_to_clusterize
-        self.time_series_for_aggregation = None
-        self.aggregation = None
-        self.time_series_collection: Optional[TimeSeriesCollection] = None
-
-    def do_modeling(self) -> SystemModel:
-        self.flow_system.transform_data()
-        for time_series in self.flow_system.all_time_series:
-            time_series.activate_indices(self.time_indices)
-
-        from .aggregation import Aggregation
-
-        (chosen_time_series, chosen_time_series_with_end, dt_in_hours, dt_in_hours_total) = (
-            self.flow_system.get_time_data_from_indices(self.time_indices)
-        )
-
-        t_start_agg = timeit.default_timer()
-
-        # Validation
-        dt_min, dt_max = np.min(dt_in_hours), np.max(dt_in_hours)
-        if not dt_min == dt_max:
-            raise ValueError(
-                f'Aggregation failed due to inconsistent time step sizes: '
-                f'delta_t varies from {dt_min} to {dt_max} hours.'
-            )
-        steps_per_period = self.aggregation_parameters.hours_per_period / dt_in_hours[0]
-        if not steps_per_period.is_integer():
-            raise Exception(
-                f'The selected {self.aggregation_parameters.hours_per_period=} does not match the time '
-                f'step size of {dt_in_hours[0]} hours. It must be a multiple of {dt_in_hours[0]} hours.'
-            )
-
-        logger.info(f'{"":#^80}')
-        logger.info(f'{" Aggregating TimeSeries Data ":#^80}')
-
-        self.time_series_collection = TimeSeriesCollection(
-            [ts for ts in self.flow_system.all_time_series if ts.is_array]
-        )
-
-        import pandas as pd
-
-        original_data = pd.DataFrame(self.time_series_collection.data, index=chosen_time_series)
-
-        # Aggregation - creation of aggregated timeseries:
-        self.aggregation = Aggregation(
-            original_data=original_data,
-            hours_per_time_step=dt_min,
-            hours_per_period=self.aggregation_parameters.hours_per_period,
-            nr_of_periods=self.aggregation_parameters.nr_of_periods,
-            weights=self.time_series_collection.weights,
-            time_series_for_high_peaks=self.aggregation_parameters.labels_for_high_peaks,
-            time_series_for_low_peaks=self.aggregation_parameters.labels_for_low_peaks,
-        )
-
-        self.aggregation.cluster()
-        self.aggregation.plot()
-        if self.aggregation_parameters.aggregate_data_and_fix_non_binary_vars:
-            self.time_series_collection.insert_data(  # converting it into a dict with labels as keys
-                {
-                    col: np.array(values)
-                    for col, values in self.aggregation.aggregated_data.to_dict(orient='list').items()
-                }
-            )
-        self.durations['aggregation'] = round(timeit.default_timer() - t_start_agg, 2)
-
-        # Model the system
-        t_start = timeit.default_timer()
-
-        self.system_model = SystemModel(self.name, self.modeling_language, self.flow_system, self.time_indices)
-        self.system_model.do_modeling()
-        # Add the aggregation model after modeling the rest
-        aggregation_model = AggregationModel(
-            self.aggregation_parameters, self.flow_system, self.aggregation, self.components_to_clusterize
-        )
-        self.system_model.other_models.append(aggregation_model)
-        aggregation_model.do_modeling(self.system_model)
-
-        self.system_model.translate_to_modeling_language()
-
-        self.durations['modeling'] = round(timeit.default_timer() - t_start, 2)
-        return self.system_model
-
-    def solve(self, solver: Solver, save_results: Union[bool, str, pathlib.Path] = False):
-        self._define_path_names(save_results)
-        t_start = timeit.default_timer()
-        solver.logfile_name = self._paths['log']
-        self.system_model.solve(solver)
-        self.durations['solving'] = round(timeit.default_timer() - t_start, 2)
-
-        if save_results:
-            self._save_solve_infos()
-
-
-class SegmentedCalculation(Calculation):
-    def __init__(
-        self,
-        name,
-        flow_system: FlowSystem,
-        segment_length: int,
-        overlap_length: int,
-        modeling_language: Literal['pyomo', 'cvxpy'] = 'pyomo',
-        time_indices: Optional[Union[range, list[int]]] = None,
-    ):
-        """
-        Divides and models the problem in (overlapping) segments.
-        The final values of each segment are picked up by the following segment, effectively coupling
-        charge_states and flow_rates between segments.
-        Because of this overlap, modeling and solving are done in one step.
-
-        Take care:
-        Parameters like InvestParameters, sum_of_flow_hours and other restrictions over the total time_series
-        don't really work in this Calculation. Lower bounds on such sums can lead to weird results.
-        This is NOT yet explicitly checked for...
-
-        Parameters
-        ----------
-        name : str
-            name of the calculation
-        flow_system : FlowSystem
-            flow_system which should be calculated
-        segment_length : int
-            The number of time_steps per individual segment (without the overlap)
-        overlap_length : int
-            The number of time_steps that are added to each individual model. Used for better
-            results of storages.
-        modeling_language : 'pyomo', 'cvxpy' (not implemented yet)
-            choice of optimization modeling language
-        time_indices : List[int] or None
-            list of indices to use for the calculation. If None, all timesteps are used.
-        """
-        super().__init__(name, flow_system, modeling_language, time_indices)
-        self.segment_length = segment_length
-        self.overlap_length = overlap_length
-        self._total_length = len(self.time_indices) if self.time_indices is not None else len(flow_system.time_series)
-        self.number_of_segments = math.ceil(self._total_length / self.segment_length)
-        self.sub_calculations: List[FullCalculation] = []
-
-        assert segment_length > 2, 'The segment length must be greater than 2, due to unwanted internal side effects'
-        assert self.segment_length_with_overlap <= self._total_length, (
-            f'{self.segment_length_with_overlap=} cannot be greater than the total length {self._total_length}'
-        )
-
-        # Store all original start values
-        self._original_start_values = {
-            **{flow: flow.previous_flow_rate for flow in self.flow_system.flows.values()},
-            **{
-                comp: comp.initial_charge_state
-                for comp in self.flow_system.components.values()
-                if isinstance(comp, Storage)
-            },
-        }
-        self._transfered_start_values: Dict[str, Dict[str, Any]] = {}
-
-    def do_modeling_and_solve(self, solver: Solver, save_results: Union[bool, str, pathlib.Path] = True):
-        logger.info(f'{"":#^80}')
-        logger.info(f'{" Segmented Solving ":#^80}')
-        self._define_path_names(save_results)
-
-        for i in range(self.number_of_segments):
-            name_of_segment = f'Segment_{i + 1}'
-            if self.sub_calculations:
-                self._transfer_start_values(name_of_segment)
-            time_indices = self._get_indices(i)
-            logger.info(f'{name_of_segment}. (flow_system indices {time_indices.start}...{time_indices.stop - 1}):')
-            calculation = FullCalculation(name_of_segment, self.flow_system, self.modeling_language, time_indices)
-            # TODO: Add before-values if available
-            self.sub_calculations.append(calculation)
-            calculation.do_modeling()
-            invest_elements = [
-                model.element.label_full
-                for model in calculation.system_model.sub_models
-                if isinstance(model, InvestmentModel)
-            ]
-            if invest_elements:
-                logger.critical(
-                    f'Investments are not supported in Segmented Calculation! '
-                    f'The following elements contain investments: {invest_elements}'
-                )
-            calculation.solve(solver, save_results=False)
-
-        self._reset_start_values()
-
-        for calc in self.sub_calculations:
-            for key, value in calc.durations.items():
-                self.durations[key] += value
-
-        if save_results:
-            self._save_solve_infos()
-
-    def results(
-        self, combined_arrays: bool = False, combined_scalars: bool = False, individual_results: bool = False
-    ) -> Dict[str, Union[Numeric, Dict[str, Numeric]]]:
-        """
-        Retrieving the results of a Segmented Calculation is not as straightforward as with other Calculation types.
-        You have 3 options:
-        1. combined_arrays:
-            Retrieve the combined array results of all segments as 'combined_arrays'. All result arrays are concatenated,
-            taking care of removing the overlap. These results can be directly compared to other Calculation results.
-            Unfortunately, scalar values like the total of effects cannot be combined in a deterministic way.
-            Rather, convert the time series effect results to a sum yourself.
-        2. combined_scalars:
-            Retrieve the combined scalar results of all segments. All scalar values like the total of effects are
-            combined and stored in a list. Take care that the total of multiple segments is not equivalent to the
-            total of the whole time series, as it includes the overlap!
-        3. individual_results:
-            Retrieve the individual results of each segment.
-        """
-        options_chosen = combined_arrays + combined_scalars + individual_results
-        assert options_chosen == 1, (
-            f'Exactly one of the three options to retrieve the results needs to be chosen! You chose {options_chosen}!'
-        )
-        all_results = {f'Segment_{i + 1}': calculation.results() for i, calculation in enumerate(self.sub_calculations)}
-        if combined_arrays:
-            return _combine_nested_arrays(*list(all_results.values()), length_per_array=self.segment_length)
-        elif combined_scalars:
-            return _combine_nested_scalars(*list(all_results.values()))
-        else:
-            return all_results
-
-    def _save_solve_infos(self):
-        t_start = timeit.default_timer()
-        indent = 4 if len(self.flow_system.time_series) < 50 else None
-        with open(self._paths['results'], 'w', encoding='utf-8') as f:
-            results = copy_and_convert_datatypes(
-                self.results(combined_arrays=True), use_numpy=False, use_element_label=False
-            )
-            json.dump(results, f, indent=indent)
-
-        with open(self._paths['data'], 'w', encoding='utf-8') as f:
-            data = copy_and_convert_datatypes(self.flow_system.infos(), use_numpy=False, use_element_label=False)
-            json.dump(data, f, indent=indent)
-
-        with open(self._paths['results'].parent / f'{self.name}_results_extra.json', 'w', encoding='utf-8') as f:
-            results = {
-                'Individual Results': copy_and_convert_datatypes(
-                    self.results(individual_results=True), use_numpy=False, use_element_label=False
-                ),
-                'Skalar Results': copy_and_convert_datatypes(
-                    self.results(combined_scalars=True), use_numpy=False, use_element_label=False
-                ),
-            }
-            json.dump(results, f, indent=indent)
-        self.durations['saving'] = round(timeit.default_timer() - t_start, 2)
-
-        t_start = timeit.default_timer()
-        nodes_info, edges_info = self.flow_system.network_infos()
-        infos = {
-            'Calculation': self.infos,
-            'Model': self.sub_calculations[0].system_model.infos,
-            'FlowSystem': get_compact_representation(self.flow_system.infos(use_numpy=True, use_element_label=True)),
-            'Network': {'Nodes': nodes_info, 'Edges': edges_info},
-        }
-
-        with open(self._paths['infos'], 'w', encoding='utf-8') as f:
-            yaml.dump(
-                infos,
-                f,
-                width=1000,  # prevent line breaks for long equations
-                allow_unicode=True,
-                sort_keys=False,
-            )
-
-        message = f' Saved Calculation: {self.name} '
-        logger.info(f'{"":#^80}\n{message:#^80}\n{"":#^80}')
-        logger.info(f'Saving calculation to .json took {self.durations["saving"]:>8.2f} seconds')
-        logger.info(f'Saving calculation to .yaml took {(timeit.default_timer() - t_start):>8.2f} seconds')
-
-    def _transfer_start_values(self, segment_name: str):
-        """
-        Takes the last values of the previously solved segment and
-        inserts them as start values for the next segment.
-        """
-        final_index_of_prior_segment = -(1 + self.overlap_length)
-        start_values_of_this_segment = {}
-        for flow in self.flow_system.flows.values():
-            flow.previous_flow_rate = flow.model.flow_rate.result[
-                final_index_of_prior_segment
-            ]  # TODO: maybe more values?
-            start_values_of_this_segment[flow.label_full] = flow.previous_flow_rate
-        for comp in self.flow_system.components.values():
-            if isinstance(comp, Storage):
-                comp.initial_charge_state = comp.model.charge_state.result[final_index_of_prior_segment]
-                start_values_of_this_segment[comp.label_full] = comp.initial_charge_state
-
-        self._transfered_start_values[segment_name] = start_values_of_this_segment
-
-    def _reset_start_values(self):
-        """Resets the start values of all elements to their original state"""
-        for flow in self.flow_system.flows.values():
-            flow.previous_flow_rate = self._original_start_values[flow]
-        for comp in self.flow_system.components.values():
-            if isinstance(comp, Storage):
-                comp.initial_charge_state = self._original_start_values[comp]
-
-    def _get_indices(self, segment_index: int) -> range:
-        start = segment_index * self.segment_length
-        return range(start, min(start + self.segment_length + self.overlap_length, self._total_length))
-
-    @property
-    def segment_length_with_overlap(self):
-        return self.segment_length + self.overlap_length
-
-    @property
-    def start_values_of_segments(self) -> Dict[str, Dict[str, Any]]:
-        """Gives an overview of the start values of all segments"""
-        return {
-            self.sub_calculations[0].name: {
-                element.label_full: value for element, value in self._original_start_values.items()
-            },
-            **self._transfered_start_values,
-        }
-
-
-def _remove_none_values(d: Dict[Any, Optional[Any]]) -> Dict[Any, Any]:
-    # Remove None values from a dictionary, recursively
-    return {k: _remove_none_values(v) if isinstance(v, dict) else v for k, v in d.items() if v is not None}
-
-
-def _remove_empty_dicts(d: Dict[Any, Any]) -> Dict[Any, Any]:
-    """Recursively removes empty dictionaries from a nested dictionary."""
-    return {
-        k: _remove_empty_dicts(v) if isinstance(v, dict) else v
-        for k, v in d.items()
-        if not isinstance(v, dict) or _remove_empty_dicts(v)
-    }
-
-
-def _combine_nested_arrays(
-    *dicts: Dict[str, Union[Numeric, dict]],
-    trim: Optional[int] = None,
-    length_per_array: Optional[int] = None,
-) -> Dict[str, Union[np.ndarray, dict]]:
-    """
-    Combines multiple dictionaries with identical structures by concatenating their arrays,
-    with optional trimming. Filters out all other values.
-
-    Parameters
-    ----------
-    *dicts : Dict[str, Union[np.ndarray, dict]]
-        Dictionaries with matching structures and Numeric values.
-    trim : int, optional
-        Number of elements to trim from the end of each array except the last. Defaults to None.
-    length_per_array : int, optional
-        Trims the arrays to the desired length. Defaults to None.
-        If None, then trim is used.
-
-    Returns
-    -------
-    Dict[str, Union[np.ndarray, dict]]
-        A single dictionary with concatenated arrays at each key, ignoring non-array values.
-
-    Example
-    -------
-    >>> dict1 = {'a': np.array([1, 2, 3]), 'b': {'c': np.array([4, 5, 6])}}
-    >>> dict2 = {'a': np.array([7, 8, 9]), 'b': {'c': np.array([10, 11, 12])}}
-    >>> _combine_nested_arrays(dict1, dict2, trim=1)
-    {'a': array([1, 2, 7, 8, 9]), 'b': {'c': array([4, 5, 10, 11, 12])}}
-    """
-    assert (trim is None) != (length_per_array is None), (
-        'Either trim or length_per_array must be provided, but not both!'
-    )
-
-    def combine_arrays_recursively(
-        *values: Union[Numeric, Dict[str, Numeric], Any],
-    ) -> Optional[Union[np.ndarray, Dict[str, Union[np.ndarray, dict]]]]:
-        if all(isinstance(val, dict) for val in values):  # If all values are dictionaries, recursively combine each key
-            return {key: combine_arrays_recursively(*(val[key] for val in values)) for key in values[0]}
-
-        if all(isinstance(val, np.ndarray) for val in values):
-
-            def limit(idx: int, arr: np.ndarray) -> np.ndarray:
-                # Performs the trimming of the arrays. Doesn't trim the last array!
-                if trim and idx < len(values) - 1:
-                    return arr[:-trim]
-                elif length_per_array and idx < len(values) - 1:
-                    return arr[:length_per_array]
-                return arr
-
-            values: List[np.ndarray]
-            return np.concatenate([limit(idx, arr) for idx, arr in enumerate(values)])
-
-        else:  # Ignore non-array values
-            return None
-
-    combined_arrays = combine_arrays_recursively(*dicts)
-    combined_arrays = _remove_none_values(combined_arrays)
-    return _remove_empty_dicts(combined_arrays)
-
-
-def _combine_nested_scalars(*dicts: Dict[str, Union[Numeric, dict]]) -> Dict[str, Union[List[Skalar], dict]]:
-    """
-    Combines multiple dictionaries with identical structures by collecting their scalar values into lists.
-    Filters out all other values.
-
-    Parameters
-    ----------
-    *dicts : Dict[str, Union[np.ndarray, dict]]
-        Dictionaries with matching structures and Numeric values.
-    """
-
-    def combine_scalars_recursively(
-        *values: Union[Numeric, Dict[str, Numeric], Any],
-    ) -> Optional[Union[List[Skalar], Dict[str, Union[List[Skalar], dict]]]]:
-        # If all values are dictionaries, recursively combine each key
-        if all(isinstance(val, dict) for val in values):
-            return {key: combine_scalars_recursively(*(val[key] for val in values)) for key in values[0]}
-
-        # Collect scalar values into a list
-        if all(np.isscalar(val) for val in values):
-            return [val for val in values]
-        else:  # Ignore non-scalar values
-            return None
-
-    combined_scalars = combine_scalars_recursively(*dicts)
-    combined_scalars = _remove_none_values(combined_scalars)
-    return _remove_empty_dicts(combined_scalars)
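For readers auditing this removal, a minimal sketch of how the deleted v1 API above was typically driven, reconstructed from the signatures in this file; my_flow_system (a FlowSystem) and my_solver (a Solver instance) are assumed placeholders not shown in this diff.

from flixOpt.calculation import FullCalculation, SegmentedCalculation

# Full optimization over all timesteps (v1 API, removed in 2.0.0)
calc = FullCalculation('example', my_flow_system, modeling_language='pyomo')
calc.do_modeling()                              # builds and translates the SystemModel
calc.solve(my_solver, save_results='results/')  # writes *_results.json, *_data.json, *_infos.yaml
results = calc.results()

# Segmented optimization: modeling and solving happen in one step
seg = SegmentedCalculation('example_seg', my_flow_system, segment_length=96, overlap_length=24)
seg.do_modeling_and_solve(my_solver, save_results=True)
combined = seg.results(combined_arrays=True)    # overlap trimmed, comparable to a FullCalculation

Since 2.0.0 this module is replaced by flixopt/calculation.py (+455 lines) and flixOpt/math_modeling.py is removed entirely (see the file list above), so these calls will likely not carry over unchanged.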