flixopt 2.2.0rc2__py3-none-any.whl → 3.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. flixopt/__init__.py +33 -4
  2. flixopt/aggregation.py +60 -80
  3. flixopt/calculation.py +403 -182
  4. flixopt/commons.py +1 -10
  5. flixopt/components.py +939 -448
  6. flixopt/config.py +553 -191
  7. flixopt/core.py +513 -846
  8. flixopt/effects.py +644 -178
  9. flixopt/elements.py +610 -355
  10. flixopt/features.py +394 -966
  11. flixopt/flow_system.py +736 -219
  12. flixopt/interface.py +1104 -302
  13. flixopt/io.py +103 -79
  14. flixopt/linear_converters.py +387 -95
  15. flixopt/modeling.py +757 -0
  16. flixopt/network_app.py +73 -39
  17. flixopt/plotting.py +294 -138
  18. flixopt/results.py +1254 -300
  19. flixopt/solvers.py +25 -21
  20. flixopt/structure.py +938 -396
  21. flixopt/utils.py +36 -12
  22. flixopt-3.0.1.dist-info/METADATA +209 -0
  23. flixopt-3.0.1.dist-info/RECORD +26 -0
  24. flixopt-3.0.1.dist-info/top_level.txt +1 -0
  25. docs/examples/00-Minimal Example.md +0 -5
  26. docs/examples/01-Basic Example.md +0 -5
  27. docs/examples/02-Complex Example.md +0 -10
  28. docs/examples/03-Calculation Modes.md +0 -5
  29. docs/examples/index.md +0 -5
  30. docs/faq/contribute.md +0 -61
  31. docs/faq/index.md +0 -3
  32. docs/images/architecture_flixOpt-pre2.0.0.png +0 -0
  33. docs/images/architecture_flixOpt.png +0 -0
  34. docs/images/flixopt-icon.svg +0 -1
  35. docs/javascripts/mathjax.js +0 -18
  36. docs/user-guide/Mathematical Notation/Bus.md +0 -33
  37. docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md +0 -132
  38. docs/user-guide/Mathematical Notation/Flow.md +0 -26
  39. docs/user-guide/Mathematical Notation/LinearConverter.md +0 -21
  40. docs/user-guide/Mathematical Notation/Piecewise.md +0 -49
  41. docs/user-guide/Mathematical Notation/Storage.md +0 -44
  42. docs/user-guide/Mathematical Notation/index.md +0 -22
  43. docs/user-guide/Mathematical Notation/others.md +0 -3
  44. docs/user-guide/index.md +0 -124
  45. flixopt/config.yaml +0 -10
  46. flixopt-2.2.0rc2.dist-info/METADATA +0 -167
  47. flixopt-2.2.0rc2.dist-info/RECORD +0 -54
  48. flixopt-2.2.0rc2.dist-info/top_level.txt +0 -5
  49. pics/architecture_flixOpt-pre2.0.0.png +0 -0
  50. pics/architecture_flixOpt.png +0 -0
  51. pics/flixOpt_plotting.jpg +0 -0
  52. pics/flixopt-icon.svg +0 -1
  53. pics/pics.pptx +0 -0
  54. scripts/extract_release_notes.py +0 -45
  55. scripts/gen_ref_pages.py +0 -54
  56. tests/ressources/Zeitreihen2020.csv +0 -35137
  57. {flixopt-2.2.0rc2.dist-info → flixopt-3.0.1.dist-info}/WHEEL +0 -0
  58. {flixopt-2.2.0rc2.dist-info → flixopt-3.0.1.dist-info}/licenses/LICENSE +0 -0
flixopt/calculation.py CHANGED
@@ -1,35 +1,43 @@
 """
 This module contains the Calculation functionality for the flixopt framework.
-It is used to calculate a SystemModel for a given FlowSystem through a solver.
+It is used to calculate a FlowSystemModel for a given FlowSystem through a solver.
 There are three different Calculation types:
-1. FullCalculation: Calculates the SystemModel for the full FlowSystem
-2. AggregatedCalculation: Calculates the SystemModel for the full FlowSystem, but aggregates the TimeSeriesData.
+1. FullCalculation: Calculates the FlowSystemModel for the full FlowSystem
+2. AggregatedCalculation: Calculates the FlowSystemModel for the full FlowSystem, but aggregates the TimeSeriesData.
    This simplifies the mathematical model and usually speeds up the solving process.
-3. SegmentedCalculation: Solves a SystemModel for each individual Segment of the FlowSystem.
+3. SegmentedCalculation: Solves a FlowSystemModel for each individual Segment of the FlowSystem.
 """
 
+from __future__ import annotations
+
 import logging
 import math
 import pathlib
 import timeit
-from typing import Any, Dict, List, Optional, Union
+import warnings
+from collections import Counter
+from typing import TYPE_CHECKING, Annotated, Any
 
 import numpy as np
-import pandas as pd
 import yaml
 
 from . import io as fx_io
 from . import utils as utils
-from .aggregation import AggregationModel, AggregationParameters
+from .aggregation import Aggregation, AggregationModel, AggregationParameters
 from .components import Storage
 from .config import CONFIG
-from .core import Scalar
-from .elements import Component
+from .core import DataConverter, Scalar, TimeSeriesData, drop_constant_arrays
 from .features import InvestmentModel
 from .flow_system import FlowSystem
 from .results import CalculationResults, SegmentedCalculationResults
-from .solvers import _Solver
-from .structure import SystemModel, copy_and_convert_datatypes, get_compact_representation
+
+if TYPE_CHECKING:
+    import pandas as pd
+    import xarray as xr
+
+    from .elements import Component
+    from .solvers import _Solver
+    from .structure import FlowSystemModel
 
 logger = logging.getLogger('flixopt')
 
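For orientation, the sketch below shows what a minimal run against the renamed 3.x API in this file could look like. It assumes `flixopt` is importable as `fx` with `FullCalculation` exposed at package level, that `flow_system` is an already defined `fx.FlowSystem`, and that a solver class such as `HighsSolver` exists in `flixopt.solvers` (the solver name is an assumption, not taken from this diff).

```python
import flixopt as fx

# Hypothetical minimal workflow; `flow_system` is assumed to be a complete fx.FlowSystem.
calculation = fx.FullCalculation('example_run', flow_system)
calculation.do_modeling()                    # builds the FlowSystemModel
calculation.solve(fx.solvers.HighsSolver())  # solver class name is an assumption
print(calculation.main_results)              # rounded overview: objective, effects, invest decisions
```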
@@ -37,85 +45,112 @@ logger = logging.getLogger('flixopt')
 class Calculation:
     """
     class for defined way of solving a flow_system optimization
+
+    Args:
+        name: name of calculation
+        flow_system: flow_system which should be calculated
+        folder: folder where results should be saved. If None, then the current working directory is used.
+        normalize_weights: Whether to automatically normalize the weights (periods and scenarios) to sum up to 1 when solving.
+        active_timesteps: Deprecated. Use FlowSystem.sel(time=...) or FlowSystem.isel(time=...) instead.
     """
 
     def __init__(
         self,
         name: str,
         flow_system: FlowSystem,
-        active_timesteps: Optional[pd.DatetimeIndex] = None,
-        folder: Optional[pathlib.Path] = None,
+        active_timesteps: Annotated[
+            pd.DatetimeIndex | None,
+            'DEPRECATED: Use flow_system.sel(time=...) or flow_system.isel(time=...) instead',
+        ] = None,
+        folder: pathlib.Path | None = None,
+        normalize_weights: bool = True,
     ):
-        """
-        Args:
-            name: name of calculation
-            flow_system: flow_system which should be calculated
-            active_timesteps: list with indices, which should be used for calculation. If None, then all timesteps are used.
-            folder: folder where results should be saved. If None, then the current working directory is used.
-        """
         self.name = name
+        if flow_system.used_in_calculation:
+            logger.warning(
+                f'This FlowSystem is already used in a calculation:\n{flow_system}\n'
+                f'Creating a copy of the FlowSystem for Calculation "{self.name}".'
+            )
+            flow_system = flow_system.copy()
+
+        if active_timesteps is not None:
+            warnings.warn(
+                "The 'active_timesteps' parameter is deprecated and will be removed in a future version. "
+                'Use flow_system.sel(time=timesteps) or flow_system.isel(time=indices) before passing '
+                'the FlowSystem to the Calculation instead.',
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            flow_system = flow_system.sel(time=active_timesteps)
+        self._active_timesteps = active_timesteps  # deprecated
+        self.normalize_weights = normalize_weights
+
+        flow_system._used_in_calculation = True
+
         self.flow_system = flow_system
-        self.model: Optional[SystemModel] = None
-        self.active_timesteps = active_timesteps
+        self.model: FlowSystemModel | None = None
 
         self.durations = {'modeling': 0.0, 'solving': 0.0, 'saving': 0.0}
         self.folder = pathlib.Path.cwd() / 'results' if folder is None else pathlib.Path(folder)
-        self.results: Optional[CalculationResults] = None
+        self.results: CalculationResults | None = None
 
         if self.folder.exists() and not self.folder.is_dir():
            raise NotADirectoryError(f'Path {self.folder} exists and is not a directory.')
         self.folder.mkdir(parents=False, exist_ok=True)
 
+        self._modeled = False
+
     @property
-    def main_results(self) -> Dict[str, Union[Scalar, Dict]]:
+    def main_results(self) -> dict[str, Scalar | dict]:
         from flixopt.features import InvestmentModel
 
-        return {
+        main_results = {
             'Objective': self.model.objective.value,
-            'Penalty': float(self.model.effects.penalty.total.solution.values),
+            'Penalty': self.model.effects.penalty.total.solution.values,
             'Effects': {
                 f'{effect.label} [{effect.unit}]': {
-                    'operation': float(effect.model.operation.total.solution.values),
-                    'invest': float(effect.model.invest.total.solution.values),
-                    'total': float(effect.model.total.solution.values),
+                    'temporal': effect.submodel.temporal.total.solution.values,
+                    'periodic': effect.submodel.periodic.total.solution.values,
+                    'total': effect.submodel.total.solution.values,
                 }
                 for effect in self.flow_system.effects
             },
             'Invest-Decisions': {
                 'Invested': {
-                    model.label_of_element: float(model.size.solution)
+                    model.label_of_element: model.size.solution
                     for component in self.flow_system.components.values()
-                    for model in component.model.all_sub_models
-                    if isinstance(model, InvestmentModel) and float(model.size.solution) >= CONFIG.modeling.EPSILON
+                    for model in component.submodel.all_submodels
+                    if isinstance(model, InvestmentModel) and model.size.solution.max() >= CONFIG.Modeling.epsilon
                 },
                 'Not invested': {
-                    model.label_of_element: float(model.size.solution)
+                    model.label_of_element: model.size.solution
                     for component in self.flow_system.components.values()
-                    for model in component.model.all_sub_models
-                    if isinstance(model, InvestmentModel) and float(model.size.solution) < CONFIG.modeling.EPSILON
+                    for model in component.submodel.all_submodels
+                    if isinstance(model, InvestmentModel) and model.size.solution.max() < CONFIG.Modeling.epsilon
                 },
             },
             'Buses with excess': [
                 {
                     bus.label_full: {
-                        'input': float(np.sum(bus.model.excess_input.solution.values)),
-                        'output': float(np.sum(bus.model.excess_output.solution.values)),
+                        'input': bus.submodel.excess_input.solution.sum('time'),
+                        'output': bus.submodel.excess_output.solution.sum('time'),
                     }
                 }
                 for bus in self.flow_system.buses.values()
                 if bus.with_excess
                 and (
-                    float(np.sum(bus.model.excess_input.solution.values)) > 1e-3
-                    or float(np.sum(bus.model.excess_output.solution.values)) > 1e-3
+                    bus.submodel.excess_input.solution.sum() > 1e-3 or bus.submodel.excess_output.solution.sum() > 1e-3
                 )
             ],
         }
 
+        return utils.round_nested_floats(main_results)
+
     @property
     def summary(self):
         return {
             'Name': self.name,
-            'Number of timesteps': len(self.flow_system.time_series_collection.timesteps),
+            'Number of timesteps': len(self.flow_system.timesteps),
             'Calculation Type': self.__class__.__name__,
             'Constraints': self.model.constraints.ncons,
             'Variables': self.model.variables.nvars,
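Since `active_timesteps` is only kept as a deprecated pass-through to `flow_system.sel(time=...)`, migrating callers is a one-line change. A sketch, assuming `timesteps` is a `pd.DatetimeIndex` subset of the FlowSystem's time axis and `flixopt` is imported as `fx`:

```python
# 2.x style, now emits a DeprecationWarning:
calc = fx.FullCalculation('run', flow_system, active_timesteps=timesteps)

# 3.x style suggested by the warning above (select before handing over the FlowSystem):
calc = fx.FullCalculation('run', flow_system.sel(time=timesteps))
# positional selection works too, e.g. flow_system.isel(time=slice(0, 168))
```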
@@ -124,23 +159,75 @@ class Calculation:
             'Config': CONFIG.to_dict(),
         }
 
+    @property
+    def active_timesteps(self) -> pd.DatetimeIndex:
+        warnings.warn(
+            'active_timesteps is deprecated. Use flow_system.sel(time=...) or flow_system.isel(time=...) instead.',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self._active_timesteps
+
+    @property
+    def modeled(self) -> bool:
+        return True if self.model is not None else False
+
 
 class FullCalculation(Calculation):
     """
-    class for defined way of solving a flow_system optimization
+    FullCalculation solves the complete optimization problem using all time steps.
+
+    This is the most comprehensive calculation type that considers every time step
+    in the optimization, providing the most accurate but computationally intensive solution.
+
+    Args:
+        name: name of calculation
+        flow_system: flow_system which should be calculated
+        folder: folder where results should be saved. If None, then the current working directory is used.
+        normalize_weights: Whether to automatically normalize the weights (periods and scenarios) to sum up to 1 when solving.
+        active_timesteps: Deprecated. Use FlowSystem.sel(time=...) or FlowSystem.isel(time=...) instead.
     """
 
-    def do_modeling(self) -> SystemModel:
+    def do_modeling(self) -> FullCalculation:
         t_start = timeit.default_timer()
-        self._activate_time_series()
+        self.flow_system.connect_and_transform()
 
-        self.model = self.flow_system.create_model()
+        self.model = self.flow_system.create_model(self.normalize_weights)
         self.model.do_modeling()
 
         self.durations['modeling'] = round(timeit.default_timer() - t_start, 2)
-        return self.model
+        return self
+
+    def fix_sizes(self, ds: xr.Dataset, decimal_rounding: int | None = 5) -> FullCalculation:
+        """Fix the sizes of the calculations to specified values.
+
+        Args:
+            ds: The dataset that contains the variable names mapped to their sizes. If None, the dataset is loaded from the results.
+            decimal_rounding: The number of decimal places to round the sizes to. If no rounding is applied, numerical errors might lead to infeasibility.
+        """
+        if not self.modeled:
+            raise RuntimeError('Model was not created. Call do_modeling() first.')
+        if decimal_rounding is not None:
+            ds = ds.round(decimal_rounding)
+
+        for name, da in ds.data_vars.items():
+            if '|size' not in name:
+                continue
+            if name not in self.model.variables:
+                logger.debug(f'Variable {name} not found in calculation model. Skipping.')
+                continue
+
+            con = self.model.add_constraints(
+                self.model[name] == da,
+                name=f'{name}-fixed',
+            )
+            logger.debug(f'Fixed "{name}":\n{con}')
 
-    def solve(self, solver: _Solver, log_file: Optional[pathlib.Path] = None, log_main_results: bool = True):
+        return self
+
+    def solve(
+        self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool = True
+    ) -> FullCalculation:
         t_start = timeit.default_timer()
 
         self.model.solve(
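Because `do_modeling()` and `solve()` now return the calculation itself, the new `fix_sizes()` slots naturally into a two-stage workflow: size the system once, then re-solve with the sizes pinned. The sketch below assumes `sizes_ds` is an `xarray.Dataset` whose data variables are named like the model's `...|size` variables; how such a dataset is exported from the first run's results is not shown in this hunk. Reusing the same FlowSystem is safe because the constructor above copies a FlowSystem that is already used in a calculation.

```python
# Hypothetical two-stage run; `solver` is an assumed solver instance from flixopt.solvers.
design = fx.FullCalculation('design', flow_system).do_modeling()
design.solve(solver)

dispatch = fx.FullCalculation('dispatch', flow_system).do_modeling()
dispatch.fix_sizes(sizes_ds, decimal_rounding=5)  # adds one '<name>-fixed' equality constraint per size variable
dispatch.solve(solver)
```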
@@ -163,11 +250,10 @@ class FullCalculation(Calculation):
 
         # Log the formatted output
         if log_main_results:
-            logger.info(f'{" Main Results ":#^80}')
             logger.info(
-                '\n'
+                f'{" Main Results ":#^80}\n'
                 + yaml.dump(
-                    utils.round_floats(self.main_results),
+                    utils.round_nested_floats(self.main_results),
                     default_flow_style=False,
                     sort_keys=False,
                     allow_unicode=True,
@@ -177,16 +263,33 @@ class FullCalculation(Calculation):
 
         self.results = CalculationResults.from_calculation(self)
 
-    def _activate_time_series(self):
-        self.flow_system.transform_data()
-        self.flow_system.time_series_collection.activate_timesteps(
-            active_timesteps=self.active_timesteps,
-        )
+        return self
 
 
 class AggregatedCalculation(FullCalculation):
     """
-    class for defined way of solving a flow_system optimization
+    AggregatedCalculation reduces computational complexity by clustering time series into typical periods.
+
+    This calculation approach aggregates time series data using clustering techniques (tsam) to identify
+    representative time periods, significantly reducing computation time while maintaining solution accuracy.
+
+    Note:
+        The quality of the solution depends on the choice of aggregation parameters.
+        The optimal parameters depend on the specific problem and the characteristics of the time series data.
+        For more information, refer to the [tsam documentation](https://tsam.readthedocs.io/en/latest/).
+
+    Args:
+        name: Name of the calculation
+        flow_system: FlowSystem to be optimized
+        aggregation_parameters: Parameters for aggregation. See AggregationParameters class documentation
+        components_to_clusterize: list of Components to perform aggregation on. If None, all components are aggregated.
+            This equalizes variables in the components according to the typical periods computed in the aggregation
+        active_timesteps: DatetimeIndex of timesteps to use for calculation. If None, all timesteps are used
+        folder: Folder where results should be saved. If None, current working directory is used
+
+    Attributes:
+        aggregation (Aggregation | None): Contains the clustered time series data
+        aggregation_model (AggregationModel | None): Contains Variables and Constraints that equalize clusters of the time series data
     """
 
     def __init__(
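A construction sketch for the aggregated variant. The keyword arguments of `AggregationParameters` are an assumption based on the attributes referenced in this file (`hours_per_period`, `nr_of_periods`, peak labels, ...); check its class documentation for the exact signature.

```python
# Hypothetical aggregated run with daily typical periods; `solver` is an assumed solver instance.
params = fx.AggregationParameters(hours_per_period=24, nr_of_periods=8)  # kwargs are assumptions
agg_calc = fx.AggregatedCalculation('clustered_run', flow_system, aggregation_parameters=params)
agg_calc.do_modeling().solve(solver)
```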
@@ -194,45 +297,36 @@ class AggregatedCalculation(FullCalculation):
         name: str,
         flow_system: FlowSystem,
         aggregation_parameters: AggregationParameters,
-        components_to_clusterize: Optional[List[Component]] = None,
-        active_timesteps: Optional[pd.DatetimeIndex] = None,
-        folder: Optional[pathlib.Path] = None,
+        components_to_clusterize: list[Component] | None = None,
+        active_timesteps: Annotated[
+            pd.DatetimeIndex | None,
+            'DEPRECATED: Use flow_system.sel(time=...) or flow_system.isel(time=...) instead',
+        ] = None,
+        folder: pathlib.Path | None = None,
     ):
-        """
-        Class for Optimizing the `FlowSystem` including:
-        1. Aggregating TimeSeriesData via typical periods using tsam.
-        2. Equalizing variables of typical periods.
-        Args:
-            name: name of calculation
-            flow_system: flow_system which should be calculated
-            aggregation_parameters: Parameters for aggregation. See documentation of AggregationParameters class.
-            components_to_clusterize: List of Components to perform aggregation on. If None, then all components are aggregated.
-                This means, teh variables in the components are equalized to each other, according to the typical periods
-                computed in the DataAggregation
-            active_timesteps: pd.DatetimeIndex or None
-                list with indices, which should be used for calculation. If None, then all timesteps are used.
-            folder: folder where results should be saved. If None, then the current working directory is used.
-        """
+        if flow_system.scenarios is not None:
+            raise ValueError('Aggregation is not supported for scenarios yet. Please use FullCalculation instead.')
         super().__init__(name, flow_system, active_timesteps, folder=folder)
         self.aggregation_parameters = aggregation_parameters
         self.components_to_clusterize = components_to_clusterize
-        self.aggregation = None
+        self.aggregation: Aggregation | None = None
+        self.aggregation_model: AggregationModel | None = None
 
-    def do_modeling(self) -> SystemModel:
+    def do_modeling(self) -> AggregatedCalculation:
         t_start = timeit.default_timer()
-        self._activate_time_series()
+        self.flow_system.connect_and_transform()
         self._perform_aggregation()
 
         # Model the System
-        self.model = self.flow_system.create_model()
+        self.model = self.flow_system.create_model(self.normalize_weights)
         self.model.do_modeling()
-        # Add Aggregation Model after modeling the rest
-        self.aggregation = AggregationModel(
+        # Add Aggregation Submodel after modeling the rest
+        self.aggregation_model = AggregationModel(
             self.model, self.aggregation_parameters, self.flow_system, self.aggregation, self.components_to_clusterize
         )
-        self.aggregation.do_modeling()
+        self.aggregation_model.do_modeling()
         self.durations['modeling'] = round(timeit.default_timer() - t_start, 2)
-        return self.model
+        return self
 
     def _perform_aggregation(self):
         from .aggregation import Aggregation
@@ -240,41 +334,34 @@ class AggregatedCalculation(FullCalculation):
         t_start_agg = timeit.default_timer()
 
         # Validation
-        dt_min, dt_max = (
-            np.min(self.flow_system.time_series_collection.hours_per_timestep),
-            np.max(self.flow_system.time_series_collection.hours_per_timestep),
-        )
+        dt_min = float(self.flow_system.hours_per_timestep.min().item())
+        dt_max = float(self.flow_system.hours_per_timestep.max().item())
         if not dt_min == dt_max:
             raise ValueError(
                 f'Aggregation failed due to inconsistent time step sizes:'
                 f'delta_t varies from {dt_min} to {dt_max} hours.'
             )
-        steps_per_period = (
-            self.aggregation_parameters.hours_per_period
-            / self.flow_system.time_series_collection.hours_per_timestep.max()
-        )
-        is_integer = (
-            self.aggregation_parameters.hours_per_period
-            % self.flow_system.time_series_collection.hours_per_timestep.max()
-        ).item() == 0
-        if not (steps_per_period.size == 1 and is_integer):
+        ratio = self.aggregation_parameters.hours_per_period / dt_max
+        if not np.isclose(ratio, round(ratio), atol=1e-9):
             raise ValueError(
                 f'The selected {self.aggregation_parameters.hours_per_period=} does not match the time '
-                f'step size of {dt_min} hours). It must be a multiple of {dt_min} hours.'
+                f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.'
             )
 
         logger.info(f'{"":#^80}')
         logger.info(f'{" Aggregating TimeSeries Data ":#^80}')
 
+        ds = self.flow_system.to_dataset()
+
+        temporaly_changing_ds = drop_constant_arrays(ds, dim='time')
+
         # Aggregation - creation of aggregated timeseries:
         self.aggregation = Aggregation(
-            original_data=self.flow_system.time_series_collection.to_dataframe(
-                include_extra_timestep=False
-            ),  # Exclude last row (NaN)
+            original_data=temporaly_changing_ds.to_dataframe(),
             hours_per_time_step=float(dt_min),
             hours_per_period=self.aggregation_parameters.hours_per_period,
             nr_of_periods=self.aggregation_parameters.nr_of_periods,
-            weights=self.flow_system.time_series_collection.calculate_aggregation_weights(),
+            weights=self.calculate_aggregation_weights(temporaly_changing_ds),
             time_series_for_high_peaks=self.aggregation_parameters.labels_for_high_peaks,
            time_series_for_low_peaks=self.aggregation_parameters.labels_for_low_peaks,
         )
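The timestep validation above boils down to a divisibility check: `hours_per_period` must be an integer multiple of the (uniform) timestep length. A standalone illustration of the accepted and rejected cases:

```python
import numpy as np

dt_max = 1.0  # hourly resolution
for hours_per_period in (24.0, 25.5):
    ratio = hours_per_period / dt_max
    ok = np.isclose(ratio, round(ratio), atol=1e-9)
    print(hours_per_period, ok)  # 24.0 -> True (accepted), 25.5 -> False (raises the ValueError above)
```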
@@ -282,13 +369,155 @@ class AggregatedCalculation(FullCalculation):
         self.aggregation.cluster()
         self.aggregation.plot(show=True, save=self.folder / 'aggregation.html')
         if self.aggregation_parameters.aggregate_data_and_fix_non_binary_vars:
-            self.flow_system.time_series_collection.insert_new_data(
-                self.aggregation.aggregated_data, include_extra_timestep=False
-            )
+            ds = self.flow_system.to_dataset()
+            for name, series in self.aggregation.aggregated_data.items():
+                da = (
+                    DataConverter.to_dataarray(series, self.flow_system.coords)
+                    .rename(name)
+                    .assign_attrs(ds[name].attrs)
+                )
+                if TimeSeriesData.is_timeseries_data(da):
+                    da = TimeSeriesData.from_dataarray(da)
+
+                ds[name] = da
+
+            self.flow_system = FlowSystem.from_dataset(ds)
+            self.flow_system.connect_and_transform()
         self.durations['aggregation'] = round(timeit.default_timer() - t_start_agg, 2)
 
+    @classmethod
+    def calculate_aggregation_weights(cls, ds: xr.Dataset) -> dict[str, float]:
+        """Calculate weights for all datavars in the dataset. Weights are pulled from the attrs of the datavars."""
+
+        groups = [da.attrs['aggregation_group'] for da in ds.data_vars.values() if 'aggregation_group' in da.attrs]
+        group_counts = Counter(groups)
+
+        # Calculate weight for each group (1/count)
+        group_weights = {group: 1 / count for group, count in group_counts.items()}
+
+        weights = {}
+        for name, da in ds.data_vars.items():
+            group_weight = group_weights.get(da.attrs.get('aggregation_group'))
+            if group_weight is not None:
+                weights[name] = group_weight
+            else:
+                weights[name] = da.attrs.get('aggregation_weight', 1)
+
+        if np.all(np.isclose(list(weights.values()), 1, atol=1e-6)):
+            logger.info('All Aggregation weights were set to 1')
+
+        return weights
+
 
 class SegmentedCalculation(Calculation):
+    """Solve large optimization problems by dividing time horizon into (overlapping) segments.
+
+    This class addresses memory and computational limitations of large-scale optimization
+    problems by decomposing the time horizon into smaller overlapping segments that are
+    solved sequentially. Each segment uses final values from the previous segment as
+    initial conditions, ensuring dynamic continuity across the solution.
+
+    Key Concepts:
+        **Temporal Decomposition**: Divides long time horizons into manageable segments
+        **Overlapping Windows**: Segments share timesteps to improve storage dynamics
+        **Value Transfer**: Final states of one segment become initial states of the next
+        **Sequential Solving**: Each segment solved independently but with coupling
+
+    Limitations and Constraints:
+        **Investment Parameters**: InvestParameters are not supported in segmented calculations
+        as investment decisions must be made for the entire time horizon, not per segment.
+
+        **Global Constraints**: Time-horizon-wide constraints (flow_hours_total_min/max,
+        load_factor_min/max) may produce suboptimal results as they cannot be enforced
+        globally across segments.
+
+        **Storage Dynamics**: While overlap helps, storage optimization may be suboptimal
+        compared to full-horizon solutions due to limited foresight in each segment.
+
+    Args:
+        name: Unique identifier for the calculation, used in result files and logging.
+        flow_system: The FlowSystem to optimize, containing all components, flows, and buses.
+        timesteps_per_segment: Number of timesteps in each segment (excluding overlap).
+            Must be > 2 to avoid internal side effects. Larger values provide better
+            optimization at the cost of memory and computation time.
+        overlap_timesteps: Number of additional timesteps added to each segment.
+            Improves storage optimization by providing lookahead. Higher values
+            improve solution quality but increase computational cost.
+        nr_of_previous_values: Number of previous timestep values to transfer between
+            segments for initialization. Typically 1 is sufficient.
+        folder: Directory for saving results. Defaults to current working directory + 'results'.
+
+    Examples:
+        Annual optimization with monthly segments:
+
+        ```python
+        # 8760 hours annual data with monthly segments (730 hours) and 48-hour overlap
+        segmented_calc = SegmentedCalculation(
+            name='annual_energy_system',
+            flow_system=energy_system,
+            timesteps_per_segment=730,  # ~1 month
+            overlap_timesteps=48,  # 2 days overlap
+            folder=Path('results/segmented'),
+        )
+        segmented_calc.do_modeling_and_solve(solver='gurobi')
+        ```
+
+        Weekly optimization with daily overlap:
+
+        ```python
+        # Weekly segments for detailed operational planning
+        weekly_calc = SegmentedCalculation(
+            name='weekly_operations',
+            flow_system=industrial_system,
+            timesteps_per_segment=168,  # 1 week (hourly data)
+            overlap_timesteps=24,  # 1 day overlap
+            nr_of_previous_values=1,
+        )
+        ```
+
+        Large-scale system with minimal overlap:
+
+        ```python
+        # Large system with minimal overlap for computational efficiency
+        large_calc = SegmentedCalculation(
+            name='large_scale_grid',
+            flow_system=grid_system,
+            timesteps_per_segment=100,  # Shorter segments
+            overlap_timesteps=5,  # Minimal overlap
+        )
+        ```
+
+    Design Considerations:
+        **Segment Size**: Balance between solution quality and computational efficiency.
+        Larger segments provide better optimization but require more memory and time.
+
+        **Overlap Duration**: More overlap improves storage dynamics and reduces
+        end-effects but increases computational cost. Typically 5-10% of segment length.
+
+        **Storage Systems**: Systems with large storage components benefit from longer
+        overlaps to capture charge/discharge cycles effectively.
+
+        **Investment Decisions**: Use FullCalculation for problems requiring investment
+        optimization, as SegmentedCalculation cannot handle investment parameters.
+
+    Common Use Cases:
+        - **Annual Planning**: Long-term planning with seasonal variations
+        - **Large Networks**: Spatially or temporally large energy systems
+        - **Memory-Limited Systems**: When full optimization exceeds available memory
+        - **Operational Planning**: Detailed short-term optimization with limited foresight
+        - **Sensitivity Analysis**: Quick approximate solutions for parameter studies
+
+    Performance Tips:
+        - Start with FullCalculation and use this class if memory issues occur
+        - Use longer overlaps for systems with significant storage
+        - Monitor solution quality at segment boundaries for discontinuities
+
+    Warning:
+        The evaluation of the solution is a bit more complex than FullCalculation or AggregatedCalculation
+        due to the overlapping individual solutions.
+
+    """
+
     def __init__(
         self,
         name: str,
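The weighting rule in `calculate_aggregation_weights()` can be checked in isolation: members of the same `aggregation_group` share a weight of `1/len(group)`, everything else falls back to its `aggregation_weight` attribute (default 1). A small self-contained example, assuming flixopt 3.x is installed and exposes `AggregatedCalculation` at package level:

```python
import pandas as pd
import xarray as xr

import flixopt as fx

time = pd.date_range('2020-01-01', periods=3, freq='h')
ds = xr.Dataset(
    {
        'pv_a': ('time', [1.0, 2.0, 3.0]),
        'pv_b': ('time', [2.0, 1.0, 0.0]),
        'price': ('time', [30.0, 40.0, 50.0]),
    },
    coords={'time': time},
)
ds['pv_a'].attrs['aggregation_group'] = 'pv'  # grouped -> weight 1/2 each
ds['pv_b'].attrs['aggregation_group'] = 'pv'
ds['price'].attrs['aggregation_weight'] = 2   # explicit weight, no group

weights = fx.AggregatedCalculation.calculate_aggregation_weights(ds)
# {'pv_a': 0.5, 'pv_b': 0.5, 'price': 2}
```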
@@ -296,47 +525,25 @@ class SegmentedCalculation(Calculation):
         timesteps_per_segment: int,
         overlap_timesteps: int,
         nr_of_previous_values: int = 1,
-        folder: Optional[pathlib.Path] = None,
+        folder: pathlib.Path | None = None,
     ):
-        """
-        Dividing and Modeling the problem in (overlapping) segments.
-        The final values of each Segment are recognized by the following segment, effectively coupling
-        charge_states and flow_rates between segments.
-        Because of this intersection, both modeling and solving is done in one step
-
-        Take care:
-        Parameters like InvestParameters, sum_of_flow_hours and other restrictions over the total time_series
-        don't really work in this Calculation. Lower bounds to such SUMS can lead to weird results.
-        This is NOT yet explicitly checked for...
-
-        Args:
-            name: name of calculation
-            flow_system: flow_system which should be calculated
-            timesteps_per_segment: The number of time_steps per individual segment (without the overlap)
-            overlap_timesteps: The number of time_steps that are added to each individual model. Used for better
-                results of storages)
-            folder: folder where results should be saved. If None, then the current working directory is used.
-        """
         super().__init__(name, flow_system, folder=folder)
         self.timesteps_per_segment = timesteps_per_segment
         self.overlap_timesteps = overlap_timesteps
         self.nr_of_previous_values = nr_of_previous_values
-        self.sub_calculations: List[FullCalculation] = []
-
-        self.all_timesteps = self.flow_system.time_series_collection.all_timesteps
-        self.all_timesteps_extra = self.flow_system.time_series_collection.all_timesteps_extra
+        self.sub_calculations: list[FullCalculation] = []
 
         self.segment_names = [
             f'Segment_{i + 1}' for i in range(math.ceil(len(self.all_timesteps) / self.timesteps_per_segment))
         ]
-        self.active_timesteps_per_segment = self._calculate_timesteps_of_segment()
+        self._timesteps_per_segment = self._calculate_timesteps_per_segment()
 
         assert timesteps_per_segment > 2, 'The Segment length must be greater 2, due to unwanted internal side effects'
         assert self.timesteps_per_segment_with_overlap <= len(self.all_timesteps), (
             f'{self.timesteps_per_segment_with_overlap=} cant be greater than the total length {len(self.all_timesteps)}'
         )
 
-        self.flow_system._connect_network()  # Connect network to ensure that all FLows know their Component
+        self.flow_system._connect_network()  # Connect network to ensure that all Flows know their Component
         # Storing all original start values
         self._original_start_values = {
             **{flow.label_full: flow.previous_flow_rate for flow in self.flow_system.flows.values()},
@@ -346,106 +553,120 @@ class SegmentedCalculation(Calculation):
                 if isinstance(comp, Storage)
             },
         }
-        self._transfered_start_values: List[Dict[str, Any]] = []
-
-    def do_modeling_and_solve(
-        self, solver: _Solver, log_file: Optional[pathlib.Path] = None, log_main_results: bool = False
-    ):
-        logger.info(f'{"":#^80}')
-        logger.info(f'{" Segmented Solving ":#^80}')
+        self._transfered_start_values: list[dict[str, Any]] = []
 
+    def _create_sub_calculations(self):
         for i, (segment_name, timesteps_of_segment) in enumerate(
-            zip(self.segment_names, self.active_timesteps_per_segment, strict=False)
+            zip(self.segment_names, self._timesteps_per_segment, strict=True)
         ):
-            if self.sub_calculations:
-                self._transfer_start_values(i)
+            calc = FullCalculation(f'{self.name}-{segment_name}', self.flow_system.sel(time=timesteps_of_segment))
+            calc.flow_system._connect_network()  # Connect to have Correct names of Flows!
 
+            self.sub_calculations.append(calc)
             logger.info(
                 f'{segment_name} [{i + 1:>2}/{len(self.segment_names):<2}] '
                 f'({timesteps_of_segment[0]} -> {timesteps_of_segment[-1]}):'
             )
 
-            calculation = FullCalculation(
-                f'{self.name}-{segment_name}', self.flow_system, active_timesteps=timesteps_of_segment
+    def do_modeling_and_solve(
+        self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool = False
+    ) -> SegmentedCalculation:
+        logger.info(f'{"":#^80}')
+        logger.info(f'{" Segmented Solving ":#^80}')
+        self._create_sub_calculations()
+
+        for i, calculation in enumerate(self.sub_calculations):
+            logger.info(
+                f'{self.segment_names[i]} [{i + 1:>2}/{len(self.segment_names):<2}] '
+                f'({calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]}):'
             )
-            self.sub_calculations.append(calculation)
+
+            if i > 0 and self.nr_of_previous_values > 0:
+                self._transfer_start_values(i)
+
             calculation.do_modeling()
-            invest_elements = [
-                model.label_full
-                for component in self.flow_system.components.values()
-                for model in component.model.all_sub_models
-                if isinstance(model, InvestmentModel)
-            ]
-            if invest_elements:
-                logger.critical(
-                    f'Investments are not supported in Segmented Calculation! '
-                    f'Following InvestmentModels were found: {invest_elements}'
-                )
+
+            # Warn about Investments, but only in fist run
+            if i == 0:
+                invest_elements = [
+                    model.label_full
+                    for component in calculation.flow_system.components.values()
+                    for model in component.submodel.all_submodels
+                    if isinstance(model, InvestmentModel)
+                ]
+                if invest_elements:
+                    logger.critical(
+                        f'Investments are not supported in Segmented Calculation! '
+                        f'Following InvestmentModels were found: {invest_elements}'
+                    )
+
             calculation.solve(
                 solver,
                 log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log',
                 log_main_results=log_main_results,
             )
 
-        self._reset_start_values()
-
         for calc in self.sub_calculations:
             for key, value in calc.durations.items():
                 self.durations[key] += value
 
         self.results = SegmentedCalculationResults.from_calculation(self)
 
-    def _transfer_start_values(self, segment_index: int):
+        return self
+
+    def _transfer_start_values(self, i: int):
         """
         This function gets the last values of the previous solved segment and
         inserts them as start values for the next segment
         """
-        timesteps_of_prior_segment = self.active_timesteps_per_segment[segment_index - 1]
+        timesteps_of_prior_segment = self.sub_calculations[i - 1].flow_system.timesteps_extra
 
-        start = self.active_timesteps_per_segment[segment_index][0]
+        start = self.sub_calculations[i].flow_system.timesteps[0]
         start_previous_values = timesteps_of_prior_segment[self.timesteps_per_segment - self.nr_of_previous_values]
         end_previous_values = timesteps_of_prior_segment[self.timesteps_per_segment - 1]
 
         logger.debug(
-            f'start of next segment: {start}. indices of previous values: {start_previous_values}:{end_previous_values}'
+            f'Start of next segment: {start}. Indices of previous values: {start_previous_values} -> {end_previous_values}'
        )
+        current_flow_system = self.sub_calculations[i - 1].flow_system
+        next_flow_system = self.sub_calculations[i].flow_system
+
         start_values_of_this_segment = {}
-        for flow in self.flow_system.flows.values():
-            flow.previous_flow_rate = flow.model.flow_rate.solution.sel(
+
+        for current_flow in current_flow_system.flows.values():
+            next_flow = next_flow_system.flows[current_flow.label_full]
+            next_flow.previous_flow_rate = current_flow.submodel.flow_rate.solution.sel(
                 time=slice(start_previous_values, end_previous_values)
             ).values
-            start_values_of_this_segment[flow.label_full] = flow.previous_flow_rate
-        for comp in self.flow_system.components.values():
-            if isinstance(comp, Storage):
-                comp.initial_charge_state = comp.model.charge_state.solution.sel(time=start).item()
-                start_values_of_this_segment[comp.label_full] = comp.initial_charge_state
+            start_values_of_this_segment[current_flow.label_full] = next_flow.previous_flow_rate
 
-        self._transfered_start_values.append(start_values_of_this_segment)
+        for current_comp in current_flow_system.components.values():
+            next_comp = next_flow_system.components[current_comp.label_full]
+            if isinstance(next_comp, Storage):
+                next_comp.initial_charge_state = current_comp.submodel.charge_state.solution.sel(time=start).item()
+                start_values_of_this_segment[current_comp.label_full] = next_comp.initial_charge_state
 
-    def _reset_start_values(self):
-        """This resets the start values of all Elements to its original state"""
-        for flow in self.flow_system.flows.values():
-            flow.previous_flow_rate = self._original_start_values[flow.label_full]
-        for comp in self.flow_system.components.values():
-            if isinstance(comp, Storage):
-                comp.initial_charge_state = self._original_start_values[comp.label_full]
+        self._transfered_start_values.append(start_values_of_this_segment)
 
-    def _calculate_timesteps_of_segment(self) -> List[pd.DatetimeIndex]:
-        active_timesteps_per_segment = []
+    def _calculate_timesteps_per_segment(self) -> list[pd.DatetimeIndex]:
+        timesteps_per_segment = []
         for i, _ in enumerate(self.segment_names):
             start = self.timesteps_per_segment * i
             end = min(start + self.timesteps_per_segment_with_overlap, len(self.all_timesteps))
-            active_timesteps_per_segment.append(self.all_timesteps[start:end])
-        return active_timesteps_per_segment
+            timesteps_per_segment.append(self.all_timesteps[start:end])
+        return timesteps_per_segment
 
     @property
     def timesteps_per_segment_with_overlap(self):
         return self.timesteps_per_segment + self.overlap_timesteps
 
     @property
-    def start_values_of_segments(self) -> Dict[int, Dict[str, Any]]:
+    def start_values_of_segments(self) -> list[dict[str, Any]]:
         """Gives an overview of the start values of all Segments"""
-        return {
-            0: {element.label_full: value for element, value in self._original_start_values.items()},
-            **{i: start_values for i, start_values in enumerate(self._transfered_start_values, 1)},
-        }
+        return [{name: value for name, value in self._original_start_values.items()}] + [
+            start_values for start_values in self._transfered_start_values
+        ]
+
+    @property
+    def all_timesteps(self) -> pd.DatetimeIndex:
+        return self.flow_system.timesteps
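Downstream handling changed accordingly: each segment keeps its own `FullCalculation` (and results), and `start_values_of_segments` is now a plain list rather than an integer-keyed dict, with entry 0 holding the original start values and entry i the values transferred into segment i. A short inspection sketch, assuming `segmented_calc` is a constructed `SegmentedCalculation` and `solver` an existing solver instance:

```python
seg = segmented_calc.do_modeling_and_solve(solver)  # now returns the calculation itself

for name, sub in zip(seg.segment_names, seg.sub_calculations):
    print(name, sub.durations)  # one FullCalculation per segment

for i, start_values in enumerate(seg.start_values_of_segments):
    print(i, len(start_values), 'start values')  # index 0: original values
```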