flixopt-2.1.7-py3-none-any.whl → flixopt-2.1.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md +8 -8
- docs/user-guide/Mathematical Notation/Flow.md +3 -3
- docs/user-guide/Mathematical Notation/InvestParameters.md +3 -0
- docs/user-guide/Mathematical Notation/LinearConverter.md +3 -3
- docs/user-guide/Mathematical Notation/OnOffParameters.md +3 -0
- docs/user-guide/Mathematical Notation/Storage.md +1 -1
- flixopt/aggregation.py +33 -32
- flixopt/calculation.py +158 -58
- flixopt/components.py +673 -150
- flixopt/config.py +17 -8
- flixopt/core.py +59 -54
- flixopt/effects.py +144 -63
- flixopt/elements.py +292 -107
- flixopt/features.py +61 -58
- flixopt/flow_system.py +69 -48
- flixopt/interface.py +952 -113
- flixopt/io.py +15 -10
- flixopt/linear_converters.py +373 -81
- flixopt/network_app.py +75 -39
- flixopt/plotting.py +215 -87
- flixopt/results.py +382 -209
- flixopt/solvers.py +25 -21
- flixopt/structure.py +41 -37
- flixopt/utils.py +10 -7
- {flixopt-2.1.7.dist-info → flixopt-2.1.8.dist-info}/METADATA +46 -42
- {flixopt-2.1.7.dist-info → flixopt-2.1.8.dist-info}/RECORD +30 -28
- scripts/gen_ref_pages.py +1 -1
- {flixopt-2.1.7.dist-info → flixopt-2.1.8.dist-info}/WHEEL +0 -0
- {flixopt-2.1.7.dist-info → flixopt-2.1.8.dist-info}/licenses/LICENSE +0 -0
- {flixopt-2.1.7.dist-info → flixopt-2.1.8.dist-info}/top_level.txt +0 -0
docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md CHANGED

@@ -1,15 +1,15 @@
 ## Effects
-[`Effects`][flixopt.effects.Effect] are used to allocate things like costs, emissions, or other "effects"
+[`Effects`][flixopt.effects.Effect] are used to allocate things like costs, emissions, or other "effects" occurring in the system.
 These arise from so called **Shares**, which originate from **Elements** like [Flows](Flow.md).
 
 **Example:**
 
 [`Flows`][flixopt.elements.Flow] have an attribute called `effects_per_flow_hour`, defining the effect amount of per flow hour.
-
+Associated effects could be:
 - costs - given in [€/kWh]...
 - ...or emissions - given in [kg/kWh].
 
-Effects are allocated
+Effects are allocated separately for investments and operation.
 
 ### Shares to Effects
 
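For orientation, a minimal sketch of how such shares are attached to a Flow. Only the `effects_per_flow_hour` mapping is taken from the documentation above; the constructor form and the `bus` argument are assumptions and may not match the released API exactly.

```python
import flixopt as fx

# Hypothetical sketch: one factor per effect label and flow hour.
# A flow rate of 100 kW held for one hour would then contribute
# 100 * 0.04 = 4 € to 'costs' and 100 * 0.2 = 20 kg to 'CO2'.
gas_flow = fx.Flow(
    'Q_fu',
    bus='Gas',  # assumed bus label
    effects_per_flow_hour={'costs': 0.04, 'CO2': 0.2},  # €/kWh and kg/kWh
)
```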
@@ -43,7 +43,7 @@ For example, the Effect "CO$_2$ emissions" (unit: kg)
 can cause an additional share to Effect "monetary costs" (unit: €).
 In this case, the factor $\text a_{x \rightarrow e}$ is the specific CO$_2$ price in €/kg. However, circular references have to be avoided.
 
-The overall sum of investment shares of an Effect $e$ is given by $\eqref{Effect_invest}$
+The overall sum of investment shares of an Effect $e$ is given by $\eqref{eq:Effect_invest}$
 
 $$ \label{eq:Effect_invest}
 E_{e, \text{inv}} =
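As a worked instance of that cross-effect factor (numbers are illustrative, not taken from the package): with a CO$_2$ price of 50 €/t, i.e. $\text a_{\text{CO}_2 \rightarrow \text{costs}} = 0.05$ €/kg, an emission share of 1000 kg adds a share of

```latex
% Illustrative numbers only, not from the flixopt docs
s_{\text{CO}_2 \rightarrow \text{costs}}
  = \text a_{\text{CO}_2 \rightarrow \text{costs}} \cdot E_{\text{CO}_2}
  = 0.05\,\tfrac{\text{€}}{\text{kg}} \cdot 1000\,\text{kg}
  = 50\,\text{€}
```

to the "monetary costs" Effect.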
@@ -68,8 +68,8 @@ With:
 
 - $\mathcal{L}$ being the set of all elements in the FlowSystem
 - $\mathcal{E}$ being the set of all effects in the FlowSystem
-- $\text r_{x \rightarrow e, \text{inv}}$ being the factor between the
-- $\text r_{x \rightarrow e, \text{op}}(\text{t}_i)$ being the factor between the
+- $\text r_{x \rightarrow e, \text{inv}}$ being the factor between the invest part of Effect $x$ and Effect $e$
+- $\text r_{x \rightarrow e, \text{op}}(\text{t}_i)$ being the factor between the operation part of Effect $x$ and Effect $e$
 
 - $\text{t}_i$ being the time step
 - $s_{l \rightarrow e, \text{inv}}$ being the share of element $l$ to the investment part of effect $e$
@@ -113,7 +113,7 @@ With:
 - $\mathcal{T}$ being the set of all timesteps
 - $s_{l \rightarrow \Phi}$ being the share of element $l$ to the penalty
 
-At the moment, penalties only occur in [Buses](
+At the moment, penalties only occur in [Buses](Bus.md)
 
 ## Objective
 
@@ -128,5 +128,5 @@ With:
 - $\Phi$ being the [Penalty](#penalty)
 
 This approach allows for a multi-criteria optimization using both...
-- ... the **
+- ... the **Weighted Sum** method, as the chosen **Objective Effect** can incorporate other Effects.
 - ... the ($\epsilon$-constraint method) by constraining effects.
docs/user-guide/Mathematical Notation/Flow.md CHANGED

@@ -21,6 +21,6 @@ $$
 $$
 
 
-This mathematical
-to define the
-
+This mathematical formulation can be extended by using [OnOffParameters](./OnOffParameters.md)
+to define the on/off state of the Flow, or by using [InvestParameters](./InvestParameters.md)
+to change the size of the Flow from a constant to an optimization variable.
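A minimal sketch of the second extension, a Flow whose size becomes an optimization variable via `InvestParameters`. The argument names used here (`minimum_size`, `maximum_size`, `specific_effects`) and the `bus` label are assumptions, not confirmed signatures of this release.

```python
import flixopt as fx

# Hypothetical sketch: the investment decision replaces the fixed size.
heat_output = fx.Flow(
    'Q_th',
    bus='Heat',  # assumed bus label
    size=fx.InvestParameters(
        minimum_size=10,          # kW, assumed lower bound of the investment
        maximum_size=200,         # kW, assumed upper bound of the investment
        specific_effects={'costs': 25},  # assumed: €/kW of installed size
    ),
)
```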
docs/user-guide/Mathematical Notation/LinearConverter.md CHANGED

@@ -1,4 +1,4 @@
-[`LinearConverters`][flixopt.components.LinearConverter] define a ratio between incoming and outgoing [Flows](
+[`LinearConverters`][flixopt.components.LinearConverter] define a ratio between incoming and outgoing [Flows](Flow.md).
 
 $$ \label{eq:Linear-Transformer-Ratio}
 \sum_{f_{\text{in}} \in \mathcal F_{in}} \text a_{f_{\text{in}}}(\text{t}_i) \cdot p_{f_\text{in}}(\text{t}_i) = \sum_{f_{\text{out}} \in \mathcal F_{out}} \text b_{f_\text{out}}(\text{t}_i) \cdot p_{f_\text{out}}(\text{t}_i)
@@ -16,6 +16,6 @@ $$ \label{eq:Linear-Transformer-Ratio-simple}
 \text a(\text{t}_i) \cdot p_{f_\text{in}}(\text{t}_i) = p_{f_\text{out}}(\text{t}_i)
 $$
 
-where $\text a$ can be interpreted as the conversion efficiency of the **
-#### Piecewise
+where $\text a$ can be interpreted as the conversion efficiency of the **LinearConverter**.
+#### Piecewise Conversion factors
 The conversion efficiency can be defined as a piecewise linear approximation. See [Piecewise](Piecewise.md) for more details.
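As a concrete instance of the simple ratio above, a gas boiler with a thermal efficiency of 0.9 satisfies $0.9 \cdot p_{\text{gas}}(\text{t}_i) = p_{\text{heat}}(\text{t}_i)$. The construction sketch below is hypothetical: the `inputs`/`outputs`/`conversion_factors` argument names are assumptions and may differ from the released API.

```python
import flixopt as fx

# Hypothetical sketch of a converter enforcing 0.9 * Q_fu(t) = 1 * Q_th(t).
boiler = fx.LinearConverter(
    'Boiler',
    inputs=[fx.Flow('Q_fu', bus='Gas')],      # assumed argument names
    outputs=[fx.Flow('Q_th', bus='Heat')],
    conversion_factors=[{'Q_fu': 0.9, 'Q_th': 1}],  # factors per flow label
)
```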
docs/user-guide/Mathematical Notation/Storage.md CHANGED

@@ -1,5 +1,5 @@
 # Storages
-**Storages** have one incoming and one outgoing **[Flow](
+**Storages** have one incoming and one outgoing **[Flow](Flow.md)** with a charging and discharging efficiency.
 A storage has a state of charge $c(\text{t}_i)$ which is limited by its `size` $\text C$ and relative bounds $\eqref{eq:Storage_Bounds}$.
 
 $$ \label{eq:Storage_Bounds}
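The hunk is cut off before the body of $\eqref{eq:Storage_Bounds}$. A plausible form of such relative bounds, shown here only for illustration and not taken from the package, is:

```latex
% Illustrative form: relative bounds scaled by the storage size C
\text c^{\,\text{rel}}_{\text{lower}}(\text{t}_i) \cdot \text C
  \;\leq\; c(\text{t}_i) \;\leq\;
\text c^{\,\text{rel}}_{\text{upper}}(\text{t}_i) \cdot \text C
```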
flixopt/aggregation.py CHANGED

@@ -3,16 +3,15 @@ This module contains the Aggregation functionality for the flixopt framework.
 Through this, aggregating TimeSeriesData is possible.
 """
 
+from __future__ import annotations
+
 import copy
 import logging
 import pathlib
 import timeit
-import
-from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
+from typing import TYPE_CHECKING
 
-import linopy
 import numpy as np
-import pandas as pd
 
 try:
     import tsam.timeseriesaggregation as tsam
@@ -22,18 +21,20 @@ except ImportError:
     TSAM_AVAILABLE = False
 
 from .components import Storage
-from .core import Scalar, TimeSeriesData
-from .elements import Component
-from .flow_system import FlowSystem
 from .structure import (
-    Element,
     Model,
     SystemModel,
 )
 
 if TYPE_CHECKING:
+    import linopy
+    import pandas as pd
     import plotly.graph_objects as go
 
+    from .core import Scalar, TimeSeriesData
+    from .elements import Component
+    from .flow_system import FlowSystem
+
 logger = logging.getLogger('flixopt')
 
 
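The import reshuffling above follows the standard pattern of postponing annotation evaluation so that type-only imports can live behind `TYPE_CHECKING`. A generic, self-contained sketch of that pattern (module names here are unrelated to flixopt):

```python
from __future__ import annotations  # annotations become strings, evaluated lazily

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by static type checkers; no runtime import cost or cycles.
    import pandas as pd


def head(frame: pd.DataFrame, n: int = 5) -> pd.DataFrame:
    # The annotation 'pd.DataFrame' is never evaluated at runtime, so pandas
    # does not need to be importable when this module is loaded.
    return frame.iloc[:n]
```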
@@ -48,9 +49,9 @@ class Aggregation:
         hours_per_time_step: Scalar,
         hours_per_period: Scalar,
         nr_of_periods: int = 8,
-        weights:
-        time_series_for_high_peaks:
-        time_series_for_low_peaks:
+        weights: dict[str, float] | None = None,
+        time_series_for_high_peaks: list[str] | None = None,
+        time_series_for_low_peaks: list[str] | None = None,
     ):
         """
         Args:
@@ -75,9 +76,9 @@ class Aggregation:
         self.time_series_for_high_peaks = time_series_for_high_peaks or []
         self.time_series_for_low_peaks = time_series_for_low_peaks or []
 
-        self.aggregated_data:
+        self.aggregated_data: pd.DataFrame | None = None
         self.clustering_duration_seconds = None
-        self.tsam:
+        self.tsam: tsam.TimeSeriesAggregation | None = None
 
     def cluster(self) -> None:
         """
@@ -140,7 +141,7 @@ class Aggregation:
     def use_extreme_periods(self):
         return self.time_series_for_high_peaks or self.time_series_for_low_peaks
 
-    def plot(self, colormap: str = 'viridis', show: bool = True, save:
+    def plot(self, colormap: str = 'viridis', show: bool = True, save: pathlib.Path | None = None) -> go.Figure:
         from . import plotting
 
         df_org = self.original_data.copy().rename(
@@ -162,14 +163,14 @@ class Aggregation:
             figure_like=fig,
             default_path=pathlib.Path('aggregated data.html'),
             default_filetype='.html',
-            user_path=
+            user_path=save,
             show=show,
-            save=
+            save=save is not None,
         )
 
         return fig
 
-    def get_cluster_indices(self) ->
+    def get_cluster_indices(self) -> dict[str, list[np.ndarray]]:
         """
         Generates a dictionary that maps each cluster to a list of index vectors representing the time steps
         assigned to that cluster for each period.
@@ -192,7 +193,7 @@ class Aggregation:
 
         return index_vectors
 
-    def get_equation_indices(self, skip_first_index_of_period: bool = True) ->
+    def get_equation_indices(self, skip_first_index_of_period: bool = True) -> tuple[np.ndarray, np.ndarray]:
         """
         Generates pairs of indices for the equations by comparing index vectors of the same cluster.
         If `skip_first_index_of_period` is True, the first index of each period is skipped.
@@ -201,7 +202,7 @@ class Aggregation:
             skip_first_index_of_period (bool): Whether to include or skip the first index of each period.
 
         Returns:
-
+            tuple[np.ndarray, np.ndarray]: Two arrays of indices.
         """
         idx_var1 = []
         idx_var2 = []
@@ -237,8 +238,8 @@ class AggregationParameters:
         aggregate_data_and_fix_non_binary_vars: bool,
         percentage_of_period_freedom: float = 0,
         penalty_of_period_freedom: float = 0,
-        time_series_for_high_peaks:
-        time_series_for_low_peaks:
+        time_series_for_high_peaks: list[TimeSeriesData] | None = None,
+        time_series_for_low_peaks: list[TimeSeriesData] | None = None,
     ):
         """
         Initializes aggregation parameters for time series data
@@ -264,24 +265,24 @@ class AggregationParameters:
         self.aggregate_data_and_fix_non_binary_vars = aggregate_data_and_fix_non_binary_vars
         self.percentage_of_period_freedom = percentage_of_period_freedom
         self.penalty_of_period_freedom = penalty_of_period_freedom
-        self.time_series_for_high_peaks:
-        self.time_series_for_low_peaks:
+        self.time_series_for_high_peaks: list[TimeSeriesData] = time_series_for_high_peaks or []
+        self.time_series_for_low_peaks: list[TimeSeriesData] = time_series_for_low_peaks or []
 
     @property
     def use_extreme_periods(self):
         return self.time_series_for_high_peaks or self.time_series_for_low_peaks
 
     @property
-    def labels_for_high_peaks(self) ->
+    def labels_for_high_peaks(self) -> list[str]:
         return [ts.label for ts in self.time_series_for_high_peaks]
 
     @property
-    def labels_for_low_peaks(self) ->
+    def labels_for_low_peaks(self) -> list[str]:
         return [ts.label for ts in self.time_series_for_low_peaks]
 
     @property
-    def use_low_peaks(self):
-        return self.time_series_for_low_peaks
+    def use_low_peaks(self) -> bool:
+        return bool(self.time_series_for_low_peaks)
 
 
 class AggregationModel(Model):
@@ -295,7 +296,7 @@ class AggregationModel(Model):
         aggregation_parameters: AggregationParameters,
         flow_system: FlowSystem,
         aggregation_data: Aggregation,
-        components_to_clusterize:
+        components_to_clusterize: list[Component] | None,
     ):
         """
         Modeling-Element for "index-equating"-equations
@@ -314,9 +315,9 @@ class AggregationModel(Model):
 
         indices = self.aggregation_data.get_equation_indices(skip_first_index_of_period=True)
 
-        time_variables:
-        binary_variables:
-        binary_time_variables:
+        time_variables: set[str] = {k for k, v in self._model.variables.data.items() if 'time' in v.indexes}
+        binary_variables: set[str] = {k for k, v in self._model.variables.data.items() if k in self._model.binaries}
+        binary_time_variables: set[str] = time_variables & binary_variables
 
         for component in components:
             if isinstance(component, Storage) and not self.aggregation_parameters.fix_storage_flows:
@@ -336,7 +337,7 @@ class AggregationModel(Model):
         for variable in self.variables_direct.values():
             self._model.effects.add_share_to_penalty('Aggregation', variable * penalty)
 
-    def _equate_indices(self, variable: linopy.Variable, indices:
+    def _equate_indices(self, variable: linopy.Variable, indices: tuple[np.ndarray, np.ndarray]) -> None:
         assert len(indices[0]) == len(indices[1]), 'The length of the indices must match!!'
         length = len(indices[0])
 
flixopt/calculation.py CHANGED

@@ -8,14 +8,15 @@ There are three different Calculation types:
 3. SegmentedCalculation: Solves a SystemModel for each individual Segment of the FlowSystem.
 """
 
+from __future__ import annotations
+
 import logging
 import math
 import pathlib
 import timeit
-from typing import
+from typing import TYPE_CHECKING, Any
 
 import numpy as np
-import pandas as pd
 import yaml
 
 from . import io as fx_io
@@ -23,13 +24,17 @@ from . import utils as utils
 from .aggregation import AggregationModel, AggregationParameters
 from .components import Storage
 from .config import CONFIG
-from .core import Scalar
-from .elements import Component
 from .features import InvestmentModel
-from .flow_system import FlowSystem
 from .results import CalculationResults, SegmentedCalculationResults
-
-
+
+if TYPE_CHECKING:
+    import pandas as pd
+
+    from .core import Scalar
+    from .elements import Component
+    from .flow_system import FlowSystem
+    from .solvers import _Solver
+    from .structure import SystemModel
 
 logger = logging.getLogger('flixopt')
 
@@ -43,8 +48,8 @@ class Calculation:
         self,
         name: str,
         flow_system: FlowSystem,
-        active_timesteps:
-        folder:
+        active_timesteps: pd.DatetimeIndex | None = None,
+        folder: pathlib.Path | None = None,
     ):
         """
         Args:
@@ -55,19 +60,19 @@ class Calculation:
         """
         self.name = name
         self.flow_system = flow_system
-        self.model:
+        self.model: SystemModel | None = None
         self.active_timesteps = active_timesteps
 
         self.durations = {'modeling': 0.0, 'solving': 0.0, 'saving': 0.0}
         self.folder = pathlib.Path.cwd() / 'results' if folder is None else pathlib.Path(folder)
-        self.results:
+        self.results: CalculationResults | None = None
 
         if self.folder.exists() and not self.folder.is_dir():
             raise NotADirectoryError(f'Path {self.folder} exists and is not a directory.')
         self.folder.mkdir(parents=False, exist_ok=True)
 
     @property
-    def main_results(self) ->
+    def main_results(self) -> dict[str, Scalar | dict]:
         from flixopt.features import InvestmentModel
 
         return {
@@ -127,7 +132,10 @@ class Calculation:
 
 class FullCalculation(Calculation):
     """
-
+    FullCalculation solves the complete optimization problem using all time steps.
+
+    This is the most comprehensive calculation type that considers every time step
+    in the optimization, providing the most accurate but computationally intensive solution.
     """
 
     def do_modeling(self) -> SystemModel:
@@ -140,7 +148,7 @@ class FullCalculation(Calculation):
         self.durations['modeling'] = round(timeit.default_timer() - t_start, 2)
         return self.model
 
-    def solve(self, solver: _Solver, log_file:
+    def solve(self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool = True):
         t_start = timeit.default_timer()
 
         self.model.solve(
@@ -186,7 +194,25 @@ class FullCalculation(Calculation):
 
 class AggregatedCalculation(FullCalculation):
     """
-
+    AggregatedCalculation reduces computational complexity by clustering time series into typical periods.
+
+    This calculation approach aggregates time series data using clustering techniques (tsam) to identify
+    representative time periods, significantly reducing computation time while maintaining solution accuracy.
+
+    Note:
+        The quality of the solution depends on the choice of aggregation parameters.
+        The optimal parameters depend on the specific problem and the characteristics of the time series data.
+        For more information, refer to the [tsam documentation](https://tsam.readthedocs.io/en/latest/).
+
+    Args:
+        name: Name of the calculation
+        flow_system: FlowSystem to be optimized
+        aggregation_parameters: Parameters for aggregation. See AggregationParameters class documentation
+        components_to_clusterize: list of Components to perform aggregation on. If None, all components are aggregated.
+            This equalizes variables in the components according to the typical periods computed in the aggregation
+        active_timesteps: DatetimeIndex of timesteps to use for calculation. If None, all timesteps are used
+        folder: Folder where results should be saved. If None, current working directory is used
+        aggregation: contains the aggregation model
     """
 
     def __init__(
@@ -194,25 +220,10 @@ class AggregatedCalculation(FullCalculation):
         name: str,
         flow_system: FlowSystem,
         aggregation_parameters: AggregationParameters,
-        components_to_clusterize:
-        active_timesteps:
-        folder:
+        components_to_clusterize: list[Component] | None = None,
+        active_timesteps: pd.DatetimeIndex | None = None,
+        folder: pathlib.Path | None = None,
     ):
-        """
-        Class for Optimizing the `FlowSystem` including:
-        1. Aggregating TimeSeriesData via typical periods using tsam.
-        2. Equalizing variables of typical periods.
-        Args:
-            name: name of calculation
-            flow_system: flow_system which should be calculated
-            aggregation_parameters: Parameters for aggregation. See documentation of AggregationParameters class.
-            components_to_clusterize: List of Components to perform aggregation on. If None, then all components are aggregated.
-                This means, teh variables in the components are equalized to each other, according to the typical periods
-                computed in the DataAggregation
-            active_timesteps: pd.DatetimeIndex or None
-                list with indices, which should be used for calculation. If None, then all timesteps are used.
-            folder: folder where results should be saved. If None, then the current working directory is used.
-        """
         super().__init__(name, flow_system, active_timesteps, folder=folder)
         self.aggregation_parameters = aggregation_parameters
         self.components_to_clusterize = components_to_clusterize
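A usage sketch assembled from the signatures visible in this diff. The `AggregationParameters` arguments beyond those shown above (`hours_per_period`, `nr_of_periods`) and the solver class name are assumptions and may differ from the released API.

```python
import pathlib

import flixopt as fx

# Hypothetical sketch; flow_system is a previously built fx.FlowSystem.
params = fx.AggregationParameters(
    hours_per_period=24,                          # assumed argument
    nr_of_periods=8,                              # assumed argument
    fix_storage_flows=True,                       # referenced in aggregation.py above
    aggregate_data_and_fix_non_binary_vars=False,  # shown in aggregation.py above
)
calc = fx.AggregatedCalculation(
    'aggregated_run',
    flow_system,
    aggregation_parameters=params,
    folder=pathlib.Path('results'),
)
calc.do_modeling()
calc.solve(fx.solvers.HighsSolver())  # solver class name is an assumption
```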
@@ -289,6 +300,114 @@ class AggregatedCalculation(FullCalculation):
 
 
 class SegmentedCalculation(Calculation):
+    """Solve large optimization problems by dividing time horizon into (overlapping) segments.
+
+    This class addresses memory and computational limitations of large-scale optimization
+    problems by decomposing the time horizon into smaller overlapping segments that are
+    solved sequentially. Each segment uses final values from the previous segment as
+    initial conditions, ensuring dynamic continuity across the solution.
+
+    Key Concepts:
+        **Temporal Decomposition**: Divides long time horizons into manageable segments
+        **Overlapping Windows**: Segments share timesteps to improve storage dynamics
+        **Value Transfer**: Final states of one segment become initial states of the next
+        **Sequential Solving**: Each segment solved independently but with coupling
+
+    Limitations and Constraints:
+        **Investment Parameters**: InvestParameters are not supported in segmented calculations
+        as investment decisions must be made for the entire time horizon, not per segment.
+
+        **Global Constraints**: Time-horizon-wide constraints (flow_hours_total_min/max,
+        load_factor_min/max) may produce suboptimal results as they cannot be enforced
+        globally across segments.
+
+        **Storage Dynamics**: While overlap helps, storage optimization may be suboptimal
+        compared to full-horizon solutions due to limited foresight in each segment.
+
+    Args:
+        name: Unique identifier for the calculation, used in result files and logging.
+        flow_system: The FlowSystem to optimize, containing all components, flows, and buses.
+        timesteps_per_segment: Number of timesteps in each segment (excluding overlap).
+            Must be > 2 to avoid internal side effects. Larger values provide better
+            optimization at the cost of memory and computation time.
+        overlap_timesteps: Number of additional timesteps added to each segment.
+            Improves storage optimization by providing lookahead. Higher values
+            improve solution quality but increase computational cost.
+        nr_of_previous_values: Number of previous timestep values to transfer between
+            segments for initialization. Typically 1 is sufficient.
+        folder: Directory for saving results. Defaults to current working directory + 'results'.
+
+    Examples:
+        Annual optimization with monthly segments:
+
+        ```python
+        # 8760 hours annual data with monthly segments (730 hours) and 48-hour overlap
+        segmented_calc = SegmentedCalculation(
+            name='annual_energy_system',
+            flow_system=energy_system,
+            timesteps_per_segment=730,  # ~1 month
+            overlap_timesteps=48,  # 2 days overlap
+            folder=Path('results/segmented'),
+        )
+        segmented_calc.do_modeling_and_solve(solver='gurobi')
+        ```
+
+        Weekly optimization with daily overlap:
+
+        ```python
+        # Weekly segments for detailed operational planning
+        weekly_calc = SegmentedCalculation(
+            name='weekly_operations',
+            flow_system=industrial_system,
+            timesteps_per_segment=168,  # 1 week (hourly data)
+            overlap_timesteps=24,  # 1 day overlap
+            nr_of_previous_values=1,
+        )
+        ```
+
+        Large-scale system with minimal overlap:
+
+        ```python
+        # Large system with minimal overlap for computational efficiency
+        large_calc = SegmentedCalculation(
+            name='large_scale_grid',
+            flow_system=grid_system,
+            timesteps_per_segment=100,  # Shorter segments
+            overlap_timesteps=5,  # Minimal overlap
+        )
+        ```
+
+    Design Considerations:
+        **Segment Size**: Balance between solution quality and computational efficiency.
+        Larger segments provide better optimization but require more memory and time.
+
+        **Overlap Duration**: More overlap improves storage dynamics and reduces
+        end-effects but increases computational cost. Typically 5-10% of segment length.
+
+        **Storage Systems**: Systems with large storage components benefit from longer
+        overlaps to capture charge/discharge cycles effectively.
+
+        **Investment Decisions**: Use FullCalculation for problems requiring investment
+        optimization, as SegmentedCalculation cannot handle investment parameters.
+
+    Common Use Cases:
+        - **Annual Planning**: Long-term planning with seasonal variations
+        - **Large Networks**: Spatially or temporally large energy systems
+        - **Memory-Limited Systems**: When full optimization exceeds available memory
+        - **Operational Planning**: Detailed short-term optimization with limited foresight
+        - **Sensitivity Analysis**: Quick approximate solutions for parameter studies
+
+    Performance Tips:
+        - Start with FullCalculation and use this class if memory issues occur
+        - Use longer overlaps for systems with significant storage
+        - Monitor solution quality at segment boundaries for discontinuities
+
+    Warning:
+        The evaluation of the solution is a bit more complex than FullCalculation or AggregatedCalculation
+        due to the overlapping individual solutions.
+
+    """
+
     def __init__(
         self,
         name: str,
@@ -296,32 +415,13 @@ class SegmentedCalculation(Calculation):
         timesteps_per_segment: int,
         overlap_timesteps: int,
         nr_of_previous_values: int = 1,
-        folder:
+        folder: pathlib.Path | None = None,
     ):
-        """
-        Dividing and Modeling the problem in (overlapping) segments.
-        The final values of each Segment are recognized by the following segment, effectively coupling
-        charge_states and flow_rates between segments.
-        Because of this intersection, both modeling and solving is done in one step
-
-        Take care:
-        Parameters like InvestParameters, sum_of_flow_hours and other restrictions over the total time_series
-        don't really work in this Calculation. Lower bounds to such SUMS can lead to weird results.
-        This is NOT yet explicitly checked for...
-
-        Args:
-            name: name of calculation
-            flow_system: flow_system which should be calculated
-            timesteps_per_segment: The number of time_steps per individual segment (without the overlap)
-            overlap_timesteps: The number of time_steps that are added to each individual model. Used for better
-                results of storages)
-            folder: folder where results should be saved. If None, then the current working directory is used.
-        """
         super().__init__(name, flow_system, folder=folder)
         self.timesteps_per_segment = timesteps_per_segment
         self.overlap_timesteps = overlap_timesteps
         self.nr_of_previous_values = nr_of_previous_values
-        self.sub_calculations:
+        self.sub_calculations: list[FullCalculation] = []
 
         self.all_timesteps = self.flow_system.time_series_collection.all_timesteps
         self.all_timesteps_extra = self.flow_system.time_series_collection.all_timesteps_extra
@@ -346,10 +446,10 @@ class SegmentedCalculation(Calculation):
                 if isinstance(comp, Storage)
             },
         }
-        self._transfered_start_values:
+        self._transfered_start_values: list[dict[str, Any]] = []
 
     def do_modeling_and_solve(
-        self, solver: _Solver, log_file:
+        self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool = False
     ):
         logger.info(f'{"":#^80}')
         logger.info(f'{" Segmented Solving ":#^80}')
@@ -430,7 +530,7 @@ class SegmentedCalculation(Calculation):
             if isinstance(comp, Storage):
                 comp.initial_charge_state = self._original_start_values[comp.label_full]
 
-    def _calculate_timesteps_of_segment(self) ->
+    def _calculate_timesteps_of_segment(self) -> list[pd.DatetimeIndex]:
         active_timesteps_per_segment = []
         for i, _ in enumerate(self.segment_names):
             start = self.timesteps_per_segment * i
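The segment windows produced by `_calculate_timesteps_of_segment` can be pictured with a small standalone sketch (illustrative only, not the library's code): each segment starts at `timesteps_per_segment * i` and covers `timesteps_per_segment + overlap_timesteps` steps, clipped at the end of the horizon.

```python
def segment_windows(total_timesteps: int, timesteps_per_segment: int, overlap_timesteps: int) -> list[range]:
    """Illustrative reimplementation of the overlapping-window logic described above."""
    windows = []
    start = 0
    while start < total_timesteps:
        # Each window covers its own timesteps plus the overlap into the next segment.
        end = min(start + timesteps_per_segment + overlap_timesteps, total_timesteps)
        windows.append(range(start, end))
        start += timesteps_per_segment
    return windows


# 10 timesteps, segments of 4 with an overlap of 2:
# [range(0, 6), range(4, 10), range(8, 10)]
print(segment_windows(10, 4, 2))
```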
@@ -443,7 +543,7 @@ class SegmentedCalculation(Calculation):
         return self.timesteps_per_segment + self.overlap_timesteps
 
     @property
-    def start_values_of_segments(self) ->
+    def start_values_of_segments(self) -> dict[int, dict[str, Any]]:
         """Gives an overview of the start values of all Segments"""
         return {
             0: {element.label_full: value for element, value in self._original_start_values.items()},