flixopt 2.2.0rc2__py3-none-any.whl → 3.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flixopt/__init__.py +33 -4
- flixopt/aggregation.py +60 -80
- flixopt/calculation.py +403 -182
- flixopt/commons.py +1 -10
- flixopt/components.py +939 -448
- flixopt/config.py +553 -191
- flixopt/core.py +513 -846
- flixopt/effects.py +644 -178
- flixopt/elements.py +610 -355
- flixopt/features.py +394 -966
- flixopt/flow_system.py +736 -219
- flixopt/interface.py +1104 -302
- flixopt/io.py +103 -79
- flixopt/linear_converters.py +387 -95
- flixopt/modeling.py +757 -0
- flixopt/network_app.py +73 -39
- flixopt/plotting.py +294 -138
- flixopt/results.py +1254 -300
- flixopt/solvers.py +25 -21
- flixopt/structure.py +938 -396
- flixopt/utils.py +36 -12
- flixopt-3.0.1.dist-info/METADATA +209 -0
- flixopt-3.0.1.dist-info/RECORD +26 -0
- flixopt-3.0.1.dist-info/top_level.txt +1 -0
- docs/examples/00-Minimal Example.md +0 -5
- docs/examples/01-Basic Example.md +0 -5
- docs/examples/02-Complex Example.md +0 -10
- docs/examples/03-Calculation Modes.md +0 -5
- docs/examples/index.md +0 -5
- docs/faq/contribute.md +0 -61
- docs/faq/index.md +0 -3
- docs/images/architecture_flixOpt-pre2.0.0.png +0 -0
- docs/images/architecture_flixOpt.png +0 -0
- docs/images/flixopt-icon.svg +0 -1
- docs/javascripts/mathjax.js +0 -18
- docs/user-guide/Mathematical Notation/Bus.md +0 -33
- docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md +0 -132
- docs/user-guide/Mathematical Notation/Flow.md +0 -26
- docs/user-guide/Mathematical Notation/LinearConverter.md +0 -21
- docs/user-guide/Mathematical Notation/Piecewise.md +0 -49
- docs/user-guide/Mathematical Notation/Storage.md +0 -44
- docs/user-guide/Mathematical Notation/index.md +0 -22
- docs/user-guide/Mathematical Notation/others.md +0 -3
- docs/user-guide/index.md +0 -124
- flixopt/config.yaml +0 -10
- flixopt-2.2.0rc2.dist-info/METADATA +0 -167
- flixopt-2.2.0rc2.dist-info/RECORD +0 -54
- flixopt-2.2.0rc2.dist-info/top_level.txt +0 -5
- pics/architecture_flixOpt-pre2.0.0.png +0 -0
- pics/architecture_flixOpt.png +0 -0
- pics/flixOpt_plotting.jpg +0 -0
- pics/flixopt-icon.svg +0 -1
- pics/pics.pptx +0 -0
- scripts/extract_release_notes.py +0 -45
- scripts/gen_ref_pages.py +0 -54
- tests/ressources/Zeitreihen2020.csv +0 -35137
- {flixopt-2.2.0rc2.dist-info → flixopt-3.0.1.dist-info}/WHEEL +0 -0
- {flixopt-2.2.0rc2.dist-info → flixopt-3.0.1.dist-info}/licenses/LICENSE +0 -0
flixopt/flow_system.py
CHANGED
@@ -2,121 +2,440 @@
 This module contains the FlowSystem class, which is used to collect instances of many other classes by the end User.
 """

+from __future__ import annotations
+
 import json
 import logging
-import pathlib
 import warnings
-from
-from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Literal, Optional

 import numpy as np
 import pandas as pd
 import xarray as xr
-from rich.console import Console
-from rich.pretty import Pretty

-from . import
-
-
+from .core import (
+    ConversionError,
+    DataConverter,
+    FlowSystemDimensions,
+    PeriodicData,
+    PeriodicDataUser,
+    TemporalData,
+    TemporalDataUser,
+    TimeSeriesData,
+)
+from .effects import (
+    Effect,
+    EffectCollection,
+    PeriodicEffects,
+    PeriodicEffectsUser,
+    TemporalEffects,
+    TemporalEffectsUser,
+)
 from .elements import Bus, Component, Flow
-from .structure import
+from .structure import Element, FlowSystemModel, Interface

 if TYPE_CHECKING:
+    import pathlib
+    from collections.abc import Collection
+
     import pyvis

 logger = logging.getLogger('flixopt')


-class FlowSystem:
+class FlowSystem(Interface):
     """
-    A FlowSystem organizes the high level Elements (Components & Effects).
+    A FlowSystem organizes the high level Elements (Components, Buses & Effects).
+
+    This is the main container class that users work with to build and manage their System.
+
+    Args:
+        timesteps: The timesteps of the model.
+        periods: The periods of the model.
+        scenarios: The scenarios of the model.
+        hours_of_last_timestep: The duration of the last time step. Uses the last time interval if not specified
+        hours_of_previous_timesteps: The duration of previous timesteps.
+            If None, the first time increment of time_series is used.
+            This is needed to calculate previous durations (for example consecutive_on_hours).
+            If you use an array, take care that its long enough to cover all previous values!
+        weights: The weights of each period and scenario. If None, all scenarios have the same weight (normalized to 1).
+            Its recommended to normalize the weights to sum up to 1.
+        scenario_independent_sizes: Controls whether investment sizes are equalized across scenarios.
+            - True: All sizes are shared/equalized across scenarios
+            - False: All sizes are optimized separately per scenario
+            - list[str]: Only specified components (by label_full) are equalized across scenarios
+        scenario_independent_flow_rates: Controls whether flow rates are equalized across scenarios.
+            - True: All flow rates are shared/equalized across scenarios
+            - False: All flow rates are optimized separately per scenario
+            - list[str]: Only specified flows (by label_full) are equalized across scenarios
+
+    Notes:
+        - Creates an empty registry for components and buses, an empty EffectCollection, and a placeholder for a SystemModel.
+        - The instance starts disconnected (self._connected_and_transformed == False) and will be
+          connected_and_transformed automatically when trying to solve a calculation.
     """

     def __init__(
         self,
         timesteps: pd.DatetimeIndex,
-
-
+        periods: pd.Index | None = None,
+        scenarios: pd.Index | None = None,
+        hours_of_last_timestep: float | None = None,
+        hours_of_previous_timesteps: int | float | np.ndarray | None = None,
+        weights: PeriodicDataUser | None = None,
+        scenario_independent_sizes: bool | list[str] = True,
+        scenario_independent_flow_rates: bool | list[str] = False,
     ):
-
-
-
-
-        hours_of_previous_timesteps: The duration of previous timesteps.
-            If None, the first time increment of time_series is used.
-            This is needed to calculate previous durations (for example consecutive_on_hours).
-            If you use an array, take care that its long enough to cover all previous values!
-        """
-        self.time_series_collection = TimeSeriesCollection(
-            timesteps=timesteps,
-            hours_of_last_timestep=hours_of_last_timestep,
-            hours_of_previous_timesteps=hours_of_previous_timesteps,
+        self.timesteps = self._validate_timesteps(timesteps)
+        self.timesteps_extra = self._create_timesteps_with_extra(self.timesteps, hours_of_last_timestep)
+        self.hours_of_previous_timesteps = self._calculate_hours_of_previous_timesteps(
+            self.timesteps, hours_of_previous_timesteps
         )

-
-        self.
-
+        self.periods = None if periods is None else self._validate_periods(periods)
+        self.scenarios = None if scenarios is None else self._validate_scenarios(scenarios)
+
+        self.weights = weights
+
+        hours_per_timestep = self.calculate_hours_per_timestep(self.timesteps_extra)
+
+        self.hours_of_last_timestep = hours_per_timestep[-1].item()
+
+        self.hours_per_timestep = self.fit_to_model_coords('hours_per_timestep', hours_per_timestep)
+
+        # Element collections
+        self.components: dict[str, Component] = {}
+        self.buses: dict[str, Bus] = {}
         self.effects: EffectCollection = EffectCollection()
-        self.model:
+        self.model: FlowSystemModel | None = None

-        self.
+        self._connected_and_transformed = False
+        self._used_in_calculation = False

         self._network_app = None

-
-
-
-
-
-
-
-
-
+        # Use properties to validate and store scenario dimension settings
+        self.scenario_independent_sizes = scenario_independent_sizes
+        self.scenario_independent_flow_rates = scenario_independent_flow_rates
+
+    @staticmethod
+    def _validate_timesteps(timesteps: pd.DatetimeIndex) -> pd.DatetimeIndex:
+        """Validate timesteps format and rename if needed."""
+        if not isinstance(timesteps, pd.DatetimeIndex):
+            raise TypeError('timesteps must be a pandas DatetimeIndex')
+        if len(timesteps) < 2:
+            raise ValueError('timesteps must contain at least 2 timestamps')
+        if timesteps.name != 'time':
+            timesteps.name = 'time'
+        if not timesteps.is_monotonic_increasing:
+            raise ValueError('timesteps must be sorted')
+        return timesteps
+
+    @staticmethod
+    def _validate_scenarios(scenarios: pd.Index) -> pd.Index:
+        """
+        Validate and prepare scenario index.
+
+        Args:
+            scenarios: The scenario index to validate
+        """
+        if not isinstance(scenarios, pd.Index) or len(scenarios) == 0:
+            raise ConversionError('Scenarios must be a non-empty Index')
+
+        if scenarios.name != 'scenario':
+            scenarios = scenarios.rename('scenario')
+
+        return scenarios
+
+    @staticmethod
+    def _validate_periods(periods: pd.Index) -> pd.Index:
+        """
+        Validate and prepare period index.
+
+        Args:
+            periods: The period index to validate
+        """
+        if not isinstance(periods, pd.Index) or len(periods) == 0:
+            raise ConversionError(f'Periods must be a non-empty Index. Got {periods}')
+
+        if not (
+            periods.dtype.kind == 'i'  # integer dtype
+            and periods.is_monotonic_increasing  # rising
+            and periods.is_unique
+        ):
+            raise ConversionError(f'Periods must be a monotonically increasing and unique Index. Got {periods}')
+
+        if periods.name != 'period':
+            periods = periods.rename('period')
+
+        return periods
+
+    @staticmethod
+    def _create_timesteps_with_extra(
+        timesteps: pd.DatetimeIndex, hours_of_last_timestep: float | None
+    ) -> pd.DatetimeIndex:
+        """Create timesteps with an extra step at the end."""
+        if hours_of_last_timestep is None:
+            hours_of_last_timestep = (timesteps[-1] - timesteps[-2]) / pd.Timedelta(hours=1)
+
+        last_date = pd.DatetimeIndex([timesteps[-1] + pd.Timedelta(hours=hours_of_last_timestep)], name='time')
+        return pd.DatetimeIndex(timesteps.append(last_date), name='time')
+
+    @staticmethod
+    def calculate_hours_per_timestep(timesteps_extra: pd.DatetimeIndex) -> xr.DataArray:
+        """Calculate duration of each timestep as a 1D DataArray."""
+        hours_per_step = np.diff(timesteps_extra) / pd.Timedelta(hours=1)
+        return xr.DataArray(
+            hours_per_step, coords={'time': timesteps_extra[:-1]}, dims='time', name='hours_per_timestep'
         )

-
-
-
-
-
+    @staticmethod
+    def _calculate_hours_of_previous_timesteps(
+        timesteps: pd.DatetimeIndex, hours_of_previous_timesteps: float | np.ndarray | None
+    ) -> float | np.ndarray:
+        """Calculate duration of regular timesteps."""
+        if hours_of_previous_timesteps is not None:
+            return hours_of_previous_timesteps
+        # Calculate from the first interval
+        first_interval = timesteps[1] - timesteps[0]
+        return first_interval.total_seconds() / 3600  # Convert to hours
+
+    def _create_reference_structure(self) -> tuple[dict, dict[str, xr.DataArray]]:
+        """
+        Override Interface method to handle FlowSystem-specific serialization.
+        Combines custom FlowSystem logic with Interface pattern for nested objects.
+
+        Returns:
+            Tuple of (reference_structure, extracted_arrays_dict)
+        """
+        # Start with Interface base functionality for constructor parameters
+        reference_structure, all_extracted_arrays = super()._create_reference_structure()
+
+        # Remove timesteps, as it's directly stored in dataset index
+        reference_structure.pop('timesteps', None)
+
+        # Extract from components
+        components_structure = {}
+        for comp_label, component in self.components.items():
+            comp_structure, comp_arrays = component._create_reference_structure()
+            all_extracted_arrays.update(comp_arrays)
+            components_structure[comp_label] = comp_structure
+        reference_structure['components'] = components_structure
+
+        # Extract from buses
+        buses_structure = {}
+        for bus_label, bus in self.buses.items():
+            bus_structure, bus_arrays = bus._create_reference_structure()
+            all_extracted_arrays.update(bus_arrays)
+            buses_structure[bus_label] = bus_structure
+        reference_structure['buses'] = buses_structure
+
+        # Extract from effects
+        effects_structure = {}
+        for effect in self.effects:
+            effect_structure, effect_arrays = effect._create_reference_structure()
+            all_extracted_arrays.update(effect_arrays)
+            effects_structure[effect.label] = effect_structure
+        reference_structure['effects'] = effects_structure
+
+        return reference_structure, all_extracted_arrays
+
+    def to_dataset(self) -> xr.Dataset:
+        """
+        Convert the FlowSystem to an xarray Dataset.
+        Ensures FlowSystem is connected before serialization.
+
+        Returns:
+            xr.Dataset: Dataset containing all DataArrays with structure in attributes
+        """
+        if not self.connected_and_transformed:
+            logger.warning('FlowSystem is not connected_and_transformed. Connecting and transforming data now.')
+            self.connect_and_transform()
+
+        return super().to_dataset()
+
+    @classmethod
+    def from_dataset(cls, ds: xr.Dataset) -> FlowSystem:
+        """
+        Create a FlowSystem from an xarray Dataset.
+        Handles FlowSystem-specific reconstruction logic.
+
+        Args:
+            ds: Dataset containing the FlowSystem data
+
+        Returns:
+            FlowSystem instance
+        """
+        # Get the reference structure from attrs
+        reference_structure = dict(ds.attrs)
+
+        # Create arrays dictionary from dataset variables
+        arrays_dict = {name: array for name, array in ds.data_vars.items()}
+
+        # Create FlowSystem instance with constructor parameters
+        flow_system = cls(
+            timesteps=ds.indexes['time'],
+            periods=ds.indexes.get('period'),
+            scenarios=ds.indexes.get('scenario'),
+            weights=cls._resolve_dataarray_reference(reference_structure['weights'], arrays_dict)
+            if 'weights' in reference_structure
+            else None,
+            hours_of_last_timestep=reference_structure.get('hours_of_last_timestep'),
+            hours_of_previous_timesteps=reference_structure.get('hours_of_previous_timesteps'),
+            scenario_independent_sizes=reference_structure.get('scenario_independent_sizes', True),
+            scenario_independent_flow_rates=reference_structure.get('scenario_independent_flow_rates', False),
         )
+
+        # Restore components
+        components_structure = reference_structure.get('components', {})
+        for comp_label, comp_data in components_structure.items():
+            component = cls._resolve_reference_structure(comp_data, arrays_dict)
+            if not isinstance(component, Component):
+                logger.critical(f'Restoring component {comp_label} failed.')
+            flow_system._add_components(component)
+
+        # Restore buses
+        buses_structure = reference_structure.get('buses', {})
+        for bus_label, bus_data in buses_structure.items():
+            bus = cls._resolve_reference_structure(bus_data, arrays_dict)
+            if not isinstance(bus, Bus):
+                logger.critical(f'Restoring bus {bus_label} failed.')
+            flow_system._add_buses(bus)
+
+        # Restore effects
+        effects_structure = reference_structure.get('effects', {})
+        for effect_label, effect_data in effects_structure.items():
+            effect = cls._resolve_reference_structure(effect_data, arrays_dict)
+            if not isinstance(effect, Effect):
+                logger.critical(f'Restoring effect {effect_label} failed.')
+            flow_system._add_effects(effect)
+
         return flow_system

-
-    def from_dict(cls, data: Dict) -> 'FlowSystem':
+    def to_netcdf(self, path: str | pathlib.Path, compression: int = 0):
         """
-
+        Save the FlowSystem to a NetCDF file.
+        Ensures FlowSystem is connected before saving.

         Args:
-
+            path: The path to the netCDF file.
+            compression: The compression level to use when saving the file.
         """
-
-
+        if not self.connected_and_transformed:
+            logger.warning('FlowSystem is not connected. Calling connect_and_transform() now.')
+            self.connect_and_transform()

-
-
-            hours_of_last_timestep=hours_of_last_timestep,
-            hours_of_previous_timesteps=data['hours_of_previous_timesteps'],
-        )
+        super().to_netcdf(path, compression)
+        logger.info(f'Saved FlowSystem to {path}')

-
+    def get_structure(self, clean: bool = False, stats: bool = False) -> dict:
+        """
+        Get FlowSystem structure.
+        Ensures FlowSystem is connected before getting structure.

-
+        Args:
+            clean: If True, remove None and empty dicts and lists.
+            stats: If True, replace DataArray references with statistics
+        """
+        if not self.connected_and_transformed:
+            logger.warning('FlowSystem is not connected. Calling connect_and_transform() now.')
+            self.connect_and_transform()

-
-            *[CLASS_REGISTRY[comp['__class__']].from_dict(comp) for comp in data['components'].values()]
-        )
+        return super().get_structure(clean, stats)

-
+    def to_json(self, path: str | pathlib.Path):
+        """
+        Save the flow system to a JSON file.
+        Ensures FlowSystem is connected before saving.

-
+        Args:
+            path: The path to the JSON file.
+        """
+        if not self.connected_and_transformed:
+            logger.warning(
+                'FlowSystem needs to be connected and transformed before saving to JSON. Calling connect_and_transform() now.'
+            )
+            self.connect_and_transform()

-
-
+        super().to_json(path)
+
+    def fit_to_model_coords(
+        self,
+        name: str,
+        data: TemporalDataUser | PeriodicDataUser | None,
+        dims: Collection[FlowSystemDimensions] | None = None,
+    ) -> TemporalData | PeriodicData | None:
         """
-
+        Fit data to model coordinate system (currently time, but extensible).
+
+        Args:
+            name: Name of the data
+            data: Data to fit to model coordinates
+            dims: Collection of dimension names to use for fitting. If None, all dimensions are used.
+
+        Returns:
+            xr.DataArray aligned to model coordinate system. If data is None, returns None.
         """
-
+        if data is None:
+            return None
+
+        coords = self.coords
+
+        if dims is not None:
+            coords = {k: coords[k] for k in dims if k in coords}
+
+        # Rest of your method stays the same, just pass coords
+        if isinstance(data, TimeSeriesData):
+            try:
+                data.name = name  # Set name of previous object!
+                return data.fit_to_coords(coords)
+            except ConversionError as e:
+                raise ConversionError(
+                    f'Could not convert time series data "{name}" to DataArray:\n{data}\nOriginal Error: {e}'
+                ) from e
+
+        try:
+            return DataConverter.to_dataarray(data, coords=coords).rename(name)
+        except ConversionError as e:
+            raise ConversionError(f'Could not convert data "{name}" to DataArray:\n{data}\nOriginal Error: {e}') from e
+
+    def fit_effects_to_model_coords(
+        self,
+        label_prefix: str | None,
+        effect_values: TemporalEffectsUser | PeriodicEffectsUser | None,
+        label_suffix: str | None = None,
+        dims: Collection[FlowSystemDimensions] | None = None,
+        delimiter: str = '|',
+    ) -> TemporalEffects | PeriodicEffects | None:
+        """
+        Transform EffectValues from the user to Internal Datatypes aligned with model coordinates.
+        """
+        if effect_values is None:
+            return None
+
+        effect_values_dict = self.effects.create_effect_values_dict(effect_values)
+
+        return {
+            effect: self.fit_to_model_coords(
+                str(delimiter).join(filter(None, [label_prefix, effect, label_suffix])),
+                value,
+                dims=dims,
+            )
+            for effect, value in effect_values_dict.items()
+        }
+
+    def connect_and_transform(self):
+        """Transform data for all elements using the new simplified approach."""
+        if self.connected_and_transformed:
+            logger.debug('FlowSystem already connected and transformed')
+            return
+
+        self.weights = self.fit_to_model_coords('weights', self.weights, dims=['period', 'scenario'])
+
+        self._connect_network()
+        for element in list(self.components.values()) + list(self.effects.effects.values()) + list(self.buses.values()):
+            element.transform_data(self)
+        self._connected_and_transformed = True

     def add_elements(self, *elements: Element) -> None:
         """
@@ -126,12 +445,12 @@ class FlowSystem:
         *elements: childs of Element like Boiler, HeatPump, Bus,...
             modeling Elements
         """
-        if self.
+        if self.connected_and_transformed:
             warnings.warn(
                 'You are adding elements to an already connected FlowSystem. This is not recommended (But it works).',
                 stacklevel=2,
             )
-        self.
+        self._connected_and_transformed = False
         for new_element in list(elements):
             if isinstance(new_element, Component):
                 self._add_components(new_element)
@@ -144,75 +463,29 @@ class FlowSystem:
                 f'Tried to add incompatible object to FlowSystem: {type(new_element)=}: {new_element=} '
             )

-    def
-        """
-        Saves the flow system to a json file.
-        This not meant to be reloaded and recreate the object,
-        but rather used to document or compare the flow_system to others.
-
-        Args:
-            path: The path to the json file.
-        """
-        with open(path, 'w', encoding='utf-8') as f:
-            json.dump(self.as_dict('stats'), f, indent=4, ensure_ascii=False)
-
-    def as_dict(self, data_mode: Literal['data', 'name', 'stats'] = 'data') -> Dict:
-        """Convert the object to a dictionary representation."""
-        data = {
-            'components': {
-                comp.label: comp.to_dict()
-                for comp in sorted(self.components.values(), key=lambda component: component.label.upper())
-            },
-            'buses': {
-                bus.label: bus.to_dict() for bus in sorted(self.buses.values(), key=lambda bus: bus.label.upper())
-            },
-            'effects': {
-                effect.label: effect.to_dict()
-                for effect in sorted(self.effects, key=lambda effect: effect.label.upper())
-            },
-            'timesteps_extra': [date.isoformat() for date in self.time_series_collection.timesteps_extra],
-            'hours_of_previous_timesteps': self.time_series_collection.hours_of_previous_timesteps,
-        }
-        if data_mode == 'data':
-            return fx_io.replace_timeseries(data, 'data')
-        elif data_mode == 'stats':
-            return fx_io.remove_none_and_empty(fx_io.replace_timeseries(data, data_mode))
-        return fx_io.replace_timeseries(data, data_mode)
-
-    def as_dataset(self, constants_in_dataset: bool = False) -> xr.Dataset:
+    def create_model(self, normalize_weights: bool = True) -> FlowSystemModel:
         """
-
+        Create a linopy model from the FlowSystem.

         Args:
-
-        """
-        ds = self.time_series_collection.to_dataset(include_constants=constants_in_dataset)
-        ds.attrs = self.as_dict(data_mode='name')
-        return ds
-
-    def to_netcdf(self, path: Union[str, pathlib.Path], compression: int = 0, constants_in_dataset: bool = True):
+            normalize_weights: Whether to automatically normalize the weights (periods and scenarios) to sum up to 1 when solving.
         """
-
-
-
-
-
-
-        ds = self.as_dataset(constants_in_dataset=constants_in_dataset)
-        fx_io.save_dataset_to_netcdf(ds, path, compression=compression)
-        logger.info(f'Saved FlowSystem to {path}')
+        if not self.connected_and_transformed:
+            raise RuntimeError(
+                'FlowSystem is not connected_and_transformed. Call FlowSystem.connect_and_transform() first.'
+            )
+        self.model = FlowSystemModel(self, normalize_weights)
+        return self.model

     def plot_network(
         self,
-        path:
-        controls:
-
-
-            Literal['nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer']
-        ],
+        path: bool | str | pathlib.Path = 'flow_system.html',
+        controls: bool
+        | list[
+            Literal['nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer']
         ] = True,
         show: bool = False,
-    ) ->
+    ) -> pyvis.network.Network | None:
         """
         Visualizes the network structure of a FlowSystem using PyVis, saving it as an interactive HTML file.

@@ -227,7 +500,7 @@ class FlowSystem:
            show: Whether to open the visualization in the web browser.

        Returns:
-            -
+            - 'pyvis.network.Network' | None: The `Network` instance representing the visualization, or `None` if `pyvis` is not installed.

        Examples:
            >>> flow_system.plot_network()
@@ -245,7 +518,7 @@ class FlowSystem:

     def start_network_app(self):
        """Visualizes the network structure of a FlowSystem using Dash, Cytoscape, and networkx.
-        Requires optional dependencies: dash, dash-cytoscape, networkx, werkzeug.
+        Requires optional dependencies: dash, dash-cytoscape, dash-daq, networkx, flask, werkzeug.
        """
        from .network_app import DASH_CYTOSCAPE_AVAILABLE, VISUALIZATION_ERROR, flow_graph, shownetwork

@@ -258,11 +531,12 @@ class FlowSystem:
        if not DASH_CYTOSCAPE_AVAILABLE:
            raise ImportError(
                f'Network visualization requires optional dependencies. '
-                f'Install with: pip install flixopt[
+                f'Install with: `pip install flixopt[network_viz]`, `pip install flixopt[full]` '
+                f'or: `pip install dash dash-cytoscape dash-daq networkx werkzeug`. '
                f'Original error: {VISUALIZATION_ERROR}'
            )

-        if not self.
+        if not self._connected_and_transformed:
            self._connect_network()

        if self._network_app is not None:
@@ -278,12 +552,13 @@ class FlowSystem:
        if not DASH_CYTOSCAPE_AVAILABLE:
            raise ImportError(
                f'Network visualization requires optional dependencies. '
-                f'Install with: pip install flixopt[
+                f'Install with: `pip install flixopt[network_viz]`, `pip install flixopt[full]` '
+                f'or: `pip install dash dash-cytoscape dash-daq networkx werkzeug`. '
                f'Original error: {VISUALIZATION_ERROR}'
            )

        if self._network_app is None:
-            logger.warning(
+            logger.warning("No network app is currently running. Can't stop it")
            return

        try:
@@ -295,9 +570,9 @@ class FlowSystem:
        finally:
            self._network_app = None

-    def network_infos(self) ->
-        if not self.
-            self.
+    def network_infos(self) -> tuple[dict[str, dict[str, str]], dict[str, dict[str, str]]]:
+        if not self.connected_and_transformed:
+            self.connect_and_transform()
        nodes = {
            node.label_full: {
                'label': node.label,
@@ -319,67 +594,6 @@ class FlowSystem:

        return nodes, edges

-    def transform_data(self):
-        if not self._connected:
-            self._connect_network()
-        for element in self.all_elements.values():
-            element.transform_data(self)
-
-    def create_time_series(
-        self,
-        name: str,
-        data: Optional[Union[NumericData, TimeSeriesData, TimeSeries]],
-        needs_extra_timestep: bool = False,
-    ) -> Optional[TimeSeries]:
-        """
-        Tries to create a TimeSeries from NumericData Data and adds it to the time_series_collection
-        If the data already is a TimeSeries, nothing happens and the TimeSeries gets reset and returned
-        If the data is a TimeSeriesData, it is converted to a TimeSeries, and the aggregation weights are applied.
-        If the data is None, nothing happens.
-        """
-
-        if data is None:
-            return None
-        elif isinstance(data, TimeSeries):
-            data.restore_data()
-            if data in self.time_series_collection:
-                return data
-            return self.time_series_collection.create_time_series(
-                data=data.active_data, name=name, needs_extra_timestep=needs_extra_timestep
-            )
-        return self.time_series_collection.create_time_series(
-            data=data, name=name, needs_extra_timestep=needs_extra_timestep
-        )
-
-    def create_effect_time_series(
-        self,
-        label_prefix: Optional[str],
-        effect_values: EffectValuesUser,
-        label_suffix: Optional[str] = None,
-    ) -> Optional[EffectTimeSeries]:
-        """
-        Transform EffectValues to EffectTimeSeries.
-        Creates a TimeSeries for each key in the nested_values dictionary, using the value as the data.
-
-        The resulting label of the TimeSeries is the label of the parent_element,
-        followed by the label of the Effect in the nested_values and the label_suffix.
-        If the key in the EffectValues is None, the alias 'Standard_Effect' is used
-        """
-        effect_values: Optional[EffectValuesDict] = self.effects.create_effect_values_dict(effect_values)
-        if effect_values is None:
-            return None
-
-        return {
-            effect: self.create_time_series('|'.join(filter(None, [label_prefix, effect, label_suffix])), value)
-            for effect, value in effect_values.items()
-        }
-
-    def create_model(self) -> SystemModel:
-        if not self._connected:
-            raise RuntimeError('FlowSystem is not connected. Call FlowSystem.connect() first.')
-        self.model = SystemModel(self)
-        return self.model
-
     def _check_if_element_is_unique(self, element: Element) -> None:
        """
        checks if element or label of element already exists in list
@@ -388,25 +602,25 @@ class FlowSystem:
            element: new element to check
        """
        if element in self.all_elements.values():
-            raise ValueError(f'Element {element.
+            raise ValueError(f'Element {element.label_full} already added to FlowSystem!')
        # check if name is already used:
        if element.label_full in self.all_elements:
-            raise ValueError(f'Label of Element {element.
+            raise ValueError(f'Label of Element {element.label_full} already used in another element!')

    def _add_effects(self, *args: Effect) -> None:
        self.effects.add_effects(*args)

    def _add_components(self, *components: Component) -> None:
        for new_component in list(components):
-            logger.info(f'Registered new Component: {new_component.
+            logger.info(f'Registered new Component: {new_component.label_full}')
            self._check_if_element_is_unique(new_component)  # check if already exists:
-            self.components[new_component.
+            self.components[new_component.label_full] = new_component  # Add to existing components

    def _add_buses(self, *buses: Bus):
        for new_bus in list(buses):
-            logger.info(f'Registered new Bus: {new_bus.
+            logger.info(f'Registered new Bus: {new_bus.label_full}')
            self._check_if_element_is_unique(new_bus)  # check if already exists:
-            self.buses[new_bus.
+            self.buses[new_bus.label_full] = new_bus  # Add to existing components

    def _connect_network(self):
        """Connects the network of components and buses. Can be rerun without changes if no elements were added"""
@@ -417,14 +631,14 @@ class FlowSystem:

            # Add Bus if not already added (deprecated)
            if flow._bus_object is not None and flow._bus_object not in self.buses.values():
-                self._add_buses(flow._bus_object)
                warnings.warn(
-                    f'The Bus {flow._bus_object.
+                    f'The Bus {flow._bus_object.label_full} was added to the FlowSystem from {flow.label_full}.'
                    f'This is deprecated and will be removed in the future. '
                    f'Please pass the Bus.label to the Flow and the Bus to the FlowSystem instead.',
-
+                    DeprecationWarning,
                    stacklevel=1,
                )
+                self._add_buses(flow._bus_object)

            # Connect Buses
            bus = self.buses.get(flow.bus)
@@ -441,23 +655,326 @@ class FlowSystem:
            f'Connected {len(self.buses)} Buses and {len(self.components)} '
            f'via {len(self.flows)} Flows inside the FlowSystem.'
        )
-        self._connected = True

-    def __repr__(self):
-
+    def __repr__(self) -> str:
+        """Compact representation for debugging."""
+        status = '✓' if self.connected_and_transformed else '⚠'
+
+        # Build dimension info
+        dims = f'{len(self.timesteps)} timesteps [{self.timesteps[0].strftime("%Y-%m-%d")} to {self.timesteps[-1].strftime("%Y-%m-%d")}]'
+        if self.periods is not None:
+            dims += f', {len(self.periods)} periods'
+        if self.scenarios is not None:
+            dims += f', {len(self.scenarios)} scenarios'
+
+        return f'FlowSystem({dims}, {len(self.components)} Components, {len(self.buses)} Buses, {len(self.effects)} Effects, {status})'
+
+    def __str__(self) -> str:
+        """Structured summary for users."""
+
+        def format_elements(element_names: list, label: str, alignment: int = 12):
+            name_list = ', '.join(element_names[:3])
+            if len(element_names) > 3:
+                name_list += f' ... (+{len(element_names) - 3} more)'
+
+            suffix = f' ({name_list})' if element_names else ''
+            padding = alignment - len(label) - 1  # -1 for the colon
+            return f'{label}:{"":<{padding}} {len(element_names)}{suffix}'
+
+        time_period = f'Time period: {self.timesteps[0].date()} to {self.timesteps[-1].date()}'
+        freq_str = str(self.timesteps.freq).replace('<', '').replace('>', '') if self.timesteps.freq else 'irregular'
+
+        lines = [
+            f'Timesteps: {len(self.timesteps)} ({freq_str}) [{time_period}]',
+        ]
+
+        # Add periods if present
+        if self.periods is not None:
+            period_names = ', '.join(str(p) for p in self.periods[:3])
+            if len(self.periods) > 3:
+                period_names += f' ... (+{len(self.periods) - 3} more)'
+            lines.append(f'Periods: {len(self.periods)} ({period_names})')
+
+        # Add scenarios if present
+        if self.scenarios is not None:
+            scenario_names = ', '.join(str(s) for s in self.scenarios[:3])
+            if len(self.scenarios) > 3:
+                scenario_names += f' ... (+{len(self.scenarios) - 3} more)'
+            lines.append(f'Scenarios: {len(self.scenarios)} ({scenario_names})')
+
+        lines.extend(
+            [
+                format_elements(list(self.components.keys()), 'Components'),
+                format_elements(list(self.buses.keys()), 'Buses'),
+                format_elements(list(self.effects.effects.keys()), 'Effects'),
+                f'Status: {"Connected & Transformed" if self.connected_and_transformed else "Not connected"}',
+            ]
+        )
+        lines = ['FlowSystem:', f'{"─" * max(len(line) for line in lines)}'] + lines
+
+        return '\n'.join(lines)
+
+    def __eq__(self, other: FlowSystem):
+        """Check if two FlowSystems are equal by comparing their dataset representations."""
+        if not isinstance(other, FlowSystem):
+            raise NotImplementedError('Comparison with other types is not implemented for class FlowSystem')
+
+        ds_me = self.to_dataset()
+        ds_other = other.to_dataset()
+
+        try:
+            xr.testing.assert_equal(ds_me, ds_other)
+        except AssertionError:
+            return False
+
+        if ds_me.attrs != ds_other.attrs:
+            return False
+
+        return True

-    def
-        with
-
-
-
-
+    def __getitem__(self, item) -> Element:
+        """Get element by exact label with helpful error messages."""
+        if item in self.all_elements:
+            return self.all_elements[item]
+
+        # Provide helpful error with suggestions
+        from difflib import get_close_matches
+
+        suggestions = get_close_matches(item, self.all_elements.keys(), n=3, cutoff=0.6)
+
+        if suggestions:
+            suggestion_str = ', '.join(f"'{s}'" for s in suggestions)
+            raise KeyError(f"Element '{item}' not found. Did you mean: {suggestion_str}?")
+        else:
+            raise KeyError(f"Element '{item}' not found in FlowSystem")
+
+    def __contains__(self, item: str) -> bool:
+        """Check if element exists in the FlowSystem."""
+        return item in self.all_elements
+
+    def __iter__(self):
+        """Iterate over element labels."""
+        return iter(self.all_elements.keys())

     @property
-    def flows(self) ->
+    def flows(self) -> dict[str, Flow]:
        set_of_flows = {flow for comp in self.components.values() for flow in comp.inputs + comp.outputs}
        return {flow.label_full: flow for flow in set_of_flows}

     @property
-    def all_elements(self) ->
+    def all_elements(self) -> dict[str, Element]:
        return {**self.components, **self.effects.effects, **self.flows, **self.buses}
+
+    @property
+    def coords(self) -> dict[FlowSystemDimensions, pd.Index]:
+        active_coords = {'time': self.timesteps}
+        if self.periods is not None:
+            active_coords['period'] = self.periods
+        if self.scenarios is not None:
+            active_coords['scenario'] = self.scenarios
+        return active_coords
+
+    @property
+    def used_in_calculation(self) -> bool:
+        return self._used_in_calculation
+
+    def _validate_scenario_parameter(self, value: bool | list[str], param_name: str, element_type: str) -> None:
+        """
+        Validate scenario parameter value.
+
+        Args:
+            value: The value to validate
+            param_name: Name of the parameter (for error messages)
+            element_type: Type of elements expected in list (e.g., 'component label_full', 'flow label_full')
+
+        Raises:
+            TypeError: If value is not bool or list[str]
+            ValueError: If list contains non-string elements
+        """
+        if isinstance(value, bool):
+            return  # Valid
+        elif isinstance(value, list):
+            if not all(isinstance(item, str) for item in value):
+                raise ValueError(f'{param_name} list must contain only strings ({element_type} values)')
+        else:
+            raise TypeError(f'{param_name} must be bool or list[str], got {type(value).__name__}')
+
+    @property
+    def scenario_independent_sizes(self) -> bool | list[str]:
+        """
+        Controls whether investment sizes are equalized across scenarios.
+
+        Returns:
+            bool or list[str]: Configuration for scenario-independent sizing
+        """
+        return self._scenario_independent_sizes
+
+    @scenario_independent_sizes.setter
+    def scenario_independent_sizes(self, value: bool | list[str]) -> None:
+        """
+        Set whether investment sizes should be equalized across scenarios.
+
+        Args:
+            value: True (all equalized), False (all vary), or list of component label_full strings to equalize
+
+        Raises:
+            TypeError: If value is not bool or list[str]
+            ValueError: If list contains non-string elements
+        """
+        self._validate_scenario_parameter(value, 'scenario_independent_sizes', 'Element.label_full')
+        self._scenario_independent_sizes = value
+
+    @property
+    def scenario_independent_flow_rates(self) -> bool | list[str]:
+        """
+        Controls whether flow rates are equalized across scenarios.
+
+        Returns:
+            bool or list[str]: Configuration for scenario-independent flow rates
+        """
+        return self._scenario_independent_flow_rates
+
+    @scenario_independent_flow_rates.setter
+    def scenario_independent_flow_rates(self, value: bool | list[str]) -> None:
+        """
+        Set whether flow rates should be equalized across scenarios.
+
+        Args:
+            value: True (all equalized), False (all vary), or list of flow label_full strings to equalize
+
+        Raises:
+            TypeError: If value is not bool or list[str]
+            ValueError: If list contains non-string elements
+        """
+        self._validate_scenario_parameter(value, 'scenario_independent_flow_rates', 'Flow.label_full')
+        self._scenario_independent_flow_rates = value
+
+    def sel(
+        self,
+        time: str | slice | list[str] | pd.Timestamp | pd.DatetimeIndex | None = None,
+        period: int | slice | list[int] | pd.Index | None = None,
+        scenario: str | slice | list[str] | pd.Index | None = None,
+    ) -> FlowSystem:
+        """
+        Select a subset of the flowsystem by the time coordinate.
+
+        Args:
+            time: Time selection (e.g., slice('2023-01-01', '2023-12-31'), '2023-06-15', or list of times)
+            period: Period selection (e.g., slice(2023, 2024), or list of periods)
+            scenario: Scenario selection (e.g., slice('scenario1', 'scenario2'), or list of scenarios)
+
+        Returns:
+            FlowSystem: New FlowSystem with selected data
+        """
+        if not self.connected_and_transformed:
+            self.connect_and_transform()
+
+        ds = self.to_dataset()
+
+        # Build indexers dict from non-None parameters
+        indexers = {}
+        if time is not None:
+            indexers['time'] = time
+        if period is not None:
+            indexers['period'] = period
+        if scenario is not None:
+            indexers['scenario'] = scenario
+
+        if not indexers:
+            return self.copy()  # Return a copy when no selection
+
+        selected_dataset = ds.sel(**indexers)
+        return self.__class__.from_dataset(selected_dataset)
+
+    def isel(
+        self,
+        time: int | slice | list[int] | None = None,
+        period: int | slice | list[int] | None = None,
+        scenario: int | slice | list[int] | None = None,
+    ) -> FlowSystem:
+        """
+        Select a subset of the flowsystem by integer indices.
+
+        Args:
+            time: Time selection by integer index (e.g., slice(0, 100), 50, or [0, 5, 10])
+            period: Period selection by integer index (e.g., slice(0, 100), 50, or [0, 5, 10])
+            scenario: Scenario selection by integer index (e.g., slice(0, 3), 50, or [0, 5, 10])
+
+        Returns:
+            FlowSystem: New FlowSystem with selected data
+        """
+        if not self.connected_and_transformed:
+            self.connect_and_transform()
+
+        ds = self.to_dataset()
+
+        # Build indexers dict from non-None parameters
+        indexers = {}
+        if time is not None:
+            indexers['time'] = time
+        if period is not None:
+            indexers['period'] = period
+        if scenario is not None:
+            indexers['scenario'] = scenario
+
+        if not indexers:
+            return self.copy()  # Return a copy when no selection
+
+        selected_dataset = ds.isel(**indexers)
+        return self.__class__.from_dataset(selected_dataset)
+
+    def resample(
+        self,
+        time: str,
+        method: Literal['mean', 'sum', 'max', 'min', 'first', 'last', 'std', 'var', 'median', 'count'] = 'mean',
+        **kwargs: Any,
+    ) -> FlowSystem:
+        """
+        Create a resampled FlowSystem by resampling data along the time dimension (like xr.Dataset.resample()).
+        Only resamples data variables that have a time dimension.
+
+        Args:
+            time: Resampling frequency (e.g., '3h', '2D', '1M')
+            method: Resampling method. Recommended: 'mean', 'first', 'last', 'max', 'min'
+            **kwargs: Additional arguments passed to xarray.resample()
+
+        Returns:
+            FlowSystem: New FlowSystem with resampled data
+        """
+        if not self.connected_and_transformed:
+            self.connect_and_transform()
+
+        dataset = self.to_dataset()
+
+        # Separate variables with and without time dimension
+        time_vars = {}
+        non_time_vars = {}
+
+        for var_name, var in dataset.data_vars.items():
+            if 'time' in var.dims:
+                time_vars[var_name] = var
+            else:
+                non_time_vars[var_name] = var
+
+        # Only resample variables that have time dimension
+        time_dataset = dataset[list(time_vars.keys())]
+        resampler = time_dataset.resample(time=time, **kwargs)
+
+        if hasattr(resampler, method):
+            resampled_time_data = getattr(resampler, method)()
+        else:
+            available_methods = ['mean', 'sum', 'max', 'min', 'first', 'last', 'std', 'var', 'median', 'count']
+            raise ValueError(f'Unsupported resampling method: {method}. Available: {available_methods}')
+
+        # Combine resampled time variables with non-time variables
+        if non_time_vars:
+            non_time_dataset = dataset[list(non_time_vars.keys())]
+            resampled_dataset = xr.merge([resampled_time_data, non_time_dataset])
+        else:
+            resampled_dataset = resampled_time_data
+
+        return self.__class__.from_dataset(resampled_dataset)
+
+    @property
+    def connected_and_transformed(self) -> bool:
+        return self._connected_and_transformed