flixopt 2.2.0rc2__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (58)
  1. flixopt/__init__.py +33 -4
  2. flixopt/aggregation.py +60 -80
  3. flixopt/calculation.py +395 -178
  4. flixopt/commons.py +1 -10
  5. flixopt/components.py +939 -448
  6. flixopt/config.py +553 -191
  7. flixopt/core.py +513 -846
  8. flixopt/effects.py +644 -178
  9. flixopt/elements.py +610 -355
  10. flixopt/features.py +394 -966
  11. flixopt/flow_system.py +736 -219
  12. flixopt/interface.py +1104 -302
  13. flixopt/io.py +103 -79
  14. flixopt/linear_converters.py +387 -95
  15. flixopt/modeling.py +759 -0
  16. flixopt/network_app.py +73 -39
  17. flixopt/plotting.py +294 -138
  18. flixopt/results.py +1253 -299
  19. flixopt/solvers.py +25 -21
  20. flixopt/structure.py +938 -396
  21. flixopt/utils.py +38 -12
  22. flixopt-3.0.0.dist-info/METADATA +209 -0
  23. flixopt-3.0.0.dist-info/RECORD +26 -0
  24. flixopt-3.0.0.dist-info/top_level.txt +1 -0
  25. docs/examples/00-Minimal Example.md +0 -5
  26. docs/examples/01-Basic Example.md +0 -5
  27. docs/examples/02-Complex Example.md +0 -10
  28. docs/examples/03-Calculation Modes.md +0 -5
  29. docs/examples/index.md +0 -5
  30. docs/faq/contribute.md +0 -61
  31. docs/faq/index.md +0 -3
  32. docs/images/architecture_flixOpt-pre2.0.0.png +0 -0
  33. docs/images/architecture_flixOpt.png +0 -0
  34. docs/images/flixopt-icon.svg +0 -1
  35. docs/javascripts/mathjax.js +0 -18
  36. docs/user-guide/Mathematical Notation/Bus.md +0 -33
  37. docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md +0 -132
  38. docs/user-guide/Mathematical Notation/Flow.md +0 -26
  39. docs/user-guide/Mathematical Notation/LinearConverter.md +0 -21
  40. docs/user-guide/Mathematical Notation/Piecewise.md +0 -49
  41. docs/user-guide/Mathematical Notation/Storage.md +0 -44
  42. docs/user-guide/Mathematical Notation/index.md +0 -22
  43. docs/user-guide/Mathematical Notation/others.md +0 -3
  44. docs/user-guide/index.md +0 -124
  45. flixopt/config.yaml +0 -10
  46. flixopt-2.2.0rc2.dist-info/METADATA +0 -167
  47. flixopt-2.2.0rc2.dist-info/RECORD +0 -54
  48. flixopt-2.2.0rc2.dist-info/top_level.txt +0 -5
  49. pics/architecture_flixOpt-pre2.0.0.png +0 -0
  50. pics/architecture_flixOpt.png +0 -0
  51. pics/flixOpt_plotting.jpg +0 -0
  52. pics/flixopt-icon.svg +0 -1
  53. pics/pics.pptx +0 -0
  54. scripts/extract_release_notes.py +0 -45
  55. scripts/gen_ref_pages.py +0 -54
  56. tests/ressources/Zeitreihen2020.csv +0 -35137
  57. {flixopt-2.2.0rc2.dist-info → flixopt-3.0.0.dist-info}/WHEEL +0 -0
  58. {flixopt-2.2.0rc2.dist-info → flixopt-3.0.0.dist-info}/licenses/LICENSE +0 -0
flixopt/results.py CHANGED
@@ -1,11 +1,13 @@
+ from __future__ import annotations
+
  import datetime
  import json
  import logging
  import pathlib
- from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple, Union
+ import warnings
+ from typing import TYPE_CHECKING, Any, Literal

  import linopy
- import matplotlib.pyplot as plt
  import numpy as np
  import pandas as pd
  import plotly
@@ -14,67 +16,115 @@ import yaml

  from . import io as fx_io
  from . import plotting
- from .core import TimeSeriesCollection
+ from .flow_system import FlowSystem

  if TYPE_CHECKING:
+     import matplotlib.pyplot as plt
      import pyvis

      from .calculation import Calculation, SegmentedCalculation
+     from .core import FlowSystemDimensions


  logger = logging.getLogger('flixopt')


- class CalculationResults:
-     """Results container for Calculation results.
+ class _FlowSystemRestorationError(Exception):
+     """Exception raised when a FlowSystem cannot be restored from dataset."""

-     This class is used to collect the results of a Calculation.
-     It provides access to component, bus, and effect
-     results, and includes methods for filtering, plotting, and saving results.
+     pass

-     The recommended way to create instances is through the class methods
-     `from_file()` or `from_calculation()`, rather than direct initialization.
+
+ class CalculationResults:
+     """Comprehensive container for optimization calculation results and analysis tools.
+
+     This class provides unified access to all optimization results including flow rates,
+     component states, bus balances, and system effects. It offers powerful analysis
+     capabilities through filtering, plotting, and export functionality, making it
+     the primary interface for post-processing optimization results.
+
+     Key Features:
+         **Unified Access**: Single interface to all solution variables and constraints
+         **Element Results**: Direct access to component, bus, and effect-specific results
+         **Visualization**: Built-in plotting methods for heatmaps, time series, and networks
+         **Persistence**: Save/load functionality with compression for large datasets
+         **Analysis Tools**: Filtering, aggregation, and statistical analysis methods
+
+     Result Organization:
+         - **Components**: Equipment-specific results (flows, states, constraints)
+         - **Buses**: Network node balances and energy flows
+         - **Effects**: System-wide impacts (costs, emissions, resource consumption)
+         - **Solution**: Raw optimization variables and their values
+         - **Metadata**: Calculation parameters, timing, and system configuration

      Attributes:
-         solution (xr.Dataset): Dataset containing optimization results.
-         flow_system (xr.Dataset): Dataset containing the flow system.
-         summary (Dict): Information about the calculation.
-         name (str): Name identifier for the calculation.
-         model (linopy.Model): The optimization model (if available).
-         folder (pathlib.Path): Path to the results directory.
-         components (Dict[str, ComponentResults]): Results for each component.
-         buses (Dict[str, BusResults]): Results for each bus.
-         effects (Dict[str, EffectResults]): Results for each effect.
-         timesteps_extra (pd.DatetimeIndex): The extended timesteps.
-         hours_per_timestep (xr.DataArray): Duration of each timestep in hours.
-
-     Example:
-         Load results from saved files:
-
-         >>> results = CalculationResults.from_file('results_dir', 'optimization_run_1')
-         >>> element_result = results['Boiler']
-         >>> results.plot_heatmap('Boiler(Q_th)|flow_rate')
-         >>> results.to_file(compression=5)
-         >>> results.to_file(folder='new_results_dir', compression=5)  # Save the results to a new folder
+         solution: Dataset containing all optimization variable solutions
+         flow_system_data: Dataset with complete system configuration and parameters. Used to restore the FlowSystem for further analysis.
+         summary: Calculation metadata including solver status, timing, and statistics
+         name: Unique identifier for this calculation
+         model: Original linopy optimization model (if available)
+         folder: Directory path for result storage and loading
+         components: Dictionary mapping component labels to ComponentResults objects
+         buses: Dictionary mapping bus labels to BusResults objects
+         effects: Dictionary mapping effect names to EffectResults objects
+         timesteps_extra: Extended time index including boundary conditions
+         hours_per_timestep: Duration of each timestep for proper energy calculations
+
+     Examples:
+         Load and analyze saved results:
+
+         ```python
+         # Load results from file
+         results = CalculationResults.from_file('results', 'annual_optimization')
+
+         # Access specific component results
+         boiler_results = results['Boiler_01']
+         heat_pump_results = results['HeatPump_02']
+
+         # Plot component flow rates
+         results.plot_heatmap('Boiler_01(Natural_Gas)|flow_rate')
+         results['Boiler_01'].plot_node_balance()
+
+         # Access raw solution dataarrays
+         electricity_flows = results.solution[['Generator_01(Grid)|flow_rate', 'HeatPump_02(Grid)|flow_rate']]
+
+         # Filter and analyze results
+         peak_demand_hours = results.filter_solution(variable_dims='time')
+         costs_solution = results.effects['cost'].solution
+         ```
+
+         Advanced filtering and aggregation:
+
+         ```python
+         # Filter by variable type
+         scalar_results = results.filter_solution(variable_dims='scalar')
+         time_series = results.filter_solution(variable_dims='time')
+
+         # Custom data analysis leveraging xarray
+         peak_power = results.solution['Generator_01(Grid)|flow_rate'].max()
+         avg_efficiency = (
+             results.solution['HeatPump(Heat)|flow_rate'] / results.solution['HeatPump(Electricity)|flow_rate']
+         ).mean()
+         ```
+
+     Design Patterns:
+         **Factory Methods**: Use `from_file()` and `from_calculation()` for creation, or access directly from `Calculation.results`
+         **Dictionary Access**: Use `results[element_label]` for element-specific results
+         **Lazy Loading**: Results objects created on-demand for memory efficiency
+         **Unified Interface**: Consistent API across different result types
+
      """

      @classmethod
-     def from_file(cls, folder: Union[str, pathlib.Path], name: str):
-         """Create CalculationResults instance by loading from saved files.
-
-         This method loads the calculation results from previously saved files,
-         including the solution, flow system, model (if available), and metadata.
+     def from_file(cls, folder: str | pathlib.Path, name: str) -> CalculationResults:
+         """Load CalculationResults from saved files.

          Args:
-             folder: Path to the directory containing the saved files.
-             name: Base name of the saved files (without file extensions).
+             folder: Directory containing saved files.
+             name: Base name of saved files (without extensions).

          Returns:
-             CalculationResults: A new instance containing the loaded data.
-
-         Raises:
-             FileNotFoundError: If required files cannot be found.
-             ValueError: If files exist but cannot be properly loaded.
+             CalculationResults: Loaded instance.
          """
          folder = pathlib.Path(folder)
          paths = fx_io.CalculationResultsPaths(folder, name)
@@ -87,12 +137,12 @@ class CalculationResults:
          except Exception as e:
              logger.critical(f'Could not load the linopy model "{name}" from file ("{paths.linopy_model}"): {e}')

-         with open(paths.summary, 'r', encoding='utf-8') as f:
+         with open(paths.summary, encoding='utf-8') as f:
              summary = yaml.load(f, Loader=yaml.FullLoader)

          return cls(
              solution=fx_io.load_dataset_from_netcdf(paths.solution),
-             flow_system=fx_io.load_dataset_from_netcdf(paths.flow_system),
+             flow_system_data=fx_io.load_dataset_from_netcdf(paths.flow_system),
              name=name,
              folder=folder,
              model=model,
@@ -100,25 +150,18 @@ class CalculationResults:
          )

      @classmethod
-     def from_calculation(cls, calculation: 'Calculation'):
-         """Create CalculationResults directly from a Calculation object.
-
-         This method extracts the solution, flow system, and other relevant
-         information directly from an existing Calculation object.
+     def from_calculation(cls, calculation: Calculation) -> CalculationResults:
+         """Create CalculationResults from a Calculation object.

          Args:
-             calculation: A Calculation object containing a solved model.
+             calculation: Calculation object with solved model.

          Returns:
-             CalculationResults: A new instance containing the results from
-                 the provided calculation.
-
-         Raises:
-             AttributeError: If the calculation doesn't have required attributes.
+             CalculationResults: New instance with extracted results.
          """
          return cls(
              solution=calculation.model.solution,
-             flow_system=calculation.flow_system.as_dataset(constants_in_dataset=True),
+             flow_system_data=calculation.flow_system.to_dataset(),
              summary=calculation.summary,
              model=calculation.model,
              name=calculation.name,
@@ -128,87 +171,519 @@ class CalculationResults:
      def __init__(
          self,
          solution: xr.Dataset,
-         flow_system: xr.Dataset,
+         flow_system_data: xr.Dataset,
          name: str,
-         summary: Dict,
-         folder: Optional[pathlib.Path] = None,
-         model: Optional[linopy.Model] = None,
+         summary: dict,
+         folder: pathlib.Path | None = None,
+         model: linopy.Model | None = None,
+         **kwargs,  # To accept old "flow_system" parameter
      ):
-         """
+         """Initialize CalculationResults with optimization data.
+         Usually, this class is instantiated by the Calculation class, or by loading from file.
+
          Args:
-             solution: The solution of the optimization.
-             flow_system: The flow_system that was used to create the calculation as a datatset.
-             name: The name of the calculation.
-             summary: Information about the calculation,
-             folder: The folder where the results are saved.
-             model: The linopy model that was used to solve the calculation.
+             solution: Optimization solution dataset.
+             flow_system_data: Flow system configuration dataset.
+             name: Calculation name.
+             summary: Calculation metadata.
+             folder: Results storage folder.
+             model: Linopy optimization model.
+         Deprecated:
+             flow_system: Use flow_system_data instead.
          """
+         # Handle potential old "flow_system" parameter for backward compatibility
+         if 'flow_system' in kwargs and flow_system_data is None:
+             flow_system_data = kwargs.pop('flow_system')
+             warnings.warn(
+                 "The 'flow_system' parameter is deprecated. Use 'flow_system_data' instead. "
+                 "Access is now via '.flow_system_data', while '.flow_system' returns the restored FlowSystem.",
+                 DeprecationWarning,
+                 stacklevel=2,
+             )
+
          self.solution = solution
-         self.flow_system = flow_system
+         self.flow_system_data = flow_system_data
          self.summary = summary
          self.name = name
          self.model = model
          self.folder = pathlib.Path(folder) if folder is not None else pathlib.Path.cwd() / 'results'
          self.components = {
-             label: ComponentResults.from_json(self, infos) for label, infos in self.solution.attrs['Components'].items()
+             label: ComponentResults(self, **infos) for label, infos in self.solution.attrs['Components'].items()
          }

-         self.buses = {label: BusResults.from_json(self, infos) for label, infos in self.solution.attrs['Buses'].items()}
+         self.buses = {label: BusResults(self, **infos) for label, infos in self.solution.attrs['Buses'].items()}

-         self.effects = {
-             label: EffectResults.from_json(self, infos) for label, infos in self.solution.attrs['Effects'].items()
-         }
+         self.effects = {label: EffectResults(self, **infos) for label, infos in self.solution.attrs['Effects'].items()}
+
+         if 'Flows' not in self.solution.attrs:
+             warnings.warn(
+                 'No data about flows found in the results. This data is only included since v2.2.0. Some functionality '
+                 'is not available. We recommend evaluating your results with a version <2.2.0.',
+                 stacklevel=2,
+             )
+             self.flows = {}
+         else:
+             self.flows = {
+                 label: FlowResults(self, **infos) for label, infos in self.solution.attrs.get('Flows', {}).items()
+             }

          self.timesteps_extra = self.solution.indexes['time']
-         self.hours_per_timestep = TimeSeriesCollection.calculate_hours_per_timestep(self.timesteps_extra)
+         self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.timesteps_extra)
+         self.scenarios = self.solution.indexes['scenario'] if 'scenario' in self.solution.indexes else None
+
+         self._effect_share_factors = None
+         self._flow_system = None
+
+         self._flow_rates = None
+         self._flow_hours = None
+         self._sizes = None
+         self._effects_per_component = None

-     def __getitem__(self, key: str) -> Union['ComponentResults', 'BusResults', 'EffectResults']:
+     def __getitem__(self, key: str) -> ComponentResults | BusResults | EffectResults:
          if key in self.components:
              return self.components[key]
          if key in self.buses:
              return self.buses[key]
          if key in self.effects:
              return self.effects[key]
+         if key in self.flows:
+             return self.flows[key]
          raise KeyError(f'No element with label {key} found.')

      @property
-     def storages(self) -> List['ComponentResults']:
-         """All storages in the results."""
+     def storages(self) -> list[ComponentResults]:
+         """Get all storage components in the results."""
          return [comp for comp in self.components.values() if comp.is_storage]

      @property
      def objective(self) -> float:
-         """The objective result of the optimization."""
-         return self.summary['Main Results']['Objective']
+         """Get optimization objective value."""
+         # Deprecated fallback
+         if 'objective' not in self.solution:
+             logger.warning('Objective not found in solution. Falling back to summary (rounded value). This is deprecated.')
+             return self.summary['Main Results']['Objective']
+
+         return self.solution['objective'].item()

      @property
      def variables(self) -> linopy.Variables:
-         """The variables of the optimization. Only available if the linopy.Model is available."""
+         """Get optimization variables (requires linopy model)."""
          if self.model is None:
              raise ValueError('The linopy model is not available.')
          return self.model.variables

      @property
      def constraints(self) -> linopy.Constraints:
-         """The constraints of the optimization. Only available if the linopy.Model is available."""
+         """Get optimization constraints (requires linopy model)."""
          if self.model is None:
              raise ValueError('The linopy model is not available.')
          return self.model.constraints

+     @property
+     def effect_share_factors(self):
+         if self._effect_share_factors is None:
+             effect_share_factors = self.flow_system.effects.calculate_effect_share_factors()
+             self._effect_share_factors = {'temporal': effect_share_factors[0], 'periodic': effect_share_factors[1]}
+         return self._effect_share_factors
+
+     @property
+     def flow_system(self) -> FlowSystem:
+         """The restored flow_system that was used to create the calculation.
+         Contains all input parameters."""
+         if self._flow_system is None:
+             old_level = logger.level
+             logger.level = logging.CRITICAL
+             try:
+                 self._flow_system = FlowSystem.from_dataset(self.flow_system_data)
+                 self._flow_system._connect_network()
+             except Exception as e:
+                 logger.critical(
+                     f'Not able to restore FlowSystem from dataset. Some functionality is not available. {e}'
+                 )
+                 raise _FlowSystemRestorationError(f'Not able to restore FlowSystem from dataset. {e}') from e
+             finally:
+                 logger.level = old_level
+         return self._flow_system
+
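The lazily restored `FlowSystem` can be pulled straight off the results for downstream work. A minimal usage sketch, assuming the folder and calculation names from the docstring examples above:

```python
from flixopt.results import CalculationResults

results = CalculationResults.from_file('results', 'annual_optimization')  # illustrative paths
fs = results.flow_system   # restored once from results.flow_system_data, then cached
fs.plot_network(show=True)  # the same plot that results.plot_network() delegates to
```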
      def filter_solution(
-         self, variable_dims: Optional[Literal['scalar', 'time']] = None, element: Optional[str] = None
+         self,
+         variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None,
+         element: str | None = None,
+         timesteps: pd.DatetimeIndex | None = None,
+         scenarios: pd.Index | None = None,
+         contains: str | list[str] | None = None,
+         startswith: str | list[str] | None = None,
      ) -> xr.Dataset:
-         """
-         Filter the solution to a specific variable dimension and element.
-         If no element is specified, all elements are included.
+         """Filter solution by variable dimension and/or element.

          Args:
-             variable_dims: The dimension of the variables to filter for.
+             variable_dims: The dimension of which to get variables from.
+                 - 'scalar': Get scalar variables (without dimensions)
+                 - 'time': Get time-dependent variables (with a time dimension)
+                 - 'scenario': Get scenario-dependent variables (with a scenario dimension)
+                 - 'timeonly': Get time-dependent variables (with ONLY a time dimension)
+                 - 'scenarioonly': Get scenario-dependent variables (with ONLY a scenario dimension)
              element: The element to filter for.
+             timesteps: Optional time indexes to select. Can be:
+                 - pd.DatetimeIndex: Multiple timesteps
+                 - str/pd.Timestamp: Single timestep
+                 Defaults to all available timesteps.
+             scenarios: Optional scenario indexes to select. Can be:
+                 - pd.Index: Multiple scenarios
+                 - str/int: Single scenario (int is treated as a label, not an index position)
+                 Defaults to all available scenarios.
+             contains: Filter variables that contain this string or strings.
+                 If a list is provided, variables must contain ALL strings in the list.
+             startswith: Filter variables that start with this string or strings.
+                 If a list is provided, variables must start with ANY of the strings in the list.
+         """
+         return filter_dataset(
+             self.solution if element is None else self[element].solution,
+             variable_dims=variable_dims,
+             timesteps=timesteps,
+             scenarios=scenarios,
+             contains=contains,
+             startswith=startswith,
+         )
+
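A usage sketch of the extended filtering, based on the argument semantics documented above (element and scenario labels are illustrative):

```python
# Time-dependent variables whose names contain both strings
boiler_heat = results.filter_solution(variable_dims='time', contains=['Boiler', 'flow_rate'])

# Variables starting with either prefix, restricted to one scenario label
subset = results.filter_solution(startswith=['Boiler_01', 'HeatPump_02'], scenarios='base')
```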
+     @property
+     def effects_per_component(self) -> xr.Dataset:
+         """Returns a dataset containing effect results for each mode, aggregated by component.
+
+         Returns:
+             An xarray Dataset with an additional component dimension and effects as variables.
+         """
+         if self._effects_per_component is None:
+             self._effects_per_component = xr.Dataset(
+                 {
+                     mode: self._create_effects_dataset(mode).to_dataarray('effect', name=mode)
+                     for mode in ['temporal', 'periodic', 'total']
+                 }
+             )
+             dim_order = ['time', 'period', 'scenario', 'component', 'effect']
+             self._effects_per_component = self._effects_per_component.transpose(*dim_order, missing_dims='ignore')
+
+         return self._effects_per_component
+
+     def flow_rates(
+         self,
+         start: str | list[str] | None = None,
+         end: str | list[str] | None = None,
+         component: str | list[str] | None = None,
+     ) -> xr.DataArray:
+         """Returns a DataArray containing the flow rates of each Flow.
+
+         Args:
+             start: Optional source node(s) to filter by. Can be a single node name or a list of names.
+             end: Optional destination node(s) to filter by. Can be a single node name or a list of names.
+             component: Optional component(s) to filter by. Can be a single component name or a list of names.
+
+         Further usage:
+             Convert the dataarray to a dataframe:
+             >>> results.flow_rates().to_pandas()
+             Get the max or min over time:
+             >>> results.flow_rates().max('time')
+             Sum up the flow rates of flows with the same start and end:
+             >>> results.flow_rates(end='Fernwärme').groupby('start').sum(dim='flow')
+             To recombine filtered dataarrays, use `xr.concat` with dim 'flow':
+             >>> xr.concat([results.flow_rates(start='Fernwärme'), results.flow_rates(end='Fernwärme')], dim='flow')
+         """
+         if self._flow_rates is None:
+             self._flow_rates = self._assign_flow_coords(
+                 xr.concat(
+                     [flow.flow_rate.rename(flow.label) for flow in self.flows.values()],
+                     dim=pd.Index(self.flows.keys(), name='flow'),
+                 )
+             ).rename('flow_rates')
+         filters = {k: v for k, v in {'start': start, 'end': end, 'component': component}.items() if v is not None}
+         return filter_dataarray_by_coord(self._flow_rates, **filters)
+
+     def flow_hours(
+         self,
+         start: str | list[str] | None = None,
+         end: str | list[str] | None = None,
+         component: str | list[str] | None = None,
+     ) -> xr.DataArray:
+         """Returns a DataArray containing the flow hours of each Flow.
+
+         Flow hours represent the total energy/material transferred over time,
+         calculated by multiplying flow rates by the duration of each timestep.
+
+         Args:
+             start: Optional source node(s) to filter by. Can be a single node name or a list of names.
+             end: Optional destination node(s) to filter by. Can be a single node name or a list of names.
+             component: Optional component(s) to filter by. Can be a single component name or a list of names.
+
+         Further usage:
+             Convert the dataarray to a dataframe:
+             >>> results.flow_hours().to_pandas()
+             Sum up the flow hours over time:
+             >>> results.flow_hours().sum('time')
+             Sum up the flow hours of flows with the same start and end:
+             >>> results.flow_hours(end='Fernwärme').groupby('start').sum(dim='flow')
+             To recombine filtered dataarrays, use `xr.concat` with dim 'flow':
+             >>> xr.concat([results.flow_hours(start='Fernwärme'), results.flow_hours(end='Fernwärme')], dim='flow')
+
+         """
+         if self._flow_hours is None:
+             self._flow_hours = (self.flow_rates() * self.hours_per_timestep).rename('flow_hours')
+         filters = {k: v for k, v in {'start': start, 'end': end, 'component': component}.items() if v is not None}
+         return filter_dataarray_by_coord(self._flow_hours, **filters)
+
+     def sizes(
+         self,
+         start: str | list[str] | None = None,
+         end: str | list[str] | None = None,
+         component: str | list[str] | None = None,
+     ) -> xr.DataArray:
+         """Returns a DataArray with the sizes of the Flows.
+
+         Args:
+             start: Optional source node(s) to filter by. Can be a single node name or a list of names.
+             end: Optional destination node(s) to filter by. Can be a single node name or a list of names.
+             component: Optional component(s) to filter by. Can be a single component name or a list of names.
+
+         Further usage:
+             Convert the dataarray to a dataframe:
+             >>> results.sizes().to_pandas()
+             To recombine filtered dataarrays, use `xr.concat` with dim 'flow':
+             >>> xr.concat([results.sizes(start='Fernwärme'), results.sizes(end='Fernwärme')], dim='flow')
+
+         """
+         if self._sizes is None:
+             self._sizes = self._assign_flow_coords(
+                 xr.concat(
+                     [flow.size.rename(flow.label) for flow in self.flows.values()],
+                     dim=pd.Index(self.flows.keys(), name='flow'),
+                 )
+             ).rename('flow_sizes')
+         filters = {k: v for k, v in {'start': start, 'end': end, 'component': component}.items() if v is not None}
+         return filter_dataarray_by_coord(self._sizes, **filters)
+
+     def _assign_flow_coords(self, da: xr.DataArray):
+         # Add start, end, and component coordinates along the flow dimension
+         da = da.assign_coords(
+             {
+                 'start': ('flow', [flow.start for flow in self.flows.values()]),
+                 'end': ('flow', [flow.end for flow in self.flows.values()]),
+                 'component': ('flow', [flow.component for flow in self.flows.values()]),
+             }
+         )
+
+         # Ensure flow is the last dimension if needed
+         existing_dims = [d for d in da.dims if d != 'flow']
+         da = da.transpose(*(existing_dims + ['flow']))
+         return da
+
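Since `flow_rates()`, `flow_hours()`, and `sizes()` all return DataArrays sharing the start/end/component coordinates assigned above, they compose naturally in xarray. An illustrative sketch (the bus name 'Fernwärme' is taken from the docstrings above):

```python
# Peak flow rate vs. installed size per flow, side by side
peaks = results.flow_rates().max('time').to_pandas()
sizes = results.sizes().to_pandas()

# Total energy delivered to one bus, grouped by source node
delivered = results.flow_hours(end='Fernwärme').groupby('start').sum(dim='flow').sum('time')
```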
+     def get_effect_shares(
+         self,
+         element: str,
+         effect: str,
+         mode: Literal['temporal', 'periodic'] | None = None,
+         include_flows: bool = False,
+     ) -> xr.Dataset:
+         """Retrieves individual effect shares for a specific element and effect,
+         either for temporal, periodic, or both modes combined.
+         Only includes the direct shares.
+
+         Args:
+             element: The element identifier for which to retrieve effect shares.
+             effect: The effect identifier for which to retrieve shares.
+             mode: Optional. The mode to retrieve shares for. Can be 'temporal', 'periodic',
+                 or None to retrieve both. Defaults to None.
+             include_flows: Whether to include the shares of the flows connected to the element.
+
+         Returns:
+             An xarray Dataset containing the requested effect shares. If mode is None,
+             returns a merged Dataset containing both temporal and periodic shares.
+
+         Raises:
+             ValueError: If the specified effect is not available or if mode is invalid.
+         """
+         if effect not in self.effects:
+             raise ValueError(f'Effect {effect} is not available.')
+
+         if mode is None:
+             return xr.merge(
+                 [
+                     self.get_effect_shares(
+                         element=element, effect=effect, mode='temporal', include_flows=include_flows
+                     ),
+                     self.get_effect_shares(
+                         element=element, effect=effect, mode='periodic', include_flows=include_flows
+                     ),
+                 ]
+             )
+
+         if mode not in ['temporal', 'periodic']:
+             raise ValueError(f'Mode {mode} is not available. Choose between "temporal" and "periodic".')
+
+         ds = xr.Dataset()
+
+         label = f'{element}->{effect}({mode})'
+         if label in self.solution:
+             ds = xr.Dataset({label: self.solution[label]})
+
+         if include_flows:
+             if element not in self.components:
+                 raise ValueError(f'Only use Components when retrieving Effects including flows. Got {element}')
+             flows = [
+                 label.split('|')[0] for label in self.components[element].inputs + self.components[element].outputs
+             ]
+             return xr.merge(
+                 [ds]
+                 + [
+                     self.get_effect_shares(element=flow, effect=effect, mode=mode, include_flows=False)
+                     for flow in flows
+                 ]
+             )
+
+         return ds
+
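A hedged example of querying direct shares; the element and effect labels ('Boiler_01', 'cost') are illustrative and must match names in your model:

```python
# Direct temporal cost shares of one component, including its connected flows
shares = results.get_effect_shares('Boiler_01', 'cost', mode='temporal', include_flows=True)

# Both temporal and periodic shares merged into one dataset
all_shares = results.get_effect_shares('Boiler_01', 'cost')
```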
+     def _compute_effect_total(
+         self,
+         element: str,
+         effect: str,
+         mode: Literal['temporal', 'periodic', 'total'] = 'total',
+         include_flows: bool = False,
+     ) -> xr.DataArray:
+         """Calculates the total effect for a specific element and effect.
+
+         This method computes the total direct and indirect effects for a given element
+         and effect, considering the conversion factors between different effects.
+
+         Args:
+             element: The element identifier for which to calculate total effects.
+             effect: The effect identifier to calculate.
+             mode: The calculation mode. Options are:
+                 'temporal': Returns temporal effects.
+                 'periodic': Returns investment-specific effects.
+                 'total': Returns the sum of temporal effects and periodic effects. Defaults to 'total'.
+             include_flows: Whether to include effects from flows connected to this element.
+
+         Returns:
+             An xarray DataArray containing the total effects, named with pattern
+             '{element}->{effect}' for mode='total' or '{element}->{effect}({mode})'
+             for other modes.
+
+         Raises:
+             ValueError: If the specified effect is not available.
+         """
+         if effect not in self.effects:
+             raise ValueError(f'Effect {effect} is not available.')
+
+         if mode == 'total':
+             temporal = self._compute_effect_total(
+                 element=element, effect=effect, mode='temporal', include_flows=include_flows
+             )
+             periodic = self._compute_effect_total(
+                 element=element, effect=effect, mode='periodic', include_flows=include_flows
+             )
+             if periodic.isnull().all() and temporal.isnull().all():
+                 return xr.DataArray(np.nan)
+             if temporal.isnull().all():
+                 return periodic.rename(f'{element}->{effect}')
+             temporal = temporal.sum('time')
+             if periodic.isnull().all():
+                 return temporal.rename(f'{element}->{effect}')
+             if 'time' in temporal.indexes:
+                 temporal = temporal.sum('time')
+             return periodic + temporal
+
+         total = xr.DataArray(0)
+         share_exists = False
+
+         relevant_conversion_factors = {
+             key[0]: value for key, value in self.effect_share_factors[mode].items() if key[1] == effect
+         }
+         relevant_conversion_factors[effect] = 1  # Share to itself is 1
+
+         for target_effect, conversion_factor in relevant_conversion_factors.items():
+             label = f'{element}->{target_effect}({mode})'
+             if label in self.solution:
+                 share_exists = True
+                 da = self.solution[label]
+                 total = da * conversion_factor + total
+
+             if include_flows:
+                 if element not in self.components:
+                     raise ValueError(f'Only use Components when retrieving Effects including flows. Got {element}')
+                 flows = [
+                     label.split('|')[0] for label in self.components[element].inputs + self.components[element].outputs
+                 ]
+                 for flow in flows:
+                     label = f'{flow}->{target_effect}({mode})'
+                     if label in self.solution:
+                         share_exists = True
+                         da = self.solution[label]
+                         total = da * conversion_factor + total
+         if not share_exists:
+             total = xr.DataArray(np.nan)
+         return total.rename(f'{element}->{effect}({mode})')
+
+     def _create_effects_dataset(self, mode: Literal['temporal', 'periodic', 'total']) -> xr.Dataset:
+         """Creates a dataset containing effect totals for all components (including their flows).
+         The dataset contains both the direct and the indirect effects of each component.
+
+         Args:
+             mode: The calculation mode ('temporal', 'periodic', or 'total').
+
+         Returns:
+             An xarray Dataset with components as dimension and effects as variables.
          """
-         if element is not None:
-             return filter_dataset(self[element].solution, variable_dims)
-         return filter_dataset(self.solution, variable_dims)
+         ds = xr.Dataset()
+         all_arrays = {}
+         template = None  # Template is needed to determine the dimensions of the arrays. This handles the case of no shares for an effect
+
+         components_list = list(self.components)
+
+         # First pass: collect arrays and find template
+         for effect in self.effects:
+             effect_arrays = []
+             for component in components_list:
+                 da = self._compute_effect_total(element=component, effect=effect, mode=mode, include_flows=True)
+                 effect_arrays.append(da)
+
+                 if template is None and (da.dims or not da.isnull().all()):
+                     template = da
+
+             all_arrays[effect] = effect_arrays
+
+         # Ensure we have a template
+         if template is None:
+             raise ValueError(
+                 f"No template with proper dimensions found for mode '{mode}'. "
+                 f'All computed arrays are scalars, which indicates a data issue.'
+             )
+
+         # Second pass: process all effects (guaranteed to include all)
+         for effect in self.effects:
+             dataarrays = all_arrays[effect]
+             component_arrays = []
+
+             for component, arr in zip(components_list, dataarrays, strict=False):
+                 # Expand scalar NaN arrays to match template dimensions
+                 if not arr.dims and np.isnan(arr.item()):
+                     arr = xr.full_like(template, np.nan, dtype=float).rename(arr.name)
+
+                 component_arrays.append(arr.expand_dims(component=[component]))
+
+             ds[effect] = xr.concat(component_arrays, dim='component', coords='minimal', join='outer').rename(effect)
+
+         # For now, include a test to ensure correctness
+         suffix = {
+             'temporal': '(temporal)|per_timestep',
+             'periodic': '(periodic)',
+             'total': '',
+         }
+         for effect in self.effects:
+             label = f'{effect}{suffix[mode]}'
+             computed = ds[effect].sum('component')
+             found = self.solution[label]
+             if not np.allclose(computed.values, found.fillna(0).values):
+                 logger.critical(
+                     f"Results for {effect}({mode}) in effects_dataset doesn't match {label}\n{computed=}\n{found=}"
+                 )
+
+         return ds
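Downstream, the public `effects_per_component` property exposes these datasets. A short slicing sketch, assuming an effect named 'cost' as in the class docstring:

```python
per_component = results.effects_per_component  # variables: 'temporal', 'periodic', 'total'

# Total cost attributed to each component, as a pandas object
total_costs = per_component['total'].sel(effect='cost').to_pandas()
```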

      def plot_heatmap(
          self,
@@ -216,12 +691,55 @@ class CalculationResults:
          heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
          heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
          color_map: str = 'portland',
-         save: Union[bool, pathlib.Path] = False,
+         save: bool | pathlib.Path = False,
          show: bool = True,
          engine: plotting.PlottingEngine = 'plotly',
-     ) -> Union[plotly.graph_objs.Figure, Tuple[plt.Figure, plt.Axes]]:
+         indexer: dict[FlowSystemDimensions, Any] | None = None,
+     ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
+         """
+         Plots a heatmap of the solution of a variable.
+
+         Args:
+             variable_name: The name of the variable to plot.
+             heatmap_timeframes: The timeframes to use for the heatmap.
+             heatmap_timesteps_per_frame: The timesteps per frame to use for the heatmap.
+             color_map: The color map to use for the heatmap.
+             save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
+             show: Whether to show the plot or not.
+             engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
+             indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
+                 If None, uses first value for each dimension.
+                 If empty dict {}, uses all values.
+
+         Examples:
+             Basic usage (uses first scenario, first period, all time):
+
+             >>> results.plot_heatmap('Battery|charge_state')
+
+             Select specific scenario and period:
+
+             >>> results.plot_heatmap('Boiler(Qth)|flow_rate', indexer={'scenario': 'base', 'period': 2024})
+
+             Time filtering (summer months only):
+
+             >>> results.plot_heatmap(
+             ...     'Boiler(Qth)|flow_rate',
+             ...     indexer={
+             ...         'scenario': 'base',
+             ...         'time': results.solution.time[results.solution.time.dt.month.isin([6, 7, 8])],
+             ...     },
+             ... )
+
+             Save to specific location:
+
+             >>> results.plot_heatmap(
+             ...     'Boiler(Qth)|flow_rate', indexer={'scenario': 'base'}, save='path/to/my_heatmap.html'
+             ... )
+         """
+         dataarray = self.solution[variable_name]
+
          return plot_heatmap(
-             dataarray=self.solution[variable_name],
+             dataarray=dataarray,
              name=variable_name,
              folder=self.folder,
              heatmap_timeframes=heatmap_timeframes,
@@ -230,48 +748,47 @@ class CalculationResults:
              save=save,
              show=show,
              engine=engine,
+             indexer=indexer,
          )

      def plot_network(
          self,
-         controls: Union[
-             bool,
-             List[
+         controls: (
+             bool
+             | list[
                  Literal['nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer']
-             ],
-         ] = True,
-         path: Optional[pathlib.Path] = None,
+             ]
+         ) = True,
+         path: pathlib.Path | None = None,
          show: bool = False,
-     ) -> 'pyvis.network.Network':
-         """See flixopt.flow_system.FlowSystem.plot_network"""
-         try:
-             from .flow_system import FlowSystem
+     ) -> pyvis.network.Network | None:
+         """Plot interactive network visualization of the system.

-             flow_system = FlowSystem.from_dataset(self.flow_system)
-         except Exception as e:
-             logger.critical(f'Could not reconstruct the flow_system from dataset: {e}')
-             return None
+         Args:
+             controls: Enable/disable interactive controls.
+             path: Save path for network HTML.
+             show: Whether to display the plot.
+         """
          if path is None:
              path = self.folder / f'{self.name}--network.html'
-         return flow_system.plot_network(controls=controls, path=path, show=show)
+         return self.flow_system.plot_network(controls=controls, path=path, show=show)

      def to_file(
          self,
-         folder: Optional[Union[str, pathlib.Path]] = None,
-         name: Optional[str] = None,
+         folder: str | pathlib.Path | None = None,
+         name: str | None = None,
          compression: int = 5,
          document_model: bool = True,
          save_linopy_model: bool = False,
      ):
-         """
-         Save the results to a file
+         """Save results to files.
+
          Args:
-             folder: The folder where the results should be saved. Defaults to the folder of the calculation.
-             name: The name of the results file. If not provided, Defaults to the name of the calculation.
-             compression: The compression level to use when saving the solution file (0-9). 0 means no compression.
-             document_model: Wether to document the mathematical formulations in the model.
-             save_linopy_model: Wether to save the model to file. If True, the (linopy) model is saved as a .nc4 file.
-                 The model file size is rougly 100 times larger than the solution file.
+             folder: Save folder (defaults to calculation folder).
+             name: File name (defaults to calculation name).
+             compression: Compression level 0-9.
+             document_model: Whether to document model formulations as yaml.
+             save_linopy_model: Whether to save linopy model file.
          """
          folder = self.folder if folder is None else pathlib.Path(folder)
          name = self.name if name is None else name
@@ -286,7 +803,7 @@ class CalculationResults:
          paths = fx_io.CalculationResultsPaths(folder, name)

          fx_io.save_dataset_to_netcdf(self.solution, paths.solution, compression=compression)
-         fx_io.save_dataset_to_netcdf(self.flow_system, paths.flow_system, compression=compression)
+         fx_io.save_dataset_to_netcdf(self.flow_system_data, paths.flow_system, compression=compression)

          with open(paths.summary, 'w', encoding='utf-8') as f:
              yaml.dump(self.summary, f, allow_unicode=True, sort_keys=False, indent=4, width=1000)
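A round-trip sketch for the renamed dataset attribute (folder and name are illustrative):

```python
# Save with moderate compression, then reload under the same name
results.to_file(folder='results', name='annual_optimization', compression=5)
reloaded = CalculationResults.from_file('results', 'annual_optimization')
assert reloaded.flow_system_data is not None  # stored alongside the solution
```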
@@ -295,7 +812,7 @@ class CalculationResults:
          if self.model is None:
              logger.critical('No model in the CalculationResults. Saving the model is not possible.')
          else:
-             self.model.to_netcdf(paths.linopy_model)
+             self.model.to_netcdf(paths.linopy_model, engine='h5netcdf')

          if document_model:
              if self.model is None:
@@ -307,12 +824,8 @@ class CalculationResults:


  class _ElementResults:
-     @classmethod
-     def from_json(cls, calculation_results, json_data: Dict) -> '_ElementResults':
-         return cls(calculation_results, json_data['label'], json_data['variables'], json_data['constraints'])
-
      def __init__(
-         self, calculation_results: CalculationResults, label: str, variables: List[str], constraints: List[str]
+         self, calculation_results: CalculationResults, label: str, variables: list[str], constraints: list[str]
      ):
          self._calculation_results = calculation_results
          self.label = label
@@ -323,11 +836,10 @@ class _ElementResults:

      @property
      def variables(self) -> linopy.Variables:
-         """
-         Returns the variables of the element.
+         """Get element variables (requires linopy model).

          Raises:
-             ValueError: If the linopy model is not availlable.
+             ValueError: If linopy model is unavailable.
          """
          if self._calculation_results.model is None:
              raise ValueError('The linopy model is not available.')
@@ -335,79 +847,120 @@ class _ElementResults:

      @property
      def constraints(self) -> linopy.Constraints:
-         """
-         Returns the variables of the element.
+         """Get element constraints (requires linopy model).

          Raises:
-             ValueError: If the linopy model is not availlable.
+             ValueError: If linopy model is unavailable.
          """
          if self._calculation_results.model is None:
              raise ValueError('The linopy model is not available.')
          return self._calculation_results.model.constraints[self._constraint_names]

-     def filter_solution(self, variable_dims: Optional[Literal['scalar', 'time']] = None) -> xr.Dataset:
+     def filter_solution(
+         self,
+         variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None,
+         timesteps: pd.DatetimeIndex | None = None,
+         scenarios: pd.Index | None = None,
+         contains: str | list[str] | None = None,
+         startswith: str | list[str] | None = None,
+     ) -> xr.Dataset:
          """
-         Filter the solution of the element by dimension.
+         Filter the solution of this element by variable dimension and further criteria.

          Args:
-             variable_dims: The dimension of the variables to filter for.
+             variable_dims: The dimension of which to get variables from.
+                 - 'scalar': Get scalar variables (without dimensions)
+                 - 'time': Get time-dependent variables (with a time dimension)
+                 - 'scenario': Get scenario-dependent variables (with a scenario dimension)
+                 - 'timeonly': Get time-dependent variables (with ONLY a time dimension)
+                 - 'scenarioonly': Get scenario-dependent variables (with ONLY a scenario dimension)
+             timesteps: Optional time indexes to select. Can be:
+                 - pd.DatetimeIndex: Multiple timesteps
+                 - str/pd.Timestamp: Single timestep
+                 Defaults to all available timesteps.
+             scenarios: Optional scenario indexes to select. Can be:
+                 - pd.Index: Multiple scenarios
+                 - str/int: Single scenario (int is treated as a label, not an index position)
+                 Defaults to all available scenarios.
+             contains: Filter variables that contain this string or strings.
+                 If a list is provided, variables must contain ALL strings in the list.
+             startswith: Filter variables that start with this string or strings.
+                 If a list is provided, variables must start with ANY of the strings in the list.
          """
-         return filter_dataset(self.solution, variable_dims)
+         return filter_dataset(
+             self.solution,
+             variable_dims=variable_dims,
+             timesteps=timesteps,
+             scenarios=scenarios,
+             contains=contains,
+             startswith=startswith,
+         )


  class _NodeResults(_ElementResults):
-     @classmethod
-     def from_json(cls, calculation_results, json_data: Dict) -> '_NodeResults':
-         return cls(
-             calculation_results,
-             json_data['label'],
-             json_data['variables'],
-             json_data['constraints'],
-             json_data['inputs'],
-             json_data['outputs'],
-         )
-
      def __init__(
          self,
          calculation_results: CalculationResults,
          label: str,
-         variables: List[str],
-         constraints: List[str],
-         inputs: List[str],
-         outputs: List[str],
+         variables: list[str],
+         constraints: list[str],
+         inputs: list[str],
+         outputs: list[str],
+         flows: list[str],
      ):
          super().__init__(calculation_results, label, variables, constraints)
          self.inputs = inputs
          self.outputs = outputs
+         self.flows = flows

      def plot_node_balance(
          self,
-         save: Union[bool, pathlib.Path] = False,
+         save: bool | pathlib.Path = False,
          show: bool = True,
          colors: plotting.ColorType = 'viridis',
          engine: plotting.PlottingEngine = 'plotly',
-     ) -> Union[plotly.graph_objs.Figure, Tuple[plt.Figure, plt.Axes]]:
+         indexer: dict[FlowSystemDimensions, Any] | None = None,
+         mode: Literal['flow_rate', 'flow_hours'] = 'flow_rate',
+         style: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar',
+         drop_suffix: bool = True,
+     ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
          """
          Plots the node balance of the Component or Bus.
          Args:
              save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
              show: Whether to show the plot or not.
+             colors: The colors to use for the plot. See `flixopt.plotting.ColorType` for options.
              engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
+             indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
+                 If None, uses first value for each dimension (except time).
+                 If empty dict {}, uses all values.
+             mode: What to plot. Can be 'flow_rate' or 'flow_hours'.
+                 - 'flow_rate': Plots the flow_rates of the Node.
+                 - 'flow_hours': Plots the flow_hours of the Node. [flow_hours(t) = flow_rate(t) * dt(t)]. Renames suffixes to |flow_hours.
+             style: The plot style. Can be 'area', 'stacked_bar', or 'line'.
+             drop_suffix: Whether to drop the suffix from the variable names.
          """
+         ds = self.node_balance(with_last_timestep=True, mode=mode, drop_suffix=drop_suffix, indexer=indexer)
+
+         ds, suffix_parts = _apply_indexer_to_data(ds, indexer, drop=True)
+         suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
+
+         title = f'{self.label} (flow rates){suffix}' if mode == 'flow_rate' else f'{self.label} (flow hours){suffix}'
+
          if engine == 'plotly':
              figure_like = plotting.with_plotly(
-                 self.node_balance(with_last_timestep=True).to_dataframe(),
+                 ds.to_dataframe(),
                  colors=colors,
-                 mode='area',
-                 title=f'Flow rates of {self.label}',
+                 style=style,
+                 title=title,
              )
              default_filetype = '.html'
          elif engine == 'matplotlib':
              figure_like = plotting.with_matplotlib(
-                 self.node_balance(with_last_timestep=True).to_dataframe(),
+                 ds.to_dataframe(),
                  colors=colors,
-                 mode='bar',
-                 title=f'Flow rates of {self.label}',
+                 style=style,
+                 title=title,
              )
              default_filetype = '.png'
          else:
@@ -415,7 +968,7 @@ class _NodeResults(_ElementResults):

          return plotting.export_figure(
              figure_like=figure_like,
-             default_path=self._calculation_results.folder / f'{self.label} (flow rates)',
+             default_path=self._calculation_results.folder / title,
              default_filetype=default_filetype,
              user_path=None if isinstance(save, bool) else pathlib.Path(save),
              show=show,
@@ -427,46 +980,53 @@ class _NodeResults(_ElementResults):
          lower_percentage_group: float = 5,
          colors: plotting.ColorType = 'viridis',
          text_info: str = 'percent+label+value',
-         save: Union[bool, pathlib.Path] = False,
+         save: bool | pathlib.Path = False,
          show: bool = True,
          engine: plotting.PlottingEngine = 'plotly',
-     ) -> plotly.graph_objects.Figure:
-         """
-         Plots a pie chart of the flow hours of the inputs and outputs of buses or components.
-
+         indexer: dict[FlowSystemDimensions, Any] | None = None,
+     ) -> plotly.graph_objs.Figure | tuple[plt.Figure, list[plt.Axes]]:
+         """Plot pie chart of flow hours distribution.
          Args:
-             colors: a colorscale or a list of colors to use for the plot
-             lower_percentage_group: The percentage of flow_hours that is grouped in "Others" (0...100)
-             text_info: What information to display on the pie plot
-             save: Whether to save the figure.
-             show: Whether to show the figure.
-             engine: Plotting engine to use. Only 'plotly' is implemented atm.
+             lower_percentage_group: Percentage threshold for "Others" grouping.
+             colors: Color scheme. Also see plotly.
+             text_info: Information to display on pie slices.
+             save: Whether to save plot.
+             show: Whether to display plot.
+             engine: Plotting engine ('plotly' or 'matplotlib').
+             indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
+                 If None, uses first value for each dimension.
+                 If empty dict {}, uses all values.
          """
-         inputs = (
-             sanitize_dataset(
-                 ds=self.solution[self.inputs],
-                 threshold=1e-5,
-                 drop_small_vars=True,
-                 zero_small_values=True,
-             )
-             * self._calculation_results.hours_per_timestep
+         inputs = sanitize_dataset(
+             ds=self.solution[self.inputs] * self._calculation_results.hours_per_timestep,
+             threshold=1e-5,
+             drop_small_vars=True,
+             zero_small_values=True,
+             drop_suffix='|',
          )
-         outputs = (
-             sanitize_dataset(
-                 ds=self.solution[self.outputs],
-                 threshold=1e-5,
-                 drop_small_vars=True,
-                 zero_small_values=True,
-             )
-             * self._calculation_results.hours_per_timestep
+         outputs = sanitize_dataset(
+             ds=self.solution[self.outputs] * self._calculation_results.hours_per_timestep,
+             threshold=1e-5,
+             drop_small_vars=True,
+             zero_small_values=True,
+             drop_suffix='|',
          )

+         inputs, suffix_parts = _apply_indexer_to_data(inputs, indexer, drop=True)
+         outputs, suffix_parts = _apply_indexer_to_data(outputs, indexer, drop=True)
+         suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
+
+         title = f'{self.label} (total flow hours){suffix}'
+
+         inputs = inputs.sum('time')
+         outputs = outputs.sum('time')
+
          if engine == 'plotly':
              figure_like = plotting.dual_pie_with_plotly(
-                 inputs.to_dataframe().sum(),
-                 outputs.to_dataframe().sum(),
+                 data_left=inputs.to_pandas(),
+                 data_right=outputs.to_pandas(),
                  colors=colors,
-                 title=f'Flow hours of {self.label}',
+                 title=title,
                  text_info=text_info,
                  subtitles=('Inputs', 'Outputs'),
                  legend_title='Flows',
@@ -476,10 +1036,10 @@ class _NodeResults(_ElementResults):
          elif engine == 'matplotlib':
              logger.debug('Parameter text_info is not supported for matplotlib')
              figure_like = plotting.dual_pie_with_matplotlib(
-                 inputs.to_dataframe().sum(),
-                 outputs.to_dataframe().sum(),
+                 data_left=inputs.to_pandas(),
+                 data_right=outputs.to_pandas(),
                  colors=colors,
-                 title=f'Total flow hours of {self.label}',
+                 title=title,
                  subtitles=('Inputs', 'Outputs'),
                  legend_title='Flows',
                  lower_percentage_group=lower_percentage_group,
@@ -490,7 +1050,7 @@ class _NodeResults(_ElementResults):

          return plotting.export_figure(
              figure_like=figure_like,
-             default_path=self._calculation_results.folder / f'{self.label} (total flow hours)',
+             default_path=self._calculation_results.folder / title,
              default_filetype=default_filetype,
              user_path=None if isinstance(save, bool) else pathlib.Path(save),
              show=show,
@@ -501,11 +1061,31 @@ class _NodeResults(_ElementResults):
          self,
          negate_inputs: bool = True,
          negate_outputs: bool = False,
-         threshold: Optional[float] = 1e-5,
+         threshold: float | None = 1e-5,
          with_last_timestep: bool = False,
+         mode: Literal['flow_rate', 'flow_hours'] = 'flow_rate',
+         drop_suffix: bool = False,
+         indexer: dict[FlowSystemDimensions, Any] | None = None,
      ) -> xr.Dataset:
-         return sanitize_dataset(
-             ds=self.solution[self.inputs + self.outputs],
+         """
+         Returns a dataset with the node balance of the Component or Bus.
+         Args:
+             negate_inputs: Whether to negate the input flow_rates of the Node.
+             negate_outputs: Whether to negate the output flow_rates of the Node.
+             threshold: The threshold for small values. Variables with all values below the threshold are dropped.
+             with_last_timestep: Whether to include the last timestep in the dataset.
+             mode: The mode to use for the dataset. Can be 'flow_rate' or 'flow_hours'.
+                 - 'flow_rate': Returns the flow_rates of the Node.
+                 - 'flow_hours': Returns the flow_hours of the Node. [flow_hours(t) = flow_rate(t) * dt(t)]. Renames suffixes to |flow_hours.
+             drop_suffix: Whether to drop the suffix from the variable names.
+             indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
+                 If None, uses first value for each dimension.
+                 If empty dict {}, uses all values.
+         """
+         ds = self.solution[self.inputs + self.outputs]
+
+         ds = sanitize_dataset(
+             ds=ds,
              threshold=threshold,
              timesteps=self._calculation_results.timesteps_extra if with_last_timestep else None,
              negate=(
@@ -517,15 +1097,24 @@ class _NodeResults(_ElementResults):
                  if negate_inputs
                  else None
              ),
+             drop_suffix='|' if drop_suffix else None,
          )

+         ds, _ = _apply_indexer_to_data(ds, indexer, drop=True)
+
+         if mode == 'flow_hours':
+             ds = ds * self._calculation_results.hours_per_timestep
+             ds = ds.rename_vars({var: var.replace('flow_rate', 'flow_hours') for var in ds.data_vars})
+
+         return ds
+
522
1111
 
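The new `mode` and `indexer` arguments compose; a minimal sketch, assuming `results` is a solved `CalculationResults` with a component labeled 'Boiler' and a 'scenario' dimension (both names illustrative):

```python
# Total energy per flow of one node, for one scenario (labels are illustrative).
balance = results.components['Boiler'].node_balance(
    mode='flow_hours',             # flow_hours(t) = flow_rate(t) * dt(t)
    drop_suffix=True,              # 'Boiler(Q_th)|flow_rate' -> 'Boiler(Q_th)'
    indexer={'scenario': 'base'},  # {} keeps all values, None picks the first
)
totals = balance.sum('time')       # xr.Dataset with one total per flow
```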
 class BusResults(_NodeResults):
-    """Results for a Bus"""
+    """Results container for energy/material balance nodes in the system."""


 class ComponentResults(_NodeResults):
-    """Results for a Component"""
+    """Results container for individual system components with specialized analysis tools."""

     @property
     def is_storage(self) -> bool:
@@ -537,56 +1126,79 @@ class ComponentResults(_NodeResults):

     @property
     def charge_state(self) -> xr.DataArray:
-        """Get the solution of the charge state of the Storage."""
+        """Get storage charge state solution."""
         if not self.is_storage:
             raise ValueError(f'Cannot get charge_state. "{self.label}" is not a storage')
         return self.solution[self._charge_state]

     def plot_charge_state(
         self,
-        save: Union[bool, pathlib.Path] = False,
+        save: bool | pathlib.Path = False,
         show: bool = True,
         colors: plotting.ColorType = 'viridis',
         engine: plotting.PlottingEngine = 'plotly',
+        style: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar',
+        indexer: dict[FlowSystemDimensions, Any] | None = None,
     ) -> plotly.graph_objs.Figure:
-        """
-        Plots the charge state of a Storage.
+        """Plot storage charge state over time, combined with the node balance.
+
         Args:
             save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
             show: Whether to show the plot or not.
-            colors: The c
+            colors: Color scheme. See `flixopt.plotting.ColorType` for options.
-            engine: Plotting engine to use. Only 'plotly' is implemented atm.
+            engine: Plotting engine to use. Can be 'plotly' or 'matplotlib'.
+            style: Plot style. One of 'area', 'stacked_bar', or 'line'.
+            indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
+                If None, uses first value for each dimension.
+                If empty dict {}, uses all values.

         Raises:
-            ValueError: If the Component is not a Storage.
+            ValueError: If component is not a storage.
         """
-        if engine != 'plotly':
-            raise NotImplementedError(
-                f'Plotting engine "{engine}" not implemented for ComponentResults.plot_charge_state.'
-            )
-
         if not self.is_storage:
             raise ValueError(f'Cannot plot charge_state. "{self.label}" is not a storage')

-        fig = plotting.with_plotly(
-            self.node_balance(with_last_timestep=True).to_dataframe(),
-            colors=colors,
-            mode='area',
-            title=f'Operation Balance of {self.label}',
-        )
-
-        # TODO: Use colors for charge state?
-
-        charge_state = self.charge_state.to_dataframe()
-        fig.add_trace(
-            plotly.graph_objs.Scatter(
-                x=charge_state.index, y=charge_state.values.flatten(), mode='lines', name=self._charge_state
-            )
-        )
+        ds = self.node_balance(with_last_timestep=True, indexer=indexer)
+        charge_state = self.charge_state
+
+        ds, suffix_parts = _apply_indexer_to_data(ds, indexer, drop=True)
+        charge_state, suffix_parts = _apply_indexer_to_data(charge_state, indexer, drop=True)
+        suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
+
+        title = f'Operation Balance of {self.label}{suffix}'
+
+        if engine == 'plotly':
+            fig = plotting.with_plotly(
+                ds.to_dataframe(),
+                colors=colors,
+                style=style,
+                title=title,
+            )
+
+            # TODO: Use colors for charge state?
+
+            charge_state = charge_state.to_dataframe()
+            fig.add_trace(
+                plotly.graph_objs.Scatter(
+                    x=charge_state.index, y=charge_state.values.flatten(), mode='lines', name=self._charge_state
+                )
+            )
+        elif engine == 'matplotlib':
+            fig, ax = plotting.with_matplotlib(
+                ds.to_dataframe(),
+                colors=colors,
+                style=style,
+                title=title,
+            )
+
+            charge_state = charge_state.to_dataframe()
+            ax.plot(charge_state.index, charge_state.values.flatten(), label=self._charge_state)
+            fig.tight_layout()
+            fig = fig, ax

         return plotting.export_figure(
             fig,
-            default_path=self._calculation_results.folder / f'{self.label} (charge state)',
+            default_path=self._calculation_results.folder / title,
             default_filetype='.html',
             user_path=None if isinstance(save, bool) else pathlib.Path(save),
             show=show,
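With the matplotlib branch added above, both engines take the same arguments; a sketch, assuming a storage component labeled 'Battery' (hypothetical):

```python
# Plot the node balance plus a charge-state line; export_figure handles saving/showing.
storage = results.components['Battery']
if storage.is_storage:
    storage.plot_charge_state(
        engine='matplotlib',           # now supported alongside 'plotly'
        style='area',
        indexer={'scenario': 'base'},  # None auto-selects the first scenario
    )
```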
@@ -594,17 +1206,20 @@ class ComponentResults(_NodeResults):
         )

     def node_balance_with_charge_state(
-        self, negate_inputs: bool = True, negate_outputs: bool = False, threshold: Optional[float] = 1e-5
+        self, negate_inputs: bool = True, negate_outputs: bool = False, threshold: float | None = 1e-5
     ) -> xr.Dataset:
-        """
-        Returns a dataset with the node balance of the Storage including its charge state.
+        """Get storage node balance including charge state.
+
         Args:
-            negate_inputs: Whether to negate the inputs of the Storage.
-            negate_outputs: Whether to negate the outputs of the Storage.
-            threshold: The threshold for small values.
+            negate_inputs: Whether to negate input flows.
+            negate_outputs: Whether to negate output flows.
+            threshold: Threshold for small values.
+
+        Returns:
+            xr.Dataset: Node balance with charge state.

         Raises:
-            ValueError: If the Component is not a Storage.
+            ValueError: If component is not a storage.
         """
         if not self.is_storage:
             raise ValueError(f'Cannot get charge_state. "{self.label}" is not a storage')
@@ -629,17 +1244,151 @@ class EffectResults(_ElementResults):
     """Results for an Effect"""

     def get_shares_from(self, element: str):
-        """Get the shares from an Element (without subelements) to the Effect"""
+        """Get effect shares from a specific element.
+
+        Args:
+            element: Element label to get shares from.
+
+        Returns:
+            xr.Dataset: Element shares to this effect.
+        """
         return self.solution[[name for name in self._variable_names if name.startswith(f'{element}->')]]


+class FlowResults(_ElementResults):
+    def __init__(
+        self,
+        calculation_results: CalculationResults,
+        label: str,
+        variables: list[str],
+        constraints: list[str],
+        start: str,
+        end: str,
+        component: str,
+    ):
+        super().__init__(calculation_results, label, variables, constraints)
+        self.start = start
+        self.end = end
+        self.component = component
+
+    @property
+    def flow_rate(self) -> xr.DataArray:
+        return self.solution[f'{self.label}|flow_rate']
+
+    @property
+    def flow_hours(self) -> xr.DataArray:
+        return (self.flow_rate * self._calculation_results.hours_per_timestep).rename(f'{self.label}|flow_hours')
+
+    @property
+    def size(self) -> xr.DataArray:
+        name = f'{self.label}|size'
+        if name in self.solution:
+            return self.solution[name]
+        try:
+            return self._calculation_results.flow_system.flows[self.label].size.rename(name)
+        except _FlowSystemRestorationError:
+            logger.critical(f'Size of flow {self.label} not available. Returning NaN')
+            return xr.DataArray(np.nan).rename(name)
+
1294
  class SegmentedCalculationResults:
637
- """
638
- Class to store the results of a SegmentedCalculation.
1295
+ """Results container for segmented optimization calculations with temporal decomposition.
1296
+
1297
+ This class manages results from SegmentedCalculation runs where large optimization
1298
+ problems are solved by dividing the time horizon into smaller, overlapping segments.
1299
+ It provides unified access to results across all segments while maintaining the
1300
+ ability to analyze individual segment behavior.
1301
+
1302
+ Key Features:
1303
+ **Unified Time Series**: Automatically assembles results from all segments into
1304
+ continuous time series, removing overlaps and boundary effects
1305
+ **Segment Analysis**: Access individual segment results for debugging and validation
1306
+ **Consistency Checks**: Verify solution continuity at segment boundaries
1307
+ **Memory Efficiency**: Handles large datasets that exceed single-segment memory limits
1308
+
1309
+ Temporal Handling:
1310
+ The class manages the complex task of combining overlapping segment solutions
1311
+ into coherent time series, ensuring proper treatment of:
1312
+ - Storage state continuity between segments
1313
+ - Flow rate transitions at segment boundaries
1314
+ - Aggregated results over the full time horizon
1315
+
1316
+ Examples:
1317
+ Load and analyze segmented results:
1318
+
1319
+ ```python
1320
+ # Load segmented calculation results
1321
+ results = SegmentedCalculationResults.from_file('results', 'annual_segmented')
1322
+
1323
+ # Access unified results across all segments
1324
+ full_timeline = results.all_timesteps
1325
+ total_segments = len(results.segment_results)
1326
+
1327
+ # Analyze individual segments
1328
+ for i, segment in enumerate(results.segment_results):
1329
+ print(f'Segment {i + 1}: {len(segment.solution.time)} timesteps')
1330
+ segment_costs = segment.effects['cost'].total_value
1331
+
1332
+ # Check solution continuity at boundaries
1333
+ segment_boundaries = results.get_boundary_analysis()
1334
+ max_discontinuity = segment_boundaries['max_storage_jump']
1335
+ ```
1336
+
1337
+ Create from segmented calculation:
1338
+
1339
+ ```python
1340
+ # After running segmented calculation
1341
+ segmented_calc = SegmentedCalculation(
1342
+ name='annual_system',
1343
+ flow_system=system,
1344
+ timesteps_per_segment=730, # Monthly segments
1345
+ overlap_timesteps=48, # 2-day overlap
1346
+ )
1347
+ segmented_calc.do_modeling_and_solve(solver='gurobi')
1348
+
1349
+ # Extract unified results
1350
+ results = SegmentedCalculationResults.from_calculation(segmented_calc)
1351
+
1352
+ # Save combined results
1353
+ results.to_file(compression=5)
1354
+ ```
1355
+
1356
+ Performance analysis across segments:
1357
+
1358
+ ```python
1359
+ # Compare segment solve times
1360
+ solve_times = [seg.summary['durations']['solving'] for seg in results.segment_results]
1361
+ avg_solve_time = sum(solve_times) / len(solve_times)
1362
+
1363
+ # Verify solution quality consistency
1364
+ segment_objectives = [seg.summary['objective_value'] for seg in results.segment_results]
1365
+
1366
+ # Storage continuity analysis
1367
+ if 'Battery' in results.segment_results[0].components:
1368
+ storage_continuity = results.check_storage_continuity('Battery')
1369
+ ```
1370
+
1371
+ Design Considerations:
1372
+ **Boundary Effects**: Monitor solution quality at segment interfaces where
1373
+ foresight is limited compared to full-horizon optimization.
1374
+
1375
+ **Memory Management**: Individual segment results are maintained for detailed
1376
+ analysis while providing unified access for system-wide metrics.
1377
+
1378
+ **Validation Tools**: Built-in methods to verify temporal consistency and
1379
+ identify potential issues from segmentation approach.
1380
+
1381
+ Common Use Cases:
1382
+ - **Large-Scale Analysis**: Annual or multi-period optimization results
1383
+ - **Memory-Constrained Systems**: Results from systems exceeding hardware limits
1384
+ - **Segment Validation**: Verifying segmentation approach effectiveness
1385
+ - **Performance Monitoring**: Comparing segmented vs. full-horizon solutions
1386
+ - **Debugging**: Identifying issues specific to temporal decomposition
1387
+
639
1388
  """
640
1389
 
641
1390
  @classmethod
642
- def from_calculation(cls, calculation: 'SegmentedCalculation'):
1391
+ def from_calculation(cls, calculation: SegmentedCalculation):
643
1392
  return cls(
644
1393
  [calc.results for calc in calculation.sub_calculations],
645
1394
  all_timesteps=calculation.all_timesteps,
@@ -650,16 +1399,23 @@ class SegmentedCalculationResults:
         )

     @classmethod
-    def from_file(cls, folder: Union[str, pathlib.Path], name: str):
-        """Create SegmentedCalculationResults directly from file"""
+    def from_file(cls, folder: str | pathlib.Path, name: str):
+        """Load SegmentedCalculationResults from saved files.
+
+        Args:
+            folder: Directory containing saved files.
+            name: Base name of saved files.
+
+        Returns:
+            SegmentedCalculationResults: Loaded instance.
+        """
         folder = pathlib.Path(folder)
         path = folder / name
-        nc_file = path.with_suffix('.nc4')
-        logger.info(f'loading calculation "{name}" from file ("{nc_file}")')
-        with open(path.with_suffix('.json'), 'r', encoding='utf-8') as f:
+        logger.info(f'loading calculation "{name}" from file ("{path.with_suffix(".nc4")}")')
+        with open(path.with_suffix('.json'), encoding='utf-8') as f:
             meta_data = json.load(f)
         return cls(
-            [CalculationResults.from_file(folder, name) for name in meta_data['sub_calculations']],
+            [CalculationResults.from_file(folder, sub_name) for sub_name in meta_data['sub_calculations']],
             all_timesteps=pd.DatetimeIndex(
                 [datetime.datetime.fromisoformat(date) for date in meta_data['all_timesteps']], name='time'
             ),
@@ -671,12 +1427,12 @@ class SegmentedCalculationResults:

     def __init__(
         self,
-        segment_results: List[CalculationResults],
+        segment_results: list[CalculationResults],
         all_timesteps: pd.DatetimeIndex,
         timesteps_per_segment: int,
         overlap_timesteps: int,
         name: str,
-        folder: Optional[pathlib.Path] = None,
+        folder: pathlib.Path | None = None,
     ):
         self.segment_results = segment_results
         self.all_timesteps = all_timesteps
@@ -684,10 +1440,10 @@ class SegmentedCalculationResults:
         self.overlap_timesteps = overlap_timesteps
         self.name = name
         self.folder = pathlib.Path(folder) if folder is not None else pathlib.Path.cwd() / 'results'
-        self.hours_per_timestep = TimeSeriesCollection.calculate_hours_per_timestep(self.all_timesteps)
+        self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.all_timesteps)

     @property
-    def meta_data(self) -> Dict[str, Union[int, List[str]]]:
+    def meta_data(self) -> dict[str, int | list[str]]:
         return {
             'all_timesteps': [datetime.datetime.isoformat(date) for date in self.all_timesteps],
             'timesteps_per_segment': self.timesteps_per_segment,
@@ -696,11 +1452,18 @@ class SegmentedCalculationResults:
         }

     @property
-    def segment_names(self) -> List[str]:
+    def segment_names(self) -> list[str]:
         return [segment.name for segment in self.segment_results]

     def solution_without_overlap(self, variable_name: str) -> xr.DataArray:
-        """Returns the solution of a variable without overlapping timesteps"""
+        """Get variable solution with segment overlaps removed.
+
+        Args:
+            variable_name: Name of variable to extract.
+
+        Returns:
+            xr.DataArray: Continuous solution without overlaps.
+        """
         dataarrays = [
             result.solution[variable_name].isel(time=slice(None, self.timesteps_per_segment))
             for result in self.segment_results[:-1]
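A short usage sketch (variable label illustrative), assuming `seg_results` is a `SegmentedCalculationResults`:

```python
# Reassemble one variable into a continuous series across segments.
profile = seg_results.solution_without_overlap('Boiler(Q_th)|flow_rate')
profile.plot()  # xarray's built-in matplotlib line plot
```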
@@ -713,21 +1476,23 @@ class SegmentedCalculationResults:
         heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
         heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
         color_map: str = 'portland',
-        save: Union[bool, pathlib.Path] = False,
+        save: bool | pathlib.Path = False,
         show: bool = True,
         engine: plotting.PlottingEngine = 'plotly',
-    ) -> Union[plotly.graph_objs.Figure, Tuple[plt.Figure, plt.Axes]]:
-        """
-        Plots a heatmap of the solution of a variable.
+    ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
+        """Plot heatmap of variable solution across segments.

         Args:
-            variable_name: The name of the variable to plot.
-            heatmap_timeframes: The timeframes to use for the heatmap.
-            heatmap_timesteps_per_frame: The timesteps per frame to use for the heatmap.
-            color_map: The color map to use for the heatmap.
-            save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
-            show: Whether to show the plot or not.
-            engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
+            variable_name: Variable to plot.
+            heatmap_timeframes: Time aggregation level.
+            heatmap_timesteps_per_frame: Timesteps per frame.
+            color_map: Color scheme; see plotly colorscales for options.
+            save: Whether to save plot.
+            show: Whether to display plot.
+            engine: Plotting engine.
+
+        Returns:
+            Figure object.
         """
         return plot_heatmap(
             dataarray=self.solution_without_overlap(variable_name),
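The two frame arguments define the heatmap layout; a sketch with an illustrative variable label:

```python
# One column per day, one row per hour, stitched across all segments.
fig = seg_results.plot_heatmap(
    'Boiler(Q_th)|flow_rate',
    heatmap_timeframes='D',
    heatmap_timesteps_per_frame='h',
    color_map='portland',
    engine='plotly',
)
```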
@@ -741,10 +1506,14 @@ class SegmentedCalculationResults:
             engine=engine,
         )

-    def to_file(
-        self, folder: Optional[Union[str, pathlib.Path]] = None, name: Optional[str] = None, compression: int = 5
-    ):
-        """Save the results to a file"""
+    def to_file(self, folder: str | pathlib.Path | None = None, name: str | None = None, compression: int = 5):
+        """Save segmented results to files.
+
+        Args:
+            folder: Save folder (defaults to instance folder).
+            name: File name (defaults to instance name).
+            compression: Compression level 0-9.
+        """
         folder = self.folder if folder is None else pathlib.Path(folder)
         name = self.name if name is None else name
         path = folder / name
@@ -756,7 +1525,7 @@ class SegmentedCalculationResults:
                     f'Folder {folder} and its parent do not exist. Please create them first.'
                 ) from e
         for segment in self.segment_results:
-            segment.to_file(folder=folder, name=f'{name}-{segment.name}', compression=compression)
+            segment.to_file(folder=folder, name=segment.name, compression=compression)

         with open(path.with_suffix('.json'), 'w', encoding='utf-8') as f:
             json.dump(self.meta_data, f, indent=4, ensure_ascii=False)
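Saving and reloading form a round trip (paths illustrative; the folder must already exist):

```python
seg_results.to_file(folder='results', name='annual_segmented', compression=5)
reloaded = SegmentedCalculationResults.from_file('results', 'annual_segmented')
print(reloaded.segment_names)
```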
@@ -770,24 +1539,31 @@ def plot_heatmap(
     heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
     heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
     color_map: str = 'portland',
-    save: Union[bool, pathlib.Path] = False,
+    save: bool | pathlib.Path = False,
     show: bool = True,
     engine: plotting.PlottingEngine = 'plotly',
+    indexer: dict[str, Any] | None = None,
 ):
-    """
-    Plots a heatmap of the solution of a variable.
+    """Plot heatmap of time series data.

     Args:
-        dataarray: The dataarray to plot.
-        name: The name of the variable to plot.
-        folder: The folder to save the plot to.
-        heatmap_timeframes: The timeframes to use for the heatmap.
-        heatmap_timesteps_per_frame: The timesteps per frame to use for the heatmap.
-        color_map: The color map to use for the heatmap.
-        save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
-        show: Whether to show the plot or not.
-        engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
+        dataarray: Data to plot.
+        name: Variable name for title.
+        folder: Save folder.
+        heatmap_timeframes: Time aggregation level.
+        heatmap_timesteps_per_frame: Timesteps per frame.
+        color_map: Color scheme; see plotly colorscales for options.
+        save: Whether to save plot.
+        show: Whether to display plot.
+        engine: Plotting engine.
+        indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
+            If None, uses first value for each dimension.
+            If empty dict {}, uses all values.
     """
+    dataarray, suffix_parts = _apply_indexer_to_data(dataarray, indexer, drop=True)
+    suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
+    name = name + suffix
+
     heatmap_data = plotting.heat_map_data_from_df(
         dataarray.to_dataframe(name), heatmap_timeframes, heatmap_timesteps_per_frame, 'ffill'
     )
@@ -819,25 +1595,23 @@ def plot_heatmap(

 def sanitize_dataset(
     ds: xr.Dataset,
-    timesteps: Optional[pd.DatetimeIndex] = None,
-    threshold: Optional[float] = 1e-5,
-    negate: Optional[List[str]] = None,
+    timesteps: pd.DatetimeIndex | None = None,
+    threshold: float | None = 1e-5,
+    negate: list[str] | None = None,
     drop_small_vars: bool = True,
     zero_small_values: bool = False,
+    drop_suffix: str | None = None,
 ) -> xr.Dataset:
-    """
-    Sanitizes a dataset by handling small values (dropping or zeroing) and optionally reindexing the time axis.
+    """Clean dataset by handling small values and reindexing time.

     Args:
-        ds: The dataset to sanitize.
-        timesteps: The timesteps to reindex the dataset to. If None, the original timesteps are kept.
-        threshold: The threshold for small values processing. If None, no processing is done.
-        negate: The variables to negate. If None, no variables are negated.
-        drop_small_vars: If True, drops variables where all values are below threshold.
-        zero_small_values: If True, sets values below threshold to zero.
-
-    Returns:
-        xr.Dataset: The sanitized dataset.
+        ds: Dataset to sanitize.
+        timesteps: Time index for reindexing (optional).
+        threshold: Threshold for small values processing.
+        negate: Variables to negate.
+        drop_small_vars: Whether to drop variables below threshold.
+        zero_small_values: Whether to zero values below threshold.
+        drop_suffix: Delimiter string; variable names are truncated at its first occurrence (e.g. '|').
     """
     # Create a copy to avoid modifying the original
     ds = ds.copy()
@@ -854,7 +1628,7 @@ def sanitize_dataset(

     # Option 1: Drop variables where all values are below threshold
     if drop_small_vars:
-        vars_to_drop = [var for var in ds.data_vars if (ds_no_nan_abs[var] <= threshold).all()]
+        vars_to_drop = [var for var in ds.data_vars if (ds_no_nan_abs[var] <= threshold).all().item()]
         ds = ds.drop_vars(vars_to_drop)

     # Option 2: Set small values to zero
@@ -863,7 +1637,7 @@ def sanitize_dataset(
             # Create a boolean mask of values below threshold
             mask = ds_no_nan_abs[var] <= threshold
             # Only proceed if there are values to zero out
-            if mask.any():
+            if bool(mask.any().item()):
                 # Create a copy to ensure we don't modify data with views
                 ds[var] = ds[var].copy()
                 # Set values below threshold to zero
@@ -873,26 +1647,206 @@ def sanitize_dataset(
     if timesteps is not None and not ds.indexes['time'].equals(timesteps):
         ds = ds.reindex({'time': timesteps}, fill_value=np.nan)

+    if drop_suffix is not None:
+        if not isinstance(drop_suffix, str):
+            raise ValueError(f'Only pass str values to drop suffixes. Got {drop_suffix}')
+        unique_dict = {}
+        for var in ds.data_vars:
+            new_name = var.split(drop_suffix)[0]
+
+            # If name already exists, keep original name
+            if new_name in unique_dict.values():
+                unique_dict[var] = var
+            else:
+                unique_dict[var] = new_name
+        ds = ds.rename(unique_dict)
+
     return ds

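A self-contained sketch of the threshold and `drop_suffix` behavior (toy data; import path assumed):

```python
import numpy as np
import pandas as pd
import xarray as xr

from flixopt.results import sanitize_dataset  # assumed import path

ds = xr.Dataset(
    {
        'Boiler(Q_th)|flow_rate': ('time', np.ones(3)),
        'CHP(P_el)|flow_rate': ('time', np.zeros(3)),  # all values below threshold
    },
    coords={'time': pd.date_range('2024-01-01', periods=3, freq='h')},
)
clean = sanitize_dataset(ds, threshold=1e-5, drop_suffix='|')
print(list(clean.data_vars))  # ['Boiler(Q_th)'] - small variable dropped, suffix removed
```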
 def filter_dataset(
     ds: xr.Dataset,
-    variable_dims: Optional[Literal['scalar', 'time']] = None,
+    variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None,
+    timesteps: pd.DatetimeIndex | str | pd.Timestamp | None = None,
+    scenarios: pd.Index | str | int | None = None,
+    contains: str | list[str] | None = None,
+    startswith: str | list[str] | None = None,
 ) -> xr.Dataset:
-    """
-    Filters a dataset by its dimensions.
+    """Filter dataset by variable dimensions, by indexes, and by string filters on variable names.

     Args:
         ds: The dataset to filter.
-        variable_dims: The dimension of the variables to filter for.
+        variable_dims: The dimension of which to get variables from.
+            - 'scalar': Get scalar variables (without dimensions)
+            - 'time': Get time-dependent variables (with a time dimension)
+            - 'scenario': Get scenario-dependent variables (with a scenario dimension)
+            - 'timeonly': Get time-dependent variables (with ONLY a time dimension)
+            - 'scenarioonly': Get scenario-dependent variables (with ONLY a scenario dimension)
+        timesteps: Optional time indexes to select. Can be:
+            - pd.DatetimeIndex: Multiple timesteps
+            - str/pd.Timestamp: Single timestep
+            Defaults to all available timesteps.
+        scenarios: Optional scenario indexes to select. Can be:
+            - pd.Index: Multiple scenarios
+            - str/int: Single scenario (int is treated as a label, not an index position)
+            Defaults to all available scenarios.
+        contains: Filter variables that contain this string or strings.
+            If a list is provided, variables must contain ALL strings in the list.
+        startswith: Filter variables that start with this string or strings.
+            If a list is provided, variables must start with ANY of the strings in the list.
     """
-    if variable_dims is None:
-        return ds
+    # First filter by dimensions
+    filtered_ds = ds.copy()
+    if variable_dims is not None:
+        if variable_dims == 'scalar':
+            filtered_ds = filtered_ds[[v for v in filtered_ds.data_vars if not filtered_ds[v].dims]]
+        elif variable_dims == 'time':
+            filtered_ds = filtered_ds[[v for v in filtered_ds.data_vars if 'time' in filtered_ds[v].dims]]
+        elif variable_dims == 'scenario':
+            filtered_ds = filtered_ds[[v for v in filtered_ds.data_vars if 'scenario' in filtered_ds[v].dims]]
+        elif variable_dims == 'timeonly':
+            filtered_ds = filtered_ds[[v for v in filtered_ds.data_vars if filtered_ds[v].dims == ('time',)]]
+        elif variable_dims == 'scenarioonly':
+            filtered_ds = filtered_ds[[v for v in filtered_ds.data_vars if filtered_ds[v].dims == ('scenario',)]]
+        else:
+            raise ValueError(f'Unknown variable_dims "{variable_dims}" for filter_dataset')
+
+    # Filter by 'contains' parameter
+    if contains is not None:
+        if isinstance(contains, str):
+            # Single string - keep variables that contain this string
+            filtered_ds = filtered_ds[[v for v in filtered_ds.data_vars if contains in v]]
+        elif isinstance(contains, list) and all(isinstance(s, str) for s in contains):
+            # List of strings - keep variables that contain ALL strings in the list
+            filtered_ds = filtered_ds[[v for v in filtered_ds.data_vars if all(s in v for s in contains)]]
+        else:
+            raise TypeError(f"'contains' must be a string or list of strings, got {type(contains)}")
+
+    # Filter by 'startswith' parameter
+    if startswith is not None:
+        if isinstance(startswith, str):
+            # Single string - keep variables that start with this string
+            filtered_ds = filtered_ds[[v for v in filtered_ds.data_vars if v.startswith(startswith)]]
+        elif isinstance(startswith, list) and all(isinstance(s, str) for s in startswith):
+            # List of strings - keep variables that start with ANY of the strings in the list
+            filtered_ds = filtered_ds[[v for v in filtered_ds.data_vars if any(v.startswith(s) for s in startswith)]]
+        else:
+            raise TypeError(f"'startswith' must be a string or list of strings, got {type(startswith)}")
+
+    # Handle time selection if needed
+    if timesteps is not None and 'time' in filtered_ds.dims:
+        try:
+            filtered_ds = filtered_ds.sel(time=timesteps)
+        except KeyError as e:
+            available_times = set(filtered_ds.indexes['time'])
+            requested_times = set([timesteps]) if not isinstance(timesteps, pd.Index) else set(timesteps)
+            missing_times = requested_times - available_times
+            raise ValueError(
+                f'Timesteps not found in dataset: {missing_times}. Available times: {available_times}'
+            ) from e
+
+    # Handle scenario selection if needed
+    if scenarios is not None and 'scenario' in filtered_ds.dims:
+        try:
+            filtered_ds = filtered_ds.sel(scenario=scenarios)
+        except KeyError as e:
+            available_scenarios = set(filtered_ds.indexes['scenario'])
+            requested_scenarios = set([scenarios]) if not isinstance(scenarios, pd.Index) else set(scenarios)
+            missing_scenarios = requested_scenarios - available_scenarios
+            raise ValueError(
+                f'Scenarios not found in dataset: {missing_scenarios}. Available scenarios: {available_scenarios}'
+            ) from e
+
+    return filtered_ds
+
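The string filters compose with the dimension and index filters; a sketch, assuming `results.solution` holds the full solution dataset and a 'scenario' dimension exists (names illustrative):

```python
subset = filter_dataset(
    results.solution,
    variable_dims='time',          # only time-dependent variables
    contains='flow_rate',          # every listed substring must match
    startswith=['Boiler', 'CHP'],  # any listed prefix may match
    scenarios='base',              # ignored if there is no 'scenario' dimension
)
```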
1762
+
1763
+ def filter_dataarray_by_coord(da: xr.DataArray, **kwargs: str | list[str] | None) -> xr.DataArray:
1764
+ """Filter flows by node and component attributes.
1765
+
1766
+ Filters are applied in the order they are specified. All filters must match for an edge to be included.
1767
+
1768
+ To recombine filtered dataarrays, use `xr.concat`.
1769
+
1770
+ xr.concat([res.sizes(start='Fernwärme'), res.sizes(end='Fernwärme')], dim='flow')
1771
+
1772
+ Args:
1773
+ da: Flow DataArray with network metadata coordinates.
1774
+ **kwargs: Coord filters as name=value pairs.
1775
+
1776
+ Returns:
1777
+ Filtered DataArray with matching edges.
1778
+
1779
+ Raises:
1780
+ AttributeError: If required coordinates are missing.
1781
+ ValueError: If specified nodes don't exist or no matches found.
1782
+ """
1783
+
1784
+ # Helper function to process filters
1785
+ def apply_filter(array, coord_name: str, coord_values: Any | list[Any]):
1786
+ # Verify coord exists
1787
+ if coord_name not in array.coords:
1788
+ raise AttributeError(f"Missing required coordinate '{coord_name}'")
1789
+
1790
+ # Convert single value to list
1791
+ val_list = [coord_values] if isinstance(coord_values, str) else coord_values
1792
+
1793
+ # Verify coord_values exist
1794
+ available = set(array[coord_name].values)
1795
+ missing = [v for v in val_list if v not in available]
1796
+ if missing:
1797
+ raise ValueError(f'{coord_name.title()} value(s) not found: {missing}')
1798
+
1799
+ # Apply filter
1800
+ return array.where(
1801
+ array[coord_name].isin(val_list) if isinstance(coord_values, list) else array[coord_name] == coord_values,
1802
+ drop=True,
1803
+ )
1804
+
1805
+ # Apply filters from kwargs
1806
+ filters = {k: v for k, v in kwargs.items() if v is not None}
1807
+ try:
1808
+ for coord, values in filters.items():
1809
+ da = apply_filter(da, coord, values)
1810
+ except ValueError as e:
1811
+ raise ValueError(f'No edges match criteria: {filters}') from e
1812
+
1813
+ # Verify results exist
1814
+ if da.size == 0:
1815
+ raise ValueError(f'No edges match criteria: {filters}')
1816
+
1817
+ return da
1818
+
1819
+
1820
+ def _apply_indexer_to_data(
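A self-contained sketch with a toy flow array (labels and coords illustrative; import path assumed):

```python
import xarray as xr

from flixopt.results import filter_dataarray_by_coord  # assumed import path

da = xr.DataArray(
    [1.0, 2.0, 3.0],
    dims='flow',
    coords={
        'flow': ['Boiler(Q_th)', 'HP(Q_th)', 'CHP(P_el)'],
        'end': ('flow', ['Fernwärme', 'Fernwärme', 'Strom']),
    },
)
heat_inputs = filter_dataarray_by_coord(da, end='Fernwärme')  # keeps the first two flows
```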
1821
+ data: xr.DataArray | xr.Dataset, indexer: dict[str, Any] | None = None, drop=False
1822
+ ) -> tuple[xr.DataArray | xr.Dataset, list[str]]:
1823
+ """
1824
+ Apply indexer selection or auto-select first values for non-time dimensions.
1825
+
1826
+ Args:
1827
+ data: xarray Dataset or DataArray
1828
+ indexer: Optional selection dict
1829
+ If None, uses first value for each dimension (except time).
1830
+ If empty dict {}, uses all values.
1831
+
1832
+ Returns:
1833
+ Tuple of (selected_data, selection_string)
1834
+ """
1835
+ selection_string = []
892
1836
 
893
- if variable_dims == 'scalar':
894
- return ds[[name for name, da in ds.data_vars.items() if len(da.dims) == 0]]
895
- elif variable_dims == 'time':
896
- return ds[[name for name, da in ds.data_vars.items() if 'time' in da.dims]]
1837
+ if indexer is not None:
1838
+ # User provided indexer
1839
+ data = data.sel(indexer, drop=drop)
1840
+ selection_string.extend(f'{v}[{k}]' for k, v in indexer.items())
897
1841
  else:
898
- raise ValueError(f'Not allowed value for "filter_dataset()": {variable_dims=}')
1842
+ # Auto-select first value for each dimension except 'time'
1843
+ selection = {}
1844
+ for dim in data.dims:
1845
+ if dim != 'time' and dim in data.coords:
1846
+ first_value = data.coords[dim].values[0]
1847
+ selection[dim] = first_value
1848
+ selection_string.append(f'{first_value}[{dim}]')
1849
+ if selection:
1850
+ data = data.sel(selection, drop=drop)
1851
+
1852
+ return data, selection_string
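A quick sketch of the helper's two modes (toy data; the helper is private, import path assumed):

```python
import numpy as np
import pandas as pd
import xarray as xr

from flixopt.results import _apply_indexer_to_data  # assumed import path

da = xr.DataArray(
    np.zeros((2, 3)),
    coords={'scenario': ['base', 'high'], 'time': pd.date_range('2024-01-01', periods=3, freq='h')},
    dims=('scenario', 'time'),
)
selected, parts = _apply_indexer_to_data(da)              # auto-selects the first scenario
print(parts)                                              # ['base[scenario]']
selected, parts = _apply_indexer_to_data(da, indexer={})  # empty dict keeps everything
print(parts)                                              # []
```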