flixopt 2.2.0b0__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flixopt might be problematic. See the registry's release page for more details.

Files changed (63)
  1. flixopt/__init__.py +35 -1
  2. flixopt/aggregation.py +60 -81
  3. flixopt/calculation.py +381 -196
  4. flixopt/components.py +1022 -359
  5. flixopt/config.py +553 -191
  6. flixopt/core.py +475 -1315
  7. flixopt/effects.py +477 -214
  8. flixopt/elements.py +591 -344
  9. flixopt/features.py +403 -957
  10. flixopt/flow_system.py +781 -293
  11. flixopt/interface.py +1159 -189
  12. flixopt/io.py +50 -55
  13. flixopt/linear_converters.py +384 -92
  14. flixopt/modeling.py +759 -0
  15. flixopt/network_app.py +789 -0
  16. flixopt/plotting.py +273 -135
  17. flixopt/results.py +639 -383
  18. flixopt/solvers.py +25 -21
  19. flixopt/structure.py +928 -442
  20. flixopt/utils.py +34 -5
  21. flixopt-3.0.0.dist-info/METADATA +209 -0
  22. flixopt-3.0.0.dist-info/RECORD +26 -0
  23. {flixopt-2.2.0b0.dist-info → flixopt-3.0.0.dist-info}/WHEEL +1 -1
  24. flixopt-3.0.0.dist-info/top_level.txt +1 -0
  25. docs/examples/00-Minimal Example.md +0 -5
  26. docs/examples/01-Basic Example.md +0 -5
  27. docs/examples/02-Complex Example.md +0 -10
  28. docs/examples/03-Calculation Modes.md +0 -5
  29. docs/examples/index.md +0 -5
  30. docs/faq/contribute.md +0 -49
  31. docs/faq/index.md +0 -3
  32. docs/images/architecture_flixOpt-pre2.0.0.png +0 -0
  33. docs/images/architecture_flixOpt.png +0 -0
  34. docs/images/flixopt-icon.svg +0 -1
  35. docs/javascripts/mathjax.js +0 -18
  36. docs/release-notes/_template.txt +0 -32
  37. docs/release-notes/index.md +0 -7
  38. docs/release-notes/v2.0.0.md +0 -93
  39. docs/release-notes/v2.0.1.md +0 -12
  40. docs/release-notes/v2.1.0.md +0 -31
  41. docs/release-notes/v2.2.0.md +0 -55
  42. docs/user-guide/Mathematical Notation/Bus.md +0 -33
  43. docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md +0 -132
  44. docs/user-guide/Mathematical Notation/Flow.md +0 -26
  45. docs/user-guide/Mathematical Notation/Investment.md +0 -115
  46. docs/user-guide/Mathematical Notation/LinearConverter.md +0 -21
  47. docs/user-guide/Mathematical Notation/Piecewise.md +0 -49
  48. docs/user-guide/Mathematical Notation/Storage.md +0 -44
  49. docs/user-guide/Mathematical Notation/index.md +0 -22
  50. docs/user-guide/Mathematical Notation/others.md +0 -3
  51. docs/user-guide/index.md +0 -124
  52. flixopt/config.yaml +0 -10
  53. flixopt-2.2.0b0.dist-info/METADATA +0 -146
  54. flixopt-2.2.0b0.dist-info/RECORD +0 -59
  55. flixopt-2.2.0b0.dist-info/top_level.txt +0 -5
  56. pics/architecture_flixOpt-pre2.0.0.png +0 -0
  57. pics/architecture_flixOpt.png +0 -0
  58. pics/flixOpt_plotting.jpg +0 -0
  59. pics/flixopt-icon.svg +0 -1
  60. pics/pics.pptx +0 -0
  61. scripts/gen_ref_pages.py +0 -54
  62. tests/ressources/Zeitreihen2020.csv +0 -35137
  63. {flixopt-2.2.0b0.dist-info → flixopt-3.0.0.dist-info}/licenses/LICENSE +0 -0
flixopt/results.py CHANGED
@@ -1,12 +1,13 @@
1
+ from __future__ import annotations
2
+
1
3
  import datetime
2
4
  import json
3
5
  import logging
4
6
  import pathlib
5
7
  import warnings
6
- from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union
8
+ from typing import TYPE_CHECKING, Any, Literal
7
9
 
8
10
  import linopy
9
- import matplotlib.pyplot as plt
10
11
  import numpy as np
11
12
  import pandas as pd
12
13
  import plotly
@@ -15,13 +16,14 @@ import yaml
15
16
 
16
17
  from . import io as fx_io
17
18
  from . import plotting
18
- from .core import DataConverter, TimeSeriesCollection
19
19
  from .flow_system import FlowSystem
20
20
 
21
21
  if TYPE_CHECKING:
22
+ import matplotlib.pyplot as plt
22
23
  import pyvis
23
24
 
24
25
  from .calculation import Calculation, SegmentedCalculation
26
+ from .core import FlowSystemDimensions
25
27
 
26
28
 
27
29
  logger = logging.getLogger('flixopt')
@@ -29,59 +31,100 @@ logger = logging.getLogger('flixopt')
29
31
 
30
32
  class _FlowSystemRestorationError(Exception):
31
33
  """Exception raised when a FlowSystem cannot be restored from dataset."""
34
+
32
35
  pass
33
36
 
34
37
 
35
38
  class CalculationResults:
36
- """Results container for Calculation results.
37
-
38
- This class is used to collect the results of a Calculation.
39
- It provides access to component, bus, and effect
40
- results, and includes methods for filtering, plotting, and saving results.
41
-
42
- The recommended way to create instances is through the class methods
43
- `from_file()` or `from_calculation()`, rather than direct initialization.
39
+ """Comprehensive container for optimization calculation results and analysis tools.
40
+
41
+ This class provides unified access to all optimization results including flow rates,
42
+ component states, bus balances, and system effects. It offers powerful analysis
43
+ capabilities through filtering, plotting, and export functionality, making it
44
+ the primary interface for post-processing optimization results.
45
+
46
+ Key Features:
47
+ **Unified Access**: Single interface to all solution variables and constraints
48
+ **Element Results**: Direct access to component, bus, and effect-specific results
49
+ **Visualization**: Built-in plotting methods for heatmaps, time series, and networks
50
+ **Persistence**: Save/load functionality with compression for large datasets
51
+ **Analysis Tools**: Filtering, aggregation, and statistical analysis methods
52
+
53
+ Result Organization:
54
+ - **Components**: Equipment-specific results (flows, states, constraints)
55
+ - **Buses**: Network node balances and energy flows
56
+ - **Effects**: System-wide impacts (costs, emissions, resource consumption)
57
+ - **Solution**: Raw optimization variables and their values
58
+ - **Metadata**: Calculation parameters, timing, and system configuration
44
59
 
45
60
  Attributes:
46
- solution (xr.Dataset): Dataset containing optimization results.
47
- flow_system_data (xr.Dataset): Dataset containing the flow system.
48
- summary (Dict): Information about the calculation.
49
- name (str): Name identifier for the calculation.
50
- model (linopy.Model): The optimization model (if available).
51
- folder (pathlib.Path): Path to the results directory.
52
- components (Dict[str, ComponentResults]): Results for each component.
53
- buses (Dict[str, BusResults]): Results for each bus.
54
- effects (Dict[str, EffectResults]): Results for each effect.
55
- timesteps_extra (pd.DatetimeIndex): The extended timesteps.
56
- hours_per_timestep (xr.DataArray): Duration of each timestep in hours.
57
-
58
- Example:
59
- Load results from saved files:
60
-
61
- >>> results = CalculationResults.from_file('results_dir', 'optimization_run_1')
62
- >>> element_result = results['Boiler']
63
- >>> results.plot_heatmap('Boiler(Q_th)|flow_rate')
64
- >>> results.to_file(compression=5)
65
- >>> results.to_file(folder='new_results_dir', compression=5) # Save the results to a new folder
61
+ solution: Dataset containing all optimization variable solutions
62
+ flow_system_data: Dataset with complete system configuration and parameters. Restore the used FlowSystem for further analysis.
63
+ summary: Calculation metadata including solver status, timing, and statistics
64
+ name: Unique identifier for this calculation
65
+ model: Original linopy optimization model (if available)
66
+ folder: Directory path for result storage and loading
67
+ components: Dictionary mapping component labels to ComponentResults objects
68
+ buses: Dictionary mapping bus labels to BusResults objects
69
+ effects: Dictionary mapping effect names to EffectResults objects
70
+ timesteps_extra: Extended time index including boundary conditions
71
+ hours_per_timestep: Duration of each timestep for proper energy calculations
72
+
73
+ Examples:
74
+ Load and analyze saved results:
75
+
76
+ ```python
77
+ # Load results from file
78
+ results = CalculationResults.from_file('results', 'annual_optimization')
79
+
80
+ # Access specific component results
81
+ boiler_results = results['Boiler_01']
82
+ heat_pump_results = results['HeatPump_02']
83
+
84
+ # Plot component flow rates
85
+ results.plot_heatmap('Boiler_01(Natural_Gas)|flow_rate')
86
+ results['Boiler_01'].plot_node_balance()
87
+
88
+ # Access raw solution dataarrays
89
+ electricity_flows = results.solution[['Generator_01(Grid)|flow_rate', 'HeatPump_02(Grid)|flow_rate']]
90
+
91
+ # Filter and analyze results
92
+ peak_demand_hours = results.filter_solution(variable_dims='time')
93
+ costs_solution = results.effects['cost'].solution
94
+ ```
95
+
96
+ Advanced filtering and aggregation:
97
+
98
+ ```python
99
+ # Filter by variable type
100
+ scalar_results = results.filter_solution(variable_dims='scalar')
101
+ time_series = results.filter_solution(variable_dims='time')
102
+
103
+ # Custom data analysis leveraging xarray
104
+ peak_power = results.solution['Generator_01(Grid)|flow_rate'].max()
105
+ avg_efficiency = (
106
+ results.solution['HeatPump(Heat)|flow_rate'] / results.solution['HeatPump(Electricity)|flow_rate']
107
+ ).mean()
108
+ ```
109
+
110
+ Design Patterns:
111
+ **Factory Methods**: Use `from_file()` and `from_calculation()` for creation or access directly from `Calculation.results`
112
+ **Dictionary Access**: Use `results[element_label]` for element-specific results
113
+ **Lazy Loading**: Results objects created on-demand for memory efficiency
114
+ **Unified Interface**: Consistent API across different result types
115
+
66
116
  """
67
117
 
68
118
  @classmethod
69
- def from_file(cls, folder: Union[str, pathlib.Path], name: str):
70
- """Create CalculationResults instance by loading from saved files.
71
-
72
- This method loads the calculation results from previously saved files,
73
- including the solution, flow system, model (if available), and metadata.
119
+ def from_file(cls, folder: str | pathlib.Path, name: str) -> CalculationResults:
120
+ """Load CalculationResults from saved files.
74
121
 
75
122
  Args:
76
- folder: Path to the directory containing the saved files.
77
- name: Base name of the saved files (without file extensions).
123
+ folder: Directory containing saved files.
124
+ name: Base name of saved files (without extensions).
78
125
 
79
126
  Returns:
80
- CalculationResults: A new instance containing the loaded data.
81
-
82
- Raises:
83
- FileNotFoundError: If required files cannot be found.
84
- ValueError: If files exist but cannot be properly loaded.
127
+ CalculationResults: Loaded instance.
85
128
  """
86
129
  folder = pathlib.Path(folder)
87
130
  paths = fx_io.CalculationResultsPaths(folder, name)
@@ -94,7 +137,7 @@ class CalculationResults:
94
137
  except Exception as e:
95
138
  logger.critical(f'Could not load the linopy model "{name}" from file ("{paths.linopy_model}"): {e}')
96
139
 
97
- with open(paths.summary, 'r', encoding='utf-8') as f:
140
+ with open(paths.summary, encoding='utf-8') as f:
98
141
  summary = yaml.load(f, Loader=yaml.FullLoader)
99
142
 
100
143
  return cls(
@@ -107,25 +150,18 @@ class CalculationResults:
107
150
  )
108
151
 
109
152
  @classmethod
110
- def from_calculation(cls, calculation: 'Calculation'):
111
- """Create CalculationResults directly from a Calculation object.
112
-
113
- This method extracts the solution, flow system, and other relevant
114
- information directly from an existing Calculation object.
153
+ def from_calculation(cls, calculation: Calculation) -> CalculationResults:
154
+ """Create CalculationResults from a Calculation object.
115
155
 
116
156
  Args:
117
- calculation: A Calculation object containing a solved model.
157
+ calculation: Calculation object with solved model.
118
158
 
119
159
  Returns:
120
- CalculationResults: A new instance containing the results from
121
- the provided calculation.
122
-
123
- Raises:
124
- AttributeError: If the calculation doesn't have required attributes.
160
+ CalculationResults: New instance with extracted results.
125
161
  """
126
162
  return cls(
127
163
  solution=calculation.model.solution,
128
- flow_system_data=calculation.flow_system.as_dataset(constants_in_dataset=True),
164
+ flow_system_data=calculation.flow_system.to_dataset(),
129
165
  summary=calculation.summary,
130
166
  model=calculation.model,
131
167
  name=calculation.name,
@@ -137,19 +173,21 @@ class CalculationResults:
137
173
  solution: xr.Dataset,
138
174
  flow_system_data: xr.Dataset,
139
175
  name: str,
140
- summary: Dict,
141
- folder: Optional[pathlib.Path] = None,
142
- model: Optional[linopy.Model] = None,
176
+ summary: dict,
177
+ folder: pathlib.Path | None = None,
178
+ model: linopy.Model | None = None,
143
179
  **kwargs, # To accept old "flow_system" parameter
144
180
  ):
145
- """
181
+ """Initialize CalculationResults with optimization data.
182
+ Usually, this class is instantiated by the Calculation class, or by loading from file.
183
+
146
184
  Args:
147
- solution: The solution of the optimization.
148
- flow_system_data: The flow_system that was used to create the calculation as a datatset.
149
- name: The name of the calculation.
150
- summary: Information about the calculation,
151
- folder: The folder where the results are saved.
152
- model: The linopy model that was used to solve the calculation.
185
+ solution: Optimization solution dataset.
186
+ flow_system_data: Flow system configuration dataset.
187
+ name: Calculation name.
188
+ summary: Calculation metadata.
189
+ folder: Results storage folder.
190
+ model: Linopy optimization model.
153
191
  Deprecated:
154
192
  flow_system: Use flow_system_data instead.
155
193
  """
@@ -175,15 +213,13 @@ class CalculationResults:
175
213
 
176
214
  self.buses = {label: BusResults(self, **infos) for label, infos in self.solution.attrs['Buses'].items()}
177
215
 
178
- self.effects = {
179
- label: EffectResults(self, **infos) for label, infos in self.solution.attrs['Effects'].items()
180
- }
216
+ self.effects = {label: EffectResults(self, **infos) for label, infos in self.solution.attrs['Effects'].items()}
181
217
 
182
218
  if 'Flows' not in self.solution.attrs:
183
219
  warnings.warn(
184
220
  'No Data about flows found in the results. This data is only included since v2.2.0. Some functionality '
185
221
  'is not availlable. We recommend to evaluate your results with a version <2.2.0.',
186
- stacklevel=2,
222
+ stacklevel=2,
187
223
  )
188
224
  self.flows = {}
189
225
  else:
@@ -192,7 +228,7 @@ class CalculationResults:
192
228
  }
193
229
 
194
230
  self.timesteps_extra = self.solution.indexes['time']
195
- self.hours_per_timestep = TimeSeriesCollection.calculate_hours_per_timestep(self.timesteps_extra)
231
+ self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.timesteps_extra)
196
232
  self.scenarios = self.solution.indexes['scenario'] if 'scenario' in self.solution.indexes else None
197
233
 
198
234
  self._effect_share_factors = None
@@ -201,10 +237,9 @@ class CalculationResults:
201
237
  self._flow_rates = None
202
238
  self._flow_hours = None
203
239
  self._sizes = None
204
- self._effects_per_component = {'operation': None, 'invest': None, 'total': None}
205
- self._flow_network_info_ = None
240
+ self._effects_per_component = None
206
241
 
207
- def __getitem__(self, key: str) -> Union['ComponentResults', 'BusResults', 'EffectResults', 'FlowResults']:
242
+ def __getitem__(self, key: str) -> ComponentResults | BusResults | EffectResults:
208
243
  if key in self.components:
209
244
  return self.components[key]
210
245
  if key in self.buses:
@@ -216,25 +251,30 @@ class CalculationResults:
216
251
  raise KeyError(f'No element with label {key} found.')
217
252
 
218
253
  @property
219
- def storages(self) -> List['ComponentResults']:
220
- """All storages in the results."""
254
+ def storages(self) -> list[ComponentResults]:
255
+ """Get all storage components in the results."""
221
256
  return [comp for comp in self.components.values() if comp.is_storage]
222
257
 
223
258
  @property
224
259
  def objective(self) -> float:
225
- """The objective result of the optimization."""
226
- return self.summary['Main Results']['Objective']
260
+ """Get optimization objective value."""
261
+ # Deprecated. Fallback
262
+ if 'objective' not in self.solution:
263
+ logger.warning('Objective not found in solution. Fallback to summary (rounded value). This is deprecated')
264
+ return self.summary['Main Results']['Objective']
265
+
266
+ return self.solution['objective'].item()
227
267
 
228
268
  @property
229
269
  def variables(self) -> linopy.Variables:
230
- """The variables of the optimization. Only available if the linopy.Model is available."""
270
+ """Get optimization variables (requires linopy model)."""
231
271
  if self.model is None:
232
272
  raise ValueError('The linopy model is not available.')
233
273
  return self.model.variables
234
274
 
235
275
  @property
236
276
  def constraints(self) -> linopy.Constraints:
237
- """The constraints of the optimization. Only available if the linopy.Model is available."""
277
+ """Get optimization constraints (requires linopy model)."""
238
278
  if self.model is None:
239
279
  raise ValueError('The linopy model is not available.')
240
280
  return self.model.constraints
@@ -243,39 +283,38 @@ class CalculationResults:
243
283
  def effect_share_factors(self):
244
284
  if self._effect_share_factors is None:
245
285
  effect_share_factors = self.flow_system.effects.calculate_effect_share_factors()
246
- self._effect_share_factors = {'operation': effect_share_factors[0],
247
- 'invest': effect_share_factors[1]}
286
+ self._effect_share_factors = {'temporal': effect_share_factors[0], 'periodic': effect_share_factors[1]}
248
287
  return self._effect_share_factors
249
288
 
250
289
  @property
251
- def flow_system(self) -> 'FlowSystem':
252
- """ The restored flow_system that was used to create the calculation.
290
+ def flow_system(self) -> FlowSystem:
291
+ """The restored flow_system that was used to create the calculation.
253
292
  Contains all input parameters."""
254
293
  if self._flow_system is None:
294
+ old_level = logger.level
295
+ logger.level = logging.CRITICAL
255
296
  try:
256
- from . import FlowSystem
257
- current_logger_level = logger.getEffectiveLevel()
258
- logger.setLevel(logging.CRITICAL)
259
297
  self._flow_system = FlowSystem.from_dataset(self.flow_system_data)
260
298
  self._flow_system._connect_network()
261
- logger.setLevel(current_logger_level)
262
299
  except Exception as e:
263
- logger.critical(f'Not able to restore FlowSystem from dataset. Some functionality is not availlable. {e}')
300
+ logger.critical(
301
+ f'Not able to restore FlowSystem from dataset. Some functionality is not availlable. {e}'
302
+ )
264
303
  raise _FlowSystemRestorationError(f'Not able to restore FlowSystem from dataset. {e}') from e
304
+ finally:
305
+ logger.level = old_level
265
306
  return self._flow_system
266
307
 
267
308
  def filter_solution(
268
309
  self,
269
- variable_dims: Optional[Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly']] = None,
270
- element: Optional[str] = None,
271
- timesteps: Optional[pd.DatetimeIndex] = None,
272
- scenarios: Optional[pd.Index] = None,
273
- contains: Optional[Union[str, List[str]]] = None,
274
- startswith: Optional[Union[str, List[str]]] = None,
310
+ variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None,
311
+ element: str | None = None,
312
+ timesteps: pd.DatetimeIndex | None = None,
313
+ scenarios: pd.Index | None = None,
314
+ contains: str | list[str] | None = None,
315
+ startswith: str | list[str] | None = None,
275
316
  ) -> xr.Dataset:
276
- """
277
- Filter the solution to a specific variable dimension and element.
278
- If no element is specified, all elements are included.
317
+ """Filter solution by variable dimension and/or element.
279
318
 
280
319
  Args:
281
320
  variable_dims: The dimension of which to get variables from.
@@ -307,26 +346,30 @@ class CalculationResults:
307
346
  startswith=startswith,
308
347
  )
309
348
 
310
- def effects_per_component(self, mode: Literal['operation', 'invest', 'total'] = 'total') -> xr.Dataset:
311
- """Returns a dataset containing effect totals for each components (including their flows).
312
-
313
- Args:
314
- mode: Which effects to contain. (operation, invest, total)
349
+ @property
350
+ def effects_per_component(self) -> xr.Dataset:
351
+ """Returns a dataset containing effect results for each mode, aggregated by Component
315
352
 
316
353
  Returns:
317
354
  An xarray Dataset with an additional component dimension and effects as variables.
318
355
  """
319
- if mode not in ['operation', 'invest', 'total']:
320
- raise ValueError(f'Invalid mode {mode}')
321
- if self._effects_per_component[mode] is None:
322
- self._effects_per_component[mode] = self._create_effects_dataset(mode)
323
- return self._effects_per_component[mode]
356
+ if self._effects_per_component is None:
357
+ self._effects_per_component = xr.Dataset(
358
+ {
359
+ mode: self._create_effects_dataset(mode).to_dataarray('effect', name=mode)
360
+ for mode in ['temporal', 'periodic', 'total']
361
+ }
362
+ )
363
+ dim_order = ['time', 'period', 'scenario', 'component', 'effect']
364
+ self._effects_per_component = self._effects_per_component.transpose(*dim_order, missing_dims='ignore')
365
+
366
+ return self._effects_per_component
324
367
 
325
368
  def flow_rates(
326
369
  self,
327
- start: Optional[Union[str, List[str]]] = None,
328
- end: Optional[Union[str, List[str]]] = None,
329
- component: Optional[Union[str, List[str]]] = None,
370
+ start: str | list[str] | None = None,
371
+ end: str | list[str] | None = None,
372
+ component: str | list[str] | None = None,
330
373
  ) -> xr.DataArray:
331
374
  """Returns a DataArray containing the flow rates of each Flow.
332
375
 
@@ -347,17 +390,19 @@ class CalculationResults:
347
390
  """
348
391
  if self._flow_rates is None:
349
392
  self._flow_rates = self._assign_flow_coords(
350
- xr.concat([flow.flow_rate.rename(flow.label) for flow in self.flows.values()],
351
- dim=pd.Index(self.flows.keys(), name='flow'))
393
+ xr.concat(
394
+ [flow.flow_rate.rename(flow.label) for flow in self.flows.values()],
395
+ dim=pd.Index(self.flows.keys(), name='flow'),
396
+ )
352
397
  ).rename('flow_rates')
353
398
  filters = {k: v for k, v in {'start': start, 'end': end, 'component': component}.items() if v is not None}
354
399
  return filter_dataarray_by_coord(self._flow_rates, **filters)
355
400
 
356
401
  def flow_hours(
357
402
  self,
358
- start: Optional[Union[str, List[str]]] = None,
359
- end: Optional[Union[str, List[str]]] = None,
360
- component: Optional[Union[str, List[str]]] = None,
403
+ start: str | list[str] | None = None,
404
+ end: str | list[str] | None = None,
405
+ component: str | list[str] | None = None,
361
406
  ) -> xr.DataArray:
362
407
  """Returns a DataArray containing the flow hours of each Flow.
363
408
 
@@ -387,9 +432,9 @@ class CalculationResults:
387
432
 
388
433
  def sizes(
389
434
  self,
390
- start: Optional[Union[str, List[str]]] = None,
391
- end: Optional[Union[str, List[str]]] = None,
392
- component: Optional[Union[str, List[str]]] = None
435
+ start: str | list[str] | None = None,
436
+ end: str | list[str] | None = None,
437
+ component: str | list[str] | None = None,
393
438
  ) -> xr.DataArray:
394
439
  """Returns a dataset with the sizes of the Flows.
395
440
  Args:
@@ -406,56 +451,49 @@ class CalculationResults:
406
451
  """
407
452
  if self._sizes is None:
408
453
  self._sizes = self._assign_flow_coords(
409
- xr.concat([flow.size.rename(flow.label) for flow in self.flows.values()],
410
- dim=pd.Index(self.flows.keys(), name='flow'))
454
+ xr.concat(
455
+ [flow.size.rename(flow.label) for flow in self.flows.values()],
456
+ dim=pd.Index(self.flows.keys(), name='flow'),
457
+ )
411
458
  ).rename('flow_sizes')
412
459
  filters = {k: v for k, v in {'start': start, 'end': end, 'component': component}.items() if v is not None}
413
460
  return filter_dataarray_by_coord(self._sizes, **filters)
414
461
 
415
462
  def _assign_flow_coords(self, da: xr.DataArray):
416
463
  # Add start and end coordinates
417
- da = da.assign_coords({
418
- 'start': ('flow', [flow.start for flow in self.flows.values()]),
419
- 'end': ('flow', [flow.end for flow in self.flows.values()]),
420
- 'component': ('flow', [flow.component for flow in self.flows.values()]),
421
- })
464
+ da = da.assign_coords(
465
+ {
466
+ 'start': ('flow', [flow.start for flow in self.flows.values()]),
467
+ 'end': ('flow', [flow.end for flow in self.flows.values()]),
468
+ 'component': ('flow', [flow.component for flow in self.flows.values()]),
469
+ }
470
+ )
422
471
 
423
472
  # Ensure flow is the last dimension if needed
424
473
  existing_dims = [d for d in da.dims if d != 'flow']
425
474
  da = da.transpose(*(existing_dims + ['flow']))
426
475
  return da
427
476
 
428
- def _get_flow_network_info(self) -> Dict[str, Dict[str, str]]:
429
- flow_network_info = {}
430
-
431
- for flow in self.flows.values():
432
- flow_network_info[flow.label] = {
433
- 'label': flow.label,
434
- 'start': flow.start,
435
- 'end': flow.end,
436
- }
437
- return flow_network_info
438
-
439
477
  def get_effect_shares(
440
478
  self,
441
479
  element: str,
442
480
  effect: str,
443
- mode: Optional[Literal['operation', 'invest']] = None,
444
- include_flows: bool = False
481
+ mode: Literal['temporal', 'periodic'] | None = None,
482
+ include_flows: bool = False,
445
483
  ) -> xr.Dataset:
446
484
  """Retrieves individual effect shares for a specific element and effect.
447
- Either for operation, investment, or both modes combined.
485
+ Either for temporal, investment, or both modes combined.
448
486
  Only includes the direct shares.
449
487
 
450
488
  Args:
451
489
  element: The element identifier for which to retrieve effect shares.
452
490
  effect: The effect identifier for which to retrieve shares.
453
- mode: Optional. The mode to retrieve shares for. Can be 'operation', 'invest',
491
+ mode: Optional. The mode to retrieve shares for. Can be 'temporal', 'periodic',
454
492
  or None to retrieve both. Defaults to None.
455
493
 
456
494
  Returns:
457
495
  An xarray Dataset containing the requested effect shares. If mode is None,
458
- returns a merged Dataset containing both operation and investment shares.
496
+ returns a merged Dataset containing both temporal and investment shares.
459
497
 
460
498
  Raises:
461
499
  ValueError: If the specified effect is not available or if mode is invalid.
@@ -464,25 +502,38 @@ class CalculationResults:
464
502
  raise ValueError(f'Effect {effect} is not available.')
465
503
 
466
504
  if mode is None:
467
- return xr.merge([self.get_effect_shares(element=element, effect=effect, mode='operation', include_flows=include_flows),
468
- self.get_effect_shares(element=element, effect=effect, mode='invest', include_flows=include_flows)])
505
+ return xr.merge(
506
+ [
507
+ self.get_effect_shares(
508
+ element=element, effect=effect, mode='temporal', include_flows=include_flows
509
+ ),
510
+ self.get_effect_shares(
511
+ element=element, effect=effect, mode='periodic', include_flows=include_flows
512
+ ),
513
+ ]
514
+ )
469
515
 
470
- if mode not in ['operation', 'invest']:
471
- raise ValueError(f'Mode {mode} is not available. Choose between "operation" and "invest".')
516
+ if mode not in ['temporal', 'periodic']:
517
+ raise ValueError(f'Mode {mode} is not available. Choose between "temporal" and "periodic".')
472
518
 
473
519
  ds = xr.Dataset()
474
520
 
475
521
  label = f'{element}->{effect}({mode})'
476
522
  if label in self.solution:
477
- ds = xr.Dataset({label: self.solution[label]})
523
+ ds = xr.Dataset({label: self.solution[label]})
478
524
 
479
525
  if include_flows:
480
526
  if element not in self.components:
481
527
  raise ValueError(f'Only use Components when retrieving Effects including flows. Got {element}')
482
- flows = [label.split('|')[0] for label in self.components[element].inputs + self.components[element].outputs]
528
+ flows = [
529
+ label.split('|')[0] for label in self.components[element].inputs + self.components[element].outputs
530
+ ]
483
531
  return xr.merge(
484
- [ds] + [self.get_effect_shares(element=flow, effect=effect, mode=mode, include_flows=False)
485
- for flow in flows]
532
+ [ds]
533
+ + [
534
+ self.get_effect_shares(element=flow, effect=effect, mode=mode, include_flows=False)
535
+ for flow in flows
536
+ ]
486
537
  )
487
538
 
488
539
  return ds
@@ -491,7 +542,7 @@ class CalculationResults:
491
542
  self,
492
543
  element: str,
493
544
  effect: str,
494
- mode: Literal['operation', 'invest', 'total'] = 'total',
545
+ mode: Literal['temporal', 'periodic', 'total'] = 'total',
495
546
  include_flows: bool = False,
496
547
  ) -> xr.DataArray:
497
548
  """Calculates the total effect for a specific element and effect.
@@ -503,10 +554,9 @@ class CalculationResults:
503
554
  element: The element identifier for which to calculate total effects.
504
555
  effect: The effect identifier to calculate.
505
556
  mode: The calculation mode. Options are:
506
- 'operation': Returns operation-specific effects.
507
- 'invest': Returns investment-specific effects.
508
- 'total': Returns the sum of operation effects (across all timesteps)
509
- and investment effects. Defaults to 'total'.
557
+ 'temporal': Returns temporal effects.
558
+ 'periodic': Returns investment-specific effects.
559
+ 'total': Returns the sum of temporal effects and periodic effects. Defaults to 'total'.
510
560
  include_flows: Whether to include effects from flows connected to this element.
511
561
 
512
562
  Returns:
@@ -521,18 +571,22 @@ class CalculationResults:
521
571
  raise ValueError(f'Effect {effect} is not available.')
522
572
 
523
573
  if mode == 'total':
524
- operation = self._compute_effect_total(element=element, effect=effect, mode='operation', include_flows=include_flows)
525
- invest = self._compute_effect_total(element=element, effect=effect, mode='invest', include_flows=include_flows)
526
- if invest.isnull().all() and operation.isnull().all():
574
+ temporal = self._compute_effect_total(
575
+ element=element, effect=effect, mode='temporal', include_flows=include_flows
576
+ )
577
+ periodic = self._compute_effect_total(
578
+ element=element, effect=effect, mode='periodic', include_flows=include_flows
579
+ )
580
+ if periodic.isnull().all() and temporal.isnull().all():
527
581
  return xr.DataArray(np.nan)
528
- if operation.isnull().all():
529
- return invest.rename(f'{element}->{effect}')
530
- operation = operation.sum('time')
531
- if invest.isnull().all():
532
- return operation.rename(f'{element}->{effect}')
533
- if 'time' in operation.indexes:
534
- operation = operation.sum('time')
535
- return invest + operation
582
+ if temporal.isnull().all():
583
+ return periodic.rename(f'{element}->{effect}')
584
+ temporal = temporal.sum('time')
585
+ if periodic.isnull().all():
586
+ return temporal.rename(f'{element}->{effect}')
587
+ if 'time' in temporal.indexes:
588
+ temporal = temporal.sum('time')
589
+ return periodic + temporal
536
590
 
537
591
  total = xr.DataArray(0)
538
592
  share_exists = False
@@ -552,8 +606,9 @@ class CalculationResults:
552
606
  if include_flows:
553
607
  if element not in self.components:
554
608
  raise ValueError(f'Only use Components when retrieving Effects including flows. Got {element}')
555
- flows = [label.split('|')[0] for label in
556
- self.components[element].inputs + self.components[element].outputs]
609
+ flows = [
610
+ label.split('|')[0] for label in self.components[element].inputs + self.components[element].outputs
611
+ ]
557
612
  for flow in flows:
558
613
  label = f'{flow}->{target_effect}({mode})'
559
614
  if label in self.solution:
@@ -564,40 +619,60 @@ class CalculationResults:
564
619
  total = xr.DataArray(np.nan)
565
620
  return total.rename(f'{element}->{effect}({mode})')
566
621
 
567
- def _create_effects_dataset(self, mode: Literal['operation', 'invest', 'total'] = 'total') -> xr.Dataset:
622
+ def _create_effects_dataset(self, mode: Literal['temporal', 'periodic', 'total']) -> xr.Dataset:
568
623
  """Creates a dataset containing effect totals for all components (including their flows).
569
624
  The dataset does contain the direct as well as the indirect effects of each component.
570
625
 
571
626
  Args:
572
- mode: The calculation mode ('operation', 'invest', or 'total').
627
+ mode: The calculation mode ('temporal', 'periodic', or 'total').
573
628
 
574
629
  Returns:
575
630
  An xarray Dataset with components as dimension and effects as variables.
576
631
  """
577
- # Create an empty dataset
578
632
  ds = xr.Dataset()
633
+ all_arrays = {}
634
+ template = None # Template is needed to determine the dimensions of the arrays. This handles the case of no shares for an effect
579
635
 
580
- # Add each effect as a variable to the dataset
636
+ components_list = list(self.components)
637
+
638
+ # First pass: collect arrays and find template
581
639
  for effect in self.effects:
582
- # Create a list of DataArrays, one for each component
583
- component_arrays = [
584
- self._compute_effect_total(element=component, effect=effect, mode=mode, include_flows=True).expand_dims(
585
- component=[component]
586
- ) # Add component dimension to each array
587
- for component in list(self.components)
588
- ]
640
+ effect_arrays = []
641
+ for component in components_list:
642
+ da = self._compute_effect_total(element=component, effect=effect, mode=mode, include_flows=True)
643
+ effect_arrays.append(da)
644
+
645
+ if template is None and (da.dims or not da.isnull().all()):
646
+ template = da
647
+
648
+ all_arrays[effect] = effect_arrays
649
+
650
+ # Ensure we have a template
651
+ if template is None:
652
+ raise ValueError(
653
+ f"No template with proper dimensions found for mode '{mode}'. "
654
+ f'All computed arrays are scalars, which indicates a data issue.'
655
+ )
656
+
657
+ # Second pass: process all effects (guaranteed to include all)
658
+ for effect in self.effects:
659
+ dataarrays = all_arrays[effect]
660
+ component_arrays = []
661
+
662
+ for component, arr in zip(components_list, dataarrays, strict=False):
663
+ # Expand scalar NaN arrays to match template dimensions
664
+ if not arr.dims and np.isnan(arr.item()):
665
+ arr = xr.full_like(template, np.nan, dtype=float).rename(arr.name)
589
666
 
590
- # Combine all components into one DataArray for this effect
591
- if component_arrays:
592
- effect_array = xr.concat(component_arrays, dim='component', coords='minimal')
593
- # Add this effect as a variable to the dataset
594
- ds[effect] = effect_array
667
+ component_arrays.append(arr.expand_dims(component=[component]))
668
+
669
+ ds[effect] = xr.concat(component_arrays, dim='component', coords='minimal', join='outer').rename(effect)
595
670
 
596
671
  # For now include a test to ensure correctness
597
672
  suffix = {
598
- 'operation': '(operation)|total_per_timestep',
599
- 'invest': '(invest)|total',
600
- 'total': '|total',
673
+ 'temporal': '(temporal)|per_timestep',
674
+ 'periodic': '(periodic)',
675
+ 'total': '',
601
676
  }
602
677
  for effect in self.effects:
603
678
  label = f'{effect}{suffix[mode]}'
@@ -616,11 +691,11 @@ class CalculationResults:
616
691
  heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
617
692
  heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
618
693
  color_map: str = 'portland',
619
- save: Union[bool, pathlib.Path] = False,
694
+ save: bool | pathlib.Path = False,
620
695
  show: bool = True,
621
696
  engine: plotting.PlottingEngine = 'plotly',
622
- scenario: Optional[Union[str, int]] = None,
623
- ) -> Union[plotly.graph_objs.Figure, Tuple[plt.Figure, plt.Axes]]:
697
+ indexer: dict[FlowSystemDimensions, Any] | None = None,
698
+ ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
624
699
  """
625
700
  Plots a heatmap of the solution of a variable.
626
701
 
@@ -632,19 +707,40 @@ class CalculationResults:
632
707
  save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
633
708
  show: Whether to show the plot or not.
634
709
  engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
635
- scenario: The scenario to plot. Defaults to the first scenario. Has no effect without scenarios present
710
+ indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
711
+ If None, uses first value for each dimension.
712
+ If empty dict {}, uses all values.
713
+
714
+ Examples:
715
+ Basic usage (uses first scenario, first period, all time):
716
+
717
+ >>> results.plot_heatmap('Battery|charge_state')
718
+
719
+ Select specific scenario and period:
720
+
721
+ >>> results.plot_heatmap('Boiler(Qth)|flow_rate', indexer={'scenario': 'base', 'period': 2024})
722
+
723
+ Time filtering (summer months only):
724
+
725
+ >>> results.plot_heatmap(
726
+ ... 'Boiler(Qth)|flow_rate',
727
+ ... indexer={
728
+ ... 'scenario': 'base',
729
+ ... 'time': results.solution.time[results.solution.time.dt.month.isin([6, 7, 8])],
730
+ ... },
731
+ ... )
732
+
733
+ Save to specific location:
734
+
735
+ >>> results.plot_heatmap(
736
+ ... 'Boiler(Qth)|flow_rate', indexer={'scenario': 'base'}, save='path/to/my_heatmap.html'
737
+ ... )
636
738
  """
637
739
  dataarray = self.solution[variable_name]
638
740
 
639
- scenario_suffix = ''
640
- if 'scenario' in dataarray.indexes:
641
- chosen_scenario = scenario or self.scenarios[0]
642
- dataarray = dataarray.sel(scenario=chosen_scenario).drop_vars('scenario')
643
- scenario_suffix = f'--{chosen_scenario}'
644
-
645
741
  return plot_heatmap(
646
742
  dataarray=dataarray,
647
- name=f'{variable_name}{scenario_suffix}',
743
+ name=variable_name,
648
744
  folder=self.folder,
649
745
  heatmap_timeframes=heatmap_timeframes,
650
746
  heatmap_timesteps_per_frame=heatmap_timesteps_per_frame,
@@ -652,41 +748,47 @@ class CalculationResults:
652
748
  save=save,
653
749
  show=show,
654
750
  engine=engine,
751
+ indexer=indexer,
655
752
  )
656
753
 
657
754
  def plot_network(
658
755
  self,
659
- controls: Union[
660
- bool,
661
- List[
756
+ controls: (
757
+ bool
758
+ | list[
662
759
  Literal['nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer']
663
- ],
664
- ] = True,
665
- path: Optional[pathlib.Path] = None,
760
+ ]
761
+ ) = True,
762
+ path: pathlib.Path | None = None,
666
763
  show: bool = False,
667
- ) -> 'pyvis.network.Network':
668
- """See flixopt.flow_system.FlowSystem.plot_network"""
764
+ ) -> pyvis.network.Network | None:
765
+ """Plot interactive network visualization of the system.
766
+
767
+ Args:
768
+ controls: Enable/disable interactive controls.
769
+ path: Save path for network HTML.
770
+ show: Whether to display the plot.
771
+ """
669
772
  if path is None:
670
773
  path = self.folder / f'{self.name}--network.html'
671
774
  return self.flow_system.plot_network(controls=controls, path=path, show=show)
672
775
 
673
776
  def to_file(
674
777
  self,
675
- folder: Optional[Union[str, pathlib.Path]] = None,
676
- name: Optional[str] = None,
778
+ folder: str | pathlib.Path | None = None,
779
+ name: str | None = None,
677
780
  compression: int = 5,
678
781
  document_model: bool = True,
679
782
  save_linopy_model: bool = False,
680
783
  ):
681
- """
682
- Save the results to a file
784
+ """Save results to files.
785
+
683
786
  Args:
684
- folder: The folder where the results should be saved. Defaults to the folder of the calculation.
685
- name: The name of the results file. If not provided, Defaults to the name of the calculation.
686
- compression: The compression level to use when saving the solution file (0-9). 0 means no compression.
687
- document_model: Wether to document the mathematical formulations in the model.
688
- save_linopy_model: Wether to save the model to file. If True, the (linopy) model is saved as a .nc4 file.
689
- The model file size is rougly 100 times larger than the solution file.
787
+ folder: Save folder (defaults to calculation folder).
788
+ name: File name (defaults to calculation name).
789
+ compression: Compression level 0-9.
790
+ document_model: Whether to document model formulations as yaml.
791
+ save_linopy_model: Whether to save linopy model file.
690
792
  """
691
793
  folder = self.folder if folder is None else pathlib.Path(folder)
692
794
  name = self.name if name is None else name
@@ -710,7 +812,7 @@ class CalculationResults:
710
812
  if self.model is None:
711
813
  logger.critical('No model in the CalculationResults. Saving the model is not possible.')
712
814
  else:
713
- self.model.to_netcdf(paths.linopy_model)
815
+ self.model.to_netcdf(paths.linopy_model, engine='h5netcdf')
714
816
 
715
817
  if document_model:
716
818
  if self.model is None:
@@ -723,7 +825,7 @@ class CalculationResults:
723
825
 
724
826
  class _ElementResults:
725
827
  def __init__(
726
- self, calculation_results: CalculationResults, label: str, variables: List[str], constraints: List[str]
828
+ self, calculation_results: CalculationResults, label: str, variables: list[str], constraints: list[str]
727
829
  ):
728
830
  self._calculation_results = calculation_results
729
831
  self.label = label
@@ -734,11 +836,10 @@ class _ElementResults:
734
836
 
735
837
  @property
736
838
  def variables(self) -> linopy.Variables:
737
- """
738
- Returns the variables of the element.
839
+ """Get element variables (requires linopy model).
739
840
 
740
841
  Raises:
741
- ValueError: If the linopy model is not availlable.
842
+ ValueError: If linopy model is unavailable.
742
843
  """
743
844
  if self._calculation_results.model is None:
744
845
  raise ValueError('The linopy model is not available.')
@@ -746,11 +847,10 @@ class _ElementResults:
746
847
 
747
848
  @property
748
849
  def constraints(self) -> linopy.Constraints:
749
- """
750
- Returns the variables of the element.
850
+ """Get element constraints (requires linopy model).
751
851
 
752
852
  Raises:
753
- ValueError: If the linopy model is not availlable.
853
+ ValueError: If linopy model is unavailable.
754
854
  """
755
855
  if self._calculation_results.model is None:
756
856
  raise ValueError('The linopy model is not available.')
@@ -758,11 +858,11 @@ class _ElementResults:
758
858
 
759
859
  def filter_solution(
760
860
  self,
761
- variable_dims: Optional[Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly']] = None,
762
- timesteps: Optional[pd.DatetimeIndex] = None,
763
- scenarios: Optional[pd.Index] = None,
764
- contains: Optional[Union[str, List[str]]] = None,
765
- startswith: Optional[Union[str, List[str]]] = None,
861
+ variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None,
862
+ timesteps: pd.DatetimeIndex | None = None,
863
+ scenarios: pd.Index | None = None,
864
+ contains: str | list[str] | None = None,
865
+ startswith: str | list[str] | None = None,
766
866
  ) -> xr.Dataset:
767
867
  """
768
868
  Filter the solution to a specific variable dimension and element.
@@ -803,11 +903,11 @@ class _NodeResults(_ElementResults):
803
903
  self,
804
904
  calculation_results: CalculationResults,
805
905
  label: str,
806
- variables: List[str],
807
- constraints: List[str],
808
- inputs: List[str],
809
- outputs: List[str],
810
- flows: List[str],
906
+ variables: list[str],
907
+ constraints: list[str],
908
+ inputs: list[str],
909
+ outputs: list[str],
910
+ flows: list[str],
811
911
  ):
812
912
  super().__init__(calculation_results, label, variables, constraints)
813
913
  self.inputs = inputs
@@ -816,15 +916,15 @@ class _NodeResults(_ElementResults):
816
916
 
817
917
  def plot_node_balance(
818
918
  self,
819
- save: Union[bool, pathlib.Path] = False,
919
+ save: bool | pathlib.Path = False,
820
920
  show: bool = True,
821
921
  colors: plotting.ColorType = 'viridis',
822
922
  engine: plotting.PlottingEngine = 'plotly',
823
- scenario: Optional[Union[str, int]] = None,
923
+ indexer: dict[FlowSystemDimensions, Any] | None = None,
824
924
  mode: Literal['flow_rate', 'flow_hours'] = 'flow_rate',
825
925
  style: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar',
826
926
  drop_suffix: bool = True,
827
- ) -> Union[plotly.graph_objs.Figure, Tuple[plt.Figure, plt.Axes]]:
927
+ ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
828
928
  """
829
929
  Plots the node balance of the Component or Bus.
830
930
  Args:
@@ -832,20 +932,20 @@ class _NodeResults(_ElementResults):
832
932
  show: Whether to show the plot or not.
833
933
  colors: The colors to use for the plot. See `flixopt.plotting.ColorType` for options.
834
934
  engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
835
- scenario: The scenario to plot. Defaults to the first scenario. Has no effect without scenarios present
836
- mode: The mode to use for the dataset. Can be 'flow_rate' or 'flow_hours'.
935
+ indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
936
+ If None, uses first value for each dimension (except time).
937
+ If empty dict {}, uses all values.
938
+ style: The style to use for the dataset. Can be 'flow_rate' or 'flow_hours'.
837
939
  - 'flow_rate': Returns the flow_rates of the Node.
838
940
  - 'flow_hours': Returns the flow_hours of the Node. [flow_hours(t) = flow_rate(t) * dt(t)]. Renames suffixes to |flow_hours.
839
941
  drop_suffix: Whether to drop the suffix from the variable names.
840
942
  """
841
- ds = self.node_balance(with_last_timestep=True, mode=mode, drop_suffix=drop_suffix)
943
+ ds = self.node_balance(with_last_timestep=True, mode=mode, drop_suffix=drop_suffix, indexer=indexer)
842
944
 
843
- title = f'{self.label} (flow rates)' if mode == 'flow_rate' else f'{self.label} (flow hours)'
945
+ ds, suffix_parts = _apply_indexer_to_data(ds, indexer, drop=True)
946
+ suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
844
947
 
845
- if 'scenario' in ds.indexes:
846
- chosen_scenario = scenario or self._calculation_results.scenarios[0]
847
- ds = ds.sel(scenario=chosen_scenario).drop_vars('scenario')
848
- title = f'{title} - {chosen_scenario}'
948
+ title = f'{self.label} (flow rates){suffix}' if mode == 'flow_rate' else f'{self.label} (flow hours){suffix}'
849
949
 
850
950
  if engine == 'plotly':
851
951
  figure_like = plotting.with_plotly(
@@ -880,23 +980,22 @@ class _NodeResults(_ElementResults):
880
980
  lower_percentage_group: float = 5,
881
981
  colors: plotting.ColorType = 'viridis',
882
982
  text_info: str = 'percent+label+value',
883
- save: Union[bool, pathlib.Path] = False,
983
+ save: bool | pathlib.Path = False,
884
984
  show: bool = True,
885
985
  engine: plotting.PlottingEngine = 'plotly',
886
- scenario: Optional[Union[str, int]] = None,
887
- ) -> plotly.graph_objects.Figure:
888
- """
889
- Plots a pie chart of the flow hours of the inputs and outputs of buses or components.
890
-
986
+ indexer: dict[FlowSystemDimensions, Any] | None = None,
987
+ ) -> plotly.graph_objs.Figure | tuple[plt.Figure, list[plt.Axes]]:
988
+ """Plot pie chart of flow hours distribution.
891
989
  Args:
892
- colors: a colorscale or a list of colors to use for the plot
893
- lower_percentage_group: The percentage of flow_hours that is grouped in "Others" (0...100)
894
- text_info: What information to display on the pie plot
895
- save: Whether to save the figure.
896
- show: Whether to show the figure.
897
- engine: Plotting engine to use. Only 'plotly' is implemented atm.
898
- scenario: If scenarios are present: The scenario to plot. If None, the first scenario is used.
899
- drop_suffix: Whether to drop the suffix from the variable names.
990
+ lower_percentage_group: Percentage threshold for "Others" grouping.
991
+ colors: Color scheme. Also see plotly.
992
+ text_info: Information to display on pie slices.
993
+ save: Whether to save plot.
994
+ show: Whether to display plot.
995
+ engine: Plotting engine ('plotly' or 'matplotlib').
996
+ indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
997
+ If None, uses first value for each dimension.
998
+ If empty dict {}, uses all values.
900
999
  """
901
1000
  inputs = sanitize_dataset(
902
1001
  ds=self.solution[self.inputs] * self._calculation_results.hours_per_timestep,
@@ -912,16 +1011,15 @@ class _NodeResults(_ElementResults):
912
1011
  zero_small_values=True,
913
1012
  drop_suffix='|',
914
1013
  )
915
- inputs = inputs.sum('time')
916
- outputs = outputs.sum('time')
917
1014
 
918
- title = f'{self.label} (total flow hours)'
1015
+ inputs, suffix_parts = _apply_indexer_to_data(inputs, indexer, drop=True)
1016
+ outputs, suffix_parts = _apply_indexer_to_data(outputs, indexer, drop=True)
1017
+ suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
919
1018
 
920
- if 'scenario' in inputs.indexes:
921
- chosen_scenario = scenario or self._calculation_results.scenarios[0]
922
- inputs = inputs.sel(scenario=chosen_scenario).drop_vars('scenario')
923
- outputs = outputs.sel(scenario=chosen_scenario).drop_vars('scenario')
924
- title = f'{title} - {chosen_scenario}'
1019
+ title = f'{self.label} (total flow hours){suffix}'
1020
+
1021
+ inputs = inputs.sum('time')
1022
+ outputs = outputs.sum('time')
925
1023
 
926
1024
  if engine == 'plotly':
927
1025
  figure_like = plotting.dual_pie_with_plotly(
@@ -963,10 +1061,11 @@ class _NodeResults(_ElementResults):
963
1061
  self,
964
1062
  negate_inputs: bool = True,
965
1063
  negate_outputs: bool = False,
966
- threshold: Optional[float] = 1e-5,
1064
+ threshold: float | None = 1e-5,
967
1065
  with_last_timestep: bool = False,
968
1066
  mode: Literal['flow_rate', 'flow_hours'] = 'flow_rate',
969
1067
  drop_suffix: bool = False,
1068
+ indexer: dict[FlowSystemDimensions, Any] | None = None,
970
1069
  ) -> xr.Dataset:
971
1070
  """
972
1071
  Returns a dataset with the node balance of the Component or Bus.
@@ -979,6 +1078,9 @@ class _NodeResults(_ElementResults):
979
1078
  - 'flow_rate': Returns the flow_rates of the Node.
980
1079
  - 'flow_hours': Returns the flow_hours of the Node. [flow_hours(t) = flow_rate(t) * dt(t)]. Renames suffixes to |flow_hours.
981
1080
  drop_suffix: Whether to drop the suffix from the variable names.
1081
+ indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
1082
+ If None, uses first value for each dimension.
1083
+ If empty dict {}, uses all values.
982
1084
  """
983
1085
  ds = self.solution[self.inputs + self.outputs]
984
1086
 
@@ -998,6 +1100,8 @@ class _NodeResults(_ElementResults):
998
1100
  drop_suffix='|' if drop_suffix else None,
999
1101
  )
1000
1102
 
1103
+ ds, _ = _apply_indexer_to_data(ds, indexer, drop=True)
1104
+
1001
1105
  if mode == 'flow_hours':
1002
1106
  ds = ds * self._calculation_results.hours_per_timestep
1003
1107
  ds = ds.rename_vars({var: var.replace('flow_rate', 'flow_hours') for var in ds.data_vars})
@@ -1006,11 +1110,11 @@ class _NodeResults(_ElementResults):
1006
1110
 
1007
1111
 
1008
1112
  class BusResults(_NodeResults):
1009
- """Results for a Bus"""
1113
+ """Results container for energy/material balance nodes in the system."""
1010
1114
 
1011
1115
 
1012
1116
  class ComponentResults(_NodeResults):
1013
- """Results for a Component"""
1117
+ """Results container for individual system components with specialized analysis tools."""
1014
1118
 
1015
1119
  @property
1016
1120
  def is_storage(self) -> bool:
@@ -1022,51 +1126,53 @@ class ComponentResults(_NodeResults):
1022
1126
 
1023
1127
  @property
1024
1128
  def charge_state(self) -> xr.DataArray:
1025
- """Get the solution of the charge state of the Storage."""
1129
+ """Get storage charge state solution."""
1026
1130
  if not self.is_storage:
1027
1131
  raise ValueError(f'Cant get charge_state. "{self.label}" is not a storage')
1028
1132
  return self.solution[self._charge_state]
1029
1133
 
1030
1134
  def plot_charge_state(
1031
1135
  self,
1032
- save: Union[bool, pathlib.Path] = False,
1136
+ save: bool | pathlib.Path = False,
1033
1137
  show: bool = True,
1034
1138
  colors: plotting.ColorType = 'viridis',
1035
1139
  engine: plotting.PlottingEngine = 'plotly',
1036
1140
  style: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar',
1037
- scenario: Optional[Union[str, int]] = None,
1141
+ indexer: dict[FlowSystemDimensions, Any] | None = None,
1038
1142
  ) -> plotly.graph_objs.Figure:
1039
- """
1040
- Plots the charge state of a Storage.
1143
+ """Plot storage charge state over time, combined with the node balance.
1144
+
1041
1145
  Args:
1042
1146
  save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
1043
1147
  show: Whether to show the plot or not.
1044
- colors: The c
1148
+ colors: Color scheme. Also see plotly.
1045
1149
  engine: Plotting engine to use. Only 'plotly' is implemented atm.
1046
- style: The plotting mode for the flow_rate
1047
- scenario: The scenario to plot. Defaults to the first scenario. Has no effect without scenarios present
1150
+ style: The colors to use for the plot. See `flixopt.plotting.ColorType` for options.
1151
+ indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
1152
+ If None, uses first value for each dimension.
1153
+ If empty dict {}, uses all values.
1048
1154
 
1049
1155
  Raises:
1050
- ValueError: If the Component is not a Storage.
1156
+ ValueError: If component is not a storage.
1051
1157
  """
1052
1158
  if not self.is_storage:
1053
1159
  raise ValueError(f'Cant plot charge_state. "{self.label}" is not a storage')
1054
1160
 
1055
- ds = self.node_balance(with_last_timestep=True)
1161
+ ds = self.node_balance(with_last_timestep=True, indexer=indexer)
1056
1162
  charge_state = self.charge_state
1057
1163
 
1058
- scenario_suffix = ''
1059
- if 'scenario' in ds.indexes:
1060
- chosen_scenario = scenario or self._calculation_results.scenarios[0]
1061
- ds = ds.sel(scenario=chosen_scenario).drop_vars('scenario')
1062
- charge_state = charge_state.sel(scenario=chosen_scenario).drop_vars('scenario')
1063
- scenario_suffix = f'--{chosen_scenario}'
1164
+ ds, suffix_parts = _apply_indexer_to_data(ds, indexer, drop=True)
1165
+ charge_state, suffix_parts = _apply_indexer_to_data(charge_state, indexer, drop=True)
1166
+ suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
1167
+
1168
+ title = f'Operation Balance of {self.label}{suffix}'
1169
+
1064
1170
  if engine == 'plotly':
1065
1171
  fig = plotting.with_plotly(
1066
1172
  ds.to_dataframe(),
1067
1173
  colors=colors,
1068
1174
  style=style,
1069
- title=f'Operation Balance of {self.label}{scenario_suffix}',
1175
+ title=title,
1070
1176
  )
1071
1177
 
1072
1178
  # TODO: Use colors for charge state?
@@ -1077,12 +1183,12 @@ class ComponentResults(_NodeResults):
1077
1183
  x=charge_state.index, y=charge_state.values.flatten(), mode='lines', name=self._charge_state
1078
1184
  )
1079
1185
  )
1080
- elif engine=='matplotlib':
1186
+ elif engine == 'matplotlib':
1081
1187
  fig, ax = plotting.with_matplotlib(
1082
1188
  ds.to_dataframe(),
1083
1189
  colors=colors,
1084
1190
  style=style,
1085
- title=f'Operation Balance of {self.label}{scenario_suffix}',
1191
+ title=title,
1086
1192
  )
1087
1193
 
1088
1194
  charge_state = charge_state.to_dataframe()
@@ -1092,7 +1198,7 @@ class ComponentResults(_NodeResults):
1092
1198
 
1093
1199
  return plotting.export_figure(
1094
1200
  fig,
1095
- default_path=self._calculation_results.folder / f'{self.label} (charge state){scenario_suffix}',
1201
+ default_path=self._calculation_results.folder / title,
1096
1202
  default_filetype='.html',
1097
1203
  user_path=None if isinstance(save, bool) else pathlib.Path(save),
1098
1204
  show=show,
@@ -1100,17 +1206,20 @@ class ComponentResults(_NodeResults):
1100
1206
  )
1101
1207
 
1102
1208
  def node_balance_with_charge_state(
1103
- self, negate_inputs: bool = True, negate_outputs: bool = False, threshold: Optional[float] = 1e-5
1209
+ self, negate_inputs: bool = True, negate_outputs: bool = False, threshold: float | None = 1e-5
1104
1210
  ) -> xr.Dataset:
1105
- """
1106
- Returns a dataset with the node balance of the Storage including its charge state.
1211
+ """Get storage node balance including charge state.
1212
+
1107
1213
  Args:
1108
- negate_inputs: Whether to negate the inputs of the Storage.
1109
- negate_outputs: Whether to negate the outputs of the Storage.
1110
- threshold: The threshold for small values.
1214
+ negate_inputs: Whether to negate input flows.
1215
+ negate_outputs: Whether to negate output flows.
1216
+ threshold: Threshold for small values.
1217
+
1218
+ Returns:
1219
+ xr.Dataset: Node balance with charge state.
1111
1220
 
1112
1221
  Raises:
1113
- ValueError: If the Component is not a Storage.
1222
+ ValueError: If component is not a storage.
1114
1223
  """
1115
1224
  if not self.is_storage:
1116
1225
  raise ValueError(f'Cant get charge_state. "{self.label}" is not a storage')
@@ -1135,7 +1244,14 @@ class EffectResults(_ElementResults):
1135
1244
  """Results for an Effect"""
1136
1245
 
1137
1246
  def get_shares_from(self, element: str):
1138
- """Get the shares from an Element (without subelements) to the Effect"""
1247
+ """Get effect shares from specific element.
1248
+
1249
+ Args:
1250
+ element: Element label to get shares from.
1251
+
1252
+ Returns:
1253
+ xr.Dataset: Element shares to this effect.
1254
+ """
1139
1255
  return self.solution[[name for name in self._variable_names if name.startswith(f'{element}->')]]
1140
1256
 
1141
1257
 
@@ -1144,8 +1260,8 @@ class FlowResults(_ElementResults):
1144
1260
  self,
1145
1261
  calculation_results: CalculationResults,
1146
1262
  label: str,
1147
- variables: List[str],
1148
- constraints: List[str],
1263
+ variables: list[str],
1264
+ constraints: list[str],
1149
1265
  start: str,
1150
1266
  end: str,
1151
1267
  component: str,
@@ -1169,22 +1285,110 @@ class FlowResults(_ElementResults):
1169
1285
  if name in self.solution:
1170
1286
  return self.solution[name]
1171
1287
  try:
1172
- return DataConverter.as_dataarray(
1173
- self._calculation_results.flow_system.flows[self.label].size,
1174
- scenarios=self._calculation_results.scenarios
1175
- ).rename(name)
1288
+ return self._calculation_results.flow_system.flows[self.label].size.rename(name)
1176
1289
  except _FlowSystemRestorationError:
1177
1290
  logger.critical(f'Size of flow {self.label}.size not availlable. Returning NaN')
1178
1291
  return xr.DataArray(np.nan).rename(name)
1179
1292
 
1180
1293
 
1181
1294
  class SegmentedCalculationResults:
1182
- """
1183
- Class to store the results of a SegmentedCalculation.
1295
+ """Results container for segmented optimization calculations with temporal decomposition.
1296
+
1297
+ This class manages results from SegmentedCalculation runs where large optimization
1298
+ problems are solved by dividing the time horizon into smaller, overlapping segments.
1299
+ It provides unified access to results across all segments while maintaining the
1300
+ ability to analyze individual segment behavior.
1301
+
1302
+ Key Features:
1303
+ **Unified Time Series**: Automatically assembles results from all segments into
1304
+ continuous time series, removing overlaps and boundary effects
1305
+ **Segment Analysis**: Access individual segment results for debugging and validation
1306
+ **Consistency Checks**: Verify solution continuity at segment boundaries
1307
+ **Memory Efficiency**: Handles large datasets that exceed single-segment memory limits
1308
+
1309
+ Temporal Handling:
1310
+ The class manages the complex task of combining overlapping segment solutions
1311
+ into coherent time series, ensuring proper treatment of:
1312
+ - Storage state continuity between segments
1313
+ - Flow rate transitions at segment boundaries
1314
+ - Aggregated results over the full time horizon
1315
+
1316
+ Examples:
1317
+ Load and analyze segmented results:
1318
+
1319
+ ```python
1320
+ # Load segmented calculation results
1321
+ results = SegmentedCalculationResults.from_file('results', 'annual_segmented')
1322
+
1323
+ # Access unified results across all segments
1324
+ full_timeline = results.all_timesteps
1325
+ total_segments = len(results.segment_results)
1326
+
1327
+ # Analyze individual segments
1328
+ for i, segment in enumerate(results.segment_results):
1329
+ print(f'Segment {i + 1}: {len(segment.solution.time)} timesteps')
1330
+ segment_costs = segment.effects['cost'].total_value
1331
+
1332
+ # Check solution continuity at boundaries
1333
+ segment_boundaries = results.get_boundary_analysis()
1334
+ max_discontinuity = segment_boundaries['max_storage_jump']
1335
+ ```
1336
+
1337
+ Create from segmented calculation:
1338
+
1339
+ ```python
1340
+ # After running segmented calculation
1341
+ segmented_calc = SegmentedCalculation(
1342
+ name='annual_system',
1343
+ flow_system=system,
1344
+ timesteps_per_segment=730, # Monthly segments
1345
+ overlap_timesteps=48, # 2-day overlap
1346
+ )
1347
+ segmented_calc.do_modeling_and_solve(solver='gurobi')
1348
+
1349
+ # Extract unified results
1350
+ results = SegmentedCalculationResults.from_calculation(segmented_calc)
1351
+
1352
+ # Save combined results
1353
+ results.to_file(compression=5)
1354
+ ```
1355
+
1356
+ Performance analysis across segments:
1357
+
1358
+ ```python
1359
+ # Compare segment solve times
1360
+ solve_times = [seg.summary['durations']['solving'] for seg in results.segment_results]
1361
+ avg_solve_time = sum(solve_times) / len(solve_times)
1362
+
1363
+ # Verify solution quality consistency
1364
+ segment_objectives = [seg.summary['objective_value'] for seg in results.segment_results]
1365
+
1366
+ # Storage continuity analysis
1367
+ if 'Battery' in results.segment_results[0].components:
1368
+ storage_continuity = results.check_storage_continuity('Battery')
1369
+ ```
1370
+
1371
+ Design Considerations:
1372
+ **Boundary Effects**: Monitor solution quality at segment interfaces where
1373
+ foresight is limited compared to full-horizon optimization.
1374
+
1375
+ **Memory Management**: Individual segment results are maintained for detailed
1376
+ analysis while providing unified access for system-wide metrics.
1377
+
1378
+ **Validation Tools**: Built-in methods to verify temporal consistency and
1379
+ identify potential issues from segmentation approach.
1380
+
1381
+ Common Use Cases:
1382
+ - **Large-Scale Analysis**: Annual or multi-period optimization results
1383
+ - **Memory-Constrained Systems**: Results from systems exceeding hardware limits
1384
+ - **Segment Validation**: Verifying segmentation approach effectiveness
1385
+ - **Performance Monitoring**: Comparing segmented vs. full-horizon solutions
1386
+ - **Debugging**: Identifying issues specific to temporal decomposition
1387
+
1184
1388
  """
1185
1389
 
1186
1390
  @classmethod
1187
- def from_calculation(cls, calculation: 'SegmentedCalculation'):
1391
+ def from_calculation(cls, calculation: SegmentedCalculation):
1188
1392
  return cls(
1189
1393
  [calc.results for calc in calculation.sub_calculations],
1190
1394
  all_timesteps=calculation.all_timesteps,
@@ -1195,16 +1399,23 @@ class SegmentedCalculationResults:
1195
1399
  )
1196
1400
 
1197
1401
  @classmethod
1198
- def from_file(cls, folder: Union[str, pathlib.Path], name: str):
1199
- """Create SegmentedCalculationResults directly from file"""
1402
+ def from_file(cls, folder: str | pathlib.Path, name: str):
1403
+ """Load SegmentedCalculationResults from saved files.
1404
+
1405
+ Args:
1406
+ folder: Directory containing saved files.
1407
+ name: Base name of saved files.
1408
+
1409
+ Returns:
1410
+ SegmentedCalculationResults: Loaded instance.
1411
+ """
1200
1412
  folder = pathlib.Path(folder)
1201
1413
  path = folder / name
1202
- nc_file = path.with_suffix('.nc4')
1203
- logger.info(f'loading calculation "{name}" from file ("{nc_file}")')
1204
- with open(path.with_suffix('.json'), 'r', encoding='utf-8') as f:
1414
+ logger.info(f'loading calculation "{name}" from file ("{path.with_suffix(".nc4")}")')
1415
+ with open(path.with_suffix('.json'), encoding='utf-8') as f:
1205
1416
  meta_data = json.load(f)
1206
1417
  return cls(
1207
- [CalculationResults.from_file(folder, name) for name in meta_data['sub_calculations']],
1418
+ [CalculationResults.from_file(folder, sub_name) for sub_name in meta_data['sub_calculations']],
1208
1419
  all_timesteps=pd.DatetimeIndex(
1209
1420
  [datetime.datetime.fromisoformat(date) for date in meta_data['all_timesteps']], name='time'
1210
1421
  ),
@@ -1216,12 +1427,12 @@ class SegmentedCalculationResults:
1216
1427
 
1217
1428
  def __init__(
1218
1429
  self,
1219
- segment_results: List[CalculationResults],
1430
+ segment_results: list[CalculationResults],
1220
1431
  all_timesteps: pd.DatetimeIndex,
1221
1432
  timesteps_per_segment: int,
1222
1433
  overlap_timesteps: int,
1223
1434
  name: str,
1224
- folder: Optional[pathlib.Path] = None,
1435
+ folder: pathlib.Path | None = None,
1225
1436
  ):
1226
1437
  self.segment_results = segment_results
1227
1438
  self.all_timesteps = all_timesteps
@@ -1229,10 +1440,10 @@ class SegmentedCalculationResults:
1229
1440
  self.overlap_timesteps = overlap_timesteps
1230
1441
  self.name = name
1231
1442
  self.folder = pathlib.Path(folder) if folder is not None else pathlib.Path.cwd() / 'results'
1232
- self.hours_per_timestep = TimeSeriesCollection.calculate_hours_per_timestep(self.all_timesteps)
1443
+ self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.all_timesteps)
1233
1444
 
1234
1445
  @property
1235
- def meta_data(self) -> Dict[str, Union[int, List[str]]]:
1446
+ def meta_data(self) -> dict[str, int | list[str]]:
1236
1447
  return {
1237
1448
  'all_timesteps': [datetime.datetime.isoformat(date) for date in self.all_timesteps],
1238
1449
  'timesteps_per_segment': self.timesteps_per_segment,
@@ -1241,11 +1452,18 @@ class SegmentedCalculationResults:
1241
1452
  }
1242
1453
 
1243
1454
  @property
1244
- def segment_names(self) -> List[str]:
1455
+ def segment_names(self) -> list[str]:
1245
1456
  return [segment.name for segment in self.segment_results]
1246
1457
 
1247
1458
  def solution_without_overlap(self, variable_name: str) -> xr.DataArray:
1248
- """Returns the solution of a variable without overlapping timesteps"""
1459
+ """Get variable solution removing segment overlaps.
1460
+
1461
+ Args:
1462
+ variable_name: Name of variable to extract.
1463
+
1464
+ Returns:
1465
+ xr.DataArray: Continuous solution without overlaps.
1466
+ """
1249
1467
  dataarrays = [
1250
1468
  result.solution[variable_name].isel(time=slice(None, self.timesteps_per_segment))
1251
1469
  for result in self.segment_results[:-1]
@@ -1258,21 +1476,23 @@ class SegmentedCalculationResults:
1258
1476
  heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
1259
1477
  heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
1260
1478
  color_map: str = 'portland',
1261
- save: Union[bool, pathlib.Path] = False,
1479
+ save: bool | pathlib.Path = False,
1262
1480
  show: bool = True,
1263
1481
  engine: plotting.PlottingEngine = 'plotly',
1264
- ) -> Union[plotly.graph_objs.Figure, Tuple[plt.Figure, plt.Axes]]:
1265
- """
1266
- Plots a heatmap of the solution of a variable.
1482
+ ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
1483
+ """Plot heatmap of variable solution across segments.
1267
1484
 
1268
1485
  Args:
1269
- variable_name: The name of the variable to plot.
1270
- heatmap_timeframes: The timeframes to use for the heatmap.
1271
- heatmap_timesteps_per_frame: The timesteps per frame to use for the heatmap.
1272
- color_map: The color map to use for the heatmap.
1273
- save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
1274
- show: Whether to show the plot or not.
1275
- engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
1486
+ variable_name: Variable to plot.
1487
+ heatmap_timeframes: Time aggregation level.
1488
+ heatmap_timesteps_per_frame: Timesteps per frame.
1489
+ color_map: Color scheme. Also see plotly.
1490
+ save: Whether to save plot.
1491
+ show: Whether to display plot.
1492
+ engine: Plotting engine.
1493
+
1494
+ Returns:
1495
+ Figure object.
1276
1496
  """
1277
1497
  return plot_heatmap(
1278
1498
  dataarray=self.solution_without_overlap(variable_name),
@@ -1286,10 +1506,14 @@ class SegmentedCalculationResults:
1286
1506
  engine=engine,
1287
1507
  )
1288
1508
 
1289
- def to_file(
1290
- self, folder: Optional[Union[str, pathlib.Path]] = None, name: Optional[str] = None, compression: int = 5
1291
- ):
1292
- """Save the results to a file"""
1509
+ def to_file(self, folder: str | pathlib.Path | None = None, name: str | None = None, compression: int = 5):
1510
+ """Save segmented results to files.
1511
+
1512
+ Args:
1513
+ folder: Save folder (defaults to instance folder).
1514
+ name: File name (defaults to instance name).
1515
+ compression: Compression level 0-9.
1516
+ """
1293
1517
  folder = self.folder if folder is None else pathlib.Path(folder)
1294
1518
  name = self.name if name is None else name
1295
1519
  path = folder / name
@@ -1301,7 +1525,7 @@ class SegmentedCalculationResults:
1301
1525
  f'Folder {folder} and its parent do not exist. Please create them first.'
1302
1526
  ) from e
1303
1527
  for segment in self.segment_results:
1304
- segment.to_file(folder=folder, name=f'{name}-{segment.name}', compression=compression)
1528
+ segment.to_file(folder=folder, name=segment.name, compression=compression)
1305
1529
 
1306
1530
  with open(path.with_suffix('.json'), 'w', encoding='utf-8') as f:
1307
1531
  json.dump(self.meta_data, f, indent=4, ensure_ascii=False)
@@ -1315,24 +1539,31 @@ def plot_heatmap(
1315
1539
  heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
1316
1540
  heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
1317
1541
  color_map: str = 'portland',
1318
- save: Union[bool, pathlib.Path] = False,
1542
+ save: bool | pathlib.Path = False,
1319
1543
  show: bool = True,
1320
1544
  engine: plotting.PlottingEngine = 'plotly',
1545
+ indexer: dict[str, Any] | None = None,
1321
1546
  ):
1322
- """
1323
- Plots a heatmap of the solution of a variable.
1547
+ """Plot heatmap of time series data.
1324
1548
 
1325
1549
  Args:
1326
- dataarray: The dataarray to plot.
1327
- name: The name of the variable to plot.
1328
- folder: The folder to save the plot to.
1329
- heatmap_timeframes: The timeframes to use for the heatmap.
1330
- heatmap_timesteps_per_frame: The timesteps per frame to use for the heatmap.
1331
- color_map: The color map to use for the heatmap.
1332
- save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
1333
- show: Whether to show the plot or not.
1334
- engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
1550
+ dataarray: Data to plot.
1551
+ name: Variable name for title.
1552
+ folder: Save folder.
1553
+ heatmap_timeframes: Time aggregation level.
1554
+ heatmap_timesteps_per_frame: Timesteps per frame.
1555
+ color_map: Color scheme. Also see plotly.
1556
+ save: Whether to save plot.
1557
+ show: Whether to display plot.
1558
+ engine: Plotting engine.
1559
+ indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
1560
+ If None, uses first value for each dimension.
1561
+ If empty dict {}, uses all values.
1335
1562
  """
1563
+ dataarray, suffix_parts = _apply_indexer_to_data(dataarray, indexer, drop=True)
1564
+ suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
1565
+ name = name if not suffix_parts else name + suffix
1566
+
1336
1567
  heatmap_data = plotting.heat_map_data_from_df(
1337
1568
  dataarray.to_dataframe(name), heatmap_timeframes, heatmap_timesteps_per_frame, 'ffill'
1338
1569
  )
@@ -1364,27 +1595,23 @@ def plot_heatmap(
1364
1595
 
1365
1596
  def sanitize_dataset(
1366
1597
  ds: xr.Dataset,
1367
- timesteps: Optional[pd.DatetimeIndex] = None,
1368
- threshold: Optional[float] = 1e-5,
1369
- negate: Optional[List[str]] = None,
1598
+ timesteps: pd.DatetimeIndex | None = None,
1599
+ threshold: float | None = 1e-5,
1600
+ negate: list[str] | None = None,
1370
1601
  drop_small_vars: bool = True,
1371
1602
  zero_small_values: bool = False,
1372
- drop_suffix: Optional[str] = None,
1603
+ drop_suffix: str | None = None,
1373
1604
  ) -> xr.Dataset:
1374
- """
1375
- Sanitizes a dataset by handling small values (dropping or zeroing) and optionally reindexing the time axis.
1605
+ """Clean dataset by handling small values and reindexing time.
1376
1606
 
1377
1607
  Args:
1378
- ds: The dataset to sanitize.
1379
- timesteps: The timesteps to reindex the dataset to. If None, the original timesteps are kept.
1380
- threshold: The threshold for small values processing. If None, no processing is done.
1381
- negate: The variables to negate. If None, no variables are negated.
1382
- drop_small_vars: If True, drops variables where all values are below threshold.
1383
- zero_small_values: If True, sets values below threshold to zero.
1608
+ ds: Dataset to sanitize.
1609
+ timesteps: Time index for reindexing (optional).
1610
+ threshold: Threshold for small values processing.
1611
+ negate: Variables to negate.
1612
+ drop_small_vars: Whether to drop variables below threshold.
1613
+ zero_small_values: Whether to zero values below threshold.
1384
1614
  drop_suffix: Drop suffix of data var names. Split by the provided str.
1385
-
1386
- Returns:
1387
- xr.Dataset: The sanitized dataset.
1388
1615
  """
1389
1616
  # Create a copy to avoid modifying the original
1390
1617
  ds = ds.copy()
@@ -1401,7 +1628,7 @@ def sanitize_dataset(
1401
1628
 
1402
1629
  # Option 1: Drop variables where all values are below threshold
1403
1630
  if drop_small_vars:
1404
- vars_to_drop = [var for var in ds.data_vars if (ds_no_nan_abs[var] <= threshold).all()]
1631
+ vars_to_drop = [var for var in ds.data_vars if (ds_no_nan_abs[var] <= threshold).all().item()]
1405
1632
  ds = ds.drop_vars(vars_to_drop)
1406
1633
 
1407
1634
  # Option 2: Set small values to zero
@@ -1410,7 +1637,7 @@ def sanitize_dataset(
1410
1637
  # Create a boolean mask of values below threshold
1411
1638
  mask = ds_no_nan_abs[var] <= threshold
1412
1639
  # Only proceed if there are values to zero out
1413
- if mask.any():
1640
+ if bool(mask.any().item()):
1414
1641
  # Create a copy to ensure we don't modify data with views
1415
1642
  ds[var] = ds[var].copy()
1416
1643
  # Set values below threshold to zero
@@ -1439,14 +1666,13 @@ def sanitize_dataset(
1439
1666
 
1440
1667
  def filter_dataset(
1441
1668
  ds: xr.Dataset,
1442
- variable_dims: Optional[Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly']] = None,
1443
- timesteps: Optional[Union[pd.DatetimeIndex, str, pd.Timestamp]] = None,
1444
- scenarios: Optional[Union[pd.Index, str, int]] = None,
1445
- contains: Optional[Union[str, List[str]]] = None,
1446
- startswith: Optional[Union[str, List[str]]] = None,
1669
+ variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None,
1670
+ timesteps: pd.DatetimeIndex | str | pd.Timestamp | None = None,
1671
+ scenarios: pd.Index | str | int | None = None,
1672
+ contains: str | list[str] | None = None,
1673
+ startswith: str | list[str] | None = None,
1447
1674
  ) -> xr.Dataset:
1448
- """
1449
- Filters a dataset by its dimensions, indexes, and with string filters for variable names.
1675
+ """Filter dataset by variable dimensions, indexes, and with string filters for variable names.
1450
1676
 
1451
1677
  Args:
1452
1678
  ds: The dataset to filter.
@@ -1468,9 +1694,6 @@ def filter_dataset(
1468
1694
  If a list is provided, variables must contain ALL strings in the list.
1469
1695
  startswith: Filter variables that start with this string or strings.
1470
1696
  If a list is provided, variables must start with ANY of the strings in the list.
1471
-
1472
- Returns:
1473
- Filtered dataset with specified variables and indexes.
1474
1697
  """
1475
1698
  # First filter by dimensions
1476
1699
  filtered_ds = ds.copy()
@@ -1537,10 +1760,7 @@ def filter_dataset(
1537
1760
  return filtered_ds
1538
1761
 
1539
1762
 
1540
- def filter_dataarray_by_coord(
1541
- da: xr.DataArray,
1542
- **kwargs: Optional[Union[str, List[str]]]
1543
- ) -> xr.DataArray:
1763
+ def filter_dataarray_by_coord(da: xr.DataArray, **kwargs: str | list[str] | None) -> xr.DataArray:
1544
1764
  """Filter flows by node and component attributes.
1545
1765
 
1546
1766
  Filters are applied in the order they are specified. All filters must match for an edge to be included.
@@ -1560,8 +1780,9 @@ def filter_dataarray_by_coord(
1560
1780
  AttributeError: If required coordinates are missing.
1561
1781
  ValueError: If specified nodes don't exist or no matches found.
1562
1782
  """
1783
+
1563
1784
  # Helper function to process filters
1564
- def apply_filter(array, coord_name: str, coord_values: Union[Any, List[Any]]):
1785
+ def apply_filter(array, coord_name: str, coord_values: Any | list[Any]):
1565
1786
  # Verify coord exists
1566
1787
  if coord_name not in array.coords:
1567
1788
  raise AttributeError(f"Missing required coordinate '{coord_name}'")
@@ -1573,12 +1794,12 @@ def filter_dataarray_by_coord(
1573
1794
  available = set(array[coord_name].values)
1574
1795
  missing = [v for v in val_list if v not in available]
1575
1796
  if missing:
1576
- raise ValueError(f"{coord_name.title()} value(s) not found: {missing}")
1797
+ raise ValueError(f'{coord_name.title()} value(s) not found: {missing}')
1577
1798
 
1578
1799
  # Apply filter
1579
1800
  return array.where(
1580
1801
  array[coord_name].isin(val_list) if isinstance(coord_values, list) else array[coord_name] == coord_values,
1581
- drop=True
1802
+ drop=True,
1582
1803
  )
1583
1804
 
1584
1805
  # Apply filters from kwargs
@@ -1587,10 +1808,45 @@ def filter_dataarray_by_coord(
1587
1808
  for coord, values in filters.items():
1588
1809
  da = apply_filter(da, coord, values)
1589
1810
  except ValueError as e:
1590
- raise ValueError(f"No edges match criteria: {filters}") from e
1811
+ raise ValueError(f'No edges match criteria: {filters}') from e
1591
1812
 
1592
1813
  # Verify results exist
1593
1814
  if da.size == 0:
1594
- raise ValueError(f"No edges match criteria: {filters}")
1815
+ raise ValueError(f'No edges match criteria: {filters}')
1595
1816
 
1596
1817
  return da
1818
+
1819
+
1820
+ def _apply_indexer_to_data(
1821
+ data: xr.DataArray | xr.Dataset, indexer: dict[str, Any] | None = None, drop=False
1822
+ ) -> tuple[xr.DataArray | xr.Dataset, list[str]]:
1823
+ """
1824
+ Apply indexer selection or auto-select first values for non-time dimensions.
1825
+
1826
+ Args:
1827
+ data: xarray Dataset or DataArray
1828
+ indexer: Optional selection dict
1829
+ If None, uses first value for each dimension (except time).
1830
+ If empty dict {}, uses all values.
1831
+
1832
+ Returns:
1833
+ Tuple of (selected_data, selection_string)
1834
+ """
1835
+ selection_string = []
1836
+
1837
+ if indexer is not None:
1838
+ # User provided indexer
1839
+ data = data.sel(indexer, drop=drop)
1840
+ selection_string.extend(f'{v}[{k}]' for k, v in indexer.items())
1841
+ else:
1842
+ # Auto-select first value for each dimension except 'time'
1843
+ selection = {}
1844
+ for dim in data.dims:
1845
+ if dim != 'time' and dim in data.coords:
1846
+ first_value = data.coords[dim].values[0]
1847
+ selection[dim] = first_value
1848
+ selection_string.append(f'{first_value}[{dim}]')
1849
+ if selection:
1850
+ data = data.sel(selection, drop=drop)
1851
+
1852
+ return data, selection_string