flixopt 3.0.1__py3-none-any.whl → 6.0.0rc7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. flixopt/__init__.py +57 -49
  2. flixopt/carrier.py +159 -0
  3. flixopt/clustering/__init__.py +51 -0
  4. flixopt/clustering/base.py +1746 -0
  5. flixopt/clustering/intercluster_helpers.py +201 -0
  6. flixopt/color_processing.py +372 -0
  7. flixopt/comparison.py +819 -0
  8. flixopt/components.py +848 -270
  9. flixopt/config.py +853 -496
  10. flixopt/core.py +111 -98
  11. flixopt/effects.py +294 -284
  12. flixopt/elements.py +484 -223
  13. flixopt/features.py +220 -118
  14. flixopt/flow_system.py +2026 -389
  15. flixopt/interface.py +504 -286
  16. flixopt/io.py +1718 -55
  17. flixopt/linear_converters.py +291 -230
  18. flixopt/modeling.py +304 -181
  19. flixopt/network_app.py +2 -1
  20. flixopt/optimization.py +788 -0
  21. flixopt/optimize_accessor.py +373 -0
  22. flixopt/plot_result.py +143 -0
  23. flixopt/plotting.py +1177 -1034
  24. flixopt/results.py +1331 -372
  25. flixopt/solvers.py +12 -4
  26. flixopt/statistics_accessor.py +2412 -0
  27. flixopt/stats_accessor.py +75 -0
  28. flixopt/structure.py +954 -120
  29. flixopt/topology_accessor.py +676 -0
  30. flixopt/transform_accessor.py +2277 -0
  31. flixopt/types.py +120 -0
  32. flixopt-6.0.0rc7.dist-info/METADATA +290 -0
  33. flixopt-6.0.0rc7.dist-info/RECORD +36 -0
  34. {flixopt-3.0.1.dist-info → flixopt-6.0.0rc7.dist-info}/WHEEL +1 -1
  35. flixopt/aggregation.py +0 -382
  36. flixopt/calculation.py +0 -672
  37. flixopt/commons.py +0 -51
  38. flixopt/utils.py +0 -86
  39. flixopt-3.0.1.dist-info/METADATA +0 -209
  40. flixopt-3.0.1.dist-info/RECORD +0 -26
  41. {flixopt-3.0.1.dist-info → flixopt-6.0.0rc7.dist-info}/licenses/LICENSE +0 -0
  42. {flixopt-3.0.1.dist-info → flixopt-6.0.0rc7.dist-info}/top_level.txt +0 -0
flixopt/results.py CHANGED
@@ -1,5 +1,6 @@
1
1
  from __future__ import annotations
2
2
 
3
+ import copy
3
4
  import datetime
4
5
  import json
5
6
  import logging
@@ -10,33 +11,63 @@ from typing import TYPE_CHECKING, Any, Literal
10
11
  import linopy
11
12
  import numpy as np
12
13
  import pandas as pd
13
- import plotly
14
14
  import xarray as xr
15
- import yaml
16
15
 
17
16
  from . import io as fx_io
18
17
  from . import plotting
18
+ from .color_processing import process_colors
19
+ from .config import CONFIG, DEPRECATION_REMOVAL_VERSION, SUCCESS_LEVEL
19
20
  from .flow_system import FlowSystem
21
+ from .structure import CompositeContainerMixin, ResultsContainer
20
22
 
21
23
  if TYPE_CHECKING:
22
24
  import matplotlib.pyplot as plt
25
+ import plotly
23
26
  import pyvis
24
27
 
25
- from .calculation import Calculation, SegmentedCalculation
26
28
  from .core import FlowSystemDimensions
27
-
29
+ from .optimization import Optimization, SegmentedOptimization
28
30
 
29
31
  logger = logging.getLogger('flixopt')
30
32
 
31
33
 
34
+ def load_mapping_from_file(path: pathlib.Path) -> dict[str, str | list[str]]:
35
+ """Load color mapping from JSON or YAML file.
36
+
37
+ Tries loader based on file suffix first, with fallback to the other format.
38
+
39
+ Args:
40
+ path: Path to config file (.json or .yaml/.yml)
41
+
42
+ Returns:
43
+ Dictionary mapping components to colors or colorscales to component lists
44
+
45
+ Raises:
46
+ ValueError: If file cannot be loaded as JSON or YAML
47
+ """
48
+ return fx_io.load_config_file(path)
49
+
50
+
51
+ def _get_solution_attr(solution: xr.Dataset, key: str) -> dict:
52
+ """Get an attribute from solution, decoding JSON if necessary.
53
+
54
+ Solution attrs are stored as JSON strings for netCDF compatibility.
55
+ This helper handles both JSON strings and dicts (for backward compatibility).
56
+ """
57
+ value = solution.attrs.get(key, {})
58
+ if isinstance(value, str):
59
+ return json.loads(value)
60
+ return value
61
+
62
+
32
63
  class _FlowSystemRestorationError(Exception):
33
64
  """Exception raised when a FlowSystem cannot be restored from dataset."""
34
65
 
35
66
  pass
36
67
 
37
68
 
38
- class CalculationResults:
39
- """Comprehensive container for optimization calculation results and analysis tools.
69
+ class Results(CompositeContainerMixin['ComponentResults | BusResults | EffectResults | FlowResults']):
70
+ """Comprehensive container for optimization results and analysis tools.
40
71
 
41
72
  This class provides unified access to all optimization results including flow rates,
42
73
  component states, bus balances, and system effects. It offers powerful analysis
@@ -55,27 +86,27 @@ class CalculationResults:
55
86
  - **Buses**: Network node balances and energy flows
56
87
  - **Effects**: System-wide impacts (costs, emissions, resource consumption)
57
88
  - **Solution**: Raw optimization variables and their values
58
- - **Metadata**: Calculation parameters, timing, and system configuration
89
+ - **Metadata**: Optimization parameters, timing, and system configuration
59
90
 
60
91
  Attributes:
61
92
  solution: Dataset containing all optimization variable solutions
62
93
  flow_system_data: Dataset with complete system configuration and parameters. Restore the used FlowSystem for further analysis.
63
- summary: Calculation metadata including solver status, timing, and statistics
64
- name: Unique identifier for this calculation
94
+ summary: Optimization metadata including solver status, timing, and statistics
95
+ name: Unique identifier for this optimization
65
96
  model: Original linopy optimization model (if available)
66
97
  folder: Directory path for result storage and loading
67
98
  components: Dictionary mapping component labels to ComponentResults objects
68
99
  buses: Dictionary mapping bus labels to BusResults objects
69
100
  effects: Dictionary mapping effect names to EffectResults objects
70
101
  timesteps_extra: Extended time index including boundary conditions
71
- hours_per_timestep: Duration of each timestep for proper energy calculations
102
+ timestep_duration: Duration of each timestep in hours for proper energy calculations
72
103
 
73
104
  Examples:
74
105
  Load and analyze saved results:
75
106
 
76
107
  ```python
77
108
  # Load results from file
78
- results = CalculationResults.from_file('results', 'annual_optimization')
109
+ results = Results.from_file('results', 'annual_optimization')
79
110
 
80
111
  # Access specific component results
81
112
  boiler_results = results['Boiler_01']
@@ -107,27 +138,43 @@ class CalculationResults:
107
138
  ).mean()
108
139
  ```
109
140
 
141
+ Configure automatic color management for plots:
142
+
143
+ ```python
144
+ # Dict-based configuration:
145
+ results.setup_colors({'Solar*': 'Oranges', 'Wind*': 'Blues', 'Battery': 'green'})
146
+
147
+ # All plots automatically use configured colors (colors=None is the default)
148
+ results['ElectricityBus'].plot_node_balance()
149
+ results['Battery'].plot_charge_state()
150
+
151
+ # Override when needed
152
+ results['ElectricityBus'].plot_node_balance(colors='turbo') # Ignores setup
153
+ ```
154
+
110
155
  Design Patterns:
111
- **Factory Methods**: Use `from_file()` and `from_calculation()` for creation or access directly from `Calculation.results`
156
+ **Factory Methods**: Use `from_file()` and `from_optimization()` for creation or access directly from `Optimization.results`
112
157
  **Dictionary Access**: Use `results[element_label]` for element-specific results
113
158
  **Lazy Loading**: Results objects created on-demand for memory efficiency
114
159
  **Unified Interface**: Consistent API across different result types
115
160
 
116
161
  """
117
162
 
163
+ model: linopy.Model | None
164
+
118
165
  @classmethod
119
- def from_file(cls, folder: str | pathlib.Path, name: str) -> CalculationResults:
120
- """Load CalculationResults from saved files.
166
+ def from_file(cls, folder: str | pathlib.Path, name: str) -> Results:
167
+ """Load Results from saved files.
121
168
 
122
169
  Args:
123
170
  folder: Directory containing saved files.
124
171
  name: Base name of saved files (without extensions).
125
172
 
126
173
  Returns:
127
- CalculationResults: Loaded instance.
174
+ Results: Loaded instance.
128
175
  """
129
176
  folder = pathlib.Path(folder)
130
- paths = fx_io.CalculationResultsPaths(folder, name)
177
+ paths = fx_io.ResultsPaths(folder, name)
131
178
 
132
179
  model = None
133
180
  if paths.linopy_model.exists():
@@ -137,8 +184,7 @@ class CalculationResults:
137
184
  except Exception as e:
138
185
  logger.critical(f'Could not load the linopy model "{name}" from file ("{paths.linopy_model}"): {e}')
139
186
 
140
- with open(paths.summary, encoding='utf-8') as f:
141
- summary = yaml.load(f, Loader=yaml.FullLoader)
187
+ summary = fx_io.load_yaml(paths.summary)
142
188
 
143
189
  return cls(
144
190
  solution=fx_io.load_dataset_from_netcdf(paths.solution),
@@ -150,22 +196,22 @@ class CalculationResults:
150
196
  )
151
197
 
152
198
  @classmethod
153
- def from_calculation(cls, calculation: Calculation) -> CalculationResults:
154
- """Create CalculationResults from a Calculation object.
199
+ def from_optimization(cls, optimization: Optimization) -> Results:
200
+ """Create Results from an Optimization instance.
155
201
 
156
202
  Args:
157
- calculation: Calculation object with solved model.
203
+ optimization: The Optimization instance to extract results from.
158
204
 
159
205
  Returns:
160
- CalculationResults: New instance with extracted results.
206
+ Results: New instance containing the optimization results.
161
207
  """
162
208
  return cls(
163
- solution=calculation.model.solution,
164
- flow_system_data=calculation.flow_system.to_dataset(),
165
- summary=calculation.summary,
166
- model=calculation.model,
167
- name=calculation.name,
168
- folder=calculation.folder,
209
+ solution=optimization.model.solution,
210
+ flow_system_data=optimization.flow_system.to_dataset(),
211
+ summary=optimization.summary,
212
+ model=optimization.model,
213
+ name=optimization.name,
214
+ folder=optimization.folder,
169
215
  )
170
216
 
171
217
  def __init__(
@@ -176,30 +222,27 @@ class CalculationResults:
176
222
  summary: dict,
177
223
  folder: pathlib.Path | None = None,
178
224
  model: linopy.Model | None = None,
179
- **kwargs, # To accept old "flow_system" parameter
180
225
  ):
181
- """Initialize CalculationResults with optimization data.
182
- Usually, this class is instantiated by the Calculation class, or by loading from file.
226
+ """Initialize Results with optimization data.
227
+ Usually, this class is instantiated by an Optimization object via `Results.from_optimization()`
228
+ or by loading from file using `Results.from_file()`.
183
229
 
184
230
  Args:
185
231
  solution: Optimization solution dataset.
186
232
  flow_system_data: Flow system configuration dataset.
187
- name: Calculation name.
188
- summary: Calculation metadata.
233
+ name: Optimization name.
234
+ summary: Optimization metadata.
189
235
  folder: Results storage folder.
190
236
  model: Linopy optimization model.
191
- Deprecated:
192
- flow_system: Use flow_system_data instead.
193
237
  """
194
- # Handle potential old "flow_system" parameter for backward compatibility
195
- if 'flow_system' in kwargs and flow_system_data is None:
196
- flow_system_data = kwargs.pop('flow_system')
197
- warnings.warn(
198
- "The 'flow_system' parameter is deprecated. Use 'flow_system_data' instead."
199
- "Acess is now by '.flow_system_data', while '.flow_system' returns the restored FlowSystem.",
200
- DeprecationWarning,
201
- stacklevel=2,
202
- )
238
+ warnings.warn(
239
+ f'Results is deprecated and will be removed in v{DEPRECATION_REMOVAL_VERSION}. '
240
+ 'Access results directly via FlowSystem.solution after optimization, or use the '
241
+ '.plot accessor on FlowSystem and its components (e.g., flow_system.plot.heatmap(...)). '
242
+ 'To load old result files, use FlowSystem.from_old_results(folder, name).',
243
+ DeprecationWarning,
244
+ stacklevel=2,
245
+ )
203
246
 
204
247
  self.solution = solution
205
248
  self.flow_system_data = flow_system_data
@@ -207,29 +250,44 @@ class CalculationResults:
207
250
  self.name = name
208
251
  self.model = model
209
252
  self.folder = pathlib.Path(folder) if folder is not None else pathlib.Path.cwd() / 'results'
210
- self.components = {
211
- label: ComponentResults(self, **infos) for label, infos in self.solution.attrs['Components'].items()
253
+
254
+ # Create ResultsContainers for better access patterns
255
+ components_dict = {
256
+ label: ComponentResults(self, **infos)
257
+ for label, infos in _get_solution_attr(self.solution, 'Components').items()
212
258
  }
259
+ self.components = ResultsContainer(
260
+ elements=components_dict, element_type_name='component results', truncate_repr=10
261
+ )
213
262
 
214
- self.buses = {label: BusResults(self, **infos) for label, infos in self.solution.attrs['Buses'].items()}
263
+ buses_dict = {
264
+ label: BusResults(self, **infos) for label, infos in _get_solution_attr(self.solution, 'Buses').items()
265
+ }
266
+ self.buses = ResultsContainer(elements=buses_dict, element_type_name='bus results', truncate_repr=10)
215
267
 
216
- self.effects = {label: EffectResults(self, **infos) for label, infos in self.solution.attrs['Effects'].items()}
268
+ effects_dict = {
269
+ label: EffectResults(self, **infos) for label, infos in _get_solution_attr(self.solution, 'Effects').items()
270
+ }
271
+ self.effects = ResultsContainer(elements=effects_dict, element_type_name='effect results', truncate_repr=10)
217
272
 
218
- if 'Flows' not in self.solution.attrs:
273
+ flows_attr = _get_solution_attr(self.solution, 'Flows')
274
+ if not flows_attr:
219
275
  warnings.warn(
220
276
  'No Data about flows found in the results. This data is only included since v2.2.0. Some functionality '
221
277
  'is not availlable. We recommend to evaluate your results with a version <2.2.0.',
222
278
  stacklevel=2,
223
279
  )
224
- self.flows = {}
280
+ flows_dict = {}
281
+ self._has_flow_data = False
225
282
  else:
226
- self.flows = {
227
- label: FlowResults(self, **infos) for label, infos in self.solution.attrs.get('Flows', {}).items()
228
- }
283
+ flows_dict = {label: FlowResults(self, **infos) for label, infos in flows_attr.items()}
284
+ self._has_flow_data = True
285
+ self.flows = ResultsContainer(elements=flows_dict, element_type_name='flow results', truncate_repr=10)
229
286
 
230
287
  self.timesteps_extra = self.solution.indexes['time']
231
- self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.timesteps_extra)
288
+ self.timestep_duration = FlowSystem.calculate_timestep_duration(self.timesteps_extra)
232
289
  self.scenarios = self.solution.indexes['scenario'] if 'scenario' in self.solution.indexes else None
290
+ self.periods = self.solution.indexes['period'] if 'period' in self.solution.indexes else None
233
291
 
234
292
  self._effect_share_factors = None
235
293
  self._flow_system = None
@@ -239,16 +297,24 @@ class CalculationResults:
239
297
  self._sizes = None
240
298
  self._effects_per_component = None
241
299
 
242
- def __getitem__(self, key: str) -> ComponentResults | BusResults | EffectResults:
243
- if key in self.components:
244
- return self.components[key]
245
- if key in self.buses:
246
- return self.buses[key]
247
- if key in self.effects:
248
- return self.effects[key]
249
- if key in self.flows:
250
- return self.flows[key]
251
- raise KeyError(f'No element with label {key} found.')
300
+ self.colors: dict[str, str] = {}
301
+
302
+ def _get_container_groups(self) -> dict[str, ResultsContainer]:
303
+ """Return ordered container groups for CompositeContainerMixin."""
304
+ return {
305
+ 'Components': self.components,
306
+ 'Buses': self.buses,
307
+ 'Effects': self.effects,
308
+ 'Flows': self.flows,
309
+ }
310
+
311
+ def __repr__(self) -> str:
312
+ """Return grouped representation of all results."""
313
+ r = fx_io.format_title_with_underline(self.__class__.__name__, '=')
314
+ r += f'Name: "{self.name}"\nFolder: {self.folder}\n'
315
+ # Add grouped container view
316
+ r += '\n' + self._format_grouped_containers()
317
+ return r
252
318
 
253
319
  @property
254
320
  def storages(self) -> list[ComponentResults]:
@@ -288,23 +354,151 @@ class CalculationResults:
288
354
 
289
355
  @property
290
356
  def flow_system(self) -> FlowSystem:
291
- """The restored flow_system that was used to create the calculation.
357
+ """The restored flow_system that was used to create the optimization.
292
358
  Contains all input parameters."""
293
359
  if self._flow_system is None:
294
- old_level = logger.level
295
- logger.level = logging.CRITICAL
360
+ # Temporarily disable all logging to suppress messages during restoration
361
+ flixopt_logger = logging.getLogger('flixopt')
362
+ original_level = flixopt_logger.level
363
+ flixopt_logger.setLevel(logging.CRITICAL + 1) # Disable all logging
296
364
  try:
297
365
  self._flow_system = FlowSystem.from_dataset(self.flow_system_data)
298
366
  self._flow_system._connect_network()
299
367
  except Exception as e:
368
+ flixopt_logger.setLevel(original_level) # Re-enable before logging
300
369
  logger.critical(
301
370
  f'Not able to restore FlowSystem from dataset. Some functionality is not availlable. {e}'
302
371
  )
303
372
  raise _FlowSystemRestorationError(f'Not able to restore FlowSystem from dataset. {e}') from e
304
373
  finally:
305
- logger.level = old_level
374
+ flixopt_logger.setLevel(original_level) # Restore original level
306
375
  return self._flow_system
307
376
 
377
+ def setup_colors(
378
+ self,
379
+ config: dict[str, str | list[str]] | str | pathlib.Path | None = None,
380
+ default_colorscale: str | None = None,
381
+ ) -> dict[str, str]:
382
+ """
383
+ Setup colors for all variables across all elements. Overwrites existing ones.
384
+
385
+ Args:
386
+ config: Configuration for color assignment. Can be:
387
+ - dict: Maps components to colors/colorscales:
388
+ * 'component1': 'red' # Single component to single color
389
+ * 'component1': '#FF0000' # Single component to hex color
390
+ - OR maps colorscales to multiple components:
391
+ * 'colorscale_name': ['component1', 'component2'] # Colorscale across components
392
+ - str: Path to a JSON/YAML config file or a colorscale name to apply to all
393
+ - Path: Path to a JSON/YAML config file
394
+ - None: Use default_colorscale for all components
395
+ default_colorscale: Default colorscale for unconfigured components (default: 'turbo')
396
+
397
+ Examples:
398
+ setup_colors({
399
+ # Direct component-to-color mappings
400
+ 'Boiler1': '#FF0000',
401
+ 'CHP': 'darkred',
402
+ # Colorscale for multiple components
403
+ 'Oranges': ['Solar1', 'Solar2'],
404
+ 'Blues': ['Wind1', 'Wind2'],
405
+ 'Greens': ['Battery1', 'Battery2', 'Battery3'],
406
+ })
407
+
408
+ Returns:
409
+ Complete variable-to-color mapping dictionary
410
+ """
411
+
412
+ def get_all_variable_names(comp: str) -> list[str]:
413
+ """Collect all variables from the component, including flows and flow_hours."""
414
+ comp_object = self.components[comp]
415
+ var_names = [comp] + list(comp_object.variable_names)
416
+ for flow in comp_object.flows:
417
+ var_names.extend([flow, f'{flow}|flow_hours'])
418
+ return var_names
419
+
420
+ # Set default colorscale if not provided
421
+ if default_colorscale is None:
422
+ default_colorscale = CONFIG.Plotting.default_qualitative_colorscale
423
+
424
+ # Handle different config input types
425
+ if config is None:
426
+ # Apply default colorscale to all components
427
+ config_dict = {}
428
+ elif isinstance(config, (str, pathlib.Path)):
429
+ # Try to load from file first
430
+ config_path = pathlib.Path(config)
431
+ if config_path.exists():
432
+ # Load config from file using helper
433
+ config_dict = load_mapping_from_file(config_path)
434
+ else:
435
+ # Treat as colorscale name to apply to all components
436
+ all_components = list(self.components.keys())
437
+ config_dict = {config: all_components}
438
+ elif isinstance(config, dict):
439
+ config_dict = config
440
+ else:
441
+ raise TypeError(f'config must be dict, str, Path, or None, got {type(config)}')
442
+
443
+ # Step 1: Build component-to-color mapping
444
+ component_colors: dict[str, str] = {}
445
+
446
+ # Track which components are configured
447
+ configured_components = set()
448
+
449
+ # Process each configuration entry
450
+ for key, value in config_dict.items():
451
+ # Check if value is a list (colorscale -> [components])
452
+ # or a string (component -> color OR colorscale -> [components])
453
+
454
+ if isinstance(value, list):
455
+ # key is colorscale, value is list of components
456
+ # Format: 'Blues': ['Wind1', 'Wind2']
457
+ components = value
458
+ colorscale_name = key
459
+
460
+ # Validate components exist
461
+ for component in components:
462
+ if component not in self.components:
463
+ raise ValueError(f"Component '{component}' not found")
464
+
465
+ configured_components.update(components)
466
+
467
+ # Use process_colors to get one color per component from the colorscale
468
+ colors_for_components = process_colors(colorscale_name, components)
469
+ component_colors.update(colors_for_components)
470
+
471
+ elif isinstance(value, str):
472
+ # Check if key is an existing component
473
+ if key in self.components:
474
+ # Format: 'CHP': 'red' (component -> color)
475
+ component, color = key, value
476
+
477
+ configured_components.add(component)
478
+ component_colors[component] = color
479
+ else:
480
+ raise ValueError(f"Component '{key}' not found")
481
+ else:
482
+ raise TypeError(f'Config value must be str or list, got {type(value)}')
483
+
484
+ # Step 2: Assign colors to remaining unconfigured components
485
+ remaining_components = list(set(self.components.keys()) - configured_components)
486
+ if remaining_components:
487
+ # Use default colorscale to assign one color per remaining component
488
+ default_colors = process_colors(default_colorscale, remaining_components)
489
+ component_colors.update(default_colors)
490
+
491
+ # Step 3: Build variable-to-color mapping
492
+ # Clear existing colors to avoid stale keys
493
+ self.colors = {}
494
+ # Each component's variables all get the same color as the component
495
+ for component, color in component_colors.items():
496
+ variable_names = get_all_variable_names(component)
497
+ for var_name in variable_names:
498
+ self.colors[var_name] = color
499
+
500
+ return self.colors
501
+
308
502
  def filter_solution(
309
503
  self,
310
504
  variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None,
@@ -373,21 +567,42 @@ class CalculationResults:
373
567
  ) -> xr.DataArray:
374
568
  """Returns a DataArray containing the flow rates of each Flow.
375
569
 
376
- Args:
377
- start: Optional source node(s) to filter by. Can be a single node name or a list of names.
378
- end: Optional destination node(s) to filter by. Can be a single node name or a list of names.
379
- component: Optional component(s) to filter by. Can be a single component name or a list of names.
570
+ .. deprecated::
571
+ Use `results.plot.all_flow_rates` (Dataset) or
572
+ `results.flows['FlowLabel'].flow_rate` (DataArray) instead.
380
573
 
381
- Further usage:
382
- Convert the dataarray to a dataframe:
383
- >>>results.flow_rates().to_pandas()
384
- Get the max or min over time:
385
- >>>results.flow_rates().max('time')
386
- Sum up the flow rates of flows with the same start and end:
387
- >>>results.flow_rates(end='Fernwärme').groupby('start').sum(dim='flow')
388
- To recombine filtered dataarrays, use `xr.concat` with dim 'flow':
389
- >>>xr.concat([results.flow_rates(start='Fernwärme'), results.flow_rates(end='Fernwärme')], dim='flow')
574
+ **Note**: The new API differs from this method:
575
+
576
+ - Returns ``xr.Dataset`` (not ``DataArray``) with flow labels as variable names
577
+ - No ``'flow'`` dimension - each flow is a separate variable
578
+ - No filtering parameters - filter using these alternatives::
579
+
580
+ # Select specific flows by label
581
+ ds = results.plot.all_flow_rates
582
+ ds[['Boiler(Q_th)', 'CHP(Q_th)']]
583
+
584
+ # Filter by substring in label
585
+ ds[[v for v in ds.data_vars if 'Boiler' in v]]
586
+
587
+ # Filter by bus (start/end) - get flows connected to a bus
588
+ results['Fernwärme'].inputs # list of input flow labels
589
+ results['Fernwärme'].outputs # list of output flow labels
590
+ ds[results['Fernwärme'].inputs] # Dataset with only inputs to bus
591
+
592
+ # Filter by component - get flows of a component
593
+ results['Boiler'].inputs # list of input flow labels
594
+ results['Boiler'].outputs # list of output flow labels
390
595
  """
596
+ warnings.warn(
597
+ 'results.flow_rates() is deprecated. '
598
+ 'Use results.plot.all_flow_rates instead (returns Dataset, not DataArray). '
599
+ 'Note: The new API has no filtering parameters and uses flow labels as variable names. '
600
+ f'Will be removed in v{DEPRECATION_REMOVAL_VERSION}.',
601
+ DeprecationWarning,
602
+ stacklevel=2,
603
+ )
604
+ if not self._has_flow_data:
605
+ raise ValueError('Flow data is not available in this results object (pre-v2.2.0).')
391
606
  if self._flow_rates is None:
392
607
  self._flow_rates = self._assign_flow_coords(
393
608
  xr.concat(
@@ -406,6 +621,32 @@ class CalculationResults:
406
621
  ) -> xr.DataArray:
407
622
  """Returns a DataArray containing the flow hours of each Flow.
408
623
 
624
+ .. deprecated::
625
+ Use `results.plot.all_flow_hours` (Dataset) or
626
+ `results.flows['FlowLabel'].flow_rate * results.timestep_duration` instead.
627
+
628
+ **Note**: The new API differs from this method:
629
+
630
+ - Returns ``xr.Dataset`` (not ``DataArray``) with flow labels as variable names
631
+ - No ``'flow'`` dimension - each flow is a separate variable
632
+ - No filtering parameters - filter using these alternatives::
633
+
634
+ # Select specific flows by label
635
+ ds = results.plot.all_flow_hours
636
+ ds[['Boiler(Q_th)', 'CHP(Q_th)']]
637
+
638
+ # Filter by substring in label
639
+ ds[[v for v in ds.data_vars if 'Boiler' in v]]
640
+
641
+ # Filter by bus (start/end) - get flows connected to a bus
642
+ results['Fernwärme'].inputs # list of input flow labels
643
+ results['Fernwärme'].outputs # list of output flow labels
644
+ ds[results['Fernwärme'].inputs] # Dataset with only inputs to bus
645
+
646
+ # Filter by component - get flows of a component
647
+ results['Boiler'].inputs # list of input flow labels
648
+ results['Boiler'].outputs # list of output flow labels
649
+
409
650
  Flow hours represent the total energy/material transferred over time,
410
651
  calculated by multiplying flow rates by the duration of each timestep.
411
652
 
@@ -425,8 +666,16 @@ class CalculationResults:
425
666
  >>>xr.concat([results.flow_hours(start='Fernwärme'), results.flow_hours(end='Fernwärme')], dim='flow')
426
667
 
427
668
  """
669
+ warnings.warn(
670
+ 'results.flow_hours() is deprecated. '
671
+ 'Use results.plot.all_flow_hours instead (returns Dataset, not DataArray). '
672
+ 'Note: The new API has no filtering parameters and uses flow labels as variable names. '
673
+ f'Will be removed in v{DEPRECATION_REMOVAL_VERSION}.',
674
+ DeprecationWarning,
675
+ stacklevel=2,
676
+ )
428
677
  if self._flow_hours is None:
429
- self._flow_hours = (self.flow_rates() * self.hours_per_timestep).rename('flow_hours')
678
+ self._flow_hours = (self.flow_rates() * self.timestep_duration).rename('flow_hours')
430
679
  filters = {k: v for k, v in {'start': start, 'end': end, 'component': component}.items() if v is not None}
431
680
  return filter_dataarray_by_coord(self._flow_hours, **filters)
432
681
 
@@ -437,18 +686,43 @@ class CalculationResults:
437
686
  component: str | list[str] | None = None,
438
687
  ) -> xr.DataArray:
439
688
  """Returns a dataset with the sizes of the Flows.
440
- Args:
441
- start: Optional source node(s) to filter by. Can be a single node name or a list of names.
442
- end: Optional destination node(s) to filter by. Can be a single node name or a list of names.
443
- component: Optional component(s) to filter by. Can be a single component name or a list of names.
444
689
 
445
- Further usage:
446
- Convert the dataarray to a dataframe:
447
- >>>results.sizes().to_pandas()
448
- To recombine filtered dataarrays, use `xr.concat` with dim 'flow':
449
- >>>xr.concat([results.sizes(start='Fernwärme'), results.sizes(end='Fernwärme')], dim='flow')
690
+ .. deprecated::
691
+ Use `results.plot.all_sizes` (Dataset) or
692
+ `results.flows['FlowLabel'].size` (DataArray) instead.
693
+
694
+ **Note**: The new API differs from this method:
695
+
696
+ - Returns ``xr.Dataset`` (not ``DataArray``) with flow labels as variable names
697
+ - No ``'flow'`` dimension - each flow is a separate variable
698
+ - No filtering parameters - filter using these alternatives::
450
699
 
700
+ # Select specific flows by label
701
+ ds = results.plot.all_sizes
702
+ ds[['Boiler(Q_th)', 'CHP(Q_th)']]
703
+
704
+ # Filter by substring in label
705
+ ds[[v for v in ds.data_vars if 'Boiler' in v]]
706
+
707
+ # Filter by bus (start/end) - get flows connected to a bus
708
+ results['Fernwärme'].inputs # list of input flow labels
709
+ results['Fernwärme'].outputs # list of output flow labels
710
+ ds[results['Fernwärme'].inputs] # Dataset with only inputs to bus
711
+
712
+ # Filter by component - get flows of a component
713
+ results['Boiler'].inputs # list of input flow labels
714
+ results['Boiler'].outputs # list of output flow labels
451
715
  """
716
+ warnings.warn(
717
+ 'results.sizes() is deprecated. '
718
+ 'Use results.plot.all_sizes instead (returns Dataset, not DataArray). '
719
+ 'Note: The new API has no filtering parameters and uses flow labels as variable names. '
720
+ f'Will be removed in v{DEPRECATION_REMOVAL_VERSION}.',
721
+ DeprecationWarning,
722
+ stacklevel=2,
723
+ )
724
+ if not self._has_flow_data:
725
+ raise ValueError('Flow data is not available in this results object (pre-v2.2.0).')
452
726
  if self._sizes is None:
453
727
  self._sizes = self._assign_flow_coords(
454
728
  xr.concat(
@@ -461,11 +735,12 @@ class CalculationResults:
461
735
 
462
736
  def _assign_flow_coords(self, da: xr.DataArray):
463
737
  # Add start and end coordinates
738
+ flows_list = list(self.flows.values())
464
739
  da = da.assign_coords(
465
740
  {
466
- 'start': ('flow', [flow.start for flow in self.flows.values()]),
467
- 'end': ('flow', [flow.end for flow in self.flows.values()]),
468
- 'component': ('flow', [flow.component for flow in self.flows.values()]),
741
+ 'start': ('flow', [flow.start for flow in flows_list]),
742
+ 'end': ('flow', [flow.end for flow in flows_list]),
743
+ 'component': ('flow', [flow.component for flow in flows_list]),
469
744
  }
470
745
  )
471
746
 
@@ -553,7 +828,7 @@ class CalculationResults:
553
828
  Args:
554
829
  element: The element identifier for which to calculate total effects.
555
830
  effect: The effect identifier to calculate.
556
- mode: The calculation mode. Options are:
831
+ mode: The optimization mode. Options are:
557
832
  'temporal': Returns temporal effects.
558
833
  'periodic': Returns investment-specific effects.
559
834
  'total': Returns the sum of temporal effects and periodic effects. Defaults to 'total'.
@@ -584,8 +859,6 @@ class CalculationResults:
584
859
  temporal = temporal.sum('time')
585
860
  if periodic.isnull().all():
586
861
  return temporal.rename(f'{element}->{effect}')
587
- if 'time' in temporal.indexes:
588
- temporal = temporal.sum('time')
589
862
  return periodic + temporal
590
863
 
591
864
  total = xr.DataArray(0)
@@ -619,42 +892,57 @@ class CalculationResults:
619
892
  total = xr.DataArray(np.nan)
620
893
  return total.rename(f'{element}->{effect}({mode})')
621
894
 
895
+ def _create_template_for_mode(self, mode: Literal['temporal', 'periodic', 'total']) -> xr.DataArray:
896
+ """Create a template DataArray with the correct dimensions for a given mode.
897
+
898
+ Args:
899
+ mode: The optimization mode ('temporal', 'periodic', or 'total').
900
+
901
+ Returns:
902
+ A DataArray filled with NaN, with dimensions appropriate for the mode.
903
+ """
904
+ coords = {}
905
+ if mode == 'temporal':
906
+ coords['time'] = self.timesteps_extra
907
+ if self.periods is not None:
908
+ coords['period'] = self.periods
909
+ if self.scenarios is not None:
910
+ coords['scenario'] = self.scenarios
911
+
912
+ # Create template with appropriate shape
913
+ if coords:
914
+ shape = tuple(len(coords[dim]) for dim in coords)
915
+ return xr.DataArray(np.full(shape, np.nan, dtype=float), coords=coords, dims=list(coords.keys()))
916
+ else:
917
+ return xr.DataArray(np.nan)
918
+
622
919
  def _create_effects_dataset(self, mode: Literal['temporal', 'periodic', 'total']) -> xr.Dataset:
623
920
  """Creates a dataset containing effect totals for all components (including their flows).
624
921
  The dataset does contain the direct as well as the indirect effects of each component.
625
922
 
626
923
  Args:
627
- mode: The calculation mode ('temporal', 'periodic', or 'total').
924
+ mode: The optimization mode ('temporal', 'periodic', or 'total').
628
925
 
629
926
  Returns:
630
927
  An xarray Dataset with components as dimension and effects as variables.
631
928
  """
929
+ # Create template with correct dimensions for this mode
930
+ template = self._create_template_for_mode(mode)
931
+
632
932
  ds = xr.Dataset()
633
933
  all_arrays = {}
634
- template = None # Template is needed to determine the dimensions of the arrays. This handles the case of no shares for an effect
635
-
636
934
  components_list = list(self.components)
637
935
 
638
- # First pass: collect arrays and find template
936
+ # Collect arrays for all effects and components
639
937
  for effect in self.effects:
640
938
  effect_arrays = []
641
939
  for component in components_list:
642
940
  da = self._compute_effect_total(element=component, effect=effect, mode=mode, include_flows=True)
643
941
  effect_arrays.append(da)
644
942
 
645
- if template is None and (da.dims or not da.isnull().all()):
646
- template = da
647
-
648
943
  all_arrays[effect] = effect_arrays
649
944
 
650
- # Ensure we have a template
651
- if template is None:
652
- raise ValueError(
653
- f"No template with proper dimensions found for mode '{mode}'. "
654
- f'All computed arrays are scalars, which indicates a data issue.'
655
- )
656
-
657
- # Second pass: process all effects (guaranteed to include all)
945
+ # Process all effects: expand scalar NaN arrays to match template dimensions
658
946
  for effect in self.effects:
659
947
  dataarrays = all_arrays[effect]
660
948
  component_arrays = []
@@ -687,68 +975,136 @@ class CalculationResults:
687
975
 
688
976
  def plot_heatmap(
689
977
  self,
690
- variable_name: str,
691
- heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
692
- heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
693
- color_map: str = 'portland',
978
+ variable_name: str | list[str],
694
979
  save: bool | pathlib.Path = False,
695
- show: bool = True,
980
+ show: bool | None = None,
981
+ colors: plotting.ColorType | None = None,
696
982
  engine: plotting.PlottingEngine = 'plotly',
697
- indexer: dict[FlowSystemDimensions, Any] | None = None,
983
+ select: dict[FlowSystemDimensions, Any] | None = None,
984
+ facet_by: str | list[str] | None = 'scenario',
985
+ animate_by: str | None = 'period',
986
+ facet_cols: int | None = None,
987
+ reshape_time: tuple[Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], Literal['W', 'D', 'h', '15min', 'min']]
988
+ | Literal['auto']
989
+ | None = 'auto',
990
+ fill: Literal['ffill', 'bfill'] | None = 'ffill',
991
+ **plot_kwargs: Any,
698
992
  ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
699
993
  """
700
- Plots a heatmap of the solution of a variable.
994
+ Plots a heatmap visualization of a variable using imshow or time-based reshaping.
995
+
996
+ Supports multiple visualization features that can be combined:
997
+ - **Multi-variable**: Plot multiple variables on a single heatmap (creates 'variable' dimension)
998
+ - **Time reshaping**: Converts 'time' dimension into 2D (e.g., hours vs days)
999
+ - **Faceting**: Creates subplots for different dimension values
1000
+ - **Animation**: Animates through dimension values (Plotly only)
701
1001
 
702
1002
  Args:
703
- variable_name: The name of the variable to plot.
704
- heatmap_timeframes: The timeframes to use for the heatmap.
705
- heatmap_timesteps_per_frame: The timesteps per frame to use for the heatmap.
706
- color_map: The color map to use for the heatmap.
1003
+ variable_name: The name of the variable to plot, or a list of variable names.
1004
+ When a list is provided, variables are combined into a single DataArray
1005
+ with a new 'variable' dimension.
707
1006
  save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
708
1007
  show: Whether to show the plot or not.
1008
+ colors: Color scheme for the heatmap. See `flixopt.plotting.ColorType` for options.
709
1009
  engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
710
- indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
711
- If None, uses first value for each dimension.
712
- If empty dict {}, uses all values.
1010
+ select: Optional data selection dict. Supports single values, lists, slices, and index arrays.
1011
+ Applied BEFORE faceting/animation/reshaping.
1012
+ facet_by: Dimension(s) to create facets (subplots) for. Can be a single dimension name (str)
1013
+ or list of dimensions. Each unique value combination creates a subplot. Ignored if not found.
1014
+ animate_by: Dimension to animate over (Plotly only). Creates animation frames that cycle through
1015
+ dimension values. Only one dimension can be animated. Ignored if not found.
1016
+ facet_cols: Number of columns in the facet grid layout (default: 3).
1017
+ reshape_time: Time reshaping configuration (default: 'auto'):
1018
+ - 'auto': Automatically applies ('D', 'h') when only 'time' dimension remains
1019
+ - Tuple: Explicit reshaping, e.g. ('D', 'h') for days vs hours,
1020
+ ('MS', 'D') for months vs days, ('W', 'h') for weeks vs hours
1021
+ - None: Disable auto-reshaping (will error if only 1D time data)
1022
+ Supported timeframes: 'YS', 'MS', 'W', 'D', 'h', '15min', 'min'
1023
+ fill: Method to fill missing values after reshape: 'ffill' (forward fill) or 'bfill' (backward fill).
1024
+ Default is 'ffill'.
1025
+ **plot_kwargs: Additional plotting customization options.
1026
+ Common options:
1027
+
1028
+ - **dpi** (int): Export resolution for saved plots. Default: 300.
1029
+
1030
+ For heatmaps specifically:
1031
+
1032
+ - **vmin** (float): Minimum value for color scale (both engines).
1033
+ - **vmax** (float): Maximum value for color scale (both engines).
1034
+
1035
+ For Matplotlib heatmaps:
1036
+
1037
+ - **imshow_kwargs** (dict): Additional kwargs for matplotlib's imshow (e.g., interpolation, aspect).
1038
+ - **cbar_kwargs** (dict): Additional kwargs for colorbar customization.
713
1039
 
714
1040
  Examples:
715
- Basic usage (uses first scenario, first period, all time):
1041
+ Direct imshow mode (default):
1042
+
1043
+ >>> results.plot_heatmap('Battery|charge_state', select={'scenario': 'base'})
716
1044
 
717
- >>> results.plot_heatmap('Battery|charge_state')
1045
+ Facet by scenario:
718
1046
 
719
- Select specific scenario and period:
1047
+ >>> results.plot_heatmap('Boiler(Qth)|flow_rate', facet_by='scenario', facet_cols=2)
720
1048
 
721
- >>> results.plot_heatmap('Boiler(Qth)|flow_rate', indexer={'scenario': 'base', 'period': 2024})
1049
+ Animate by period:
722
1050
 
723
- Time filtering (summer months only):
1051
+ >>> results.plot_heatmap('Boiler(Qth)|flow_rate', select={'scenario': 'base'}, animate_by='period')
1052
+
1053
+ Time reshape mode - daily patterns:
1054
+
1055
+ >>> results.plot_heatmap('Boiler(Qth)|flow_rate', select={'scenario': 'base'}, reshape_time=('D', 'h'))
1056
+
1057
+ Combined: time reshaping with faceting and animation:
724
1058
 
725
1059
  >>> results.plot_heatmap(
726
- ... 'Boiler(Qth)|flow_rate',
727
- ... indexer={
728
- ... 'scenario': 'base',
729
- ... 'time': results.solution.time[results.solution.time.dt.month.isin([6, 7, 8])],
730
- ... },
1060
+ ... 'Boiler(Qth)|flow_rate', facet_by='scenario', animate_by='period', reshape_time=('D', 'h')
731
1061
  ... )
732
1062
 
733
- Save to specific location:
1063
+ Multi-variable heatmap (variables as one axis):
734
1064
 
735
1065
  >>> results.plot_heatmap(
736
- ... 'Boiler(Qth)|flow_rate', indexer={'scenario': 'base'}, save='path/to/my_heatmap.html'
1066
+ ... ['Boiler(Q_th)|flow_rate', 'CHP(Q_th)|flow_rate', 'HeatStorage|charge_state'],
1067
+ ... select={'scenario': 'base', 'period': 1},
1068
+ ... reshape_time=None,
737
1069
  ... )
738
- """
739
- dataarray = self.solution[variable_name]
740
1070
 
1071
+ Multi-variable with time reshaping:
1072
+
1073
+ >>> results.plot_heatmap(
1074
+ ... ['Boiler(Q_th)|flow_rate', 'CHP(Q_th)|flow_rate'],
1075
+ ... facet_by='scenario',
1076
+ ... animate_by='period',
1077
+ ... reshape_time=('D', 'h'),
1078
+ ... )
1079
+
1080
+ High-resolution export with custom color range:
1081
+
1082
+ >>> results.plot_heatmap('Battery|charge_state', save=True, dpi=600, vmin=0, vmax=100)
1083
+
1084
+ Matplotlib heatmap with custom imshow settings:
1085
+
1086
+ >>> results.plot_heatmap(
1087
+ ... 'Boiler(Q_th)|flow_rate',
1088
+ ... engine='matplotlib',
1089
+ ... imshow_kwargs={'interpolation': 'bilinear', 'aspect': 'auto'},
1090
+ ... )
1091
+ """
1092
+ # Delegate to module-level plot_heatmap function
741
1093
  return plot_heatmap(
742
- dataarray=dataarray,
743
- name=variable_name,
1094
+ data=self.solution[variable_name],
1095
+ name=variable_name if isinstance(variable_name, str) else None,
744
1096
  folder=self.folder,
745
- heatmap_timeframes=heatmap_timeframes,
746
- heatmap_timesteps_per_frame=heatmap_timesteps_per_frame,
747
- color_map=color_map,
1097
+ colors=colors,
748
1098
  save=save,
749
1099
  show=show,
750
1100
  engine=engine,
751
- indexer=indexer,
1101
+ select=select,
1102
+ facet_by=facet_by,
1103
+ animate_by=animate_by,
1104
+ facet_cols=facet_cols,
1105
+ reshape_time=reshape_time,
1106
+ fill=fill,
1107
+ **plot_kwargs,
752
1108
  )
753
1109
 
754
1110
  def plot_network(
@@ -760,19 +1116,74 @@ class CalculationResults:
760
1116
  ]
761
1117
  ) = True,
762
1118
  path: pathlib.Path | None = None,
763
- show: bool = False,
1119
+ show: bool | None = None,
764
1120
  ) -> pyvis.network.Network | None:
765
1121
  """Plot interactive network visualization of the system.
766
1122
 
767
1123
  Args:
768
1124
  controls: Enable/disable interactive controls.
769
1125
  path: Save path for network HTML.
770
- show: Whether to display the plot.
1126
+ show: Whether to display the plot. If None, uses CONFIG.Plotting.default_show.
771
1127
  """
772
1128
  if path is None:
773
1129
  path = self.folder / f'{self.name}--network.html'
774
1130
  return self.flow_system.plot_network(controls=controls, path=path, show=show)
775
1131
 
1132
+ def to_flow_system(self) -> FlowSystem:
1133
+ """Convert Results to a FlowSystem with solution attached.
1134
+
1135
+ This method migrates results from the deprecated Results format to the
1136
+ new FlowSystem-based format, enabling use of the modern API.
1137
+
1138
+ Note:
1139
+ For loading old results files directly, consider using
1140
+ ``FlowSystem.from_old_results(folder, name)`` instead.
1141
+
1142
+ Returns:
1143
+ FlowSystem: A FlowSystem instance with the solution data attached.
1144
+
1145
+ Caveats:
1146
+ - The linopy model is NOT attached (only the solution data)
1147
+ - Element submodels are NOT recreated (no re-optimization without
1148
+ calling build_model() first)
1149
+ - Variable/constraint names on elements are NOT restored
1150
+
1151
+ Examples:
1152
+ Convert loaded Results to FlowSystem:
1153
+
1154
+ ```python
1155
+ # Load old results
1156
+ results = Results.from_file('results', 'my_optimization')
1157
+
1158
+ # Convert to FlowSystem
1159
+ flow_system = results.to_flow_system()
1160
+
1161
+ # Use new API
1162
+ flow_system.plot.heatmap()
1163
+ flow_system.solution.to_netcdf('solution.nc')
1164
+
1165
+ # Save in new single-file format
1166
+ flow_system.to_netcdf('my_optimization.nc')
1167
+ ```
1168
+ """
1169
+ from flixopt.io import convert_old_dataset
1170
+
1171
+ # Convert flow_system_data to new parameter names
1172
+ convert_old_dataset(self.flow_system_data)
1173
+
1174
+ # Reconstruct FlowSystem from stored data
1175
+ flow_system = FlowSystem.from_dataset(self.flow_system_data)
1176
+
1177
+ # Convert solution attrs from dicts to JSON strings for consistency with new format
1178
+ # The _get_solution_attr helper handles both formats, but we normalize here
1179
+ solution = self.solution.copy()
1180
+ for key in ['Components', 'Buses', 'Effects', 'Flows']:
1181
+ if key in solution.attrs and isinstance(solution.attrs[key], dict):
1182
+ solution.attrs[key] = json.dumps(solution.attrs[key])
1183
+
1184
+ flow_system.solution = solution
1185
+ return flow_system
1186
+
776
1187
  def to_file(
777
1188
  self,
778
1189
  folder: str | pathlib.Path | None = None,
@@ -780,59 +1191,70 @@ class CalculationResults:
780
1191
  compression: int = 5,
781
1192
  document_model: bool = True,
782
1193
  save_linopy_model: bool = False,
1194
+ overwrite: bool = False,
783
1195
  ):
784
1196
  """Save results to files.
785
1197
 
786
1198
  Args:
787
- folder: Save folder (defaults to calculation folder).
788
- name: File name (defaults to calculation name).
1199
+ folder: Save folder (defaults to optimization folder).
1200
+ name: File name (defaults to optimization name).
789
1201
  compression: Compression level 0-9.
790
1202
  document_model: Whether to document model formulations as yaml.
791
1203
  save_linopy_model: Whether to save linopy model file.
1204
+ overwrite: If False, raise error if results files already exist. If True, overwrite existing files.
1205
+
1206
+ Raises:
1207
+ FileExistsError: If overwrite=False and result files already exist.
792
1208
  """
793
1209
  folder = self.folder if folder is None else pathlib.Path(folder)
794
1210
  name = self.name if name is None else name
795
- if not folder.exists():
796
- try:
797
- folder.mkdir(parents=False)
798
- except FileNotFoundError as e:
799
- raise FileNotFoundError(
800
- f'Folder {folder} and its parent do not exist. Please create them first.'
801
- ) from e
802
1211
 
803
- paths = fx_io.CalculationResultsPaths(folder, name)
1212
+ # Ensure folder exists, creating parent directories as needed
1213
+ folder.mkdir(parents=True, exist_ok=True)
1214
+
1215
+ paths = fx_io.ResultsPaths(folder, name)
1216
+
1217
+ # Check if files already exist (unless overwrite is True)
1218
+ if not overwrite:
1219
+ existing_files = []
1220
+ for file_path in paths.all_paths().values():
1221
+ if file_path.exists():
1222
+ existing_files.append(file_path.name)
1223
+
1224
+ if existing_files:
1225
+ raise FileExistsError(
1226
+ f'Results files already exist in {folder}: {", ".join(existing_files)}. '
1227
+ f'Use overwrite=True to overwrite existing files.'
1228
+ )
804
1229
 
805
1230
  fx_io.save_dataset_to_netcdf(self.solution, paths.solution, compression=compression)
806
1231
  fx_io.save_dataset_to_netcdf(self.flow_system_data, paths.flow_system, compression=compression)
807
1232
 
808
- with open(paths.summary, 'w', encoding='utf-8') as f:
809
- yaml.dump(self.summary, f, allow_unicode=True, sort_keys=False, indent=4, width=1000)
1233
+ fx_io.save_yaml(self.summary, paths.summary, compact_numeric_lists=True)
810
1234
 
811
1235
  if save_linopy_model:
812
1236
  if self.model is None:
813
- logger.critical('No model in the CalculationResults. Saving the model is not possible.')
1237
+ logger.critical('No model in the Results. Saving the model is not possible.')
814
1238
  else:
815
- self.model.to_netcdf(paths.linopy_model, engine='h5netcdf')
1239
+ self.model.to_netcdf(paths.linopy_model, engine='netcdf4')
816
1240
 
817
1241
  if document_model:
818
1242
  if self.model is None:
819
- logger.critical('No model in the CalculationResults. Documenting the model is not possible.')
1243
+ logger.critical('No model in the Results. Documenting the model is not possible.')
820
1244
  else:
821
1245
  fx_io.document_linopy_model(self.model, path=paths.model_documentation)
822
1246
 
823
- logger.info(f'Saved calculation results "{name}" to {paths.model_documentation.parent}')
1247
+ logger.log(SUCCESS_LEVEL, f'Saved optimization results "{name}" to {paths.model_documentation.parent}')
824
1248
 
825
1249
 
826
1250
  class _ElementResults:
827
- def __init__(
828
- self, calculation_results: CalculationResults, label: str, variables: list[str], constraints: list[str]
829
- ):
830
- self._calculation_results = calculation_results
1251
+ def __init__(self, results: Results, label: str, variables: list[str], constraints: list[str]):
1252
+ self._results = results
831
1253
  self.label = label
832
- self._variable_names = variables
1254
+ self.variable_names = variables
833
1255
  self._constraint_names = constraints
834
1256
 
835
- self.solution = self._calculation_results.solution[self._variable_names]
1257
+ self.solution = self._results.solution[self.variable_names]
836
1258
 
837
1259
  @property
838
1260
  def variables(self) -> linopy.Variables:
@@ -841,9 +1263,9 @@ class _ElementResults:
841
1263
  Raises:
842
1264
  ValueError: If linopy model is unavailable.
843
1265
  """
844
- if self._calculation_results.model is None:
1266
+ if self._results.model is None:
845
1267
  raise ValueError('The linopy model is not available.')
846
- return self._calculation_results.model.variables[self._variable_names]
1268
+ return self._results.model.variables[self.variable_names]
847
1269
 
848
1270
  @property
849
1271
  def constraints(self) -> linopy.Constraints:
@@ -852,9 +1274,17 @@ class _ElementResults:
852
1274
  Raises:
853
1275
  ValueError: If linopy model is unavailable.
854
1276
  """
855
- if self._calculation_results.model is None:
1277
+ if self._results.model is None:
856
1278
  raise ValueError('The linopy model is not available.')
857
- return self._calculation_results.model.constraints[self._constraint_names]
1279
+ return self._results.model.constraints[self._constraint_names]
1280
+
1281
+ def __repr__(self) -> str:
1282
+ """Return string representation with element info and dataset preview."""
1283
+ class_name = self.__class__.__name__
1284
+ header = f'{class_name}: "{self.label}"'
1285
+ sol = self.solution.copy(deep=False)
1286
+ sol.attrs = {}
1287
+ return f'{header}\n{"-" * len(header)}\n{repr(sol)}'
858
1288
 
859
1289
  def filter_solution(
860
1290
  self,
@@ -901,7 +1331,7 @@ class _ElementResults:
901
1331
  class _NodeResults(_ElementResults):
902
1332
  def __init__(
903
1333
  self,
904
- calculation_results: CalculationResults,
1334
+ results: Results,
905
1335
  label: str,
906
1336
  variables: list[str],
907
1337
  constraints: list[str],
@@ -909,7 +1339,7 @@ class _NodeResults(_ElementResults):
909
1339
  outputs: list[str],
910
1340
  flows: list[str],
911
1341
  ):
912
- super().__init__(calculation_results, label, variables, constraints)
1342
+ super().__init__(results, label, variables, constraints)
913
1343
  self.inputs = inputs
914
1344
  self.outputs = outputs
915
1345
  self.flows = flows
@@ -917,75 +1347,194 @@ class _NodeResults(_ElementResults):
917
1347
  def plot_node_balance(
918
1348
  self,
919
1349
  save: bool | pathlib.Path = False,
920
- show: bool = True,
921
- colors: plotting.ColorType = 'viridis',
1350
+ show: bool | None = None,
1351
+ colors: plotting.ColorType | None = None,
922
1352
  engine: plotting.PlottingEngine = 'plotly',
923
- indexer: dict[FlowSystemDimensions, Any] | None = None,
924
- mode: Literal['flow_rate', 'flow_hours'] = 'flow_rate',
925
- style: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar',
1353
+ select: dict[FlowSystemDimensions, Any] | None = None,
1354
+ unit_type: Literal['flow_rate', 'flow_hours'] = 'flow_rate',
1355
+ mode: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar',
926
1356
  drop_suffix: bool = True,
1357
+ facet_by: str | list[str] | None = 'scenario',
1358
+ animate_by: str | None = 'period',
1359
+ facet_cols: int | None = None,
1360
+ **plot_kwargs: Any,
927
1361
  ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
928
1362
  """
929
- Plots the node balance of the Component or Bus.
1363
+ Plots the node balance of the Component or Bus with optional faceting and animation.
1364
+
930
1365
  Args:
931
1366
  save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
932
1367
  show: Whether to show the plot or not.
933
1368
  colors: The colors to use for the plot. See `flixopt.plotting.ColorType` for options.
934
1369
  engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
935
- indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
936
- If None, uses first value for each dimension (except time).
937
- If empty dict {}, uses all values.
938
- style: The style to use for the dataset. Can be 'flow_rate' or 'flow_hours'.
1370
+ select: Optional data selection dict. Supports:
1371
+ - Single values: {'scenario': 'base', 'period': 2024}
1372
+ - Multiple values: {'scenario': ['base', 'high', 'renewable']}
1373
+ - Slices: {'time': slice('2024-01', '2024-06')}
1374
+ - Index arrays: {'time': time_array}
1375
+ Note: Applied BEFORE faceting/animation.
1376
+ unit_type: The unit type to use for the dataset. Can be 'flow_rate' or 'flow_hours'.
939
1377
  - 'flow_rate': Returns the flow_rates of the Node.
940
1378
  - 'flow_hours': Returns the flow_hours of the Node. [flow_hours(t) = flow_rate(t) * dt(t)]. Renames suffixes to |flow_hours.
1379
+ mode: The plotting mode. Use 'stacked_bar' for stacked bar charts, 'line' for stepped lines, or 'area' for stacked area charts.
941
1380
  drop_suffix: Whether to drop the suffix from the variable names.
1381
+ facet_by: Dimension(s) to create facets (subplots) for. Can be a single dimension name (str)
1382
+ or list of dimensions. Each unique value combination creates a subplot. Ignored if not found.
1383
+ Example: 'scenario' creates one subplot per scenario.
1384
+ Example: ['period', 'scenario'] creates a grid of subplots for each scenario-period combination.
1385
+ animate_by: Dimension to animate over (Plotly only). Creates animation frames that cycle through
1386
+ dimension values. Only one dimension can be animated. Ignored if not found.
1387
+ facet_cols: Number of columns in the facet grid layout (default: 3).
1388
+ **plot_kwargs: Additional plotting customization options passed to underlying plotting functions.
1389
+
1390
+ Common options:
1391
+
1392
+ - **dpi** (int): Export resolution in dots per inch. Default: 300.
1393
+
1394
+ **For Plotly engine** (`engine='plotly'`):
1395
+
1396
+ - Any Plotly Express parameter for px.bar()/px.line()/px.area()
1397
+ Example: `range_y=[0, 100]`, `line_shape='linear'`
1398
+
1399
+ **For Matplotlib engine** (`engine='matplotlib'`):
1400
+
1401
+ - **plot_kwargs** (dict): Customize plot via `ax.bar()` or `ax.step()`.
1402
+ Example: `plot_kwargs={'linewidth': 3, 'alpha': 0.7, 'edgecolor': 'black'}`
1403
+
1404
+ See :func:`flixopt.plotting.with_plotly` and :func:`flixopt.plotting.with_matplotlib`
1405
+ for complete parameter reference.
1406
+
1407
+ Note: For Plotly, you can further customize the returned figure using `fig.update_traces()`
1408
+ and `fig.update_layout()` after calling this method.
1409
+
1410
+ Examples:
1411
+ Basic plot (current behavior):
1412
+
1413
+ >>> results['Boiler'].plot_node_balance()
1414
+
1415
+ Facet by scenario:
1416
+
1417
+ >>> results['Boiler'].plot_node_balance(facet_by='scenario', facet_cols=2)
1418
+
1419
+ Animate by period:
1420
+
1421
+ >>> results['Boiler'].plot_node_balance(animate_by='period')
1422
+
1423
+ Facet by scenario AND animate by period:
1424
+
1425
+ >>> results['Boiler'].plot_node_balance(facet_by='scenario', animate_by='period')
1426
+
1427
+ Select single scenario, then facet by period:
1428
+
1429
+ >>> results['Boiler'].plot_node_balance(select={'scenario': 'base'}, facet_by='period')
1430
+
1431
+ Select multiple scenarios and facet by them:
1432
+
1433
+ >>> results['Boiler'].plot_node_balance(
1434
+ ... select={'scenario': ['base', 'high', 'renewable']}, facet_by='scenario'
1435
+ ... )
1436
+
1437
+ Time range selection (summer months only):
1438
+
1439
+ >>> results['Boiler'].plot_node_balance(select={'time': slice('2024-06', '2024-08')}, facet_by='scenario')
1440
+
1441
+ High-resolution export for publication:
1442
+
1443
+ >>> results['Boiler'].plot_node_balance(engine='matplotlib', save='figure.png', dpi=600)
1444
+
1445
+ Plotly Express customization (e.g., set y-axis range):
1446
+
1447
+ >>> results['Boiler'].plot_node_balance(range_y=[0, 100])
1448
+
1449
+ Custom matplotlib appearance:
1450
+
1451
+ >>> results['Boiler'].plot_node_balance(engine='matplotlib', plot_kwargs={'linewidth': 3, 'alpha': 0.7})
1452
+
1453
+ Further customize Plotly figure after creation:
1454
+
1455
+ >>> fig = results['Boiler'].plot_node_balance(mode='line', show=False)
1456
+ >>> fig.update_traces(line={'width': 5, 'dash': 'dot'})
1457
+ >>> fig.update_layout(template='plotly_dark', width=1200, height=600)
1458
+ >>> fig.show()
942
1459
  """
943
- ds = self.node_balance(with_last_timestep=True, mode=mode, drop_suffix=drop_suffix, indexer=indexer)
1460
+ if engine not in {'plotly', 'matplotlib'}:
1461
+ raise ValueError(f'Engine "{engine}" not supported. Use one of ["plotly", "matplotlib"]')
1462
+
1463
+ # Extract dpi for export_figure
1464
+ dpi = plot_kwargs.pop('dpi', None) # None uses CONFIG.Plotting.default_dpi
1465
+
1466
+ # Don't pass select/indexer to node_balance - we'll apply it afterwards
1467
+ ds = self.node_balance(with_last_timestep=False, unit_type=unit_type, drop_suffix=drop_suffix)
1468
+
1469
+ ds, suffix_parts = _apply_selection_to_data(ds, select=select, drop=True)
944
1470
 
945
- ds, suffix_parts = _apply_indexer_to_data(ds, indexer, drop=True)
1471
+ # Matplotlib requires only 'time' dimension; check for extras after selection
1472
+ if engine == 'matplotlib':
1473
+ extra_dims = [d for d in ds.dims if d != 'time']
1474
+ if extra_dims:
1475
+ raise ValueError(
1476
+ f'Matplotlib engine only supports a single time axis, but found extra dimensions: {extra_dims}. '
1477
+ f'Please use select={{...}} to reduce dimensions or switch to engine="plotly" for faceting/animation.'
1478
+ )
946
1479
  suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
947
1480
 
948
- title = f'{self.label} (flow rates){suffix}' if mode == 'flow_rate' else f'{self.label} (flow hours){suffix}'
1481
+ title = (
1482
+ f'{self.label} (flow rates){suffix}' if unit_type == 'flow_rate' else f'{self.label} (flow hours){suffix}'
1483
+ )
949
1484
 
950
1485
  if engine == 'plotly':
951
1486
  figure_like = plotting.with_plotly(
952
- ds.to_dataframe(),
953
- colors=colors,
954
- style=style,
1487
+ ds,
1488
+ facet_by=facet_by,
1489
+ animate_by=animate_by,
1490
+ colors=colors if colors is not None else self._results.colors,
1491
+ mode=mode,
955
1492
  title=title,
1493
+ facet_cols=facet_cols,
1494
+ xlabel='Time in h',
1495
+ **plot_kwargs,
956
1496
  )
957
1497
  default_filetype = '.html'
958
- elif engine == 'matplotlib':
1498
+ else:
959
1499
  figure_like = plotting.with_matplotlib(
960
- ds.to_dataframe(),
961
- colors=colors,
962
- style=style,
1500
+ ds,
1501
+ colors=colors if colors is not None else self._results.colors,
1502
+ mode=mode,
963
1503
  title=title,
1504
+ **plot_kwargs,
964
1505
  )
965
1506
  default_filetype = '.png'
966
- else:
967
- raise ValueError(f'Engine "{engine}" not supported. Use "plotly" or "matplotlib"')
968
1507
 
969
1508
  return plotting.export_figure(
970
1509
  figure_like=figure_like,
971
- default_path=self._calculation_results.folder / title,
1510
+ default_path=self._results.folder / title,
972
1511
  default_filetype=default_filetype,
973
1512
  user_path=None if isinstance(save, bool) else pathlib.Path(save),
974
1513
  show=show,
975
1514
  save=True if save else False,
1515
+ dpi=dpi,
976
1516
  )
977
1517
 
978
1518
  def plot_node_balance_pie(
979
1519
  self,
980
1520
  lower_percentage_group: float = 5,
981
- colors: plotting.ColorType = 'viridis',
1521
+ colors: plotting.ColorType | None = None,
982
1522
  text_info: str = 'percent+label+value',
983
1523
  save: bool | pathlib.Path = False,
984
- show: bool = True,
1524
+ show: bool | None = None,
985
1525
  engine: plotting.PlottingEngine = 'plotly',
986
- indexer: dict[FlowSystemDimensions, Any] | None = None,
1526
+ select: dict[FlowSystemDimensions, Any] | None = None,
1527
+ **plot_kwargs: Any,
987
1528
  ) -> plotly.graph_objs.Figure | tuple[plt.Figure, list[plt.Axes]]:
988
1529
  """Plot pie chart of flow hours distribution.
1530
+
1531
+ Note:
1532
+ Pie charts require scalar data (no extra dimensions beyond time).
1533
+ If your data has dimensions like 'scenario' or 'period', either:
1534
+
1535
+ - Use `select` to choose specific values: `select={'scenario': 'base', 'period': 2024}`
1536
+ - Let auto-selection choose the first value (a warning will be logged)
1537
+
989
1538
  Args:
990
1539
  lower_percentage_group: Percentage threshold for "Others" grouping.
991
1540
  colors: Color scheme. Also see plotly.
@@ -993,44 +1542,107 @@ class _NodeResults(_ElementResults):
993
1542
  save: Whether to save plot.
994
1543
  show: Whether to display plot.
995
1544
  engine: Plotting engine ('plotly' or 'matplotlib').
996
- indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
997
- If None, uses first value for each dimension.
998
- If empty dict {}, uses all values.
1545
+ select: Optional data selection dict. Supports single values, lists, slices, and index arrays.
1546
+ Use this to select specific scenario/period before creating the pie chart.
1547
+ **plot_kwargs: Additional plotting customization options.
1548
+
1549
+ Common options:
1550
+
1551
+ - **dpi** (int): Export resolution in dots per inch. Default: 300.
1552
+ - **hover_template** (str): Hover text template (Plotly only).
1553
+ Example: `hover_template='%{label}: %{value} (%{percent})'`
1554
+ - **text_position** (str): Text position ('inside', 'outside', 'auto').
1555
+ - **hole** (float): Size of donut hole (0.0 to 1.0).
1556
+
1557
+ See :func:`flixopt.plotting.dual_pie_with_plotly` for complete reference.
1558
+
1559
+ Examples:
1560
+ Basic usage (auto-selects first scenario/period if present):
1561
+
1562
+ >>> results['Bus'].plot_node_balance_pie()
1563
+
1564
+ Explicitly select a scenario and period:
1565
+
1566
+ >>> results['Bus'].plot_node_balance_pie(select={'scenario': 'high_demand', 'period': 2030})
1567
+
1568
+ Create a donut chart with custom hover text:
1569
+
1570
+ >>> results['Bus'].plot_node_balance_pie(hole=0.4, hover_template='%{label}: %{value:.2f} (%{percent})')
1571
+
1572
+ High-resolution export:
1573
+
1574
+ >>> results['Bus'].plot_node_balance_pie(save='figure.png', dpi=600)
999
1575
  """
1576
+ # Extract dpi for export_figure
1577
+ dpi = plot_kwargs.pop('dpi', None) # None uses CONFIG.Plotting.default_dpi
1578
+
1000
1579
  inputs = sanitize_dataset(
1001
- ds=self.solution[self.inputs] * self._calculation_results.hours_per_timestep,
1580
+ ds=self.solution[self.inputs] * self._results.timestep_duration,
1002
1581
  threshold=1e-5,
1003
1582
  drop_small_vars=True,
1004
1583
  zero_small_values=True,
1005
1584
  drop_suffix='|',
1006
1585
  )
1007
1586
  outputs = sanitize_dataset(
1008
- ds=self.solution[self.outputs] * self._calculation_results.hours_per_timestep,
1587
+ ds=self.solution[self.outputs] * self._results.timestep_duration,
1009
1588
  threshold=1e-5,
1010
1589
  drop_small_vars=True,
1011
1590
  zero_small_values=True,
1012
1591
  drop_suffix='|',
1013
1592
  )
1014
1593
 
1015
- inputs, suffix_parts = _apply_indexer_to_data(inputs, indexer, drop=True)
1016
- outputs, suffix_parts = _apply_indexer_to_data(outputs, indexer, drop=True)
1017
- suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
1018
-
1019
- title = f'{self.label} (total flow hours){suffix}'
1594
+ inputs, suffix_parts_in = _apply_selection_to_data(inputs, select=select, drop=True)
1595
+ outputs, suffix_parts_out = _apply_selection_to_data(outputs, select=select, drop=True)
1596
+ suffix_parts = suffix_parts_in + suffix_parts_out
1020
1597
 
1598
+ # Sum over time dimension
1021
1599
  inputs = inputs.sum('time')
1022
1600
  outputs = outputs.sum('time')
1023
1601
 
1602
+ # Auto-select first value for any remaining dimensions (scenario, period, etc.)
1603
+ # Pie charts need scalar data, so we automatically reduce extra dimensions
1604
+ extra_dims_inputs = [dim for dim in inputs.dims if dim != 'time']
1605
+ extra_dims_outputs = [dim for dim in outputs.dims if dim != 'time']
1606
+ extra_dims = sorted(set(extra_dims_inputs + extra_dims_outputs))
1607
+
1608
+ if extra_dims:
1609
+ auto_select = {}
1610
+ for dim in extra_dims:
1611
+ # Get first value of this dimension
1612
+ if dim in inputs.coords:
1613
+ first_val = inputs.coords[dim].values[0]
1614
+ elif dim in outputs.coords:
1615
+ first_val = outputs.coords[dim].values[0]
1616
+ else:
1617
+ continue
1618
+ auto_select[dim] = first_val
1619
+ logger.info(
1620
+ f'Pie chart auto-selected {dim}={first_val} (first value). '
1621
+ f'Use select={{"{dim}": value}} to choose a different value.'
1622
+ )
1623
+
1624
+ # Apply auto-selection only for coords present in each dataset
1625
+ inputs = inputs.sel({k: v for k, v in auto_select.items() if k in inputs.coords})
1626
+ outputs = outputs.sel({k: v for k, v in auto_select.items() if k in outputs.coords})
1627
+
1628
+ # Update suffix with auto-selected values
1629
+ auto_suffix_parts = [f'{dim}={val}' for dim, val in auto_select.items()]
1630
+ suffix_parts.extend(auto_suffix_parts)
1631
+
1632
+ suffix = '--' + '-'.join(sorted(set(suffix_parts))) if suffix_parts else ''
1633
+ title = f'{self.label} (total flow hours){suffix}'
1634
+
1024
1635
  if engine == 'plotly':
1025
1636
  figure_like = plotting.dual_pie_with_plotly(
1026
- data_left=inputs.to_pandas(),
1027
- data_right=outputs.to_pandas(),
1028
- colors=colors,
1637
+ data_left=inputs,
1638
+ data_right=outputs,
1639
+ colors=colors if colors is not None else self._results.colors,
1029
1640
  title=title,
1030
1641
  text_info=text_info,
1031
1642
  subtitles=('Inputs', 'Outputs'),
1032
1643
  legend_title='Flows',
1033
1644
  lower_percentage_group=lower_percentage_group,
1645
+ **plot_kwargs,
1034
1646
  )
1035
1647
  default_filetype = '.html'
1036
1648
  elif engine == 'matplotlib':
@@ -1038,11 +1650,12 @@ class _NodeResults(_ElementResults):
1038
1650
  figure_like = plotting.dual_pie_with_matplotlib(
1039
1651
  data_left=inputs.to_pandas(),
1040
1652
  data_right=outputs.to_pandas(),
1041
- colors=colors,
1653
+ colors=colors if colors is not None else self._results.colors,
1042
1654
  title=title,
1043
1655
  subtitles=('Inputs', 'Outputs'),
1044
1656
  legend_title='Flows',
1045
1657
  lower_percentage_group=lower_percentage_group,
1658
+ **plot_kwargs,
1046
1659
  )
1047
1660
  default_filetype = '.png'
1048
1661
  else:
@@ -1050,11 +1663,12 @@ class _NodeResults(_ElementResults):
1050
1663
 
1051
1664
  return plotting.export_figure(
1052
1665
  figure_like=figure_like,
1053
- default_path=self._calculation_results.folder / title,
1666
+ default_path=self._results.folder / title,
1054
1667
  default_filetype=default_filetype,
1055
1668
  user_path=None if isinstance(save, bool) else pathlib.Path(save),
1056
1669
  show=show,
1057
1670
  save=True if save else False,
1671
+ dpi=dpi,
1058
1672
  )
1059
1673
 
1060
1674
  def node_balance(
@@ -1063,9 +1677,9 @@ class _NodeResults(_ElementResults):
1063
1677
  negate_outputs: bool = False,
1064
1678
  threshold: float | None = 1e-5,
1065
1679
  with_last_timestep: bool = False,
1066
- mode: Literal['flow_rate', 'flow_hours'] = 'flow_rate',
1680
+ unit_type: Literal['flow_rate', 'flow_hours'] = 'flow_rate',
1067
1681
  drop_suffix: bool = False,
1068
- indexer: dict[FlowSystemDimensions, Any] | None = None,
1682
+ select: dict[FlowSystemDimensions, Any] | None = None,
1069
1683
  ) -> xr.Dataset:
1070
1684
  """
1071
1685
  Returns a dataset with the node balance of the Component or Bus.
@@ -1074,20 +1688,18 @@ class _NodeResults(_ElementResults):
1074
1688
  negate_outputs: Whether to negate the output flow_rates of the Node.
1075
1689
  threshold: The threshold for small values. Variables with all values below the threshold are dropped.
1076
1690
  with_last_timestep: Whether to include the last timestep in the dataset.
1077
- mode: The mode to use for the dataset. Can be 'flow_rate' or 'flow_hours'.
1691
+ unit_type: The unit type to use for the dataset. Can be 'flow_rate' or 'flow_hours'.
1078
1692
  - 'flow_rate': Returns the flow_rates of the Node.
1079
1693
  - 'flow_hours': Returns the flow_hours of the Node. [flow_hours(t) = flow_rate(t) * dt(t)]. Renames suffixes to |flow_hours.
1080
1694
  drop_suffix: Whether to drop the suffix from the variable names.
1081
- indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
1082
- If None, uses first value for each dimension.
1083
- If empty dict {}, uses all values.
1695
+ select: Optional data selection dict. Supports single values, lists, slices, and index arrays.
1084
1696
  """
1085
1697
  ds = self.solution[self.inputs + self.outputs]
1086
1698
 
1087
1699
  ds = sanitize_dataset(
1088
1700
  ds=ds,
1089
1701
  threshold=threshold,
1090
- timesteps=self._calculation_results.timesteps_extra if with_last_timestep else None,
1702
+ timesteps=self._results.timesteps_extra if with_last_timestep else None,
1091
1703
  negate=(
1092
1704
  self.outputs + self.inputs
1093
1705
  if negate_outputs and negate_inputs
@@ -1100,10 +1712,10 @@ class _NodeResults(_ElementResults):
1100
1712
  drop_suffix='|' if drop_suffix else None,
1101
1713
  )
1102
1714
 
1103
- ds, _ = _apply_indexer_to_data(ds, indexer, drop=True)
1715
+ ds, _ = _apply_selection_to_data(ds, select=select, drop=True)
1104
1716
 
1105
- if mode == 'flow_hours':
1106
- ds = ds * self._calculation_results.hours_per_timestep
1717
+ if unit_type == 'flow_hours':
1718
+ ds = ds * self._results.timestep_duration
1107
1719
  ds = ds.rename_vars({var: var.replace('flow_rate', 'flow_hours') for var in ds.data_vars})
1108
1720
 
1109
1721
  return ds
@@ -1118,7 +1730,7 @@ class ComponentResults(_NodeResults):
1118
1730
 
1119
1731
  @property
1120
1732
  def is_storage(self) -> bool:
1121
- return self._charge_state in self._variable_names
1733
+ return self._charge_state in self.variable_names
1122
1734
 
1123
1735
  @property
1124
1736
  def _charge_state(self) -> str:
@@ -1134,75 +1746,202 @@ class ComponentResults(_NodeResults):
1134
1746
  def plot_charge_state(
1135
1747
  self,
1136
1748
  save: bool | pathlib.Path = False,
1137
- show: bool = True,
1138
- colors: plotting.ColorType = 'viridis',
1749
+ show: bool | None = None,
1750
+ colors: plotting.ColorType | None = None,
1139
1751
  engine: plotting.PlottingEngine = 'plotly',
1140
- style: Literal['area', 'stacked_bar', 'line'] = 'stacked_bar',
1141
- indexer: dict[FlowSystemDimensions, Any] | None = None,
1752
+ mode: Literal['area', 'stacked_bar', 'line'] = 'area',
1753
+ select: dict[FlowSystemDimensions, Any] | None = None,
1754
+ facet_by: str | list[str] | None = 'scenario',
1755
+ animate_by: str | None = 'period',
1756
+ facet_cols: int | None = None,
1757
+ **plot_kwargs: Any,
1142
1758
  ) -> plotly.graph_objs.Figure:
1143
- """Plot storage charge state over time, combined with the node balance.
1759
+ """Plot storage charge state over time, combined with the node balance with optional faceting and animation.
1144
1760
 
1145
1761
  Args:
1146
1762
  save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
1147
1763
  show: Whether to show the plot or not.
1148
1764
  colors: Color scheme. Also see plotly.
1149
1765
  engine: Plotting engine to use. Only 'plotly' is implemented atm.
1150
- style: The colors to use for the plot. See `flixopt.plotting.ColorType` for options.
1151
- indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
1152
- If None, uses first value for each dimension.
1153
- If empty dict {}, uses all values.
1766
+ mode: The plotting mode. Use 'stacked_bar' for stacked bar charts, 'line' for stepped lines, or 'area' for stacked area charts.
1767
+ select: Optional data selection dict. Supports single values, lists, slices, and index arrays.
1768
+ Applied BEFORE faceting/animation.
1769
+ facet_by: Dimension(s) to create facets (subplots) for. Can be a single dimension name (str)
1770
+ or list of dimensions. Each unique value combination creates a subplot. Ignored if not found.
1771
+ animate_by: Dimension to animate over (Plotly only). Creates animation frames that cycle through
1772
+ dimension values. Only one dimension can be animated. Ignored if not found.
1773
+ facet_cols: Number of columns in the facet grid layout (default: 3).
1774
+ **plot_kwargs: Additional plotting customization options passed to underlying plotting functions.
1775
+
1776
+ Common options:
1777
+
1778
+ - **dpi** (int): Export resolution in dots per inch. Default: 300.
1779
+
1780
+ **For Plotly engine:**
1781
+
1782
+ - Any Plotly Express parameter for px.bar()/px.line()/px.area()
1783
+ Example: `range_y=[0, 100]`, `line_shape='linear'`
1784
+
1785
+ **For Matplotlib engine:**
1786
+
1787
+ - **plot_kwargs** (dict): Customize plot via `ax.bar()` or `ax.step()`.
1788
+
1789
+ See :func:`flixopt.plotting.with_plotly` and :func:`flixopt.plotting.with_matplotlib`
1790
+ for complete parameter reference.
1791
+
1792
+ Note: For Plotly, you can further customize the returned figure using `fig.update_traces()`
1793
+ and `fig.update_layout()` after calling this method.
1154
1794
 
1155
1795
  Raises:
1156
1796
  ValueError: If component is not a storage.
1797
+
1798
+ Examples:
1799
+ Basic plot:
1800
+
1801
+ >>> results['Storage'].plot_charge_state()
1802
+
1803
+ Facet by scenario:
1804
+
1805
+ >>> results['Storage'].plot_charge_state(facet_by='scenario', facet_cols=2)
1806
+
1807
+ Animate by period:
1808
+
1809
+ >>> results['Storage'].plot_charge_state(animate_by='period')
1810
+
1811
+ Facet by scenario AND animate by period:
1812
+
1813
+ >>> results['Storage'].plot_charge_state(facet_by='scenario', animate_by='period')
1814
+
1815
+ Custom layout after creation:
1816
+
1817
+ >>> fig = results['Storage'].plot_charge_state(show=False)
1818
+ >>> fig.update_layout(template='plotly_dark', height=800)
1819
+ >>> fig.show()
1820
+
1821
+ High-resolution export:
1822
+
1823
+ >>> results['Storage'].plot_charge_state(save='storage.png', dpi=600)
1157
1824
  """
1825
+ # Extract dpi for export_figure
1826
+ dpi = plot_kwargs.pop('dpi', None) # None uses CONFIG.Plotting.default_dpi
1827
+
1828
+ # Extract charge state line color (for overlay customization)
1829
+ overlay_color = plot_kwargs.pop('charge_state_line_color', 'black')
1830
+
1158
1831
  if not self.is_storage:
1159
1832
  raise ValueError(f'Cannot plot charge_state. "{self.label}" is not a storage')
1160
1833
 
1161
- ds = self.node_balance(with_last_timestep=True, indexer=indexer)
1162
- charge_state = self.charge_state
1834
+ # Get node balance and charge state
1835
+ ds = self.node_balance(with_last_timestep=True).fillna(0)
1836
+ charge_state_da = self.charge_state
1163
1837
 
1164
- ds, suffix_parts = _apply_indexer_to_data(ds, indexer, drop=True)
1165
- charge_state, suffix_parts = _apply_indexer_to_data(charge_state, indexer, drop=True)
1838
+ # Apply select filtering
1839
+ ds, suffix_parts = _apply_selection_to_data(ds, select=select, drop=True)
1840
+ charge_state_da, _ = _apply_selection_to_data(charge_state_da, select=select, drop=True)
1166
1841
  suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
1167
1842
 
1168
1843
  title = f'Operation Balance of {self.label}{suffix}'
1169
1844
 
1170
1845
  if engine == 'plotly':
1171
- fig = plotting.with_plotly(
1172
- ds.to_dataframe(),
1173
- colors=colors,
1174
- style=style,
1846
+ # Plot flows (node balance) with the specified mode
1847
+ figure_like = plotting.with_plotly(
1848
+ ds,
1849
+ facet_by=facet_by,
1850
+ animate_by=animate_by,
1851
+ colors=colors if colors is not None else self._results.colors,
1852
+ mode=mode,
1175
1853
  title=title,
1854
+ facet_cols=facet_cols,
1855
+ xlabel='Time in h',
1856
+ **plot_kwargs,
1176
1857
  )
1177
1858
 
1178
- # TODO: Use colors for charge state?
1179
-
1180
- charge_state = charge_state.to_dataframe()
1181
- fig.add_trace(
1182
- plotly.graph_objs.Scatter(
1183
- x=charge_state.index, y=charge_state.values.flatten(), mode='lines', name=self._charge_state
1184
- )
1859
+ # Prepare charge_state as Dataset for plotting
1860
+ charge_state_ds = xr.Dataset({self._charge_state: charge_state_da})
1861
+
1862
+ # Plot charge_state with mode='line' to get Scatter traces
1863
+ charge_state_fig = plotting.with_plotly(
1864
+ charge_state_ds,
1865
+ facet_by=facet_by,
1866
+ animate_by=animate_by,
1867
+ colors=colors if colors is not None else self._results.colors,
1868
+ mode='line', # Always line for charge_state
1869
+ title='', # No title needed for this temp figure
1870
+ facet_cols=facet_cols,
1871
+ xlabel='Time in h',
1872
+ **plot_kwargs,
1185
1873
  )
1874
+
1875
+ # Add charge_state traces to the main figure
1876
+ # This preserves subplot assignments and animation frames
1877
+ for trace in charge_state_fig.data:
1878
+ trace.line.width = 2 # Make charge_state line more prominent
1879
+ trace.line.shape = 'linear' # Smooth line for charge state (not stepped like flows)
1880
+ trace.line.color = overlay_color
1881
+ figure_like.add_trace(trace)
1882
+
1883
+ # Also add traces from animation frames if they exist
1884
+ # Both figures use the same animate_by parameter, so they should have matching frames
1885
+ if hasattr(charge_state_fig, 'frames') and charge_state_fig.frames:
1886
+ # Add charge_state traces to each frame
1887
+ for i, frame in enumerate(charge_state_fig.frames):
1888
+ if i < len(figure_like.frames):
1889
+ for trace in frame.data:
1890
+ trace.line.width = 2
1891
+ trace.line.shape = 'linear' # Smooth line for charge state
1892
+ trace.line.color = overlay_color
1893
+ figure_like.frames[i].data = figure_like.frames[i].data + (trace,)
1894
+
1895
+ default_filetype = '.html'
1186
1896
  elif engine == 'matplotlib':
1897
+ # Matplotlib requires only 'time' dimension; check for extras after selection
1898
+ extra_dims = [d for d in ds.dims if d != 'time']
1899
+ if extra_dims:
1900
+ raise ValueError(
1901
+ f'Matplotlib engine only supports a single time axis, but found extra dimensions: {extra_dims}. '
1902
+ f'Please use select={{...}} to reduce dimensions or switch to engine="plotly" for faceting/animation.'
1903
+ )
1904
+ # For matplotlib, plot flows (node balance), then add charge_state as line
1187
1905
  fig, ax = plotting.with_matplotlib(
1188
- ds.to_dataframe(),
1189
- colors=colors,
1190
- style=style,
1906
+ ds,
1907
+ colors=colors if colors is not None else self._results.colors,
1908
+ mode=mode,
1191
1909
  title=title,
1910
+ **plot_kwargs,
1192
1911
  )
1193
1912
 
1194
- charge_state = charge_state.to_dataframe()
1195
- ax.plot(charge_state.index, charge_state.values.flatten(), label=self._charge_state)
1913
+ # Add charge_state as a line overlay
1914
+ charge_state_df = charge_state_da.to_dataframe()
1915
+ ax.plot(
1916
+ charge_state_df.index,
1917
+ charge_state_df.values.flatten(),
1918
+ label=self._charge_state,
1919
+ linewidth=2,
1920
+ color=overlay_color,
1921
+ )
1922
+ # Recreate legend with the same styling as with_matplotlib
1923
+ handles, labels = ax.get_legend_handles_labels()
1924
+ ax.legend(
1925
+ handles,
1926
+ labels,
1927
+ loc='upper center',
1928
+ bbox_to_anchor=(0.5, -0.15),
1929
+ ncol=5,
1930
+ frameon=False,
1931
+ )
1196
1932
  fig.tight_layout()
1197
- fig = fig, ax
1933
+
1934
+ figure_like = fig, ax
1935
+ default_filetype = '.png'
1198
1936
 
1199
1937
  return plotting.export_figure(
1200
- fig,
1201
- default_path=self._calculation_results.folder / title,
1202
- default_filetype='.html',
1938
+ figure_like=figure_like,
1939
+ default_path=self._results.folder / title,
1940
+ default_filetype=default_filetype,
1203
1941
  user_path=None if isinstance(save, bool) else pathlib.Path(save),
1204
1942
  show=show,
1205
1943
  save=True if save else False,
1944
+ dpi=dpi,
1206
1945
  )
1207
1946
 
1208
1947
  def node_balance_with_charge_state(
@@ -1227,7 +1966,7 @@ class ComponentResults(_NodeResults):
1227
1966
  return sanitize_dataset(
1228
1967
  ds=self.solution[variable_names],
1229
1968
  threshold=threshold,
1230
- timesteps=self._calculation_results.timesteps_extra,
1969
+ timesteps=self._results.timesteps_extra,
1231
1970
  negate=(
1232
1971
  self.outputs + self.inputs
1233
1972
  if negate_outputs and negate_inputs
@@ -1252,13 +1991,13 @@ class EffectResults(_ElementResults):
1252
1991
  Returns:
1253
1992
  xr.Dataset: Element shares to this effect.
1254
1993
  """
1255
- return self.solution[[name for name in self._variable_names if name.startswith(f'{element}->')]]
1994
+ return self.solution[[name for name in self.variable_names if name.startswith(f'{element}->')]]
1256
1995
 
1257
1996
 
1258
1997
  class FlowResults(_ElementResults):
1259
1998
  def __init__(
1260
1999
  self,
1261
- calculation_results: CalculationResults,
2000
+ results: Results,
1262
2001
  label: str,
1263
2002
  variables: list[str],
1264
2003
  constraints: list[str],
@@ -1266,7 +2005,7 @@ class FlowResults(_ElementResults):
1266
2005
  end: str,
1267
2006
  component: str,
1268
2007
  ):
1269
- super().__init__(calculation_results, label, variables, constraints)
2008
+ super().__init__(results, label, variables, constraints)
1270
2009
  self.start = start
1271
2010
  self.end = end
1272
2011
  self.component = component
@@ -1277,7 +2016,7 @@ class FlowResults(_ElementResults):
1277
2016
 
1278
2017
  @property
1279
2018
  def flow_hours(self) -> xr.DataArray:
1280
- return (self.flow_rate * self._calculation_results.hours_per_timestep).rename(f'{self.label}|flow_hours')
2019
+ return (self.flow_rate * self._results.timestep_duration).rename(f'{self.label}|flow_hours')
1281
2020
 
1282
2021
  @property
1283
2022
  def size(self) -> xr.DataArray:
@@ -1285,16 +2024,16 @@ class FlowResults(_ElementResults):
1285
2024
  if name in self.solution:
1286
2025
  return self.solution[name]
1287
2026
  try:
1288
- return self._calculation_results.flow_system.flows[self.label].size.rename(name)
2027
+ return self._results.flow_system.flows[self.label].size.rename(name)
1289
2028
  except _FlowSystemRestorationError:
1290
2029
  logger.critical(f'Size of flow {self.label}.size not available. Returning NaN')
1291
2030
  return xr.DataArray(np.nan).rename(name)
1292
2031
 
1293
2032
 
1294
- class SegmentedCalculationResults:
1295
- """Results container for segmented optimization calculations with temporal decomposition.
2033
+ class SegmentedResults:
2034
+ """Results container for segmented optimizations with temporal decomposition.
1296
2035
 
1297
- This class manages results from SegmentedCalculation runs where large optimization
2036
+ This class manages results from SegmentedOptimization runs where large optimization
1298
2037
  problems are solved by dividing the time horizon into smaller, overlapping segments.
1299
2038
  It provides unified access to results across all segments while maintaining the
1300
2039
  ability to analyze individual segment behavior.
@@ -1317,8 +2056,8 @@ class SegmentedCalculationResults:
1317
2056
  Load and analyze segmented results:
1318
2057
 
1319
2058
  ```python
1320
- # Load segmented calculation results
1321
- results = SegmentedCalculationResults.from_file('results', 'annual_segmented')
2059
+ # Load segmented optimization results
2060
+ results = SegmentedResults.from_file('results', 'annual_segmented')
1322
2061
 
1323
2062
  # Access unified results across all segments
1324
2063
  full_timeline = results.all_timesteps
@@ -1334,20 +2073,20 @@ class SegmentedCalculationResults:
1334
2073
  max_discontinuity = segment_boundaries['max_storage_jump']
1335
2074
  ```
1336
2075
 
1337
- Create from segmented calculation:
2076
+ Create from segmented optimization:
1338
2077
 
1339
2078
  ```python
1340
- # After running segmented calculation
1341
- segmented_calc = SegmentedCalculation(
2079
+ # After running segmented optimization
2080
+ segmented_opt = SegmentedOptimization(
1342
2081
  name='annual_system',
1343
2082
  flow_system=system,
1344
2083
  timesteps_per_segment=730, # Monthly segments
1345
2084
  overlap_timesteps=48, # 2-day overlap
1346
2085
  )
1347
- segmented_calc.do_modeling_and_solve(solver='gurobi')
2086
+ segmented_opt.do_modeling_and_solve(solver='gurobi')
1348
2087
 
1349
2088
  # Extract unified results
1350
- results = SegmentedCalculationResults.from_calculation(segmented_calc)
2089
+ results = SegmentedResults.from_optimization(segmented_opt)
1351
2090
 
1352
2091
  # Save combined results
1353
2092
  results.to_file(compression=5)
@@ -1388,34 +2127,50 @@ class SegmentedCalculationResults:
1388
2127
  """
1389
2128
 
1390
2129
  @classmethod
1391
- def from_calculation(cls, calculation: SegmentedCalculation):
2130
+ def from_optimization(cls, optimization: SegmentedOptimization) -> SegmentedResults:
2131
+ """Create SegmentedResults from a SegmentedOptimization instance.
2132
+
2133
+ Args:
2134
+ optimization: The SegmentedOptimization instance to extract results from.
2135
+
2136
+ Returns:
2137
+ SegmentedResults: New instance containing the optimization results.
2138
+ """
1392
2139
  return cls(
1393
- [calc.results for calc in calculation.sub_calculations],
1394
- all_timesteps=calculation.all_timesteps,
1395
- timesteps_per_segment=calculation.timesteps_per_segment,
1396
- overlap_timesteps=calculation.overlap_timesteps,
1397
- name=calculation.name,
1398
- folder=calculation.folder,
2140
+ [calc.results for calc in optimization.sub_optimizations],
2141
+ all_timesteps=optimization.all_timesteps,
2142
+ timesteps_per_segment=optimization.timesteps_per_segment,
2143
+ overlap_timesteps=optimization.overlap_timesteps,
2144
+ name=optimization.name,
2145
+ folder=optimization.folder,
1399
2146
  )
1400
2147
 
1401
2148
  @classmethod
1402
- def from_file(cls, folder: str | pathlib.Path, name: str) -> SegmentedCalculationResults:
1403
- """Load SegmentedCalculationResults from saved files.
2149
+ def from_file(cls, folder: str | pathlib.Path, name: str) -> SegmentedResults:
2150
+ """Load SegmentedResults from saved files.
1404
2151
 
1405
2152
  Args:
1406
2153
  folder: Directory containing saved files.
1407
2154
  name: Base name of saved files.
1408
2155
 
1409
2156
  Returns:
1410
- SegmentedCalculationResults: Loaded instance.
2157
+ SegmentedResults: Loaded instance.
1411
2158
  """
1412
2159
  folder = pathlib.Path(folder)
1413
2160
  path = folder / name
1414
- logger.info(f'loading calculation "{name}" from file ("{path.with_suffix(".nc4")}")')
1415
- with open(path.with_suffix('.json'), encoding='utf-8') as f:
1416
- meta_data = json.load(f)
2161
+ meta_data_path = path.with_suffix('.json')
2162
+ logger.info(f'loading segmented optimization meta data from file ("{meta_data_path}")')
2163
+ meta_data = fx_io.load_json(meta_data_path)
2164
+
2165
+ # Handle both new 'sub_optimizations' and legacy 'sub_calculations' keys
2166
+ sub_names = meta_data.get('sub_optimizations') or meta_data.get('sub_calculations')
2167
+ if sub_names is None:
2168
+ raise KeyError(
2169
+ "Missing 'sub_optimizations' (or legacy 'sub_calculations') key in segmented results metadata."
2170
+ )
2171
+
1417
2172
  return cls(
1418
- [CalculationResults.from_file(folder, sub_name) for sub_name in meta_data['sub_calculations']],
2173
+ [Results.from_file(folder, sub_name) for sub_name in sub_names],
1419
2174
  all_timesteps=pd.DatetimeIndex(
1420
2175
  [datetime.datetime.fromisoformat(date) for date in meta_data['all_timesteps']], name='time'
1421
2176
  ),
@@ -1427,20 +2182,26 @@ class SegmentedCalculationResults:
1427
2182
 
1428
2183
  def __init__(
1429
2184
  self,
1430
- segment_results: list[CalculationResults],
2185
+ segment_results: list[Results],
1431
2186
  all_timesteps: pd.DatetimeIndex,
1432
2187
  timesteps_per_segment: int,
1433
2188
  overlap_timesteps: int,
1434
2189
  name: str,
1435
2190
  folder: pathlib.Path | None = None,
1436
2191
  ):
2192
+ warnings.warn(
2193
+ f'SegmentedResults is deprecated and will be removed in v{DEPRECATION_REMOVAL_VERSION}. '
2194
+ 'A replacement API for segmented optimization will be provided in a future release.',
2195
+ DeprecationWarning,
2196
+ stacklevel=2,
2197
+ )
1437
2198
  self.segment_results = segment_results
1438
2199
  self.all_timesteps = all_timesteps
1439
2200
  self.timesteps_per_segment = timesteps_per_segment
1440
2201
  self.overlap_timesteps = overlap_timesteps
1441
2202
  self.name = name
1442
2203
  self.folder = pathlib.Path(folder) if folder is not None else pathlib.Path.cwd() / 'results'
1443
- self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.all_timesteps)
2204
+ self._colors = {}
1444
2205
 
1445
2206
  @property
1446
2207
  def meta_data(self) -> dict[str, int | list[str]]:
@@ -1448,13 +2209,74 @@ class SegmentedCalculationResults:
1448
2209
  'all_timesteps': [datetime.datetime.isoformat(date) for date in self.all_timesteps],
1449
2210
  'timesteps_per_segment': self.timesteps_per_segment,
1450
2211
  'overlap_timesteps': self.overlap_timesteps,
1451
- 'sub_calculations': [calc.name for calc in self.segment_results],
2212
+ 'sub_optimizations': [calc.name for calc in self.segment_results],
1452
2213
  }
1453
2214
 
1454
2215
  @property
1455
2216
  def segment_names(self) -> list[str]:
1456
2217
  return [segment.name for segment in self.segment_results]
1457
2218
 
2219
+ @property
2220
+ def colors(self) -> dict[str, str]:
2221
+ return self._colors
2222
+
2223
+ @colors.setter
2224
+ def colors(self, colors: dict[str, str]):
2225
+ """Applies colors to all segments"""
2226
+ self._colors = colors
2227
+ for segment in self.segment_results:
2228
+ segment.colors = copy.deepcopy(colors)
2229
+
2230
+ def setup_colors(
2231
+ self,
2232
+ config: dict[str, str | list[str]] | str | pathlib.Path | None = None,
2233
+ default_colorscale: str | None = None,
2234
+ ) -> dict[str, str]:
2235
+ """
2236
+ Setup colors for all variables across all segment results.
2237
+
2238
+ This method applies the same color configuration to all segments, ensuring
2239
+ consistent visualization across the entire segmented optimization. The color
2240
+ mapping is propagated to each segment's Results instance.
2241
+
2242
+ Args:
2243
+ config: Configuration for color assignment. Can be:
2244
+ - dict: Maps components to colors/colorscales:
2245
+ * 'component1': 'red' # Single component to single color
2246
+ * 'component1': '#FF0000' # Single component to hex color
2247
+ - OR maps colorscales to multiple components:
2248
+ * 'colorscale_name': ['component1', 'component2'] # Colorscale across components
2249
+ - str: Path to a JSON/YAML config file or a colorscale name to apply to all
2250
+ - Path: Path to a JSON/YAML config file
2251
+ - None: Use default_colorscale for all components
2252
+ default_colorscale: Default colorscale for unconfigured components (default: 'turbo')
2253
+
2254
+ Examples:
2255
+ ```python
2256
+ # Apply colors to all segments
2257
+ segmented_results.setup_colors(
2258
+ {
2259
+ 'CHP': 'red',
2260
+ 'Blues': ['Storage1', 'Storage2'],
2261
+ 'Oranges': ['Solar1', 'Solar2'],
2262
+ }
2263
+ )
2264
+
2265
+ # Use a single colorscale for all components in all segments
2266
+ segmented_results.setup_colors('portland')
2267
+ ```
2268
+
2269
+ Returns:
2270
+ Complete variable-to-color mapping dictionary from the first segment
2271
+ (all segments will have the same mapping)
2272
+ """
2273
+ if not self.segment_results:
2274
+ raise ValueError('No segment_results available; cannot setup colors on an empty SegmentedResults.')
2275
+
2276
+ self.colors = self.segment_results[0].setup_colors(config=config, default_colorscale=default_colorscale)
2277
+
2278
+ return self.colors
2279
+
1458
2280
  def solution_without_overlap(self, variable_name: str) -> xr.DataArray:
1459
2281
  """Get variable solution removing segment overlaps.
1460
2282
 
@@ -1473,123 +2295,265 @@ class SegmentedCalculationResults:
1473
2295
  def plot_heatmap(
1474
2296
  self,
1475
2297
  variable_name: str,
1476
- heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
1477
- heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
1478
- color_map: str = 'portland',
2298
+ reshape_time: tuple[Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], Literal['W', 'D', 'h', '15min', 'min']]
2299
+ | Literal['auto']
2300
+ | None = 'auto',
2301
+ colors: plotting.ColorType | None = None,
1479
2302
  save: bool | pathlib.Path = False,
1480
- show: bool = True,
2303
+ show: bool | None = None,
1481
2304
  engine: plotting.PlottingEngine = 'plotly',
2305
+ facet_by: str | list[str] | None = None,
2306
+ animate_by: str | None = None,
2307
+ facet_cols: int | None = None,
2308
+ fill: Literal['ffill', 'bfill'] | None = 'ffill',
2309
+ **plot_kwargs: Any,
1482
2310
  ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
1483
2311
  """Plot heatmap of variable solution across segments.
1484
2312
 
1485
2313
  Args:
1486
2314
  variable_name: Variable to plot.
1487
- heatmap_timeframes: Time aggregation level.
1488
- heatmap_timesteps_per_frame: Timesteps per frame.
1489
- color_map: Color scheme. Also see plotly.
2315
+ reshape_time: Time reshaping configuration (default: 'auto'):
2316
+ - 'auto': Automatically applies ('D', 'h') when only 'time' dimension remains
2317
+ - Tuple like ('D', 'h'): Explicit reshaping (days vs hours)
2318
+ - None: Disable time reshaping
2319
+ colors: Color scheme. See plotting.ColorType for options.
1490
2320
  save: Whether to save plot.
1491
2321
  show: Whether to display plot.
1492
2322
  engine: Plotting engine.
2323
+ facet_by: Dimension(s) to create facets (subplots) for.
2324
+ animate_by: Dimension to animate over (Plotly only).
2325
+ facet_cols: Number of columns in the facet grid layout.
2326
+ fill: Method to fill missing values: 'ffill' or 'bfill'.
2327
+ **plot_kwargs: Additional plotting customization options.
2328
+ Common options:
2329
+
2330
+ - **dpi** (int): Export resolution for saved plots. Default: 300.
2331
+ - **vmin** (float): Minimum value for color scale.
2332
+ - **vmax** (float): Maximum value for color scale.
2333
+
2334
+ For Matplotlib heatmaps:
2335
+
2336
+ - **imshow_kwargs** (dict): Additional kwargs for matplotlib's imshow.
2337
+ - **cbar_kwargs** (dict): Additional kwargs for colorbar customization.
1493
2338
 
1494
2339
  Returns:
1495
2340
  Figure object.
1496
2341
  """
1497
2342
  return plot_heatmap(
1498
- dataarray=self.solution_without_overlap(variable_name),
2343
+ data=self.solution_without_overlap(variable_name),
1499
2344
  name=variable_name,
1500
2345
  folder=self.folder,
1501
- heatmap_timeframes=heatmap_timeframes,
1502
- heatmap_timesteps_per_frame=heatmap_timesteps_per_frame,
1503
- color_map=color_map,
2346
+ reshape_time=reshape_time,
2347
+ colors=colors,
1504
2348
  save=save,
1505
2349
  show=show,
1506
2350
  engine=engine,
2351
+ facet_by=facet_by,
2352
+ animate_by=animate_by,
2353
+ facet_cols=facet_cols,
2354
+ fill=fill,
2355
+ **plot_kwargs,
1507
2356
  )
1508
2357
 
1509
- def to_file(self, folder: str | pathlib.Path | None = None, name: str | None = None, compression: int = 5):
2358
+ def to_file(
2359
+ self,
2360
+ folder: str | pathlib.Path | None = None,
2361
+ name: str | None = None,
2362
+ compression: int = 5,
2363
+ overwrite: bool = False,
2364
+ ):
1510
2365
  """Save segmented results to files.
1511
2366
 
1512
2367
  Args:
1513
2368
  folder: Save folder (defaults to instance folder).
1514
2369
  name: File name (defaults to instance name).
1515
2370
  compression: Compression level 0-9.
2371
+ overwrite: If False, raise error if results files already exist. If True, overwrite existing files.
2372
+
2373
+ Raises:
2374
+ FileExistsError: If overwrite=False and result files already exist.
1516
2375
  """
1517
2376
  folder = self.folder if folder is None else pathlib.Path(folder)
1518
2377
  name = self.name if name is None else name
1519
2378
  path = folder / name
1520
- if not folder.exists():
1521
- try:
1522
- folder.mkdir(parents=False)
1523
- except FileNotFoundError as e:
1524
- raise FileNotFoundError(
1525
- f'Folder {folder} and its parent do not exist. Please create them first.'
1526
- ) from e
2379
+
2380
+ # Ensure folder exists, creating parent directories as needed
2381
+ folder.mkdir(parents=True, exist_ok=True)
2382
+
2383
+ # Check if metadata file already exists (unless overwrite is True)
2384
+ metadata_file = path.with_suffix('.json')
2385
+ if not overwrite and metadata_file.exists():
2386
+ raise FileExistsError(
2387
+ f'Segmented results file already exists: {metadata_file}. '
2388
+ f'Use overwrite=True to overwrite existing files.'
2389
+ )
2390
+
2391
+ # Save segments (they will check for overwrite themselves)
1527
2392
  for segment in self.segment_results:
1528
- segment.to_file(folder=folder, name=segment.name, compression=compression)
2393
+ segment.to_file(folder=folder, name=segment.name, compression=compression, overwrite=overwrite)
1529
2394
 
1530
- with open(path.with_suffix('.json'), 'w', encoding='utf-8') as f:
1531
- json.dump(self.meta_data, f, indent=4, ensure_ascii=False)
1532
- logger.info(f'Saved calculation "{name}" to {path}')
2395
+ fx_io.save_json(self.meta_data, metadata_file)
2396
+ logger.info(f'Saved optimization "{name}" to {path}')
1533
2397
 
1534
2398
 
1535
2399
  def plot_heatmap(
1536
- dataarray: xr.DataArray,
1537
- name: str,
1538
- folder: pathlib.Path,
1539
- heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
1540
- heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
1541
- color_map: str = 'portland',
2400
+ data: xr.DataArray | xr.Dataset,
2401
+ name: str | None = None,
2402
+ folder: pathlib.Path | None = None,
2403
+ colors: plotting.ColorType | None = None,
1542
2404
  save: bool | pathlib.Path = False,
1543
- show: bool = True,
2405
+ show: bool | None = None,
1544
2406
  engine: plotting.PlottingEngine = 'plotly',
1545
- indexer: dict[str, Any] | None = None,
2407
+ select: dict[str, Any] | None = None,
2408
+ facet_by: str | list[str] | None = None,
2409
+ animate_by: str | None = None,
2410
+ facet_cols: int | None = None,
2411
+ reshape_time: tuple[Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], Literal['W', 'D', 'h', '15min', 'min']]
2412
+ | Literal['auto']
2413
+ | None = 'auto',
2414
+ fill: Literal['ffill', 'bfill'] | None = 'ffill',
2415
+ **plot_kwargs: Any,
1546
2416
  ):
1547
- """Plot heatmap of time series data.
2417
+ """Plot heatmap visualization with support for multi-variable, faceting, and animation.
2418
+
2419
+ This function provides a standalone interface to the heatmap plotting capabilities,
2420
+ supporting the same modern features as Results.plot_heatmap().
1548
2421
 
1549
2422
  Args:
1550
- dataarray: Data to plot.
1551
- name: Variable name for title.
1552
- folder: Save folder.
1553
- heatmap_timeframes: Time aggregation level.
1554
- heatmap_timesteps_per_frame: Timesteps per frame.
1555
- color_map: Color scheme. Also see plotly.
1556
- save: Whether to save plot.
1557
- show: Whether to display plot.
1558
- engine: Plotting engine.
1559
- indexer: Optional selection dict, e.g., {'scenario': 'base', 'period': 2024}.
1560
- If None, uses first value for each dimension.
1561
- If empty dict {}, uses all values.
2423
+ data: Data to plot. Can be a single DataArray or an xarray Dataset.
2424
+ When a Dataset is provided, all data variables are combined along a new 'variable' dimension.
2425
+ name: Optional name for the title. If not provided, uses the DataArray name or
2426
+ generates a default title for Datasets.
2427
+ folder: Save folder for the plot. Defaults to current directory if not provided.
2428
+ colors: Color scheme for the heatmap. See `flixopt.plotting.ColorType` for options.
2429
+ save: Whether to save the plot or not. If a path is provided, the plot will be saved at that location.
2430
+ show: Whether to show the plot or not.
2431
+ engine: The engine to use for plotting. Can be either 'plotly' or 'matplotlib'.
2432
+ select: Optional data selection dict. Supports single values, lists, slices, and index arrays.
2433
+ facet_by: Dimension(s) to create facets (subplots) for. Can be a single dimension name (str)
2434
+ or list of dimensions. Each unique value combination creates a subplot.
2435
+ animate_by: Dimension to animate over (Plotly only). Creates animation frames.
2436
+ facet_cols: Number of columns in the facet grid layout (default: 3).
2437
+ reshape_time: Time reshaping configuration (default: 'auto'):
2438
+ - 'auto': Automatically applies ('D', 'h') when only 'time' dimension remains
2439
+ - Tuple: Explicit reshaping, e.g. ('D', 'h') for days vs hours
2440
+ - None: Disable auto-reshaping
2441
+ fill: Method to fill missing values after reshape: 'ffill' (forward fill) or 'bfill' (backward fill).
2442
+ Default is 'ffill'.
2443
+
2444
+ Examples:
2445
+ Single DataArray with time reshaping:
2446
+
2447
+ >>> plot_heatmap(data, name='Temperature', folder=Path('.'), reshape_time=('D', 'h'))
2448
+
2449
+ Dataset with multiple variables (facet by variable):
2450
+
2451
+ >>> dataset = xr.Dataset({'Boiler': data1, 'CHP': data2, 'Storage': data3})
2452
+ >>> plot_heatmap(
2453
+ ... dataset,
2454
+ ... folder=Path('.'),
2455
+ ... facet_by='variable',
2456
+ ... reshape_time=('D', 'h'),
2457
+ ... )
2458
+
2459
+ Dataset with animation by variable:
2460
+
2461
+ >>> plot_heatmap(dataset, animate_by='variable', reshape_time=('D', 'h'))
1562
2462
  """
1563
- dataarray, suffix_parts = _apply_indexer_to_data(dataarray, indexer, drop=True)
2463
+ # Convert Dataset to DataArray with 'variable' dimension
2464
+ if isinstance(data, xr.Dataset):
2465
+ # Extract all data variables from the Dataset
2466
+ variable_names = list(data.data_vars)
2467
+ dataarrays = [data[var] for var in variable_names]
2468
+
2469
+ # Combine into single DataArray with 'variable' dimension
2470
+ data = xr.concat(dataarrays, dim='variable')
2471
+ data = data.assign_coords(variable=variable_names)
2472
+
2473
+ # Use Dataset variable names for title if name not provided
2474
+ if name is None:
2475
+ title_name = f'Heatmap of {len(variable_names)} variables'
2476
+ else:
2477
+ title_name = name
2478
+ else:
2479
+ # Single DataArray
2480
+ if name is None:
2481
+ title_name = data.name if data.name else 'Heatmap'
2482
+ else:
2483
+ title_name = name
2484
+
2485
+ # Apply select filtering
2486
+ data, suffix_parts = _apply_selection_to_data(data, select=select, drop=True)
1564
2487
  suffix = '--' + '-'.join(suffix_parts) if suffix_parts else ''
1565
- name = name if not suffix_parts else name + suffix
1566
2488
 
1567
- heatmap_data = plotting.heat_map_data_from_df(
1568
- dataarray.to_dataframe(name), heatmap_timeframes, heatmap_timesteps_per_frame, 'ffill'
1569
- )
2489
+ # Matplotlib heatmaps require at most 2D data
2490
+ # Time dimension will be reshaped to 2D (timeframe × timestep), so can't have other dims alongside it
2491
+ if engine == 'matplotlib':
2492
+ dims = list(data.dims)
1570
2493
 
1571
- xlabel, ylabel = f'timeframe [{heatmap_timeframes}]', f'timesteps [{heatmap_timesteps_per_frame}]'
2494
+ # If 'time' dimension exists and will be reshaped, we can't have any other dimensions
2495
+ if 'time' in dims and len(dims) > 1 and reshape_time is not None:
2496
+ extra_dims = [d for d in dims if d != 'time']
2497
+ raise ValueError(
2498
+ f'Matplotlib heatmaps with time reshaping cannot have additional dimensions. '
2499
+ f'Found extra dimensions: {extra_dims}. '
2500
+ f'Use select={{...}} to reduce to time only, use "reshape_time=None", or switch to engine="plotly" for multi-dimensional support.'
2501
+ )
2502
+ # If no 'time' dimension (already reshaped or different data), allow at most 2 dimensions
2503
+ elif 'time' not in dims and len(dims) > 2:
2504
+ raise ValueError(
2505
+ f'Matplotlib heatmaps support at most 2 dimensions, but data has {len(dims)}: {dims}. '
2506
+ f'Use select={{...}} to reduce dimensions or switch to engine="plotly".'
2507
+ )
2508
+
2509
+ # Build title
2510
+ title = f'{title_name}{suffix}'
2511
+ if isinstance(reshape_time, tuple):
2512
+ timeframes, timesteps_per_frame = reshape_time
2513
+ title += f' ({timeframes} vs {timesteps_per_frame})'
1572
2514
 
2515
+ # Extract dpi before passing to plotting functions
2516
+ dpi = plot_kwargs.pop('dpi', None) # None uses CONFIG.Plotting.default_dpi
2517
+
2518
+ # Plot with appropriate engine
1573
2519
  if engine == 'plotly':
1574
- figure_like = plotting.heat_map_plotly(
1575
- heatmap_data, title=name, color_map=color_map, xlabel=xlabel, ylabel=ylabel
2520
+ figure_like = plotting.heatmap_with_plotly(
2521
+ data=data,
2522
+ facet_by=facet_by,
2523
+ animate_by=animate_by,
2524
+ colors=colors,
2525
+ title=title,
2526
+ facet_cols=facet_cols,
2527
+ reshape_time=reshape_time,
2528
+ fill=fill,
2529
+ **plot_kwargs,
1576
2530
  )
1577
2531
  default_filetype = '.html'
1578
2532
  elif engine == 'matplotlib':
1579
- figure_like = plotting.heat_map_matplotlib(
1580
- heatmap_data, title=name, color_map=color_map, xlabel=xlabel, ylabel=ylabel
2533
+ figure_like = plotting.heatmap_with_matplotlib(
2534
+ data=data,
2535
+ colors=colors,
2536
+ title=title,
2537
+ reshape_time=reshape_time,
2538
+ fill=fill,
2539
+ **plot_kwargs,
1581
2540
  )
1582
2541
  default_filetype = '.png'
1583
2542
  else:
1584
2543
  raise ValueError(f'Engine "{engine}" not supported. Use "plotly" or "matplotlib"')
1585
2544
 
2545
+ # Set default folder if not provided
2546
+ if folder is None:
2547
+ folder = pathlib.Path('.')
2548
+
1586
2549
  return plotting.export_figure(
1587
2550
  figure_like=figure_like,
1588
- default_path=folder / f'{name} ({heatmap_timeframes}-{heatmap_timesteps_per_frame})',
2551
+ default_path=folder / title,
1589
2552
  default_filetype=default_filetype,
1590
2553
  user_path=None if isinstance(save, bool) else pathlib.Path(save),
1591
2554
  show=show,
1592
2555
  save=True if save else False,
2556
+ dpi=dpi,
1593
2557
  )
1594
2558
 
1595
2559
 
@@ -1787,8 +2751,13 @@ def filter_dataarray_by_coord(da: xr.DataArray, **kwargs: str | list[str] | None
1787
2751
  if coord_name not in array.coords:
1788
2752
  raise AttributeError(f"Missing required coordinate '{coord_name}'")
1789
2753
 
1790
- # Convert single value to list
1791
- val_list = [coord_values] if isinstance(coord_values, str) else coord_values
2754
+ # Normalize to list for sequence-like inputs (excluding strings)
2755
+ if isinstance(coord_values, str):
2756
+ val_list = [coord_values]
2757
+ elif isinstance(coord_values, (list, tuple, np.ndarray, pd.Index)):
2758
+ val_list = list(coord_values)
2759
+ else:
2760
+ val_list = [coord_values]
1792
2761
 
1793
2762
  # Verify coord_values exist
1794
2763
  available = set(array[coord_name].values)
@@ -1798,7 +2767,7 @@ def filter_dataarray_by_coord(da: xr.DataArray, **kwargs: str | list[str] | None
1798
2767
 
1799
2768
  # Apply filter
1800
2769
  return array.where(
1801
- array[coord_name].isin(val_list) if isinstance(coord_values, list) else array[coord_name] == coord_values,
2770
+ array[coord_name].isin(val_list) if len(val_list) > 1 else array[coord_name] == val_list[0],
1802
2771
  drop=True,
1803
2772
  )
1804
2773
 
@@ -1817,36 +2786,26 @@ def filter_dataarray_by_coord(da: xr.DataArray, **kwargs: str | list[str] | None
1817
2786
  return da
1818
2787
 
1819
2788
 
1820
- def _apply_indexer_to_data(
1821
- data: xr.DataArray | xr.Dataset, indexer: dict[str, Any] | None = None, drop=False
2789
+ def _apply_selection_to_data(
2790
+ data: xr.DataArray | xr.Dataset,
2791
+ select: dict[str, Any] | None = None,
2792
+ drop=False,
1822
2793
  ) -> tuple[xr.DataArray | xr.Dataset, list[str]]:
1823
2794
  """
1824
- Apply indexer selection or auto-select first values for non-time dimensions.
2795
+ Apply selection to data.
1825
2796
 
1826
2797
  Args:
1827
2798
  data: xarray Dataset or DataArray
1828
- indexer: Optional selection dict
1829
- If None, uses first value for each dimension (except time).
1830
- If empty dict {}, uses all values.
2799
+ select: Optional selection dict
2800
+ drop: Whether to drop dimensions after selection
1831
2801
 
1832
2802
  Returns:
1833
2803
  Tuple of (selected_data, selection_string)
1834
2804
  """
1835
2805
  selection_string = []
1836
2806
 
1837
- if indexer is not None:
1838
- # User provided indexer
1839
- data = data.sel(indexer, drop=drop)
1840
- selection_string.extend(f'{v}[{k}]' for k, v in indexer.items())
1841
- else:
1842
- # Auto-select first value for each dimension except 'time'
1843
- selection = {}
1844
- for dim in data.dims:
1845
- if dim != 'time' and dim in data.coords:
1846
- first_value = data.coords[dim].values[0]
1847
- selection[dim] = first_value
1848
- selection_string.append(f'{first_value}[{dim}]')
1849
- if selection:
1850
- data = data.sel(selection, drop=drop)
2807
+ if select:
2808
+ data = data.sel(select, drop=drop)
2809
+ selection_string.extend(f'{dim}={val}' for dim, val in select.items())
1851
2810
 
1852
2811
  return data, selection_string