flixopt-3.2.1-py3-none-any.whl → flixopt-3.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flixopt/calculation.py +1 -1
- flixopt/components.py +10 -0
- flixopt/effects.py +23 -27
- flixopt/elements.py +54 -1
- flixopt/flow_system.py +139 -84
- flixopt/interface.py +23 -2
- flixopt/io.py +396 -12
- flixopt/results.py +48 -22
- flixopt/structure.py +366 -48
- {flixopt-3.2.1.dist-info → flixopt-3.3.1.dist-info}/METADATA +1 -1
- {flixopt-3.2.1.dist-info → flixopt-3.3.1.dist-info}/RECORD +14 -14
- {flixopt-3.2.1.dist-info → flixopt-3.3.1.dist-info}/WHEEL +0 -0
- {flixopt-3.2.1.dist-info → flixopt-3.3.1.dist-info}/licenses/LICENSE +0 -0
- {flixopt-3.2.1.dist-info → flixopt-3.3.1.dist-info}/top_level.txt +0 -0
flixopt/io.py
CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations

+import inspect
 import json
 import logging
 import pathlib
@@ -8,6 +9,7 @@ from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any

 import numpy as np
+import pandas as pd
 import xarray as xr
 import yaml

@@ -172,6 +174,7 @@ def save_yaml(
     width: int = 1000,
     allow_unicode: bool = True,
     sort_keys: bool = False,
+    compact_numeric_lists: bool = False,
     **kwargs,
 ) -> None:
     """
@@ -184,20 +187,56 @@ def save_yaml(
         width: Maximum line width (default: 1000).
         allow_unicode: If True, allow Unicode characters (default: True).
         sort_keys: If True, sort dictionary keys (default: False).
-
+        compact_numeric_lists: If True, format numeric lists inline for better readability (default: False).
+        **kwargs: Additional arguments to pass to yaml.dump().
     """
     path = pathlib.Path(path)
-
-
-
-
-
-
-
-
-
-
-
+
+    if compact_numeric_lists:
+        # Define custom representer for compact numeric lists
+        def represent_list(dumper, data):
+            """
+            Custom representer for lists to format them inline (flow style)
+            but only if they contain only numbers or nested numeric lists.
+            """
+            if data and all(
+                isinstance(item, (int, float, np.integer, np.floating))
+                or (isinstance(item, list) and all(isinstance(x, (int, float, np.integer, np.floating)) for x in item))
+                for item in data
+            ):
+                return dumper.represent_sequence('tag:yaml.org,2002:seq', data, flow_style=True)
+            return dumper.represent_sequence('tag:yaml.org,2002:seq', data, flow_style=False)
+
+        # Create custom dumper with the representer
+        class CompactDumper(yaml.SafeDumper):
+            pass
+
+        CompactDumper.add_representer(list, represent_list)
+
+        with open(path, 'w', encoding='utf-8') as f:
+            yaml.dump(
+                data,
+                f,
+                Dumper=CompactDumper,
+                indent=indent,
+                width=width,
+                allow_unicode=allow_unicode,
+                sort_keys=sort_keys,
+                default_flow_style=False,
+                **kwargs,
+            )
+    else:
+        with open(path, 'w', encoding='utf-8') as f:
+            yaml.safe_dump(
+                data,
+                f,
+                indent=indent,
+                width=width,
+                allow_unicode=allow_unicode,
+                sort_keys=sort_keys,
+                default_flow_style=False,
+                **kwargs,
+            )


 def load_config_file(path: str | pathlib.Path) -> dict:
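Before the remaining io.py additions below, a minimal usage sketch of the new compact_numeric_lists flag. This is not taken from the package: it assumes the function's leading parameters are data and path, which the body above suggests (path = pathlib.Path(path), yaml.dump(data, f, ...)), and that the module imports as flixopt.io.

import flixopt.io as fx_io

summary = {
    'objective': 1234.5,
    'flow_rates': [0.0, 10.0, 10.0, 5.0],    # purely numeric list -> written inline: [0.0, 10.0, 10.0, 5.0]
    'notes': ['first run', 'with storage'],  # contains strings -> keeps the default block style
}
fx_io.save_yaml(summary, 'summary.yaml', compact_numeric_lists=True)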
@@ -547,3 +586,348 @@ class CalculationResultsPaths:
             raise FileNotFoundError(f'Folder {new_folder} does not exist or is not a directory.')
         self.folder = new_folder
         self._update_paths()
+
+
+def numeric_to_str_for_repr(
+    value: int | float | np.integer | np.floating | np.ndarray | pd.Series | pd.DataFrame | xr.DataArray,
+    precision: int = 1,
+    atol: float = 1e-10,
+) -> str:
+    """Format value for display in repr methods.
+
+    For single values or uniform arrays, returns the formatted value.
+    For arrays with variation, returns a range showing min-max.
+
+    Args:
+        value: Numeric value or container (DataArray, array, Series, DataFrame)
+        precision: Number of decimal places (default: 1)
+        atol: Absolute tolerance for considering values equal (default: 1e-10)
+
+    Returns:
+        Formatted string representation:
+        - Single/uniform values: "100.0"
+        - Nearly uniform values: "~100.0" (values differ slightly but display similarly)
+        - Varying values: "50.0-150.0" (shows range from min to max)
+
+    Raises:
+        TypeError: If value cannot be converted to numeric format
+    """
+    # Handle simple scalar types
+    if isinstance(value, (int, float, np.integer, np.floating)):
+        return f'{float(value):.{precision}f}'
+
+    # Extract array data for variation checking
+    arr = None
+    if isinstance(value, xr.DataArray):
+        arr = value.values.flatten()
+    elif isinstance(value, (np.ndarray, pd.Series)):
+        arr = np.asarray(value).flatten()
+    elif isinstance(value, pd.DataFrame):
+        arr = value.values.flatten()
+    else:
+        # Fallback for unknown types
+        try:
+            return f'{float(value):.{precision}f}'
+        except (TypeError, ValueError) as e:
+            raise TypeError(f'Cannot format value of type {type(value).__name__} for repr') from e
+
+    # Normalize dtype and handle empties
+    arr = arr.astype(float, copy=False)
+    if arr.size == 0:
+        return '?'
+
+    # Filter non-finite values
+    finite = arr[np.isfinite(arr)]
+    if finite.size == 0:
+        return 'nan'
+
+    # Check for single value
+    if finite.size == 1:
+        return f'{float(finite[0]):.{precision}f}'
+
+    # Check if all values are the same or very close
+    min_val = float(np.nanmin(finite))
+    max_val = float(np.nanmax(finite))
+
+    # First check: values are essentially identical
+    if np.allclose(min_val, max_val, atol=atol):
+        return f'{float(np.mean(finite)):.{precision}f}'
+
+    # Second check: display values are the same but actual values differ slightly
+    min_str = f'{min_val:.{precision}f}'
+    max_str = f'{max_val:.{precision}f}'
+    if min_str == max_str:
+        return f'~{min_str}'
+
+    # Values vary significantly - show range
+    return f'{min_str}-{max_str}'
+
+
+def _format_value_for_repr(value) -> str:
+    """Format a single value for display in repr.
+
+    Args:
+        value: The value to format
+
+    Returns:
+        Formatted string representation of the value
+    """
+    # Format numeric types using specialized formatter
+    if isinstance(value, (int, float, np.integer, np.floating, np.ndarray, pd.Series, pd.DataFrame, xr.DataArray)):
+        try:
+            return numeric_to_str_for_repr(value)
+        except Exception:
+            value_repr = repr(value)
+            if len(value_repr) > 50:
+                value_repr = value_repr[:47] + '...'
+            return value_repr
+
+    # Format dicts with numeric/array values nicely
+    elif isinstance(value, dict):
+        try:
+            formatted_items = []
+            for k, v in value.items():
+                if isinstance(
+                    v, (int, float, np.integer, np.floating, np.ndarray, pd.Series, pd.DataFrame, xr.DataArray)
+                ):
+                    v_str = numeric_to_str_for_repr(v)
+                else:
+                    v_str = repr(v)
+                    if len(v_str) > 30:
+                        v_str = v_str[:27] + '...'
+                formatted_items.append(f'{repr(k)}: {v_str}')
+            value_repr = '{' + ', '.join(formatted_items) + '}'
+            if len(value_repr) > 50:
+                value_repr = value_repr[:47] + '...'
+            return value_repr
+        except Exception:
+            value_repr = repr(value)
+            if len(value_repr) > 50:
+                value_repr = value_repr[:47] + '...'
+            return value_repr
+
+    # Default repr with truncation
+    else:
+        value_repr = repr(value)
+        if len(value_repr) > 50:
+            value_repr = value_repr[:47] + '...'
+        return value_repr
+
+
+def build_repr_from_init(
+    obj: object,
+    excluded_params: set[str] | None = None,
+    label_as_positional: bool = True,
+    skip_default_size: bool = False,
+) -> str:
+    """Build a repr string from __init__ signature, showing non-default parameter values.
+
+    This utility function extracts common repr logic used across flixopt classes.
+    It introspects the __init__ method to build a constructor-style repr showing
+    only parameters that differ from their defaults.
+
+    Args:
+        obj: The object to create repr for
+        excluded_params: Set of parameter names to exclude (e.g., {'self', 'inputs', 'outputs'})
+            Default excludes 'self', 'label', and 'kwargs'
+        label_as_positional: If True and 'label' param exists, show it as first positional arg
+        skip_default_size: If True, skip 'size' parameter when it equals CONFIG.Modeling.big
+
+    Returns:
+        Formatted repr string like: ClassName("label", param=value)
+    """
+    if excluded_params is None:
+        excluded_params = {'self', 'label', 'kwargs'}
+    else:
+        # Always exclude 'self'
+        excluded_params = excluded_params | {'self'}
+
+    try:
+        # Get the constructor arguments and their current values
+        init_signature = inspect.signature(obj.__init__)
+        init_params = init_signature.parameters
+
+        # Check if this has a 'label' parameter - if so, show it first as positional
+        has_label = 'label' in init_params and label_as_positional
+
+        # Build kwargs for non-default parameters
+        kwargs_parts = []
+        label_value = None
+
+        for param_name, param in init_params.items():
+            # Skip *args and **kwargs
+            if param.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
+                continue
+
+            # Handle label separately if showing as positional (check BEFORE excluded_params)
+            if param_name == 'label' and has_label:
+                label_value = getattr(obj, param_name, None)
+                continue
+
+            # Now check if parameter should be excluded
+            if param_name in excluded_params:
+                continue
+
+            # Get current value
+            value = getattr(obj, param_name, None)
+
+            # Skip if value matches default
+            if param.default != inspect.Parameter.empty:
+                # Special handling for empty containers (even if default was None)
+                if isinstance(value, (dict, list, tuple, set)) and len(value) == 0:
+                    if param.default is None or (
+                        isinstance(param.default, (dict, list, tuple, set)) and len(param.default) == 0
+                    ):
+                        continue
+
+                # Handle array comparisons (xarray, numpy)
+                elif isinstance(value, (xr.DataArray, np.ndarray)):
+                    try:
+                        if isinstance(param.default, (xr.DataArray, np.ndarray)):
+                            # Compare arrays element-wise
+                            if isinstance(value, xr.DataArray) and isinstance(param.default, xr.DataArray):
+                                if value.equals(param.default):
+                                    continue
+                            elif np.array_equal(value, param.default):
+                                continue
+                        elif isinstance(param.default, (int, float, np.integer, np.floating)):
+                            # Compare array to scalar (e.g., after transform_data converts scalar to DataArray)
+                            if isinstance(value, xr.DataArray):
+                                if np.all(value.values == float(param.default)):
+                                    continue
+                            elif isinstance(value, np.ndarray):
+                                if np.all(value == float(param.default)):
+                                    continue
+                    except Exception:
+                        pass  # If comparison fails, include in repr
+
+                # Handle numeric comparisons (deals with 0 vs 0.0, int vs float)
+                elif isinstance(value, (int, float, np.integer, np.floating)) and isinstance(
+                    param.default, (int, float, np.integer, np.floating)
+                ):
+                    try:
+                        if float(value) == float(param.default):
+                            continue
+                    except (ValueError, TypeError):
+                        pass
+
+                elif value == param.default:
+                    continue
+
+            # Skip None values if default is None
+            if value is None and param.default is None:
+                continue
+
+            # Special case: hide CONFIG.Modeling.big for size parameter
+            if skip_default_size and param_name == 'size':
+                from .config import CONFIG
+
+                try:
+                    if isinstance(value, (int, float, np.integer, np.floating)):
+                        if float(value) == CONFIG.Modeling.big:
+                            continue
+                except Exception:
+                    pass
+
+            # Format value using helper function
+            value_repr = _format_value_for_repr(value)
+            kwargs_parts.append(f'{param_name}={value_repr}')
+
+        # Build args string with label first as positional if present
+        if has_label and label_value is not None:
+            # Use label_full if available, otherwise label
+            if hasattr(obj, 'label_full'):
+                label_repr = repr(obj.label_full)
+            else:
+                label_repr = repr(label_value)
+
+            if len(label_repr) > 50:
+                label_repr = label_repr[:47] + '...'
+            args_str = label_repr
+            if kwargs_parts:
+                args_str += ', ' + ', '.join(kwargs_parts)
+        else:
+            args_str = ', '.join(kwargs_parts)
+
+        # Build final repr
+        class_name = obj.__class__.__name__
+
+        return f'{class_name}({args_str})'
+
+    except Exception:
+        # Fallback if introspection fails
+        return f'{obj.__class__.__name__}(<repr_failed>)'
+
+
+def format_flow_details(obj, has_inputs: bool = True, has_outputs: bool = True) -> str:
+    """Format inputs and outputs as indented bullet list.
+
+    Args:
+        obj: Object with 'inputs' and/or 'outputs' attributes
+        has_inputs: Whether to check for inputs
+        has_outputs: Whether to check for outputs
+
+    Returns:
+        Formatted string with flow details (including leading newline), or empty string if no flows
+    """
+    flow_lines = []
+
+    if has_inputs and hasattr(obj, 'inputs') and obj.inputs:
+        flow_lines.append(' inputs:')
+        for flow in obj.inputs:
+            flow_lines.append(f' * {repr(flow)}')
+
+    if has_outputs and hasattr(obj, 'outputs') and obj.outputs:
+        flow_lines.append(' outputs:')
+        for flow in obj.outputs:
+            flow_lines.append(f' * {repr(flow)}')
+
+    return '\n' + '\n'.join(flow_lines) if flow_lines else ''
+
+
+def format_title_with_underline(title: str, underline_char: str = '-') -> str:
+    """Format a title with underline of matching length.
+
+    Args:
+        title: The title text
+        underline_char: Character to use for underline (default: '-')
+
+    Returns:
+        Formatted string: "Title\\n-----\\n"
+    """
+    return f'{title}\n{underline_char * len(title)}\n'
+
+
+def format_sections_with_headers(sections: dict[str, str], underline_char: str = '-') -> list[str]:
+    """Format sections with underlined headers.
+
+    Args:
+        sections: Dict mapping section headers to content
+        underline_char: Character for underlining headers
+
+    Returns:
+        List of formatted section strings
+    """
+    formatted_sections = []
+    for section_header, section_content in sections.items():
+        underline = underline_char * len(section_header)
+        formatted_sections.append(f'{section_header}\n{underline}\n{section_content}')
+    return formatted_sections
+
+
+def build_metadata_info(parts: list[str], prefix: str = ' | ') -> str:
+    """Build metadata info string from parts.
+
+    Args:
+        parts: List of metadata strings (empty strings are filtered out)
+        prefix: Prefix to add if parts is non-empty
+
+    Returns:
+        Formatted info string or empty string
+    """
+    # Filter out empty strings
+    parts = [p for p in parts if p]
+    if not parts:
+        return ''
+    info = ' | '.join(parts)
+    return prefix + info if prefix else info
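Illustrative sketch (not part of the package) of how the two central repr helpers above behave; Widget is a made-up class used only to demonstrate build_repr_from_init.

import numpy as np
import flixopt.io as fx_io

fx_io.numeric_to_str_for_repr(100)                        # '100.0'
fx_io.numeric_to_str_for_repr(np.array([100.0, 100.0]))   # '100.0'       (uniform values)
fx_io.numeric_to_str_for_repr(np.array([99.96, 100.04]))  # '~100.0'      (values differ, but round to the same string)
fx_io.numeric_to_str_for_repr(np.array([50.0, 150.0]))    # '50.0-150.0'  (range from min to max)


class Widget:
    def __init__(self, label: str, size: float = 1.0, active: bool = False):
        self.label, self.size, self.active = label, size, active

    def __repr__(self):
        return fx_io.build_repr_from_init(self)


repr(Widget('W1', size=2.5))  # "Widget('W1', size=2.5)" - parameters left at their defaults are omitted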
flixopt/results.py
CHANGED
@@ -17,6 +17,7 @@ from . import plotting
 from .color_processing import process_colors
 from .config import CONFIG
 from .flow_system import FlowSystem
+from .structure import CompositeContainerMixin, ElementContainer, ResultsContainer

 if TYPE_CHECKING:
     import matplotlib.pyplot as plt
@@ -53,7 +54,7 @@ class _FlowSystemRestorationError(Exception):
     pass


-class CalculationResults:
+class CalculationResults(CompositeContainerMixin['ComponentResults | BusResults | EffectResults | FlowResults']):
     """Comprehensive container for optimization calculation results and analysis tools.

     This class provides unified access to all optimization results including flow rates,
@@ -238,13 +239,18 @@ class CalculationResults:
         self.name = name
         self.model = model
         self.folder = pathlib.Path(folder) if folder is not None else pathlib.Path.cwd() / 'results'
-
+
+        # Create ResultsContainers for better access patterns
+        components_dict = {
             label: ComponentResults(self, **infos) for label, infos in self.solution.attrs['Components'].items()
         }
+        self.components = ResultsContainer(elements=components_dict, element_type_name='component results')

-
+        buses_dict = {label: BusResults(self, **infos) for label, infos in self.solution.attrs['Buses'].items()}
+        self.buses = ResultsContainer(elements=buses_dict, element_type_name='bus results')

-
+        effects_dict = {label: EffectResults(self, **infos) for label, infos in self.solution.attrs['Effects'].items()}
+        self.effects = ResultsContainer(elements=effects_dict, element_type_name='effect results')

         if 'Flows' not in self.solution.attrs:
             warnings.warn(
@@ -252,11 +258,14 @@
                 'is not availlable. We recommend to evaluate your results with a version <2.2.0.',
                 stacklevel=2,
             )
-
+            flows_dict = {}
+            self._has_flow_data = False
         else:
-
+            flows_dict = {
                 label: FlowResults(self, **infos) for label, infos in self.solution.attrs.get('Flows', {}).items()
             }
+            self._has_flow_data = True
+        self.flows = ResultsContainer(elements=flows_dict, element_type_name='flow results')

         self.timesteps_extra = self.solution.indexes['time']
         self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.timesteps_extra)
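Access to results stays close to the previous plain-dict attributes. A hedged sketch, not from the diff: it assumes ResultsContainer supports mapping-style lookup, which the list(self.flows.values()) call further down suggests; here results stands for an already loaded CalculationResults instance and 'Boiler' for an arbitrary component label.

boiler = results.components['Boiler']     # label-based lookup on the new container
all_flows = list(results.flows.values())  # same .values() access as used in _assign_flow_coords below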
@@ -273,16 +282,22 @@

         self.colors: dict[str, str] = {}

-    def
-
-
-
-
-
-
-
-
-
+    def _get_container_groups(self) -> dict[str, ResultsContainer]:
+        """Return ordered container groups for CompositeContainerMixin."""
+        return {
+            'Components': self.components,
+            'Buses': self.buses,
+            'Effects': self.effects,
+            'Flows': self.flows,
+        }
+
+    def __repr__(self) -> str:
+        """Return grouped representation of all results."""
+        r = fx_io.format_title_with_underline(self.__class__.__name__, '=')
+        r += f'Name: "{self.name}"\nFolder: {self.folder}\n'
+        # Add grouped container view
+        r += '\n' + self._format_grouped_containers()
+        return r

     @property
     def storages(self) -> list[ComponentResults]:
@@ -547,6 +562,8 @@
         To recombine filtered dataarrays, use `xr.concat` with dim 'flow':
         >>>xr.concat([results.flow_rates(start='Fernwärme'), results.flow_rates(end='Fernwärme')], dim='flow')
         """
+        if not self._has_flow_data:
+            raise ValueError('Flow data is not available in this results object (pre-v2.2.0).')
         if self._flow_rates is None:
             self._flow_rates = self._assign_flow_coords(
                 xr.concat(
@@ -608,6 +625,8 @@
         >>>xr.concat([results.sizes(start='Fernwärme'), results.sizes(end='Fernwärme')], dim='flow')

         """
+        if not self._has_flow_data:
+            raise ValueError('Flow data is not available in this results object (pre-v2.2.0).')
         if self._sizes is None:
             self._sizes = self._assign_flow_coords(
                 xr.concat(
@@ -620,11 +639,12 @@

     def _assign_flow_coords(self, da: xr.DataArray):
         # Add start and end coordinates
+        flows_list = list(self.flows.values())
         da = da.assign_coords(
             {
-                'start': ('flow', [flow.start for flow in
-                'end': ('flow', [flow.end for flow in
-                'component': ('flow', [flow.component for flow in
+                'start': ('flow', [flow.start for flow in flows_list]),
+                'end': ('flow', [flow.end for flow in flows_list]),
+                'component': ('flow', [flow.component for flow in flows_list]),
             }
         )

@@ -743,8 +763,6 @@
                temporal = temporal.sum('time')
            if periodic.isnull().all():
                return temporal.rename(f'{element}->{effect}')
-            if 'time' in temporal.indexes:
-                temporal = temporal.sum('time')
            return periodic + temporal

        total = xr.DataArray(0)
@@ -1056,7 +1074,7 @@
         fx_io.save_dataset_to_netcdf(self.solution, paths.solution, compression=compression)
         fx_io.save_dataset_to_netcdf(self.flow_system_data, paths.flow_system, compression=compression)

-        fx_io.save_yaml(self.summary, paths.summary)
+        fx_io.save_yaml(self.summary, paths.summary, compact_numeric_lists=True)

         if save_linopy_model:
             if self.model is None:
@@ -1106,6 +1124,14 @@ class _ElementResults:
             raise ValueError('The linopy model is not available.')
         return self._calculation_results.model.constraints[self._constraint_names]

+    def __repr__(self) -> str:
+        """Return string representation with element info and dataset preview."""
+        class_name = self.__class__.__name__
+        header = f'{class_name}: "{self.label}"'
+        sol = self.solution.copy(deep=False)
+        sol.attrs = {}
+        return f'{header}\n{"-" * len(header)}\n{repr(sol)}'
+
     def filter_solution(
         self,
         variable_dims: Literal['scalar', 'time', 'scenario', 'timeonly', 'scenarioonly'] | None = None,