flixopt 2.1.7__py3-none-any.whl → 2.1.9__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in the supported public registries. It is provided for informational purposes only.
Potentially problematic release: this version of flixopt might be problematic.
- docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md +8 -8
- docs/user-guide/Mathematical Notation/Flow.md +3 -3
- docs/user-guide/Mathematical Notation/InvestParameters.md +3 -0
- docs/user-guide/Mathematical Notation/LinearConverter.md +3 -3
- docs/user-guide/Mathematical Notation/OnOffParameters.md +3 -0
- docs/user-guide/Mathematical Notation/Storage.md +1 -1
- flixopt/aggregation.py +33 -32
- flixopt/calculation.py +158 -58
- flixopt/components.py +673 -150
- flixopt/config.py +17 -8
- flixopt/core.py +59 -54
- flixopt/effects.py +144 -63
- flixopt/elements.py +292 -107
- flixopt/features.py +61 -58
- flixopt/flow_system.py +69 -48
- flixopt/interface.py +952 -113
- flixopt/io.py +15 -10
- flixopt/linear_converters.py +373 -81
- flixopt/network_app.py +73 -39
- flixopt/plotting.py +215 -87
- flixopt/results.py +382 -209
- flixopt/solvers.py +25 -21
- flixopt/structure.py +41 -37
- flixopt/utils.py +10 -7
- {flixopt-2.1.7.dist-info → flixopt-2.1.9.dist-info}/METADATA +46 -42
- {flixopt-2.1.7.dist-info → flixopt-2.1.9.dist-info}/RECORD +30 -28
- scripts/gen_ref_pages.py +1 -1
- {flixopt-2.1.7.dist-info → flixopt-2.1.9.dist-info}/WHEEL +0 -0
- {flixopt-2.1.7.dist-info → flixopt-2.1.9.dist-info}/licenses/LICENSE +0 -0
- {flixopt-2.1.7.dist-info → flixopt-2.1.9.dist-info}/top_level.txt +0 -0
flixopt/results.py
CHANGED
@@ -1,11 +1,12 @@
+from __future__ import annotations
+
 import datetime
 import json
 import logging
 import pathlib
-from typing import TYPE_CHECKING,
+from typing import TYPE_CHECKING, Literal
 
 import linopy
-import matplotlib.pyplot as plt
 import numpy as np
 import pandas as pd
 import plotly
@@ -17,6 +18,7 @@ from . import plotting
 from .core import TimeSeriesCollection
 
 if TYPE_CHECKING:
+    import matplotlib.pyplot as plt
     import pyvis
 
     from .calculation import Calculation, SegmentedCalculation
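The import hunks above move `matplotlib.pyplot` into the `if TYPE_CHECKING:` block and add `from __future__ import annotations`, so matplotlib is only required when the plotting code actually runs. A minimal sketch of that pattern (illustrative, not flixopt code):

```python
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only evaluated by static type checkers; no runtime import cost.
    import matplotlib.pyplot as plt


def new_figure() -> plt.Figure:  # annotation stays a string thanks to __future__ import
    # matplotlib is imported lazily, only when the function is called
    import matplotlib.pyplot as plt

    return plt.figure()
```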
@@ -26,55 +28,95 @@ logger = logging.getLogger('flixopt')
 
 
 class CalculationResults:
-    """
-
-    This class
-
-
-
-
-
+    """Comprehensive container for optimization calculation results and analysis tools.
+
+    This class provides unified access to all optimization results including flow rates,
+    component states, bus balances, and system effects. It offers powerful analysis
+    capabilities through filtering, plotting, and export functionality, making it
+    the primary interface for post-processing optimization results.
+
+    Key Features:
+        **Unified Access**: Single interface to all solution variables and constraints
+        **Element Results**: Direct access to component, bus, and effect-specific results
+        **Visualization**: Built-in plotting methods for heatmaps, time series, and networks
+        **Persistence**: Save/load functionality with compression for large datasets
+        **Analysis Tools**: Filtering, aggregation, and statistical analysis methods
+
+    Result Organization:
+        - **Components**: Equipment-specific results (flows, states, constraints)
+        - **Buses**: Network node balances and energy flows
+        - **Effects**: System-wide impacts (costs, emissions, resource consumption)
+        - **Solution**: Raw optimization variables and their values
+        - **Metadata**: Calculation parameters, timing, and system configuration
 
     Attributes:
-        solution
-        flow_system
-        summary
-        name
-        model
-        folder
-        components
-        buses
-        effects
-        timesteps_extra
-        hours_per_timestep
-
-
-    Load
-
-
-
-
-
-
+        solution: Dataset containing all optimization variable solutions
+        flow_system: Dataset with complete system configuration and parameters. Restore the used FlowSystem for further analysis.
+        summary: Calculation metadata including solver status, timing, and statistics
+        name: Unique identifier for this calculation
+        model: Original linopy optimization model (if available)
+        folder: Directory path for result storage and loading
+        components: Dictionary mapping component labels to ComponentResults objects
+        buses: Dictionary mapping bus labels to BusResults objects
+        effects: Dictionary mapping effect names to EffectResults objects
+        timesteps_extra: Extended time index including boundary conditions
+        hours_per_timestep: Duration of each timestep for proper energy calculations
+
+    Examples:
+        Load and analyze saved results:
+
+        ```python
+        # Load results from file
+        results = CalculationResults.from_file('results', 'annual_optimization')
+
+        # Access specific component results
+        boiler_results = results['Boiler_01']
+        heat_pump_results = results['HeatPump_02']
+
+        # Plot component flow rates
+        results.plot_heatmap('Boiler_01(Natural_Gas)|flow_rate')
+        results['Boiler_01'].plot_node_balance()
+
+        # Access raw solution dataarrays
+        electricity_flows = results.solution[['Generator_01(Grid)|flow_rate', 'HeatPump_02(Grid)|flow_rate']]
+
+        # Filter and analyze results
+        peak_demand_hours = results.filter_solution(variable_dims='time')
+        costs_solution = results.effects['cost'].solution
+        ```
+
+        Advanced filtering and aggregation:
+
+        ```python
+        # Filter by variable type
+        scalar_results = results.filter_solution(variable_dims='scalar')
+        time_series = results.filter_solution(variable_dims='time')
+
+        # Custom data analysis leveraging xarray
+        peak_power = results.solution['Generator_01(Grid)|flow_rate'].max()
+        avg_efficiency = (
+            results.solution['HeatPump(Heat)|flow_rate'] / results.solution['HeatPump(Electricity)|flow_rate']
+        ).mean()
+        ```
+
+    Design Patterns:
+        **Factory Methods**: Use `from_file()` and `from_calculation()` for creation or access directly from `Calculation.results`
+        **Dictionary Access**: Use `results[element_label]` for element-specific results
+        **Lazy Loading**: Results objects created on-demand for memory efficiency
+        **Unified Interface**: Consistent API across different result types
+
     """
 
     @classmethod
-    def from_file(cls, folder:
-        """
-
-        This method loads the calculation results from previously saved files,
-        including the solution, flow system, model (if available), and metadata.
+    def from_file(cls, folder: str | pathlib.Path, name: str) -> CalculationResults:
+        """Load CalculationResults from saved files.
 
         Args:
-            folder:
-            name: Base name of
+            folder: Directory containing saved files.
+            name: Base name of saved files (without extensions).
 
         Returns:
-            CalculationResults:
-
-        Raises:
-            FileNotFoundError: If required files cannot be found.
-            ValueError: If files exist but cannot be properly loaded.
+            CalculationResults: Loaded instance.
         """
         folder = pathlib.Path(folder)
         paths = fx_io.CalculationResultsPaths(folder, name)
@@ -87,7 +129,7 @@ class CalculationResults:
         except Exception as e:
             logger.critical(f'Could not load the linopy model "{name}" from file ("{paths.linopy_model}"): {e}')
 
-        with open(paths.summary,
+        with open(paths.summary, encoding='utf-8') as f:
             summary = yaml.load(f, Loader=yaml.FullLoader)
 
         return cls(
@@ -100,21 +142,14 @@ class CalculationResults:
         )
 
     @classmethod
-    def from_calculation(cls, calculation:
-        """Create CalculationResults
-
-        This method extracts the solution, flow system, and other relevant
-        information directly from an existing Calculation object.
+    def from_calculation(cls, calculation: Calculation) -> CalculationResults:
+        """Create CalculationResults from a Calculation object.
 
         Args:
-            calculation:
+            calculation: Calculation object with solved model.
 
         Returns:
-            CalculationResults:
-            the provided calculation.
-
-        Raises:
-            AttributeError: If the calculation doesn't have required attributes.
+            CalculationResults: New instance with extracted results.
         """
         return cls(
             solution=calculation.model.solution,
@@ -130,18 +165,20 @@ class CalculationResults:
         solution: xr.Dataset,
         flow_system: xr.Dataset,
         name: str,
-        summary:
-        folder:
-        model:
+        summary: dict,
+        folder: pathlib.Path | None = None,
+        model: linopy.Model | None = None,
     ):
-        """
+        """Initialize CalculationResults with optimization data.
+        Usually, this class is instantiated by the Calculation class, or by loading from file.
+
         Args:
-            solution:
-            flow_system:
-            name:
-            summary:
-            folder:
-            model:
+            solution: Optimization solution dataset.
+            flow_system: Flow system configuration dataset.
+            name: Calculation name.
+            summary: Calculation metadata.
+            folder: Results storage folder.
+            model: Linopy optimization model.
         """
         self.solution = solution
         self.flow_system = flow_system
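Based on the `from_calculation()` signature shown above, a typical post-solve workflow looks like the following sketch. `calc` stands for a previously solved flixopt Calculation object (an assumption; it is not defined here):

```python
from flixopt.results import CalculationResults

# `calc` is a solved flixopt Calculation (hypothetical variable name)
results = CalculationResults.from_calculation(calc)
print(results.name)
print(results.summary['Main Results']['Objective'])  # same value exposed by results.objective
```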
@@ -162,7 +199,7 @@ class CalculationResults:
         self.timesteps_extra = self.solution.indexes['time']
         self.hours_per_timestep = TimeSeriesCollection.calculate_hours_per_timestep(self.timesteps_extra)
 
-    def __getitem__(self, key: str) ->
+    def __getitem__(self, key: str) -> ComponentResults | BusResults | EffectResults:
         if key in self.components:
             return self.components[key]
         if key in self.buses:
@@ -172,39 +209,40 @@ class CalculationResults:
         raise KeyError(f'No element with label {key} found.')
 
     @property
-    def storages(self) ->
-        """
+    def storages(self) -> list[ComponentResults]:
+        """Get all storage components in the results."""
         return [comp for comp in self.components.values() if comp.is_storage]
 
     @property
     def objective(self) -> float:
-        """
+        """Get optimization objective value."""
         return self.summary['Main Results']['Objective']
 
     @property
     def variables(self) -> linopy.Variables:
-        """
+        """Get optimization variables (requires linopy model)."""
         if self.model is None:
             raise ValueError('The linopy model is not available.')
         return self.model.variables
 
     @property
     def constraints(self) -> linopy.Constraints:
-        """
+        """Get optimization constraints (requires linopy model)."""
         if self.model is None:
             raise ValueError('The linopy model is not available.')
         return self.model.constraints
 
     def filter_solution(
-        self, variable_dims:
+        self, variable_dims: Literal['scalar', 'time'] | None = None, element: str | None = None
     ) -> xr.Dataset:
-        """
-        Filter the solution to a specific variable dimension and element.
-        If no element is specified, all elements are included.
+        """Filter solution by variable dimension and/or element.
 
         Args:
-            variable_dims:
-            element:
+            variable_dims: Variable dimension to filter ('scalar' or 'time').
+            element: Element label to filter.
+
+        Returns:
+            xr.Dataset: Filtered solution dataset.
         """
         if element is not None:
             return filter_dataset(self[element].solution, variable_dims)
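A short sketch of the updated `filter_solution()` signature above; `results` is an existing CalculationResults instance and 'Boiler_01' is a hypothetical element label taken from the docstring examples:

```python
scalars = results.filter_solution(variable_dims='scalar')       # scalar variables only
time_series = results.filter_solution(variable_dims='time')     # time-indexed variables
boiler_only = results.filter_solution(variable_dims='time', element='Boiler_01')
```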
@@ -216,10 +254,10 @@ class CalculationResults:
         heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
         heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
         color_map: str = 'portland',
-        save:
+        save: bool | pathlib.Path = False,
         show: bool = True,
         engine: plotting.PlottingEngine = 'plotly',
-    ) ->
+    ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
         return plot_heatmap(
             dataarray=self.solution[variable_name],
             name=variable_name,
@@ -234,16 +272,22 @@ class CalculationResults:
 
     def plot_network(
         self,
-        controls:
-        bool
-
+        controls: (
+            bool
+            | list[
                 Literal['nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer']
-        ]
-
-        path:
+            ]
+        ) = True,
+        path: pathlib.Path | None = None,
         show: bool = False,
-    ) ->
-        """
+    ) -> pyvis.network.Network | None:
+        """Plot interactive network visualization of the system.
+
+        Args:
+            controls: Enable/disable interactive controls.
+            path: Save path for network HTML.
+            show: Whether to display the plot.
+        """
         try:
             from .flow_system import FlowSystem
 
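Usage sketch for the new `plot_network()` signature above; `results` is a CalculationResults instance and the output path is an assumption:

```python
import pathlib

net = results.plot_network(
    controls=['nodes', 'edges', 'physics'],      # any subset of the Literal options above
    path=pathlib.Path('results/network.html'),   # hypothetical output location
    show=False,
)
```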
@@ -257,21 +301,20 @@ class CalculationResults:
 
     def to_file(
         self,
-        folder:
-        name:
+        folder: str | pathlib.Path | None = None,
+        name: str | None = None,
         compression: int = 5,
         document_model: bool = True,
         save_linopy_model: bool = False,
     ):
-        """
-
+        """Save results to files.
+
         Args:
-            folder:
-            name:
-            compression:
-            document_model:
-            save_linopy_model:
-                The model file size is rougly 100 times larger than the solution file.
+            folder: Save folder (defaults to calculation folder).
+            name: File name (defaults to calculation name).
+            compression: Compression level 0-9.
+            document_model: Whether to document model formulations as yaml.
+            save_linopy_model: Whether to save linopy model file.
         """
         folder = self.folder if folder is None else pathlib.Path(folder)
         name = self.name if name is None else name
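Save/load round trip based on the `to_file()` and `from_file()` signatures in this diff; the folder and file name are placeholders:

```python
from flixopt.results import CalculationResults

# `results` is an existing CalculationResults instance
results.to_file(folder='results', name='annual_optimization', compression=5, save_linopy_model=False)
reloaded = CalculationResults.from_file('results', 'annual_optimization')
```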
@@ -308,11 +351,11 @@ class CalculationResults:
 
 class _ElementResults:
     @classmethod
-    def from_json(cls, calculation_results, json_data:
+    def from_json(cls, calculation_results, json_data: dict) -> _ElementResults:
         return cls(calculation_results, json_data['label'], json_data['variables'], json_data['constraints'])
 
     def __init__(
-        self, calculation_results: CalculationResults, label: str, variables:
+        self, calculation_results: CalculationResults, label: str, variables: list[str], constraints: list[str]
     ):
         self._calculation_results = calculation_results
         self.label = label
@@ -323,11 +366,10 @@ class _ElementResults:
 
     @property
     def variables(self) -> linopy.Variables:
-        """
-        Returns the variables of the element.
+        """Get element variables (requires linopy model).
 
         Raises:
-            ValueError: If
+            ValueError: If linopy model is unavailable.
         """
         if self._calculation_results.model is None:
             raise ValueError('The linopy model is not available.')
@@ -335,29 +377,30 @@ class _ElementResults:
 
     @property
     def constraints(self) -> linopy.Constraints:
-        """
-        Returns the variables of the element.
+        """Get element constraints (requires linopy model).
 
         Raises:
-            ValueError: If
+            ValueError: If linopy model is unavailable.
         """
         if self._calculation_results.model is None:
             raise ValueError('The linopy model is not available.')
         return self._calculation_results.model.constraints[self._constraint_names]
 
-    def filter_solution(self, variable_dims:
-        """
-        Filter the solution of the element by dimension.
+    def filter_solution(self, variable_dims: Literal['scalar', 'time'] | None = None) -> xr.Dataset:
+        """Filter element solution by dimension.
 
         Args:
-            variable_dims:
+            variable_dims: Variable dimension to filter.
+
+        Returns:
+            xr.Dataset: Filtered solution dataset.
         """
         return filter_dataset(self.solution, variable_dims)
 
 
 class _NodeResults(_ElementResults):
     @classmethod
-    def from_json(cls, calculation_results, json_data:
+    def from_json(cls, calculation_results, json_data: dict) -> _NodeResults:
         return cls(
             calculation_results,
             json_data['label'],
@@ -371,10 +414,10 @@ class _NodeResults(_ElementResults):
         self,
         calculation_results: CalculationResults,
         label: str,
-        variables:
-        constraints:
-        inputs:
-        outputs:
+        variables: list[str],
+        constraints: list[str],
+        inputs: list[str],
+        outputs: list[str],
     ):
         super().__init__(calculation_results, label, variables, constraints)
         self.inputs = inputs
@@ -382,17 +425,21 @@ class _NodeResults(_ElementResults):
 
     def plot_node_balance(
         self,
-        save:
+        save: bool | pathlib.Path = False,
         show: bool = True,
         colors: plotting.ColorType = 'viridis',
         engine: plotting.PlottingEngine = 'plotly',
-    ) ->
-        """
-
+    ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
+        """Plot node balance flows.
+
         Args:
-            save: Whether to save
-            show: Whether to
-
+            save: Whether to save plot (path or boolean).
+            show: Whether to display plot.
+            colors: Color scheme. Also see plotly.
+            engine: Plotting engine ('plotly' or 'matplotlib').
+
+        Returns:
+            Figure object.
         """
         if engine == 'plotly':
             figure_like = plotting.with_plotly(
@@ -427,20 +474,19 @@ class _NodeResults(_ElementResults):
         lower_percentage_group: float = 5,
         colors: plotting.ColorType = 'viridis',
         text_info: str = 'percent+label+value',
-        save:
+        save: bool | pathlib.Path = False,
         show: bool = True,
         engine: plotting.PlottingEngine = 'plotly',
-    ) -> plotly.
-        """
-        Plots a pie chart of the flow hours of the inputs and outputs of buses or components.
+    ) -> plotly.graph_objs.Figure | tuple[plt.Figure, list[plt.Axes]]:
+        """Plot pie chart of flow hours distribution.
 
         Args:
-
-
-            text_info:
-            save: Whether to save
-            show: Whether to
-            engine: Plotting engine
+            lower_percentage_group: Percentage threshold for "Others" grouping.
+            colors: Color scheme. Also see plotly.
+            text_info: Information to display on pie slices.
+            save: Whether to save plot.
+            show: Whether to display plot.
+            engine: Plotting engine ('plotly' or 'matplotlib').
         """
         inputs = (
             sanitize_dataset(
@@ -501,7 +547,7 @@ class _NodeResults(_ElementResults):
         self,
         negate_inputs: bool = True,
         negate_outputs: bool = False,
-        threshold:
+        threshold: float | None = 1e-5,
         with_last_timestep: bool = False,
     ) -> xr.Dataset:
         return sanitize_dataset(
@@ -521,11 +567,11 @@ class _NodeResults(_ElementResults):
 
 
 class BusResults(_NodeResults):
-    """Results for
+    """Results container for energy/material balance nodes in the system."""
 
 
 class ComponentResults(_NodeResults):
-    """Results for
+    """Results container for individual system components with specialized analysis tools."""
 
     @property
     def is_storage(self) -> bool:
@@ -537,28 +583,31 @@ class ComponentResults(_NodeResults):
 
     @property
     def charge_state(self) -> xr.DataArray:
-        """Get
+        """Get storage charge state solution."""
         if not self.is_storage:
             raise ValueError(f'Cant get charge_state. "{self.label}" is not a storage')
         return self.solution[self._charge_state]
 
     def plot_charge_state(
         self,
-        save:
+        save: bool | pathlib.Path = False,
         show: bool = True,
         colors: plotting.ColorType = 'viridis',
         engine: plotting.PlottingEngine = 'plotly',
     ) -> plotly.graph_objs.Figure:
-        """
-
+        """Plot storage charge state over time, combined with the node balance.
+
         Args:
-            save: Whether to save
-            show: Whether to
-            colors:
-            engine: Plotting engine
+            save: Whether to save plot.
+            show: Whether to display plot.
+            colors: Color scheme. Also see plotly.
+            engine: Plotting engine (only 'plotly' supported).
+
+        Returns:
+            plotly.graph_objs.Figure: Charge state plot.
 
         Raises:
-            ValueError: If
+            ValueError: If component is not a storage.
         """
         if engine != 'plotly':
             raise NotImplementedError(
@@ -594,17 +643,20 @@ class ComponentResults(_NodeResults):
         )
 
     def node_balance_with_charge_state(
-        self, negate_inputs: bool = True, negate_outputs: bool = False, threshold:
+        self, negate_inputs: bool = True, negate_outputs: bool = False, threshold: float | None = 1e-5
     ) -> xr.Dataset:
-        """
-
+        """Get storage node balance including charge state.
+
         Args:
-            negate_inputs: Whether to negate
-            negate_outputs: Whether to negate
-            threshold:
+            negate_inputs: Whether to negate input flows.
+            negate_outputs: Whether to negate output flows.
+            threshold: Threshold for small values.
+
+        Returns:
+            xr.Dataset: Node balance with charge state.
 
         Raises:
-            ValueError: If
+            ValueError: If component is not a storage.
         """
         if not self.is_storage:
             raise ValueError(f'Cant get charge_state. "{self.label}" is not a storage')
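Storage-specific accessors from the two hunks above, sketched for a hypothetical component labelled 'Storage'; both calls raise ValueError if the component is not a storage:

```python
# `results` is a CalculationResults instance; 'Storage' is a hypothetical label
storage = results['Storage']
if storage.is_storage:
    balance = storage.node_balance_with_charge_state(threshold=1e-5)
    storage.plot_charge_state(show=True, engine='plotly')
```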
@@ -629,17 +681,115 @@ class EffectResults(_ElementResults):
     """Results for an Effect"""
 
     def get_shares_from(self, element: str):
-        """Get
+        """Get effect shares from specific element.
+
+        Args:
+            element: Element label to get shares from.
+
+        Returns:
+            xr.Dataset: Element shares to this effect.
+        """
         return self.solution[[name for name in self._variable_names if name.startswith(f'{element}->')]]
 
 
 class SegmentedCalculationResults:
-    """
-
+    """Results container for segmented optimization calculations with temporal decomposition.
+
+    This class manages results from SegmentedCalculation runs where large optimization
+    problems are solved by dividing the time horizon into smaller, overlapping segments.
+    It provides unified access to results across all segments while maintaining the
+    ability to analyze individual segment behavior.
+
+    Key Features:
+        **Unified Time Series**: Automatically assembles results from all segments into
+        continuous time series, removing overlaps and boundary effects
+        **Segment Analysis**: Access individual segment results for debugging and validation
+        **Consistency Checks**: Verify solution continuity at segment boundaries
+        **Memory Efficiency**: Handles large datasets that exceed single-segment memory limits
+
+    Temporal Handling:
+        The class manages the complex task of combining overlapping segment solutions
+        into coherent time series, ensuring proper treatment of:
+        - Storage state continuity between segments
+        - Flow rate transitions at segment boundaries
+        - Aggregated results over the full time horizon
+
+    Examples:
+        Load and analyze segmented results:
+
+        ```python
+        # Load segmented calculation results
+        results = SegmentedCalculationResults.from_file('results', 'annual_segmented')
+
+        # Access unified results across all segments
+        full_timeline = results.all_timesteps
+        total_segments = len(results.segment_results)
+
+        # Analyze individual segments
+        for i, segment in enumerate(results.segment_results):
+            print(f'Segment {i + 1}: {len(segment.solution.time)} timesteps')
+            segment_costs = segment.effects['cost'].total_value
+
+        # Check solution continuity at boundaries
+        segment_boundaries = results.get_boundary_analysis()
+        max_discontinuity = segment_boundaries['max_storage_jump']
+        ```
+
+        Create from segmented calculation:
+
+        ```python
+        # After running segmented calculation
+        segmented_calc = SegmentedCalculation(
+            name='annual_system',
+            flow_system=system,
+            timesteps_per_segment=730,  # Monthly segments
+            overlap_timesteps=48,  # 2-day overlap
+        )
+        segmented_calc.do_modeling_and_solve(solver='gurobi')
+
+        # Extract unified results
+        results = SegmentedCalculationResults.from_calculation(segmented_calc)
+
+        # Save combined results
+        results.to_file(compression=5)
+        ```
+
+        Performance analysis across segments:
+
+        ```python
+        # Compare segment solve times
+        solve_times = [seg.summary['durations']['solving'] for seg in results.segment_results]
+        avg_solve_time = sum(solve_times) / len(solve_times)
+
+        # Verify solution quality consistency
+        segment_objectives = [seg.summary['objective_value'] for seg in results.segment_results]
+
+        # Storage continuity analysis
+        if 'Battery' in results.segment_results[0].components:
+            storage_continuity = results.check_storage_continuity('Battery')
+        ```
+
+    Design Considerations:
+        **Boundary Effects**: Monitor solution quality at segment interfaces where
+        foresight is limited compared to full-horizon optimization.
+
+        **Memory Management**: Individual segment results are maintained for detailed
+        analysis while providing unified access for system-wide metrics.
+
+        **Validation Tools**: Built-in methods to verify temporal consistency and
+        identify potential issues from segmentation approach.
+
+    Common Use Cases:
+        - **Large-Scale Analysis**: Annual or multi-year optimization results
+        - **Memory-Constrained Systems**: Results from systems exceeding hardware limits
+        - **Segment Validation**: Verifying segmentation approach effectiveness
+        - **Performance Monitoring**: Comparing segmented vs. full-horizon solutions
+        - **Debugging**: Identifying issues specific to temporal decomposition
+
     """
 
     @classmethod
-    def from_calculation(cls, calculation:
+    def from_calculation(cls, calculation: SegmentedCalculation):
         return cls(
             [calc.results for calc in calculation.sub_calculations],
             all_timesteps=calculation.all_timesteps,
@@ -650,13 +800,20 @@ class SegmentedCalculationResults:
         )
 
     @classmethod
-    def from_file(cls, folder:
-        """
+    def from_file(cls, folder: str | pathlib.Path, name: str):
+        """Load SegmentedCalculationResults from saved files.
+
+        Args:
+            folder: Directory containing saved files.
+            name: Base name of saved files.
+
+        Returns:
+            SegmentedCalculationResults: Loaded instance.
+        """
         folder = pathlib.Path(folder)
         path = folder / name
-
-
-        with open(path.with_suffix('.json'), 'r', encoding='utf-8') as f:
+        logger.info(f'loading calculation "{name}" from file ("{path.with_suffix(".nc4")}")')
+        with open(path.with_suffix('.json'), encoding='utf-8') as f:
             meta_data = json.load(f)
         return cls(
             [CalculationResults.from_file(folder, name) for name in meta_data['sub_calculations']],
@@ -671,12 +828,12 @@ class SegmentedCalculationResults:
 
     def __init__(
         self,
-        segment_results:
+        segment_results: list[CalculationResults],
         all_timesteps: pd.DatetimeIndex,
         timesteps_per_segment: int,
         overlap_timesteps: int,
         name: str,
-        folder:
+        folder: pathlib.Path | None = None,
     ):
         self.segment_results = segment_results
         self.all_timesteps = all_timesteps
@@ -687,7 +844,7 @@ class SegmentedCalculationResults:
         self.hours_per_timestep = TimeSeriesCollection.calculate_hours_per_timestep(self.all_timesteps)
 
     @property
-    def meta_data(self) ->
+    def meta_data(self) -> dict[str, int | list[str]]:
         return {
             'all_timesteps': [datetime.datetime.isoformat(date) for date in self.all_timesteps],
             'timesteps_per_segment': self.timesteps_per_segment,
@@ -696,11 +853,18 @@ class SegmentedCalculationResults:
         }
 
     @property
-    def segment_names(self) ->
+    def segment_names(self) -> list[str]:
         return [segment.name for segment in self.segment_results]
 
     def solution_without_overlap(self, variable_name: str) -> xr.DataArray:
-        """
+        """Get variable solution removing segment overlaps.
+
+        Args:
+            variable_name: Name of variable to extract.
+
+        Returns:
+            xr.DataArray: Continuous solution without overlaps.
+        """
         dataarrays = [
             result.solution[variable_name].isel(time=slice(None, self.timesteps_per_segment))
             for result in self.segment_results[:-1]
@@ -713,21 +877,23 @@ class SegmentedCalculationResults:
         heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
         heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
         color_map: str = 'portland',
-        save:
+        save: bool | pathlib.Path = False,
         show: bool = True,
         engine: plotting.PlottingEngine = 'plotly',
-    ) ->
-        """
-        Plots a heatmap of the solution of a variable.
+    ) -> plotly.graph_objs.Figure | tuple[plt.Figure, plt.Axes]:
+        """Plot heatmap of variable solution across segments.
 
         Args:
-            variable_name:
-            heatmap_timeframes:
-            heatmap_timesteps_per_frame:
-            color_map:
-            save: Whether to save
-            show: Whether to
-            engine:
+            variable_name: Variable to plot.
+            heatmap_timeframes: Time aggregation level.
+            heatmap_timesteps_per_frame: Timesteps per frame.
+            color_map: Color scheme. Also see plotly.
+            save: Whether to save plot.
+            show: Whether to display plot.
+            engine: Plotting engine.
+
+        Returns:
+            Figure object.
         """
         return plot_heatmap(
             dataarray=self.solution_without_overlap(variable_name),
@@ -741,10 +907,14 @@ class SegmentedCalculationResults:
             engine=engine,
         )
 
-    def to_file(
-
-
-
+    def to_file(self, folder: str | pathlib.Path | None = None, name: str | None = None, compression: int = 5):
+        """Save segmented results to files.
+
+        Args:
+            folder: Save folder (defaults to instance folder).
+            name: File name (defaults to instance name).
+            compression: Compression level 0-9.
+        """
         folder = self.folder if folder is None else pathlib.Path(folder)
         name = self.name if name is None else name
         path = folder / name
@@ -770,23 +940,25 @@ def plot_heatmap(
     heatmap_timeframes: Literal['YS', 'MS', 'W', 'D', 'h', '15min', 'min'] = 'D',
     heatmap_timesteps_per_frame: Literal['W', 'D', 'h', '15min', 'min'] = 'h',
     color_map: str = 'portland',
-    save:
+    save: bool | pathlib.Path = False,
    show: bool = True,
     engine: plotting.PlottingEngine = 'plotly',
 ):
-    """
-    Plots a heatmap of the solution of a variable.
+    """Plot heatmap of time series data.
 
     Args:
-        dataarray:
-        name:
-        folder:
-        heatmap_timeframes:
-        heatmap_timesteps_per_frame:
-        color_map:
-        save: Whether to save
-        show: Whether to
-        engine:
+        dataarray: Data to plot.
+        name: Variable name for title.
+        folder: Save folder.
+        heatmap_timeframes: Time aggregation level.
+        heatmap_timesteps_per_frame: Timesteps per frame.
+        color_map: Color scheme. Also see plotly.
+        save: Whether to save plot.
+        show: Whether to display plot.
+        engine: Plotting engine.
+
+    Returns:
+        Figure object.
     """
     heatmap_data = plotting.heat_map_data_from_df(
         dataarray.to_dataframe(name), heatmap_timeframes, heatmap_timesteps_per_frame, 'ffill'
@@ -819,25 +991,24 @@ def plot_heatmap(
 
 def sanitize_dataset(
     ds: xr.Dataset,
-    timesteps:
-    threshold:
-    negate:
+    timesteps: pd.DatetimeIndex | None = None,
+    threshold: float | None = 1e-5,
+    negate: list[str] | None = None,
     drop_small_vars: bool = True,
     zero_small_values: bool = False,
 ) -> xr.Dataset:
-    """
-    Sanitizes a dataset by handling small values (dropping or zeroing) and optionally reindexing the time axis.
+    """Clean dataset by handling small values and reindexing time.
 
     Args:
-        ds:
-        timesteps:
-        threshold:
-        negate:
-        drop_small_vars:
-        zero_small_values:
+        ds: Dataset to sanitize.
+        timesteps: Time index for reindexing (optional).
+        threshold: Threshold for small values processing.
+        negate: Variables to negate.
+        drop_small_vars: Whether to drop variables below threshold.
+        zero_small_values: Whether to zero values below threshold.
 
     Returns:
-        xr.Dataset:
+        xr.Dataset: Sanitized dataset.
     """
     # Create a copy to avoid modifying the original
     ds = ds.copy()
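A minimal sketch of the `sanitize_dataset()` helper using the signature and defaults shown above; the dataset and variable name are hypothetical:

```python
import xarray as xr

from flixopt.results import sanitize_dataset

# Hypothetical solution data with one tiny value below the 1e-5 threshold
ds = xr.Dataset({'SomeFlow|flow_rate': xr.DataArray([0.0, 2e-6, 5.0], dims='time')})
clean = sanitize_dataset(ds, threshold=1e-5, drop_small_vars=False, zero_small_values=True)
```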
@@ -854,7 +1025,7 @@ def sanitize_dataset(
 
     # Option 1: Drop variables where all values are below threshold
     if drop_small_vars:
-        vars_to_drop = [var for var in ds.data_vars if (ds_no_nan_abs[var] <= threshold).all()]
+        vars_to_drop = [var for var in ds.data_vars if (ds_no_nan_abs[var] <= threshold).all().item()]
         ds = ds.drop_vars(vars_to_drop)
 
     # Option 2: Set small values to zero
@@ -863,7 +1034,7 @@ def sanitize_dataset(
             # Create a boolean mask of values below threshold
             mask = ds_no_nan_abs[var] <= threshold
             # Only proceed if there are values to zero out
-            if mask.any():
+            if bool(mask.any().item()):
                 # Create a copy to ensure we don't modify data with views
                 ds[var] = ds[var].copy()
                 # Set values below threshold to zero
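The `.item()` calls added above address the fact that xarray reductions return 0-dimensional DataArrays rather than plain Python scalars; a small illustrative check:

```python
import numpy as np
import xarray as xr

da = xr.DataArray(np.array([1e-7, 2e-6]), dims='time')
reduced = (abs(da) <= 1e-5).all()  # 0-d boolean DataArray, not a plain bool
print(type(reduced))               # xarray.DataArray
print(bool(reduced.item()))        # True -- .item() extracts the Python scalar
```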
@@ -878,14 +1049,16 @@ def filter_dataset(
 
 def filter_dataset(
     ds: xr.Dataset,
-    variable_dims:
+    variable_dims: Literal['scalar', 'time'] | None = None,
 ) -> xr.Dataset:
-    """
-    Filters a dataset by its dimensions.
+    """Filter dataset by variable dimensions.
 
     Args:
-        ds:
-        variable_dims:
+        ds: Dataset to filter.
+        variable_dims: Variable dimension to filter ('scalar' or 'time').
+
+    Returns:
+        xr.Dataset: Filtered dataset.
     """
     if variable_dims is None:
         return ds