fram-core 0.0.0__py3-none-any.whl → 0.1.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. fram_core-0.1.0a1.dist-info/METADATA +41 -0
  2. fram_core-0.1.0a1.dist-info/RECORD +100 -0
  3. {fram_core-0.0.0.dist-info → fram_core-0.1.0a1.dist-info}/WHEEL +1 -2
  4. fram_core-0.1.0a1.dist-info/licenses/LICENSE.md +8 -0
  5. framcore/Base.py +142 -0
  6. framcore/Model.py +73 -0
  7. framcore/__init__.py +9 -0
  8. framcore/aggregators/Aggregator.py +153 -0
  9. framcore/aggregators/HydroAggregator.py +837 -0
  10. framcore/aggregators/NodeAggregator.py +495 -0
  11. framcore/aggregators/WindSolarAggregator.py +323 -0
  12. framcore/aggregators/__init__.py +13 -0
  13. framcore/aggregators/_utils.py +184 -0
  14. framcore/attributes/Arrow.py +305 -0
  15. framcore/attributes/ElasticDemand.py +90 -0
  16. framcore/attributes/ReservoirCurve.py +37 -0
  17. framcore/attributes/SoftBound.py +19 -0
  18. framcore/attributes/StartUpCost.py +54 -0
  19. framcore/attributes/Storage.py +146 -0
  20. framcore/attributes/TargetBound.py +18 -0
  21. framcore/attributes/__init__.py +65 -0
  22. framcore/attributes/hydro/HydroBypass.py +42 -0
  23. framcore/attributes/hydro/HydroGenerator.py +83 -0
  24. framcore/attributes/hydro/HydroPump.py +156 -0
  25. framcore/attributes/hydro/HydroReservoir.py +27 -0
  26. framcore/attributes/hydro/__init__.py +13 -0
  27. framcore/attributes/level_profile_attributes.py +714 -0
  28. framcore/components/Component.py +112 -0
  29. framcore/components/Demand.py +130 -0
  30. framcore/components/Flow.py +167 -0
  31. framcore/components/HydroModule.py +330 -0
  32. framcore/components/Node.py +76 -0
  33. framcore/components/Thermal.py +204 -0
  34. framcore/components/Transmission.py +183 -0
  35. framcore/components/_PowerPlant.py +81 -0
  36. framcore/components/__init__.py +22 -0
  37. framcore/components/wind_solar.py +67 -0
  38. framcore/curves/Curve.py +44 -0
  39. framcore/curves/LoadedCurve.py +155 -0
  40. framcore/curves/__init__.py +9 -0
  41. framcore/events/__init__.py +21 -0
  42. framcore/events/events.py +51 -0
  43. framcore/expressions/Expr.py +490 -0
  44. framcore/expressions/__init__.py +28 -0
  45. framcore/expressions/_get_constant_from_expr.py +483 -0
  46. framcore/expressions/_time_vector_operations.py +615 -0
  47. framcore/expressions/_utils.py +73 -0
  48. framcore/expressions/queries.py +423 -0
  49. framcore/expressions/units.py +207 -0
  50. framcore/fingerprints/__init__.py +11 -0
  51. framcore/fingerprints/fingerprint.py +293 -0
  52. framcore/juliamodels/JuliaModel.py +161 -0
  53. framcore/juliamodels/__init__.py +7 -0
  54. framcore/loaders/__init__.py +10 -0
  55. framcore/loaders/loaders.py +407 -0
  56. framcore/metadata/Div.py +73 -0
  57. framcore/metadata/ExprMeta.py +50 -0
  58. framcore/metadata/LevelExprMeta.py +17 -0
  59. framcore/metadata/Member.py +55 -0
  60. framcore/metadata/Meta.py +44 -0
  61. framcore/metadata/__init__.py +15 -0
  62. framcore/populators/Populator.py +108 -0
  63. framcore/populators/__init__.py +7 -0
  64. framcore/querydbs/CacheDB.py +50 -0
  65. framcore/querydbs/ModelDB.py +34 -0
  66. framcore/querydbs/QueryDB.py +45 -0
  67. framcore/querydbs/__init__.py +11 -0
  68. framcore/solvers/Solver.py +48 -0
  69. framcore/solvers/SolverConfig.py +272 -0
  70. framcore/solvers/__init__.py +9 -0
  71. framcore/timeindexes/AverageYearRange.py +20 -0
  72. framcore/timeindexes/ConstantTimeIndex.py +17 -0
  73. framcore/timeindexes/DailyIndex.py +21 -0
  74. framcore/timeindexes/FixedFrequencyTimeIndex.py +762 -0
  75. framcore/timeindexes/HourlyIndex.py +21 -0
  76. framcore/timeindexes/IsoCalendarDay.py +31 -0
  77. framcore/timeindexes/ListTimeIndex.py +197 -0
  78. framcore/timeindexes/ModelYear.py +17 -0
  79. framcore/timeindexes/ModelYears.py +18 -0
  80. framcore/timeindexes/OneYearProfileTimeIndex.py +21 -0
  81. framcore/timeindexes/ProfileTimeIndex.py +32 -0
  82. framcore/timeindexes/SinglePeriodTimeIndex.py +37 -0
  83. framcore/timeindexes/TimeIndex.py +90 -0
  84. framcore/timeindexes/WeeklyIndex.py +21 -0
  85. framcore/timeindexes/__init__.py +36 -0
  86. framcore/timevectors/ConstantTimeVector.py +135 -0
  87. framcore/timevectors/LinearTransformTimeVector.py +114 -0
  88. framcore/timevectors/ListTimeVector.py +123 -0
  89. framcore/timevectors/LoadedTimeVector.py +104 -0
  90. framcore/timevectors/ReferencePeriod.py +41 -0
  91. framcore/timevectors/TimeVector.py +94 -0
  92. framcore/timevectors/__init__.py +17 -0
  93. framcore/utils/__init__.py +36 -0
  94. framcore/utils/get_regional_volumes.py +369 -0
  95. framcore/utils/get_supported_components.py +60 -0
  96. framcore/utils/global_energy_equivalent.py +46 -0
  97. framcore/utils/isolate_subnodes.py +163 -0
  98. framcore/utils/loaders.py +97 -0
  99. framcore/utils/node_flow_utils.py +236 -0
  100. framcore/utils/storage_subsystems.py +107 -0
  101. fram_core-0.0.0.dist-info/METADATA +0 -5
  102. fram_core-0.0.0.dist-info/RECORD +0 -4
  103. fram_core-0.0.0.dist-info/top_level.txt +0 -1
@@ -0,0 +1,837 @@
1
+ from __future__ import annotations
2
+
3
+ from collections import defaultdict
4
+ from copy import deepcopy
5
+ from time import time
6
+ from typing import TYPE_CHECKING
7
+
8
+ from framcore.aggregators import Aggregator
9
+ from framcore.aggregators._utils import (
10
+ _aggregate_result_volumes,
11
+ _aggregate_weighted_expressions,
12
+ _all_detailed_exprs_in_sum_expr,
13
+ _get_level_profile_weights_from_disagg_levelprofiles,
14
+ )
15
+ from framcore.attributes import AvgFlowVolume, Conversion, HydroGenerator, HydroReservoir, MaxFlowVolume, StockVolume
16
+ from framcore.components import Component, HydroModule
17
+ from framcore.curves import Curve
18
+ from framcore.expressions import Expr, get_level_value
19
+ from framcore.metadata import LevelExprMeta
20
+ from framcore.timeindexes import FixedFrequencyTimeIndex, SinglePeriodTimeIndex
21
+ from framcore.timevectors import ConstantTimeVector, TimeVector
22
+ from framcore.utils import get_hydro_downstream_energy_equivalent
23
+
24
+ if TYPE_CHECKING:
25
+ from framcore import Model
26
+
27
+
28
class HydroAggregator(Aggregator):
    """
    Aggregate hydro modules into two equivalent modules based on the regulation factor, into one regulated and one unregulated module per area.

    Aggregation steps (self._aggregate):
    1. Group modules based on their power nodes (self._group_modules_by_power_node)
        - Modules with generators are grouped based on their power nodes. You can choose to only group modules for certain power nodes by giving
          self._power_node_members alone or together with self._metakey_power_node. NB! Watersheds that cross power nodes should not be aggregated in two
          different HydroAggregators as the aggregator will remove all connected modules from the model after the first aggregation.
        - Reservoirs are assigned to the power node which has the highest cumulative energy equivalent downstream of the reservoir. This is because JulES
          currently only supports one-to-one mapping of detailed and aggregated reservoirs.
        - Reservoirs without generators downstream are ignored in the aggregation.
    2. Group area modules into regulated and unregulated based on regulation factor (self._group_modules_by_regulation_factor)
        - Regulation factor = upstream reservoir capacity / yearly upstream inflow. Modules with generators that have regulation factor <= self._ror_threshold
          are grouped into unregulated run-of-river modules, the other modules with generators are grouped into regulated reservoir modules.
        - All reservoirs are assigned to the regulated group.
        - Generators without upstream inflows are ignored in the aggregation.
    3. Make aggregated hydro module for each group (self._aggregate_groups)
        - The resulting HydroModule has a generator with energy equivalent of 1 kWh/m3. The inflow, discharge capacity and reservoir capacity
          is calculated based on energy and transformed back to water using this energy equivalent.
        - Generation capacity (release_cap*energy_equivalent/agg_energy_equivalent, capacity of hydraulic couplings not double counted). The release capacity
          profile is ignored except if self._release_capacity_profile is given, then this profile is used for all aggregated modules.
        - Energy reservoir capacity (res_cap*energy_equivalent_downstream/agg_energy_equivalent)
        - Gross energy inflow (inflow_up*energy_equivalent/agg_energy_equivalent) - TODO: Add possibility to adjust inflow to closer represent net inflow
        - Inflow profiles weighted based on gross energy inflow (inflow_up_per_profile*energy_equivalent) - calc from core model using self._map_topology()
        - TODO: Other details like pumps and environmental constraints are currently ignored in the aggregation.
        3a. Aggregate results if all modules in group have results.
            - Production is the sum of production levels with weighted profiles
            - Reservoir filling is the sum of energy reservoir filling levels (filling*energy_equivalent_downstream/agg_energy_equivalent) with weighted profiles
            - TODO: Spill, bypass and pumping results are currently ignored in the aggregation.
            - TODO: Add possibility to skip results aggregation.
        3b. Make new hydro module and delete original modules from model data.
    4. Add mapping from detailed to aggregated modules to self._aggregation_map.

    Disaggregation steps (self._disaggregate):
    1. Restore original modules from self._original_data. NB! Changes to aggregated modules are lost except for results (TODO)
    2. Move production and filling results from aggregated modules to detailed modules, weighted based on production capacity and reservoir capacity.
        - TODO: Spill and bypass results are currently ignored in the disaggregation.
    3. Delete aggregated modules.

    NB! Watersheds that cross power nodes should not be aggregated in two different HydroAggregators as the aggregator will remove all connected modules
    from the model after the first aggregation. Reservoirs will also be assigned to the power node which has the highest cumulative energy equivalent, so
    this aggregator does not work well for reservoirs that are upstream of multiple power nodes.

    Other comments:
    - It is recommended to only use the same aggregator type once on the same components of a model. If you want to go from one aggregation level to
      another, it is better to use model.disaggregate first and then aggregate again. This is to keep the logic simple and avoid complex expressions.
      We have also logic that recognises if result expressions come from aggregations or disaggregations. When aggregating or disaggregating these,
      we can go back to the original results rather than setting up complex expressions that, for example, aggregate the disaggregated results.
    - Levels and profiles are aggregated separately, and then combined into attributes.
    - We have chosen to eagerly evaluate weights for aggregation and disaggregation of levels and profiles. This is a balance between eagerly evaluating
      everything, and setting up complex expressions. Eagerly evaluating everything would require setting up new timevectors after eager evaluation, which
      is not ideal. While setting up complex expressions gives expressions that are harder to work with and slower to query from.

    Attributes:
        _metakey_energy_eq_downstream (str): Metadata key for energy equivalent downstream.
        _data_dim (SinglePeriodTimeIndex): Data dimension for eager evaluation.
        _scen_dim (FixedFrequencyTimeIndex): Scenario dimension for eager evaluation.
        _grouped_modules (dict[str, list[str]]): Mapping of aggregated modules to detailed modules. agg to detailed
        _grouped_reservoirs (dict[str, list[str]]): Mapping of aggregated reservoirs to detailed reservoirs. agg to detailed
        _ror_threshold (float): Regulation factor (upstream reservoir capacity / yearly upstream inflow) threshold for run-of-river classification.
            Default is 0.5.
        _metakey_power_node (str | None): If given, check metadata of power nodes to check if they should be grouped.
        _power_node_members (list[str] | None): If given along with metakey_power_node, group modules only for power nodes with these metadata values.
            If given without metakey_power_node, only group power nodes in this list.
        _release_capacity_profile (TimeVector | None): If given, use this profile for all aggregated modules' release capacities.

    Parent Attributes (see framcore.aggregators.Aggregator):
        _is_last_call_aggregate (bool | None): Tracks whether the last operation was an aggregation.
        _original_data (dict[str, Component | TimeVector | Curve | Expr] | None): Original detailed data before aggregation.
        _aggregation_map (dict[str, set[str]] | None): Maps detailed components to their aggregated components. detailed to agg

    """
101
+
102
+ def __init__(
103
+ self,
104
+ metakey_energy_eq_downstream: str,
105
+ data_dim: SinglePeriodTimeIndex,
106
+ scen_dim: FixedFrequencyTimeIndex,
107
+ ror_threshold: float = 0.5,
108
+ metakey_power_node: str | None = None,
109
+ power_node_members: list[str] | None = None,
110
+ release_capacity_profile: TimeVector | None = None,
111
+ ) -> None:
112
+ """
113
+ Initialize HydroAggregator.
114
+
115
+ Args:
116
+ metakey_energy_eq_downstream (str): Metadata key for energy equivalent downstream.
117
+ Can be calculated with framcore.utils.set_global_energy_equivalent
118
+ data_dim (SinglePeriodTimeIndex): Data dimension for eager evalutation.
119
+ scen_dim (FixedFrequencyTimeIndex): Scenario dimension for eager evalutation.
120
+ ror_threshold (float): Regulation factor (upstream reservoir capacity / yearly upstream inflow) threshold for run-of-river classification.
121
+ Default is 0.5.
122
+ metakey_power_node (str | None): If given, check metadata of power nodes to check if they should be grouped.
123
+ power_node_members (list[str] | None): If given along with metakey_power_node, group modules only for power nodes with these metadata values.
124
+ If given without metakey_power_node, only group power nodes in this list.
125
+ release_capacity_profile (TimeVector | None): If given, use this profile for all aggregated modules' release capacities.
126
+
127
+ """
128
+ super().__init__()
129
+ self._check_type(metakey_energy_eq_downstream, str)
130
+ self._check_type(ror_threshold, float)
131
+ assert ror_threshold >= 0, ValueError(f"ror_threshold must be non-negative, got {ror_threshold}.")
132
+ self._check_type(data_dim, SinglePeriodTimeIndex)
133
+ self._check_type(scen_dim, FixedFrequencyTimeIndex)
134
+ self._check_type(metakey_power_node, (str, type(None)))
135
+ self._check_type(power_node_members, (list, type(None)))
136
+ if metakey_power_node is not None:
137
+ assert len(power_node_members) > 0, ValueError("If metakey_power_node is given, power_node_members must also be given.")
138
+
139
+ self._metakey_energy_eq_downstream = metakey_energy_eq_downstream
140
+ self._ror_threshold = ror_threshold
141
+ self._metakey_power_node = metakey_power_node
142
+ self._power_node_members = power_node_members
143
+ self._release_capacity_profile = release_capacity_profile
144
+
145
+ self._data_dim = data_dim
146
+ self._scen_dim = scen_dim
147
+
148
+ self._grouped_modules: dict[str, set[str]] = defaultdict(list) # agg to detailed
149
+ self._grouped_reservoirs: dict[str, set[str]] = defaultdict(list) # agg to detailed
150
+
151
+ def _aggregate(self, model: Model) -> None: # noqa: C901, PLR0915
152
+ t0 = time()
153
+ data = model.get_data()
154
+
155
+ t = time()
156
+ upstream_topology = self._map_upstream_topology(data)
157
+ self.send_debug_event(f"_map_upstream_topology time: {round(time() - t, 3)} seconds")
158
+
159
+ t = time()
160
+ generator_module_groups, reservoir_module_groups = self._group_modules_by_power_node(model, upstream_topology)
161
+ self.send_debug_event(f"_group_modules_by_power_node time: {round(time() - t, 3)} seconds")
162
+
163
+ t = time()
164
+ self._group_modules_by_regulation_factor(model, generator_module_groups, reservoir_module_groups, upstream_topology)
165
+ self.send_debug_event(f"_group_modules_by_regulation_factor time: {round(time() - t, 3)} seconds")
166
+
167
+ t = time()
168
+ ignore_production_capacity_modules = self._ignore_production_capacity_modules(model)
169
+ self.send_debug_event(f"_ignore_production_capacity_modules time: {round(time() - t, 3)} seconds")
170
+
171
+ t = time()
172
+ self._aggregate_groups(model, upstream_topology, ignore_production_capacity_modules)
173
+ self.send_debug_event(f"_aggregate_groups time: {round(time() - t, 3)} seconds")
174
+
175
+ # Add reservoir modules to aggregation map
176
+ t = time()
177
+ self._aggregation_map = {dd: set([a]) for a, d in self._grouped_reservoirs.items() for dd in d}
178
+ self.send_debug_event(f"add reservoir modules to _aggregation_map time: {round(time() - t, 3)} seconds")
179
+
180
+ # Add generator modules to aggregation map
181
+ t = time()
182
+ for a, d in self._grouped_modules.items():
183
+ for dd in d:
184
+ if dd not in self._aggregation_map:
185
+ self._aggregation_map[dd] = set([a])
186
+ elif not data[dd].get_reservoir(): # if reservoir module already in map, skip as reservoir mapping is main mapping
187
+ self._aggregation_map[dd].add(a)
188
+ self.send_debug_event(f"add generator modules to _aggregation_map time: {round(time() - t, 3)} seconds")
189
+
190
+ # Delete detailed modules and add remaining modules to aggregation map
191
+ t = time()
192
+ upstream_topology_with_bypass_spill = self._map_upstream_topology(data, include_bypass_spill=True)
193
+ aggregated_hydromodules = {module for modules in generator_module_groups.values() for module in modules} # add generator modules
194
+ for grouped_modules in generator_module_groups.values(): # add upstream modules
195
+ for grouped_module in grouped_modules:
196
+ upstream = upstream_topology_with_bypass_spill[grouped_module]
197
+ aggregated_hydromodules.update(upstream)
198
+ for downstream_module in upstream_topology_with_bypass_spill: # add downstream modules
199
+ for upstream in upstream_topology_with_bypass_spill[downstream_module]:
200
+ if upstream in aggregated_hydromodules:
201
+ aggregated_hydromodules.add(downstream_module)
202
+ break
203
+ other_modules = [key for key, component in data.items() if isinstance(component, HydroModule) and key not in aggregated_hydromodules]
204
+ other_generator_modules = [m for m in other_modules if data[m].get_generator()]
205
+ for m in other_modules: # remove other modules that do not interact with generator modules
206
+ interacts = False
207
+ for upstreams in upstream_topology_with_bypass_spill[m]:
208
+ for upstream in upstreams:
209
+ if upstream in other_generator_modules:
210
+ interacts = True
211
+ break
212
+ for gm in other_generator_modules:
213
+ if m in upstream_topology_with_bypass_spill[gm]:
214
+ interacts = True
215
+ break
216
+ if not interacts:
217
+ aggregated_hydromodules.add(m)
218
+ message = f"Module {m} is not upstream or downstream of any generator module, adding to aggregation as it does not interact with power system."
219
+ self.send_warning_event(message)
220
+
221
+ for m_key in aggregated_hydromodules:
222
+ if m_key not in self._grouped_modules:
223
+ if not (m_key in self._aggregation_map or m_key in self._grouped_reservoirs):
224
+ self._aggregation_map[m_key] = set()
225
+ del model.get_data()[m_key]
226
+ self.send_debug_event(f"delete detailed modules time: {round(time() - t, 3)} seconds")
227
+
228
+ self.send_debug_event(f"total _aggregate: {round(time() - t0, 3)} seconds")
229
+
230
+ def _map_upstream_topology( # noqa: C901
231
+ self,
232
+ data: dict[str, Component | TimeVector | Curve | Expr],
233
+ include_bypass_spill: bool = False,
234
+ ) -> dict[str, list[str]]:
235
+ """Map HydroModules topology. Return dict[module, List[upstream modules + itself]]."""
236
+ module_names = [key for key, component in data.items() if isinstance(component, HydroModule)]
237
+
238
+ # Direct upstream mapping (including transport pumps)
239
+ direct_upstream = {module_name: [] for module_name in module_names}
240
+ for module_name in module_names:
241
+ release_to = data[module_name].get_release_to()
242
+ pump = data[module_name].get_pump()
243
+ if data[module_name].get_pump() and pump.get_from_module() == module_name: # transport pump
244
+ pump = data[module_name].get_pump()
245
+ pump_to = pump.get_to_module()
246
+ direct_upstream[pump_to].append(module_name)
247
+ elif release_to: # other
248
+ try:
249
+ direct_upstream[release_to].append(module_name)
250
+ except KeyError as e:
251
+ message = f"Reference to {release_to} does not exist in Model. Referenced by {module_name} Module."
252
+ raise KeyError(message) from e
253
+ if include_bypass_spill:
254
+ bypass = data[module_name].get_bypass()
255
+ if bypass:
256
+ bypass_to = bypass.get_to_module()
257
+ if bypass_to:
258
+ try:
259
+ direct_upstream[bypass_to].append(module_name)
260
+ except KeyError as e:
261
+ message = f"Reference to {bypass_to} does not exist in Model. Referenced by {module_name} Module."
262
+ raise KeyError(message) from e
263
+ spill_to = data[module_name].get_spill_to()
264
+ if spill_to:
265
+ try:
266
+ direct_upstream[spill_to].append(module_name)
267
+ except KeyError as e:
268
+ message = f"Reference to {spill_to} does not exist in Model. Referenced by {module_name} Module."
269
+ raise KeyError(message) from e
270
+
271
+ # Recursive upstream function
272
+ def find_all_upstream(
273
+ module_name: str,
274
+ visited: set,
275
+ data: dict[str, Component | TimeVector | Curve | Expr],
276
+ ) -> list[str]:
277
+ if module_name in visited:
278
+ return [] # Avoid circular dependencies
279
+ visited.add(module_name)
280
+ upstream_names = direct_upstream[module_name]
281
+ all_upstream = set(upstream_names)
282
+ for upstream in upstream_names:
283
+ all_upstream.update(find_all_upstream(upstream, visited, data))
284
+ all_upstream.add(module_name) # include itself
285
+ return visited
286
+
287
+ # Full upstream topology
288
+ topology = {}
289
+ for module_name in module_names:
290
+ topology[module_name] = list(find_all_upstream(module_name, set(), data))
291
+
292
+ return topology
293
+
294
+ def _build_upstream_reservoir_and_inflow_exprs(
295
+ self,
296
+ data: dict[str, Component | TimeVector | Curve | Expr],
297
+ upstream_topology: dict[str, list[str]],
298
+ ) -> tuple[dict[str, Expr], dict[str, Expr]]:
299
+ """Build upstream inflow and reservoir expressions for each generator module."""
300
+ upstream_inflow_exprs = dict[str, Expr]()
301
+ upstream_reservoir_exprs = dict[str, Expr]()
302
+ generator_modules = [key for key, module in data.items() if isinstance(module, HydroModule) and module.get_generator()]
303
+ for m in generator_modules:
304
+ inflow_expr = 0
305
+ reservoir_expr = 0
306
+ for mm in upstream_topology[m]:
307
+ inflow = data[mm].get_inflow()
308
+ if inflow:
309
+ inflow_expr += inflow.get_level()
310
+ reservoir = data[mm].get_reservoir()
311
+ if reservoir:
312
+ reservoir_expr += reservoir.get_capacity().get_level()
313
+
314
+ upstream_inflow_exprs[m] = inflow_expr
315
+ upstream_reservoir_exprs[m] = reservoir_expr
316
+
317
+ return upstream_inflow_exprs, upstream_reservoir_exprs
318
+
319
+ def _group_modules_by_power_node(self, model: Model, upstream_topology: dict[str, list[str]]) -> dict[str, list[str]]: # noqa: C901
320
+ """Group modules by power node. Return generator_module_groups, reservoir_module_groups."""
321
+ data = model.get_data()
322
+ generator_module_groups = defaultdict(list) # power_node -> generator_modules
323
+ reservoir_mapping = defaultdict(set) # reservoir -> power_node(s)
324
+ for key, component in data.items():
325
+ if isinstance(component, HydroModule) and component.get_generator():
326
+ power_node = component.get_generator().get_power_node()
327
+ if self._metakey_power_node is None and self._power_node_members and power_node not in self._power_node_members:
328
+ continue
329
+ if self._metakey_power_node is not None: # only group modules for nodes in self._power_node_members
330
+ power_node_component = data[power_node]
331
+ node_meta = power_node_component.get_meta(self._metakey_power_node)
332
+ if node_meta is None:
333
+ message = f"Module {key} does not have metadata '{self._metakey_power_node}' for node mapping."
334
+ raise ValueError(message)
335
+ node_meta_value = node_meta.get_value()
336
+ if node_meta_value not in self._power_node_members:
337
+ continue
338
+
339
+ generator_module_groups[power_node].append(key)
340
+
341
+ for m in upstream_topology[key]:
342
+ if data[m].get_reservoir():
343
+ reservoir_mapping[m].add(power_node)
344
+
345
+ # Group reservoirs to the power node with the highest cumulative energy equivalent downstream from the reservoir
346
+ reservoir_module_groups: dict[str, list[str]] = defaultdict(list)
347
+ for res_name in reservoir_mapping:
348
+ power_nodes = reservoir_mapping[res_name]
349
+ if len(power_nodes) > 1:
350
+ highest_power_node = max(
351
+ power_nodes,
352
+ key=lambda pn: get_level_value(
353
+ get_hydro_downstream_energy_equivalent(data, res_name, pn),
354
+ db=model,
355
+ unit="kWh/m3",
356
+ data_dim=self._data_dim,
357
+ scen_dim=self._scen_dim,
358
+ is_max=False,
359
+ ),
360
+ )
361
+ reservoir_module_groups[highest_power_node].append(res_name)
362
+ else:
363
+ reservoir_module_groups[next(iter(power_nodes))].append(res_name)
364
+
365
+ return generator_module_groups, reservoir_module_groups
366
+
367
+ def _group_modules_by_regulation_factor(
368
+ self,
369
+ model: Model,
370
+ generator_module_groups: dict[str, list[str]],
371
+ reservoir_module_groups: dict[str, list[str]],
372
+ upstream_topology: dict[str, list[str]],
373
+ ) -> None:
374
+ """
375
+ Group modules into regulated and unregulated based on regulation factor and self._ror_threshold.
376
+
377
+ Regulation factor = upstream reservoir capacity / yearly upstream inflow.
378
+ Run-of-river = regulation factor <= self._ror_threshold.
379
+ Regulated = regulation factor > self._ror_threshold.
380
+ """
381
+ data = model.get_data()
382
+ upstream_inflow_exprs, upstream_reservoir_exprs = self._build_upstream_reservoir_and_inflow_exprs(data, upstream_topology)
383
+
384
+ for area, member_modules in generator_module_groups.items():
385
+ ror_name = area + "_hydro_RoR"
386
+ reg_name = area + "_hydro_reservoir"
387
+
388
+ ror_modules = []
389
+ reservoir_modules = []
390
+
391
+ for m_key in member_modules:
392
+ if upstream_inflow_exprs[m_key] != 0:
393
+ upstream_inflow = get_level_value(
394
+ upstream_inflow_exprs[m_key],
395
+ db=model,
396
+ unit="Mm3/year",
397
+ data_dim=self._data_dim,
398
+ scen_dim=self._scen_dim,
399
+ is_max=False,
400
+ )
401
+ else:
402
+ continue # Skip generator modules with no upstream inflow
403
+ if upstream_reservoir_exprs[m_key] != 0:
404
+ upstream_reservoir = get_level_value(
405
+ upstream_reservoir_exprs[m_key],
406
+ db=model,
407
+ unit="Mm3",
408
+ data_dim=self._data_dim,
409
+ scen_dim=self._scen_dim,
410
+ is_max=False,
411
+ )
412
+ else:
413
+ upstream_reservoir = 0
414
+ regulation_factor = upstream_reservoir / upstream_inflow if upstream_inflow > 0 else 0
415
+
416
+ if regulation_factor <= self._ror_threshold:
417
+ ror_modules.append(m_key)
418
+ else:
419
+ reservoir_modules.append(m_key)
420
+
421
+ if len(ror_modules) > 0: # only make run-of-river group if there are any modules
422
+ self._grouped_modules[ror_name] = ror_modules
423
+
424
+ if len(reservoir_modules) > 0: # only make reservoir group if there are any modules
425
+ self._grouped_modules[reg_name] = reservoir_modules
426
+
427
+ if len(reservoir_module_groups[area]) > 0 and len(reservoir_modules) > 0: # add reservoirs to reg group
428
+ self._grouped_reservoirs[reg_name] = reservoir_module_groups[area]
429
+ elif len(reservoir_module_groups[area]) > 0: # add reservoirs to ror group if no reg group
430
+ self._grouped_reservoirs[ror_name] = reservoir_module_groups[area]
431
+ message = f"{area} has no modules over ror_threshold ({self._ror_threshold}), so all reservoirs are put in RoR module."
432
+ self.send_warning_event(message)
433
+
434
+ def _ignore_production_capacity_modules(
435
+ self,
436
+ model: Model,
437
+ ) -> list[str]:
438
+ """
439
+ Return list of module names to ignore production capacity for in aggregation, because of hydraulic coupled reservoirs.
440
+
441
+ Ignore the lowest production capacity of modules that are under the same hydraulic coupled reservoirs.
442
+ """
443
+ ignore_production_capacity_modules = []
444
+ data = model.get_data()
445
+ module_names = [key for key, component in data.items() if isinstance(component, HydroModule)]
446
+
447
+ for m in module_names:
448
+ if data[m].get_hydraulic_coupling() != 0:
449
+ under_hydraulic = [
450
+ (
451
+ mm,
452
+ get_level_value(
453
+ data[mm].get_generator().get_energy_eq().get_level() * data[mm].get_release_capacity().get_level(),
454
+ model,
455
+ "MW",
456
+ self._data_dim,
457
+ self._scen_dim,
458
+ is_max=False,
459
+ ),
460
+ )
461
+ for mm in module_names
462
+ if data[mm].get_release_to() == m
463
+ ]
464
+ assert len(under_hydraulic) > 1
465
+ ignore_production_capacity_modules.append(min(under_hydraulic, key=lambda x: x[1])[0])
466
+
467
+ return ignore_production_capacity_modules
468
+
469
+ def _aggregate_groups( # noqa: C901
470
+ self,
471
+ model: Model,
472
+ upstream_topology: dict[str, list[str]],
473
+ ignore_capacity: list[str],
474
+ ) -> None:
475
+ """Aggregate each group of modules into one HydroModule."""
476
+ data = model.get_data()
477
+ for new_id, module_names in self._grouped_modules.items():
478
+ num_reservoirs = 0
479
+ if new_id in self._grouped_reservoirs:
480
+ num_reservoirs = len(self._grouped_reservoirs[new_id])
481
+ self.send_info_event(f"{new_id} from {len(module_names)} generator modules and {num_reservoirs} reservoirs.")
482
+
483
+ # Generator and production
484
+ generator_module_names = [m for m in module_names if data[m].get_generator()]
485
+ productions = [data[m].get_generator().get_production() for m in generator_module_names]
486
+ sum_production = _aggregate_result_volumes(model, productions, "MW", self._data_dim, self._scen_dim, new_id, generator_module_names)
487
+
488
+ generator = HydroGenerator(
489
+ power_node=data[generator_module_names[0]].get_generator().get_power_node(),
490
+ energy_eq=Conversion(level=ConstantTimeVector(1.0, "kWh/m3", is_max_level=True)),
491
+ production=sum_production,
492
+ )
493
+ energy_eq = generator.get_energy_eq().get_level()
494
+
495
+ # Release capacity
496
+ release_capacities = [data[m].get_release_capacity() for m in generator_module_names if m not in ignore_capacity]
497
+ if self._release_capacity_profile:
498
+ if not all([rc.get_profile() is None for rc in release_capacities]):
499
+ message = f"Some release capacities in {new_id} have profiles, using provided profile for all."
500
+ self.send_warning_event(message)
501
+ release_capacities = deepcopy(release_capacities)
502
+ for rc in release_capacities:
503
+ rc.set_profile(self._release_capacity_profile)
504
+ generator_energy_eqs = [data[m].get_generator().get_energy_eq() for m in generator_module_names if m not in ignore_capacity]
505
+ release_capacity_levels = [rc.get_level() * ee.get_level() for rc, ee in zip(release_capacities, generator_energy_eqs, strict=True)]
506
+ release_capacity = MaxFlowVolume(level=sum(release_capacity_levels) / energy_eq, profile=self._release_capacity_profile)
507
+
508
+ # Inflow level
509
+ upstream_inflow_levels = defaultdict(list)
510
+ for m in generator_module_names:
511
+ for mm in upstream_topology[m]:
512
+ inflow = data[mm].get_inflow()
513
+ if inflow:
514
+ upstream_inflow_levels[m].append(inflow.get_level())
515
+ inflow_level_energy = sum(
516
+ sum(upstream_inflow_levels[m]) * data[m].get_generator().get_energy_eq().get_level()
517
+ for m in generator_module_names
518
+ if len(upstream_inflow_levels[m]) > 0
519
+ )
520
+ inflow_level = inflow_level_energy / energy_eq
521
+
522
+ # Inflow profile
523
+ one_profile = Expr(src=ConstantTimeVector(1.0, is_zero_one_profile=False), is_profile=True)
524
+ inflow_profile_to_energyinflow = defaultdict(list)
525
+ inflow_level_to_value = dict()
526
+ for m in generator_module_names:
527
+ m_energy_eq = data[m].get_generator().get_energy_eq().get_level()
528
+ m_energy_eq_value = get_level_value(
529
+ m_energy_eq,
530
+ db=model,
531
+ unit="kWh/m3",
532
+ data_dim=self._data_dim,
533
+ scen_dim=self._scen_dim,
534
+ is_max=False,
535
+ )
536
+ for upstream_module in upstream_topology[m]:
537
+ inflow = data[upstream_module].get_inflow()
538
+ if inflow:
539
+ if inflow not in inflow_level_to_value:
540
+ inflow_level_to_value[inflow] = get_level_value(
541
+ inflow.get_level(),
542
+ db=model,
543
+ unit="m3/s",
544
+ data_dim=self._data_dim,
545
+ scen_dim=self._scen_dim,
546
+ is_max=False,
547
+ )
548
+ upstream_energy_inflow = inflow_level_to_value[inflow] * m_energy_eq_value
549
+ upstream_profile = inflow.get_profile() if inflow.get_profile() else one_profile
550
+ inflow_profile_to_energyinflow[upstream_profile].append(upstream_energy_inflow)
551
+
552
+ profile_weights = [sum(energyinflows) for energyinflows in inflow_profile_to_energyinflow.values()]
553
+ inflow_profile = _aggregate_weighted_expressions(list(inflow_profile_to_energyinflow.keys()), profile_weights)
554
+ inflow = AvgFlowVolume(level=inflow_level, profile=inflow_profile)
555
+
556
+ # Reservoir capacity and filling
557
+ if new_id in self._grouped_reservoirs and len(self._grouped_reservoirs[new_id]) > 0:
558
+ reservoir_levels = [
559
+ data[m].get_reservoir().get_capacity().get_level() * data[m].get_meta(self._metakey_energy_eq_downstream).get_value()
560
+ for m in self._grouped_reservoirs[new_id]
561
+ ]
562
+ reservoir_level = sum(reservoir_levels) / energy_eq
563
+ reservoir_capacity = StockVolume(level=reservoir_level)
564
+
565
+ fillings = [data[m].get_reservoir().get_volume() for m in self._grouped_reservoirs[new_id]]
566
+ energy_eq_downstreams = [data[m].get_meta(self._metakey_energy_eq_downstream).get_value() for m in self._grouped_reservoirs[new_id]]
567
+ sum_filling = self._aggregate_fillings(fillings, energy_eq_downstreams, energy_eq, model, "GWh", new_id, self._grouped_reservoirs[new_id])
568
+ reservoir = HydroReservoir(capacity=reservoir_capacity, volume=sum_filling)
569
+ else:
570
+ reservoir = None
571
+
572
+ new_hydro = HydroModule(
573
+ generator=generator,
574
+ reservoir=reservoir,
575
+ inflow=inflow,
576
+ release_capacity=release_capacity,
577
+ )
578
+ new_hydro.add_meta(key=self._metakey_energy_eq_downstream, value=LevelExprMeta(energy_eq))
579
+
580
+ data[new_id] = new_hydro
581
+
582
def _aggregate_fillings(
    self,
    fillings: list[StockVolume],
    energy_eq_downstreams: list[Expr],
    energy_eq: Expr,
    model: Model,
    weight_unit: str,
    group_id: str,
    members: list[str],
) -> StockVolume | None:
    """
    Aggregate reservoir fillings into one StockVolume, or return None.

    Aggregation only happens when every grouped module has a filling level.
    A partially defined group is reported via a warning event and skipped.
    """
    has_level = [bool(filling.get_level()) for filling in fillings]

    if all(has_level):
        level, profiles, weights = self._get_level_profiles_weights_fillings(model, fillings, energy_eq_downstreams, energy_eq, weight_unit)
        return StockVolume(level=level, profile=_aggregate_weighted_expressions(profiles, weights))

    if any(has_level):
        # Mixed case: warn about the members lacking a filling level.
        missing = [member for member, ok in zip(members, has_level, strict=False) if not ok]
        message = (
            "Some but not all grouped modules have reservoir filling defined, reservoir filling not aggregated. "
            f"Group: {group_id}, missing filling for {missing}."
        )
        self.send_warning_event(message)

    return None
606
+
607
def _get_level_profiles_weights_fillings(
    self,
    model: Model,
    fillings: list[StockVolume],
    energy_eq_downstreams: list[Expr],
    energy_eq: Expr,
    weight_unit: str,
) -> tuple[Expr, list[Expr], list[float]]:
    """
    Get aggregated filling level, and profiles with weights from list of fillings.

    Two cases:
    1) All fillings are expressions from previous disaggregation. Can be aggregated more efficiently.
    2) Default case, where we weight fillings based on energy equivalent inflow.
    """
    filling_levels = [filling.get_level() for filling in fillings]

    # Fast path: every level has the shape produced by a previous disaggregation.
    if all(self._is_disagg_filling_expr(lvl) for lvl in filling_levels):
        return _get_level_profile_weights_from_disagg_levelprofiles(model, fillings, self._data_dim, self._scen_dim)

    # Default path: weight each filling by its downstream energy equivalent.
    energy_levels: list[Expr] = []
    profiles: list[Expr] = []
    weights: list[float] = []
    for lvl, ee, filling in zip(filling_levels, energy_eq_downstreams, fillings, strict=True):
        energy_level = lvl * ee
        energy_levels.append(energy_level)
        profiles.append(filling.get_profile())
        weights.append(get_level_value(energy_level, model, weight_unit, self._data_dim, self._scen_dim, False))
    aggregated_level = sum(energy_levels) / energy_eq
    return aggregated_level, profiles, weights
630
+
631
def _is_disagg_filling_expr(self, expr: Expr) -> bool:
    """Check if expr is ((weight * agg_level * energy_eq_downstream) / energy_eq_agg) which indicates it comes from disaggregation."""
    # A leaf cannot be the composite disaggregation expression.
    if expr.is_leaf():
        return False
    ops, args = expr.get_operations(expect_ops=True, copy_list=False)
    # Positional pattern check for args of the "**/" operation chain:
    #   args[0]: weight      — leaf, not a level/flow/stock (plain scalar factor)
    #   args[1]: agg_level   — a stock (the aggregated filling)
    #   args[2]: energy_eq_downstream — a level, neither flow nor stock
    #   args[3]: energy_eq_agg        — leaf level, neither flow nor stock
    if not (
        ops == "**/"
        and len(args) == 4  # noqa E501
        and all([args[0].is_leaf(), args[3].is_leaf()])
        and not args[0].is_level()
        and not args[0].is_flow()
        and not args[0].is_stock()
        and args[1].is_stock()
        and args[2].is_level()
        and not args[2].is_flow()
        and not args[2].is_stock()
        and args[3].is_level()
        and not args[3].is_flow()
        and not args[3].is_stock()
    ):
        return False
    # The downstream energy equivalent may itself be a leaf or a sum of levels;
    # anything else means the expression was not produced by disaggregation.
    return args[2].is_leaf() or args[2].get_operations(expect_ops=True, copy_list=False)[0] == "+"
653
+
654
def _disaggregate(  # noqa: C901
    self,
    model: Model,
    original_data: dict[str, Component | TimeVector | Curve | Expr],
) -> None:
    """
    Disaggregate HydroAggregator.

    Reinstates the original detailed modules (unless their aggregated groups
    were deleted), then distributes aggregated production and reservoir filling
    results back onto the detailed modules. Results are kept as-is when the
    aggregated expression is recognized as a sum of the detailed expressions.
    """
    new_data = model.get_data()

    deleted_group_names = self._get_deleted_group_modules(new_data)  # find agg groups that have been deleted
    agg_modules = {key: new_data.pop(key) for key in self._grouped_modules if key not in deleted_group_names}  # isolate agg modules out of new_data

    # Reinstate original detailed modules that are not fully deleted
    for detailed_key, agg_keys in self._aggregation_map.items():
        if agg_keys and all(key in deleted_group_names for key in agg_keys):
            continue
        new_data[detailed_key] = original_data[detailed_key]

    # Set production results in detailed modules
    for agg_key, detailed_keys in self._grouped_modules.items():
        if agg_key in deleted_group_names:
            continue

        agg_production_level = agg_modules[agg_key].get_generator().get_production().get_level()
        if agg_production_level is None:  # keep original production if agg has no production defined
            continue
        if len(detailed_keys) == 1:  # only one detailed module, set production directly
            # Bugfix: index the single member explicitly; previously this used a
            # stale 'detailed_key' left over from the reinstatement loop above.
            new_data[detailed_keys[0]].get_generator().get_production().set_level(agg_production_level)
            continue
        detailed_production_levels = [new_data[detailed_key].get_generator().get_production().get_level() for detailed_key in detailed_keys]
        if any(detailed_production_levels) and not all(
            detailed_production_levels,
        ):  # if some but not all detailed modules have production defined, skip setting production
            missing = [detailed_key for detailed_key, level in zip(detailed_keys, detailed_production_levels, strict=False) if not level]
            message = f"Some but not all grouped modules have production defined. Production not disaggregated for {agg_key}, missing for {missing}."
            self.send_warning_event(message)
            continue
        if _all_detailed_exprs_in_sum_expr(agg_production_level, detailed_production_levels):  # if agg production is sum of detailed levels, keep original
            continue
        production_weights = self._get_disaggregation_production_weights(model, detailed_keys)  # default method
        for detailed_key in detailed_keys:
            self._set_weighted_production(new_data[detailed_key], agg_modules[agg_key], production_weights[detailed_key])

    # Set filling results in detailed modules
    for agg_key, detailed_keys in self._grouped_reservoirs.items():
        if agg_key in deleted_group_names:
            continue

        agg_filling_level = agg_modules[agg_key].get_reservoir().get_volume().get_level()
        if agg_filling_level is None:  # keep original filling if agg has no filling defined
            continue
        if len(detailed_keys) == 1:  # only one detailed module, set filling directly
            # Bugfix: same stale-variable issue as in the production branch above.
            new_data[detailed_keys[0]].get_reservoir().get_volume().set_level(agg_filling_level)
            continue
        detailed_filling_levels = [new_data[detailed_key].get_reservoir().get_volume().get_level() for detailed_key in detailed_keys]
        if any(detailed_filling_levels) and not all(
            detailed_filling_levels,
        ):  # if some but not all detailed modules have filling defined, skip setting filling
            missing = [detailed_key for detailed_key, level in zip(detailed_keys, detailed_filling_levels, strict=False) if not level]
            message = f"Some but not all grouped modules have filling defined. Filling not disaggregated for {agg_key}, missing for {missing}."
            self.send_warning_event(message)
            continue
        detailed_energy_eq_downstreams = [new_data[detailed_key].get_meta(self._metakey_energy_eq_downstream).get_value() for detailed_key in detailed_keys]
        agg_energy_eq_downstream = agg_modules[agg_key].get_meta(self._metakey_energy_eq_downstream).get_value()
        agg_detailed_fillings = [
            detailed_filling * detailed_energy_eq
            for detailed_filling, detailed_energy_eq in zip(detailed_filling_levels, detailed_energy_eq_downstreams, strict=True)
            if detailed_filling and detailed_energy_eq
        ]
        if self._is_sum_filling_expr(
            agg_filling_level,
            agg_detailed_fillings,
            agg_energy_eq_downstream,
        ):  # if agg filling is sum of detailed levels, keep original
            continue
        reservoir_weights = self._get_disaggregation_filling_weights(model, detailed_keys)  # default method
        for detailed_key in detailed_keys:
            self._set_weighted_filling(new_data[detailed_key], agg_modules[agg_key], reservoir_weights[detailed_key])

    self._grouped_modules.clear()
    self._grouped_reservoirs.clear()
734
+
735
+ def _get_deleted_group_modules(self, new_data: dict[str, Component | TimeVector | Curve | Expr]) -> set[str]:
736
+ deleted_group_names: set[str] = set()
737
+
738
+ for group_name in self._grouped_modules:
739
+ if group_name not in new_data:
740
+ deleted_group_names.add(group_name)
741
+ continue
742
+
743
+ return deleted_group_names
744
+
745
def _get_disaggregation_production_weights(
    self,
    model: Model,
    detailed_keys: list[str],
) -> dict[str, float]:
    """
    Get weights to disaggregate production based on production capacity.

    Each detailed module is weighted by its production capacity
    (release capacity * generator energy equivalent, evaluated in kW),
    normalized so that the returned weights sum to 1.
    """
    data = model.get_data()
    production_weights: dict[str, float] = {}  # detailed_key -> production capacity in kW
    for det in detailed_keys:
        det_module = data[det]
        release_capacity_level = det_module.get_release_capacity().get_level()
        generator_energy_eq = det_module.get_generator().get_energy_eq().get_level()
        production_weights[det] = get_level_value(
            release_capacity_level * generator_energy_eq,
            db=model,
            unit="kW",
            data_dim=self._data_dim,
            scen_dim=self._scen_dim,
            is_max=False,
        )

    # Hoist the total out of the normalization loop (previously recomputed
    # per key, making normalization O(n^2)).
    total = sum(production_weights.values())
    return {det: weight / total for det, weight in production_weights.items()}
774
+
775
def _get_disaggregation_filling_weights(
    self,
    model: Model,
    detailed_keys: list[str],
) -> dict[str, float]:
    """
    Get weights to disaggregate filling based on reservoir capacity.

    Each detailed module is weighted by its reservoir energy capacity
    (reservoir capacity * downstream energy equivalent, evaluated in GWh),
    normalized so that the returned weights sum to 1.
    """
    data = model.get_data()
    filling_weights: dict[str, float] = {}  # detailed_key -> reservoir energy capacity in GWh
    for det in detailed_keys:
        det_module = data[det]
        reservoir_capacity_level = det_module.get_reservoir().get_capacity().get_level()
        reservoir_energy_eq = det_module.get_meta(self._metakey_energy_eq_downstream).get_value()
        filling_weights[det] = get_level_value(
            reservoir_capacity_level * reservoir_energy_eq,
            db=model,
            unit="GWh",
            data_dim=self._data_dim,
            scen_dim=self._scen_dim,
            is_max=False,
        )

    # Hoist the total out of the normalization loop (previously recomputed
    # per key, making normalization O(n^2)).
    total = sum(filling_weights.values())
    return {det: weight / total for det, weight in filling_weights.items()}
804
+
805
+ def _set_weighted_production(self, detailed_module: HydroModule, agg_module: HydroModule, production_weight: float) -> None:
806
+ """Set production level and profile for detailed module based on aggregated module."""
807
+ agg_production_level = agg_module.get_generator().get_production().get_level()
808
+ agg_production_profile = agg_module.get_generator().get_production().get_profile()
809
+ production_level = production_weight * agg_production_level
810
+ detailed_module.get_generator().get_production().set_level(production_level)
811
+ detailed_module.get_generator().get_production().set_profile(agg_production_profile)
812
+
813
+ def _is_sum_filling_expr(self, agg_filling: Expr, agg_detailed_fillings: list[Expr], agg_energy_eq_downstream: Expr) -> bool:
814
+ """Check if expr is (sum(filling * energy_eq_downstream)) / agg_energy_eq_downstream, indicating it comes from aggregation."""
815
+ if agg_filling.is_leaf():
816
+ return False
817
+ ops, args = agg_filling.get_operations(expect_ops=True, copy_list=False)
818
+ if not (ops == "/" and len(args) == 2) and args[1] == agg_energy_eq_downstream: # noqa E501
819
+ return False
820
+ ops_sum, args_sum = args[0].get_operations(expect_ops=True, copy_list=False)
821
+ if "+" not in ops_sum:
822
+ return False
823
+ if len(args_sum) != len(agg_detailed_fillings):
824
+ return False
825
+ return all(arg in agg_detailed_fillings for arg in args_sum)
826
+
827
def _set_weighted_filling(self, detailed_module: HydroModule, agg_module: HydroModule, filling_weight: float) -> None:
    """Set filling level and profile for detailed module based on aggregated module."""
    # Aggregated reservoir filling (level and profile) to distribute.
    agg_filling_level = agg_module.get_reservoir().get_volume().get_level()
    agg_filling_profile = agg_module.get_reservoir().get_volume().get_profile()
    if agg_filling_level:  # keep original filling if agg has no filling defined
        # Downstream energy equivalents stored as metadata on both modules.
        agg_energy_eq = agg_module.get_meta(self._metakey_energy_eq_downstream).get_value()
        detailed_energy_eq = detailed_module.get_meta(self._metakey_energy_eq_downstream).get_value()

        # Share of the aggregated filling converted via the energy-equivalent
        # ratio. This (weight * level * energy_eq) / energy_eq shape is what
        # _is_disagg_filling_expr recognizes on a later re-aggregation.
        filling_level = (filling_weight * agg_filling_level * agg_energy_eq) / detailed_energy_eq
        detailed_module.get_reservoir().get_volume().set_level(filling_level)
        detailed_module.get_reservoir().get_volume().set_profile(agg_filling_profile)