fram-core 0.0.0__py3-none-any.whl → 0.1.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fram_core-0.1.0a1.dist-info/METADATA +41 -0
- fram_core-0.1.0a1.dist-info/RECORD +100 -0
- {fram_core-0.0.0.dist-info → fram_core-0.1.0a1.dist-info}/WHEEL +1 -2
- fram_core-0.1.0a1.dist-info/licenses/LICENSE.md +8 -0
- framcore/Base.py +142 -0
- framcore/Model.py +73 -0
- framcore/__init__.py +9 -0
- framcore/aggregators/Aggregator.py +153 -0
- framcore/aggregators/HydroAggregator.py +837 -0
- framcore/aggregators/NodeAggregator.py +495 -0
- framcore/aggregators/WindSolarAggregator.py +323 -0
- framcore/aggregators/__init__.py +13 -0
- framcore/aggregators/_utils.py +184 -0
- framcore/attributes/Arrow.py +305 -0
- framcore/attributes/ElasticDemand.py +90 -0
- framcore/attributes/ReservoirCurve.py +37 -0
- framcore/attributes/SoftBound.py +19 -0
- framcore/attributes/StartUpCost.py +54 -0
- framcore/attributes/Storage.py +146 -0
- framcore/attributes/TargetBound.py +18 -0
- framcore/attributes/__init__.py +65 -0
- framcore/attributes/hydro/HydroBypass.py +42 -0
- framcore/attributes/hydro/HydroGenerator.py +83 -0
- framcore/attributes/hydro/HydroPump.py +156 -0
- framcore/attributes/hydro/HydroReservoir.py +27 -0
- framcore/attributes/hydro/__init__.py +13 -0
- framcore/attributes/level_profile_attributes.py +714 -0
- framcore/components/Component.py +112 -0
- framcore/components/Demand.py +130 -0
- framcore/components/Flow.py +167 -0
- framcore/components/HydroModule.py +330 -0
- framcore/components/Node.py +76 -0
- framcore/components/Thermal.py +204 -0
- framcore/components/Transmission.py +183 -0
- framcore/components/_PowerPlant.py +81 -0
- framcore/components/__init__.py +22 -0
- framcore/components/wind_solar.py +67 -0
- framcore/curves/Curve.py +44 -0
- framcore/curves/LoadedCurve.py +155 -0
- framcore/curves/__init__.py +9 -0
- framcore/events/__init__.py +21 -0
- framcore/events/events.py +51 -0
- framcore/expressions/Expr.py +490 -0
- framcore/expressions/__init__.py +28 -0
- framcore/expressions/_get_constant_from_expr.py +483 -0
- framcore/expressions/_time_vector_operations.py +615 -0
- framcore/expressions/_utils.py +73 -0
- framcore/expressions/queries.py +423 -0
- framcore/expressions/units.py +207 -0
- framcore/fingerprints/__init__.py +11 -0
- framcore/fingerprints/fingerprint.py +293 -0
- framcore/juliamodels/JuliaModel.py +161 -0
- framcore/juliamodels/__init__.py +7 -0
- framcore/loaders/__init__.py +10 -0
- framcore/loaders/loaders.py +407 -0
- framcore/metadata/Div.py +73 -0
- framcore/metadata/ExprMeta.py +50 -0
- framcore/metadata/LevelExprMeta.py +17 -0
- framcore/metadata/Member.py +55 -0
- framcore/metadata/Meta.py +44 -0
- framcore/metadata/__init__.py +15 -0
- framcore/populators/Populator.py +108 -0
- framcore/populators/__init__.py +7 -0
- framcore/querydbs/CacheDB.py +50 -0
- framcore/querydbs/ModelDB.py +34 -0
- framcore/querydbs/QueryDB.py +45 -0
- framcore/querydbs/__init__.py +11 -0
- framcore/solvers/Solver.py +48 -0
- framcore/solvers/SolverConfig.py +272 -0
- framcore/solvers/__init__.py +9 -0
- framcore/timeindexes/AverageYearRange.py +20 -0
- framcore/timeindexes/ConstantTimeIndex.py +17 -0
- framcore/timeindexes/DailyIndex.py +21 -0
- framcore/timeindexes/FixedFrequencyTimeIndex.py +762 -0
- framcore/timeindexes/HourlyIndex.py +21 -0
- framcore/timeindexes/IsoCalendarDay.py +31 -0
- framcore/timeindexes/ListTimeIndex.py +197 -0
- framcore/timeindexes/ModelYear.py +17 -0
- framcore/timeindexes/ModelYears.py +18 -0
- framcore/timeindexes/OneYearProfileTimeIndex.py +21 -0
- framcore/timeindexes/ProfileTimeIndex.py +32 -0
- framcore/timeindexes/SinglePeriodTimeIndex.py +37 -0
- framcore/timeindexes/TimeIndex.py +90 -0
- framcore/timeindexes/WeeklyIndex.py +21 -0
- framcore/timeindexes/__init__.py +36 -0
- framcore/timevectors/ConstantTimeVector.py +135 -0
- framcore/timevectors/LinearTransformTimeVector.py +114 -0
- framcore/timevectors/ListTimeVector.py +123 -0
- framcore/timevectors/LoadedTimeVector.py +104 -0
- framcore/timevectors/ReferencePeriod.py +41 -0
- framcore/timevectors/TimeVector.py +94 -0
- framcore/timevectors/__init__.py +17 -0
- framcore/utils/__init__.py +36 -0
- framcore/utils/get_regional_volumes.py +369 -0
- framcore/utils/get_supported_components.py +60 -0
- framcore/utils/global_energy_equivalent.py +46 -0
- framcore/utils/isolate_subnodes.py +163 -0
- framcore/utils/loaders.py +97 -0
- framcore/utils/node_flow_utils.py +236 -0
- framcore/utils/storage_subsystems.py +107 -0
- fram_core-0.0.0.dist-info/METADATA +0 -5
- fram_core-0.0.0.dist-info/RECORD +0 -4
- fram_core-0.0.0.dist-info/top_level.txt +0 -1
|
@@ -0,0 +1,323 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections import defaultdict
|
|
4
|
+
from typing import TYPE_CHECKING
|
|
5
|
+
|
|
6
|
+
from framcore.aggregators._utils import (
|
|
7
|
+
_aggregate_costs,
|
|
8
|
+
_aggregate_result_volumes,
|
|
9
|
+
_aggregate_weighted_expressions,
|
|
10
|
+
_all_detailed_exprs_in_sum_expr,
|
|
11
|
+
)
|
|
12
|
+
from framcore.aggregators.Aggregator import Aggregator # full import path so inheritance works
|
|
13
|
+
from framcore.attributes import AvgFlowVolume, Cost
|
|
14
|
+
from framcore.components import Component, Solar, Wind
|
|
15
|
+
from framcore.curves import Curve
|
|
16
|
+
from framcore.expressions import Expr, get_level_value
|
|
17
|
+
from framcore.timeindexes import FixedFrequencyTimeIndex, SinglePeriodTimeIndex
|
|
18
|
+
from framcore.timevectors import ConstantTimeVector, TimeVector
|
|
19
|
+
|
|
20
|
+
if TYPE_CHECKING:
|
|
21
|
+
from framcore import Model
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class _WindSolarAggregator(Aggregator):
    """
    Aggregate components into groups based on their power nodes.

    Aggregation steps (self._aggregate):
    1. Group components based on their power nodes (self._group_by_power_node):
    2. Aggregate grouped components into a single aggregated component for each group (self._aggregate_groups):
        - Max_capacity is calculated as the sum of the maximum capacity levels with weighted profiles.
        - Variable operational costs (voc) are aggregated using weighted averages based on the weighting method (now only max_capacity supported).
            - TODO: Add support for additional weighting methods (e.g. production instead of capacity).
        - Production is aggregated as the sum of production levels with weighted profiles. TODO: Add possibility to skip results aggregation.
    2a. Make new aggregated component and delete original components from model data.
    3. Add mapping from detailed to aggregated components to self._aggregation_map.

    Disaggregation steps (self._disaggregate):
    1. Restore original components from self._original_data. NB! Changes to aggregated modules are lost except for results (TODO)
    2. Distribute production from aggregated components back to the original components:
        - Results are weighted based on the weighting method (now only max_capacity supported).
    3. Delete aggregated components from the model.

    Comments:
    - It is recommended to only use the same aggregator type once on the same components of a model. If you want to go from one aggregation level to
      another, it is better to use model.disaggregate first and then aggregate again. This is to keep the logic simple and avoid complex expressions.
      We also have logic that recognises if result expressions come from aggregations or disaggregations. When aggregating or disaggregating these,
      we can go back to the original results rather than setting up complex expressions that for example aggregate the disaggregated results.
    - Levels and profiles are aggregated separately, and then combined into attributes.
    - We have chosen to eagerly evaluate weights for aggregation and disaggregation of levels and profiles. This is a balance between eagerly evaluating
      everything, and setting up complex expressions. Eagerly evaluating everything would require setting up new timevectors after eager evaluation, which
      is not ideal. While setting up complex expressions gives expressions that are harder to work with and slower to query from.

    Attributes:
        _data_dim (SinglePeriodTimeIndex | None): Data dimension for eager evaluation.
        _scen_dim (FixedFrequencyTimeIndex | None): Scenario dimension for eager evaluation.
        _grouped_components (dict[str, set[str]]): Mapping of aggregated components to their detailed components. agg to detailed

    Parent Attributes (see framcore.aggregators.Aggregator):
        _is_last_call_aggregate (bool | None): Tracks whether the last operation was an aggregation.
        _original_data (dict[str, Component | TimeVector | Curve | Expr] | None): Original detailed data before aggregation.
        _aggregation_map (dict[str, set[str]] | None): Maps aggregated components to their detailed components. detailed to agg

    """

    # Set by concrete subclasses (WindAggregator -> Wind, SolarAggregator -> Solar).
    _component_type: type[Component]

    def __init__(
        self,
        data_dim: SinglePeriodTimeIndex | None = None,
        scen_dim: FixedFrequencyTimeIndex | None = None,
    ) -> None:
        """
        Initialize Aggregator.

        Args:
            data_dim (SinglePeriodTimeIndex | None): Data dimension for eager evaluation.
            scen_dim (FixedFrequencyTimeIndex | None): Scenario dimension for eager evaluation.

        """
        super().__init__()
        self._data_dim = data_dim
        self._scen_dim = scen_dim
        self._grouped_components: dict[str, set[str]] = defaultdict(set)

    def _aggregate(self, model: Model) -> None:
        """Group components by power node, aggregate each group, and replace the detailed components in the model."""
        data = model.get_data()

        # Group components by power node and remove groups of size 1
        self._group_by_power_node(data)

        # Aggregate the grouped components
        self._aggregate_groups(model)

        # Remove the original components from the model
        for member_ids in self._grouped_components.values():
            for component_id in member_ids:
                del data[component_id]

        # Add mapping to self._aggregation_map (detailed id -> set of aggregated ids)
        self._aggregation_map = {member_id: {group_id} for group_id, member_ids in self._grouped_components.items() for member_id in member_ids}

    def _group_by_power_node(self, data: dict[str, Component | TimeVector | Curve | Expr]) -> None:
        """Group components by their power node and remove groups with only one member."""
        self._grouped_components.clear()
        for name, obj in data.items():
            if isinstance(obj, self._component_type):
                power_node = obj.get_power_node()
                if power_node is None:
                    message = f"Component {name} has no power node defined. Cannot group by power node."
                    raise ValueError(message)
                group_id = f"Aggregated{self._component_type.__name__}{power_node}"
                self._grouped_components[group_id].add(name)

        # Singleton groups are pointless to aggregate; drop them.
        for group_id in list(self._grouped_components.keys()):
            if len(self._grouped_components[group_id]) == 1:
                del self._grouped_components[group_id]

    def _aggregate_groups(self, model: Model) -> None:
        """Aggregate each group of components into a single component."""
        for group_id, member_ids in self._grouped_components.items():
            self._aggregate_group(model, group_id, member_ids)

    def _aggregate_group(self, model: Model, group_id: str, member_ids: set[str]) -> None:
        """Aggregate a group of components into a single component stored in the model under group_id."""
        self.send_info_event(f"{group_id} from {len(member_ids)} components.")
        data = model.get_data()
        members = [data[member_id] for member_id in member_ids]

        # Weights
        capacity_levels = [member.get_max_capacity().get_level() for member in members]
        capacity_profiles = [member.get_max_capacity().get_profile() for member in members]
        vocs = [member.get_voc() for member in members]
        capacity_level_values: list[float] = []  # stays empty when no profile/voc weighting is needed
        if any(capacity_profiles) or any(vocs):  # only calc capacity weights if needed
            capacity_level_values = [get_level_value(cl, model, "MW", self._data_dim, self._scen_dim, True) for cl in capacity_levels]
            if sum(capacity_level_values) == 0.0:
                message = "All grouped components do not contribute to weights (capacity = 0). Simplified aggregation."
                self.send_warning_event(message)

        # Production capacity: sum of member levels (fixed: was recomputed a second time here)
        capacity_level = sum(capacity_levels)

        capacity_profile = None
        if any(capacity_profiles) and (sum(capacity_level_values) != 0.0):
            # Members without a profile contribute a flat profile of 1.0 to the weighted average.
            one_profile = Expr(src=ConstantTimeVector(1.0, is_zero_one_profile=False), is_profile=True)
            capacity_profiles = [profile if profile else one_profile for profile in capacity_profiles]
            capacity_profile = _aggregate_weighted_expressions(capacity_profiles, capacity_level_values)

        sum_capacity = AvgFlowVolume(capacity_level, capacity_profile)

        # Power node: all members share the same node by construction of the groups.
        power_node = members[0].get_power_node()

        # Production
        productions = [member.get_production() for member in members]
        production = _aggregate_result_volumes(model, productions, "MW", self._data_dim, self._scen_dim, group_id, member_ids)

        # Variable operational cost
        voc = None
        if any(vocs) and (sum(capacity_level_values) != 0.0):
            voc_level, voc_profile, voc_intercept = _aggregate_costs(model, vocs, outside_weights=capacity_level_values, weight_unit="EUR/MWh")
            voc = Cost(voc_level, voc_profile, voc_intercept)

        # Bugfix: construct the subclass's component type (previously always Wind,
        # which produced Wind components even for SolarAggregator).
        new_component = self._component_type(
            power_node=power_node,
            max_capacity=sum_capacity,
            voc=voc,
            production=production,
        )

        data[group_id] = new_component

    def _disaggregate(
        self,
        model: Model,
        original_data: dict[str, Component | TimeVector | Curve | Expr],
    ) -> None:
        """Restore detailed components and distribute aggregated production results back onto them."""
        new_data = model.get_data()

        deleted_group_names = self._get_deleted_group_components(new_data)
        # Isolate agg modules out of new_data.
        agg_components = {key: new_data.pop(key) for key in self._grouped_components if key not in deleted_group_names}

        # Reinstate original detailed components that are not fully deleted
        for detailed_key, agg_keys in self._aggregation_map.items():
            if agg_keys and all(key in deleted_group_names for key in agg_keys):
                continue
            new_data[detailed_key] = original_data[detailed_key]

        # Set production results in detailed modules
        for agg_key, detailed_keys in self._grouped_components.items():
            if agg_key in deleted_group_names:
                continue

            agg_production_level = agg_components[agg_key].get_production().get_level()
            if agg_production_level is None:  # keep original production if agg has no production defined
                continue
            if len(detailed_keys) == 1:  # only one detailed module, set production directly
                # Bugfix: use the group's single member. Previously this read the stale
                # loop variable `detailed_key` leaked from the reinstatement loop above,
                # which pointed at an unrelated component.
                (only_key,) = detailed_keys
                new_data[only_key].get_production().set_level(agg_production_level)
                continue
            detailed_production_levels = [new_data[detailed_key].get_production().get_level() for detailed_key in detailed_keys]
            if any(detailed_production_levels) and not all(
                detailed_production_levels,
            ):  # if some but not all detailed components have production defined, skip setting production
                missing = [detailed_key for detailed_key, level in zip(detailed_keys, detailed_production_levels, strict=False) if not level]
                message = f"Some but not all grouped components have production defined. Production not disaggregated for {agg_key}, missing for {missing}."
                self.send_warning_event(message)
                continue
            if _all_detailed_exprs_in_sum_expr(agg_production_level, detailed_production_levels):  # if agg production is sum of detailed levels, keep original
                continue
            capacity_levels = [new_data[detailed_key].get_max_capacity().get_level() for detailed_key in detailed_keys]
            capacity_level_values = [get_level_value(cl, model, "MW", self._data_dim, self._scen_dim, True) for cl in capacity_levels]
            # NOTE(review): assumes total capacity > 0 here; a zero total would divide by zero — confirm upstream guarantees.
            total_capacity = sum(capacity_level_values)  # hoisted out of the per-member division
            production_weights = dict(zip(detailed_keys, (cl / total_capacity for cl in capacity_level_values), strict=False))
            for detailed_key in detailed_keys:
                self._set_weighted_production(new_data[detailed_key], agg_components[agg_key], production_weights[detailed_key])  # default

    def _get_deleted_group_components(self, new_data: dict[str, Component | TimeVector | Curve | Expr]) -> set[str]:
        """Identify which aggregated components have been deleted from the model."""
        deleted_group_names: set[str] = set()

        for group_name in self._grouped_components:
            if group_name not in new_data:
                deleted_group_names.add(group_name)

        return deleted_group_names

    def _set_weighted_production(self, detailed_component: Component, agg_component: Component, production_weight: float) -> None:
        """Set production level and profile for a detailed component as a weighted share of the aggregated component's production."""
        agg_production = agg_component.get_production()
        production_level = production_weight * agg_production.get_level()
        detailed_component.get_production().set_level(production_level)
        detailed_component.get_production().set_profile(agg_production.get_profile())
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
class WindAggregator(_WindSolarAggregator):
    """
    Aggregate Wind components into groups based on their power nodes.

    Aggregation steps (self._aggregate):
    1. Group components based on their power nodes (self._group_by_power_node):
    2. Aggregate grouped components into a single aggregated component for each group (self._aggregate_groups):
        - Max_capacity is calculated as the sum of the maximum capacity levels with weighted profiles.
        - Variable operation costs (voc) are aggregated using weighted averages based on the weighting method (now only max_capacity supported).
            - TODO: Add support for additional weighting methods (e.g. production instead of capacity).
        - Production is aggregated as the sum of production levels with weighted profiles.
    2a. Make new aggregated component and delete original components from model data.
    3. Add mapping from detailed to aggregated components to self._aggregation_map.

    Disaggregation steps (self._disaggregate):
    1. Restore original components from self._original_data. NB! Changes to aggregated modules are lost except for results.
    2. Distribute production from aggregated components back to the original components:
        - Results are weighted based on the weighting method (now only max_capacity supported).
    3. Delete aggregated components from the model.

    Comments:
    - It is recommended to only use the same aggregator type once on the same components of a model. If you want to go from one aggregation level to
      another, it is better to use model.disaggregate first and then aggregate again. This is to keep the logic simple and avoid complex expressions.
      We also have logic that recognises if result expressions come from aggregations or disaggregations. When aggregating or disaggregating these,
      we can go back to the original results rather than setting up complex expressions that for example aggregate the disaggregated results.
    - Levels and profiles are aggregated separately, and then combined into attributes.
    - We have chosen to eagerly evaluate weights for aggregation of levels and profiles, and disaggregation. This is a balance between eagerly evaluating
      everything, and setting up complex expressions. Eagerly evaluating everything would require setting up new timevectors after eager evaluation, which
      is not ideal. While setting up complex expressions gives expressions that are harder to work with and slower to query from.

    Attributes:
        _data_dim (SinglePeriodTimeIndex | None): Data dimension for eager evaluation.
        _scen_dim (FixedFrequencyTimeIndex | None): Scenario dimension for eager evaluation.
        _grouped_components (dict[str, set[str]]): Mapping of aggregated components to their detailed components. agg to detailed

    Parent Attributes (see framcore.aggregators.Aggregator):
        _is_last_call_aggregate (bool | None): Tracks whether the last operation was an aggregation.
        _original_data (dict[str, Component | TimeVector | Curve | Expr] | None): Original detailed data before aggregation.
        _aggregation_map (dict[str, set[str]] | None): Maps aggregated components to their detailed components. detailed to agg

    """

    # Concrete component type grouped and aggregated by this subclass.
    _component_type = Wind
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
class SolarAggregator(_WindSolarAggregator):
    """
    Aggregate Solar components into groups based on their power nodes.

    Aggregation steps (self._aggregate):
    1. Group components based on their power nodes (self._group_by_power_node):
    2. Aggregate grouped components into a single aggregated component for each group (self._aggregate_groups):
        - Max_capacity is calculated as the sum of the maximum capacity levels with weighted profiles.
        - Variable operation costs (voc) are aggregated using weighted averages based on the weighting method (now only max_capacity supported).
            - TODO: Add support for additional weighting methods (e.g. production instead of capacity).
        - Production is aggregated as the sum of production levels with weighted profiles.
    2a. Make new aggregated component and delete original components from model data.
    3. Add mapping from detailed to aggregated components to self._aggregation_map.

    Disaggregation steps (self._disaggregate):
    1. Restore original components from self._original_data. NB! Changes to aggregated modules are lost except for results.
    2. Distribute production from aggregated components back to the original components:
        - Results are weighted based on the weighting method (now only max_capacity supported).
    3. Delete aggregated components from the model.

    Comments:
    - It is recommended to only use the same aggregator type once on the same components of a model. If you want to go from one aggregation level to
      another, it is better to use model.disaggregate first and then aggregate again. This is to keep the logic simple and avoid complex expressions.
      We also have logic that recognises if result expressions come from aggregations or disaggregations. When aggregating or disaggregating these,
      we can go back to the original results rather than setting up complex expressions that for example aggregate the disaggregated results.
    - Levels and profiles are aggregated separately, and then combined into attributes.
    - We have chosen to eagerly evaluate weights for aggregation of levels and profiles, and disaggregation. This is a balance between eagerly evaluating
      everything, and setting up complex expressions. Eagerly evaluating everything would require setting up new timevectors after eager evaluation, which
      is not ideal. While setting up complex expressions gives expressions that are harder to work with and slower to query from.

    Attributes:
        _data_dim (SinglePeriodTimeIndex | None): Data dimension for eager evaluation.
        _scen_dim (FixedFrequencyTimeIndex | None): Scenario dimension for eager evaluation.
        _grouped_components (dict[str, set[str]]): Mapping of aggregated components to their detailed components. agg to detailed

    Parent Attributes (see framcore.aggregators.Aggregator):
        _is_last_call_aggregate (bool | None): Tracks whether the last operation was an aggregation.
        _original_data (dict[str, Component | TimeVector | Curve | Expr] | None): Original detailed data before aggregation.
        _aggregation_map (dict[str, set[str]] | None): Maps aggregated components to their detailed components. detailed to agg

    """

    # Concrete component type grouped and aggregated by this subclass.
    _component_type = Solar
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
# framcore/aggregators/__init__.py
"""Public API of the framcore.aggregators package."""

from framcore.aggregators.Aggregator import Aggregator
from framcore.aggregators.HydroAggregator import HydroAggregator
from framcore.aggregators.NodeAggregator import NodeAggregator
# Fixed: import members sorted alphabetically, consistent with the sorted __all__.
from framcore.aggregators.WindSolarAggregator import SolarAggregator, WindAggregator

__all__ = [
    "Aggregator",
    "HydroAggregator",
    "NodeAggregator",
    "SolarAggregator",
    "WindAggregator",
]
|
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
"""Utility functions for aggregation and disaggregation of model attributes."""
|
|
2
|
+
|
|
3
|
+
from math import isclose
|
|
4
|
+
|
|
5
|
+
from framcore.attributes import AvgFlowVolume, Cost, LevelProfile
|
|
6
|
+
from framcore.expressions import Expr, get_level_value
|
|
7
|
+
from framcore.Model import Model
|
|
8
|
+
from framcore.timeindexes import FixedFrequencyTimeIndex, SinglePeriodTimeIndex
|
|
9
|
+
from framcore.timevectors import ConstantTimeVector
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# Aggregation util functions ---------------------------------------------------------------------
|
|
13
|
+
# Only for results
|
|
14
|
+
def _aggregate_result_volumes(
    model: Model,
    volumes: list[AvgFlowVolume],
    weight_unit: str,
    data_dim: SinglePeriodTimeIndex,
    scen_dim: FixedFrequencyTimeIndex,
    group_id: str,
    grouped_ids: list[str],
) -> AvgFlowVolume | None:
    """Aggregate result volumes for grouped components. If some but not all grouped components have volume defined, send warning and return None."""
    # Record once which members actually carry a result level.
    has_level = [bool(volume.get_level()) for volume in volumes]

    if all(has_level):
        # Every member has a result: combine levels and weight the profiles.
        level, profiles, weights = _get_level_profile_weights_volumes_from_results(model, volumes, weight_unit, data_dim, scen_dim)
        return AvgFlowVolume(level=level, profile=_aggregate_weighted_expressions(profiles, weights))

    if any(has_level):
        # Partial results cannot be combined meaningfully; warn and fall through to None.
        missing = [grouped_id for grouped_id, defined in zip(grouped_ids, has_level, strict=False) if not defined]
        message = f"Some but not all grouped components have volume defined. Volume not aggregated for {group_id}, missing volume for {missing}."
        model.send_warning_event(message)

    return None
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _get_level_profile_weights_volumes_from_results(
    model: Model,
    volumes: list[AvgFlowVolume],
    weight_unit: str,
    data_dim: SinglePeriodTimeIndex,
    scen_dim: FixedFrequencyTimeIndex,
) -> tuple[Expr, list[Expr], list[float]]:
    """
    Get aggregated level, and profiles with weights from list of volumes.

    Two cases:
    1. All volumes have previously been disaggregated (levels are weight * LevelExpr). Can be aggregated more efficiently.
    2. Default: sum levels and get weights from level values.
    """
    volume_levels = [volume.get_level() for volume in volumes]

    # Case 1: every level has the weight * FlowExpr shape produced by disaggregation.
    if all(_is_weight_flow_expr(lvl) for lvl in volume_levels):
        return _get_level_profile_weights_from_disagg_levelprofiles(model, volumes, data_dim, scen_dim)

    # Case 2 (default): sum the levels and derive weights from their evaluated values.
    summed_level = sum(volume_levels)
    volume_profiles = [volume.get_profile() for volume in volumes]
    level_weights = [get_level_value(lvl, model, weight_unit, data_dim, scen_dim, False) for lvl in volume_levels]
    return summed_level, volume_profiles, level_weights
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def _get_level_profile_weights_from_disagg_levelprofiles(
    model: Model,
    objs: list[LevelProfile],
    data_dim: SinglePeriodTimeIndex,
    scen_dim: FixedFrequencyTimeIndex,
) -> tuple[Expr, list[Expr], list[float]]:
    """
    Get aggregated level, and profiles with weights from disaggregated LevelProfiles with Level = weight * LevelExpr.

    Two cases:
    - If all sum weights are 1, return sum of levels and profiles with weights 1.
    - Otherwise, return weighted sum of levels, and profiles with weights from level expressions.

    Args:
        model: Model used for eager evaluation of the weight expressions.
        objs: Disaggregated LevelProfiles whose levels are weight * LevelExpr.
        data_dim: Data dimension for eager evaluation.
        scen_dim: Scenario dimension for eager evaluation.

    Returns:
        Tuple of (aggregated level expression, profile expressions, one weight per profile).

    """
    # Keyed by (level expr, profile expr); value is the summed weight for that pair.
    # Kept under its own name instead of rebinding `weights` to a list later,
    # which obscured the type and intent of the original code.
    weight_by_pair = _get_weights_from_levelprofiles(model, objs, data_dim, scen_dim)

    if all(isclose(weight, 1.0, rel_tol=1e-6) for weight in weight_by_pair.values()):
        # All weights 1: the pairs already sum to the aggregate, so no weighting is needed.
        level = sum(level_expr for level_expr, _ in weight_by_pair)
        profiles = [profile for _, profile in weight_by_pair]
        return level, profiles, [1.0] * len(weight_by_pair)

    # General case: weighted sum of the unique level expressions.
    level = sum(weight * level_expr for (level_expr, _), weight in weight_by_pair.items())
    profiles = [profile for _, profile in weight_by_pair]
    return level, profiles, list(weight_by_pair.values())
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
# Generic
|
|
85
|
+
def _aggregate_weighted_expressions(exprs: list[Expr], weights: list[float]) -> Expr:
|
|
86
|
+
"""Calculate weighted average of expressions with sum of weights = 1. If all profiles are identical, return that expr."""
|
|
87
|
+
if any(e is None for e in exprs):
|
|
88
|
+
message = f"Cannot aggregate profiles if some profiles are None: {exprs}."
|
|
89
|
+
raise ValueError(message)
|
|
90
|
+
if all(exprs[0] == e for e in exprs):
|
|
91
|
+
return exprs[0]
|
|
92
|
+
weights_dict = dict()
|
|
93
|
+
for e, w in zip(exprs, weights, strict=True):
|
|
94
|
+
if e not in weights_dict:
|
|
95
|
+
weights_dict[e] = 0.0
|
|
96
|
+
weights_dict[e] += w / sum(weights)
|
|
97
|
+
return sum([w * e for e, w in weights_dict.items()])
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _is_weight_flow_expr(expr: Expr) -> bool:
    """
    Check if expr is weight * FlowExpr, which indicates it comes from disaggregation.

    The expression must be a binary "*" with a leaf first operand that is neither
    a level nor a profile (i.e. a plain weight coefficient), and a flow second operand.

    Args:
        expr: Expression to inspect.

    Returns:
        True if expr matches the weight * FlowExpr shape, else False.

    """
    if expr.is_leaf():
        return False
    ops, args = expr.get_operations(expect_ops=True, copy_list=False)
    if ops != "*" or len(args) != 2 or not args[0].is_leaf():
        return False
    # Simplified from the double negative `not (not is_level and not is_profile)`;
    # the weight operand must be a plain coefficient, not a level or a profile.
    if args[0].is_level() or args[0].is_profile():
        return False
    return args[1].is_flow()
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def _get_weights_from_levelprofiles(
    model: Model,
    objs: list[LevelProfile],
    data_dim: SinglePeriodTimeIndex,
    scen_dim: FixedFrequencyTimeIndex,
) -> dict[tuple[Expr, Expr], float]:
    """Get sum of weights for each unique (level, profile) pair from disaggregated LevelProfiles with Level = weight * LevelExpr."""
    # Accumulate the evaluated weight of every object under its (level expr, profile) pair.
    sums: dict[tuple[Expr, Expr], float] = {}
    for lp in objs:
        _, operands = lp.get_level().get_operations(expect_ops=True, copy_list=False)
        pair = (operands[1], lp.get_profile())
        value = get_level_value(operands[0], model, unit=None, data_dim=data_dim, scen_dim=scen_dim, is_max=False)
        sums[pair] = sums.get(pair, 0.0) + value

    # Snap totals that are within tolerance of 1.0 to exactly 1.0.
    weights = {pair: 1.0 if isclose(total, 1.0, rel_tol=1e-6) else total for pair, total in sums.items()}

    if any(total > 1.0 for total in weights.values()):
        message = f"Sum of weights are over 1 for some level/profile combinations: {weights}."
        raise ValueError(message)

    return weights
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def _aggregate_costs(
    model: Model,
    costs: list[Cost],
    weights: list[float],
    weight_unit: str,
    data_dim: SinglePeriodTimeIndex,
    scen_dim: FixedFrequencyTimeIndex,
) -> tuple[Expr, Expr | None, Expr | None]:
    """
    Aggregate a list of costs with weights. Aggregated cost has weighted level, profile and intercept.

    Args:
        model: Model used when evaluating cost level values for profile weighting.
        costs: Costs to aggregate.
        weights: One weight per cost (same length as costs, enforced by strict zip).
        weight_unit: Unit used when evaluating cost level values.
        data_dim: Data-dimension time index for level evaluation.
        scen_dim: Scenario-dimension time index for level evaluation.

    Returns:
        Tuple of (aggregated level, aggregated profile or None, aggregated intercept or None).
        Profile/intercept are None when no input cost has one.

    """
    aggregated_profile = None
    aggregated_intercept = None

    # Levels: a missing level counts as constant zero so it dilutes the weighted average.
    # (The previous dead `aggregated_level = None` initialization was removed; the level
    # is always computed unconditionally below.)
    zero_level = Expr(ConstantTimeVector(0.0, is_max_level=False), is_level=True)
    cost_levels = [cost.get_level() if cost.get_level() else zero_level for cost in costs]
    aggregated_level = _aggregate_weighted_expressions(cost_levels, weights)

    # Constant 1.0 profile, shared as the default for both missing profiles and missing
    # intercepts (previously constructed twice with identical arguments).
    one_profile = Expr(src=ConstantTimeVector(1.0, is_zero_one_profile=False), is_profile=True)

    # Profiles: weight each profile by its cost's level value so larger costs dominate the shape.
    cost_profiles = [cost.get_profile() for cost in costs]
    if any(cost_profiles):
        cost_profiles = [profile if profile else one_profile for profile in cost_profiles]
        cost_level_values = [get_level_value(level, model, weight_unit, data_dim, scen_dim, False) for level in cost_levels]
        profile_weights = [clv * weight for clv, weight in zip(cost_level_values, weights, strict=True)]
        aggregated_profile = _aggregate_weighted_expressions(cost_profiles, profile_weights)

    # Intercepts: weighted directly with the caller-supplied weights.
    cost_intercepts = [cost.get_intercept() for cost in costs]
    if any(cost_intercepts):
        # NOTE(review): a missing intercept defaults to a constant 1.0 profile, mirroring
        # the profile case — confirm 1.0 (not 0.0) is the intended neutral intercept.
        cost_intercepts = [intercept if intercept else one_profile for intercept in cost_intercepts]
        aggregated_intercept = _aggregate_weighted_expressions(cost_intercepts, weights)

    return aggregated_level, aggregated_profile, aggregated_intercept
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
# Disaggregation util functions ---------------------------------------------------------------------
def _all_detailed_exprs_in_sum_expr(expr: Expr, detailed_exprs: list[Expr]) -> bool:
    """Check if expr is sum of detailed exprs. Does not handle the case where len(exprs) == 1."""
    if expr.is_leaf():
        return False
    operator, operands = expr.get_operations(expect_ops=True, copy_list=False)
    # Must be a "+" with exactly one operand per detailed expr, each found among them.
    is_matching_sum = operator == "+" and len(operands) == len(detailed_exprs)
    return is_matching_sum and all(operand in detailed_exprs for operand in operands)
|