flixopt 1.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


flixOpt/__init__.py ADDED
@@ -0,0 +1,32 @@
+ """
+ This module bundles all common functionality of flixOpt and sets up the logging
+ """
+
+ from .commons import (
+     CONFIG,
+     AggregatedCalculation,
+     AggregationParameters,
+     Bus,
+     Effect,
+     Flow,
+     FlowSystem,
+     FullCalculation,
+     InvestParameters,
+     LinearConverter,
+     OnOffParameters,
+     SegmentedCalculation,
+     Sink,
+     Source,
+     SourceAndSink,
+     Storage,
+     TimeSeriesData,
+     Transmission,
+     change_logging_level,
+     create_datetime_array,
+     linear_converters,
+     plotting,
+     results,
+     solvers,
+ )
+
+ CONFIG.load_config()
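
Because this module is a flat re-export plus a single `CONFIG.load_config()` call, downstream code can import the public API straight from the package root. A minimal sketch; it relies only on names listed in the import block above, and the final `print` is purely illustrative:

    # Every name here is re-exported by flixOpt/__init__.py (see the list above).
    from flixOpt import CONFIG, FlowSystem, Bus, Flow, Source, Sink, AggregationParameters

    # CONFIG.load_config() already ran at import time (last line of the module),
    # so the configuration object is ready to inspect.
    print(type(CONFIG).__name__)
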
flixOpt/aggregation.py ADDED
@@ -0,0 +1,430 @@
+ """
+ This module contains the Aggregation functionality for the flixOpt framework.
+ Through this, aggregating TimeSeriesData is possible.
+ """
+
+ import copy
+ import logging
+ import timeit
+ import warnings
+ from collections import Counter
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+
+ import numpy as np
+ import pandas as pd
+
+ try:
+     import tsam.timeseriesaggregation as tsam
+     TSAM_AVAILABLE = True
+ except ImportError:
+     TSAM_AVAILABLE = False
+
+ from .components import Storage
+ from .core import Skalar, TimeSeries, TimeSeriesData
+ from .elements import Component
+ from .flow_system import FlowSystem
+ from .math_modeling import Equation, Variable, VariableTS
+ from .structure import (
+     Element,
+     ElementModel,
+     SystemModel,
+     create_equation,
+     create_variable,
+ )
+
+ if TYPE_CHECKING:
+     import plotly.graph_objects as go
+
+ warnings.filterwarnings('ignore', category=DeprecationWarning)
+ logger = logging.getLogger('flixOpt')
+
+
42
+ class Aggregation:
+     """
+     Organizes the time series aggregation (clustering) of a DataFrame via tsam.
+     """
+
+     def __init__(
+         self,
+         original_data: pd.DataFrame,
+         hours_per_time_step: Skalar,
+         hours_per_period: Skalar,
+         nr_of_periods: int = 8,
+         weights: Dict[str, float] = None,
+         time_series_for_high_peaks: List[str] = None,
+         time_series_for_low_peaks: List[str] = None,
+     ):
+         """
+         Aggregates time series data into typical periods using tsam.
+
+         Parameters
+         ----------
+         original_data : pd.DataFrame
+             Time series data with a datetime index.
+         hours_per_time_step : Skalar
+             Duration of a single time step in hours (the resolution).
+         hours_per_period : Skalar
+             Duration of one period in hours.
+         nr_of_periods : int, optional
+             Number of typical periods to compute (default: 8).
+         weights : Dict[str, float], optional
+             Aggregation weight per column of `original_data` (passed to tsam's weightDict).
+         time_series_for_high_peaks : List[str], optional
+             Labels of time series used to explicitly select periods with high values.
+         time_series_for_low_peaks : List[str], optional
+             Labels of time series used to explicitly select periods with low values.
+         """
+         if not TSAM_AVAILABLE:
+             raise ImportError(
+                 "The 'tsam' package is required for clustering functionality. "
+                 "Install it with 'pip install tsam'."
+             )
+         self.original_data = copy.deepcopy(original_data)
+         self.hours_per_time_step = hours_per_time_step
+         self.hours_per_period = hours_per_period
+         self.nr_of_periods = nr_of_periods
+         self.nr_of_time_steps = len(self.original_data.index)
+         self.weights = weights or {}
+         self.time_series_for_high_peaks = time_series_for_high_peaks or []
+         self.time_series_for_low_peaks = time_series_for_low_peaks or []
+
+         self.aggregated_data: Optional[pd.DataFrame] = None
+         self.clustering_duration_seconds = None
+         self.tsam: Optional[tsam.TimeSeriesAggregation] = None
+
+     def cluster(self) -> None:
+         """
+         Performs the time series aggregation.
+         """
+         start_time = timeit.default_timer()
+         # Create the tsam aggregation object
+         self.tsam = tsam.TimeSeriesAggregation(
+             self.original_data,
+             noTypicalPeriods=self.nr_of_periods,
+             hoursPerPeriod=self.hours_per_period,
+             resolution=self.hours_per_time_step,
+             clusterMethod='k_means',
+             extremePeriodMethod='new_cluster_center'
+             if self.use_extreme_periods
+             else 'None',  # If extreme periods are to be included, use tsam's 'new_cluster_center' method
+             weightDict=self.weights,
+             addPeakMax=self.time_series_for_high_peaks,
+             addPeakMin=self.time_series_for_low_peaks,
+         )
+
+         self.tsam.createTypicalPeriods()  # run the aggregation/clustering
+         self.aggregated_data = self.tsam.predictOriginalData()
+
+         self.clustering_duration_seconds = timeit.default_timer() - start_time  # measure elapsed time
+         logger.info(self.describe_clusters())
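
For reference, the sketch below mirrors the tsam call pattern used by `cluster()` on synthetic data. The DataFrame and parameter values are invented for illustration; the tsam calls themselves are exactly those made above:

    import numpy as np
    import pandas as pd
    import tsam.timeseriesaggregation as tsam

    # One year of hourly data with a single (random) profile - illustrative only.
    index = pd.date_range('2024-01-01', periods=8760, freq='h')
    data = pd.DataFrame({'heat_demand': np.random.rand(8760)}, index=index)

    aggregation = tsam.TimeSeriesAggregation(
        data,
        noTypicalPeriods=8,          # role of nr_of_periods
        hoursPerPeriod=24,           # role of hours_per_period
        resolution=1,                # role of hours_per_time_step
        clusterMethod='k_means',
        extremePeriodMethod='None',  # no extreme periods in this sketch
    )
    aggregation.createTypicalPeriods()                 # run the clustering
    reconstructed = aggregation.predictOriginalData()  # typical periods mapped back onto the full index
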
+
+     def describe_clusters(self) -> str:
+         description = {}
+         for cluster in self.get_cluster_indices().keys():
+             description[cluster] = [
+                 str(indexVector[0]) + '...' + str(indexVector[-1])
+                 for indexVector in self.get_cluster_indices()[cluster]
+             ]
+
+         if self.use_extreme_periods:
+             # Drop the (long) profile arrays from the description:
+             extreme_periods = self.tsam.extremePeriods.copy()
+             for key in extreme_periods:
+                 del extreme_periods[key]['profile']
+         else:
+             extreme_periods = {}
+
+         return (
+             f'{"":#^80}\n'
+             f'{" Clustering ":#^80}\n'
+             f'periods_order:\n'
+             f'{self.tsam.clusterOrder}\n'
+             f'clusterPeriodNoOccur:\n'
+             f'{self.tsam.clusterPeriodNoOccur}\n'
+             f'index_vectors_of_clusters:\n'
+             f'{description}\n'
+             f'{"":#^80}\n'
+             f'extreme_periods:\n'
+             f'{extreme_periods}\n'
+             f'{"":#^80}'
+         )
+
+     @property
+     def use_extreme_periods(self):
+         return self.time_series_for_high_peaks or self.time_series_for_low_peaks
+
+     def plot(self, colormap: str = 'viridis', show: bool = True) -> 'go.Figure':
+         from . import plotting
+
+         df_org = self.original_data.copy().rename(
+             columns={col: f'Original - {col}' for col in self.original_data.columns}
+         )
+         df_agg = self.aggregated_data.copy().rename(
+             columns={col: f'Aggregated - {col}' for col in self.aggregated_data.columns}
+         )
+         fig = plotting.with_plotly(df_org, 'line', colors=colormap)
+         for trace in fig.data:
+             trace.update(dict(line=dict(dash='dash')))
+         fig = plotting.with_plotly(df_agg, 'line', colors=colormap, show=show, fig=fig)
+
+         fig.update_layout(
+             title='Original vs Aggregated Data (original = ---)', xaxis_title='Index', yaxis_title='Value'
+         )
+         return fig
+
+     def get_cluster_indices(self) -> Dict[str, List[np.ndarray]]:
+         """
+         Generates a dictionary that maps each cluster to a list of index vectors representing the time steps
+         assigned to that cluster for each period.
+
+         Returns:
+             dict: {cluster_0: [index_vector_3, index_vector_7, ...],
+                    cluster_1: [index_vector_1],
+                    ...}
+         """
+         clusters = self.tsam.clusterPeriodNoOccur.keys()
+         index_vectors = {cluster: [] for cluster in clusters}
+
+         period_length = len(self.tsam.stepIdx)
+         total_steps = len(self.tsam.timeSeries)
+
+         for period, cluster_id in enumerate(self.tsam.clusterOrder):
+             start_idx = period * period_length
+             end_idx = np.min([start_idx + period_length, total_steps])
+             index_vectors[cluster_id].append(np.arange(start_idx, end_idx))
+
+         return index_vectors
+
+     def get_equation_indices(self, skip_first_index_of_period: bool = True) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Generates pairs of indices for the equations by comparing index vectors of the same cluster.
+         If `skip_first_index_of_period` is True, the first index of each period is skipped.
+
+         Args:
+             skip_first_index_of_period (bool): Whether to include or skip the first index of each period.
+
+         Returns:
+             Tuple[np.ndarray, np.ndarray]: Two arrays of indices.
+         """
+         idx_var1 = []
+         idx_var2 = []
+
+         # Iterate through cluster index vectors
+         for index_vectors in self.get_cluster_indices().values():
+             if len(index_vectors) <= 1:  # Only proceed if the cluster has more than one period
+                 continue
+
+             # Process the first vector, optionally skipping its first index
+             first_vector = index_vectors[0][1:] if skip_first_index_of_period else index_vectors[0]
+
+             # Compare the first vector to the others in the cluster
+             for other_vector in index_vectors[1:]:
+                 if skip_first_index_of_period:
+                     other_vector = other_vector[1:]
+
+                 # Compare elements up to the minimum length of both vectors
+                 min_len = min(len(first_vector), len(other_vector))
+                 idx_var1.extend(first_vector[:min_len])
+                 idx_var2.extend(other_vector[:min_len])
+
+         # Convert lists to numpy arrays
+         return np.array(idx_var1), np.array(idx_var2)
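
To make the index pairing concrete, the following self-contained sketch re-runs the loop above on a hand-made cluster layout (the layout is invented; with `skip_first_index_of_period=True`, time steps 1..3 of the first period get equated to steps 9..11 of the second period in the same cluster):

    import numpy as np

    # Cluster 0 owns periods [0..3] and [8..11]; cluster 1 owns only [4..7].
    cluster_indices = {0: [np.arange(0, 4), np.arange(8, 12)], 1: [np.arange(4, 8)]}

    idx_var1, idx_var2 = [], []
    for vectors in cluster_indices.values():
        if len(vectors) <= 1:       # cluster 1 is skipped: nothing to equate
            continue
        first = vectors[0][1:]      # skip the first index of the period
        for other in vectors[1:]:
            other = other[1:]
            n = min(len(first), len(other))
            idx_var1.extend(first[:n])
            idx_var2.extend(other[:n])

    print(np.array(idx_var1), np.array(idx_var2))  # [1 2 3] [ 9 10 11]
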
+
+
+ class TimeSeriesCollection:
+     def __init__(self, time_series_list: List[TimeSeries]):
+         self.time_series_list = time_series_list
+         self.group_weights: Dict[str, float] = {}
+         self._unique_labels()
+         self._calculate_aggregation_weights()
+         self.weights: Dict[str, float] = {
+             time_series.label: time_series.aggregation_weight for time_series in self.time_series_list
+         }
+         self.data: Dict[str, np.ndarray] = {
+             time_series.label: time_series.active_data for time_series in self.time_series_list
+         }
+
+         if np.all(np.isclose(list(self.weights.values()), 1, atol=1e-6)):
+             logger.info('All aggregation weights were set to 1')
+
+     def _calculate_aggregation_weights(self):
+         """Calculates the aggregation weights of all TimeSeries. Necessary to use groups"""
+         groups = [
+             time_series.aggregation_group
+             for time_series in self.time_series_list
+             if time_series.aggregation_group is not None
+         ]
+         group_size = dict(Counter(groups))
+         self.group_weights = {group: 1 / size for group, size in group_size.items()}
+         for time_series in self.time_series_list:
+             time_series.aggregation_weight = self.group_weights.get(
+                 time_series.aggregation_group, time_series.aggregation_weight or 1
+             )
+
+     def _unique_labels(self):
+         """Makes sure every label of the TimeSeries in time_series_list is unique"""
+         label_counts = Counter([time_series.label for time_series in self.time_series_list])
+         duplicates = [label for label, count in label_counts.items() if count > 1]
+         assert duplicates == [], 'Duplicate TimeSeries labels found: {}.'.format(', '.join(duplicates))
+
+     def insert_data(self, data: Dict[str, np.ndarray]):
+         for time_series in self.time_series_list:
+             if time_series.label in data:
+                 time_series.aggregated_data = data[time_series.label]
+                 logger.debug(f'Inserted data for {time_series.label}')
+
+     def description(self) -> str:
+         # TODO:
+         result = f'{len(self.time_series_list)} TimeSeries used for aggregation:\n'
+         for time_series in self.time_series_list:
+             result += (
+                 f' -> {time_series.label} '
+                 f'(weight: {time_series.aggregation_weight:.4f}; group: "{time_series.aggregation_group}")\n'
+             )
+         if self.group_weights:
+             result += f'Aggregation_Groups: {list(self.group_weights.keys())}\n'
+         else:
+             result += 'Warning: no aggregation groups defined, i.e. every TimeSeries keeps weight 1 (or its explicitly given weight)!\n'
+         return result
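
The group weighting above reduces to splitting a weight of 1 evenly across the members of each group; the sketch below (with an invented group name) shows the core computation:

    from collections import Counter

    # Three TimeSeries share the aggregation_group 'heat', so each gets weight 1/3.
    groups = ['heat', 'heat', 'heat']
    group_weights = {group: 1 / size for group, size in Counter(groups).items()}
    print(group_weights)  # {'heat': 0.3333333333333333}
    # An ungrouped TimeSeries falls back to its explicit weight, or to 1.
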
+
+
+ class AggregationParameters:
+     def __init__(
+         self,
+         hours_per_period: float,
+         nr_of_periods: int,
+         fix_storage_flows: bool,
+         aggregate_data_and_fix_non_binary_vars: bool,
+         percentage_of_period_freedom: float = 0,
+         penalty_of_period_freedom: float = 0,
+         time_series_for_high_peaks: List[TimeSeriesData] = None,
+         time_series_for_low_peaks: List[TimeSeriesData] = None,
+     ):
+         """
+         Initializes aggregation parameters for time series data.
+
+         Parameters
+         ----------
+         hours_per_period : float
+             Duration of each period in hours.
+         nr_of_periods : int
+             Number of typical periods to use in the aggregation.
+         fix_storage_flows : bool
+             Whether to aggregate storage flows (load/unload); if other flows
+             are fixed, fixing storage flows is usually not required.
+         aggregate_data_and_fix_non_binary_vars : bool
+             Whether to aggregate all time series data, which allows fixing all time series
+             variables (like flow_rate), or to fix only binary variables. If False, no time
+             series data is changed. If True, the mathematical problem is simplified even further.
+         percentage_of_period_freedom : float, optional
+             Specifies the maximum percentage (0–100) of binary values within each period
+             that can deviate as "free variables", chosen by the solver (default is 0).
+             This allows binary variables to be 'partly equated' between aggregated periods.
+         penalty_of_period_freedom : float, optional
+             The penalty associated with each "free variable"; defaults to 0. Added to the
+             overall penalty.
+         time_series_for_high_peaks : list of TimeSeriesData
+             List of time series to use for explicitly selecting periods with high values.
+         time_series_for_low_peaks : list of TimeSeriesData
+             List of time series to use for explicitly selecting periods with low values.
+         """
+         self.hours_per_period = hours_per_period
+         self.nr_of_periods = nr_of_periods
+         self.fix_storage_flows = fix_storage_flows
+         self.aggregate_data_and_fix_non_binary_vars = aggregate_data_and_fix_non_binary_vars
+         self.percentage_of_period_freedom = percentage_of_period_freedom
+         self.penalty_of_period_freedom = penalty_of_period_freedom
+         self.time_series_for_high_peaks: List[TimeSeriesData] = time_series_for_high_peaks or []
+         self.time_series_for_low_peaks: List[TimeSeriesData] = time_series_for_low_peaks or []
+
+     @property
+     def use_extreme_periods(self):
+         return self.time_series_for_high_peaks or self.time_series_for_low_peaks
+
+     @property
+     def labels_for_high_peaks(self) -> List[str]:
+         return [ts.label for ts in self.time_series_for_high_peaks]
+
+     @property
+     def labels_for_low_peaks(self) -> List[str]:
+         return [ts.label for ts in self.time_series_for_low_peaks]
+
+     @property
+     def use_low_peaks(self):
+         # `time_series_for_low_peaks` is initialized with `or []`, so an
+         # `is not None` check would always be True; use truthiness instead.
+         return bool(self.time_series_for_low_peaks)
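
A minimal construction sketch, using only the constructor documented above; the parameter values are illustrative, not recommendations:

    # 8 typical days; storage flows stay free; only binary variables are fixed;
    # 5% of the binaries per period may deviate, at a small penalty each.
    params = AggregationParameters(
        hours_per_period=24,
        nr_of_periods=8,
        fix_storage_flows=False,
        aggregate_data_and_fix_non_binary_vars=False,
        percentage_of_period_freedom=5,
        penalty_of_period_freedom=0.01,
    )
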
+
+
+ class AggregationModel(ElementModel):
+     """The AggregationModel holds the equations and variables related to the aggregation of a FlowSystem.
+     It creates equations that equate indices of variables across periods of the same cluster, and introduces
+     penalties for binary variables that are allowed to escape those equations in other periods."""
+
+     def __init__(
+         self,
+         aggregation_parameters: AggregationParameters,
+         flow_system: FlowSystem,
+         aggregation_data: Aggregation,
+         components_to_clusterize: Optional[List[Component]],
+     ):
+         """
+         Modeling element for the "index-equating" equations.
+         """
+         super().__init__(Element('Aggregation'), 'Model')
+         self.flow_system = flow_system
+         self.aggregation_parameters = aggregation_parameters
+         self.aggregation_data = aggregation_data
+         self.components_to_clusterize = components_to_clusterize
+
+     def do_modeling(self, system_model: SystemModel):
+         if not self.components_to_clusterize:
+             components = self.flow_system.components.values()
+         else:
+             components = list(self.components_to_clusterize)
+
+         indices = self.aggregation_data.get_equation_indices(skip_first_index_of_period=True)
+
+         for component in components:
+             if isinstance(component, Storage) and not self.aggregation_parameters.fix_storage_flows:
+                 continue  # fix nothing in the storage
+
+             all_variables_of_component = component.model.all_variables
+             if self.aggregation_parameters.aggregate_data_and_fix_non_binary_vars:
+                 all_relevant_variables = [v for v in all_variables_of_component.values() if isinstance(v, VariableTS)]
+             else:
+                 all_relevant_variables = [
+                     v for v in all_variables_of_component.values() if isinstance(v, VariableTS) and v.is_binary
+                 ]
+             for variable in all_relevant_variables:
+                 self.equate_indices(variable, indices, system_model)
+
+         penalty = self.aggregation_parameters.penalty_of_period_freedom
+         if (self.aggregation_parameters.percentage_of_period_freedom > 0) and penalty != 0:
+             for label, variable in self.variables.items():
+                 system_model.effect_collection_model.add_share_to_penalty(
+                     f'Aggregation_penalty__{label}', variable, penalty
+                 )
+
+     def equate_indices(
+         self, variable: Variable, indices: Tuple[np.ndarray, np.ndarray], system_model: SystemModel
+     ) -> Equation:
+         # Equation:
+         # eq1: x(p1,t) - x(p3,t) = 0, where p1 and p3 are in the same cluster and t = 0..N_p
+         length = len(indices[0])
+         assert len(indices[0]) == len(indices[1]), 'The length of the indices must match!'
+
+         eq = create_equation(f'Equate_indices_of_{variable.label}', self)
+         eq.add_summand(variable, 1, indices_of_variable=indices[0])
+         eq.add_summand(variable, -1, indices_of_variable=indices[1])
+
+         # Correction (so far only for binary variables):
+         if variable.is_binary and self.aggregation_parameters.percentage_of_period_freedom > 0:
+             # correction variables (as many as there are indices in eq):
+             var_k1 = create_variable(f'Korr1_{variable.label}', self, length, is_binary=True)
+             var_k0 = create_variable(f'Korr0_{variable.label}', self, length, is_binary=True)
+             # the equation is extended ...
+             # --> On(p3) can be 0/1 independently of On(p1,t)!
+             # eq1: On(p1,t) - On(p3,t) + K1(p3,t) - K0(p3,t) = 0
+             # --> the correction of On(p3) can be:
+             #     On(p1,t) = 1 -> On(p3) can be 0 -> K0=1 (, K1=0)
+             #     On(p1,t) = 0 -> On(p3) can be 1 -> K1=1 (, K0=0)
+             eq.add_summand(var_k1, +1)
+             eq.add_summand(var_k0, -1)
+
+             # interlock var_k1 and var_k0:
+             # eq: var_k0(t) + var_k1(t) <= 1.1
+             eq_lock = create_equation(f'lock_K0andK1_{variable.label}', self, eq_type='ineq')
+             eq_lock.add_summand(var_k0, 1)
+             eq_lock.add_summand(var_k1, 1)
+             eq_lock.add_constant(1.1)
+
+             # limit the number of corrections:
+             # eq: sum(K) <= n_Corr_max
+             eq_max = create_equation(f'Nr_of_Corrections_{variable.label}', self, eq_type='ineq')
+             eq_max.add_summand(var_k1, 1, as_sum=True)
+             eq_max.add_summand(var_k0, 1, as_sum=True)
+             eq_max.add_constant(
+                 round(self.aggregation_parameters.percentage_of_period_freedom / 100 * var_k1.length)
+             )  # maximum number of corrections
+         return eq
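
As a quick sanity check of the correction logic (plain arithmetic, no flixOpt objects): enumerating all binary combinations shows that the equality constraint plus the K0/K1 lock admit exactly the four intended states, i.e. a deviation between periods activates exactly one correction variable, and the correction budget follows the rounding rule used above:

    from itertools import product

    # Feasible states of On(p1,t) - On(p3,t) + K1 - K0 = 0 with K0 + K1 <= 1.1:
    for on_p1, on_p3, k1, k0 in product((0, 1), repeat=4):
        if on_p1 - on_p3 + k1 - k0 == 0 and k0 + k1 <= 1.1:
            print(on_p1, on_p3, k1, k0)
    # -> (0,0,0,0), (0,1,1,0), (1,0,0,1), (1,1,0,0)

    # Correction budget: with 5% freedom over 60 paired indices, at most 3 corrections.
    print(round(5 / 100 * 60))  # 3
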