flixopt 3.0.1__py3-none-any.whl → 6.0.0rc7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. flixopt/__init__.py +57 -49
  2. flixopt/carrier.py +159 -0
  3. flixopt/clustering/__init__.py +51 -0
  4. flixopt/clustering/base.py +1746 -0
  5. flixopt/clustering/intercluster_helpers.py +201 -0
  6. flixopt/color_processing.py +372 -0
  7. flixopt/comparison.py +819 -0
  8. flixopt/components.py +848 -270
  9. flixopt/config.py +853 -496
  10. flixopt/core.py +111 -98
  11. flixopt/effects.py +294 -284
  12. flixopt/elements.py +484 -223
  13. flixopt/features.py +220 -118
  14. flixopt/flow_system.py +2026 -389
  15. flixopt/interface.py +504 -286
  16. flixopt/io.py +1718 -55
  17. flixopt/linear_converters.py +291 -230
  18. flixopt/modeling.py +304 -181
  19. flixopt/network_app.py +2 -1
  20. flixopt/optimization.py +788 -0
  21. flixopt/optimize_accessor.py +373 -0
  22. flixopt/plot_result.py +143 -0
  23. flixopt/plotting.py +1177 -1034
  24. flixopt/results.py +1331 -372
  25. flixopt/solvers.py +12 -4
  26. flixopt/statistics_accessor.py +2412 -0
  27. flixopt/stats_accessor.py +75 -0
  28. flixopt/structure.py +954 -120
  29. flixopt/topology_accessor.py +676 -0
  30. flixopt/transform_accessor.py +2277 -0
  31. flixopt/types.py +120 -0
  32. flixopt-6.0.0rc7.dist-info/METADATA +290 -0
  33. flixopt-6.0.0rc7.dist-info/RECORD +36 -0
  34. {flixopt-3.0.1.dist-info → flixopt-6.0.0rc7.dist-info}/WHEEL +1 -1
  35. flixopt/aggregation.py +0 -382
  36. flixopt/calculation.py +0 -672
  37. flixopt/commons.py +0 -51
  38. flixopt/utils.py +0 -86
  39. flixopt-3.0.1.dist-info/METADATA +0 -209
  40. flixopt-3.0.1.dist-info/RECORD +0 -26
  41. {flixopt-3.0.1.dist-info → flixopt-6.0.0rc7.dist-info}/licenses/LICENSE +0 -0
  42. {flixopt-3.0.1.dist-info → flixopt-6.0.0rc7.dist-info}/top_level.txt +0 -0
flixopt/aggregation.py DELETED
@@ -1,382 +0,0 @@
- """
- This module contains the aggregation functionality for the flixopt framework.
- It enables the aggregation of TimeSeriesData.
- """
-
- from __future__ import annotations
-
- import copy
- import logging
- import pathlib
- import timeit
- from typing import TYPE_CHECKING
-
- import numpy as np
-
- try:
-     import tsam.timeseriesaggregation as tsam
-
-     TSAM_AVAILABLE = True
- except ImportError:
-     TSAM_AVAILABLE = False
-
- from .components import Storage
- from .structure import (
-     FlowSystemModel,
-     Submodel,
- )
-
- if TYPE_CHECKING:
-     import linopy
-     import pandas as pd
-     import plotly.graph_objects as go
-
-     from .core import Scalar, TimeSeriesData
-     from .elements import Component
-     from .flow_system import FlowSystem
-
- logger = logging.getLogger('flixopt')
-
-
- class Aggregation:
-     """
-     Organizing class for time series aggregation.
-     """
-
-     def __init__(
-         self,
-         original_data: pd.DataFrame,
-         hours_per_time_step: Scalar,
-         hours_per_period: Scalar,
-         nr_of_periods: int = 8,
-         weights: dict[str, float] | None = None,
-         time_series_for_high_peaks: list[str] | None = None,
-         time_series_for_low_peaks: list[str] | None = None,
-     ):
-         """
-         Args:
-             original_data: The original data to aggregate.
-             hours_per_time_step: The duration of each time step in hours.
-             hours_per_period: The duration of each period in hours.
-             nr_of_periods: The number of typical periods to use in the aggregation.
-             weights: The weights for aggregation. If None, all time series are weighted equally.
-             time_series_for_high_peaks: List of time series used to explicitly select periods with high values.
-             time_series_for_low_peaks: List of time series used to explicitly select periods with low values.
-         """
-         if not TSAM_AVAILABLE:
-             raise ImportError(
-                 "The 'tsam' package is required for clustering functionality. Install it with 'pip install tsam'."
-             )
-         self.original_data = copy.deepcopy(original_data)
-         self.hours_per_time_step = hours_per_time_step
-         self.hours_per_period = hours_per_period
-         self.nr_of_periods = nr_of_periods
-         self.nr_of_time_steps = len(self.original_data.index)
-         self.weights = weights or {}
-         self.time_series_for_high_peaks = time_series_for_high_peaks or []
-         self.time_series_for_low_peaks = time_series_for_low_peaks or []
-
-         self.aggregated_data: pd.DataFrame | None = None
-         self.clustering_duration_seconds = None
-         self.tsam: tsam.TimeSeriesAggregation | None = None
-
-     def cluster(self) -> None:
-         """
-         Performs the time series aggregation.
-         """
-         start_time = timeit.default_timer()
-         # Create the tsam aggregation object
-         self.tsam = tsam.TimeSeriesAggregation(
-             self.original_data,
-             noTypicalPeriods=self.nr_of_periods,
-             hoursPerPeriod=self.hours_per_period,
-             resolution=self.hours_per_time_step,
-             clusterMethod='k_means',
-             # If extreme periods are to be included, use tsam's 'new_cluster_center' method
-             extremePeriodMethod='new_cluster_center' if self.use_extreme_periods else 'None',
-             weightDict={name: weight for name, weight in self.weights.items() if name in self.original_data.columns},
-             addPeakMax=self.time_series_for_high_peaks,
-             addPeakMin=self.time_series_for_low_peaks,
-         )
-
-         self.tsam.createTypicalPeriods()  # run the aggregation/clustering
-         self.aggregated_data = self.tsam.predictOriginalData()
-
-         self.clustering_duration_seconds = timeit.default_timer() - start_time  # measure elapsed time
-         logger.info(self.describe_clusters())
-
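Note: for orientation, a minimal sketch of how this removed class was typically driven. The DataFrame, column names, and parameter values below are illustrative assumptions, not taken from the package; the import only works on flixopt 3.x, where the module still exists.

import numpy as np
import pandas as pd

from flixopt.aggregation import Aggregation  # module removed in 6.0.0

# Illustrative hourly profiles for one year; column names are assumptions.
idx = pd.date_range('2024-01-01', periods=8760, freq='h')
profiles = pd.DataFrame(
    {'demand': np.random.rand(8760), 'price': np.random.rand(8760)}, index=idx
)

agg = Aggregation(
    original_data=profiles,
    hours_per_time_step=1,
    hours_per_period=24,                     # one typical day per period
    nr_of_periods=8,
    time_series_for_high_peaks=['demand'],   # keep the peak-demand day as an extreme period
)
agg.cluster()                                # runs the tsam k-means clustering
print(agg.aggregated_data.head())            # original index, values from typical periods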
-     def describe_clusters(self) -> str:
-         description = {}
-         for cluster, index_vectors_of_cluster in self.get_cluster_indices().items():
-             description[cluster] = [
-                 str(index_vector[0]) + '...' + str(index_vector[-1])
-                 for index_vector in index_vectors_of_cluster
-             ]
-
-         if self.use_extreme_periods:
-             # Drop the profile time series from the description
-             # (deep copy so the nested dicts in tsam are not mutated):
-             extreme_periods = copy.deepcopy(self.tsam.extremePeriods)
-             for key in extreme_periods:
-                 del extreme_periods[key]['profile']
-         else:
-             extreme_periods = {}
-
-         return (
-             f'{"":#^80}\n'
-             f'{" Clustering ":#^80}\n'
-             f'periods_order:\n'
-             f'{self.tsam.clusterOrder}\n'
-             f'clusterPeriodNoOccur:\n'
-             f'{self.tsam.clusterPeriodNoOccur}\n'
-             f'index_vectors_of_clusters:\n'
-             f'{description}\n'
-             f'{"":#^80}\n'
-             f'extreme_periods:\n'
-             f'{extreme_periods}\n'
-             f'{"":#^80}'
-         )
-
-     @property
-     def use_extreme_periods(self):
-         return self.time_series_for_high_peaks or self.time_series_for_low_peaks
-
-     def plot(self, colormap: str = 'viridis', show: bool = True, save: pathlib.Path | None = None) -> go.Figure:
-         from . import plotting
-
-         df_org = self.original_data.copy().rename(
-             columns={col: f'Original - {col}' for col in self.original_data.columns}
-         )
-         df_agg = self.aggregated_data.copy().rename(
-             columns={col: f'Aggregated - {col}' for col in self.aggregated_data.columns}
-         )
-         fig = plotting.with_plotly(df_org, 'line', colors=colormap)
-         for trace in fig.data:
-             trace.update(dict(line=dict(dash='dash')))
-         fig = plotting.with_plotly(df_agg, 'line', colors=colormap, fig=fig)
-
-         fig.update_layout(
-             title='Original vs Aggregated Data (original = ---)', xaxis_title='Index', yaxis_title='Value'
-         )
-
-         plotting.export_figure(
-             figure_like=fig,
-             default_path=pathlib.Path('aggregated data.html'),
-             default_filetype='.html',
-             user_path=save,
-             show=show,
-             save=save is not None,
-         )
-
-         return fig
-
-     def get_cluster_indices(self) -> dict[str, list[np.ndarray]]:
-         """
-         Generates a dictionary that maps each cluster to a list of index vectors representing the time steps
-         assigned to that cluster for each period.
-
-         Returns:
-             dict: {cluster_0: [index_vector_3, index_vector_7, ...],
-                    cluster_1: [index_vector_1],
-                    ...}
-         """
-         clusters = self.tsam.clusterPeriodNoOccur.keys()
-         index_vectors = {cluster: [] for cluster in clusters}
-
-         period_length = len(self.tsam.stepIdx)
-         total_steps = len(self.tsam.timeSeries)
-
-         for period, cluster_id in enumerate(self.tsam.clusterOrder):
-             start_idx = period * period_length
-             end_idx = np.min([start_idx + period_length, total_steps])
-             index_vectors[cluster_id].append(np.arange(start_idx, end_idx))
-
-         return index_vectors
-
-     def get_equation_indices(self, skip_first_index_of_period: bool = True) -> tuple[np.ndarray, np.ndarray]:
-         """
-         Generates pairs of indices for the equations by comparing index vectors of the same cluster.
-         If `skip_first_index_of_period` is True, the first index of each period is skipped.
-
-         Args:
-             skip_first_index_of_period (bool): Whether to skip the first index of each period.
-
-         Returns:
-             tuple[np.ndarray, np.ndarray]: Two arrays of indices.
-         """
-         idx_var1 = []
-         idx_var2 = []
-
-         # Iterate through the cluster index vectors
-         for index_vectors in self.get_cluster_indices().values():
-             if len(index_vectors) <= 1:  # Only proceed if the cluster has more than one period
-                 continue
-
-             # Process the first vector, optionally skipping its first index
-             first_vector = index_vectors[0][1:] if skip_first_index_of_period else index_vectors[0]
-
-             # Compare the first vector to the others in the cluster
-             for other_vector in index_vectors[1:]:
-                 if skip_first_index_of_period:
-                     other_vector = other_vector[1:]
-
-                 # Compare elements up to the minimum length of both vectors
-                 min_len = min(len(first_vector), len(other_vector))
-                 idx_var1.extend(first_vector[:min_len])
-                 idx_var2.extend(other_vector[:min_len])
-
-         # Convert lists to numpy arrays
-         return np.array(idx_var1), np.array(idx_var2)
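To illustrate the pairing logic: if a cluster occurs in periods 0 and 2 with a period length of 4 time steps, each step of the first occurrence is equated with the corresponding step of every later occurrence, and the first step of each period is skipped by default. A standalone numpy sketch with illustrative values:

import numpy as np

first = np.arange(0, 4)    # period 0 -> time steps 0..3
other = np.arange(8, 12)   # period 2 -> time steps 8..11

# skip_first_index_of_period=True drops the first step of each period
first, other = first[1:], other[1:]

idx_var1, idx_var2 = first, other
pairs = [(int(a), int(b)) for a, b in zip(idx_var1, idx_var2)]
print(pairs)  # [(1, 9), (2, 10), (3, 11)]
# The constraints built later read: x[1] == x[9], x[2] == x[10], x[3] == x[11]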
-
-
- class AggregationParameters:
-     def __init__(
-         self,
-         hours_per_period: float,
-         nr_of_periods: int,
-         fix_storage_flows: bool,
-         aggregate_data_and_fix_non_binary_vars: bool,
-         percentage_of_period_freedom: float = 0,
-         penalty_of_period_freedom: float = 0,
-         time_series_for_high_peaks: list[TimeSeriesData] | None = None,
-         time_series_for_low_peaks: list[TimeSeriesData] | None = None,
-     ):
-         """
-         Initializes aggregation parameters for time series data.
-
-         Args:
-             hours_per_period: Duration of each period in hours.
-             nr_of_periods: Number of typical periods to use in the aggregation.
-             fix_storage_flows: Whether to aggregate storage flows (load/unload); if other flows
-                 are fixed, fixing storage flows is usually not required.
-             aggregate_data_and_fix_non_binary_vars: Whether to aggregate all time series data, which allows
-                 fixing all time series variables (like flow_rate), or to fix only binary variables.
-                 If False, no time series data is changed. If True, the mathematical problem is
-                 simplified even further.
-             percentage_of_period_freedom: Specifies the maximum percentage (0-100) of binary values within each
-                 period that may deviate as "free variables", chosen by the solver (default is 0).
-                 This allows binary variables to be 'partly equated' between aggregated periods.
-             penalty_of_period_freedom: The penalty associated with each "free variable"; defaults to 0.
-                 Added to the penalty term.
-             time_series_for_high_peaks: List of TimeSeriesData used to explicitly select periods with high values.
-             time_series_for_low_peaks: List of TimeSeriesData used to explicitly select periods with low values.
-         """
-         self.hours_per_period = hours_per_period
-         self.nr_of_periods = nr_of_periods
-         self.fix_storage_flows = fix_storage_flows
-         self.aggregate_data_and_fix_non_binary_vars = aggregate_data_and_fix_non_binary_vars
-         self.percentage_of_period_freedom = percentage_of_period_freedom
-         self.penalty_of_period_freedom = penalty_of_period_freedom
-         self.time_series_for_high_peaks: list[TimeSeriesData] = time_series_for_high_peaks or []
-         self.time_series_for_low_peaks: list[TimeSeriesData] = time_series_for_low_peaks or []
-
-     @property
-     def use_extreme_periods(self):
-         return self.time_series_for_high_peaks or self.time_series_for_low_peaks
-
-     @property
-     def labels_for_high_peaks(self) -> list[str]:
-         return [ts.name for ts in self.time_series_for_high_peaks]
-
-     @property
-     def labels_for_low_peaks(self) -> list[str]:
-         return [ts.name for ts in self.time_series_for_low_peaks]
-
-     @property
-     def use_low_peaks(self) -> bool:
-         return bool(self.time_series_for_low_peaks)
-
-
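For orientation, a hypothetical construction of these parameters. All values are illustrative, and `peak_demand` stands in for a TimeSeriesData object from the surrounding framework:

from flixopt.aggregation import AggregationParameters  # module removed in 6.0.0

params = AggregationParameters(
    hours_per_period=24,
    nr_of_periods=8,
    fix_storage_flows=False,                      # leave storage flows free
    aggregate_data_and_fix_non_binary_vars=True,  # fix flow rates, not only binaries
    percentage_of_period_freedom=5,               # up to 5% of binaries may deviate
    penalty_of_period_freedom=1e3,                # penalty per deviating binary
    time_series_for_high_peaks=[peak_demand],     # `peak_demand`: hypothetical TimeSeriesData
)
assert params.use_extreme_periods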
- class AggregationModel(Submodel):
-     """The AggregationModel holds equations and variables related to the aggregation of a FlowSystem.
-     It creates equations that equate indices of variables with each other, and introduces penalties for
-     binary variables that are allowed to escape these equations relative to their related binaries in
-     other periods."""
-
-     def __init__(
-         self,
-         model: FlowSystemModel,
-         aggregation_parameters: AggregationParameters,
-         flow_system: FlowSystem,
-         aggregation_data: Aggregation,
-         components_to_clusterize: list[Component] | None,
-     ):
-         """
-         Modeling element for "index-equating" equations.
-         """
-         super().__init__(model, label_of_element='Aggregation', label_of_model='Aggregation')
-         self.flow_system = flow_system
-         self.aggregation_parameters = aggregation_parameters
-         self.aggregation_data = aggregation_data
-         self.components_to_clusterize = components_to_clusterize
-
-     def do_modeling(self):
-         if not self.components_to_clusterize:
-             components = self.flow_system.components.values()
-         else:
-             components = list(self.components_to_clusterize)
-
-         indices = self.aggregation_data.get_equation_indices(skip_first_index_of_period=True)
-
-         time_variables: set[str] = {
-             name for name in self._model.variables if 'time' in self._model.variables[name].dims
-         }
-         binary_variables: set[str] = set(self._model.variables.binaries)
-         binary_time_variables: set[str] = time_variables & binary_variables
-
-         for component in components:
-             if isinstance(component, Storage) and not self.aggregation_parameters.fix_storage_flows:
-                 continue  # fix nothing in the storage
-
-             all_variables_of_component = set(component.submodel.variables)
-
-             if self.aggregation_parameters.aggregate_data_and_fix_non_binary_vars:
-                 relevant_variables = component.submodel.variables[all_variables_of_component & time_variables]
-             else:
-                 relevant_variables = component.submodel.variables[all_variables_of_component & binary_time_variables]
-             for variable in relevant_variables:
-                 self._equate_indices(component.submodel.variables[variable], indices)
-
-         penalty = self.aggregation_parameters.penalty_of_period_freedom
-         if (self.aggregation_parameters.percentage_of_period_freedom > 0) and penalty != 0:
-             for variable in self.variables_direct.values():
-                 self._model.effects.add_share_to_penalty('Aggregation', variable * penalty)
-
-     def _equate_indices(self, variable: linopy.Variable, indices: tuple[np.ndarray, np.ndarray]) -> None:
-         assert len(indices[0]) == len(indices[1]), 'The lengths of the indices must match!'
-         length = len(indices[0])
-
-         # Equation:
-         # eq1: x(p1,t) - x(p3,t) = 0  # where p1 and p3 are in the same cluster and t = 0..N_p
-         con = self.add_constraints(
-             variable.isel(time=indices[0]) - variable.isel(time=indices[1]) == 0,
-             short_name=f'equate_indices|{variable.name}',
-         )
-
-         # Correction (so far only for binary variables):
-         if (
-             variable.name in self._model.variables.binaries
-             and self.aggregation_parameters.percentage_of_period_freedom > 0
-         ):
-             sel = variable.isel(time=indices[0])
-             coords = {d: sel.indexes[d] for d in sel.dims}
-             var_k1 = self.add_variables(binary=True, coords=coords, short_name=f'correction1|{variable.name}')
-
-             var_k0 = self.add_variables(binary=True, coords=coords, short_name=f'correction0|{variable.name}')
-
-             # The equation is extended ...
-             # --> On(p3,t) can be 0/1 independently of On(p1,t)!
-             # eq1: On(p1,t) - On(p3,t) + K1(p3,t) - K0(p3,t) = 0
-             # --> with the correction, On(p3,t) can be:
-             #     On(p1,t) = 1 -> On(p3,t) can be 0 -> K0=1 (, K1=0)
-             #     On(p1,t) = 0 -> On(p3,t) can be 1 -> K1=1 (, K0=0)
-             con.lhs += 1 * var_k1 - 1 * var_k0
-
-             # interlock var_k0 and var_k1:
-             # eq: var_k0(t) + var_k1(t) <= 1
-             self.add_constraints(var_k0 + var_k1 <= 1, short_name=f'lock_k0_and_k1|{variable.name}')
-
-             # Limit the number of corrections:
-             # eq: sum(K) <= n_corr_max
-             limit = int(np.floor(self.aggregation_parameters.percentage_of_period_freedom / 100 * length))
-             self.add_constraints(
-                 var_k0.sum(dim='time') + var_k1.sum(dim='time') <= limit,
-                 short_name=f'limit_corrections|{variable.name}',
-             )
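The correction mechanism can be checked by hand: under the relaxed equation On(p1,t) - On(p3,t) + K1 - K0 = 0 and the interlock K0 + K1 <= 1, a correction binary fires exactly when the two periods disagree. A standalone truth-table sketch (plain Python, no flixopt imports):

from itertools import product

# Enumerate all assignments that satisfy the correction equation and the interlock.
for on_p1, on_p3, k1, k0 in product((0, 1), repeat=4):
    if on_p1 - on_p3 + k1 - k0 == 0 and k0 + k1 <= 1:
        print(f'On(p1)={on_p1} On(p3)={on_p3} -> K1={k1} K0={k0}')
# Agreement (On(p1) == On(p3)) only admits K1 = K0 = 0;
# each disagreement forces exactly one correction binary to 1,
# which the budget sum(K0) + sum(K1) <= limit then counts.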