flixopt 2.2.0rc2__py3-none-any.whl → 3.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. flixopt/__init__.py +33 -4
  2. flixopt/aggregation.py +60 -80
  3. flixopt/calculation.py +403 -182
  4. flixopt/commons.py +1 -10
  5. flixopt/components.py +939 -448
  6. flixopt/config.py +553 -191
  7. flixopt/core.py +513 -846
  8. flixopt/effects.py +644 -178
  9. flixopt/elements.py +610 -355
  10. flixopt/features.py +394 -966
  11. flixopt/flow_system.py +736 -219
  12. flixopt/interface.py +1104 -302
  13. flixopt/io.py +103 -79
  14. flixopt/linear_converters.py +387 -95
  15. flixopt/modeling.py +757 -0
  16. flixopt/network_app.py +73 -39
  17. flixopt/plotting.py +294 -138
  18. flixopt/results.py +1254 -300
  19. flixopt/solvers.py +25 -21
  20. flixopt/structure.py +938 -396
  21. flixopt/utils.py +36 -12
  22. flixopt-3.0.1.dist-info/METADATA +209 -0
  23. flixopt-3.0.1.dist-info/RECORD +26 -0
  24. flixopt-3.0.1.dist-info/top_level.txt +1 -0
  25. docs/examples/00-Minimal Example.md +0 -5
  26. docs/examples/01-Basic Example.md +0 -5
  27. docs/examples/02-Complex Example.md +0 -10
  28. docs/examples/03-Calculation Modes.md +0 -5
  29. docs/examples/index.md +0 -5
  30. docs/faq/contribute.md +0 -61
  31. docs/faq/index.md +0 -3
  32. docs/images/architecture_flixOpt-pre2.0.0.png +0 -0
  33. docs/images/architecture_flixOpt.png +0 -0
  34. docs/images/flixopt-icon.svg +0 -1
  35. docs/javascripts/mathjax.js +0 -18
  36. docs/user-guide/Mathematical Notation/Bus.md +0 -33
  37. docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md +0 -132
  38. docs/user-guide/Mathematical Notation/Flow.md +0 -26
  39. docs/user-guide/Mathematical Notation/LinearConverter.md +0 -21
  40. docs/user-guide/Mathematical Notation/Piecewise.md +0 -49
  41. docs/user-guide/Mathematical Notation/Storage.md +0 -44
  42. docs/user-guide/Mathematical Notation/index.md +0 -22
  43. docs/user-guide/Mathematical Notation/others.md +0 -3
  44. docs/user-guide/index.md +0 -124
  45. flixopt/config.yaml +0 -10
  46. flixopt-2.2.0rc2.dist-info/METADATA +0 -167
  47. flixopt-2.2.0rc2.dist-info/RECORD +0 -54
  48. flixopt-2.2.0rc2.dist-info/top_level.txt +0 -5
  49. pics/architecture_flixOpt-pre2.0.0.png +0 -0
  50. pics/architecture_flixOpt.png +0 -0
  51. pics/flixOpt_plotting.jpg +0 -0
  52. pics/flixopt-icon.svg +0 -1
  53. pics/pics.pptx +0 -0
  54. scripts/extract_release_notes.py +0 -45
  55. scripts/gen_ref_pages.py +0 -54
  56. tests/ressources/Zeitreihen2020.csv +0 -35137
  57. {flixopt-2.2.0rc2.dist-info → flixopt-3.0.1.dist-info}/WHEEL +0 -0
  58. {flixopt-2.2.0rc2.dist-info → flixopt-3.0.1.dist-info}/licenses/LICENSE +0 -0
flixopt/__init__.py CHANGED
@@ -2,9 +2,14 @@
 This module bundles all common functionality of flixopt and sets up the logging
 """
 
-from importlib.metadata import version
+import warnings
+from importlib.metadata import PackageNotFoundError, version
 
-__version__ = version('flixopt')
+try:
+    __version__ = version('flixopt')
+except PackageNotFoundError:
+    # Package is not installed (development mode without editable install)
+    __version__ = '0.0.0.dev0'
 
 from .commons import (
     CONFIG,
@@ -22,7 +27,6 @@ from .commons import (
     Piecewise,
     PiecewiseConversion,
     PiecewiseEffects,
-    PiecewiseEffectsPerFlowHour,
     SegmentedCalculation,
     Sink,
     Source,
@@ -37,4 +41,29 @@ from .commons import (
     solvers,
 )
 
-CONFIG.load_config()
+# === Runtime warning suppression for third-party libraries ===
+# These warnings are from dependencies and cannot be fixed by end users.
+# They are suppressed at runtime to provide a cleaner user experience.
+# These filters match the test configuration in pyproject.toml for consistency.
+
+# tsam: Time series aggregation library
+# - UserWarning: Informational message about minimal value constraints during clustering.
+warnings.filterwarnings('ignore', category=UserWarning, message='.*minimal value.*exceeds.*', module='tsam')
+# TODO: Might be able to fix it in flixopt?
+
+# linopy: Linear optimization library
+# - UserWarning: Coordinate mismatch warnings that don't affect functionality and are expected.
+warnings.filterwarnings(
+    'ignore', category=UserWarning, message='Coordinates across variables not equal', module='linopy'
+)
+# - FutureWarning: join parameter default will change in future versions
+warnings.filterwarnings(
+    'ignore',
+    category=FutureWarning,
+    message="In a future version of xarray the default value for join will change from join='outer' to join='exact'",
+    module='linopy',
+)
+
+# numpy: Core numerical library
+# - RuntimeWarning: Binary incompatibility warnings from compiled extensions (safe to ignore). numpy 1->2
+warnings.filterwarnings('ignore', category=RuntimeWarning, message='numpy\\.ndarray size changed')
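
These filters are installed the moment the package is imported (where 2.2.0rc2 instead called CONFIG.load_config()). A downstream project that wants to see one of the suppressed messages again can therefore register its own filter after the import; a minimal sketch using only the standard library, with arguments mirroring the filters added above:

    import warnings

    import flixopt  # importing flixopt registers the 'ignore' filters shown in this hunk

    # Re-enable the linopy coordinate warning; newly registered filters take precedence over older ones.
    warnings.filterwarnings(
        'default', category=UserWarning, message='Coordinates across variables not equal', module='linopy'
    )

    # Or discard all programmatically installed filters and fall back to Python's defaults.
    warnings.resetwarnings()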
flixopt/aggregation.py CHANGED
@@ -3,16 +3,15 @@ This module contains the Aggregation functionality for the flixopt framework.
 Through this, aggregating TimeSeriesData is possible.
 """
 
+from __future__ import annotations
+
 import copy
 import logging
 import pathlib
 import timeit
-import warnings
-from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
+from typing import TYPE_CHECKING
 
-import linopy
 import numpy as np
-import pandas as pd
 
 try:
     import tsam.timeseriesaggregation as tsam
@@ -22,18 +21,20 @@ except ImportError:
     TSAM_AVAILABLE = False
 
 from .components import Storage
-from .core import Scalar, TimeSeriesData
-from .elements import Component
-from .flow_system import FlowSystem
 from .structure import (
-    Element,
-    Model,
-    SystemModel,
+    FlowSystemModel,
+    Submodel,
 )
 
 if TYPE_CHECKING:
+    import linopy
+    import pandas as pd
     import plotly.graph_objects as go
 
+    from .core import Scalar, TimeSeriesData
+    from .elements import Component
+    from .flow_system import FlowSystem
+
 logger = logging.getLogger('flixopt')
 
 
@@ -48,9 +49,9 @@ class Aggregation:
         hours_per_time_step: Scalar,
         hours_per_period: Scalar,
         nr_of_periods: int = 8,
-        weights: Dict[str, float] = None,
-        time_series_for_high_peaks: List[str] = None,
-        time_series_for_low_peaks: List[str] = None,
+        weights: dict[str, float] | None = None,
+        time_series_for_high_peaks: list[str] | None = None,
+        time_series_for_low_peaks: list[str] | None = None,
     ):
         """
         Args:
@@ -75,9 +76,9 @@ class Aggregation:
         self.time_series_for_high_peaks = time_series_for_high_peaks or []
         self.time_series_for_low_peaks = time_series_for_low_peaks or []
 
-        self.aggregated_data: Optional[pd.DataFrame] = None
+        self.aggregated_data: pd.DataFrame | None = None
         self.clustering_duration_seconds = None
-        self.tsam: Optional[tsam.TimeSeriesAggregation] = None
+        self.tsam: tsam.TimeSeriesAggregation | None = None
 
     def cluster(self) -> None:
         """
@@ -140,7 +141,7 @@ class Aggregation:
     def use_extreme_periods(self):
         return self.time_series_for_high_peaks or self.time_series_for_low_peaks
 
-    def plot(self, colormap: str = 'viridis', show: bool = True, save: Optional[pathlib.Path] = None) -> 'go.Figure':
+    def plot(self, colormap: str = 'viridis', show: bool = True, save: pathlib.Path | None = None) -> go.Figure:
         from . import plotting
 
         df_org = self.original_data.copy().rename(
@@ -162,14 +163,14 @@ class Aggregation:
             figure_like=fig,
             default_path=pathlib.Path('aggregated data.html'),
             default_filetype='.html',
-            user_path=None if isinstance(save, bool) else pathlib.Path(save),
+            user_path=save,
             show=show,
-            save=True if save else False,
+            save=save is not None,
         )
 
         return fig
 
-    def get_cluster_indices(self) -> Dict[str, List[np.ndarray]]:
+    def get_cluster_indices(self) -> dict[str, list[np.ndarray]]:
         """
         Generates a dictionary that maps each cluster to a list of index vectors representing the time steps
         assigned to that cluster for each period.
@@ -192,7 +193,7 @@ class Aggregation:
 
         return index_vectors
 
-    def get_equation_indices(self, skip_first_index_of_period: bool = True) -> Tuple[np.ndarray, np.ndarray]:
+    def get_equation_indices(self, skip_first_index_of_period: bool = True) -> tuple[np.ndarray, np.ndarray]:
         """
         Generates pairs of indices for the equations by comparing index vectors of the same cluster.
         If `skip_first_index_of_period` is True, the first index of each period is skipped.
@@ -201,7 +202,7 @@
            skip_first_index_of_period (bool): Whether to include or skip the first index of each period.
 
         Returns:
-            Tuple[np.ndarray, np.ndarray]: Two arrays of indices.
+            tuple[np.ndarray, np.ndarray]: Two arrays of indices.
         """
         idx_var1 = []
         idx_var2 = []
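
The docstring above describes the pairing only in prose: every period that tsam assigns to the same cluster contributes one index vector, and pairs of time-step indices are formed between periods of that cluster, optionally skipping the first index of each period. One plausible reading of the idea, sketched against a made-up cluster assignment rather than the package's actual implementation:

    import numpy as np

    # Hypothetical input in the shape returned by Aggregation.get_cluster_indices():
    # cluster label -> one index vector per period assigned to that cluster.
    cluster_indices = {
        'cluster_0': [np.array([0, 1, 2, 3]), np.array([8, 9, 10, 11])],
        'cluster_1': [np.array([4, 5, 6, 7])],
    }

    skip_first_index_of_period = True
    idx_var1, idx_var2 = [], []

    for vectors in cluster_indices.values():
        reference = vectors[0]  # treat the first period of each cluster as the reference
        for other in vectors[1:]:
            start = 1 if skip_first_index_of_period else 0
            idx_var1.extend(reference[start:])
            idx_var2.extend(other[start:])

    indices = np.array(idx_var1), np.array(idx_var2)
    # Each pair (indices[0][i], indices[1][i]) becomes one 'equate_indices' equality in AggregationModel.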
@@ -237,8 +238,8 @@ class AggregationParameters:
         aggregate_data_and_fix_non_binary_vars: bool,
         percentage_of_period_freedom: float = 0,
         penalty_of_period_freedom: float = 0,
-        time_series_for_high_peaks: List[TimeSeriesData] = None,
-        time_series_for_low_peaks: List[TimeSeriesData] = None,
+        time_series_for_high_peaks: list[TimeSeriesData] | None = None,
+        time_series_for_low_peaks: list[TimeSeriesData] | None = None,
     ):
         """
         Initializes aggregation parameters for time series data
@@ -264,43 +265,43 @@ class AggregationParameters:
         self.aggregate_data_and_fix_non_binary_vars = aggregate_data_and_fix_non_binary_vars
         self.percentage_of_period_freedom = percentage_of_period_freedom
         self.penalty_of_period_freedom = penalty_of_period_freedom
-        self.time_series_for_high_peaks: List[TimeSeriesData] = time_series_for_high_peaks or []
-        self.time_series_for_low_peaks: List[TimeSeriesData] = time_series_for_low_peaks or []
+        self.time_series_for_high_peaks: list[TimeSeriesData] = time_series_for_high_peaks or []
+        self.time_series_for_low_peaks: list[TimeSeriesData] = time_series_for_low_peaks or []
 
     @property
     def use_extreme_periods(self):
         return self.time_series_for_high_peaks or self.time_series_for_low_peaks
 
     @property
-    def labels_for_high_peaks(self) -> List[str]:
-        return [ts.label for ts in self.time_series_for_high_peaks]
+    def labels_for_high_peaks(self) -> list[str]:
+        return [ts.name for ts in self.time_series_for_high_peaks]
 
     @property
-    def labels_for_low_peaks(self) -> List[str]:
-        return [ts.label for ts in self.time_series_for_low_peaks]
+    def labels_for_low_peaks(self) -> list[str]:
+        return [ts.name for ts in self.time_series_for_low_peaks]
 
     @property
-    def use_low_peaks(self):
-        return self.time_series_for_low_peaks is not None
+    def use_low_peaks(self) -> bool:
+        return bool(self.time_series_for_low_peaks)
 
 
-class AggregationModel(Model):
-    """The AggregationModel holds equations and variables related to the Aggregation of a FLowSystem.
+class AggregationModel(Submodel):
+    """The AggregationModel holds equations and variables related to the Aggregation of a FlowSystem.
     It creates Equations that equates indices of variables, and introduces penalties related to binary variables, that
     escape the equation to their related binaries in other periods"""
 
     def __init__(
         self,
-        model: SystemModel,
+        model: FlowSystemModel,
         aggregation_parameters: AggregationParameters,
         flow_system: FlowSystem,
         aggregation_data: Aggregation,
-        components_to_clusterize: Optional[List[Component]],
+        components_to_clusterize: list[Component] | None,
     ):
         """
         Modeling-Element for "index-equating"-equations
         """
-        super().__init__(model, label_of_element='Aggregation', label_full='Aggregation')
+        super().__init__(model, label_of_element='Aggregation', label_of_model='Aggregation')
         self.flow_system = flow_system
         self.aggregation_parameters = aggregation_parameters
         self.aggregation_data = aggregation_data
@@ -314,40 +315,39 @@ class AggregationModel(Model):
 
         indices = self.aggregation_data.get_equation_indices(skip_first_index_of_period=True)
 
-        time_variables: Set[str] = {k for k, v in self._model.variables.data.items() if 'time' in v.indexes}
-        binary_variables: Set[str] = {k for k, v in self._model.variables.data.items() if k in self._model.binaries}
-        binary_time_variables: Set[str] = time_variables & binary_variables
+        time_variables: set[str] = {
+            name for name in self._model.variables if 'time' in self._model.variables[name].dims
+        }
+        binary_variables: set[str] = set(self._model.variables.binaries)
+        binary_time_variables: set[str] = time_variables & binary_variables
 
         for component in components:
             if isinstance(component, Storage) and not self.aggregation_parameters.fix_storage_flows:
                 continue  # Fix Nothing in The Storage
 
-            all_variables_of_component = set(component.model.variables)
+            all_variables_of_component = set(component.submodel.variables)
 
             if self.aggregation_parameters.aggregate_data_and_fix_non_binary_vars:
-                relevant_variables = component.model.variables[all_variables_of_component & time_variables]
+                relevant_variables = component.submodel.variables[all_variables_of_component & time_variables]
             else:
-                relevant_variables = component.model.variables[all_variables_of_component & binary_time_variables]
+                relevant_variables = component.submodel.variables[all_variables_of_component & binary_time_variables]
             for variable in relevant_variables:
-                self._equate_indices(component.model.variables[variable], indices)
+                self._equate_indices(component.submodel.variables[variable], indices)
 
         penalty = self.aggregation_parameters.penalty_of_period_freedom
         if (self.aggregation_parameters.percentage_of_period_freedom > 0) and penalty != 0:
             for variable in self.variables_direct.values():
                 self._model.effects.add_share_to_penalty('Aggregation', variable * penalty)
 
-    def _equate_indices(self, variable: linopy.Variable, indices: Tuple[np.ndarray, np.ndarray]) -> None:
+    def _equate_indices(self, variable: linopy.Variable, indices: tuple[np.ndarray, np.ndarray]) -> None:
         assert len(indices[0]) == len(indices[1]), 'The length of the indices must match!!'
         length = len(indices[0])
 
         # Gleichung:
         # eq1: x(p1,t) - x(p3,t) = 0  # wobei p1 und p3 im gleichen Cluster sind und t = 0..N_p
-        con = self.add(
-            self._model.add_constraints(
-                variable.isel(time=indices[0]) - variable.isel(time=indices[1]) == 0,
-                name=f'{self.label_full}|equate_indices|{variable.name}',
-            ),
-            f'equate_indices|{variable.name}',
+        con = self.add_constraints(
+            variable.isel(time=indices[0]) - variable.isel(time=indices[1]) == 0,
+            short_name=f'equate_indices|{variable.name}',
         )
 
         # Korrektur: (bisher nur für Binärvariablen:)
@@ -355,23 +355,11 @@ class AggregationModel(Model):
             variable.name in self._model.variables.binaries
            and self.aggregation_parameters.percentage_of_period_freedom > 0
         ):
-            var_k1 = self.add(
-                self._model.add_variables(
-                    binary=True,
-                    coords={'time': variable.isel(time=indices[0]).indexes['time']},
-                    name=f'{self.label_full}|correction1|{variable.name}',
-                ),
-                f'correction1|{variable.name}',
-            )
+            sel = variable.isel(time=indices[0])
+            coords = {d: sel.indexes[d] for d in sel.dims}
+            var_k1 = self.add_variables(binary=True, coords=coords, short_name=f'correction1|{variable.name}')
 
-            var_k0 = self.add(
-                self._model.add_variables(
-                    binary=True,
-                    coords={'time': variable.isel(time=indices[0]).indexes['time']},
-                    name=f'{self.label_full}|correction0|{variable.name}',
-                ),
-                f'correction0|{variable.name}',
-            )
+            var_k0 = self.add_variables(binary=True, coords=coords, short_name=f'correction0|{variable.name}')
 
             # equation extends ...
             # --> On(p3) can be 0/1 independent of On(p1,t)!
@@ -382,21 +370,13 @@ class AggregationModel(Model):
             con.lhs += 1 * var_k1 - 1 * var_k0
 
             # interlock var_k1 and var_K2:
-            # eq: var_k0(t)+var_k1(t) <= 1.1
-            self.add(
-                self._model.add_constraints(
-                    var_k0 + var_k1 <= 1.1, name=f'{self.label_full}|lock_k0_and_k1|{variable.name}'
-                ),
-                f'lock_k0_and_k1|{variable.name}',
-            )
+            # eq: var_k0(t)+var_k1(t) <= 1
+            self.add_constraints(var_k0 + var_k1 <= 1, short_name=f'lock_k0_and_k1|{variable.name}')
 
             # Begrenzung der Korrektur-Anzahl:
             # eq: sum(K) <= n_Corr_max
-            self.add(
-                self._model.add_constraints(
-                    sum(var_k0) + sum(var_k1)
-                    <= round(self.aggregation_parameters.percentage_of_period_freedom / 100 * length),
-                    name=f'{self.label_full}|limit_corrections|{variable.name}',
-                ),
-                f'limit_corrections|{variable.name}',
+            limit = int(np.floor(self.aggregation_parameters.percentage_of_period_freedom / 100 * length))
+            self.add_constraints(
+                var_k0.sum(dim='time') + var_k1.sum(dim='time') <= limit,
+                short_name=f'limit_corrections|{variable.name}',
             )
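
Besides moving to the Submodel helpers (add_variables/add_constraints with a short_name), this final hunk also tightens the correction budget: the binary correction variables are now summed explicitly over the time dimension, and the allowed number of corrections is floored rather than rounded. A small worked comparison of the two formulas from the hunk, with illustrative numbers:

    import numpy as np

    percentage_of_period_freedom = 2.7  # illustrative value
    length = 100                        # illustrative number of equated index pairs for one variable

    old_limit = round(percentage_of_period_freedom / 100 * length)          # 2.2.0rc2: round(2.7) -> 3
    new_limit = int(np.floor(percentage_of_period_freedom / 100 * length))  # 3.0.1: floor(2.7) -> 2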