flixopt 2.2.0b0__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in the public registry.

Potentially problematic release — this version of flixopt might be problematic.
Files changed (63)
  1. flixopt/__init__.py +35 -1
  2. flixopt/aggregation.py +60 -81
  3. flixopt/calculation.py +381 -196
  4. flixopt/components.py +1022 -359
  5. flixopt/config.py +553 -191
  6. flixopt/core.py +475 -1315
  7. flixopt/effects.py +477 -214
  8. flixopt/elements.py +591 -344
  9. flixopt/features.py +403 -957
  10. flixopt/flow_system.py +781 -293
  11. flixopt/interface.py +1159 -189
  12. flixopt/io.py +50 -55
  13. flixopt/linear_converters.py +384 -92
  14. flixopt/modeling.py +759 -0
  15. flixopt/network_app.py +789 -0
  16. flixopt/plotting.py +273 -135
  17. flixopt/results.py +639 -383
  18. flixopt/solvers.py +25 -21
  19. flixopt/structure.py +928 -442
  20. flixopt/utils.py +34 -5
  21. flixopt-3.0.0.dist-info/METADATA +209 -0
  22. flixopt-3.0.0.dist-info/RECORD +26 -0
  23. {flixopt-2.2.0b0.dist-info → flixopt-3.0.0.dist-info}/WHEEL +1 -1
  24. flixopt-3.0.0.dist-info/top_level.txt +1 -0
  25. docs/examples/00-Minimal Example.md +0 -5
  26. docs/examples/01-Basic Example.md +0 -5
  27. docs/examples/02-Complex Example.md +0 -10
  28. docs/examples/03-Calculation Modes.md +0 -5
  29. docs/examples/index.md +0 -5
  30. docs/faq/contribute.md +0 -49
  31. docs/faq/index.md +0 -3
  32. docs/images/architecture_flixOpt-pre2.0.0.png +0 -0
  33. docs/images/architecture_flixOpt.png +0 -0
  34. docs/images/flixopt-icon.svg +0 -1
  35. docs/javascripts/mathjax.js +0 -18
  36. docs/release-notes/_template.txt +0 -32
  37. docs/release-notes/index.md +0 -7
  38. docs/release-notes/v2.0.0.md +0 -93
  39. docs/release-notes/v2.0.1.md +0 -12
  40. docs/release-notes/v2.1.0.md +0 -31
  41. docs/release-notes/v2.2.0.md +0 -55
  42. docs/user-guide/Mathematical Notation/Bus.md +0 -33
  43. docs/user-guide/Mathematical Notation/Effects, Penalty & Objective.md +0 -132
  44. docs/user-guide/Mathematical Notation/Flow.md +0 -26
  45. docs/user-guide/Mathematical Notation/Investment.md +0 -115
  46. docs/user-guide/Mathematical Notation/LinearConverter.md +0 -21
  47. docs/user-guide/Mathematical Notation/Piecewise.md +0 -49
  48. docs/user-guide/Mathematical Notation/Storage.md +0 -44
  49. docs/user-guide/Mathematical Notation/index.md +0 -22
  50. docs/user-guide/Mathematical Notation/others.md +0 -3
  51. docs/user-guide/index.md +0 -124
  52. flixopt/config.yaml +0 -10
  53. flixopt-2.2.0b0.dist-info/METADATA +0 -146
  54. flixopt-2.2.0b0.dist-info/RECORD +0 -59
  55. flixopt-2.2.0b0.dist-info/top_level.txt +0 -5
  56. pics/architecture_flixOpt-pre2.0.0.png +0 -0
  57. pics/architecture_flixOpt.png +0 -0
  58. pics/flixOpt_plotting.jpg +0 -0
  59. pics/flixopt-icon.svg +0 -1
  60. pics/pics.pptx +0 -0
  61. scripts/gen_ref_pages.py +0 -54
  62. tests/ressources/Zeitreihen2020.csv +0 -35137
  63. {flixopt-2.2.0b0.dist-info → flixopt-3.0.0.dist-info}/licenses/LICENSE +0 -0
flixopt/__init__.py CHANGED
@@ -2,6 +2,15 @@
 This module bundles all common functionality of flixopt and sets up the logging
 """

+import warnings
+from importlib.metadata import PackageNotFoundError, version
+
+try:
+    __version__ = version('flixopt')
+except PackageNotFoundError:
+    # Package is not installed (development mode without editable install)
+    __version__ = '0.0.0.dev0'
+
 from .commons import (
     CONFIG,
     AggregatedCalculation,
@@ -32,4 +41,29 @@ from .commons import (
     solvers,
 )

-CONFIG.load_config()
+# === Runtime warning suppression for third-party libraries ===
+# These warnings are from dependencies and cannot be fixed by end users.
+# They are suppressed at runtime to provide a cleaner user experience.
+# These filters match the test configuration in pyproject.toml for consistency.
+
+# tsam: Time series aggregation library
+# - UserWarning: Informational message about minimal value constraints during clustering.
+warnings.filterwarnings('ignore', category=UserWarning, message='.*minimal value.*exceeds.*', module='tsam')
+# TODO: Might be able to fix it in flixopt?
+
+# linopy: Linear optimization library
+# - UserWarning: Coordinate mismatch warnings that don't affect functionality and are expected.
+warnings.filterwarnings(
+    'ignore', category=UserWarning, message='Coordinates across variables not equal', module='linopy'
+)
+# - FutureWarning: join parameter default will change in future versions
+warnings.filterwarnings(
+    'ignore',
+    category=FutureWarning,
+    message="In a future version of xarray the default value for join will change from join='outer' to join='exact'",
+    module='linopy',
+)
+
+# numpy: Core numerical library
+# - RuntimeWarning: Binary incompatibility warnings from compiled extensions (safe to ignore). numpy 1->2
+warnings.filterwarnings('ignore', category=RuntimeWarning, message='numpy\\.ndarray size changed')
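
Both additions use only the standard library, so their effect is easy to check from user code. A minimal sketch, assuming flixopt 3.0.0 is installed in the environment; the opt-back-in call at the end is purely illustrative, not required by the package:

import warnings

import flixopt

# __version__ is resolved from the installed distribution metadata at import time.
print(flixopt.__version__)  # '3.0.0' here; falls back to '0.0.0.dev0' without package metadata

# The warning filters are registered on import; downstream code can override them again,
# e.g. to see the suppressed linopy UserWarnings while debugging:
warnings.filterwarnings('default', category=UserWarning, module='linopy')
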
flixopt/aggregation.py CHANGED
@@ -3,16 +3,15 @@ This module contains the Aggregation functionality for the flixopt framework.
 Through this, aggregating TimeSeriesData is possible.
 """

+from __future__ import annotations
+
 import copy
 import logging
 import pathlib
 import timeit
-import warnings
-from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
+from typing import TYPE_CHECKING

-import linopy
 import numpy as np
-import pandas as pd

 try:
     import tsam.timeseriesaggregation as tsam
@@ -22,19 +21,20 @@ except ImportError:
     TSAM_AVAILABLE = False

 from .components import Storage
-from .core import Scalar, TimeSeriesData
-from .elements import Component
-from .flow_system import FlowSystem
 from .structure import (
-    Element,
-    Model,
-    SystemModel,
+    FlowSystemModel,
+    Submodel,
 )

 if TYPE_CHECKING:
+    import linopy
+    import pandas as pd
     import plotly.graph_objects as go

-warnings.filterwarnings('ignore', category=DeprecationWarning)
+    from .core import Scalar, TimeSeriesData
+    from .elements import Component
+    from .flow_system import FlowSystem
+
 logger = logging.getLogger('flixopt')


@@ -49,9 +49,9 @@ class Aggregation:
         hours_per_time_step: Scalar,
         hours_per_period: Scalar,
         nr_of_periods: int = 8,
-        weights: Dict[str, float] = None,
-        time_series_for_high_peaks: List[str] = None,
-        time_series_for_low_peaks: List[str] = None,
+        weights: dict[str, float] | None = None,
+        time_series_for_high_peaks: list[str] | None = None,
+        time_series_for_low_peaks: list[str] | None = None,
     ):
         """
         Args:
@@ -76,9 +76,9 @@
         self.time_series_for_high_peaks = time_series_for_high_peaks or []
         self.time_series_for_low_peaks = time_series_for_low_peaks or []

-        self.aggregated_data: Optional[pd.DataFrame] = None
+        self.aggregated_data: pd.DataFrame | None = None
         self.clustering_duration_seconds = None
-        self.tsam: Optional[tsam.TimeSeriesAggregation] = None
+        self.tsam: tsam.TimeSeriesAggregation | None = None

     def cluster(self) -> None:
         """
@@ -141,7 +141,7 @@
     def use_extreme_periods(self):
         return self.time_series_for_high_peaks or self.time_series_for_low_peaks

-    def plot(self, colormap: str = 'viridis', show: bool = True, save: Optional[pathlib.Path] = None) -> 'go.Figure':
+    def plot(self, colormap: str = 'viridis', show: bool = True, save: pathlib.Path | None = None) -> go.Figure:
         from . import plotting

         df_org = self.original_data.copy().rename(
@@ -163,14 +163,14 @@
             figure_like=fig,
             default_path=pathlib.Path('aggregated data.html'),
             default_filetype='.html',
-            user_path=None if isinstance(save, bool) else pathlib.Path(save),
+            user_path=save,
             show=show,
-            save=True if save else False,
+            save=save is not None,
         )

         return fig

-    def get_cluster_indices(self) -> Dict[str, List[np.ndarray]]:
+    def get_cluster_indices(self) -> dict[str, list[np.ndarray]]:
         """
         Generates a dictionary that maps each cluster to a list of index vectors representing the time steps
         assigned to that cluster for each period.
@@ -193,7 +193,7 @@

         return index_vectors

-    def get_equation_indices(self, skip_first_index_of_period: bool = True) -> Tuple[np.ndarray, np.ndarray]:
+    def get_equation_indices(self, skip_first_index_of_period: bool = True) -> tuple[np.ndarray, np.ndarray]:
         """
         Generates pairs of indices for the equations by comparing index vectors of the same cluster.
         If `skip_first_index_of_period` is True, the first index of each period is skipped.
@@ -202,7 +202,7 @@
             skip_first_index_of_period (bool): Whether to include or skip the first index of each period.

         Returns:
-            Tuple[np.ndarray, np.ndarray]: Two arrays of indices.
+            tuple[np.ndarray, np.ndarray]: Two arrays of indices.
         """
         idx_var1 = []
         idx_var2 = []
@@ -238,8 +238,8 @@ class AggregationParameters:
         aggregate_data_and_fix_non_binary_vars: bool,
         percentage_of_period_freedom: float = 0,
         penalty_of_period_freedom: float = 0,
-        time_series_for_high_peaks: List[TimeSeriesData] = None,
-        time_series_for_low_peaks: List[TimeSeriesData] = None,
+        time_series_for_high_peaks: list[TimeSeriesData] | None = None,
+        time_series_for_low_peaks: list[TimeSeriesData] | None = None,
     ):
         """
         Initializes aggregation parameters for time series data
@@ -265,43 +265,43 @@
         self.aggregate_data_and_fix_non_binary_vars = aggregate_data_and_fix_non_binary_vars
         self.percentage_of_period_freedom = percentage_of_period_freedom
         self.penalty_of_period_freedom = penalty_of_period_freedom
-        self.time_series_for_high_peaks: List[TimeSeriesData] = time_series_for_high_peaks or []
-        self.time_series_for_low_peaks: List[TimeSeriesData] = time_series_for_low_peaks or []
+        self.time_series_for_high_peaks: list[TimeSeriesData] = time_series_for_high_peaks or []
+        self.time_series_for_low_peaks: list[TimeSeriesData] = time_series_for_low_peaks or []

     @property
     def use_extreme_periods(self):
         return self.time_series_for_high_peaks or self.time_series_for_low_peaks

     @property
-    def labels_for_high_peaks(self) -> List[str]:
-        return [ts.label for ts in self.time_series_for_high_peaks]
+    def labels_for_high_peaks(self) -> list[str]:
+        return [ts.name for ts in self.time_series_for_high_peaks]

     @property
-    def labels_for_low_peaks(self) -> List[str]:
-        return [ts.label for ts in self.time_series_for_low_peaks]
+    def labels_for_low_peaks(self) -> list[str]:
+        return [ts.name for ts in self.time_series_for_low_peaks]

     @property
-    def use_low_peaks(self):
-        return self.time_series_for_low_peaks is not None
+    def use_low_peaks(self) -> bool:
+        return bool(self.time_series_for_low_peaks)


-class AggregationModel(Model):
-    """The AggregationModel holds equations and variables related to the Aggregation of a FLowSystem.
+class AggregationModel(Submodel):
+    """The AggregationModel holds equations and variables related to the Aggregation of a FlowSystem.
     It creates Equations that equates indices of variables, and introduces penalties related to binary variables, that
     escape the equation to their related binaries in other periods"""

     def __init__(
         self,
-        model: SystemModel,
+        model: FlowSystemModel,
         aggregation_parameters: AggregationParameters,
         flow_system: FlowSystem,
         aggregation_data: Aggregation,
-        components_to_clusterize: Optional[List[Component]],
+        components_to_clusterize: list[Component] | None,
     ):
         """
         Modeling-Element for "index-equating"-equations
         """
-        super().__init__(model, label_of_element='Aggregation', label_full='Aggregation')
+        super().__init__(model, label_of_element='Aggregation', label_of_model='Aggregation')
         self.flow_system = flow_system
         self.aggregation_parameters = aggregation_parameters
         self.aggregation_data = aggregation_data
@@ -315,40 +315,39 @@ class AggregationModel(Model):

         indices = self.aggregation_data.get_equation_indices(skip_first_index_of_period=True)

-        time_variables: Set[str] = {k for k, v in self._model.variables.data.items() if 'time' in v.indexes}
-        binary_variables: Set[str] = {k for k, v in self._model.variables.data.items() if k in self._model.binaries}
-        binary_time_variables: Set[str] = time_variables & binary_variables
+        time_variables: set[str] = {
+            name for name in self._model.variables if 'time' in self._model.variables[name].dims
+        }
+        binary_variables: set[str] = set(self._model.variables.binaries)
+        binary_time_variables: set[str] = time_variables & binary_variables

         for component in components:
             if isinstance(component, Storage) and not self.aggregation_parameters.fix_storage_flows:
                 continue # Fix Nothing in The Storage

-            all_variables_of_component = set(component.model.variables)
+            all_variables_of_component = set(component.submodel.variables)

             if self.aggregation_parameters.aggregate_data_and_fix_non_binary_vars:
-                relevant_variables = component.model.variables[all_variables_of_component & time_variables]
+                relevant_variables = component.submodel.variables[all_variables_of_component & time_variables]
             else:
-                relevant_variables = component.model.variables[all_variables_of_component & binary_time_variables]
+                relevant_variables = component.submodel.variables[all_variables_of_component & binary_time_variables]
             for variable in relevant_variables:
-                self._equate_indices(component.model.variables[variable], indices)
+                self._equate_indices(component.submodel.variables[variable], indices)

         penalty = self.aggregation_parameters.penalty_of_period_freedom
         if (self.aggregation_parameters.percentage_of_period_freedom > 0) and penalty != 0:
             for variable in self.variables_direct.values():
                 self._model.effects.add_share_to_penalty('Aggregation', variable * penalty)

-    def _equate_indices(self, variable: linopy.Variable, indices: Tuple[np.ndarray, np.ndarray]) -> None:
+    def _equate_indices(self, variable: linopy.Variable, indices: tuple[np.ndarray, np.ndarray]) -> None:
         assert len(indices[0]) == len(indices[1]), 'The length of the indices must match!!'
         length = len(indices[0])

         # Gleichung:
         # eq1: x(p1,t) - x(p3,t) = 0 # wobei p1 und p3 im gleichen Cluster sind und t = 0..N_p
-        con = self.add(
-            self._model.add_constraints(
-                variable.isel(time=indices[0]) - variable.isel(time=indices[1]) == 0,
-                name=f'{self.label_full}|equate_indices|{variable.name}',
-            ),
-            f'equate_indices|{variable.name}',
+        con = self.add_constraints(
+            variable.isel(time=indices[0]) - variable.isel(time=indices[1]) == 0,
+            short_name=f'equate_indices|{variable.name}',
         )

         # Korrektur: (bisher nur für Binärvariablen:)
@@ -356,23 +355,11 @@ class AggregationModel(Model):
             variable.name in self._model.variables.binaries
             and self.aggregation_parameters.percentage_of_period_freedom > 0
         ):
-            var_k1 = self.add(
-                self._model.add_variables(
-                    binary=True,
-                    coords={'time': variable.isel(time=indices[0]).indexes['time']},
-                    name=f'{self.label_full}|correction1|{variable.name}',
-                ),
-                f'correction1|{variable.name}',
-            )
+            sel = variable.isel(time=indices[0])
+            coords = {d: sel.indexes[d] for d in sel.dims}
+            var_k1 = self.add_variables(binary=True, coords=coords, short_name=f'correction1|{variable.name}')

-            var_k0 = self.add(
-                self._model.add_variables(
-                    binary=True,
-                    coords={'time': variable.isel(time=indices[0]).indexes['time']},
-                    name=f'{self.label_full}|correction0|{variable.name}',
-                ),
-                f'correction0|{variable.name}',
-            )
+            var_k0 = self.add_variables(binary=True, coords=coords, short_name=f'correction0|{variable.name}')

             # equation extends ...
             # --> On(p3) can be 0/1 independent of On(p1,t)!
@@ -383,21 +370,13 @@ class AggregationModel(Model):
             con.lhs += 1 * var_k1 - 1 * var_k0

             # interlock var_k1 and var_K2:
-            # eq: var_k0(t)+var_k1(t) <= 1.1
-            self.add(
-                self._model.add_constraints(
-                    var_k0 + var_k1 <= 1.1, name=f'{self.label_full}|lock_k0_and_k1|{variable.name}'
-                ),
-                f'lock_k0_and_k1|{variable.name}',
-            )
+            # eq: var_k0(t)+var_k1(t) <= 1
+            self.add_constraints(var_k0 + var_k1 <= 1, short_name=f'lock_k0_and_k1|{variable.name}')

             # Begrenzung der Korrektur-Anzahl:
             # eq: sum(K) <= n_Corr_max
-            self.add(
-                self._model.add_constraints(
-                    sum(var_k0) + sum(var_k1)
-                    <= round(self.aggregation_parameters.percentage_of_period_freedom / 100 * length),
-                    name=f'{self.label_full}|limit_corrections|{variable.name}',
-                ),
-                f'limit_corrections|{variable.name}',
+            limit = int(np.floor(self.aggregation_parameters.percentage_of_period_freedom / 100 * length))
+            self.add_constraints(
+                var_k0.sum(dim='time') + var_k1.sum(dim='time') <= limit,
+                short_name=f'limit_corrections|{variable.name}',
             )
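
For orientation, the index pairing that get_equation_indices produces and _equate_indices turns into constraints can be sketched with plain numpy. This is a toy illustration with a made-up cluster assignment, not flixopt API; it only mirrors the documented behaviour (index vectors per cluster, first step of each period skipped):

import numpy as np

# Hypothetical cluster assignment: 4 periods of 3 time steps each,
# periods 0 and 2 share cluster 'A', periods 1 and 3 share cluster 'B'.
cluster_of_period = {0: 'A', 1: 'B', 2: 'A', 3: 'B'}
steps_per_period = 3

index_vectors: dict[str, list[np.ndarray]] = {}
for period, cluster in cluster_of_period.items():
    start = period * steps_per_period
    index_vectors.setdefault(cluster, []).append(np.arange(start, start + steps_per_period))

# Pair index vectors of the same cluster, skipping the first step of each period
# (mirroring skip_first_index_of_period=True).
idx_var1, idx_var2 = [], []
for vectors in index_vectors.values():
    for i in range(len(vectors)):
        for j in range(i + 1, len(vectors)):
            idx_var1.append(vectors[i][1:])
            idx_var2.append(vectors[j][1:])

idx1, idx2 = np.concatenate(idx_var1), np.concatenate(idx_var2)
print(idx1, idx2)  # the model then adds constraints of the form x[idx1] - x[idx2] == 0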