flixopt 2.1.1__py3-none-any.whl → 2.2.0b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flixopt might be problematic. Click here for more details.

flixopt/flow_system.py CHANGED
@@ -16,10 +16,17 @@ from rich.console import Console
16
16
  from rich.pretty import Pretty
17
17
 
18
18
  from . import io as fx_io
19
- from .core import NumericData, NumericDataTS, TimeSeries, TimeSeriesCollection, TimeSeriesData
20
- from .effects import Effect, EffectCollection, EffectTimeSeries, EffectValuesDict, EffectValuesUser
19
+ from .core import Scalar, ScenarioData, TimeSeries, TimeSeriesCollection, TimeSeriesData, TimestepData
20
+ from .effects import (
21
+ Effect,
22
+ EffectCollection,
23
+ EffectTimeSeries,
24
+ EffectValuesDict,
25
+ EffectValuesUserScenario,
26
+ EffectValuesUserTimestep,
27
+ )
21
28
  from .elements import Bus, Component, Flow
22
- from .structure import CLASS_REGISTRY, Element, SystemModel, get_compact_representation, get_str_representation
29
+ from .structure import CLASS_REGISTRY, Element, SystemModel
23
30
 
24
31
  if TYPE_CHECKING:
25
32
  import pyvis
@@ -35,23 +42,31 @@ class FlowSystem:
35
42
  def __init__(
36
43
  self,
37
44
  timesteps: pd.DatetimeIndex,
45
+ scenarios: Optional[pd.Index] = None,
38
46
  hours_of_last_timestep: Optional[float] = None,
39
47
  hours_of_previous_timesteps: Optional[Union[int, float, np.ndarray]] = None,
48
+ scenario_weights: Optional[ScenarioData] = None,
40
49
  ):
41
50
  """
42
51
  Args:
43
52
  timesteps: The timesteps of the model.
53
+ scenarios: The scenarios of the model.
44
54
  hours_of_last_timestep: The duration of the last time step. Uses the last time interval if not specified
45
55
  hours_of_previous_timesteps: The duration of previous timesteps.
46
56
  If None, the first time increment of time_series is used.
47
57
  This is needed to calculate previous durations (for example consecutive_on_hours).
48
58
  If you use an array, take care that its long enough to cover all previous values!
59
+ scenario_weights: The weights of the scenarios. If None, all scenarios have the same weight. All weights are normalized to 1.
49
60
  """
50
61
  self.time_series_collection = TimeSeriesCollection(
51
62
  timesteps=timesteps,
63
+ scenarios=scenarios,
52
64
  hours_of_last_timestep=hours_of_last_timestep,
53
65
  hours_of_previous_timesteps=hours_of_previous_timesteps,
54
66
  )
67
+ self.scenario_weights = self.create_time_series(
68
+ 'scenario_weights', scenario_weights, has_time_dim=False, has_scenario_dim=True
69
+ )
55
70
 
56
71
  # defaults:
57
72
  self.components: Dict[str, Component] = {}
@@ -66,10 +81,15 @@ class FlowSystem:
66
81
  timesteps_extra = pd.DatetimeIndex(ds.attrs['timesteps_extra'], name='time')
67
82
  hours_of_last_timestep = TimeSeriesCollection.calculate_hours_per_timestep(timesteps_extra).isel(time=-1).item()
68
83
 
84
+ scenarios = pd.Index(ds.attrs['scenarios'], name='scenario') if ds.attrs.get('scenarios') is not None else None
85
+ scenario_weights = fx_io.insert_dataarray(ds.attrs['scenario_weights'], ds)
86
+
69
87
  flow_system = FlowSystem(
70
88
  timesteps=timesteps_extra[:-1],
71
89
  hours_of_last_timestep=hours_of_last_timestep,
72
90
  hours_of_previous_timesteps=ds.attrs['hours_of_previous_timesteps'],
91
+ scenarios=scenarios,
92
+ scenario_weights=scenario_weights,
73
93
  )
74
94
 
75
95
  structure = fx_io.insert_dataarray({key: ds.attrs[key] for key in ['components', 'buses', 'effects']}, ds)
@@ -90,11 +110,15 @@ class FlowSystem:
90
110
  """
91
111
  timesteps_extra = pd.DatetimeIndex(data['timesteps_extra'], name='time')
92
112
  hours_of_last_timestep = TimeSeriesCollection.calculate_hours_per_timestep(timesteps_extra).isel(time=-1).item()
113
+ scenarios = pd.Index(data['scenarios'], name='scenario') if data.get('scenarios') is not None else None
114
+ scenario_weights = data.get('scenario_weights').selected_data if data.get('scenario_weights') is not None else None
93
115
 
94
116
  flow_system = FlowSystem(
95
117
  timesteps=timesteps_extra[:-1],
96
118
  hours_of_last_timestep=hours_of_last_timestep,
97
119
  hours_of_previous_timesteps=data['hours_of_previous_timesteps'],
120
+ scenarios=scenarios,
121
+ scenario_weights=scenario_weights,
98
122
  )
99
123
 
100
124
  flow_system.add_elements(*[Bus.from_dict(bus) for bus in data['buses'].values()])
@@ -170,6 +194,8 @@ class FlowSystem:
170
194
  },
171
195
  'timesteps_extra': [date.isoformat() for date in self.time_series_collection.timesteps_extra],
172
196
  'hours_of_previous_timesteps': self.time_series_collection.hours_of_previous_timesteps,
197
+ 'scenarios': self.time_series_collection.scenarios.tolist() if self.time_series_collection.scenarios is not None else None,
198
+ 'scenario_weights': self.scenario_weights,
173
199
  }
174
200
  if data_mode == 'data':
175
201
  return fx_io.replace_timeseries(data, 'data')
@@ -184,7 +210,7 @@ class FlowSystem:
184
210
  Args:
185
211
  constants_in_dataset: If True, constants are included as Dataset variables.
186
212
  """
187
- ds = self.time_series_collection.to_dataset(include_constants=constants_in_dataset)
213
+ ds = self.time_series_collection.as_dataset()
188
214
  ds.attrs = self.as_dict(data_mode='name')
189
215
  return ds
190
216
 
@@ -268,41 +294,80 @@ class FlowSystem:
268
294
  def transform_data(self):
269
295
  if not self._connected:
270
296
  self._connect_network()
297
+ self.scenario_weights = self.create_time_series(
298
+ 'scenario_weights', self.scenario_weights, has_time_dim=False, has_scenario_dim=True
299
+ )
271
300
  for element in self.all_elements.values():
272
301
  element.transform_data(self)
273
302
 
274
303
  def create_time_series(
275
304
  self,
276
305
  name: str,
277
- data: Optional[Union[NumericData, TimeSeriesData, TimeSeries]],
278
- needs_extra_timestep: bool = False,
279
- ) -> Optional[TimeSeries]:
306
+ data: Optional[Union[TimestepData, TimeSeriesData, TimeSeries]],
307
+ has_time_dim: bool = True,
308
+ has_scenario_dim: bool = True,
309
+ has_extra_timestep: bool = False,
310
+ ) -> Optional[Union[Scalar, TimeSeries]]:
280
311
  """
281
- Tries to create a TimeSeries from NumericData Data and adds it to the time_series_collection
312
+ Tries to create a TimeSeries from TimestepData and adds it to the time_series_collection
282
313
  If the data already is a TimeSeries, nothing happens and the TimeSeries gets reset and returned
283
314
  If the data is a TimeSeriesData, it is converted to a TimeSeries, and the aggregation weights are applied.
284
315
  If the data is None, nothing happens.
316
+
317
+ Args:
318
+ name: The name of the TimeSeries
319
+ data: The data to create a TimeSeries from
320
+ has_time_dim: Whether the data has a time dimension
321
+ has_scenario_dim: Whether the data has a scenario dimension
322
+ has_extra_timestep: Whether the data has an extra timestep
285
323
  """
324
+ if not has_time_dim and not has_scenario_dim:
325
+ raise ValueError('At least one of the dimensions must be present')
286
326
 
287
327
  if data is None:
288
328
  return None
289
- elif isinstance(data, TimeSeries):
329
+
330
+ if not has_time_dim and self.time_series_collection.scenarios is None:
331
+ return data
332
+
333
+ if isinstance(data, TimeSeries):
290
334
  data.restore_data()
291
335
  if data in self.time_series_collection:
292
336
  return data
293
- return self.time_series_collection.create_time_series(
294
- data=data.active_data, name=name, needs_extra_timestep=needs_extra_timestep
337
+ return self.time_series_collection.add_time_series(
338
+ data=data.selected_data,
339
+ name=name,
340
+ has_time_dim=has_time_dim,
341
+ has_scenario_dim=has_scenario_dim,
342
+ has_extra_timestep=has_extra_timestep,
295
343
  )
296
- return self.time_series_collection.create_time_series(
297
- data=data, name=name, needs_extra_timestep=needs_extra_timestep
344
+ elif isinstance(data, TimeSeriesData):
345
+ data.label = name
346
+ return self.time_series_collection.add_time_series(
347
+ data=data.data,
348
+ name=name,
349
+ has_time_dim=has_time_dim,
350
+ has_scenario_dim=has_scenario_dim,
351
+ has_extra_timestep=has_extra_timestep,
352
+ aggregation_weight=data.agg_weight,
353
+ aggregation_group=data.agg_group,
354
+ )
355
+ return self.time_series_collection.add_time_series(
356
+ data=data,
357
+ name=name,
358
+ has_time_dim=has_time_dim,
359
+ has_scenario_dim=has_scenario_dim,
360
+ has_extra_timestep=has_extra_timestep,
298
361
  )
299
362
 
300
363
  def create_effect_time_series(
301
364
  self,
302
365
  label_prefix: Optional[str],
303
- effect_values: EffectValuesUser,
366
+ effect_values: Union[EffectValuesUserScenario, EffectValuesUserTimestep],
304
367
  label_suffix: Optional[str] = None,
305
- ) -> Optional[EffectTimeSeries]:
368
+ has_time_dim: bool = True,
369
+ has_scenario_dim: bool = True,
370
+ ) -> Optional[Union[EffectTimeSeries, EffectValuesDict]]:
306
371
  """
307
372
  Transform EffectValues to EffectTimeSeries.
308
373
  Creates a TimeSeries for each key in the nested_values dictionary, using the value as the data.
@@ -310,13 +375,31 @@ class FlowSystem:
310
375
  The resulting label of the TimeSeries is the label of the parent_element,
311
376
  followed by the label of the Effect in the nested_values and the label_suffix.
312
377
  If the key in the EffectValues is None, the alias 'Standard_Effect' is used
378
+
379
+ Args:
380
+ label_prefix: Prefix for the TimeSeries name
381
+ effect_values: Dictionary of EffectValues
382
+ label_suffix: Suffix for the TimeSeries name
383
+ has_time_dim: Whether the data has a time dimension
384
+ has_scenario_dim: Whether the data has a scenario dimension
313
385
  """
386
+ if not has_time_dim and not has_scenario_dim:
387
+ raise ValueError('At least one of the dimensions must be present')
388
+
314
389
  effect_values: Optional[EffectValuesDict] = self.effects.create_effect_values_dict(effect_values)
315
390
  if effect_values is None:
316
391
  return None
317
392
 
393
+ if not has_time_dim and self.time_series_collection.scenarios is None:
394
+ return effect_values
395
+
318
396
  return {
319
- effect: self.create_time_series('|'.join(filter(None, [label_prefix, effect, label_suffix])), value)
397
+ effect: self.create_time_series(
398
+ name='|'.join(filter(None, [label_prefix, effect, label_suffix])),
399
+ data=value,
400
+ has_time_dim=has_time_dim,
401
+ has_scenario_dim=has_scenario_dim,
402
+ )
320
403
  for effect, value in effect_values.items()
321
404
  }
322
405
 
flixopt/interface.py CHANGED
@@ -4,14 +4,14 @@ These are tightly connected to features.py
4
4
  """
5
5
 
6
6
  import logging
7
- from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Union
7
+ from typing import TYPE_CHECKING, Dict, Iterator, List, Literal, Optional, Union
8
8
 
9
9
  from .config import CONFIG
10
- from .core import NumericData, NumericDataTS, Scalar
10
+ from .core import NumericDataTS, Scalar, ScenarioData, TimestepData
11
11
  from .structure import Interface, register_class_for_io
12
12
 
13
13
  if TYPE_CHECKING: # for type checking and preventing circular imports
14
- from .effects import EffectValuesUser, EffectValuesUserScalar
14
+ from .effects import EffectValuesUserScenario, EffectValuesUserTimestep
15
15
  from .flow_system import FlowSystem
16
16
 
17
17
 
@@ -20,7 +20,7 @@ logger = logging.getLogger('flixopt')
20
20
 
21
21
  @register_class_for_io
22
22
  class Piece(Interface):
23
- def __init__(self, start: NumericData, end: NumericData):
23
+ def __init__(self, start: TimestepData, end: TimestepData):
24
24
  """
25
25
  Define a Piece, which is part of a Piecewise object.
26
26
 
@@ -30,10 +30,15 @@ class Piece(Interface):
30
30
  """
31
31
  self.start = start
32
32
  self.end = end
33
+ self.has_time_dim = False
33
34
 
34
35
  def transform_data(self, flow_system: 'FlowSystem', name_prefix: str):
35
- self.start = flow_system.create_time_series(f'{name_prefix}|start', self.start)
36
- self.end = flow_system.create_time_series(f'{name_prefix}|end', self.end)
36
+ self.start = flow_system.create_time_series(
37
+ name=f'{name_prefix}|start', data=self.start, has_time_dim=self.has_time_dim, has_scenario_dim=True
38
+ )
39
+ self.end = flow_system.create_time_series(
40
+ name=f'{name_prefix}|end', data=self.end, has_time_dim=self.has_time_dim, has_scenario_dim=True
41
+ )
37
42
 
38
43
 
39
44
  @register_class_for_io
@@ -46,6 +51,17 @@ class Piecewise(Interface):
46
51
  pieces: The pieces of the piecewise.
47
52
  """
48
53
  self.pieces = pieces
54
+ self._has_time_dim = False
55
+
56
+ @property
57
+ def has_time_dim(self):
58
+ return self._has_time_dim
59
+
60
+ @has_time_dim.setter
61
+ def has_time_dim(self, value):
62
+ self._has_time_dim = value
63
+ for piece in self.pieces:
64
+ piece.has_time_dim = value
49
65
 
50
66
  def __len__(self):
51
67
  return len(self.pieces)
@@ -73,6 +89,18 @@ class PiecewiseConversion(Interface):
73
89
  piecewises: Dict of Piecewises defining the conversion factors. flow labels as keys, piecewise as values
74
90
  """
75
91
  self.piecewises = piecewises
92
+ self._has_time_dim = True
93
+ self.has_time_dim = True # Initial propagation
94
+
95
+ @property
96
+ def has_time_dim(self):
97
+ return self._has_time_dim
98
+
99
+ @has_time_dim.setter
100
+ def has_time_dim(self, value):
101
+ self._has_time_dim = value
102
+ for piecewise in self.piecewises.values():
103
+ piecewise.has_time_dim = value
76
104
 
77
105
  def items(self):
78
106
  return self.piecewises.items()
@@ -94,12 +122,24 @@ class PiecewiseEffects(Interface):
94
122
  """
95
123
  self.piecewise_origin = piecewise_origin
96
124
  self.piecewise_shares = piecewise_shares
125
+ self._has_time_dim = False
126
+ self.has_time_dim = False # Initial propagation
127
+
128
+ @property
129
+ def has_time_dim(self):
130
+ return self._has_time_dim
131
+
132
+ @has_time_dim.setter
133
+ def has_time_dim(self, value):
134
+ self._has_time_dim = value
135
+ self.piecewise_origin.has_time_dim = value
136
+ for piecewise in self.piecewise_shares.values():
137
+ piecewise.has_time_dim = value
97
138
 
98
139
  def transform_data(self, flow_system: 'FlowSystem', name_prefix: str):
99
- raise NotImplementedError('PiecewiseEffects is not yet implemented for non scalar shares')
100
- # self.piecewise_origin.transform_data(flow_system, f'{name_prefix}|PiecewiseEffects|origin')
101
- # for name, piecewise in self.piecewise_shares.items():
102
- # piecewise.transform_data(flow_system, f'{name_prefix}|PiecewiseEffects|{name}')
140
+ self.piecewise_origin.transform_data(flow_system, f'{name_prefix}|PiecewiseEffects|origin')
141
+ for effect, piecewise in self.piecewise_shares.items():
142
+ piecewise.transform_data(flow_system, f'{name_prefix}|PiecewiseEffects|{effect}')
103
143
 
104
144
 
105
145
  @register_class_for_io
@@ -110,14 +150,15 @@ class InvestParameters(Interface):
110
150
 
111
151
  def __init__(
112
152
  self,
113
- fixed_size: Optional[Union[int, float]] = None,
114
- minimum_size: Optional[Union[int, float]] = None,
115
- maximum_size: Optional[Union[int, float]] = None,
153
+ fixed_size: Optional[ScenarioData] = None,
154
+ minimum_size: Optional[ScenarioData] = None,
155
+ maximum_size: Optional[ScenarioData] = None,
116
156
  optional: bool = True, # Investition ist weglassbar
117
- fix_effects: Optional['EffectValuesUserScalar'] = None,
118
- specific_effects: Optional['EffectValuesUserScalar'] = None, # costs per Flow-Unit/Storage-Size/...
157
+ fix_effects: Optional['EffectValuesUserScenario'] = None,
158
+ specific_effects: Optional['EffectValuesUserScenario'] = None, # costs per Flow-Unit/Storage-Size/...
119
159
  piecewise_effects: Optional[PiecewiseEffects] = None,
120
- divest_effects: Optional['EffectValuesUserScalar'] = None,
160
+ divest_effects: Optional['EffectValuesUserScenario'] = None,
161
+ investment_scenarios: Optional[Union[Literal['individual'], List[Union[int, str]]]] = None,
121
162
  ):
122
163
  """
123
164
  Args:
@@ -128,58 +169,99 @@ class InvestParameters(Interface):
128
169
  specific_effects: Specific costs, e.g., in €/kW_nominal or €/m²_nominal.
129
170
  Example: {costs: 3, CO2: 0.3} with costs and CO2 representing an Object of class Effect
130
171
  (Attention: Annualize costs to chosen period!)
131
- piecewise_effects: Linear piecewise relation [invest_pieces, cost_pieces].
132
- Example 1:
133
- [ [5, 25, 25, 100], # size in kW
134
- {costs: [50,250,250,800], #
135
- PE: [5, 25, 25, 100] # kWh_PrimaryEnergy
136
- }
137
- ]
138
- Example 2 (if only standard-effect):
139
- [ [5, 25, 25, 100], # kW # size in kW
140
- [50,250,250,800] # value for standart effect, typically €
141
- ] # €
142
- (Attention: Annualize costs to chosen period!)
143
- (Args 'specific_effects' and 'fix_effects' can be used in parallel to Investsizepieces)
144
- minimum_size: Min nominal value (only if: size_is_fixed = False). Defaults to CONFIG.modeling.EPSILON.
145
- maximum_size: Max nominal value (only if: size_is_fixed = False). Defaults to CONFIG.modeling.BIG.
172
+ piecewise_effects: Define the effects of the investment as a piecewise function of the size of the investment.
173
+ minimum_size: Minimum possible size of the investment.
174
+ maximum_size: Maximum possible size of the investment.
175
+ investment_scenarios: For which scenarios to optimize the size for.
176
+ - 'individual': Optimize the size of each scenario individually
177
+ - List of scenario names: Optimize the size for the passed scenario names (equal size in all). All other scenarios will have the size 0.
178
+ - None: Equals to a list of all scenarios (default)
146
179
  """
147
- self.fix_effects: EffectValuesUser = fix_effects or {}
148
- self.divest_effects: EffectValuesUser = divest_effects or {}
180
+ self.fix_effects: EffectValuesUserScenario = fix_effects if fix_effects is not None else {}
181
+ self.divest_effects: EffectValuesUserScenario = divest_effects if divest_effects is not None else {}
149
182
  self.fixed_size = fixed_size
150
183
  self.optional = optional
151
- self.specific_effects: EffectValuesUser = specific_effects or {}
184
+ self.specific_effects: EffectValuesUserScenario = specific_effects if specific_effects is not None else {}
152
185
  self.piecewise_effects = piecewise_effects
153
186
  self._minimum_size = minimum_size if minimum_size is not None else CONFIG.modeling.EPSILON
154
187
  self._maximum_size = maximum_size if maximum_size is not None else CONFIG.modeling.BIG # default maximum
188
+ self.investment_scenarios = investment_scenarios
155
189
 
156
- def transform_data(self, flow_system: 'FlowSystem'):
157
- self.fix_effects = flow_system.effects.create_effect_values_dict(self.fix_effects)
158
- self.divest_effects = flow_system.effects.create_effect_values_dict(self.divest_effects)
159
- self.specific_effects = flow_system.effects.create_effect_values_dict(self.specific_effects)
190
+ def transform_data(self, flow_system: 'FlowSystem', name_prefix: str):
191
+ self._plausibility_checks(flow_system)
192
+ self.fix_effects = flow_system.create_effect_time_series(
193
+ label_prefix=name_prefix,
194
+ effect_values=self.fix_effects,
195
+ label_suffix='fix_effects',
196
+ has_time_dim=False,
197
+ has_scenario_dim=True,
198
+ )
199
+ self.divest_effects = flow_system.create_effect_time_series(
200
+ label_prefix=name_prefix,
201
+ effect_values=self.divest_effects,
202
+ label_suffix='divest_effects',
203
+ has_time_dim=False,
204
+ has_scenario_dim=True,
205
+ )
206
+ self.specific_effects = flow_system.create_effect_time_series(
207
+ label_prefix=name_prefix,
208
+ effect_values=self.specific_effects,
209
+ label_suffix='specific_effects',
210
+ has_time_dim=False,
211
+ has_scenario_dim=True,
212
+ )
213
+ if self.piecewise_effects is not None:
214
+ self.piecewise_effects.has_time_dim = False
215
+ self.piecewise_effects.transform_data(flow_system, f'{name_prefix}|PiecewiseEffects')
216
+
217
+ self._minimum_size = flow_system.create_time_series(
218
+ f'{name_prefix}|minimum_size', self.minimum_size, has_time_dim=False, has_scenario_dim=True
219
+ )
220
+ self._maximum_size = flow_system.create_time_series(
221
+ f'{name_prefix}|maximum_size', self.maximum_size, has_time_dim=False, has_scenario_dim=True
222
+ )
223
+ if self.fixed_size is not None:
224
+ self.fixed_size = flow_system.create_time_series(
225
+ f'{name_prefix}|fixed_size', self.fixed_size, has_time_dim=False, has_scenario_dim=True
226
+ )
227
+
228
+ def _plausibility_checks(self, flow_system):
229
+ if isinstance(self.investment_scenarios, list):
230
+ if not set(self.investment_scenarios).issubset(flow_system.time_series_collection.scenarios):
231
+ raise ValueError(
232
+ f'Some scenarios in investment_scenarios are not present in the time_series_collection: '
233
+ f'{set(self.investment_scenarios) - set(flow_system.time_series_collection.scenarios)}'
234
+ )
235
+ if self.investment_scenarios is not None:
236
+ if not self.optional:
237
+ if self.minimum_size is not None or self.fixed_size is not None:
238
+ logger.warning(
239
+ 'When using investment_scenarios, minimum_size and fixed_size should only be used if optional is True. '
240
+ 'Otherwise the investment cannot be 0 in certain scenarios while being non-zero in others.'
241
+ )
160
242
 
161
243
  @property
162
244
  def minimum_size(self):
163
- return self.fixed_size or self._minimum_size
245
+ return self.fixed_size if self.fixed_size is not None else self._minimum_size
164
246
 
165
247
  @property
166
248
  def maximum_size(self):
167
- return self.fixed_size or self._maximum_size
249
+ return self.fixed_size if self.fixed_size is not None else self._maximum_size
168
250
 
169
251
 
170
252
  @register_class_for_io
171
253
  class OnOffParameters(Interface):
172
254
  def __init__(
173
255
  self,
174
- effects_per_switch_on: Optional['EffectValuesUser'] = None,
175
- effects_per_running_hour: Optional['EffectValuesUser'] = None,
176
- on_hours_total_min: Optional[int] = None,
177
- on_hours_total_max: Optional[int] = None,
178
- consecutive_on_hours_min: Optional[NumericData] = None,
179
- consecutive_on_hours_max: Optional[NumericData] = None,
180
- consecutive_off_hours_min: Optional[NumericData] = None,
181
- consecutive_off_hours_max: Optional[NumericData] = None,
182
- switch_on_total_max: Optional[int] = None,
256
+ effects_per_switch_on: Optional['EffectValuesUserTimestep'] = None,
257
+ effects_per_running_hour: Optional['EffectValuesUserTimestep'] = None,
258
+ on_hours_total_min: Optional[ScenarioData] = None,
259
+ on_hours_total_max: Optional[ScenarioData] = None,
260
+ consecutive_on_hours_min: Optional[TimestepData] = None,
261
+ consecutive_on_hours_max: Optional[TimestepData] = None,
262
+ consecutive_off_hours_min: Optional[TimestepData] = None,
263
+ consecutive_off_hours_max: Optional[TimestepData] = None,
264
+ switch_on_total_max: Optional[ScenarioData] = None,
183
265
  force_switch_on: bool = False,
184
266
  ):
185
267
  """
@@ -202,8 +284,8 @@ class OnOffParameters(Interface):
202
284
  switch_on_total_max: max nr of switchOn operations
203
285
  force_switch_on: force creation of switch on variable, even if there is no switch_on_total_max
204
286
  """
205
- self.effects_per_switch_on: EffectValuesUser = effects_per_switch_on or {}
206
- self.effects_per_running_hour: EffectValuesUser = effects_per_running_hour or {}
287
+ self.effects_per_switch_on: EffectValuesUserTimestep = effects_per_switch_on or {}
288
+ self.effects_per_running_hour: EffectValuesUserTimestep = effects_per_running_hour or {}
207
289
  self.on_hours_total_min: Scalar = on_hours_total_min
208
290
  self.on_hours_total_max: Scalar = on_hours_total_max
209
291
  self.consecutive_on_hours_min: NumericDataTS = consecutive_on_hours_min
@@ -232,6 +314,15 @@ class OnOffParameters(Interface):
232
314
  self.consecutive_off_hours_max = flow_system.create_time_series(
233
315
  f'{name_prefix}|consecutive_off_hours_max', self.consecutive_off_hours_max
234
316
  )
317
+ self.on_hours_total_max = flow_system.create_time_series(
318
+ f'{name_prefix}|on_hours_total_max', self.on_hours_total_max, has_time_dim=False
319
+ )
320
+ self.on_hours_total_min = flow_system.create_time_series(
321
+ f'{name_prefix}|on_hours_total_min', self.on_hours_total_min, has_time_dim=False
322
+ )
323
+ self.switch_on_total_max = flow_system.create_time_series(
324
+ f'{name_prefix}|switch_on_total_max', self.switch_on_total_max, has_time_dim=False
325
+ )
235
326
 
236
327
  @property
237
328
  def use_off(self) -> bool:
flixopt/io.py CHANGED
@@ -23,7 +23,7 @@ def replace_timeseries(obj, mode: Literal['name', 'stats', 'data'] = 'name'):
23
23
  return [replace_timeseries(v, mode) for v in obj]
24
24
  elif isinstance(obj, TimeSeries): # Adjust this based on the actual class
25
25
  if obj.all_equal:
26
- return obj.active_data.values[0].item()
26
+ return obj.selected_data.values.max().item()
27
27
  elif mode == 'name':
28
28
  return f'::::{obj.name}'
29
29
  elif mode == 'stats':
@@ -44,7 +44,7 @@ def insert_dataarray(obj, ds: xr.Dataset):
44
44
  return [insert_dataarray(v, ds) for v in obj]
45
45
  elif isinstance(obj, str) and obj.startswith('::::'):
46
46
  da = ds[obj[4:]]
47
- if da.isel(time=-1).isnull():
47
+ if 'time' in da.dims and da.isel(time=-1).isnull().any().item():
48
48
  return da.isel(time=slice(0, -1))
49
49
  return da
50
50
  else:
@@ -79,15 +79,17 @@ def _save_to_yaml(data, output_file='formatted_output.yaml'):
79
79
  output_file (str): Path to output YAML file
80
80
  """
81
81
  # Process strings to normalize all newlines and handle special patterns
82
- processed_data = _process_complex_strings(data)
82
+ processed_data = _normalize_complex_data(data)
83
83
 
84
84
  # Define a custom representer for strings
85
85
  def represent_str(dumper, data):
86
- # Use literal block style (|) for any string with newlines
86
+ # Use literal block style (|) for multi-line strings
87
87
  if '\n' in data:
88
+ # Clean up formatting for literal block style
89
+ data = data.strip() # Remove leading/trailing whitespace
88
90
  return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
89
91
 
90
- # Use quoted style for strings with special characters to ensure proper parsing
92
+ # Use quoted style for strings with special characters
91
93
  elif any(char in data for char in ':`{}[]#,&*!|>%@'):
92
94
  return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
93
95
 
@@ -97,53 +99,80 @@ def _save_to_yaml(data, output_file='formatted_output.yaml'):
97
99
  # Add the string representer to SafeDumper
98
100
  yaml.add_representer(str, represent_str, Dumper=yaml.SafeDumper)
99
101
 
102
+ # Configure dumper options for better formatting
103
+ class CustomDumper(yaml.SafeDumper):
104
+ def increase_indent(self, flow=False, indentless=False):
105
+ return super(CustomDumper, self).increase_indent(flow, False)
106
+
100
107
  # Write to file with settings that ensure proper formatting
101
108
  with open(output_file, 'w', encoding='utf-8') as file:
102
109
  yaml.dump(
103
110
  processed_data,
104
111
  file,
105
- Dumper=yaml.SafeDumper,
112
+ Dumper=CustomDumper,
106
113
  sort_keys=False, # Preserve dictionary order
107
114
  default_flow_style=False, # Use block style for mappings
108
- width=float('inf'), # Don't wrap long lines
115
+ width=1000, # Set a reasonable line width
109
116
  allow_unicode=True, # Support Unicode characters
117
+ indent=2, # Set consistent indentation
110
118
  )
111
119
 
112
120
 
113
- def _process_complex_strings(data):
121
+ def _normalize_complex_data(data):
114
122
  """
115
- Process dictionary data recursively with comprehensive string normalization.
116
- Handles various types of strings and special formatting.
123
+ Recursively normalize strings in complex data structures.
124
+
125
+ Handles dictionaries, lists, and strings, applying various text normalization
126
+ rules while preserving important formatting elements.
117
127
 
118
128
  Args:
119
- data: The data to process (dict, list, str, or other)
129
+ data: Any data type (dict, list, str, or primitive)
120
130
 
121
131
  Returns:
122
- Processed data with normalized strings
132
+ Data with all strings normalized according to defined rules
123
133
  """
124
134
  if isinstance(data, dict):
125
- return {k: _process_complex_strings(v) for k, v in data.items()}
135
+ return {key: _normalize_complex_data(value) for key, value in data.items()}
136
+
126
137
  elif isinstance(data, list):
127
- return [_process_complex_strings(item) for item in data]
138
+ return [_normalize_complex_data(item) for item in data]
139
+
128
140
  elif isinstance(data, str):
129
- # Step 1: Normalize line endings to \n
130
- normalized = data.replace('\r\n', '\n').replace('\r', '\n')
141
+ return _normalize_string_content(data)
131
142
 
132
- # Step 2: Handle escaped newlines with robust regex
133
- normalized = re.sub(r'(?<!\\)\\n', '\n', normalized)
143
+ else:
144
+ return data
134
145
 
135
- # Step 3: Handle unnecessary double backslashes
136
- normalized = re.sub(r'\\\\(n)', r'\\\1', normalized)
137
146
 
138
- # Step 4: Ensure proper formatting of "[time: N]:\n---------"
139
- normalized = re.sub(r'(\[time: \d+\]):\s*\\?n', r'\1:\n', normalized)
147
+ def _normalize_string_content(text):
148
+ """
149
+ Apply comprehensive string normalization rules.
140
150
 
141
- # Step 5: Ensure "Constraint `...`" patterns are properly formatted
142
- normalized = re.sub(r'Constraint `([^`]+)`\\?n', r'Constraint `\1`\n', normalized)
151
+ Args:
152
+ text: The string to normalize
143
153
 
144
- return normalized
145
- else:
146
- return data
154
+ Returns:
155
+ Normalized string with standardized formatting
156
+ """
157
+ # Standardize line endings
158
+ text = text.replace('\r\n', '\n').replace('\r', '\n')
159
+
160
+ # Convert escaped newlines to actual newlines (avoiding double-backslashes)
161
+ text = re.sub(r'(?<!\\)\\n', '\n', text)
162
+
163
+ # Normalize double backslashes before specific escape sequences
164
+ text = re.sub(r'\\\\([rtn])', r'\\\1', text)
165
+
166
+ # Standardize constraint headers format
167
+ text = re.sub(r'Constraint\s*`([^`]+)`\s*(?:\\n|[\s\n]*)', r'Constraint `\1`\n', text)
168
+
169
+ # Clean up ellipsis patterns
170
+ text = re.sub(r'[\t ]*(\.\.\.)', r'\1', text)
171
+
172
+ # Limit consecutive newlines (max 2)
173
+ text = re.sub(r'\n{3,}', '\n\n', text)
174
+
175
+ return text.strip()
147
176
 
148
177
 
149
178
  def document_linopy_model(model: linopy.Model, path: pathlib.Path = None) -> Dict[str, str]: