zen-garden 2.8.12__py3-none-any.whl → 2.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zen_garden/__init__.py CHANGED
@@ -1,6 +1,7 @@
  from . import model
  from . import postprocess
  from . import preprocess
+ from . import wrapper
  from .utils import get_inheritors
  from .model.element import Element
  from .optimization_setup import OptimizationSetup
@@ -51,11 +51,13 @@ def build_parser() -> argparse.ArgumentParser:
  add_help=True,
  usage="zen_garden [options]")

- parser.add_argument("--config",
- type=str,
- required=False,
- default="./config.json",
- help="Path to a Python or JSON config file.")
+ parser.add_argument(
+ "--config",
+ type=str,
+ required=False,
+ default="./config.json",
+ help="Path to a Python or JSON config file."
+ )
  parser.add_argument(
  "--dataset",
  type=str,
@@ -69,19 +71,23 @@ def build_parser() -> argparse.ArgumentParser:
  required=False,
  default=None,
  help=
- "Path to the output directory. Overrides output settings in config.")
+ "Path to the output directory. Overrides output settings in config."
+ )
  parser.add_argument(
  "--job_index",
  type=str,
  required=False,
  default=None,
  help="Comma-separated list of scenario indices. If omitted, the "
- "environment variable specified by --job_index_var is used.")
- parser.add_argument("--job_index_var",
- type=str,
- required=False,
- default="SLURM_ARRAY_TASK_ID",
- help="Environment variable for job index.")
+ "environment variable specified by --job_index_var is used."
+ )
+ parser.add_argument(
+ "--job_index_var",
+ type=str,
+ required=False,
+ default="SLURM_ARRAY_TASK_ID",
+ help="Environment variable for job index."
+ )

  return parser

@@ -155,10 +161,12 @@ def create_zen_garden_cli():
  Basic usage in a command line prompt:

  >>> zen-garden --config=".\\config.json" --dataset="1_base_case"
-
  """
+
  # parse command line arguments
  parser = build_parser()
+
+ # parse command line arguments
  args = parser.parse_args()

  ### get the job index
@@ -0,0 +1,89 @@
+ from zen_garden.cli.zen_garden_cli import build_parser, resolve_job_index
+ from zen_garden.wrapper.operation_scenarios import operation_scenarios
+ import argparse
+
+ def build_parser_op() -> argparse.ArgumentParser:
+
+ # load parser from zen-garden
+ parser = build_parser()
+
+ parser.add_argument(
+ "--config_op",
+ required=False,
+ type=str,
+ default=None,
+ help=
+ "The config file used to run the operation-only model, defaults to " \
+ " --config."
+ )
+ parser.add_argument(
+ "--dataset_op",
+ required=False,
+ type=str,
+ default=None,
+ help=
+ "Name of the dataset used for the operation-only runs. The outputs " \
+ "will be saved under this dataset name"
+ )
+ parser.add_argument(
+ "--scenarios_op",
+ required=False,
+ type=str,
+ default=None,
+ help=
+ "Path to the scenarios.json file used in the operation-only runs. " \
+ "Defaults to the scenarios.json file from --dataset"
+ )
+ parser.add_argument(
+ "--delete_data",
+ action="store_true",
+ help=
+ "Deletes the created operation-only models upon termination to avoid " \
+ "cluttering the data directory"
+ )
+
+ # add parser description
+ parser.description = "Run ZEN garden with a given config file. Per default, the" \
+ "config file will be read out from the current working " \
+ "directory. You can specify a config file with the --config "\
+ "argument. However, note that the output directory will " \
+ "always be the current working directory, independent of " \
+ "the dataset specified in the config file."
+
+ parser.usage = "usage: python -m zen_garden.wrapper.operational_scenarios [-h] " \
+ "[--config CONFIG] [--dataset DATASET] [--folder_output FOLDER_OUTPUT] "\
+ "[--job_index JOB_INDEX] [--job_index_var JOB_INDEX_VAR] "\
+ "[--scenarios_op SCENARIOS_OP] " \
+ "[--delete_data] [--use_existing]"
+
+ return parser
+
+
+ def create_zen_operation_cli() -> None:
+
+ # create parser and parse command line argument
+ parser = build_parser_op()
+ args = parser.parse_args()
+
+ # Make dataset a required argument
+ if args.dataset is None:
+ raise argparse.ArgumentError(
+ "Missing required argument --dataset."
+ )
+
+ # Resolve job index
+ job_index = resolve_job_index(args.job_index, args.job_index_var)
+
+ # run operation scenarios
+ operation_scenarios(
+ config=args.config,
+ dataset = args.dataset,
+ folder_output=args.folder_output,
+ job_index=job_index,
+ scenarios_op=args.scenarios_op,
+ delete_data=args.delete_data,
+ )
+
+ if __name__ == "__main__":
+
+ create_zen_operation_cli()
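Judging from the parser.usage string in the new module above, the operation-only wrapper is meant to be run as a module; a typical invocation would look roughly like the one below. The flag values are illustrative (the dataset name is reused from the CLI docstring example), not defaults shipped with the package:

  python -m zen_garden.wrapper.operational_scenarios --dataset "1_base_case" --config "./config.json" --delete_data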
@@ -152,7 +152,6 @@ class System(Subscriptable):
  use_capacities_existing: bool = True
  allow_investment: bool = True
  storage_charge_discharge_binary: bool = False
- include_operation_only_phase: bool = False



@@ -170,9 +169,7 @@ class Solver(Subscriptable):
  save_parameters: bool = True
  selected_saved_parameters: list = [] # if empty, all parameters are saved
  selected_saved_variables: list = [] # if empty, all variables are saved
- selected_saved_variables_operation: list = [] # if empty, all variables are saved
  selected_saved_duals: list = [] # if empty, all duals are saved (if save_duals is True)
- selected_saved_duals_operation: list = [] # if empty, all duals are saved (if save_duals is True)
  linear_regression_check: dict[str, float] = {
  "eps_intercept": 0.1,
  "epsRvalue": 1 - (1e-5),
@@ -78,10 +78,6 @@ class OptimizationSetup(object):
  # step of optimization horizon
  self.step_horizon = 0

- # flag marking whether the optimization is in capacity expansion or
- # operations-only phase (only if `include_operation_only_phase` is true)
- self.operation_only_phase = False
-
  # Init the energy system
  self.energy_system = EnergySystem(optimization_setup=self)

@@ -538,159 +534,51 @@ class OptimizationSetup(object):
  parser.write_parsed_output()

  def add_results_of_optimization_step(self, step_horizon):
- """Adds capacity additions and carbon emissions to next optimization step
-
- This function takes the capacity additions and carbon emissions of the
- current optimization step and adds them to the existing capacity and
- existing emissions of the next optimization step. It is used for myopic
- foresight and for operation-only model runs.
-
- In myopic foresight, values from the currently simulated year are added
- as existing capacities and emissions for future steps.
-
- In operation-only optimizations, installed capacities from the previous
- investment optimization are added as existing capacities.
-
- In optimizations with both features, the capacity additions are taken from
- the investment phase while the emissions are taken from
- the operation phase. This allows model users to
- differentiate between how the system is planned and operated.
-
- :param step_horizon: year index of the current optimization step.
- In myopic foresight, capacities and emissions from this step are
- added to existing capacities and emissions.
- :type step_horizon: int
-
- :returns: None
- """
-
- if self.system.use_rolling_horizon:
- if not self.system.include_operation_only_phase:
- decision_horizon = self.get_decision_horizon(step_horizon)
- self.add_new_capacity_addition(decision_horizon)
- self.add_carbon_emission_cumulative(decision_horizon)
-
- elif not self.operation_only_phase:
- self.save_current_existing_capacity()
- time_steps = self.energy_system.set_time_steps_yearly
- self.add_new_capacity_addition(time_steps)
-
- else:
- self.reset_existing_capacity_to_previous_step()
- decision_horizon = self.get_decision_horizon(step_horizon)
- self.add_new_capacity_addition(decision_horizon, capacity_addition = self._old_capacity_addition, invest_capacity = self._old_invest_capacity, cost_capex_overnight = self._old_cost_capex_overnight)
- self.add_carbon_emission_cumulative(decision_horizon)
-
- else:
- if self.system.include_operation_only_phase and not self.operation_only_phase:
- time_steps = self.energy_system.set_time_steps_yearly
- self.add_new_capacity_addition(time_steps)
-
- def save_current_existing_capacity(self):
  """
- Stores current capacity values for each technology
-
- This function saves a copy of the input
- paratmeters: capacity_existing, lifetime_existing,
- capex_capacity_existing, capacity_existing_energy,
- and capex_capacity_existing_energy. The copies of these variables are
- saved directly to the technology class in attributes named
- "_old_<parameter_name>".
+ Adds capacity additions and carbon emissions to the next optimization step.

- :returns: None
- """
- for tech in self.get_all_elements(Technology):
- # new capacity
- tech._old_capacity_existing = tech.capacity_existing.copy(deep=True)
- tech._old_capex_capacity_existing = (
- tech.capex_capacity_existing.copy(deep=True)
- )
- tech._old_lifetime_existing = tech.lifetime_existing.copy(deep=True)
- tech._old_set_technologies_existing = tech.set_technologies_existing
- if hasattr(tech, 'capex_capacity_existing_energy'):
- tech._old_capex_capacity_existing_energy = (
- tech.capex_capacity_existing_energy.copy(deep=True)
- )
- if hasattr(tech, 'capacity_existing_energy'):
- tech._old_capacity_existing_energy = (
- tech.capacity_existing_energy.copy(deep=True)
- )
-
- self._old_capacity_addition = self.model.solution["capacity_addition"].to_series().dropna()
- self._old_invest_capacity = self.model.solution["capacity_investment"].to_series().dropna()
- self._old_cost_capex_overnight = self.model.solution["cost_capex_overnight"].to_series().dropna()
+ This function takes the capacity additions and carbon emissions of the
+ current optimization step and adds them to the existing capacity and
+ existing emissions of the next optimization step. Values from the
+ currently simulated year are added as existing capacities and
+ emissions for future steps.

+ Args:
+ step_horizon (int): The year index of the current optimization step.
+ In myopic foresight, capacities and emissions from this step are
+ added to existing capacities and emissions.

+ Returns:
+ None

- def reset_existing_capacity_to_previous_step(self):
  """
- Resets existing capacities to saved values
+ decision_horizon = self.get_decision_horizon(step_horizon)
+ # add newly capacity_addition of first year to existing capacity
+ self.add_new_capacity_addition(decision_horizon)
+ # add cumulative carbon emissions to previous carbon emissions
+ self.add_carbon_emission_cumulative(decision_horizon)

- This function resets capacity-related input parameters to
- previously saved values. The following parameters are reset:
- capacity_existing, lifetime_existing, capex_capacity_existing,
- capacity_existing_energy, and capex_capacity_existing_energy. The values
- are taken from the technology attributes "_old_<parameter_name>", as
- saved by :meth:`OptimizationSetup.save_current_existing_capacity`.
-
- :returns: None
+ def add_new_capacity_addition(self, decision_horizon):
  """
- for tech in self.get_all_elements(Technology):
- # new capacity
- tech.capacity_existing = tech._old_capacity_existing
- tech.capex_capacity_existing = tech._old_capex_capacity_existing
- tech.lifetime_existing = tech._old_capex_capacity_existing
- tech.set_technologies_existing = tech._old_set_technologies_existing
- if hasattr(tech, '_old_capex_capacity_existing_energy'):
- tech.capex_capacity_existing_energy = (
- tech._old_capex_capacity_existing_energy
- )
- if hasattr(tech, '_old_capacity_existing_energy'):
- tech.capacity_existing_energy = (
- tech._old_capacity_existing_energy
- )
-
- def add_new_capacity_addition(self,
- decision_horizon,
- capacity_addition = None,
- invest_capacity = None,
- cost_capex_overnight = None):
- """ Adds the newly built capacity to the existing capacity
+ Adds the newly built capacity to the existing capacity.

  This function adds installed capacities from the current optimization
- step to existing capacities in the model. It also adds
- costs from the installed capacities to existing capacity investment.
- Capacity values whose magnitude is below that specified by the solver
- setting "rounding_decimal_points_capacity" are set to zero.
-
- :param decision_horizon: list of the years for to transfer installed
- capacities to existing capacities.
- :type decision_horizon: list or int
+ step to existing capacities in the model. It also adds costs from the
+ installed capacities to existing capacity investment. Capacity values whose
+ magnitude is below that specified by the solver setting
+ "rounding_decimal_points_capacity" are set to zero.

- :param capacity_addition: dataframe of capacity additions to add to
- existing capacities (optional). If blank, capacity additions are
- taken from the current modeling results.
- :type capacity_addition: pandas.DataFrame
+ Args:
+ decision_horizon (list or int): A list of the years to transfer installed
+ capacities to existing capacities.

- :param invest_capacity: dataframe of capacity investments to add to
- existing investments (optional). If blank, capacity investments are
- taken from the current modeling results.
- :type invest_capacity: pandas.DataFrame
-
- :param cost_capex_overnight: dataframe of overnight capital costs to
- add to existing investments (optional). If blank, capital costs are
- taken from the current modeling results.
- :type cost_capex_overnight: pandas.DataFrame
-
- :returns: None
+ Returns:
+ None

  """
- if capacity_addition is None:
- capacity_addition = self.model.solution["capacity_addition"].to_series().dropna()
- if invest_capacity is None:
- invest_capacity = self.model.solution["capacity_investment"].to_series().dropna()
- if cost_capex_overnight is None:
- cost_capex_overnight = self.model.solution["cost_capex_overnight"].to_series().dropna()
+ capacity_addition = self.model.solution["capacity_addition"].to_series().dropna()
+ invest_capacity = self.model.solution["capacity_investment"].to_series().dropna()
+ cost_capex_overnight = self.model.solution["cost_capex_overnight"].to_series().dropna()

  if self.solver.round_parameters:
  rounding_value = 10 ** (-self.solver.rounding_decimal_points_capacity)
@@ -709,17 +597,19 @@ class OptimizationSetup(object):
  tech.add_new_capacity_investment(capacity_investment, decision_horizon)

  def add_carbon_emission_cumulative(self, decision_horizon):
- """ Add current emissions to existing emissions.
+ """
+ Adds current emissions to existing emissions.

  This function adds carbon emissions from the current optimization
  step to the existing carbon emissions.

- :param decision_horizon: list of the years for to transfer installed
- capacities to existing capacities.
- :type decision_horizon: list or int
+ Args:
+ decision_horizon (list or int): A list of the years to transfer
+ emissions to existing emissions.

- :returns: None
-
+ Returns:
+ None
+
  """
  interval_between_years = self.energy_system.system.interval_between_years
  last_year = decision_horizon[-1]
@@ -793,30 +683,4 @@ class OptimizationSetup(object):
  component_data.index = _custom_index
  return component_data
  except KeyError:
- raise KeyError(f"the custom set {custom_set} cannot be used as a subindex of {component_data.index}")
-
- def set_phase_configurations(self, phase):
- """ Sets proper configurations for operation-only problems.
-
- This function sets proper configurations for the current phase
- (capacity planning vs. operation only) of the model.
-
- :param phase: current phase of the optimization. Must be either
- `investment` (for capacity planning) or `operations` for
- operations-only.
- :type phase: str
-
- :returns: None
- """
-
- if phase == 'investment':
- logging.info(f"---- Optimizing investment ----")
- self.system.allow_investment = True
- self.operation_only_phase = False
-
- elif phase == 'operation':
- logging.info(f"---- Optimizing operation only ----")
- self.system.allow_investment = False
- self.operation_only_phase = True
- else:
- raise ValueError(f"Unrecognized phase: {phase}")
+ raise KeyError(f"the custom set {custom_set} cannot be used as a subindex of {component_data.index}")
@@ -295,13 +295,8 @@ class Postprocess:

  # skip variables not selected to be saved
  if (
- not self.optimization_setup.operation_only_phase
- and self.solver.selected_saved_variables
+ self.solver.selected_saved_variables
  and name not in self.solver.selected_saved_variables
- ) or (
- self.optimization_setup.operation_only_phase
- and self.solver.selected_saved_variables_operation
- and name not in self.solver.selected_saved_variables_operation
  ):
  continue

@@ -326,24 +321,14 @@ class Postprocess:

  units = self._unit_df(units,df.index)

- # rename for operations-only duals
- if self.optimization_setup.operation_only_phase:
- name = name + '_operation'
-
  # transform the dataframe to a json string and load it into the dictionary as dict
  data_frames[name] = self._transform_df(df,doc,units)

- # choose whether to write new file or append to existing file
- if self.optimization_setup.operation_only_phase:
- mode = 'a'
- else:
- mode = 'w'
-
  # write file
  self.write_file(
  self.name_dir.joinpath('var_dict'),
  data_frames,
- mode = mode
+ mode = 'w'
  )

  def save_duals(self):
@@ -361,13 +346,8 @@ class Postprocess:

  # skip variables not selected to be saved
  if (
- not self.optimization_setup.operation_only_phase
- and self.solver.selected_saved_duals
+ self.solver.selected_saved_duals
  and name not in self.solver.selected_saved_duals
- ) or (
- self.optimization_setup.operation_only_phase
- and self.solver.selected_saved_duals_operation
- and name not in self.solver.selected_saved_duals_operation
  ):
  continue

@@ -394,24 +374,14 @@ class Postprocess:
  if len(df.index.names) == len(index_list):
  df.index.names = index_list

- # rename for operations-only duals
- if self.optimization_setup.operation_only_phase:
- name = name + '_operation'
-
  # we transform the dataframe to a json string and load it into the dictionary as dict
  data_frames[name] = self._transform_df(df,doc)

- # choose whether to write new file or append to existing file
- if self.optimization_setup.operation_only_phase:
- mode = 'a'
- else:
- mode = 'w'
-
  # write file
  self.write_file(
  self.name_dir.joinpath('dual_dict'),
  data_frames,
- mode = mode
+ mode = 'w'
  )

  def save_system(self):
@@ -178,6 +178,10 @@ class Scenario():
  return {}

  def _read_ureg(self,default_ureg) -> pint.UnitRegistry:
+
+ # suppress pint output about redefining units
+ logging.getLogger('pint').setLevel(logging.ERROR)
+ # load ureg
  ureg = copy.copy(default_ureg)
  unit_path = os.path.join(self.path, "unit_definitions.txt")
  if os.path.exists(unit_path):
@@ -379,6 +383,9 @@ class SolutionLoader():
  series_to_concat.append(current_mf)
  break

+ if len(series_to_concat) == 0:
+ return pd.Series(dtype=float)
+
  return pd.concat(series_to_concat)

  def _concatenate_raw_dataseries(