zen-garden 2.8.12__py3-none-any.whl → 2.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zen_garden/runner.py CHANGED
@@ -111,40 +111,34 @@ def run(config = "./config.json", dataset=None, job_index=None,
     steps_horizon = optimization_setup.get_optimization_horizon()
     # iterate through horizon steps
     for step in steps_horizon:
-        # iterate through phases
-        for phase in ['investment', 'operation']:
-            #if operation phase, exclude capacity expansion
-            if phase == 'operation' and not config.system.include_operation_only_phase:
-                continue
-            StringUtils.print_optimization_progress(scenario, steps_horizon, step, system=config.system)
-            if optimization_setup.system.include_operation_only_phase:
-                optimization_setup.set_phase_configurations(phase)
-            # overwrite time indices
-            optimization_setup.overwrite_time_indices(step)
-            # create optimization problem
-            optimization_setup.construct_optimization_problem()
-            if optimization_setup.solver.use_scaling:
-                optimization_setup.scaling.run_scaling()
-            elif optimization_setup.solver.analyze_numerics or optimization_setup.solver.run_diagnostics:
-                optimization_setup.scaling.analyze_numerics()
-            # SOLVE THE OPTIMIZATION PROBLEM
-            optimization_setup.solve()
-            # break if infeasible
-            if not optimization_setup.optimality:
-                # write IIS
-                optimization_setup.write_IIS(scenario)
-                logging.warning(f"Optimization: {optimization_setup.model.termination_condition}")
-                break
-            if optimization_setup.solver.use_scaling:
-                optimization_setup.scaling.re_scale()
-            # save new capacity additions and cumulative carbon emissions for next time step
+        StringUtils.print_optimization_progress(scenario, steps_horizon, step, system=config.system)
+        # overwrite time indices
+        optimization_setup.overwrite_time_indices(step)
+        # create optimization problem
+        optimization_setup.construct_optimization_problem()
+        if optimization_setup.solver.use_scaling:
+            optimization_setup.scaling.run_scaling()
+        elif optimization_setup.solver.analyze_numerics or optimization_setup.solver.run_diagnostics:
+            optimization_setup.scaling.analyze_numerics()
+        # SOLVE THE OPTIMIZATION PROBLEM
+        optimization_setup.solve()
+        # break if infeasible
+        if not optimization_setup.optimality:
+            # write IIS
+            optimization_setup.write_IIS(scenario)
+            logging.warning(f"Optimization: {optimization_setup.model.termination_condition}")
+            break
+        if optimization_setup.solver.use_scaling:
+            optimization_setup.scaling.re_scale()
+        # save new capacity additions and cumulative carbon emissions for next time step
+        if optimization_setup.system.use_rolling_horizon:
             optimization_setup.add_results_of_optimization_step(step)
-            # EVALUATE RESULTS
-            # create scenario name, subfolder and param_map for postprocessing
-            scenario_name, subfolder, param_map = StringUtils.generate_folder_path(
-                config=config, scenario=scenario, scenario_dict=scenario_dict, steps_horizon=steps_horizon, step=step)
-            # write results
-            Postprocess(optimization_setup, scenarios=config.scenarios, subfolder=subfolder,
-                        model_name=model_name, scenario_name=scenario_name, param_map=param_map)
+        # EVALUATE RESULTS
+        # create scenario name, subfolder and param_map for postprocessing
+        scenario_name, subfolder, param_map = StringUtils.generate_folder_path(
+            config=config, scenario=scenario, scenario_dict=scenario_dict, steps_horizon=steps_horizon, step=step)
+        # write results
+        Postprocess(optimization_setup, scenarios=config.scenarios, subfolder=subfolder,
+                    model_name=model_name, scenario_name=scenario_name, param_map=param_map)
     logging.info("--- Optimization finished ---")
     return optimization_setup
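
With this change, each horizon step is solved exactly once: the investment/operation phase loop is gone from run(), and operation-only runs appear to move to the new wrapper module added below. A minimal sketch of invoking the simplified runner, based only on the signature visible in the hunk header ("./my_dataset" is a placeholder path):

    from zen_garden import run

    # One solve per horizon step; run() returns the final optimization setup.
    optimization_setup = run(config="./config.json", dataset="./my_dataset")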
@@ -0,0 +1,248 @@
+from pathlib import Path
+from typing import Iterable, List, Optional
+
+import shutil
+import logging
+
+from zen_garden import run, Results
+from zen_garden.wrapper import utils
+
+
+logger = logging.getLogger(__name__)
+
+
+def validate_inputs(
+    dataset: Path | str,
+    folder_output: Path | str | None,
+    job_index: Iterable[int] | None
+) -> tuple[Path, Path, List[int] | None]:
19
+ """Validate and normalize user-provided inputs.
20
+
21
+ This function performs validates the inputs to ensure downstream
22
+ processing can rely on consistent types and assumptions. It verifies
23
+ that the dataset path exists and that the optional job index contains
24
+ only integers. The job index is normalized into a list when provided.
25
+
26
+ Args:
27
+ dataset (Path | str): Path to the dataset directory used for the original
28
+ capacity-expansion run. The path must exist.
29
+ job_index (List[int] | None): List of scenario indices to process. If
30
+ None, all available scenarios will be processed.
31
+
32
+ Returns:
33
+ Tuple
34
+ The validated dataset path and a list of scenario indices.
35
+
36
+ Raises:
37
+ FileNotFoundError: If the dataset path does not exist.
38
+ TypeError: If job_index is provided and contains non-integer values.
39
+ """
40
+    dataset = Path(dataset)
+
+    if not dataset.exists():
+        raise FileNotFoundError(f"Dataset not found: {dataset}")
+
+    if folder_output is None:
+        folder_output = "./outputs/"
+    folder_output = Path(folder_output)
+    if not (folder_output / dataset.name).exists():
+        raise FileNotFoundError(f"Results for dataset {dataset} do not exist"
+                                f" in the folder {folder_output}.")
+
+    if job_index is None:
+        job_index_list = None
+    else:
+        job_index_list = list(job_index)
+        if not all(isinstance(i, int) for i in job_index_list):
+            raise TypeError("job_index must be an iterable of integers")
+
+    return dataset, folder_output, job_index_list
+
+
+def load_scenarios(
+    results_path: Path,
+    job_index: List[int] | None,
+) -> List[str]:
+    """Load scenario names from simulation results.
+
+    This function inspects the results of a previous capacity-expansion run
+    and extracts the scenario names. When a job index is provided, only the
+    scenarios corresponding to those indices are returned.
+
+    Args:
+        results_path: Path to the directory containing simulation results.
+        job_index: List of indices identifying which scenarios to load.
+            If empty or None, all scenarios are returned.
+
+    Returns:
+        List[str]:
+            A list of scenario names corresponding to the selected indices.
+
+    Raises:
+        ValueError: If no scenarios are found in the results directory.
+        IndexError: If job_index contains indices that are out of range.
+    """
+    results = Results(results_path)
+    scenarios = list(results.solution_loader.scenarios.keys())
+
+    if not scenarios:
+        raise ValueError("No scenarios found in simulation results")
+
+    if job_index:
+        scenarios = [scenarios[i] for i in job_index]
+
+    return scenarios
+
+
97
+def prepare_operational_dataset(
+    dataset: Path,
+    dataset_op: Path,
+    folder_output: Path,
+    scenario: str,
+    scenarios_op: str | None,
+) -> None:
+    """Create and configure an operation-only dataset for a scenario.
+
+    This function derives an operational dataset from the original
+    capacity-expansion dataset by copying the base dataset, adding the
+    capacity-expansion results as existing capacities, and disabling further
+    investment decisions.
+
+    Args:
+        dataset (Path): Path to the original capacity-expansion dataset.
+        dataset_op (Path): Destination path for the generated operational dataset.
+        folder_output (Path): Path to the directory containing capacity-expansion
+            results.
+        scenario (str): Name of the scenario whose results should be used in the
+            operational scenarios.
+        scenarios_op (str | None): Name of the file containing scenario
+            configurations for operational analysis. If provided, scenario
+            analysis is enabled in the operational simulations.
+
+    Side Effects:
+        - Creates files and directories under `dataset_op`.
+    """
125
+    logger.info("Preparing operational dataset: %s", dataset_op)
+
+    utils.copy_dataset(
+        dataset,
+        dataset_op,
+        scenarios=scenarios_op,
+    )
+
+    utils.capacity_addition_2_existing_capacity(
+        folder_output,
+        dataset,
+        dataset_op,
+        scenario
+    )
+
+    utils.modify_json(
+        dataset_op / "system.json",
+        {
+            "allow_investment": False,
+            "conduct_scenario_analysis": scenarios_op is not None,
+        },
+    )
+
+
+def run_operational_simulation(
+    dataset_op: Path,
+    config: Path,
+    folder_output: Path,
+) -> None:
+    """Run an operation-only simulation.
+
+    Executes the Zen Garden simulation using a dataset that has been
+    prepared specifically for operational analysis (i.e., investment
+    decisions are disabled).
+
+    Args:
+        dataset_op (Path): Path to the operational dataset.
+        config (Path): Path to the simulation configuration file.
+        folder_output (Path): Directory where simulation outputs will be written.
+    """
+    logger.info("Running operational simulation for %s", dataset_op.name)
+    run(dataset=dataset_op, config=config, folder_output=folder_output)
+
+
+def cleanup_dataset(dataset_op: Path, delete_data: bool) -> None:
+    """Remove a generated operational dataset directory if requested.
+
+    This helper function provides controlled cleanup of intermediate
+    datasets created during operational runs.
+
+    Args:
+        dataset_op (Path): Path to the created operational dataset.
+        delete_data (bool): If True, the dataset directory and all of its
+            contents are permanently deleted.
+    """
+    if delete_data:
+        logger.info("Deleting dataset: %s", dataset_op)
+        shutil.rmtree(dataset_op)
+
+
185
+def operation_scenarios(
+    dataset: Path | str,
+    config: Path | str = Path("./config.json"),
+    folder_output: Path | str = Path("./outputs"),
+    job_index: Optional[Iterable[int]] = None,
+    scenarios_op: str | None = None,
+    delete_data: bool = False
+) -> None:
+    """Run operation-only simulations derived from capacity-expansion results.
+
+    This is the main orchestration function for running operational
+    scenarios. For each selected scenario, it validates inputs, prepares
+    an operational dataset, executes the operation-only simulation, and
+    optionally cleans up intermediate data.
+
+    Args:
+        dataset (str | Path): Path to the original dataset used for
+            capacity-expansion runs.
+        config (str | Path): Path to the simulation configuration file.
+        folder_output (str | Path): Directory containing simulation outputs of
+            the capacity-planning problem. New operation results will also be
+            saved in this directory. Defaults to "./outputs/".
+        job_index (Iterable[int] | None): Optional iterable of scenario indices
+            in the capacity-planning problem to run. Only these scenarios will
+            be used in the operation-only simulations. If None, all scenarios
+            are processed.
+        scenarios_op (str | None): Name of the scenario configuration for
+            operational analysis.
+        delete_data (bool): If True, generated operational datasets are deleted
+            after use.
+
+    Side Effects:
+        - Creates and optionally deletes dataset directories.
+        - Executes simulation runs and writes output files to disk.
+        - Emits log messages during execution.
+    """
222
+    dataset, folder_output, job_index_list = validate_inputs(
+        dataset, folder_output, job_index)
+
+    dataset_path = dataset.parent
+    dataset_name = dataset.name
+    results_path = folder_output / dataset_name
+
+    scenarios = load_scenarios(results_path, job_index_list)
+
+    for scenario in scenarios:
+        dataset_op = dataset_path / f"{dataset_name}_{scenario}__operation"
+
+        prepare_operational_dataset(
+            dataset=dataset,
+            dataset_op=dataset_op,
+            folder_output=results_path,
+            scenario=scenario,
+            scenarios_op=scenarios_op,
+        )
+
+        run_operational_simulation(
+            dataset_op=dataset_op,
+            config=config,
+            folder_output=folder_output,
+        )
+
+        cleanup_dataset(dataset_op, delete_data)
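
Taken together, the new module turns a finished capacity-expansion run into one operation-only simulation per scenario. A minimal usage sketch; the import path is an assumption (the diff does not show this file's location, only that it imports from zen_garden.wrapper), and the dataset path and indices are placeholders:

    # Hypothetical module path; the new file's name is not shown in the diff.
    from zen_garden.wrapper.operation_scenarios import operation_scenarios

    # Re-run operations for scenarios 0 and 2 of an earlier expansion run,
    # deleting the generated intermediate datasets afterwards.
    operation_scenarios(
        dataset="./my_dataset",
        config="./config.json",
        folder_output="./outputs",
        job_index=[0, 2],
        delete_data=True,
    )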