zen-garden 2.8.13__py3-none-any.whl → 2.9.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to the supported registries. It is provided for informational purposes only.
- zen_garden/__init__.py +1 -0
- zen_garden/cli/zen_garden_cli.py +21 -13
- zen_garden/cli/zen_operation_cli.py +89 -0
- zen_garden/default_config.py +0 -3
- zen_garden/optimization_setup.py +40 -176
- zen_garden/postprocess/postprocess.py +4 -34
- zen_garden/postprocess/results/solution_loader.py +4 -0
- zen_garden/preprocess/unit_handling.py +393 -100
- zen_garden/runner.py +28 -34
- zen_garden/wrapper/__init__.py +0 -0
- zen_garden/wrapper/operation_scenarios.py +248 -0
- zen_garden/wrapper/utils.py +438 -0
- {zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/METADATA +2 -2
- {zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/RECORD +17 -13
- {zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/entry_points.txt +1 -0
- {zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/WHEEL +0 -0
- {zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/licenses/LICENSE.txt +0 -0
zen_garden/wrapper/utils.py

```diff
@@ -0,0 +1,438 @@
+import os
+import shutil
+from pathlib import Path
+import pandas as pd
+import json
+import numpy as np
+from zen_garden.postprocess.results.results import Results
+from zen_garden.preprocess.unit_handling import UnitHandling
+
+
+def ensure_dir_exists(path: Path):
+    """
+    Ensure that a directory exists. If it doesn't, create it.
+
+    Args:
+        path (Path): The directory path to check and create.
+    """
+    if not path.exists():
+        path.mkdir(parents=True)
+
+
+def copy_file(src: Path, dest: Path):
+    """
+    Copy a single file from the source to the destination.
+
+    Args:
+        src (Path): The source file path.
+        dest (Path): The destination file path.
+    """
+    if not src.exists():
+        raise FileNotFoundError(f"Source file {src} not found.")
+    shutil.copy(src, dest)
+
+
+def copy_dir(src: Path, dest: Path):
+    """
+    Copy an entire directory from the source to the destination.
+
+    Args:
+        src (Path): The source directory path.
+        dest (Path): The destination directory path.
+    """
+    if not src.exists():
+        raise FileNotFoundError(f"Source directory {src} not found.")
+    shutil.copytree(src, dest)
+
+
+def remove_existing_dir(dest: Path):
+    """
+    Delete directory and all subdirectories if they exist.
+
+    Args:
+        dest (Path): Directory to be deleted.
+    """
+    # create new dataset for operational scenarios
+    if os.path.exists(dest):
+        shutil.rmtree(dest)
+
+
+def copy_dataset(old_dataset: Path, new_dataset: Path, scenarios=None):
+    """
+    Copy the entire dataset from the old directory to a new directory.
+
+    Args:
+        old_dataset (Path): The path to the old dataset.
+        new_dataset (Path): The path to the new dataset.
+        scenarios (str, optional): A specific scenario file to copy. Defaults
+            to None.
+    """
+    remove_existing_dir(new_dataset)
+    ensure_dir_exists(new_dataset)
+    copy_dir(old_dataset / "energy_system", new_dataset / "energy_system")
+    copy_dir(old_dataset / "set_carriers", new_dataset / "set_carriers")
+    copy_dir(old_dataset / "set_technologies",
+             new_dataset / "set_technologies")
+    copy_file(old_dataset / "system.json", new_dataset / "system.json")
+
+    if scenarios:
+        copy_file(old_dataset / scenarios, new_dataset / "scenarios.json")
+
+
+def load_results(out_dir: Path, scenario: str) -> dict:
+    """
+    Load simulation results from the specified directory and scenario.
+
+    Args:
+        out_dir (Path): Directory where the results are stored.
+        scenario (str): Name of the scenario to load results for.
+
+    Returns:
+        dict: A dictionary containing various results data, such as capacity
+            addition, nodes, edges, and technologies.
+    """
+    r = Results(path=out_dir)
+
+    assert 'capacity_addition' in r.get_component_names(
+        'variable'), "Results have no variable named capacity addition"
+
+    system = r.get_system()
+    solver = r.get_solver()
+    capacity_addition = r.get_total('capacity_addition',
+                                    scenario_name=scenario)
+    capacity_units = r.get_unit('capacity_addition', scenario_name=scenario)
+
+    # Get conversion technologies excluding retrofitting
+    set_conversion_not_retrofitting = list(
+        set(system.set_conversion_technologies) -
+        set(system.set_retrofitting_technologies))
+
+    # Get edges from results
+    edges = r.get_total('set_nodes_on_edges',
+                        scenario_name=scenario).index.values
+
+    # Reformat the results
+    capacity_addition.columns.name = "year"
+    capacity_addition = capacity_addition.stack().unstack("capacity_type")
+
+    return {
+        "capacity_addition": capacity_addition,
+        "capacity_units": capacity_units,
+        "system": system,
+        "solver": solver,
+        "nodes": system.set_nodes,
+        "edges": edges,
+        "technologies": {
+            "set_conversion_technologies": set_conversion_not_retrofitting,
+            "set_transport_technologies": system.set_transport_technologies,
+            "set_storage_technologies": system.set_storage_technologies,
+            "set_retrofitting_technologies":
+                system.set_retrofitting_technologies
+        }
+    }
+
+
+def get_element_location(element_name: str, raw_results: dict):
+    """
+    Get the location (nodes or edges) and the corresponding name for a given
+    element.
+
+    Args:
+        element_name (str): The name of the technology set (e.g.,
+            'set_transport_technologies').
+        raw_results (dict): The dictionary containing results data.
+
+    Returns:
+        tuple: A tuple containing the location (nodes or edges) and the
+            location name.
+    """
+    if element_name == "set_transport_technologies":
+        location = raw_results["edges"]
+        location_name = "edge"
+    else:
+        location = raw_results["nodes"]
+        location_name = "node"
+    return location, location_name
+
+
+def get_element_folder(dataset_op: Path, element_name: str, tech: str) -> Path:
+    """
+    Get the folder path for a specific technology within a given element.
+
+    Args:
+        dataset_op (Path): The dataset output directory.
+        element_name (str): The name of the technology set (e.g.,
+            'set_conversion_technologies').
+        tech (str): The name of the technology.
+
+    Returns:
+        Path: The path to the technology folder.
+    """
+    if element_name == "set_retrofitting_technologies":
+        tech_folder_op = dataset_op / "set_technologies" / \
+            "set_conversion_technologies" / element_name / tech
+    else:
+        tech_folder_op = dataset_op / "set_technologies" / element_name / tech
+    return tech_folder_op
+
+
+def format_capacity_addition(capacity_addition_tech: pd.DataFrame,
+                             capacity_type: str,
+                             suffix: str,
+                             location_name: str) -> pd.DataFrame:
+    """
+    Format the capacity addition DataFrame for consistency in column names.
+
+    Args:
+        capacity_addition_tech (pd.DataFrame): The DataFrame with capacity
+            addition data.
+        capacity_type (str): The type of capacity (e.g., 'power', 'energy').
+        suffix (str): File suffix corresponding to capacity_type.
+        location_name (str): The name of the location (either 'node' or 'edge').
+
+    Returns:
+        pd.DataFrame: The formatted DataFrame with the correct column names.
+    """
+    return capacity_addition_tech.rename(
+        columns={
+            "location": location_name,
+            capacity_type: f"capacity_existing{suffix}",
+            "year": "year_construction"
+        })
+
+
+def aggregate_capacity(capacity_existing: pd.DataFrame,
+                       location_name: str) -> pd.DataFrame:
+    """
+    Aggregate capacity data by grouping it by location and year of construction.
+
+    Args:
+        capacity_existing (pd.DataFrame): The DataFrame with existing capacity
+            data.
+        location_name (str): The name of the location (either 'node' or 'edge').
+
+    Returns:
+        pd.DataFrame: The aggregated DataFrame with summed capacities.
+    """
+    return capacity_existing.groupby([location_name, "year_construction"
+                                      ]).sum().reset_index()
+
+
+def save_capacity_existing(tech_folder_op: Path,
+                           capacity_existing: pd.DataFrame, suffix: str):
+    """
+    Save the aggregated capacity data to a CSV file.
+
+    Args:
+        tech_folder_op (Path): The path to the technology folder.
+        capacity_existing (pd.DataFrame): The aggregated capacity data.
+        suffix (str): The suffix to append to the file name (e.g., '_energy').
+    """
+    capacity_existing.to_csv(tech_folder_op / f"capacity_existing{suffix}.csv",
+                             mode='w',
+                             header=True,
+                             index=False)
+
+
+def convert_to_original_units(capacity_addition_tech, capacity_units, capacity_type, unit_handling, tech, tech_folder_op, suffix):
+    """
+    Convert the capacity addition to the original units of the existing
+    capacity.
+
+    Args:
+        capacity_addition_tech (pd.DataFrame): The DataFrame containing capacity
+            addition data.
+        capacity_units (pd.DataFrame): DataFrame containing unit information
+            for capacities.
+        capacity_type (str): The type of capacity ('power' or 'energy').
+        unit_handling (UnitHandling): The unit handling object for unit
+            conversions.
+        tech (str): The technology name.
+        tech_folder_op (Path): The path to the technology folder.
+        suffix (str): Suffix to differentiate between 'power' and 'energy'.
+
+    Returns:
+        pd.DataFrame: The converted capacity addition data in the correct units.
+    """
+    # Get units for capacity addition
+    capacity_addition_unit = capacity_units.loc[(tech, capacity_type)]
+
+    # Ensure capacity addition is in base units
+    if not np.isclose(unit_handling.get_unit_multiplier(capacity_addition_unit, tech), 1):
+        raise AssertionError("Model output is not in base units")
+
+    # Get capacity_existing units from attributes file
+    fp_attributes = tech_folder_op / "attributes.json"
+    with open(fp_attributes, 'r') as f:
+        attributes = json.load(f)
+    capacity_existing_unit = attributes[f'capacity_existing{suffix}']['unit']
+
+    # Convert capacity addition to units of capacity_existing
+    unit_multiplier = unit_handling.get_unit_multiplier(
+        capacity_existing_unit, tech)
+    capacity_addition_tech[f"capacity_existing{suffix}"] = capacity_addition_tech[
+        f"capacity_existing{suffix}"] / unit_multiplier
+
+    # Print output if necessary
+    if not np.isclose(unit_multiplier, 1):
+        print(
+            f"Multiplying capacity addition (unit:{capacity_addition_unit}) "
+            f"by a scale factor of {1/unit_multiplier} to convert to units "
+            f"{capacity_existing_unit}"
+        )
+
+    return capacity_addition_tech
+
+
+def round_capacity(results: dict, rounding_decimal_points: int, has_energy: bool) -> dict:
+    """
+    Round the capacities in the results to remove values below a certain
+    threshold.
+
+    Args:
+        results (dict): The dictionary containing results data.
+        rounding_decimal_points (int): Number of decimal points after which to
+            round capacity values to zero. For example, if
+            ``rounding_decimal_points=6``, then all capacities below 10^-6 are
+            rounded to zero.
+        has_energy (bool): Boolean whether the capacity addition has energy
+            column
+    Returns:
+        dict: The updated results dictionary with rounded capacity values.
+    """
+    capacity_addition = results["capacity_addition"]
+    rounding_value = 10**(-rounding_decimal_points)
+    idx_keep = capacity_addition["power"] > rounding_value
+
+    if has_energy:
+        idx_keep_energy = (
+            capacity_addition["energy"] > rounding_value) | capacity_addition["energy"].isna()
+        idx_keep = idx_keep | idx_keep_energy
+
+    results["capacity_addition"] = capacity_addition.loc[idx_keep, :]
+
+    return results
+
+
+def add_capacity_additions(dataset_op: Path, results: dict, element_name: str,
+                           capacity_type: str, unit_handling):
+    """
+    Transfer capacity additions from the results to the dataset for a given
+    element and capacity type.
+
+    Args:
+        dataset_op (Path): The output directory of the dataset.
+        results (dict): The raw simulation results.
+        element_name (str): The name of the technology set (e.g.,
+            'set_conversion_technologies').
+        capacity_type (str): The type of capacity ('power' or 'energy').
+        unit_handling (UnitHandling): The unit handling object for unit
+            conversions.
+    """
+    print(f"Transferring capacity for {element_name}")
+    location, location_name = get_element_location(element_name, results)
+    elements = results["technologies"][element_name]
+    capacity_addition = results["capacity_addition"]
+    capacity_units = results["capacity_units"]
+
+    for tech in elements:
+
+        if tech not in capacity_addition.index.get_level_values(0):
+            continue
+
+        suffix = "" if capacity_type == "power" else "_energy"
+        tech_folder_op = get_element_folder(dataset_op, element_name, tech)
+        fp_capacity_existing = tech_folder_op / \
+            f"capacity_existing{suffix}.csv"
+
+        capacity_addition_tech = capacity_addition.loc[(
+            tech, capacity_type)].reset_index()
+        capacity_addition_tech = format_capacity_addition(
+            capacity_addition_tech, capacity_type, suffix, location_name)
+
+        capacity_addition_tech = convert_to_original_units(
+            capacity_addition_tech, capacity_units, capacity_type,
+            unit_handling, tech, tech_folder_op, suffix)
+
+        # Read or initialize the 'capacity_existing' CSV
+        if os.path.exists(fp_capacity_existing):
+            capacity_existing = pd.read_csv(fp_capacity_existing,
+                                            dtype={
+                                                location_name:
+                                                    object,
+                                                "year_construction":
+                                                    np.int64,
+                                                f"capacity_existing{suffix}":
+                                                    np.float64
+                                            })
+            capacity_existing = pd.concat(
+                [capacity_existing,
+                 capacity_addition_tech]).reset_index(drop=True)
+        else:
+            capacity_existing = capacity_addition_tech
+
+        # Aggregate capacity data
+        capacity_existing = aggregate_capacity(capacity_existing,
+                                               location_name)
+
+        # Save updated data
+        save_capacity_existing(tech_folder_op, capacity_existing, suffix)
+
+
+def modify_json(file_path: Path, change_dict: dict):
+    """
+    Modify a JSON file according to a change dictionary.
+
+    Args:
+        file_path (Path): Path to the JSON file.
+        change_dict (dict): Dictionary with attributes to change in the JSON
+            file.
+    """
+    with open(file_path, 'r+') as f:
+        data = json.load(f)
+        data.update(change_dict)  # Update dictionary with changes
+        f.seek(0)  # Move cursor to the beginning of the file
+        json.dump(data, f, indent=4)
+        f.truncate()  # Remove leftover pieces if old file was longer
+
+
+def capacity_addition_2_existing_capacity(out_dir: Path,
+                                          dataset: Path,
+                                          dataset_op: Path,
+                                          scenario: str):
+    """
+    Add capacity additions from the simulation results to the existing
+    capacity dataset.
+
+    Args:
+        out_dir (Path): Directory of simulation outputs.
+        dataset (Path): Original model dataset.
+        dataset_op (Path): New model dataset to which to add capacity additions
+            as existing capacities.
+        scenario (str): The scenario name to load.
+        rounding_value (int, optional): Threshold for rounding capacity
+            additions to zero. Defaults to None.
+    """
+    # Load raw results
+    results = load_results(out_dir, scenario)
+
+    # Initialize unit handling class
+    unit_handling = UnitHandling(
+        dataset / "energy_system",
+        results["solver"].rounding_decimal_points_units)
+
+    has_energy = ("energy" in results["capacity_addition"].columns.values)
+    # Round capacities below tolerance to zero
+    results = round_capacity(results,
+                             results["solver"].rounding_decimal_points_units,
+                             has_energy
+                             )
+
+    # Add power capacity additions for different technology sets
+    for element_name in results["technologies"].keys():
+        add_capacity_additions(dataset_op, results, element_name,
+                               "power", unit_handling)
+    # add energy capacity additions if present
+    if has_energy:
+        add_capacity_additions(dataset_op, results, "set_storage_technologies",
+                               "energy", unit_handling)
```
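Taken together, these helpers turn the capacity additions of a finished ZEN-garden run into capacity_existing inputs of a new, operation-only dataset: the dataset skeleton is cloned, the optimized capacity additions are read from the results, rounded, converted from base units back to the units declared in each technology's attributes.json, and written out as capacity_existing CSVs. Below is a minimal sketch of how the two top-level helpers might be combined by hand; the paths and the scenario name are hypothetical placeholders, and the packaged zen-operation CLI and operation_scenarios.py (whose contents are not shown in this diff) presumably wrap a similar sequence.

```python
from pathlib import Path

from zen_garden.wrapper.utils import (capacity_addition_2_existing_capacity,
                                      copy_dataset)

# Hypothetical locations; substitute the paths of an actual ZEN-garden run.
out_dir = Path("outputs/my_model")            # outputs of the finished design run
dataset = Path("data/my_model")               # original input dataset
dataset_op = Path("data/my_model_operation")  # new operation-only dataset
scenario = "scenario_1"                       # hypothetical; must exist in the results

# Clone the dataset skeleton (energy_system, set_carriers, set_technologies,
# system.json), then write the optimized capacity additions back into it as
# capacity_existing CSVs in the units declared in attributes.json.
copy_dataset(dataset, dataset_op)
capacity_addition_2_existing_capacity(out_dir, dataset, dataset_op, scenario)
```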
{zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/METADATA

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: zen_garden
-Version: 2.8.13
+Version: 2.9.0
 Summary: ZEN-garden is an optimization model of energy systems and value chains.
 Author: Alissa Ganter, Johannes Burger, Francesco De Marco, Lukas Kunz, Lukas Schmidt-Engelbertz, Christoph Funke, Paolo Gabrielli, Giovanni Sansavini
 Author-email: Jacob Mannhardt <zen-garden@ethz.ch>
@@ -10,7 +10,7 @@ License-File: LICENSE.txt
 Requires-Dist: xarray
 Requires-Dist: cython
 Requires-Dist: numpy
-Requires-Dist: pandas
+Requires-Dist: pandas<3.0.0
 Requires-Dist: scipy
 Requires-Dist: pint
 Requires-Dist: tables
```
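Besides the version bump, the only dependency change is an upper bound on pandas (pandas<3.0.0). A quick way to confirm the declared pin on an installed copy, using only the standard library, is sketched below; the distribution name zen_garden is taken from the metadata above.

```python
from importlib.metadata import requires

# List the declared dependencies of the installed zen_garden distribution;
# for 2.9.0 this should include the "pandas<3.0.0" pin.
print([req for req in requires("zen_garden") if req.startswith("pandas")])
```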
{zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/RECORD

```diff
@@ -1,12 +1,13 @@
-zen_garden/__init__.py,sha256=
+zen_garden/__init__.py,sha256=vFEb5EpCLlaX_sxgqcwQge-HUzdq7nUGyXcol6F7dhE,722
 zen_garden/__main__.py,sha256=Zrz6zr81gXN3_NLPwWz3a-8HiiRgqGx_OzQHarBcxtQ,2508
-zen_garden/default_config.py,sha256=
-zen_garden/optimization_setup.py,sha256
-zen_garden/runner.py,sha256=
+zen_garden/default_config.py,sha256=vys7xf_s0GrH9SSLCkDF3im6FONb15JE73wLT9yZ3_o,8426
+zen_garden/optimization_setup.py,sha256=--nxXr28v2p1EHwqFIREXAhesUg5MfOTPwHWImFSOnM,35441
+zen_garden/runner.py,sha256=8wGfLhevbeMMf5qDDsLdfI3fO78KCHFwSk23_xDcBVc,6739
 zen_garden/utils.py,sha256=GFR-LBIkMA7pDQ46J07mr0ujqyIfNPlI0UVnvBDfHRw,60529
 zen_garden/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zen_garden/cli/zen_example_cli.py,sha256=hCYlAxM5BdphMzXA5LyHmTN3UiYIhMlG7G9sxYi0G_s,1411
-zen_garden/cli/zen_garden_cli.py,sha256=
+zen_garden/cli/zen_garden_cli.py,sha256=dU1vZMCHz8mZK1rJCEcxnvgmKN9ivN8XZf3jm7Juo1g,6455
+zen_garden/cli/zen_operation_cli.py,sha256=GVjkzkmtfhVCL2uFeR0AtQ3bbx5_g9a3WAw0eiiDy-4,2872
 zen_garden/cli/zen_visualization_cli.py,sha256=vSK2jbe6rNguXwLDMpU3-boreRtjjuDncCkZDCy7CSA,760
 zen_garden/model/__init__.py,sha256=7QIygxkUlNsOBbJPr5VdAxcJQCz6t_FSlD1uytUhuDE,48
 zen_garden/model/component.py,sha256=NeMV47l6Ec_ij_jburCx4cz6lI5k1-b6t0_DfuWFhhY,40488
@@ -24,18 +25,21 @@ zen_garden/model/technology/transport_technology.py,sha256=lxtlVYwlHe3NoDhNHPtvL
 zen_garden/postprocess/.gitkeep,sha256=5QMkCfv_4482uc1E0M66B8-bVZtyQmV0O2OeJJR4VJU,19
 zen_garden/postprocess/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zen_garden/postprocess/comparisons.py,sha256=uyEtV0Q8_YPzJi4b12DWiqYU-kKJz6I3Li-I7_6RURY,13239
-zen_garden/postprocess/postprocess.py,sha256=
+zen_garden/postprocess/postprocess.py,sha256=r8aQf1oXX2SwKj1tj_Wc186RQtAF-G4fKH_qBtTIFHg,28377
 zen_garden/postprocess/results/__init__.py,sha256=363lzC0uRJ5F42cOEhD9jxMPCioTt8WRt0qwZG0IizY,49
 zen_garden/postprocess/results/cache.py,sha256=t-1P7k9EVkSxR1oB1NIodux3JbfhBhxxwgRytcQ8vjQ,1592
 zen_garden/postprocess/results/results.py,sha256=S21h0Unf4l3a8WD0UFvn8IeMgDxKitZRaiX7iLvJnL0,41012
-zen_garden/postprocess/results/solution_loader.py,sha256=
+zen_garden/postprocess/results/solution_loader.py,sha256=VfmkEipLG5im3wm0aYA5qqg1ZvRutfZbvFeUvwZRoh8,31700
 zen_garden/preprocess/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zen_garden/preprocess/extract_input_data.py,sha256=PWKdel7s488SELseyAaoFkwXpzJU4MZg0pW0wO2ZHoI,51523
 zen_garden/preprocess/parameter_change_log.py,sha256=WNhLYTyuaFkUl_e4QH36W1chpqg00m7zg__PawPogAY,387
 zen_garden/preprocess/time_series_aggregation.py,sha256=5FeH4F0Hw37bY8HZscjPbYaOs3qn0OQOmmklUZg65tw,32644
-zen_garden/preprocess/unit_handling.py,sha256=
-zen_garden
-zen_garden
-zen_garden
-zen_garden-2.
-zen_garden-2.
+zen_garden/preprocess/unit_handling.py,sha256=08sFQMR6hjKos_ANgfGIc3BLm3lCMmpuu196WRBioHs,74585
+zen_garden/wrapper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+zen_garden/wrapper/operation_scenarios.py,sha256=qfEMqnEc5qA10lO-rVGzVqvz_6MBNDIjZlrCncCpNJo,8499
+zen_garden/wrapper/utils.py,sha256=YY-HSPQ2UPwws5PAumvBzq4tp-yUcbUqILkTyI_Z2TU,16352
+zen_garden-2.9.0.dist-info/entry_points.txt,sha256=xtDb4g8Mho0X9g55niFwiJlajN8XGdTrLkhgyBz5hhs,304
+zen_garden-2.9.0.dist-info/licenses/LICENSE.txt,sha256=_kEtxPe9gWOwMzdiy8nLzgABiPdMvUS0kaSCOIrEA_E,1101
+zen_garden-2.9.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+zen_garden-2.9.0.dist-info/METADATA,sha256=cbzz6CgzUOOrk4tYepRyEAWhbUA3gpi8vwYglryQ8wQ,5796
+zen_garden-2.9.0.dist-info/RECORD,,
```
{zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/entry_points.txt

```diff
@@ -1,5 +1,6 @@
 [console_scripts]
 zen-example=zen_garden.cli.zen_example_cli:create_zen_example_cli
 zen-garden=zen_garden.cli.zen_garden_cli:create_zen_garden_cli
+zen-operation=zen_garden.cli.zen_operation_cli:create_zen_operation_cli
 zen-visualization=zen_garden.cli.zen_visualization_cli:create_zen_visualization_cli
 
```
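The new zen-operation console script is registered next to the existing CLIs; at install time pip generates a launcher that imports zen_garden.cli.zen_operation_cli and calls create_zen_operation_cli. A sketch of how the registered entry point can be inspected and resolved programmatically (assumes Python 3.10+ for the group keyword):

```python
from importlib.metadata import entry_points

# Locate the console script added in 2.9.0 and resolve it to its callable,
# i.e. zen_garden.cli.zen_operation_cli:create_zen_operation_cli.
scripts = entry_points(group="console_scripts")
zen_operation = next(ep for ep in scripts if ep.name == "zen-operation")
cli_factory = zen_operation.load()  # the create_zen_operation_cli function
```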
{zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/WHEEL: file without changes
{zen_garden-2.8.13.dist-info → zen_garden-2.9.0.dist-info}/licenses/LICENSE.txt: file without changes