BackboneTools 2025.0.0a0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- backbonetools-2025.0.0a0/.gitignore +10 -0
- backbonetools-2025.0.0a0/PKG-INFO +16 -0
- backbonetools-2025.0.0a0/README.md +1 -0
- backbonetools-2025.0.0a0/backbonetools/__init__.py +0 -0
- backbonetools-2025.0.0a0/backbonetools/io/exec.py +175 -0
- backbonetools-2025.0.0a0/backbonetools/io/input.py +298 -0
- backbonetools-2025.0.0a0/backbonetools/io/output.py +562 -0
- backbonetools-2025.0.0a0/pyproject.toml +29 -0
- backbonetools-2025.0.0a0/setup.py +7 -0
- backbonetools-2025.0.0a0/uv.lock +514 -0
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: BackboneTools
|
|
3
|
+
Version: 2025.0.0a0
|
|
4
|
+
Summary: BackboneTools is a Python package for working with Backbone.
|
|
5
|
+
Project-URL: Repository, https://gitlab.ruhr-uni-bochum.de/ee/backbonetools.git
|
|
6
|
+
Author-email: David Huckebrink <david.huckebrink@rub.de>, Leonie Plaga <leonie.plaga@rub.de>
|
|
7
|
+
Requires-Dist: gamsapi
|
|
8
|
+
Requires-Dist: matplotlib
|
|
9
|
+
Requires-Dist: numpy
|
|
10
|
+
Requires-Dist: openpyxl
|
|
11
|
+
Requires-Dist: pandas
|
|
12
|
+
Requires-Dist: plotly
|
|
13
|
+
Requires-Dist: tsam
|
|
14
|
+
Description-Content-Type: text/markdown
|
|
15
|
+
|
|
16
|
+
hello world
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
hello world
|
|
File without changes
|
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
"""
|
|
2
|
+
backbonetools - create, modify and visualise input and output data for the esm backbone and run backbone
|
|
3
|
+
Copyright (C) 2020-2025 Leonie Plaga, David Huckebrink, Christine Nowak, Jan Mutke, Jonas Finke, Silke Johanndeiter, Sophie Pathe
|
|
4
|
+
|
|
5
|
+
This program is free software: you can redistribute it and/or modify
|
|
6
|
+
it under the terms of the GNU General Public License as published by
|
|
7
|
+
the Free Software Foundation, either version 3 of the License, or
|
|
8
|
+
(at your option) any later version.
|
|
9
|
+
|
|
10
|
+
This program is distributed in the hope that it will be useful,
|
|
11
|
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
12
|
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
13
|
+
GNU General Public License for more details.
|
|
14
|
+
|
|
15
|
+
You should have received a copy of the GNU General Public License
|
|
16
|
+
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
import os
|
|
20
|
+
import subprocess as sp
|
|
21
|
+
from glob import glob
|
|
22
|
+
from multiprocessing.pool import ThreadPool
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
|
|
25
|
+
from backbonetools.io.input import BackboneInput
|
|
26
|
+
from backbonetools.io.output import BackboneResult
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def run_backbone(
    input_dir,
    input_file_gdx,
    output_dir,
    output_file=None,
    backbone_dir=None,
    **kwargs,
):
    """Runs Backbone (via the ``gams`` CLI) with the specified arguments.

    Args:
        input_dir (str): Path to the input directory for Backbone to read configurations (e.g. `investInit.gms`)
        input_file_gdx (str): Path to the Backbone input file
        output_dir (str): Path to the output directory, where results shall be written
        output_file (str, optional): Name of the Backbone result file.
            Defaults to ``"<input_file_gdx stem>_result.gdx"``.
        backbone_dir (str, optional): Path to the Backbone Framework. Defaults to the path of the submodule in this repository.
        **kwargs: Forwarded to Backbone as ``--key=value`` command line options.

    Raises:
        ValueError: If any of the given paths/names contains a space
            (not supported when passing arguments to GAMS via subprocess).

    Returns:
        pathlib.Path: Absolute path to the Backbone result file.
    """

    # if not specified, derive from input_file_gdx
    if not output_file:
        output_file = Path(input_file_gdx).stem + "_result.gdx"

    # if not specified, set backbone_dir to submodule path
    if not backbone_dir:
        backbone_dir = Path(__file__).parent.parent.joinpath("backbone")

    # keyword arguments are parsed into backbone suitable form
    # e.g. dict(maxTotalCost=42) will be passed as "--maxTotalCost=42" to backbone
    keyword_args = [f"--{k}={v}" for k, v in kwargs.items()]

    # spaces in file names don't work through subprocesses
    if any(
        " " in str(file_or_dir)
        for file_or_dir in (input_dir, input_file_gdx, output_dir, output_file)
    ):
        raise ValueError(
            "Passing paths with spaces via subprocess to Gams is not supported yet."
        )

    # absolute paths are better for gams
    input_dir = Path(input_dir).absolute().as_posix()
    input_file_gdx = Path(input_file_gdx).absolute().as_posix()
    output_dir = Path(output_dir).absolute().as_posix()

    Path(input_dir).mkdir(exist_ok=True)
    Path(output_dir).mkdir(exist_ok=True)

    process_output = sp.run(
        [
            "gams",
            "Backbone.gms",
            f"--input_dir={input_dir}",
            f"--input_file_gdx={input_file_gdx}",
            f"--output_dir={output_dir}",
            f"--output_file={output_file}",
            *keyword_args,
        ],
        cwd=backbone_dir,
        stdout=sp.PIPE,
        stderr=sp.PIPE,
    )

    # write terminal output to files; context managers close the handles
    # deterministically (the previous open(...).write(...) leaked them)
    log_stem = Path(output_file).stem
    with open(f"{output_dir}/{log_stem}_terminal.log", "w") as terminal_log:
        terminal_log.write(process_output.stdout.decode())
    with open(f"{output_dir}/{log_stem}_error.log", "w") as error_log:
        error_log.write(process_output.stderr.decode())

    print("finished", output_file)
    return Path(output_dir).joinpath(output_file).absolute()
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def sensitivity(
    input_file_gdx,
    bb_input_dir,
    output_dir=None,
    parallel=True,
    param="CO2",
    percentages=None,
):
    """Runs a sensitivity study on the emission cap of a Backbone model.

    First runs Backbone once with the unmodified input (100% of `param`),
    then re-runs it with the emission cap scaled to each entry of
    `percentages` of the reference run's emissions.

    Args:
        input_file_gdx (str): Path to the Backbone input *.gdx file.
        bb_input_dir (str): Backbone input (configuration) directory.
        output_dir (str, optional): Result directory. Defaults to
            ``"<param>_outputs"`` next to `input_file_gdx`.
        parallel (bool, optional): Run the scaled scenarios in a thread pool.
        param (str, optional): Parameter to vary; only "CO2" is implemented.
        percentages (list[float], optional): Scaling factors for the
            emission cap. Defaults to [0.05, 0.1, 0.2, 0.4, 0.8].

    Raises:
        NotImplementedError: If `param` is not "CO2".

    Returns:
        list: Paths to the Backbone result files of the scaled runs.
    """
    # default assigned here to avoid a shared mutable default argument
    if percentages is None:
        percentages = [0.05, 0.1, 0.2, 0.4, 0.8]

    if param != "CO2":
        raise NotImplementedError(
            f"sensitivity not yet implemented for parameters other than {param}."
        )

    sensitivity_dir = Path(input_file_gdx).parent
    sensitivity_input_dir = f"{sensitivity_dir}/{param}_inputs"
    if not output_dir:
        sensitivity_output_dir = f"{sensitivity_dir}/{param}_outputs"
    else:
        sensitivity_output_dir = output_dir

    # create directories
    Path(sensitivity_input_dir).mkdir(exist_ok=True)
    Path(sensitivity_output_dir).mkdir(exist_ok=True)

    bb_in = BackboneInput(input_file_gdx)
    bb_opt_fn = bb_in._path.stem + f"_100{param}_result.gdx"

    # run first optimisation i.e. 100% of `param`
    bb_out_path = run_backbone(
        bb_input_dir,
        input_file_gdx=bb_in._path,
        output_dir=sensitivity_output_dir,
        output_file=bb_opt_fn,
    )

    bb_result = BackboneResult(bb_out_path)
    # reference emissions of the unconstrained run, scaled per percentage
    emission_lims = bb_result.r_emission()["Val"].values * percentages

    for percentage, em_lim in zip(percentages, emission_lims):
        new_db = bb_in.update_gams_parameter_in_db(
            "p_groupPolicyEmission",
            ["emission group", "emissionCap", "CO2"],
            value=em_lim,
        )
        out_fn = Path(bb_in._path).stem + f"_{percentage*100:02.0f}{param}"
        new_db.export(f"{sensitivity_input_dir}/{out_fn}.gdx")

    input_files = list(glob(f"{sensitivity_input_dir}/*.gdx"))
    result_paths = []

    if parallel:
        # leave one core free; guard against cpu_count() returning None or 1,
        # which would otherwise crash ThreadPool with 0 (or TypeError)
        threads = max(1, (os.cpu_count() or 2) - 1)
        with ThreadPool(threads) as pool:
            jobs = [
                pool.apply_async(
                    run_backbone, (bb_input_dir, file, sensitivity_output_dir)
                )
                for file in input_files
            ]
            result_paths = [job.get() for job in jobs]
    else:
        for file in input_files:
            path = run_backbone(bb_input_dir, file, sensitivity_output_dir)
            result_paths.append(path)

    return result_paths
|
|
@@ -0,0 +1,298 @@
|
|
|
1
|
+
"""
|
|
2
|
+
backbonetools - create, modify and visualise input and output data for the esm backbone and run backbone
|
|
3
|
+
Copyright (C) 2020-2025 Leonie Plaga, David Huckebrink, Christine Nowak, Jan Mutke, Jonas Finke, Silke Johanndeiter, Sophie Pathe
|
|
4
|
+
|
|
5
|
+
This program is free software: you can redistribute it and/or modify
|
|
6
|
+
it under the terms of the GNU General Public License as published by
|
|
7
|
+
the Free Software Foundation, either version 3 of the License, or
|
|
8
|
+
(at your option) any later version.
|
|
9
|
+
|
|
10
|
+
This program is distributed in the hope that it will be useful,
|
|
11
|
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
12
|
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
13
|
+
GNU General Public License for more details.
|
|
14
|
+
|
|
15
|
+
You should have received a copy of the GNU General Public License
|
|
16
|
+
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
import subprocess as sp
|
|
20
|
+
import warnings
|
|
21
|
+
from functools import partial, reduce
|
|
22
|
+
from io import StringIO
|
|
23
|
+
from itertools import groupby
|
|
24
|
+
from pathlib import Path
|
|
25
|
+
|
|
26
|
+
import pandas as pd
|
|
27
|
+
import tsam.timeseriesaggregation as tsam
|
|
28
|
+
from gams import GamsDatabase, GamsException, GamsSetRecord, GamsWorkspace
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class BackboneInput:
    """Wrapper around a Backbone input *.gdx file.

    Loads the GDX into a GAMS database, exposes every symbol as a callable
    attribute returning a DataFrame (e.g. ``bb_input.ts_influx()``) and
    offers helpers to modify parameters and aggregate time series.
    """

    def __init__(self, path):
        """Open the GDX file at `path` into a GAMS database.

        Args:
            path (str | Path): Path to a Backbone input *.gdx file.

        Raises:
            FileNotFoundError: If `path` does not exist.
        """
        # this section is redundant with results.BackboneResult
        ws = GamsWorkspace(".")

        self._path = Path(path).absolute()

        if not self._path.exists():
            raise FileNotFoundError(self._path)

        self.gams_db = ws.add_database_from_gdx(self._path.as_posix())

        self.set_symbols_as_attribs()

    @property
    def input_symbols(self):
        """Names of all symbols in the GDX database (computed once, cached)."""
        if hasattr(self, "_symbols"):
            return self._symbols
        # get all symbols from the gams database
        # iterating gams_db raises an exception at the last index
        symbols = []
        try:
            for p in self.gams_db:
                symbols.append(p.name)
        except GamsException as ge:
            # the expected end-of-iteration error mentions "out of range"
            # together with the symbol count; anything else is a real error
            if "out of range" in str(ge) and str(self.gams_db.number_symbols) in str(
                ge
            ):
                pass
            else:
                # bare raise preserves the original traceback
                raise
        self._symbols = symbols
        return symbols

    def set_symbols_as_attribs(self):
        """
        Sets all input symbols as methods. Allows for easy access like
        `bb_input.p_gnu_io()` or `bb_input.ts_influx()`"""
        for symbol in self.input_symbols:
            setattr(self, symbol, partial(self.param_as_df_gdxdump, symbol))

    def param_as_df_gdxdump(
        self, symbol, encoding="1252", convert_time=True
    ) -> pd.DataFrame:
        """Use the 'gdxdump' GAMS utility via subprocess to convert a parameter into a pd.DataFrame.

        This is sometimes beneficial, to circumvent decoding errors.

        Args:
            symbol (str): Name of the GDX symbol to dump.
            encoding (str, optional): Codec used to decode gdxdump's output.
            convert_time (bool, optional): Convert the time column (values
                like "t000123") to int and rename it to "t".

        Returns:
            pd.DataFrame: Long-format data; value column is named "Val".
        """
        gdxdump = sp.run(
            ["gdxdump", self._path, "format", "csv", "symb", symbol], stdout=sp.PIPE
        )
        csv_data = gdxdump.stdout.decode(encoding=encoding)
        header = csv_data.partition("\n")[0]

        # only the "Val" column is numeric; everything else stays str
        header = [x.strip('"') for x in header.split(",")]
        dtypes = [str if x != "Val" else float for x in header]
        dtypes = dict(zip(header, dtypes))
        df = pd.read_csv(StringIO(csv_data), dtype=dtypes, na_values="Eps")

        if df.empty:
            return df

        # converts the time column to int by omitting the first character
        try:
            if convert_time:
                if "t" in df.columns:
                    df["t"] = df["t"].apply(lambda t: int(t[1:]))
                else:
                    # for backbone inputs, the index is ["Dim1", "Dim2", ... , "Val"], but there might be a time column
                    first_row = df.loc[0]
                    is_time = first_row.apply(lambda x: str(x).startswith("t000"))

                    if any(is_time):
                        # any is necessary, otherwise *.idxmax() simply returns first element
                        time_col = is_time.idxmax()  # should be something like "Dim4"
                        df[time_col] = df[time_col].apply(lambda t: int(t[1:]))
                        df.rename({time_col: "t"}, axis=1, inplace=True)
        except Exception as e:
            # best-effort conversion: report the failure but still return
            # the unconverted frame (previously the exception was dropped)
            print(f"Error during time conversion for {symbol=}, {df=}: {e}")

        return df

    def update_gams_parameter_in_db(
        self,
        param_name: str,
        indices: list,
        value: float = None,
        apply_percentage: float = None,
    ) -> GamsDatabase:
        """Set or scale one record of a GAMS symbol in the loaded database.

        Exactly one of `value` and `apply_percentage` must be given:
        `value` overwrites the record (set text for sets, numeric value for
        parameters); `apply_percentage` multiplies the current value.

        Args:
            param_name: Symbol name, e.g. "p_groupPolicyEmission".
            indices: Record key, e.g. ["emission group", "emissionCap", "CO2"].
            value: New value (or set text) for the record.
            apply_percentage: Factor applied to the record's current value.

        Raises:
            ValueError: If neither `value` nor `apply_percentage` is passed.

        Returns:
            GamsDatabase: The (shared, mutated) database instance.
        """
        # note that you are always only working on the same instance of the database.
        # retrieve the parameter i.e. "p_groupPolicyEmission"
        parameter = self.gams_db.get_symbol(param_name)

        # retrieve the record from that parameter i.e. ["emission group", "emissionCap", "CO2"]
        try:
            record = parameter.find_record(indices)
        except GamsException as ge:
            if "Cannot find" in ge.value:
                # This is potentially dangerous since any index value can be added.
                # However, in the case of a bad index, GAMS should throw a DomainViolationError
                warnings.warn(
                    f"Warning: Caught 'GamsException: {ge.value}'.\n\tSetting {indices} to {value}"
                )
                record = parameter.add_record(indices)
            else:
                raise

        # BUGFIX: previously `type(value) is not None`, which is always True,
        # so calling without value and without apply_percentage silently did
        # record.set_value(None) instead of raising ValueError
        if not apply_percentage and value is not None:
            if isinstance(record, GamsSetRecord):
                # sets only have text i.e. "Y"
                record.set_text(value)
            else:
                record.set_value(value)
        elif apply_percentage:
            current_val = record.get_value()
            record.set_value(current_val * apply_percentage)
        else:
            raise ValueError("one of 'value' and 'apply_percentage' must be passed!")
        return self.gams_db

    def aggregate_timeseries(
        self, export_path, hours_per_period=24 * 7, n_periods=3, err_indicators=None
    ):
        """Aggregate the time-series symbols of this input via tsam and
        write a new *.gdx with typical periods modelled as Backbone samples.

        Args:
            export_path (str): Destination path of the aggregated *.gdx.
            hours_per_period (int, optional): Length of one typical period.
            n_periods (int, optional): Number of typical periods.
            err_indicators (str, optional): "print" to print tsam accuracy
                indicators, None to stay silent.

        Raises:
            NotImplementedError: For unsupported `err_indicators` values.
        """
        if err_indicators not in ("print", None):
            raise NotImplementedError(
                f" Only one of ('print',None) is supported for `err_indicators`, but {err_indicators=} was passed."
            )

        ts_params = ["ts_influx", "ts_node", "ts_unit", "ts_cf"]

        ts_frames = []
        drop_frames = []
        # retrieve and transform timeseries data (not exhaustive)
        for p in ts_params:
            p_df = self.param_as_df_gdxdump(p)
            if p_df.empty:
                drop_frames.append(p)
                continue
            # index of the time column
            time_col = list(p_df.columns).index("t")

            # all the previous columns (time is usually the penultimate column)
            index_cols = list(p_df.columns[:time_col])

            # transform from long to wide dataformat
            p_df = p_df.pivot(index="t", columns=index_cols, values="Val")

            # join column names from different levels and prepend param_name
            # these will come in handy, when writing aggregated data to the *.gdx
            # col is tuple, thus must be converted to a list to be mutable
            p_df.columns = ["|".join([p] + list(col)) for col in p_df.columns]

            ts_frames.append(p_df)
        # merge ts_frames to a single dataframe
        ts_df = reduce(
            lambda left, right: pd.merge(
                left, right, left_index=True, right_index=True, how="outer"
            ),
            ts_frames,
        )

        # create and set a datetimeindex
        # NOTE(review): assumes one full non-leap year of hourly data — confirm
        dateindex = pd.date_range(start="2020-01-01 00:00", periods=8760, freq="h")
        ts_df.set_index(dateindex, inplace=True)
        ts_df.fillna(0, inplace=True)

        # create aggregation
        aggregation = tsam.TimeSeriesAggregation(
            ts_df,
            hoursPerPeriod=hours_per_period,
            noTypicalPeriods=n_periods,
            clusterMethod="adjacent_periods",
            representationMethod="distributionAndMinMaxRepresentation",
        )

        # calculate the typical periods
        typeriods = aggregation.createTypicalPeriods()

        if err_indicators == "print":
            print(aggregation.accuracyIndicators())

        # get the sequence and number of occurrences
        period_sequence = [
            (k, sum(1 for i in g)) for k, g in groupby(aggregation.clusterOrder)
        ]

        # concatenate
        ts_input_data = pd.concat(
            [typeriods.loc[period_no, :] for period_no, _ in period_sequence]
        ).reset_index(drop=True)

        # write data to gdx
        for col_name in ts_input_data.columns:
            # recreate the indices from the column names
            bb_indices = col_name.split("|")

            for i, val in enumerate(ts_input_data[col_name].values):
                timestep = f"t{i + 1:06}"
                self.update_gams_parameter_in_db(
                    bb_indices[0], bb_indices[1:] + [timestep], val
                )

        # add samples to .gdx where necessary
        samples_and_lengths = [
            {
                "name": f"s{i:03}",
                "length": hours_per_period,
                "weight": tup[1],
                "cluster_no": tup[0],
            }
            for i, tup in enumerate(period_sequence)
        ]
        sample_df = pd.DataFrame.from_records(samples_and_lengths, index="name")
        sample_names = sample_df.index.to_list()

        if "p_s_discountFactor" not in self.input_symbols:
            self.gams_db.add_parameter_dc("p_s_discountFactor", ["s"])
        for sample in sample_df.index:
            self.update_gams_parameter_in_db("p_s_discountFactor", [sample], value=1)
            # sGroup is indexed over ["sample", "group"]
            self.update_gams_parameter_in_db(
                "sGroup", [sample, "emission group"], value="Y"
            )
        assert "emission group" in self.group().values

        if len(self.group().values) > 2:
            print(
                f"Warning: samples have been added for group 'emission group' but more are present in the .gdx:{self.group().values}"
            )

        # bind samples in gnss_bound
        # get nodes that have a state
        p_gn = self.p_gn()
        nodes_w_state = p_gn.query("Dim3=='energyStoredPerUnitOfState'")[
            ["Dim1", "Dim2"]
        ]
        gnss_df = pd.concat([nodes_w_state] * len(sample_names))
        gnss_df.sort_values(by="Dim2", inplace=True)

        # chain each node's state cyclically through the samples:
        # s000 -> s001 -> ... -> s000
        from_sample = []
        to_sample = []
        for i in range(len(gnss_df)):
            from_sample.append(sample_names[i % len(sample_names)])
            to_sample.append(sample_names[(i + 1) % len(sample_names)])

        gnss_df["from_sample"] = from_sample
        gnss_df["to_sample"] = to_sample

        # clear records in bounds, because they might be invalid post-aggregation
        gnss_bounds = self.gams_db.get_symbol("gnss_bound")
        gnss_bounds.clear()

        for gnss_indices in gnss_df.values:
            self.update_gams_parameter_in_db(
                "gnss_bound", list(gnss_indices), value="Y"
            )

        print("Dont forget to update your bb input configuration!!")
        for i, tup in enumerate(period_sequence):
            print(
                f"s{i:03} - cluster:{tup[0]} - weight:{tup[1]},\tlength: {hours_per_period}"
            )
        sample_df.to_csv(str(export_path) + "_sample_weights.csv")

        self.gams_db.export(export_path)
|