emhass 0.9.0__py3-none-any.whl → 0.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emhass/command_line.py +189 -284
- emhass/forecast.py +10 -16
- emhass/machine_learning_regressor.py +14 -53
- emhass/retrieve_hass.py +22 -95
- emhass/utils.py +11 -26
- {emhass-0.9.0.dist-info → emhass-0.9.1.dist-info}/METADATA +15 -12
- {emhass-0.9.0.dist-info → emhass-0.9.1.dist-info}/RECORD +11 -11
- {emhass-0.9.0.dist-info → emhass-0.9.1.dist-info}/LICENSE +0 -0
- {emhass-0.9.0.dist-info → emhass-0.9.1.dist-info}/WHEEL +0 -0
- {emhass-0.9.0.dist-info → emhass-0.9.1.dist-info}/entry_points.txt +0 -0
- {emhass-0.9.0.dist-info → emhass-0.9.1.dist-info}/top_level.txt +0 -0
emhass/command_line.py
CHANGED
@@ -24,12 +24,12 @@ from emhass.machine_learning_regressor import MLRegressor
 from emhass import utils
 
 
-def set_input_data_dict(emhass_conf: dict, costfun: str,
-
-
+def set_input_data_dict(emhass_conf: dict, costfun: str,
+                        params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
+                        get_data_from_file: Optional[bool] = False) -> dict:
     """
     Set up some of the data needed for the different actions.
-
+
     :param emhass_conf: Dictionary containing the needed emhass paths
     :type emhass_conf: dict
     :param costfun: The type of cost function to use for optimization problem
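Note: the 0.9.1 signature keeps the same parameters as 0.9.0, only reflowed. A minimal sketch of a call (the paths, the simplified params payload and the import are assumptions for illustration, not taken from this diff; a real call needs a reachable Home Assistant instance or get_data_from_file=True):

import json
import logging
import pathlib
from emhass.command_line import set_input_data_dict

# Hypothetical paths; emhass_conf carries config_path, data_path and root_path.
emhass_conf = {'config_path': pathlib.Path('/app/config_emhass.yaml'),
               'data_path': pathlib.Path('/app/data'),
               'root_path': pathlib.Path('/app')}
logger = logging.getLogger(__name__)
params = json.dumps({'passed_data': {}})  # simplified; normally assembled by emhass.utils

input_data_dict = set_input_data_dict(emhass_conf, 'profit', params, None,
                                      'dayahead-optim', logger, get_data_from_file=False)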
@@ -51,25 +51,18 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
     logger.info("Setting up needed data")
     # Parsing yaml
     retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(
-        emhass_conf, use_secrets=not(get_data_from_file), params=params)
+        emhass_conf, use_secrets=not (get_data_from_file), params=params)
     # Treat runtimeparams
     params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
-        runtimeparams,
-        params,
-        retrieve_hass_conf,
-        optim_conf,
-        plant_conf,
-        set_type,
-        logger,
-    )
+        runtimeparams, params, retrieve_hass_conf, optim_conf, plant_conf, set_type, logger)
     # Define main objects
-    rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
-                      retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
+    rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
+                      retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
                       params, emhass_conf, logger, get_data_from_file=get_data_from_file)
     fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
                     params, emhass_conf, logger, get_data_from_file=get_data_from_file)
-    opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
-                       fcst.var_load_cost, fcst.var_prod_price,
+    opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
+                       fcst.var_load_cost, fcst.var_prod_price,
                        costfun, emhass_conf, logger)
     # Perform setup based on type of action
     if set_type == "perfect-optim":
@@ -79,25 +72,22 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
             rh.df_final, days_list, var_list = pickle.load(inp)
             retrieve_hass_conf['var_load'] = str(var_list[0])
             retrieve_hass_conf['var_PV'] = str(var_list[1])
-            retrieve_hass_conf['var_interp'] = [
-
+            retrieve_hass_conf['var_interp'] = [
+                retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+            retrieve_hass_conf['var_replace_zero'] = [
+                retrieve_hass_conf['var_PV']]
         else:
-            days_list = utils.get_days_list(
-
-
-
-            minimal_response=False,
-            significant_changes_only=False,
-        ):
+            days_list = utils.get_days_list(
+                retrieve_hass_conf["days_to_retrieve"])
+            var_list = [retrieve_hass_conf["var_load"],
+                        retrieve_hass_conf["var_PV"]]
+            if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
                 return False
-        if not rh.prepare_data(
-
-
-
-
-            var_interp=retrieve_hass_conf["var_interp"],
-        ):
+        if not rh.prepare_data(retrieve_hass_conf["var_load"],
+                               load_negative=retrieve_hass_conf["load_negative"],
+                               set_zero_min=retrieve_hass_conf["set_zero_min"],
+                               var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+                               var_interp=retrieve_hass_conf["var_interp"]):
             return False
         df_input_data = rh.df_final.copy()
         # What we don't need for this type of action
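Note: in the get_data_from_file branch above, the pickle payload is the tuple (df_final, days_list, var_list), with var_list ordered as [var_load, var_PV]. A standalone sketch of that round trip (the file name and sensor names are illustrative assumptions):

import pickle
import pandas as pd

idx = pd.date_range("2024-01-01", periods=48, freq="30min")
df_final = pd.DataFrame({"sensor.power_load_no_var_loads": 500.0,
                         "sensor.power_photovoltaics": 250.0}, index=idx)
days_list = pd.date_range("2024-01-01", periods=2, freq="D")
var_list = ["sensor.power_load_no_var_loads", "sensor.power_photovoltaics"]

# Write and read back the same (df_final, days_list, var_list) tuple.
with open("test_df_final.pkl", "wb") as outp:
    pickle.dump((df_final, days_list, var_list), outp, pickle.HIGHEST_PROTOCOL)
with open("test_df_final.pkl", "rb") as inp:
    df_final, days_list, var_list = pickle.load(inp)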
@@ -105,30 +95,23 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
     elif set_type == "dayahead-optim":
         # Get PV and load forecasts
         df_weather = fcst.get_weather_forecast(
-            method=optim_conf["weather_forecast_method"]
-        )
+            method=optim_conf["weather_forecast_method"])
         P_PV_forecast = fcst.get_power_from_weather(df_weather)
-        P_load_forecast = fcst.get_load_forecast(
-
-
+        P_load_forecast = fcst.get_load_forecast(
+            method=optim_conf['load_forecast_method'])
+        if isinstance(P_load_forecast, bool) and not P_load_forecast:
+            logger.error(
+                "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
             return False
-        df_input_data_dayahead = pd.DataFrame(
-
-
-            columns=["P_PV_forecast", "P_load_forecast"],
-        )
+        df_input_data_dayahead = pd.DataFrame(np.transpose(np.vstack(
+            [P_PV_forecast.values, P_load_forecast.values])), index=P_PV_forecast.index,
+            columns=["P_PV_forecast", "P_load_forecast"])
         df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
         params = json.loads(params)
-        if (
-            "prediction_horizon" in params["passed_data"]
-            and params["passed_data"]["prediction_horizon"] is not None
-        ):
+        if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
             prediction_horizon = params["passed_data"]["prediction_horizon"]
             df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
-                df_input_data_dayahead.index[0]
-                    prediction_horizon - 1
-                ]
-            ]
+                df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
         # What we don't need for this type of action
         df_input_data, days_list = None, None
     elif set_type == "naive-mpc-optim":
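Note: the rewritten DataFrame construction stacks the two forecast series column-wise; np.vstack yields shape (2, n) and np.transpose flips it to (n, 2). A standalone sketch with made-up values:

import numpy as np
import pandas as pd

idx = pd.date_range("2024-01-01", periods=4, freq="30min")
P_PV_forecast = pd.Series([0.0, 150.0, 420.0, 300.0], index=idx)
P_load_forecast = pd.Series([500.0, 480.0, 510.0, 495.0], index=idx)

# One row per timestamp, one column per series.
df = pd.DataFrame(np.transpose(np.vstack([P_PV_forecast.values, P_load_forecast.values])),
                  index=P_PV_forecast.index, columns=["P_PV_forecast", "P_load_forecast"])
print(df.shape)  # (4, 2)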
@@ -138,53 +121,43 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
             rh.df_final, days_list, var_list = pickle.load(inp)
             retrieve_hass_conf['var_load'] = str(var_list[0])
             retrieve_hass_conf['var_PV'] = str(var_list[1])
-            retrieve_hass_conf['var_interp'] = [
-
+            retrieve_hass_conf['var_interp'] = [
+                retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+            retrieve_hass_conf['var_replace_zero'] = [
+                retrieve_hass_conf['var_PV']]
         else:
             days_list = utils.get_days_list(1)
-            var_list = [retrieve_hass_conf["var_load"],
-
-
-                var_list,
-                minimal_response=False,
-                significant_changes_only=False,
-            ):
+            var_list = [retrieve_hass_conf["var_load"],
+                        retrieve_hass_conf["var_PV"]]
+            if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
                 return False
-        if not rh.prepare_data(
-
-
-
-
-            var_interp=retrieve_hass_conf["var_interp"],
-        ):
+        if not rh.prepare_data(retrieve_hass_conf["var_load"],
+                               load_negative=retrieve_hass_conf["load_negative"],
+                               set_zero_min=retrieve_hass_conf["set_zero_min"],
+                               var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+                               var_interp=retrieve_hass_conf["var_interp"]):
             return False
         df_input_data = rh.df_final.copy()
         # Get PV and load forecasts
-        df_weather = fcst.get_weather_forecast(
-
-
-
-
+        df_weather = fcst.get_weather_forecast(
+            method=optim_conf['weather_forecast_method'])
+        P_PV_forecast = fcst.get_power_from_weather(
+            df_weather, set_mix_forecast=True, df_now=df_input_data)
+        P_load_forecast = fcst.get_load_forecast(
+            method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
+        if isinstance(P_load_forecast, bool) and not P_load_forecast:
+            logger.error(
+                "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
             return False
         df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
         df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
         df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
         params = json.loads(params)
-        if (
-            "prediction_horizon" in params["passed_data"]
-            and params["passed_data"]["prediction_horizon"] is not None
-        ):
+        if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
             prediction_horizon = params["passed_data"]["prediction_horizon"]
             df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
-                df_input_data_dayahead.index[0]
-
-                ]
-            ]
-    elif (
-        set_type == "forecast-model-fit"
-        or set_type == "forecast-model-predict"
-        or set_type == "forecast-model-tune"
-    ):
+                df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
+    elif (set_type == "forecast-model-fit" or set_type == "forecast-model-predict" or set_type == "forecast-model-tune"):
         df_input_data_dayahead = None
         P_PV_forecast, P_load_forecast = None, None
         params = json.loads(params)
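Note: the collapsed one-line slice works because pandas label-based slicing on a DatetimeIndex is inclusive at both ends, so index[0] through index[prediction_horizon - 1] keeps exactly prediction_horizon rows. A standalone sketch:

import pandas as pd

idx = pd.date_range("2024-01-01", periods=10, freq="30min")
df = pd.DataFrame({"P_load_forecast": range(10)}, index=idx)
prediction_horizon = 4

out = df[df.index[0]: df.index[prediction_horizon - 1]]
assert len(out) == prediction_horizon  # label slices include both endpoints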
@@ -198,18 +171,14 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
             filename_path = emhass_conf['data_path'] / filename
             with open(filename_path, 'rb') as inp:
                 df_input_data, _ = pickle.load(inp)
-            df_input_data = df_input_data[
-                df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve) :
-            ]
+            df_input_data = df_input_data[df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve):]
         else:
             days_list = utils.get_days_list(days_to_retrieve)
             var_list = [var_model]
             if not rh.get_data(days_list, var_list):
                 return False
             df_input_data = rh.df_final.copy()
-
     elif set_type == "regressor-model-fit" or set_type == "regressor-model-predict":
-
         df_input_data, df_input_data_dayahead = None, None
         P_PV_forecast, P_load_forecast = None, None
         params = json.loads(params)
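Note: the one-liner keeps only the trailing days_to_retrieve days of the pickled history. A standalone sketch of the pd.offsets.Day slice (index values are made up):

import pandas as pd

idx = pd.date_range("2024-01-01", periods=96, freq="h")
df_input_data = pd.DataFrame({"sensor_value": range(96)}, index=idx)
days_to_retrieve = 2

# Slice from (last timestamp - 2 days) to the end of the frame.
tail = df_input_data[df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve):]
print(tail.index[0], tail.index[-1])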
@@ -225,32 +194,26 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
         if get_data_from_file:
             base_path = emhass_conf["data_path"] # + "/data"
             filename_path = pathlib.Path(base_path) / csv_file
-
         else:
             filename_path = emhass_conf["data_path"] / csv_file
-
         if filename_path.is_file():
             df_input_data = pd.read_csv(filename_path, parse_dates=True)
-
         else:
-            logger.error("The CSV file " + csv_file +
+            logger.error("The CSV file " + csv_file +
+                         " was not found in path: " + str(emhass_conf["data_path"]))
             return False
-            #raise ValueError("The CSV file " + csv_file + " was not found.")
+            # raise ValueError("The CSV file " + csv_file + " was not found.")
         required_columns = []
         required_columns.extend(features)
         required_columns.append(target)
         if timestamp is not None:
             required_columns.append(timestamp)
-
         if not set(required_columns).issubset(df_input_data.columns):
-            logger.error(
+            logger.error(
+                "The cvs file does not contain the required columns.")
             msg = f"CSV file should contain the following columns: {', '.join(required_columns)}"
             logger.error(msg)
             return False
-            #raise ValueError(
-            #    msg,
-            #)
-
     elif set_type == "publish-data":
         df_input_data, df_input_data_dayahead = None, None
         P_PV_forecast, P_load_forecast = None, None
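Note: the column check uses set.issubset, so the order of the CSV columns does not matter. A standalone sketch (the column names are hypothetical):

import pandas as pd

df_input_data = pd.DataFrame({"degreeday": [5.2, 6.1], "solar_rad": [210, 180],
                              "heating_hours": [3.5, 4.0],
                              "timestamp": ["2024-01-01", "2024-01-02"]})
features = ["degreeday", "solar_rad"]
target = "heating_hours"
timestamp = "timestamp"

required_columns = [*features, target, timestamp]
if not set(required_columns).issubset(df_input_data.columns):
    raise SystemExit(f"CSV file should contain the following columns: {', '.join(required_columns)}")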
@@ -262,7 +225,6 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
         df_input_data, df_input_data_dayahead = None, None
         P_PV_forecast, P_load_forecast = None, None
         days_list = None
-
     # The input data dictionary to return
     input_data_dict = {
         'emhass_conf': emhass_conf,
@@ -281,12 +243,9 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
     return input_data_dict
 
 
-def perfect_forecast_optim(
-
-
-    save_data_to_file: Optional[bool] = True,
-    debug: Optional[bool] = False,
-) -> pd.DataFrame:
+def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
+                           save_data_to_file: Optional[bool] = True,
+                           debug: Optional[bool] = False) -> pd.DataFrame:
     """
     Perform a call to the perfect forecast optimization routine.
 
@@ -305,33 +264,33 @@ def perfect_forecast_optim(
     logger.info("Performing perfect forecast optimization")
     # Load cost and prod price forecast
     df_input_data = input_data_dict['fcst'].get_load_cost_forecast(
-        input_data_dict['df_input_data'],
+        input_data_dict['df_input_data'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'],
         list_and_perfect=True)
-    if isinstance(df_input_data,bool) and not df_input_data:
+    if isinstance(df_input_data, bool) and not df_input_data:
         return False
     df_input_data = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'],
         list_and_perfect=True)
-    if isinstance(df_input_data,bool) and not df_input_data:
-        return False
-    opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(
+    if isinstance(df_input_data, bool) and not df_input_data:
+        return False
+    opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(
+        df_input_data, input_data_dict['days_list'])
     # Save CSV file for analysis
     if save_data_to_file:
-        filename = "opt_res_perfect_optim_" +
+        filename = "opt_res_perfect_optim_" + \
+            input_data_dict["costfun"] + ".csv"
     else: # Just save the latest optimization results
         filename = "opt_res_latest.csv"
     if not debug:
-        opt_res.to_csv(
+        opt_res.to_csv(
+            input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res
 
 
-def dayahead_forecast_optim(
-
-
-    save_data_to_file: Optional[bool] = False,
-    debug: Optional[bool] = False,
-) -> pd.DataFrame:
+def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
+                            save_data_to_file: Optional[bool] = False,
+                            debug: Optional[bool] = False) -> pd.DataFrame:
     """
     Perform a call to the day-ahead optimization routine.
 
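Note: the isinstance(result, bool) guard seen here and in the other optimization entry points exists because these forecast helpers return either a DataFrame or False, and evaluating a DataFrame directly in an if raises ValueError. A standalone sketch of the convention:

import pandas as pd

def forecast_step(ok: bool):
    # Mirrors the module's convention: DataFrame on success, False on failure.
    return pd.DataFrame({"load_cost": [0.2, 0.3]}) if ok else False

result = forecast_step(ok=False)
if isinstance(result, bool) and not result:
    print("forecast step failed, aborting")  # a bare `if not result:` would raise for a DataFrame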
@@ -352,13 +311,13 @@ def dayahead_forecast_optim(
     df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
         input_data_dict['df_input_data_dayahead'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
-    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
-        return False
+    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+        return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
-        df_input_data_dayahead,
+        df_input_data_dayahead,
         method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
-    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
-        return False
+    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+        return False
     opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
         df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
     # Save CSV file for publish_data
@@ -370,16 +329,14 @@ def dayahead_forecast_optim(
     else: # Just save the latest optimization results
         filename = "opt_res_latest.csv"
     if not debug:
-        opt_res_dayahead.to_csv(
+        opt_res_dayahead.to_csv(
+            input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res_dayahead
 
 
-def naive_mpc_optim(
-
-
-    save_data_to_file: Optional[bool] = False,
-    debug: Optional[bool] = False,
-) -> pd.DataFrame:
+def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
+                    save_data_to_file: Optional[bool] = False,
+                    debug: Optional[bool] = False) -> pd.DataFrame:
     """
     Perform a call to the naive Model Predictive Controller optimization routine.
 
@@ -400,12 +357,12 @@ def naive_mpc_optim(
     df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
         input_data_dict['df_input_data_dayahead'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
-    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
-        return False
+    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+        return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
-    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
-        return False
+    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+        return False
     # The specifics params for the MPC at runtime
     prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
     soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
@@ -414,16 +371,9 @@ def naive_mpc_optim(
     def_start_timestep = input_data_dict["params"]["passed_data"]["def_start_timestep"]
     def_end_timestep = input_data_dict["params"]["passed_data"]["def_end_timestep"]
     opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
-        df_input_data_dayahead,
-
-
-        prediction_horizon,
-        soc_init,
-        soc_final,
-        def_total_hours,
-        def_start_timestep,
-        def_end_timestep,
-    )
+        df_input_data_dayahead, input_data_dict["P_PV_forecast"], input_data_dict["P_load_forecast"],
+        prediction_horizon, soc_init, soc_final, def_total_hours,
+        def_start_timestep, def_end_timestep)
     # Save CSV file for publish_data
     if save_data_to_file:
         today = datetime.now(timezone.utc).replace(
@@ -433,13 +383,13 @@ def naive_mpc_optim(
     else: # Just save the latest optimization results
         filename = "opt_res_latest.csv"
     if not debug:
-        opt_res_naive_mpc.to_csv(
+        opt_res_naive_mpc.to_csv(
+            input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res_naive_mpc
 
 
-def forecast_model_fit(
-
-) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
+def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
+                       debug: Optional[bool] = False) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
     """Perform a forecast model fit from training data retrieved from Home Assistant.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -459,7 +409,8 @@ def forecast_model_fit(
     split_date_delta = input_data_dict['params']['passed_data']['split_date_delta']
     perform_backtest = input_data_dict['params']['passed_data']['perform_backtest']
     # The ML forecaster object
-    mlf = MLForecaster(data, model_type, var_model, sklearn_model,
+    mlf = MLForecaster(data, model_type, var_model, sklearn_model,
+                       num_lags, input_data_dict['emhass_conf'], logger)
     # Fit the ML model
     df_pred, df_pred_backtest = mlf.fit(
         split_date_delta=split_date_delta, perform_backtest=perform_backtest
@@ -473,13 +424,10 @@ def forecast_model_fit(
     return df_pred, df_pred_backtest, mlf
 
 
-def forecast_model_predict(
-
-
-
-    debug: Optional[bool] = False,
-    mlf: Optional[MLForecaster] = None,
-) -> pd.DataFrame:
+def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
+                           use_last_window: Optional[bool] = True,
+                           debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
+                           ) -> pd.DataFrame:
     r"""Perform a forecast model predict using a previously trained skforecast model.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -539,40 +487,24 @@ def forecast_model_predict(
         input_data_dict["retrieve_hass_conf"]["time_zone"]
     ).replace(second=0, microsecond=0)
     if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
-        idx_closest = predictions.index.get_indexer(
-            [now_precise], method="nearest"
-        )[0]
+        idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
     elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
-        idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[
-            0
-        ]
+        idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[0]
     elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
-        idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[
-            0
-        ]
+        idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[0]
     if idx_closest == -1:
-        idx_closest = predictions.index.get_indexer(
-            [now_precise], method="nearest"
-        )[0]
+        idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
     # Publish Load forecast
     input_data_dict["rh"].post_data(
-        predictions,
-
-
-        model_predict_unit_of_measurement,
-        model_predict_friendly_name,
-        type_var="mlforecaster",
-        publish_prefix=publish_prefix,
-    )
+        predictions, idx_closest, model_predict_entity_id,
+        model_predict_unit_of_measurement, model_predict_friendly_name,
+        type_var="mlforecaster", publish_prefix=publish_prefix)
     return predictions
 
 
-def forecast_model_tune(
-
-
-    debug: Optional[bool] = False,
-    mlf: Optional[MLForecaster] = None,
-) -> Tuple[pd.DataFrame, MLForecaster]:
+def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
+                        debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
+                        ) -> Tuple[pd.DataFrame, MLForecaster]:
    """Tune a forecast model hyperparameters using bayesian optimization.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
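Note: the three method_ts_round branches now each fit on one line; DatetimeIndex.get_indexer returns -1 when no label qualifies (for example method="ffill" before the first timestamp), hence the trailing fallback to method="nearest". A standalone sketch:

import pandas as pd

idx = pd.date_range("2024-01-01 00:00", periods=4, freq="30min")
now_precise = pd.Timestamp("2024-01-01 00:40")

print(idx.get_indexer([now_precise], method="nearest")[0])  # 1 -> 00:30 is closest
print(idx.get_indexer([now_precise], method="ffill")[0])    # 1 -> last label <= now
print(idx.get_indexer([now_precise], method="bfill")[0])    # 2 -> first label >= now
print(idx.get_indexer([pd.Timestamp("2023-12-31")], method="ffill")[0])  # -1, nothing before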
@@ -607,15 +539,12 @@ def forecast_model_tune(
     filename = model_type+'_mlf.pkl'
     filename_path = input_data_dict['emhass_conf']['data_path'] / filename
     with open(filename_path, 'wb') as outp:
-        pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
+        pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
     return df_pred_optim, mlf
 
 
-def regressor_model_fit(
-
-    logger: logging.Logger,
-    debug: Optional[bool] = False,
-) -> None:
+def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
+                        debug: Optional[bool] = False) -> MLRegressor:
     """Perform a forecast model fit from training data retrieved from Home Assistant.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -640,33 +569,24 @@ def regressor_model_fit(
         features = input_data_dict["params"]["passed_data"]["features"]
     else:
         logger.error("parameter: 'features' not passed")
-        return False
+        return False
     if "target" in input_data_dict["params"]["passed_data"]:
         target = input_data_dict["params"]["passed_data"]["target"]
     else:
         logger.error("parameter: 'target' not passed")
-        return False
+        return False
     if "timestamp" in input_data_dict["params"]["passed_data"]:
         timestamp = input_data_dict["params"]["passed_data"]["timestamp"]
     else:
         logger.error("parameter: 'timestamp' not passed")
-        return False
+        return False
     if "date_features" in input_data_dict["params"]["passed_data"]:
         date_features = input_data_dict["params"]["passed_data"]["date_features"]
     else:
         logger.error("parameter: 'date_features' not passed")
-        return False
-
+        return False
     # The MLRegressor object
-    mlr = MLRegressor(
-        data,
-        model_type,
-        regression_model,
-        features,
-        target,
-        timestamp,
-        logger,
-    )
+    mlr = MLRegressor(data, model_type, regression_model, features, target, timestamp, logger)
     # Fit the ML model
     mlr.fit(date_features=date_features)
     # Save model
@@ -678,12 +598,9 @@ def regressor_model_fit(
     return mlr
 
 
-def regressor_model_predict(
-
-
-    debug: Optional[bool] = False,
-    mlr: Optional[MLRegressor] = None,
-) -> None:
+def regressor_model_predict(input_data_dict: dict, logger: logging.Logger,
+                            debug: Optional[bool] = False, mlr: Optional[MLRegressor] = None
+                            ) -> np.ndarray:
     """Perform a prediction from csv file.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -697,7 +614,7 @@ def regressor_model_predict(
         model_type = input_data_dict["params"]["passed_data"]["model_type"]
     else:
         logger.error("parameter: 'model_type' not passed")
-        return False
+        return False
     filename = model_type + "_mlr.pkl"
     filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
     if not debug:
@@ -709,37 +626,31 @@ def regressor_model_predict(
                 "The ML forecaster file was not found, please run a model fit method before this predict method",
             )
             return False
-    if "new_values" in input_data_dict["params"]["passed_data"]:
+    if "new_values" in input_data_dict["params"]["passed_data"]:
         new_values = input_data_dict["params"]["passed_data"]["new_values"]
     else:
         logger.error("parameter: 'new_values' not passed")
-        return False
+        return False
     # Predict from csv file
     prediction = mlr.predict(new_values)
-
-
-    mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get(
-
+    mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get(
+        "mlr_predict_entity_id", "sensor.mlr_predict")
+    mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get(
+        "mlr_predict_unit_of_measurement", "h")
+    mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get(
+        "mlr_predict_friendly_name", "mlr predictor")
     # Publish prediction
     idx = 0
     if not debug:
-        input_data_dict["rh"].post_data(
-
-
-            mlr_predict_entity_id,
-            mlr_predict_unit_of_measurement,
-            mlr_predict_friendly_name,
-            type_var="mlregressor",
-        )
+        input_data_dict["rh"].post_data(prediction, idx, mlr_predict_entity_id,
+                                        mlr_predict_unit_of_measurement, mlr_predict_friendly_name,
+                                        type_var="mlregressor")
     return prediction
 
 
-def publish_data(
-
-
-    save_data_to_file: Optional[bool] = False,
-    opt_res_latest: Optional[pd.DataFrame] = None,
-) -> pd.DataFrame:
+def publish_data(input_data_dict: dict, logger: logging.Logger,
+                 save_data_to_file: Optional[bool] = False,
+                 opt_res_latest: Optional[pd.DataFrame] = None) -> pd.DataFrame:
     """
     Publish the data obtained from the optimization results.
 
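Note: the new mlr_predict_* lookups read runtime parameters with dict.get and a default, so publishing works even when the caller omits them. A standalone sketch (the custom entity id is illustrative):

passed_data = {"mlr_predict_entity_id": "sensor.my_mlr_output"}  # e.g. from runtime params

mlr_predict_entity_id = passed_data.get("mlr_predict_entity_id", "sensor.mlr_predict")
mlr_predict_unit_of_measurement = passed_data.get("mlr_predict_unit_of_measurement", "h")
mlr_predict_friendly_name = passed_data.get("mlr_predict_friendly_name", "mlr predictor")
print(mlr_predict_entity_id, mlr_predict_unit_of_measurement, mlr_predict_friendly_name)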
@@ -764,10 +675,12 @@ def publish_data(
     filename = "opt_res_latest.csv"
     if opt_res_latest is None:
         if not os.path.isfile(input_data_dict['emhass_conf']['data_path'] / filename):
-            logger.error(
+            logger.error(
+                "File not found error, run an optimization task first.")
             return
         else:
-            opt_res_latest = pd.read_csv(
+            opt_res_latest = pd.read_csv(
+                input_data_dict['emhass_conf']['data_path'] / filename, index_col='timestamp')
             opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
             opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"]["freq"]
     # Estimate the current index
@@ -775,17 +688,15 @@ def publish_data(
         input_data_dict["retrieve_hass_conf"]["time_zone"]
     ).replace(second=0, microsecond=0)
     if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
-        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
-            0
-        ]
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
     elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
-        idx_closest = opt_res_latest.index.get_indexer(
+        idx_closest = opt_res_latest.index.get_indexer(
+            [now_precise], method="ffill")[0]
     elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
-        idx_closest = opt_res_latest.index.get_indexer(
+        idx_closest = opt_res_latest.index.get_indexer(
+            [now_precise], method="bfill")[0]
     if idx_closest == -1:
-        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
-            0
-        ]
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
     # Publish the data
     params = json.loads(input_data_dict["params"])
     publish_prefix = params["passed_data"]["publish_prefix"]
@@ -930,7 +841,8 @@ def publish_data(
     )
     cols_published = cols_published + ["unit_prod_price"]
     # Create a DF resuming what has been published
-    opt_res = opt_res_latest[cols_published].loc[[
+    opt_res = opt_res_latest[cols_published].loc[[
+        opt_res_latest.index[idx_closest]]]
     return opt_res
 
 
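Note: the double-bracket .loc[[label]] keeps a one-row DataFrame rather than collapsing to a Series, which is what the publish step hands back. A standalone sketch (column names other than unit_prod_price are illustrative):

import pandas as pd

idx = pd.date_range("2024-01-01", periods=3, freq="30min")
opt_res_latest = pd.DataFrame({"P_deferrable0": [100, 200, 300],
                               "unit_prod_price": [0.1, 0.2, 0.3]}, index=idx)
cols_published = ["P_deferrable0", "unit_prod_price"]
idx_closest = 1

opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
print(type(opt_res).__name__, len(opt_res))  # DataFrame 1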
@@ -959,59 +871,59 @@ def main():
     parser = argparse.ArgumentParser()
     parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim,\
         naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune')
-    parser.add_argument('--config', type=str,
-
+    parser.add_argument('--config', type=str,
+                        help='Define path to the config.yaml file')
+    parser.add_argument('--data', type=str,
+                        help='Define path to the Data files (.csv & .pkl)')
     parser.add_argument('--root', type=str, help='Define path emhass root')
-    parser.add_argument('--costfun', type=str, default='profit',
-
-    parser.add_argument('--
-
-    parser.add_argument('--
+    parser.add_argument('--costfun', type=str, default='profit',
+                        help='Define the type of cost function, options are: profit, cost, self-consumption')
+    parser.add_argument('--log2file', type=strtobool, default='False',
+                        help='Define if we should log to a file or not')
+    parser.add_argument('--params', type=str, default=None,
+                        help='Configuration parameters passed from data/options.json')
+    parser.add_argument('--runtimeparams', type=str, default=None,
+                        help='Pass runtime optimization parameters as dictionnary')
+    parser.add_argument('--debug', type=strtobool,
+                        default='False', help='Use True for testing purposes')
     args = parser.parse_args()
     # The path to the configuration files
-
     if args.config is not None:
         config_path = pathlib.Path(args.config)
     else:
-        config_path = pathlib.Path(
-
+        config_path = pathlib.Path(
+            str(utils.get_root(__file__, num_parent=2) / 'config_emhass.yaml'))
     if args.data is not None:
         data_path = pathlib.Path(args.data)
     else:
         data_path = (config_path.parent / 'data/')
-
     if args.root is not None:
         root_path = pathlib.Path(args.root)
     else:
         root_path = config_path.parent
-
     emhass_conf = {}
     emhass_conf['config_path'] = config_path
     emhass_conf['data_path'] = data_path
     emhass_conf['root_path'] = root_path
     # create logger
-    logger, ch = utils.get_logger(
-
+    logger, ch = utils.get_logger(
+        __name__, emhass_conf, save_to_file=bool(args.log2file))
     logger.debug("config path: " + str(config_path))
     logger.debug("data path: " + str(data_path))
     logger.debug("root path: " + str(root_path))
-
-
     if not config_path.exists():
-        logger.error(
-
+        logger.error(
+            "Could not find config_emhass.yaml file in: " + str(config_path))
+        logger.error("Try setting config file path with --config")
         return False
-
     if not os.path.isdir(data_path):
         logger.error("Could not find data foulder in: " + str(data_path))
-        logger.error("Try setting data path with --data"
+        logger.error("Try setting data path with --data")
         return False
-
     if not os.path.isdir(root_path / 'src'):
         logger.error("Could not find emhass/src foulder in: " + str(root_path))
-        logger.error("Try setting emhass root path with --root"
+        logger.error("Try setting emhass root path with --root")
         return False
-
     # Additionnal argument
     try:
         parser.add_argument(
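Note: --log2file and --debug take strtobool as their argparse type, so string flags such as 'True', 'false' or '1' arrive as 1/0 and are later wrapped with bool(args.log2file). A standalone sketch trimmed to those two flags (the distutils import is the standard pre-3.12 location; distutils is deprecated on newer Pythons):

import argparse
from distutils.util import strtobool  # 'y'/'true'/'1' -> 1, 'n'/'false'/'0' -> 0

parser = argparse.ArgumentParser()
parser.add_argument('--log2file', type=strtobool, default='False',
                    help='Define if we should log to a file or not')
parser.add_argument('--debug', type=strtobool, default='False',
                    help='Use True for testing purposes')
args = parser.parse_args(['--log2file', 'True', '--debug', 'no'])
print(bool(args.log2file), bool(args.debug))  # True False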
@@ -1025,14 +937,16 @@ def main():
             "Version not found for emhass package. Or importlib exited with PackageNotFoundError.",
         )
     # Setup parameters
-    input_data_dict = set_input_data_dict(emhass_conf,
-        args.costfun, args.params, args.runtimeparams, args.action,
+    input_data_dict = set_input_data_dict(emhass_conf,
+                                          args.costfun, args.params, args.runtimeparams, args.action,
                                           logger, args.debug)
     # Perform selected action
     if args.action == "perfect-optim":
-        opt_res = perfect_forecast_optim(
+        opt_res = perfect_forecast_optim(
+            input_data_dict, logger, debug=args.debug)
     elif args.action == "dayahead-optim":
-        opt_res = dayahead_forecast_optim(
+        opt_res = dayahead_forecast_optim(
+            input_data_dict, logger, debug=args.debug)
     elif args.action == "naive-mpc-optim":
         opt_res = naive_mpc_optim(input_data_dict, logger, debug=args.debug)
     elif args.action == "forecast-model-fit":
@@ -1045,18 +959,14 @@ def main():
             _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
         else:
             mlf = None
-        df_pred = forecast_model_predict(
-            input_data_dict, logger, debug=args.debug, mlf=mlf
-        )
+        df_pred = forecast_model_predict(input_data_dict, logger, debug=args.debug, mlf=mlf)
         opt_res = None
     elif args.action == "forecast-model-tune":
         if args.debug:
             _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
         else:
             mlf = None
-        df_pred_optim, mlf = forecast_model_tune(
-            input_data_dict, logger, debug=args.debug, mlf=mlf
-        )
+        df_pred_optim, mlf = forecast_model_tune(input_data_dict, logger, debug=args.debug, mlf=mlf)
         opt_res = None
     elif args.action == "regressor-model-fit":
         mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
@@ -1066,12 +976,7 @@ def main():
             mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
         else:
             mlr = None
-        prediction = regressor_model_predict(
-            input_data_dict,
-            logger,
-            debug=args.debug,
-            mlr=mlr,
-        )
+        prediction = regressor_model_predict(input_data_dict, logger, debug=args.debug,mlr=mlr)
         opt_res = None
     elif args.action == "publish-data":
         opt_res = publish_data(input_data_dict, logger)
@@ -1100,7 +1005,7 @@ def main():
         return prediction
     elif args.action == "forecast-model-tune":
         return df_pred_optim, mlf
-    else:
+    else:
         return opt_res
 
 