emhass 0.8.5__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emhass/command_line.py +705 -272
- emhass/forecast.py +114 -45
- emhass/machine_learning_forecaster.py +4 -4
- emhass/machine_learning_regressor.py +290 -0
- emhass/optimization.py +4 -3
- emhass/retrieve_hass.py +235 -103
- emhass/static/advanced.html +3 -0
- emhass/static/script.js +2 -0
- emhass/utils.py +605 -305
- emhass/web_server.py +48 -26
- {emhass-0.8.5.dist-info → emhass-0.9.0.dist-info}/METADATA +19 -5
- emhass-0.9.0.dist-info/RECORD +26 -0
- emhass-0.8.5.dist-info/RECORD +0 -25
- {emhass-0.8.5.dist-info → emhass-0.9.0.dist-info}/LICENSE +0 -0
- {emhass-0.8.5.dist-info → emhass-0.9.0.dist-info}/WHEEL +0 -0
- {emhass-0.8.5.dist-info → emhass-0.9.0.dist-info}/entry_points.txt +0 -0
- {emhass-0.8.5.dist-info → emhass-0.9.0.dist-info}/top_level.txt +0 -0
emhass/command_line.py
CHANGED
@@ -8,36 +8,35 @@ import logging
 import json
 import copy
 import pickle
-import time
-import numpy as np
-import pandas as pd
 from datetime import datetime, timezone
 from typing import Optional, Tuple
+from importlib.metadata import version
+import numpy as np
+import pandas as pd
+
 from distutils.util import strtobool
 
-from importlib.metadata import version
 from emhass.retrieve_hass import RetrieveHass
 from emhass.forecast import Forecast
 from emhass.machine_learning_forecaster import MLForecaster
 from emhass.optimization import Optimization
+from emhass.machine_learning_regressor import MLRegressor
 from emhass import utils
 
 
-def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
+def set_input_data_dict(emhass_conf: dict, costfun: str,
     params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
     get_data_from_file: Optional[bool] = False) -> dict:
     """
     Set up some of the data needed for the different actions.
 
-    :param
-    :type
-    :param base_path: The parent folder of the config_path
-    :type base_path: str
+    :param emhass_conf: Dictionary containing the needed emhass paths
+    :type emhass_conf: dict
     :param costfun: The type of cost function to use for optimization problem
     :type costfun: str
     :param params: Configuration parameters passed from data/options.json
     :type params: str
-    :param runtimeparams: Runtime optimization parameters passed as a
+    :param runtimeparams: Runtime optimization parameters passed as a dictionary
     :type runtimeparams: str
     :param set_type: Set the type of setup based on following type of optimization
     :type set_type: str
@@ -52,120 +51,221 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
     logger.info("Setting up needed data")
     # Parsing yaml
     retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(
+        emhass_conf, use_secrets=not(get_data_from_file), params=params)
     # Treat runtimeparams
     params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
-        runtimeparams,
+        runtimeparams,
+        params,
+        retrieve_hass_conf,
+        optim_conf,
+        plant_conf,
+        set_type,
+        logger,
+    )
     # Define main objects
     rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
                       retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
-                      params,
+                      params, emhass_conf, logger, get_data_from_file=get_data_from_file)
     fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
-                    params,
+                    params, emhass_conf, logger, get_data_from_file=get_data_from_file)
     opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
                        fcst.var_load_cost, fcst.var_prod_price,
-                       costfun,
+                       costfun, emhass_conf, logger)
     # Perform setup based on type of action
     if set_type == "perfect-optim":
         # Retrieve data from hass
         if get_data_from_file:
-            with open(
+            with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
                 rh.df_final, days_list, var_list = pickle.load(inp)
+                retrieve_hass_conf['var_load'] = str(var_list[0])
+                retrieve_hass_conf['var_PV'] = str(var_list[1])
+                retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+                retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']]
         else:
-            days_list = utils.get_days_list(retrieve_hass_conf[
-            var_list = [retrieve_hass_conf[
-            if not rh.get_data(
+            days_list = utils.get_days_list(retrieve_hass_conf["days_to_retrieve"])
+            var_list = [retrieve_hass_conf["var_load"], retrieve_hass_conf["var_PV"]]
+            if not rh.get_data(
+                days_list,
+                var_list,
+                minimal_response=False,
+                significant_changes_only=False,
+            ):
+                return False
+            if not rh.prepare_data(
+                retrieve_hass_conf["var_load"],
+                load_negative=retrieve_hass_conf["load_negative"],
+                set_zero_min=retrieve_hass_conf["set_zero_min"],
+                var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+                var_interp=retrieve_hass_conf["var_interp"],
+            ):
                 return False
         df_input_data = rh.df_final.copy()
         # What we don't need for this type of action
         P_PV_forecast, P_load_forecast, df_input_data_dayahead = None, None, None
     elif set_type == "dayahead-optim":
         # Get PV and load forecasts
-        df_weather = fcst.get_weather_forecast(
+        df_weather = fcst.get_weather_forecast(
+            method=optim_conf["weather_forecast_method"]
+        )
         P_PV_forecast = fcst.get_power_from_weather(df_weather)
         P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'])
         if isinstance(P_load_forecast,bool) and not P_load_forecast:
             logger.error("Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
             return False
-        df_input_data_dayahead = pd.DataFrame(
+        df_input_data_dayahead = pd.DataFrame(
+            np.transpose(np.vstack([P_PV_forecast.values, P_load_forecast.values])),
+            index=P_PV_forecast.index,
+            columns=["P_PV_forecast", "P_load_forecast"],
+        )
         df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
         params = json.loads(params)
-        if
-            prediction_horizon
+        if (
+            "prediction_horizon" in params["passed_data"]
+            and params["passed_data"]["prediction_horizon"] is not None
+        ):
+            prediction_horizon = params["passed_data"]["prediction_horizon"]
+            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
+                df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
+                    prediction_horizon - 1
+                ]
+            ]
         # What we don't need for this type of action
         df_input_data, days_list = None, None
     elif set_type == "naive-mpc-optim":
         # Retrieve data from hass
         if get_data_from_file:
-            with open(
+            with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
                 rh.df_final, days_list, var_list = pickle.load(inp)
+                retrieve_hass_conf['var_load'] = str(var_list[0])
+                retrieve_hass_conf['var_PV'] = str(var_list[1])
+                retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+                retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']]
         else:
             days_list = utils.get_days_list(1)
-            var_list = [retrieve_hass_conf[
-            if not rh.get_data(
+            var_list = [retrieve_hass_conf["var_load"], retrieve_hass_conf["var_PV"]]
+            if not rh.get_data(
+                days_list,
+                var_list,
+                minimal_response=False,
+                significant_changes_only=False,
+            ):
                 return False
-            if not rh.prepare_data(
+            if not rh.prepare_data(
+                retrieve_hass_conf["var_load"],
+                load_negative=retrieve_hass_conf["load_negative"],
+                set_zero_min=retrieve_hass_conf["set_zero_min"],
+                var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+                var_interp=retrieve_hass_conf["var_interp"],
+            ):
                 return False
         df_input_data = rh.df_final.copy()
         # Get PV and load forecasts
         df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
         P_PV_forecast = fcst.get_power_from_weather(df_weather, set_mix_forecast=True, df_now=df_input_data)
         P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
+        if isinstance(P_load_forecast,bool) and not P_load_forecast:
+            logger.error("Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
+            return False
         df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
         df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
-        df_input_data_dayahead.columns = [
+        df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
         params = json.loads(params)
-        if
-            prediction_horizon
+        if (
+            "prediction_horizon" in params["passed_data"]
+            and params["passed_data"]["prediction_horizon"] is not None
+        ):
+            prediction_horizon = params["passed_data"]["prediction_horizon"]
+            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
+                df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
+                    prediction_horizon - 1
+                ]
+            ]
+    elif (
+        set_type == "forecast-model-fit"
+        or set_type == "forecast-model-predict"
+        or set_type == "forecast-model-tune"
+    ):
         df_input_data_dayahead = None
         P_PV_forecast, P_load_forecast = None, None
         params = json.loads(params)
         # Retrieve data from hass
-        days_to_retrieve = params[
-        model_type = params[
-        var_model = params[
+        days_to_retrieve = params["passed_data"]["days_to_retrieve"]
+        model_type = params["passed_data"]["model_type"]
+        var_model = params["passed_data"]["var_model"]
         if get_data_from_file:
             days_list = None
             filename = 'data_train_'+model_type+'.pkl'
-            with open(
+            filename_path = emhass_conf['data_path'] / filename
+            with open(filename_path, 'rb') as inp:
                 df_input_data, _ = pickle.load(inp)
-            df_input_data = df_input_data[
+            df_input_data = df_input_data[
+                df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve) :
+            ]
         else:
             days_list = utils.get_days_list(days_to_retrieve)
             var_list = [var_model]
             if not rh.get_data(days_list, var_list):
                 return False
             df_input_data = rh.df_final.copy()
+
+    elif set_type == "regressor-model-fit" or set_type == "regressor-model-predict":
+
+        df_input_data, df_input_data_dayahead = None, None
+        P_PV_forecast, P_load_forecast = None, None
+        params = json.loads(params)
+        days_list = None
+        csv_file = params["passed_data"].get("csv_file", None)
+        if "features" in params["passed_data"]:
+            features = params["passed_data"]["features"]
+        if "target" in params["passed_data"]:
+            target = params["passed_data"]["target"]
+        if "timestamp" in params["passed_data"]:
+            timestamp = params["passed_data"]["timestamp"]
+        if csv_file:
+            if get_data_from_file:
+                base_path = emhass_conf["data_path"]  # + "/data"
+                filename_path = pathlib.Path(base_path) / csv_file
+
+            else:
+                filename_path = emhass_conf["data_path"] / csv_file
+
+            if filename_path.is_file():
+                df_input_data = pd.read_csv(filename_path, parse_dates=True)
+
+            else:
+                logger.error("The CSV file " + csv_file + " was not found in path: " + str(emhass_conf["data_path"]))
+                return False
+                #raise ValueError("The CSV file " + csv_file + " was not found.")
+            required_columns = []
+            required_columns.extend(features)
+            required_columns.append(target)
+            if timestamp is not None:
+                required_columns.append(timestamp)
+
+            if not set(required_columns).issubset(df_input_data.columns):
+                logger.error("The cvs file does not contain the required columns.")
+                msg = f"CSV file should contain the following columns: {', '.join(required_columns)}"
+                logger.error(msg)
+                return False
+                #raise ValueError(
+                #    msg,
+                #)
+
     elif set_type == "publish-data":
         df_input_data, df_input_data_dayahead = None, None
         P_PV_forecast, P_load_forecast = None, None
         days_list = None
     else:
-        logger.error(
+        logger.error(
+            "The passed action argument and hence the set_type parameter for setup is not valid",
+        )
         df_input_data, df_input_data_dayahead = None, None
         P_PV_forecast, P_load_forecast = None, None
         days_list = None
 
-    # The input data
+    # The input data dictionary to return
     input_data_dict = {
-        '
+        'emhass_conf': emhass_conf,
         'retrieve_hass_conf': retrieve_hass_conf,
         'rh': rh,
         'opt': opt,
@@ -179,12 +279,17 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
         'days_list': days_list
     }
     return input_data_dict
+
+
+def perfect_forecast_optim(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    save_data_to_file: Optional[bool] = True,
+    debug: Optional[bool] = False,
+) -> pd.DataFrame:
     """
     Perform a call to the perfect forecast optimization routine.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -201,24 +306,35 @@ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
     # Load cost and prod price forecast
     df_input_data = input_data_dict['fcst'].get_load_cost_forecast(
         input_data_dict['df_input_data'],
-        method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method']
+        method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'],
+        list_and_perfect=True)
+    if isinstance(df_input_data,bool) and not df_input_data:
+        return False
     df_input_data = input_data_dict['fcst'].get_prod_price_forecast(
-        df_input_data, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method']
+        df_input_data, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'],
+        list_and_perfect=True)
+    if isinstance(df_input_data,bool) and not df_input_data:
+        return False
     opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(df_input_data, input_data_dict['days_list'])
     # Save CSV file for analysis
     if save_data_to_file:
-        filename =
-    else:
-        filename =
+        filename = "opt_res_perfect_optim_" + input_data_dict["costfun"] + ".csv"
+    else:  # Just save the latest optimization results
+        filename = "opt_res_latest.csv"
     if not debug:
-        opt_res.to_csv(
+        opt_res.to_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res
+
+
+def dayahead_forecast_optim(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    save_data_to_file: Optional[bool] = False,
+    debug: Optional[bool] = False,
+) -> pd.DataFrame:
     """
     Perform a call to the day-ahead optimization routine.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -236,26 +352,37 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
     df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
         input_data_dict['df_input_data_dayahead'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
+    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
+        return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data_dayahead,
         method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
+    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
+        return False
     opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
         df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
     # Save CSV file for publish_data
     if save_data_to_file:
-        today = datetime.now(timezone.utc).replace(
-        filename =
+        today = datetime.now(timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0
+        )
+        filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
+    else:  # Just save the latest optimization results
+        filename = "opt_res_latest.csv"
     if not debug:
-        opt_res_dayahead.to_csv(
+        opt_res_dayahead.to_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res_dayahead
 
+
+def naive_mpc_optim(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    save_data_to_file: Optional[bool] = False,
+    debug: Optional[bool] = False,
+) -> pd.DataFrame:
     """
     Perform a call to the naive Model Predictive Controller optimization routine.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -273,30 +400,46 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
     df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
         input_data_dict['df_input_data_dayahead'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
+    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
+        return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
+    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
+        return False
     # The specifics params for the MPC at runtime
-    prediction_horizon = input_data_dict[
-    soc_init = input_data_dict[
-    soc_final = input_data_dict[
-    def_total_hours = input_data_dict[
-    def_start_timestep = input_data_dict[
-    def_end_timestep = input_data_dict[
-    opt_res_naive_mpc = input_data_dict[
-        df_input_data_dayahead,
+    prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
+    soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
+    soc_final = input_data_dict["params"]["passed_data"]["soc_final"]
+    def_total_hours = input_data_dict["params"]["passed_data"]["def_total_hours"]
+    def_start_timestep = input_data_dict["params"]["passed_data"]["def_start_timestep"]
+    def_end_timestep = input_data_dict["params"]["passed_data"]["def_end_timestep"]
+    opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
+        df_input_data_dayahead,
+        input_data_dict["P_PV_forecast"],
+        input_data_dict["P_load_forecast"],
+        prediction_horizon,
+        soc_init,
+        soc_final,
+        def_total_hours,
+        def_start_timestep,
+        def_end_timestep,
+    )
     # Save CSV file for publish_data
     if save_data_to_file:
-        today = datetime.now(timezone.utc).replace(
-        filename =
+        today = datetime.now(timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0
+        )
+        filename = "opt_res_naive_mpc_" + today.strftime("%Y_%m_%d") + ".csv"
+    else:  # Just save the latest optimization results
+        filename = "opt_res_latest.csv"
     if not debug:
-        opt_res_naive_mpc.to_csv(
+        opt_res_naive_mpc.to_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res_naive_mpc
 
+
+def forecast_model_fit(
+    input_data_dict: dict, logger: logging.Logger, debug: Optional[bool] = False
+) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
     """Perform a forecast model fit from training data retrieved from Home Assistant.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -315,22 +458,28 @@ def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
     num_lags = input_data_dict['params']['passed_data']['num_lags']
     split_date_delta = input_data_dict['params']['passed_data']['split_date_delta']
     perform_backtest = input_data_dict['params']['passed_data']['perform_backtest']
-    root = input_data_dict['root']
     # The ML forecaster object
-    mlf = MLForecaster(data, model_type, var_model, sklearn_model, num_lags,
+    mlf = MLForecaster(data, model_type, var_model, sklearn_model, num_lags, input_data_dict['emhass_conf'], logger)
     # Fit the ML model
-    df_pred, df_pred_backtest = mlf.fit(
+    df_pred, df_pred_backtest = mlf.fit(
+        split_date_delta=split_date_delta, perform_backtest=perform_backtest
+    )
     # Save model
     if not debug:
         filename = model_type+'_mlf.pkl'
+        filename_path = input_data_dict['emhass_conf']['data_path'] / filename
+        with open(filename_path, 'wb') as outp:
             pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
     return df_pred, df_pred_backtest, mlf
 
+
+def forecast_model_predict(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    use_last_window: Optional[bool] = True,
+    debug: Optional[bool] = False,
+    mlf: Optional[MLForecaster] = None,
+) -> pd.DataFrame:
     r"""Perform a forecast model predict using a previously trained skforecast model.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -353,51 +502,77 @@ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
     """
     # Load model
     model_type = input_data_dict['params']['passed_data']['model_type']
-    root = input_data_dict['root']
     filename = model_type+'_mlf.pkl'
-    filename_path =
+    filename_path = input_data_dict['emhass_conf']['data_path'] / filename
     if not debug:
         if filename_path.is_file():
-            with open(filename_path,
+            with open(filename_path, "rb") as inp:
                 mlf = pickle.load(inp)
         else:
-            logger.error(
+            logger.error(
+                "The ML forecaster file was not found, please run a model fit method before this predict method",
+            )
             return
     # Make predictions
     if use_last_window:
-        data_last_window = copy.deepcopy(input_data_dict[
+        data_last_window = copy.deepcopy(input_data_dict["df_input_data"])
     else:
         data_last_window = None
     predictions = mlf.predict(data_last_window)
     # Publish data to a Home Assistant sensor
-    model_predict_publish = input_data_dict[
+    model_predict_publish = input_data_dict["params"]["passed_data"][
+        "model_predict_publish"
+    ]
+    model_predict_entity_id = input_data_dict["params"]["passed_data"][
+        "model_predict_entity_id"
+    ]
+    model_predict_unit_of_measurement = input_data_dict["params"]["passed_data"][
+        "model_predict_unit_of_measurement"
+    ]
+    model_predict_friendly_name = input_data_dict["params"]["passed_data"][
+        "model_predict_friendly_name"
+    ]
+    publish_prefix = input_data_dict["params"]["passed_data"]["publish_prefix"]
     if model_predict_publish is True:
         # Estimate the current index
-        now_precise = datetime.now(
-        idx_closest = predictions.index.get_indexer(
+        now_precise = datetime.now(
+            input_data_dict["retrieve_hass_conf"]["time_zone"]
+        ).replace(second=0, microsecond=0)
+        if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+            idx_closest = predictions.index.get_indexer(
+                [now_precise], method="nearest"
+            )[0]
+        elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+            idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[
+                0
+            ]
+        elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+            idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[
+                0
+            ]
        if idx_closest == -1:
-            idx_closest = predictions.index.get_indexer(
+            idx_closest = predictions.index.get_indexer(
+                [now_precise], method="nearest"
+            )[0]
         # Publish Load forecast
-        input_data_dict[
+        input_data_dict["rh"].post_data(
+            predictions,
+            idx_closest,
+            model_predict_entity_id,
+            model_predict_unit_of_measurement,
+            model_predict_friendly_name,
+            type_var="mlforecaster",
+            publish_prefix=publish_prefix,
+        )
     return predictions
 
+
+def forecast_model_tune(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    debug: Optional[bool] = False,
+    mlf: Optional[MLForecaster] = None,
+) -> Tuple[pd.DataFrame, MLForecaster]:
     """Tune a forecast model hyperparameters using bayesian optimization.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -414,31 +589,160 @@ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
     """
     # Load model
     model_type = input_data_dict['params']['passed_data']['model_type']
-    root = input_data_dict['root']
     filename = model_type+'_mlf.pkl'
-    filename_path =
+    filename_path = input_data_dict['emhass_conf']['data_path'] / filename
     if not debug:
         if filename_path.is_file():
-            with open(filename_path,
+            with open(filename_path, "rb") as inp:
                 mlf = pickle.load(inp)
         else:
-            logger.error(
+            logger.error(
+                "The ML forecaster file was not found, please run a model fit method before this tune method",
+            )
             return None, None
     # Tune the model
     df_pred_optim = mlf.tune(debug=debug)
     # Save model
     if not debug:
         filename = model_type+'_mlf.pkl'
+        filename_path = input_data_dict['emhass_conf']['data_path'] / filename
+        with open(filename_path, 'wb') as outp:
+            pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
     return df_pred_optim, mlf
 
+
+def regressor_model_fit(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    debug: Optional[bool] = False,
+) -> None:
+    """Perform a forecast model fit from training data retrieved from Home Assistant.
+
+    :param input_data_dict: A dictionnary with multiple data used by the action functions
+    :type input_data_dict: dict
+    :param logger: The passed logger object
+    :type logger: logging.Logger
+    :param debug: True to debug, useful for unit testing, defaults to False
+    :type debug: Optional[bool], optional
     """
+    data = copy.deepcopy(input_data_dict["df_input_data"])
+    if "model_type" in input_data_dict["params"]["passed_data"]:
+        model_type = input_data_dict["params"]["passed_data"]["model_type"]
+    else:
+        logger.error("parameter: 'model_type' not passed")
+        return False
+    if "regression_model" in input_data_dict["params"]["passed_data"]:
+        regression_model = input_data_dict["params"]["passed_data"]["regression_model"]
+    else:
+        logger.error("parameter: 'regression_model' not passed")
+        return False
+    if "features" in input_data_dict["params"]["passed_data"]:
+        features = input_data_dict["params"]["passed_data"]["features"]
+    else:
+        logger.error("parameter: 'features' not passed")
+        return False
+    if "target" in input_data_dict["params"]["passed_data"]:
+        target = input_data_dict["params"]["passed_data"]["target"]
+    else:
+        logger.error("parameter: 'target' not passed")
+        return False
+    if "timestamp" in input_data_dict["params"]["passed_data"]:
+        timestamp = input_data_dict["params"]["passed_data"]["timestamp"]
+    else:
+        logger.error("parameter: 'timestamp' not passed")
+        return False
+    if "date_features" in input_data_dict["params"]["passed_data"]:
+        date_features = input_data_dict["params"]["passed_data"]["date_features"]
+    else:
+        logger.error("parameter: 'date_features' not passed")
+        return False
+
+    # The MLRegressor object
+    mlr = MLRegressor(
+        data,
+        model_type,
+        regression_model,
+        features,
+        target,
+        timestamp,
+        logger,
+    )
+    # Fit the ML model
+    mlr.fit(date_features=date_features)
+    # Save model
+    if not debug:
+        filename = model_type + "_mlr.pkl"
+        filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
+        with open(filename_path, "wb") as outp:
+            pickle.dump(mlr, outp, pickle.HIGHEST_PROTOCOL)
+    return mlr
+
+
+def regressor_model_predict(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    debug: Optional[bool] = False,
+    mlr: Optional[MLRegressor] = None,
+) -> None:
+    """Perform a prediction from csv file.
+
+    :param input_data_dict: A dictionnary with multiple data used by the action functions
+    :type input_data_dict: dict
+    :param logger: The passed logger object
+    :type logger: logging.Logger
+    :param debug: True to debug, useful for unit testing, defaults to False
+    :type debug: Optional[bool], optional
+    """
+    if "model_type" in input_data_dict["params"]["passed_data"]:
+        model_type = input_data_dict["params"]["passed_data"]["model_type"]
+    else:
+        logger.error("parameter: 'model_type' not passed")
+        return False
+    filename = model_type + "_mlr.pkl"
+    filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
+    if not debug:
+        if filename_path.is_file():
+            with open(filename_path, "rb") as inp:
+                mlr = pickle.load(inp)
+        else:
+            logger.error(
+                "The ML forecaster file was not found, please run a model fit method before this predict method",
+            )
+            return False
+    if "new_values" in input_data_dict["params"]["passed_data"]:
+        new_values = input_data_dict["params"]["passed_data"]["new_values"]
+    else:
+        logger.error("parameter: 'new_values' not passed")
+        return False
+    # Predict from csv file
+    prediction = mlr.predict(new_values)
 
+    mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get("mlr_predict_entity_id","sensor.mlr_predict")
+    mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get("mlr_predict_unit_of_measurement","h")
+    mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get("mlr_predict_friendly_name","mlr predictor")
+    # Publish prediction
+    idx = 0
+    if not debug:
+        input_data_dict["rh"].post_data(
+            prediction,
+            idx,
+            mlr_predict_entity_id,
+            mlr_predict_unit_of_measurement,
+            mlr_predict_friendly_name,
+            type_var="mlregressor",
+        )
+    return prediction
+
+
+def publish_data(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    save_data_to_file: Optional[bool] = False,
+    opt_res_latest: Optional[pd.DataFrame] = None,
+) -> pd.DataFrame:
+    """
+    Publish the data obtained from the optimization results.
+
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -452,161 +756,212 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
     logger.info("Publishing data to HASS instance")
     # Check if a day ahead optimization has been performed (read CSV file)
     if save_data_to_file:
-        today = datetime.now(timezone.utc).replace(
+        today = datetime.now(timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0
+        )
+        filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
     else:
-        filename =
+        filename = "opt_res_latest.csv"
     if opt_res_latest is None:
-        if not os.path.isfile(
+        if not os.path.isfile(input_data_dict['emhass_conf']['data_path'] / filename):
             logger.error("File not found error, run an optimization task first.")
             return
         else:
-            opt_res_latest = pd.read_csv(
+            opt_res_latest = pd.read_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_col='timestamp')
             opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
-            opt_res_latest.index.freq = input_data_dict[
+            opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"]["freq"]
     # Estimate the current index
-    now_precise = datetime.now(
-    idx_closest = opt_res_latest.index.get_indexer([now_precise], method=
+    now_precise = datetime.now(
+        input_data_dict["retrieve_hass_conf"]["time_zone"]
+    ).replace(second=0, microsecond=0)
+    if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
+            0
+        ]
+    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="ffill")[0]
+    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="bfill")[0]
     if idx_closest == -1:
-        idx_closest = opt_res_latest.index.get_indexer([now_precise], method=
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
+            0
+        ]
     # Publish the data
-    params = json.loads(input_data_dict[
-    publish_prefix = params[
+    params = json.loads(input_data_dict["params"])
+    publish_prefix = params["passed_data"]["publish_prefix"]
     # Publish PV forecast
-    custom_pv_forecast_id = params[
-    input_data_dict[
+    custom_pv_forecast_id = params["passed_data"]["custom_pv_forecast_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["P_PV"],
+        idx_closest,
+        custom_pv_forecast_id["entity_id"],
+        custom_pv_forecast_id["unit_of_measurement"],
+        custom_pv_forecast_id["friendly_name"],
+        type_var="power",
+        publish_prefix=publish_prefix,
+    )
     # Publish Load forecast
-    custom_load_forecast_id = params[
-    input_data_dict[
+    custom_load_forecast_id = params["passed_data"]["custom_load_forecast_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["P_Load"],
+        idx_closest,
+        custom_load_forecast_id["entity_id"],
+        custom_load_forecast_id["unit_of_measurement"],
+        custom_load_forecast_id["friendly_name"],
+        type_var="power",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = ["P_PV", "P_Load"]
     # Publish deferrable loads
-    custom_deferrable_forecast_id = params[
+    custom_deferrable_forecast_id = params["passed_data"][
+        "custom_deferrable_forecast_id"
+    ]
+    for k in range(input_data_dict["opt"].optim_conf["num_def_loads"]):
         if "P_deferrable{}".format(k) not in opt_res_latest.columns:
-            logger.error(
+            logger.error(
+                "P_deferrable{}".format(k)
+                + " was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
+            )
         else:
-            input_data_dict[
+            input_data_dict["rh"].post_data(
+                opt_res_latest["P_deferrable{}".format(k)],
+                idx_closest,
+                custom_deferrable_forecast_id[k]["entity_id"],
+                custom_deferrable_forecast_id[k]["unit_of_measurement"],
+                custom_deferrable_forecast_id[k]["friendly_name"],
+                type_var="deferrable",
+                publish_prefix=publish_prefix,
+            )
+            cols_published = cols_published + ["P_deferrable{}".format(k)]
     # Publish battery power
-    if input_data_dict[
-        if
-            logger.error(
+    if input_data_dict["opt"].optim_conf["set_use_battery"]:
+        if "P_batt" not in opt_res_latest.columns:
+            logger.error(
+                "P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
+            )
         else:
-            custom_batt_forecast_id = params[
-            input_data_dict[
+            custom_batt_forecast_id = params["passed_data"]["custom_batt_forecast_id"]
+            input_data_dict["rh"].post_data(
+                opt_res_latest["P_batt"],
+                idx_closest,
+                custom_batt_forecast_id["entity_id"],
+                custom_batt_forecast_id["unit_of_measurement"],
+                custom_batt_forecast_id["friendly_name"],
+                type_var="batt",
+                publish_prefix=publish_prefix,
+            )
+            cols_published = cols_published + ["P_batt"]
+            custom_batt_soc_forecast_id = params["passed_data"][
+                "custom_batt_soc_forecast_id"
+            ]
+            input_data_dict["rh"].post_data(
+                opt_res_latest["SOC_opt"] * 100,
+                idx_closest,
+                custom_batt_soc_forecast_id["entity_id"],
+                custom_batt_soc_forecast_id["unit_of_measurement"],
+                custom_batt_soc_forecast_id["friendly_name"],
+                type_var="SOC",
+                publish_prefix=publish_prefix,
+            )
+            cols_published = cols_published + ["SOC_opt"]
     # Publish grid power
-    custom_grid_forecast_id = params[
-    input_data_dict[
+    custom_grid_forecast_id = params["passed_data"]["custom_grid_forecast_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["P_grid"],
+        idx_closest,
+        custom_grid_forecast_id["entity_id"],
+        custom_grid_forecast_id["unit_of_measurement"],
+        custom_grid_forecast_id["friendly_name"],
+        type_var="power",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["P_grid"]
     # Publish total value of cost function
-    custom_cost_fun_id = params[
-    col_cost_fun = [i for i in opt_res_latest.columns if
-    input_data_dict[
+    custom_cost_fun_id = params["passed_data"]["custom_cost_fun_id"]
+    col_cost_fun = [i for i in opt_res_latest.columns if "cost_fun_" in i]
+    input_data_dict["rh"].post_data(
+        opt_res_latest[col_cost_fun],
+        idx_closest,
+        custom_cost_fun_id["entity_id"],
+        custom_cost_fun_id["unit_of_measurement"],
+        custom_cost_fun_id["friendly_name"],
+        type_var="cost_fun",
+        publish_prefix=publish_prefix,
+    )
     # Publish the optimization status
-    custom_cost_fun_id = params[
+    custom_cost_fun_id = params["passed_data"]["custom_optim_status_id"]
     if "optim_status" not in opt_res_latest:
-        opt_res_latest["optim_status"] =
-        logger.warning(
+        opt_res_latest["optim_status"] = "Optimal"
+        logger.warning(
+            "no optim_status in opt_res_latest, run an optimization task first",
+        )
+    input_data_dict["rh"].post_data(
+        opt_res_latest["optim_status"],
+        idx_closest,
+        custom_cost_fun_id["entity_id"],
+        custom_cost_fun_id["unit_of_measurement"],
+        custom_cost_fun_id["friendly_name"],
+        type_var="optim_status",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["optim_status"]
     # Publish unit_load_cost
-    custom_unit_load_cost_id = params[
-    input_data_dict[
+    custom_unit_load_cost_id = params["passed_data"]["custom_unit_load_cost_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["unit_load_cost"],
+        idx_closest,
+        custom_unit_load_cost_id["entity_id"],
+        custom_unit_load_cost_id["unit_of_measurement"],
+        custom_unit_load_cost_id["friendly_name"],
+        type_var="unit_load_cost",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["unit_load_cost"]
     # Publish unit_prod_price
-    custom_unit_prod_price_id = params[
-    input_data_dict[
+    custom_unit_prod_price_id = params["passed_data"]["custom_unit_prod_price_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["unit_prod_price"],
+        idx_closest,
+        custom_unit_prod_price_id["entity_id"],
+        custom_unit_prod_price_id["unit_of_measurement"],
+        custom_unit_prod_price_id["friendly_name"],
+        type_var="unit_prod_price",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["unit_prod_price"]
     # Create a DF resuming what has been published
     opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
     return opt_res
 
 
 def main():
     r"""Define the main command line entry function.
 
     This function may take several arguments as inputs. You can type `emhass --help` to see the list of options:
 
     - action: Set the desired action, options are: perfect-optim, dayahead-optim,
       naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune
 
     - config: Define path to the config.yaml file
 
     - costfun: Define the type of cost function, options are: profit, cost, self-consumption
 
     - log2file: Define if we should log to a file or not
 
     - params: Configuration parameters passed from data/options.json if using the add-on
 
     - runtimeparams: Pass runtime optimization parameters as dictionnary
 
     - debug: Use True for testing purposes
 
     """
     # Parsing arguments
     parser = argparse.ArgumentParser()
     parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim,\
         naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune')
     parser.add_argument('--config', type=str, help='Define path to the config.yaml file')
+    parser.add_argument('--data', type=str, help='Define path to the Data files (.csv & .pkl)')
+    parser.add_argument('--root', type=str, help='Define path emhass root')
     parser.add_argument('--costfun', type=str, default='profit', help='Define the type of cost function, options are: profit, cost, self-consumption')
     parser.add_argument('--log2file', type=strtobool, default='False', help='Define if we should log to a file or not')
     parser.add_argument('--params', type=str, default=None, help='Configuration parameters passed from data/options.json')
@@ -614,62 +969,140 @@ def main():
     parser.add_argument('--debug', type=strtobool, default='False', help='Use True for testing purposes')
     args = parser.parse_args()
     # The path to the configuration files
 
+    if args.config is not None:
+        config_path = pathlib.Path(args.config)
+    else:
+        config_path = pathlib.Path(str(utils.get_root(__file__, num_parent=2) / 'config_emhass.yaml' ))
+
+    if args.data is not None:
+        data_path = pathlib.Path(args.data)
+    else:
+        data_path = (config_path.parent / 'data/')
+
+    if args.root is not None:
+        root_path = pathlib.Path(args.root)
+    else:
+        root_path = config_path.parent
+
+    emhass_conf = {}
+    emhass_conf['config_path'] = config_path
+    emhass_conf['data_path'] = data_path
+    emhass_conf['root_path'] = root_path
     # create logger
-    logger, ch = utils.get_logger(__name__,
+    logger, ch = utils.get_logger(__name__, emhass_conf, save_to_file=bool(args.log2file))
+
+    logger.debug("config path: " + str(config_path))
+    logger.debug("data path: " + str(data_path))
+    logger.debug("root path: " + str(root_path))
+
+
+    if not config_path.exists():
+        logger.error("Could not find config_emhass.yaml file in: " + str(config_path))
+        logger.error("Try setting config file path with --config" )
+        return False
+
+    if not os.path.isdir(data_path):
+        logger.error("Could not find data foulder in: " + str(data_path))
+        logger.error("Try setting data path with --data" )
+        return False
+
+    if not os.path.isdir(root_path / 'src'):
+        logger.error("Could not find emhass/src foulder in: " + str(root_path))
+        logger.error("Try setting emhass root path with --root" )
+        return False
+
     # Additionnal argument
     try:
-        parser.add_argument(
+        parser.add_argument(
+            "--version",
+            action="version",
+            version="%(prog)s " + version("emhass"),
+        )
         args = parser.parse_args()
     except Exception:
-        logger.info(
+        logger.info(
+            "Version not found for emhass package. Or importlib exited with PackageNotFoundError.",
+        )
     # Setup parameters
-    input_data_dict = set_input_data_dict(
+    input_data_dict = set_input_data_dict(emhass_conf,
                                           args.costfun, args.params, args.runtimeparams, args.action,
                                           logger, args.debug)
     # Perform selected action
-    if args.action ==
+    if args.action == "perfect-optim":
         opt_res = perfect_forecast_optim(input_data_dict, logger, debug=args.debug)
-    elif args.action ==
+    elif args.action == "dayahead-optim":
         opt_res = dayahead_forecast_optim(input_data_dict, logger, debug=args.debug)
-    elif args.action ==
+    elif args.action == "naive-mpc-optim":
         opt_res = naive_mpc_optim(input_data_dict, logger, debug=args.debug)
-    elif args.action ==
-        df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(
+    elif args.action == "forecast-model-fit":
+        df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(
+            input_data_dict, logger, debug=args.debug
+        )
         opt_res = None
-    elif args.action ==
+    elif args.action == "forecast-model-predict":
         if args.debug:
             _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
         else:
             mlf = None
-        df_pred = forecast_model_predict(
+        df_pred = forecast_model_predict(
+            input_data_dict, logger, debug=args.debug, mlf=mlf
+        )
         opt_res = None
-    elif args.action ==
+    elif args.action == "forecast-model-tune":
         if args.debug:
             _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
         else:
             mlf = None
-        df_pred_optim, mlf = forecast_model_tune(
+        df_pred_optim, mlf = forecast_model_tune(
+            input_data_dict, logger, debug=args.debug, mlf=mlf
+        )
+        opt_res = None
+    elif args.action == "regressor-model-fit":
+        mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
         opt_res = None
-    elif args.action ==
+    elif args.action == "regressor-model-predict":
+        if args.debug:
+            mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
+        else:
+            mlr = None
+        prediction = regressor_model_predict(
+            input_data_dict,
+            logger,
+            debug=args.debug,
+            mlr=mlr,
+        )
+        opt_res = None
+    elif args.action == "publish-data":
         opt_res = publish_data(input_data_dict, logger)
     else:
         logger.error("The passed action argument is not valid")
+        logger.error("Try setting --action: perfect-optim, dayahead-optim, naive-mpc-optim, forecast-model-fit, forecast-model-predict, forecast-model-tune or publish-data")
         opt_res = None
     logger.info(opt_res)
     # Flush the logger
     ch.close()
     logger.removeHandler(ch)
-    if
-        args.action ==
+    if (
+        args.action == "perfect-optim"
+        or args.action == "dayahead-optim"
+        or args.action == "naive-mpc-optim"
+        or args.action == "publish-data"
+    ):
         return opt_res
-    elif args.action ==
+    elif args.action == "forecast-model-fit":
         return df_fit_pred, df_fit_pred_backtest, mlf
-    elif args.action ==
+    elif args.action == "forecast-model-predict":
         return df_pred
-    elif args.action ==
+    elif args.action == "regressor-model-fit":
+        return mlr
+    elif args.action == "regressor-model-predict":
+        return prediction
+    elif args.action == "forecast-model-tune":
         return df_pred_optim, mlf
+    else:
+        return opt_res
+
 
-if __name__ ==
+if __name__ == "__main__":
     main()