emhass 0.8.6__py3-none-any.whl → 0.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emhass/command_line.py +629 -293
- emhass/forecast.py +58 -31
- emhass/machine_learning_forecaster.py +4 -4
- emhass/machine_learning_regressor.py +251 -0
- emhass/optimization.py +4 -3
- emhass/retrieve_hass.py +161 -102
- emhass/static/advanced.html +3 -0
- emhass/static/script.js +2 -0
- emhass/utils.py +588 -303
- emhass/web_server.py +48 -26
- {emhass-0.8.6.dist-info → emhass-0.9.1.dist-info}/METADATA +29 -12
- emhass-0.9.1.dist-info/RECORD +26 -0
- emhass-0.8.6.dist-info/RECORD +0 -25
- {emhass-0.8.6.dist-info → emhass-0.9.1.dist-info}/LICENSE +0 -0
- {emhass-0.8.6.dist-info → emhass-0.9.1.dist-info}/WHEEL +0 -0
- {emhass-0.8.6.dist-info → emhass-0.9.1.dist-info}/entry_points.txt +0 -0
- {emhass-0.8.6.dist-info → emhass-0.9.1.dist-info}/top_level.txt +0 -0
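The headline change in 0.9.1 is the new emhass/machine_learning_regressor.py module, wired into command_line.py through the regressor-model-fit and regressor-model-predict actions shown in the diff below. As a rough, hypothetical sketch of the runtime parameters those actions read (the keys mirror the params["passed_data"] lookups visible in command_line.py; the file name, column names and values are placeholders, not values shipped with the package):

import json

# Hypothetical payloads for the new regressor actions. The keys mirror the
# params["passed_data"] lookups in command_line.py below; every value is a
# placeholder chosen for illustration only.
fit_runtimeparams = {
    "csv_file": "heating_prediction.csv",          # CSV expected in the data path
    "features": ["degreeday", "solar"],            # placeholder feature columns
    "target": "hours",                             # placeholder target column
    "timestamp": "timestamp",
    "model_type": "heating_hours_degreeday",
    "regression_model": "RandomForestRegression",  # assumed to be a supported model name
    "date_features": ["month", "day_of_week"],
}
predict_runtimeparams = {
    "model_type": "heating_hours_degreeday",
    "new_values": [12.79, 4.766],                  # placeholder feature values
    "mlr_predict_entity_id": "sensor.mlr_predict",
    "mlr_predict_unit_of_measurement": "h",
    "mlr_predict_friendly_name": "mlr predictor",
}

# These dictionaries would be serialized and passed as --runtimeparams to
# `emhass --action regressor-model-fit` / `--action regressor-model-predict`.
print(json.dumps(fit_runtimeparams))
print(json.dumps(predict_runtimeparams))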
emhass/command_line.py
CHANGED
@@ -8,36 +8,35 @@ import logging
 import json
 import copy
 import pickle
-import time
-import numpy as np
-import pandas as pd
 from datetime import datetime, timezone
 from typing import Optional, Tuple
+from importlib.metadata import version
+import numpy as np
+import pandas as pd
+
 from distutils.util import strtobool
 
-from importlib.metadata import version
 from emhass.retrieve_hass import RetrieveHass
 from emhass.forecast import Forecast
 from emhass.machine_learning_forecaster import MLForecaster
 from emhass.optimization import Optimization
+from emhass.machine_learning_regressor import MLRegressor
 from emhass import utils
 
 
-def set_input_data_dict(
-
-
+def set_input_data_dict(emhass_conf: dict, costfun: str,
+                        params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
+                        get_data_from_file: Optional[bool] = False) -> dict:
     """
     Set up some of the data needed for the different actions.
-
-    :param
-    :type
-    :param base_path: The parent folder of the config_path
-    :type base_path: str
+
+    :param emhass_conf: Dictionary containing the needed emhass paths
+    :type emhass_conf: dict
     :param costfun: The type of cost function to use for optimization problem
     :type costfun: str
     :param params: Configuration parameters passed from data/options.json
     :type params: str
-    :param runtimeparams: Runtime optimization parameters passed as a
+    :param runtimeparams: Runtime optimization parameters passed as a dictionary
     :type runtimeparams: str
     :param set_type: Set the type of setup based on following type of optimization
     :type set_type: str
@@ -52,99 +51,125 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
     logger.info("Setting up needed data")
     # Parsing yaml
     retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(
-
+        emhass_conf, use_secrets=not (get_data_from_file), params=params)
     # Treat runtimeparams
     params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
-        runtimeparams, params, retrieve_hass_conf,
-        optim_conf, plant_conf, set_type, logger)
+        runtimeparams, params, retrieve_hass_conf, optim_conf, plant_conf, set_type, logger)
     # Define main objects
-    rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
-                      retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
-                      params,
+    rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
+                      retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
+                      params, emhass_conf, logger, get_data_from_file=get_data_from_file)
     fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
-                    params,
-    opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
-                       fcst.var_load_cost, fcst.var_prod_price,
-                       costfun,
+                    params, emhass_conf, logger, get_data_from_file=get_data_from_file)
+    opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
+                       fcst.var_load_cost, fcst.var_prod_price,
+                       costfun, emhass_conf, logger)
     # Perform setup based on type of action
     if set_type == "perfect-optim":
         # Retrieve data from hass
         if get_data_from_file:
-            with open(
+            with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
                 rh.df_final, days_list, var_list = pickle.load(inp)
+            retrieve_hass_conf['var_load'] = str(var_list[0])
+            retrieve_hass_conf['var_PV'] = str(var_list[1])
+            retrieve_hass_conf['var_interp'] = [
+                retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+            retrieve_hass_conf['var_replace_zero'] = [
+                retrieve_hass_conf['var_PV']]
         else:
-            days_list = utils.get_days_list(
-
-
-
-
-
-
-
-
+            days_list = utils.get_days_list(
+                retrieve_hass_conf["days_to_retrieve"])
+            var_list = [retrieve_hass_conf["var_load"],
+                        retrieve_hass_conf["var_PV"]]
+            if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
+                return False
+            if not rh.prepare_data(retrieve_hass_conf["var_load"],
+                                   load_negative=retrieve_hass_conf["load_negative"],
+                                   set_zero_min=retrieve_hass_conf["set_zero_min"],
+                                   var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+                                   var_interp=retrieve_hass_conf["var_interp"]):
                 return False
         df_input_data = rh.df_final.copy()
         # What we don't need for this type of action
         P_PV_forecast, P_load_forecast, df_input_data_dayahead = None, None, None
     elif set_type == "dayahead-optim":
         # Get PV and load forecasts
-        df_weather = fcst.get_weather_forecast(
+        df_weather = fcst.get_weather_forecast(
+            method=optim_conf["weather_forecast_method"])
         P_PV_forecast = fcst.get_power_from_weather(df_weather)
-        P_load_forecast = fcst.get_load_forecast(
-
-
+        P_load_forecast = fcst.get_load_forecast(
+            method=optim_conf['load_forecast_method'])
+        if isinstance(P_load_forecast, bool) and not P_load_forecast:
+            logger.error(
+                "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
             return False
-        df_input_data_dayahead = pd.DataFrame(np.transpose(np.vstack(
-
-
+        df_input_data_dayahead = pd.DataFrame(np.transpose(np.vstack(
+            [P_PV_forecast.values, P_load_forecast.values])), index=P_PV_forecast.index,
+            columns=["P_PV_forecast", "P_load_forecast"])
         df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
         params = json.loads(params)
-        if
-            prediction_horizon = params[
-            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
+        if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
+            prediction_horizon = params["passed_data"]["prediction_horizon"]
+            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
+                df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
         # What we don't need for this type of action
         df_input_data, days_list = None, None
     elif set_type == "naive-mpc-optim":
         # Retrieve data from hass
         if get_data_from_file:
-            with open(
+            with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
                 rh.df_final, days_list, var_list = pickle.load(inp)
+            retrieve_hass_conf['var_load'] = str(var_list[0])
+            retrieve_hass_conf['var_PV'] = str(var_list[1])
+            retrieve_hass_conf['var_interp'] = [
+                retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+            retrieve_hass_conf['var_replace_zero'] = [
+                retrieve_hass_conf['var_PV']]
         else:
             days_list = utils.get_days_list(1)
-            var_list = [retrieve_hass_conf[
-
-
+            var_list = [retrieve_hass_conf["var_load"],
+                        retrieve_hass_conf["var_PV"]]
+            if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
                 return False
-            if not rh.prepare_data(retrieve_hass_conf[
-
-
-
+            if not rh.prepare_data(retrieve_hass_conf["var_load"],
+                                   load_negative=retrieve_hass_conf["load_negative"],
+                                   set_zero_min=retrieve_hass_conf["set_zero_min"],
+                                   var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+                                   var_interp=retrieve_hass_conf["var_interp"]):
                 return False
         df_input_data = rh.df_final.copy()
         # Get PV and load forecasts
-        df_weather = fcst.get_weather_forecast(
-
-
+        df_weather = fcst.get_weather_forecast(
+            method=optim_conf['weather_forecast_method'])
+        P_PV_forecast = fcst.get_power_from_weather(
+            df_weather, set_mix_forecast=True, df_now=df_input_data)
+        P_load_forecast = fcst.get_load_forecast(
+            method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
+        if isinstance(P_load_forecast, bool) and not P_load_forecast:
+            logger.error(
+                "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
+            return False
         df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
         df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
-        df_input_data_dayahead.columns = [
+        df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
         params = json.loads(params)
-        if
-            prediction_horizon = params[
-            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
-
+        if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
+            prediction_horizon = params["passed_data"]["prediction_horizon"]
+            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
+                df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
+    elif (set_type == "forecast-model-fit" or set_type == "forecast-model-predict" or set_type == "forecast-model-tune"):
         df_input_data_dayahead = None
         P_PV_forecast, P_load_forecast = None, None
         params = json.loads(params)
         # Retrieve data from hass
-        days_to_retrieve = params[
-        model_type = params[
-        var_model = params[
+        days_to_retrieve = params["passed_data"]["days_to_retrieve"]
+        model_type = params["passed_data"]["model_type"]
+        var_model = params["passed_data"]["var_model"]
         if get_data_from_file:
             days_list = None
             filename = 'data_train_'+model_type+'.pkl'
-
-            with open(
+            filename_path = emhass_conf['data_path'] / filename
+            with open(filename_path, 'rb') as inp:
                 df_input_data, _ = pickle.load(inp)
             df_input_data = df_input_data[df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve):]
         else:
@@ -153,19 +178,56 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
             if not rh.get_data(days_list, var_list):
                 return False
             df_input_data = rh.df_final.copy()
+    elif set_type == "regressor-model-fit" or set_type == "regressor-model-predict":
+        df_input_data, df_input_data_dayahead = None, None
+        P_PV_forecast, P_load_forecast = None, None
+        params = json.loads(params)
+        days_list = None
+        csv_file = params["passed_data"].get("csv_file", None)
+        if "features" in params["passed_data"]:
+            features = params["passed_data"]["features"]
+        if "target" in params["passed_data"]:
+            target = params["passed_data"]["target"]
+        if "timestamp" in params["passed_data"]:
+            timestamp = params["passed_data"]["timestamp"]
+        if csv_file:
+            if get_data_from_file:
+                base_path = emhass_conf["data_path"]  # + "/data"
+                filename_path = pathlib.Path(base_path) / csv_file
+            else:
+                filename_path = emhass_conf["data_path"] / csv_file
+            if filename_path.is_file():
+                df_input_data = pd.read_csv(filename_path, parse_dates=True)
+            else:
+                logger.error("The CSV file " + csv_file +
+                             " was not found in path: " + str(emhass_conf["data_path"]))
+                return False
+            # raise ValueError("The CSV file " + csv_file + " was not found.")
+            required_columns = []
+            required_columns.extend(features)
+            required_columns.append(target)
+            if timestamp is not None:
+                required_columns.append(timestamp)
+            if not set(required_columns).issubset(df_input_data.columns):
+                logger.error(
+                    "The cvs file does not contain the required columns.")
+                msg = f"CSV file should contain the following columns: {', '.join(required_columns)}"
+                logger.error(msg)
+                return False
     elif set_type == "publish-data":
         df_input_data, df_input_data_dayahead = None, None
         P_PV_forecast, P_load_forecast = None, None
         days_list = None
     else:
-        logger.error(
+        logger.error(
+            "The passed action argument and hence the set_type parameter for setup is not valid",
+        )
         df_input_data, df_input_data_dayahead = None, None
         P_PV_forecast, P_load_forecast = None, None
         days_list = None
-
-    # The input data dictionnary to return
+    # The input data dictionary to return
     input_data_dict = {
-        '
+        'emhass_conf': emhass_conf,
         'retrieve_hass_conf': retrieve_hass_conf,
         'rh': rh,
         'opt': opt,
@@ -179,12 +241,14 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
         'days_list': days_list
     }
     return input_data_dict
-
+
+
 def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
-
+                           save_data_to_file: Optional[bool] = True,
+                           debug: Optional[bool] = False) -> pd.DataFrame:
     """
     Perform a call to the perfect forecast optimization routine.
-
+
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -200,27 +264,36 @@ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
     logger.info("Performing perfect forecast optimization")
     # Load cost and prod price forecast
     df_input_data = input_data_dict['fcst'].get_load_cost_forecast(
-        input_data_dict['df_input_data'],
+        input_data_dict['df_input_data'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'],
         list_and_perfect=True)
+    if isinstance(df_input_data, bool) and not df_input_data:
+        return False
     df_input_data = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'],
         list_and_perfect=True)
-
+    if isinstance(df_input_data, bool) and not df_input_data:
+        return False
+    opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(
+        df_input_data, input_data_dict['days_list'])
     # Save CSV file for analysis
     if save_data_to_file:
-        filename =
-
-
+        filename = "opt_res_perfect_optim_" + \
+            input_data_dict["costfun"] + ".csv"
+    else:  # Just save the latest optimization results
+        filename = "opt_res_latest.csv"
     if not debug:
-        opt_res.to_csv(
+        opt_res.to_csv(
+            input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res
-
-
-
+
+
+def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
+                            save_data_to_file: Optional[bool] = False,
+                            debug: Optional[bool] = False) -> pd.DataFrame:
     """
     Perform a call to the day-ahead optimization routine.
-
+
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -238,26 +311,35 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
     df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
         input_data_dict['df_input_data_dayahead'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
+    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+        return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
-        df_input_data_dayahead,
+        df_input_data_dayahead,
         method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
+    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+        return False
     opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
         df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
     # Save CSV file for publish_data
     if save_data_to_file:
-        today = datetime.now(timezone.utc).replace(
-
-
-        filename =
+        today = datetime.now(timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0
+        )
+        filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
+    else:  # Just save the latest optimization results
+        filename = "opt_res_latest.csv"
     if not debug:
-        opt_res_dayahead.to_csv(
+        opt_res_dayahead.to_csv(
+            input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res_dayahead
 
-
-
+
+def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
+                    save_data_to_file: Optional[bool] = False,
+                    debug: Optional[bool] = False) -> pd.DataFrame:
     """
     Perform a call to the naive Model Predictive Controller optimization routine.
-
+
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -275,30 +357,39 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
     df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
         input_data_dict['df_input_data_dayahead'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
+    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+        return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
+    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+        return False
     # The specifics params for the MPC at runtime
-    prediction_horizon = input_data_dict[
-    soc_init = input_data_dict[
-    soc_final = input_data_dict[
-    def_total_hours = input_data_dict[
-    def_start_timestep = input_data_dict[
-    def_end_timestep = input_data_dict[
-    opt_res_naive_mpc = input_data_dict[
-        df_input_data_dayahead, input_data_dict[
-        prediction_horizon, soc_init, soc_final, def_total_hours,
+    prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
+    soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
+    soc_final = input_data_dict["params"]["passed_data"]["soc_final"]
+    def_total_hours = input_data_dict["params"]["passed_data"]["def_total_hours"]
+    def_start_timestep = input_data_dict["params"]["passed_data"]["def_start_timestep"]
+    def_end_timestep = input_data_dict["params"]["passed_data"]["def_end_timestep"]
+    opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
+        df_input_data_dayahead, input_data_dict["P_PV_forecast"], input_data_dict["P_load_forecast"],
+        prediction_horizon, soc_init, soc_final, def_total_hours,
+        def_start_timestep, def_end_timestep)
     # Save CSV file for publish_data
     if save_data_to_file:
-        today = datetime.now(timezone.utc).replace(
-
-
-        filename =
+        today = datetime.now(timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0
+        )
+        filename = "opt_res_naive_mpc_" + today.strftime("%Y_%m_%d") + ".csv"
+    else:  # Just save the latest optimization results
+        filename = "opt_res_latest.csv"
     if not debug:
-        opt_res_naive_mpc.to_csv(
+        opt_res_naive_mpc.to_csv(
+            input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res_naive_mpc
 
-
-
+
+def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
+                       debug: Optional[bool] = False) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
     """Perform a forecast model fit from training data retrieved from Home Assistant.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -317,22 +408,26 @@ def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
     num_lags = input_data_dict['params']['passed_data']['num_lags']
     split_date_delta = input_data_dict['params']['passed_data']['split_date_delta']
     perform_backtest = input_data_dict['params']['passed_data']['perform_backtest']
-    root = input_data_dict['root']
     # The ML forecaster object
-    mlf = MLForecaster(data, model_type, var_model, sklearn_model,
+    mlf = MLForecaster(data, model_type, var_model, sklearn_model,
+                       num_lags, input_data_dict['emhass_conf'], logger)
     # Fit the ML model
-    df_pred, df_pred_backtest = mlf.fit(
-
+    df_pred, df_pred_backtest = mlf.fit(
+        split_date_delta=split_date_delta, perform_backtest=perform_backtest
+    )
     # Save model
     if not debug:
         filename = model_type+'_mlf.pkl'
-
+        filename_path = input_data_dict['emhass_conf']['data_path'] / filename
+        with open(filename_path, 'wb') as outp:
             pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
     return df_pred, df_pred_backtest, mlf
 
-
-
-
+
+def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
+                           use_last_window: Optional[bool] = True,
+                           debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
+                           ) -> pd.DataFrame:
     r"""Perform a forecast model predict using a previously trained skforecast model.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -355,51 +450,61 @@ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
     """
     # Load model
     model_type = input_data_dict['params']['passed_data']['model_type']
-    root = input_data_dict['root']
     filename = model_type+'_mlf.pkl'
-    filename_path =
+    filename_path = input_data_dict['emhass_conf']['data_path'] / filename
     if not debug:
         if filename_path.is_file():
-            with open(filename_path,
+            with open(filename_path, "rb") as inp:
                 mlf = pickle.load(inp)
         else:
-            logger.error(
+            logger.error(
+                "The ML forecaster file was not found, please run a model fit method before this predict method",
+            )
             return
     # Make predictions
     if use_last_window:
-        data_last_window = copy.deepcopy(input_data_dict[
+        data_last_window = copy.deepcopy(input_data_dict["df_input_data"])
     else:
         data_last_window = None
     predictions = mlf.predict(data_last_window)
     # Publish data to a Home Assistant sensor
-    model_predict_publish = input_data_dict[
-
-
-
-
+    model_predict_publish = input_data_dict["params"]["passed_data"][
+        "model_predict_publish"
+    ]
+    model_predict_entity_id = input_data_dict["params"]["passed_data"][
+        "model_predict_entity_id"
+    ]
+    model_predict_unit_of_measurement = input_data_dict["params"]["passed_data"][
+        "model_predict_unit_of_measurement"
+    ]
+    model_predict_friendly_name = input_data_dict["params"]["passed_data"][
+        "model_predict_friendly_name"
+    ]
+    publish_prefix = input_data_dict["params"]["passed_data"]["publish_prefix"]
     if model_predict_publish is True:
         # Estimate the current index
-        now_precise = datetime.now(
-
-
-
-        idx_closest = predictions.index.get_indexer([now_precise], method=
-        elif input_data_dict[
-            idx_closest = predictions.index.get_indexer([now_precise], method=
+        now_precise = datetime.now(
+            input_data_dict["retrieve_hass_conf"]["time_zone"]
+        ).replace(second=0, microsecond=0)
+        if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+            idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
+        elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+            idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[0]
+        elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+            idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[0]
         if idx_closest == -1:
-            idx_closest = predictions.index.get_indexer([now_precise], method=
+            idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
         # Publish Load forecast
-        input_data_dict[
-
-
-
-            type_var = 'mlforecaster',
-            publish_prefix=publish_prefix)
+        input_data_dict["rh"].post_data(
+            predictions, idx_closest, model_predict_entity_id,
+            model_predict_unit_of_measurement, model_predict_friendly_name,
+            type_var="mlforecaster", publish_prefix=publish_prefix)
     return predictions
 
-
-
-
+
+def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
+                        debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
+                        ) -> Tuple[pd.DataFrame, MLForecaster]:
     """Tune a forecast model hyperparameters using bayesian optimization.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -416,31 +521,139 @@ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
     """
     # Load model
     model_type = input_data_dict['params']['passed_data']['model_type']
-    root = input_data_dict['root']
     filename = model_type+'_mlf.pkl'
-    filename_path =
+    filename_path = input_data_dict['emhass_conf']['data_path'] / filename
     if not debug:
         if filename_path.is_file():
-            with open(filename_path,
+            with open(filename_path, "rb") as inp:
                 mlf = pickle.load(inp)
         else:
-            logger.error(
+            logger.error(
+                "The ML forecaster file was not found, please run a model fit method before this tune method",
+            )
             return None, None
     # Tune the model
     df_pred_optim = mlf.tune(debug=debug)
     # Save model
     if not debug:
         filename = model_type+'_mlf.pkl'
-
+        filename_path = input_data_dict['emhass_conf']['data_path'] / filename
+        with open(filename_path, 'wb') as outp:
             pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
     return df_pred_optim, mlf
 
-
-
-
+
+def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
+                        debug: Optional[bool] = False) -> MLRegressor:
+    """Perform a forecast model fit from training data retrieved from Home Assistant.
+
+    :param input_data_dict: A dictionnary with multiple data used by the action functions
+    :type input_data_dict: dict
+    :param logger: The passed logger object
+    :type logger: logging.Logger
+    :param debug: True to debug, useful for unit testing, defaults to False
+    :type debug: Optional[bool], optional
+    """
+    data = copy.deepcopy(input_data_dict["df_input_data"])
+    if "model_type" in input_data_dict["params"]["passed_data"]:
+        model_type = input_data_dict["params"]["passed_data"]["model_type"]
+    else:
+        logger.error("parameter: 'model_type' not passed")
+        return False
+    if "regression_model" in input_data_dict["params"]["passed_data"]:
+        regression_model = input_data_dict["params"]["passed_data"]["regression_model"]
+    else:
+        logger.error("parameter: 'regression_model' not passed")
+        return False
+    if "features" in input_data_dict["params"]["passed_data"]:
+        features = input_data_dict["params"]["passed_data"]["features"]
+    else:
+        logger.error("parameter: 'features' not passed")
+        return False
+    if "target" in input_data_dict["params"]["passed_data"]:
+        target = input_data_dict["params"]["passed_data"]["target"]
+    else:
+        logger.error("parameter: 'target' not passed")
+        return False
+    if "timestamp" in input_data_dict["params"]["passed_data"]:
+        timestamp = input_data_dict["params"]["passed_data"]["timestamp"]
+    else:
+        logger.error("parameter: 'timestamp' not passed")
+        return False
+    if "date_features" in input_data_dict["params"]["passed_data"]:
+        date_features = input_data_dict["params"]["passed_data"]["date_features"]
+    else:
+        logger.error("parameter: 'date_features' not passed")
+        return False
+    # The MLRegressor object
+    mlr = MLRegressor(data, model_type, regression_model, features, target, timestamp, logger)
+    # Fit the ML model
+    mlr.fit(date_features=date_features)
+    # Save model
+    if not debug:
+        filename = model_type + "_mlr.pkl"
+        filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
+        with open(filename_path, "wb") as outp:
+            pickle.dump(mlr, outp, pickle.HIGHEST_PROTOCOL)
+    return mlr
+
+
+def regressor_model_predict(input_data_dict: dict, logger: logging.Logger,
+                            debug: Optional[bool] = False, mlr: Optional[MLRegressor] = None
+                            ) -> np.ndarray:
+    """Perform a prediction from csv file.
+
+    :param input_data_dict: A dictionnary with multiple data used by the action functions
+    :type input_data_dict: dict
+    :param logger: The passed logger object
+    :type logger: logging.Logger
+    :param debug: True to debug, useful for unit testing, defaults to False
+    :type debug: Optional[bool], optional
+    """
+    if "model_type" in input_data_dict["params"]["passed_data"]:
+        model_type = input_data_dict["params"]["passed_data"]["model_type"]
+    else:
+        logger.error("parameter: 'model_type' not passed")
+        return False
+    filename = model_type + "_mlr.pkl"
+    filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
+    if not debug:
+        if filename_path.is_file():
+            with open(filename_path, "rb") as inp:
+                mlr = pickle.load(inp)
+        else:
+            logger.error(
+                "The ML forecaster file was not found, please run a model fit method before this predict method",
+            )
+            return False
+    if "new_values" in input_data_dict["params"]["passed_data"]:
+        new_values = input_data_dict["params"]["passed_data"]["new_values"]
+    else:
+        logger.error("parameter: 'new_values' not passed")
+        return False
+    # Predict from csv file
+    prediction = mlr.predict(new_values)
+    mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get(
+        "mlr_predict_entity_id", "sensor.mlr_predict")
+    mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get(
+        "mlr_predict_unit_of_measurement", "h")
+    mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get(
+        "mlr_predict_friendly_name", "mlr predictor")
+    # Publish prediction
+    idx = 0
+    if not debug:
+        input_data_dict["rh"].post_data(prediction, idx, mlr_predict_entity_id,
+                                        mlr_predict_unit_of_measurement, mlr_predict_friendly_name,
+                                        type_var="mlregressor")
+    return prediction
+
+
+def publish_data(input_data_dict: dict, logger: logging.Logger,
+                 save_data_to_file: Optional[bool] = False,
+                 opt_res_latest: Optional[pd.DataFrame] = None) -> pd.DataFrame:
     """
     Publish the data obtained from the optimization results.
-
+
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -454,224 +667,347 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
     logger.info("Publishing data to HASS instance")
     # Check if a day ahead optimization has been performed (read CSV file)
     if save_data_to_file:
-        today = datetime.now(timezone.utc).replace(
-
+        today = datetime.now(timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0
+        )
+        filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
     else:
-        filename =
+        filename = "opt_res_latest.csv"
     if opt_res_latest is None:
-        if not os.path.isfile(
-            logger.error(
+        if not os.path.isfile(input_data_dict['emhass_conf']['data_path'] / filename):
+            logger.error(
+                "File not found error, run an optimization task first.")
             return
         else:
-            opt_res_latest = pd.read_csv(
+            opt_res_latest = pd.read_csv(
+                input_data_dict['emhass_conf']['data_path'] / filename, index_col='timestamp')
             opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
-            opt_res_latest.index.freq = input_data_dict[
+            opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"]["freq"]
     # Estimate the current index
-    now_precise = datetime.now(
-
-
-
-    idx_closest = opt_res_latest.index.get_indexer([now_precise], method=
-    elif input_data_dict[
-        idx_closest = opt_res_latest.index.get_indexer(
+    now_precise = datetime.now(
+        input_data_dict["retrieve_hass_conf"]["time_zone"]
+    ).replace(second=0, microsecond=0)
+    if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
+    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+        idx_closest = opt_res_latest.index.get_indexer(
+            [now_precise], method="ffill")[0]
+    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+        idx_closest = opt_res_latest.index.get_indexer(
+            [now_precise], method="bfill")[0]
     if idx_closest == -1:
-        idx_closest = opt_res_latest.index.get_indexer([now_precise], method=
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
     # Publish the data
-    params = json.loads(input_data_dict[
-    publish_prefix = params[
+    params = json.loads(input_data_dict["params"])
+    publish_prefix = params["passed_data"]["publish_prefix"]
     # Publish PV forecast
-    custom_pv_forecast_id = params[
-    input_data_dict[
-
-
-
-
-
+    custom_pv_forecast_id = params["passed_data"]["custom_pv_forecast_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["P_PV"],
+        idx_closest,
+        custom_pv_forecast_id["entity_id"],
+        custom_pv_forecast_id["unit_of_measurement"],
+        custom_pv_forecast_id["friendly_name"],
+        type_var="power",
+        publish_prefix=publish_prefix,
+    )
     # Publish Load forecast
-    custom_load_forecast_id = params[
-    input_data_dict[
-
-
-
-
-
-
+    custom_load_forecast_id = params["passed_data"]["custom_load_forecast_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["P_Load"],
+        idx_closest,
+        custom_load_forecast_id["entity_id"],
+        custom_load_forecast_id["unit_of_measurement"],
+        custom_load_forecast_id["friendly_name"],
+        type_var="power",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = ["P_PV", "P_Load"]
     # Publish deferrable loads
-    custom_deferrable_forecast_id = params[
-
+    custom_deferrable_forecast_id = params["passed_data"][
+        "custom_deferrable_forecast_id"
+    ]
+    for k in range(input_data_dict["opt"].optim_conf["num_def_loads"]):
         if "P_deferrable{}".format(k) not in opt_res_latest.columns:
-            logger.error(
+            logger.error(
+                "P_deferrable{}".format(k)
+                + " was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
+            )
         else:
-            input_data_dict[
-
-
-
-
-
-
+            input_data_dict["rh"].post_data(
+                opt_res_latest["P_deferrable{}".format(k)],
+                idx_closest,
+                custom_deferrable_forecast_id[k]["entity_id"],
+                custom_deferrable_forecast_id[k]["unit_of_measurement"],
+                custom_deferrable_forecast_id[k]["friendly_name"],
+                type_var="deferrable",
+                publish_prefix=publish_prefix,
+            )
+            cols_published = cols_published + ["P_deferrable{}".format(k)]
     # Publish battery power
-    if input_data_dict[
-        if
-            logger.error(
+    if input_data_dict["opt"].optim_conf["set_use_battery"]:
+        if "P_batt" not in opt_res_latest.columns:
+            logger.error(
+                "P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
+            )
         else:
-            custom_batt_forecast_id = params[
-            input_data_dict[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            custom_batt_forecast_id = params["passed_data"]["custom_batt_forecast_id"]
+            input_data_dict["rh"].post_data(
+                opt_res_latest["P_batt"],
+                idx_closest,
+                custom_batt_forecast_id["entity_id"],
+                custom_batt_forecast_id["unit_of_measurement"],
+                custom_batt_forecast_id["friendly_name"],
+                type_var="batt",
+                publish_prefix=publish_prefix,
+            )
+            cols_published = cols_published + ["P_batt"]
+            custom_batt_soc_forecast_id = params["passed_data"][
+                "custom_batt_soc_forecast_id"
+            ]
+            input_data_dict["rh"].post_data(
+                opt_res_latest["SOC_opt"] * 100,
+                idx_closest,
+                custom_batt_soc_forecast_id["entity_id"],
+                custom_batt_soc_forecast_id["unit_of_measurement"],
+                custom_batt_soc_forecast_id["friendly_name"],
+                type_var="SOC",
+                publish_prefix=publish_prefix,
+            )
+            cols_published = cols_published + ["SOC_opt"]
     # Publish grid power
-    custom_grid_forecast_id = params[
-    input_data_dict[
-
-
-
-
-
-
+    custom_grid_forecast_id = params["passed_data"]["custom_grid_forecast_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["P_grid"],
+        idx_closest,
+        custom_grid_forecast_id["entity_id"],
+        custom_grid_forecast_id["unit_of_measurement"],
+        custom_grid_forecast_id["friendly_name"],
+        type_var="power",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["P_grid"]
     # Publish total value of cost function
-    custom_cost_fun_id = params[
-    col_cost_fun = [i for i in opt_res_latest.columns if
-    input_data_dict[
-
-
-
-
-
+    custom_cost_fun_id = params["passed_data"]["custom_cost_fun_id"]
+    col_cost_fun = [i for i in opt_res_latest.columns if "cost_fun_" in i]
+    input_data_dict["rh"].post_data(
+        opt_res_latest[col_cost_fun],
+        idx_closest,
+        custom_cost_fun_id["entity_id"],
+        custom_cost_fun_id["unit_of_measurement"],
+        custom_cost_fun_id["friendly_name"],
+        type_var="cost_fun",
+        publish_prefix=publish_prefix,
+    )
     # Publish the optimization status
-    custom_cost_fun_id = params[
+    custom_cost_fun_id = params["passed_data"]["custom_optim_status_id"]
     if "optim_status" not in opt_res_latest:
-        opt_res_latest["optim_status"] =
-        logger.warning(
-
-
-
-
-
-
-
+        opt_res_latest["optim_status"] = "Optimal"
+        logger.warning(
+            "no optim_status in opt_res_latest, run an optimization task first",
+        )
+    input_data_dict["rh"].post_data(
+        opt_res_latest["optim_status"],
+        idx_closest,
+        custom_cost_fun_id["entity_id"],
+        custom_cost_fun_id["unit_of_measurement"],
+        custom_cost_fun_id["friendly_name"],
+        type_var="optim_status",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["optim_status"]
     # Publish unit_load_cost
-    custom_unit_load_cost_id = params[
-    input_data_dict[
-
-
-
-
-
-
+    custom_unit_load_cost_id = params["passed_data"]["custom_unit_load_cost_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["unit_load_cost"],
+        idx_closest,
+        custom_unit_load_cost_id["entity_id"],
+        custom_unit_load_cost_id["unit_of_measurement"],
+        custom_unit_load_cost_id["friendly_name"],
+        type_var="unit_load_cost",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["unit_load_cost"]
     # Publish unit_prod_price
-    custom_unit_prod_price_id = params[
-    input_data_dict[
-
-
-
-
-
-
+    custom_unit_prod_price_id = params["passed_data"]["custom_unit_prod_price_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["unit_prod_price"],
+        idx_closest,
+        custom_unit_prod_price_id["entity_id"],
+        custom_unit_prod_price_id["unit_of_measurement"],
+        custom_unit_prod_price_id["friendly_name"],
+        type_var="unit_prod_price",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["unit_prod_price"]
     # Create a DF resuming what has been published
-    opt_res = opt_res_latest[cols_published].loc[[
+    opt_res = opt_res_latest[cols_published].loc[[
+        opt_res_latest.index[idx_closest]]]
     return opt_res
-
-
+
+
 def main():
     r"""Define the main command line entry function.
 
     This function may take several arguments as inputs. You can type `emhass --help` to see the list of options:
-
+
     - action: Set the desired action, options are: perfect-optim, dayahead-optim,
       naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune
-
+
     - config: Define path to the config.yaml file
-
+
     - costfun: Define the type of cost function, options are: profit, cost, self-consumption
-
+
     - log2file: Define if we should log to a file or not
-
+
    - params: Configuration parameters passed from data/options.json if using the add-on
-
+
     - runtimeparams: Pass runtime optimization parameters as dictionnary
-
+
     - debug: Use True for testing purposes
-
+
     """
     # Parsing arguments
     parser = argparse.ArgumentParser()
     parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim,\
         naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune')
-    parser.add_argument('--config', type=str,
-
-    parser.add_argument('--
-
-    parser.add_argument('--
-    parser.add_argument('--
+    parser.add_argument('--config', type=str,
+                        help='Define path to the config.yaml file')
+    parser.add_argument('--data', type=str,
+                        help='Define path to the Data files (.csv & .pkl)')
+    parser.add_argument('--root', type=str, help='Define path emhass root')
+    parser.add_argument('--costfun', type=str, default='profit',
+                        help='Define the type of cost function, options are: profit, cost, self-consumption')
+    parser.add_argument('--log2file', type=strtobool, default='False',
+                        help='Define if we should log to a file or not')
+    parser.add_argument('--params', type=str, default=None,
+                        help='Configuration parameters passed from data/options.json')
+    parser.add_argument('--runtimeparams', type=str, default=None,
+                        help='Pass runtime optimization parameters as dictionnary')
+    parser.add_argument('--debug', type=strtobool,
+                        default='False', help='Use True for testing purposes')
     args = parser.parse_args()
     # The path to the configuration files
-
-
+    if args.config is not None:
+        config_path = pathlib.Path(args.config)
+    else:
+        config_path = pathlib.Path(
+            str(utils.get_root(__file__, num_parent=2) / 'config_emhass.yaml'))
+    if args.data is not None:
+        data_path = pathlib.Path(args.data)
+    else:
+        data_path = (config_path.parent / 'data/')
+    if args.root is not None:
+        root_path = pathlib.Path(args.root)
+    else:
+        root_path = config_path.parent
+    emhass_conf = {}
+    emhass_conf['config_path'] = config_path
+    emhass_conf['data_path'] = data_path
+    emhass_conf['root_path'] = root_path
     # create logger
-    logger, ch = utils.get_logger(
+    logger, ch = utils.get_logger(
+        __name__, emhass_conf, save_to_file=bool(args.log2file))
+    logger.debug("config path: " + str(config_path))
+    logger.debug("data path: " + str(data_path))
+    logger.debug("root path: " + str(root_path))
+    if not config_path.exists():
+        logger.error(
+            "Could not find config_emhass.yaml file in: " + str(config_path))
+        logger.error("Try setting config file path with --config")
+        return False
+    if not os.path.isdir(data_path):
+        logger.error("Could not find data foulder in: " + str(data_path))
+        logger.error("Try setting data path with --data")
+        return False
+    if not os.path.isdir(root_path / 'src'):
+        logger.error("Could not find emhass/src foulder in: " + str(root_path))
+        logger.error("Try setting emhass root path with --root")
+        return False
     # Additionnal argument
     try:
-        parser.add_argument(
+        parser.add_argument(
+            "--version",
+            action="version",
+            version="%(prog)s " + version("emhass"),
+        )
         args = parser.parse_args()
     except Exception:
-        logger.info(
+        logger.info(
+            "Version not found for emhass package. Or importlib exited with PackageNotFoundError.",
+        )
     # Setup parameters
-    input_data_dict = set_input_data_dict(
-        args.costfun, args.params, args.runtimeparams, args.action,
+    input_data_dict = set_input_data_dict(emhass_conf,
+                                          args.costfun, args.params, args.runtimeparams, args.action,
                                           logger, args.debug)
     # Perform selected action
-    if args.action ==
-        opt_res = perfect_forecast_optim(
-
-
-
+    if args.action == "perfect-optim":
+        opt_res = perfect_forecast_optim(
+            input_data_dict, logger, debug=args.debug)
+    elif args.action == "dayahead-optim":
+        opt_res = dayahead_forecast_optim(
+            input_data_dict, logger, debug=args.debug)
+    elif args.action == "naive-mpc-optim":
        opt_res = naive_mpc_optim(input_data_dict, logger, debug=args.debug)
-    elif args.action ==
-        df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(
+    elif args.action == "forecast-model-fit":
+        df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(
+            input_data_dict, logger, debug=args.debug
+        )
         opt_res = None
-    elif args.action ==
+    elif args.action == "forecast-model-predict":
         if args.debug:
             _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
         else:
             mlf = None
         df_pred = forecast_model_predict(input_data_dict, logger, debug=args.debug, mlf=mlf)
         opt_res = None
-    elif args.action ==
+    elif args.action == "forecast-model-tune":
        if args.debug:
            _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
        else:
            mlf = None
        df_pred_optim, mlf = forecast_model_tune(input_data_dict, logger, debug=args.debug, mlf=mlf)
        opt_res = None
-    elif args.action ==
+    elif args.action == "regressor-model-fit":
+        mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
+        opt_res = None
+    elif args.action == "regressor-model-predict":
+        if args.debug:
+            mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
+        else:
+            mlr = None
+        prediction = regressor_model_predict(input_data_dict, logger, debug=args.debug,mlr=mlr)
+        opt_res = None
+    elif args.action == "publish-data":
         opt_res = publish_data(input_data_dict, logger)
     else:
         logger.error("The passed action argument is not valid")
+        logger.error("Try setting --action: perfect-optim, dayahead-optim, naive-mpc-optim, forecast-model-fit, forecast-model-predict, forecast-model-tune or publish-data")
         opt_res = None
     logger.info(opt_res)
     # Flush the logger
     ch.close()
     logger.removeHandler(ch)
-    if
-    args.action ==
+    if (
+        args.action == "perfect-optim"
+        or args.action == "dayahead-optim"
+        or args.action == "naive-mpc-optim"
+        or args.action == "publish-data"
+    ):
         return opt_res
-    elif args.action ==
+    elif args.action == "forecast-model-fit":
         return df_fit_pred, df_fit_pred_backtest, mlf
-    elif args.action ==
+    elif args.action == "forecast-model-predict":
         return df_pred
-    elif args.action ==
+    elif args.action == "regressor-model-fit":
+        return mlr
+    elif args.action == "regressor-model-predict":
+        return prediction
+    elif args.action == "forecast-model-tune":
         return df_pred_optim, mlf
+    else:
+        return opt_res
+
 
-if __name__ ==
+if __name__ == "__main__":
     main()