emhass-0.12.4-py3-none-any.whl → emhass-0.12.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {emhass-0.12.4.dist-info → emhass-0.12.6.dist-info}/METADATA +34 -17
- {emhass-0.12.4.dist-info → emhass-0.12.6.dist-info}/RECORD +5 -17
- emhass/__init__.py +0 -0
- emhass/command_line.py +0 -1748
- emhass/data/emhass_inverters.csv +0 -8
- emhass/data/emhass_modules.csv +0 -6
- emhass/forecast.py +0 -1348
- emhass/img/emhass_icon.png +0 -0
- emhass/machine_learning_forecaster.py +0 -397
- emhass/machine_learning_regressor.py +0 -275
- emhass/optimization.py +0 -1504
- emhass/retrieve_hass.py +0 -670
- emhass/utils.py +0 -1678
- emhass/web_server.py +0 -756
- {emhass-0.12.4.dist-info → emhass-0.12.6.dist-info}/WHEEL +0 -0
- {emhass-0.12.4.dist-info → emhass-0.12.6.dist-info}/entry_points.txt +0 -0
- {emhass-0.12.4.dist-info → emhass-0.12.6.dist-info}/licenses/LICENSE +0 -0
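For reference, the file-level changes listed above can be reproduced locally by downloading both released wheels and comparing their archive listings. A minimal sketch, assuming pip and PyPI access are available (the scratch directory name is arbitrary):

import subprocess
import zipfile
from pathlib import Path

# Download both released wheels into a scratch directory (no dependencies needed).
dest = Path("wheels")
for release in ("0.12.4", "0.12.6"):
    subprocess.run(
        ["pip", "download", f"emhass=={release}", "--no-deps",
         "--only-binary=:all:", "-d", str(dest)],
        check=True,
    )

# Compare the archive member lists to see which files were added or removed.
old = set(zipfile.ZipFile(next(dest.glob("emhass-0.12.4-*.whl"))).namelist())
new = set(zipfile.ZipFile(next(dest.glob("emhass-0.12.6-*.whl"))).namelist())
print("removed:", sorted(old - new))
print("added:", sorted(new - old))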
emhass/command_line.py
DELETED
@@ -1,1748 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import copy
import json
import logging
import os
import pathlib
import pickle
import re
import time
from datetime import datetime, timezone
from importlib.metadata import version
from typing import Optional, Tuple

import numpy as np
import pandas as pd

from emhass import utils
from emhass.forecast import Forecast
from emhass.machine_learning_forecaster import MLForecaster
from emhass.machine_learning_regressor import MLRegressor
from emhass.optimization import Optimization
from emhass.retrieve_hass import RetrieveHass


def set_input_data_dict(
    emhass_conf: dict,
    costfun: str,
    params: str,
    runtimeparams: str,
    set_type: str,
    logger: logging.Logger,
    get_data_from_file: Optional[bool] = False,
) -> dict:
    """
    Set up some of the data needed for the different actions.

    :param emhass_conf: Dictionary containing the needed emhass paths
    :type emhass_conf: dict
    :param costfun: The type of cost function to use for optimization problem
    :type costfun: str
    :param params: Configuration parameters passed from data/options.json
    :type params: str
    :param runtimeparams: Runtime optimization parameters passed as a dictionary
    :type runtimeparams: str
    :param set_type: Set the type of setup based on following type of optimization
    :type set_type: str
    :param logger: The passed logger object
    :type logger: logging object
    :param get_data_from_file: Use data from saved CSV file (useful for debug)
    :type get_data_from_file: bool, optional
    :return: A dictionnary with multiple data used by the action functions
    :rtype: dict

    """
    logger.info("Setting up needed data")

    # check if passed params is a dict
    if (params != None) and (params != "null"):
        if type(params) is str:
            params = json.loads(params)
    else:
        params = {}

    # Parsing yaml
    retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(params, logger)
    if type(retrieve_hass_conf) is bool:
        return False

    # Treat runtimeparams
    params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
        runtimeparams,
        params,
        retrieve_hass_conf,
        optim_conf,
        plant_conf,
        set_type,
        logger,
        emhass_conf,
    )

    # Define the data retrieve object
    rh = RetrieveHass(
        retrieve_hass_conf["hass_url"],
        retrieve_hass_conf["long_lived_token"],
        retrieve_hass_conf["optimization_time_step"],
        retrieve_hass_conf["time_zone"],
        params,
        emhass_conf,
        logger,
        get_data_from_file=get_data_from_file,
    )

    # Retrieve basic configuration data from hass
    if get_data_from_file:
        with open(emhass_conf["data_path"] / "test_df_final.pkl", "rb") as inp:
            _, _, _, rh.ha_config = pickle.load(inp)
    else:
        response = rh.get_ha_config()
        if type(response) is bool:
            return False

    # Update the params dict using data from the HA configuration
    params = utils.update_params_with_ha_config(
        params,
        rh.ha_config,
    )

    # Define the forecast and optimization objects
    fcst = Forecast(
        retrieve_hass_conf,
        optim_conf,
        plant_conf,
        params,
        emhass_conf,
        logger,
        get_data_from_file=get_data_from_file,
    )
    opt = Optimization(
        retrieve_hass_conf,
        optim_conf,
        plant_conf,
        fcst.var_load_cost,
        fcst.var_prod_price,
        costfun,
        emhass_conf,
        logger,
    )

    # Perform setup based on type of action
    if set_type == "perfect-optim":
        # Retrieve data from hass
        if get_data_from_file:
            with open(emhass_conf["data_path"] / "test_df_final.pkl", "rb") as inp:
                rh.df_final, days_list, var_list, rh.ha_config = pickle.load(inp)
            retrieve_hass_conf["sensor_power_load_no_var_loads"] = str(var_list[0])
            retrieve_hass_conf["sensor_power_photovoltaics"] = str(var_list[1])
            retrieve_hass_conf["sensor_linear_interp"] = [
                retrieve_hass_conf["sensor_power_photovoltaics"],
                retrieve_hass_conf["sensor_power_load_no_var_loads"],
            ]
            retrieve_hass_conf["sensor_replace_zero"] = [
                retrieve_hass_conf["sensor_power_photovoltaics"]
            ]
        else:
            days_list = utils.get_days_list(
                retrieve_hass_conf["historic_days_to_retrieve"]
            )
            var_list = [
                retrieve_hass_conf["sensor_power_load_no_var_loads"],
                retrieve_hass_conf["sensor_power_photovoltaics"],
            ]
            if not rh.get_data(
                days_list,
                var_list,
                minimal_response=False,
                significant_changes_only=False,
            ):
                return False
        if not rh.prepare_data(
            retrieve_hass_conf["sensor_power_load_no_var_loads"],
            load_negative=retrieve_hass_conf["load_negative"],
            set_zero_min=retrieve_hass_conf["set_zero_min"],
            var_replace_zero=retrieve_hass_conf["sensor_replace_zero"],
            var_interp=retrieve_hass_conf["sensor_linear_interp"],
        ):
            return False
        df_input_data = rh.df_final.copy()
        # What we don't need for this type of action
        P_PV_forecast, P_load_forecast, df_input_data_dayahead = None, None, None
    elif set_type == "dayahead-optim":
        # Get PV and load forecasts
        if (
            optim_conf["set_use_pv"]
            or optim_conf.get("weather_forecast_method", None) == "list"
        ):
            df_weather = fcst.get_weather_forecast(
                method=optim_conf["weather_forecast_method"]
            )
            if isinstance(df_weather, bool) and not df_weather:
                return False
            P_PV_forecast = fcst.get_power_from_weather(df_weather)
        else:
            P_PV_forecast = pd.Series(0, index=fcst.forecast_dates)
        P_load_forecast = fcst.get_load_forecast(
            method=optim_conf["load_forecast_method"]
        )
        if isinstance(P_load_forecast, bool) and not P_load_forecast:
            logger.error(
                "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data"
            )
            return False
        df_input_data_dayahead = pd.DataFrame(
            np.transpose(np.vstack([P_PV_forecast.values, P_load_forecast.values])),
            index=P_PV_forecast.index,
            columns=["P_PV_forecast", "P_load_forecast"],
        )
        if (
            "optimization_time_step" in retrieve_hass_conf
            and retrieve_hass_conf["optimization_time_step"]
        ):
            if not isinstance(
                retrieve_hass_conf["optimization_time_step"],
                pd._libs.tslibs.timedeltas.Timedelta,
            ):
                optimization_time_step = pd.to_timedelta(
                    retrieve_hass_conf["optimization_time_step"], "minute"
                )
            else:
                optimization_time_step = retrieve_hass_conf["optimization_time_step"]
            df_input_data_dayahead = df_input_data_dayahead.asfreq(
                optimization_time_step
            )
        else:
            df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
        params = json.loads(params)
        if (
            "prediction_horizon" in params["passed_data"]
            and params["passed_data"]["prediction_horizon"] is not None
        ):
            prediction_horizon = params["passed_data"]["prediction_horizon"]
            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
                df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
                    prediction_horizon - 1
                ]
            ]
        # What we don't need for this type of action
        df_input_data, days_list = None, None
    elif set_type == "naive-mpc-optim":
        # Retrieve data from hass
        if get_data_from_file:
            with open(emhass_conf["data_path"] / "test_df_final.pkl", "rb") as inp:
                rh.df_final, days_list, var_list, rh.ha_config = pickle.load(inp)
            retrieve_hass_conf["sensor_power_load_no_var_loads"] = str(var_list[0])
            retrieve_hass_conf["sensor_power_photovoltaics"] = str(var_list[1])
            retrieve_hass_conf["sensor_linear_interp"] = [
                retrieve_hass_conf["sensor_power_photovoltaics"],
                retrieve_hass_conf["sensor_power_load_no_var_loads"],
            ]
            retrieve_hass_conf["sensor_replace_zero"] = [
                retrieve_hass_conf["sensor_power_photovoltaics"]
            ]
        else:
            days_list = utils.get_days_list(1)
            var_list = [
                retrieve_hass_conf["sensor_power_load_no_var_loads"],
                retrieve_hass_conf["sensor_power_photovoltaics"],
            ]
            if not rh.get_data(
                days_list,
                var_list,
                minimal_response=False,
                significant_changes_only=False,
            ):
                return False
        if not rh.prepare_data(
            retrieve_hass_conf["sensor_power_load_no_var_loads"],
            load_negative=retrieve_hass_conf["load_negative"],
            set_zero_min=retrieve_hass_conf["set_zero_min"],
            var_replace_zero=retrieve_hass_conf["sensor_replace_zero"],
            var_interp=retrieve_hass_conf["sensor_linear_interp"],
        ):
            return False
        df_input_data = rh.df_final.copy()
        # Get PV and load forecasts
        if (
            optim_conf["set_use_pv"]
            or optim_conf.get("weather_forecast_method", None) == "list"
        ):
            df_weather = fcst.get_weather_forecast(
                method=optim_conf["weather_forecast_method"]
            )
            if isinstance(df_weather, bool) and not df_weather:
                return False
            P_PV_forecast = fcst.get_power_from_weather(
                df_weather, set_mix_forecast=True, df_now=df_input_data
            )
        else:
            P_PV_forecast = pd.Series(0, index=fcst.forecast_dates)
        P_load_forecast = fcst.get_load_forecast(
            method=optim_conf["load_forecast_method"],
            set_mix_forecast=True,
            df_now=df_input_data,
        )
        if isinstance(P_load_forecast, bool) and not P_load_forecast:
            logger.error(
                "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data"
            )
            return False
        df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
        if (
            "optimization_time_step" in retrieve_hass_conf
            and retrieve_hass_conf["optimization_time_step"]
        ):
            if not isinstance(
                retrieve_hass_conf["optimization_time_step"],
                pd._libs.tslibs.timedeltas.Timedelta,
            ):
                optimization_time_step = pd.to_timedelta(
                    retrieve_hass_conf["optimization_time_step"], "minute"
                )
            else:
                optimization_time_step = retrieve_hass_conf["optimization_time_step"]
            df_input_data_dayahead = df_input_data_dayahead.asfreq(
                optimization_time_step
            )
        else:
            df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
        df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
        params = json.loads(params)
        if (
            "prediction_horizon" in params["passed_data"]
            and params["passed_data"]["prediction_horizon"] is not None
        ):
            prediction_horizon = params["passed_data"]["prediction_horizon"]
            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
                df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
                    prediction_horizon - 1
                ]
            ]
    elif (
        set_type == "forecast-model-fit"
        or set_type == "forecast-model-predict"
        or set_type == "forecast-model-tune"
    ):
        df_input_data_dayahead = None
        P_PV_forecast, P_load_forecast = None, None
        params = json.loads(params)
        # Retrieve data from hass
        days_to_retrieve = params["passed_data"]["historic_days_to_retrieve"]
        model_type = params["passed_data"]["model_type"]
        var_model = params["passed_data"]["var_model"]
        if get_data_from_file:
            days_list = None
            filename = "data_train_" + model_type + ".pkl"
            filename_path = emhass_conf["data_path"] / filename
            with open(filename_path, "rb") as inp:
                df_input_data, _ = pickle.load(inp)
            df_input_data = df_input_data[
                df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve) :
            ]
        else:
            days_list = utils.get_days_list(days_to_retrieve)
            var_list = [var_model]
            if not rh.get_data(days_list, var_list):
                return False
            df_input_data = rh.df_final.copy()
    elif set_type == "regressor-model-fit" or set_type == "regressor-model-predict":
        df_input_data, df_input_data_dayahead = None, None
        P_PV_forecast, P_load_forecast = None, None
        params = json.loads(params)
        days_list = None
        csv_file = params["passed_data"].get("csv_file", None)
        if "features" in params["passed_data"]:
            features = params["passed_data"]["features"]
        if "target" in params["passed_data"]:
            target = params["passed_data"]["target"]
        if "timestamp" in params["passed_data"]:
            timestamp = params["passed_data"]["timestamp"]
        if csv_file:
            if get_data_from_file:
                base_path = emhass_conf["data_path"]  # + "/data"
                filename_path = pathlib.Path(base_path) / csv_file
            else:
                filename_path = emhass_conf["data_path"] / csv_file
            if filename_path.is_file():
                df_input_data = pd.read_csv(filename_path, parse_dates=True)
            else:
                logger.error(
                    "The CSV file "
                    + csv_file
                    + " was not found in path: "
                    + str(emhass_conf["data_path"])
                )
                return False
            # raise ValueError("The CSV file " + csv_file + " was not found.")
            required_columns = []
            required_columns.extend(features)
            required_columns.append(target)
            if timestamp is not None:
                required_columns.append(timestamp)
            if not set(required_columns).issubset(df_input_data.columns):
                logger.error("The cvs file does not contain the required columns.")
                msg = f"CSV file should contain the following columns: {', '.join(required_columns)}"
                logger.error(msg)
                return False
    elif set_type == "publish-data":
        df_input_data, df_input_data_dayahead = None, None
        P_PV_forecast, P_load_forecast = None, None
        days_list = None
    else:
        logger.error(
            "The passed action argument and hence the set_type parameter for setup is not valid",
        )
        df_input_data, df_input_data_dayahead = None, None
        P_PV_forecast, P_load_forecast = None, None
        days_list = None
    # The input data dictionary to return
    input_data_dict = {
        "emhass_conf": emhass_conf,
        "retrieve_hass_conf": retrieve_hass_conf,
        "rh": rh,
        "opt": opt,
        "fcst": fcst,
        "df_input_data": df_input_data,
        "df_input_data_dayahead": df_input_data_dayahead,
        "P_PV_forecast": P_PV_forecast,
        "P_load_forecast": P_load_forecast,
        "costfun": costfun,
        "params": params,
        "days_list": days_list,
    }
    return input_data_dict


def weather_forecast_cache(
    emhass_conf: dict, params: str, runtimeparams: str, logger: logging.Logger
) -> bool:
    """
    Perform a call to get forecast function, intend to save results to cache.

    :param emhass_conf: Dictionary containing the needed emhass paths
    :type emhass_conf: dict
    :param params: Configuration parameters passed from data/options.json
    :type params: str
    :param runtimeparams: Runtime optimization parameters passed as a dictionary
    :type runtimeparams: str
    :param logger: The passed logger object
    :type logger: logging object
    :return: A bool for function completion
    :rtype: bool

    """
    # Parsing yaml
    retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(params, logger)
    # Treat runtimeparams
    params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
        runtimeparams,
        params,
        retrieve_hass_conf,
        optim_conf,
        plant_conf,
        "forecast",
        logger,
        emhass_conf,
    )
    # Make sure weather_forecast_cache is true
    if (params != None) and (params != "null"):
        params = json.loads(params)
    else:
        params = {}
    params["passed_data"]["weather_forecast_cache"] = True
    params = json.dumps(params)
    # Create Forecast object
    fcst = Forecast(
        retrieve_hass_conf, optim_conf, plant_conf, params, emhass_conf, logger
    )
    result = fcst.get_weather_forecast(optim_conf["weather_forecast_method"])
    if isinstance(result, bool) and not result:
        return False

    return True


def perfect_forecast_optim(
    input_data_dict: dict,
    logger: logging.Logger,
    save_data_to_file: Optional[bool] = True,
    debug: Optional[bool] = False,
) -> pd.DataFrame:
    """
    Perform a call to the perfect forecast optimization routine.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging object
    :param save_data_to_file: Save optimization results to CSV file
    :type save_data_to_file: bool, optional
    :param debug: A debug option useful for unittests
    :type debug: bool, optional
    :return: The output data of the optimization
    :rtype: pd.DataFrame

    """
    logger.info("Performing perfect forecast optimization")
    # Load cost and prod price forecast
    df_input_data = input_data_dict["fcst"].get_load_cost_forecast(
        input_data_dict["df_input_data"],
        method=input_data_dict["fcst"].optim_conf["load_cost_forecast_method"],
        list_and_perfect=True,
    )
    if isinstance(df_input_data, bool) and not df_input_data:
        return False
    df_input_data = input_data_dict["fcst"].get_prod_price_forecast(
        df_input_data,
        method=input_data_dict["fcst"].optim_conf["production_price_forecast_method"],
        list_and_perfect=True,
    )
    if isinstance(df_input_data, bool) and not df_input_data:
        return False
    opt_res = input_data_dict["opt"].perform_perfect_forecast_optim(
        df_input_data, input_data_dict["days_list"]
    )
    # Save CSV file for analysis
    if save_data_to_file:
        filename = "opt_res_perfect_optim_" + input_data_dict["costfun"] + ".csv"
    else:  # Just save the latest optimization results
        filename = "opt_res_latest.csv"
    if not debug:
        opt_res.to_csv(
            input_data_dict["emhass_conf"]["data_path"] / filename,
            index_label="timestamp",
        )
    if not isinstance(input_data_dict["params"], dict):
        params = json.loads(input_data_dict["params"])
    else:
        params = input_data_dict["params"]

    # if continual_publish, save perfect results to data_path/entities json
    if input_data_dict["retrieve_hass_conf"].get("continual_publish", False) or params[
        "passed_data"
    ].get("entity_save", False):
        # Trigger the publish function, save entity data and not post to HA
        publish_data(input_data_dict, logger, entity_save=True, dont_post=True)

    return opt_res


def dayahead_forecast_optim(
    input_data_dict: dict,
    logger: logging.Logger,
    save_data_to_file: Optional[bool] = False,
    debug: Optional[bool] = False,
) -> pd.DataFrame:
    """
    Perform a call to the day-ahead optimization routine.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging object
    :param save_data_to_file: Save optimization results to CSV file
    :type save_data_to_file: bool, optional
    :param debug: A debug option useful for unittests
    :type debug: bool, optional
    :return: The output data of the optimization
    :rtype: pd.DataFrame

    """
    logger.info("Performing day-ahead forecast optimization")
    # Load cost and prod price forecast
    df_input_data_dayahead = input_data_dict["fcst"].get_load_cost_forecast(
        input_data_dict["df_input_data_dayahead"],
        method=input_data_dict["fcst"].optim_conf["load_cost_forecast_method"],
    )
    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
        return False
    df_input_data_dayahead = input_data_dict["fcst"].get_prod_price_forecast(
        df_input_data_dayahead,
        method=input_data_dict["fcst"].optim_conf["production_price_forecast_method"],
    )
    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
        return False
    if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
        df_input_data_dayahead["outdoor_temperature_forecast"] = input_data_dict[
            "params"
        ]["passed_data"]["outdoor_temperature_forecast"]
    opt_res_dayahead = input_data_dict["opt"].perform_dayahead_forecast_optim(
        df_input_data_dayahead,
        input_data_dict["P_PV_forecast"],
        input_data_dict["P_load_forecast"],
    )
    # Save CSV file for publish_data
    if save_data_to_file:
        today = datetime.now(timezone.utc).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
    else:  # Just save the latest optimization results
        filename = "opt_res_latest.csv"
    if not debug:
        opt_res_dayahead.to_csv(
            input_data_dict["emhass_conf"]["data_path"] / filename,
            index_label="timestamp",
        )

    if not isinstance(input_data_dict["params"], dict):
        params = json.loads(input_data_dict["params"])
    else:
        params = input_data_dict["params"]

    # if continual_publish, save day_ahead results to data_path/entities json
    if input_data_dict["retrieve_hass_conf"].get("continual_publish", False) or params[
        "passed_data"
    ].get("entity_save", False):
        # Trigger the publish function, save entity data and not post to HA
        publish_data(input_data_dict, logger, entity_save=True, dont_post=True)

    return opt_res_dayahead


def naive_mpc_optim(
    input_data_dict: dict,
    logger: logging.Logger,
    save_data_to_file: Optional[bool] = False,
    debug: Optional[bool] = False,
) -> pd.DataFrame:
    """
    Perform a call to the naive Model Predictive Controller optimization routine.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging object
    :param save_data_to_file: Save optimization results to CSV file
    :type save_data_to_file: bool, optional
    :param debug: A debug option useful for unittests
    :type debug: bool, optional
    :return: The output data of the optimization
    :rtype: pd.DataFrame

    """
    logger.info("Performing naive MPC optimization")
    # Load cost and prod price forecast
    df_input_data_dayahead = input_data_dict["fcst"].get_load_cost_forecast(
        input_data_dict["df_input_data_dayahead"],
        method=input_data_dict["fcst"].optim_conf["load_cost_forecast_method"],
    )
    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
        return False
    df_input_data_dayahead = input_data_dict["fcst"].get_prod_price_forecast(
        df_input_data_dayahead,
        method=input_data_dict["fcst"].optim_conf["production_price_forecast_method"],
    )
    if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
        return False
    if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
        df_input_data_dayahead["outdoor_temperature_forecast"] = input_data_dict[
            "params"
        ]["passed_data"]["outdoor_temperature_forecast"]
    # The specifics params for the MPC at runtime
    prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
    soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
    soc_final = input_data_dict["params"]["passed_data"]["soc_final"]
    def_total_hours = input_data_dict["params"]["optim_conf"].get(
        "operating_hours_of_each_deferrable_load", None
    )
    def_total_timestep = input_data_dict["params"]["optim_conf"].get(
        "operating_timesteps_of_each_deferrable_load", None
    )
    def_start_timestep = input_data_dict["params"]["optim_conf"][
        "start_timesteps_of_each_deferrable_load"
    ]
    def_end_timestep = input_data_dict["params"]["optim_conf"][
        "end_timesteps_of_each_deferrable_load"
    ]
    opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
        df_input_data_dayahead,
        input_data_dict["P_PV_forecast"],
        input_data_dict["P_load_forecast"],
        prediction_horizon,
        soc_init,
        soc_final,
        def_total_hours,
        def_total_timestep,
        def_start_timestep,
        def_end_timestep,
    )
    # Save CSV file for publish_data
    if save_data_to_file:
        today = datetime.now(timezone.utc).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        filename = "opt_res_naive_mpc_" + today.strftime("%Y_%m_%d") + ".csv"
    else:  # Just save the latest optimization results
        filename = "opt_res_latest.csv"
    if not debug:
        opt_res_naive_mpc.to_csv(
            input_data_dict["emhass_conf"]["data_path"] / filename,
            index_label="timestamp",
        )

    if not isinstance(input_data_dict["params"], dict):
        params = json.loads(input_data_dict["params"])
    else:
        params = input_data_dict["params"]

    # if continual_publish, save mpc results to data_path/entities json
    if input_data_dict["retrieve_hass_conf"].get("continual_publish", False) or params[
        "passed_data"
    ].get("entity_save", False):
        # Trigger the publish function, save entity data and not post to HA
        publish_data(input_data_dict, logger, entity_save=True, dont_post=True)

    return opt_res_naive_mpc


def forecast_model_fit(
    input_data_dict: dict, logger: logging.Logger, debug: Optional[bool] = False
) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
    """Perform a forecast model fit from training data retrieved from Home Assistant.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging.Logger
    :param debug: True to debug, useful for unit testing, defaults to False
    :type debug: Optional[bool], optional
    :return: The DataFrame containing the forecast data results without and with backtest and the `mlforecaster` object
    :rtype: Tuple[pd.DataFrame, pd.DataFrame, mlforecaster]
    """
    data = copy.deepcopy(input_data_dict["df_input_data"])
    model_type = input_data_dict["params"]["passed_data"]["model_type"]
    var_model = input_data_dict["params"]["passed_data"]["var_model"]
    sklearn_model = input_data_dict["params"]["passed_data"]["sklearn_model"]
    num_lags = input_data_dict["params"]["passed_data"]["num_lags"]
    split_date_delta = input_data_dict["params"]["passed_data"]["split_date_delta"]
    perform_backtest = input_data_dict["params"]["passed_data"]["perform_backtest"]
    # The ML forecaster object
    mlf = MLForecaster(
        data,
        model_type,
        var_model,
        sklearn_model,
        num_lags,
        input_data_dict["emhass_conf"],
        logger,
    )
    # Fit the ML model
    df_pred, df_pred_backtest = mlf.fit(
        split_date_delta=split_date_delta, perform_backtest=perform_backtest
    )
    # Save model
    if not debug:
        filename = model_type + "_mlf.pkl"
        filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
        with open(filename_path, "wb") as outp:
            pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
    return df_pred, df_pred_backtest, mlf


def forecast_model_predict(
    input_data_dict: dict,
    logger: logging.Logger,
    use_last_window: Optional[bool] = True,
    debug: Optional[bool] = False,
    mlf: Optional[MLForecaster] = None,
) -> pd.DataFrame:
    r"""Perform a forecast model predict using a previously trained skforecast model.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging.Logger
    :param use_last_window: True if the 'last_window' option should be used for the \
        custom machine learning forecast model. The 'last_window=True' means that the data \
        that will be used to generate the new forecast will be freshly retrieved from \
        Home Assistant. This data is needed because the forecast model is an auto-regressive \
        model with lags. If 'False' then the data using during the model train is used. Defaults to True
    :type use_last_window: Optional[bool], optional
    :param debug: True to debug, useful for unit testing, defaults to False
    :type debug: Optional[bool], optional
    :param mlf: The 'mlforecaster' object previously trained. This is mainly used for debug \
        and unit testing. In production the actual model will be read from a saved pickle file. Defaults to None
    :type mlf: Optional[mlforecaster], optional
    :return: The DataFrame containing the forecast prediction data
    :rtype: pd.DataFrame
    """
    # Load model
    model_type = input_data_dict["params"]["passed_data"]["model_type"]
    filename = model_type + "_mlf.pkl"
    filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
    if not debug:
        if filename_path.is_file():
            with open(filename_path, "rb") as inp:
                mlf = pickle.load(inp)
        else:
            logger.error(
                "The ML forecaster file was not found, please run a model fit method before this predict method",
            )
            return
    # Make predictions
    if use_last_window:
        data_last_window = copy.deepcopy(input_data_dict["df_input_data"])
    else:
        data_last_window = None
    predictions = mlf.predict(data_last_window)
    # Publish data to a Home Assistant sensor
    model_predict_publish = input_data_dict["params"]["passed_data"][
        "model_predict_publish"
    ]
    model_predict_entity_id = input_data_dict["params"]["passed_data"][
        "model_predict_entity_id"
    ]
    model_predict_unit_of_measurement = input_data_dict["params"]["passed_data"][
        "model_predict_unit_of_measurement"
    ]
    model_predict_friendly_name = input_data_dict["params"]["passed_data"][
        "model_predict_friendly_name"
    ]
    publish_prefix = input_data_dict["params"]["passed_data"]["publish_prefix"]
    if model_predict_publish is True:
        # Estimate the current index
        now_precise = datetime.now(
            input_data_dict["retrieve_hass_conf"]["time_zone"]
        ).replace(second=0, microsecond=0)
        if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
            idx_closest = predictions.index.get_indexer(
                [now_precise], method="nearest"
            )[0]
        elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
            idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[
                0
            ]
        elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
            idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[
                0
            ]
        if idx_closest == -1:
            idx_closest = predictions.index.get_indexer(
                [now_precise], method="nearest"
            )[0]
        # Publish Load forecast
        input_data_dict["rh"].post_data(
            predictions,
            idx_closest,
            model_predict_entity_id,
            model_predict_unit_of_measurement,
            model_predict_friendly_name,
            type_var="mlforecaster",
            publish_prefix=publish_prefix,
        )
    return predictions


def forecast_model_tune(
    input_data_dict: dict,
    logger: logging.Logger,
    debug: Optional[bool] = False,
    mlf: Optional[MLForecaster] = None,
) -> Tuple[pd.DataFrame, MLForecaster]:
    """Tune a forecast model hyperparameters using bayesian optimization.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging.Logger
    :param debug: True to debug, useful for unit testing, defaults to False
    :type debug: Optional[bool], optional
    :param mlf: The 'mlforecaster' object previously trained. This is mainly used for debug \
        and unit testing. In production the actual model will be read from a saved pickle file. Defaults to None
    :type mlf: Optional[mlforecaster], optional
    :return: The DataFrame containing the forecast data results using the optimized model
    :rtype: pd.DataFrame
    """
    # Load model
    model_type = input_data_dict["params"]["passed_data"]["model_type"]
    filename = model_type + "_mlf.pkl"
    filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
    if not debug:
        if filename_path.is_file():
            with open(filename_path, "rb") as inp:
                mlf = pickle.load(inp)
        else:
            logger.error(
                "The ML forecaster file was not found, please run a model fit method before this tune method",
            )
            return None, None
    # Tune the model
    df_pred_optim = mlf.tune(debug=debug)
    # Save model
    if not debug:
        filename = model_type + "_mlf.pkl"
        filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
        with open(filename_path, "wb") as outp:
            pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
    return df_pred_optim, mlf


def regressor_model_fit(
    input_data_dict: dict, logger: logging.Logger, debug: Optional[bool] = False
) -> MLRegressor:
    """Perform a forecast model fit from training data retrieved from Home Assistant.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging.Logger
    :param debug: True to debug, useful for unit testing, defaults to False
    :type debug: Optional[bool], optional
    """
    data = copy.deepcopy(input_data_dict["df_input_data"])
    if "model_type" in input_data_dict["params"]["passed_data"]:
        model_type = input_data_dict["params"]["passed_data"]["model_type"]
    else:
        logger.error("parameter: 'model_type' not passed")
        return False
    if "regression_model" in input_data_dict["params"]["passed_data"]:
        regression_model = input_data_dict["params"]["passed_data"]["regression_model"]
    else:
        logger.error("parameter: 'regression_model' not passed")
        return False
    if "features" in input_data_dict["params"]["passed_data"]:
        features = input_data_dict["params"]["passed_data"]["features"]
    else:
        logger.error("parameter: 'features' not passed")
        return False
    if "target" in input_data_dict["params"]["passed_data"]:
        target = input_data_dict["params"]["passed_data"]["target"]
    else:
        logger.error("parameter: 'target' not passed")
        return False
    if "timestamp" in input_data_dict["params"]["passed_data"]:
        timestamp = input_data_dict["params"]["passed_data"]["timestamp"]
    else:
        logger.error("parameter: 'timestamp' not passed")
        return False
    if "date_features" in input_data_dict["params"]["passed_data"]:
        date_features = input_data_dict["params"]["passed_data"]["date_features"]
    else:
        logger.error("parameter: 'date_features' not passed")
        return False
    # The MLRegressor object
    mlr = MLRegressor(
        data, model_type, regression_model, features, target, timestamp, logger
    )
    # Fit the ML model
    fit = mlr.fit(date_features=date_features)
    if not fit:
        return False
    # Save model
    if not debug:
        filename = model_type + "_mlr.pkl"
        filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
        with open(filename_path, "wb") as outp:
            pickle.dump(mlr, outp, pickle.HIGHEST_PROTOCOL)
    return mlr


def regressor_model_predict(
    input_data_dict: dict,
    logger: logging.Logger,
    debug: Optional[bool] = False,
    mlr: Optional[MLRegressor] = None,
) -> np.ndarray:
    """Perform a prediction from csv file.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging.Logger
    :param debug: True to debug, useful for unit testing, defaults to False
    :type debug: Optional[bool], optional
    """
    if "model_type" in input_data_dict["params"]["passed_data"]:
        model_type = input_data_dict["params"]["passed_data"]["model_type"]
    else:
        logger.error("parameter: 'model_type' not passed")
        return False
    filename = model_type + "_mlr.pkl"
    filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
    if not debug:
        if filename_path.is_file():
            with open(filename_path, "rb") as inp:
                mlr = pickle.load(inp)
        else:
            logger.error(
                "The ML forecaster file was not found, please run a model fit method before this predict method",
            )
            return False
    if "new_values" in input_data_dict["params"]["passed_data"]:
        new_values = input_data_dict["params"]["passed_data"]["new_values"]
    else:
        logger.error("parameter: 'new_values' not passed")
        return False
    # Predict from csv file
    prediction = mlr.predict(new_values)
    mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get(
        "mlr_predict_entity_id", "sensor.mlr_predict"
    )
    mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get(
        "mlr_predict_unit_of_measurement", "h"
    )
    mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get(
        "mlr_predict_friendly_name", "mlr predictor"
    )
    # Publish prediction
    idx = 0
    if not debug:
        input_data_dict["rh"].post_data(
            prediction,
            idx,
            mlr_predict_entity_id,
            mlr_predict_unit_of_measurement,
            mlr_predict_friendly_name,
            type_var="mlregressor",
        )
    return prediction
def publish_data(
|
1005
|
-
input_data_dict: dict,
|
1006
|
-
logger: logging.Logger,
|
1007
|
-
save_data_to_file: Optional[bool] = False,
|
1008
|
-
opt_res_latest: Optional[pd.DataFrame] = None,
|
1009
|
-
entity_save: Optional[bool] = False,
|
1010
|
-
dont_post: Optional[bool] = False,
|
1011
|
-
) -> pd.DataFrame:
|
1012
|
-
"""
|
1013
|
-
Publish the data obtained from the optimization results.
|
1014
|
-
|
1015
|
-
:param input_data_dict: A dictionnary with multiple data used by the action functions
|
1016
|
-
:type input_data_dict: dict
|
1017
|
-
:param logger: The passed logger object
|
1018
|
-
:type logger: logging object
|
1019
|
-
:param save_data_to_file: If True we will read data from optimization results in dayahead CSV file
|
1020
|
-
:type save_data_to_file: bool, optional
|
1021
|
-
:return: The output data of the optimization readed from a CSV file in the data folder
|
1022
|
-
:rtype: pd.DataFrame
|
1023
|
-
:param entity_save: Save built entities to data_path/entities
|
1024
|
-
:type entity_save: bool, optional
|
1025
|
-
:param dont_post: Do not post to Home Assistant. Works with entity_save
|
1026
|
-
:type dont_post: bool, optional
|
1027
|
-
|
1028
|
-
"""
|
1029
|
-
logger.info("Publishing data to HASS instance")
|
1030
|
-
if input_data_dict:
|
1031
|
-
if not isinstance(input_data_dict.get("params", {}), dict):
|
1032
|
-
params = json.loads(input_data_dict["params"])
|
1033
|
-
else:
|
1034
|
-
params = input_data_dict.get("params", {})
|
1035
|
-
|
1036
|
-
# Check if a day ahead optimization has been performed (read CSV file)
|
1037
|
-
if save_data_to_file:
|
1038
|
-
today = datetime.now(timezone.utc).replace(
|
1039
|
-
hour=0, minute=0, second=0, microsecond=0
|
1040
|
-
)
|
1041
|
-
filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
|
1042
|
-
# If publish_prefix is passed, check if there is saved entities in data_path/entities with prefix, publish to results
|
1043
|
-
elif params["passed_data"].get("publish_prefix", "") != "" and not dont_post:
|
1044
|
-
opt_res_list = []
|
1045
|
-
opt_res_list_names = []
|
1046
|
-
publish_prefix = params["passed_data"]["publish_prefix"]
|
1047
|
-
entity_path = input_data_dict["emhass_conf"]["data_path"] / "entities"
|
1048
|
-
# Check if items in entity_path
|
1049
|
-
if os.path.exists(entity_path) and len(os.listdir(entity_path)) > 0:
|
1050
|
-
# Obtain all files in entity_path
|
1051
|
-
entity_path_contents = os.listdir(entity_path)
|
1052
|
-
# Confirm the entity path contains at least one file containing publish prefix or publish_prefix='all'
|
1053
|
-
if (
|
1054
|
-
any(publish_prefix in entity for entity in entity_path_contents)
|
1055
|
-
or publish_prefix == "all"
|
1056
|
-
):
|
1057
|
-
# Loop through all items in entity path
|
1058
|
-
for entity in entity_path_contents:
|
1059
|
-
# If publish_prefix is "all" publish all saved entities to Home Assistant
|
1060
|
-
# If publish_prefix matches the prefix from saved entities, publish to Home Assistant
|
1061
|
-
if entity != "metadata.json" and (
|
1062
|
-
publish_prefix in entity or publish_prefix == "all"
|
1063
|
-
):
|
1064
|
-
entity_data = publish_json(
|
1065
|
-
entity, input_data_dict, entity_path, logger
|
1066
|
-
)
|
1067
|
-
if not isinstance(entity_data, bool):
|
1068
|
-
opt_res_list.append(entity_data)
|
1069
|
-
opt_res_list_names.append(entity.replace(".json", ""))
|
1070
|
-
else:
|
1071
|
-
return False
|
1072
|
-
# Build a DataFrame with published entities
|
1073
|
-
opt_res = pd.concat(opt_res_list, axis=1)
|
1074
|
-
opt_res.columns = opt_res_list_names
|
1075
|
-
return opt_res
|
1076
|
-
else:
|
1077
|
-
logger.warning(
|
1078
|
-
"No saved entity json files that match prefix: "
|
1079
|
-
+ str(publish_prefix)
|
1080
|
-
)
|
1081
|
-
logger.warning("Falling back to opt_res_latest")
|
1082
|
-
else:
|
1083
|
-
logger.warning("No saved entity json files in path:" + str(entity_path))
|
1084
|
-
logger.warning("Falling back to opt_res_latest")
|
1085
|
-
filename = "opt_res_latest.csv"
|
1086
|
-
else:
|
1087
|
-
filename = "opt_res_latest.csv"
|
1088
|
-
if opt_res_latest is None:
|
1089
|
-
if not os.path.isfile(input_data_dict["emhass_conf"]["data_path"] / filename):
|
1090
|
-
logger.error("File not found error, run an optimization task first.")
|
1091
|
-
return
|
1092
|
-
else:
|
1093
|
-
opt_res_latest = pd.read_csv(
|
1094
|
-
input_data_dict["emhass_conf"]["data_path"] / filename,
|
1095
|
-
index_col="timestamp",
|
1096
|
-
)
|
1097
|
-
opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
|
1098
|
-
opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"][
|
1099
|
-
"optimization_time_step"
|
1100
|
-
]
|
1101
|
-
# Estimate the current index
|
1102
|
-
now_precise = datetime.now(
|
1103
|
-
input_data_dict["retrieve_hass_conf"]["time_zone"]
|
1104
|
-
).replace(second=0, microsecond=0)
|
1105
|
-
if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
|
1106
|
-
idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
|
1107
|
-
0
|
1108
|
-
]
|
1109
|
-
elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
|
1110
|
-
idx_closest = opt_res_latest.index.get_indexer([now_precise], method="ffill")[0]
|
1111
|
-
elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
|
1112
|
-
idx_closest = opt_res_latest.index.get_indexer([now_precise], method="bfill")[0]
|
1113
|
-
if idx_closest == -1:
|
1114
|
-
idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
|
1115
|
-
0
|
1116
|
-
]
|
1117
|
-
# Publish the data
|
1118
|
-
publish_prefix = params["passed_data"]["publish_prefix"]
|
1119
|
-
# Publish PV forecast
|
1120
|
-
custom_pv_forecast_id = params["passed_data"]["custom_pv_forecast_id"]
|
1121
|
-
input_data_dict["rh"].post_data(
|
1122
|
-
opt_res_latest["P_PV"],
|
1123
|
-
idx_closest,
|
1124
|
-
custom_pv_forecast_id["entity_id"],
|
1125
|
-
custom_pv_forecast_id["unit_of_measurement"],
|
1126
|
-
custom_pv_forecast_id["friendly_name"],
|
1127
|
-
type_var="power",
|
1128
|
-
publish_prefix=publish_prefix,
|
1129
|
-
save_entities=entity_save,
|
1130
|
-
dont_post=dont_post,
|
1131
|
-
)
|
1132
|
-
# Publish Load forecast
|
1133
|
-
custom_load_forecast_id = params["passed_data"]["custom_load_forecast_id"]
|
1134
|
-
input_data_dict["rh"].post_data(
|
1135
|
-
opt_res_latest["P_Load"],
|
1136
|
-
idx_closest,
|
1137
|
-
custom_load_forecast_id["entity_id"],
|
1138
|
-
custom_load_forecast_id["unit_of_measurement"],
|
1139
|
-
custom_load_forecast_id["friendly_name"],
|
1140
|
-
type_var="power",
|
1141
|
-
publish_prefix=publish_prefix,
|
1142
|
-
save_entities=entity_save,
|
1143
|
-
dont_post=dont_post,
|
1144
|
-
)
|
1145
|
-
cols_published = ["P_PV", "P_Load"]
|
1146
|
-
# Publish PV curtailment
|
1147
|
-
if input_data_dict["fcst"].plant_conf["compute_curtailment"]:
|
1148
|
-
custom_pv_curtailment_id = params["passed_data"]["custom_pv_curtailment_id"]
|
1149
|
-
input_data_dict["rh"].post_data(
|
1150
|
-
opt_res_latest["P_PV_curtailment"],
|
1151
|
-
idx_closest,
|
1152
|
-
custom_pv_curtailment_id["entity_id"],
|
1153
|
-
custom_pv_curtailment_id["unit_of_measurement"],
|
1154
|
-
custom_pv_curtailment_id["friendly_name"],
|
1155
|
-
type_var="power",
|
1156
|
-
publish_prefix=publish_prefix,
|
1157
|
-
save_entities=entity_save,
|
1158
|
-
dont_post=dont_post,
|
1159
|
-
)
|
1160
|
-
cols_published = cols_published + ["P_PV_curtailment"]
|
1161
|
-
# Publish P_hybrid_inverter
|
1162
|
-
if input_data_dict["fcst"].plant_conf["inverter_is_hybrid"]:
|
1163
|
-
custom_hybrid_inverter_id = params["passed_data"]["custom_hybrid_inverter_id"]
|
1164
|
-
input_data_dict["rh"].post_data(
|
1165
|
-
opt_res_latest["P_hybrid_inverter"],
|
1166
|
-
idx_closest,
|
1167
|
-
custom_hybrid_inverter_id["entity_id"],
|
1168
|
-
custom_hybrid_inverter_id["unit_of_measurement"],
|
1169
|
-
custom_hybrid_inverter_id["friendly_name"],
|
1170
|
-
type_var="power",
|
1171
|
-
publish_prefix=publish_prefix,
|
1172
|
-
save_entities=entity_save,
|
1173
|
-
dont_post=dont_post,
|
1174
|
-
)
|
1175
|
-
cols_published = cols_published + ["P_hybrid_inverter"]
|
1176
|
-
# Publish deferrable loads
|
1177
|
-
custom_deferrable_forecast_id = params["passed_data"][
|
1178
|
-
"custom_deferrable_forecast_id"
|
1179
|
-
]
|
1180
|
-
for k in range(input_data_dict["opt"].optim_conf["number_of_deferrable_loads"]):
|
1181
|
-
if "P_deferrable{}".format(k) not in opt_res_latest.columns:
|
1182
|
-
logger.error(
|
1183
|
-
"P_deferrable{}".format(k)
|
1184
|
-
+ " was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
|
1185
|
-
)
|
1186
|
-
else:
|
1187
|
-
input_data_dict["rh"].post_data(
|
1188
|
-
opt_res_latest["P_deferrable{}".format(k)],
|
1189
|
-
idx_closest,
|
1190
|
-
custom_deferrable_forecast_id[k]["entity_id"],
|
1191
|
-
custom_deferrable_forecast_id[k]["unit_of_measurement"],
|
1192
|
-
custom_deferrable_forecast_id[k]["friendly_name"],
|
1193
|
-
type_var="deferrable",
|
1194
|
-
                publish_prefix=publish_prefix,
                save_entities=entity_save,
                dont_post=dont_post,
            )
            cols_published = cols_published + ["P_deferrable{}".format(k)]
    # Publish thermal model data (predicted temperature)
    custom_predicted_temperature_id = params["passed_data"][
        "custom_predicted_temperature_id"
    ]
    for k in range(input_data_dict["opt"].optim_conf["number_of_deferrable_loads"]):
        if "def_load_config" in input_data_dict["opt"].optim_conf.keys():
            if (
                "thermal_config"
                in input_data_dict["opt"].optim_conf["def_load_config"][k]
            ):
                input_data_dict["rh"].post_data(
                    opt_res_latest["predicted_temp_heater{}".format(k)],
                    idx_closest,
                    custom_predicted_temperature_id[k]["entity_id"],
                    custom_predicted_temperature_id[k]["unit_of_measurement"],
                    custom_predicted_temperature_id[k]["friendly_name"],
                    type_var="temperature",
                    publish_prefix=publish_prefix,
                    save_entities=entity_save,
                    dont_post=dont_post,
                )
                cols_published = cols_published + ["predicted_temp_heater{}".format(k)]
    # Publish battery power
    if input_data_dict["opt"].optim_conf["set_use_battery"]:
        if "P_batt" not in opt_res_latest.columns:
            logger.error(
                "P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
            )
        else:
            custom_batt_forecast_id = params["passed_data"]["custom_batt_forecast_id"]
            input_data_dict["rh"].post_data(
                opt_res_latest["P_batt"],
                idx_closest,
                custom_batt_forecast_id["entity_id"],
                custom_batt_forecast_id["unit_of_measurement"],
                custom_batt_forecast_id["friendly_name"],
                type_var="batt",
                publish_prefix=publish_prefix,
                save_entities=entity_save,
                dont_post=dont_post,
            )
            cols_published = cols_published + ["P_batt"]
            custom_batt_soc_forecast_id = params["passed_data"][
                "custom_batt_soc_forecast_id"
            ]
            input_data_dict["rh"].post_data(
                opt_res_latest["SOC_opt"] * 100,
                idx_closest,
                custom_batt_soc_forecast_id["entity_id"],
                custom_batt_soc_forecast_id["unit_of_measurement"],
                custom_batt_soc_forecast_id["friendly_name"],
                type_var="SOC",
                publish_prefix=publish_prefix,
                save_entities=entity_save,
                dont_post=dont_post,
            )
            cols_published = cols_published + ["SOC_opt"]
    # Publish grid power
    custom_grid_forecast_id = params["passed_data"]["custom_grid_forecast_id"]
    input_data_dict["rh"].post_data(
        opt_res_latest["P_grid"],
        idx_closest,
        custom_grid_forecast_id["entity_id"],
        custom_grid_forecast_id["unit_of_measurement"],
        custom_grid_forecast_id["friendly_name"],
        type_var="power",
        publish_prefix=publish_prefix,
        save_entities=entity_save,
        dont_post=dont_post,
    )
    cols_published = cols_published + ["P_grid"]
    # Publish total value of cost function
    custom_cost_fun_id = params["passed_data"]["custom_cost_fun_id"]
    col_cost_fun = [i for i in opt_res_latest.columns if "cost_fun_" in i]
    input_data_dict["rh"].post_data(
        opt_res_latest[col_cost_fun],
        idx_closest,
        custom_cost_fun_id["entity_id"],
        custom_cost_fun_id["unit_of_measurement"],
        custom_cost_fun_id["friendly_name"],
        type_var="cost_fun",
        publish_prefix=publish_prefix,
        save_entities=entity_save,
        dont_post=dont_post,
    )
    # cols_published = cols_published + col_cost_fun
    # Publish the optimization status
    custom_cost_fun_id = params["passed_data"]["custom_optim_status_id"]
    if "optim_status" not in opt_res_latest:
        opt_res_latest["optim_status"] = "Optimal"
        logger.warning(
            "no optim_status in opt_res_latest, run an optimization task first",
        )
    else:
        input_data_dict["rh"].post_data(
            opt_res_latest["optim_status"],
            idx_closest,
            custom_cost_fun_id["entity_id"],
            custom_cost_fun_id["unit_of_measurement"],
            custom_cost_fun_id["friendly_name"],
            type_var="optim_status",
            publish_prefix=publish_prefix,
            save_entities=entity_save,
            dont_post=dont_post,
        )
        cols_published = cols_published + ["optim_status"]
    # Publish unit_load_cost
    custom_unit_load_cost_id = params["passed_data"]["custom_unit_load_cost_id"]
    input_data_dict["rh"].post_data(
        opt_res_latest["unit_load_cost"],
        idx_closest,
        custom_unit_load_cost_id["entity_id"],
        custom_unit_load_cost_id["unit_of_measurement"],
        custom_unit_load_cost_id["friendly_name"],
        type_var="unit_load_cost",
        publish_prefix=publish_prefix,
        save_entities=entity_save,
        dont_post=dont_post,
    )
    cols_published = cols_published + ["unit_load_cost"]
    # Publish unit_prod_price
    custom_unit_prod_price_id = params["passed_data"]["custom_unit_prod_price_id"]
    input_data_dict["rh"].post_data(
        opt_res_latest["unit_prod_price"],
        idx_closest,
        custom_unit_prod_price_id["entity_id"],
        custom_unit_prod_price_id["unit_of_measurement"],
        custom_unit_prod_price_id["friendly_name"],
        type_var="unit_prod_price",
        publish_prefix=publish_prefix,
        save_entities=entity_save,
        dont_post=dont_post,
    )
    cols_published = cols_published + ["unit_prod_price"]
    # Create a DF summarizing what has been published
    opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
    return opt_res
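
Note: every post_data call in the block above pulls its target entity attributes (entity_id, unit_of_measurement, friendly_name) from params["passed_data"]. The snippet below is a hedged illustration of the runtime parameters that shape this publishing step; the endpoint URL assumes a locally running EMHASS web server and all values are placeholders, not defaults taken from this file.

# Hedged illustration only: the keys mirror the custom_*_id entries read above.
import json

import requests

runtimeparams = {
    "publish_prefix": "dh_",
    "custom_grid_forecast_id": {
        "entity_id": "sensor.p_grid_forecast",
        "unit_of_measurement": "W",
        "friendly_name": "Grid Power Forecast",
    },
    "custom_batt_soc_forecast_id": {
        "entity_id": "sensor.soc_batt_forecast",
        "unit_of_measurement": "%",
        "friendly_name": "Battery SOC Forecast",
    },
}
requests.post(
    "http://localhost:5000/action/publish-data",  # assumed local endpoint
    data=json.dumps(runtimeparams),
    headers={"Content-Type": "application/json"},
    timeout=30,
)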
def continual_publish(
    input_data_dict: dict, entity_path: pathlib.Path, logger: logging.Logger
):
    """
    If continual_publish is true and entity files are saved in /data_path/entities, continually publish the sensors at the freq rate, updating each entity's current state value based on its timestamp.

    :param input_data_dict: A dictionary with multiple data used by the action functions
    :type input_data_dict: dict
    :param entity_path: Path for entities folder in data_path
    :type entity_path: Path
    :param logger: The passed logger object
    :type logger: logging.Logger

    """
    logger.info("Continual publish thread service started")
    freq = input_data_dict["retrieve_hass_conf"].get(
        "optimization_time_step", pd.to_timedelta(1, "minutes")
    )
    entity_path_contents = []
    while True:
        # Sleep for x seconds (using current time as a reference for time left)
        time.sleep(
            max(
                0,
                freq.total_seconds()
                - (
                    datetime.now(
                        input_data_dict["retrieve_hass_conf"]["time_zone"]
                    ).timestamp()
                    % 60
                ),
            )
        )
        # Loop through all saved entity files
        if os.path.exists(entity_path) and len(os.listdir(entity_path)) > 0:
            entity_path_contents = os.listdir(entity_path)
            for entity in entity_path_contents:
                if entity != "metadata.json":
                    # Call publish_json with entity file, build entity, and publish
                    publish_json(
                        entity,
                        input_data_dict,
                        entity_path,
                        logger,
                        "continual_publish",
                    )
            # Retrieve entity metadata from file
            if os.path.isfile(entity_path / "metadata.json"):
                with open(entity_path / "metadata.json", "r") as file:
                    metadata = json.load(file)
                    # Check if freq should be shorter
                    if not metadata.get("lowest_time_step", None) == None:
                        freq = pd.to_timedelta(metadata["lowest_time_step"], "minutes")
        pass
    # This function should never return
    return False
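
Note: continual_publish is a non-returning loop, so a caller is expected to run it on its own thread. A minimal sketch, assuming a caller similar to the EMHASS web server; the entity path is illustrative, and input_data_dict/logger are assumed to come from set_input_data_dict and utils.get_logger.

# Minimal sketch under the assumptions stated above.
import pathlib
import threading

entity_path = pathlib.Path("/app/data/entities")  # illustrative location
publish_thread = threading.Thread(
    target=continual_publish,
    args=(input_data_dict, entity_path, logger),
    daemon=True,  # the loop never returns, so do not block interpreter exit
)
publish_thread.start()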
def publish_json(
    entity: dict,
    input_data_dict: dict,
    entity_path: pathlib.Path,
    logger: logging.Logger,
    reference: Optional[str] = "",
):
    """
    Extract saved entity data from .json (in data_path/entities), build the entity, and post the results with post_data.

    :param entity: json file containing entity data
    :type entity: dict
    :param input_data_dict: A dictionary with multiple data used by the action functions
    :type input_data_dict: dict
    :param entity_path: Path for entities folder in data_path
    :type entity_path: Path
    :param logger: The passed logger object
    :type logger: logging.Logger
    :param reference: String for identifying who ran the function
    :type reference: str, optional

    """
    # Retrieve entity metadata from file
    if os.path.isfile(entity_path / "metadata.json"):
        with open(entity_path / "metadata.json", "r") as file:
            metadata = json.load(file)
    else:
        logger.error("unable to locate metadata.json in: " + str(entity_path))
        return False
    # Round current timecode (now)
    now_precise = datetime.now(
        input_data_dict["retrieve_hass_conf"]["time_zone"]
    ).replace(second=0, microsecond=0)
    # Retrieve entity data from file
    entity_data = pd.read_json(entity_path / entity, orient="index")
    # Remove ".json" from string for entity_id
    entity_id = entity.replace(".json", "")
    # Adjust DataFrame from received entity json file
    entity_data.columns = [metadata[entity_id]["name"]]
    entity_data.index.name = "timestamp"
    entity_data.index = pd.to_datetime(entity_data.index).tz_convert(
        input_data_dict["retrieve_hass_conf"]["time_zone"]
    )
    entity_data.index.freq = pd.to_timedelta(
        int(metadata[entity_id]["optimization_time_step"]), "minutes"
    )
    # Calculate the current state value
    if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
        idx_closest = entity_data.index.get_indexer([now_precise], method="nearest")[0]
    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
        idx_closest = entity_data.index.get_indexer([now_precise], method="ffill")[0]
    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
        idx_closest = entity_data.index.get_indexer([now_precise], method="bfill")[0]
    if idx_closest == -1:
        idx_closest = entity_data.index.get_indexer([now_precise], method="nearest")[0]
    # Call post data
    if reference == "continual_publish":
        logger.debug("Auto Published sensor:")
        logger_levels = "DEBUG"
    else:
        logger_levels = "INFO"
    # post/save entity
    input_data_dict["rh"].post_data(
        data_df=entity_data[metadata[entity_id]["name"]],
        idx=idx_closest,
        entity_id=entity_id,
        unit_of_measurement=metadata[entity_id]["unit_of_measurement"],
        friendly_name=metadata[entity_id]["friendly_name"],
        type_var=metadata[entity_id].get("type_var", ""),
        save_entities=False,
        logger_levels=logger_levels,
    )
    return entity_data[metadata[entity_id]["name"]]
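
Note: publish_json relies on a metadata.json file mapping each saved entity_id to the fields read above (name, unit_of_measurement, friendly_name, type_var, optimization_time_step, plus an optional lowest_time_step used by continual_publish). Below is a hedged sketch of such a file with made-up values; the per-entity <entity_id>.json data files themselves are normally written by post_data with save_entities=True rather than by hand.

# Hedged sketch: field names match the keys read above, values are illustrative.
import json
import pathlib

entity_path = pathlib.Path("data/entities")  # illustrative location
entity_path.mkdir(parents=True, exist_ok=True)
metadata = {
    "sensor.p_grid_forecast": {
        "name": "P_grid",
        "unit_of_measurement": "W",
        "friendly_name": "Grid Power Forecast",
        "type_var": "power",
        "optimization_time_step": 30,
    },
    # Optional: lets continual_publish shorten its wake-up frequency.
    "lowest_time_step": 30,
}
(entity_path / "metadata.json").write_text(json.dumps(metadata, indent=2))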
def main():
    r"""Define the main command line entry function.

    This function may take several arguments as inputs. You can type `emhass --help` to see the list of options:

    - action: Set the desired action, options are: perfect-optim, dayahead-optim,
      naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune

    - config: Define the path to the config.json file

    - costfun: Define the type of cost function, options are: profit, cost, self-consumption

    - log2file: Define if we should log to a file or not

    - params: Configuration parameters passed from data/options.json if using the add-on

    - runtimeparams: Pass runtime optimization parameters as a dictionary

    - debug: Use True for testing purposes

    """
    # Parsing arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--action",
        type=str,
        help="Set the desired action, options are: perfect-optim, dayahead-optim,\
            naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune",
    )
    parser.add_argument(
        "--config", type=str, help="Define path to the config.json/defaults.json file"
    )
    parser.add_argument(
        "--params",
        type=str,
        default=None,
        help="String of configuration parameters passed",
    )
    parser.add_argument(
        "--data", type=str, help="Define path to the Data files (.csv & .pkl)"
    )
    parser.add_argument("--root", type=str, help="Define path to the emhass root")
    parser.add_argument(
        "--costfun",
        type=str,
        default="profit",
        help="Define the type of cost function, options are: profit, cost, self-consumption",
    )
    parser.add_argument(
        "--log2file",
        type=bool,
        default=False,
        help="Define if we should log to a file or not",
    )
    parser.add_argument(
        "--secrets",
        type=str,
        default=None,
        help="Define secret parameter file (secrets_emhass.yaml) path",
    )
    parser.add_argument(
        "--runtimeparams",
        type=str,
        default=None,
        help="Pass runtime optimization parameters as a dictionary",
    )
    parser.add_argument(
        "--debug",
        type=bool,
        default=False,
        help="Use True for testing purposes",
    )
    args = parser.parse_args()
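    # Hedged example (not part of this module): a programmatic equivalent of the
    # console script invocation, e.g.
    #   emhass --action dayahead-optim --config /app/config.json --costfun profit
    # Paths and values are illustrative only.
    #
    # import sys
    # from emhass.command_line import main
    #
    # sys.argv = [
    #     "emhass",
    #     "--action", "dayahead-optim",
    #     "--config", "/app/config.json",
    #     "--secrets", "/app/secrets_emhass.yaml",
    #     "--costfun", "profit",
    # ]
    # opt_res = main()  # returns the optimization results DataFrame for this action
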
    # The path to the configuration files
    if args.config is not None:
        config_path = pathlib.Path(args.config)
    else:
        config_path = pathlib.Path(
            str(utils.get_root(__file__, num_parent=3) / "config.json")
        )
    if args.data is not None:
        data_path = pathlib.Path(args.data)
    else:
        data_path = config_path.parent / "data/"
    if args.root is not None:
        root_path = pathlib.Path(args.root)
    else:
        root_path = utils.get_root(__file__, num_parent=1)
    if args.secrets is not None:
        secrets_path = pathlib.Path(args.secrets)
    else:
        secrets_path = pathlib.Path(config_path.parent / "secrets_emhass.yaml")

    associations_path = root_path / "data/associations.csv"
    defaults_path = root_path / "data/config_defaults.json"

    emhass_conf = {}
    emhass_conf["config_path"] = config_path
    emhass_conf["data_path"] = data_path
    emhass_conf["root_path"] = root_path
    emhass_conf["associations_path"] = associations_path
    emhass_conf["defaults_path"] = defaults_path
    # create logger
    logger, ch = utils.get_logger(
        __name__, emhass_conf, save_to_file=bool(args.log2file)
    )

    # Check paths
    logger.debug("config path: " + str(config_path))
    logger.debug("data path: " + str(data_path))
    logger.debug("root path: " + str(root_path))
    if not associations_path.exists():
        logger.error(
            "Could not find associations.csv file in: " + str(associations_path)
        )
        logger.error("Try setting the emhass root path with --root")
        return False
    if not config_path.exists():
        logger.warning("Could not find config.json file in: " + str(config_path))
        logger.warning("Try setting config file path with --config")
    if not secrets_path.exists():
        logger.warning("Could not find secrets file in: " + str(secrets_path))
        logger.warning("Try setting secrets file path with --secrets")
    if not os.path.isdir(data_path):
        logger.error("Could not find data folder in: " + str(data_path))
        logger.error("Try setting data path with --data")
        return False
    if not os.path.isdir(root_path):
        logger.error("Could not find emhass/src folder in: " + str(root_path))
        logger.error("Try setting emhass root path with --root")
        return False
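    # Hedged sketch (not part of this module): the same emhass_conf dictionary can
    # be assembled directly when driving the functions in this module from a
    # script instead of the CLI. Paths are illustrative only.
    #
    # import pathlib
    #
    # root_path = pathlib.Path("/app/src/emhass")
    # emhass_conf = {
    #     "config_path": pathlib.Path("/app/config.json"),
    #     "data_path": pathlib.Path("/app/data"),
    #     "root_path": root_path,
    #     "associations_path": root_path / "data/associations.csv",
    #     "defaults_path": root_path / "data/config_defaults.json",
    # }
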
    # Additional argument
    try:
        parser.add_argument(
            "--version",
            action="version",
            version="%(prog)s " + version("emhass"),
        )
        args = parser.parse_args()
    except Exception:
        logger.info(
            "Version not found for emhass package. Or importlib exited with PackageNotFoundError.",
        )

    # Setup config
    config = {}
    # Check if the passed config file is yaml or json, build config accordingly
    if config_path.exists():
        config_file_ending = re.findall(r"(?<=\.).*$", str(config_path))
        if len(config_file_ending) > 0:
            match config_file_ending[0]:
                case "json":
                    config = utils.build_config(
                        emhass_conf, logger, defaults_path, config_path
                    )
                case "yaml":
                    config = utils.build_config(
                        emhass_conf, logger, defaults_path, config_path=config_path
                    )
                case "yml":
                    config = utils.build_config(
                        emhass_conf, logger, defaults_path, config_path=config_path
                    )
    # If unable to find the config file, use only config_defaults.json
    else:
        logger.warning(
            "Unable to obtain config.json file, building parameters with only defaults"
        )
        config = utils.build_config(emhass_conf, logger, defaults_path)
    if type(config) is bool and not config:
        raise Exception("Failed to find default config")

    # Obtain secrets from secrets_emhass.yaml?
    params_secrets = {}
    emhass_conf, built_secrets = utils.build_secrets(
        emhass_conf, logger, secrets_path=secrets_path
    )
    params_secrets.update(built_secrets)

    # Build params
    params = utils.build_params(emhass_conf, params_secrets, config, logger)
    if type(params) is bool:
        raise Exception("An error has occurred while building parameters")
    # Add any passed params from args to params
    if args.params:
        params.update(json.loads(args.params))
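    # Hedged example (not part of this module): the secrets file read above is a
    # small YAML document. The key names below follow the standard EMHASS
    # documentation and are an assumption here, not defined in this file; all
    # values are placeholders.
    #
    # import pathlib
    #
    # secrets_yaml = """\
    # hass_url: https://homeassistant.local:8123/
    # long_lived_token: <your-long-lived-access-token>
    # time_zone: Europe/Paris
    # Latitude: 45.83
    # Longitude: 6.86
    # Altitude: 4807.8
    # """
    # pathlib.Path("secrets_emhass.yaml").write_text(secrets_yaml)
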
    input_data_dict = set_input_data_dict(
        emhass_conf,
        args.costfun,
        json.dumps(params),
        args.runtimeparams,
        args.action,
        logger,
        args.debug,
    )
    if type(input_data_dict) is bool:
        raise Exception("An error has occurred while creating action objects")

    # Perform selected action
    if args.action == "perfect-optim":
        opt_res = perfect_forecast_optim(input_data_dict, logger, debug=args.debug)
    elif args.action == "dayahead-optim":
        opt_res = dayahead_forecast_optim(input_data_dict, logger, debug=args.debug)
    elif args.action == "naive-mpc-optim":
        opt_res = naive_mpc_optim(input_data_dict, logger, debug=args.debug)
    elif args.action == "forecast-model-fit":
        df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(
            input_data_dict, logger, debug=args.debug
        )
        opt_res = None
    elif args.action == "forecast-model-predict":
        if args.debug:
            _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
        else:
            mlf = None
        df_pred = forecast_model_predict(
            input_data_dict, logger, debug=args.debug, mlf=mlf
        )
        opt_res = None
    elif args.action == "forecast-model-tune":
        if args.debug:
            _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
        else:
            mlf = None
        df_pred_optim, mlf = forecast_model_tune(
            input_data_dict, logger, debug=args.debug, mlf=mlf
        )
        opt_res = None
    elif args.action == "regressor-model-fit":
        mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
        opt_res = None
    elif args.action == "regressor-model-predict":
        if args.debug:
            mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
        else:
            mlr = None
        prediction = regressor_model_predict(
            input_data_dict, logger, debug=args.debug, mlr=mlr
        )
        opt_res = None
    elif args.action == "publish-data":
        opt_res = publish_data(input_data_dict, logger)
    else:
        logger.error("The passed action argument is not valid")
        logger.error(
            "Try setting --action: perfect-optim, dayahead-optim, naive-mpc-optim, forecast-model-fit, forecast-model-predict, forecast-model-tune or publish-data"
        )
        opt_res = None
    logger.info(opt_res)
    # Flush the logger
    ch.close()
    logger.removeHandler(ch)
    if (
        args.action == "perfect-optim"
        or args.action == "dayahead-optim"
        or args.action == "naive-mpc-optim"
        or args.action == "publish-data"
    ):
        return opt_res
    elif args.action == "forecast-model-fit":
        return df_fit_pred, df_fit_pred_backtest, mlf
    elif args.action == "forecast-model-predict":
        return df_pred
    elif args.action == "regressor-model-fit":
        return mlr
    elif args.action == "regressor-model-predict":
        return prediction
    elif args.action == "forecast-model-tune":
        return df_pred_optim, mlf
    else:
        return opt_res


if __name__ == "__main__":
    main()
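
Note: what main() returns depends on --action, as the branches above show. Below is a hedged example unpacking the forecast-model-fit result; the config path is illustrative, and passing "--debug", "True" only works because argparse applies bool() to the non-empty string.

# Hedged example under the assumptions stated above.
import sys

from emhass.command_line import main

sys.argv = [
    "emhass",
    "--action", "forecast-model-fit",
    "--config", "/app/config.json",
    "--debug", "True",
]
df_fit_pred, df_fit_pred_backtest, mlf = main()
print(df_fit_pred.head())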