emhass-0.11.1-py3-none-any.whl → emhass-0.11.3-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
emhass/command_line.py CHANGED
@@ -2,33 +2,39 @@
  # -*- coding: utf-8 -*-
 
  import argparse
+ import copy
+ import json
+ import logging
  import os
- import re
- import time
  import pathlib
- import logging
- import json
- import copy
  import pickle
+ import re
+ import time
  from datetime import datetime, timezone
- from typing import Optional, Tuple
+ from distutils.util import strtobool
  from importlib.metadata import version
+ from typing import Optional, Tuple
+
  import numpy as np
  import pandas as pd
 
- from distutils.util import strtobool
-
- from emhass.retrieve_hass import RetrieveHass
+ from emhass import utils
  from emhass.forecast import Forecast
  from emhass.machine_learning_forecaster import MLForecaster
- from emhass.optimization import Optimization
  from emhass.machine_learning_regressor import MLRegressor
- from emhass import utils
+ from emhass.optimization import Optimization
+ from emhass.retrieve_hass import RetrieveHass
 
 
- def set_input_data_dict(emhass_conf: dict, costfun: str,
- params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
- get_data_from_file: Optional[bool] = False) -> dict:
+ def set_input_data_dict(
+ emhass_conf: dict,
+ costfun: str,
+ params: str,
+ runtimeparams: str,
+ set_type: str,
+ logger: logging.Logger,
+ get_data_from_file: Optional[bool] = False,
+ ) -> dict:
  """
  Set up some of the data needed for the different actions.
 
@@ -60,46 +66,88 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  params = {}
 
  # Parsing yaml
- retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(params,logger)
+ retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(params, logger)
  if type(retrieve_hass_conf) is bool:
  return False
-
+
  # Treat runtimeparams
  params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
- runtimeparams, params, retrieve_hass_conf, optim_conf, plant_conf, set_type, logger)
+ runtimeparams,
+ params,
+ retrieve_hass_conf,
+ optim_conf,
+ plant_conf,
+ set_type,
+ logger,
+ emhass_conf,
+ )
  # Define main objects
- rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
- retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'],
- params, emhass_conf, logger, get_data_from_file=get_data_from_file)
- fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
- params, emhass_conf, logger, get_data_from_file=get_data_from_file)
- opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
- fcst.var_load_cost, fcst.var_prod_price,
- costfun, emhass_conf, logger)
+ rh = RetrieveHass(
+ retrieve_hass_conf["hass_url"],
+ retrieve_hass_conf["long_lived_token"],
+ retrieve_hass_conf["optimization_time_step"],
+ retrieve_hass_conf["time_zone"],
+ params,
+ emhass_conf,
+ logger,
+ get_data_from_file=get_data_from_file,
+ )
+ fcst = Forecast(
+ retrieve_hass_conf,
+ optim_conf,
+ plant_conf,
+ params,
+ emhass_conf,
+ logger,
+ get_data_from_file=get_data_from_file,
+ )
+ opt = Optimization(
+ retrieve_hass_conf,
+ optim_conf,
+ plant_conf,
+ fcst.var_load_cost,
+ fcst.var_prod_price,
+ costfun,
+ emhass_conf,
+ logger,
+ )
  # Perform setup based on type of action
  if set_type == "perfect-optim":
  # Retrieve data from hass
  if get_data_from_file:
- with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
+ with open(emhass_conf["data_path"] / "test_df_final.pkl", "rb") as inp:
  rh.df_final, days_list, var_list = pickle.load(inp)
- retrieve_hass_conf['sensor_power_load_no_var_loads'] = str(var_list[0])
- retrieve_hass_conf['sensor_power_photovoltaics'] = str(var_list[1])
- retrieve_hass_conf['sensor_linear_interp'] = [
- retrieve_hass_conf['sensor_power_photovoltaics'], retrieve_hass_conf['sensor_power_load_no_var_loads']]
- retrieve_hass_conf['sensor_replace_zero'] = [
- retrieve_hass_conf['sensor_power_photovoltaics']]
+ retrieve_hass_conf["sensor_power_load_no_var_loads"] = str(var_list[0])
+ retrieve_hass_conf["sensor_power_photovoltaics"] = str(var_list[1])
+ retrieve_hass_conf["sensor_linear_interp"] = [
+ retrieve_hass_conf["sensor_power_photovoltaics"],
+ retrieve_hass_conf["sensor_power_load_no_var_loads"],
+ ]
+ retrieve_hass_conf["sensor_replace_zero"] = [
+ retrieve_hass_conf["sensor_power_photovoltaics"]
+ ]
  else:
  days_list = utils.get_days_list(
- retrieve_hass_conf['historic_days_to_retrieve'])
- var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'],
- retrieve_hass_conf['sensor_power_photovoltaics']]
- if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
+ retrieve_hass_conf["historic_days_to_retrieve"]
+ )
+ var_list = [
+ retrieve_hass_conf["sensor_power_load_no_var_loads"],
+ retrieve_hass_conf["sensor_power_photovoltaics"],
+ ]
+ if not rh.get_data(
+ days_list,
+ var_list,
+ minimal_response=False,
+ significant_changes_only=False,
+ ):
  return False
- if not rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'],
- load_negative=retrieve_hass_conf['load_negative'],
- set_zero_min=retrieve_hass_conf['set_zero_min'],
- var_replace_zero=retrieve_hass_conf['sensor_replace_zero'],
- var_interp=retrieve_hass_conf['sensor_linear_interp']):
+ if not rh.prepare_data(
+ retrieve_hass_conf["sensor_power_load_no_var_loads"],
+ load_negative=retrieve_hass_conf["load_negative"],
+ set_zero_min=retrieve_hass_conf["set_zero_min"],
+ var_replace_zero=retrieve_hass_conf["sensor_replace_zero"],
+ var_interp=retrieve_hass_conf["sensor_linear_interp"],
+ ):
  return False
  df_input_data = rh.df_final.copy()
  # What we don't need for this type of action
@@ -107,101 +155,162 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  elif set_type == "dayahead-optim":
  # Get PV and load forecasts
  df_weather = fcst.get_weather_forecast(
- method=optim_conf['weather_forecast_method'])
+ method=optim_conf["weather_forecast_method"]
+ )
  if isinstance(df_weather, bool) and not df_weather:
  return False
  P_PV_forecast = fcst.get_power_from_weather(df_weather)
  P_load_forecast = fcst.get_load_forecast(
- method=optim_conf['load_forecast_method'])
+ method=optim_conf["load_forecast_method"]
+ )
  if isinstance(P_load_forecast, bool) and not P_load_forecast:
  logger.error(
- "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
+ "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data"
+ )
  return False
- df_input_data_dayahead = pd.DataFrame(np.transpose(np.vstack(
- [P_PV_forecast.values, P_load_forecast.values])), index=P_PV_forecast.index,
- columns=["P_PV_forecast", "P_load_forecast"])
- if "optimization_time_step" in retrieve_hass_conf and retrieve_hass_conf["optimization_time_step"]:
- if not isinstance(retrieve_hass_conf["optimization_time_step"], pd._libs.tslibs.timedeltas.Timedelta):
- optimization_time_step = pd.to_timedelta(retrieve_hass_conf["optimization_time_step"], "minute")
+ df_input_data_dayahead = pd.DataFrame(
+ np.transpose(np.vstack([P_PV_forecast.values, P_load_forecast.values])),
+ index=P_PV_forecast.index,
+ columns=["P_PV_forecast", "P_load_forecast"],
+ )
+ if (
+ "optimization_time_step" in retrieve_hass_conf
+ and retrieve_hass_conf["optimization_time_step"]
+ ):
+ if not isinstance(
+ retrieve_hass_conf["optimization_time_step"],
+ pd._libs.tslibs.timedeltas.Timedelta,
+ ):
+ optimization_time_step = pd.to_timedelta(
+ retrieve_hass_conf["optimization_time_step"], "minute"
+ )
  else:
  optimization_time_step = retrieve_hass_conf["optimization_time_step"]
- df_input_data_dayahead = df_input_data_dayahead.asfreq(optimization_time_step)
+ df_input_data_dayahead = df_input_data_dayahead.asfreq(
+ optimization_time_step
+ )
  else:
  df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
  params = json.loads(params)
- if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
+ if (
+ "prediction_horizon" in params["passed_data"]
+ and params["passed_data"]["prediction_horizon"] is not None
+ ):
  prediction_horizon = params["passed_data"]["prediction_horizon"]
  df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
- df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
+ df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
+ prediction_horizon - 1
+ ]
+ ]
  # What we don't need for this type of action
  df_input_data, days_list = None, None
  elif set_type == "naive-mpc-optim":
  # Retrieve data from hass
  if get_data_from_file:
- with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
+ with open(emhass_conf["data_path"] / "test_df_final.pkl", "rb") as inp:
  rh.df_final, days_list, var_list = pickle.load(inp)
- retrieve_hass_conf['sensor_power_load_no_var_loads'] = str(var_list[0])
- retrieve_hass_conf['sensor_power_photovoltaics'] = str(var_list[1])
- retrieve_hass_conf['sensor_linear_interp'] = [
- retrieve_hass_conf['sensor_power_photovoltaics'], retrieve_hass_conf['sensor_power_load_no_var_loads']]
- retrieve_hass_conf['sensor_replace_zero'] = [
- retrieve_hass_conf['sensor_power_photovoltaics']]
+ retrieve_hass_conf["sensor_power_load_no_var_loads"] = str(var_list[0])
+ retrieve_hass_conf["sensor_power_photovoltaics"] = str(var_list[1])
+ retrieve_hass_conf["sensor_linear_interp"] = [
+ retrieve_hass_conf["sensor_power_photovoltaics"],
+ retrieve_hass_conf["sensor_power_load_no_var_loads"],
+ ]
+ retrieve_hass_conf["sensor_replace_zero"] = [
+ retrieve_hass_conf["sensor_power_photovoltaics"]
+ ]
  else:
  days_list = utils.get_days_list(1)
- var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'],
- retrieve_hass_conf['sensor_power_photovoltaics']]
- if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
+ var_list = [
+ retrieve_hass_conf["sensor_power_load_no_var_loads"],
+ retrieve_hass_conf["sensor_power_photovoltaics"],
+ ]
+ if not rh.get_data(
+ days_list,
+ var_list,
+ minimal_response=False,
+ significant_changes_only=False,
+ ):
  return False
- if not rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'],
- load_negative=retrieve_hass_conf['load_negative'],
- set_zero_min=retrieve_hass_conf['set_zero_min'],
- var_replace_zero=retrieve_hass_conf['sensor_replace_zero'],
- var_interp=retrieve_hass_conf['sensor_linear_interp']):
+ if not rh.prepare_data(
+ retrieve_hass_conf["sensor_power_load_no_var_loads"],
+ load_negative=retrieve_hass_conf["load_negative"],
+ set_zero_min=retrieve_hass_conf["set_zero_min"],
+ var_replace_zero=retrieve_hass_conf["sensor_replace_zero"],
+ var_interp=retrieve_hass_conf["sensor_linear_interp"],
+ ):
  return False
  df_input_data = rh.df_final.copy()
  # Get PV and load forecasts
  df_weather = fcst.get_weather_forecast(
- method=optim_conf['weather_forecast_method'])
+ method=optim_conf["weather_forecast_method"]
+ )
  if isinstance(df_weather, bool) and not df_weather:
  return False
  P_PV_forecast = fcst.get_power_from_weather(
- df_weather, set_mix_forecast=True, df_now=df_input_data)
+ df_weather, set_mix_forecast=True, df_now=df_input_data
+ )
  P_load_forecast = fcst.get_load_forecast(
- method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
+ method=optim_conf["load_forecast_method"],
+ set_mix_forecast=True,
+ df_now=df_input_data,
+ )
  if isinstance(P_load_forecast, bool) and not P_load_forecast:
  logger.error(
- "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
+ "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data"
+ )
  return False
  df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
- if "optimization_time_step" in retrieve_hass_conf and retrieve_hass_conf["optimization_time_step"]:
- if not isinstance(retrieve_hass_conf["optimization_time_step"], pd._libs.tslibs.timedeltas.Timedelta):
- optimization_time_step = pd.to_timedelta(retrieve_hass_conf["optimization_time_step"], "minute")
+ if (
+ "optimization_time_step" in retrieve_hass_conf
+ and retrieve_hass_conf["optimization_time_step"]
+ ):
+ if not isinstance(
+ retrieve_hass_conf["optimization_time_step"],
+ pd._libs.tslibs.timedeltas.Timedelta,
+ ):
+ optimization_time_step = pd.to_timedelta(
+ retrieve_hass_conf["optimization_time_step"], "minute"
+ )
  else:
  optimization_time_step = retrieve_hass_conf["optimization_time_step"]
- df_input_data_dayahead = df_input_data_dayahead.asfreq(optimization_time_step)
+ df_input_data_dayahead = df_input_data_dayahead.asfreq(
+ optimization_time_step
+ )
  else:
  df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
  df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
  params = json.loads(params)
- if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
+ if (
+ "prediction_horizon" in params["passed_data"]
+ and params["passed_data"]["prediction_horizon"] is not None
+ ):
  prediction_horizon = params["passed_data"]["prediction_horizon"]
  df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
- df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
- elif (set_type == "forecast-model-fit" or set_type == "forecast-model-predict" or set_type == "forecast-model-tune"):
+ df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
+ prediction_horizon - 1
+ ]
+ ]
+ elif (
+ set_type == "forecast-model-fit"
+ or set_type == "forecast-model-predict"
+ or set_type == "forecast-model-tune"
+ ):
  df_input_data_dayahead = None
  P_PV_forecast, P_load_forecast = None, None
  params = json.loads(params)
  # Retrieve data from hass
- days_to_retrieve = params["passed_data"]['historic_days_to_retrieve']
+ days_to_retrieve = params["passed_data"]["historic_days_to_retrieve"]
  model_type = params["passed_data"]["model_type"]
  var_model = params["passed_data"]["var_model"]
  if get_data_from_file:
  days_list = None
- filename = 'data_train_'+model_type+'.pkl'
- filename_path = emhass_conf['data_path'] / filename
- with open(filename_path, 'rb') as inp:
+ filename = "data_train_" + model_type + ".pkl"
+ filename_path = emhass_conf["data_path"] / filename
+ with open(filename_path, "rb") as inp:
  df_input_data, _ = pickle.load(inp)
- df_input_data = df_input_data[df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve):]
+ df_input_data = df_input_data[
+ df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve) :
+ ]
  else:
  days_list = utils.get_days_list(days_to_retrieve)
  var_list = [var_model]
@@ -229,8 +338,12 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  if filename_path.is_file():
  df_input_data = pd.read_csv(filename_path, parse_dates=True)
  else:
- logger.error("The CSV file " + csv_file +
- " was not found in path: " + str(emhass_conf["data_path"]))
+ logger.error(
+ "The CSV file "
+ + csv_file
+ + " was not found in path: "
+ + str(emhass_conf["data_path"])
+ )
  return False
  # raise ValueError("The CSV file " + csv_file + " was not found.")
  required_columns = []
@@ -239,8 +352,7 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  if timestamp is not None:
  required_columns.append(timestamp)
  if not set(required_columns).issubset(df_input_data.columns):
- logger.error(
- "The cvs file does not contain the required columns.")
+ logger.error("The cvs file does not contain the required columns.")
  msg = f"CSV file should contain the following columns: {', '.join(required_columns)}"
  logger.error(msg)
  return False
@@ -257,23 +369,25 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  days_list = None
  # The input data dictionary to return
  input_data_dict = {
- 'emhass_conf': emhass_conf,
- 'retrieve_hass_conf': retrieve_hass_conf,
- 'rh': rh,
- 'opt': opt,
- 'fcst': fcst,
- 'df_input_data': df_input_data,
- 'df_input_data_dayahead': df_input_data_dayahead,
- 'P_PV_forecast': P_PV_forecast,
- 'P_load_forecast': P_load_forecast,
- 'costfun': costfun,
- 'params': params,
- 'days_list': days_list
+ "emhass_conf": emhass_conf,
+ "retrieve_hass_conf": retrieve_hass_conf,
+ "rh": rh,
+ "opt": opt,
+ "fcst": fcst,
+ "df_input_data": df_input_data,
+ "df_input_data_dayahead": df_input_data_dayahead,
+ "P_PV_forecast": P_PV_forecast,
+ "P_load_forecast": P_load_forecast,
+ "costfun": costfun,
+ "params": params,
+ "days_list": days_list,
  }
  return input_data_dict
 
- def weather_forecast_cache(emhass_conf: dict, params: str,
- runtimeparams: str, logger: logging.Logger) -> bool:
+
+ def weather_forecast_cache(
+ emhass_conf: dict, params: str, runtimeparams: str, logger: logging.Logger
+ ) -> bool:
  """
  Perform a call to get forecast function, intend to save results to cache.
 
@@ -289,14 +403,22 @@ def weather_forecast_cache(emhass_conf: dict, params: str,
  :rtype: bool
 
  """
-
+
  # Parsing yaml
  retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(params, logger)
-
+
  # Treat runtimeparams
  params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
- runtimeparams, params, retrieve_hass_conf, optim_conf, plant_conf, "forecast", logger)
-
+ runtimeparams,
+ params,
+ retrieve_hass_conf,
+ optim_conf,
+ plant_conf,
+ "forecast",
+ logger,
+ emhass_conf,
+ )
+
  # Make sure weather_forecast_cache is true
  if (params != None) and (params != "null"):
  params = json.loads(params)
@@ -306,18 +428,23 @@ def weather_forecast_cache(emhass_conf: dict, params: str,
  params = json.dumps(params)
 
  # Create Forecast object
- fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
- params, emhass_conf, logger)
+ fcst = Forecast(
+ retrieve_hass_conf, optim_conf, plant_conf, params, emhass_conf, logger
+ )
 
- result = fcst.get_weather_forecast(optim_conf['weather_forecast_method'])
+ result = fcst.get_weather_forecast(optim_conf["weather_forecast_method"])
  if isinstance(result, bool) and not result:
  return False
 
  return True
 
- def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
- save_data_to_file: Optional[bool] = True,
- debug: Optional[bool] = False) -> pd.DataFrame:
+
+ def perfect_forecast_optim(
+ input_data_dict: dict,
+ logger: logging.Logger,
+ save_data_to_file: Optional[bool] = True,
+ debug: Optional[bool] = False,
+ ) -> pd.DataFrame:
  """
  Perform a call to the perfect forecast optimization routine.
 
@@ -335,43 +462,54 @@ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
  """
  logger.info("Performing perfect forecast optimization")
  # Load cost and prod price forecast
- df_input_data = input_data_dict['fcst'].get_load_cost_forecast(
- input_data_dict['df_input_data'],
- method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'],
- list_and_perfect=True)
+ df_input_data = input_data_dict["fcst"].get_load_cost_forecast(
+ input_data_dict["df_input_data"],
+ method=input_data_dict["fcst"].optim_conf["load_cost_forecast_method"],
+ list_and_perfect=True,
+ )
  if isinstance(df_input_data, bool) and not df_input_data:
  return False
- df_input_data = input_data_dict['fcst'].get_prod_price_forecast(
- df_input_data, method=input_data_dict['fcst'].optim_conf['production_price_forecast_method'],
- list_and_perfect=True)
+ df_input_data = input_data_dict["fcst"].get_prod_price_forecast(
+ df_input_data,
+ method=input_data_dict["fcst"].optim_conf["production_price_forecast_method"],
+ list_and_perfect=True,
+ )
  if isinstance(df_input_data, bool) and not df_input_data:
  return False
- opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(
- df_input_data, input_data_dict['days_list'])
+ opt_res = input_data_dict["opt"].perform_perfect_forecast_optim(
+ df_input_data, input_data_dict["days_list"]
+ )
  # Save CSV file for analysis
  if save_data_to_file:
- filename = "opt_res_perfect_optim_" + \
- input_data_dict["costfun"] + ".csv"
+ filename = "opt_res_perfect_optim_" + input_data_dict["costfun"] + ".csv"
  else: # Just save the latest optimization results
  filename = "opt_res_latest.csv"
  if not debug:
  opt_res.to_csv(
- input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
- if not isinstance(input_data_dict["params"],dict):
+ input_data_dict["emhass_conf"]["data_path"] / filename,
+ index_label="timestamp",
+ )
+ if not isinstance(input_data_dict["params"], dict):
  params = json.loads(input_data_dict["params"])
  else:
  params = input_data_dict["params"]
 
  # if continual_publish, save perfect results to data_path/entities json
- if input_data_dict["retrieve_hass_conf"].get('continual_publish',False) or params["passed_data"].get("entity_save",False):
- #Trigger the publish function, save entity data and not post to HA
- publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
+ if input_data_dict["retrieve_hass_conf"].get("continual_publish", False) or params[
+ "passed_data"
+ ].get("entity_save", False):
+ # Trigger the publish function, save entity data and not post to HA
+ publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
 
  return opt_res
 
- def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
- save_data_to_file: Optional[bool] = False,
- debug: Optional[bool] = False) -> pd.DataFrame:
+
+ def dayahead_forecast_optim(
+ input_data_dict: dict,
+ logger: logging.Logger,
+ save_data_to_file: Optional[bool] = False,
+ debug: Optional[bool] = False,
+ ) -> pd.DataFrame:
  """
  Perform a call to the day-ahead optimization routine.
 
@@ -389,21 +527,27 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
  """
  logger.info("Performing day-ahead forecast optimization")
  # Load cost and prod price forecast
- df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
- input_data_dict['df_input_data_dayahead'],
- method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
+ df_input_data_dayahead = input_data_dict["fcst"].get_load_cost_forecast(
+ input_data_dict["df_input_data_dayahead"],
+ method=input_data_dict["fcst"].optim_conf["load_cost_forecast_method"],
+ )
  if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
  return False
- df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
+ df_input_data_dayahead = input_data_dict["fcst"].get_prod_price_forecast(
  df_input_data_dayahead,
- method=input_data_dict['fcst'].optim_conf['production_price_forecast_method'])
+ method=input_data_dict["fcst"].optim_conf["production_price_forecast_method"],
+ )
  if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
  return False
  if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
- df_input_data_dayahead["outdoor_temperature_forecast"] = \
- input_data_dict["params"]["passed_data"]["outdoor_temperature_forecast"]
- opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
- df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
+ df_input_data_dayahead["outdoor_temperature_forecast"] = input_data_dict[
+ "params"
+ ]["passed_data"]["outdoor_temperature_forecast"]
+ opt_res_dayahead = input_data_dict["opt"].perform_dayahead_forecast_optim(
+ df_input_data_dayahead,
+ input_data_dict["P_PV_forecast"],
+ input_data_dict["P_load_forecast"],
+ )
  # Save CSV file for publish_data
  if save_data_to_file:
  today = datetime.now(timezone.utc).replace(
@@ -414,23 +558,31 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
  filename = "opt_res_latest.csv"
  if not debug:
  opt_res_dayahead.to_csv(
- input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
-
- if not isinstance(input_data_dict["params"],dict):
+ input_data_dict["emhass_conf"]["data_path"] / filename,
+ index_label="timestamp",
+ )
+
+ if not isinstance(input_data_dict["params"], dict):
  params = json.loads(input_data_dict["params"])
  else:
  params = input_data_dict["params"]
-
+
  # if continual_publish, save day_ahead results to data_path/entities json
- if input_data_dict["retrieve_hass_conf"].get('continual_publish',False) or params["passed_data"].get("entity_save",False):
- #Trigger the publish function, save entity data and not post to HA
- publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
-
+ if input_data_dict["retrieve_hass_conf"].get("continual_publish", False) or params[
+ "passed_data"
+ ].get("entity_save", False):
+ # Trigger the publish function, save entity data and not post to HA
+ publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
+
  return opt_res_dayahead
 
- def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
- save_data_to_file: Optional[bool] = False,
- debug: Optional[bool] = False) -> pd.DataFrame:
+
+ def naive_mpc_optim(
+ input_data_dict: dict,
+ logger: logging.Logger,
+ save_data_to_file: Optional[bool] = False,
+ debug: Optional[bool] = False,
+ ) -> pd.DataFrame:
  """
  Perform a call to the naive Model Predictive Controller optimization routine.
 
@@ -448,29 +600,46 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
  """
  logger.info("Performing naive MPC optimization")
  # Load cost and prod price forecast
- df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
- input_data_dict['df_input_data_dayahead'],
- method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
+ df_input_data_dayahead = input_data_dict["fcst"].get_load_cost_forecast(
+ input_data_dict["df_input_data_dayahead"],
+ method=input_data_dict["fcst"].optim_conf["load_cost_forecast_method"],
+ )
  if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
  return False
- df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
- df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['production_price_forecast_method'])
+ df_input_data_dayahead = input_data_dict["fcst"].get_prod_price_forecast(
+ df_input_data_dayahead,
+ method=input_data_dict["fcst"].optim_conf["production_price_forecast_method"],
+ )
  if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
  return False
  if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
- df_input_data_dayahead["outdoor_temperature_forecast"] = \
- input_data_dict["params"]["passed_data"]["outdoor_temperature_forecast"]
+ df_input_data_dayahead["outdoor_temperature_forecast"] = input_data_dict[
+ "params"
+ ]["passed_data"]["outdoor_temperature_forecast"]
  # The specifics params for the MPC at runtime
  prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
  soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
  soc_final = input_data_dict["params"]["passed_data"]["soc_final"]
- def_total_hours = input_data_dict["params"]["passed_data"]['operating_hours_of_each_deferrable_load']
- def_start_timestep = input_data_dict["params"]["passed_data"]['start_timesteps_of_each_deferrable_load']
- def_end_timestep = input_data_dict["params"]["passed_data"]['end_timesteps_of_each_deferrable_load']
+ def_total_hours = input_data_dict["params"]["optim_conf"][
+ "operating_hours_of_each_deferrable_load"
+ ]
+ def_start_timestep = input_data_dict["params"]["optim_conf"][
+ "start_timesteps_of_each_deferrable_load"
+ ]
+ def_end_timestep = input_data_dict["params"]["optim_conf"][
+ "end_timesteps_of_each_deferrable_load"
+ ]
  opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
- df_input_data_dayahead, input_data_dict["P_PV_forecast"], input_data_dict["P_load_forecast"],
- prediction_horizon, soc_init, soc_final, def_total_hours,
- def_start_timestep, def_end_timestep)
+ df_input_data_dayahead,
+ input_data_dict["P_PV_forecast"],
+ input_data_dict["P_load_forecast"],
+ prediction_horizon,
+ soc_init,
+ soc_final,
+ def_total_hours,
+ def_start_timestep,
+ def_end_timestep,
+ )
  # Save CSV file for publish_data
  if save_data_to_file:
  today = datetime.now(timezone.utc).replace(
@@ -481,22 +650,28 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
  filename = "opt_res_latest.csv"
  if not debug:
  opt_res_naive_mpc.to_csv(
- input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
-
- if not isinstance(input_data_dict["params"],dict):
+ input_data_dict["emhass_conf"]["data_path"] / filename,
+ index_label="timestamp",
+ )
+
+ if not isinstance(input_data_dict["params"], dict):
  params = json.loads(input_data_dict["params"])
  else:
  params = input_data_dict["params"]
 
  # if continual_publish, save mpc results to data_path/entities json
- if input_data_dict["retrieve_hass_conf"].get('continual_publish',False) or params["passed_data"].get("entity_save",False):
- #Trigger the publish function, save entity data and not post to HA
- publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
+ if input_data_dict["retrieve_hass_conf"].get("continual_publish", False) or params[
+ "passed_data"
+ ].get("entity_save", False):
+ # Trigger the publish function, save entity data and not post to HA
+ publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
 
  return opt_res_naive_mpc
 
- def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
- debug: Optional[bool] = False) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
+
+ def forecast_model_fit(
+ input_data_dict: dict, logger: logging.Logger, debug: Optional[bool] = False
+ ) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
  """Perform a forecast model fit from training data retrieved from Home Assistant.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -508,32 +683,43 @@ def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
  :return: The DataFrame containing the forecast data results without and with backtest and the `mlforecaster` object
  :rtype: Tuple[pd.DataFrame, pd.DataFrame, mlforecaster]
  """
- data = copy.deepcopy(input_data_dict['df_input_data'])
- model_type = input_data_dict['params']['passed_data']['model_type']
- var_model = input_data_dict['params']['passed_data']['var_model']
- sklearn_model = input_data_dict['params']['passed_data']['sklearn_model']
- num_lags = input_data_dict['params']['passed_data']['num_lags']
- split_date_delta = input_data_dict['params']['passed_data']['split_date_delta']
- perform_backtest = input_data_dict['params']['passed_data']['perform_backtest']
+ data = copy.deepcopy(input_data_dict["df_input_data"])
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
+ var_model = input_data_dict["params"]["passed_data"]["var_model"]
+ sklearn_model = input_data_dict["params"]["passed_data"]["sklearn_model"]
+ num_lags = input_data_dict["params"]["passed_data"]["num_lags"]
+ split_date_delta = input_data_dict["params"]["passed_data"]["split_date_delta"]
+ perform_backtest = input_data_dict["params"]["passed_data"]["perform_backtest"]
  # The ML forecaster object
- mlf = MLForecaster(data, model_type, var_model, sklearn_model,
- num_lags, input_data_dict['emhass_conf'], logger)
+ mlf = MLForecaster(
+ data,
+ model_type,
+ var_model,
+ sklearn_model,
+ num_lags,
+ input_data_dict["emhass_conf"],
+ logger,
+ )
  # Fit the ML model
  df_pred, df_pred_backtest = mlf.fit(
  split_date_delta=split_date_delta, perform_backtest=perform_backtest
  )
  # Save model
  if not debug:
- filename = model_type+'_mlf.pkl'
- filename_path = input_data_dict['emhass_conf']['data_path'] / filename
- with open(filename_path, 'wb') as outp:
+ filename = model_type + "_mlf.pkl"
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
+ with open(filename_path, "wb") as outp:
  pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
  return df_pred, df_pred_backtest, mlf
 
- def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
- use_last_window: Optional[bool] = True,
- debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
- ) -> pd.DataFrame:
+
+ def forecast_model_predict(
+ input_data_dict: dict,
+ logger: logging.Logger,
+ use_last_window: Optional[bool] = True,
+ debug: Optional[bool] = False,
+ mlf: Optional[MLForecaster] = None,
+ ) -> pd.DataFrame:
  r"""Perform a forecast model predict using a previously trained skforecast model.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -555,9 +741,9 @@ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
  :rtype: pd.DataFrame
  """
  # Load model
- model_type = input_data_dict['params']['passed_data']['model_type']
- filename = model_type+'_mlf.pkl'
- filename_path = input_data_dict['emhass_conf']['data_path'] / filename
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
+ filename = model_type + "_mlf.pkl"
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
  if not debug:
  if filename_path.is_file():
  with open(filename_path, "rb") as inp:
@@ -592,24 +778,41 @@ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
  now_precise = datetime.now(
  input_data_dict["retrieve_hass_conf"]["time_zone"]
  ).replace(second=0, microsecond=0)
- if input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "nearest":
- idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
- elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "first":
- idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[0]
- elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "last":
- idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[0]
+ if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+ idx_closest = predictions.index.get_indexer(
+ [now_precise], method="nearest"
+ )[0]
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+ idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[
+ 0
+ ]
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+ idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[
+ 0
+ ]
  if idx_closest == -1:
- idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
+ idx_closest = predictions.index.get_indexer(
+ [now_precise], method="nearest"
+ )[0]
  # Publish Load forecast
  input_data_dict["rh"].post_data(
- predictions, idx_closest, model_predict_entity_id,
- model_predict_unit_of_measurement, model_predict_friendly_name,
- type_var="mlforecaster", publish_prefix=publish_prefix)
+ predictions,
+ idx_closest,
+ model_predict_entity_id,
+ model_predict_unit_of_measurement,
+ model_predict_friendly_name,
+ type_var="mlforecaster",
+ publish_prefix=publish_prefix,
+ )
  return predictions
 
- def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
- debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
- ) -> Tuple[pd.DataFrame, MLForecaster]:
+
+ def forecast_model_tune(
+ input_data_dict: dict,
+ logger: logging.Logger,
+ debug: Optional[bool] = False,
+ mlf: Optional[MLForecaster] = None,
+ ) -> Tuple[pd.DataFrame, MLForecaster]:
  """Tune a forecast model hyperparameters using bayesian optimization.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -625,9 +828,9 @@ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
  :rtype: pd.DataFrame
  """
  # Load model
- model_type = input_data_dict['params']['passed_data']['model_type']
- filename = model_type+'_mlf.pkl'
- filename_path = input_data_dict['emhass_conf']['data_path'] / filename
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
+ filename = model_type + "_mlf.pkl"
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
  if not debug:
  if filename_path.is_file():
  with open(filename_path, "rb") as inp:
@@ -641,14 +844,16 @@ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
  df_pred_optim = mlf.tune(debug=debug)
  # Save model
  if not debug:
- filename = model_type+'_mlf.pkl'
- filename_path = input_data_dict['emhass_conf']['data_path'] / filename
- with open(filename_path, 'wb') as outp:
+ filename = model_type + "_mlf.pkl"
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
+ with open(filename_path, "wb") as outp:
  pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
  return df_pred_optim, mlf
 
- def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
- debug: Optional[bool] = False) -> MLRegressor:
+
+ def regressor_model_fit(
+ input_data_dict: dict, logger: logging.Logger, debug: Optional[bool] = False
+ ) -> MLRegressor:
  """Perform a forecast model fit from training data retrieved from Home Assistant.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -690,7 +895,9 @@ def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
  logger.error("parameter: 'date_features' not passed")
  return False
  # The MLRegressor object
- mlr = MLRegressor(data, model_type, regression_model, features, target, timestamp, logger)
+ mlr = MLRegressor(
+ data, model_type, regression_model, features, target, timestamp, logger
+ )
  # Fit the ML model
  fit = mlr.fit(date_features=date_features)
  if not fit:
@@ -703,9 +910,13 @@ def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
  pickle.dump(mlr, outp, pickle.HIGHEST_PROTOCOL)
  return mlr
 
- def regressor_model_predict(input_data_dict: dict, logger: logging.Logger,
- debug: Optional[bool] = False, mlr: Optional[MLRegressor] = None
- ) -> np.ndarray:
+
+ def regressor_model_predict(
+ input_data_dict: dict,
+ logger: logging.Logger,
+ debug: Optional[bool] = False,
+ mlr: Optional[MLRegressor] = None,
+ ) -> np.ndarray:
  """Perform a prediction from csv file.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -739,24 +950,36 @@ def regressor_model_predict(input_data_dict: dict, logger: logging.Logger,
  # Predict from csv file
  prediction = mlr.predict(new_values)
  mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get(
- "mlr_predict_entity_id", "sensor.mlr_predict")
+ "mlr_predict_entity_id", "sensor.mlr_predict"
+ )
  mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get(
- "mlr_predict_unit_of_measurement", "h")
+ "mlr_predict_unit_of_measurement", "h"
+ )
  mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get(
- "mlr_predict_friendly_name", "mlr predictor")
+ "mlr_predict_friendly_name", "mlr predictor"
+ )
  # Publish prediction
  idx = 0
  if not debug:
- input_data_dict["rh"].post_data(prediction, idx, mlr_predict_entity_id,
- mlr_predict_unit_of_measurement, mlr_predict_friendly_name,
- type_var="mlregressor")
+ input_data_dict["rh"].post_data(
+ prediction,
+ idx,
+ mlr_predict_entity_id,
+ mlr_predict_unit_of_measurement,
+ mlr_predict_friendly_name,
+ type_var="mlregressor",
+ )
  return prediction
 
- def publish_data(input_data_dict: dict, logger: logging.Logger,
- save_data_to_file: Optional[bool] = False,
- opt_res_latest: Optional[pd.DataFrame] = None,
- entity_save: Optional[bool] = False,
- dont_post: Optional[bool] = False) -> pd.DataFrame:
+
+ def publish_data(
+ input_data_dict: dict,
+ logger: logging.Logger,
+ save_data_to_file: Optional[bool] = False,
+ opt_res_latest: Optional[pd.DataFrame] = None,
+ entity_save: Optional[bool] = False,
+ dont_post: Optional[bool] = False,
+ ) -> pd.DataFrame:
  """
  Publish the data obtained from the optimization results.
 
@@ -776,12 +999,11 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  """
  logger.info("Publishing data to HASS instance")
  if input_data_dict:
- if not isinstance(input_data_dict.get("params",{}),dict):
+ if not isinstance(input_data_dict.get("params", {}), dict):
  params = json.loads(input_data_dict["params"])
  else:
- params = input_data_dict.get("params",{})
+ params = input_data_dict.get("params", {})
 
-
  # Check if a day ahead optimization has been performed (read CSV file)
  if save_data_to_file:
  today = datetime.now(timezone.utc).replace(
@@ -789,60 +1011,80 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  )
  filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
  # If publish_prefix is passed, check if there is saved entities in data_path/entities with prefix, publish to results
- elif params["passed_data"].get("publish_prefix","") != "" and not dont_post:
+ elif params["passed_data"].get("publish_prefix", "") != "" and not dont_post:
  opt_res_list = []
  opt_res_list_names = []
  publish_prefix = params["passed_data"]["publish_prefix"]
- entity_path = input_data_dict['emhass_conf']['data_path'] / "entities"
+ entity_path = input_data_dict["emhass_conf"]["data_path"] / "entities"
  # Check if items in entity_path
  if os.path.exists(entity_path) and len(os.listdir(entity_path)) > 0:
  # Obtain all files in entity_path
- entity_path_contents = os.listdir(entity_path)
- for entity in entity_path_contents:
- if entity != "metadata.json":
- # If publish_prefix is "all" publish all saved entities to Home Assistant
+ entity_path_contents = os.listdir(entity_path)
+ # Confirm the entity path contains at least one file containing publish prefix or publish_prefix='all'
+ if (
+ any(publish_prefix in entity for entity in entity_path_contents)
+ or publish_prefix == "all"
+ ):
+ # Loop through all items in entity path
+ for entity in entity_path_contents:
+ # If publish_prefix is "all" publish all saved entities to Home Assistant
  # If publish_prefix matches the prefix from saved entities, publish to Home Assistant
- if publish_prefix in entity or publish_prefix == "all":
- entity_data = publish_json(entity,input_data_dict,entity_path,logger)
+ if entity != "metadata.json" and (
+ publish_prefix in entity or publish_prefix == "all"
+ ):
+ entity_data = publish_json(
+ entity, input_data_dict, entity_path, logger
+ )
  if not isinstance(entity_data, bool):
  opt_res_list.append(entity_data)
  opt_res_list_names.append(entity.replace(".json", ""))
  else:
- return False
- # Build a DataFrame with published entities
- opt_res = pd.concat(opt_res_list, axis=1)
- opt_res.columns = opt_res_list_names
- return opt_res
+ return False
+ # Build a DataFrame with published entities
+ opt_res = pd.concat(opt_res_list, axis=1)
+ opt_res.columns = opt_res_list_names
+ return opt_res
+ else:
+ logger.warning(
+ "No saved entity json files that match prefix: "
+ + str(publish_prefix)
+ )
+ logger.warning("Falling back to opt_res_latest")
  else:
- logger.warning("no saved entity json files in path:" + str(entity_path))
- logger.warning("falling back to opt_res_latest")
- filename = "opt_res_latest.csv"
+ logger.warning("No saved entity json files in path:" + str(entity_path))
+ logger.warning("Falling back to opt_res_latest")
+ filename = "opt_res_latest.csv"
  else:
  filename = "opt_res_latest.csv"
  if opt_res_latest is None:
- if not os.path.isfile(input_data_dict['emhass_conf']['data_path'] / filename):
- logger.error(
- "File not found error, run an optimization task first.")
+ if not os.path.isfile(input_data_dict["emhass_conf"]["data_path"] / filename):
+ logger.error("File not found error, run an optimization task first.")
  return
  else:
  opt_res_latest = pd.read_csv(
- input_data_dict['emhass_conf']['data_path'] / filename, index_col='timestamp')
+ input_data_dict["emhass_conf"]["data_path"] / filename,
+ index_col="timestamp",
+ )
  opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
- opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"]['optimization_time_step']
+ opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"][
+ "optimization_time_step"
+ ]
  # Estimate the current index
  now_precise = datetime.now(
  input_data_dict["retrieve_hass_conf"]["time_zone"]
  ).replace(second=0, microsecond=0)
- if input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "nearest":
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
- elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "first":
- idx_closest = opt_res_latest.index.get_indexer(
- [now_precise], method="ffill")[0]
- elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "last":
- idx_closest = opt_res_latest.index.get_indexer(
- [now_precise], method="bfill")[0]
+ if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
+ 0
+ ]
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="ffill")[0]
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="bfill")[0]
  if idx_closest == -1:
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
+ 0
+ ]
  # Publish the data
  publish_prefix = params["passed_data"]["publish_prefix"]
  # Publish PV forecast
@@ -856,7 +1098,7 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  type_var="power",
  publish_prefix=publish_prefix,
  save_entities=entity_save,
- dont_post=dont_post
+ dont_post=dont_post,
  )
  # Publish Load forecast
  custom_load_forecast_id = params["passed_data"]["custom_load_forecast_id"]
@@ -869,11 +1111,11 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  type_var="power",
  publish_prefix=publish_prefix,
  save_entities=entity_save,
- dont_post=dont_post
+ dont_post=dont_post,
  )
  cols_published = ["P_PV", "P_Load"]
  # Publish PV curtailment
- if input_data_dict["fcst"].plant_conf['compute_curtailment']:
+ if input_data_dict["fcst"].plant_conf["compute_curtailment"]:
  custom_pv_curtailment_id = params["passed_data"]["custom_pv_curtailment_id"]
  input_data_dict["rh"].post_data(
  opt_res_latest["P_PV_curtailment"],
@@ -884,11 +1126,11 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  type_var="power",
  publish_prefix=publish_prefix,
  save_entities=entity_save,
- dont_post=dont_post
+ dont_post=dont_post,
  )
  cols_published = cols_published + ["P_PV_curtailment"]
  # Publish P_hybrid_inverter
- if input_data_dict["fcst"].plant_conf['inverter_is_hybrid']:
+ if input_data_dict["fcst"].plant_conf["inverter_is_hybrid"]:
  custom_hybrid_inverter_id = params["passed_data"]["custom_hybrid_inverter_id"]
  input_data_dict["rh"].post_data(
  opt_res_latest["P_hybrid_inverter"],
@@ -899,14 +1141,14 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  type_var="power",
  publish_prefix=publish_prefix,
  save_entities=entity_save,
- dont_post=dont_post
+ dont_post=dont_post,
  )
  cols_published = cols_published + ["P_hybrid_inverter"]
  # Publish deferrable loads
  custom_deferrable_forecast_id = params["passed_data"][
  "custom_deferrable_forecast_id"
  ]
- for k in range(input_data_dict["opt"].optim_conf['number_of_deferrable_loads']):
+ for k in range(input_data_dict["opt"].optim_conf["number_of_deferrable_loads"]):
  if "P_deferrable{}".format(k) not in opt_res_latest.columns:
  logger.error(
  "P_deferrable{}".format(k)
@@ -922,16 +1164,19 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  type_var="deferrable",
  publish_prefix=publish_prefix,
  save_entities=entity_save,
- dont_post=dont_post
+ dont_post=dont_post,
  )
  cols_published = cols_published + ["P_deferrable{}".format(k)]
  # Publish thermal model data (predicted temperature)
  custom_predicted_temperature_id = params["passed_data"][
  "custom_predicted_temperature_id"
  ]
- for k in range(input_data_dict["opt"].optim_conf['number_of_deferrable_loads']):
+ for k in range(input_data_dict["opt"].optim_conf["number_of_deferrable_loads"]):
  if "def_load_config" in input_data_dict["opt"].optim_conf.keys():
- if "thermal_config" in input_data_dict["opt"].optim_conf["def_load_config"][k]:
+ if (
+ "thermal_config"
+ in input_data_dict["opt"].optim_conf["def_load_config"][k]
+ ):
  input_data_dict["rh"].post_data(
  opt_res_latest["predicted_temp_heater{}".format(k)],
  idx_closest,
@@ -941,11 +1186,11 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  type_var="temperature",
  publish_prefix=publish_prefix,
  save_entities=entity_save,
- dont_post=dont_post
+ dont_post=dont_post,
  )
  cols_published = cols_published + ["predicted_temp_heater{}".format(k)]
  # Publish battery power
- if input_data_dict["opt"].optim_conf['set_use_battery']:
+ if input_data_dict["opt"].optim_conf["set_use_battery"]:
  if "P_batt" not in opt_res_latest.columns:
  logger.error(
  "P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
@@ -961,7 +1206,7 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  type_var="batt",
  publish_prefix=publish_prefix,
  save_entities=entity_save,
- dont_post=dont_post
+ dont_post=dont_post,
  )
  cols_published = cols_published + ["P_batt"]
  custom_batt_soc_forecast_id = params["passed_data"][
@@ -976,7 +1221,7 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  type_var="SOC",
  publish_prefix=publish_prefix,
  save_entities=entity_save,
- dont_post=dont_post
+ dont_post=dont_post,
  )
  cols_published = cols_published + ["SOC_opt"]
  # Publish grid power
@@ -990,7 +1235,7 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
  type_var="power",
  publish_prefix=publish_prefix,
  save_entities=entity_save,
- dont_post=dont_post
+ dont_post=dont_post,
  )
  cols_published = cols_published + ["P_grid"]
  # Publish total value of cost function
@@ -1005,7 +1250,7 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
1005
1250
  type_var="cost_fun",
1006
1251
  publish_prefix=publish_prefix,
1007
1252
  save_entities=entity_save,
1008
- dont_post=dont_post
1253
+ dont_post=dont_post,
1009
1254
  )
1010
1255
  # cols_published = cols_published + col_cost_fun
1011
1256
  # Publish the optimization status
@@ -1025,7 +1270,7 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
1025
1270
  type_var="optim_status",
1026
1271
  publish_prefix=publish_prefix,
1027
1272
  save_entities=entity_save,
1028
- dont_post=dont_post
1273
+ dont_post=dont_post,
1029
1274
  )
1030
1275
  cols_published = cols_published + ["optim_status"]
1031
1276
  # Publish unit_load_cost
@@ -1039,7 +1284,7 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
1039
1284
  type_var="unit_load_cost",
1040
1285
  publish_prefix=publish_prefix,
1041
1286
  save_entities=entity_save,
1042
- dont_post=dont_post
1287
+ dont_post=dont_post,
1043
1288
  )
1044
1289
  cols_published = cols_published + ["unit_load_cost"]
1045
1290
  # Publish unit_prod_price
@@ -1053,15 +1298,17 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
1053
1298
  type_var="unit_prod_price",
1054
1299
  publish_prefix=publish_prefix,
1055
1300
  save_entities=entity_save,
1056
- dont_post=dont_post
1301
+ dont_post=dont_post,
1057
1302
  )
1058
1303
  cols_published = cols_published + ["unit_prod_price"]
1059
1304
  # Create a DF summarizing what has been published
1060
- opt_res = opt_res_latest[cols_published].loc[[
1061
- opt_res_latest.index[idx_closest]]]
1305
+ opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
1062
1306
  return opt_res
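For reference, the double-bracket .loc[[...]] used just above keeps the published summary as a one-row DataFrame rather than collapsing it to a Series. A minimal sketch with made-up values for two of the columns published above:

    import pandas as pd

    idx = pd.date_range("2024-01-01 00:00", periods=3, freq="30min", tz="UTC")
    opt_res_latest = pd.DataFrame(
        {"P_grid": [500, -200, -600], "P_deferrable0": [0, 1500, 0]}, index=idx
    )
    cols_published = ["P_grid", "P_deferrable0"]
    idx_closest = 1  # stand-in for the index computed earlier from method_ts_round
    opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
    print(opt_res)  # a single row (00:30), still a DataFrame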
1063
1307
 
1064
- def continual_publish(input_data_dict: dict, entity_path: pathlib.Path, logger: logging.Logger):
1308
+
1309
+ def continual_publish(
1310
+ input_data_dict: dict, entity_path: pathlib.Path, logger: logging.Logger
1311
+ ):
1065
1312
  """
1066
1313
  If continual_publish is true and an entity file is saved in /data_path/entities, continually publish the sensors at the freq rate, updating each entity's current state value based on the timestamp
1067
1314
 
@@ -1074,31 +1321,56 @@ def continual_publish(input_data_dict: dict, entity_path: pathlib.Path, logger:
1074
1321
 
1075
1322
  """
1076
1323
  logger.info("Continual publish thread service started")
1077
- freq = input_data_dict['retrieve_hass_conf'].get('optimization_time_step', pd.to_timedelta(1, "minutes"))
1324
+ freq = input_data_dict["retrieve_hass_conf"].get(
1325
+ "optimization_time_step", pd.to_timedelta(1, "minutes")
1326
+ )
1078
1327
  entity_path_contents = []
1079
1328
  while True:
1080
1329
  # Sleep for x seconds (using current time as a reference for time left)
1081
- time.sleep(max(0,freq.total_seconds() - (datetime.now(input_data_dict["retrieve_hass_conf"]["time_zone"]).timestamp() % 60)))
1330
+ time.sleep(
1331
+ max(
1332
+ 0,
1333
+ freq.total_seconds()
1334
+ - (
1335
+ datetime.now(
1336
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
1337
+ ).timestamp()
1338
+ % 60
1339
+ ),
1340
+ )
1341
+ )
1082
1342
  # Loop through all saved entity files
1083
1343
  if os.path.exists(entity_path) and len(os.listdir(entity_path)) > 0:
1084
- entity_path_contents = os.listdir(entity_path)
1344
+ entity_path_contents = os.listdir(entity_path)
1085
1345
  for entity in entity_path_contents:
1086
1346
  if entity != "metadata.json":
1087
- # Call publish_json with entity file, build entity, and publish
1088
- publish_json(entity, input_data_dict, entity_path, logger, 'continual_publish')
1089
- # Retrieve entity metadata from file
1347
+ # Call publish_json with entity file, build entity, and publish
1348
+ publish_json(
1349
+ entity,
1350
+ input_data_dict,
1351
+ entity_path,
1352
+ logger,
1353
+ "continual_publish",
1354
+ )
1355
+ # Retrieve entity metadata from file
1090
1356
  if os.path.isfile(entity_path / "metadata.json"):
1091
1357
  with open(entity_path / "metadata.json", "r") as file:
1092
1358
  metadata = json.load(file)
1093
1359
  # Check if freq should be shorter
1094
- if not metadata.get("lowest_time_step",None) == None:
1360
+ if metadata.get("lowest_time_step", None) is not None:
1095
1361
  freq = pd.to_timedelta(metadata["lowest_time_step"], "minutes")
1096
- pass
1097
- # This function should never return
1098
- return False
1099
-
1100
- def publish_json(entity: dict, input_data_dict: dict, entity_path: pathlib.Path,
1101
- logger: logging.Logger, reference: Optional[str] = ""):
1362
+ pass
1363
+ # This function should never return
1364
+ return False
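The loop above wakes up once per optimization_time_step: it subtracts the seconds already elapsed in the current minute from the step length before sleeping. A standalone sketch of that computation (UTC stands in for the configured time_zone):

    import pandas as pd
    from datetime import datetime, timezone

    freq = pd.to_timedelta(1, "minutes")  # default used when optimization_time_step is absent
    now = datetime.now(timezone.utc)      # the real code uses retrieve_hass_conf["time_zone"]
    sleep_s = max(0, freq.total_seconds() - (now.timestamp() % 60))
    print(f"sleeping {sleep_s:.1f}s until the next publish tick")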
1365
+
1366
+
1367
+ def publish_json(
1368
+ entity: dict,
1369
+ input_data_dict: dict,
1370
+ entity_path: pathlib.Path,
1371
+ logger: logging.Logger,
1372
+ reference: Optional[str] = "",
1373
+ ):
1102
1374
  """
1103
1375
  Extract saved entity data from a .json file (in data_path/entities), rebuild the entity, and post the result via post_data
1104
1376
 
@@ -1110,9 +1382,9 @@ def publish_json(entity: dict, input_data_dict: dict, entity_path: pathlib.Path,
1110
1382
  :type entity_path: Path
1111
1383
  :param logger: The passed logger object
1112
1384
  :type logger: logging.Logger
1113
- :param reference: String for identifying who ran the function
1385
+ :param reference: String for identifying who ran the function
1114
1386
  :type reference: str, optional
1115
-
1387
+
1116
1388
  """
1117
1389
  # Retrieve entity metadata from file
1118
1390
  if os.path.isfile(entity_path / "metadata.json"):
@@ -1120,32 +1392,38 @@ def publish_json(entity: dict, input_data_dict: dict, entity_path: pathlib.Path,
1120
1392
  metadata = json.load(file)
1121
1393
  else:
1122
1394
  logger.error("unable to located metadata.json in:" + entity_path)
1123
- return False
1395
+ return False
1124
1396
  # Round current timecode (now)
1125
- now_precise = datetime.now(input_data_dict["retrieve_hass_conf"]["time_zone"]).replace(second=0, microsecond=0)
1397
+ now_precise = datetime.now(
1398
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
1399
+ ).replace(second=0, microsecond=0)
1126
1400
  # Retrieve entity data from file
1127
- entity_data = pd.read_json(entity_path / entity , orient='index')
1401
+ entity_data = pd.read_json(entity_path / entity, orient="index")
1128
1402
  # Remove ".json" from string for entity_id
1129
1403
  entity_id = entity.replace(".json", "")
1130
1404
  # Adjust Dataframe from received entity json file
1131
1405
  entity_data.columns = [metadata[entity_id]["name"]]
1132
1406
  entity_data.index.name = "timestamp"
1133
- entity_data.index = pd.to_datetime(entity_data.index).tz_convert(input_data_dict["retrieve_hass_conf"]["time_zone"])
1134
- entity_data.index.freq = pd.to_timedelta(int(metadata[entity_id]['optimization_time_step']), "minutes")
1407
+ entity_data.index = pd.to_datetime(entity_data.index).tz_convert(
1408
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
1409
+ )
1410
+ entity_data.index.freq = pd.to_timedelta(
1411
+ int(metadata[entity_id]["optimization_time_step"]), "minutes"
1412
+ )
1135
1413
  # Calculate the current state value
1136
- if input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "nearest":
1414
+ if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
1137
1415
  idx_closest = entity_data.index.get_indexer([now_precise], method="nearest")[0]
1138
- elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "first":
1416
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
1139
1417
  idx_closest = entity_data.index.get_indexer([now_precise], method="ffill")[0]
1140
- elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "last":
1418
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
1141
1419
  idx_closest = entity_data.index.get_indexer([now_precise], method="bfill")[0]
1142
1420
  if idx_closest == -1:
1143
1421
  idx_closest = entity_data.index.get_indexer([now_precise], method="nearest")[0]
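The three method_ts_round options map onto pandas index-lookup methods: "nearest", "ffill" for first and "bfill" for last, with a fallback to "nearest" when no match is found. A minimal sketch with a made-up 30-minute index and column:

    import pandas as pd

    idx = pd.date_range("2024-01-01 00:00", periods=4, freq="30min", tz="UTC")
    entity_data = pd.DataFrame({"P_deferrable0": [0, 1500, 0, 0]}, index=idx)
    now_precise = pd.Timestamp("2024-01-01 00:40", tz="UTC")

    entity_data.index.get_indexer([now_precise], method="nearest")[0]  # 1 -> 00:30 is closest
    entity_data.index.get_indexer([now_precise], method="ffill")[0]    # 1 -> last index <= now
    entity_data.index.get_indexer([now_precise], method="bfill")[0]    # 2 -> first index >= now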
1144
- # Call post data
1145
- if reference == 'continual_publish':
1422
+ # Call post data
1423
+ if reference == "continual_publish":
1146
1424
  logger.debug("Auto Published sensor:")
1147
1425
  logger_levels = "DEBUG"
1148
- else:
1426
+ else:
1149
1427
  logger_levels = "INFO"
1150
1428
  # post/save entity
1151
1429
  input_data_dict["rh"].post_data(
@@ -1154,9 +1432,9 @@ def publish_json(entity: dict, input_data_dict: dict, entity_path: pathlib.Path,
1154
1432
  entity_id=entity_id,
1155
1433
  unit_of_measurement=metadata[entity_id]["unit_of_measurement"],
1156
1434
  friendly_name=metadata[entity_id]["friendly_name"],
1157
- type_var=metadata[entity_id].get("type_var",""),
1435
+ type_var=metadata[entity_id].get("type_var", ""),
1158
1436
  save_entities=False,
1159
- logger_levels=logger_levels
1437
+ logger_levels=logger_levels,
1160
1438
  )
1161
1439
  return entity_data[metadata[entity_id]["name"]]
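For orientation, the metadata.json consumed above might look roughly like this (a hypothetical example written as a Python literal; only the keys this function actually reads are shown, and the entity id is invented). The companion <entity_id>.json file holds the published series keyed by timestamp and is read back with orient="index":

    metadata = {
        "sensor.p_deferrable0": {          # entity_id, i.e. the .json filename without extension
            "name": "P_deferrable0",
            "unit_of_measurement": "W",
            "friendly_name": "Deferrable Load 0",
            "type_var": "deferrable",
            "optimization_time_step": 30,  # minutes, used to set the index freq
        },
        "lowest_time_step": 30,            # minutes, lets continual_publish shorten its freq
    }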
1162
1440
 
@@ -1184,70 +1462,100 @@ def main():
1184
1462
  """
1185
1463
  # Parsing arguments
1186
1464
  parser = argparse.ArgumentParser()
1187
- parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim,\
1188
- naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune')
1189
- parser.add_argument('--config', type=str,
1190
- help='Define path to the config.json/defaults.json file')
1191
- parser.add_argument('--params', type=str, default=None,
1192
- help='String of configuration parameters passed')
1193
- parser.add_argument('--data', type=str,
1194
- help='Define path to the Data files (.csv & .pkl)')
1195
- parser.add_argument('--root', type=str, help='Define path emhass root')
1196
- parser.add_argument('--costfun', type=str, default='profit',
1197
- help='Define the type of cost function, options are: profit, cost, self-consumption')
1198
- parser.add_argument('--log2file', type=strtobool, default='False',
1199
- help='Define if we should log to a file or not')
1200
- parser.add_argument('--secrets', type=str, default=None,
1201
- help='Define secret parameter file (secrets_emhass.yaml) path')
1202
- parser.add_argument('--runtimeparams', type=str, default=None,
1203
- help='Pass runtime optimization parameters as dictionnary')
1204
- parser.add_argument('--debug', type=strtobool,
1205
- default='False', help='Use True for testing purposes')
1465
+ parser.add_argument(
1466
+ "--action",
1467
+ type=str,
1468
+ help="Set the desired action, options are: perfect-optim, dayahead-optim,\
1469
+ naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune",
1470
+ )
1471
+ parser.add_argument(
1472
+ "--config", type=str, help="Define path to the config.json/defaults.json file"
1473
+ )
1474
+ parser.add_argument(
1475
+ "--params",
1476
+ type=str,
1477
+ default=None,
1478
+ help="String of configuration parameters passed",
1479
+ )
1480
+ parser.add_argument(
1481
+ "--data", type=str, help="Define path to the Data files (.csv & .pkl)"
1482
+ )
1483
+ parser.add_argument("--root", type=str, help="Define path emhass root")
1484
+ parser.add_argument(
1485
+ "--costfun",
1486
+ type=str,
1487
+ default="profit",
1488
+ help="Define the type of cost function, options are: profit, cost, self-consumption",
1489
+ )
1490
+ parser.add_argument(
1491
+ "--log2file",
1492
+ type=strtobool,
1493
+ default="False",
1494
+ help="Define if we should log to a file or not",
1495
+ )
1496
+ parser.add_argument(
1497
+ "--secrets",
1498
+ type=str,
1499
+ default=None,
1500
+ help="Define secret parameter file (secrets_emhass.yaml) path",
1501
+ )
1502
+ parser.add_argument(
1503
+ "--runtimeparams",
1504
+ type=str,
1505
+ default=None,
1506
+ help="Pass runtime optimization parameters as dictionnary",
1507
+ )
1508
+ parser.add_argument(
1509
+ "--debug", type=strtobool, default="False", help="Use True for testing purposes"
1510
+ )
1206
1511
  args = parser.parse_args()
1207
-
1512
+
1208
1513
  # The path to the configuration files
1209
1514
  if args.config is not None:
1210
1515
  config_path = pathlib.Path(args.config)
1211
1516
  else:
1212
- config_path = pathlib.Path(str(utils.get_root(__file__, num_parent=3) / 'config.json'))
1517
+ config_path = pathlib.Path(
1518
+ str(utils.get_root(__file__, num_parent=3) / "config.json")
1519
+ )
1213
1520
  if args.data is not None:
1214
1521
  data_path = pathlib.Path(args.data)
1215
1522
  else:
1216
- data_path = (config_path.parent / 'data/')
1523
+ data_path = config_path.parent / "data/"
1217
1524
  if args.root is not None:
1218
1525
  root_path = pathlib.Path(args.root)
1219
1526
  else:
1220
- root_path = utils.get_root(__file__, num_parent=1)
1527
+ root_path = utils.get_root(__file__, num_parent=1)
1221
1528
  if args.secrets is not None:
1222
1529
  secrets_path = pathlib.Path(args.secrets)
1223
1530
  else:
1224
- secrets_path = pathlib.Path(config_path.parent / 'secrets_emhass.yaml')
1531
+ secrets_path = pathlib.Path(config_path.parent / "secrets_emhass.yaml")
1532
+
1533
+ associations_path = root_path / "data/associations.csv"
1534
+ defaults_path = root_path / "data/config_defaults.json"
1225
1535
 
1226
- associations_path = root_path / 'data/associations.csv'
1227
- defaults_path = root_path / 'data/config_defaults.json'
1228
-
1229
1536
  emhass_conf = {}
1230
- emhass_conf['config_path'] = config_path
1231
- emhass_conf['data_path'] = data_path
1232
- emhass_conf['root_path'] = root_path
1233
- emhass_conf['associations_path'] = associations_path
1234
- emhass_conf['defaults_path'] = defaults_path
1537
+ emhass_conf["config_path"] = config_path
1538
+ emhass_conf["data_path"] = data_path
1539
+ emhass_conf["root_path"] = root_path
1540
+ emhass_conf["associations_path"] = associations_path
1541
+ emhass_conf["defaults_path"] = defaults_path
1235
1542
  # create logger
1236
1543
  logger, ch = utils.get_logger(
1237
- __name__, emhass_conf, save_to_file=bool(args.log2file))
1238
-
1544
+ __name__, emhass_conf, save_to_file=bool(args.log2file)
1545
+ )
1546
+
1239
1547
  # Check paths
1240
1548
  logger.debug("config path: " + str(config_path))
1241
1549
  logger.debug("data path: " + str(data_path))
1242
1550
  logger.debug("root path: " + str(root_path))
1243
- if not associations_path.exists():
1551
+ if not associations_path.exists():
1244
1552
  logger.error(
1245
- "Could not find associations.csv file in: " + str(associations_path))
1553
+ "Could not find associations.csv file in: " + str(associations_path)
1554
+ )
1246
1555
  logger.error("Try setting config file path with --associations")
1247
1556
  return False
1248
- if not config_path.exists():
1249
- logger.warning(
1250
- "Could not find config.json file in: " + str(config_path))
1557
+ if not config_path.exists():
1558
+ logger.warning("Could not find config.json file in: " + str(config_path))
1251
1559
  logger.warning("Try setting config file path with --config")
1252
1560
  if not secrets_path.exists():
1253
1561
  logger.warning("Could not find secrets file in: " + str(secrets_path))
@@ -1260,7 +1568,7 @@ def main():
1260
1568
  logger.error("Could not find emhass/src folder in: " + str(root_path))
1261
1569
  logger.error("Try setting emhass root path with --root")
1262
1570
  return False
1263
-
1571
+
1264
1572
  # Additional argument
1265
1573
  try:
1266
1574
  parser.add_argument(
@@ -1280,47 +1588,60 @@ def main():
1280
1588
  if config_path.exists():
1281
1589
  config_file_ending = re.findall("(?<=\.).*$", str(config_path))
1282
1590
  if len(config_file_ending) > 0:
1283
- match(config_file_ending[0]):
1591
+ match config_file_ending[0]:
1284
1592
  case "json":
1285
- config = utils.build_config(emhass_conf,logger,defaults_path,config_path)
1593
+ config = utils.build_config(
1594
+ emhass_conf, logger, defaults_path, config_path
1595
+ )
1286
1596
  case "yaml":
1287
- config = utils.build_config(emhass_conf,logger,defaults_path,config_path=config_path)
1597
+ config = utils.build_config(
1598
+ emhass_conf, logger, defaults_path, config_path=config_path
1599
+ )
1288
1600
  case "yml":
1289
- config = utils.build_config(emhass_conf,logger,defaults_path,config_path=config_path)
1601
+ config = utils.build_config(
1602
+ emhass_conf, logger, defaults_path, config_path=config_path
1603
+ )
1290
1604
  # If unable to find config file, use only defaults_config.json
1291
1605
  else:
1292
- logger.warning("Unable to obtain config.json file, building parameters with only defaults")
1293
- config = utils.build_config(emhass_conf,logger,defaults_path)
1606
+ logger.warning(
1607
+ "Unable to obtain config.json file, building parameters with only defaults"
1608
+ )
1609
+ config = utils.build_config(emhass_conf, logger, defaults_path)
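The file-type detection above hinges on a look-behind regex applied to the config path; a quick standalone check of what it yields (using a made-up path):

    import re

    config_path = "/share/config.json"  # hypothetical path
    config_file_ending = re.findall(r"(?<=\.).*$", config_path)
    print(config_file_ending)  # ['json'] -> the match statement picks the json branch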
1294
1610
  if type(config) is bool and not config:
1295
1611
  raise Exception("Failed to find default config")
1296
-
1297
1612
 
1298
1613
  # Obtain secrets from secrets_emhass.yaml?
1299
1614
  params_secrets = {}
1300
- emhass_conf, built_secrets = utils.build_secrets(emhass_conf,logger,secrets_path=secrets_path)
1615
+ emhass_conf, built_secrets = utils.build_secrets(
1616
+ emhass_conf, logger, secrets_path=secrets_path
1617
+ )
1301
1618
  params_secrets.update(built_secrets)
1302
1619
 
1303
1620
  # Build params
1304
1621
  params = utils.build_params(emhass_conf, params_secrets, config, logger)
1305
1622
  if type(params) is bool:
1306
- raise Exception("A error has occurred while building parameters")
1623
+ raise Exception("A error has occurred while building parameters")
1307
1624
  # Add any passed params from args to params
1308
1625
  if args.params:
1309
1626
  params.update(json.loads(args.params))
1310
-
1311
- input_data_dict = set_input_data_dict(emhass_conf,
1312
- args.costfun, json.dumps(params), args.runtimeparams, args.action,
1313
- logger, args.debug)
1627
+
1628
+ input_data_dict = set_input_data_dict(
1629
+ emhass_conf,
1630
+ args.costfun,
1631
+ json.dumps(params),
1632
+ args.runtimeparams,
1633
+ args.action,
1634
+ logger,
1635
+ args.debug,
1636
+ )
1314
1637
  if type(input_data_dict) is bool:
1315
- raise Exception("A error has occurred while creating action objects")
1638
+ raise Exception("A error has occurred while creating action objects")
1316
1639
 
1317
1640
  # Perform selected action
1318
1641
  if args.action == "perfect-optim":
1319
- opt_res = perfect_forecast_optim(
1320
- input_data_dict, logger, debug=args.debug)
1642
+ opt_res = perfect_forecast_optim(input_data_dict, logger, debug=args.debug)
1321
1643
  elif args.action == "dayahead-optim":
1322
- opt_res = dayahead_forecast_optim(
1323
- input_data_dict, logger, debug=args.debug)
1644
+ opt_res = dayahead_forecast_optim(input_data_dict, logger, debug=args.debug)
1324
1645
  elif args.action == "naive-mpc-optim":
1325
1646
  opt_res = naive_mpc_optim(input_data_dict, logger, debug=args.debug)
1326
1647
  elif args.action == "forecast-model-fit":
@@ -1333,14 +1654,18 @@ def main():
1333
1654
  _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
1334
1655
  else:
1335
1656
  mlf = None
1336
- df_pred = forecast_model_predict(input_data_dict, logger, debug=args.debug, mlf=mlf)
1657
+ df_pred = forecast_model_predict(
1658
+ input_data_dict, logger, debug=args.debug, mlf=mlf
1659
+ )
1337
1660
  opt_res = None
1338
1661
  elif args.action == "forecast-model-tune":
1339
1662
  if args.debug:
1340
1663
  _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
1341
1664
  else:
1342
1665
  mlf = None
1343
- df_pred_optim, mlf = forecast_model_tune(input_data_dict, logger, debug=args.debug, mlf=mlf)
1666
+ df_pred_optim, mlf = forecast_model_tune(
1667
+ input_data_dict, logger, debug=args.debug, mlf=mlf
1668
+ )
1344
1669
  opt_res = None
1345
1670
  elif args.action == "regressor-model-fit":
1346
1671
  mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
@@ -1350,13 +1675,17 @@ def main():
1350
1675
  mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
1351
1676
  else:
1352
1677
  mlr = None
1353
- prediction = regressor_model_predict(input_data_dict, logger, debug=args.debug,mlr=mlr)
1678
+ prediction = regressor_model_predict(
1679
+ input_data_dict, logger, debug=args.debug, mlr=mlr
1680
+ )
1354
1681
  opt_res = None
1355
1682
  elif args.action == "publish-data":
1356
- opt_res = publish_data(input_data_dict,logger)
1683
+ opt_res = publish_data(input_data_dict, logger)
1357
1684
  else:
1358
1685
  logger.error("The passed action argument is not valid")
1359
- logger.error("Try setting --action: perfect-optim, dayahead-optim, naive-mpc-optim, forecast-model-fit, forecast-model-predict, forecast-model-tune or publish-data")
1686
+ logger.error(
1687
+ "Try setting --action: perfect-optim, dayahead-optim, naive-mpc-optim, forecast-model-fit, forecast-model-predict, forecast-model-tune or publish-data"
1688
+ )
1360
1689
  opt_res = None
1361
1690
  logger.info(opt_res)
1362
1691
  # Flush the logger