emhass 0.9.0__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emhass/command_line.py CHANGED
@@ -3,6 +3,7 @@
 
  import argparse
  import os
+ import time
  import pathlib
  import logging
  import json
@@ -24,12 +25,12 @@ from emhass.machine_learning_regressor import MLRegressor
  from emhass import utils
 
 
- def set_input_data_dict(emhass_conf: dict, costfun: str,
- params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
- get_data_from_file: Optional[bool] = False) -> dict:
+ def set_input_data_dict(emhass_conf: dict, costfun: str,
+ params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
+ get_data_from_file: Optional[bool] = False) -> dict:
  """
  Set up some of the data needed for the different actions.
-
+
  :param emhass_conf: Dictionary containing the needed emhass paths
  :type emhass_conf: dict
  :param costfun: The type of cost function to use for optimization problem
@@ -51,25 +52,18 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  logger.info("Setting up needed data")
  # Parsing yaml
  retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(
- emhass_conf, use_secrets=not(get_data_from_file), params=params)
+ emhass_conf, use_secrets=not (get_data_from_file), params=params)
  # Treat runtimeparams
  params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
- runtimeparams,
- params,
- retrieve_hass_conf,
- optim_conf,
- plant_conf,
- set_type,
- logger,
- )
+ runtimeparams, params, retrieve_hass_conf, optim_conf, plant_conf, set_type, logger)
  # Define main objects
- rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
- retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
+ rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
+ retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
  params, emhass_conf, logger, get_data_from_file=get_data_from_file)
  fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
  params, emhass_conf, logger, get_data_from_file=get_data_from_file)
- opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
- fcst.var_load_cost, fcst.var_prod_price,
+ opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
+ fcst.var_load_cost, fcst.var_prod_price,
  costfun, emhass_conf, logger)
  # Perform setup based on type of action
  if set_type == "perfect-optim":
@@ -79,25 +73,22 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  rh.df_final, days_list, var_list = pickle.load(inp)
  retrieve_hass_conf['var_load'] = str(var_list[0])
  retrieve_hass_conf['var_PV'] = str(var_list[1])
- retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
- retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']]
+ retrieve_hass_conf['var_interp'] = [
+ retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+ retrieve_hass_conf['var_replace_zero'] = [
+ retrieve_hass_conf['var_PV']]
  else:
- days_list = utils.get_days_list(retrieve_hass_conf["days_to_retrieve"])
- var_list = [retrieve_hass_conf["var_load"], retrieve_hass_conf["var_PV"]]
- if not rh.get_data(
- days_list,
- var_list,
- minimal_response=False,
- significant_changes_only=False,
- ):
+ days_list = utils.get_days_list(
+ retrieve_hass_conf["days_to_retrieve"])
+ var_list = [retrieve_hass_conf["var_load"],
+ retrieve_hass_conf["var_PV"]]
+ if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
  return False
- if not rh.prepare_data(
- retrieve_hass_conf["var_load"],
- load_negative=retrieve_hass_conf["load_negative"],
- set_zero_min=retrieve_hass_conf["set_zero_min"],
- var_replace_zero=retrieve_hass_conf["var_replace_zero"],
- var_interp=retrieve_hass_conf["var_interp"],
- ):
+ if not rh.prepare_data(retrieve_hass_conf["var_load"],
+ load_negative=retrieve_hass_conf["load_negative"],
+ set_zero_min=retrieve_hass_conf["set_zero_min"],
+ var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+ var_interp=retrieve_hass_conf["var_interp"]):
  return False
  df_input_data = rh.df_final.copy()
  # What we don't need for this type of action
@@ -105,30 +96,23 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  elif set_type == "dayahead-optim":
  # Get PV and load forecasts
  df_weather = fcst.get_weather_forecast(
- method=optim_conf["weather_forecast_method"]
- )
+ method=optim_conf["weather_forecast_method"])
  P_PV_forecast = fcst.get_power_from_weather(df_weather)
- P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'])
- if isinstance(P_load_forecast,bool) and not P_load_forecast:
- logger.error("Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
+ P_load_forecast = fcst.get_load_forecast(
+ method=optim_conf['load_forecast_method'])
+ if isinstance(P_load_forecast, bool) and not P_load_forecast:
+ logger.error(
+ "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
  return False
- df_input_data_dayahead = pd.DataFrame(
- np.transpose(np.vstack([P_PV_forecast.values, P_load_forecast.values])),
- index=P_PV_forecast.index,
- columns=["P_PV_forecast", "P_load_forecast"],
- )
+ df_input_data_dayahead = pd.DataFrame(np.transpose(np.vstack(
+ [P_PV_forecast.values, P_load_forecast.values])), index=P_PV_forecast.index,
+ columns=["P_PV_forecast", "P_load_forecast"])
  df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
  params = json.loads(params)
- if (
- "prediction_horizon" in params["passed_data"]
- and params["passed_data"]["prediction_horizon"] is not None
- ):
+ if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
  prediction_horizon = params["passed_data"]["prediction_horizon"]
  df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
- df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
- prediction_horizon - 1
- ]
- ]
+ df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
  # What we don't need for this type of action
  df_input_data, days_list = None, None
  elif set_type == "naive-mpc-optim":
@@ -138,53 +122,43 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  rh.df_final, days_list, var_list = pickle.load(inp)
  retrieve_hass_conf['var_load'] = str(var_list[0])
  retrieve_hass_conf['var_PV'] = str(var_list[1])
- retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
- retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']]
+ retrieve_hass_conf['var_interp'] = [
+ retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+ retrieve_hass_conf['var_replace_zero'] = [
+ retrieve_hass_conf['var_PV']]
  else:
  days_list = utils.get_days_list(1)
- var_list = [retrieve_hass_conf["var_load"], retrieve_hass_conf["var_PV"]]
- if not rh.get_data(
- days_list,
- var_list,
- minimal_response=False,
- significant_changes_only=False,
- ):
+ var_list = [retrieve_hass_conf["var_load"],
+ retrieve_hass_conf["var_PV"]]
+ if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
  return False
- if not rh.prepare_data(
- retrieve_hass_conf["var_load"],
- load_negative=retrieve_hass_conf["load_negative"],
- set_zero_min=retrieve_hass_conf["set_zero_min"],
- var_replace_zero=retrieve_hass_conf["var_replace_zero"],
- var_interp=retrieve_hass_conf["var_interp"],
- ):
+ if not rh.prepare_data(retrieve_hass_conf["var_load"],
+ load_negative=retrieve_hass_conf["load_negative"],
+ set_zero_min=retrieve_hass_conf["set_zero_min"],
+ var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+ var_interp=retrieve_hass_conf["var_interp"]):
  return False
  df_input_data = rh.df_final.copy()
  # Get PV and load forecasts
- df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
- P_PV_forecast = fcst.get_power_from_weather(df_weather, set_mix_forecast=True, df_now=df_input_data)
- P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
- if isinstance(P_load_forecast,bool) and not P_load_forecast:
- logger.error("Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
+ df_weather = fcst.get_weather_forecast(
+ method=optim_conf['weather_forecast_method'])
+ P_PV_forecast = fcst.get_power_from_weather(
+ df_weather, set_mix_forecast=True, df_now=df_input_data)
+ P_load_forecast = fcst.get_load_forecast(
+ method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
+ if isinstance(P_load_forecast, bool) and not P_load_forecast:
+ logger.error(
+ "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
  return False
  df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
  df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
  df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
  params = json.loads(params)
- if (
- "prediction_horizon" in params["passed_data"]
- and params["passed_data"]["prediction_horizon"] is not None
- ):
+ if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
  prediction_horizon = params["passed_data"]["prediction_horizon"]
  df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
- df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
- prediction_horizon - 1
- ]
- ]
- elif (
- set_type == "forecast-model-fit"
- or set_type == "forecast-model-predict"
- or set_type == "forecast-model-tune"
- ):
+ df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
+ elif (set_type == "forecast-model-fit" or set_type == "forecast-model-predict" or set_type == "forecast-model-tune"):
  df_input_data_dayahead = None
  P_PV_forecast, P_load_forecast = None, None
  params = json.loads(params)
@@ -198,18 +172,14 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  filename_path = emhass_conf['data_path'] / filename
  with open(filename_path, 'rb') as inp:
  df_input_data, _ = pickle.load(inp)
- df_input_data = df_input_data[
- df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve) :
- ]
+ df_input_data = df_input_data[df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve):]
  else:
  days_list = utils.get_days_list(days_to_retrieve)
  var_list = [var_model]
  if not rh.get_data(days_list, var_list):
  return False
  df_input_data = rh.df_final.copy()
-
  elif set_type == "regressor-model-fit" or set_type == "regressor-model-predict":
-
  df_input_data, df_input_data_dayahead = None, None
  P_PV_forecast, P_load_forecast = None, None
  params = json.loads(params)
@@ -225,32 +195,26 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  if get_data_from_file:
  base_path = emhass_conf["data_path"] # + "/data"
  filename_path = pathlib.Path(base_path) / csv_file
-
  else:
  filename_path = emhass_conf["data_path"] / csv_file
-
  if filename_path.is_file():
  df_input_data = pd.read_csv(filename_path, parse_dates=True)
-
  else:
- logger.error("The CSV file " + csv_file + " was not found in path: " + str(emhass_conf["data_path"]))
+ logger.error("The CSV file " + csv_file +
+ " was not found in path: " + str(emhass_conf["data_path"]))
  return False
- #raise ValueError("The CSV file " + csv_file + " was not found.")
+ # raise ValueError("The CSV file " + csv_file + " was not found.")
  required_columns = []
  required_columns.extend(features)
  required_columns.append(target)
  if timestamp is not None:
  required_columns.append(timestamp)
-
  if not set(required_columns).issubset(df_input_data.columns):
- logger.error("The cvs file does not contain the required columns.")
+ logger.error(
+ "The cvs file does not contain the required columns.")
  msg = f"CSV file should contain the following columns: {', '.join(required_columns)}"
  logger.error(msg)
  return False
- #raise ValueError(
- # msg,
- #)
-
  elif set_type == "publish-data":
  df_input_data, df_input_data_dayahead = None, None
  P_PV_forecast, P_load_forecast = None, None
@@ -262,7 +226,6 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  df_input_data, df_input_data_dayahead = None, None
  P_PV_forecast, P_load_forecast = None, None
  days_list = None
-
  # The input data dictionary to return
  input_data_dict = {
  'emhass_conf': emhass_conf,
@@ -281,12 +244,9 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
  return input_data_dict
 
 
- def perfect_forecast_optim(
- input_data_dict: dict,
- logger: logging.Logger,
- save_data_to_file: Optional[bool] = True,
- debug: Optional[bool] = False,
- ) -> pd.DataFrame:
+ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
+ save_data_to_file: Optional[bool] = True,
+ debug: Optional[bool] = False) -> pd.DataFrame:
  """
  Perform a call to the perfect forecast optimization routine.
 
@@ -305,33 +265,45 @@ def perfect_forecast_optim(
  logger.info("Performing perfect forecast optimization")
  # Load cost and prod price forecast
  df_input_data = input_data_dict['fcst'].get_load_cost_forecast(
- input_data_dict['df_input_data'],
+ input_data_dict['df_input_data'],
  method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'],
  list_and_perfect=True)
- if isinstance(df_input_data,bool) and not df_input_data:
+ if isinstance(df_input_data, bool) and not df_input_data:
  return False
  df_input_data = input_data_dict['fcst'].get_prod_price_forecast(
  df_input_data, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'],
  list_and_perfect=True)
- if isinstance(df_input_data,bool) and not df_input_data:
- return False
- opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(df_input_data, input_data_dict['days_list'])
+ if isinstance(df_input_data, bool) and not df_input_data:
+ return False
+ opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(
+ df_input_data, input_data_dict['days_list'])
  # Save CSV file for analysis
  if save_data_to_file:
- filename = "opt_res_perfect_optim_" + input_data_dict["costfun"] + ".csv"
+ filename = "opt_res_perfect_optim_" + \
+ input_data_dict["costfun"] + ".csv"
  else: # Just save the latest optimization results
  filename = "opt_res_latest.csv"
  if not debug:
- opt_res.to_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
+ opt_res.to_csv(
+ input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
+
+
+ if not isinstance(input_data_dict["params"],dict):
+ params = json.loads(input_data_dict["params"])
+ else:
+ params = input_data_dict["params"]
+
+ # if continual_publish, save perfect results to data_path/entities json
+ if input_data_dict["retrieve_hass_conf"].get("continual_publish",False) or params["passed_data"].get("entity_save",False):
+ #Trigger the publish function, save entity data and not post to HA
+ publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
+
  return opt_res
 
 
- def dayahead_forecast_optim(
- input_data_dict: dict,
- logger: logging.Logger,
- save_data_to_file: Optional[bool] = False,
- debug: Optional[bool] = False,
- ) -> pd.DataFrame:
+ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
+ save_data_to_file: Optional[bool] = False,
+ debug: Optional[bool] = False) -> pd.DataFrame:
  """
  Perform a call to the day-ahead optimization routine.
 
@@ -352,13 +324,13 @@ def dayahead_forecast_optim(
  df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
  input_data_dict['df_input_data_dayahead'],
  method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
- if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
- return False
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+ return False
  df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
- df_input_data_dayahead,
+ df_input_data_dayahead,
  method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
- if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
- return False
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+ return False
  opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
  df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
  # Save CSV file for publish_data
@@ -370,16 +342,26 @@ def dayahead_forecast_optim(
  else: # Just save the latest optimization results
  filename = "opt_res_latest.csv"
  if not debug:
- opt_res_dayahead.to_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
+ opt_res_dayahead.to_csv(
+ input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
+
+ if not isinstance(input_data_dict["params"],dict):
+ params = json.loads(input_data_dict["params"])
+ else:
+ params = input_data_dict["params"]
+
+
+ # if continual_publish, save day_ahead results to data_path/entities json
+ if input_data_dict["retrieve_hass_conf"].get("continual_publish",False) or params["passed_data"].get("entity_save",False):
+ #Trigger the publish function, save entity data and not post to HA
+ publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
+
  return opt_res_dayahead
 
 
- def naive_mpc_optim(
- input_data_dict: dict,
- logger: logging.Logger,
- save_data_to_file: Optional[bool] = False,
- debug: Optional[bool] = False,
- ) -> pd.DataFrame:
+ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
+ save_data_to_file: Optional[bool] = False,
+ debug: Optional[bool] = False) -> pd.DataFrame:
  """
  Perform a call to the naive Model Predictive Controller optimization routine.
 
@@ -400,12 +382,12 @@ def naive_mpc_optim(
  df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
  input_data_dict['df_input_data_dayahead'],
  method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
- if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
- return False
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+ return False
  df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
  df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
- if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
- return False
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
+ return False
  # The specifics params for the MPC at runtime
  prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
  soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
@@ -414,16 +396,9 @@ def naive_mpc_optim(
  def_start_timestep = input_data_dict["params"]["passed_data"]["def_start_timestep"]
  def_end_timestep = input_data_dict["params"]["passed_data"]["def_end_timestep"]
  opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
- df_input_data_dayahead,
- input_data_dict["P_PV_forecast"],
- input_data_dict["P_load_forecast"],
- prediction_horizon,
- soc_init,
- soc_final,
- def_total_hours,
- def_start_timestep,
- def_end_timestep,
- )
+ df_input_data_dayahead, input_data_dict["P_PV_forecast"], input_data_dict["P_load_forecast"],
+ prediction_horizon, soc_init, soc_final, def_total_hours,
+ def_start_timestep, def_end_timestep)
  # Save CSV file for publish_data
  if save_data_to_file:
  today = datetime.now(timezone.utc).replace(
@@ -433,13 +408,24 @@ def naive_mpc_optim(
  else: # Just save the latest optimization results
  filename = "opt_res_latest.csv"
  if not debug:
- opt_res_naive_mpc.to_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
+ opt_res_naive_mpc.to_csv(
+ input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
+
+ if not isinstance(input_data_dict["params"],dict):
+ params = json.loads(input_data_dict["params"])
+ else:
+ params = input_data_dict["params"]
+
+ # if continual_publish, save mpc results to data_path/entities json
+ if input_data_dict["retrieve_hass_conf"].get("continual_publish",False) or params["passed_data"].get("entity_save",False):
+ #Trigger the publish function, save entity data and not post to HA
+ publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
+
  return opt_res_naive_mpc
 
 
- def forecast_model_fit(
- input_data_dict: dict, logger: logging.Logger, debug: Optional[bool] = False
- ) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
+ def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
+ debug: Optional[bool] = False) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
  """Perform a forecast model fit from training data retrieved from Home Assistant.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -459,7 +445,8 @@ def forecast_model_fit(
  split_date_delta = input_data_dict['params']['passed_data']['split_date_delta']
  perform_backtest = input_data_dict['params']['passed_data']['perform_backtest']
  # The ML forecaster object
- mlf = MLForecaster(data, model_type, var_model, sklearn_model, num_lags, input_data_dict['emhass_conf'], logger)
+ mlf = MLForecaster(data, model_type, var_model, sklearn_model,
+ num_lags, input_data_dict['emhass_conf'], logger)
  # Fit the ML model
  df_pred, df_pred_backtest = mlf.fit(
  split_date_delta=split_date_delta, perform_backtest=perform_backtest
@@ -473,13 +460,10 @@ def forecast_model_fit(
  return df_pred, df_pred_backtest, mlf
 
 
- def forecast_model_predict(
- input_data_dict: dict,
- logger: logging.Logger,
- use_last_window: Optional[bool] = True,
- debug: Optional[bool] = False,
- mlf: Optional[MLForecaster] = None,
- ) -> pd.DataFrame:
+ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
+ use_last_window: Optional[bool] = True,
+ debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
+ ) -> pd.DataFrame:
  r"""Perform a forecast model predict using a previously trained skforecast model.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -539,40 +523,24 @@ def forecast_model_predict(
  input_data_dict["retrieve_hass_conf"]["time_zone"]
  ).replace(second=0, microsecond=0)
  if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
- idx_closest = predictions.index.get_indexer(
- [now_precise], method="nearest"
- )[0]
+ idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
  elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
- idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[
- 0
- ]
+ idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[0]
  elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
- idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[
- 0
- ]
+ idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[0]
  if idx_closest == -1:
- idx_closest = predictions.index.get_indexer(
- [now_precise], method="nearest"
- )[0]
+ idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
  # Publish Load forecast
  input_data_dict["rh"].post_data(
- predictions,
- idx_closest,
- model_predict_entity_id,
- model_predict_unit_of_measurement,
- model_predict_friendly_name,
- type_var="mlforecaster",
- publish_prefix=publish_prefix,
- )
+ predictions, idx_closest, model_predict_entity_id,
+ model_predict_unit_of_measurement, model_predict_friendly_name,
+ type_var="mlforecaster", publish_prefix=publish_prefix)
  return predictions
 
 
- def forecast_model_tune(
- input_data_dict: dict,
- logger: logging.Logger,
- debug: Optional[bool] = False,
- mlf: Optional[MLForecaster] = None,
- ) -> Tuple[pd.DataFrame, MLForecaster]:
+ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
+ debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
+ ) -> Tuple[pd.DataFrame, MLForecaster]:
  """Tune a forecast model hyperparameters using bayesian optimization.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -607,15 +575,12 @@ def forecast_model_tune(
  filename = model_type+'_mlf.pkl'
  filename_path = input_data_dict['emhass_conf']['data_path'] / filename
  with open(filename_path, 'wb') as outp:
- pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
+ pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
  return df_pred_optim, mlf
 
 
- def regressor_model_fit(
- input_data_dict: dict,
- logger: logging.Logger,
- debug: Optional[bool] = False,
- ) -> None:
+ def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
+ debug: Optional[bool] = False) -> MLRegressor:
  """Perform a forecast model fit from training data retrieved from Home Assistant.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -640,33 +605,24 @@ def regressor_model_fit(
  features = input_data_dict["params"]["passed_data"]["features"]
  else:
  logger.error("parameter: 'features' not passed")
- return False
+ return False
  if "target" in input_data_dict["params"]["passed_data"]:
  target = input_data_dict["params"]["passed_data"]["target"]
  else:
  logger.error("parameter: 'target' not passed")
- return False
+ return False
  if "timestamp" in input_data_dict["params"]["passed_data"]:
  timestamp = input_data_dict["params"]["passed_data"]["timestamp"]
  else:
  logger.error("parameter: 'timestamp' not passed")
- return False
+ return False
  if "date_features" in input_data_dict["params"]["passed_data"]:
  date_features = input_data_dict["params"]["passed_data"]["date_features"]
  else:
  logger.error("parameter: 'date_features' not passed")
- return False
-
+ return False
  # The MLRegressor object
- mlr = MLRegressor(
- data,
- model_type,
- regression_model,
- features,
- target,
- timestamp,
- logger,
- )
+ mlr = MLRegressor(data, model_type, regression_model, features, target, timestamp, logger)
  # Fit the ML model
  mlr.fit(date_features=date_features)
  # Save model
@@ -678,12 +634,9 @@ def regressor_model_fit(
  return mlr
 
 
- def regressor_model_predict(
- input_data_dict: dict,
- logger: logging.Logger,
- debug: Optional[bool] = False,
- mlr: Optional[MLRegressor] = None,
- ) -> None:
+ def regressor_model_predict(input_data_dict: dict, logger: logging.Logger,
+ debug: Optional[bool] = False, mlr: Optional[MLRegressor] = None
+ ) -> np.ndarray:
  """Perform a prediction from csv file.
 
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -697,7 +650,7 @@ def regressor_model_predict(
  model_type = input_data_dict["params"]["passed_data"]["model_type"]
  else:
  logger.error("parameter: 'model_type' not passed")
- return False
+ return False
  filename = model_type + "_mlr.pkl"
  filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
  if not debug:
@@ -709,37 +662,34 @@ def regressor_model_predict(
  "The ML forecaster file was not found, please run a model fit method before this predict method",
  )
  return False
- if "new_values" in input_data_dict["params"]["passed_data"]:
+ if "new_values" in input_data_dict["params"]["passed_data"]:
  new_values = input_data_dict["params"]["passed_data"]["new_values"]
  else:
  logger.error("parameter: 'new_values' not passed")
- return False
+ return False
  # Predict from csv file
  prediction = mlr.predict(new_values)
-
- mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get("mlr_predict_entity_id","sensor.mlr_predict")
- mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get("mlr_predict_unit_of_measurement","h")
- mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get("mlr_predict_friendly_name","mlr predictor")
+ mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get(
+ "mlr_predict_entity_id", "sensor.mlr_predict")
+ mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get(
+ "mlr_predict_unit_of_measurement", "h")
+ mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get(
+ "mlr_predict_friendly_name", "mlr predictor")
  # Publish prediction
  idx = 0
  if not debug:
- input_data_dict["rh"].post_data(
- prediction,
- idx,
- mlr_predict_entity_id,
- mlr_predict_unit_of_measurement,
- mlr_predict_friendly_name,
- type_var="mlregressor",
- )
+ input_data_dict["rh"].post_data(prediction, idx, mlr_predict_entity_id,
+ mlr_predict_unit_of_measurement, mlr_predict_friendly_name,
+ type_var="mlregressor")
  return prediction
 
 
- def publish_data(
- input_data_dict: dict,
- logger: logging.Logger,
- save_data_to_file: Optional[bool] = False,
- opt_res_latest: Optional[pd.DataFrame] = None,
- ) -> pd.DataFrame:
+
+ def publish_data(input_data_dict: dict, logger: logging.Logger,
+ save_data_to_file: Optional[bool] = False,
+ opt_res_latest: Optional[pd.DataFrame] = None,
+ entity_save: Optional[bool] = False,
+ dont_post: Optional[bool] = False) -> pd.DataFrame:
  """
  Publish the data obtained from the optimization results.
 
@@ -751,23 +701,65 @@ def publish_data(
  :type save_data_to_file: bool, optional
  :return: The output data of the optimization readed from a CSV file in the data folder
  :rtype: pd.DataFrame
+ :param entity_save: Save built entities to data_path/entities
+ :type entity_save: bool, optional
+ :param dont_post: Do not post to Home Assistant. Works with entity_save
+ :type dont_post: bool, optional
 
  """
  logger.info("Publishing data to HASS instance")
+
+ if not isinstance(input_data_dict["params"],dict):
+ params = json.loads(input_data_dict["params"])
+ else:
+ params = input_data_dict["params"]
+
  # Check if a day ahead optimization has been performed (read CSV file)
  if save_data_to_file:
  today = datetime.now(timezone.utc).replace(
  hour=0, minute=0, second=0, microsecond=0
  )
  filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
+ # If publish_prefix is passed, check if there is saved entities in data_path/entities with prefix, publish to results
+ elif params["passed_data"].get("publish_prefix","") != "" and not dont_post:
+ opt_res_list = []
+ opt_res_list_names = []
+ publish_prefix = params["passed_data"]["publish_prefix"]
+ entity_path = input_data_dict['emhass_conf']['data_path'] / "entities"
+
+ # Check if items in entity_path
+ if os.path.exists(entity_path) and len(os.listdir(entity_path)) > 0:
+ # Obtain all files in entity_path
+ entity_path_contents = os.listdir(entity_path)
+ for entity in entity_path_contents:
+ if entity != "metadata.json":
+ # If publish_prefix is "all" publish all saved entities to Home Assistant
+ # If publish_prefix matches the prefix from saved entities, publish to Home Assistant
+ if publish_prefix in entity or publish_prefix == "all":
+ entity_data = publish_json(entity,input_data_dict,entity_path,logger)
+ if not isinstance(entity_data, bool):
+ opt_res_list.append(entity_data)
+ opt_res_list_names.append(entity.replace(".json", ""))
+ else:
+ return False
+ # Build a DataFrame with published entities
+ opt_res = pd.concat(opt_res_list, axis=1)
+ opt_res.columns = opt_res_list_names
+ return opt_res
+ else:
+ logger.warning("no saved entity json files in path:" + str(entity_path))
+ logger.warning("falling back to opt_res_latest")
+ filename = "opt_res_latest.csv"
  else:
  filename = "opt_res_latest.csv"
  if opt_res_latest is None:
  if not os.path.isfile(input_data_dict['emhass_conf']['data_path'] / filename):
- logger.error("File not found error, run an optimization task first.")
+ logger.error(
+ "File not found error, run an optimization task first.")
  return
  else:
- opt_res_latest = pd.read_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_col='timestamp')
+ opt_res_latest = pd.read_csv(
+ input_data_dict['emhass_conf']['data_path'] / filename, index_col='timestamp')
  opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
  opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"]["freq"]
  # Estimate the current index
@@ -775,19 +767,16 @@ def publish_data(
  input_data_dict["retrieve_hass_conf"]["time_zone"]
  ).replace(second=0, microsecond=0)
  if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
- 0
- ]
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
  elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method="ffill")[0]
+ idx_closest = opt_res_latest.index.get_indexer(
+ [now_precise], method="ffill")[0]
  elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method="bfill")[0]
+ idx_closest = opt_res_latest.index.get_indexer(
+ [now_precise], method="bfill")[0]
  if idx_closest == -1:
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
- 0
- ]
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
  # Publish the data
- params = json.loads(input_data_dict["params"])
  publish_prefix = params["passed_data"]["publish_prefix"]
  # Publish PV forecast
  custom_pv_forecast_id = params["passed_data"]["custom_pv_forecast_id"]
@@ -799,6 +788,8 @@ def publish_data(
  custom_pv_forecast_id["friendly_name"],
  type_var="power",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
  # Publish Load forecast
  custom_load_forecast_id = params["passed_data"]["custom_load_forecast_id"]
@@ -810,6 +801,8 @@ def publish_data(
  custom_load_forecast_id["friendly_name"],
  type_var="power",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
  cols_published = ["P_PV", "P_Load"]
  # Publish deferrable loads
@@ -831,6 +824,8 @@ def publish_data(
  custom_deferrable_forecast_id[k]["friendly_name"],
  type_var="deferrable",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
  cols_published = cols_published + ["P_deferrable{}".format(k)]
  # Publish battery power
@@ -849,6 +844,8 @@ def publish_data(
  custom_batt_forecast_id["friendly_name"],
  type_var="batt",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
  cols_published = cols_published + ["P_batt"]
  custom_batt_soc_forecast_id = params["passed_data"][
@@ -862,6 +859,8 @@ def publish_data(
  custom_batt_soc_forecast_id["friendly_name"],
  type_var="SOC",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
  cols_published = cols_published + ["SOC_opt"]
  # Publish grid power
@@ -874,6 +873,8 @@ def publish_data(
  custom_grid_forecast_id["friendly_name"],
  type_var="power",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
  cols_published = cols_published + ["P_grid"]
  # Publish total value of cost function
@@ -887,7 +888,10 @@ def publish_data(
  custom_cost_fun_id["friendly_name"],
  type_var="cost_fun",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
+ # cols_published = cols_published + col_cost_fun
  # Publish the optimization status
  custom_cost_fun_id = params["passed_data"]["custom_optim_status_id"]
  if "optim_status" not in opt_res_latest:
@@ -903,6 +907,8 @@ def publish_data(
  custom_cost_fun_id["friendly_name"],
  type_var="optim_status",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
  cols_published = cols_published + ["optim_status"]
  # Publish unit_load_cost
@@ -915,6 +921,8 @@ def publish_data(
  custom_unit_load_cost_id["friendly_name"],
  type_var="unit_load_cost",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
  cols_published = cols_published + ["unit_load_cost"]
  # Publish unit_prod_price
@@ -927,12 +935,117 @@ def publish_data(
  custom_unit_prod_price_id["friendly_name"],
  type_var="unit_prod_price",
  publish_prefix=publish_prefix,
+ save_entities=entity_save,
+ dont_post=dont_post
  )
  cols_published = cols_published + ["unit_prod_price"]
  # Create a DF resuming what has been published
- opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
+ opt_res = opt_res_latest[cols_published].loc[[
+ opt_res_latest.index[idx_closest]]]
  return opt_res
 
+ def continual_publish(input_data_dict,entity_path,logger):
+ """
+ If continual_publish is true and a entity file saved in /data_path/entities, continually publish sensor on freq rate, updating entity current state value based on timestamp
+
+ :param input_data_dict: A dictionnary with multiple data used by the action functions
+ :type input_data_dict: dict
+ :param entity_path: Path for entities folder in data_path
+ :type entity_path: Path
+ :param logger: The passed logger object
+ :type logger: logging.Logger
+
+ """
+ logger.info("Continual publish thread service started")
+ freq = input_data_dict['retrieve_hass_conf'].get("freq", pd.to_timedelta(1, "minutes"))
+ entity_path_contents = []
+
+ while True:
+ # Sleep for x seconds (using current time as a reference for time left)
+ time.sleep(max(0,freq.total_seconds() - (datetime.now(input_data_dict["retrieve_hass_conf"]["time_zone"]).timestamp() % 60)))
+
+ # Loop through all saved entity files
+ if os.path.exists(entity_path) and len(os.listdir(entity_path)) > 0:
+ entity_path_contents = os.listdir(entity_path)
+ for entity in entity_path_contents:
+ if entity != "metadata.json":
+ # Call publish_json with entity file, build entity, and publish
+ publish_json(entity,input_data_dict,entity_path,logger,"continual_publish")
+ pass
+ # This function should never return
+ return False
+
+ def publish_json(entity,input_data_dict,entity_path,logger,reference: Optional[str] = ""):
+ """
+ Extract saved entity data from .json (in data_path/entities), build entity, post results to post_data
+
+ :param entity: json file containing entity data
+ :type entity: dict
+ :param input_data_dict: A dictionnary with multiple data used by the action functions
+ :type input_data_dict: dict
+ :param entity_path: Path for entities folder in data_path
+ :type entity_path: Path
+ :param logger: The passed logger object
+ :type logger: logging.Logger
+ :param reference: String for identifying who ran the function
+ :type reference: str, optional
+
+ """
+
+ # Retrieve entity metadata from file
+ if os.path.isfile(entity_path / "metadata.json"):
+ with open(entity_path / "metadata.json", "r") as file:
+ metadata = json.load(file)
+ if not metadata.get("lowest_freq",None) == None:
+ freq = pd.to_timedelta(metadata["lowest_freq"], "minutes")
+ else:
+ logger.error("unable to located metadata.json in:" + entity_path)
+ return False
+
+ # Round current timecode (now)
+ now_precise = datetime.now(input_data_dict["retrieve_hass_conf"]["time_zone"]).replace(second=0, microsecond=0)
+
+ # Retrieve entity data from file
+ entity_data = pd.read_json(entity_path / entity , orient='index')
+
+ # Remove ".json" from string for entity_id
+ entity_id = entity.replace(".json", "")
+
+ # Adjust Dataframe from received entity json file
+ entity_data.columns = [metadata[entity_id]["name"]]
+ entity_data.index.name = "timestamp"
+ entity_data.index = pd.to_datetime(entity_data.index).tz_convert(input_data_dict["retrieve_hass_conf"]["time_zone"])
+ entity_data.index.freq = pd.to_timedelta(int(metadata[entity_id]["freq"]), "minutes")
+ # Calculate the current state value
+ if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+ idx_closest = entity_data.index.get_indexer([now_precise], method="nearest")[0]
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+ idx_closest = entity_data.index.get_indexer([now_precise], method="ffill")[0]
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+ idx_closest = entity_data.index.get_indexer([now_precise], method="bfill")[0]
+ if idx_closest == -1:
+ idx_closest = entity_data.index.get_indexer([now_precise], method="nearest")[0]
+
+ # Call post data
+ if reference == "continual_publish":
+ logger.debug("Auto Published sensor:")
+ logger_levels = "DEBUG"
+ else:
+ logger_levels = "INFO"
+
+ #post/save entity
+ input_data_dict["rh"].post_data(
+ data_df=entity_data[metadata[entity_id]["name"]],
+ idx=idx_closest,
+ entity_id=entity_id,
+ unit_of_measurement=metadata[entity_id]["unit_of_measurement"],
+ friendly_name=metadata[entity_id]["friendly_name"],
+ type_var=metadata[entity_id].get("type_var",""),
+ save_entities=False,
+ logger_levels=logger_levels
+ )
+ return entity_data[metadata[entity_id]["name"]]
+
 
  def main():
  r"""Define the main command line entry function.
@@ -959,59 +1072,59 @@ def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim,\
  naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune')
- parser.add_argument('--config', type=str, help='Define path to the config.yaml file')
- parser.add_argument('--data', type=str, help='Define path to the Data files (.csv & .pkl)')
+ parser.add_argument('--config', type=str,
+ help='Define path to the config.yaml file')
+ parser.add_argument('--data', type=str,
+ help='Define path to the Data files (.csv & .pkl)')
  parser.add_argument('--root', type=str, help='Define path emhass root')
- parser.add_argument('--costfun', type=str, default='profit', help='Define the type of cost function, options are: profit, cost, self-consumption')
- parser.add_argument('--log2file', type=strtobool, default='False', help='Define if we should log to a file or not')
- parser.add_argument('--params', type=str, default=None, help='Configuration parameters passed from data/options.json')
- parser.add_argument('--runtimeparams', type=str, default=None, help='Pass runtime optimization parameters as dictionnary')
- parser.add_argument('--debug', type=strtobool, default='False', help='Use True for testing purposes')
+ parser.add_argument('--costfun', type=str, default='profit',
+ help='Define the type of cost function, options are: profit, cost, self-consumption')
+ parser.add_argument('--log2file', type=strtobool, default='False',
+ help='Define if we should log to a file or not')
+ parser.add_argument('--params', type=str, default=None,
+ help='Configuration parameters passed from data/options.json')
+ parser.add_argument('--runtimeparams', type=str, default=None,
+ help='Pass runtime optimization parameters as dictionnary')
+ parser.add_argument('--debug', type=strtobool,
+ default='False', help='Use True for testing purposes')
  args = parser.parse_args()
  # The path to the configuration files
-
  if args.config is not None:
  config_path = pathlib.Path(args.config)
  else:
- config_path = pathlib.Path(str(utils.get_root(__file__, num_parent=2) / 'config_emhass.yaml' ))
-
+ config_path = pathlib.Path(
+ str(utils.get_root(__file__, num_parent=2) / 'config_emhass.yaml'))
  if args.data is not None:
  data_path = pathlib.Path(args.data)
  else:
  data_path = (config_path.parent / 'data/')
-
  if args.root is not None:
  root_path = pathlib.Path(args.root)
  else:
  root_path = config_path.parent
-
  emhass_conf = {}
  emhass_conf['config_path'] = config_path
  emhass_conf['data_path'] = data_path
  emhass_conf['root_path'] = root_path
  # create logger
- logger, ch = utils.get_logger(__name__, emhass_conf, save_to_file=bool(args.log2file))
-
+ logger, ch = utils.get_logger(
+ __name__, emhass_conf, save_to_file=bool(args.log2file))
  logger.debug("config path: " + str(config_path))
  logger.debug("data path: " + str(data_path))
  logger.debug("root path: " + str(root_path))
-
-
  if not config_path.exists():
- logger.error("Could not find config_emhass.yaml file in: " + str(config_path))
- logger.error("Try setting config file path with --config" )
+ logger.error(
+ "Could not find config_emhass.yaml file in: " + str(config_path))
+ logger.error("Try setting config file path with --config")
  return False
-
  if not os.path.isdir(data_path):
  logger.error("Could not find data foulder in: " + str(data_path))
- logger.error("Try setting data path with --data" )
+ logger.error("Try setting data path with --data")
  return False
-
  if not os.path.isdir(root_path / 'src'):
  logger.error("Could not find emhass/src foulder in: " + str(root_path))
- logger.error("Try setting emhass root path with --root" )
+ logger.error("Try setting emhass root path with --root")
  return False
-
  # Additionnal argument
  try:
  parser.add_argument(
@@ -1025,14 +1138,16 @@ def main():
  "Version not found for emhass package. Or importlib exited with PackageNotFoundError.",
  )
  # Setup parameters
- input_data_dict = set_input_data_dict(emhass_conf,
- args.costfun, args.params, args.runtimeparams, args.action,
+ input_data_dict = set_input_data_dict(emhass_conf,
+ args.costfun, args.params, args.runtimeparams, args.action,
  logger, args.debug)
  # Perform selected action
  if args.action == "perfect-optim":
- opt_res = perfect_forecast_optim(input_data_dict, logger, debug=args.debug)
+ opt_res = perfect_forecast_optim(
+ input_data_dict, logger, debug=args.debug)
  elif args.action == "dayahead-optim":
- opt_res = dayahead_forecast_optim(input_data_dict, logger, debug=args.debug)
+ opt_res = dayahead_forecast_optim(
+ input_data_dict, logger, debug=args.debug)
  elif args.action == "naive-mpc-optim":
  opt_res = naive_mpc_optim(input_data_dict, logger, debug=args.debug)
  elif args.action == "forecast-model-fit":
@@ -1045,18 +1160,14 @@ def main():
  _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
  else:
  mlf = None
- df_pred = forecast_model_predict(
- input_data_dict, logger, debug=args.debug, mlf=mlf
- )
+ df_pred = forecast_model_predict(input_data_dict, logger, debug=args.debug, mlf=mlf)
  opt_res = None
  elif args.action == "forecast-model-tune":
  if args.debug:
  _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
  else:
  mlf = None
- df_pred_optim, mlf = forecast_model_tune(
- input_data_dict, logger, debug=args.debug, mlf=mlf
- )
+ df_pred_optim, mlf = forecast_model_tune(input_data_dict, logger, debug=args.debug, mlf=mlf)
  opt_res = None
  elif args.action == "regressor-model-fit":
  mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
@@ -1066,15 +1177,10 @@ def main():
  mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
  else:
  mlr = None
- prediction = regressor_model_predict(
- input_data_dict,
- logger,
- debug=args.debug,
- mlr=mlr,
- )
+ prediction = regressor_model_predict(input_data_dict, logger, debug=args.debug,mlr=mlr)
  opt_res = None
  elif args.action == "publish-data":
- opt_res = publish_data(input_data_dict, logger)
+ opt_res = publish_data(input_data_dict,logger)
  else:
  logger.error("The passed action argument is not valid")
  logger.error("Try setting --action: perfect-optim, dayahead-optim, naive-mpc-optim, forecast-model-fit, forecast-model-predict, forecast-model-tune or publish-data")
@@ -1100,7 +1206,7 @@ def main():
  return prediction
  elif args.action == "forecast-model-tune":
  return df_pred_optim, mlf
- else:
+ else:
  return opt_res
 
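
For orientation only, the following sketch shows one way the publish flow introduced in this version (the entity_save / dont_post arguments of publish_data and the continual_publish helper) could be wired together. It is not part of the diff above: the config file location, the "profit" cost function and the "dayahead-optim" action are placeholder assumptions, and a reachable Home Assistant instance with a valid config_emhass.yaml is assumed.

# Illustrative sketch, not released code: exercising the 0.10.0 publish flow.
import logging
import pathlib
import threading

from emhass.command_line import (set_input_data_dict, dayahead_forecast_optim,
                                 publish_data, continual_publish)

logger = logging.getLogger(__name__)

config_path = pathlib.Path("config_emhass.yaml")  # assumed location of the config file
emhass_conf = {
    "config_path": config_path,
    "data_path": config_path.parent / "data",
    "root_path": config_path.parent,
}

# Prepare inputs and run a day-ahead optimization; when continual_publish or
# entity_save is enabled, results are also written to data_path/entities as JSON.
input_data_dict = set_input_data_dict(emhass_conf, "profit", None, None,
                                      "dayahead-optim", logger)
opt_res = dayahead_forecast_optim(input_data_dict, logger)

# Save the built entities to data_path/entities without posting them to Home Assistant.
publish_data(input_data_dict, logger, entity_save=True, dont_post=True)

# Republish the saved entities on every freq period from a background thread.
entity_path = emhass_conf["data_path"] / "entities"
threading.Thread(target=continual_publish,
                 args=(input_data_dict, entity_path, logger),
                 daemon=True).start()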