emhass 0.8.6__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
emhass/command_line.py CHANGED
@@ -8,36 +8,35 @@ import logging
 import json
 import copy
 import pickle
-import time
-import numpy as np
-import pandas as pd
 from datetime import datetime, timezone
 from typing import Optional, Tuple
+from importlib.metadata import version
+import numpy as np
+import pandas as pd
+
 from distutils.util import strtobool
 
-from importlib.metadata import version
 from emhass.retrieve_hass import RetrieveHass
 from emhass.forecast import Forecast
 from emhass.machine_learning_forecaster import MLForecaster
 from emhass.optimization import Optimization
+from emhass.machine_learning_regressor import MLRegressor
 from emhass import utils
 
 
-def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
+def set_input_data_dict(emhass_conf: dict, costfun: str,
     params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
     get_data_from_file: Optional[bool] = False) -> dict:
     """
     Set up some of the data needed for the different actions.
 
-    :param config_path: The complete absolute path where the config.yaml file is located
-    :type config_path: pathlib.Path
-    :param base_path: The parent folder of the config_path
-    :type base_path: str
+    :param emhass_conf: Dictionary containing the needed emhass paths
+    :type emhass_conf: dict
     :param costfun: The type of cost function to use for optimization problem
     :type costfun: str
     :param params: Configuration parameters passed from data/options.json
     :type params: str
-    :param runtimeparams: Runtime optimization parameters passed as a dictionnary
+    :param runtimeparams: Runtime optimization parameters passed as a dictionary
     :type runtimeparams: str
     :param set_type: Set the type of setup based on following type of optimization
     :type set_type: str
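
Note on the signature change above: the separate config_path and base_path arguments are gone, and callers now hand over a single emhass_conf dictionary of paths. A minimal sketch of the new call, with hypothetical install paths (the three dictionary keys mirror the ones assembled in main() at the bottom of this file):

    import logging
    import pathlib
    from emhass.command_line import set_input_data_dict

    # Hypothetical install locations; only the key names come from this diff.
    emhass_conf = {
        'config_path': pathlib.Path('/app/config_emhass.yaml'),
        'data_path': pathlib.Path('/app/data'),
        'root_path': pathlib.Path('/app'),
    }
    logger = logging.getLogger(__name__)
    params, runtimeparams = None, None  # as main() passes when the CLI flags are omitted
    input_data_dict = set_input_data_dict(
        emhass_conf, 'profit', params, runtimeparams, 'dayahead-optim', logger)
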
@@ -52,120 +51,221 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
     logger.info("Setting up needed data")
     # Parsing yaml
     retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(
-        config_path, use_secrets=not(get_data_from_file), params=params)
+        emhass_conf, use_secrets=not(get_data_from_file), params=params)
     # Treat runtimeparams
     params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
-        runtimeparams, params, retrieve_hass_conf,
-        optim_conf, plant_conf, set_type, logger)
+        runtimeparams,
+        params,
+        retrieve_hass_conf,
+        optim_conf,
+        plant_conf,
+        set_type,
+        logger,
+    )
     # Define main objects
     rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
                       retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
-                      params, base_path, logger, get_data_from_file=get_data_from_file)
+                      params, emhass_conf, logger, get_data_from_file=get_data_from_file)
     fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
-                    params, base_path, logger, get_data_from_file=get_data_from_file)
+                    params, emhass_conf, logger, get_data_from_file=get_data_from_file)
     opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
                        fcst.var_load_cost, fcst.var_prod_price,
-                       costfun, base_path, logger)
+                       costfun, emhass_conf, logger)
     # Perform setup based on type of action
     if set_type == "perfect-optim":
         # Retrieve data from hass
         if get_data_from_file:
-            with open(pathlib.Path(base_path) / 'data' / 'test_df_final.pkl', 'rb') as inp:
+            with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
                 rh.df_final, days_list, var_list = pickle.load(inp)
+                retrieve_hass_conf['var_load'] = str(var_list[0])
+                retrieve_hass_conf['var_PV'] = str(var_list[1])
+                retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+                retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']]
         else:
-            days_list = utils.get_days_list(retrieve_hass_conf['days_to_retrieve'])
-            var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
-            if not rh.get_data(days_list, var_list,
-                               minimal_response=False, significant_changes_only=False):
-                return False
-            if not rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
-                                   set_zero_min = retrieve_hass_conf['set_zero_min'],
-                                   var_replace_zero = retrieve_hass_conf['var_replace_zero'],
-                                   var_interp = retrieve_hass_conf['var_interp']):
+            days_list = utils.get_days_list(retrieve_hass_conf["days_to_retrieve"])
+            var_list = [retrieve_hass_conf["var_load"], retrieve_hass_conf["var_PV"]]
+            if not rh.get_data(
+                days_list,
+                var_list,
+                minimal_response=False,
+                significant_changes_only=False,
+            ):
+                return False
+            if not rh.prepare_data(
+                retrieve_hass_conf["var_load"],
+                load_negative=retrieve_hass_conf["load_negative"],
+                set_zero_min=retrieve_hass_conf["set_zero_min"],
+                var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+                var_interp=retrieve_hass_conf["var_interp"],
+            ):
                 return False
         df_input_data = rh.df_final.copy()
         # What we don't need for this type of action
         P_PV_forecast, P_load_forecast, df_input_data_dayahead = None, None, None
     elif set_type == "dayahead-optim":
         # Get PV and load forecasts
-        df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
+        df_weather = fcst.get_weather_forecast(
+            method=optim_conf["weather_forecast_method"]
+        )
         P_PV_forecast = fcst.get_power_from_weather(df_weather)
         P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'])
         if isinstance(P_load_forecast,bool) and not P_load_forecast:
             logger.error("Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
             return False
-        df_input_data_dayahead = pd.DataFrame(np.transpose(np.vstack([P_PV_forecast.values,P_load_forecast.values])),
-                                              index=P_PV_forecast.index,
-                                              columns=['P_PV_forecast', 'P_load_forecast'])
+        df_input_data_dayahead = pd.DataFrame(
+            np.transpose(np.vstack([P_PV_forecast.values, P_load_forecast.values])),
+            index=P_PV_forecast.index,
+            columns=["P_PV_forecast", "P_load_forecast"],
+        )
         df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
         params = json.loads(params)
-        if 'prediction_horizon' in params['passed_data'] and params['passed_data']['prediction_horizon'] is not None:
-            prediction_horizon = params['passed_data']['prediction_horizon']
-            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[df_input_data_dayahead.index[0]:df_input_data_dayahead.index[prediction_horizon-1]]
+        if (
+            "prediction_horizon" in params["passed_data"]
+            and params["passed_data"]["prediction_horizon"] is not None
+        ):
+            prediction_horizon = params["passed_data"]["prediction_horizon"]
+            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
+                df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
+                    prediction_horizon - 1
+                ]
+            ]
         # What we don't need for this type of action
         df_input_data, days_list = None, None
     elif set_type == "naive-mpc-optim":
         # Retrieve data from hass
         if get_data_from_file:
-            with open(pathlib.Path(base_path) / 'data' / 'test_df_final.pkl', 'rb') as inp:
+            with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
                 rh.df_final, days_list, var_list = pickle.load(inp)
+                retrieve_hass_conf['var_load'] = str(var_list[0])
+                retrieve_hass_conf['var_PV'] = str(var_list[1])
+                retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
+                retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']]
         else:
             days_list = utils.get_days_list(1)
-            var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
-            if not rh.get_data(days_list, var_list,
-                               minimal_response=False, significant_changes_only=False):
+            var_list = [retrieve_hass_conf["var_load"], retrieve_hass_conf["var_PV"]]
+            if not rh.get_data(
+                days_list,
+                var_list,
+                minimal_response=False,
+                significant_changes_only=False,
+            ):
                 return False
-            if not rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
-                                   set_zero_min = retrieve_hass_conf['set_zero_min'],
-                                   var_replace_zero = retrieve_hass_conf['var_replace_zero'],
-                                   var_interp = retrieve_hass_conf['var_interp']):
+            if not rh.prepare_data(
+                retrieve_hass_conf["var_load"],
+                load_negative=retrieve_hass_conf["load_negative"],
+                set_zero_min=retrieve_hass_conf["set_zero_min"],
+                var_replace_zero=retrieve_hass_conf["var_replace_zero"],
+                var_interp=retrieve_hass_conf["var_interp"],
+            ):
                 return False
         df_input_data = rh.df_final.copy()
         # Get PV and load forecasts
         df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
         P_PV_forecast = fcst.get_power_from_weather(df_weather, set_mix_forecast=True, df_now=df_input_data)
         P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
+        if isinstance(P_load_forecast,bool) and not P_load_forecast:
+            logger.error("Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
+            return False
         df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
         df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
-        df_input_data_dayahead.columns = ['P_PV_forecast', 'P_load_forecast']
+        df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
         params = json.loads(params)
-        if 'prediction_horizon' in params['passed_data'] and params['passed_data']['prediction_horizon'] is not None:
-            prediction_horizon = params['passed_data']['prediction_horizon']
-            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[df_input_data_dayahead.index[0]:df_input_data_dayahead.index[prediction_horizon-1]]
-    elif set_type == "forecast-model-fit" or set_type == "forecast-model-predict" or set_type == "forecast-model-tune":
+        if (
+            "prediction_horizon" in params["passed_data"]
+            and params["passed_data"]["prediction_horizon"] is not None
+        ):
+            prediction_horizon = params["passed_data"]["prediction_horizon"]
+            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
+                df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
+                    prediction_horizon - 1
+                ]
+            ]
+    elif (
+        set_type == "forecast-model-fit"
+        or set_type == "forecast-model-predict"
+        or set_type == "forecast-model-tune"
+    ):
         df_input_data_dayahead = None
         P_PV_forecast, P_load_forecast = None, None
         params = json.loads(params)
         # Retrieve data from hass
-        days_to_retrieve = params['passed_data']['days_to_retrieve']
-        model_type = params['passed_data']['model_type']
-        var_model = params['passed_data']['var_model']
+        days_to_retrieve = params["passed_data"]["days_to_retrieve"]
+        model_type = params["passed_data"]["model_type"]
+        var_model = params["passed_data"]["var_model"]
         if get_data_from_file:
             days_list = None
             filename = 'data_train_'+model_type+'.pkl'
-            data_path = pathlib.Path(base_path) / 'data' / filename
-            with open(data_path, 'rb') as inp:
+            filename_path = emhass_conf['data_path'] / filename
+            with open(filename_path, 'rb') as inp:
                 df_input_data, _ = pickle.load(inp)
-            df_input_data = df_input_data[df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve):]
+            df_input_data = df_input_data[
+                df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve) :
+            ]
         else:
             days_list = utils.get_days_list(days_to_retrieve)
             var_list = [var_model]
             if not rh.get_data(days_list, var_list):
                 return False
             df_input_data = rh.df_final.copy()
+
+    elif set_type == "regressor-model-fit" or set_type == "regressor-model-predict":
+
+        df_input_data, df_input_data_dayahead = None, None
+        P_PV_forecast, P_load_forecast = None, None
+        params = json.loads(params)
+        days_list = None
+        csv_file = params["passed_data"].get("csv_file", None)
+        if "features" in params["passed_data"]:
+            features = params["passed_data"]["features"]
+        if "target" in params["passed_data"]:
+            target = params["passed_data"]["target"]
+        if "timestamp" in params["passed_data"]:
+            timestamp = params["passed_data"]["timestamp"]
+        if csv_file:
+            if get_data_from_file:
+                base_path = emhass_conf["data_path"] # + "/data"
+                filename_path = pathlib.Path(base_path) / csv_file
+
+            else:
+                filename_path = emhass_conf["data_path"] / csv_file
+
+            if filename_path.is_file():
+                df_input_data = pd.read_csv(filename_path, parse_dates=True)
+
+            else:
+                logger.error("The CSV file " + csv_file + " was not found in path: " + str(emhass_conf["data_path"]))
+                return False
+                #raise ValueError("The CSV file " + csv_file + " was not found.")
+            required_columns = []
+            required_columns.extend(features)
+            required_columns.append(target)
+            if timestamp is not None:
+                required_columns.append(timestamp)
+
+            if not set(required_columns).issubset(df_input_data.columns):
+                logger.error("The cvs file does not contain the required columns.")
+                msg = f"CSV file should contain the following columns: {', '.join(required_columns)}"
+                logger.error(msg)
+                return False
+                #raise ValueError(
+                #    msg,
+                #)
+
     elif set_type == "publish-data":
         df_input_data, df_input_data_dayahead = None, None
         P_PV_forecast, P_load_forecast = None, None
         days_list = None
     else:
-        logger.error("The passed action argument and hence the set_type parameter for setup is not valid")
+        logger.error(
+            "The passed action argument and hence the set_type parameter for setup is not valid",
+        )
         df_input_data, df_input_data_dayahead = None, None
         P_PV_forecast, P_load_forecast = None, None
         days_list = None
 
-    # The input data dictionnary to return
+    # The input data dictionary to return
     input_data_dict = {
-        'root': base_path,
+        'emhass_conf': emhass_conf,
         'retrieve_hass_conf': retrieve_hass_conf,
         'rh': rh,
         'opt': opt,
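
The new regressor-model-fit / regressor-model-predict branch above pulls everything it needs from params["passed_data"], and resolves the CSV file against emhass_conf["data_path"]. A hedged sketch of the corresponding runtime parameters; only the key names come from the code above, the file and column names are illustrative:

    runtimeparams = {
        "csv_file": "heating_prediction.csv",  # illustrative file name
        "features": ["degreeday", "solar"],    # illustrative regressor columns
        "target": "hours",                     # illustrative target column
        "timestamp": "timestamp",              # optional time column
    }
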
@@ -179,12 +279,17 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
         'days_list': days_list
     }
     return input_data_dict
-
-def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
-    save_data_to_file: Optional[bool] = True, debug: Optional[bool] = False) -> pd.DataFrame:
+
+
+def perfect_forecast_optim(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    save_data_to_file: Optional[bool] = True,
+    debug: Optional[bool] = False,
+) -> pd.DataFrame:
     """
     Perform a call to the perfect forecast optimization routine.
-
+
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -203,24 +308,33 @@ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
         input_data_dict['df_input_data'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'],
         list_and_perfect=True)
+    if isinstance(df_input_data,bool) and not df_input_data:
+        return False
     df_input_data = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'],
         list_and_perfect=True)
+    if isinstance(df_input_data,bool) and not df_input_data:
+        return False
     opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(df_input_data, input_data_dict['days_list'])
     # Save CSV file for analysis
     if save_data_to_file:
-        filename = 'opt_res_perfect_optim_'+input_data_dict['costfun']+'.csv'
-    else: # Just save the latest optimization results
-        filename = 'opt_res_latest.csv'
+        filename = "opt_res_perfect_optim_" + input_data_dict["costfun"] + ".csv"
+    else: # Just save the latest optimization results
+        filename = "opt_res_latest.csv"
     if not debug:
-        opt_res.to_csv(pathlib.Path(input_data_dict['root']) / filename, index_label='timestamp')
+        opt_res.to_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res
-
-def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
-    save_data_to_file: Optional[bool] = False, debug: Optional[bool] = False) -> pd.DataFrame:
+
+
+def dayahead_forecast_optim(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    save_data_to_file: Optional[bool] = False,
+    debug: Optional[bool] = False,
+) -> pd.DataFrame:
     """
     Perform a call to the day-ahead optimization routine.
-
+
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
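
Note the pattern introduced by the added guards in this and the following hunks: the forecast getters may now return False instead of a DataFrame, and the optimization entry points propagate that failure by returning False themselves. A minimal sketch of how a caller can defend against it:

    opt_res = perfect_forecast_optim(input_data_dict, logger)
    if isinstance(opt_res, bool) and not opt_res:
        logger.error("Optimization aborted because a forecast could not be retrieved")
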
@@ -238,26 +352,37 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
     df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
         input_data_dict['df_input_data_dayahead'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
+    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
+        return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data_dayahead,
         method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
+    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
+        return False
     opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
         df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
     # Save CSV file for publish_data
     if save_data_to_file:
-        today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
-        filename = 'opt_res_dayahead_'+today.strftime("%Y_%m_%d")+'.csv'
-    else: # Just save the latest optimization results
-        filename = 'opt_res_latest.csv'
+        today = datetime.now(timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0
+        )
+        filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
+    else: # Just save the latest optimization results
+        filename = "opt_res_latest.csv"
     if not debug:
-        opt_res_dayahead.to_csv(pathlib.Path(input_data_dict['root']) / filename, index_label='timestamp')
+        opt_res_dayahead.to_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res_dayahead
 
-def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
-    save_data_to_file: Optional[bool] = False, debug: Optional[bool] = False) -> pd.DataFrame:
+
+def naive_mpc_optim(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    save_data_to_file: Optional[bool] = False,
+    debug: Optional[bool] = False,
+) -> pd.DataFrame:
     """
     Perform a call to the naive Model Predictive Controller optimization routine.
-
+
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
@@ -275,30 +400,46 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
     df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
         input_data_dict['df_input_data_dayahead'],
         method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
+    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
+        return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
+    if isinstance(df_input_data_dayahead,bool) and not df_input_data_dayahead:
+        return False
     # The specifics params for the MPC at runtime
-    prediction_horizon = input_data_dict['params']['passed_data']['prediction_horizon']
-    soc_init = input_data_dict['params']['passed_data']['soc_init']
-    soc_final = input_data_dict['params']['passed_data']['soc_final']
-    def_total_hours = input_data_dict['params']['passed_data']['def_total_hours']
-    def_start_timestep = input_data_dict['params']['passed_data']['def_start_timestep']
-    def_end_timestep = input_data_dict['params']['passed_data']['def_end_timestep']
-    opt_res_naive_mpc = input_data_dict['opt'].perform_naive_mpc_optim(
-        df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'],
-        prediction_horizon, soc_init, soc_final, def_total_hours, def_start_timestep, def_end_timestep)
+    prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
+    soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
+    soc_final = input_data_dict["params"]["passed_data"]["soc_final"]
+    def_total_hours = input_data_dict["params"]["passed_data"]["def_total_hours"]
+    def_start_timestep = input_data_dict["params"]["passed_data"]["def_start_timestep"]
+    def_end_timestep = input_data_dict["params"]["passed_data"]["def_end_timestep"]
+    opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
+        df_input_data_dayahead,
+        input_data_dict["P_PV_forecast"],
+        input_data_dict["P_load_forecast"],
+        prediction_horizon,
+        soc_init,
+        soc_final,
+        def_total_hours,
+        def_start_timestep,
+        def_end_timestep,
+    )
     # Save CSV file for publish_data
     if save_data_to_file:
-        today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
-        filename = 'opt_res_naive_mpc_'+today.strftime("%Y_%m_%d")+'.csv'
-    else: # Just save the latest optimization results
-        filename = 'opt_res_latest.csv'
+        today = datetime.now(timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0
+        )
+        filename = "opt_res_naive_mpc_" + today.strftime("%Y_%m_%d") + ".csv"
+    else: # Just save the latest optimization results
+        filename = "opt_res_latest.csv"
     if not debug:
-        opt_res_naive_mpc.to_csv(pathlib.Path(input_data_dict['root']) / filename, index_label='timestamp')
+        opt_res_naive_mpc.to_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
     return opt_res_naive_mpc
 
-def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
-    debug: Optional[bool] = False) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
+
+def forecast_model_fit(
+    input_data_dict: dict, logger: logging.Logger, debug: Optional[bool] = False
+) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
     """Perform a forecast model fit from training data retrieved from Home Assistant.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
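
The naive MPC hunk above reads its runtime settings straight out of passed_data. A sketch of that payload with illustrative values (only the key names are taken from the code; list lengths should match the number of deferrable loads):

    runtimeparams = {
        "prediction_horizon": 10,      # timesteps to optimize over
        "soc_init": 0.3,               # initial battery state of charge
        "soc_final": 0.6,              # requested state of charge at the horizon
        "def_total_hours": [1, 3],     # operating hours per deferrable load
        "def_start_timestep": [0, 0],  # earliest start per deferrable load
        "def_end_timestep": [0, 0],    # latest end per deferrable load
    }
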
@@ -317,22 +458,28 @@ def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
     num_lags = input_data_dict['params']['passed_data']['num_lags']
     split_date_delta = input_data_dict['params']['passed_data']['split_date_delta']
     perform_backtest = input_data_dict['params']['passed_data']['perform_backtest']
-    root = input_data_dict['root']
     # The ML forecaster object
-    mlf = MLForecaster(data, model_type, var_model, sklearn_model, num_lags, root, logger)
+    mlf = MLForecaster(data, model_type, var_model, sklearn_model, num_lags, input_data_dict['emhass_conf'], logger)
     # Fit the ML model
-    df_pred, df_pred_backtest = mlf.fit(split_date_delta=split_date_delta,
-        perform_backtest=perform_backtest)
+    df_pred, df_pred_backtest = mlf.fit(
+        split_date_delta=split_date_delta, perform_backtest=perform_backtest
+    )
     # Save model
     if not debug:
         filename = model_type+'_mlf.pkl'
-        with open(pathlib.Path(root) / filename, 'wb') as outp:
+        filename_path = input_data_dict['emhass_conf']['data_path'] / filename
+        with open(filename_path, 'wb') as outp:
             pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
     return df_pred, df_pred_backtest, mlf
 
-def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
-    use_last_window: Optional[bool] = True, debug: Optional[bool] = False,
-    mlf: Optional[MLForecaster] = None) -> pd.DataFrame:
+
+def forecast_model_predict(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    use_last_window: Optional[bool] = True,
+    debug: Optional[bool] = False,
+    mlf: Optional[MLForecaster] = None,
+) -> pd.DataFrame:
     r"""Perform a forecast model predict using a previously trained skforecast model.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
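
As the hunk above shows, fitted forecaster objects are now pickled under emhass_conf['data_path'] instead of the old root folder. A sketch of loading one back, assuming a model_type of "load_forecast" was used for the fit:

    import pickle

    filename_path = emhass_conf['data_path'] / 'load_forecast_mlf.pkl'
    with open(filename_path, 'rb') as inp:
        mlf = pickle.load(inp)  # a previously fitted MLForecaster
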
@@ -355,51 +502,77 @@ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
     """
     # Load model
     model_type = input_data_dict['params']['passed_data']['model_type']
-    root = input_data_dict['root']
     filename = model_type+'_mlf.pkl'
-    filename_path = pathlib.Path(root) / filename
+    filename_path = input_data_dict['emhass_conf']['data_path'] / filename
     if not debug:
         if filename_path.is_file():
-            with open(filename_path, 'rb') as inp:
+            with open(filename_path, "rb") as inp:
                 mlf = pickle.load(inp)
         else:
-            logger.error("The ML forecaster file was not found, please run a model fit method before this predict method")
+            logger.error(
+                "The ML forecaster file was not found, please run a model fit method before this predict method",
+            )
             return
     # Make predictions
     if use_last_window:
-        data_last_window = copy.deepcopy(input_data_dict['df_input_data'])
+        data_last_window = copy.deepcopy(input_data_dict["df_input_data"])
     else:
         data_last_window = None
     predictions = mlf.predict(data_last_window)
     # Publish data to a Home Assistant sensor
-    model_predict_publish = input_data_dict['params']['passed_data']['model_predict_publish']
-    model_predict_entity_id = input_data_dict['params']['passed_data']['model_predict_entity_id']
-    model_predict_unit_of_measurement = input_data_dict['params']['passed_data']['model_predict_unit_of_measurement']
-    model_predict_friendly_name = input_data_dict['params']['passed_data']['model_predict_friendly_name']
-    publish_prefix = input_data_dict['params']['passed_data']['publish_prefix']
+    model_predict_publish = input_data_dict["params"]["passed_data"][
+        "model_predict_publish"
+    ]
+    model_predict_entity_id = input_data_dict["params"]["passed_data"][
+        "model_predict_entity_id"
+    ]
+    model_predict_unit_of_measurement = input_data_dict["params"]["passed_data"][
+        "model_predict_unit_of_measurement"
+    ]
+    model_predict_friendly_name = input_data_dict["params"]["passed_data"][
+        "model_predict_friendly_name"
+    ]
+    publish_prefix = input_data_dict["params"]["passed_data"]["publish_prefix"]
     if model_predict_publish is True:
         # Estimate the current index
-        now_precise = datetime.now(input_data_dict['retrieve_hass_conf']['time_zone']).replace(second=0, microsecond=0)
-        if input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'nearest':
-            idx_closest = predictions.index.get_indexer([now_precise], method='nearest')[0]
-        elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'first':
-            idx_closest = predictions.index.get_indexer([now_precise], method='ffill')[0]
-        elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'last':
-            idx_closest = predictions.index.get_indexer([now_precise], method='bfill')[0]
+        now_precise = datetime.now(
+            input_data_dict["retrieve_hass_conf"]["time_zone"]
+        ).replace(second=0, microsecond=0)
+        if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+            idx_closest = predictions.index.get_indexer(
+                [now_precise], method="nearest"
+            )[0]
+        elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+            idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[
+                0
+            ]
+        elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+            idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[
+                0
+            ]
         if idx_closest == -1:
-            idx_closest = predictions.index.get_indexer([now_precise], method='nearest')[0]
+            idx_closest = predictions.index.get_indexer(
+                [now_precise], method="nearest"
+            )[0]
         # Publish Load forecast
-        input_data_dict['rh'].post_data(predictions, idx_closest,
-            model_predict_entity_id,
-            model_predict_unit_of_measurement,
-            model_predict_friendly_name,
-            type_var = 'mlforecaster',
-            publish_prefix=publish_prefix)
+        input_data_dict["rh"].post_data(
+            predictions,
+            idx_closest,
+            model_predict_entity_id,
+            model_predict_unit_of_measurement,
+            model_predict_friendly_name,
+            type_var="mlforecaster",
+            publish_prefix=publish_prefix,
+        )
     return predictions
 
-def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
-    debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
-    ) -> Tuple[pd.DataFrame, MLForecaster]:
+
+def forecast_model_tune(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    debug: Optional[bool] = False,
+    mlf: Optional[MLForecaster] = None,
+) -> Tuple[pd.DataFrame, MLForecaster]:
     """Tune a forecast model hyperparameters using bayesian optimization.
 
     :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -416,31 +589,160 @@ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
     """
     # Load model
     model_type = input_data_dict['params']['passed_data']['model_type']
-    root = input_data_dict['root']
     filename = model_type+'_mlf.pkl'
-    filename_path = pathlib.Path(root) / filename
+    filename_path = input_data_dict['emhass_conf']['data_path'] / filename
     if not debug:
         if filename_path.is_file():
-            with open(filename_path, 'rb') as inp:
+            with open(filename_path, "rb") as inp:
                 mlf = pickle.load(inp)
         else:
-            logger.error("The ML forecaster file was not found, please run a model fit method before this tune method")
+            logger.error(
+                "The ML forecaster file was not found, please run a model fit method before this tune method",
+            )
             return None, None
     # Tune the model
     df_pred_optim = mlf.tune(debug=debug)
     # Save model
     if not debug:
         filename = model_type+'_mlf.pkl'
-        with open(pathlib.Path(root) / filename, 'wb') as outp:
-            pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
+        filename_path = input_data_dict['emhass_conf']['data_path'] / filename
+        with open(filename_path, 'wb') as outp:
+            pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
     return df_pred_optim, mlf
 
-def publish_data(input_data_dict: dict, logger: logging.Logger,
-    save_data_to_file: Optional[bool] = False,
-    opt_res_latest: Optional[pd.DataFrame] = None) -> pd.DataFrame:
+
+def regressor_model_fit(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    debug: Optional[bool] = False,
+) -> None:
+    """Perform a forecast model fit from training data retrieved from Home Assistant.
+
+    :param input_data_dict: A dictionnary with multiple data used by the action functions
+    :type input_data_dict: dict
+    :param logger: The passed logger object
+    :type logger: logging.Logger
+    :param debug: True to debug, useful for unit testing, defaults to False
+    :type debug: Optional[bool], optional
     """
-    Publish the data obtained from the optimization results.
+    data = copy.deepcopy(input_data_dict["df_input_data"])
+    if "model_type" in input_data_dict["params"]["passed_data"]:
+        model_type = input_data_dict["params"]["passed_data"]["model_type"]
+    else:
+        logger.error("parameter: 'model_type' not passed")
+        return False
+    if "regression_model" in input_data_dict["params"]["passed_data"]:
+        regression_model = input_data_dict["params"]["passed_data"]["regression_model"]
+    else:
+        logger.error("parameter: 'regression_model' not passed")
+        return False
+    if "features" in input_data_dict["params"]["passed_data"]:
+        features = input_data_dict["params"]["passed_data"]["features"]
+    else:
+        logger.error("parameter: 'features' not passed")
+        return False
+    if "target" in input_data_dict["params"]["passed_data"]:
+        target = input_data_dict["params"]["passed_data"]["target"]
+    else:
+        logger.error("parameter: 'target' not passed")
+        return False
+    if "timestamp" in input_data_dict["params"]["passed_data"]:
+        timestamp = input_data_dict["params"]["passed_data"]["timestamp"]
+    else:
+        logger.error("parameter: 'timestamp' not passed")
+        return False
+    if "date_features" in input_data_dict["params"]["passed_data"]:
+        date_features = input_data_dict["params"]["passed_data"]["date_features"]
+    else:
+        logger.error("parameter: 'date_features' not passed")
+        return False
+
+    # The MLRegressor object
+    mlr = MLRegressor(
+        data,
+        model_type,
+        regression_model,
+        features,
+        target,
+        timestamp,
+        logger,
+    )
+    # Fit the ML model
+    mlr.fit(date_features=date_features)
+    # Save model
+    if not debug:
+        filename = model_type + "_mlr.pkl"
+        filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
+        with open(filename_path, "wb") as outp:
+            pickle.dump(mlr, outp, pickle.HIGHEST_PROTOCOL)
+    return mlr
+
+
+def regressor_model_predict(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    debug: Optional[bool] = False,
+    mlr: Optional[MLRegressor] = None,
+) -> None:
+    """Perform a prediction from csv file.
+
+    :param input_data_dict: A dictionnary with multiple data used by the action functions
+    :type input_data_dict: dict
+    :param logger: The passed logger object
+    :type logger: logging.Logger
+    :param debug: True to debug, useful for unit testing, defaults to False
+    :type debug: Optional[bool], optional
+    """
+    if "model_type" in input_data_dict["params"]["passed_data"]:
+        model_type = input_data_dict["params"]["passed_data"]["model_type"]
+    else:
+        logger.error("parameter: 'model_type' not passed")
+        return False
+    filename = model_type + "_mlr.pkl"
+    filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
+    if not debug:
+        if filename_path.is_file():
+            with open(filename_path, "rb") as inp:
+                mlr = pickle.load(inp)
+        else:
+            logger.error(
+                "The ML forecaster file was not found, please run a model fit method before this predict method",
+            )
+            return False
+    if "new_values" in input_data_dict["params"]["passed_data"]:
+        new_values = input_data_dict["params"]["passed_data"]["new_values"]
+    else:
+        logger.error("parameter: 'new_values' not passed")
+        return False
+    # Predict from csv file
+    prediction = mlr.predict(new_values)
 
+    mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get("mlr_predict_entity_id","sensor.mlr_predict")
+    mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get("mlr_predict_unit_of_measurement","h")
+    mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get("mlr_predict_friendly_name","mlr predictor")
+    # Publish prediction
+    idx = 0
+    if not debug:
+        input_data_dict["rh"].post_data(
+            prediction,
+            idx,
+            mlr_predict_entity_id,
+            mlr_predict_unit_of_measurement,
+            mlr_predict_friendly_name,
+            type_var="mlregressor",
+        )
+    return prediction
+
+
+def publish_data(
+    input_data_dict: dict,
+    logger: logging.Logger,
+    save_data_to_file: Optional[bool] = False,
+    opt_res_latest: Optional[pd.DataFrame] = None,
+) -> pd.DataFrame:
+    """
+    Publish the data obtained from the optimization results.
+
     :param input_data_dict: A dictionnary with multiple data used by the action functions
     :type input_data_dict: dict
     :param logger: The passed logger object
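
A hedged sketch of driving the two new regressor helpers defined above. With debug=True no pickle is written and nothing is posted to Home Assistant; input_data_dict must carry passed_data entries for model_type, regression_model, features, target, timestamp, date_features and, for the prediction, new_values (all read by the code in this diff):

    mlr = regressor_model_fit(input_data_dict, logger, debug=True)
    prediction = regressor_model_predict(input_data_dict, logger, debug=True, mlr=mlr)
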
@@ -454,161 +756,212 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
     logger.info("Publishing data to HASS instance")
     # Check if a day ahead optimization has been performed (read CSV file)
     if save_data_to_file:
-        today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
-        filename = 'opt_res_dayahead_'+today.strftime("%Y_%m_%d")+'.csv'
+        today = datetime.now(timezone.utc).replace(
+            hour=0, minute=0, second=0, microsecond=0
+        )
+        filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
     else:
-        filename = 'opt_res_latest.csv'
+        filename = "opt_res_latest.csv"
     if opt_res_latest is None:
-        if not os.path.isfile(pathlib.Path(input_data_dict['root']) / filename):
+        if not os.path.isfile(input_data_dict['emhass_conf']['data_path'] / filename):
             logger.error("File not found error, run an optimization task first.")
             return
         else:
-            opt_res_latest = pd.read_csv(pathlib.Path(input_data_dict['root']) / filename, index_col='timestamp')
+            opt_res_latest = pd.read_csv(input_data_dict['emhass_conf']['data_path'] / filename, index_col='timestamp')
             opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
-            opt_res_latest.index.freq = input_data_dict['retrieve_hass_conf']['freq']
+            opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"]["freq"]
     # Estimate the current index
-    now_precise = datetime.now(input_data_dict['retrieve_hass_conf']['time_zone']).replace(second=0, microsecond=0)
-    if input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'nearest':
-        idx_closest = opt_res_latest.index.get_indexer([now_precise], method='nearest')[0]
-    elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'first':
-        idx_closest = opt_res_latest.index.get_indexer([now_precise], method='ffill')[0]
-    elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'last':
-        idx_closest = opt_res_latest.index.get_indexer([now_precise], method='bfill')[0]
+    now_precise = datetime.now(
+        input_data_dict["retrieve_hass_conf"]["time_zone"]
+    ).replace(second=0, microsecond=0)
+    if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
+            0
+        ]
+    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="ffill")[0]
+    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="bfill")[0]
     if idx_closest == -1:
-        idx_closest = opt_res_latest.index.get_indexer([now_precise], method='nearest')[0]
+        idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
+            0
+        ]
     # Publish the data
-    params = json.loads(input_data_dict['params'])
-    publish_prefix = params['passed_data']['publish_prefix']
+    params = json.loads(input_data_dict["params"])
+    publish_prefix = params["passed_data"]["publish_prefix"]
     # Publish PV forecast
-    custom_pv_forecast_id = params['passed_data']['custom_pv_forecast_id']
-    input_data_dict['rh'].post_data(opt_res_latest['P_PV'], idx_closest,
-                                    custom_pv_forecast_id["entity_id"],
-                                    custom_pv_forecast_id["unit_of_measurement"],
-                                    custom_pv_forecast_id["friendly_name"],
-                                    type_var = 'power',
-                                    publish_prefix = publish_prefix)
+    custom_pv_forecast_id = params["passed_data"]["custom_pv_forecast_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["P_PV"],
+        idx_closest,
+        custom_pv_forecast_id["entity_id"],
+        custom_pv_forecast_id["unit_of_measurement"],
+        custom_pv_forecast_id["friendly_name"],
+        type_var="power",
+        publish_prefix=publish_prefix,
+    )
     # Publish Load forecast
-    custom_load_forecast_id = params['passed_data']['custom_load_forecast_id']
-    input_data_dict['rh'].post_data(opt_res_latest['P_Load'], idx_closest,
-                                    custom_load_forecast_id["entity_id"],
-                                    custom_load_forecast_id["unit_of_measurement"],
-                                    custom_load_forecast_id["friendly_name"],
-                                    type_var = 'power',
-                                    publish_prefix = publish_prefix)
-    cols_published = ['P_PV', 'P_Load']
+    custom_load_forecast_id = params["passed_data"]["custom_load_forecast_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["P_Load"],
+        idx_closest,
+        custom_load_forecast_id["entity_id"],
+        custom_load_forecast_id["unit_of_measurement"],
+        custom_load_forecast_id["friendly_name"],
+        type_var="power",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = ["P_PV", "P_Load"]
     # Publish deferrable loads
-    custom_deferrable_forecast_id = params['passed_data']['custom_deferrable_forecast_id']
-    for k in range(input_data_dict['opt'].optim_conf['num_def_loads']):
+    custom_deferrable_forecast_id = params["passed_data"][
+        "custom_deferrable_forecast_id"
+    ]
+    for k in range(input_data_dict["opt"].optim_conf["num_def_loads"]):
         if "P_deferrable{}".format(k) not in opt_res_latest.columns:
-            logger.error("P_deferrable{}".format(k)+" was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.")
+            logger.error(
+                "P_deferrable{}".format(k)
+                + " was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
+            )
         else:
-            input_data_dict['rh'].post_data(opt_res_latest["P_deferrable{}".format(k)], idx_closest,
-                                            custom_deferrable_forecast_id[k]["entity_id"],
-                                            custom_deferrable_forecast_id[k]["unit_of_measurement"],
-                                            custom_deferrable_forecast_id[k]["friendly_name"],
-                                            type_var = 'deferrable',
-                                            publish_prefix = publish_prefix)
-            cols_published = cols_published+["P_deferrable{}".format(k)]
+            input_data_dict["rh"].post_data(
+                opt_res_latest["P_deferrable{}".format(k)],
+                idx_closest,
+                custom_deferrable_forecast_id[k]["entity_id"],
+                custom_deferrable_forecast_id[k]["unit_of_measurement"],
+                custom_deferrable_forecast_id[k]["friendly_name"],
+                type_var="deferrable",
+                publish_prefix=publish_prefix,
+            )
+            cols_published = cols_published + ["P_deferrable{}".format(k)]
     # Publish battery power
-    if input_data_dict['opt'].optim_conf['set_use_battery']:
-        if 'P_batt' not in opt_res_latest.columns:
-            logger.error("P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.")
+    if input_data_dict["opt"].optim_conf["set_use_battery"]:
+        if "P_batt" not in opt_res_latest.columns:
+            logger.error(
+                "P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
+            )
         else:
-            custom_batt_forecast_id = params['passed_data']['custom_batt_forecast_id']
-            input_data_dict['rh'].post_data(opt_res_latest['P_batt'], idx_closest,
-                                            custom_batt_forecast_id["entity_id"],
-                                            custom_batt_forecast_id["unit_of_measurement"],
-                                            custom_batt_forecast_id["friendly_name"],
-                                            type_var = 'batt',
-                                            publish_prefix = publish_prefix)
-            cols_published = cols_published+["P_batt"]
-            custom_batt_soc_forecast_id = params['passed_data']['custom_batt_soc_forecast_id']
-            input_data_dict['rh'].post_data(opt_res_latest['SOC_opt']*100, idx_closest,
-                                            custom_batt_soc_forecast_id["entity_id"],
-                                            custom_batt_soc_forecast_id["unit_of_measurement"],
-                                            custom_batt_soc_forecast_id["friendly_name"],
-                                            type_var = 'SOC',
-                                            publish_prefix = publish_prefix)
-            cols_published = cols_published+["SOC_opt"]
+            custom_batt_forecast_id = params["passed_data"]["custom_batt_forecast_id"]
+            input_data_dict["rh"].post_data(
+                opt_res_latest["P_batt"],
+                idx_closest,
+                custom_batt_forecast_id["entity_id"],
+                custom_batt_forecast_id["unit_of_measurement"],
+                custom_batt_forecast_id["friendly_name"],
+                type_var="batt",
+                publish_prefix=publish_prefix,
+            )
+            cols_published = cols_published + ["P_batt"]
+            custom_batt_soc_forecast_id = params["passed_data"][
+                "custom_batt_soc_forecast_id"
+            ]
+            input_data_dict["rh"].post_data(
+                opt_res_latest["SOC_opt"] * 100,
+                idx_closest,
+                custom_batt_soc_forecast_id["entity_id"],
+                custom_batt_soc_forecast_id["unit_of_measurement"],
+                custom_batt_soc_forecast_id["friendly_name"],
+                type_var="SOC",
+                publish_prefix=publish_prefix,
+            )
+            cols_published = cols_published + ["SOC_opt"]
     # Publish grid power
-    custom_grid_forecast_id = params['passed_data']['custom_grid_forecast_id']
-    input_data_dict['rh'].post_data(opt_res_latest['P_grid'], idx_closest,
-                                    custom_grid_forecast_id["entity_id"],
-                                    custom_grid_forecast_id["unit_of_measurement"],
-                                    custom_grid_forecast_id["friendly_name"],
-                                    type_var = 'power',
-                                    publish_prefix = publish_prefix)
-    cols_published = cols_published+["P_grid"]
+    custom_grid_forecast_id = params["passed_data"]["custom_grid_forecast_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["P_grid"],
+        idx_closest,
+        custom_grid_forecast_id["entity_id"],
+        custom_grid_forecast_id["unit_of_measurement"],
+        custom_grid_forecast_id["friendly_name"],
+        type_var="power",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["P_grid"]
     # Publish total value of cost function
-    custom_cost_fun_id = params['passed_data']['custom_cost_fun_id']
-    col_cost_fun = [i for i in opt_res_latest.columns if 'cost_fun_' in i]
-    input_data_dict['rh'].post_data(opt_res_latest[col_cost_fun], idx_closest,
-                                    custom_cost_fun_id["entity_id"],
-                                    custom_cost_fun_id["unit_of_measurement"],
-                                    custom_cost_fun_id["friendly_name"],
-                                    type_var = 'cost_fun',
-                                    publish_prefix = publish_prefix)
+    custom_cost_fun_id = params["passed_data"]["custom_cost_fun_id"]
+    col_cost_fun = [i for i in opt_res_latest.columns if "cost_fun_" in i]
+    input_data_dict["rh"].post_data(
+        opt_res_latest[col_cost_fun],
+        idx_closest,
+        custom_cost_fun_id["entity_id"],
+        custom_cost_fun_id["unit_of_measurement"],
+        custom_cost_fun_id["friendly_name"],
+        type_var="cost_fun",
+        publish_prefix=publish_prefix,
+    )
     # Publish the optimization status
-    custom_cost_fun_id = params['passed_data']['custom_optim_status_id']
+    custom_cost_fun_id = params["passed_data"]["custom_optim_status_id"]
     if "optim_status" not in opt_res_latest:
-        opt_res_latest["optim_status"] = 'Optimal'
-        logger.warning("no optim_status in opt_res_latest, run an optimization task first")
-    input_data_dict['rh'].post_data(opt_res_latest['optim_status'], idx_closest,
-                                    custom_cost_fun_id["entity_id"],
-                                    custom_cost_fun_id["unit_of_measurement"],
-                                    custom_cost_fun_id["friendly_name"],
-                                    type_var = 'optim_status',
-                                    publish_prefix = publish_prefix)
-    cols_published = cols_published+["optim_status"]
+        opt_res_latest["optim_status"] = "Optimal"
+        logger.warning(
+            "no optim_status in opt_res_latest, run an optimization task first",
+        )
+    input_data_dict["rh"].post_data(
+        opt_res_latest["optim_status"],
+        idx_closest,
+        custom_cost_fun_id["entity_id"],
+        custom_cost_fun_id["unit_of_measurement"],
+        custom_cost_fun_id["friendly_name"],
+        type_var="optim_status",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["optim_status"]
     # Publish unit_load_cost
-    custom_unit_load_cost_id = params['passed_data']['custom_unit_load_cost_id']
-    input_data_dict['rh'].post_data(opt_res_latest['unit_load_cost'], idx_closest,
-                                    custom_unit_load_cost_id["entity_id"],
-                                    custom_unit_load_cost_id["unit_of_measurement"],
-                                    custom_unit_load_cost_id["friendly_name"],
-                                    type_var = 'unit_load_cost',
-                                    publish_prefix = publish_prefix)
-    cols_published = cols_published+["unit_load_cost"]
+    custom_unit_load_cost_id = params["passed_data"]["custom_unit_load_cost_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["unit_load_cost"],
+        idx_closest,
+        custom_unit_load_cost_id["entity_id"],
+        custom_unit_load_cost_id["unit_of_measurement"],
+        custom_unit_load_cost_id["friendly_name"],
+        type_var="unit_load_cost",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["unit_load_cost"]
     # Publish unit_prod_price
-    custom_unit_prod_price_id = params['passed_data']['custom_unit_prod_price_id']
-    input_data_dict['rh'].post_data(opt_res_latest['unit_prod_price'], idx_closest,
-                                    custom_unit_prod_price_id["entity_id"],
-                                    custom_unit_prod_price_id["unit_of_measurement"],
-                                    custom_unit_prod_price_id["friendly_name"],
-                                    type_var = 'unit_prod_price',
-                                    publish_prefix = publish_prefix)
-    cols_published = cols_published+["unit_prod_price"]
+    custom_unit_prod_price_id = params["passed_data"]["custom_unit_prod_price_id"]
+    input_data_dict["rh"].post_data(
+        opt_res_latest["unit_prod_price"],
+        idx_closest,
+        custom_unit_prod_price_id["entity_id"],
+        custom_unit_prod_price_id["unit_of_measurement"],
+        custom_unit_prod_price_id["friendly_name"],
+        type_var="unit_prod_price",
+        publish_prefix=publish_prefix,
+    )
+    cols_published = cols_published + ["unit_prod_price"]
     # Create a DF resuming what has been published
     opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
     return opt_res
-
-
+
+
 def main():
     r"""Define the main command line entry function.
 
     This function may take several arguments as inputs. You can type `emhass --help` to see the list of options:
-
+
     - action: Set the desired action, options are: perfect-optim, dayahead-optim,
       naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune
-
+
     - config: Define path to the config.yaml file
-
+
     - costfun: Define the type of cost function, options are: profit, cost, self-consumption
-
+
     - log2file: Define if we should log to a file or not
-
+
    - params: Configuration parameters passed from data/options.json if using the add-on
-
+
    - runtimeparams: Pass runtime optimization parameters as dictionnary
-
+
    - debug: Use True for testing purposes
-
+
     """
     # Parsing arguments
     parser = argparse.ArgumentParser()
     parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim,\
         naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune')
     parser.add_argument('--config', type=str, help='Define path to the config.yaml file')
+    parser.add_argument('--data', type=str, help='Define path to the Data files (.csv & .pkl)')
+    parser.add_argument('--root', type=str, help='Define path emhass root')
     parser.add_argument('--costfun', type=str, default='profit', help='Define the type of cost function, options are: profit, cost, self-consumption')
     parser.add_argument('--log2file', type=strtobool, default='False', help='Define if we should log to a file or not')
     parser.add_argument('--params', type=str, default=None, help='Configuration parameters passed from data/options.json')
@@ -616,62 +969,140 @@ def main():
     parser.add_argument('--debug', type=strtobool, default='False', help='Use True for testing purposes')
     args = parser.parse_args()
     # The path to the configuration files
-    config_path = pathlib.Path(args.config)
-    base_path = str(config_path.parent)
+
+    if args.config is not None:
+        config_path = pathlib.Path(args.config)
+    else:
+        config_path = pathlib.Path(str(utils.get_root(__file__, num_parent=2) / 'config_emhass.yaml' ))
+
+    if args.data is not None:
+        data_path = pathlib.Path(args.data)
+    else:
+        data_path = (config_path.parent / 'data/')
+
+    if args.root is not None:
+        root_path = pathlib.Path(args.root)
+    else:
+        root_path = config_path.parent
+
+    emhass_conf = {}
+    emhass_conf['config_path'] = config_path
+    emhass_conf['data_path'] = data_path
+    emhass_conf['root_path'] = root_path
     # create logger
-    logger, ch = utils.get_logger(__name__, base_path, save_to_file=bool(args.log2file))
+    logger, ch = utils.get_logger(__name__, emhass_conf, save_to_file=bool(args.log2file))
+
+    logger.debug("config path: " + str(config_path))
+    logger.debug("data path: " + str(data_path))
+    logger.debug("root path: " + str(root_path))
+
+
+    if not config_path.exists():
+        logger.error("Could not find config_emhass.yaml file in: " + str(config_path))
+        logger.error("Try setting config file path with --config" )
+        return False
+
+    if not os.path.isdir(data_path):
+        logger.error("Could not find data foulder in: " + str(data_path))
+        logger.error("Try setting data path with --data" )
+        return False
+
+    if not os.path.isdir(root_path / 'src'):
+        logger.error("Could not find emhass/src foulder in: " + str(root_path))
+        logger.error("Try setting emhass root path with --root" )
+        return False
+
     # Additionnal argument
     try:
-        parser.add_argument('--version', action='version', version='%(prog)s '+version('emhass'))
+        parser.add_argument(
+            "--version",
+            action="version",
+            version="%(prog)s " + version("emhass"),
+        )
         args = parser.parse_args()
     except Exception:
-        logger.info("Version not found for emhass package. Or importlib exited with PackageNotFoundError.")
+        logger.info(
+            "Version not found for emhass package. Or importlib exited with PackageNotFoundError.",
+        )
     # Setup parameters
-    input_data_dict = set_input_data_dict(config_path, base_path,
+    input_data_dict = set_input_data_dict(emhass_conf,
         args.costfun, args.params, args.runtimeparams, args.action,
         logger, args.debug)
     # Perform selected action
-    if args.action == 'perfect-optim':
+    if args.action == "perfect-optim":
         opt_res = perfect_forecast_optim(input_data_dict, logger, debug=args.debug)
-    elif args.action == 'dayahead-optim':
+    elif args.action == "dayahead-optim":
         opt_res = dayahead_forecast_optim(input_data_dict, logger, debug=args.debug)
-    elif args.action == 'naive-mpc-optim':
+    elif args.action == "naive-mpc-optim":
         opt_res = naive_mpc_optim(input_data_dict, logger, debug=args.debug)
-    elif args.action == 'forecast-model-fit':
-        df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
+    elif args.action == "forecast-model-fit":
+        df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(
+            input_data_dict, logger, debug=args.debug
+        )
         opt_res = None
-    elif args.action == 'forecast-model-predict':
+    elif args.action == "forecast-model-predict":
         if args.debug:
             _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
         else:
             mlf = None
-        df_pred = forecast_model_predict(input_data_dict, logger, debug=args.debug, mlf=mlf)
+        df_pred = forecast_model_predict(
+            input_data_dict, logger, debug=args.debug, mlf=mlf
+        )
         opt_res = None
-    elif args.action == 'forecast-model-tune':
+    elif args.action == "forecast-model-tune":
         if args.debug:
             _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
         else:
             mlf = None
-        df_pred_optim, mlf = forecast_model_tune(input_data_dict, logger, debug=args.debug, mlf=mlf)
+        df_pred_optim, mlf = forecast_model_tune(
+            input_data_dict, logger, debug=args.debug, mlf=mlf
+        )
+        opt_res = None
+    elif args.action == "regressor-model-fit":
+        mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
         opt_res = None
-    elif args.action == 'publish-data':
+    elif args.action == "regressor-model-predict":
+        if args.debug:
+            mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
+        else:
+            mlr = None
+        prediction = regressor_model_predict(
+            input_data_dict,
+            logger,
+            debug=args.debug,
+            mlr=mlr,
+        )
+        opt_res = None
+    elif args.action == "publish-data":
         opt_res = publish_data(input_data_dict, logger)
     else:
         logger.error("The passed action argument is not valid")
+        logger.error("Try setting --action: perfect-optim, dayahead-optim, naive-mpc-optim, forecast-model-fit, forecast-model-predict, forecast-model-tune or publish-data")
        opt_res = None
     logger.info(opt_res)
     # Flush the logger
     ch.close()
     logger.removeHandler(ch)
-    if args.action == 'perfect-optim' or args.action == 'dayahead-optim' or \
-        args.action == 'naive-mpc-optim' or args.action == 'publish-data':
+    if (
+        args.action == "perfect-optim"
+        or args.action == "dayahead-optim"
+        or args.action == "naive-mpc-optim"
+        or args.action == "publish-data"
+    ):
         return opt_res
-    elif args.action == 'forecast-model-fit':
+    elif args.action == "forecast-model-fit":
         return df_fit_pred, df_fit_pred_backtest, mlf
-    elif args.action == 'forecast-model-predict':
+    elif args.action == "forecast-model-predict":
         return df_pred
-    elif args.action == 'forecast-model-tune':
+    elif args.action == "regressor-model-fit":
+        return mlr
+    elif args.action == "regressor-model-predict":
+        return prediction
+    elif args.action == "forecast-model-tune":
         return df_pred_optim, mlf
+    else:
+        return opt_res
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
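
With the --data and --root arguments added in this last hunk, a standalone run can now point at each location explicitly instead of deriving everything from the config file's parent folder; an illustrative invocation with hypothetical paths:

    emhass --action dayahead-optim --config /app/config_emhass.yaml --data /app/data --root /app --costfun profit
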