emhass 0.8.6__py3-none-any.whl → 0.9.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
emhass/command_line.py CHANGED
@@ -8,36 +8,35 @@ import logging
8
8
  import json
9
9
  import copy
10
10
  import pickle
11
- import time
12
- import numpy as np
13
- import pandas as pd
14
11
  from datetime import datetime, timezone
15
12
  from typing import Optional, Tuple
13
+ from importlib.metadata import version
14
+ import numpy as np
15
+ import pandas as pd
16
+
16
17
  from distutils.util import strtobool
17
18
 
18
- from importlib.metadata import version
19
19
  from emhass.retrieve_hass import RetrieveHass
20
20
  from emhass.forecast import Forecast
21
21
  from emhass.machine_learning_forecaster import MLForecaster
22
22
  from emhass.optimization import Optimization
23
+ from emhass.machine_learning_regressor import MLRegressor
23
24
  from emhass import utils
24
25
 
25
26
 
26
- def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
27
- params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
28
- get_data_from_file: Optional[bool] = False) -> dict:
27
+ def set_input_data_dict(emhass_conf: dict, costfun: str,
28
+ params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
29
+ get_data_from_file: Optional[bool] = False) -> dict:
29
30
  """
30
31
  Set up some of the data needed for the different actions.
31
-
32
- :param config_path: The complete absolute path where the config.yaml file is located
33
- :type config_path: pathlib.Path
34
- :param base_path: The parent folder of the config_path
35
- :type base_path: str
32
+
33
+ :param emhass_conf: Dictionary containing the needed emhass paths
34
+ :type emhass_conf: dict
36
35
  :param costfun: The type of cost function to use for optimization problem
37
36
  :type costfun: str
38
37
  :param params: Configuration parameters passed from data/options.json
39
38
  :type params: str
40
- :param runtimeparams: Runtime optimization parameters passed as a dictionnary
39
+ :param runtimeparams: Runtime optimization parameters passed as a dictionary
41
40
  :type runtimeparams: str
42
41
  :param set_type: Set the type of setup based on following type of optimization
43
42
  :type set_type: str
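
The first functional change is the signature of set_input_data_dict: the separate config_path/base_path arguments are replaced by a single emhass_conf dictionary of paths, and the new MLRegressor class is imported. A minimal sketch of the new calling convention, assuming the three path keys that main() builds further down (config_path, data_path, root_path); all paths and argument values here are illustrative, not the project's documented defaults:

import logging
import pathlib

from emhass.command_line import set_input_data_dict

logger = logging.getLogger(__name__)

# Paths are illustrative; the key names are the ones main() assembles below.
emhass_conf = {
    'config_path': pathlib.Path('/app/config_emhass.yaml'),
    'data_path': pathlib.Path('/app/data'),
    'root_path': pathlib.Path('/app'),
}

# params and runtimeparams are still passed as JSON strings (or None), as before.
params = None
runtimeparams = None

input_data_dict = set_input_data_dict(
    emhass_conf, 'profit', params, runtimeparams,
    'dayahead-optim', logger, get_data_from_file=False)
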
@@ -52,99 +51,125 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
52
51
  logger.info("Setting up needed data")
53
52
  # Parsing yaml
54
53
  retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(
55
- config_path, use_secrets=not(get_data_from_file), params=params)
54
+ emhass_conf, use_secrets=not (get_data_from_file), params=params)
56
55
  # Treat runtimeparams
57
56
  params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
58
- runtimeparams, params, retrieve_hass_conf,
59
- optim_conf, plant_conf, set_type, logger)
57
+ runtimeparams, params, retrieve_hass_conf, optim_conf, plant_conf, set_type, logger)
60
58
  # Define main objects
61
- rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
62
- retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
63
- params, base_path, logger, get_data_from_file=get_data_from_file)
59
+ rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
60
+ retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
61
+ params, emhass_conf, logger, get_data_from_file=get_data_from_file)
64
62
  fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
65
- params, base_path, logger, get_data_from_file=get_data_from_file)
66
- opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
67
- fcst.var_load_cost, fcst.var_prod_price,
68
- costfun, base_path, logger)
63
+ params, emhass_conf, logger, get_data_from_file=get_data_from_file)
64
+ opt = Optimization(retrieve_hass_conf, optim_conf, plant_conf,
65
+ fcst.var_load_cost, fcst.var_prod_price,
66
+ costfun, emhass_conf, logger)
69
67
  # Perform setup based on type of action
70
68
  if set_type == "perfect-optim":
71
69
  # Retrieve data from hass
72
70
  if get_data_from_file:
73
- with open(pathlib.Path(base_path) / 'data' / 'test_df_final.pkl', 'rb') as inp:
71
+ with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
74
72
  rh.df_final, days_list, var_list = pickle.load(inp)
73
+ retrieve_hass_conf['var_load'] = str(var_list[0])
74
+ retrieve_hass_conf['var_PV'] = str(var_list[1])
75
+ retrieve_hass_conf['var_interp'] = [
76
+ retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
77
+ retrieve_hass_conf['var_replace_zero'] = [
78
+ retrieve_hass_conf['var_PV']]
75
79
  else:
76
- days_list = utils.get_days_list(retrieve_hass_conf['days_to_retrieve'])
77
- var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
78
- if not rh.get_data(days_list, var_list,
79
- minimal_response=False, significant_changes_only=False):
80
- return False
81
- if not rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
82
- set_zero_min = retrieve_hass_conf['set_zero_min'],
83
- var_replace_zero = retrieve_hass_conf['var_replace_zero'],
84
- var_interp = retrieve_hass_conf['var_interp']):
80
+ days_list = utils.get_days_list(
81
+ retrieve_hass_conf["days_to_retrieve"])
82
+ var_list = [retrieve_hass_conf["var_load"],
83
+ retrieve_hass_conf["var_PV"]]
84
+ if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
85
+ return False
86
+ if not rh.prepare_data(retrieve_hass_conf["var_load"],
87
+ load_negative=retrieve_hass_conf["load_negative"],
88
+ set_zero_min=retrieve_hass_conf["set_zero_min"],
89
+ var_replace_zero=retrieve_hass_conf["var_replace_zero"],
90
+ var_interp=retrieve_hass_conf["var_interp"]):
85
91
  return False
86
92
  df_input_data = rh.df_final.copy()
87
93
  # What we don't need for this type of action
88
94
  P_PV_forecast, P_load_forecast, df_input_data_dayahead = None, None, None
89
95
  elif set_type == "dayahead-optim":
90
96
  # Get PV and load forecasts
91
- df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
97
+ df_weather = fcst.get_weather_forecast(
98
+ method=optim_conf["weather_forecast_method"])
92
99
  P_PV_forecast = fcst.get_power_from_weather(df_weather)
93
- P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'])
94
- if isinstance(P_load_forecast,bool) and not P_load_forecast:
95
- logger.error("Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
100
+ P_load_forecast = fcst.get_load_forecast(
101
+ method=optim_conf['load_forecast_method'])
102
+ if isinstance(P_load_forecast, bool) and not P_load_forecast:
103
+ logger.error(
104
+ "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
96
105
  return False
97
- df_input_data_dayahead = pd.DataFrame(np.transpose(np.vstack([P_PV_forecast.values,P_load_forecast.values])),
98
- index=P_PV_forecast.index,
99
- columns=['P_PV_forecast', 'P_load_forecast'])
106
+ df_input_data_dayahead = pd.DataFrame(np.transpose(np.vstack(
107
+ [P_PV_forecast.values, P_load_forecast.values])), index=P_PV_forecast.index,
108
+ columns=["P_PV_forecast", "P_load_forecast"])
100
109
  df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
101
110
  params = json.loads(params)
102
- if 'prediction_horizon' in params['passed_data'] and params['passed_data']['prediction_horizon'] is not None:
103
- prediction_horizon = params['passed_data']['prediction_horizon']
104
- df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[df_input_data_dayahead.index[0]:df_input_data_dayahead.index[prediction_horizon-1]]
111
+ if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
112
+ prediction_horizon = params["passed_data"]["prediction_horizon"]
113
+ df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
114
+ df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
105
115
  # What we don't need for this type of action
106
116
  df_input_data, days_list = None, None
107
117
  elif set_type == "naive-mpc-optim":
108
118
  # Retrieve data from hass
109
119
  if get_data_from_file:
110
- with open(pathlib.Path(base_path) / 'data' / 'test_df_final.pkl', 'rb') as inp:
120
+ with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
111
121
  rh.df_final, days_list, var_list = pickle.load(inp)
122
+ retrieve_hass_conf['var_load'] = str(var_list[0])
123
+ retrieve_hass_conf['var_PV'] = str(var_list[1])
124
+ retrieve_hass_conf['var_interp'] = [
125
+ retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
126
+ retrieve_hass_conf['var_replace_zero'] = [
127
+ retrieve_hass_conf['var_PV']]
112
128
  else:
113
129
  days_list = utils.get_days_list(1)
114
- var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
115
- if not rh.get_data(days_list, var_list,
116
- minimal_response=False, significant_changes_only=False):
130
+ var_list = [retrieve_hass_conf["var_load"],
131
+ retrieve_hass_conf["var_PV"]]
132
+ if not rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False):
117
133
  return False
118
- if not rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
119
- set_zero_min = retrieve_hass_conf['set_zero_min'],
120
- var_replace_zero = retrieve_hass_conf['var_replace_zero'],
121
- var_interp = retrieve_hass_conf['var_interp']):
134
+ if not rh.prepare_data(retrieve_hass_conf["var_load"],
135
+ load_negative=retrieve_hass_conf["load_negative"],
136
+ set_zero_min=retrieve_hass_conf["set_zero_min"],
137
+ var_replace_zero=retrieve_hass_conf["var_replace_zero"],
138
+ var_interp=retrieve_hass_conf["var_interp"]):
122
139
  return False
123
140
  df_input_data = rh.df_final.copy()
124
141
  # Get PV and load forecasts
125
- df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
126
- P_PV_forecast = fcst.get_power_from_weather(df_weather, set_mix_forecast=True, df_now=df_input_data)
127
- P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
142
+ df_weather = fcst.get_weather_forecast(
143
+ method=optim_conf['weather_forecast_method'])
144
+ P_PV_forecast = fcst.get_power_from_weather(
145
+ df_weather, set_mix_forecast=True, df_now=df_input_data)
146
+ P_load_forecast = fcst.get_load_forecast(
147
+ method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
148
+ if isinstance(P_load_forecast, bool) and not P_load_forecast:
149
+ logger.error(
150
+ "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
151
+ return False
128
152
  df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
129
153
  df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
130
- df_input_data_dayahead.columns = ['P_PV_forecast', 'P_load_forecast']
154
+ df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
131
155
  params = json.loads(params)
132
- if 'prediction_horizon' in params['passed_data'] and params['passed_data']['prediction_horizon'] is not None:
133
- prediction_horizon = params['passed_data']['prediction_horizon']
134
- df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[df_input_data_dayahead.index[0]:df_input_data_dayahead.index[prediction_horizon-1]]
135
- elif set_type == "forecast-model-fit" or set_type == "forecast-model-predict" or set_type == "forecast-model-tune":
156
+ if ("prediction_horizon" in params["passed_data"] and params["passed_data"]["prediction_horizon"] is not None):
157
+ prediction_horizon = params["passed_data"]["prediction_horizon"]
158
+ df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
159
+ df_input_data_dayahead.index[0]: df_input_data_dayahead.index[prediction_horizon - 1]]
160
+ elif (set_type == "forecast-model-fit" or set_type == "forecast-model-predict" or set_type == "forecast-model-tune"):
136
161
  df_input_data_dayahead = None
137
162
  P_PV_forecast, P_load_forecast = None, None
138
163
  params = json.loads(params)
139
164
  # Retrieve data from hass
140
- days_to_retrieve = params['passed_data']['days_to_retrieve']
141
- model_type = params['passed_data']['model_type']
142
- var_model = params['passed_data']['var_model']
165
+ days_to_retrieve = params["passed_data"]["days_to_retrieve"]
166
+ model_type = params["passed_data"]["model_type"]
167
+ var_model = params["passed_data"]["var_model"]
143
168
  if get_data_from_file:
144
169
  days_list = None
145
170
  filename = 'data_train_'+model_type+'.pkl'
146
- data_path = pathlib.Path(base_path) / 'data' / filename
147
- with open(data_path, 'rb') as inp:
171
+ filename_path = emhass_conf['data_path'] / filename
172
+ with open(filename_path, 'rb') as inp:
148
173
  df_input_data, _ = pickle.load(inp)
149
174
  df_input_data = df_input_data[df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve):]
150
175
  else:
@@ -153,19 +178,56 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
153
178
  if not rh.get_data(days_list, var_list):
154
179
  return False
155
180
  df_input_data = rh.df_final.copy()
181
+ elif set_type == "regressor-model-fit" or set_type == "regressor-model-predict":
182
+ df_input_data, df_input_data_dayahead = None, None
183
+ P_PV_forecast, P_load_forecast = None, None
184
+ params = json.loads(params)
185
+ days_list = None
186
+ csv_file = params["passed_data"].get("csv_file", None)
187
+ if "features" in params["passed_data"]:
188
+ features = params["passed_data"]["features"]
189
+ if "target" in params["passed_data"]:
190
+ target = params["passed_data"]["target"]
191
+ if "timestamp" in params["passed_data"]:
192
+ timestamp = params["passed_data"]["timestamp"]
193
+ if csv_file:
194
+ if get_data_from_file:
195
+ base_path = emhass_conf["data_path"] # + "/data"
196
+ filename_path = pathlib.Path(base_path) / csv_file
197
+ else:
198
+ filename_path = emhass_conf["data_path"] / csv_file
199
+ if filename_path.is_file():
200
+ df_input_data = pd.read_csv(filename_path, parse_dates=True)
201
+ else:
202
+ logger.error("The CSV file " + csv_file +
203
+ " was not found in path: " + str(emhass_conf["data_path"]))
204
+ return False
205
+ # raise ValueError("The CSV file " + csv_file + " was not found.")
206
+ required_columns = []
207
+ required_columns.extend(features)
208
+ required_columns.append(target)
209
+ if timestamp is not None:
210
+ required_columns.append(timestamp)
211
+ if not set(required_columns).issubset(df_input_data.columns):
212
+ logger.error(
213
+ "The cvs file does not contain the required columns.")
214
+ msg = f"CSV file should contain the following columns: {', '.join(required_columns)}"
215
+ logger.error(msg)
216
+ return False
156
217
  elif set_type == "publish-data":
157
218
  df_input_data, df_input_data_dayahead = None, None
158
219
  P_PV_forecast, P_load_forecast = None, None
159
220
  days_list = None
160
221
  else:
161
- logger.error("The passed action argument and hence the set_type parameter for setup is not valid")
222
+ logger.error(
223
+ "The passed action argument and hence the set_type parameter for setup is not valid",
224
+ )
162
225
  df_input_data, df_input_data_dayahead = None, None
163
226
  P_PV_forecast, P_load_forecast = None, None
164
227
  days_list = None
165
-
166
- # The input data dictionnary to return
228
+ # The input data dictionary to return
167
229
  input_data_dict = {
168
- 'root': base_path,
230
+ 'emhass_conf': emhass_conf,
169
231
  'retrieve_hass_conf': retrieve_hass_conf,
170
232
  'rh': rh,
171
233
  'opt': opt,
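
The new regressor-model-fit / regressor-model-predict branch above loads a CSV file from emhass_conf['data_path'] and verifies that it contains the declared feature, target and (optional) timestamp columns before continuing. A sketch of the runtime parameters that branch expects, assuming utils.treat_runtimeparams forwards these keys into params['passed_data'] as the branch reads them; file and column names are purely illustrative:

import json

# Key names are the ones read by the branch above; all values are illustrative.
runtimeparams = json.dumps({
    "csv_file": "regression_data.csv",    # looked up under emhass_conf['data_path']
    "features": ["degreeday", "solar"],   # predictor columns present in the CSV
    "target": "hours",                    # column to be predicted
    "timestamp": "timestamp",             # optional time column, also checked if given
})
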
@@ -179,12 +241,14 @@ def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
179
241
  'days_list': days_list
180
242
  }
181
243
  return input_data_dict
182
-
244
+
245
+
183
246
  def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
184
- save_data_to_file: Optional[bool] = True, debug: Optional[bool] = False) -> pd.DataFrame:
247
+ save_data_to_file: Optional[bool] = True,
248
+ debug: Optional[bool] = False) -> pd.DataFrame:
185
249
  """
186
250
  Perform a call to the perfect forecast optimization routine.
187
-
251
+
188
252
  :param input_data_dict: A dictionnary with multiple data used by the action functions
189
253
  :type input_data_dict: dict
190
254
  :param logger: The passed logger object
@@ -200,27 +264,36 @@ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
200
264
  logger.info("Performing perfect forecast optimization")
201
265
  # Load cost and prod price forecast
202
266
  df_input_data = input_data_dict['fcst'].get_load_cost_forecast(
203
- input_data_dict['df_input_data'],
267
+ input_data_dict['df_input_data'],
204
268
  method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'],
205
269
  list_and_perfect=True)
270
+ if isinstance(df_input_data, bool) and not df_input_data:
271
+ return False
206
272
  df_input_data = input_data_dict['fcst'].get_prod_price_forecast(
207
273
  df_input_data, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'],
208
274
  list_and_perfect=True)
209
- opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(df_input_data, input_data_dict['days_list'])
275
+ if isinstance(df_input_data, bool) and not df_input_data:
276
+ return False
277
+ opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(
278
+ df_input_data, input_data_dict['days_list'])
210
279
  # Save CSV file for analysis
211
280
  if save_data_to_file:
212
- filename = 'opt_res_perfect_optim_'+input_data_dict['costfun']+'.csv'
213
- else: # Just save the latest optimization results
214
- filename = 'opt_res_latest.csv'
281
+ filename = "opt_res_perfect_optim_" + \
282
+ input_data_dict["costfun"] + ".csv"
283
+ else: # Just save the latest optimization results
284
+ filename = "opt_res_latest.csv"
215
285
  if not debug:
216
- opt_res.to_csv(pathlib.Path(input_data_dict['root']) / filename, index_label='timestamp')
286
+ opt_res.to_csv(
287
+ input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
217
288
  return opt_res
218
-
219
- def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
220
- save_data_to_file: Optional[bool] = False, debug: Optional[bool] = False) -> pd.DataFrame:
289
+
290
+
291
+ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
292
+ save_data_to_file: Optional[bool] = False,
293
+ debug: Optional[bool] = False) -> pd.DataFrame:
221
294
  """
222
295
  Perform a call to the day-ahead optimization routine.
223
-
296
+
224
297
  :param input_data_dict: A dictionnary with multiple data used by the action functions
225
298
  :type input_data_dict: dict
226
299
  :param logger: The passed logger object
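
perfect_forecast_optim (and the day-ahead and MPC routines below) now abort early when the cost or production-price forecast returns False, and write their result CSVs under emhass_conf['data_path'] rather than the previous root/base path. A quick way to inspect a saved result outside EMHASS; the path and cost function are illustrative, while the filename pattern is the one used above:

import pathlib
import pandas as pd

data_path = pathlib.Path('/app/data')   # emhass_conf['data_path']
opt_res = pd.read_csv(data_path / 'opt_res_perfect_optim_profit.csv',
                      index_col='timestamp', parse_dates=True)
print(opt_res.tail())
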
@@ -238,26 +311,35 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
238
311
  df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
239
312
  input_data_dict['df_input_data_dayahead'],
240
313
  method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
314
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
315
+ return False
241
316
  df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
242
- df_input_data_dayahead,
317
+ df_input_data_dayahead,
243
318
  method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
319
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
320
+ return False
244
321
  opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
245
322
  df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
246
323
  # Save CSV file for publish_data
247
324
  if save_data_to_file:
248
- today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
249
- filename = 'opt_res_dayahead_'+today.strftime("%Y_%m_%d")+'.csv'
250
- else: # Just save the latest optimization results
251
- filename = 'opt_res_latest.csv'
325
+ today = datetime.now(timezone.utc).replace(
326
+ hour=0, minute=0, second=0, microsecond=0
327
+ )
328
+ filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
329
+ else: # Just save the latest optimization results
330
+ filename = "opt_res_latest.csv"
252
331
  if not debug:
253
- opt_res_dayahead.to_csv(pathlib.Path(input_data_dict['root']) / filename, index_label='timestamp')
332
+ opt_res_dayahead.to_csv(
333
+ input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
254
334
  return opt_res_dayahead
255
335
 
256
- def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
257
- save_data_to_file: Optional[bool] = False, debug: Optional[bool] = False) -> pd.DataFrame:
336
+
337
+ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
338
+ save_data_to_file: Optional[bool] = False,
339
+ debug: Optional[bool] = False) -> pd.DataFrame:
258
340
  """
259
341
  Perform a call to the naive Model Predictive Controller optimization routine.
260
-
342
+
261
343
  :param input_data_dict: A dictionnary with multiple data used by the action functions
262
344
  :type input_data_dict: dict
263
345
  :param logger: The passed logger object
@@ -275,30 +357,39 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
275
357
  df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
276
358
  input_data_dict['df_input_data_dayahead'],
277
359
  method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
360
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
361
+ return False
278
362
  df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
279
363
  df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
364
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
365
+ return False
280
366
  # The specifics params for the MPC at runtime
281
- prediction_horizon = input_data_dict['params']['passed_data']['prediction_horizon']
282
- soc_init = input_data_dict['params']['passed_data']['soc_init']
283
- soc_final = input_data_dict['params']['passed_data']['soc_final']
284
- def_total_hours = input_data_dict['params']['passed_data']['def_total_hours']
285
- def_start_timestep = input_data_dict['params']['passed_data']['def_start_timestep']
286
- def_end_timestep = input_data_dict['params']['passed_data']['def_end_timestep']
287
- opt_res_naive_mpc = input_data_dict['opt'].perform_naive_mpc_optim(
288
- df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'],
289
- prediction_horizon, soc_init, soc_final, def_total_hours, def_start_timestep, def_end_timestep)
367
+ prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
368
+ soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
369
+ soc_final = input_data_dict["params"]["passed_data"]["soc_final"]
370
+ def_total_hours = input_data_dict["params"]["passed_data"]["def_total_hours"]
371
+ def_start_timestep = input_data_dict["params"]["passed_data"]["def_start_timestep"]
372
+ def_end_timestep = input_data_dict["params"]["passed_data"]["def_end_timestep"]
373
+ opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
374
+ df_input_data_dayahead, input_data_dict["P_PV_forecast"], input_data_dict["P_load_forecast"],
375
+ prediction_horizon, soc_init, soc_final, def_total_hours,
376
+ def_start_timestep, def_end_timestep)
290
377
  # Save CSV file for publish_data
291
378
  if save_data_to_file:
292
- today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
293
- filename = 'opt_res_naive_mpc_'+today.strftime("%Y_%m_%d")+'.csv'
294
- else: # Just save the latest optimization results
295
- filename = 'opt_res_latest.csv'
379
+ today = datetime.now(timezone.utc).replace(
380
+ hour=0, minute=0, second=0, microsecond=0
381
+ )
382
+ filename = "opt_res_naive_mpc_" + today.strftime("%Y_%m_%d") + ".csv"
383
+ else: # Just save the latest optimization results
384
+ filename = "opt_res_latest.csv"
296
385
  if not debug:
297
- opt_res_naive_mpc.to_csv(pathlib.Path(input_data_dict['root']) / filename, index_label='timestamp')
386
+ opt_res_naive_mpc.to_csv(
387
+ input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
298
388
  return opt_res_naive_mpc
299
389
 
300
- def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
301
- debug: Optional[bool] = False) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
390
+
391
+ def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
392
+ debug: Optional[bool] = False) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
302
393
  """Perform a forecast model fit from training data retrieved from Home Assistant.
303
394
 
304
395
  :param input_data_dict: A dictionnary with multiple data used by the action functions
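
naive_mpc_optim keeps reading its MPC-specific settings from params['passed_data'], now with the same early-return guards on the cost and price forecasts. A sketch of the runtime parameters that block consumes; the values are illustrative and assume a plant with two deferrable loads:

import json

# Keys are the ones read from params['passed_data'] above; values are illustrative.
runtimeparams = json.dumps({
    "prediction_horizon": 10,      # number of timesteps covered by the MPC
    "soc_init": 0.4,               # battery state of charge at the start
    "soc_final": 0.6,              # battery state of charge targeted at the end
    "def_total_hours": [2, 1],     # operating hours per deferrable load
    "def_start_timestep": [0, 0],  # earliest allowed start timestep per load
    "def_end_timestep": [0, 0],    # latest allowed end timestep per load
})
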
@@ -317,22 +408,26 @@ def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
317
408
  num_lags = input_data_dict['params']['passed_data']['num_lags']
318
409
  split_date_delta = input_data_dict['params']['passed_data']['split_date_delta']
319
410
  perform_backtest = input_data_dict['params']['passed_data']['perform_backtest']
320
- root = input_data_dict['root']
321
411
  # The ML forecaster object
322
- mlf = MLForecaster(data, model_type, var_model, sklearn_model, num_lags, root, logger)
412
+ mlf = MLForecaster(data, model_type, var_model, sklearn_model,
413
+ num_lags, input_data_dict['emhass_conf'], logger)
323
414
  # Fit the ML model
324
- df_pred, df_pred_backtest = mlf.fit(split_date_delta=split_date_delta,
325
- perform_backtest=perform_backtest)
415
+ df_pred, df_pred_backtest = mlf.fit(
416
+ split_date_delta=split_date_delta, perform_backtest=perform_backtest
417
+ )
326
418
  # Save model
327
419
  if not debug:
328
420
  filename = model_type+'_mlf.pkl'
329
- with open(pathlib.Path(root) / filename, 'wb') as outp:
421
+ filename_path = input_data_dict['emhass_conf']['data_path'] / filename
422
+ with open(filename_path, 'wb') as outp:
330
423
  pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
331
424
  return df_pred, df_pred_backtest, mlf
332
425
 
333
- def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
334
- use_last_window: Optional[bool] = True, debug: Optional[bool] = False,
335
- mlf: Optional[MLForecaster] = None) -> pd.DataFrame:
426
+
427
+ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
428
+ use_last_window: Optional[bool] = True,
429
+ debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
430
+ ) -> pd.DataFrame:
336
431
  r"""Perform a forecast model predict using a previously trained skforecast model.
337
432
 
338
433
  :param input_data_dict: A dictionnary with multiple data used by the action functions
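
forecast_model_fit (and forecast_model_tune below) now pickle the fitted MLForecaster under emhass_conf['data_path'] as <model_type>_mlf.pkl instead of the old root folder. A short sketch of reloading such a model outside EMHASS; the path and model_type are illustrative, the filename pattern matches the save above:

import pathlib
import pickle

data_path = pathlib.Path('/app/data')        # emhass_conf['data_path']
model_type = 'load_forecast'                 # the model_type passed at fit time
with open(data_path / (model_type + '_mlf.pkl'), 'rb') as inp:
    mlf = pickle.load(inp)                   # the same object forecast_model_predict unpickles
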
@@ -355,51 +450,61 @@ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
355
450
  """
356
451
  # Load model
357
452
  model_type = input_data_dict['params']['passed_data']['model_type']
358
- root = input_data_dict['root']
359
453
  filename = model_type+'_mlf.pkl'
360
- filename_path = pathlib.Path(root) / filename
454
+ filename_path = input_data_dict['emhass_conf']['data_path'] / filename
361
455
  if not debug:
362
456
  if filename_path.is_file():
363
- with open(filename_path, 'rb') as inp:
457
+ with open(filename_path, "rb") as inp:
364
458
  mlf = pickle.load(inp)
365
459
  else:
366
- logger.error("The ML forecaster file was not found, please run a model fit method before this predict method")
460
+ logger.error(
461
+ "The ML forecaster file was not found, please run a model fit method before this predict method",
462
+ )
367
463
  return
368
464
  # Make predictions
369
465
  if use_last_window:
370
- data_last_window = copy.deepcopy(input_data_dict['df_input_data'])
466
+ data_last_window = copy.deepcopy(input_data_dict["df_input_data"])
371
467
  else:
372
468
  data_last_window = None
373
469
  predictions = mlf.predict(data_last_window)
374
470
  # Publish data to a Home Assistant sensor
375
- model_predict_publish = input_data_dict['params']['passed_data']['model_predict_publish']
376
- model_predict_entity_id = input_data_dict['params']['passed_data']['model_predict_entity_id']
377
- model_predict_unit_of_measurement = input_data_dict['params']['passed_data']['model_predict_unit_of_measurement']
378
- model_predict_friendly_name = input_data_dict['params']['passed_data']['model_predict_friendly_name']
379
- publish_prefix = input_data_dict['params']['passed_data']['publish_prefix']
471
+ model_predict_publish = input_data_dict["params"]["passed_data"][
472
+ "model_predict_publish"
473
+ ]
474
+ model_predict_entity_id = input_data_dict["params"]["passed_data"][
475
+ "model_predict_entity_id"
476
+ ]
477
+ model_predict_unit_of_measurement = input_data_dict["params"]["passed_data"][
478
+ "model_predict_unit_of_measurement"
479
+ ]
480
+ model_predict_friendly_name = input_data_dict["params"]["passed_data"][
481
+ "model_predict_friendly_name"
482
+ ]
483
+ publish_prefix = input_data_dict["params"]["passed_data"]["publish_prefix"]
380
484
  if model_predict_publish is True:
381
485
  # Estimate the current index
382
- now_precise = datetime.now(input_data_dict['retrieve_hass_conf']['time_zone']).replace(second=0, microsecond=0)
383
- if input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'nearest':
384
- idx_closest = predictions.index.get_indexer([now_precise], method='nearest')[0]
385
- elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'first':
386
- idx_closest = predictions.index.get_indexer([now_precise], method='ffill')[0]
387
- elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'last':
388
- idx_closest = predictions.index.get_indexer([now_precise], method='bfill')[0]
486
+ now_precise = datetime.now(
487
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
488
+ ).replace(second=0, microsecond=0)
489
+ if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
490
+ idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
491
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
492
+ idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[0]
493
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
494
+ idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[0]
389
495
  if idx_closest == -1:
390
- idx_closest = predictions.index.get_indexer([now_precise], method='nearest')[0]
496
+ idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
391
497
  # Publish Load forecast
392
- input_data_dict['rh'].post_data(predictions, idx_closest,
393
- model_predict_entity_id,
394
- model_predict_unit_of_measurement,
395
- model_predict_friendly_name,
396
- type_var = 'mlforecaster',
397
- publish_prefix=publish_prefix)
498
+ input_data_dict["rh"].post_data(
499
+ predictions, idx_closest, model_predict_entity_id,
500
+ model_predict_unit_of_measurement, model_predict_friendly_name,
501
+ type_var="mlforecaster", publish_prefix=publish_prefix)
398
502
  return predictions
399
503
 
400
- def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
401
- debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
402
- ) -> Tuple[pd.DataFrame, MLForecaster]:
504
+
505
+ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
506
+ debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
507
+ ) -> Tuple[pd.DataFrame, MLForecaster]:
403
508
  """Tune a forecast model hyperparameters using bayesian optimization.
404
509
 
405
510
  :param input_data_dict: A dictionnary with multiple data used by the action functions
@@ -416,31 +521,139 @@ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
416
521
  """
417
522
  # Load model
418
523
  model_type = input_data_dict['params']['passed_data']['model_type']
419
- root = input_data_dict['root']
420
524
  filename = model_type+'_mlf.pkl'
421
- filename_path = pathlib.Path(root) / filename
525
+ filename_path = input_data_dict['emhass_conf']['data_path'] / filename
422
526
  if not debug:
423
527
  if filename_path.is_file():
424
- with open(filename_path, 'rb') as inp:
528
+ with open(filename_path, "rb") as inp:
425
529
  mlf = pickle.load(inp)
426
530
  else:
427
- logger.error("The ML forecaster file was not found, please run a model fit method before this tune method")
531
+ logger.error(
532
+ "The ML forecaster file was not found, please run a model fit method before this tune method",
533
+ )
428
534
  return None, None
429
535
  # Tune the model
430
536
  df_pred_optim = mlf.tune(debug=debug)
431
537
  # Save model
432
538
  if not debug:
433
539
  filename = model_type+'_mlf.pkl'
434
- with open(pathlib.Path(root) / filename, 'wb') as outp:
540
+ filename_path = input_data_dict['emhass_conf']['data_path'] / filename
541
+ with open(filename_path, 'wb') as outp:
435
542
  pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
436
543
  return df_pred_optim, mlf
437
544
 
438
- def publish_data(input_data_dict: dict, logger: logging.Logger,
439
- save_data_to_file: Optional[bool] = False,
440
- opt_res_latest: Optional[pd.DataFrame] = None) -> pd.DataFrame:
545
+
546
+ def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
547
+ debug: Optional[bool] = False) -> MLRegressor:
548
+ """Perform a forecast model fit from training data retrieved from Home Assistant.
549
+
550
+ :param input_data_dict: A dictionnary with multiple data used by the action functions
551
+ :type input_data_dict: dict
552
+ :param logger: The passed logger object
553
+ :type logger: logging.Logger
554
+ :param debug: True to debug, useful for unit testing, defaults to False
555
+ :type debug: Optional[bool], optional
556
+ """
557
+ data = copy.deepcopy(input_data_dict["df_input_data"])
558
+ if "model_type" in input_data_dict["params"]["passed_data"]:
559
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
560
+ else:
561
+ logger.error("parameter: 'model_type' not passed")
562
+ return False
563
+ if "regression_model" in input_data_dict["params"]["passed_data"]:
564
+ regression_model = input_data_dict["params"]["passed_data"]["regression_model"]
565
+ else:
566
+ logger.error("parameter: 'regression_model' not passed")
567
+ return False
568
+ if "features" in input_data_dict["params"]["passed_data"]:
569
+ features = input_data_dict["params"]["passed_data"]["features"]
570
+ else:
571
+ logger.error("parameter: 'features' not passed")
572
+ return False
573
+ if "target" in input_data_dict["params"]["passed_data"]:
574
+ target = input_data_dict["params"]["passed_data"]["target"]
575
+ else:
576
+ logger.error("parameter: 'target' not passed")
577
+ return False
578
+ if "timestamp" in input_data_dict["params"]["passed_data"]:
579
+ timestamp = input_data_dict["params"]["passed_data"]["timestamp"]
580
+ else:
581
+ logger.error("parameter: 'timestamp' not passed")
582
+ return False
583
+ if "date_features" in input_data_dict["params"]["passed_data"]:
584
+ date_features = input_data_dict["params"]["passed_data"]["date_features"]
585
+ else:
586
+ logger.error("parameter: 'date_features' not passed")
587
+ return False
588
+ # The MLRegressor object
589
+ mlr = MLRegressor(data, model_type, regression_model, features, target, timestamp, logger)
590
+ # Fit the ML model
591
+ mlr.fit(date_features=date_features)
592
+ # Save model
593
+ if not debug:
594
+ filename = model_type + "_mlr.pkl"
595
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
596
+ with open(filename_path, "wb") as outp:
597
+ pickle.dump(mlr, outp, pickle.HIGHEST_PROTOCOL)
598
+ return mlr
599
+
600
+
601
+ def regressor_model_predict(input_data_dict: dict, logger: logging.Logger,
602
+ debug: Optional[bool] = False, mlr: Optional[MLRegressor] = None
603
+ ) -> np.ndarray:
604
+ """Perform a prediction from csv file.
605
+
606
+ :param input_data_dict: A dictionnary with multiple data used by the action functions
607
+ :type input_data_dict: dict
608
+ :param logger: The passed logger object
609
+ :type logger: logging.Logger
610
+ :param debug: True to debug, useful for unit testing, defaults to False
611
+ :type debug: Optional[bool], optional
612
+ """
613
+ if "model_type" in input_data_dict["params"]["passed_data"]:
614
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
615
+ else:
616
+ logger.error("parameter: 'model_type' not passed")
617
+ return False
618
+ filename = model_type + "_mlr.pkl"
619
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
620
+ if not debug:
621
+ if filename_path.is_file():
622
+ with open(filename_path, "rb") as inp:
623
+ mlr = pickle.load(inp)
624
+ else:
625
+ logger.error(
626
+ "The ML forecaster file was not found, please run a model fit method before this predict method",
627
+ )
628
+ return False
629
+ if "new_values" in input_data_dict["params"]["passed_data"]:
630
+ new_values = input_data_dict["params"]["passed_data"]["new_values"]
631
+ else:
632
+ logger.error("parameter: 'new_values' not passed")
633
+ return False
634
+ # Predict from csv file
635
+ prediction = mlr.predict(new_values)
636
+ mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get(
637
+ "mlr_predict_entity_id", "sensor.mlr_predict")
638
+ mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get(
639
+ "mlr_predict_unit_of_measurement", "h")
640
+ mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get(
641
+ "mlr_predict_friendly_name", "mlr predictor")
642
+ # Publish prediction
643
+ idx = 0
644
+ if not debug:
645
+ input_data_dict["rh"].post_data(prediction, idx, mlr_predict_entity_id,
646
+ mlr_predict_unit_of_measurement, mlr_predict_friendly_name,
647
+ type_var="mlregressor")
648
+ return prediction
649
+
650
+
651
+ def publish_data(input_data_dict: dict, logger: logging.Logger,
652
+ save_data_to_file: Optional[bool] = False,
653
+ opt_res_latest: Optional[pd.DataFrame] = None) -> pd.DataFrame:
441
654
  """
442
655
  Publish the data obtained from the optimization results.
443
-
656
+
444
657
  :param input_data_dict: A dictionnary with multiple data used by the action functions
445
658
  :type input_data_dict: dict
446
659
  :param logger: The passed logger object
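
regressor_model_fit and regressor_model_predict are the two new entry points backing the regressor-model-fit and regressor-model-predict actions. A sketch of a full round trip through them, reusing the illustrative paths from the first example and assuming treat_runtimeparams forwards the keys below into params['passed_data']; every value is illustrative, and the accepted regression_model names are defined in emhass.machine_learning_regressor:

import json
import logging
import pathlib

from emhass.command_line import (set_input_data_dict, regressor_model_fit,
                                 regressor_model_predict)

logger = logging.getLogger(__name__)
emhass_conf = {'config_path': pathlib.Path('/app/config_emhass.yaml'),
               'data_path': pathlib.Path('/app/data'),
               'root_path': pathlib.Path('/app')}

# Keys are the ones checked by the two functions above; values are illustrative.
runtimeparams = json.dumps({
    "csv_file": "regression_data.csv",
    "model_type": "heating_hours",
    "regression_model": "LinearRegression",
    "features": ["degreeday", "solar"],
    "target": "hours",
    "timestamp": "timestamp",
    "date_features": ["month", "day_of_week"],
    "new_values": [8.0, 150.0],   # feature values to predict from (illustrative)
})

# Fit: loads the CSV from emhass_conf['data_path'], trains the model and
# pickles it as <model_type>_mlr.pkl.
input_data_dict = set_input_data_dict(emhass_conf, 'profit', None, runtimeparams,
                                      'regressor-model-fit', logger)
mlr = regressor_model_fit(input_data_dict, logger)

# Predict: reloads the pickled model and posts the result to Home Assistant.
input_data_dict = set_input_data_dict(emhass_conf, 'profit', None, runtimeparams,
                                      'regressor-model-predict', logger)
prediction = regressor_model_predict(input_data_dict, logger)
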
@@ -454,224 +667,347 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
454
667
  logger.info("Publishing data to HASS instance")
455
668
  # Check if a day ahead optimization has been performed (read CSV file)
456
669
  if save_data_to_file:
457
- today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
458
- filename = 'opt_res_dayahead_'+today.strftime("%Y_%m_%d")+'.csv'
670
+ today = datetime.now(timezone.utc).replace(
671
+ hour=0, minute=0, second=0, microsecond=0
672
+ )
673
+ filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
459
674
  else:
460
- filename = 'opt_res_latest.csv'
675
+ filename = "opt_res_latest.csv"
461
676
  if opt_res_latest is None:
462
- if not os.path.isfile(pathlib.Path(input_data_dict['root']) / filename):
463
- logger.error("File not found error, run an optimization task first.")
677
+ if not os.path.isfile(input_data_dict['emhass_conf']['data_path'] / filename):
678
+ logger.error(
679
+ "File not found error, run an optimization task first.")
464
680
  return
465
681
  else:
466
- opt_res_latest = pd.read_csv(pathlib.Path(input_data_dict['root']) / filename, index_col='timestamp')
682
+ opt_res_latest = pd.read_csv(
683
+ input_data_dict['emhass_conf']['data_path'] / filename, index_col='timestamp')
467
684
  opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
468
- opt_res_latest.index.freq = input_data_dict['retrieve_hass_conf']['freq']
685
+ opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"]["freq"]
469
686
  # Estimate the current index
470
- now_precise = datetime.now(input_data_dict['retrieve_hass_conf']['time_zone']).replace(second=0, microsecond=0)
471
- if input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'nearest':
472
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method='nearest')[0]
473
- elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'first':
474
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method='ffill')[0]
475
- elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'last':
476
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method='bfill')[0]
687
+ now_precise = datetime.now(
688
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
689
+ ).replace(second=0, microsecond=0)
690
+ if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
691
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
692
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
693
+ idx_closest = opt_res_latest.index.get_indexer(
694
+ [now_precise], method="ffill")[0]
695
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
696
+ idx_closest = opt_res_latest.index.get_indexer(
697
+ [now_precise], method="bfill")[0]
477
698
  if idx_closest == -1:
478
- idx_closest = opt_res_latest.index.get_indexer([now_precise], method='nearest')[0]
699
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
479
700
  # Publish the data
480
- params = json.loads(input_data_dict['params'])
481
- publish_prefix = params['passed_data']['publish_prefix']
701
+ params = json.loads(input_data_dict["params"])
702
+ publish_prefix = params["passed_data"]["publish_prefix"]
482
703
  # Publish PV forecast
483
- custom_pv_forecast_id = params['passed_data']['custom_pv_forecast_id']
484
- input_data_dict['rh'].post_data(opt_res_latest['P_PV'], idx_closest,
485
- custom_pv_forecast_id["entity_id"],
486
- custom_pv_forecast_id["unit_of_measurement"],
487
- custom_pv_forecast_id["friendly_name"],
488
- type_var = 'power',
489
- publish_prefix = publish_prefix)
704
+ custom_pv_forecast_id = params["passed_data"]["custom_pv_forecast_id"]
705
+ input_data_dict["rh"].post_data(
706
+ opt_res_latest["P_PV"],
707
+ idx_closest,
708
+ custom_pv_forecast_id["entity_id"],
709
+ custom_pv_forecast_id["unit_of_measurement"],
710
+ custom_pv_forecast_id["friendly_name"],
711
+ type_var="power",
712
+ publish_prefix=publish_prefix,
713
+ )
490
714
  # Publish Load forecast
491
- custom_load_forecast_id = params['passed_data']['custom_load_forecast_id']
492
- input_data_dict['rh'].post_data(opt_res_latest['P_Load'], idx_closest,
493
- custom_load_forecast_id["entity_id"],
494
- custom_load_forecast_id["unit_of_measurement"],
495
- custom_load_forecast_id["friendly_name"],
496
- type_var = 'power',
497
- publish_prefix = publish_prefix)
498
- cols_published = ['P_PV', 'P_Load']
715
+ custom_load_forecast_id = params["passed_data"]["custom_load_forecast_id"]
716
+ input_data_dict["rh"].post_data(
717
+ opt_res_latest["P_Load"],
718
+ idx_closest,
719
+ custom_load_forecast_id["entity_id"],
720
+ custom_load_forecast_id["unit_of_measurement"],
721
+ custom_load_forecast_id["friendly_name"],
722
+ type_var="power",
723
+ publish_prefix=publish_prefix,
724
+ )
725
+ cols_published = ["P_PV", "P_Load"]
499
726
  # Publish deferrable loads
500
- custom_deferrable_forecast_id = params['passed_data']['custom_deferrable_forecast_id']
501
- for k in range(input_data_dict['opt'].optim_conf['num_def_loads']):
727
+ custom_deferrable_forecast_id = params["passed_data"][
728
+ "custom_deferrable_forecast_id"
729
+ ]
730
+ for k in range(input_data_dict["opt"].optim_conf["num_def_loads"]):
502
731
  if "P_deferrable{}".format(k) not in opt_res_latest.columns:
503
- logger.error("P_deferrable{}".format(k)+" was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.")
732
+ logger.error(
733
+ "P_deferrable{}".format(k)
734
+ + " was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
735
+ )
504
736
  else:
505
- input_data_dict['rh'].post_data(opt_res_latest["P_deferrable{}".format(k)], idx_closest,
506
- custom_deferrable_forecast_id[k]["entity_id"],
507
- custom_deferrable_forecast_id[k]["unit_of_measurement"],
508
- custom_deferrable_forecast_id[k]["friendly_name"],
509
- type_var = 'deferrable',
510
- publish_prefix = publish_prefix)
511
- cols_published = cols_published+["P_deferrable{}".format(k)]
737
+ input_data_dict["rh"].post_data(
738
+ opt_res_latest["P_deferrable{}".format(k)],
739
+ idx_closest,
740
+ custom_deferrable_forecast_id[k]["entity_id"],
741
+ custom_deferrable_forecast_id[k]["unit_of_measurement"],
742
+ custom_deferrable_forecast_id[k]["friendly_name"],
743
+ type_var="deferrable",
744
+ publish_prefix=publish_prefix,
745
+ )
746
+ cols_published = cols_published + ["P_deferrable{}".format(k)]
512
747
  # Publish battery power
513
- if input_data_dict['opt'].optim_conf['set_use_battery']:
514
- if 'P_batt' not in opt_res_latest.columns:
515
- logger.error("P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.")
748
+ if input_data_dict["opt"].optim_conf["set_use_battery"]:
749
+ if "P_batt" not in opt_res_latest.columns:
750
+ logger.error(
751
+ "P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
752
+ )
516
753
  else:
517
- custom_batt_forecast_id = params['passed_data']['custom_batt_forecast_id']
518
- input_data_dict['rh'].post_data(opt_res_latest['P_batt'], idx_closest,
519
- custom_batt_forecast_id["entity_id"],
520
- custom_batt_forecast_id["unit_of_measurement"],
521
- custom_batt_forecast_id["friendly_name"],
522
- type_var = 'batt',
523
- publish_prefix = publish_prefix)
524
- cols_published = cols_published+["P_batt"]
525
- custom_batt_soc_forecast_id = params['passed_data']['custom_batt_soc_forecast_id']
526
- input_data_dict['rh'].post_data(opt_res_latest['SOC_opt']*100, idx_closest,
527
- custom_batt_soc_forecast_id["entity_id"],
528
- custom_batt_soc_forecast_id["unit_of_measurement"],
529
- custom_batt_soc_forecast_id["friendly_name"],
530
- type_var = 'SOC',
531
- publish_prefix = publish_prefix)
532
- cols_published = cols_published+["SOC_opt"]
754
+ custom_batt_forecast_id = params["passed_data"]["custom_batt_forecast_id"]
755
+ input_data_dict["rh"].post_data(
756
+ opt_res_latest["P_batt"],
757
+ idx_closest,
758
+ custom_batt_forecast_id["entity_id"],
759
+ custom_batt_forecast_id["unit_of_measurement"],
760
+ custom_batt_forecast_id["friendly_name"],
761
+ type_var="batt",
762
+ publish_prefix=publish_prefix,
763
+ )
764
+ cols_published = cols_published + ["P_batt"]
765
+ custom_batt_soc_forecast_id = params["passed_data"][
766
+ "custom_batt_soc_forecast_id"
767
+ ]
768
+ input_data_dict["rh"].post_data(
769
+ opt_res_latest["SOC_opt"] * 100,
770
+ idx_closest,
771
+ custom_batt_soc_forecast_id["entity_id"],
772
+ custom_batt_soc_forecast_id["unit_of_measurement"],
773
+ custom_batt_soc_forecast_id["friendly_name"],
774
+ type_var="SOC",
775
+ publish_prefix=publish_prefix,
776
+ )
777
+ cols_published = cols_published + ["SOC_opt"]
533
778
  # Publish grid power
534
- custom_grid_forecast_id = params['passed_data']['custom_grid_forecast_id']
535
- input_data_dict['rh'].post_data(opt_res_latest['P_grid'], idx_closest,
536
- custom_grid_forecast_id["entity_id"],
537
- custom_grid_forecast_id["unit_of_measurement"],
538
- custom_grid_forecast_id["friendly_name"],
539
- type_var = 'power',
540
- publish_prefix = publish_prefix)
541
- cols_published = cols_published+["P_grid"]
779
+ custom_grid_forecast_id = params["passed_data"]["custom_grid_forecast_id"]
780
+ input_data_dict["rh"].post_data(
781
+ opt_res_latest["P_grid"],
782
+ idx_closest,
783
+ custom_grid_forecast_id["entity_id"],
784
+ custom_grid_forecast_id["unit_of_measurement"],
785
+ custom_grid_forecast_id["friendly_name"],
786
+ type_var="power",
787
+ publish_prefix=publish_prefix,
788
+ )
789
+ cols_published = cols_published + ["P_grid"]
542
790
  # Publish total value of cost function
543
- custom_cost_fun_id = params['passed_data']['custom_cost_fun_id']
544
- col_cost_fun = [i for i in opt_res_latest.columns if 'cost_fun_' in i]
545
- input_data_dict['rh'].post_data(opt_res_latest[col_cost_fun], idx_closest,
546
- custom_cost_fun_id["entity_id"],
547
- custom_cost_fun_id["unit_of_measurement"],
548
- custom_cost_fun_id["friendly_name"],
549
- type_var = 'cost_fun',
550
- publish_prefix = publish_prefix)
791
+ custom_cost_fun_id = params["passed_data"]["custom_cost_fun_id"]
792
+ col_cost_fun = [i for i in opt_res_latest.columns if "cost_fun_" in i]
793
+ input_data_dict["rh"].post_data(
794
+ opt_res_latest[col_cost_fun],
795
+ idx_closest,
796
+ custom_cost_fun_id["entity_id"],
797
+ custom_cost_fun_id["unit_of_measurement"],
798
+ custom_cost_fun_id["friendly_name"],
799
+ type_var="cost_fun",
800
+ publish_prefix=publish_prefix,
801
+ )
551
802
  # Publish the optimization status
552
- custom_cost_fun_id = params['passed_data']['custom_optim_status_id']
803
+ custom_cost_fun_id = params["passed_data"]["custom_optim_status_id"]
553
804
  if "optim_status" not in opt_res_latest:
554
- opt_res_latest["optim_status"] = 'Optimal'
555
- logger.warning("no optim_status in opt_res_latest, run an optimization task first")
556
- input_data_dict['rh'].post_data(opt_res_latest['optim_status'], idx_closest,
557
- custom_cost_fun_id["entity_id"],
558
- custom_cost_fun_id["unit_of_measurement"],
559
- custom_cost_fun_id["friendly_name"],
560
- type_var = 'optim_status',
561
- publish_prefix = publish_prefix)
562
- cols_published = cols_published+["optim_status"]
805
+ opt_res_latest["optim_status"] = "Optimal"
806
+ logger.warning(
807
+ "no optim_status in opt_res_latest, run an optimization task first",
808
+ )
809
+ input_data_dict["rh"].post_data(
810
+ opt_res_latest["optim_status"],
811
+ idx_closest,
812
+ custom_cost_fun_id["entity_id"],
813
+ custom_cost_fun_id["unit_of_measurement"],
814
+ custom_cost_fun_id["friendly_name"],
815
+ type_var="optim_status",
816
+ publish_prefix=publish_prefix,
817
+ )
818
+ cols_published = cols_published + ["optim_status"]
563
819
  # Publish unit_load_cost
564
- custom_unit_load_cost_id = params['passed_data']['custom_unit_load_cost_id']
565
- input_data_dict['rh'].post_data(opt_res_latest['unit_load_cost'], idx_closest,
566
- custom_unit_load_cost_id["entity_id"],
567
- custom_unit_load_cost_id["unit_of_measurement"],
568
- custom_unit_load_cost_id["friendly_name"],
569
- type_var = 'unit_load_cost',
570
- publish_prefix = publish_prefix)
571
- cols_published = cols_published+["unit_load_cost"]
820
+ custom_unit_load_cost_id = params["passed_data"]["custom_unit_load_cost_id"]
821
+ input_data_dict["rh"].post_data(
822
+ opt_res_latest["unit_load_cost"],
823
+ idx_closest,
824
+ custom_unit_load_cost_id["entity_id"],
825
+ custom_unit_load_cost_id["unit_of_measurement"],
826
+ custom_unit_load_cost_id["friendly_name"],
827
+ type_var="unit_load_cost",
828
+ publish_prefix=publish_prefix,
829
+ )
830
+ cols_published = cols_published + ["unit_load_cost"]
572
831
  # Publish unit_prod_price
573
- custom_unit_prod_price_id = params['passed_data']['custom_unit_prod_price_id']
574
- input_data_dict['rh'].post_data(opt_res_latest['unit_prod_price'], idx_closest,
575
- custom_unit_prod_price_id["entity_id"],
576
- custom_unit_prod_price_id["unit_of_measurement"],
577
- custom_unit_prod_price_id["friendly_name"],
578
- type_var = 'unit_prod_price',
579
- publish_prefix = publish_prefix)
580
- cols_published = cols_published+["unit_prod_price"]
832
+ custom_unit_prod_price_id = params["passed_data"]["custom_unit_prod_price_id"]
833
+ input_data_dict["rh"].post_data(
834
+ opt_res_latest["unit_prod_price"],
835
+ idx_closest,
836
+ custom_unit_prod_price_id["entity_id"],
837
+ custom_unit_prod_price_id["unit_of_measurement"],
838
+ custom_unit_prod_price_id["friendly_name"],
839
+ type_var="unit_prod_price",
840
+ publish_prefix=publish_prefix,
841
+ )
842
+ cols_published = cols_published + ["unit_prod_price"]
581
843
  # Create a DF resuming what has been published
582
- opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
844
+ opt_res = opt_res_latest[cols_published].loc[[
845
+ opt_res_latest.index[idx_closest]]]
583
846
  return opt_res
584
-
585
-
847
+
848
+
586
849
  def main():
587
850
  r"""Define the main command line entry function.
588
851
 
589
852
  This function may take several arguments as inputs. You can type `emhass --help` to see the list of options:
590
-
853
+
591
854
  - action: Set the desired action, options are: perfect-optim, dayahead-optim,
592
855
  naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune
593
-
856
+
594
857
  - config: Define path to the config.yaml file
595
-
858
+
596
859
  - costfun: Define the type of cost function, options are: profit, cost, self-consumption
597
-
860
+
598
861
  - log2file: Define if we should log to a file or not
599
-
862
+
600
863
  - params: Configuration parameters passed from data/options.json if using the add-on
601
-
864
+
602
865
  - runtimeparams: Pass runtime optimization parameters as dictionnary
603
-
866
+
604
867
  - debug: Use True for testing purposes
605
-
868
+
606
869
  """
607
870
  # Parsing arguments
608
871
  parser = argparse.ArgumentParser()
609
872
  parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim,\
610
873
  naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune')
611
- parser.add_argument('--config', type=str, help='Define path to the config.yaml file')
612
- parser.add_argument('--costfun', type=str, default='profit', help='Define the type of cost function, options are: profit, cost, self-consumption')
613
- parser.add_argument('--log2file', type=strtobool, default='False', help='Define if we should log to a file or not')
614
- parser.add_argument('--params', type=str, default=None, help='Configuration parameters passed from data/options.json')
615
- parser.add_argument('--runtimeparams', type=str, default=None, help='Pass runtime optimization parameters as dictionnary')
616
- parser.add_argument('--debug', type=strtobool, default='False', help='Use True for testing purposes')
874
+ parser.add_argument('--config', type=str,
875
+ help='Define path to the config.yaml file')
876
+ parser.add_argument('--data', type=str,
877
+ help='Define path to the Data files (.csv & .pkl)')
878
+ parser.add_argument('--root', type=str, help='Define path emhass root')
879
+ parser.add_argument('--costfun', type=str, default='profit',
880
+ help='Define the type of cost function, options are: profit, cost, self-consumption')
881
+ parser.add_argument('--log2file', type=strtobool, default='False',
882
+ help='Define if we should log to a file or not')
883
+ parser.add_argument('--params', type=str, default=None,
884
+ help='Configuration parameters passed from data/options.json')
885
+ parser.add_argument('--runtimeparams', type=str, default=None,
886
+ help='Pass runtime optimization parameters as dictionnary')
887
+ parser.add_argument('--debug', type=strtobool,
888
+ default='False', help='Use True for testing purposes')
617
889
  args = parser.parse_args()
618
890
  # The path to the configuration files
619
- config_path = pathlib.Path(args.config)
620
- base_path = str(config_path.parent)
891
+ if args.config is not None:
892
+ config_path = pathlib.Path(args.config)
893
+ else:
894
+ config_path = pathlib.Path(
895
+ str(utils.get_root(__file__, num_parent=2) / 'config_emhass.yaml'))
896
+ if args.data is not None:
897
+ data_path = pathlib.Path(args.data)
898
+ else:
899
+ data_path = (config_path.parent / 'data/')
900
+ if args.root is not None:
901
+ root_path = pathlib.Path(args.root)
902
+ else:
903
+ root_path = config_path.parent
904
+ emhass_conf = {}
905
+ emhass_conf['config_path'] = config_path
906
+ emhass_conf['data_path'] = data_path
907
+ emhass_conf['root_path'] = root_path
621
908
  # create logger
622
- logger, ch = utils.get_logger(__name__, base_path, save_to_file=bool(args.log2file))
909
+ logger, ch = utils.get_logger(
910
+ __name__, emhass_conf, save_to_file=bool(args.log2file))
911
+ logger.debug("config path: " + str(config_path))
912
+ logger.debug("data path: " + str(data_path))
913
+ logger.debug("root path: " + str(root_path))
914
+ if not config_path.exists():
915
+ logger.error(
916
+ "Could not find config_emhass.yaml file in: " + str(config_path))
917
+ logger.error("Try setting config file path with --config")
918
+ return False
919
+ if not os.path.isdir(data_path):
920
+ logger.error("Could not find data foulder in: " + str(data_path))
921
+ logger.error("Try setting data path with --data")
922
+ return False
923
+ if not os.path.isdir(root_path / 'src'):
924
+ logger.error("Could not find emhass/src foulder in: " + str(root_path))
925
+ logger.error("Try setting emhass root path with --root")
926
+ return False
623
927
  # Additionnal argument
624
928
  try:
625
- parser.add_argument('--version', action='version', version='%(prog)s '+version('emhass'))
929
+ parser.add_argument(
930
+ "--version",
931
+ action="version",
932
+ version="%(prog)s " + version("emhass"),
933
+ )
626
934
  args = parser.parse_args()
627
935
  except Exception:
628
- logger.info("Version not found for emhass package. Or importlib exited with PackageNotFoundError.")
936
+ logger.info(
937
+ "Version not found for emhass package. Or importlib exited with PackageNotFoundError.",
938
+ )
629
939
  # Setup parameters
630
- input_data_dict = set_input_data_dict(config_path, base_path,
631
- args.costfun, args.params, args.runtimeparams, args.action,
940
+ input_data_dict = set_input_data_dict(emhass_conf,
941
+ args.costfun, args.params, args.runtimeparams, args.action,
632
942
  logger, args.debug)
633
943
  # Perform selected action
634
- if args.action == 'perfect-optim':
635
- opt_res = perfect_forecast_optim(input_data_dict, logger, debug=args.debug)
636
- elif args.action == 'dayahead-optim':
637
- opt_res = dayahead_forecast_optim(input_data_dict, logger, debug=args.debug)
638
- elif args.action == 'naive-mpc-optim':
944
+ if args.action == "perfect-optim":
945
+ opt_res = perfect_forecast_optim(
946
+ input_data_dict, logger, debug=args.debug)
947
+ elif args.action == "dayahead-optim":
948
+ opt_res = dayahead_forecast_optim(
949
+ input_data_dict, logger, debug=args.debug)
950
+ elif args.action == "naive-mpc-optim":
639
951
  opt_res = naive_mpc_optim(input_data_dict, logger, debug=args.debug)
640
- elif args.action == 'forecast-model-fit':
641
- df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
952
+ elif args.action == "forecast-model-fit":
953
+ df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(
954
+ input_data_dict, logger, debug=args.debug
955
+ )
642
956
  opt_res = None
643
- elif args.action == 'forecast-model-predict':
957
+ elif args.action == "forecast-model-predict":
644
958
  if args.debug:
645
959
  _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
646
960
  else:
647
961
  mlf = None
648
962
  df_pred = forecast_model_predict(input_data_dict, logger, debug=args.debug, mlf=mlf)
649
963
  opt_res = None
650
- elif args.action == 'forecast-model-tune':
964
+ elif args.action == "forecast-model-tune":
651
965
  if args.debug:
652
966
  _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
653
967
  else:
654
968
  mlf = None
655
969
  df_pred_optim, mlf = forecast_model_tune(input_data_dict, logger, debug=args.debug, mlf=mlf)
656
970
  opt_res = None
657
- elif args.action == 'publish-data':
971
+ elif args.action == "regressor-model-fit":
972
+ mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
973
+ opt_res = None
974
+ elif args.action == "regressor-model-predict":
975
+ if args.debug:
976
+ mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
977
+ else:
978
+ mlr = None
979
+ prediction = regressor_model_predict(input_data_dict, logger, debug=args.debug,mlr=mlr)
980
+ opt_res = None
981
+ elif args.action == "publish-data":
658
982
  opt_res = publish_data(input_data_dict, logger)
659
983
  else:
660
984
  logger.error("The passed action argument is not valid")
985
+ logger.error("Try setting --action: perfect-optim, dayahead-optim, naive-mpc-optim, forecast-model-fit, forecast-model-predict, forecast-model-tune or publish-data")
661
986
  opt_res = None
662
987
  logger.info(opt_res)
663
988
  # Flush the logger
664
989
  ch.close()
665
990
  logger.removeHandler(ch)
666
- if args.action == 'perfect-optim' or args.action == 'dayahead-optim' or \
667
- args.action == 'naive-mpc-optim' or args.action == 'publish-data':
991
+ if (
992
+ args.action == "perfect-optim"
993
+ or args.action == "dayahead-optim"
994
+ or args.action == "naive-mpc-optim"
995
+ or args.action == "publish-data"
996
+ ):
668
997
  return opt_res
669
- elif args.action == 'forecast-model-fit':
998
+ elif args.action == "forecast-model-fit":
670
999
  return df_fit_pred, df_fit_pred_backtest, mlf
671
- elif args.action == 'forecast-model-predict':
1000
+ elif args.action == "forecast-model-predict":
672
1001
  return df_pred
673
- elif args.action == 'forecast-model-tune':
1002
+ elif args.action == "regressor-model-fit":
1003
+ return mlr
1004
+ elif args.action == "regressor-model-predict":
1005
+ return prediction
1006
+ elif args.action == "forecast-model-tune":
674
1007
  return df_pred_optim, mlf
1008
+ else:
1009
+ return opt_res
1010
+
675
1011
 
676
- if __name__ == '__main__':
1012
+ if __name__ == "__main__":
677
1013
  main()
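
On the command line, main() gains two path arguments, --data and --root, which feed the data_path and root_path entries of emhass_conf (their defaults are derived from the --config location), and --action now also accepts regressor-model-fit and regressor-model-predict. An illustrative invocation, with paths adjusted to the local installation, would be: emhass --action regressor-model-fit --config /app/config_emhass.yaml --data /app/data --root /app --costfun profit, passing the regressor runtime parameters through --runtimeparams as in the sketches above.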