emhass 0.8.6__py3-none-any.whl → 0.9.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
emhass/utils.py CHANGED
@@ -2,10 +2,19 @@
2
2
  # -*- coding: utf-8 -*-
3
3
 
4
4
  from typing import Tuple, Optional
5
- import numpy as np, pandas as pd
6
- import yaml, pytz, logging, pathlib, json, copy
7
5
  from datetime import datetime, timedelta, timezone
6
+ import logging
7
+ import pathlib
8
+ import json
9
+ import copy
10
+ import numpy as np
11
+ import pandas as pd
12
+ import yaml
13
+ import pytz
14
+
15
+
8
16
  import plotly.express as px
17
+
9
18
  pd.options.plotting.backend = "plotly"
10
19
 
11
20
  from emhass.machine_learning_forecaster import MLForecaster
@@ -14,13 +23,13 @@ from emhass.machine_learning_forecaster import MLForecaster
14
23
  def get_root(file: str, num_parent: Optional[int] = 3) -> str:
15
24
  """
16
25
  Get the root absolute path of the working directory.
17
-
26
+
18
27
  :param file: The passed file path with __file__
19
28
  :return: The root path
20
29
  :param num_parent: The number of parents levels up to desired root folder
21
30
  :type num_parent: int, optional
22
31
  :rtype: str
23
-
32
+
24
33
  """
25
34
  if num_parent == 3:
26
35
  root = pathlib.Path(file).resolve().parent.parent.parent
@@ -32,27 +41,28 @@ def get_root(file: str, num_parent: Optional[int] = 3) -> str:
32
41
  raise ValueError("num_parent value not valid, must be between 1 and 3")
33
42
  return root
34
43
 
35
- def get_logger(fun_name: str, config_path: str, save_to_file: Optional[bool] = True,
44
+
45
+ def get_logger(fun_name: str, emhass_conf: dict, save_to_file: Optional[bool] = True,
36
46
  logging_level: Optional[str] = "DEBUG") -> Tuple[logging.Logger, logging.StreamHandler]:
37
47
  """
38
48
  Create a simple logger object.
39
-
49
+
40
50
  :param fun_name: The Python function object name where the logger will be used
41
51
  :type fun_name: str
42
- :param config_path: The path to the yaml configuration file
43
- :type config_path: str
52
+ :param emhass_conf: Dictionary containing the needed emhass paths
53
+ :type emhass_conf: dict
44
54
  :param save_to_file: Write log to a file, defaults to True
45
55
  :type save_to_file: bool, optional
46
56
  :return: The logger object and the handler
47
57
  :rtype: object
48
-
58
+
49
59
  """
50
- # create logger object
60
+ # create logger object
51
61
  logger = logging.getLogger(fun_name)
52
62
  logger.propagate = True
53
63
  logger.fileSetting = save_to_file
54
64
  if save_to_file:
55
- ch = logging.FileHandler(config_path + '/data/logger_emhass.log')
65
+ ch = logging.FileHandler(emhass_conf['data_path'] / 'logger_emhass.log')
56
66
  else:
57
67
  ch = logging.StreamHandler()
58
68
  if logging_level == "DEBUG":
@@ -70,14 +80,17 @@ def get_logger(fun_name: str, config_path: str, save_to_file: Optional[bool] = T
70
80
  else:
71
81
  logger.setLevel(logging.DEBUG)
72
82
  ch.setLevel(logging.DEBUG)
73
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
83
+ formatter = logging.Formatter(
84
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
85
+ )
74
86
  ch.setFormatter(formatter)
75
87
  logger.addHandler(ch)
76
88
 
77
89
  return logger, ch
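In 0.9.1 get_logger takes a dictionary of emhass paths instead of the former config_path string, and writes its log file under emhass_conf['data_path']. A minimal sketch of the new call; the path values below are hypothetical, only the dictionary keys are taken from this diff:

    import pathlib
    from emhass.utils import get_logger

    emhass_conf = {
        'data_path': pathlib.Path('/app/data'),    # logger_emhass.log is written here
        'root_path': pathlib.Path('/app'),         # used further down for secrets_emhass.yaml
        'config_path': pathlib.Path('/app/config_emhass.yaml'),
    }
    logger, ch = get_logger(__name__, emhass_conf, save_to_file=True, logging_level="DEBUG")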
78
90
 
79
- def get_forecast_dates(freq: int, delta_forecast: int,
80
- timedelta_days: Optional[int] = 0) -> pd.core.indexes.datetimes.DatetimeIndex:
91
+
92
+ def get_forecast_dates(freq: int, delta_forecast: int, timedelta_days: Optional[int] = 0
93
+ ) -> pd.core.indexes.datetimes.DatetimeIndex:
81
94
  """
82
95
  Get the date_range list of the needed future dates using the delta_forecast parameter.
83
96
 
@@ -89,7 +102,7 @@ def get_forecast_dates(freq: int, delta_forecast: int,
89
102
  :type timedelta_days: Optional[int], optional
90
103
  :return: A list of future forecast dates.
91
104
  :rtype: pd.core.indexes.datetimes.DatetimeIndex
92
-
105
+
93
106
  """
94
107
  freq = pd.to_timedelta(freq, "minutes")
95
108
  start_forecast = pd.Timestamp(datetime.now()).replace(hour=0, minute=0, second=0, microsecond=0)
@@ -99,11 +112,13 @@ def get_forecast_dates(freq: int, delta_forecast: int,
99
112
  freq=freq).round(freq, ambiguous='infer', nonexistent='shift_forward')
100
113
  return forecast_dates
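get_forecast_dates expects the optimization time step in minutes and the forecast window in days, matching how treat_runtimeparams derives them below. A short usage sketch with illustrative values:

    from emhass.utils import get_forecast_dates

    # 30 min time step over a 1 day window, starting from today at 00:00
    forecast_dates = get_forecast_dates(freq=30, delta_forecast=1)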
101
114
 
102
- def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dict, optim_conf: dict, plant_conf: dict,
103
- set_type: str, logger: logging.Logger) -> Tuple[str, dict]:
115
+
116
+ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dict, optim_conf: dict,
117
+ plant_conf: dict, set_type: str, logger: logging.Logger
118
+ ) -> Tuple[str, dict]:
104
119
  """
105
- Treat the passed optimization runtime parameters.
106
-
120
+ Treat the passed optimization runtime parameters.
121
+
107
122
  :param runtimeparams: Json string containing the runtime parameters dict.
108
123
  :type runtimeparams: str
109
124
  :param params: Configuration parameters passed from data/options.json
@@ -120,93 +135,167 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
120
135
  :type logger: logging.Logger
121
136
  :return: Returning the params and optimization parameter container.
122
137
  :rtype: Tuple[str, dict]
123
-
138
+
124
139
  """
125
- if (params != None) and (params != 'null'):
140
+ if (params != None) and (params != "null"):
126
141
  params = json.loads(params)
127
142
  else:
128
143
  params = {}
129
144
  # Some default data needed
130
145
  custom_deferrable_forecast_id = []
131
- for k in range(optim_conf['num_def_loads']):
132
- custom_deferrable_forecast_id.append({
133
- "entity_id": "sensor.p_deferrable{}".format(k),
134
- "unit_of_measurement": "W",
135
- "friendly_name": "Deferrable Load {}".format(k)
136
- })
137
- default_passed_dict = {'custom_pv_forecast_id': {"entity_id": "sensor.p_pv_forecast", "unit_of_measurement": "W", "friendly_name": "PV Power Forecast"},
138
- 'custom_load_forecast_id': {"entity_id": "sensor.p_load_forecast", "unit_of_measurement": "W", "friendly_name": "Load Power Forecast"},
139
- 'custom_batt_forecast_id': {"entity_id": "sensor.p_batt_forecast", "unit_of_measurement": "W", "friendly_name": "Battery Power Forecast"},
140
- 'custom_batt_soc_forecast_id': {"entity_id": "sensor.soc_batt_forecast", "unit_of_measurement": "%", "friendly_name": "Battery SOC Forecast"},
141
- 'custom_grid_forecast_id': {"entity_id": "sensor.p_grid_forecast", "unit_of_measurement": "W", "friendly_name": "Grid Power Forecast"},
142
- 'custom_cost_fun_id': {"entity_id": "sensor.total_cost_fun_value", "unit_of_measurement": "", "friendly_name": "Total cost function value"},
143
- 'custom_optim_status_id': {"entity_id": "sensor.optim_status", "unit_of_measurement": "", "friendly_name": "EMHASS optimization status"},
144
- 'custom_unit_load_cost_id': {"entity_id": "sensor.unit_load_cost", "unit_of_measurement": "€/kWh", "friendly_name": "Unit Load Cost"},
145
- 'custom_unit_prod_price_id': {"entity_id": "sensor.unit_prod_price", "unit_of_measurement": "€/kWh", "friendly_name": "Unit Prod Price"},
146
- 'custom_deferrable_forecast_id': custom_deferrable_forecast_id,
147
- 'publish_prefix': ""}
148
- if 'passed_data' in params.keys():
146
+ for k in range(optim_conf["num_def_loads"]):
147
+ custom_deferrable_forecast_id.append(
148
+ {
149
+ "entity_id": "sensor.p_deferrable{}".format(k),
150
+ "unit_of_measurement": "W",
151
+ "friendly_name": "Deferrable Load {}".format(k),
152
+ }
153
+ )
154
+ default_passed_dict = {
155
+ "custom_pv_forecast_id": {
156
+ "entity_id": "sensor.p_pv_forecast",
157
+ "unit_of_measurement": "W",
158
+ "friendly_name": "PV Power Forecast",
159
+ },
160
+ "custom_load_forecast_id": {
161
+ "entity_id": "sensor.p_load_forecast",
162
+ "unit_of_measurement": "W",
163
+ "friendly_name": "Load Power Forecast",
164
+ },
165
+ "custom_batt_forecast_id": {
166
+ "entity_id": "sensor.p_batt_forecast",
167
+ "unit_of_measurement": "W",
168
+ "friendly_name": "Battery Power Forecast",
169
+ },
170
+ "custom_batt_soc_forecast_id": {
171
+ "entity_id": "sensor.soc_batt_forecast",
172
+ "unit_of_measurement": "%",
173
+ "friendly_name": "Battery SOC Forecast",
174
+ },
175
+ "custom_grid_forecast_id": {
176
+ "entity_id": "sensor.p_grid_forecast",
177
+ "unit_of_measurement": "W",
178
+ "friendly_name": "Grid Power Forecast",
179
+ },
180
+ "custom_cost_fun_id": {
181
+ "entity_id": "sensor.total_cost_fun_value",
182
+ "unit_of_measurement": "",
183
+ "friendly_name": "Total cost function value",
184
+ },
185
+ "custom_optim_status_id": {
186
+ "entity_id": "sensor.optim_status",
187
+ "unit_of_measurement": "",
188
+ "friendly_name": "EMHASS optimization status",
189
+ },
190
+ "custom_unit_load_cost_id": {
191
+ "entity_id": "sensor.unit_load_cost",
192
+ "unit_of_measurement": "€/kWh",
193
+ "friendly_name": "Unit Load Cost",
194
+ },
195
+ "custom_unit_prod_price_id": {
196
+ "entity_id": "sensor.unit_prod_price",
197
+ "unit_of_measurement": "€/kWh",
198
+ "friendly_name": "Unit Prod Price",
199
+ },
200
+ "custom_deferrable_forecast_id": custom_deferrable_forecast_id,
201
+ "publish_prefix": "",
202
+ }
203
+ if "passed_data" in params.keys():
149
204
  for key, value in default_passed_dict.items():
150
- params['passed_data'][key] = value
205
+ params["passed_data"][key] = value
151
206
  else:
152
- params['passed_data'] = default_passed_dict
207
+ params["passed_data"] = default_passed_dict
153
208
  if runtimeparams is not None:
154
209
  runtimeparams = json.loads(runtimeparams)
155
- freq = int(retrieve_hass_conf['freq'].seconds/60.0)
156
- delta_forecast = int(optim_conf['delta_forecast'].days)
210
+ freq = int(retrieve_hass_conf["freq"].seconds / 60.0)
211
+ delta_forecast = int(optim_conf["delta_forecast"].days)
157
212
  forecast_dates = get_forecast_dates(freq, delta_forecast)
213
+ if set_type == "regressor-model-fit":
214
+ if "csv_file" in runtimeparams:
215
+ csv_file = runtimeparams["csv_file"]
216
+ params["passed_data"]["csv_file"] = csv_file
217
+ if "features" in runtimeparams:
218
+ features = runtimeparams["features"]
219
+ params["passed_data"]["features"] = features
220
+ if "target" in runtimeparams:
221
+ target = runtimeparams["target"]
222
+ params["passed_data"]["target"] = target
223
+ if "timestamp" not in runtimeparams:
224
+ params["passed_data"]["timestamp"] = None
225
+ else:
226
+ timestamp = runtimeparams["timestamp"]
227
+ params["passed_data"]["timestamp"] = timestamp
228
+ if "date_features" not in runtimeparams:
229
+ params["passed_data"]["date_features"] = []
230
+ else:
231
+ date_features = runtimeparams["date_features"]
232
+ params["passed_data"]["date_features"] = date_features
233
+ if set_type == "regressor-model-predict":
234
+ if "new_values" in runtimeparams:
235
+ new_values = runtimeparams["new_values"]
236
+ params["passed_data"]["new_values"] = new_values
237
+ if "csv_file" in runtimeparams:
238
+ csv_file = runtimeparams["csv_file"]
239
+ params["passed_data"]["csv_file"] = csv_file
240
+ if "features" in runtimeparams:
241
+ features = runtimeparams["features"]
242
+ params["passed_data"]["features"] = features
243
+ if "target" in runtimeparams:
244
+ target = runtimeparams["target"]
245
+ params["passed_data"]["target"] = target
246
+
158
247
  # Treating special data passed for MPC control case
159
- if set_type == 'naive-mpc-optim':
160
- if 'prediction_horizon' not in runtimeparams.keys():
161
- prediction_horizon = 10 # 10 time steps by default
248
+ if set_type == "naive-mpc-optim":
249
+ if "prediction_horizon" not in runtimeparams.keys():
250
+ prediction_horizon = 10 # 10 time steps by default
162
251
  else:
163
- prediction_horizon = runtimeparams['prediction_horizon']
164
- params['passed_data']['prediction_horizon'] = prediction_horizon
165
- if 'soc_init' not in runtimeparams.keys():
166
- soc_init = plant_conf['SOCtarget']
252
+ prediction_horizon = runtimeparams["prediction_horizon"]
253
+ params["passed_data"]["prediction_horizon"] = prediction_horizon
254
+ if "soc_init" not in runtimeparams.keys():
255
+ soc_init = plant_conf["SOCtarget"]
167
256
  else:
168
- soc_init = runtimeparams['soc_init']
169
- params['passed_data']['soc_init'] = soc_init
170
- if 'soc_final' not in runtimeparams.keys():
171
- soc_final = plant_conf['SOCtarget']
257
+ soc_init = runtimeparams["soc_init"]
258
+ params["passed_data"]["soc_init"] = soc_init
259
+ if "soc_final" not in runtimeparams.keys():
260
+ soc_final = plant_conf["SOCtarget"]
172
261
  else:
173
- soc_final = runtimeparams['soc_final']
174
- params['passed_data']['soc_final'] = soc_final
175
- if 'def_total_hours' not in runtimeparams.keys():
176
- def_total_hours = optim_conf['def_total_hours']
262
+ soc_final = runtimeparams["soc_final"]
263
+ params["passed_data"]["soc_final"] = soc_final
264
+ if "def_total_hours" not in runtimeparams.keys():
265
+ def_total_hours = optim_conf["def_total_hours"]
177
266
  else:
178
- def_total_hours = runtimeparams['def_total_hours']
179
- params['passed_data']['def_total_hours'] = def_total_hours
180
- if 'def_start_timestep' not in runtimeparams.keys():
181
- def_start_timestep = optim_conf['def_start_timestep']
267
+ def_total_hours = runtimeparams["def_total_hours"]
268
+ params["passed_data"]["def_total_hours"] = def_total_hours
269
+ if "def_start_timestep" not in runtimeparams.keys():
270
+ def_start_timestep = optim_conf["def_start_timestep"]
182
271
  else:
183
- def_start_timestep = runtimeparams['def_start_timestep']
184
- params['passed_data']['def_start_timestep'] = def_start_timestep
185
- if 'def_end_timestep' not in runtimeparams.keys():
186
- def_end_timestep = optim_conf['def_end_timestep']
272
+ def_start_timestep = runtimeparams["def_start_timestep"]
273
+ params["passed_data"]["def_start_timestep"] = def_start_timestep
274
+ if "def_end_timestep" not in runtimeparams.keys():
275
+ def_end_timestep = optim_conf["def_end_timestep"]
187
276
  else:
188
- def_end_timestep = runtimeparams['def_end_timestep']
189
- params['passed_data']['def_end_timestep'] = def_end_timestep
190
- if 'alpha' not in runtimeparams.keys():
277
+ def_end_timestep = runtimeparams["def_end_timestep"]
278
+ params["passed_data"]["def_end_timestep"] = def_end_timestep
279
+ if "alpha" not in runtimeparams.keys():
191
280
  alpha = 0.5
192
281
  else:
193
- alpha = runtimeparams['alpha']
194
- params['passed_data']['alpha'] = alpha
195
- if 'beta' not in runtimeparams.keys():
282
+ alpha = runtimeparams["alpha"]
283
+ params["passed_data"]["alpha"] = alpha
284
+ if "beta" not in runtimeparams.keys():
196
285
  beta = 0.5
197
286
  else:
198
- beta = runtimeparams['beta']
199
- params['passed_data']['beta'] = beta
287
+ beta = runtimeparams["beta"]
288
+ params["passed_data"]["beta"] = beta
200
289
  forecast_dates = copy.deepcopy(forecast_dates)[0:prediction_horizon]
201
290
  else:
202
- params['passed_data']['prediction_horizon'] = None
203
- params['passed_data']['soc_init'] = None
204
- params['passed_data']['soc_final'] = None
205
- params['passed_data']['def_total_hours'] = None
206
- params['passed_data']['def_start_timestep'] = None
207
- params['passed_data']['def_end_timestep'] = None
208
- params['passed_data']['alpha'] = None
209
- params['passed_data']['beta'] = None
291
+ params["passed_data"]["prediction_horizon"] = None
292
+ params["passed_data"]["soc_init"] = None
293
+ params["passed_data"]["soc_final"] = None
294
+ params["passed_data"]["def_total_hours"] = None
295
+ params["passed_data"]["def_start_timestep"] = None
296
+ params["passed_data"]["def_end_timestep"] = None
297
+ params["passed_data"]["alpha"] = None
298
+ params["passed_data"]["beta"] = None
210
299
  # Treat passed forecast data lists
211
300
  list_forecast_key = ['pv_power_forecast', 'load_power_forecast', 'load_cost_forecast', 'prod_price_forecast']
212
301
  forecast_methods = ['weather_forecast_method', 'load_forecast_method', 'load_cost_forecast_method', 'prod_price_forecast_method']
@@ -226,130 +315,196 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
226
315
  else:
227
316
  params['passed_data'][forecast_key] = None
228
317
  # Treat passed data for forecast model fit/predict/tune at runtime
229
- if 'days_to_retrieve' not in runtimeparams.keys():
318
+ if "days_to_retrieve" not in runtimeparams.keys():
230
319
  days_to_retrieve = 9
231
320
  else:
232
- days_to_retrieve = runtimeparams['days_to_retrieve']
233
- params['passed_data']['days_to_retrieve'] = days_to_retrieve
234
- if 'model_type' not in runtimeparams.keys():
321
+ days_to_retrieve = runtimeparams["days_to_retrieve"]
322
+ params["passed_data"]["days_to_retrieve"] = days_to_retrieve
323
+ if "model_type" not in runtimeparams.keys():
235
324
  model_type = "load_forecast"
236
325
  else:
237
- model_type = runtimeparams['model_type']
238
- params['passed_data']['model_type'] = model_type
239
- if 'var_model' not in runtimeparams.keys():
326
+ model_type = runtimeparams["model_type"]
327
+ params["passed_data"]["model_type"] = model_type
328
+ if "var_model" not in runtimeparams.keys():
240
329
  var_model = "sensor.power_load_no_var_loads"
241
330
  else:
242
- var_model = runtimeparams['var_model']
243
- params['passed_data']['var_model'] = var_model
244
- if 'sklearn_model' not in runtimeparams.keys():
331
+ var_model = runtimeparams["var_model"]
332
+ params["passed_data"]["var_model"] = var_model
333
+ if "sklearn_model" not in runtimeparams.keys():
245
334
  sklearn_model = "KNeighborsRegressor"
246
335
  else:
247
- sklearn_model = runtimeparams['sklearn_model']
248
- params['passed_data']['sklearn_model'] = sklearn_model
249
- if 'num_lags' not in runtimeparams.keys():
336
+ sklearn_model = runtimeparams["sklearn_model"]
337
+ params["passed_data"]["sklearn_model"] = sklearn_model
338
+ if "regression_model" not in runtimeparams.keys():
339
+ regression_model = "AdaBoostRegression"
340
+ else:
341
+ regression_model = runtimeparams["regression_model"]
342
+ params["passed_data"]["regression_model"] = regression_model
343
+ if "num_lags" not in runtimeparams.keys():
250
344
  num_lags = 48
251
345
  else:
252
- num_lags = runtimeparams['num_lags']
253
- params['passed_data']['num_lags'] = num_lags
254
- if 'split_date_delta' not in runtimeparams.keys():
255
- split_date_delta = '48h'
346
+ num_lags = runtimeparams["num_lags"]
347
+ params["passed_data"]["num_lags"] = num_lags
348
+ if "split_date_delta" not in runtimeparams.keys():
349
+ split_date_delta = "48h"
256
350
  else:
257
- split_date_delta = runtimeparams['split_date_delta']
258
- params['passed_data']['split_date_delta'] = split_date_delta
259
- if 'perform_backtest' not in runtimeparams.keys():
351
+ split_date_delta = runtimeparams["split_date_delta"]
352
+ params["passed_data"]["split_date_delta"] = split_date_delta
353
+ if "perform_backtest" not in runtimeparams.keys():
260
354
  perform_backtest = False
261
355
  else:
262
- perform_backtest = eval(str(runtimeparams['perform_backtest']).capitalize())
263
- params['passed_data']['perform_backtest'] = perform_backtest
264
- if 'model_predict_publish' not in runtimeparams.keys():
356
+ perform_backtest = eval(str(runtimeparams["perform_backtest"]).capitalize())
357
+ params["passed_data"]["perform_backtest"] = perform_backtest
358
+ if "model_predict_publish" not in runtimeparams.keys():
265
359
  model_predict_publish = False
266
360
  else:
267
- model_predict_publish = eval(str(runtimeparams['model_predict_publish']).capitalize())
268
- params['passed_data']['model_predict_publish'] = model_predict_publish
269
- if 'model_predict_entity_id' not in runtimeparams.keys():
361
+ model_predict_publish = eval(
362
+ str(runtimeparams["model_predict_publish"]).capitalize()
363
+ )
364
+ params["passed_data"]["model_predict_publish"] = model_predict_publish
365
+ if "model_predict_entity_id" not in runtimeparams.keys():
270
366
  model_predict_entity_id = "sensor.p_load_forecast_custom_model"
271
367
  else:
272
- model_predict_entity_id = runtimeparams['model_predict_entity_id']
273
- params['passed_data']['model_predict_entity_id'] = model_predict_entity_id
274
- if 'model_predict_unit_of_measurement' not in runtimeparams.keys():
368
+ model_predict_entity_id = runtimeparams["model_predict_entity_id"]
369
+ params["passed_data"]["model_predict_entity_id"] = model_predict_entity_id
370
+ if "model_predict_unit_of_measurement" not in runtimeparams.keys():
275
371
  model_predict_unit_of_measurement = "W"
276
372
  else:
277
- model_predict_unit_of_measurement = runtimeparams['model_predict_unit_of_measurement']
278
- params['passed_data']['model_predict_unit_of_measurement'] = model_predict_unit_of_measurement
279
- if 'model_predict_friendly_name' not in runtimeparams.keys():
373
+ model_predict_unit_of_measurement = runtimeparams[
374
+ "model_predict_unit_of_measurement"
375
+ ]
376
+ params["passed_data"][
377
+ "model_predict_unit_of_measurement"
378
+ ] = model_predict_unit_of_measurement
379
+ if "model_predict_friendly_name" not in runtimeparams.keys():
280
380
  model_predict_friendly_name = "Load Power Forecast custom ML model"
281
381
  else:
282
- model_predict_friendly_name = runtimeparams['model_predict_friendly_name']
283
- params['passed_data']['model_predict_friendly_name'] = model_predict_friendly_name
284
- # Treat optimization configuration parameters passed at runtime
285
- if 'num_def_loads' in runtimeparams.keys():
286
- optim_conf['num_def_loads'] = runtimeparams['num_def_loads']
287
- if 'P_deferrable_nom' in runtimeparams.keys():
288
- optim_conf['P_deferrable_nom'] = runtimeparams['P_deferrable_nom']
289
- if 'def_total_hours' in runtimeparams.keys():
290
- optim_conf['def_total_hours'] = runtimeparams['def_total_hours']
291
- if 'def_start_timestep' in runtimeparams.keys():
292
- optim_conf['def_start_timestep'] = runtimeparams['def_start_timestep']
293
- if 'def_end_timestep' in runtimeparams.keys():
294
- optim_conf['def_end_timestep'] = runtimeparams['def_end_timestep']
295
- if 'treat_def_as_semi_cont' in runtimeparams.keys():
296
- optim_conf['treat_def_as_semi_cont'] = [eval(str(k).capitalize()) for k in runtimeparams['treat_def_as_semi_cont']]
297
- if 'set_def_constant' in runtimeparams.keys():
298
- optim_conf['set_def_constant'] = [eval(str(k).capitalize()) for k in runtimeparams['set_def_constant']]
299
- if 'solcast_api_key' in runtimeparams.keys():
300
- retrieve_hass_conf['solcast_api_key'] = runtimeparams['solcast_api_key']
301
- optim_conf['weather_forecast_method'] = 'solcast'
302
- if 'solcast_rooftop_id' in runtimeparams.keys():
303
- retrieve_hass_conf['solcast_rooftop_id'] = runtimeparams['solcast_rooftop_id']
304
- optim_conf['weather_forecast_method'] = 'solcast'
305
- if 'solar_forecast_kwp' in runtimeparams.keys():
306
- retrieve_hass_conf['solar_forecast_kwp'] = runtimeparams['solar_forecast_kwp']
307
- optim_conf['weather_forecast_method'] = 'solar.forecast'
308
- if 'weight_battery_discharge' in runtimeparams.keys():
309
- optim_conf['weight_battery_discharge'] = runtimeparams['weight_battery_discharge']
310
- if 'weight_battery_charge' in runtimeparams.keys():
311
- optim_conf['weight_battery_charge'] = runtimeparams['weight_battery_charge']
382
+ model_predict_friendly_name = runtimeparams["model_predict_friendly_name"]
383
+ params["passed_data"][
384
+ "model_predict_friendly_name"
385
+ ] = model_predict_friendly_name
386
+ if "mlr_predict_entity_id" not in runtimeparams.keys():
387
+ mlr_predict_entity_id = "sensor.mlr_predict"
388
+ else:
389
+ mlr_predict_entity_id = runtimeparams["mlr_predict_entity_id"]
390
+ params["passed_data"]["mlr_predict_entity_id"] = mlr_predict_entity_id
391
+ if "mlr_predict_unit_of_measurement" not in runtimeparams.keys():
392
+ mlr_predict_unit_of_measurement = None
393
+ else:
394
+ mlr_predict_unit_of_measurement = runtimeparams[
395
+ "mlr_predict_unit_of_measurement"
396
+ ]
397
+ params["passed_data"][
398
+ "mlr_predict_unit_of_measurement"
399
+ ] = mlr_predict_unit_of_measurement
400
+ if "mlr_predict_friendly_name" not in runtimeparams.keys():
401
+ mlr_predict_friendly_name = "mlr predictor"
402
+ else:
403
+ mlr_predict_friendly_name = runtimeparams["mlr_predict_friendly_name"]
404
+ params["passed_data"]["mlr_predict_friendly_name"] = mlr_predict_friendly_name
405
+ # Treat optimization configuration parameters passed at runtime
406
+ if "num_def_loads" in runtimeparams.keys():
407
+ optim_conf["num_def_loads"] = runtimeparams["num_def_loads"]
408
+ if "P_deferrable_nom" in runtimeparams.keys():
409
+ optim_conf["P_deferrable_nom"] = runtimeparams["P_deferrable_nom"]
410
+ if "def_total_hours" in runtimeparams.keys():
411
+ optim_conf["def_total_hours"] = runtimeparams["def_total_hours"]
412
+ if "def_start_timestep" in runtimeparams.keys():
413
+ optim_conf["def_start_timestep"] = runtimeparams["def_start_timestep"]
414
+ if "def_end_timestep" in runtimeparams.keys():
415
+ optim_conf["def_end_timestep"] = runtimeparams["def_end_timestep"]
416
+ if "treat_def_as_semi_cont" in runtimeparams.keys():
417
+ optim_conf["treat_def_as_semi_cont"] = [
418
+ eval(str(k).capitalize())
419
+ for k in runtimeparams["treat_def_as_semi_cont"]
420
+ ]
421
+ if "set_def_constant" in runtimeparams.keys():
422
+ optim_conf["set_def_constant"] = [
423
+ eval(str(k).capitalize()) for k in runtimeparams["set_def_constant"]
424
+ ]
425
+ if "solcast_api_key" in runtimeparams.keys():
426
+ retrieve_hass_conf["solcast_api_key"] = runtimeparams["solcast_api_key"]
427
+ optim_conf["weather_forecast_method"] = "solcast"
428
+ if "solcast_rooftop_id" in runtimeparams.keys():
429
+ retrieve_hass_conf["solcast_rooftop_id"] = runtimeparams[
430
+ "solcast_rooftop_id"
431
+ ]
432
+ optim_conf["weather_forecast_method"] = "solcast"
433
+ if "solar_forecast_kwp" in runtimeparams.keys():
434
+ retrieve_hass_conf["solar_forecast_kwp"] = runtimeparams[
435
+ "solar_forecast_kwp"
436
+ ]
437
+ optim_conf["weather_forecast_method"] = "solar.forecast"
438
+ if "weight_battery_discharge" in runtimeparams.keys():
439
+ optim_conf["weight_battery_discharge"] = runtimeparams[
440
+ "weight_battery_discharge"
441
+ ]
442
+ if "weight_battery_charge" in runtimeparams.keys():
443
+ optim_conf["weight_battery_charge"] = runtimeparams["weight_battery_charge"]
444
+ if 'freq' in runtimeparams.keys():
445
+ retrieve_hass_conf['freq'] = pd.to_timedelta(runtimeparams['freq'], "minutes")
312
446
  # Treat plant configuration parameters passed at runtime
313
- if 'SOCtarget' in runtimeparams.keys():
314
- plant_conf['SOCtarget'] = runtimeparams['SOCtarget']
447
+ if "SOCtarget" in runtimeparams.keys():
448
+ plant_conf["SOCtarget"] = runtimeparams["SOCtarget"]
315
449
  # Treat custom entities id's and friendly names for variables
316
- if 'custom_pv_forecast_id' in runtimeparams.keys():
317
- params['passed_data']['custom_pv_forecast_id'] = runtimeparams['custom_pv_forecast_id']
318
- if 'custom_load_forecast_id' in runtimeparams.keys():
319
- params['passed_data']['custom_load_forecast_id'] = runtimeparams['custom_load_forecast_id']
320
- if 'custom_batt_forecast_id' in runtimeparams.keys():
321
- params['passed_data']['custom_batt_forecast_id'] = runtimeparams['custom_batt_forecast_id']
322
- if 'custom_batt_soc_forecast_id' in runtimeparams.keys():
323
- params['passed_data']['custom_batt_soc_forecast_id'] = runtimeparams['custom_batt_soc_forecast_id']
324
- if 'custom_grid_forecast_id' in runtimeparams.keys():
325
- params['passed_data']['custom_grid_forecast_id'] = runtimeparams['custom_grid_forecast_id']
326
- if 'custom_cost_fun_id' in runtimeparams.keys():
327
- params['passed_data']['custom_cost_fun_id'] = runtimeparams['custom_cost_fun_id']
328
- if 'custom_optim_status_id' in runtimeparams.keys():
329
- params['passed_data']['custom_optim_status_id'] = runtimeparams['custom_optim_status_id']
330
- if 'custom_unit_load_cost_id' in runtimeparams.keys():
331
- params['passed_data']['custom_unit_load_cost_id'] = runtimeparams['custom_unit_load_cost_id']
332
- if 'custom_unit_prod_price_id' in runtimeparams.keys():
333
- params['passed_data']['custom_unit_prod_price_id'] = runtimeparams['custom_unit_prod_price_id']
334
- if 'custom_deferrable_forecast_id' in runtimeparams.keys():
335
- params['passed_data']['custom_deferrable_forecast_id'] = runtimeparams['custom_deferrable_forecast_id']
450
+ if "custom_pv_forecast_id" in runtimeparams.keys():
451
+ params["passed_data"]["custom_pv_forecast_id"] = runtimeparams[
452
+ "custom_pv_forecast_id"
453
+ ]
454
+ if "custom_load_forecast_id" in runtimeparams.keys():
455
+ params["passed_data"]["custom_load_forecast_id"] = runtimeparams[
456
+ "custom_load_forecast_id"
457
+ ]
458
+ if "custom_batt_forecast_id" in runtimeparams.keys():
459
+ params["passed_data"]["custom_batt_forecast_id"] = runtimeparams[
460
+ "custom_batt_forecast_id"
461
+ ]
462
+ if "custom_batt_soc_forecast_id" in runtimeparams.keys():
463
+ params["passed_data"]["custom_batt_soc_forecast_id"] = runtimeparams[
464
+ "custom_batt_soc_forecast_id"
465
+ ]
466
+ if "custom_grid_forecast_id" in runtimeparams.keys():
467
+ params["passed_data"]["custom_grid_forecast_id"] = runtimeparams[
468
+ "custom_grid_forecast_id"
469
+ ]
470
+ if "custom_cost_fun_id" in runtimeparams.keys():
471
+ params["passed_data"]["custom_cost_fun_id"] = runtimeparams[
472
+ "custom_cost_fun_id"
473
+ ]
474
+ if "custom_optim_status_id" in runtimeparams.keys():
475
+ params["passed_data"]["custom_optim_status_id"] = runtimeparams[
476
+ "custom_optim_status_id"
477
+ ]
478
+ if "custom_unit_load_cost_id" in runtimeparams.keys():
479
+ params["passed_data"]["custom_unit_load_cost_id"] = runtimeparams[
480
+ "custom_unit_load_cost_id"
481
+ ]
482
+ if "custom_unit_prod_price_id" in runtimeparams.keys():
483
+ params["passed_data"]["custom_unit_prod_price_id"] = runtimeparams[
484
+ "custom_unit_prod_price_id"
485
+ ]
486
+ if "custom_deferrable_forecast_id" in runtimeparams.keys():
487
+ params["passed_data"]["custom_deferrable_forecast_id"] = runtimeparams[
488
+ "custom_deferrable_forecast_id"
489
+ ]
336
490
  # A condition to put a prefix on all published data
337
- if 'publish_prefix' not in runtimeparams.keys():
491
+ if "publish_prefix" not in runtimeparams.keys():
338
492
  publish_prefix = ""
339
493
  else:
340
- publish_prefix = runtimeparams['publish_prefix']
341
- params['passed_data']['publish_prefix'] = publish_prefix
494
+ publish_prefix = runtimeparams["publish_prefix"]
495
+ params["passed_data"]["publish_prefix"] = publish_prefix
342
496
  # Serialize the final params
343
497
  params = json.dumps(params)
344
498
  return params, retrieve_hass_conf, optim_conf, plant_conf
345
499
 
346
- def get_yaml_parse(config_path: str, use_secrets: Optional[bool] = True,
500
+
501
+ def get_yaml_parse(emhass_conf: dict, use_secrets: Optional[bool] = True,
347
502
  params: Optional[str] = None) -> Tuple[dict, dict, dict]:
348
503
  """
349
504
  Perform parsing of the config.yaml file.
350
505
 
351
- :param config_path: The path to the yaml configuration file
352
- :type config_path: str
506
+ :param emhass_conf: Dictionary containing the needed emhass paths
507
+ :type emhass_conf: dict
353
508
  :param use_secrets: Indicate if we should use a secrets file or not.
354
509
  Set to False for unit tests.
355
510
  :type use_secrets: bool, optional
@@ -359,51 +514,55 @@ def get_yaml_parse(config_path: str, use_secrets: Optional[bool] = True,
359
514
  :rtype: tuple(dict)
360
515
 
361
516
  """
362
- base = config_path.parent
363
517
  if params is None:
364
- with open(config_path, 'r') as file:
518
+ with open(emhass_conf["config_path"], 'r') as file:
365
519
  input_conf = yaml.load(file, Loader=yaml.FullLoader)
366
520
  else:
367
521
  input_conf = json.loads(params)
368
522
  if use_secrets:
369
523
  if params is None:
370
- with open(base / 'secrets_emhass.yaml', 'r') as file:
524
+ with open(emhass_conf["root_path"] / 'secrets_emhass.yaml', 'r') as file: #assume secrets file is in root path
371
525
  input_secrets = yaml.load(file, Loader=yaml.FullLoader)
372
526
  else:
373
- input_secrets = input_conf.pop('params_secrets', None)
374
-
375
- if (type(input_conf['retrieve_hass_conf']) == list): #if using old config version
376
- retrieve_hass_conf = dict({key:d[key] for d in input_conf['retrieve_hass_conf'] for key in d})
527
+ input_secrets = input_conf.pop("params_secrets", None)
528
+
529
+ if type(input_conf["retrieve_hass_conf"]) == list: # if using old config version
530
+ retrieve_hass_conf = dict(
531
+ {key: d[key] for d in input_conf["retrieve_hass_conf"] for key in d}
532
+ )
377
533
  else:
378
- retrieve_hass_conf = input_conf.get('retrieve_hass_conf', {})
379
-
534
+ retrieve_hass_conf = input_conf.get("retrieve_hass_conf", {})
535
+
380
536
  if use_secrets:
381
537
  retrieve_hass_conf.update(input_secrets)
382
538
  else:
383
- retrieve_hass_conf['hass_url'] = 'http://supervisor/core/api'
384
- retrieve_hass_conf['long_lived_token'] = '${SUPERVISOR_TOKEN}'
385
- retrieve_hass_conf['time_zone'] = 'Europe/Paris'
386
- retrieve_hass_conf['lat'] = 45.83
387
- retrieve_hass_conf['lon'] = 6.86
388
- retrieve_hass_conf['alt'] = 4807.8
389
- retrieve_hass_conf['freq'] = pd.to_timedelta(retrieve_hass_conf['freq'], "minutes")
390
- retrieve_hass_conf['time_zone'] = pytz.timezone(retrieve_hass_conf['time_zone'])
391
-
392
- if (type(input_conf['optim_conf']) == list):
393
- optim_conf = dict({key:d[key] for d in input_conf['optim_conf'] for key in d})
539
+ retrieve_hass_conf["hass_url"] = "http://supervisor/core/api"
540
+ retrieve_hass_conf["long_lived_token"] = "${SUPERVISOR_TOKEN}"
541
+ retrieve_hass_conf["time_zone"] = "Europe/Paris"
542
+ retrieve_hass_conf["lat"] = 45.83
543
+ retrieve_hass_conf["lon"] = 6.86
544
+ retrieve_hass_conf["alt"] = 4807.8
545
+ retrieve_hass_conf["freq"] = pd.to_timedelta(retrieve_hass_conf["freq"], "minutes")
546
+ retrieve_hass_conf["time_zone"] = pytz.timezone(retrieve_hass_conf["time_zone"])
547
+
548
+ if type(input_conf["optim_conf"]) == list:
549
+ optim_conf = dict({key: d[key] for d in input_conf["optim_conf"] for key in d})
394
550
  else:
395
- optim_conf = input_conf.get('optim_conf', {})
551
+ optim_conf = input_conf.get("optim_conf", {})
396
552
 
397
- optim_conf['list_hp_periods'] = dict((key,d[key]) for d in optim_conf['list_hp_periods'] for key in d)
398
- optim_conf['delta_forecast'] = pd.Timedelta(days=optim_conf['delta_forecast'])
399
-
400
- if (type(input_conf['plant_conf']) == list):
401
- plant_conf = dict({key:d[key] for d in input_conf['plant_conf'] for key in d})
553
+ optim_conf["list_hp_periods"] = dict(
554
+ (key, d[key]) for d in optim_conf["list_hp_periods"] for key in d
555
+ )
556
+ optim_conf["delta_forecast"] = pd.Timedelta(days=optim_conf["delta_forecast"])
557
+
558
+ if type(input_conf["plant_conf"]) == list:
559
+ plant_conf = dict({key: d[key] for d in input_conf["plant_conf"] for key in d})
402
560
  else:
403
- plant_conf = input_conf.get('plant_conf', {})
404
-
561
+ plant_conf = input_conf.get("plant_conf", {})
562
+
405
563
  return retrieve_hass_conf, optim_conf, plant_conf
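get_yaml_parse follows the same change as get_logger: it now receives the emhass_conf path dictionary, reading the configuration from emhass_conf['config_path'] and, when use_secrets is True and no params string is given, secrets_emhass.yaml from emhass_conf['root_path']. A minimal sketch with hypothetical paths:

    import pathlib
    from emhass.utils import get_yaml_parse

    emhass_conf = {
        'config_path': pathlib.Path('/app/config_emhass.yaml'),
        'root_path': pathlib.Path('/app'),      # secrets_emhass.yaml is expected here
        'data_path': pathlib.Path('/app/data'),
    }
    retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(emhass_conf, use_secrets=True)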
406
564
 
565
+
407
566
  def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dict:
408
567
  """
409
568
  Build a dictionary with graphs and tables for the webui.
@@ -414,60 +573,83 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dic
414
573
  :type plot_size: Optional[int], optional
415
574
  :return: A dictionary containing the graphs and tables in html format
416
575
  :rtype: dict
417
-
576
+
418
577
  """
419
- cols_p = [i for i in df.columns.to_list() if 'P_' in i]
578
+ cols_p = [i for i in df.columns.to_list() if "P_" in i]
420
579
  # Let's round the data in the DF
421
- optim_status = df['optim_status'].unique().item()
422
- df.drop('optim_status', axis=1, inplace=True)
423
- cols_else = [i for i in df.columns.to_list() if 'P_' not in i]
580
+ optim_status = df["optim_status"].unique().item()
581
+ df.drop("optim_status", axis=1, inplace=True)
582
+ cols_else = [i for i in df.columns.to_list() if "P_" not in i]
424
583
  df = df.apply(pd.to_numeric)
425
584
  df[cols_p] = df[cols_p].astype(int)
426
585
  df[cols_else] = df[cols_else].round(3)
427
586
  # Create plots
428
587
  n_colors = len(cols_p)
429
- colors = px.colors.sample_colorscale("jet", [n/(n_colors -1) for n in range(n_colors)])
430
- fig_0 = px.line(df[cols_p], title='Systems powers schedule after optimization results',
431
- template='presentation', line_shape="hv",
432
- color_discrete_sequence=colors)
433
- fig_0.update_layout(xaxis_title='Timestamp', yaxis_title='System powers (W)')
434
- if 'SOC_opt' in df.columns.to_list():
435
- fig_1 = px.line(df['SOC_opt'], title='Battery state of charge schedule after optimization results',
436
- template='presentation', line_shape="hv",
437
- color_discrete_sequence=colors)
438
- fig_1.update_layout(xaxis_title='Timestamp', yaxis_title='Battery SOC (%)')
439
- cols_cost = [i for i in df.columns.to_list() if 'cost_' in i or 'unit_' in i]
588
+ colors = px.colors.sample_colorscale(
589
+ "jet", [n / (n_colors - 1) for n in range(n_colors)]
590
+ )
591
+ fig_0 = px.line(
592
+ df[cols_p],
593
+ title="Systems powers schedule after optimization results",
594
+ template="presentation",
595
+ line_shape="hv",
596
+ color_discrete_sequence=colors,
597
+ )
598
+ fig_0.update_layout(xaxis_title="Timestamp", yaxis_title="System powers (W)")
599
+ if "SOC_opt" in df.columns.to_list():
600
+ fig_1 = px.line(
601
+ df["SOC_opt"],
602
+ title="Battery state of charge schedule after optimization results",
603
+ template="presentation",
604
+ line_shape="hv",
605
+ color_discrete_sequence=colors,
606
+ )
607
+ fig_1.update_layout(xaxis_title="Timestamp", yaxis_title="Battery SOC (%)")
608
+ cols_cost = [i for i in df.columns.to_list() if "cost_" in i or "unit_" in i]
440
609
  n_colors = len(cols_cost)
441
- colors = px.colors.sample_colorscale("jet", [n/(n_colors -1) for n in range(n_colors)])
442
- fig_2 = px.line(df[cols_cost], title='Systems costs obtained from optimization results',
443
- template='presentation', line_shape="hv",
444
- color_discrete_sequence=colors)
445
- fig_2.update_layout(xaxis_title='Timestamp', yaxis_title='System costs (currency)')
610
+ colors = px.colors.sample_colorscale(
611
+ "jet", [n / (n_colors - 1) for n in range(n_colors)]
612
+ )
613
+ fig_2 = px.line(
614
+ df[cols_cost],
615
+ title="Systems costs obtained from optimization results",
616
+ template="presentation",
617
+ line_shape="hv",
618
+ color_discrete_sequence=colors,
619
+ )
620
+ fig_2.update_layout(xaxis_title="Timestamp", yaxis_title="System costs (currency)")
446
621
  # Get full path to image
447
- image_path_0 = fig_0.to_html(full_html=False, default_width='75%')
448
- if 'SOC_opt' in df.columns.to_list():
449
- image_path_1 = fig_1.to_html(full_html=False, default_width='75%')
450
- image_path_2 = fig_2.to_html(full_html=False, default_width='75%')
622
+ image_path_0 = fig_0.to_html(full_html=False, default_width="75%")
623
+ if "SOC_opt" in df.columns.to_list():
624
+ image_path_1 = fig_1.to_html(full_html=False, default_width="75%")
625
+ image_path_2 = fig_2.to_html(full_html=False, default_width="75%")
451
626
  # The tables
452
- table1 = df.reset_index().to_html(classes='mystyle', index=False)
453
- cost_cols = [i for i in df.columns if 'cost_' in i]
627
+ table1 = df.reset_index().to_html(classes="mystyle", index=False)
628
+ cost_cols = [i for i in df.columns if "cost_" in i]
454
629
  table2 = df[cost_cols].reset_index().sum(numeric_only=True)
455
- table2['optim_status'] = optim_status
456
- table2 = table2.to_frame(name='Value').reset_index(names='Variable').to_html(classes='mystyle', index=False)
630
+ table2["optim_status"] = optim_status
631
+ table2 = (
632
+ table2.to_frame(name="Value")
633
+ .reset_index(names="Variable")
634
+ .to_html(classes="mystyle", index=False)
635
+ )
457
636
  # The dict of plots
458
637
  injection_dict = {}
459
- injection_dict['title'] = '<h2>EMHASS optimization results</h2>'
460
- injection_dict['subsubtitle0'] = '<h4>Plotting latest optimization results</h4>'
461
- injection_dict['figure_0'] = image_path_0
462
- if 'SOC_opt' in df.columns.to_list():
463
- injection_dict['figure_1'] = image_path_1
464
- injection_dict['figure_2'] = image_path_2
465
- injection_dict['subsubtitle1'] = '<h4>Last run optimization results table</h4>'
466
- injection_dict['table1'] = table1
467
- injection_dict['subsubtitle2'] = '<h4>Summary table for latest optimization results</h4>'
468
- injection_dict['table2'] = table2
638
+ injection_dict["title"] = "<h2>EMHASS optimization results</h2>"
639
+ injection_dict["subsubtitle0"] = "<h4>Plotting latest optimization results</h4>"
640
+ injection_dict["figure_0"] = image_path_0
641
+ if "SOC_opt" in df.columns.to_list():
642
+ injection_dict["figure_1"] = image_path_1
643
+ injection_dict["figure_2"] = image_path_2
644
+ injection_dict["subsubtitle1"] = "<h4>Last run optimization results table</h4>"
645
+ injection_dict["table1"] = table1
646
+ injection_dict["subsubtitle2"] = (
647
+ "<h4>Summary table for latest optimization results</h4>"
648
+ )
649
+ injection_dict["table2"] = table2
469
650
  return injection_dict
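get_injection_dict only assumes a results DataFrame with an optim_status column, power columns containing 'P_', cost_/unit_ columns and optionally SOC_opt. A small, made-up frame is enough to exercise it:

    import pandas as pd
    from emhass.utils import get_injection_dict

    df = pd.DataFrame(
        {
            "P_PV": [0, 1500],                      # any column containing "P_" is plotted as a power
            "P_Load": [400, 900],
            "unit_load_cost": [0.2, 0.3],
            "cost_profit": [-0.1, 0.2],
            "optim_status": ["Optimal", "Optimal"],
        },
        index=pd.date_range("2024-01-01", periods=2, freq="30min"),
    )
    injection_dict = get_injection_dict(df)  # dict of html figures and tables for the webui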
470
651
 
652
+
471
653
  def get_injection_dict_forecast_model_fit(df_fit_pred: pd.DataFrame, mlf: MLForecaster) -> dict:
472
654
  """
473
655
  Build a dictionary with graphs and tables for the webui for special MLF fit case.
@@ -480,18 +662,23 @@ def get_injection_dict_forecast_model_fit(df_fit_pred: pd.DataFrame, mlf: MLFore
480
662
  :rtype: dict
481
663
  """
482
664
  fig = df_fit_pred.plot()
483
- fig.layout.template = 'presentation'
484
- fig.update_yaxes(title_text = mlf.model_type)
485
- fig.update_xaxes(title_text = "Time")
486
- image_path_0 = fig.to_html(full_html=False, default_width='75%')
665
+ fig.layout.template = "presentation"
666
+ fig.update_yaxes(title_text=mlf.model_type)
667
+ fig.update_xaxes(title_text="Time")
668
+ image_path_0 = fig.to_html(full_html=False, default_width="75%")
487
669
  # The dict of plots
488
670
  injection_dict = {}
489
- injection_dict['title'] = '<h2>Custom machine learning forecast model fit</h2>'
490
- injection_dict['subsubtitle0'] = '<h4>Plotting train/test forecast model results for '+mlf.model_type+'</h4>'
491
- injection_dict['subsubtitle0'] = '<h4>Forecasting variable '+mlf.var_model+'</h4>'
492
- injection_dict['figure_0'] = image_path_0
671
+ injection_dict["title"] = "<h2>Custom machine learning forecast model fit</h2>"
672
+ injection_dict["subsubtitle0"] = (
673
+ "<h4>Plotting train/test forecast model results for " + mlf.model_type + "</h4>"
674
+ )
675
+ injection_dict["subsubtitle0"] = (
676
+ "<h4>Forecasting variable " + mlf.var_model + "</h4>"
677
+ )
678
+ injection_dict["figure_0"] = image_path_0
493
679
  return injection_dict
494
680
 
681
+
495
682
  def get_injection_dict_forecast_model_tune(df_pred_optim: pd.DataFrame, mlf: MLForecaster) -> dict:
496
683
  """
497
684
  Build a dictionary with graphs and tables for the webui for special MLF tune case.
@@ -504,19 +691,27 @@ def get_injection_dict_forecast_model_tune(df_pred_optim: pd.DataFrame, mlf: MLF
504
691
  :rtype: dict
505
692
  """
506
693
  fig = df_pred_optim.plot()
507
- fig.layout.template = 'presentation'
508
- fig.update_yaxes(title_text = mlf.model_type)
509
- fig.update_xaxes(title_text = "Time")
510
- image_path_0 = fig.to_html(full_html=False, default_width='75%')
694
+ fig.layout.template = "presentation"
695
+ fig.update_yaxes(title_text=mlf.model_type)
696
+ fig.update_xaxes(title_text="Time")
697
+ image_path_0 = fig.to_html(full_html=False, default_width="75%")
511
698
  # The dict of plots
512
699
  injection_dict = {}
513
- injection_dict['title'] = '<h2>Custom machine learning forecast model tune</h2>'
514
- injection_dict['subsubtitle0'] = '<h4>Performed a tuning routine using bayesian optimization for '+mlf.model_type+'</h4>'
515
- injection_dict['subsubtitle0'] = '<h4>Forecasting variable '+mlf.var_model+'</h4>'
516
- injection_dict['figure_0'] = image_path_0
700
+ injection_dict["title"] = "<h2>Custom machine learning forecast model tune</h2>"
701
+ injection_dict["subsubtitle0"] = (
702
+ "<h4>Performed a tuning routine using bayesian optimization for "
703
+ + mlf.model_type
704
+ + "</h4>"
705
+ )
706
+ injection_dict["subsubtitle0"] = (
707
+ "<h4>Forecasting variable " + mlf.var_model + "</h4>"
708
+ )
709
+ injection_dict["figure_0"] = image_path_0
517
710
  return injection_dict
518
711
 
519
- def build_params(params: dict, params_secrets: dict, options: dict, addon: int, logger: logging.Logger) -> dict:
712
+
713
+ def build_params(params: dict, params_secrets: dict, options: dict, addon: int,
714
+ logger: logging.Logger) -> dict:
520
715
  """
521
716
  Build the main params dictionary from the loaded options.json when using the add-on.
522
717
 
@@ -535,45 +730,120 @@ def build_params(params: dict, params_secrets: dict, options: dict, addon: int,
535
730
  """
536
731
  if addon == 1:
537
732
  # Updating variables in retrieve_hass_conf
538
- params['retrieve_hass_conf']['freq'] = options.get('optimization_time_step',params['retrieve_hass_conf']['freq'])
539
- params['retrieve_hass_conf']['days_to_retrieve'] = options.get('historic_days_to_retrieve',params['retrieve_hass_conf']['days_to_retrieve'])
540
- params['retrieve_hass_conf']['var_PV'] = options.get('sensor_power_photovoltaics',params['retrieve_hass_conf']['var_PV'])
541
- params['retrieve_hass_conf']['var_load'] = options.get('sensor_power_load_no_var_loads',params['retrieve_hass_conf']['var_load'])
542
- params['retrieve_hass_conf']['load_negative'] = options.get('load_negative',params['retrieve_hass_conf']['load_negative'])
543
- params['retrieve_hass_conf']['set_zero_min'] = options.get('set_zero_min',params['retrieve_hass_conf']['set_zero_min'])
544
- params['retrieve_hass_conf']['var_replace_zero'] = [options.get('sensor_power_photovoltaics',params['retrieve_hass_conf']['var_replace_zero'])]
545
- params['retrieve_hass_conf']['var_interp'] = [options.get('sensor_power_photovoltaics',params['retrieve_hass_conf']['var_PV']), options.get('sensor_power_load_no_var_loads',params['retrieve_hass_conf']['var_load'])]
546
- params['retrieve_hass_conf']['method_ts_round'] = options.get('method_ts_round',params['retrieve_hass_conf']['method_ts_round'])
733
+ params["retrieve_hass_conf"]["freq"] = options.get(
734
+ "optimization_time_step", params["retrieve_hass_conf"]["freq"]
735
+ )
736
+ params["retrieve_hass_conf"]["days_to_retrieve"] = options.get(
737
+ "historic_days_to_retrieve",
738
+ params["retrieve_hass_conf"]["days_to_retrieve"],
739
+ )
740
+ params["retrieve_hass_conf"]["var_PV"] = options.get(
741
+ "sensor_power_photovoltaics", params["retrieve_hass_conf"]["var_PV"]
742
+ )
743
+ params["retrieve_hass_conf"]["var_load"] = options.get(
744
+ "sensor_power_load_no_var_loads", params["retrieve_hass_conf"]["var_load"]
745
+ )
746
+ params["retrieve_hass_conf"]["load_negative"] = options.get(
747
+ "load_negative", params["retrieve_hass_conf"]["load_negative"]
748
+ )
749
+ params["retrieve_hass_conf"]["set_zero_min"] = options.get(
750
+ "set_zero_min", params["retrieve_hass_conf"]["set_zero_min"]
751
+ )
752
+ params["retrieve_hass_conf"]["var_replace_zero"] = [
753
+ options.get(
754
+ "sensor_power_photovoltaics",
755
+ params["retrieve_hass_conf"]["var_replace_zero"],
756
+ )
757
+ ]
758
+ params["retrieve_hass_conf"]["var_interp"] = [
759
+ options.get(
760
+ "sensor_power_photovoltaics", params["retrieve_hass_conf"]["var_PV"]
761
+ ),
762
+ options.get(
763
+ "sensor_power_load_no_var_loads",
764
+ params["retrieve_hass_conf"]["var_load"],
765
+ ),
766
+ ]
767
+ params["retrieve_hass_conf"]["method_ts_round"] = options.get(
768
+ "method_ts_round", params["retrieve_hass_conf"]["method_ts_round"]
769
+ )
547
770
  # Update params Secrets if specified
548
- params['params_secrets'] = params_secrets
549
- params['params_secrets']['time_zone'] = options.get('time_zone',params_secrets['time_zone'])
550
- params['params_secrets']['lat'] = options.get('Latitude',params_secrets['lat'])
551
- params['params_secrets']['lon'] = options.get('Longitude',params_secrets['lon'])
552
- params['params_secrets']['alt'] = options.get('Altitude',params_secrets['alt'])
771
+ params["params_secrets"] = params_secrets
772
+ params["params_secrets"]["time_zone"] = options.get(
773
+ "time_zone", params_secrets["time_zone"]
774
+ )
775
+ params["params_secrets"]["lat"] = options.get("Latitude", params_secrets["lat"])
776
+ params["params_secrets"]["lon"] = options.get(
777
+ "Longitude", params_secrets["lon"]
778
+ )
779
+ params["params_secrets"]["alt"] = options.get("Altitude", params_secrets["alt"])
553
780
  # Updating variables in optim_conf
554
- params['optim_conf']['set_use_battery'] = options.get('set_use_battery',params['optim_conf']['set_use_battery'])
555
- params['optim_conf']['num_def_loads'] = options.get('number_of_deferrable_loads',params['optim_conf']['num_def_loads'])
556
- if options.get('list_nominal_power_of_deferrable_loads',None) != None:
557
- params['optim_conf']['P_deferrable_nom'] = [i['nominal_power_of_deferrable_loads'] for i in options.get('list_nominal_power_of_deferrable_loads')]
558
- if options.get('list_operating_hours_of_each_deferrable_load',None) != None:
559
- params['optim_conf']['def_total_hours'] = [i['operating_hours_of_each_deferrable_load'] for i in options.get('list_operating_hours_of_each_deferrable_load')]
560
- if options.get('list_treat_deferrable_load_as_semi_cont',None) != None:
561
- params['optim_conf']['treat_def_as_semi_cont'] = [i['treat_deferrable_load_as_semi_cont'] for i in options.get('list_treat_deferrable_load_as_semi_cont')]
562
- params['optim_conf']['weather_forecast_method'] = options.get('weather_forecast_method',params['optim_conf']['weather_forecast_method'])
781
+ params["optim_conf"]["set_use_battery"] = options.get(
782
+ "set_use_battery", params["optim_conf"]["set_use_battery"]
783
+ )
784
+ params["optim_conf"]["num_def_loads"] = options.get(
785
+ "number_of_deferrable_loads", params["optim_conf"]["num_def_loads"]
786
+ )
787
+ if options.get("list_nominal_power_of_deferrable_loads", None) != None:
788
+ params["optim_conf"]["P_deferrable_nom"] = [
789
+ i["nominal_power_of_deferrable_loads"]
790
+ for i in options.get("list_nominal_power_of_deferrable_loads")
791
+ ]
792
+ if options.get("list_operating_hours_of_each_deferrable_load", None) != None:
793
+ params["optim_conf"]["def_total_hours"] = [
794
+ i["operating_hours_of_each_deferrable_load"]
795
+ for i in options.get("list_operating_hours_of_each_deferrable_load")
796
+ ]
797
+ if options.get("list_treat_deferrable_load_as_semi_cont", None) != None:
798
+ params["optim_conf"]["treat_def_as_semi_cont"] = [
799
+ i["treat_deferrable_load_as_semi_cont"]
800
+ for i in options.get("list_treat_deferrable_load_as_semi_cont")
801
+ ]
802
+ params["optim_conf"]["weather_forecast_method"] = options.get(
803
+ "weather_forecast_method", params["optim_conf"]["weather_forecast_method"]
804
+ )
563
805
  # Update optional param secrets
564
- if params['optim_conf']['weather_forecast_method'] == "solcast":
565
- params['params_secrets']['solcast_api_key'] = options.get('optional_solcast_api_key',params_secrets.get('solcast_api_key',"123456"))
566
- params['params_secrets']['solcast_rooftop_id'] = options.get('optional_solcast_rooftop_id',params_secrets.get('solcast_rooftop_id',"123456"))
567
- elif params['optim_conf']['weather_forecast_method'] == "solar.forecast":
568
- params['params_secrets']['solar_forecast_kwp'] = options.get('optional_solar_forecast_kwp',params_secrets.get('solar_forecast_kwp',5))
569
- params['optim_conf']['load_forecast_method'] = options.get('load_forecast_method',params['optim_conf']['load_forecast_method'])
570
- params['optim_conf']['delta_forecast'] = options.get('delta_forecast_daily',params['optim_conf']['delta_forecast'])
571
- params['optim_conf']['load_cost_forecast_method'] = options.get('load_cost_forecast_method',params['optim_conf']['load_cost_forecast_method'])
572
- if options.get('list_set_deferrable_load_single_constant',None) != None:
573
- params['optim_conf']['set_def_constant'] = [i['set_deferrable_load_single_constant'] for i in options.get('list_set_deferrable_load_single_constant')]
574
- if options.get('list_peak_hours_periods_start_hours',None) != None and options.get('list_peak_hours_periods_end_hours',None) != None:
575
- start_hours_list = [i['peak_hours_periods_start_hours'] for i in options['list_peak_hours_periods_start_hours']]
576
- end_hours_list = [i['peak_hours_periods_end_hours'] for i in options['list_peak_hours_periods_end_hours']]
806
+ if params["optim_conf"]["weather_forecast_method"] == "solcast":
807
+ params["params_secrets"]["solcast_api_key"] = options.get(
808
+ "optional_solcast_api_key",
809
+ params_secrets.get("solcast_api_key", "123456"),
810
+ )
811
+ params["params_secrets"]["solcast_rooftop_id"] = options.get(
812
+ "optional_solcast_rooftop_id",
813
+ params_secrets.get("solcast_rooftop_id", "123456"),
814
+ )
815
+ elif params["optim_conf"]["weather_forecast_method"] == "solar.forecast":
816
+ params["params_secrets"]["solar_forecast_kwp"] = options.get(
817
+ "optional_solar_forecast_kwp",
818
+ params_secrets.get("solar_forecast_kwp", 5),
819
+ )
820
+ params["optim_conf"]["load_forecast_method"] = options.get(
821
+ "load_forecast_method", params["optim_conf"]["load_forecast_method"]
822
+ )
823
+ params["optim_conf"]["delta_forecast"] = options.get(
824
+ "delta_forecast_daily", params["optim_conf"]["delta_forecast"]
825
+ )
826
+ params["optim_conf"]["load_cost_forecast_method"] = options.get(
827
+ "load_cost_forecast_method",
828
+ params["optim_conf"]["load_cost_forecast_method"],
829
+ )
830
+ if options.get("list_set_deferrable_load_single_constant", None) != None:
831
+ params["optim_conf"]["set_def_constant"] = [
832
+ i["set_deferrable_load_single_constant"]
833
+ for i in options.get("list_set_deferrable_load_single_constant")
834
+ ]
835
+ if (
836
+ options.get("list_peak_hours_periods_start_hours", None) != None
837
+ and options.get("list_peak_hours_periods_end_hours", None) != None
838
+ ):
839
+ start_hours_list = [
840
+ i["peak_hours_periods_start_hours"]
841
+ for i in options["list_peak_hours_periods_start_hours"]
842
+ ]
843
+ end_hours_list = [
844
+ i["peak_hours_periods_end_hours"]
845
+ for i in options["list_peak_hours_periods_end_hours"]
846
+ ]
577
847
  num_peak_hours = len(start_hours_list)
578
848
  list_hp_periods_list = [{'period_hp_'+str(i+1):[{'start':start_hours_list[i]},{'end':end_hours_list[i]}]} for i in range(num_peak_hours)]
579
849
  params['optim_conf']['list_hp_periods'] = list_hp_periods_list
@@ -645,20 +915,35 @@ def build_params(params: dict, params_secrets: dict, options: dict, addon: int,
645
915
  for x in range(len(params['optim_conf']['P_deferrable_nom']), params['optim_conf']['num_def_loads']):
646
916
  params['optim_conf']['P_deferrable_nom'].append(0)
647
917
  # days_to_retrieve should be no less then 2
648
- if params['retrieve_hass_conf']['days_to_retrieve'] < 2:
649
- params['retrieve_hass_conf']['days_to_retrieve'] = 2
650
- logger.warning("days_to_retrieve should not be lower then 2, setting days_to_retrieve to 2. Make sure your sensors also have at least 2 days of history")
918
+ if params["retrieve_hass_conf"]["days_to_retrieve"] < 2:
919
+ params["retrieve_hass_conf"]["days_to_retrieve"] = 2
920
+ logger.warning(
921
+ "days_to_retrieve should not be lower then 2, setting days_to_retrieve to 2. Make sure your sensors also have at least 2 days of history"
922
+ )
651
923
  else:
652
- params['params_secrets'] = params_secrets
924
+ params["params_secrets"] = params_secrets
653
925
  # The params dict
654
- params['passed_data'] = {'pv_power_forecast':None,'load_power_forecast':None,'load_cost_forecast':None,'prod_price_forecast':None,
655
- 'prediction_horizon':None,'soc_init':None,'soc_final':None,'def_total_hours':None,'def_start_timestep':None,'def_end_timestep':None,'alpha':None,'beta':None}
926
+ params["passed_data"] = {
927
+ "pv_power_forecast": None,
928
+ "load_power_forecast": None,
929
+ "load_cost_forecast": None,
930
+ "prod_price_forecast": None,
931
+ "prediction_horizon": None,
932
+ "soc_init": None,
933
+ "soc_final": None,
934
+ "def_total_hours": None,
935
+ "def_start_timestep": None,
936
+ "def_end_timestep": None,
937
+ "alpha": None,
938
+ "beta": None,
939
+ }
656
940
  return params
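build_params maps the add-on's options.json onto the retrieve_hass_conf/optim_conf/plant_conf sections when addon == 1, falling back to the values already present in params. A sketch with a few of the option keys handled above (values are illustrative; params, params_secrets and logger are assumed to have been loaded already):

    options = {
        "optimization_time_step": 30,
        "historic_days_to_retrieve": 2,
        "sensor_power_photovoltaics": "sensor.power_photovoltaics",
        "sensor_power_load_no_var_loads": "sensor.power_load_no_var_loads",
        "set_use_battery": False,
        "number_of_deferrable_loads": 2,
    }
    params = build_params(params, params_secrets, options, addon=1, logger=logger)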
657
941
 
942
+
658
943
  def get_days_list(days_to_retrieve: int) -> pd.date_range:
659
944
  """
660
945
  Get list of past days from today to days_to_retrieve.
661
-
946
+
662
947
  :param days_to_retrieve: Total number of days to retrieve from the past
663
948
  :type days_to_retrieve: int
664
949
  :return: The list of days
@@ -667,19 +952,19 @@ def get_days_list(days_to_retrieve: int) -> pd.date_range:
667
952
  """
668
953
  today = datetime.now(timezone.utc).replace(minute=0, second=0, microsecond=0)
669
954
  d = (today - timedelta(days=days_to_retrieve)).isoformat()
670
- days_list = pd.date_range(start=d, end=today.isoformat(), freq='D')
671
-
955
+ days_list = pd.date_range(start=d, end=today.isoformat(), freq="D")
672
956
  return days_list
673
957
 
958
+
674
959
  def set_df_index_freq(df: pd.DataFrame) -> pd.DataFrame:
675
960
  """
676
961
  Set the freq of a DataFrame DateTimeIndex.
677
-
962
+
678
963
  :param df: Input DataFrame
679
964
  :type df: pd.DataFrame
680
965
  :return: Input DataFrame with freq defined
681
966
  :rtype: pd.DataFrame
682
-
967
+
683
968
  """
684
969
  idx_diff = np.diff(df.index)
685
970
  sampling = pd.to_timedelta(np.median(idx_diff))