emhass 0.13.0__py3-none-any.whl → 0.13.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emhass/utils.py ADDED
@@ -0,0 +1,1764 @@
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import ast
5
+ import copy
6
+ import csv
7
+ import json
8
+ import logging
9
+ import os
10
+ import pathlib
11
+ from datetime import UTC, datetime, timedelta
12
+ from typing import TYPE_CHECKING
13
+
14
+ import numpy as np
15
+ import pandas as pd
16
+ import plotly.express as px
17
+ import pytz
18
+ import yaml
19
+ from requests import get
20
+
21
+ if TYPE_CHECKING:
22
+ from emhass.machine_learning_forecaster import MLForecaster
23
+
24
+ pd.options.plotting.backend = "plotly"
25
+
26
+
27
+ def get_root(file: str, num_parent: int | None = 3) -> pathlib.Path:
28
+ """
29
+ Get the root absolute path of the working directory.
30
+
31
+ :param file: The passed file path with __file__
32
+ :param num_parent: The number of parent levels up to the desired root folder, defaults to 3
33
+ :type num_parent: int, optional
34
+ :return: The root path
35
+ :rtype: pathlib.Path
36
+
37
+ """
38
+ if num_parent == 3:
39
+ root = pathlib.Path(file).resolve().parent.parent.parent
40
+ elif num_parent == 2:
41
+ root = pathlib.Path(file).resolve().parent.parent
42
+ elif num_parent == 1:
43
+ root = pathlib.Path(file).resolve().parent
44
+ else:
45
+ raise ValueError("num_parent value not valid, must be between 1 and 3")
46
+ return root
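
A quick usage sketch (illustrative only; it assumes emhass is importable, and the num_parent value and the data/ subfolder are examples rather than anything stated in this diff):

    # Hypothetical example: resolve a project root two levels above this file
    from emhass.utils import get_root

    root = get_root(__file__, num_parent=2)
    associations_path = root / "data" / "associations.csv"  # pathlib arithmetic on the returned path
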
47
+
48
+
49
+ def get_logger(
50
+ fun_name: str,
51
+ emhass_conf: dict,
52
+ save_to_file: bool | None = True,
53
+ logging_level: str | None = "DEBUG",
54
+ ) -> tuple[logging.Logger, logging.StreamHandler]:
55
+ """
56
+ Create a simple logger object.
57
+
58
+ :param fun_name: The Python function object name where the logger will be used
59
+ :type fun_name: str
60
+ :param emhass_conf: Dictionary containing the needed emhass paths
61
+ :type emhass_conf: dict
62
+ :param save_to_file: Write log to a file, defaults to True
63
+ :type save_to_file: bool, optional
64
+ :param logging_level: The logging level ("DEBUG", "INFO", "WARNING" or "ERROR"), defaults to "DEBUG"
65
+ :type logging_level: str, optional
66
+ :return: The logger object and the handler
67
+ :rtype: tuple[logging.Logger, logging.StreamHandler]
66
+
67
+ """
68
+ # create logger object
69
+ logger = logging.getLogger(fun_name)
70
+ logger.propagate = True
71
+ logger.fileSetting = save_to_file
72
+ if save_to_file:
73
+ if os.path.isdir(emhass_conf["data_path"]):
74
+ ch = logging.FileHandler(emhass_conf["data_path"] / "logger_emhass.log")
75
+ else:
76
+ raise Exception("Unable to access data_path: " + emhass_conf["data_path"])
77
+ else:
78
+ ch = logging.StreamHandler()
79
+ if logging_level == "DEBUG":
80
+ logger.setLevel(logging.DEBUG)
81
+ ch.setLevel(logging.DEBUG)
82
+ elif logging_level == "INFO":
83
+ logger.setLevel(logging.INFO)
84
+ ch.setLevel(logging.INFO)
85
+ elif logging_level == "WARNING":
86
+ logger.setLevel(logging.WARNING)
87
+ ch.setLevel(logging.WARNING)
88
+ elif logging_level == "ERROR":
89
+ logger.setLevel(logging.ERROR)
90
+ ch.setLevel(logging.ERROR)
91
+ else:
92
+ logger.setLevel(logging.DEBUG)
93
+ ch.setLevel(logging.DEBUG)
94
+ formatter = logging.Formatter(
95
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
96
+ )
97
+ ch.setFormatter(formatter)
98
+ logger.addHandler(ch)
99
+
100
+ return logger, ch
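
A minimal sketch of the emhass_conf dict this logger expects (the /tmp path is an assumption for illustration):

    # Hypothetical example: file-backed logger writing to data_path/logger_emhass.log
    import pathlib
    from emhass.utils import get_logger

    emhass_conf = {"data_path": pathlib.Path("/tmp")}
    logger, ch = get_logger(__name__, emhass_conf, save_to_file=True, logging_level="INFO")
    logger.info("logger ready")
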
101
+
102
+
103
+ def get_forecast_dates(
104
+ freq: int,
105
+ delta_forecast: int,
106
+ time_zone: datetime.tzinfo,
107
+ timedelta_days: int | None = 0,
108
+ ) -> pd.core.indexes.datetimes.DatetimeIndex:
109
+ """
110
+ Get the date_range list of the needed future dates using the delta_forecast parameter.
111
+
112
+ :param freq: Optimization time step.
113
+ :type freq: int
114
+ :param delta_forecast: Number of days to forecast in the future to be used for the optimization.
115
+ :type delta_forecast: int
116
+ :param time_zone: The time zone used to localize the forecast dates.
117
+ :type time_zone: datetime.tzinfo
118
+ :param timedelta_days: Number of truncated days needed for each optimization iteration, defaults to 0
119
+ :type timedelta_days: int, optional
118
+ :return: A list of future forecast dates.
119
+ :rtype: pd.core.indexes.datetimes.DatetimeIndex
120
+
121
+ """
122
+ freq = pd.to_timedelta(freq, "minutes")
123
+ start_forecast = pd.Timestamp(datetime.now()).replace(
124
+ hour=0, minute=0, second=0, microsecond=0
125
+ )
126
+ end_forecast = (start_forecast + pd.Timedelta(days=delta_forecast)).replace(
127
+ microsecond=0
128
+ )
129
+ forecast_dates = (
130
+ pd.date_range(
131
+ start=start_forecast,
132
+ end=end_forecast + timedelta(days=timedelta_days) - freq,
133
+ freq=freq,
134
+ tz=time_zone,
135
+ )
136
+ .tz_convert("utc")
137
+ .round(freq, ambiguous="infer", nonexistent="shift_forward")
138
+ .tz_convert(time_zone)
139
+ )
140
+ return forecast_dates
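
As a concrete check (a sketch; the Paris time zone is only an example), a 30-minute step over a single forecast day yields 48 timestamps starting at local midnight:

    # Hypothetical example: one day of half-hourly forecast dates
    import pytz
    from emhass.utils import get_forecast_dates

    dates = get_forecast_dates(freq=30, delta_forecast=1, time_zone=pytz.timezone("Europe/Paris"))
    print(len(dates))  # 48 half-hour steps on a standard (non-DST-transition) day
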
141
+
142
+
143
+ def update_params_with_ha_config(
144
+ params: str,
145
+ ha_config: dict,
146
+ ) -> str:
147
+ """
148
+ Update the params with the Home Assistant configuration.
149
+
150
+ Parameters
151
+ ----------
152
+ params : str
153
+ The serialized params.
154
+ ha_config : dict
155
+ The Home Assistant configuration.
156
+
157
+ Returns
158
+ -------
159
+ str
160
+ The updated params, serialized back to JSON.
161
+ """
162
+ # Load serialized params
163
+ params = json.loads(params)
164
+ # Update params
165
+ currency_to_symbol = {
166
+ "EUR": "€",
167
+ "USD": "$",
168
+ "GBP": "£",
169
+ "YEN": "¥",
170
+ "JPY": "¥",
171
+ "AUD": "A$",
172
+ "CAD": "C$",
173
+ "CHF": "CHF", # Swiss Franc has no special symbol
174
+ "CNY": "¥",
175
+ "INR": "₹",
176
+ "CZK": "Kč",
177
+ "BGN": "лв",
178
+ "DKK": "kr",
179
+ "HUF": "Ft",
180
+ "PLN": "zł",
181
+ "RON": "Leu",
182
+ "SEK": "kr",
183
+ "TRY": "Lira",
184
+ "VEF": "Bolivar",
185
+ "VND": "Dong",
186
+ "THB": "Baht",
187
+ "SGD": "S$",
188
+ "IDR": "Roepia",
189
+ "ZAR": "Rand",
190
+ # Add more as needed
191
+ }
192
+ if "currency" in ha_config.keys():
193
+ ha_config["currency"] = currency_to_symbol.get(ha_config["currency"], "Unknown")
194
+ else:
195
+ ha_config["currency"] = "€"
196
+
197
+ updated_passed_dict = {
198
+ "custom_cost_fun_id": {
199
+ "unit_of_measurement": ha_config["currency"],
200
+ },
201
+ "custom_unit_load_cost_id": {
202
+ "unit_of_measurement": f"{ha_config['currency']}/kWh",
203
+ },
204
+ "custom_unit_prod_price_id": {
205
+ "unit_of_measurement": f"{ha_config['currency']}/kWh",
206
+ },
207
+ }
208
+ for key, value in updated_passed_dict.items():
209
+ params["passed_data"][key]["unit_of_measurement"] = value["unit_of_measurement"]
210
+ # Serialize the final params
211
+ params = json.dumps(params, default=str)
212
+ return params
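
A sketch of the round trip (the minimal passed_data payload below is an assumption; real params carry many more keys):

    # Hypothetical example: the HA currency code is mapped to a symbol and
    # propagated into the three cost-related entity definitions
    import json
    from emhass.utils import update_params_with_ha_config

    params = json.dumps({
        "passed_data": {
            "custom_cost_fun_id": {"unit_of_measurement": "€"},
            "custom_unit_load_cost_id": {"unit_of_measurement": "€/kWh"},
            "custom_unit_prod_price_id": {"unit_of_measurement": "€/kWh"},
        }
    })
    params = update_params_with_ha_config(params, {"currency": "USD"})
    # json.loads(params)["passed_data"]["custom_cost_fun_id"]["unit_of_measurement"] == "$"
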
213
+
214
+
215
+ def treat_runtimeparams(
216
+ runtimeparams: str,
217
+ params: str,
218
+ retrieve_hass_conf: dict,
219
+ optim_conf: dict,
220
+ plant_conf: dict,
221
+ set_type: str,
222
+ logger: logging.Logger,
223
+ emhass_conf: dict,
224
+ ) -> tuple[str, dict, dict, dict]:
225
+ """
226
+ Treat the passed optimization runtime parameters.
227
+
228
+ :param runtimeparams: Json string containing the runtime parameters dict.
229
+ :type runtimeparams: str
230
+ :param params: Built configuration parameters
231
+ :type params: str
232
+ :param retrieve_hass_conf: Config dictionary for data retrieving parameters.
233
+ :type retrieve_hass_conf: dict
234
+ :param optim_conf: Config dictionary for optimization parameters.
235
+ :type optim_conf: dict
236
+ :param plant_conf: Config dictionary for technical plant parameters.
237
+ :type plant_conf: dict
238
+ :param set_type: The type of action to be performed.
239
+ :type set_type: str
240
+ :param logger: The logger object.
241
+ :type logger: logging.Logger
242
+ :param emhass_conf: Dictionary containing the needed emhass paths
243
+ :type emhass_conf: dict
244
+ :return: The serialized params and the updated retrieve_hass_conf, optim_conf and plant_conf dictionaries.
246
+ :rtype: tuple[str, dict, dict, dict]
246
+
247
+ """
248
+ # Check if passed params is a dict
249
+ if (params is not None) and (params != "null"):
250
+ if type(params) is str:
251
+ params = json.loads(params)
252
+ else:
253
+ params = {}
254
+
255
+ # Merge current config categories to params
256
+ params["retrieve_hass_conf"].update(retrieve_hass_conf)
257
+ params["optim_conf"].update(optim_conf)
258
+ params["plant_conf"].update(plant_conf)
259
+
260
+ # Check defaults on HA retrieved config
261
+ default_currency_unit = "€"
262
+ default_temperature_unit = "°C"
263
+
264
+ # Some default data needed
265
+ custom_deferrable_forecast_id = []
266
+ custom_predicted_temperature_id = []
267
+ for k in range(params["optim_conf"]["number_of_deferrable_loads"]):
268
+ custom_deferrable_forecast_id.append(
269
+ {
270
+ "entity_id": f"sensor.p_deferrable{k}",
271
+ "device_class": "power",
272
+ "unit_of_measurement": "W",
273
+ "friendly_name": f"Deferrable Load {k}",
274
+ }
275
+ )
276
+ custom_predicted_temperature_id.append(
277
+ {
278
+ "entity_id": f"sensor.temp_predicted{k}",
279
+ "device_class": "temperature",
280
+ "unit_of_measurement": default_temperature_unit,
281
+ "friendly_name": f"Predicted temperature {k}",
282
+ }
283
+ )
284
+ default_passed_dict = {
285
+ "custom_pv_forecast_id": {
286
+ "entity_id": "sensor.p_pv_forecast",
287
+ "device_class": "power",
288
+ "unit_of_measurement": "W",
289
+ "friendly_name": "PV Power Forecast",
290
+ },
291
+ "custom_load_forecast_id": {
292
+ "entity_id": "sensor.p_load_forecast",
293
+ "device_class": "power",
294
+ "unit_of_measurement": "W",
295
+ "friendly_name": "Load Power Forecast",
296
+ },
297
+ "custom_pv_curtailment_id": {
298
+ "entity_id": "sensor.p_pv_curtailment",
299
+ "device_class": "power",
300
+ "unit_of_measurement": "W",
301
+ "friendly_name": "PV Power Curtailment",
302
+ },
303
+ "custom_hybrid_inverter_id": {
304
+ "entity_id": "sensor.p_hybrid_inverter",
305
+ "device_class": "power",
306
+ "unit_of_measurement": "W",
307
+ "friendly_name": "PV Hybrid Inverter",
308
+ },
309
+ "custom_batt_forecast_id": {
310
+ "entity_id": "sensor.p_batt_forecast",
311
+ "device_class": "power",
312
+ "unit_of_measurement": "W",
313
+ "friendly_name": "Battery Power Forecast",
314
+ },
315
+ "custom_batt_soc_forecast_id": {
316
+ "entity_id": "sensor.soc_batt_forecast",
317
+ "device_class": "battery",
318
+ "unit_of_measurement": "%",
319
+ "friendly_name": "Battery SOC Forecast",
320
+ },
321
+ "custom_grid_forecast_id": {
322
+ "entity_id": "sensor.p_grid_forecast",
323
+ "device_class": "power",
324
+ "unit_of_measurement": "W",
325
+ "friendly_name": "Grid Power Forecast",
326
+ },
327
+ "custom_cost_fun_id": {
328
+ "entity_id": "sensor.total_cost_fun_value",
329
+ "device_class": "monetary",
330
+ "unit_of_measurement": default_currency_unit,
331
+ "friendly_name": "Total cost function value",
332
+ },
333
+ "custom_optim_status_id": {
334
+ "entity_id": "sensor.optim_status",
335
+ "device_class": "",
336
+ "unit_of_measurement": "",
337
+ "friendly_name": "EMHASS optimization status",
338
+ },
339
+ "custom_unit_load_cost_id": {
340
+ "entity_id": "sensor.unit_load_cost",
341
+ "device_class": "monetary",
342
+ "unit_of_measurement": f"{default_currency_unit}/kWh",
343
+ "friendly_name": "Unit Load Cost",
344
+ },
345
+ "custom_unit_prod_price_id": {
346
+ "entity_id": "sensor.unit_prod_price",
347
+ "device_class": "monetary",
348
+ "unit_of_measurement": f"{default_currency_unit}/kWh",
349
+ "friendly_name": "Unit Prod Price",
350
+ },
351
+ "custom_deferrable_forecast_id": custom_deferrable_forecast_id,
352
+ "custom_predicted_temperature_id": custom_predicted_temperature_id,
353
+ "publish_prefix": "",
354
+ }
355
+ if "passed_data" in params.keys():
356
+ for key, value in default_passed_dict.items():
357
+ params["passed_data"][key] = value
358
+ else:
359
+ params["passed_data"] = default_passed_dict
360
+
361
+ # If any runtime parameters were passed in the action call
362
+ if runtimeparams is not None:
363
+ if type(runtimeparams) is str:
364
+ runtimeparams = json.loads(runtimeparams)
365
+
366
+ # Loop through parameters stored in the associations file and check whether any were passed at runtime
367
+ # If so, set the runtime parameter value in params
368
+ if emhass_conf["associations_path"].exists():
369
+ with emhass_conf["associations_path"].open("r") as data:
370
+ associations = list(csv.reader(data, delimiter=","))
371
+ # Association file key reference
372
+ # association[0] = config categories
373
+ # association[1] = legacy parameter name
374
+ # association[2] = parameter (config.json/config_defaults.json)
375
+ # association[3] = parameter list name if exists (not used, from legacy options.json)
376
+ for association in associations:
377
+ # Check parameter name exists in runtime
378
+ if runtimeparams.get(association[2], None) is not None:
379
+ params[association[0]][association[2]] = runtimeparams[
380
+ association[2]
381
+ ]
382
+ # Check Legacy parameter name runtime
383
+ elif runtimeparams.get(association[1], None) is not None:
384
+ params[association[0]][association[2]] = runtimeparams[
385
+ association[1]
386
+ ]
387
+ else:
388
+ logger.warning(
389
+ "Cant find associations file (associations.csv) in: "
390
+ + str(emhass_conf["associations_path"])
391
+ )
392
+
393
+ # Generate forecast_dates
394
+ if (
395
+ "optimization_time_step" in runtimeparams.keys()
396
+ or "freq" in runtimeparams.keys()
397
+ ):
398
+ optimization_time_step = int(
399
+ runtimeparams.get("optimization_time_step", runtimeparams.get("freq"))
400
+ )
401
+ params["retrieve_hass_conf"]["optimization_time_step"] = pd.to_timedelta(
402
+ optimization_time_step, "minutes"
403
+ )
404
+ else:
405
+ optimization_time_step = int(
406
+ params["retrieve_hass_conf"]["optimization_time_step"].seconds / 60.0
407
+ )
408
+ if (
409
+ runtimeparams.get("delta_forecast_daily", None) is not None
410
+ or runtimeparams.get("delta_forecast", None) is not None
411
+ ):
412
+ delta_forecast = int(
413
+ runtimeparams.get(
414
+ "delta_forecast_daily", runtimeparams["delta_forecast"]
415
+ )
416
+ )
417
+ params["optim_conf"]["delta_forecast_daily"] = pd.Timedelta(
418
+ days=delta_forecast
419
+ )
420
+ else:
421
+ delta_forecast = int(params["optim_conf"]["delta_forecast_daily"].days)
422
+ if runtimeparams.get("time_zone", None) is not None:
423
+ time_zone = pytz.timezone(params["retrieve_hass_conf"]["time_zone"])
424
+ params["retrieve_hass_conf"]["time_zone"] = time_zone
425
+ else:
426
+ time_zone = params["retrieve_hass_conf"]["time_zone"]
427
+
428
+ forecast_dates = get_forecast_dates(
429
+ optimization_time_step, delta_forecast, time_zone
430
+ )
431
+
432
+ # Add runtime exclusive (not in config) parameters to params
433
+ # regressor-model-fit
434
+ if set_type == "regressor-model-fit":
435
+ if "csv_file" in runtimeparams:
436
+ csv_file = runtimeparams["csv_file"]
437
+ params["passed_data"]["csv_file"] = csv_file
438
+ if "features" in runtimeparams:
439
+ features = runtimeparams["features"]
440
+ params["passed_data"]["features"] = features
441
+ if "target" in runtimeparams:
442
+ target = runtimeparams["target"]
443
+ params["passed_data"]["target"] = target
444
+ if "timestamp" not in runtimeparams:
445
+ params["passed_data"]["timestamp"] = None
446
+ else:
447
+ timestamp = runtimeparams["timestamp"]
448
+ params["passed_data"]["timestamp"] = timestamp
449
+ if "date_features" not in runtimeparams:
450
+ params["passed_data"]["date_features"] = []
451
+ else:
452
+ date_features = runtimeparams["date_features"]
453
+ params["passed_data"]["date_features"] = date_features
454
+
455
+ # regressor-model-predict
456
+ if set_type == "regressor-model-predict":
457
+ if "new_values" in runtimeparams:
458
+ new_values = runtimeparams["new_values"]
459
+ params["passed_data"]["new_values"] = new_values
460
+ if "csv_file" in runtimeparams:
461
+ csv_file = runtimeparams["csv_file"]
462
+ params["passed_data"]["csv_file"] = csv_file
463
+ if "features" in runtimeparams:
464
+ features = runtimeparams["features"]
465
+ params["passed_data"]["features"] = features
466
+ if "target" in runtimeparams:
467
+ target = runtimeparams["target"]
468
+ params["passed_data"]["target"] = target
469
+
470
+ # MPC control case
471
+ if set_type == "naive-mpc-optim":
472
+ if "prediction_horizon" not in runtimeparams.keys():
473
+ prediction_horizon = 10 # 10 time steps by default
474
+ else:
475
+ prediction_horizon = runtimeparams["prediction_horizon"]
476
+ params["passed_data"]["prediction_horizon"] = prediction_horizon
477
+ if "soc_init" not in runtimeparams.keys():
478
+ soc_init = params["plant_conf"]["battery_target_state_of_charge"]
479
+ else:
480
+ soc_init = runtimeparams["soc_init"]
481
+ if soc_init < params["plant_conf"]["battery_minimum_state_of_charge"]:
482
+ logger.warning(
483
+ f"Passed soc_init={soc_init} is lower than soc_min={params['plant_conf']['battery_minimum_state_of_charge']}, setting soc_init=soc_min"
484
+ )
485
+ soc_init = params["plant_conf"]["battery_minimum_state_of_charge"]
486
+ if soc_init > params["plant_conf"]["battery_maximum_state_of_charge"]:
487
+ logger.warning(
488
+ f"Passed soc_init={soc_init} is greater than soc_max={params['plant_conf']['battery_maximum_state_of_charge']}, setting soc_init=soc_max"
489
+ )
490
+ soc_init = params["plant_conf"]["battery_maximum_state_of_charge"]
491
+ params["passed_data"]["soc_init"] = soc_init
492
+ if "soc_final" not in runtimeparams.keys():
493
+ soc_final = params["plant_conf"]["battery_target_state_of_charge"]
494
+ else:
495
+ soc_final = runtimeparams["soc_final"]
496
+ if soc_final < params["plant_conf"]["battery_minimum_state_of_charge"]:
497
+ logger.warning(
498
+ f"Passed soc_final={soc_final} is lower than soc_min={params['plant_conf']['battery_minimum_state_of_charge']}, setting soc_final=soc_min"
499
+ )
500
+ soc_final = params["plant_conf"]["battery_minimum_state_of_charge"]
501
+ if soc_final > params["plant_conf"]["battery_maximum_state_of_charge"]:
502
+ logger.warning(
503
+ f"Passed soc_final={soc_final} is greater than soc_max={params['plant_conf']['battery_maximum_state_of_charge']}, setting soc_final=soc_max"
504
+ )
505
+ soc_final = params["plant_conf"]["battery_maximum_state_of_charge"]
506
+ params["passed_data"]["soc_final"] = soc_final
507
+ if "operating_timesteps_of_each_deferrable_load" in runtimeparams.keys():
508
+ params["passed_data"]["operating_timesteps_of_each_deferrable_load"] = (
509
+ runtimeparams["operating_timesteps_of_each_deferrable_load"]
510
+ )
511
+ params["optim_conf"]["operating_timesteps_of_each_deferrable_load"] = (
512
+ runtimeparams["operating_timesteps_of_each_deferrable_load"]
513
+ )
514
+ if "operating_hours_of_each_deferrable_load" in params["optim_conf"].keys():
515
+ params["passed_data"]["operating_hours_of_each_deferrable_load"] = (
516
+ params["optim_conf"]["operating_hours_of_each_deferrable_load"]
517
+ )
518
+ params["passed_data"]["start_timesteps_of_each_deferrable_load"] = params[
519
+ "optim_conf"
520
+ ].get("start_timesteps_of_each_deferrable_load", None)
521
+ params["passed_data"]["end_timesteps_of_each_deferrable_load"] = params[
522
+ "optim_conf"
523
+ ].get("end_timesteps_of_each_deferrable_load", None)
524
+
525
+ forecast_dates = copy.deepcopy(forecast_dates)[0:prediction_horizon]
526
+
527
+ # Load the default config
528
+ if "def_load_config" in runtimeparams:
529
+ params["optim_conf"]["def_load_config"] = runtimeparams[
530
+ "def_load_config"
531
+ ]
532
+ if "def_load_config" in params["optim_conf"]:
533
+ for k in range(len(params["optim_conf"]["def_load_config"])):
534
+ if "thermal_config" in params["optim_conf"]["def_load_config"][k]:
535
+ if (
536
+ "heater_desired_temperatures" in runtimeparams
537
+ and len(runtimeparams["heater_desired_temperatures"]) > k
538
+ ):
539
+ params["optim_conf"]["def_load_config"][k][
540
+ "thermal_config"
541
+ ]["desired_temperatures"] = runtimeparams[
542
+ "heater_desired_temperatures"
543
+ ][k]
544
+ if (
545
+ "heater_start_temperatures" in runtimeparams
546
+ and len(runtimeparams["heater_start_temperatures"]) > k
547
+ ):
548
+ params["optim_conf"]["def_load_config"][k][
549
+ "thermal_config"
550
+ ]["start_temperature"] = runtimeparams[
551
+ "heater_start_temperatures"
552
+ ][k]
553
+ else:
554
+ params["passed_data"]["prediction_horizon"] = None
555
+ params["passed_data"]["soc_init"] = None
556
+ params["passed_data"]["soc_final"] = None
557
+
558
+ # Treat passed forecast data lists
559
+ list_forecast_key = [
560
+ "pv_power_forecast",
561
+ "load_power_forecast",
562
+ "load_cost_forecast",
563
+ "prod_price_forecast",
564
+ "outdoor_temperature_forecast",
565
+ ]
566
+ forecast_methods = [
567
+ "weather_forecast_method",
568
+ "load_forecast_method",
569
+ "load_cost_forecast_method",
570
+ "production_price_forecast_method",
571
+ "outdoor_temperature_forecast_method",
572
+ ]
573
+
574
+ # Loop over forecasts and check whether each value is a list at least as long as forecast_dates
575
+ for method, forecast_key in enumerate(list_forecast_key):
576
+ if forecast_key in runtimeparams.keys():
577
+ if isinstance(runtimeparams[forecast_key], list) and len(
578
+ runtimeparams[forecast_key]
579
+ ) >= len(forecast_dates):
580
+ params["passed_data"][forecast_key] = runtimeparams[forecast_key]
581
+ params["optim_conf"][forecast_methods[method]] = "list"
582
+ else:
583
+ logger.error(
584
+ f"ERROR: The passed data is either not a list or the length is not correct, length should be {str(len(forecast_dates))}"
585
+ )
586
+ logger.error(
587
+ f"Passed type is {str(type(runtimeparams[forecast_key]))} and length is {str(len(runtimeparams[forecast_key]))}"
588
+ )
589
+ # Check if string contains list, if so extract
590
+ if isinstance(runtimeparams[forecast_key], str):
591
+ if isinstance(ast.literal_eval(runtimeparams[forecast_key]), list):
592
+ runtimeparams[forecast_key] = ast.literal_eval(
593
+ runtimeparams[forecast_key]
594
+ )
595
+ list_non_digits = [
596
+ x
597
+ for x in runtimeparams[forecast_key]
598
+ if not (isinstance(x, int) or isinstance(x, float))
599
+ ]
600
+ if len(list_non_digits) > 0:
601
+ logger.warning(
602
+ f"There are non numeric values on the passed data for {forecast_key}, check for missing values (nans, null, etc)"
603
+ )
604
+ for x in list_non_digits:
605
+ logger.warning(
606
+ f"This value in {forecast_key} was detected as non digits: {str(x)}"
607
+ )
608
+ else:
609
+ params["passed_data"][forecast_key] = None
610
+
611
+ # Treat passed data for forecast model fit/predict/tune at runtime
612
+ if (
613
+ params["passed_data"].get("historic_days_to_retrieve", None) is not None
614
+ and params["passed_data"]["historic_days_to_retrieve"] < 9
615
+ ):
616
+ logger.warning(
617
+ "warning `days_to_retrieve` is set to a value less than 9, this could cause an error with the fit"
618
+ )
619
+ logger.warning(
620
+ "setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune"
621
+ )
622
+ params["passed_data"]["historic_days_to_retrieve"] = 9
623
+ else:
624
+ if params["retrieve_hass_conf"].get("historic_days_to_retrieve", 0) < 9:
625
+ logger.debug(
626
+ "setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune"
627
+ )
628
+ params["passed_data"]["historic_days_to_retrieve"] = 9
629
+ else:
630
+ params["passed_data"]["historic_days_to_retrieve"] = params[
631
+ "retrieve_hass_conf"
632
+ ]["historic_days_to_retrieve"]
633
+ if "model_type" not in runtimeparams.keys():
634
+ model_type = "long_train_data"
635
+ else:
636
+ model_type = runtimeparams["model_type"]
637
+ params["passed_data"]["model_type"] = model_type
638
+ if "var_model" not in runtimeparams.keys():
639
+ var_model = params["retrieve_hass_conf"]["sensor_power_load_no_var_loads"]
640
+ else:
641
+ var_model = runtimeparams["var_model"]
642
+ params["passed_data"]["var_model"] = var_model
643
+ if "sklearn_model" not in runtimeparams.keys():
644
+ sklearn_model = "KNeighborsRegressor"
645
+ else:
646
+ sklearn_model = runtimeparams["sklearn_model"]
647
+ params["passed_data"]["sklearn_model"] = sklearn_model
648
+ if "regression_model" not in runtimeparams.keys():
649
+ regression_model = "AdaBoostRegression"
650
+ else:
651
+ regression_model = runtimeparams["regression_model"]
652
+ params["passed_data"]["regression_model"] = regression_model
653
+ if "num_lags" not in runtimeparams.keys():
654
+ num_lags = 48
655
+ else:
656
+ num_lags = runtimeparams["num_lags"]
657
+ params["passed_data"]["num_lags"] = num_lags
658
+ if "split_date_delta" not in runtimeparams.keys():
659
+ split_date_delta = "48h"
660
+ else:
661
+ split_date_delta = runtimeparams["split_date_delta"]
662
+ params["passed_data"]["split_date_delta"] = split_date_delta
663
+ if "perform_backtest" not in runtimeparams.keys():
664
+ perform_backtest = False
665
+ else:
666
+ perform_backtest = ast.literal_eval(
667
+ str(runtimeparams["perform_backtest"]).capitalize()
668
+ )
669
+ params["passed_data"]["perform_backtest"] = perform_backtest
670
+ if "model_predict_publish" not in runtimeparams.keys():
671
+ model_predict_publish = False
672
+ else:
673
+ model_predict_publish = ast.literal_eval(
674
+ str(runtimeparams["model_predict_publish"]).capitalize()
675
+ )
676
+ params["passed_data"]["model_predict_publish"] = model_predict_publish
677
+ if "model_predict_entity_id" not in runtimeparams.keys():
678
+ model_predict_entity_id = "sensor.p_load_forecast_custom_model"
679
+ else:
680
+ model_predict_entity_id = runtimeparams["model_predict_entity_id"]
681
+ params["passed_data"]["model_predict_entity_id"] = model_predict_entity_id
682
+ if "model_predict_device_class" not in runtimeparams.keys():
683
+ model_predict_device_class = "power"
684
+ else:
685
+ model_predict_device_class = runtimeparams["model_predict_device_class"]
686
+ params["passed_data"]["model_predict_device_class"] = model_predict_device_class
687
+ if "model_predict_unit_of_measurement" not in runtimeparams.keys():
688
+ model_predict_unit_of_measurement = "W"
689
+ else:
690
+ model_predict_unit_of_measurement = runtimeparams[
691
+ "model_predict_unit_of_measurement"
692
+ ]
693
+ params["passed_data"]["model_predict_unit_of_measurement"] = (
694
+ model_predict_unit_of_measurement
695
+ )
696
+ if "model_predict_friendly_name" not in runtimeparams.keys():
697
+ model_predict_friendly_name = "Load Power Forecast custom ML model"
698
+ else:
699
+ model_predict_friendly_name = runtimeparams["model_predict_friendly_name"]
700
+ params["passed_data"]["model_predict_friendly_name"] = (
701
+ model_predict_friendly_name
702
+ )
703
+ if "mlr_predict_entity_id" not in runtimeparams.keys():
704
+ mlr_predict_entity_id = "sensor.mlr_predict"
705
+ else:
706
+ mlr_predict_entity_id = runtimeparams["mlr_predict_entity_id"]
707
+ params["passed_data"]["mlr_predict_entity_id"] = mlr_predict_entity_id
708
+ if "mlr_predict_device_class" not in runtimeparams.keys():
709
+ mlr_predict_device_class = "power"
710
+ else:
711
+ mlr_predict_device_class = runtimeparams["mlr_predict_device_class"]
712
+ params["passed_data"]["mlr_predict_device_class"] = mlr_predict_device_class
713
+ if "mlr_predict_unit_of_measurement" not in runtimeparams.keys():
714
+ mlr_predict_unit_of_measurement = None
715
+ else:
716
+ mlr_predict_unit_of_measurement = runtimeparams[
717
+ "mlr_predict_unit_of_measurement"
718
+ ]
719
+ params["passed_data"]["mlr_predict_unit_of_measurement"] = (
720
+ mlr_predict_unit_of_measurement
721
+ )
722
+ if "mlr_predict_friendly_name" not in runtimeparams.keys():
723
+ mlr_predict_friendly_name = "mlr predictor"
724
+ else:
725
+ mlr_predict_friendly_name = runtimeparams["mlr_predict_friendly_name"]
726
+ params["passed_data"]["mlr_predict_friendly_name"] = mlr_predict_friendly_name
727
+
728
+ # Treat passed data for other parameters
729
+ if "alpha" not in runtimeparams.keys():
730
+ alpha = 0.5
731
+ else:
732
+ alpha = runtimeparams["alpha"]
733
+ params["passed_data"]["alpha"] = alpha
734
+ if "beta" not in runtimeparams.keys():
735
+ beta = 0.5
736
+ else:
737
+ beta = runtimeparams["beta"]
738
+ params["passed_data"]["beta"] = beta
739
+
740
+ # Param to save forecast cache (i.e. Solcast)
741
+ if "weather_forecast_cache" not in runtimeparams.keys():
742
+ weather_forecast_cache = False
743
+ else:
744
+ weather_forecast_cache = runtimeparams["weather_forecast_cache"]
745
+ params["passed_data"]["weather_forecast_cache"] = weather_forecast_cache
746
+
747
+ # Param to make sure optimization only uses cached data. (else produce error)
748
+ if "weather_forecast_cache_only" not in runtimeparams.keys():
749
+ weather_forecast_cache_only = False
750
+ else:
751
+ weather_forecast_cache_only = runtimeparams["weather_forecast_cache_only"]
752
+ params["passed_data"]["weather_forecast_cache_only"] = (
753
+ weather_forecast_cache_only
754
+ )
755
+
756
+ # A condition to manually save entity data under data_path/entities after optimization
757
+ if "entity_save" not in runtimeparams.keys():
758
+ entity_save = ""
759
+ else:
760
+ entity_save = runtimeparams["entity_save"]
761
+ params["passed_data"]["entity_save"] = entity_save
762
+
763
+ # A condition to put a prefix on all published data, or check for saved data under prefix name
764
+ if "publish_prefix" not in runtimeparams.keys():
765
+ publish_prefix = ""
766
+ else:
767
+ publish_prefix = runtimeparams["publish_prefix"]
768
+ params["passed_data"]["publish_prefix"] = publish_prefix
769
+
770
+ # Treat optimization (optim_conf) configuration parameters passed at runtime
771
+ if "def_current_state" in runtimeparams.keys():
772
+ params["optim_conf"]["def_current_state"] = [
773
+ bool(s) for s in runtimeparams["def_current_state"]
774
+ ]
775
+
776
+ # Treat retrieve data from Home Assistant (retrieve_hass_conf) configuration parameters passed at runtime
777
+ # Secrets passed at runtime
778
+ if "solcast_api_key" in runtimeparams.keys():
779
+ params["retrieve_hass_conf"]["solcast_api_key"] = runtimeparams[
780
+ "solcast_api_key"
781
+ ]
782
+ if "solcast_rooftop_id" in runtimeparams.keys():
783
+ params["retrieve_hass_conf"]["solcast_rooftop_id"] = runtimeparams[
784
+ "solcast_rooftop_id"
785
+ ]
786
+ if "solar_forecast_kwp" in runtimeparams.keys():
787
+ params["retrieve_hass_conf"]["solar_forecast_kwp"] = runtimeparams[
788
+ "solar_forecast_kwp"
789
+ ]
790
+ # Treat custom entities id's and friendly names for variables
791
+ if "custom_pv_forecast_id" in runtimeparams.keys():
792
+ params["passed_data"]["custom_pv_forecast_id"] = runtimeparams[
793
+ "custom_pv_forecast_id"
794
+ ]
795
+ if "custom_load_forecast_id" in runtimeparams.keys():
796
+ params["passed_data"]["custom_load_forecast_id"] = runtimeparams[
797
+ "custom_load_forecast_id"
798
+ ]
799
+ if "custom_pv_curtailment_id" in runtimeparams.keys():
800
+ params["passed_data"]["custom_pv_curtailment_id"] = runtimeparams[
801
+ "custom_pv_curtailment_id"
802
+ ]
803
+ if "custom_hybrid_inverter_id" in runtimeparams.keys():
804
+ params["passed_data"]["custom_hybrid_inverter_id"] = runtimeparams[
805
+ "custom_hybrid_inverter_id"
806
+ ]
807
+ if "custom_batt_forecast_id" in runtimeparams.keys():
808
+ params["passed_data"]["custom_batt_forecast_id"] = runtimeparams[
809
+ "custom_batt_forecast_id"
810
+ ]
811
+ if "custom_batt_soc_forecast_id" in runtimeparams.keys():
812
+ params["passed_data"]["custom_batt_soc_forecast_id"] = runtimeparams[
813
+ "custom_batt_soc_forecast_id"
814
+ ]
815
+ if "custom_grid_forecast_id" in runtimeparams.keys():
816
+ params["passed_data"]["custom_grid_forecast_id"] = runtimeparams[
817
+ "custom_grid_forecast_id"
818
+ ]
819
+ if "custom_cost_fun_id" in runtimeparams.keys():
820
+ params["passed_data"]["custom_cost_fun_id"] = runtimeparams[
821
+ "custom_cost_fun_id"
822
+ ]
823
+ if "custom_optim_status_id" in runtimeparams.keys():
824
+ params["passed_data"]["custom_optim_status_id"] = runtimeparams[
825
+ "custom_optim_status_id"
826
+ ]
827
+ if "custom_unit_load_cost_id" in runtimeparams.keys():
828
+ params["passed_data"]["custom_unit_load_cost_id"] = runtimeparams[
829
+ "custom_unit_load_cost_id"
830
+ ]
831
+ if "custom_unit_prod_price_id" in runtimeparams.keys():
832
+ params["passed_data"]["custom_unit_prod_price_id"] = runtimeparams[
833
+ "custom_unit_prod_price_id"
834
+ ]
835
+ if "custom_deferrable_forecast_id" in runtimeparams.keys():
836
+ params["passed_data"]["custom_deferrable_forecast_id"] = runtimeparams[
837
+ "custom_deferrable_forecast_id"
838
+ ]
839
+ if "custom_predicted_temperature_id" in runtimeparams.keys():
840
+ params["passed_data"]["custom_predicted_temperature_id"] = runtimeparams[
841
+ "custom_predicted_temperature_id"
842
+ ]
843
+
844
+ # split config categories from params
845
+ retrieve_hass_conf = params["retrieve_hass_conf"]
846
+ optim_conf = params["optim_conf"]
847
+ plant_conf = params["plant_conf"]
848
+
849
+ # Serialize the final params
850
+ params = json.dumps(params, default=str)
851
+ return params, retrieve_hass_conf, optim_conf, plant_conf
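
Callers pass runtimeparams as a JSON string (or dict); a sketch of a typical naive-mpc-optim payload follows (the values are illustrative, not defaults taken from this diff):

    # Hypothetical example: runtime payload for a naive-mpc-optim action
    import json

    runtimeparams = json.dumps({
        "prediction_horizon": 24,           # MPC horizon in time steps
        "soc_init": 0.4,                    # clamped to the battery SOC bounds above
        "soc_final": 0.6,
        "load_cost_forecast": [0.12] * 48,  # list forecasts must cover forecast_dates
    })
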
852
+
853
+
854
+ def get_yaml_parse(params: str, logger: logging.Logger) -> tuple[dict, dict, dict]:
855
+ """
856
+ Parse the params into the configuration categories.
857
+
858
+ :param params: Built configuration parameters
859
+ :type params: str
860
+ :param logger: The logger object
861
+ :type logger: logging.Logger
862
+ :return: A tuple with the dictionaries containing the parsed data
863
+ :rtype: tuple[dict, dict, dict]
864
+
865
+ """
866
+ if params:
867
+ if type(params) is str:
868
+ input_conf = json.loads(params)
869
+ else:
870
+ input_conf = params
871
+ else:
872
+ input_conf = {}
873
+ logger.error("No params have been detected for get_yaml_parse")
874
+ return False, False, False
875
+
876
+ optim_conf = input_conf.get("optim_conf", {})
877
+ retrieve_hass_conf = input_conf.get("retrieve_hass_conf", {})
878
+ plant_conf = input_conf.get("plant_conf", {})
879
+
880
+ # Format time parameters
881
+ if optim_conf.get("delta_forecast_daily", None) is not None:
882
+ optim_conf["delta_forecast_daily"] = pd.Timedelta(
883
+ days=optim_conf["delta_forecast_daily"]
884
+ )
885
+ if retrieve_hass_conf.get("optimization_time_step", None) is not None:
886
+ retrieve_hass_conf["optimization_time_step"] = pd.to_timedelta(
887
+ retrieve_hass_conf["optimization_time_step"], "minutes"
888
+ )
889
+ if retrieve_hass_conf.get("time_zone", None) is not None:
890
+ retrieve_hass_conf["time_zone"] = pytz.timezone(retrieve_hass_conf["time_zone"])
891
+
892
+ return retrieve_hass_conf, optim_conf, plant_conf
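
A sketch of the parse on a minimal payload (real params contain the full configuration):

    # Hypothetical example: time fields come back as pandas/pytz objects
    import json
    import logging
    from emhass.utils import get_yaml_parse

    params = json.dumps({
        "retrieve_hass_conf": {"time_zone": "Europe/Paris", "optimization_time_step": 30},
        "optim_conf": {"delta_forecast_daily": 1},
        "plant_conf": {},
    })
    retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params, logging.getLogger(__name__))
    # optim_conf["delta_forecast_daily"] -> Timedelta('1 days 00:00:00')
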
893
+
894
+
895
+ def get_injection_dict(df: pd.DataFrame, plot_size: int | None = 1366) -> dict:
896
+ """
897
+ Build a dictionary with graphs and tables for the webui.
898
+
899
+ :param df: The optimization result DataFrame
900
+ :type df: pd.DataFrame
901
+ :param plot_size: Size of the plot figure in pixels, defaults to 1366
902
+ :type plot_size: Optional[int], optional
903
+ :return: A dictionary containing the graphs and tables in html format
904
+ :rtype: dict
905
+
906
+ """
907
+ cols_p = [i for i in df.columns.to_list() if "P_" in i]
908
+ # Let's round the data in the DF
909
+ optim_status = df["optim_status"].unique().item()
910
+ df.drop("optim_status", axis=1, inplace=True)
911
+ cols_else = [i for i in df.columns.to_list() if "P_" not in i]
912
+ df = df.apply(pd.to_numeric)
913
+ df[cols_p] = df[cols_p].astype(int)
914
+ df[cols_else] = df[cols_else].round(3)
915
+ # Create plots
916
+ n_colors = len(cols_p)
917
+ colors = px.colors.sample_colorscale(
918
+ "jet", [n / (n_colors - 1) for n in range(n_colors)]
919
+ )
920
+ fig_0 = px.line(
921
+ df[cols_p],
922
+ title="Systems powers schedule after optimization results",
923
+ template="presentation",
924
+ line_shape="hv",
925
+ color_discrete_sequence=colors,
926
+ )
927
+ fig_0.update_layout(xaxis_title="Timestamp", yaxis_title="System powers (W)")
928
+ if "SOC_opt" in df.columns.to_list():
929
+ fig_1 = px.line(
930
+ df["SOC_opt"],
931
+ title="Battery state of charge schedule after optimization results",
932
+ template="presentation",
933
+ line_shape="hv",
934
+ color_discrete_sequence=colors,
935
+ )
936
+ fig_1.update_layout(xaxis_title="Timestamp", yaxis_title="Battery SOC (%)")
937
+ cols_cost = [i for i in df.columns.to_list() if "cost_" in i or "unit_" in i]
938
+ n_colors = len(cols_cost)
939
+ colors = px.colors.sample_colorscale(
940
+ "jet", [n / (n_colors - 1) for n in range(n_colors)]
941
+ )
942
+ fig_2 = px.line(
943
+ df[cols_cost],
944
+ title="Systems costs obtained from optimization results",
945
+ template="presentation",
946
+ line_shape="hv",
947
+ color_discrete_sequence=colors,
948
+ )
949
+ fig_2.update_layout(xaxis_title="Timestamp", yaxis_title="System costs (currency)")
950
+ # Convert the figures to embeddable HTML
951
+ image_path_0 = fig_0.to_html(full_html=False, default_width="75%")
952
+ if "SOC_opt" in df.columns.to_list():
953
+ image_path_1 = fig_1.to_html(full_html=False, default_width="75%")
954
+ image_path_2 = fig_2.to_html(full_html=False, default_width="75%")
955
+ # The tables
956
+ table1 = df.reset_index().to_html(classes="mystyle", index=False)
957
+ cost_cols = [i for i in df.columns if "cost_" in i]
958
+ table2 = df[cost_cols].reset_index().sum(numeric_only=True)
959
+ table2["optim_status"] = optim_status
960
+ table2 = (
961
+ table2.to_frame(name="Value")
962
+ .reset_index(names="Variable")
963
+ .to_html(classes="mystyle", index=False)
964
+ )
965
+ # The dict of plots
966
+ injection_dict = {}
967
+ injection_dict["title"] = "<h2>EMHASS optimization results</h2>"
968
+ injection_dict["subsubtitle0"] = "<h4>Plotting latest optimization results</h4>"
969
+ injection_dict["figure_0"] = image_path_0
970
+ if "SOC_opt" in df.columns.to_list():
971
+ injection_dict["figure_1"] = image_path_1
972
+ injection_dict["figure_2"] = image_path_2
973
+ injection_dict["subsubtitle1"] = "<h4>Last run optimization results table</h4>"
974
+ injection_dict["table1"] = table1
975
+ injection_dict["subsubtitle2"] = (
976
+ "<h4>Summary table for latest optimization results</h4>"
977
+ )
978
+ injection_dict["table2"] = table2
979
+ return injection_dict
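
A sketch with a toy results frame (column names follow the conventions used above; the values are made up):

    # Hypothetical example: two time steps, no battery, so figure_1 is skipped
    import pandas as pd
    from emhass.utils import get_injection_dict

    df = pd.DataFrame({
        "P_PV": [1200.0, 800.0],
        "P_Load": [500.0, 650.0],
        "cost_profit": [0.10, 0.05],
        "unit_load_cost": [0.20, 0.25],
        "optim_status": ["Optimal", "Optimal"],
    })
    injection_dict = get_injection_dict(df)  # title, figure_0, figure_2, table1, table2, ...
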
980
+
981
+
982
+ def get_injection_dict_forecast_model_fit(
983
+ df_fit_pred: pd.DataFrame, mlf: MLForecaster
984
+ ) -> dict:
985
+ """
986
+ Build a dictionary with graphs and tables for the webui for special MLF fit case.
987
+
988
+ :param df_fit_pred: The fit result DataFrame
989
+ :type df_fit_pred: pd.DataFrame
990
+ :param mlf: The MLForecaster object
991
+ :type mlf: MLForecaster
992
+ :return: A dictionary containing the graphs and tables in html format
993
+ :rtype: dict
994
+ """
995
+ fig = df_fit_pred.plot()
996
+ fig.layout.template = "presentation"
997
+ fig.update_yaxes(title_text=mlf.model_type)
998
+ fig.update_xaxes(title_text="Time")
999
+ image_path_0 = fig.to_html(full_html=False, default_width="75%")
1000
+ # The dict of plots
1001
+ injection_dict = {}
1002
+ injection_dict["title"] = "<h2>Custom machine learning forecast model fit</h2>"
1003
+ injection_dict["subsubtitle0"] = (
1004
+ "<h4>Plotting train/test forecast model results for " + mlf.model_type + "</h4>"
1005
+ )
1006
+ injection_dict["subsubtitle0"] = (
1007
+ "<h4>Forecasting variable " + mlf.var_model + "</h4>"
1008
+ )
1009
+ injection_dict["figure_0"] = image_path_0
1010
+ return injection_dict
1011
+
1012
+
1013
+ def get_injection_dict_forecast_model_tune(
1014
+ df_pred_optim: pd.DataFrame, mlf: MLForecaster
1015
+ ) -> dict:
1016
+ """
1017
+ Build a dictionary with graphs and tables for the webui for special MLF tune case.
1018
+
1019
+ :param df_pred_optim: The tune result DataFrame
1020
+ :type df_pred_optim: pd.DataFrame
1021
+ :param mlf: The MLForecaster object
1022
+ :type mlf: MLForecaster
1023
+ :return: A dictionary containing the graphs and tables in html format
1024
+ :rtype: dict
1025
+ """
1026
+ fig = df_pred_optim.plot()
1027
+ fig.layout.template = "presentation"
1028
+ fig.update_yaxes(title_text=mlf.model_type)
1029
+ fig.update_xaxes(title_text="Time")
1030
+ image_path_0 = fig.to_html(full_html=False, default_width="75%")
1031
+ # The dict of plots
1032
+ injection_dict = {}
1033
+ injection_dict["title"] = "<h2>Custom machine learning forecast model tune</h2>"
1034
+ injection_dict["subsubtitle0"] = (
1035
+ "<h4>Performed a tuning routine using bayesian optimization for "
1036
+ + mlf.model_type
1037
+ + "</h4>"
1038
+ )
1039
+ injection_dict["subsubtitle0"] = (
1040
+ "<h4>Forecasting variable " + mlf.var_model + "</h4>"
1041
+ )
1042
+ injection_dict["figure_0"] = image_path_0
1043
+ return injection_dict
1044
+
1045
+
1046
+ def build_config(
1047
+ emhass_conf: dict,
1048
+ logger: logging.Logger,
1049
+ defaults_path: str,
1050
+ config_path: str | None = None,
1051
+ legacy_config_path: str | None = None,
1052
+ ) -> dict:
1053
+ """
1054
+ Retrieve parameters from configuration files.
1055
+ Priority order (low to high): defaults_path, config_path, legacy_config_path.
1056
+
1057
+ :param emhass_conf: Dictionary containing the needed emhass paths
1058
+ :type emhass_conf: dict
1059
+ :param logger: The logger object
1060
+ :type logger: logging.Logger
1061
+ :param defaults_path: path to config file for parameter defaults (config_defaults.json)
1062
+ :type defaults_path: str
1063
+ :param config_path: path to the main configuration file (config.json)
1064
+ :type config_path: str
1065
+ :param legacy_config_path: path to legacy config file (config_emhass.yaml)
1066
+ :type legacy_config_path: str
1067
+ :return: The built config dictionary
1068
+ :rtype: dict
1069
+ """
1070
+
1071
+ # Read default parameters (default root_path/data/config_defaults.json)
1072
+ if defaults_path and pathlib.Path(defaults_path).is_file():
1073
+ with pathlib.Path(defaults_path).open("r") as data:
1074
+ config = json.load(data)
1075
+ else:
1076
+ logger.error("config_defaults.json. does not exist ")
1077
+ return False
1078
+
1079
+ # Read user config parameters if provided (default /share/config.json)
1080
+ if config_path and pathlib.Path(config_path).is_file():
1081
+ with pathlib.Path(config_path).open("r") as data:
1082
+ # Set override default parameters (config_defaults) with user given parameters (config.json)
1083
+ logger.info("Obtaining parameters from config.json:")
1084
+ config.update(json.load(data))
1085
+ else:
1086
+ logger.info(
1087
+ "config.json does not exist, or has not been passed. config parameters may default to config_defaults.json"
1088
+ )
1089
+ logger.info(
1090
+ "you may like to generate the config.json file on the configuration page"
1091
+ )
1092
+
1093
+ # Check to see if legacy config_emhass.yaml was provided (default /app/config_emhass.yaml)
1094
+ # Convert legacy parameter definitions/format to match config.json
1095
+ if legacy_config_path and pathlib.Path(legacy_config_path).is_file():
1096
+ with open(legacy_config_path) as data:
1097
+ legacy_config = yaml.load(data, Loader=yaml.FullLoader)
1098
+ legacy_config_parameters = build_legacy_config_params(
1099
+ emhass_conf, legacy_config, logger
1100
+ )
1101
+ if type(legacy_config_parameters) is not bool:
1102
+ logger.info(
1103
+ "Obtaining parameters from config_emhass.yaml: (will overwrite config parameters)"
1104
+ )
1105
+ config.update(legacy_config_parameters)
1106
+
1107
+ return config
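
A sketch of a call (all paths here are assumptions for illustration; in the add-on they are derived from the runtime environment):

    # Hypothetical example: defaults overridden by a user config.json
    import logging
    import pathlib
    from emhass.utils import build_config

    logger = logging.getLogger(__name__)
    emhass_conf = {"data_path": pathlib.Path("/tmp"),
                   "associations_path": pathlib.Path("/app/data/associations.csv")}
    config = build_config(emhass_conf, logger,
                          defaults_path=pathlib.Path("/app/data/config_defaults.json"),
                          config_path=pathlib.Path("/share/config.json"))
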
1108
+
1109
+
1110
+ def build_legacy_config_params(
1111
+ emhass_conf: dict, legacy_config: dict, logger: logging.Logger
1112
+ ) -> dict:
1113
+ """
1114
+ Build a config dictionary with legacy config_emhass.yaml file.
1115
+ Uses the associations file to convert parameter naming conventions (to config.json/config_defaults.json).
1116
+ Extracts the parameter values and formats to match config.json.
1117
+
1118
+ :param emhass_conf: Dictionary containing the needed emhass paths
1119
+ :type emhass_conf: dict
1120
+ :param legacy_config: The legacy config dictionary
1121
+ :type legacy_config: dict
1122
+ :param logger: The logger object
1123
+ :type logger: logging.Logger
1124
+ :return: The built config dictionary
1125
+ :rtype: dict
1126
+ """
1127
+
1128
+ # Association file key reference
1129
+ # association[0] = config categories
1130
+ # association[1] = legacy parameter name
1131
+ # association[2] = parameter (config.json/config_defaults.json)
1132
+ # association[3] = parameter list name if exists (not used, from legacy options.json)
1133
+
1134
+ # Check each config category exists, else create a blank dict for that category (avoids errors)
1135
+ legacy_config["retrieve_hass_conf"] = legacy_config.get("retrieve_hass_conf", {})
1136
+ legacy_config["optim_conf"] = legacy_config.get("optim_conf", {})
1137
+ legacy_config["plant_conf"] = legacy_config.get("plant_conf", {})
1138
+ config = {}
1139
+
1140
+ # Use associations list to map legacy parameter name with config.json parameter name
1141
+ if emhass_conf["associations_path"].exists():
1142
+ with emhass_conf["associations_path"].open("r") as data:
1143
+ associations = list(csv.reader(data, delimiter=","))
1144
+ else:
1145
+ logger.error(
1146
+ "Cant find associations file (associations.csv) in: "
1147
+ + str(emhass_conf["associations_path"])
1148
+ )
1149
+ return False
1150
+
1151
+ # Loop through all parameters in association file
1152
+ # Append config with existing legacy config parameters (converting alternative parameter naming conventions with associations list)
1153
+ for association in associations:
1154
+ # If the legacy config category exists and the legacy parameter exists in that category
1155
+ if (
1156
+ legacy_config.get(association[0], None) is not None
1157
+ and legacy_config[association[0]].get(association[1], None) is not None
1158
+ ):
1159
+ config[association[2]] = legacy_config[association[0]][association[1]]
1160
+
1161
+ # If config now has load_peak_hour_periods, extract from list of dict
1162
+ if (
1163
+ association[2] == "load_peak_hour_periods"
1164
+ and type(config[association[2]]) is list
1165
+ ):
1166
+ config[association[2]] = {
1167
+ key: d[key] for d in config[association[2]] for key in d
1168
+ }
1169
+
1170
+ return config
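
To make the association mapping concrete (an illustration; the row shown mirrors the freq to optimization_time_step association):

    # Hypothetical example: given an associations.csv row
    #   retrieve_hass_conf,freq,optimization_time_step
    # the loop above converts this legacy config...
    legacy_config = {"retrieve_hass_conf": {"freq": 30}}
    # ...into the flat, current naming:
    # config == {"optimization_time_step": 30}
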
1172
+
1173
+
1174
+ def param_to_config(param: dict, logger: logging.Logger) -> dict:
1175
+ """
1176
+ A function that extracts the parameters from param back to the config.json format.
1177
+ Extracts parameters from the config categories.
1178
+ Attempts to exclude secrets housed in retrieve_hass_conf.
1179
+
1180
+ :param param: Built configuration parameters
1181
+ :type param: dict
1182
+ :param logger: The logger object
1183
+ :type logger: logging.Logger
1184
+ :return: The built config dictionary
1185
+ :rtype: dict
1186
+ """
1187
+ logger.debug("Converting param to config")
1188
+
1189
+ return_config = {}
1190
+
1191
+ config_categories = ["retrieve_hass_conf", "optim_conf", "plant_conf"]
1192
+ secret_params = [
1193
+ "hass_url",
1194
+ "time_zone",
1195
+ "Latitude",
1196
+ "Longitude",
1197
+ "Altitude",
1198
+ "long_lived_token",
1199
+ "solcast_api_key",
1200
+ "solcast_rooftop_id",
1201
+ "solar_forecast_kwp",
1202
+ ]
1203
+
1204
+ # Loop through config categories that contain config params, and extract
1205
+ for config in config_categories:
1206
+ for parameter in param[config]:
1207
+ # If parameter is not a secret, append to return_config
1208
+ if parameter not in secret_params:
1209
+ return_config[str(parameter)] = param[config][parameter]
1210
+
1211
+ return return_config
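
A sketch of the extraction (the payload is illustrative):

    # Hypothetical example: secrets such as hass_url are filtered out
    import logging
    from emhass.utils import param_to_config

    param = {
        "retrieve_hass_conf": {"hass_url": "https://example.org", "optimization_time_step": 30},
        "optim_conf": {"set_use_battery": True},
        "plant_conf": {},
    }
    config = param_to_config(param, logging.getLogger(__name__))
    # config == {"optimization_time_step": 30, "set_use_battery": True}
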
1212
+
1213
+
1214
+ def build_secrets(
1215
+ emhass_conf: dict,
1216
+ logger: logging.Logger,
1217
+ argument: dict | None = None,
1218
+ options_path: str | None = None,
1219
+ secrets_path: str | None = None,
1220
+ no_response: bool | None = False,
1221
+ ) -> tuple[dict, dict]:
1222
+ """
1223
+ Retrieve and build parameters from secrets locations (ENV, ARG, Secrets file (secrets_emhass.yaml/options.json) and/or Home Assistant (via API))
1224
+ Priority order (low to high): defaults (set in this function), ENV, options.json file, Home Assistant API, secrets yaml file, arguments.
1225
+
1226
+ :param emhass_conf: Dictionary containing the needed emhass paths
1227
+ :type emhass_conf: dict
1228
+ :param logger: The logger object
1229
+ :type logger: logging.Logger
1230
+ :param argument: dictionary of secrets arguments passed (url,key)
1231
+ :type argument: dict
1232
+ :param options_path: path to the options file (options.json) (usually provided by EMHASS-Add-on)
1233
+ :type options_path: str
1234
+ :param secrets_path: path to secrets file (secrets_emhass.yaml)
1235
+ :type secrets_path: str
1236
+ :param no_response: bypass get request to Home Assistant (json response errors)
1237
+ :type no_response: bool
1238
+ :return: Updated emhass_conf, the built secrets dictionary
1239
+ :rtype: tuple[dict, dict]
1240
+ """
1241
+
1242
+ # Set defaults to be overwritten
1243
+ if argument is None:
1244
+ argument = {}
1245
+ params_secrets = {
1246
+ "hass_url": "https://myhass.duckdns.org/",
1247
+ "long_lived_token": "thatverylongtokenhere",
1248
+ "time_zone": "Europe/Paris",
1249
+ "Latitude": 45.83,
1250
+ "Longitude": 6.86,
1251
+ "Altitude": 4807.8,
1252
+ "solcast_api_key": "yoursecretsolcastapikey",
1253
+ "solcast_rooftop_id": "yourrooftopid",
1254
+ "solar_forecast_kwp": 5,
1255
+ }
1256
+
1257
+ # Obtain Secrets from ENV?
1258
+ params_secrets["hass_url"] = os.getenv("EMHASS_URL", params_secrets["hass_url"])
1259
+ params_secrets["long_lived_token"] = os.getenv(
1260
+ "SUPERVISOR_TOKEN", params_secrets["long_lived_token"]
1261
+ )
1262
+ params_secrets["time_zone"] = os.getenv("TIME_ZONE", params_secrets["time_zone"])
1263
+ params_secrets["Latitude"] = float(os.getenv("LAT", params_secrets["Latitude"]))
1264
+ params_secrets["Longitude"] = float(os.getenv("LON", params_secrets["Longitude"]))
1265
+ params_secrets["Altitude"] = float(os.getenv("ALT", params_secrets["Altitude"]))
1266
+
1267
+ # Obtain secrets from options.json (Generated from EMHASS-Add-on, Home Assistant addon Configuration page) or Home Assistant API (from local Supervisor API)?
1268
+ # Use local supervisor API to obtain secrets from Home Assistant if hass_url in options.json is empty and SUPERVISOR_TOKEN ENV exists (provided by Home Assistant when running the container as addon)
1269
+ options = {}
1270
+ if options_path and pathlib.Path(options_path).is_file():
1271
+ with pathlib.Path(options_path).open("r") as data:
1272
+ options = json.load(data)
1273
+
1274
+ # Obtain secrets from Home Assistant?
1275
+ url_from_options = options.get("hass_url", "empty")
1276
+ key_from_options = options.get("long_lived_token", "empty")
1277
+
1278
+ # If data path specified by options.json, overwrite emhass_conf['data_path']
1279
+ if (
1280
+ options.get("data_path", None) is not None
1281
+ and pathlib.Path(options["data_path"]).exists()
1282
+ ):
1283
+ emhass_conf["data_path"] = pathlib.Path(options["data_path"])
1284
+
1285
+ # Check to use Home Assistant local API
1286
+ if (
1287
+ not no_response
1288
+ and (
1289
+ url_from_options == "empty"
1290
+ or url_from_options == ""
1291
+ or url_from_options == "http://supervisor/core/api"
1292
+ )
1293
+ and os.getenv("SUPERVISOR_TOKEN", None) is not None
1294
+ ):
1295
+ params_secrets["long_lived_token"] = os.getenv("SUPERVISOR_TOKEN", None)
1296
+ params_secrets["hass_url"] = "http://supervisor/core/api"
1297
+ headers = {
1298
+ "Authorization": "Bearer " + params_secrets["long_lived_token"],
1299
+ "content-type": "application/json",
1300
+ }
1301
+ # Obtain secrets from Home Assistant via API
1302
+ logger.debug("Obtaining secrets from Home Assistant Supervisor API")
1303
+ response = get(
1304
+ (params_secrets["hass_url"] + "/config"), headers=headers
1305
+ )
1306
+ if response.status_code < 400:
1307
+ config_hass = response.json()
1308
+ params_secrets = {
1309
+ "hass_url": params_secrets["hass_url"],
1310
+ "long_lived_token": params_secrets["long_lived_token"],
1311
+ "time_zone": config_hass["time_zone"],
1312
+ "Latitude": config_hass["latitude"],
1313
+ "Longitude": config_hass["longitude"],
1314
+ "Altitude": config_hass["elevation"],
1315
+ }
1316
+ else:
1317
+ # Obtain the url and key secrets if any from options.json (default /app/options.json)
1318
+ logger.warning(
1319
+ "Error obtaining secrets from Home Assistant Supervisor API"
1320
+ )
1321
+ logger.debug("Obtaining url and key secrets from options.json")
1322
+ if url_from_options != "empty" and url_from_options != "":
1323
+ params_secrets["hass_url"] = url_from_options
1324
+ if key_from_options != "empty" and key_from_options != "":
1325
+ params_secrets["long_lived_token"] = key_from_options
1326
+ if (
1327
+ options.get("time_zone", "empty") != "empty"
1328
+ and options["time_zone"] != ""
1329
+ ):
1330
+ params_secrets["time_zone"] = options["time_zone"]
1331
+ if options.get("Latitude", None) is not None and bool(
1332
+ options["Latitude"]
1333
+ ):
1334
+ params_secrets["Latitude"] = options["Latitude"]
1335
+ if options.get("Longitude", None) is not None and bool(
1336
+ options["Longitude"]
1337
+ ):
1338
+ params_secrets["Longitude"] = options["Longitude"]
1339
+ if options.get("Altitude", None) is not None and bool(
1340
+ options["Altitude"]
1341
+ ):
1342
+ params_secrets["Altitude"] = options["Altitude"]
1343
+ else:
1344
+ # Obtain the url and key secrets if any from options.json (default /app/options.json)
1345
+ logger.debug("Obtaining url and key secrets from options.json")
1346
+ if url_from_options != "empty" and url_from_options != "":
1347
+ params_secrets["hass_url"] = url_from_options
1348
+ if key_from_options != "empty" and key_from_options != "":
1349
+ params_secrets["long_lived_token"] = key_from_options
1350
+ if (
1351
+ options.get("time_zone", "empty") != "empty"
1352
+ and options["time_zone"] != ""
1353
+ ):
1354
+ params_secrets["time_zone"] = options["time_zone"]
1355
+ if options.get("Latitude", None) is not None and bool(
1356
+ options["Latitude"]
1357
+ ):
1358
+ params_secrets["Latitude"] = options["Latitude"]
1359
+ if options.get("Longitude", None) is not None and bool(
1360
+ options["Longitude"]
1361
+ ):
1362
+ params_secrets["Longitude"] = options["Longitude"]
1363
+ if options.get("Altitude", None) is not None and bool(
1364
+ options["Altitude"]
1365
+ ):
1366
+ params_secrets["Altitude"] = options["Altitude"]
1367
+
1368
+ # Obtain the forecast secrets (if any) from options.json (default /app/options.json)
1369
+ forecast_secrets = [
1370
+ "solcast_api_key",
1371
+ "solcast_rooftop_id",
1372
+ "solar_forecast_kwp",
1373
+ ]
1374
+ if any(x in forecast_secrets for x in list(options.keys())):
1375
+ logger.debug("Obtaining forecast secrets from options.json")
1376
+ if (
1377
+ options.get("solcast_api_key", "empty") != "empty"
1378
+ and options["solcast_api_key"] != ""
1379
+ ):
1380
+ params_secrets["solcast_api_key"] = options["solcast_api_key"]
1381
+ if (
1382
+ options.get("solcast_rooftop_id", "empty") != "empty"
1383
+ and options["solcast_rooftop_id"] != ""
1384
+ ):
1385
+ params_secrets["solcast_rooftop_id"] = options["solcast_rooftop_id"]
1386
+ if options.get("solar_forecast_kwp", None) and bool(
1387
+ options["solar_forecast_kwp"]
1388
+ ):
1389
+ params_secrets["solar_forecast_kwp"] = options["solar_forecast_kwp"]
1390
+
1391
+ # Obtain secrets from secrets_emhass.yaml? (default /app/secrets_emhass.yaml)
1392
+ if secrets_path and pathlib.Path(secrets_path).is_file():
1393
+ logger.debug("Obtaining secrets from secrets file")
1394
+ with open(pathlib.Path(secrets_path)) as file:
1395
+ params_secrets.update(yaml.load(file, Loader=yaml.FullLoader))
1396
+
1397
+ # Receive key and url from ARG/arguments?
1398
+ if argument.get("url", None) is not None:
1399
+ params_secrets["hass_url"] = argument["url"]
1400
+ logger.debug("Obtaining url from passed argument")
1401
+ if argument.get("key", None) is not None:
1402
+ params_secrets["long_lived_token"] = argument["key"]
1403
+ logger.debug("Obtaining long_lived_token from passed argument")
1404
+
1405
+ return emhass_conf, params_secrets
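
A sketch that stays offline (no_response=True skips the Supervisor API; the url and token values are placeholders):

    # Hypothetical example: defaults, then ENV, then passed arguments (highest priority)
    import logging
    import pathlib
    from emhass.utils import build_secrets

    emhass_conf = {"data_path": pathlib.Path("/tmp")}
    emhass_conf, params_secrets = build_secrets(
        emhass_conf, logging.getLogger(__name__),
        argument={"url": "https://myhass.example:8123/", "key": "averylongtoken"},
        no_response=True,
    )
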
1406
+
1407
+
1408
+ def build_params(
1409
+ emhass_conf: dict, params_secrets: dict, config: dict, logger: logging.Logger
1410
+ ) -> dict:
1411
+ """
1412
+ Build the main params dictionary from the config and secrets
1413
+ Appends the configuration categories used by emhass to the parameters (using the associations file as a reference).
1414
+
1415
+ :param emhass_conf: Dictionary containing the needed emhass paths
1416
+ :type emhass_conf: dict
1417
+ :param params_secrets: The dictionary containing the built secret variables
1418
+ :type params_secrets: dict
1419
+ :param config: The dictionary of built config parameters
1420
+ :type config: dict
1421
+ :param logger: The logger object
1422
+ :type logger: logging.Logger
1423
+ :return: The built param dictionary
1424
+ :rtype: dict
1425
+ """
1426
+ if type(params_secrets) is not dict:
1427
+ params_secrets = {}
1428
+
1429
+ params = {}
1430
+ # Start with blank config categories
1431
+ params["retrieve_hass_conf"] = {}
1432
+ params["params_secrets"] = {}
1433
+ params["optim_conf"] = {}
1434
+ params["plant_conf"] = {}
1435
+
1436
+ # Obtain associations to categorize parameters into their corresponding config categories
1437
+ if emhass_conf.setdefault(
1438
+ "associations_path", get_root(__file__, num_parent=2) / "data/associations.csv"
1439
+ ).exists():
1440
+ with emhass_conf["associations_path"].open("r") as data:
1441
+ associations = list(csv.reader(data, delimiter=","))
1442
+ else:
1443
+ logger.error(
1444
+ "Unable to obtain the associations file (associations.csv) in: "
1445
+ + str(emhass_conf["associations_path"])
1446
+ )
1447
+ return False
1448
+
1449
+ # Association file key reference
1450
+ # association[0] = config categories
1451
+ # association[1] = legacy parameter name
1452
+ # association[2] = parameter (config.json/config_defaults.json)
1453
+ # association[3] = parameter list name, if it exists (not used, from legacy options.json)
1454
+ # Use the association list to append parameters from config into params (under their corresponding config categories)
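+ # (illustrative, hypothetical row: ["optim_conf", "num_def_loads", "number_of_deferrable_loads"]
+ #  would file config["number_of_deferrable_loads"] under params["optim_conf"])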
1455
+ for association in associations:
1456
+ # If the parameter has a list_ name and the parameter in config is presented with its list name
1457
+ # (i.e., the config parameter is in legacy options.json format)
1458
+ if len(association) == 4 and config.get(association[3], None) is not None:
1459
+ # Extract lists of dictionaries
1460
+ if config[association[3]] and type(config[association[3]][0]) is dict:
1461
+ params[association[0]][association[2]] = [
1462
+ i[association[2]] for i in config[association[3]]
1463
+ ]
1464
+ else:
1465
+ params[association[0]][association[2]] = config[association[3]]
1466
+ # Else, directly set the value of the config parameter in params
1467
+ elif config.get(association[2], None) is not None:
1468
+ params[association[0]][association[2]] = config[association[2]]
1469
+
1470
+ # Check if we need to create `list_hp_periods` from config (i.e., legacy options.json format)
1471
+ if (
1472
+ params.get("optim_conf", None) is not None
1473
+ and config.get("list_peak_hours_periods_start_hours", None) is not None
1474
+ and config.get("list_peak_hours_periods_end_hours", None) is not None
1475
+ ):
1476
+ start_hours_list = [
1477
+ i["peak_hours_periods_start_hours"]
1478
+ for i in config["list_peak_hours_periods_start_hours"]
1479
+ ]
1480
+ end_hours_list = [
1481
+ i["peak_hours_periods_end_hours"]
1482
+ for i in config["list_peak_hours_periods_end_hours"]
1483
+ ]
1484
+ num_peak_hours = len(start_hours_list)
1485
+ list_hp_periods_list = {
1486
+ "period_hp_" + str(i + 1): [
1487
+ {"start": start_hours_list[i]},
1488
+ {"end": end_hours_list[i]},
1489
+ ]
1490
+ for i in range(num_peak_hours)
1491
+ }
1492
+ params["optim_conf"]["load_peak_hour_periods"] = list_hp_periods_list
1493
+ else:
1494
+ # Else, check whether params already contains load_peak_hour_periods from config
1495
+ if params["optim_conf"].get("load_peak_hour_periods", None) is None:
1496
+ logger.warning(
1497
+ "Unable to detect or create load_peak_hour_periods parameter"
1498
+ )
1499
+
1500
+ # Format load_peak_hour_periods list to dict if necessary
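+ # (e.g. a legacy list such as [{"period_hp_1": [{"start": "02:54"}, {"end": "15:24"}]}]
+ #  is flattened into {"period_hp_1": [{"start": "02:54"}, {"end": "15:24"}]})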
1501
+ if params["optim_conf"].get(
1502
+ "load_peak_hour_periods", None
1503
+ ) is not None and isinstance(params["optim_conf"]["load_peak_hour_periods"], list):
1504
+ params["optim_conf"]["load_peak_hour_periods"] = {
1505
+ key: d[key]
1506
+ for d in params["optim_conf"]["load_peak_hour_periods"]
1507
+ for key in d
1508
+ }
1509
+
1510
+ # Call function to check parameter lists that require the same length as deferrable loads
1511
+ # If not, pad them with defaults to fill the gaps
1512
+ if params["optim_conf"].get("number_of_deferrable_loads", None) is not None:
1513
+ num_def_loads = params["optim_conf"]["number_of_deferrable_loads"]
1514
+ params["optim_conf"]["start_timesteps_of_each_deferrable_load"] = (
1515
+ check_def_loads(
1516
+ num_def_loads,
1517
+ params["optim_conf"],
1518
+ 0,
1519
+ "start_timesteps_of_each_deferrable_load",
1520
+ logger,
1521
+ )
1522
+ )
1523
+ params["optim_conf"]["end_timesteps_of_each_deferrable_load"] = check_def_loads(
1524
+ num_def_loads,
1525
+ params["optim_conf"],
1526
+ 0,
1527
+ "end_timesteps_of_each_deferrable_load",
1528
+ logger,
1529
+ )
1530
+ params["optim_conf"]["set_deferrable_load_single_constant"] = check_def_loads(
1531
+ num_def_loads,
1532
+ params["optim_conf"],
1533
+ False,
1534
+ "set_deferrable_load_single_constant",
1535
+ logger,
1536
+ )
1537
+ params["optim_conf"]["treat_deferrable_load_as_semi_cont"] = check_def_loads(
1538
+ num_def_loads,
1539
+ params["optim_conf"],
1540
+ True,
1541
+ "treat_deferrable_load_as_semi_cont",
1542
+ logger,
1543
+ )
1544
+ params["optim_conf"]["set_deferrable_startup_penalty"] = check_def_loads(
1545
+ num_def_loads,
1546
+ params["optim_conf"],
1547
+ 0.0,
1548
+ "set_deferrable_startup_penalty",
1549
+ logger,
1550
+ )
1551
+ params["optim_conf"]["operating_hours_of_each_deferrable_load"] = (
1552
+ check_def_loads(
1553
+ num_def_loads,
1554
+ params["optim_conf"],
1555
+ 0,
1556
+ "operating_hours_of_each_deferrable_load",
1557
+ logger,
1558
+ )
1559
+ )
1560
+ params["optim_conf"]["nominal_power_of_deferrable_loads"] = check_def_loads(
1561
+ num_def_loads,
1562
+ params["optim_conf"],
1563
+ 0,
1564
+ "nominal_power_of_deferrable_loads",
1565
+ logger,
1566
+ )
1567
+ else:
1568
+ logger.warning("unable to obtain parameter: number_of_deferrable_loads")
1569
+ # historic_days_to_retrieve should be no less than 2
1570
+ if params["retrieve_hass_conf"].get("historic_days_to_retrieve", None) is not None:
1571
+ if params["retrieve_hass_conf"]["historic_days_to_retrieve"] < 2:
1572
+ params["retrieve_hass_conf"]["historic_days_to_retrieve"] = 2
1573
+ logger.warning(
1574
+ "days_to_retrieve should not be lower then 2, setting days_to_retrieve to 2. Make sure your sensors also have at least 2 days of history"
1575
+ )
1576
+ else:
1577
+ logger.warning("unable to obtain parameter: historic_days_to_retrieve")
1578
+
1579
+ # Configure secrets, set params to the correct config category
1580
+ # retrieve_hass_conf
1581
+ params["retrieve_hass_conf"]["hass_url"] = params_secrets.get("hass_url", None)
1582
+ params["retrieve_hass_conf"]["long_lived_token"] = params_secrets.get(
1583
+ "long_lived_token", None
1584
+ )
1585
+ params["retrieve_hass_conf"]["time_zone"] = params_secrets.get("time_zone", None)
1586
+ params["retrieve_hass_conf"]["Latitude"] = params_secrets.get("Latitude", None)
1587
+ params["retrieve_hass_conf"]["Longitude"] = params_secrets.get("Longitude", None)
1588
+ params["retrieve_hass_conf"]["Altitude"] = params_secrets.get("Altitude", None)
1589
+ # Update optional param secrets
1590
+ if params["optim_conf"].get("weather_forecast_method", None) is not None:
1591
+ if params["optim_conf"]["weather_forecast_method"] == "solcast":
1592
+ params["retrieve_hass_conf"]["solcast_api_key"] = params_secrets.get(
1593
+ "solcast_api_key", "123456"
1594
+ )
1595
+ params["params_secrets"]["solcast_api_key"] = params_secrets.get(
1596
+ "solcast_api_key", "123456"
1597
+ )
1598
+ params["retrieve_hass_conf"]["solcast_rooftop_id"] = params_secrets.get(
1599
+ "solcast_rooftop_id", "123456"
1600
+ )
1601
+ params["params_secrets"]["solcast_rooftop_id"] = params_secrets.get(
1602
+ "solcast_rooftop_id", "123456"
1603
+ )
1604
+ elif params["optim_conf"]["weather_forecast_method"] == "solar.forecast":
1605
+ params["retrieve_hass_conf"]["solar_forecast_kwp"] = params_secrets.get(
1606
+ "solar_forecast_kwp", 5
1607
+ )
1608
+ params["params_secrets"]["solar_forecast_kwp"] = params_secrets.get(
1609
+ "solar_forecast_kwp", 5
1610
+ )
1611
+ else:
1612
+ logger.warning("Unable to detect weather_forecast_method parameter")
1613
+ # Check whether any secret parameters still match their default values
1614
+ secret_params = [
1615
+ "https://myhass.duckdns.org/",
1616
+ "thatverylongtokenhere",
1617
+ 45.83,
1618
+ 6.86,
1619
+ 4807.8,
1620
+ ]
1621
+ if any(x in secret_params for x in params["retrieve_hass_conf"].values()):
1622
+ logger.warning(
1623
+ "Some secret parameters values are still matching their defaults"
1624
+ )
1625
+
1626
+ # Set empty dict objects for params passed_data
1627
+ # To be later populated with runtime parameters (treat_runtimeparams)
1628
+ params["passed_data"] = {
1629
+ "pv_power_forecast": None,
1630
+ "load_power_forecast": None,
1631
+ "load_cost_forecast": None,
1632
+ "prod_price_forecast": None,
1633
+ "prediction_horizon": None,
1634
+ "soc_init": None,
1635
+ "soc_final": None,
1636
+ "operating_hours_of_each_deferrable_load": None,
1637
+ "start_timesteps_of_each_deferrable_load": None,
1638
+ "end_timesteps_of_each_deferrable_load": None,
1639
+ "alpha": None,
1640
+ "beta": None,
1641
+ }
1642
+
1643
+ return params
1644
+
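The association-driven categorization at the top of build_params is easier to see in isolation. A self-contained sketch of the same loop logic, with hypothetical association rows and config values (the real rows live in data/associations.csv):

# Hypothetical association rows: [category, legacy_name, parameter, (optional) list_name]
associations = [
    ["retrieve_hass_conf", "freq", "optimization_time_step"],
    ["optim_conf", "def_start_timestep", "start_timesteps_of_each_deferrable_load",
     "list_start_timesteps_of_each_deferrable_load"],
]
config = {
    "optimization_time_step": 30,
    # legacy options.json style: a list of single-key dicts
    "list_start_timesteps_of_each_deferrable_load": [
        {"start_timesteps_of_each_deferrable_load": 0},
        {"start_timesteps_of_each_deferrable_load": 3},
    ],
}
params = {"retrieve_hass_conf": {}, "optim_conf": {}}
for association in associations:
    if len(association) == 4 and config.get(association[3]) is not None:
        # Legacy list-of-dicts format: extract the inner values
        if config[association[3]] and isinstance(config[association[3]][0], dict):
            params[association[0]][association[2]] = [
                i[association[2]] for i in config[association[3]]
            ]
        else:
            params[association[0]][association[2]] = config[association[3]]
    elif config.get(association[2]) is not None:
        params[association[0]][association[2]] = config[association[2]]

print(params["optim_conf"])          # {'start_timesteps_of_each_deferrable_load': [0, 3]}
print(params["retrieve_hass_conf"])  # {'optimization_time_step': 30}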
1645
+
1646
+ def check_def_loads(
1647
+ num_def_loads: int, parameter: dict, default, parameter_name: str, logger
1648
+ ):
1649
+ """
1650
+ Check a parameter list against the number of deferrable loads; if it is too short, pad it with defaults to fit.
1651
+
1652
+ :param num_def_loads: Total number of deferrable loads
1653
+ :type num_def_loads: int
1654
+ :param parameter: The config dict containing the parameter
1655
+ :type parameter: dict
1656
+ :param default: Default value used to pad missing entries
1657
+ :type default: obj
1658
+ :param parameter_name: Name of the parameter
1659
+ :type parameter_name: str
1660
+ :param logger: The logger object
1661
+ :type logger: logging.Logger
1662
+ :return: The padded parameter list
1663
+ :rtype: list
1664
+
1665
+ """
1666
+ if (
1667
+ parameter.get(parameter_name, None) is not None
1668
+ and type(parameter[parameter_name]) is list
1669
+ and num_def_loads > len(parameter[parameter_name])
1670
+ ):
1671
+ logger.warning(
1672
+ parameter_name
1673
+ + " does not match number in num_def_loads, adding default values ("
1674
+ + str(default)
1675
+ + ") to parameter"
1676
+ )
1677
+ for _x in range(len(parameter[parameter_name]), num_def_loads):
1678
+ parameter[parameter_name].append(default)
1679
+ return parameter[parameter_name]
1680
+
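A hedged usage example of the padding behaviour (configuration values are hypothetical). Note that the final return subscripts the dict unconditionally, so the key must already exist when the function is called:

import logging

logger = logging.getLogger(__name__)
optim_conf = {"nominal_power_of_deferrable_loads": [3000.0]}

# Three deferrable loads declared, but only one nominal power provided:
padded = check_def_loads(3, optim_conf, 0, "nominal_power_of_deferrable_loads", logger)
print(padded)  # [3000.0, 0, 0]: two defaults appended, with a warning logged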
1681
+
1682
+ def get_days_list(days_to_retrieve: int) -> pd.DatetimeIndex:
1683
+ """
1684
+ Get the list of past days, from today back to days_to_retrieve days ago.
1685
+
1686
+ :param days_to_retrieve: Total number of days to retrieve from the past
1687
+ :type days_to_retrieve: int
1688
+ :return: The list of days
1689
+ :rtype: pd.DatetimeIndex
1690
+
1691
+ """
1692
+ today = datetime.now(UTC).replace(minute=0, second=0, microsecond=0)
1693
+ d = (today - timedelta(days=days_to_retrieve)).isoformat()
1694
+ days_list = pd.date_range(start=d, end=today.isoformat(), freq="D").normalize()
1695
+ return days_list
1696
+
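For example (the exact dates depend on when it runs; only the shape is illustrated here):

days = get_days_list(2)
print(len(days))  # 3 (two past days plus today)
print(days[0])    # midnight UTC two days ago, thanks to .normalize()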
1697
+
1698
+ def add_date_features(
1699
+ data: pd.DataFrame,
1700
+ timestamp: str | None = None,
1701
+ date_features: list[str] | None = None,
1702
+ ) -> pd.DataFrame:
1703
+ """Add date-related features from a DateTimeIndex or a timestamp column.
1704
+
1705
+ :param data: The input DataFrame.
1706
+ :type data: pd.DataFrame
1707
+ :param timestamp: The column containing the timestamp (optional if DataFrame has a DateTimeIndex).
1708
+ :type timestamp: Optional[str]
1709
+ :param date_features: List of date features to extract (default: all).
1710
+ :type date_features: Optional[List[str]]
1711
+ :return: The DataFrame with added date features.
1712
+ :rtype: pd.DataFrame
1713
+ """
1714
+
1715
+ df = copy.deepcopy(data) # Avoid modifying the original DataFrame
1716
+
1717
+ # If no specific features are requested, extract all by default
1718
+ default_features = ["year", "month", "day_of_week", "day_of_year", "day", "hour"]
1719
+ date_features = date_features or default_features
1720
+
1721
+ # Determine whether to use index or a timestamp column
1722
+ if timestamp:
1723
+ df[timestamp] = pd.to_datetime(df[timestamp], utc=True)
1724
+ source = df[timestamp].dt
1725
+ else:
1726
+ if not isinstance(df.index, pd.DatetimeIndex):
1727
+ raise ValueError(
1728
+ "DataFrame must have a DateTimeIndex or a valid timestamp column."
1729
+ )
1730
+ source = df.index
1731
+
1732
+ # Extract date features
1733
+ if "year" in date_features:
1734
+ df["year"] = source.year
1735
+ if "month" in date_features:
1736
+ df["month"] = source.month
1737
+ if "day_of_week" in date_features:
1738
+ df["day_of_week"] = source.dayofweek
1739
+ if "day_of_year" in date_features:
1740
+ df["day_of_year"] = source.dayofyear
1741
+ if "day" in date_features:
1742
+ df["day"] = source.day
1743
+ if "hour" in date_features:
1744
+ df["hour"] = source.hour
1745
+
1746
+ return df
1747
+
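A short example on a synthetic hourly DataFrame (column names are hypothetical):

import pandas as pd

idx = pd.date_range("2024-06-01", periods=3, freq="h", tz="UTC")
df = pd.DataFrame({"load": [1.0, 2.0, 3.0]}, index=idx)

out = add_date_features(df, date_features=["day_of_week", "hour"])
print(out.columns.tolist())  # ['load', 'day_of_week', 'hour']
print(out["hour"].tolist())  # [0, 1, 2]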
1748
+
1749
+ def set_df_index_freq(df: pd.DataFrame) -> pd.DataFrame:
1750
+ """
1751
+ Set the freq of a DataFrame DateTimeIndex.
1752
+
1753
+ :param df: Input DataFrame
1754
+ :type df: pd.DataFrame
1755
+ :return: Input DataFrame with freq defined
1756
+ :rtype: pd.DataFrame
1757
+
1758
+ """
1759
+ idx_diff = np.diff(df.index)
1760
+ # Duplicate timestamps produce zero diffs; drop them before taking the median.
1761
+ idx_diff = idx_diff[np.nonzero(idx_diff)]
1762
+ sampling = pd.to_timedelta(np.median(idx_diff))
1763
+ df = df[~df.index.duplicated()]
1764
+ return df.asfreq(sampling)
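A hedged example showing both effects, deduplication and gap exposure, on a synthetic 30-minute index:

import pandas as pd

idx = pd.to_datetime([
    "2024-06-01 00:00", "2024-06-01 00:30", "2024-06-01 00:30",  # duplicate entry
    "2024-06-01 01:00", "2024-06-01 02:00",                      # gap at 01:30
])
df = pd.DataFrame({"v": [1.0, 2.0, 2.0, 3.0, 5.0]}, index=idx)

out = set_df_index_freq(df)
print(out.index.freq)         # <30 * Minutes>, the median spacing
print(out["v"].isna().sum())  # 1: the missing 01:30 slot becomes NaN via asfreq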