emhass 0.10.6__py3-none-any.whl → 0.15.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emhass/utils.py CHANGED
@@ -1,26 +1,30 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
+from __future__ import annotations
 
-from typing import Tuple, Optional
-from datetime import datetime, timedelta, timezone
+import ast
+import copy
+import csv
 import logging
+import os
 import pathlib
-import json
-import copy
+from datetime import UTC, datetime, timedelta
+from typing import TYPE_CHECKING
+
+import aiofiles
+import aiohttp
 import numpy as np
+import orjson
 import pandas as pd
-import yaml
+import plotly.express as px
 import pytz
-import ast
+import yaml
 
-import plotly.express as px
+if TYPE_CHECKING:
+    from emhass.machine_learning_forecaster import MLForecaster
 
 pd.options.plotting.backend = "plotly"
 
-from emhass.machine_learning_forecaster import MLForecaster
-
 
-def get_root(file: str, num_parent: Optional[int] = 3) -> str:
+def get_root(file: str, num_parent: int = 3) -> str:
     """
     Get the root absolute path of the working directory.
 
@@ -42,8 +46,12 @@ def get_root(file: str, num_parent: Optional[int] = 3) -> str:
     return root
 
 
-def get_logger(fun_name: str, emhass_conf: dict, save_to_file: Optional[bool] = True,
-               logging_level: Optional[str] = "DEBUG") -> Tuple[logging.Logger, logging.StreamHandler]:
+def get_logger(
+    fun_name: str,
+    emhass_conf: dict[str, pathlib.Path],
+    save_to_file: bool = True,
+    logging_level: str = "DEBUG",
+) -> tuple[logging.Logger, logging.StreamHandler]:
     """
     Create a simple logger object.
 
@@ -62,7 +70,10 @@ def get_logger(fun_name: str, emhass_conf: dict, save_to_file: Optional[bool] =
     logger.propagate = True
     logger.fileSetting = save_to_file
     if save_to_file:
-        ch = logging.FileHandler(emhass_conf['data_path'] / 'logger_emhass.log')
+        if os.path.isdir(emhass_conf["data_path"]):
+            ch = logging.FileHandler(emhass_conf["data_path"] / "logger_emhass.log")
+        else:
+            raise Exception("Unable to access data_path: " + emhass_conf["data_path"])
     else:
         ch = logging.StreamHandler()
     if logging_level == "DEBUG":
@@ -80,17 +91,24 @@ def get_logger(fun_name: str, emhass_conf: dict, save_to_file: Optional[bool] =
     else:
         logger.setLevel(logging.DEBUG)
         ch.setLevel(logging.DEBUG)
-    formatter = logging.Formatter(
-        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-    )
+    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
     ch.setFormatter(formatter)
     logger.addHandler(ch)
 
     return logger, ch
 
 
-def get_forecast_dates(freq: int, delta_forecast: int, timedelta_days: Optional[int] = 0
-                       ) -> pd.core.indexes.datetimes.DatetimeIndex:
+def _get_now() -> datetime:
+    """Helper function to get the current time, for easier mocking."""
+    return datetime.now()
+
+
+def get_forecast_dates(
+    freq: int,
+    delta_forecast: int,
+    time_zone: datetime.tzinfo,
+    timedelta_days: int | None = 0,
+) -> pd.core.indexes.datetimes.DatetimeIndex:
     """
     Get the date_range list of the needed future dates using the delta_forecast parameter.
 
@@ -105,118 +123,560 @@ def get_forecast_dates(freq: int, delta_forecast: int, timedelta_days: Optional[
 
     """
     freq = pd.to_timedelta(freq, "minutes")
-    start_forecast = pd.Timestamp(datetime.now()).replace(hour=0, minute=0, second=0, microsecond=0)
-    end_forecast = (start_forecast + pd.Timedelta(days=delta_forecast)).replace(microsecond=0)
-    forecast_dates = pd.date_range(start=start_forecast,
-                                   end=end_forecast+timedelta(days=timedelta_days)-freq,
-                                   freq=freq).round(freq, ambiguous='infer', nonexistent='shift_forward')
-    return forecast_dates
+    start_time = _get_now()
+
+    start_forecast = pd.Timestamp(start_time, tz=time_zone).replace(microsecond=0).floor(freq=freq)
+    end_forecast = start_forecast + pd.tseries.offsets.DateOffset(days=delta_forecast)
+    final_end_date = end_forecast + pd.tseries.offsets.DateOffset(days=timedelta_days) - freq
+
+    forecast_dates = pd.date_range(
+        start=start_forecast,
+        end=final_end_date,
+        freq=freq,
+        tz=time_zone,
+    )
+
+    return [ts.isoformat() for ts in forecast_dates]
+
+
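A minimal sketch (assumed values, not part of the package) of what the reworked logic above produces: "now" is floored to the optimization step in the configured timezone instead of snapped to midnight, and the range is returned as ISO-8601 strings:

    import pandas as pd
    import pytz

    tz = pytz.timezone("Europe/Paris")
    freq = pd.to_timedelta(30, "minutes")
    start = pd.Timestamp("2024-01-01 10:17", tz=tz).floor(freq=freq)  # -> 10:00
    end = start + pd.tseries.offsets.DateOffset(days=1) - freq
    dates = pd.date_range(start=start, end=end, freq=freq, tz=tz)
    iso_dates = [ts.isoformat() for ts in dates]
    print(iso_dates[0], len(iso_dates))  # 2024-01-01T10:00:00+01:00 48
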
+def calculate_cop_heatpump(
+    supply_temperature: float,
+    carnot_efficiency: float,
+    outdoor_temperature_forecast: np.ndarray | pd.Series,
+) -> np.ndarray:
+    r"""
+    Calculate heat pump Coefficient of Performance (COP) for each timestep in the prediction horizon.
+
+    The COP is calculated using a Carnot-based formula:
+
+    .. math::
+        COP(h) = \eta_{carnot} \times \frac{T_{supply\_K}}{|T_{supply\_K} - T_{outdoor\_K}(h)|}
+
+    Where temperatures are converted to Kelvin (K = °C + 273.15).
+
+    This formula models real heat pump behavior where COP decreases as the temperature lift
+    (difference between supply and outdoor temperature) increases. The carnot_efficiency factor
+    represents the real-world efficiency as a fraction of the ideal Carnot cycle efficiency.
+
+    :param supply_temperature: The heat pump supply temperature in degrees Celsius (constant value). \
+        Typical values: 30-40°C for underfloor heating, 50-70°C for radiator systems.
+    :type supply_temperature: float
+    :param carnot_efficiency: Real-world efficiency factor as fraction of ideal Carnot cycle. \
+        Typical range: 0.35-0.50 (35-50%). Default in thermal battery config: 0.4 (40%). \
+        Higher values represent more efficient heat pumps.
+    :type carnot_efficiency: float
+    :param outdoor_temperature_forecast: Array of outdoor temperature forecasts in degrees Celsius, \
+        one value per timestep in the prediction horizon.
+    :type outdoor_temperature_forecast: np.ndarray or pd.Series
+    :return: Array of COP values for each timestep, same length as outdoor_temperature_forecast. \
+        Typical COP range: 2-6 for normal operating conditions.
+    :rtype: np.ndarray
+
+    Example:
+        >>> supply_temp = 35.0  # °C, underfloor heating
+        >>> carnot_eff = 0.4  # 40% of ideal Carnot efficiency
+        >>> outdoor_temps = np.array([0.0, 5.0, 10.0, 15.0, 20.0])
+        >>> cops = calculate_cop_heatpump(supply_temp, carnot_eff, outdoor_temps)
+        >>> cops
+        array([3.521..., 4.108..., 4.926..., 6.163..., 8.217...])
+        >>> # At 5°C outdoor: COP = 0.4 × 308.15K / 30K = 4.11
+
+    """
+    # Convert to numpy array if pandas Series
+    if isinstance(outdoor_temperature_forecast, pd.Series):
+        outdoor_temps = outdoor_temperature_forecast.values
+    else:
+        outdoor_temps = np.asarray(outdoor_temperature_forecast)
+
+    # Convert temperatures from Celsius to Kelvin for Carnot formula
+    supply_temperature_kelvin = supply_temperature + 273.15
+    outdoor_temperature_kelvin = outdoor_temps + 273.15
+
+    # Calculate temperature difference (supply - outdoor)
+    # For heating, supply temperature should be higher than outdoor temperature
+    temperature_diff = supply_temperature_kelvin - outdoor_temperature_kelvin
+
+    # Check for non-physical scenarios where outdoor temp >= supply temp
+    # This indicates cooling mode or invalid configuration for heating
+    if np.any(temperature_diff <= 0):
+        # Log warning about non-physical temperature scenario
+        logger = logging.getLogger(__name__)
+        num_invalid = np.sum(temperature_diff <= 0)
+        invalid_indices = np.nonzero(temperature_diff <= 0)[0]
+        logger.warning(
+            f"COP calculation: {num_invalid} timestep(s) have outdoor temperature >= supply temperature. "
+            f"This is non-physical for heating mode. Indices: {invalid_indices.tolist()[:5]}{'...' if len(invalid_indices) > 5 else ''}. "
+            f"Supply temp: {supply_temperature:.1f}°C. Setting COP to 1.0 (direct electric heating) for these periods."
+        )
+
+    # Vectorized Carnot-based COP calculation
+    # COP = carnot_efficiency × T_supply / (T_supply - T_outdoor)
+    # For non-physical cases (outdoor >= supply), we use a neutral COP of 1.0
+    # This prevents the optimizer from exploiting unrealistic high COP values
+
+    # Avoid division by zero: use a mask to only calculate for valid cases
+    cop_values = np.ones_like(outdoor_temperature_kelvin)  # Default to 1.0 everywhere
+    valid_mask = temperature_diff > 0
+    if np.any(valid_mask):
+        cop_values[valid_mask] = (
+            carnot_efficiency * supply_temperature_kelvin / temperature_diff[valid_mask]
+        )
+
+    # Apply realistic bounds: minimum 1.0, maximum 8.0
+    # - Lower bound: 1.0 means direct electric heating (no efficiency gain)
+    # - Upper bound: 8.0 is an optimistic but reasonable maximum for modern heat pumps
+    #   (prevents numerical instability from very small temperature differences)
+    cop_values = np.clip(cop_values, 1.0, 8.0)
+
+    return cop_values
+
+
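A hand check of the Carnot expression above at one assumed operating point (35 °C supply, 5 °C outdoor, a 30 K lift), matching the docstring example:

    supply_k = 35.0 + 273.15              # 308.15 K
    outdoor_k = 5.0 + 273.15              # 278.15 K
    cop = 0.4 * supply_k / (supply_k - outdoor_k)
    print(round(cop, 2))                  # 4.11, within the [1.0, 8.0] clip applied above
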
+def calculate_thermal_loss_signed(
+    outdoor_temperature_forecast: np.ndarray | pd.Series,
+    indoor_temperature: float,
+    base_loss: float,
+) -> np.ndarray:
+    r"""
+    Calculate signed thermal loss factor based on indoor/outdoor temperature difference.
+
+    **SIGN CONVENTION:**
+    - **Positive** (+loss): outdoor < indoor → heat loss, building cools, heating required
+    - **Negative** (-loss): outdoor ≥ indoor → heat gain, building warms passively
+
+    Formula: loss * (1 - 2 * Hot(h)), where Hot(h) = 1 if outdoor ≥ indoor, else 0.
+    Based on Langer & Volling (2020) Equation B.13.
+
+    :param outdoor_temperature_forecast: Outdoor temperature forecast (°C)
+    :type outdoor_temperature_forecast: np.ndarray or pd.Series
+    :param indoor_temperature: Indoor/target temperature threshold (°C)
+    :type indoor_temperature: float
+    :param base_loss: Base thermal loss coefficient in kW
+    :type base_loss: float
+    :return: Signed loss array (positive = heat loss, negative = heat gain)
+    :rtype: np.ndarray
+
+    """
+    # Convert to numpy array if pandas Series
+    if isinstance(outdoor_temperature_forecast, pd.Series):
+        outdoor_temps = outdoor_temperature_forecast.values
+    else:
+        outdoor_temps = np.asarray(outdoor_temperature_forecast)
+
+    # Create binary hot indicator: 1 if outdoor temp >= indoor temp, 0 otherwise
+    hot_indicator = (outdoor_temps >= indoor_temperature).astype(float)
+
+    return base_loss * (1.0 - 2.0 * hot_indicator)
+
+
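A tiny sketch of the sign convention with assumed values (base_loss = 2.0 kW, 20 °C threshold): a 5 °C outdoor sample gives +2.0 (heat loss), a 25 °C sample gives -2.0 (passive gain):

    import numpy as np

    outdoor = np.array([5.0, 25.0])
    hot = (outdoor >= 20.0).astype(float)  # the Hot(h) indicator from Eq. B.13
    print(2.0 * (1.0 - 2.0 * hot))         # [ 2. -2.]
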
+def calculate_heating_demand(
+    specific_heating_demand: float,
+    floor_area: float,
+    outdoor_temperature_forecast: np.ndarray | pd.Series,
+    base_temperature: float = 18.0,
+    annual_reference_hdd: float = 3000.0,
+    optimization_time_step: int | None = None,
+) -> np.ndarray:
+    """
+    Calculate heating demand per timestep based on heating degree days method.
+
+    Uses heating degree days (HDD) to calculate heating demand based on outdoor temperature
+    forecast, specific heating demand, and floor area. The specific heating demand should be
+    calibrated to the annual reference HDD value.
 
+    :param specific_heating_demand: Specific heating demand in kWh/m²/year (calibrated to annual_reference_hdd)
+    :type specific_heating_demand: float
+    :param floor_area: Floor area in m²
+    :type floor_area: float
+    :param outdoor_temperature_forecast: Outdoor temperature forecast in °C for each timestep
+    :type outdoor_temperature_forecast: np.ndarray | pd.Series
+    :param base_temperature: Base temperature for HDD calculation in °C, defaults to 18.0 (European standard)
+    :type base_temperature: float, optional
+    :param annual_reference_hdd: Annual reference HDD value for normalization, defaults to 3000.0 (Central Europe)
+    :type annual_reference_hdd: float, optional
+    :param optimization_time_step: Optimization time step in minutes. If None, automatically infers from
+        pandas Series DatetimeIndex frequency. Falls back to 30 minutes if not inferrable.
+    :type optimization_time_step: int | None, optional
+    :return: Array of heating demand values (kWh) per timestep
+    :rtype: np.ndarray
 
-def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dict, optim_conf: dict,
-                        plant_conf: dict, set_type: str, logger: logging.Logger
-                        ) -> Tuple[str, dict]:
+    """
+
+    # Convert outdoor temperature forecast to numpy array if pandas Series
+    outdoor_temps = (
+        outdoor_temperature_forecast.values
+        if isinstance(outdoor_temperature_forecast, pd.Series)
+        else np.asarray(outdoor_temperature_forecast)
+    )
+
+    # Calculate heating degree days per timestep
+    # HDD = max(base_temperature - outdoor_temperature, 0)
+    hdd_per_timestep = np.maximum(base_temperature - outdoor_temps, 0.0)
+
+    # Determine timestep duration in hours
+    if optimization_time_step is None:
+        # Try to infer from pandas Series DatetimeIndex
+        if isinstance(outdoor_temperature_forecast, pd.Series) and isinstance(
+            outdoor_temperature_forecast.index, pd.DatetimeIndex
+        ):
+            if len(outdoor_temperature_forecast.index) > 1:
+                freq_minutes = (
+                    outdoor_temperature_forecast.index[1] - outdoor_temperature_forecast.index[0]
+                ).total_seconds() / 60.0
+                hours_per_timestep = freq_minutes / 60.0
+            else:
+                # Single datapoint, fallback to default 30 min
+                hours_per_timestep = 0.5
+        else:
+            # Cannot infer, use default 30 minutes
+            hours_per_timestep = 0.5
+    else:
+        # Convert minutes to hours
+        hours_per_timestep = optimization_time_step / 60.0
+
+    # Scale HDD to timestep duration (standard HDD is per 24 hours)
+    hdd_per_timestep_scaled = hdd_per_timestep * (hours_per_timestep / 24.0)
+
+    return specific_heating_demand * floor_area * (hdd_per_timestep_scaled / annual_reference_hdd)
+
+
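One worked timestep of the HDD scaling above, with assumed inputs (100 kWh/m²/year, 150 m², an 8 °C outdoor sample against the 18 °C base, a 30-minute step, 3000 K·day reference):

    hdd = max(18.0 - 8.0, 0.0)             # 10 K of heating degree
    hdd_step = hdd * (0.5 / 24.0)          # daily HDD scaled to a half-hour slice
    demand_kwh = 100.0 * 150.0 * (hdd_step / 3000.0)
    print(round(demand_kwh, 3))            # ~1.042 kWh for this timestep
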
+def calculate_heating_demand_physics(
+    u_value: float,
+    envelope_area: float,
+    ventilation_rate: float,
+    heated_volume: float,
+    indoor_target_temperature: float,
+    outdoor_temperature_forecast: np.ndarray | pd.Series,
+    optimization_time_step: int,
+    solar_irradiance_forecast: np.ndarray | pd.Series | None = None,
+    window_area: float | None = None,
+    shgc: float = 0.6,
+) -> np.ndarray:
+    """
+    Calculate heating demand per timestep based on building physics heat loss model.
+
+    More accurate than HDD method as it directly calculates transmission and ventilation
+    losses based on building thermal properties. Optionally accounts for solar gains
+    through windows to reduce heating demand.
+
+    :param u_value: Overall thermal transmittance (U-value) in W/(m²·K). Typical values:
+        - 0.2-0.3: Well-insulated modern building
+        - 0.4-0.6: Average insulation
+        - 0.8-1.2: Poor insulation / old building
+    :type u_value: float
+    :param envelope_area: Total building envelope area (walls + roof + floor + windows) in m²
+    :type envelope_area: float
+    :param ventilation_rate: Air changes per hour (ACH). Typical values:
+        - 0.3-0.5: Well-sealed modern building with controlled ventilation
+        - 0.5-1.0: Average building
+        - 1.0-2.0: Leaky old building
+    :type ventilation_rate: float
+    :param heated_volume: Total heated volume in m³
+    :type heated_volume: float
+    :param indoor_target_temperature: Target indoor temperature in °C
+    :type indoor_target_temperature: float
+    :param outdoor_temperature_forecast: Outdoor temperature forecast in °C for each timestep
+    :type outdoor_temperature_forecast: np.ndarray | pd.Series
+    :param optimization_time_step: Optimization time step in minutes
+    :type optimization_time_step: int
+    :param solar_irradiance_forecast: Global Horizontal Irradiance (GHI) in W/m² for each timestep.
+        If provided along with window_area, solar gains will be subtracted from heating demand.
+    :type solar_irradiance_forecast: np.ndarray | pd.Series | None, optional
+    :param window_area: Total window area in m². If provided along with solar_irradiance_forecast,
+        solar gains will reduce heating demand. Typical values: 15-25% of floor area.
+    :type window_area: float | None, optional
+    :param shgc: Solar Heat Gain Coefficient (dimensionless, 0-1). Fraction of solar radiation
+        that becomes heat inside the building. Typical values:
+        - 0.5-0.6: Modern low-e double-glazed windows
+        - 0.6-0.7: Standard double-glazed windows
+        - 0.7-0.8: Single-glazed windows
+        Default: 0.6
+    :type shgc: float, optional
+    :return: Array of heating demand values (kWh) per timestep
+    :rtype: np.ndarray
+
+    Example:
+        >>> outdoor_temps = np.array([5, 8, 12, 15])
+        >>> ghi = np.array([0, 100, 400, 600])  # W/m²
+        >>> demand = calculate_heating_demand_physics(
+        ...     u_value=0.3,
+        ...     envelope_area=400,
+        ...     ventilation_rate=0.5,
+        ...     heated_volume=250,
+        ...     indoor_target_temperature=20,
+        ...     outdoor_temperature_forecast=outdoor_temps,
+        ...     optimization_time_step=30,
+        ...     solar_irradiance_forecast=ghi,
+        ...     window_area=50,
+        ...     shgc=0.6
+        ... )
+    """
+
+    # Convert outdoor temperature forecast to numpy array if pandas Series
+    outdoor_temps = (
+        outdoor_temperature_forecast.values
+        if isinstance(outdoor_temperature_forecast, pd.Series)
+        else np.asarray(outdoor_temperature_forecast)
+    )
+
+    # Calculate temperature difference (only heat when outdoor < indoor)
+    temp_diff = indoor_target_temperature - outdoor_temps
+    temp_diff = np.maximum(temp_diff, 0.0)
+
+    # Transmission losses: Q_trans = U * A * ΔT (W to kW)
+    transmission_loss_kw = u_value * envelope_area * temp_diff / 1000.0
+
+    # Ventilation losses: Q_vent = V * ρ * c * n * ΔT / 3600
+    # ρ = air density (kg/m³), c = specific heat capacity (kJ/(kg·K)), n = ACH
+    air_density = 1.2  # kg/m³ at 20°C
+    air_heat_capacity = 1.005  # kJ/(kg·K)
+    ventilation_loss_kw = (
+        ventilation_rate * heated_volume * air_density * air_heat_capacity * temp_diff / 3600.0
+    )
+
+    # Total heat loss in kW
+    total_loss_kw = transmission_loss_kw + ventilation_loss_kw
+
+    # Calculate solar gains if irradiance and window area are provided
+    if solar_irradiance_forecast is not None and window_area is not None:
+        # Convert solar irradiance to numpy array if pandas Series
+        solar_irradiance = (
+            solar_irradiance_forecast.values
+            if isinstance(solar_irradiance_forecast, pd.Series)
+            else np.asarray(solar_irradiance_forecast)
+        )
+
+        # Solar gains: Q_solar = window_area * SHGC * GHI (W to kW)
+        # GHI is in W/m², so multiplying by window_area (m²) gives W, then divide by 1000 for kW
+        solar_gains_kw = window_area * shgc * solar_irradiance / 1000.0
+
+        # Subtract solar gains from heat loss (but never go negative)
+        total_loss_kw = np.maximum(total_loss_kw - solar_gains_kw, 0.0)
+
+    # Convert to kWh for the timestep
+    hours_per_timestep = optimization_time_step / 60.0
+    return total_loss_kw * hours_per_timestep
+
+
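A hand check of one timestep of the physics model, reusing the docstring's example inputs without solar gains (a 15 K temperature difference, 30-minute step):

    dT = 20.0 - 5.0                                  # K
    q_trans = 0.3 * 400 * dT / 1000.0                # U*A*dT       -> 1.8 kW
    q_vent = 0.5 * 250 * 1.2 * 1.005 * dT / 3600.0   # n*V*rho*c*dT -> ~0.628 kW
    print(round((q_trans + q_vent) * 0.5, 3))        # ~1.214 kWh per 30-min step
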
+def update_params_with_ha_config(
+    params: str,
+    ha_config: dict,
+) -> dict:
+    """
+    Update the params with the Home Assistant configuration.
+
+    Parameters
+    ----------
+    params : str
+        The serialized params.
+    ha_config : dict
+        The Home Assistant configuration.
+
+    Returns
+    -------
+    dict
+        The updated params.
+    """
+    # Load serialized params
+    params = orjson.loads(params)
+    # Update params
+    currency_to_symbol = {
+        "EUR": "€",
+        "USD": "$",
+        "GBP": "£",
+        "YEN": "¥",
+        "JPY": "¥",
+        "AUD": "A$",
+        "CAD": "C$",
+        "CHF": "CHF",  # Swiss Franc has no special symbol
+        "CNY": "¥",
+        "INR": "₹",
+        "CZK": "Kč",
+        "BGN": "лв",
+        "DKK": "kr",
+        "HUF": "Ft",
+        "PLN": "zł",
+        "RON": "Leu",
+        "SEK": "kr",
+        "TRY": "Lira",
+        "VEF": "Bolivar",
+        "VND": "Dong",
+        "THB": "Baht",
+        "SGD": "S$",
+        "IDR": "Roepia",
+        "ZAR": "Rand",
+        # Add more as needed
+    }
+    if "currency" in ha_config.keys():
+        ha_config["currency"] = currency_to_symbol.get(ha_config["currency"], "Unknown")
+    else:
+        ha_config["currency"] = "€"
+
+    updated_passed_dict = {
+        "custom_cost_fun_id": {
+            "unit_of_measurement": ha_config["currency"],
+        },
+        "custom_unit_load_cost_id": {
+            "unit_of_measurement": f"{ha_config['currency']}/kWh",
+        },
+        "custom_unit_prod_price_id": {
+            "unit_of_measurement": f"{ha_config['currency']}/kWh",
+        },
+    }
+    for key, value in updated_passed_dict.items():
+        params["passed_data"][key]["unit_of_measurement"] = value["unit_of_measurement"]
+    # Serialize the final params
+    params = orjson.dumps(params, default=str).decode("utf-8")
+    return params
+
+
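A minimal round-trip sketch of the helper above (seed payload assumed; per the code the function returns the re-serialized string):

    import orjson
    from emhass.utils import update_params_with_ha_config

    seed = orjson.dumps({
        "passed_data": {
            "custom_cost_fun_id": {"unit_of_measurement": ""},
            "custom_unit_load_cost_id": {"unit_of_measurement": ""},
            "custom_unit_prod_price_id": {"unit_of_measurement": ""},
        }
    }).decode()
    out = update_params_with_ha_config(seed, {"currency": "EUR"})
    print(orjson.loads(out)["passed_data"]["custom_unit_load_cost_id"])
    # {'unit_of_measurement': '€/kWh'}
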
+async def treat_runtimeparams(
+    runtimeparams: str,
+    params: dict[str, dict],
+    retrieve_hass_conf: dict[str, str],
+    optim_conf: dict[str, str],
+    plant_conf: dict[str, str],
+    set_type: str,
+    logger: logging.Logger,
+    emhass_conf: dict[str, pathlib.Path],
+) -> tuple[str, dict[str, dict]]:
     """
     Treat the passed optimization runtime parameters.
 
     :param runtimeparams: Json string containing the runtime parameters dict.
     :type runtimeparams: str
-    :param params: Configuration parameters passed from data/options.json
+    :param params: Built configuration parameters
     :type params: str
-    :param retrieve_hass_conf: Container for data retrieving parameters.
+    :param retrieve_hass_conf: Config dictionary for data retrieving parameters.
     :type retrieve_hass_conf: dict
-    :param optim_conf: Container for optimization parameters.
+    :param optim_conf: Config dictionary for optimization parameters.
     :type optim_conf: dict
-    :param plant_conf: Container for technical plant parameters.
+    :param plant_conf: Config dictionary for technical plant parameters.
     :type plant_conf: dict
     :param set_type: The type of action to be performed.
     :type set_type: str
     :param logger: The logger object.
     :type logger: logging.Logger
+    :param emhass_conf: Dictionary containing the needed emhass paths
+    :type emhass_conf: dict
     :return: Returning the params and optimization parameter container.
     :rtype: Tuple[str, dict]
 
     """
-    if (params != None) and (params != "null"):
-        params = json.loads(params)
+    # Check if passed params is a dict
+    if (params is not None) and (params != "null"):
+        if type(params) is str:
+            params = orjson.loads(params)
     else:
         params = {}
+
+    # Merge current config categories to params
+    params["retrieve_hass_conf"].update(retrieve_hass_conf)
+    params["optim_conf"].update(optim_conf)
+    params["plant_conf"].update(plant_conf)
+
+    # Check defaults on HA retrieved config
+    default_currency_unit = "€"
+    default_temperature_unit = "°C"
+
     # Some default data needed
     custom_deferrable_forecast_id = []
     custom_predicted_temperature_id = []
-    for k in range(optim_conf["num_def_loads"]):
+    custom_heating_demand_id = []
+    for k in range(params["optim_conf"]["number_of_deferrable_loads"]):
         custom_deferrable_forecast_id.append(
             {
-                "entity_id": "sensor.p_deferrable{}".format(k),
+                "entity_id": f"sensor.p_deferrable{k}",
+                "device_class": "power",
                 "unit_of_measurement": "W",
-                "friendly_name": "Deferrable Load {}".format(k),
+                "friendly_name": f"Deferrable Load {k}",
             }
         )
         custom_predicted_temperature_id.append(
             {
-                "entity_id": "sensor.temp_predicted{}".format(k),
-                "unit_of_measurement": "°C",
-                "friendly_name": "Predicted temperature {}".format(k),
+                "entity_id": f"sensor.temp_predicted{k}",
+                "device_class": "temperature",
+                "unit_of_measurement": default_temperature_unit,
+                "friendly_name": f"Predicted temperature {k}",
+            }
+        )
+        custom_heating_demand_id.append(
+            {
+                "entity_id": f"sensor.heating_demand{k}",
+                "device_class": "energy",
+                "unit_of_measurement": "kWh",
+                "friendly_name": f"Heating demand {k}",
             }
         )
     default_passed_dict = {
         "custom_pv_forecast_id": {
             "entity_id": "sensor.p_pv_forecast",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "PV Power Forecast",
         },
         "custom_load_forecast_id": {
             "entity_id": "sensor.p_load_forecast",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "Load Power Forecast",
         },
         "custom_pv_curtailment_id": {
             "entity_id": "sensor.p_pv_curtailment",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "PV Power Curtailment",
         },
         "custom_hybrid_inverter_id": {
             "entity_id": "sensor.p_hybrid_inverter",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "PV Hybrid Inverter",
         },
         "custom_batt_forecast_id": {
             "entity_id": "sensor.p_batt_forecast",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "Battery Power Forecast",
         },
         "custom_batt_soc_forecast_id": {
             "entity_id": "sensor.soc_batt_forecast",
+            "device_class": "battery",
             "unit_of_measurement": "%",
             "friendly_name": "Battery SOC Forecast",
         },
         "custom_grid_forecast_id": {
             "entity_id": "sensor.p_grid_forecast",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "Grid Power Forecast",
         },
         "custom_cost_fun_id": {
             "entity_id": "sensor.total_cost_fun_value",
-            "unit_of_measurement": "",
+            "device_class": "monetary",
+            "unit_of_measurement": default_currency_unit,
             "friendly_name": "Total cost function value",
         },
         "custom_optim_status_id": {
             "entity_id": "sensor.optim_status",
+            "device_class": "",
             "unit_of_measurement": "",
             "friendly_name": "EMHASS optimization status",
         },
         "custom_unit_load_cost_id": {
             "entity_id": "sensor.unit_load_cost",
-            "unit_of_measurement": "€/kWh",
+            "device_class": "monetary",
+            "unit_of_measurement": f"{default_currency_unit}/kWh",
             "friendly_name": "Unit Load Cost",
         },
         "custom_unit_prod_price_id": {
             "entity_id": "sensor.unit_prod_price",
-            "unit_of_measurement": "€/kWh",
+            "device_class": "monetary",
+            "unit_of_measurement": f"{default_currency_unit}/kWh",
             "friendly_name": "Unit Prod Price",
         },
         "custom_deferrable_forecast_id": custom_deferrable_forecast_id,
         "custom_predicted_temperature_id": custom_predicted_temperature_id,
+        "custom_heating_demand_id": custom_heating_demand_id,
         "publish_prefix": "",
     }
     if "passed_data" in params.keys():
@@ -224,12 +684,94 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
                 params["passed_data"][key] = value
     else:
         params["passed_data"] = default_passed_dict
+
+    # If any runtime parameters were passed in the action call
     if runtimeparams is not None:
-        runtimeparams = json.loads(runtimeparams)
-        freq = int(retrieve_hass_conf["freq"].seconds / 60.0)
-        delta_forecast = int(optim_conf["delta_forecast"].days)
-        forecast_dates = get_forecast_dates(freq, delta_forecast)
-        if set_type == "regressor-model-fit":
+        if type(runtimeparams) is str:
+            runtimeparams = orjson.loads(runtimeparams)
+
+        # Loop through parameters stored in the associations file; check if any were passed at runtime
+        # If true, set runtime parameter to params
+        if emhass_conf["associations_path"].exists():
+            async with aiofiles.open(emhass_conf["associations_path"]) as data:
+                content = await data.read()
+                associations = list(csv.reader(content.splitlines(), delimiter=","))
+            # Association file key reference
+            # association[0] = config categories
+            # association[1] = legacy parameter name
+            # association[2] = parameter (config.json/config_defaults.json)
+            # association[3] = parameter list name if exists (not used, from legacy options.json)
+            for association in associations:
+                # Check parameter name exists in runtime
+                if runtimeparams.get(association[2], None) is not None:
+                    params[association[0]][association[2]] = runtimeparams[association[2]]
+                # Check legacy parameter name in runtime
+                elif runtimeparams.get(association[1], None) is not None:
+                    params[association[0]][association[2]] = runtimeparams[association[1]]
+        else:
+            logger.warning(
+                "Can't find associations file (associations.csv) in: "
+                + str(emhass_conf["associations_path"])
+            )
+
+        # Generate forecast_dates
+        # Force update optimization_time_step if present in runtimeparams
+        if "optimization_time_step" in runtimeparams:
+            optimization_time_step = int(runtimeparams["optimization_time_step"])
+            params["retrieve_hass_conf"]["optimization_time_step"] = pd.to_timedelta(
+                optimization_time_step, "minutes"
+            )
+        elif "freq" in runtimeparams:
+            optimization_time_step = int(runtimeparams["freq"])
+            params["retrieve_hass_conf"]["optimization_time_step"] = pd.to_timedelta(
+                optimization_time_step, "minutes"
+            )
+        else:
+            optimization_time_step = int(
+                params["retrieve_hass_conf"]["optimization_time_step"].seconds / 60.0
+            )
+
+        if (
+            runtimeparams.get("delta_forecast_daily", None) is not None
+            or runtimeparams.get("delta_forecast", None) is not None
+        ):
+            # Use old param name delta_forecast (if provided) for backwards compatibility
+            delta_forecast = runtimeparams.get("delta_forecast", None)
+            # Prefer new param name delta_forecast_daily
+            delta_forecast = runtimeparams.get("delta_forecast_daily", delta_forecast)
+            # Ensure delta_forecast is numeric and at least 1 day
+            if delta_forecast is None:
+                logger.warning("delta_forecast_daily is missing so defaulting to 1 day")
+                delta_forecast = 1
+            else:
+                try:
+                    delta_forecast = int(delta_forecast)
+                except ValueError:
+                    logger.warning(
+                        "Invalid delta_forecast_daily value (%s) so defaulting to 1 day",
+                        delta_forecast,
+                    )
+                    delta_forecast = 1
+            if delta_forecast <= 0:
+                logger.warning(
+                    "delta_forecast_daily is too low (%s) so defaulting to 1 day",
+                    delta_forecast,
+                )
+                delta_forecast = 1
+            params["optim_conf"]["delta_forecast_daily"] = pd.Timedelta(days=delta_forecast)
+        else:
+            delta_forecast = int(params["optim_conf"]["delta_forecast_daily"].days)
+        if runtimeparams.get("time_zone", None) is not None:
+            time_zone = pytz.timezone(params["retrieve_hass_conf"]["time_zone"])
+            params["retrieve_hass_conf"]["time_zone"] = time_zone
+        else:
+            time_zone = params["retrieve_hass_conf"]["time_zone"]
+
+        forecast_dates = get_forecast_dates(optimization_time_step, delta_forecast, time_zone)
+
+        # Add runtime exclusive (not in config) parameters to params
+        # regressor-model-fit
+        if set_type == "regressor-model-fit":
             if "csv_file" in runtimeparams:
                 csv_file = runtimeparams["csv_file"]
                 params["passed_data"]["csv_file"] = csv_file
@@ -249,6 +791,8 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
             else:
                 date_features = runtimeparams["date_features"]
             params["passed_data"]["date_features"] = date_features
+
+        # regressor-model-predict
         if set_type == "regressor-model-predict":
             if "new_values" in runtimeparams:
                 new_values = runtimeparams["new_values"]
@@ -262,7 +806,27 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
             if "target" in runtimeparams:
                 target = runtimeparams["target"]
                 params["passed_data"]["target"] = target
-        # Treating special data passed for MPC control case
+
+        # export-influxdb-to-csv
+        if set_type == "export-influxdb-to-csv":
+            # Use dictionary comprehension to simplify parameter assignment
+            export_keys = {
+                k: runtimeparams[k]
+                for k in (
+                    "sensor_list",
+                    "csv_filename",
+                    "start_time",
+                    "end_time",
+                    "resample_freq",
+                    "timestamp_col_name",
+                    "decimal_places",
+                    "handle_nan",
+                )
+                if k in runtimeparams
+            }
+            params["passed_data"].update(export_keys)
+
+        # MPC control case
         if set_type == "naive-mpc-optim":
             if "prediction_horizon" not in runtimeparams.keys():
                 prediction_horizon = 10  # 10 time steps by default
@@ -270,124 +834,255 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
                 prediction_horizon = runtimeparams["prediction_horizon"]
             params["passed_data"]["prediction_horizon"] = prediction_horizon
             if "soc_init" not in runtimeparams.keys():
-                soc_init = plant_conf["SOCtarget"]
+                soc_init = params["plant_conf"]["battery_target_state_of_charge"]
             else:
                 soc_init = runtimeparams["soc_init"]
+                if soc_init < params["plant_conf"]["battery_minimum_state_of_charge"]:
+                    logger.warning(
+                        f"Passed soc_init={soc_init} is lower than soc_min={params['plant_conf']['battery_minimum_state_of_charge']}, setting soc_init=soc_min"
+                    )
+                    soc_init = params["plant_conf"]["battery_minimum_state_of_charge"]
+                if soc_init > params["plant_conf"]["battery_maximum_state_of_charge"]:
+                    logger.warning(
+                        f"Passed soc_init={soc_init} is greater than soc_max={params['plant_conf']['battery_maximum_state_of_charge']}, setting soc_init=soc_max"
+                    )
+                    soc_init = params["plant_conf"]["battery_maximum_state_of_charge"]
             params["passed_data"]["soc_init"] = soc_init
             if "soc_final" not in runtimeparams.keys():
-                soc_final = plant_conf["SOCtarget"]
+                soc_final = params["plant_conf"]["battery_target_state_of_charge"]
             else:
                 soc_final = runtimeparams["soc_final"]
+                if soc_final < params["plant_conf"]["battery_minimum_state_of_charge"]:
+                    logger.warning(
+                        f"Passed soc_final={soc_final} is lower than soc_min={params['plant_conf']['battery_minimum_state_of_charge']}, setting soc_final=soc_min"
+                    )
+                    soc_final = params["plant_conf"]["battery_minimum_state_of_charge"]
+                if soc_final > params["plant_conf"]["battery_maximum_state_of_charge"]:
+                    logger.warning(
+                        f"Passed soc_final={soc_final} is greater than soc_max={params['plant_conf']['battery_maximum_state_of_charge']}, setting soc_final=soc_max"
+                    )
+                    soc_final = params["plant_conf"]["battery_maximum_state_of_charge"]
             params["passed_data"]["soc_final"] = soc_final
-            if "def_total_hours" not in runtimeparams.keys():
-                def_total_hours = optim_conf["def_total_hours"]
-            else:
-                def_total_hours = runtimeparams["def_total_hours"]
-            params["passed_data"]["def_total_hours"] = def_total_hours
-            if "def_start_timestep" not in runtimeparams.keys():
-                def_start_timestep = optim_conf["def_start_timestep"]
-            else:
-                def_start_timestep = runtimeparams["def_start_timestep"]
-            params["passed_data"]["def_start_timestep"] = def_start_timestep
-            if "def_end_timestep" not in runtimeparams.keys():
-                def_end_timestep = optim_conf["def_end_timestep"]
-            else:
-                def_end_timestep = runtimeparams["def_end_timestep"]
-            params["passed_data"]["def_end_timestep"] = def_end_timestep
+            if "operating_timesteps_of_each_deferrable_load" in runtimeparams.keys():
+                params["passed_data"]["operating_timesteps_of_each_deferrable_load"] = (
+                    runtimeparams["operating_timesteps_of_each_deferrable_load"]
+                )
+                params["optim_conf"]["operating_timesteps_of_each_deferrable_load"] = runtimeparams[
+                    "operating_timesteps_of_each_deferrable_load"
+                ]
+            if "operating_hours_of_each_deferrable_load" in params["optim_conf"].keys():
+                params["passed_data"]["operating_hours_of_each_deferrable_load"] = params[
+                    "optim_conf"
+                ]["operating_hours_of_each_deferrable_load"]
+            params["passed_data"]["start_timesteps_of_each_deferrable_load"] = params[
+                "optim_conf"
+            ].get("start_timesteps_of_each_deferrable_load", None)
+            params["passed_data"]["end_timesteps_of_each_deferrable_load"] = params[
+                "optim_conf"
+            ].get("end_timesteps_of_each_deferrable_load", None)
+
             forecast_dates = copy.deepcopy(forecast_dates)[0:prediction_horizon]
         else:
             params["passed_data"]["prediction_horizon"] = None
             params["passed_data"]["soc_init"] = None
             params["passed_data"]["soc_final"] = None
-            params["passed_data"]["def_total_hours"] = None
-            params["passed_data"]["def_start_timestep"] = None
-            params["passed_data"]["def_end_timestep"] = None
+
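An illustrative naive-mpc-optim runtime payload (all values assumed) exercising the clamping above: an out-of-range soc_init or soc_final is pulled back to the configured battery limits with a warning instead of being rejected:

    import orjson

    runtimeparams = orjson.dumps({
        "prediction_horizon": 10,
        "soc_init": 0.05,   # clamped up to battery_minimum_state_of_charge if below it
        "soc_final": 0.98,  # clamped down to battery_maximum_state_of_charge if above it
    }).decode()
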
+        # Parsing the thermal model parameters
+        # Load the default config
+        if "def_load_config" in runtimeparams:
+            params["optim_conf"]["def_load_config"] = runtimeparams["def_load_config"]
+        if "def_load_config" in params["optim_conf"]:
+            for k in range(len(params["optim_conf"]["def_load_config"])):
+                if "thermal_config" in params["optim_conf"]["def_load_config"][k]:
+                    if (
+                        "heater_desired_temperatures" in runtimeparams
+                        and len(runtimeparams["heater_desired_temperatures"]) > k
+                    ):
+                        params["optim_conf"]["def_load_config"][k]["thermal_config"][
+                            "desired_temperatures"
+                        ] = runtimeparams["heater_desired_temperatures"][k]
+                    if (
+                        "heater_start_temperatures" in runtimeparams
+                        and len(runtimeparams["heater_start_temperatures"]) > k
+                    ):
+                        params["optim_conf"]["def_load_config"][k]["thermal_config"][
+                            "start_temperature"
+                        ] = runtimeparams["heater_start_temperatures"][k]
+
         # Treat passed forecast data lists
-        list_forecast_key = ['pv_power_forecast', 'load_power_forecast', 'load_cost_forecast', 'prod_price_forecast', 'outdoor_temperature_forecast']
-        forecast_methods = ['weather_forecast_method', 'load_forecast_method', 'load_cost_forecast_method', 'prod_price_forecast_method', 'outdoor_temperature_forecast_method']
-        # Param to save forecast cache (i.e. Solcast)
-        if "weather_forecast_cache" not in runtimeparams.keys():
-            weather_forecast_cache = False
-        else:
-            weather_forecast_cache = runtimeparams["weather_forecast_cache"]
-        params["passed_data"]["weather_forecast_cache"] = weather_forecast_cache
-        # Param to make sure optimization only uses cached data. (else produce error)
-        if "weather_forecast_cache_only" not in runtimeparams.keys():
-            weather_forecast_cache_only = False
-        else:
-            weather_forecast_cache_only = runtimeparams["weather_forecast_cache_only"]
-        params["passed_data"]["weather_forecast_cache_only"] = weather_forecast_cache_only
+        list_forecast_key = [
+            "pv_power_forecast",
+            "load_power_forecast",
+            "load_cost_forecast",
+            "prod_price_forecast",
+            "outdoor_temperature_forecast",
+        ]
+        forecast_methods = [
+            "weather_forecast_method",
+            "load_forecast_method",
+            "load_cost_forecast_method",
+            "production_price_forecast_method",
+            "outdoor_temperature_forecast_method",
+        ]
+
+        # Loop forecasts, check if value is a list and greater than or equal to forecast_dates
         for method, forecast_key in enumerate(list_forecast_key):
             if forecast_key in runtimeparams.keys():
-                if type(runtimeparams[forecast_key]) == list and len(runtimeparams[forecast_key]) >= len(forecast_dates):
-                    params['passed_data'][forecast_key] = runtimeparams[forecast_key]
-                    optim_conf[forecast_methods[method]] = 'list'
+                forecast_input = runtimeparams[forecast_key]
+                if isinstance(forecast_input, dict):
+                    forecast_data_df = pd.DataFrame.from_dict(
+                        forecast_input, orient="index"
+                    ).reset_index()
+                    forecast_data_df.columns = ["time", "value"]
+                    forecast_data_df["time"] = pd.to_datetime(
+                        forecast_data_df["time"], format="ISO8601", utc=True
+                    ).dt.tz_convert(time_zone)
+
+                    # align index with forecast_dates
+                    forecast_data_df = (
+                        forecast_data_df.resample(
+                            pd.to_timedelta(optimization_time_step, "minutes"),
+                            on="time",
+                        )
+                        .aggregate({"value": "mean"})
+                        .reindex(forecast_dates, method="nearest")
+                    )
+                    forecast_data_df["value"] = forecast_data_df["value"].ffill().bfill()
+                    forecast_input = forecast_data_df["value"].tolist()
+                if isinstance(forecast_input, list) and len(forecast_input) >= len(forecast_dates):
+                    params["passed_data"][forecast_key] = forecast_input
+                    params["optim_conf"][forecast_methods[method]] = "list"
                 else:
-                    logger.error(f"ERROR: The passed data is either not a list or the length is not correct, length should be {str(len(forecast_dates))}")
-                    logger.error(f"Passed type is {str(type(runtimeparams[forecast_key]))} and length is {str(len(runtimeparams[forecast_key]))}")
-                    list_non_digits = [x for x in runtimeparams[forecast_key] if not (isinstance(x, int) or isinstance(x, float))]
+                    logger.error(
+                        f"ERROR: The passed data is either the wrong type or the length is not correct, length should be {str(len(forecast_dates))}"
+                    )
+                    logger.error(
+                        f"Passed type is {str(type(runtimeparams[forecast_key]))} and length is {str(len(runtimeparams[forecast_key]))}"
+                    )
+                    # Check if string contains list, if so extract
+                    if isinstance(forecast_input, str) and isinstance(
+                        ast.literal_eval(forecast_input), list
+                    ):
+                        forecast_input = ast.literal_eval(forecast_input)
+                        runtimeparams[forecast_key] = forecast_input
+                    list_non_digits = [
+                        x for x in forecast_input if not (isinstance(x, int) or isinstance(x, float))
+                    ]
                     if len(list_non_digits) > 0:
-                        logger.warning(f"There are non numeric values on the passed data for {forecast_key}, check for missing values (nans, null, etc)")
+                        logger.warning(
+                            f"There are non numeric values on the passed data for {forecast_key}, check for missing values (nans, null, etc)"
+                        )
                         for x in list_non_digits:
-                            logger.warning(f"This value in {forecast_key} was detected as non digits: {str(x)}")
+                            logger.warning(
+                                f"This value in {forecast_key} was detected as non digits: {str(x)}"
+                            )
             else:
-                params['passed_data'][forecast_key] = None
+                params["passed_data"][forecast_key] = None
+
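Besides plain lists, the loop above now also accepts a forecast passed as a timestamp-keyed dict, which is resampled onto forecast_dates. An assumed example payload:

    import orjson

    runtimeparams = orjson.dumps({
        "load_cost_forecast": {
            "2024-01-01T10:00:00+01:00": 0.21,  # ISO 8601 keys, converted to time_zone
            "2024-01-01T10:30:00+01:00": 0.19,  # values are averaged per optimization step
        }
    }).decode()
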
+        # Explicitly handle historic_days_to_retrieve from runtimeparams BEFORE validation
+        if "historic_days_to_retrieve" in runtimeparams:
+            params["retrieve_hass_conf"]["historic_days_to_retrieve"] = int(
+                runtimeparams["historic_days_to_retrieve"]
+            )
+
         # Treat passed data for forecast model fit/predict/tune at runtime
-        if "days_to_retrieve" not in runtimeparams.keys():
-            days_to_retrieve = 9
-        else:
-            days_to_retrieve = runtimeparams["days_to_retrieve"]
-        params["passed_data"]["days_to_retrieve"] = days_to_retrieve
-        if "model_type" not in runtimeparams.keys():
-            model_type = "load_forecast"
-        else:
-            model_type = runtimeparams["model_type"]
-        params["passed_data"]["model_type"] = model_type
-        if "var_model" not in runtimeparams.keys():
-            var_model = "sensor.power_load_no_var_loads"
-        else:
-            var_model = runtimeparams["var_model"]
-        params["passed_data"]["var_model"] = var_model
-        if "sklearn_model" not in runtimeparams.keys():
-            sklearn_model = "KNeighborsRegressor"
-        else:
-            sklearn_model = runtimeparams["sklearn_model"]
-        params["passed_data"]["sklearn_model"] = sklearn_model
-        if "regression_model" not in runtimeparams.keys():
-            regression_model = "AdaBoostRegression"
-        else:
-            regression_model = runtimeparams["regression_model"]
-        params["passed_data"]["regression_model"] = regression_model
-        if "num_lags" not in runtimeparams.keys():
-            num_lags = 48
-        else:
-            num_lags = runtimeparams["num_lags"]
-        params["passed_data"]["num_lags"] = num_lags
-        if "split_date_delta" not in runtimeparams.keys():
-            split_date_delta = "48h"
-        else:
-            split_date_delta = runtimeparams["split_date_delta"]
-        params["passed_data"]["split_date_delta"] = split_date_delta
-        if "perform_backtest" not in runtimeparams.keys():
-            perform_backtest = False
+        if (
+            params["passed_data"].get("historic_days_to_retrieve", None) is not None
+            and params["passed_data"]["historic_days_to_retrieve"] < 9
+        ):
+            logger.warning(
+                "`days_to_retrieve` is set to a value less than 9, this could cause an error with the fit"
+            )
+            logger.warning("setting `passed_data:days_to_retrieve` to 9 for fit/predict/tune")
+            params["passed_data"]["historic_days_to_retrieve"] = 9
         else:
-            perform_backtest = ast.literal_eval(str(runtimeparams["perform_backtest"]).capitalize())
-        params["passed_data"]["perform_backtest"] = perform_backtest
+            if params["retrieve_hass_conf"].get("historic_days_to_retrieve", 0) < 9:
+                logger.debug("setting `passed_data:days_to_retrieve` to 9 for fit/predict/tune")
+                params["passed_data"]["historic_days_to_retrieve"] = 9
+            else:
+                params["passed_data"]["historic_days_to_retrieve"] = params["retrieve_hass_conf"][
+                    "historic_days_to_retrieve"
+                ]
+
+        # UPDATED ML PARAMETER HANDLING
+        # Define Helper Functions
+        def _cast_bool(value):
+            """Helper to cast string inputs to boolean safely."""
+            try:
+                return ast.literal_eval(str(value).capitalize())
+            except (ValueError, SyntaxError):
+                return False
+
+        def _get_ml_param(name, params, runtimeparams, default=None, cast=None):
+            """
+            Prioritize Runtime Params -> Config Params (optim_conf) -> Default.
+            """
+            if name in runtimeparams:
+                value = runtimeparams[name]
+            else:
+                value = params["optim_conf"].get(name, default)
+
+            if cast is not None and value is not None:
+                try:
+                    value = cast(value)
+                except Exception:
+                    pass
+            return value
+
+        # Compute dynamic defaults
+        # Default for var_model falls back to the configured load sensor
+        default_var_model = params["retrieve_hass_conf"].get(
+            "sensor_power_load_no_var_loads", "sensor.power_load_no_var_loads"
+        )
+
+        # Define Configuration Table
+        # Format: (parameter_name, default_value, cast_function)
+        ml_param_defs = [
+            ("model_type", "long_train_data", None),
+            ("var_model", default_var_model, None),
+            ("sklearn_model", "KNeighborsRegressor", None),
+            ("regression_model", "AdaBoostRegression", None),
+            ("num_lags", 48, None),
+            ("split_date_delta", "48h", None),
+            ("n_trials", 10, int),
+            ("perform_backtest", False, _cast_bool),
+        ]
+
+        # Apply Configuration
+        for name, default, caster in ml_param_defs:
+            params["passed_data"][name] = _get_ml_param(
+                name=name,
+                params=params,
+                runtimeparams=runtimeparams,
+                default=default,
+                cast=caster,
+            )
+
+        # Other non-dynamic options
         if "model_predict_publish" not in runtimeparams.keys():
             model_predict_publish = False
         else:
-            model_predict_publish = ast.literal_eval(str(runtimeparams["model_predict_publish"]).capitalize())
+            model_predict_publish = ast.literal_eval(
+                str(runtimeparams["model_predict_publish"]).capitalize()
+            )
         params["passed_data"]["model_predict_publish"] = model_predict_publish
         if "model_predict_entity_id" not in runtimeparams.keys():
             model_predict_entity_id = "sensor.p_load_forecast_custom_model"
         else:
             model_predict_entity_id = runtimeparams["model_predict_entity_id"]
         params["passed_data"]["model_predict_entity_id"] = model_predict_entity_id
+        if "model_predict_device_class" not in runtimeparams.keys():
+            model_predict_device_class = "power"
+        else:
+            model_predict_device_class = runtimeparams["model_predict_device_class"]
+        params["passed_data"]["model_predict_device_class"] = model_predict_device_class
         if "model_predict_unit_of_measurement" not in runtimeparams.keys():
             model_predict_unit_of_measurement = "W"
         else:
             model_predict_unit_of_measurement = runtimeparams["model_predict_unit_of_measurement"]
-        params["passed_data"]["model_predict_unit_of_measurement"] = model_predict_unit_of_measurement
+        params["passed_data"]["model_predict_unit_of_measurement"] = (
+            model_predict_unit_of_measurement
+        )
         if "model_predict_friendly_name" not in runtimeparams.keys():
             model_predict_friendly_name = "Load Power Forecast custom ML model"
         else:
@@ -398,6 +1093,11 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
         else:
             mlr_predict_entity_id = runtimeparams["mlr_predict_entity_id"]
         params["passed_data"]["mlr_predict_entity_id"] = mlr_predict_entity_id
+        if "mlr_predict_device_class" not in runtimeparams.keys():
+            mlr_predict_device_class = "power"
+        else:
+            mlr_predict_device_class = runtimeparams["mlr_predict_device_class"]
+        params["passed_data"]["mlr_predict_device_class"] = mlr_predict_device_class
         if "mlr_predict_unit_of_measurement" not in runtimeparams.keys():
             mlr_predict_unit_of_measurement = None
         else:
@@ -408,6 +1108,7 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
         else:
             mlr_predict_friendly_name = runtimeparams["mlr_predict_friendly_name"]
         params["passed_data"]["mlr_predict_friendly_name"] = mlr_predict_friendly_name
+
         # Treat passed data for other parameters
         if "alpha" not in runtimeparams.keys():
             alpha = 0.5
@@ -419,73 +1120,52 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
         else:
             beta = runtimeparams["beta"]
         params["passed_data"]["beta"] = beta
-        # Treat optimization configuration parameters passed at runtime
-        if "num_def_loads" in runtimeparams.keys():
-            optim_conf["num_def_loads"] = runtimeparams["num_def_loads"]
-        if "P_deferrable_nom" in runtimeparams.keys():
-            optim_conf["P_deferrable_nom"] = runtimeparams["P_deferrable_nom"]
-        if "def_total_hours" in runtimeparams.keys():
-            optim_conf["def_total_hours"] = runtimeparams["def_total_hours"]
-        if "def_start_timestep" in runtimeparams.keys():
-            optim_conf["def_start_timestep"] = runtimeparams["def_start_timestep"]
-        if "def_end_timestep" in runtimeparams.keys():
-            optim_conf["def_end_timestep"] = runtimeparams["def_end_timestep"]
+
+        # Param to save forecast cache (i.e. Solcast)
+        if "weather_forecast_cache" not in runtimeparams.keys():
+            weather_forecast_cache = False
+        else:
+            weather_forecast_cache = runtimeparams["weather_forecast_cache"]
+        params["passed_data"]["weather_forecast_cache"] = weather_forecast_cache
+
+        # Param to make sure optimization only uses cached data. (else produce error)
+        if "weather_forecast_cache_only" not in runtimeparams.keys():
+            weather_forecast_cache_only = False
+        else:
+            weather_forecast_cache_only = runtimeparams["weather_forecast_cache_only"]
+        params["passed_data"]["weather_forecast_cache_only"] = weather_forecast_cache_only
+
+        # A condition to manually save entity data under data_path/entities after optimization
+        if "entity_save" not in runtimeparams.keys():
+            entity_save = ""
+        else:
+            entity_save = runtimeparams["entity_save"]
+        params["passed_data"]["entity_save"] = entity_save
+
+        # A condition to put a prefix on all published data, or check for saved data under prefix name
+        if "publish_prefix" not in runtimeparams.keys():
+            publish_prefix = ""
+        else:
+            publish_prefix = runtimeparams["publish_prefix"]
+        params["passed_data"]["publish_prefix"] = publish_prefix
+
+        # Treat optimization (optim_conf) configuration parameters passed at runtime
         if "def_current_state" in runtimeparams.keys():
-            optim_conf["def_current_state"] = [bool(s) for s in runtimeparams["def_current_state"]]
-        if "treat_def_as_semi_cont" in runtimeparams.keys():
-            optim_conf["treat_def_as_semi_cont"] = [
-                ast.literal_eval(str(k).capitalize())
-                for k in runtimeparams["treat_def_as_semi_cont"]
+            params["optim_conf"]["def_current_state"] = [
+                bool(s) for s in runtimeparams["def_current_state"]
             ]
-        if "set_def_constant" in runtimeparams.keys():
-            optim_conf["set_def_constant"] = [
-                ast.literal_eval(str(k).capitalize()) for k in runtimeparams["set_def_constant"]
-            ]
-        if "def_start_penalty" in runtimeparams.keys():
-            optim_conf["def_start_penalty"] = [
-                ast.literal_eval(str(k).capitalize()) for k in runtimeparams["def_start_penalty"]
-            ]
-        if 'def_load_config' in runtimeparams:
-            optim_conf["def_load_config"] = runtimeparams['def_load_config']
+
+        # Treat retrieve data from Home Assistant (retrieve_hass_conf) configuration parameters passed at runtime
+        # Secrets passed at runtime
         if "solcast_api_key" in runtimeparams.keys():
-            retrieve_hass_conf["solcast_api_key"] = runtimeparams["solcast_api_key"]
-            optim_conf["weather_forecast_method"] = "solcast"
+            params["retrieve_hass_conf"]["solcast_api_key"] = runtimeparams["solcast_api_key"]
         if "solcast_rooftop_id" in runtimeparams.keys():
-            retrieve_hass_conf["solcast_rooftop_id"] = runtimeparams[
-                "solcast_rooftop_id"
-            ]
-            optim_conf["weather_forecast_method"] = "solcast"
+            params["retrieve_hass_conf"]["solcast_rooftop_id"] = runtimeparams["solcast_rooftop_id"]
         if "solar_forecast_kwp" in runtimeparams.keys():
-            retrieve_hass_conf["solar_forecast_kwp"] = runtimeparams[
-                "solar_forecast_kwp"
-            ]
-            optim_conf["weather_forecast_method"] = "solar.forecast"
-        if "weight_battery_discharge" in runtimeparams.keys():
-            optim_conf["weight_battery_discharge"] = runtimeparams[
-                "weight_battery_discharge"
-            ]
-        if "weight_battery_charge" in runtimeparams.keys():
-            optim_conf["weight_battery_charge"] = runtimeparams["weight_battery_charge"]
-        if 'freq' in runtimeparams.keys():
-            retrieve_hass_conf['freq'] = pd.to_timedelta(runtimeparams['freq'], "minutes")
-        if 'continual_publish' in runtimeparams.keys():
-            retrieve_hass_conf['continual_publish'] = bool(runtimeparams['continual_publish'])
-        # Treat plant configuration parameters passed at runtime
-        if "SOCmin" in runtimeparams.keys():
-            plant_conf["SOCmin"] = runtimeparams["SOCmin"]
-        if "SOCmax" in runtimeparams.keys():
-            plant_conf["SOCmax"] = runtimeparams["SOCmax"]
-        if "SOCtarget" in runtimeparams.keys():
-            plant_conf["SOCtarget"] = runtimeparams["SOCtarget"]
-        if "Pd_max" in runtimeparams.keys():
-            plant_conf["Pd_max"] = runtimeparams["Pd_max"]
-        if "Pc_max" in runtimeparams.keys():
-            plant_conf["Pc_max"] = runtimeparams["Pc_max"]
+            params["retrieve_hass_conf"]["solar_forecast_kwp"] = runtimeparams["solar_forecast_kwp"]
         # Treat custom entities id's and friendly names for variables
         if "custom_pv_forecast_id" in runtimeparams.keys():
-            params["passed_data"]["custom_pv_forecast_id"] = runtimeparams[
-                "custom_pv_forecast_id"
-            ]
+            params["passed_data"]["custom_pv_forecast_id"] = runtimeparams["custom_pv_forecast_id"]
         if "custom_load_forecast_id" in runtimeparams.keys():
             params["passed_data"]["custom_load_forecast_id"] = runtimeparams[
                 "custom_load_forecast_id"
@@ -511,9 +1191,7 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
511
1191
  "custom_grid_forecast_id"
512
1192
  ]
513
1193
  if "custom_cost_fun_id" in runtimeparams.keys():
514
- params["passed_data"]["custom_cost_fun_id"] = runtimeparams[
515
- "custom_cost_fun_id"
516
- ]
1194
+ params["passed_data"]["custom_cost_fun_id"] = runtimeparams["custom_cost_fun_id"]
517
1195
  if "custom_optim_status_id" in runtimeparams.keys():
518
1196
  params["passed_data"]["custom_optim_status_id"] = runtimeparams[
519
1197
  "custom_optim_status_id"
@@ -534,89 +1212,61 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
534
1212
  params["passed_data"]["custom_predicted_temperature_id"] = runtimeparams[
535
1213
  "custom_predicted_temperature_id"
536
1214
  ]
537
- # A condition to put a prefix on all published data, or check for saved data under prefix name
538
- if "publish_prefix" not in runtimeparams.keys():
539
- publish_prefix = ""
540
- else:
541
- publish_prefix = runtimeparams["publish_prefix"]
542
- params["passed_data"]["publish_prefix"] = publish_prefix
543
- # A condition to manually save entity data under data_path/entities after optimization
544
- if "entity_save" not in runtimeparams.keys():
545
- entity_save = ""
546
- else:
547
- entity_save = runtimeparams["entity_save"]
548
- params["passed_data"]["entity_save"] = entity_save
1215
+ if "custom_heating_demand_id" in runtimeparams.keys():
1216
+ params["passed_data"]["custom_heating_demand_id"] = runtimeparams[
1217
+ "custom_heating_demand_id"
1218
+ ]
1219
+
1220
+ # split config categories from params
1221
+ retrieve_hass_conf = params["retrieve_hass_conf"]
1222
+ optim_conf = params["optim_conf"]
1223
+ plant_conf = params["plant_conf"]
1224
+
549
1225
  # Serialize the final params
550
- params = json.dumps(params)
1226
+ params = orjson.dumps(params, default=str).decode()
551
1227
  return params, retrieve_hass_conf, optim_conf, plant_conf
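[Editor's note] The serialization switch from json.dumps to orjson.dumps(..., default=str) matters here: by this point params can hold non-JSON-native values such as pd.Timedelta objects or pytz timezones. A minimal sketch of why default=str is needed (values illustrative, not taken from an actual EMHASS config):

    import orjson
    import pandas as pd
    import pytz

    params = {
        "retrieve_hass_conf": {
            "optimization_time_step": pd.to_timedelta(30, "minutes"),
            "time_zone": pytz.timezone("Europe/Paris"),
        }
    }

    # orjson raises TypeError on unknown types; default=str falls back to str()
    serialized = orjson.dumps(params, default=str).decode()
    print(serialized)  # Timedelta and timezone values appear as their str() form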
552
1228
 
553
1229
 
554
- def get_yaml_parse(emhass_conf: dict, use_secrets: Optional[bool] = True,
555
- params: Optional[str] = None) -> Tuple[dict, dict, dict]:
1230
+ def get_yaml_parse(params: str | dict, logger: logging.Logger) -> tuple[dict, dict, dict]:
556
1231
  """
557
- Perform parsing of the config.yaml file.
558
-
559
- :param emhass_conf: Dictionary containing the needed emhass paths
560
- :type emhass_conf: dict
561
- :param use_secrets: Indicate if we should use a secrets file or not.
562
- Set to False for unit tests.
563
- :type use_secrets: bool, optional
564
- :param params: Configuration parameters passed from data/options.json
565
- :type params: str
1232
+ Perform parsing of the params into the configuration categories
1233
+
1234
+ :param params: Built configuration parameters
1235
+ :type params: str or dict
1236
+ :param logger: The logger object
1237
+ :type logger: logging.Logger
566
1238
  :return: A tuple with the dictionaries containing the parsed data
567
1239
  :rtype: tuple(dict)
568
1240
 
569
1241
  """
570
- if params is None:
571
- with open(emhass_conf["config_path"], 'r') as file:
572
- input_conf = yaml.load(file, Loader=yaml.FullLoader)
573
- else:
574
- input_conf = json.loads(params)
575
- if use_secrets:
576
- if params is None:
577
- with open(emhass_conf["config_path"].parent / 'secrets_emhass.yaml', 'r') as file: # Assume secrets and config file paths are the same
578
- input_secrets = yaml.load(file, Loader=yaml.FullLoader)
1242
+ if params:
1243
+ if type(params) is str:
1244
+ input_conf = orjson.loads(params)
579
1245
  else:
580
- input_secrets = input_conf.pop("params_secrets", None)
581
-
582
- if type(input_conf["retrieve_hass_conf"]) == list: # if using old config version
583
- retrieve_hass_conf = dict(
584
- {key: d[key] for d in input_conf["retrieve_hass_conf"] for key in d}
585
- )
586
- else:
587
- retrieve_hass_conf = input_conf.get("retrieve_hass_conf", {})
588
-
589
- if use_secrets:
590
- retrieve_hass_conf.update(input_secrets)
591
- else:
592
- retrieve_hass_conf["hass_url"] = "http://supervisor/core/api"
593
- retrieve_hass_conf["long_lived_token"] = "${SUPERVISOR_TOKEN}"
594
- retrieve_hass_conf["time_zone"] = "Europe/Paris"
595
- retrieve_hass_conf["lat"] = 45.83
596
- retrieve_hass_conf["lon"] = 6.86
597
- retrieve_hass_conf["alt"] = 4807.8
598
- retrieve_hass_conf["freq"] = pd.to_timedelta(retrieve_hass_conf["freq"], "minutes")
599
- retrieve_hass_conf["time_zone"] = pytz.timezone(retrieve_hass_conf["time_zone"])
600
-
601
- if type(input_conf["optim_conf"]) == list:
602
- optim_conf = dict({key: d[key] for d in input_conf["optim_conf"] for key in d})
1246
+ input_conf = params
603
1247
  else:
604
- optim_conf = input_conf.get("optim_conf", {})
1248
+ input_conf = {}
1249
+ logger.error("No params have been detected for get_yaml_parse")
1250
+ return False, False, False
605
1251
 
606
- optim_conf["list_hp_periods"] = dict(
607
- (key, d[key]) for d in optim_conf["list_hp_periods"] for key in d
608
- )
609
- optim_conf["delta_forecast"] = pd.Timedelta(days=optim_conf["delta_forecast"])
1252
+ optim_conf = input_conf.get("optim_conf", {})
1253
+ retrieve_hass_conf = input_conf.get("retrieve_hass_conf", {})
1254
+ plant_conf = input_conf.get("plant_conf", {})
610
1255
 
611
- if type(input_conf["plant_conf"]) == list:
612
- plant_conf = dict({key: d[key] for d in input_conf["plant_conf"] for key in d})
613
- else:
614
- plant_conf = input_conf.get("plant_conf", {})
1256
+ # Format time parameters
1257
+ if optim_conf.get("delta_forecast_daily", None) is not None:
1258
+ optim_conf["delta_forecast_daily"] = pd.Timedelta(days=optim_conf["delta_forecast_daily"])
1259
+ if retrieve_hass_conf.get("optimization_time_step", None) is not None:
1260
+ retrieve_hass_conf["optimization_time_step"] = pd.to_timedelta(
1261
+ retrieve_hass_conf["optimization_time_step"], "minutes"
1262
+ )
1263
+ if retrieve_hass_conf.get("time_zone", None) is not None:
1264
+ retrieve_hass_conf["time_zone"] = pytz.timezone(retrieve_hass_conf["time_zone"])
615
1265
 
616
1266
  return retrieve_hass_conf, optim_conf, plant_conf
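[Editor's note] A usage sketch for the reworked params-driven parser, assuming get_yaml_parse is imported from emhass.utils and given a minimal params dict (values illustrative):

    import logging

    from emhass.utils import get_yaml_parse

    params = {
        "retrieve_hass_conf": {"optimization_time_step": 30, "time_zone": "Europe/Paris"},
        "optim_conf": {"delta_forecast_daily": 1},
        "plant_conf": {},
    }
    retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params, logging.getLogger(__name__))

    print(optim_conf["delta_forecast_daily"])            # Timedelta('1 days 00:00:00')
    print(retrieve_hass_conf["optimization_time_step"])  # Timedelta('0 days 00:30:00')
    print(retrieve_hass_conf["time_zone"])               # <DstTzInfo 'Europe/Paris' ...>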
617
1267
 
618
1268
 
619
- def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dict:
1269
+ def get_injection_dict(df: pd.DataFrame, plot_size: int | None = 1366) -> dict:
620
1270
  """
621
1271
  Build a dictionary with graphs and tables for the webui.
622
1272
 
@@ -637,9 +1287,10 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dic
637
1287
  df[cols_p] = df[cols_p].astype(int)
638
1288
  df[cols_else] = df[cols_else].round(3)
639
1289
  # Create plots
1290
+ # Figure 0: System powers
640
1291
  n_colors = len(cols_p)
641
1292
  colors = px.colors.sample_colorscale(
642
- "jet", [n / (n_colors - 1) for n in range(n_colors)]
1293
+ "jet", [n / (n_colors - 1) if n_colors > 1 else 0 for n in range(n_colors)]
643
1294
  )
644
1295
  fig_0 = px.line(
645
1296
  df[cols_p],
@@ -647,8 +1298,12 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dic
647
1298
  template="presentation",
648
1299
  line_shape="hv",
649
1300
  color_discrete_sequence=colors,
1301
+ render_mode="svg",
650
1302
  )
651
1303
  fig_0.update_layout(xaxis_title="Timestamp", yaxis_title="System powers (W)")
1304
+ image_path_0 = fig_0.to_html(full_html=False, default_width="75%")
1305
+ # Figure 1: Battery SOC (Optional)
1306
+ image_path_1 = None
652
1307
  if "SOC_opt" in df.columns.to_list():
653
1308
  fig_1 = px.line(
654
1309
  df["SOC_opt"],
@@ -656,12 +1311,36 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dic
656
1311
  template="presentation",
657
1312
  line_shape="hv",
658
1313
  color_discrete_sequence=colors,
1314
+ render_mode="svg",
659
1315
  )
660
1316
  fig_1.update_layout(xaxis_title="Timestamp", yaxis_title="Battery SOC (%)")
1317
+ image_path_1 = fig_1.to_html(full_html=False, default_width="75%")
1318
+ # Figure Thermal: Temperatures (Optional)
1319
+ # Detect columns for predicted or target temperatures
1320
+ cols_temp = [
1321
+ i for i in df.columns.to_list() if "predicted_temp_heater" in i or "target_temp_heater" in i
1322
+ ]
1323
+ image_path_temp = None
1324
+ if len(cols_temp) > 0:
1325
+ n_colors = len(cols_temp)
1326
+ colors = px.colors.sample_colorscale(
1327
+ "jet", [n / (n_colors - 1) if n_colors > 1 else 0 for n in range(n_colors)]
1328
+ )
1329
+ fig_temp = px.line(
1330
+ df[cols_temp],
1331
+ title="Thermal loads temperature schedule",
1332
+ template="presentation",
1333
+ line_shape="hv",
1334
+ color_discrete_sequence=colors,
1335
+ render_mode="svg",
1336
+ )
1337
+ fig_temp.update_layout(xaxis_title="Timestamp", yaxis_title="Temperature (&deg;C)")
1338
+ image_path_temp = fig_temp.to_html(full_html=False, default_width="75%")
1339
+ # Figure 2: Costs
661
1340
  cols_cost = [i for i in df.columns.to_list() if "cost_" in i or "unit_" in i]
662
1341
  n_colors = len(cols_cost)
663
1342
  colors = px.colors.sample_colorscale(
664
- "jet", [n / (n_colors - 1) for n in range(n_colors)]
1343
+ "jet", [n / (n_colors - 1) if n_colors > 1 else 0 for n in range(n_colors)]
665
1344
  )
666
1345
  fig_2 = px.line(
667
1346
  df[cols_cost],
@@ -669,14 +1348,11 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dic
669
1348
  template="presentation",
670
1349
  line_shape="hv",
671
1350
  color_discrete_sequence=colors,
1351
+ render_mode="svg",
672
1352
  )
673
1353
  fig_2.update_layout(xaxis_title="Timestamp", yaxis_title="System costs (currency)")
674
- # Get full path to image
675
- image_path_0 = fig_0.to_html(full_html=False, default_width="75%")
676
- if "SOC_opt" in df.columns.to_list():
677
- image_path_1 = fig_1.to_html(full_html=False, default_width="75%")
678
1354
  image_path_2 = fig_2.to_html(full_html=False, default_width="75%")
679
- # The tables
1355
+ # Tables
680
1356
  table1 = df.reset_index().to_html(classes="mystyle", index=False)
681
1357
  cost_cols = [i for i in df.columns if "cost_" in i]
682
1358
  table2 = df[cost_cols].reset_index().sum(numeric_only=True)
@@ -686,19 +1362,23 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dic
686
1362
  .reset_index(names="Variable")
687
1363
  .to_html(classes="mystyle", index=False)
688
1364
  )
689
- # The dict of plots
1365
+ # Construct Injection Dict
690
1366
  injection_dict = {}
691
1367
  injection_dict["title"] = "<h2>EMHASS optimization results</h2>"
692
1368
  injection_dict["subsubtitle0"] = "<h4>Plotting latest optimization results</h4>"
1369
+ # Add Powers
693
1370
  injection_dict["figure_0"] = image_path_0
694
- if "SOC_opt" in df.columns.to_list():
1371
+ # Add Thermal
1372
+ if image_path_temp is not None:
1373
+ injection_dict["figure_thermal"] = image_path_temp
1374
+ # Add SOC
1375
+ if image_path_1 is not None:
695
1376
  injection_dict["figure_1"] = image_path_1
1377
+ # Add Costs
696
1378
  injection_dict["figure_2"] = image_path_2
697
1379
  injection_dict["subsubtitle1"] = "<h4>Last run optimization results table</h4>"
698
1380
  injection_dict["table1"] = table1
699
- injection_dict["subsubtitle2"] = (
700
- "<h4>Summary table for latest optimization results</h4>"
701
- )
1381
+ injection_dict["subsubtitle2"] = "<h4>Summary table for latest optimization results</h4>"
702
1382
  injection_dict["table2"] = table2
703
1383
  return injection_dict
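[Editor's note] Worth noting: the `if n_colors > 1 else 0` guard added to every sample_colorscale call above prevents a ZeroDivisionError when only a single column is plotted. A small standalone sketch of the guarded expression:

    import plotly.express as px

    def safe_colors(n_colors: int) -> list[str]:
        # A single series maps to position 0 on the colorscale instead of dividing by zero
        return px.colors.sample_colorscale(
            "jet", [n / (n_colors - 1) if n_colors > 1 else 0 for n in range(n_colors)]
        )

    print(safe_colors(1))  # one color, no ZeroDivisionError
    print(safe_colors(3))  # three evenly spaced colors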
704
1384
 
@@ -723,10 +1403,12 @@ def get_injection_dict_forecast_model_fit(df_fit_pred: pd.DataFrame, mlf: MLFore
723
1403
  injection_dict = {}
724
1404
  injection_dict["title"] = "<h2>Custom machine learning forecast model fit</h2>"
725
1405
  injection_dict["subsubtitle0"] = (
726
- "<h4>Plotting train/test forecast model results for " + mlf.model_type + "</h4>"
727
- )
728
- injection_dict["subsubtitle0"] = (
729
- "<h4>Forecasting variable " + mlf.var_model + "</h4>"
1406
+ "<h4>Plotting train/test forecast model results for "
1407
+ + mlf.model_type
1408
+ + "<br>"
1409
+ + "Forecasting variable "
1410
+ + mlf.var_model
1411
+ + "</h4>"
730
1412
  )
731
1413
  injection_dict["figure_0"] = image_path_0
732
1414
  return injection_dict
@@ -754,164 +1436,574 @@ def get_injection_dict_forecast_model_tune(df_pred_optim: pd.DataFrame, mlf: MLF
754
1436
  injection_dict["subsubtitle0"] = (
755
1437
  "<h4>Performed a tuning routine using bayesian optimization for "
756
1438
  + mlf.model_type
1439
+ + "<br>"
1440
+ + "Forecasting variable "
1441
+ + mlf.var_model
757
1442
  + "</h4>"
758
1443
  )
759
- injection_dict["subsubtitle0"] = (
760
- "<h4>Forecasting variable " + mlf.var_model + "</h4>"
761
- )
762
1444
  injection_dict["figure_0"] = image_path_0
763
1445
  return injection_dict
764
1446
 
765
1447
 
766
- def build_params(params: dict, params_secrets: dict, options: dict, addon: int,
767
- logger: logging.Logger) -> dict:
1448
+ async def build_config(
1449
+ emhass_conf: dict,
1450
+ logger: logging.Logger,
1451
+ defaults_path: str,
1452
+ config_path: str | None = None,
1453
+ legacy_config_path: str | None = None,
1454
+ ) -> dict:
768
1455
  """
769
- Build the main params dictionary from the loaded options.json when using the add-on.
1456
+ Retrieve parameters from configuration files.
1457
+ Priority order (low to high): defaults_path, config_path, legacy_config_path
770
1458
 
771
- :param params: The main params dictionary
772
- :type params: dict
773
- :param params_secrets: The dictionary containing the secret protected variables
774
- :type params_secrets: dict
775
- :param options: The load dictionary from options.json
776
- :type options: dict
777
- :param addon: A "bool" to select if we are using the add-on
778
- :type addon: int
1459
+ :param emhass_conf: Dictionary containing the needed emhass paths
1460
+ :type emhass_conf: dict
779
1461
  :param logger: The logger object
780
1462
  :type logger: logging.Logger
781
- :return: The builded dictionary
1463
+ :param defaults_path: path to config file for parameter defaults (config_defaults.json)
1464
+ :type defaults_path: str
1465
+ :param config_path: path to the main configuration file (config.json)
1466
+ :type config_path: str
1467
+ :param legacy_config_path: path to legacy config file (config_emhass.yaml)
1468
+ :type legacy_config_path: str
1469
+ :return: The built config dictionary
782
1470
  :rtype: dict
783
1471
  """
784
- if addon == 1:
785
- # Updating variables in retrieve_hass_conf
786
- params["retrieve_hass_conf"]["freq"] = options.get("optimization_time_step", params["retrieve_hass_conf"]["freq"])
787
- params["retrieve_hass_conf"]["days_to_retrieve"] = options.get("historic_days_to_retrieve", params["retrieve_hass_conf"]["days_to_retrieve"])
788
- params["retrieve_hass_conf"]["var_PV"] = options.get("sensor_power_photovoltaics", params["retrieve_hass_conf"]["var_PV"])
789
- params["retrieve_hass_conf"]["var_load"] = options.get("sensor_power_load_no_var_loads", params["retrieve_hass_conf"]["var_load"])
790
- params["retrieve_hass_conf"]["load_negative"] = options.get("load_negative", params["retrieve_hass_conf"]["load_negative"])
791
- params["retrieve_hass_conf"]["set_zero_min"] = options.get("set_zero_min", params["retrieve_hass_conf"]["set_zero_min"])
792
- params["retrieve_hass_conf"]["var_replace_zero"] = [options.get("sensor_power_photovoltaics", params["retrieve_hass_conf"]["var_replace_zero"])]
793
- params["retrieve_hass_conf"]["var_interp"] = [
794
- options.get("sensor_power_photovoltaics", params["retrieve_hass_conf"]["var_PV"]),
795
- options.get("sensor_power_load_no_var_loads", params["retrieve_hass_conf"]["var_load"])
1472
+
1473
+ # Read default parameters (default root_path/data/config_defaults.json)
1474
+ if defaults_path and pathlib.Path(defaults_path).is_file():
1475
+ async with aiofiles.open(defaults_path) as data:
1476
+ content = await data.read()
1477
+ config = orjson.loads(content)
1478
+ else:
1479
+ logger.error("config_defaults.json. does not exist ")
1480
+ return False
1481
+
1482
+ # Read user config parameters if provided (default /share/config.json)
1483
+ if config_path and pathlib.Path(config_path).is_file():
1484
+ async with aiofiles.open(config_path) as data:
1485
+ content = await data.read()
1486
+ # Set override default parameters (config_defaults) with user given parameters (config.json)
1487
+ logger.info("Obtaining parameters from config.json:")
1488
+ config.update(orjson.loads(content))
1489
+ else:
1490
+ logger.info(
1491
+ "config.json does not exist, or has not been passed. config parameters may default to config_defaults.json"
1492
+ )
1493
+ logger.info("you may like to generate the config.json file on the configuration page")
1494
+
1495
+ # Check to see if legacy config_emhass.yaml was provided (default /app/config_emhass.yaml)
1496
+ # Convert legacy parameter definitions/format to match config.json
1497
+ if legacy_config_path and pathlib.Path(legacy_config_path).is_file():
1498
+ async with aiofiles.open(legacy_config_path) as data:
1499
+ content = await data.read()
1500
+ legacy_config = yaml.safe_load(content)
1501
+ legacy_config_parameters = await build_legacy_config_params(
1502
+ emhass_conf, legacy_config, logger
1503
+ )
1504
+ if type(legacy_config_parameters) is not bool:
1505
+ logger.info(
1506
+ "Obtaining parameters from config_emhass.yaml: (will overwrite config parameters)"
1507
+ )
1508
+ config.update(legacy_config_parameters)
1509
+
1510
+ return config
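[Editor's note] The layering above is plain dict.update, so later sources win: config_defaults.json first, then config.json, then the converted legacy config_emhass.yaml. A reduced sketch of the precedence (keys and values illustrative):

    config = {"lp_solver": "default", "set_use_battery": False}  # from config_defaults.json
    user_config = {"set_use_battery": True}                      # from config.json
    legacy = {"lp_solver": "COIN_CMD"}                           # converted from config_emhass.yaml

    config.update(user_config)  # user config overrides the defaults
    config.update(legacy)       # legacy file overrides both (highest priority)
    assert config == {"lp_solver": "COIN_CMD", "set_use_battery": True}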
1511
+
1512
+
1513
+ async def build_legacy_config_params(
1514
+ emhass_conf: dict[str, pathlib.Path],
1515
+ legacy_config: dict[str, str],
1516
+ logger: logging.Logger,
1517
+ ) -> dict[str, str]:
1518
+ """
1519
+ Build a config dictionary with legacy config_emhass.yaml file.
1520
+ Uses the associations file to convert parameter naming conventions (to config.json/config_defaults.json).
1521
+ Extracts the parameter values and formats to match config.json.
1522
+
1523
+ :param emhass_conf: Dictionary containing the needed emhass paths
1524
+ :type emhass_conf: dict
1525
+ :param legacy_config: The legacy config dictionary
1526
+ :type legacy_config: dict
1527
+ :param logger: The logger object
1528
+ :type logger: logging.Logger
1529
+ :return: The built config dictionary
1530
+ :rtype: dict
1531
+ """
1532
+
1533
+ # Association file key reference
1534
+ # association[0] = config categories
1535
+ # association[1] = legacy parameter name
1536
+ # association[2] = parameter (config.json/config_defaults.json)
1537
+ # association[3] = parameter list name if exists (not used, from legacy options.json)
1538
+
1539
+ # Check each config category exists, else create a blank dict for it (avoids errors)
1540
+ legacy_config["retrieve_hass_conf"] = legacy_config.get("retrieve_hass_conf", {})
1541
+ legacy_config["optim_conf"] = legacy_config.get("optim_conf", {})
1542
+ legacy_config["plant_conf"] = legacy_config.get("plant_conf", {})
1543
+ config = {}
1544
+
1545
+ # Use associations list to map legacy parameter name with config.json parameter name
1546
+ if emhass_conf["associations_path"].exists():
1547
+ async with aiofiles.open(emhass_conf["associations_path"]) as data:
1548
+ content = await data.read()
1549
+ associations = list(csv.reader(content.splitlines(), delimiter=","))
1550
+ else:
1551
+ logger.error(
1552
+ "Cant find associations file (associations.csv) in: "
1553
+ + str(emhass_conf["associations_path"])
1554
+ )
1555
+ return False
1556
+
1557
+ # Loop through all parameters in association file
1558
+ # Populate config with existing legacy config parameters (converting legacy naming conventions via the associations list)
1559
+ for association in associations:
1560
+ # If the legacy config category exists and the legacy parameter exists within it
1561
+ if (
1562
+ legacy_config.get(association[0]) is not None
1563
+ and legacy_config[association[0]].get(association[1], None) is not None
1564
+ ):
1565
+ config[association[2]] = legacy_config[association[0]][association[1]]
1566
+
1567
+ # If config now has load_peak_hour_periods, extract from list of dict
1568
+ if association[2] == "load_peak_hour_periods" and type(config[association[2]]) is list:
1569
+ config[association[2]] = {key: d[key] for d in config[association[2]] for key in d}
1570
+
1571
+ return config
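[Editor's note] Each associations.csv row maps a config category and a legacy parameter name to its config.json name. The load_peak_hour_periods branch also flattens the legacy list-of-single-key-dicts form into one dict, a comprehension that reappears in build_params below. A sketch with an illustrative row:

    # Illustrative association row: [category, legacy name, config.json name]
    association = ["optim_conf", "list_hp_periods", "load_peak_hour_periods"]

    legacy_config = {
        "optim_conf": {
            "list_hp_periods": [
                {"period_hp_1": [{"start": "02:54"}, {"end": "15:24"}]},
                {"period_hp_2": [{"start": "17:24"}, {"end": "20:24"}]},
            ]
        }
    }

    value = legacy_config[association[0]][association[1]]
    # Flatten the list of single-key dicts into one dict, as done above
    config = {association[2]: {key: d[key] for d in value for key in d}}
    print(list(config["load_peak_hour_periods"]))  # ['period_hp_1', 'period_hp_2']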
1572
+
1573
+
1574
+ def param_to_config(param: dict[str, dict], logger: logging.Logger) -> dict[str, str]:
1575
+ """
1576
+ Extracts the parameters from param back into the config.json format.
1577
+ Extracts parameters from the config categories.
1578
+ Attempts to exclude secrets housed in retrieve_hass_conf.
1579
+
1580
+ :param param: Built configuration parameters
1581
+ :type param: dict[str, dict]
1582
+ :param logger: The logger object
1583
+ :type logger: logging.Logger
1584
+ :return: The built config dictionary
1585
+ :rtype: dict[str, str]
1586
+ """
1587
+ logger.debug("Converting param to config")
1588
+
1589
+ return_config = {}
1590
+
1591
+ config_categories = ["retrieve_hass_conf", "optim_conf", "plant_conf"]
1592
+ secret_params = [
1593
+ "hass_url",
1594
+ "time_zone",
1595
+ "Latitude",
1596
+ "Longitude",
1597
+ "Altitude",
1598
+ "long_lived_token",
1599
+ "solcast_api_key",
1600
+ "solcast_rooftop_id",
1601
+ "solar_forecast_kwp",
1602
+ ]
1603
+
1604
+ # Loop through the config categories that contain config params, and extract
1605
+ for config in config_categories:
1606
+ for parameter in param[config]:
1607
+ # If parameter is not a secret, append to return_config
1608
+ if parameter not in secret_params:
1609
+ return_config[str(parameter)] = param[config][parameter]
1610
+
1611
+ return return_config
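[Editor's note] A quick sketch of the secret filtering, assuming param_to_config is imported from emhass.utils (values illustrative):

    import logging

    from emhass.utils import param_to_config

    param = {
        "retrieve_hass_conf": {"hass_url": "http://supervisor/core/api", "sensor_power_load_no_var_loads": "sensor.power_load"},
        "optim_conf": {"set_use_battery": True},
        "plant_conf": {},
    }
    config = param_to_config(param, logging.getLogger(__name__))
    # Secrets such as hass_url are dropped; plain parameters survive
    assert "hass_url" not in config and config["set_use_battery"] is True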
1612
+
1613
+
1614
+ async def build_secrets(
1615
+ emhass_conf: dict[str, pathlib.Path],
1616
+ logger: logging.Logger,
1617
+ argument: dict[str, str] | None = None,
1618
+ options_path: str | None = None,
1619
+ secrets_path: str | None = None,
1620
+ no_response: bool = False,
1621
+ ) -> tuple[dict[str, pathlib.Path], dict[str, str | float]]:
1622
+ """
1623
+ Retrieve and build parameters from secrets locations: ENV, ARG, secrets files (secrets_emhass.yaml / options.json) and/or Home Assistant (via its API).
1624
+ Priority order (low to high): defaults (set in this function), ENV, options.json, Home Assistant API, secrets_emhass.yaml, arguments.
1625
+
1626
+ :param emhass_conf: Dictionary containing the needed emhass paths
1627
+ :type emhass_conf: dict
1628
+ :param logger: The logger object
1629
+ :type logger: logging.Logger
1630
+ :param argument: dictionary of secret arguments passed (url, key)
1631
+ :type argument: dict
1632
+ :param options_path: path to the options file (options.json) (usually provided by EMHASS-Add-on)
1633
+ :type options_path: str
1634
+ :param secrets_path: path to secrets file (secrets_emhass.yaml)
1635
+ :type secrets_path: str
1636
+ :param no_response: bypass the GET request to Home Assistant (avoids JSON response errors)
1637
+ :type no_response: bool
1638
+ :return: Updated emhass_conf, the built secrets dictionary
1639
+ :rtype: tuple[dict, dict]
1640
+ """
1641
+ # Set defaults to be overwritten
1642
+ if argument is None:
1643
+ argument = {}
1644
+ params_secrets = {
1645
+ "hass_url": "https://myhass.duckdns.org/",
1646
+ "long_lived_token": "thatverylongtokenhere",
1647
+ "time_zone": "Europe/Paris",
1648
+ "Latitude": 45.83,
1649
+ "Longitude": 6.86,
1650
+ "Altitude": 4807.8,
1651
+ "solcast_api_key": "yoursecretsolcastapikey",
1652
+ "solcast_rooftop_id": "yourrooftopid",
1653
+ "solar_forecast_kwp": 5,
1654
+ }
1655
+
1656
+ # Obtain secrets from ENV?
1657
+ params_secrets["hass_url"] = os.getenv("EMHASS_URL", params_secrets["hass_url"])
1658
+ params_secrets["long_lived_token"] = os.getenv(
1659
+ "SUPERVISOR_TOKEN", params_secrets["long_lived_token"]
1660
+ )
1661
+ params_secrets["time_zone"] = os.getenv("TIME_ZONE", params_secrets["time_zone"])
1662
+ params_secrets["Latitude"] = float(os.getenv("LAT", params_secrets["Latitude"]))
1663
+ params_secrets["Longitude"] = float(os.getenv("LON", params_secrets["Longitude"]))
1664
+ params_secrets["Altitude"] = float(os.getenv("ALT", params_secrets["Altitude"]))
1665
+
1666
+ # Obtain secrets from options.json (Generated from EMHASS-Add-on, Home Assistant addon Configuration page) or Home Assistant API (from local Supervisor API)?
1667
+ # Use local supervisor API to obtain secrets from Home Assistant if hass_url in options.json is empty and SUPERVISOR_TOKEN ENV exists (provided by Home Assistant when running the container as addon)
1668
+ options = {}
1669
+ if options_path and pathlib.Path(options_path).is_file():
1670
+ async with aiofiles.open(options_path) as data:
1671
+ content = await data.read()
1672
+ options = orjson.loads(content)
1673
+
1674
+ # Obtain secrets from Home Assistant?
1675
+ url_from_options = options.get("hass_url", "empty")
1676
+ key_from_options = options.get("long_lived_token", "empty")
1677
+
1678
+ # If data path specified by options.json, overwrite emhass_conf['data_path']
1679
+ if (
1680
+ options.get("data_path", None) is not None
1681
+ and pathlib.Path(options["data_path"]).exists()
1682
+ ):
1683
+ emhass_conf["data_path"] = pathlib.Path(options["data_path"])
1684
+
1685
+ # Check to use Home Assistant local API
1686
+ if not no_response and os.getenv("SUPERVISOR_TOKEN", None) is not None:
1687
+ params_secrets["long_lived_token"] = os.getenv("SUPERVISOR_TOKEN", None)
1688
+ # Use hass_url from options.json if available, otherwise use supervisor API for addon
1689
+ if url_from_options != "empty" and url_from_options != "":
1690
+ params_secrets["hass_url"] = url_from_options
1691
+ else:
1692
+ # For addons, use supervisor API for both REST and WebSocket access
1693
+ params_secrets["hass_url"] = "http://supervisor/core/api"
1694
+ headers = {
1695
+ "Authorization": "Bearer " + params_secrets["long_lived_token"],
1696
+ "content-type": "application/json",
1697
+ }
1698
+ # Obtain secrets from Home Assistant via API
1699
+ logger.debug("Obtaining secrets from Home Assistant Supervisor API")
1700
+ async with aiohttp.ClientSession() as session:
1701
+ async with session.get(
1702
+ params_secrets["hass_url"] + "/config", headers=headers
1703
+ ) as response:
1704
+ if response.status < 400:
1705
+ config_hass = await response.json()
1706
+ params_secrets = {
1707
+ "hass_url": params_secrets["hass_url"],
1708
+ "long_lived_token": params_secrets["long_lived_token"],
1709
+ "time_zone": config_hass["time_zone"],
1710
+ "Latitude": config_hass["latitude"],
1711
+ "Longitude": config_hass["longitude"],
1712
+ "Altitude": config_hass["elevation"],
1713
+ }
1714
+ else:
1715
+ # Obtain the url and key secrets if any from options.json (default /app/options.json)
1716
+ logger.warning(
1717
+ "Error obtaining secrets from Home Assistant Supervisor API"
1718
+ )
1719
+ logger.debug("Obtaining url and key secrets from options.json")
1720
+ if url_from_options != "empty" and url_from_options != "":
1721
+ params_secrets["hass_url"] = url_from_options
1722
+ if key_from_options != "empty" and key_from_options != "":
1723
+ params_secrets["long_lived_token"] = key_from_options
1724
+ if (
1725
+ options.get("time_zone", "empty") != "empty"
1726
+ and options["time_zone"] != ""
1727
+ ):
1728
+ params_secrets["time_zone"] = options["time_zone"]
1729
+ if options.get("Latitude", None) is not None and bool(
1730
+ options["Latitude"]
1731
+ ):
1732
+ params_secrets["Latitude"] = options["Latitude"]
1733
+ if options.get("Longitude", None) is not None and bool(
1734
+ options["Longitude"]
1735
+ ):
1736
+ params_secrets["Longitude"] = options["Longitude"]
1737
+ if options.get("Altitude", None) is not None and bool(
1738
+ options["Altitude"]
1739
+ ):
1740
+ params_secrets["Altitude"] = options["Altitude"]
1741
+ else:
1742
+ # Obtain the url and key secrets if any from options.json (default /app/options.json)
1743
+ logger.debug("Obtaining url and key secrets from options.json")
1744
+ if url_from_options != "empty" and url_from_options != "":
1745
+ params_secrets["hass_url"] = url_from_options
1746
+ if key_from_options != "empty" and key_from_options != "":
1747
+ params_secrets["long_lived_token"] = key_from_options
1748
+ if options.get("time_zone", "empty") != "empty" and options["time_zone"] != "":
1749
+ params_secrets["time_zone"] = options["time_zone"]
1750
+ if options.get("Latitude", None) is not None and bool(options["Latitude"]):
1751
+ params_secrets["Latitude"] = options["Latitude"]
1752
+ if options.get("Longitude", None) is not None and bool(options["Longitude"]):
1753
+ params_secrets["Longitude"] = options["Longitude"]
1754
+ if options.get("Altitude", None) is not None and bool(options["Altitude"]):
1755
+ params_secrets["Altitude"] = options["Altitude"]
1756
+
1757
+ # Obtain the forecast secrets (if any) from options.json (default /app/options.json)
1758
+ forecast_secrets = [
1759
+ "solcast_api_key",
1760
+ "solcast_rooftop_id",
1761
+ "solar_forecast_kwp",
1762
+ ]
1763
+ if any(x in forecast_secrets for x in list(options.keys())):
1764
+ logger.debug("Obtaining forecast secrets from options.json")
1765
+ if (
1766
+ options.get("solcast_api_key", "empty") != "empty"
1767
+ and options["solcast_api_key"] != ""
1768
+ ):
1769
+ params_secrets["solcast_api_key"] = options["solcast_api_key"]
1770
+ if (
1771
+ options.get("solcast_rooftop_id", "empty") != "empty"
1772
+ and options["solcast_rooftop_id"] != ""
1773
+ ):
1774
+ params_secrets["solcast_rooftop_id"] = options["solcast_rooftop_id"]
1775
+ if options.get("solar_forecast_kwp", None) and bool(options["solar_forecast_kwp"]):
1776
+ params_secrets["solar_forecast_kwp"] = options["solar_forecast_kwp"]
1777
+
1778
+ # Obtain secrets from secrets_emhass.yaml? (default /app/secrets_emhass.yaml)
1779
+ if secrets_path and pathlib.Path(secrets_path).is_file():
1780
+ logger.debug("Obtaining secrets from secrets file")
1781
+ async with aiofiles.open(pathlib.Path(secrets_path)) as file:
1782
+ content = await file.read()
1783
+ params_secrets.update(yaml.safe_load(content))
1784
+
1785
+ # Receive key and url from ARG/arguments?
1786
+ if argument.get("url") is not None:
1787
+ params_secrets["hass_url"] = argument["url"]
1788
+ logger.debug("Obtaining url from passed argument")
1789
+ if argument.get("key") is not None:
1790
+ params_secrets["long_lived_token"] = argument["key"]
1791
+ logger.debug("Obtaining long_lived_token from passed argument")
1792
+
1793
+ return emhass_conf, params_secrets
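[Editor's note] A condensed sketch of the priority chain, assuming build_secrets is imported from emhass.utils: with no Supervisor token in play and no_response=True, an ENV value survives unless an argument overrides it (values illustrative):

    import asyncio
    import logging
    import os

    from emhass.utils import build_secrets

    os.environ["TIME_ZONE"] = "Australia/Sydney"  # ENV beats the built-in default

    emhass_conf, secrets = asyncio.run(
        build_secrets(
            {},  # emhass_conf paths are not needed for this sketch
            logging.getLogger(__name__),
            argument={"url": "http://homeassistant.local:8123/"},  # ARG beats everything
            no_response=True,  # skip the Supervisor API round trip
        )
    )
    assert secrets["time_zone"] == "Australia/Sydney"
    assert secrets["hass_url"] == "http://homeassistant.local:8123/"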
1794
+
1795
+
1796
+ async def build_params(
1797
+ emhass_conf: dict[str, pathlib.Path],
1798
+ params_secrets: dict[str, str | float],
1799
+ config: dict[str, str],
1800
+ logger: logging.Logger,
1801
+ ) -> dict[str, dict]:
1802
+ """
1803
+ Build the main params dictionary from the config and secrets.
1804
+ Appends the configuration categories used by EMHASS to the parameters (using the associations file as a reference).
1805
+
1806
+ :param emhass_conf: Dictionary containing the needed emhass paths
1807
+ :type emhass_conf: dict[str, pathlib.Path]
1808
+ :param params_secrets: The dictionary containing the built secret variables
1809
+ :type params_secrets: dict[str, str | float]
1810
+ :param config: The dictionary of built config parameters
1811
+ :type config: dict[str, str]
1812
+ :param logger: The logger object
1813
+ :type logger: logging.Logger
1814
+ :return: The built param dictionary
1815
+ :rtype: dict[str, dict]
1816
+ """
1817
+ if not isinstance(params_secrets, dict):
1818
+ params_secrets = {}
1819
+
1820
+ params = {}
1821
+ # Start with blank config categories
1822
+ params["retrieve_hass_conf"] = {}
1823
+ params["params_secrets"] = {}
1824
+ params["optim_conf"] = {}
1825
+ params["plant_conf"] = {}
1826
+
1827
+ # Obtain associations to categorize parameters into their corresponding config categories
1828
+ if emhass_conf.get(
1829
+ "associations_path", get_root(__file__, num_parent=2) / "data/associations.csv"
1830
+ ).exists():
1831
+ async with aiofiles.open(emhass_conf["associations_path"]) as data:
1832
+ content = await data.read()
1833
+ associations = list(csv.reader(content.splitlines(), delimiter=","))
1834
+ else:
1835
+ logger.error(
1836
+ "Unable to obtain the associations file (associations.csv) in: "
1837
+ + str(emhass_conf["associations_path"])
1838
+ )
1839
+ return False
1840
+
1841
+ # Association file key reference
1842
+ # association[0] = config categories
1843
+ # association[1] = legacy parameter name
1844
+ # association[2] = parameter (config.json/config_defaults.json)
1845
+ # association[3] = parameter list name if exists (not used, from legacy options.json)
1846
+ # Use the associations list to copy parameters from config into params (under the corresponding config categories)
1847
+ for association in associations:
1848
+ # If the parameter has a list_ name and appears in config under that list name
1849
+ # (i.e., the config parameter is in the legacy options.json format)
1850
+ if len(association) == 4 and config.get(association[3]) is not None:
1851
+ # Extract lists of dictionaries
1852
+ if config[association[3]] and type(config[association[3]][0]) is dict:
1853
+ params[association[0]][association[2]] = [
1854
+ i[association[2]] for i in config[association[3]]
1855
+ ]
1856
+ else:
1857
+ params[association[0]][association[2]] = config[association[3]]
1858
+ # Else, directly set value of config parameter to param
1859
+ elif config.get(association[2]) is not None:
1860
+ params[association[0]][association[2]] = config[association[2]]
1861
+
1862
+ # Check if we need to create load_peak_hour_periods from config (i.e., legacy options.json format)
1863
+ if (
1864
+ params.get("optim_conf") is not None
1865
+ and config.get("list_peak_hours_periods_start_hours") is not None
1866
+ and config.get("list_peak_hours_periods_end_hours") is not None
1867
+ ):
1868
+ start_hours_list = [
1869
+ i["peak_hours_periods_start_hours"]
1870
+ for i in config["list_peak_hours_periods_start_hours"]
1871
+ ]
1872
+ end_hours_list = [
1873
+ i["peak_hours_periods_end_hours"] for i in config["list_peak_hours_periods_end_hours"]
796
1874
  ]
797
- params["retrieve_hass_conf"]["method_ts_round"] = options.get("method_ts_round", params["retrieve_hass_conf"]["method_ts_round"])
798
- params["retrieve_hass_conf"]["continual_publish"] = options.get("continual_publish", params["retrieve_hass_conf"]["continual_publish"])
799
- # Update params Secrets if specified
800
- params["params_secrets"] = params_secrets
801
- params["params_secrets"]["time_zone"] = options.get("time_zone", params_secrets["time_zone"])
802
- params["params_secrets"]["lat"] = options.get("Latitude", params_secrets["lat"])
803
- params["params_secrets"]["lon"] = options.get("Longitude", params_secrets["lon"])
804
- params["params_secrets"]["alt"] = options.get("Altitude", params_secrets["alt"])
805
- # Updating variables in optim_conf
806
- params["optim_conf"]["set_use_battery"] = options.get("set_use_battery", params["optim_conf"]["set_use_battery"])
807
- params["optim_conf"]["num_def_loads"] = options.get("number_of_deferrable_loads", params["optim_conf"]["num_def_loads"])
808
- if options.get("list_nominal_power_of_deferrable_loads", None) != None:
809
- params["optim_conf"]["P_deferrable_nom"] = [i["nominal_power_of_deferrable_loads"] for i in options.get("list_nominal_power_of_deferrable_loads")]
810
- if options.get("list_operating_hours_of_each_deferrable_load", None) != None:
811
- params["optim_conf"]["def_total_hours"] = [i["operating_hours_of_each_deferrable_load"] for i in options.get("list_operating_hours_of_each_deferrable_load")]
812
- if options.get("list_treat_deferrable_load_as_semi_cont", None) != None:
813
- params["optim_conf"]["treat_def_as_semi_cont"] = [i["treat_deferrable_load_as_semi_cont"] for i in options.get("list_treat_deferrable_load_as_semi_cont")]
814
- if options.get("list_set_deferrable_load_single_constant", None) != None:
815
- params["optim_conf"]["set_def_constant"] = [i["set_deferrable_load_single_constant"] for i in options.get("list_set_deferrable_load_single_constant")]
816
- if options.get("list_set_deferrable_startup_penalty", None) != None:
817
- params["optim_conf"]["def_start_penalty"] = [i["set_deferrable_startup_penalty"] for i in options.get("list_set_deferrable_startup_penalty")]
818
- params["optim_conf"]["weather_forecast_method"] = options.get("weather_forecast_method", params["optim_conf"]["weather_forecast_method"])
819
- # Update optional param secrets
1875
+ num_peak_hours = len(start_hours_list)
1876
+ list_hp_periods_list = {
1877
+ "period_hp_" + str(i + 1): [
1878
+ {"start": start_hours_list[i]},
1879
+ {"end": end_hours_list[i]},
1880
+ ]
1881
+ for i in range(num_peak_hours)
1882
+ }
1883
+ params["optim_conf"]["load_peak_hour_periods"] = list_hp_periods_list
1884
+ else:
1885
+ # Else, check param already contains load_peak_hour_periods from config
1886
+ if params["optim_conf"].get("load_peak_hour_periods", None) is None:
1887
+ logger.warning("Unable to detect or create load_peak_hour_periods parameter")
1888
+
1889
+ # Format load_peak_hour_periods list to dict if necessary
1890
+ if params["optim_conf"].get("load_peak_hour_periods", None) is not None and isinstance(
1891
+ params["optim_conf"]["load_peak_hour_periods"], list
1892
+ ):
1893
+ params["optim_conf"]["load_peak_hour_periods"] = {
1894
+ key: d[key] for d in params["optim_conf"]["load_peak_hour_periods"] for key in d
1895
+ }
1896
+
1897
+ # Call function to check parameter lists that require the same length as deferrable loads
1898
+ # If they do not match, pad them with defaults to fill the gaps
1899
+ if params["optim_conf"].get("number_of_deferrable_loads", None) is not None:
1900
+ num_def_loads = params["optim_conf"]["number_of_deferrable_loads"]
1901
+ params["optim_conf"]["start_timesteps_of_each_deferrable_load"] = check_def_loads(
1902
+ num_def_loads,
1903
+ params["optim_conf"],
1904
+ 0,
1905
+ "start_timesteps_of_each_deferrable_load",
1906
+ logger,
1907
+ )
1908
+ params["optim_conf"]["end_timesteps_of_each_deferrable_load"] = check_def_loads(
1909
+ num_def_loads,
1910
+ params["optim_conf"],
1911
+ 0,
1912
+ "end_timesteps_of_each_deferrable_load",
1913
+ logger,
1914
+ )
1915
+ params["optim_conf"]["set_deferrable_load_single_constant"] = check_def_loads(
1916
+ num_def_loads,
1917
+ params["optim_conf"],
1918
+ False,
1919
+ "set_deferrable_load_single_constant",
1920
+ logger,
1921
+ )
1922
+ params["optim_conf"]["treat_deferrable_load_as_semi_cont"] = check_def_loads(
1923
+ num_def_loads,
1924
+ params["optim_conf"],
1925
+ True,
1926
+ "treat_deferrable_load_as_semi_cont",
1927
+ logger,
1928
+ )
1929
+ params["optim_conf"]["set_deferrable_startup_penalty"] = check_def_loads(
1930
+ num_def_loads,
1931
+ params["optim_conf"],
1932
+ 0.0,
1933
+ "set_deferrable_startup_penalty",
1934
+ logger,
1935
+ )
1936
+ params["optim_conf"]["operating_hours_of_each_deferrable_load"] = check_def_loads(
1937
+ num_def_loads,
1938
+ params["optim_conf"],
1939
+ 0,
1940
+ "operating_hours_of_each_deferrable_load",
1941
+ logger,
1942
+ )
1943
+ params["optim_conf"]["nominal_power_of_deferrable_loads"] = check_def_loads(
1944
+ num_def_loads,
1945
+ params["optim_conf"],
1946
+ 0,
1947
+ "nominal_power_of_deferrable_loads",
1948
+ logger,
1949
+ )
1950
+ else:
1951
+ logger.warning("unable to obtain parameter: number_of_deferrable_loads")
1952
+ # historic_days_to_retrieve should be no less than 2
1953
+ if params["retrieve_hass_conf"].get("historic_days_to_retrieve", None) is not None:
1954
+ if params["retrieve_hass_conf"]["historic_days_to_retrieve"] < 2:
1955
+ params["retrieve_hass_conf"]["historic_days_to_retrieve"] = 2
1956
+ logger.warning(
1957
+ "days_to_retrieve should not be lower then 2, setting days_to_retrieve to 2. Make sure your sensors also have at least 2 days of history"
1958
+ )
1959
+ else:
1960
+ logger.warning("unable to obtain parameter: historic_days_to_retrieve")
1961
+
1962
+ # Configure secrets, set params to the correct config category
1963
+ # retrieve_hass_conf
1964
+ params["retrieve_hass_conf"]["hass_url"] = params_secrets.get("hass_url")
1965
+ params["retrieve_hass_conf"]["long_lived_token"] = params_secrets.get("long_lived_token")
1966
+ params["retrieve_hass_conf"]["time_zone"] = params_secrets.get("time_zone")
1967
+ params["retrieve_hass_conf"]["Latitude"] = params_secrets.get("Latitude")
1968
+ params["retrieve_hass_conf"]["Longitude"] = params_secrets.get("Longitude")
1969
+ params["retrieve_hass_conf"]["Altitude"] = params_secrets.get("Altitude")
1970
+ # Update optional param secrets
1971
+ if params["optim_conf"].get("weather_forecast_method", None) is not None:
820
1972
  if params["optim_conf"]["weather_forecast_method"] == "solcast":
821
- params["params_secrets"]["solcast_api_key"] = options.get("optional_solcast_api_key", params_secrets.get("solcast_api_key", "123456"))
822
- params["params_secrets"]["solcast_rooftop_id"] = options.get("optional_solcast_rooftop_id", params_secrets.get("solcast_rooftop_id", "123456"))
1973
+ params["retrieve_hass_conf"]["solcast_api_key"] = params_secrets.get(
1974
+ "solcast_api_key", "123456"
1975
+ )
1976
+ params["params_secrets"]["solcast_api_key"] = params_secrets.get(
1977
+ "solcast_api_key", "123456"
1978
+ )
1979
+ params["retrieve_hass_conf"]["solcast_rooftop_id"] = params_secrets.get(
1980
+ "solcast_rooftop_id", "123456"
1981
+ )
1982
+ params["params_secrets"]["solcast_rooftop_id"] = params_secrets.get(
1983
+ "solcast_rooftop_id", "123456"
1984
+ )
823
1985
  elif params["optim_conf"]["weather_forecast_method"] == "solar.forecast":
824
- params["params_secrets"]["solar_forecast_kwp"] = options.get("optional_solar_forecast_kwp", params_secrets.get("solar_forecast_kwp", 5))
825
- params["optim_conf"]["load_forecast_method"] = options.get("load_forecast_method", params["optim_conf"]["load_forecast_method"])
826
- params["optim_conf"]["delta_forecast"] = options.get("delta_forecast_daily", params["optim_conf"]["delta_forecast"])
827
- params["optim_conf"]["load_cost_forecast_method"] = options.get("load_cost_forecast_method", params["optim_conf"]["load_cost_forecast_method"])
828
- if (options.get("list_peak_hours_periods_start_hours", None) != None and options.get("list_peak_hours_periods_end_hours", None) != None):
829
- start_hours_list = [i["peak_hours_periods_start_hours"] for i in options["list_peak_hours_periods_start_hours"]]
830
- end_hours_list = [i["peak_hours_periods_end_hours"] for i in options["list_peak_hours_periods_end_hours"]]
831
- num_peak_hours = len(start_hours_list)
832
- list_hp_periods_list = [{'period_hp_'+str(i+1):[{'start':start_hours_list[i]},{'end':end_hours_list[i]}]} for i in range(num_peak_hours)]
833
- params['optim_conf']['list_hp_periods'] = list_hp_periods_list
834
- params['optim_conf']['load_cost_hp'] = options.get('load_peak_hours_cost', params['optim_conf']['load_cost_hp'])
835
- params['optim_conf']['load_cost_hc'] = options.get('load_offpeak_hours_cost', params['optim_conf']['load_cost_hc'])
836
- params['optim_conf']['prod_price_forecast_method'] = options.get('production_price_forecast_method', params['optim_conf']['prod_price_forecast_method'])
837
- params['optim_conf']['prod_sell_price'] = options.get('photovoltaic_production_sell_price', params['optim_conf']['prod_sell_price'])
838
- params['optim_conf']['set_total_pv_sell'] = options.get('set_total_pv_sell', params['optim_conf']['set_total_pv_sell'])
839
- params['optim_conf']['lp_solver'] = options.get('lp_solver', params['optim_conf']['lp_solver'])
840
- params['optim_conf']['lp_solver_path'] = options.get('lp_solver_path', params['optim_conf']['lp_solver_path'])
841
- params['optim_conf']['set_nocharge_from_grid'] = options.get('set_nocharge_from_grid', params['optim_conf']['set_nocharge_from_grid'])
842
- params['optim_conf']['set_nodischarge_to_grid'] = options.get('set_nodischarge_to_grid', params['optim_conf']['set_nodischarge_to_grid'])
843
- params['optim_conf']['set_battery_dynamic'] = options.get('set_battery_dynamic', params['optim_conf']['set_battery_dynamic'])
844
- params['optim_conf']['battery_dynamic_max'] = options.get('battery_dynamic_max', params['optim_conf']['battery_dynamic_max'])
845
- params['optim_conf']['battery_dynamic_min'] = options.get('battery_dynamic_min', params['optim_conf']['battery_dynamic_min'])
846
- params['optim_conf']['weight_battery_discharge'] = options.get('weight_battery_discharge', params['optim_conf']['weight_battery_discharge'])
847
- params['optim_conf']['weight_battery_charge'] = options.get('weight_battery_charge', params['optim_conf']['weight_battery_charge'])
848
- if options.get('list_start_timesteps_of_each_deferrable_load',None) != None:
849
- params['optim_conf']['def_start_timestep'] = [i['start_timesteps_of_each_deferrable_load'] for i in options.get('list_start_timesteps_of_each_deferrable_load')]
850
- if options.get('list_end_timesteps_of_each_deferrable_load',None) != None:
851
- params['optim_conf']['def_end_timestep'] = [i['end_timesteps_of_each_deferrable_load'] for i in options.get('list_end_timesteps_of_each_deferrable_load')]
852
- # Updating variables in plant_conf
853
- params['plant_conf']['P_from_grid_max'] = options.get('maximum_power_from_grid', params['plant_conf']['P_from_grid_max'])
854
- params['plant_conf']['P_to_grid_max'] = options.get('maximum_power_to_grid', params['plant_conf']['P_to_grid_max'])
855
- if options.get('list_pv_module_model',None) != None:
856
- params['plant_conf']['module_model'] = [i['pv_module_model'] for i in options.get('list_pv_module_model')]
857
- if options.get('list_pv_inverter_model',None) != None:
858
- params['plant_conf']['inverter_model'] = [i['pv_inverter_model'] for i in options.get('list_pv_inverter_model')]
859
- if options.get('list_surface_tilt',None) != None:
860
- params['plant_conf']['surface_tilt'] = [i['surface_tilt'] for i in options.get('list_surface_tilt')]
861
- if options.get('list_surface_azimuth',None) != None:
862
- params['plant_conf']['surface_azimuth'] = [i['surface_azimuth'] for i in options.get('list_surface_azimuth')]
863
- if options.get('list_modules_per_string',None) != None:
864
- params['plant_conf']['modules_per_string'] = [i['modules_per_string'] for i in options.get('list_modules_per_string')]
865
- if options.get('list_strings_per_inverter',None) != None:
866
- params['plant_conf']['strings_per_inverter'] = [i['strings_per_inverter'] for i in options.get('list_strings_per_inverter')]
867
- params["plant_conf"]["inverter_is_hybrid"] = options.get("inverter_is_hybrid", params["plant_conf"]["inverter_is_hybrid"])
868
- params["plant_conf"]["compute_curtailment"] = options.get("compute_curtailment", params["plant_conf"]["compute_curtailment"])
869
- params['plant_conf']['Pd_max'] = options.get('battery_discharge_power_max', params['plant_conf']['Pd_max'])
870
- params['plant_conf']['Pc_max'] = options.get('battery_charge_power_max', params['plant_conf']['Pc_max'])
871
- params['plant_conf']['eta_disch'] = options.get('battery_discharge_efficiency', params['plant_conf']['eta_disch'])
872
- params['plant_conf']['eta_ch'] = options.get('battery_charge_efficiency', params['plant_conf']['eta_ch'])
873
- params['plant_conf']['Enom'] = options.get('battery_nominal_energy_capacity', params['plant_conf']['Enom'])
874
- params['plant_conf']['SOCmin'] = options.get('battery_minimum_state_of_charge', params['plant_conf']['SOCmin'])
875
- params['plant_conf']['SOCmax'] = options.get('battery_maximum_state_of_charge', params['plant_conf']['SOCmax'])
876
- params['plant_conf']['SOCtarget'] = options.get('battery_target_state_of_charge', params['plant_conf']['SOCtarget'])
877
- # Check parameter lists have the same amounts as deferrable loads
878
- # If not, set defaults it fill in gaps
879
- if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['def_start_timestep']):
880
- logger.warning("def_start_timestep / list_start_timesteps_of_each_deferrable_load does not match number in num_def_loads, adding default values to parameter")
881
- for x in range(len(params['optim_conf']['def_start_timestep']), params['optim_conf']['num_def_loads']):
882
- params['optim_conf']['def_start_timestep'].append(0)
883
- if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['def_end_timestep']):
884
- logger.warning("def_end_timestep / list_end_timesteps_of_each_deferrable_load does not match number in num_def_loads, adding default values to parameter")
885
- for x in range(len(params['optim_conf']['def_end_timestep']), params['optim_conf']['num_def_loads']):
886
- params['optim_conf']['def_end_timestep'].append(0)
887
- if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['set_def_constant']):
888
- logger.warning("set_def_constant / list_set_deferrable_load_single_constant does not match number in num_def_loads, adding default values to parameter")
889
- for x in range(len(params['optim_conf']['set_def_constant']), params['optim_conf']['num_def_loads']):
890
- params['optim_conf']['set_def_constant'].append(False)
891
- if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['treat_def_as_semi_cont']):
892
- logger.warning("treat_def_as_semi_cont / list_treat_deferrable_load_as_semi_cont does not match number in num_def_loads, adding default values to parameter")
893
- for x in range(len(params['optim_conf']['treat_def_as_semi_cont']), params['optim_conf']['num_def_loads']):
894
- params['optim_conf']['treat_def_as_semi_cont'].append(True)
895
- if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['def_start_penalty']):
896
- logger.warning("def_start_penalty / list_set_deferrable_startup_penalty does not match number in num_def_loads, adding default values to parameter")
897
- for x in range(len(params['optim_conf']['def_start_penalty']), params['optim_conf']['num_def_loads']):
898
- params['optim_conf']['def_start_penalty'].append(0.0)
899
- # days_to_retrieve should be no less then 2
900
- if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['def_total_hours']):
901
- logger.warning("def_total_hours / list_operating_hours_of_each_deferrable_load does not match number in num_def_loads, adding default values to parameter")
902
- for x in range(len(params['optim_conf']['def_total_hours']), params['optim_conf']['num_def_loads']):
903
- params['optim_conf']['def_total_hours'].append(0)
904
- if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['P_deferrable_nom']):
905
- logger.warning("P_deferrable_nom / list_nominal_power_of_deferrable_loads does not match number in num_def_loads, adding default values to parameter")
906
- for x in range(len(params['optim_conf']['P_deferrable_nom']), params['optim_conf']['num_def_loads']):
907
- params['optim_conf']['P_deferrable_nom'].append(0)
908
- # days_to_retrieve should be no less then 2
909
- if params["retrieve_hass_conf"]["days_to_retrieve"] < 2:
910
- params["retrieve_hass_conf"]["days_to_retrieve"] = 2
911
- logger.warning("days_to_retrieve should not be lower then 2, setting days_to_retrieve to 2. Make sure your sensors also have at least 2 days of history")
1986
+ params["retrieve_hass_conf"]["solar_forecast_kwp"] = params_secrets.get(
1987
+ "solar_forecast_kwp", 5
1988
+ )
1989
+ params["params_secrets"]["solar_forecast_kwp"] = params_secrets.get(
1990
+ "solar_forecast_kwp", 5
1991
+ )
912
1992
  else:
913
- params["params_secrets"] = params_secrets
914
- # The params dict
1993
+ logger.warning("Unable to detect weather_forecast_method parameter")
1994
+ # Check whether secret parameters still hold their default values
1995
+ secret_params = [
1996
+ "https://myhass.duckdns.org/",
1997
+ "thatverylongtokenhere",
1998
+ 45.83,
1999
+ 6.86,
2000
+ 4807.8,
2001
+ ]
2002
+ if any(x in secret_params for x in params["retrieve_hass_conf"].values()):
2003
+ logger.warning("Some secret parameters values are still matching their defaults")
2004
+
2005
+ # Set empty dict objects for params passed_data
2006
+ # To be later populated with runtime parameters (treat_runtimeparams)
915
2007
  params["passed_data"] = {
916
2008
  "pv_power_forecast": None,
917
2009
  "load_power_forecast": None,
@@ -920,31 +2012,120 @@ def build_params(params: dict, params_secrets: dict, options: dict, addon: int,
920
2012
  "prediction_horizon": None,
921
2013
  "soc_init": None,
922
2014
  "soc_final": None,
923
- "def_total_hours": None,
924
- "def_start_timestep": None,
925
- "def_end_timestep": None,
2015
+ "operating_hours_of_each_deferrable_load": None,
2016
+ "start_timesteps_of_each_deferrable_load": None,
2017
+ "end_timesteps_of_each_deferrable_load": None,
926
2018
  "alpha": None,
927
2019
  "beta": None,
928
2020
  }
2021
+
929
2022
  return params
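[Editor's note] End to end, the new async configuration pipeline is roughly build_config -> build_secrets -> build_params. A condensed sketch under the assumption that the default data files ship with the package (paths illustrative):

    import asyncio
    import logging
    import pathlib

    from emhass.utils import build_config, build_params, build_secrets

    async def load_params() -> dict:
        root = pathlib.Path("/app")  # illustrative root
        emhass_conf = {
            "data_path": root / "data",
            "associations_path": root / "data/associations.csv",
        }
        logger = logging.getLogger(__name__)
        config = await build_config(
            emhass_conf, logger, defaults_path=str(root / "data/config_defaults.json")
        )
        emhass_conf, secrets = await build_secrets(emhass_conf, logger, no_response=True)
        return await build_params(emhass_conf, secrets, config, logger)

    params = asyncio.run(load_params())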
930
2023
 
931
2024
 
932
- def get_days_list(days_to_retrieve: int) -> pd.date_range:
2025
+ def check_def_loads(
2026
+ num_def_loads: int,
2027
+ parameter: dict,
2028
+ default: str | float,
2029
+ parameter_name: str,
2030
+ logger: logging.Logger,
2031
+ ) -> list:
2032
+ """
2033
+ Check that a parameter list matches the number of deferrable loads; if not, enlarge it to fit.
2034
+
2035
+ :param num_def_loads: Total number of deferrable loads
2036
+ :type num_def_loads: int
2037
+ :param parameter: config dict containing the parameter
2038
+ :type parameter: dict
2039
+ :param default: default value for parameter to pad missing
2040
+ :type default: str | int | float
2041
+ :param parameter_name: name of parameter
2042
+ :type parameter_name: str
2043
+ :param logger: The logger object
2044
+ :type logger: logging.Logger
2045
+ :return: parameter list
2046
+ :rtype: list
2047
+ """
2048
+ if (
2049
+ parameter.get(parameter_name, None) is not None
2050
+ and type(parameter[parameter_name]) is list
2051
+ and num_def_loads > len(parameter[parameter_name])
2052
+ ):
2053
+ logger.warning(
2054
+ parameter_name
2055
+ + " does not match number in num_def_loads, adding default values ("
2056
+ + str(default)
2057
+ + ") to parameter"
2058
+ )
2059
+ for _x in range(len(parameter[parameter_name]), num_def_loads):
2060
+ parameter[parameter_name].append(default)
2061
+ return parameter[parameter_name]
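[Editor's note] A minimal demonstration of the padding behaviour, assuming check_def_loads is imported from emhass.utils (values illustrative):

    import logging

    from emhass.utils import check_def_loads

    optim_conf = {
        "number_of_deferrable_loads": 3,
        "nominal_power_of_deferrable_loads": [3000.0],  # one entry, two short of three
    }
    padded = check_def_loads(
        optim_conf["number_of_deferrable_loads"],
        optim_conf,
        0,
        "nominal_power_of_deferrable_loads",
        logging.getLogger(__name__),
    )
    print(padded)  # [3000.0, 0, 0] -- missing entries filled with the default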
2062
+
2063
+
2064
+ def get_days_list(days_to_retrieve: int) -> pd.DatetimeIndex:
933
2065
  """
934
2066
  Get list of past days from today to days_to_retrieve.
935
2067
 
936
2068
  :param days_to_retrieve: Total number of days to retrieve from the past
937
2069
  :type days_to_retrieve: int
938
2070
  :return: The list of days
939
- :rtype: pd.date_range
2071
+ :rtype: pd.DatetimeIndex
940
2072
 
941
2073
  """
942
- today = datetime.now(timezone.utc).replace(minute=0, second=0, microsecond=0)
2074
+ today = datetime.now(UTC).replace(minute=0, second=0, microsecond=0)
943
2075
  d = (today - timedelta(days=days_to_retrieve)).isoformat()
944
- days_list = pd.date_range(start=d, end=today.isoformat(), freq="D")
2076
+ days_list = pd.date_range(start=d, end=today.isoformat(), freq="D").normalize()
945
2077
  return days_list
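[Editor's note] The appended .normalize() is the behavioural change here: it snaps every entry to midnight, so retrieval windows start at day boundaries even when the function runs mid-hour. Illustration:

    import pandas as pd

    idx = pd.date_range(start="2024-06-01 14:00", periods=3, freq="D")
    print(idx.normalize())
    # DatetimeIndex(['2024-06-01', '2024-06-02', '2024-06-03'], ...) -- times reset to 00:00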
946
2078
 
947
2079
 
2080
+ def add_date_features(
2081
+ data: pd.DataFrame,
2082
+ timestamp: str | None = None,
2083
+ date_features: list[str] | None = None,
2084
+ ) -> pd.DataFrame:
2085
+ """Add date-related features from a DateTimeIndex or a timestamp column.
2086
+
2087
+ :param data: The input DataFrame.
2088
+ :type data: pd.DataFrame
2089
+ :param timestamp: The column containing the timestamp (optional if DataFrame has a DateTimeIndex).
2090
+ :type timestamp: Optional[str]
2091
+ :param date_features: List of date features to extract (default: all).
2092
+ :type date_features: Optional[List[str]]
2093
+ :return: The DataFrame with added date features.
2094
+ :rtype: pd.DataFrame
2095
+ """
2096
+
2097
+ df = copy.deepcopy(data) # Avoid modifying the original DataFrame
2098
+
2099
+ # If no specific features are requested, extract all by default
2100
+ default_features = ["year", "month", "day_of_week", "day_of_year", "day", "hour"]
2101
+ date_features = date_features or default_features
2102
+
2103
+ # Determine whether to use index or a timestamp column
2104
+ if timestamp:
2105
+ df[timestamp] = pd.to_datetime(df[timestamp], utc=True)
2106
+ source = df[timestamp].dt
2107
+ else:
2108
+ if not isinstance(df.index, pd.DatetimeIndex):
2109
+ raise ValueError("DataFrame must have a DateTimeIndex or a valid timestamp column.")
2110
+ source = df.index
2111
+
2112
+ # Extract date features
2113
+ if "year" in date_features:
2114
+ df["year"] = source.year
2115
+ if "month" in date_features:
2116
+ df["month"] = source.month
2117
+ if "day_of_week" in date_features:
2118
+ df["day_of_week"] = source.dayofweek
2119
+ if "day_of_year" in date_features:
2120
+ df["day_of_year"] = source.dayofyear
2121
+ if "day" in date_features:
2122
+ df["day"] = source.day
2123
+ if "hour" in date_features:
2124
+ df["hour"] = source.hour
2125
+
2126
+ return df
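
A small usage sketch on a hypothetical two-row frame, assuming pandas is imported as pd per the module header:

    idx = pd.date_range("2024-01-01", periods=2, freq="h", tz="UTC")
    data = pd.DataFrame({"load": [1.2, 1.5]}, index=idx)
    out = add_date_features(data, date_features=["month", "hour"])
    # out gains 'month' (1, 1) and 'hour' (0, 1) columns alongside 'load'
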
2127
+
2128
+
948
2129
  def set_df_index_freq(df: pd.DataFrame) -> pd.DataFrame:
949
2130
  """
950
2131
  Set the freq of a DataFrame DateTimeIndex.
@@ -961,3 +2142,191 @@ def set_df_index_freq(df: pd.DataFrame) -> pd.DataFrame:
961
2142
  sampling = pd.to_timedelta(np.median(idx_diff))
962
2143
  df = df[~df.index.duplicated()]
963
2144
  return df.asfreq(sampling)
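
A sketch of the inferred-frequency behaviour, assuming (as the visible lines suggest) that idx_diff holds the successive differences of the index; the four-point index below is hypothetical:

    ts = pd.to_datetime(["2024-01-01 00:00", "2024-01-01 00:30",
                         "2024-01-01 01:00", "2024-01-01 02:00"])
    out = set_df_index_freq(pd.DataFrame({"v": [1, 2, 3, 5]}, index=ts))
    # median spacing is 30 minutes, so the index freq becomes 30min and
    # the missing 01:30 slot shows up as NaN
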
2145
+
2146
+
2147
+ def parse_export_time_range(
2148
+ start_time: str,
2149
+ end_time: str | None,
2150
+ time_zone: datetime.tzinfo,
2151
+ logger: logging.Logger,
2152
+ ) -> tuple[pd.Timestamp, pd.Timestamp] | tuple[bool, bool]:
2153
+ """
2154
+ Parse and validate start_time and end_time for export operations.
2155
+
2156
+ :param start_time: Start time string in ISO format
2157
+ :type start_time: str
2158
+ :param end_time: End time string in ISO format (optional)
2159
+ :type end_time: str | None
2160
+ :param time_zone: Timezone for localization
2161
+ :type time_zone: datetime.tzinfo
2162
+ :param logger: Logger object
2163
+ :type logger: logging.Logger
2164
+ :return: Tuple of (start_dt, end_dt) or (False, False) on error
2165
+ :rtype: tuple[pd.Timestamp, pd.Timestamp] | tuple[bool, bool]
2166
+ """
2167
+ try:
2168
+ start_dt = pd.to_datetime(start_time)
2169
+ if start_dt.tz is None:
2170
+ start_dt = start_dt.tz_localize(time_zone)
2171
+ except Exception as e:
2172
+ logger.error(f"Invalid start_time format: {start_time}. Error: {e}")
2173
+ logger.error("Use format like '2024-01-01' or '2024-01-01 00:00:00'")
2174
+ return False, False
2175
+
2176
+ if end_time:
2177
+ try:
2178
+ end_dt = pd.to_datetime(end_time)
2179
+ if end_dt.tz is None:
2180
+ end_dt = end_dt.tz_localize(time_zone)
2181
+ except Exception as e:
2182
+ logger.error(f"Invalid end_time format: {end_time}. Error: {e}")
2183
+ return False, False
2184
+ else:
2185
+ end_dt = pd.Timestamp.now(tz=time_zone)
2186
+ logger.info(f"No end_time specified, using current time: {end_dt}")
2187
+
2188
+ return start_dt, end_dt
2189
+
2190
+
2191
+ def clean_sensor_column_names(df: pd.DataFrame, timestamp_col: str) -> pd.DataFrame:
2192
+ """
2193
+ Clean sensor column names by removing 'sensor.' prefix.
2194
+
2195
+ :param df: Input DataFrame with sensor columns
2196
+ :type df: pd.DataFrame
2197
+ :param timestamp_col: Name of timestamp column to preserve
2198
+ :type timestamp_col: str
2199
+ :return: DataFrame with cleaned column names
2200
+ :rtype: pd.DataFrame
2201
+ """
2202
+ column_mapping = {}
2203
+ for col in df.columns:
2204
+ if col != timestamp_col and col.startswith("sensor."):
2205
+ column_mapping[col] = col.replace("sensor.", "")
2206
+ return df.rename(columns=column_mapping)
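
For example, with hypothetical Home Assistant entity names:

    df = pd.DataFrame(columns=["timestamp", "sensor.power_load", "sensor.power_pv"])
    out = clean_sensor_column_names(df, "timestamp")
    # columns become ['timestamp', 'power_load', 'power_pv']
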
2207
+
2208
+
2209
+ def handle_nan_values(
2210
+ df: pd.DataFrame,
2211
+ handle_nan: str,
2212
+ timestamp_col: str,
2213
+ logger: logging.Logger,
2214
+ ) -> pd.DataFrame:
2215
+ """
2216
+ Handle NaN values in DataFrame according to specified strategy.
2217
+
2218
+ :param df: Input DataFrame
2219
+ :type df: pd.DataFrame
2220
+ :param handle_nan: Strategy for handling NaN values: 'drop', 'fill_zero', 'interpolate', 'forward_fill', 'backward_fill', or 'keep'
2221
+ :type handle_nan: str
2222
+ :param timestamp_col: Name of timestamp column to exclude from processing
2223
+ :type timestamp_col: str
2224
+ :param logger: Logger object
2225
+ :type logger: logging.Logger
2226
+ :return: DataFrame with NaN values handled
2227
+ :rtype: pd.DataFrame
2228
+ """
2229
+ nan_count_before = df.isna().sum().sum()
2230
+ if nan_count_before == 0:
2231
+ return df
2232
+
2233
+ logger.info(f"Found {nan_count_before} NaN values, applying handle_nan method: {handle_nan}")
2234
+
2235
+ if handle_nan == "drop":
2236
+ df = df.dropna()
2237
+ logger.info(f"Dropped rows with NaN. Remaining rows: {len(df)}")
2238
+ elif handle_nan == "fill_zero":
2239
+ # Exclude timestamp_col from fillna to avoid unintended changes
2240
+ fill_cols = [col for col in df.columns if col != timestamp_col]
2241
+ df[fill_cols] = df[fill_cols].fillna(0)
2242
+ logger.info("Filled NaN values with 0 (excluding timestamp)")
2243
+ elif handle_nan == "interpolate":
2244
+ numeric_cols = df.select_dtypes(include=[np.number]).columns
2245
+ # Exclude timestamp_col from interpolation
2246
+ interp_cols = [col for col in numeric_cols if col != timestamp_col]
2247
+ df[interp_cols] = df[interp_cols].interpolate(method="linear", limit_direction="both")
2248
+ df[interp_cols] = df[interp_cols].ffill().bfill()
2249
+ logger.info("Interpolated NaN values (excluding timestamp)")
2250
+ elif handle_nan == "forward_fill":
2251
+ # Exclude timestamp_col from forward fill
2252
+ fill_cols = [col for col in df.columns if col != timestamp_col]
2253
+ df[fill_cols] = df[fill_cols].ffill()
2254
+ logger.info("Forward filled NaN values (excluding timestamp)")
2255
+ elif handle_nan == "backward_fill":
2256
+ # Exclude timestamp_col from backward fill
2257
+ fill_cols = [col for col in df.columns if col != timestamp_col]
2258
+ df[fill_cols] = df[fill_cols].bfill()
2259
+ logger.info("Backward filled NaN values (excluding timestamp)")
2260
+ elif handle_nan == "keep":
2261
+ logger.info("Keeping NaN values as-is")
2262
+ else:
2263
+ logger.warning(f"Unknown handle_nan option '{handle_nan}', keeping NaN values")
2264
+
2265
+ return df
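
A sketch of the 'interpolate' strategy on a hypothetical frame (numpy as np per the module header):

    import logging
    logger = logging.getLogger("example")
    data = pd.DataFrame({
        "timestamp": pd.date_range("2024-01-01", periods=3, freq="h"),
        "power": [100.0, np.nan, 300.0],
    })
    out = handle_nan_values(data, "interpolate", "timestamp", logger)
    # the lone NaN in 'power' is interpolated to 200.0; 'timestamp' is untouched
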
2266
+
2267
+
2268
+ def resample_and_filter_data(
2269
+ df: pd.DataFrame,
2270
+ start_dt: pd.Timestamp,
2271
+ end_dt: pd.Timestamp,
2272
+ resample_freq: str,
2273
+ logger: logging.Logger,
2274
+ ) -> pd.DataFrame | bool:
2275
+ """
2276
+ Filter DataFrame to time range and resample to specified frequency.
2277
+
2278
+ :param df: Input DataFrame with datetime index
2279
+ :type df: pd.DataFrame
2280
+ :param start_dt: Start datetime for filtering
2281
+ :type start_dt: pd.Timestamp
2282
+ :param end_dt: End datetime for filtering
2283
+ :type end_dt: pd.Timestamp
2284
+ :param resample_freq: Resampling frequency string (e.g., '1h', '30min')
2285
+ :type resample_freq: str
2286
+ :param logger: Logger object
2287
+ :type logger: logging.Logger
2288
+ :return: Resampled DataFrame or False on error
2289
+ :rtype: pd.DataFrame | bool
2290
+ """
2291
+ # Validate that DataFrame index is datetime and properly localized
2292
+ if not isinstance(df.index, pd.DatetimeIndex):
2293
+ logger.error(f"DataFrame index must be DatetimeIndex, got {type(df.index).__name__}")
2294
+ return False
2295
+
2296
+ # Check if timezone aware and matches expected timezone
2297
+ if df.index.tz is None:
2298
+ logger.warning("DataFrame index is timezone-naive, localizing to match start/end times")
2299
+ df = df.copy()
2300
+ df.index = df.index.tz_localize(start_dt.tz)
2301
+ elif df.index.tz != start_dt.tz:
2302
+ logger.warning(
2303
+ f"DataFrame timezone ({df.index.tz}) differs from filter timezone ({start_dt.tz}), converting"
2304
+ )
2305
+ df = df.copy()
2306
+ df.index = df.index.tz_convert(start_dt.tz)
2307
+
2308
+ # Filter to exact time range
2309
+ df_filtered = df[(df.index >= start_dt) & (df.index <= end_dt)]
2310
+
2311
+ if df_filtered.empty:
2312
+ logger.error("No data in the specified time range after filtering")
2313
+ return False
2314
+
2315
+ logger.info(f"Retrieved {len(df_filtered)} data points")
2316
+
2317
+ # Resample to specified frequency
2318
+ logger.info(f"Resampling data to frequency: {resample_freq}")
2319
+ try:
2320
+ df_resampled = df_filtered.resample(resample_freq).mean()
2321
+ df_resampled = df_resampled.dropna(how="all")
2322
+
2323
+ if df_resampled.empty:
2324
+ logger.error("No data after resampling. Check frequency and data availability.")
2325
+ return False
2326
+
2327
+ logger.info(f"After resampling: {len(df_resampled)} data points")
2328
+ return df_resampled
2329
+
2330
+ except Exception as e:
2331
+ logger.error(f"Error during resampling: {e}")
2332
+ return False
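
Tying the export helpers together, a hedged end-to-end sketch; df_sensors stands in for a hypothetical tz-aware sensor frame:

    import logging
    logger = logging.getLogger("example")
    start, end = parse_export_time_range("2024-01-01", None, pytz.UTC, logger)
    result = resample_and_filter_data(df_sensors, start, end, "30min", logger)
    if result is False:
        logger.error("Export aborted: empty range or resampling failure")
    else:
        print(result.head())  # 30-minute mean values within [start, end]
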