emhass 0.11.4__py3-none-any.whl → 0.15.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emhass/command_line.py +1481 -811
- emhass/connection_manager.py +108 -0
- emhass/data/associations.csv +37 -2
- emhass/data/cec_inverters.pbz2 +0 -0
- emhass/data/cec_modules.pbz2 +0 -0
- emhass/data/config_defaults.json +53 -49
- emhass/forecast.py +1264 -731
- emhass/img/emhass_icon.png +0 -0
- emhass/machine_learning_forecaster.py +534 -281
- emhass/machine_learning_regressor.py +141 -125
- emhass/optimization.py +1173 -585
- emhass/retrieve_hass.py +958 -263
- emhass/static/advanced.html +7 -0
- emhass/static/configuration_list.html +5 -1
- emhass/static/configuration_script.js +146 -62
- emhass/static/data/param_definitions.json +215 -48
- emhass/static/script.js +58 -26
- emhass/static/style.css +6 -8
- emhass/templates/configuration.html +5 -3
- emhass/templates/index.html +8 -6
- emhass/templates/template.html +4 -5
- emhass/utils.py +1152 -403
- emhass/web_server.py +565 -379
- emhass/websocket_client.py +224 -0
- emhass-0.15.5.dist-info/METADATA +164 -0
- emhass-0.15.5.dist-info/RECORD +34 -0
- {emhass-0.11.4.dist-info → emhass-0.15.5.dist-info}/WHEEL +1 -2
- emhass-0.15.5.dist-info/entry_points.txt +2 -0
- emhass-0.11.4.dist-info/METADATA +0 -666
- emhass-0.11.4.dist-info/RECORD +0 -32
- emhass-0.11.4.dist-info/entry_points.txt +0 -2
- emhass-0.11.4.dist-info/top_level.txt +0 -1
- {emhass-0.11.4.dist-info → emhass-0.15.5.dist-info/licenses}/LICENSE +0 -0
emhass/utils.py
CHANGED
```diff
@@ -1,28 +1,30 @@
-
-
+from __future__ import annotations
+
 import ast
 import copy
 import csv
-import json
 import logging
 import os
 import pathlib
-from datetime import datetime, timedelta
-from typing import
+from datetime import UTC, datetime, timedelta
+from typing import TYPE_CHECKING
 
+import aiofiles
+import aiohttp
 import numpy as np
+import orjson
 import pandas as pd
 import plotly.express as px
 import pytz
 import yaml
-from requests import get
 
-
+if TYPE_CHECKING:
+    from emhass.machine_learning_forecaster import MLForecaster
 
 pd.options.plotting.backend = "plotly"
 
 
-def get_root(file: str, num_parent: Optional[int] = 3) -> str:
+def get_root(file: str, num_parent: int = 3) -> str:
     """
     Get the root absolute path of the working directory.
 
```
```diff
@@ -46,10 +48,10 @@ def get_root(file: str, num_parent: Optional[int] = 3) -> str:
 
 def get_logger(
     fun_name: str,
-    emhass_conf: dict,
-    save_to_file:
-    logging_level:
-) ->
+    emhass_conf: dict[str, pathlib.Path],
+    save_to_file: bool = True,
+    logging_level: str = "DEBUG",
+) -> tuple[logging.Logger, logging.StreamHandler]:
     """
     Create a simple logger object.
 
```
```diff
@@ -89,20 +91,23 @@ def get_logger(
     else:
         logger.setLevel(logging.DEBUG)
         ch.setLevel(logging.DEBUG)
-    formatter = logging.Formatter(
-        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-    )
+    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
     ch.setFormatter(formatter)
     logger.addHandler(ch)
 
     return logger, ch
 
 
+def _get_now() -> datetime:
+    """Helper function to get the current time, for easier mocking."""
+    return datetime.now()
+
+
 def get_forecast_dates(
     freq: int,
     delta_forecast: int,
     time_zone: datetime.tzinfo,
-    timedelta_days:
+    timedelta_days: int | None = 0,
 ) -> pd.core.indexes.datetimes.DatetimeIndex:
     """
     Get the date_range list of the needed future dates using the delta_forecast parameter.
```
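The new `_get_now()` helper introduced in this hunk is a seam for tests: freezing "now" means patching one module attribute instead of `datetime` itself. A minimal standalone sketch of the pattern (the `stamp()` function is illustrative, not from emhass):

```python
# Production code calls _get_now() instead of datetime.now() directly,
# so a test can patch this single seam to freeze time.
from datetime import datetime
from unittest.mock import patch


def _get_now() -> datetime:
    return datetime.now()


def stamp() -> str:
    return f"run at {_get_now():%Y-%m-%d %H:%M}"


with patch(f"{__name__}._get_now", return_value=datetime(2024, 1, 1, 12, 0)):
    assert stamp() == "run at 2024-01-01 12:00"
```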
```diff
@@ -118,15 +123,19 @@ def get_forecast_dates(
 
     """
     freq = pd.to_timedelta(freq, "minutes")
-
-
-    )
-    end_forecast =
-
+    start_time = _get_now()
+
+    start_forecast = pd.Timestamp(start_time, tz=time_zone).replace(microsecond=0).floor(freq=freq)
+    end_forecast = start_forecast + pd.tseries.offsets.DateOffset(days=delta_forecast)
+    final_end_date = end_forecast + pd.tseries.offsets.DateOffset(days=timedelta_days) - freq
+
+    forecast_dates = pd.date_range(
+        start=start_forecast,
+        end=final_end_date,
+        freq=freq,
+        tz=time_zone,
     )
-
-
-
-
-
-
+
+    return [ts.isoformat() for ts in forecast_dates]
+
```
```diff
@@ -133,4 +142,92 @@ def get_forecast_dates(
+def calculate_cop_heatpump(
+    supply_temperature: float,
+    carnot_efficiency: float,
+    outdoor_temperature_forecast: np.ndarray | pd.Series,
+) -> np.ndarray:
+    r"""
+    Calculate heat pump Coefficient of Performance (COP) for each timestep in the prediction horizon.
+
+    The COP is calculated using a Carnot-based formula:
+
+    .. math::
+        COP(h) = \eta_{carnot} \times \frac{T_{supply\_K}}{|T_{supply\_K} - T_{outdoor\_K}(h)|}
+
+    Where temperatures are converted to Kelvin (K = °C + 273.15).
+
+    This formula models real heat pump behavior where COP decreases as the temperature lift
+    (difference between supply and outdoor temperature) increases. The carnot_efficiency factor
+    represents the real-world efficiency as a fraction of the ideal Carnot cycle efficiency.
+
+    :param supply_temperature: The heat pump supply temperature in degrees Celsius (constant value). \
+        Typical values: 30-40°C for underfloor heating, 50-70°C for radiator systems.
+    :type supply_temperature: float
+    :param carnot_efficiency: Real-world efficiency factor as fraction of ideal Carnot cycle. \
+        Typical range: 0.35-0.50 (35-50%). Default in thermal battery config: 0.4 (40%). \
+        Higher values represent more efficient heat pumps.
+    :type carnot_efficiency: float
+    :param outdoor_temperature_forecast: Array of outdoor temperature forecasts in degrees Celsius, \
+        one value per timestep in the prediction horizon.
+    :type outdoor_temperature_forecast: np.ndarray or pd.Series
+    :return: Array of COP values for each timestep, same length as outdoor_temperature_forecast. \
+        Typical COP range: 2-6 for normal operating conditions.
+    :rtype: np.ndarray
+
+    Example:
+        >>> supply_temp = 35.0  # °C, underfloor heating
+        >>> carnot_eff = 0.4  # 40% of ideal Carnot efficiency
+        >>> outdoor_temps = np.array([0.0, 5.0, 10.0, 15.0, 20.0])
+        >>> cops = calculate_cop_heatpump(supply_temp, carnot_eff, outdoor_temps)
+        >>> cops
+        array([3.521..., 4.108..., 4.926..., 6.163..., 8.217...])
+        >>> # At 5°C outdoor: COP = 0.4 × 308.15K / 30K = 4.11
+
+    """
+    # Convert to numpy array if pandas Series
+    if isinstance(outdoor_temperature_forecast, pd.Series):
+        outdoor_temps = outdoor_temperature_forecast.values
+    else:
+        outdoor_temps = np.asarray(outdoor_temperature_forecast)
+
+    # Convert temperatures from Celsius to Kelvin for Carnot formula
+    supply_temperature_kelvin = supply_temperature + 273.15
+    outdoor_temperature_kelvin = outdoor_temps + 273.15
+
+    # Calculate temperature difference (supply - outdoor)
+    # For heating, supply temperature should be higher than outdoor temperature
+    temperature_diff = supply_temperature_kelvin - outdoor_temperature_kelvin
+
+    # Check for non-physical scenarios where outdoor temp >= supply temp
+    # This indicates cooling mode or invalid configuration for heating
+    if np.any(temperature_diff <= 0):
+        # Log warning about non-physical temperature scenario
+        logger = logging.getLogger(__name__)
+        num_invalid = np.sum(temperature_diff <= 0)
+        invalid_indices = np.nonzero(temperature_diff <= 0)[0]
+        logger.warning(
+            f"COP calculation: {num_invalid} timestep(s) have outdoor temperature >= supply temperature. "
+            f"This is non-physical for heating mode. Indices: {invalid_indices.tolist()[:5]}{'...' if len(invalid_indices) > 5 else ''}. "
+            f"Supply temp: {supply_temperature:.1f}°C. Setting COP to 1.0 (direct electric heating) for these periods."
         )
-
-
-
+
+    # Vectorized Carnot-based COP calculation
+    # COP = carnot_efficiency × T_supply / (T_supply - T_outdoor)
+    # For non-physical cases (outdoor >= supply), we use a neutral COP of 1.0
+    # This prevents the optimizer from exploiting unrealistic high COP values
+
+    # Avoid division by zero: use a mask to only calculate for valid cases
+    cop_values = np.ones_like(outdoor_temperature_kelvin)  # Default to 1.0 everywhere
+    valid_mask = temperature_diff > 0
+    if np.any(valid_mask):
+        cop_values[valid_mask] = (
+            carnot_efficiency * supply_temperature_kelvin / temperature_diff[valid_mask]
+        )
+
+    # Apply realistic bounds: minimum 1.0, maximum 8.0
+    # - Lower bound: 1.0 means direct electric heating (no efficiency gain)
+    # - Upper bound: 8.0 is an optimistic but reasonable maximum for modern heat pumps
+    #   (prevents numerical instability from very small temperature differences)
+    cop_values = np.clip(cop_values, 1.0, 8.0)
+
+    return cop_values
+
+
```
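Worked check of the Carnot-based COP formula added above (not part of the diff): at 35 °C supply and 5 °C outdoor, the lift is 30 K, so COP = 0.4 × 308.15 / 30 ≈ 4.11, matching the docstring example.

```python
# Standalone reproduction of the COP arithmetic from calculate_cop_heatpump.
import numpy as np

supply_c, carnot_eff = 35.0, 0.4
outdoor_c = np.array([0.0, 5.0, 10.0])

supply_k = supply_c + 273.15
lift_k = supply_k - (outdoor_c + 273.15)  # temperature lift in Kelvin
cop = np.clip(carnot_eff * supply_k / lift_k, 1.0, 8.0)
print(cop.round(3))  # [3.522 4.109 4.93 ]
```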
```diff
@@ -136,0 +234,37 @@
+def calculate_thermal_loss_signed(
+    outdoor_temperature_forecast: np.ndarray | pd.Series,
+    indoor_temperature: float,
+    base_loss: float,
+) -> np.ndarray:
+    r"""
+    Calculate signed thermal loss factor based on indoor/outdoor temperature difference.
+
+    **SIGN CONVENTION:**
+    - **Positive** (+loss): outdoor < indoor → heat loss, building cools, heating required
+    - **Negative** (-loss): outdoor ≥ indoor → heat gain, building warms passively
+
+    Formula: loss * (1 - 2 * Hot(h)), where Hot(h) = 1 if outdoor ≥ indoor, else 0.
+    Based on Langer & Volling (2020) Equation B.13.
+
+    :param outdoor_temperature_forecast: Outdoor temperature forecast (°C)
+    :type outdoor_temperature_forecast: np.ndarray or pd.Series
+    :param indoor_temperature: Indoor/target temperature threshold (°C)
+    :type indoor_temperature: float
+    :param base_loss: Base thermal loss coefficient in kW
+    :type base_loss: float
+    :return: Signed loss array (positive = heat loss, negative = heat gain)
+    :rtype: np.ndarray
+
+    """
+    # Convert to numpy array if pandas Series
+    if isinstance(outdoor_temperature_forecast, pd.Series):
+        outdoor_temps = outdoor_temperature_forecast.values
+    else:
+        outdoor_temps = np.asarray(outdoor_temperature_forecast)
+
+    # Create binary hot indicator: 1 if outdoor temp >= indoor temp, 0 otherwise
+    hot_indicator = (outdoor_temps >= indoor_temperature).astype(float)
+
+    return base_loss * (1.0 - 2.0 * hot_indicator)
+
+
```
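The sign convention of `calculate_thermal_loss_signed` in isolation (illustrative values, not part of the diff):

```python
# Positive loss when outdoor < indoor (heating needed); negative when the
# building gains heat passively (outdoor >= indoor).
import numpy as np

outdoor = np.array([5.0, 19.9, 20.0, 25.0])
indoor, base_loss = 20.0, 1.5  # °C threshold, kW

hot = (outdoor >= indoor).astype(float)
print(base_loss * (1.0 - 2.0 * hot))  # [ 1.5  1.5 -1.5 -1.5]
```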
```diff
@@ -137,6 +271,71 @@ def get_forecast_dates(
+def calculate_heating_demand(
+    specific_heating_demand: float,
+    floor_area: float,
+    outdoor_temperature_forecast: np.ndarray | pd.Series,
+    base_temperature: float = 18.0,
+    annual_reference_hdd: float = 3000.0,
+    optimization_time_step: int | None = None,
+) -> np.ndarray:
+    """
+    Calculate heating demand per timestep based on heating degree days method.
+
+    Uses heating degree days (HDD) to calculate heating demand based on outdoor temperature
+    forecast, specific heating demand, and floor area. The specific heating demand should be
+    calibrated to the annual reference HDD value.
+
+    :param specific_heating_demand: Specific heating demand in kWh/m²/year (calibrated to annual_reference_hdd)
+    :type specific_heating_demand: float
+    :param floor_area: Floor area in m²
+    :type floor_area: float
+    :param outdoor_temperature_forecast: Outdoor temperature forecast in °C for each timestep
+    :type outdoor_temperature_forecast: np.ndarray | pd.Series
+    :param base_temperature: Base temperature for HDD calculation in °C, defaults to 18.0 (European standard)
+    :type base_temperature: float, optional
+    :param annual_reference_hdd: Annual reference HDD value for normalization, defaults to 3000.0 (Central Europe)
+    :type annual_reference_hdd: float, optional
+    :param optimization_time_step: Optimization time step in minutes. If None, automatically infers from
+        pandas Series DatetimeIndex frequency. Falls back to 30 minutes if not inferrable.
+    :type optimization_time_step: int | None, optional
+    :return: Array of heating demand values (kWh) per timestep
+    :rtype: np.ndarray
+
+    """
+
+    # Convert outdoor temperature forecast to numpy array if pandas Series
+    outdoor_temps = (
+        outdoor_temperature_forecast.values
+        if isinstance(outdoor_temperature_forecast, pd.Series)
+        else np.asarray(outdoor_temperature_forecast)
     )
-    return forecast_dates
 
+    # Calculate heating degree days per timestep
+    # HDD = max(base_temperature - outdoor_temperature, 0)
+    hdd_per_timestep = np.maximum(base_temperature - outdoor_temps, 0.0)
 
-
-
+    # Determine timestep duration in hours
+    if optimization_time_step is None:
+        # Try to infer from pandas Series DatetimeIndex
+        if isinstance(outdoor_temperature_forecast, pd.Series) and isinstance(
+            outdoor_temperature_forecast.index, pd.DatetimeIndex
+        ):
+            if len(outdoor_temperature_forecast.index) > 1:
+                freq_minutes = (
+                    outdoor_temperature_forecast.index[1] - outdoor_temperature_forecast.index[0]
+                ).total_seconds() / 60.0
+                hours_per_timestep = freq_minutes / 60.0
+            else:
+                # Single datapoint, fallback to default 30 min
+                hours_per_timestep = 0.5
+        else:
+            # Cannot infer, use default 30 minutes
+            hours_per_timestep = 0.5
+    else:
+        # Convert minutes to hours
+        hours_per_timestep = optimization_time_step / 60.0
+
+    # Scale HDD to timestep duration (standard HDD is per 24 hours)
+    hdd_per_timestep_scaled = hdd_per_timestep * (hours_per_timestep / 24.0)
+
+    return specific_heating_demand * floor_area * (hdd_per_timestep_scaled / annual_reference_hdd)
+
+
```
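Worked instance of the HDD scaling in `calculate_heating_demand` (illustrative figures, not part of the diff): a 30-minute step contributes `hdd * 0.5 / 24` degree-days, normalized by the annual reference HDD.

```python
# One half-hour timestep at 3 °C outdoors with an 18 °C base temperature.
specific_demand = 100.0  # kWh/m²/year, calibrated to 3000 HDD/year
floor_area = 120.0  # m²

hdd = max(18.0 - 3.0, 0.0)  # 15 degree-days per full day
hdd_step = hdd * (0.5 / 24.0)  # 0.3125 for a 30-minute step
demand_kwh = specific_demand * floor_area * hdd_step / 3000.0
print(demand_kwh)  # 1.25 kWh for this timestep
```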
```diff
@@ -142,0 +342,118 @@
+def calculate_heating_demand_physics(
+    u_value: float,
+    envelope_area: float,
+    ventilation_rate: float,
+    heated_volume: float,
+    indoor_target_temperature: float,
+    outdoor_temperature_forecast: np.ndarray | pd.Series,
+    optimization_time_step: int,
+    solar_irradiance_forecast: np.ndarray | pd.Series | None = None,
+    window_area: float | None = None,
+    shgc: float = 0.6,
+) -> np.ndarray:
+    """
+    Calculate heating demand per timestep based on building physics heat loss model.
+
+    More accurate than HDD method as it directly calculates transmission and ventilation
+    losses based on building thermal properties. Optionally accounts for solar gains
+    through windows to reduce heating demand.
+
+    :param u_value: Overall thermal transmittance (U-value) in W/(m²·K). Typical values:
+        - 0.2-0.3: Well-insulated modern building
+        - 0.4-0.6: Average insulation
+        - 0.8-1.2: Poor insulation / old building
+    :type u_value: float
+    :param envelope_area: Total building envelope area (walls + roof + floor + windows) in m²
+    :type envelope_area: float
+    :param ventilation_rate: Air changes per hour (ACH). Typical values:
+        - 0.3-0.5: Well-sealed modern building with controlled ventilation
+        - 0.5-1.0: Average building
+        - 1.0-2.0: Leaky old building
+    :type ventilation_rate: float
+    :param heated_volume: Total heated volume in m³
+    :type heated_volume: float
+    :param indoor_target_temperature: Target indoor temperature in °C
+    :type indoor_target_temperature: float
+    :param outdoor_temperature_forecast: Outdoor temperature forecast in °C for each timestep
+    :type outdoor_temperature_forecast: np.ndarray | pd.Series
+    :param optimization_time_step: Optimization time step in minutes
+    :type optimization_time_step: int
+    :param solar_irradiance_forecast: Global Horizontal Irradiance (GHI) in W/m² for each timestep.
+        If provided along with window_area, solar gains will be subtracted from heating demand.
+    :type solar_irradiance_forecast: np.ndarray | pd.Series | None, optional
+    :param window_area: Total window area in m². If provided along with solar_irradiance_forecast,
+        solar gains will reduce heating demand. Typical values: 15-25% of floor area.
+    :type window_area: float | None, optional
+    :param shgc: Solar Heat Gain Coefficient (dimensionless, 0-1). Fraction of solar radiation
+        that becomes heat inside the building. Typical values:
+        - 0.5-0.6: Modern low-e double-glazed windows
+        - 0.6-0.7: Standard double-glazed windows
+        - 0.7-0.8: Single-glazed windows
+        Default: 0.6
+    :type shgc: float, optional
+    :return: Array of heating demand values (kWh) per timestep
+    :rtype: np.ndarray
+
+    Example:
+        >>> outdoor_temps = np.array([5, 8, 12, 15])
+        >>> ghi = np.array([0, 100, 400, 600])  # W/m²
+        >>> demand = calculate_heating_demand_physics(
+        ...     u_value=0.3,
+        ...     envelope_area=400,
+        ...     ventilation_rate=0.5,
+        ...     heated_volume=250,
+        ...     indoor_target_temperature=20,
+        ...     outdoor_temperature_forecast=outdoor_temps,
+        ...     optimization_time_step=30,
+        ...     solar_irradiance_forecast=ghi,
+        ...     window_area=50,
+        ...     shgc=0.6
+        ... )
+    """
+
+    # Convert outdoor temperature forecast to numpy array if pandas Series
+    outdoor_temps = (
+        outdoor_temperature_forecast.values
+        if isinstance(outdoor_temperature_forecast, pd.Series)
+        else np.asarray(outdoor_temperature_forecast)
+    )
+
+    # Calculate temperature difference (only heat when outdoor < indoor)
+    temp_diff = indoor_target_temperature - outdoor_temps
+    temp_diff = np.maximum(temp_diff, 0.0)
+
+    # Transmission losses: Q_trans = U * A * ΔT (W to kW)
+    transmission_loss_kw = u_value * envelope_area * temp_diff / 1000.0
+
+    # Ventilation losses: Q_vent = V * ρ * c * n * ΔT / 3600
+    # ρ = air density (kg/m³), c = specific heat capacity (kJ/(kg·K)), n = ACH
+    air_density = 1.2  # kg/m³ at 20°C
+    air_heat_capacity = 1.005  # kJ/(kg·K)
+    ventilation_loss_kw = (
+        ventilation_rate * heated_volume * air_density * air_heat_capacity * temp_diff / 3600.0
+    )
+
+    # Total heat loss in kW
+    total_loss_kw = transmission_loss_kw + ventilation_loss_kw
+
+    # Calculate solar gains if irradiance and window area are provided
+    if solar_irradiance_forecast is not None and window_area is not None:
+        # Convert solar irradiance to numpy array if pandas Series
+        solar_irradiance = (
+            solar_irradiance_forecast.values
+            if isinstance(solar_irradiance_forecast, pd.Series)
+            else np.asarray(solar_irradiance_forecast)
+        )
+
+        # Solar gains: Q_solar = window_area * SHGC * GHI (W to kW)
+        # GHI is in W/m², so multiply by window_area (m²) gives W, then divide by 1000 for kW
+        solar_gains_kw = window_area * shgc * solar_irradiance / 1000.0
+
+        # Subtract solar gains from heat loss (but never go negative)
+        total_loss_kw = np.maximum(total_loss_kw - solar_gains_kw, 0.0)
+
+    # Convert to kWh for the timestep
+    hours_per_timestep = optimization_time_step / 60.0
+    return total_loss_kw * hours_per_timestep
+
+
```
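Numeric spot-check of the physics model in `calculate_heating_demand_physics` for a single timestep (illustrative building values, not part of the diff):

```python
# ΔT = 15 K, U = 0.3 W/(m²·K) over 400 m² gives 1.8 kW transmission loss;
# 0.5 ACH over 250 m³ of air (ρ = 1.2 kg/m³, c = 1.005 kJ/(kg·K)) gives ~0.63 kW.
u_value, envelope_area = 0.3, 400.0
ach, volume = 0.5, 250.0
delta_t = 15.0

q_trans_kw = u_value * envelope_area * delta_t / 1000.0  # 1.8
q_vent_kw = ach * volume * 1.2 * 1.005 * delta_t / 3600.0  # ~0.628
print(round(q_trans_kw, 3), round(q_vent_kw, 3))
# A 30-minute step then costs (1.8 + 0.628) * 0.5 ≈ 1.21 kWh before solar gains.
```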
```diff
@@ -143,4 +460,72 @@
+def update_params_with_ha_config(
     params: str,
-
-
-
+    ha_config: dict,
+) -> dict:
+    """
+    Update the params with the Home Assistant configuration.
+
+    Parameters
+    ----------
+    params : str
+        The serialized params.
+    ha_config : dict
+        The Home Assistant configuration.
+
+    Returns
+    -------
+    dict
+        The updated params.
+    """
+    # Load serialized params
+    params = orjson.loads(params)
+    # Update params
+    currency_to_symbol = {
+        "EUR": "€",
+        "USD": "$",
+        "GBP": "£",
+        "YEN": "¥",
+        "JPY": "¥",
+        "AUD": "A$",
+        "CAD": "C$",
+        "CHF": "CHF",  # Swiss Franc has no special symbol
+        "CNY": "¥",
+        "INR": "₹",
+        "CZK": "Kč",
+        "BGN": "лв",
+        "DKK": "kr",
+        "HUF": "Ft",
+        "PLN": "zł",
+        "RON": "Leu",
+        "SEK": "kr",
+        "TRY": "Lira",
+        "VEF": "Bolivar",
+        "VND": "Dong",
+        "THB": "Baht",
+        "SGD": "S$",
+        "IDR": "Roepia",
+        "ZAR": "Rand",
+        # Add more as needed
+    }
+    if "currency" in ha_config.keys():
+        ha_config["currency"] = currency_to_symbol.get(ha_config["currency"], "Unknown")
+    else:
+        ha_config["currency"] = "€"
+
+    updated_passed_dict = {
+        "custom_cost_fun_id": {
+            "unit_of_measurement": ha_config["currency"],
+        },
+        "custom_unit_load_cost_id": {
+            "unit_of_measurement": f"{ha_config['currency']}/kWh",
+        },
+        "custom_unit_prod_price_id": {
+            "unit_of_measurement": f"{ha_config['currency']}/kWh",
+        },
+    }
+    for key, value in updated_passed_dict.items():
+        params["passed_data"][key]["unit_of_measurement"] = value["unit_of_measurement"]
+    # Serialize the final params
+    params = orjson.dumps(params, default=str).decode("utf-8")
+    return params
+
+
```
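The currency handling in `update_params_with_ha_config` reduces to a dictionary lookup with two fallbacks; in isolation:

```python
# ISO currency code to display symbol: unmapped codes become "Unknown",
# and a missing currency key falls back to "€".
currency_to_symbol = {"EUR": "€", "USD": "$", "GBP": "£"}

ha_config = {"currency": "USD"}
if "currency" in ha_config:
    symbol = currency_to_symbol.get(ha_config["currency"], "Unknown")
else:
    symbol = "€"
print(symbol, f"{symbol}/kWh")  # $ $/kWh
```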
```diff
@@ -147,7 +532,13 @@
+async def treat_runtimeparams(
+    runtimeparams: str,
+    params: dict[str, dict],
+    retrieve_hass_conf: dict[str, str],
+    optim_conf: dict[str, str],
+    plant_conf: dict[str, str],
     set_type: str,
     logger: logging.Logger,
-    emhass_conf: dict,
-) ->
+    emhass_conf: dict[str, pathlib.Path],
+) -> tuple[str, dict[str, dict]]:
     """
     Treat the passed optimization runtime parameters.
 
```
```diff
@@ -172,9 +563,9 @@ def treat_runtimeparams(
 
     """
     # Check if passed params is a dict
-    if (params
+    if (params is not None) and (params != "null"):
         if type(params) is str:
-            params =
+            params = orjson.loads(params)
     else:
         params = {}
 
@@ -183,82 +574,109 @@ def treat_runtimeparams(
     params["optim_conf"].update(optim_conf)
     params["plant_conf"].update(plant_conf)
 
+    # Check defaults on HA retrieved config
+    default_currency_unit = "€"
+    default_temperature_unit = "°C"
+
     # Some default data needed
     custom_deferrable_forecast_id = []
     custom_predicted_temperature_id = []
+    custom_heating_demand_id = []
     for k in range(params["optim_conf"]["number_of_deferrable_loads"]):
         custom_deferrable_forecast_id.append(
             {
-                "entity_id": "sensor.p_deferrable{}"
+                "entity_id": f"sensor.p_deferrable{k}",
+                "device_class": "power",
                 "unit_of_measurement": "W",
-                "friendly_name": "Deferrable Load {}"
+                "friendly_name": f"Deferrable Load {k}",
             }
         )
         custom_predicted_temperature_id.append(
             {
-                "entity_id": "sensor.temp_predicted{}"
-                "
-                "
+                "entity_id": f"sensor.temp_predicted{k}",
+                "device_class": "temperature",
+                "unit_of_measurement": default_temperature_unit,
+                "friendly_name": f"Predicted temperature {k}",
+            }
+        )
+        custom_heating_demand_id.append(
+            {
+                "entity_id": f"sensor.heating_demand{k}",
+                "device_class": "energy",
+                "unit_of_measurement": "kWh",
+                "friendly_name": f"Heating demand {k}",
             }
         )
     default_passed_dict = {
         "custom_pv_forecast_id": {
             "entity_id": "sensor.p_pv_forecast",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "PV Power Forecast",
         },
         "custom_load_forecast_id": {
             "entity_id": "sensor.p_load_forecast",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "Load Power Forecast",
         },
         "custom_pv_curtailment_id": {
             "entity_id": "sensor.p_pv_curtailment",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "PV Power Curtailment",
         },
         "custom_hybrid_inverter_id": {
             "entity_id": "sensor.p_hybrid_inverter",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "PV Hybrid Inverter",
         },
         "custom_batt_forecast_id": {
             "entity_id": "sensor.p_batt_forecast",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "Battery Power Forecast",
         },
         "custom_batt_soc_forecast_id": {
             "entity_id": "sensor.soc_batt_forecast",
+            "device_class": "battery",
             "unit_of_measurement": "%",
             "friendly_name": "Battery SOC Forecast",
         },
         "custom_grid_forecast_id": {
             "entity_id": "sensor.p_grid_forecast",
+            "device_class": "power",
             "unit_of_measurement": "W",
             "friendly_name": "Grid Power Forecast",
         },
         "custom_cost_fun_id": {
             "entity_id": "sensor.total_cost_fun_value",
-            "
+            "device_class": "monetary",
+            "unit_of_measurement": default_currency_unit,
             "friendly_name": "Total cost function value",
         },
         "custom_optim_status_id": {
             "entity_id": "sensor.optim_status",
+            "device_class": "",
             "unit_of_measurement": "",
             "friendly_name": "EMHASS optimization status",
         },
         "custom_unit_load_cost_id": {
             "entity_id": "sensor.unit_load_cost",
-            "
+            "device_class": "monetary",
+            "unit_of_measurement": f"{default_currency_unit}/kWh",
             "friendly_name": "Unit Load Cost",
         },
         "custom_unit_prod_price_id": {
             "entity_id": "sensor.unit_prod_price",
-            "
+            "device_class": "monetary",
+            "unit_of_measurement": f"{default_currency_unit}/kWh",
             "friendly_name": "Unit Prod Price",
         },
         "custom_deferrable_forecast_id": custom_deferrable_forecast_id,
         "custom_predicted_temperature_id": custom_predicted_temperature_id,
+        "custom_heating_demand_id": custom_heating_demand_id,
         "publish_prefix": "",
     }
     if "passed_data" in params.keys():
```
```diff
@@ -270,13 +688,14 @@ def treat_runtimeparams(
     # If any runtime parameters where passed in action call
     if runtimeparams is not None:
         if type(runtimeparams) is str:
-            runtimeparams =
+            runtimeparams = orjson.loads(runtimeparams)
 
         # Loop though parameters stored in association file, Check to see if any stored in runtime
         # If true, set runtime parameter to params
         if emhass_conf["associations_path"].exists():
-            with emhass_conf["associations_path"]
-
+            async with aiofiles.open(emhass_conf["associations_path"]) as data:
+                content = await data.read()
+                associations = list(csv.reader(content.splitlines(), delimiter=","))
             # Association file key reference
             # association[0] = config categories
             # association[1] = legacy parameter name
```
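The blocking file read is replaced above by `aiofiles`; the same read-then-parse pattern standalone (the path is illustrative):

```python
# Read a CSV asynchronously, then parse it with the stdlib csv module.
import asyncio
import csv

import aiofiles


async def read_associations(path: str) -> list[list[str]]:
    async with aiofiles.open(path) as f:
        content = await f.read()
    return list(csv.reader(content.splitlines(), delimiter=","))


# rows = asyncio.run(read_associations("associations.csv"))
```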
```diff
@@ -285,14 +704,10 @@ def treat_runtimeparams(
             for association in associations:
                 # Check parameter name exists in runtime
                 if runtimeparams.get(association[2], None) is not None:
-                    params[association[0]][association[2]] = runtimeparams[
-                        association[2]
-                    ]
+                    params[association[0]][association[2]] = runtimeparams[association[2]]
                 # Check Legacy parameter name runtime
                 elif runtimeparams.get(association[1], None) is not None:
-                    params[association[0]][association[2]] = runtimeparams[
-                        association[1]
-                    ]
+                    params[association[0]][association[2]] = runtimeparams[association[1]]
         else:
             logger.warning(
                 "Cant find associations file (associations.csv) in: "
@@ -300,13 +715,14 @@ def treat_runtimeparams(
             )
 
         # Generate forecast_dates
-        if
-
-
-
-
-            runtimeparams.get("optimization_time_step", runtimeparams.get("freq"))
+        # Force update optimization_time_step if present in runtimeparams
+        if "optimization_time_step" in runtimeparams:
+            optimization_time_step = int(runtimeparams["optimization_time_step"])
+            params["retrieve_hass_conf"]["optimization_time_step"] = pd.to_timedelta(
+                optimization_time_step, "minutes"
             )
+        elif "freq" in runtimeparams:
+            optimization_time_step = int(runtimeparams["freq"])
             params["retrieve_hass_conf"]["optimization_time_step"] = pd.to_timedelta(
                 optimization_time_step, "minutes"
             )
@@ -314,18 +730,35 @@ def treat_runtimeparams(
             optimization_time_step = int(
                 params["retrieve_hass_conf"]["optimization_time_step"].seconds / 60.0
             )
+
         if (
             runtimeparams.get("delta_forecast_daily", None) is not None
             or runtimeparams.get("delta_forecast", None) is not None
         ):
-            delta_forecast
-
-
+            # Use old param name delta_forecast (if provided) for backwards compatibility
+            delta_forecast = runtimeparams.get("delta_forecast", None)
+            # Prefer new param name delta_forecast_daily
+            delta_forecast = runtimeparams.get("delta_forecast_daily", delta_forecast)
+            # Ensure delta_forecast is numeric and at least 1 day
+            if delta_forecast is None:
+                logger.warning("delta_forecast_daily is missing so defaulting to 1 day")
+                delta_forecast = 1
+            else:
+                try:
+                    delta_forecast = int(delta_forecast)
+                except ValueError:
+                    logger.warning(
+                        "Invalid delta_forecast_daily value (%s) so defaulting to 1 day",
+                        delta_forecast,
+                    )
+                    delta_forecast = 1
+            if delta_forecast <= 0:
+                logger.warning(
+                    "delta_forecast_daily is too low (%s) so defaulting to 1 day",
+                    delta_forecast,
                 )
-
-            params["optim_conf"]["delta_forecast_daily"] = pd.Timedelta(
-                days=optim_conf["delta_forecast_daily"]
-            )
+                delta_forecast = 1
+            params["optim_conf"]["delta_forecast_daily"] = pd.Timedelta(days=delta_forecast)
         else:
             delta_forecast = int(params["optim_conf"]["delta_forecast_daily"].days)
         if runtimeparams.get("time_zone", None) is not None:
```
```diff
@@ -334,9 +767,7 @@ def treat_runtimeparams(
         else:
             time_zone = params["retrieve_hass_conf"]["time_zone"]
 
-        forecast_dates = get_forecast_dates(
-            optimization_time_step, delta_forecast, time_zone
-        )
+        forecast_dates = get_forecast_dates(optimization_time_step, delta_forecast, time_zone)
 
         # Add runtime exclusive (not in config) parameters to params
         # regressor-model-fit
@@ -376,6 +807,25 @@ def treat_runtimeparams(
             target = runtimeparams["target"]
             params["passed_data"]["target"] = target
 
+        # export-influxdb-to-csv
+        if set_type == "export-influxdb-to-csv":
+            # Use dictionary comprehension to simplify parameter assignment
+            export_keys = {
+                k: runtimeparams[k]
+                for k in (
+                    "sensor_list",
+                    "csv_filename",
+                    "start_time",
+                    "end_time",
+                    "resample_freq",
+                    "timestamp_col_name",
+                    "decimal_places",
+                    "handle_nan",
+                )
+                if k in runtimeparams
+            }
+            params["passed_data"].update(export_keys)
+
         # MPC control case
         if set_type == "naive-mpc-optim":
             if "prediction_horizon" not in runtimeparams.keys():
```
```diff
@@ -387,16 +837,43 @@ def treat_runtimeparams(
                 soc_init = params["plant_conf"]["battery_target_state_of_charge"]
             else:
                 soc_init = runtimeparams["soc_init"]
+            if soc_init < params["plant_conf"]["battery_minimum_state_of_charge"]:
+                logger.warning(
+                    f"Passed soc_init={soc_init} is lower than soc_min={params['plant_conf']['battery_minimum_state_of_charge']}, setting soc_init=soc_min"
+                )
+                soc_init = params["plant_conf"]["battery_minimum_state_of_charge"]
+            if soc_init > params["plant_conf"]["battery_maximum_state_of_charge"]:
+                logger.warning(
+                    f"Passed soc_init={soc_init} is greater than soc_max={params['plant_conf']['battery_maximum_state_of_charge']}, setting soc_init=soc_max"
+                )
+                soc_init = params["plant_conf"]["battery_maximum_state_of_charge"]
             params["passed_data"]["soc_init"] = soc_init
             if "soc_final" not in runtimeparams.keys():
                 soc_final = params["plant_conf"]["battery_target_state_of_charge"]
             else:
                 soc_final = runtimeparams["soc_final"]
+            if soc_final < params["plant_conf"]["battery_minimum_state_of_charge"]:
+                logger.warning(
+                    f"Passed soc_final={soc_final} is lower than soc_min={params['plant_conf']['battery_minimum_state_of_charge']}, setting soc_final=soc_min"
+                )
+                soc_final = params["plant_conf"]["battery_minimum_state_of_charge"]
+            if soc_final > params["plant_conf"]["battery_maximum_state_of_charge"]:
+                logger.warning(
+                    f"Passed soc_final={soc_final} is greater than soc_max={params['plant_conf']['battery_maximum_state_of_charge']}, setting soc_final=soc_max"
+                )
+                soc_final = params["plant_conf"]["battery_maximum_state_of_charge"]
             params["passed_data"]["soc_final"] = soc_final
-
-
-
-
+            if "operating_timesteps_of_each_deferrable_load" in runtimeparams.keys():
+                params["passed_data"]["operating_timesteps_of_each_deferrable_load"] = (
+                    runtimeparams["operating_timesteps_of_each_deferrable_load"]
+                )
+                params["optim_conf"]["operating_timesteps_of_each_deferrable_load"] = runtimeparams[
+                    "operating_timesteps_of_each_deferrable_load"
+                ]
+            if "operating_hours_of_each_deferrable_load" in params["optim_conf"].keys():
+                params["passed_data"]["operating_hours_of_each_deferrable_load"] = params[
+                    "optim_conf"
+                ]["operating_hours_of_each_deferrable_load"]
             params["passed_data"]["start_timesteps_of_each_deferrable_load"] = params[
                 "optim_conf"
             ].get("start_timesteps_of_each_deferrable_load", None)
```
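The new soc_init/soc_final guards amount to clamping the passed value into `[soc_min, soc_max]`; equivalently:

```python
# Equivalent one-liner for the SOC bounds checks added above.
def clamp_soc(value: float, soc_min: float, soc_max: float) -> float:
    return min(max(value, soc_min), soc_max)


print(clamp_soc(0.05, 0.1, 0.9))  # 0.1 (raised to soc_min)
print(clamp_soc(0.95, 0.1, 0.9))  # 0.9 (capped at soc_max)
print(clamp_soc(0.50, 0.1, 0.9))  # 0.5 (unchanged)
```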
```diff
@@ -405,38 +882,33 @@ def treat_runtimeparams(
             ].get("end_timesteps_of_each_deferrable_load", None)
 
             forecast_dates = copy.deepcopy(forecast_dates)[0:prediction_horizon]
-
-            # Load the default config
-            if "def_load_config" in runtimeparams:
-                params["optim_conf"]["def_load_config"] = runtimeparams[
-                    "def_load_config"
-                ]
-            if "def_load_config" in params["optim_conf"]:
-                for k in range(len(params["optim_conf"]["def_load_config"])):
-                    if "thermal_config" in params["optim_conf"]["def_load_config"][k]:
-                        if (
-                            "heater_desired_temperatures" in runtimeparams
-                            and len(runtimeparams["heater_desired_temperatures"]) > k
-                        ):
-                            params["optim_conf"]["def_load_config"][k][
-                                "thermal_config"
-                            ]["desired_temperatures"] = runtimeparams[
-                                "heater_desired_temperatures"
-                            ][k]
-                        if (
-                            "heater_start_temperatures" in runtimeparams
-                            and len(runtimeparams["heater_start_temperatures"]) > k
-                        ):
-                            params["optim_conf"]["def_load_config"][k][
-                                "thermal_config"
-                            ]["start_temperature"] = runtimeparams[
-                                "heater_start_temperatures"
-                            ][k]
         else:
             params["passed_data"]["prediction_horizon"] = None
             params["passed_data"]["soc_init"] = None
             params["passed_data"]["soc_final"] = None
 
+        # Parsing the thermal model parameters
+        # Load the default config
+        if "def_load_config" in runtimeparams:
+            params["optim_conf"]["def_load_config"] = runtimeparams["def_load_config"]
+        if "def_load_config" in params["optim_conf"]:
+            for k in range(len(params["optim_conf"]["def_load_config"])):
+                if "thermal_config" in params["optim_conf"]["def_load_config"][k]:
+                    if (
+                        "heater_desired_temperatures" in runtimeparams
+                        and len(runtimeparams["heater_desired_temperatures"]) > k
+                    ):
+                        params["optim_conf"]["def_load_config"][k]["thermal_config"][
+                            "desired_temperatures"
+                        ] = runtimeparams["heater_desired_temperatures"][k]
+                    if (
+                        "heater_start_temperatures" in runtimeparams
+                        and len(runtimeparams["heater_start_temperatures"]) > k
+                    ):
+                        params["optim_conf"]["def_load_config"][k]["thermal_config"][
+                            "start_temperature"
+                        ] = runtimeparams["heater_start_temperatures"][k]
+
         # Treat passed forecast data lists
         list_forecast_key = [
             "pv_power_forecast",
```
```diff
@@ -456,28 +928,45 @@ def treat_runtimeparams(
         # Loop forecasts, check if value is a list and greater than or equal to forecast_dates
         for method, forecast_key in enumerate(list_forecast_key):
             if forecast_key in runtimeparams.keys():
-
-
-
-
+                forecast_input = runtimeparams[forecast_key]
+                if isinstance(forecast_input, dict):
+                    forecast_data_df = pd.DataFrame.from_dict(
+                        forecast_input, orient="index"
+                    ).reset_index()
+                    forecast_data_df.columns = ["time", "value"]
+                    forecast_data_df["time"] = pd.to_datetime(
+                        forecast_data_df["time"], format="ISO8601", utc=True
+                    ).dt.tz_convert(time_zone)
+
+                    # align index with forecast_dates
+                    forecast_data_df = (
+                        forecast_data_df.resample(
+                            pd.to_timedelta(optimization_time_step, "minutes"),
+                            on="time",
+                        )
+                        .aggregate({"value": "mean"})
+                        .reindex(forecast_dates, method="nearest")
+                    )
+                    forecast_data_df["value"] = forecast_data_df["value"].ffill().bfill()
+                    forecast_input = forecast_data_df["value"].tolist()
+                if isinstance(forecast_input, list) and len(forecast_input) >= len(forecast_dates):
+                    params["passed_data"][forecast_key] = forecast_input
                     params["optim_conf"][forecast_methods[method]] = "list"
                 else:
                     logger.error(
-                        f"ERROR: The passed data is either
+                        f"ERROR: The passed data is either the wrong type or the length is not correct, length should be {str(len(forecast_dates))}"
                     )
                     logger.error(
                         f"Passed type is {str(type(runtimeparams[forecast_key]))} and length is {str(len(runtimeparams[forecast_key]))}"
                     )
                 # Check if string contains list, if so extract
-                if isinstance(
-
-
-
-
+                if isinstance(forecast_input, str) and isinstance(
+                    ast.literal_eval(forecast_input), list
+                ):
+                    forecast_input = ast.literal_eval(forecast_input)
+                    runtimeparams[forecast_key] = forecast_input
                     list_non_digits = [
-                        x
-                        for x in runtimeparams[forecast_key]
-                        if not (isinstance(x, int) or isinstance(x, float))
+                        x for x in forecast_input if not (isinstance(x, int) or isinstance(x, float))
                     ]
                     if len(list_non_digits) > 0:
                         logger.warning(
```
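The new dict branch lets a forecast be passed as ISO-8601 timestamps mapped to values; the same pandas steps in isolation (assumes pandas ≥ 2.0 for `format="ISO8601"`):

```python
import pandas as pd

forecast = {
    "2024-01-01T00:00:00+00:00": 100.0,
    "2024-01-01T00:30:00+00:00": 200.0,
    "2024-01-01T01:00:00+00:00": 300.0,
}
df = pd.DataFrame.from_dict(forecast, orient="index").reset_index()
df.columns = ["time", "value"]
df["time"] = pd.to_datetime(df["time"], format="ISO8601", utc=True)

# Resample onto a 30-minute optimization grid and align to the forecast dates.
grid = pd.date_range("2024-01-01T00:00:00+00:00", periods=3, freq="30min")
out = (
    df.resample(pd.to_timedelta(30, "minutes"), on="time")
    .aggregate({"value": "mean"})
    .reindex(grid, method="nearest")
)
print(out["value"].tolist())  # [100.0, 200.0, 300.0]
```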
```diff
@@ -490,6 +979,12 @@ def treat_runtimeparams(
         else:
             params["passed_data"][forecast_key] = None
 
+        # Explicitly handle historic_days_to_retrieve from runtimeparams BEFORE validation
+        if "historic_days_to_retrieve" in runtimeparams:
+            params["retrieve_hass_conf"]["historic_days_to_retrieve"] = int(
+                runtimeparams["historic_days_to_retrieve"]
+            )
+
         # Treat passed data for forecast model fit/predict/tune at runtime
         if (
             params["passed_data"].get("historic_days_to_retrieve", None) is not None
@@ -498,57 +993,72 @@ def treat_runtimeparams(
             logger.warning(
                 "warning `days_to_retrieve` is set to a value less than 9, this could cause an error with the fit"
             )
-            logger.warning(
-                "setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune"
-            )
+            logger.warning("setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune")
             params["passed_data"]["historic_days_to_retrieve"] = 9
         else:
             if params["retrieve_hass_conf"].get("historic_days_to_retrieve", 0) < 9:
-                logger.debug(
-                    "setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune"
-                )
+                logger.debug("setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune")
                 params["passed_data"]["historic_days_to_retrieve"] = 9
             else:
-                params["passed_data"]["historic_days_to_retrieve"] = params[
-                    "retrieve_hass_conf"
-                ]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                params["passed_data"]["historic_days_to_retrieve"] = params["retrieve_hass_conf"][
+                    "historic_days_to_retrieve"
+                ]
+
+        # UPDATED ML PARAMETER HANDLING
+        # Define Helper Functions
+        def _cast_bool(value):
+            """Helper to cast string inputs to boolean safely."""
+            try:
+                return ast.literal_eval(str(value).capitalize())
+            except (ValueError, SyntaxError):
+                return False
+
+        def _get_ml_param(name, params, runtimeparams, default=None, cast=None):
+            """
+            Prioritize Runtime Params -> Config Params (optim_conf) -> Default.
+            """
+            if name in runtimeparams:
+                value = runtimeparams[name]
+            else:
+                value = params["optim_conf"].get(name, default)
+
+            if cast is not None and value is not None:
+                try:
+                    value = cast(value)
+                except Exception:
+                    pass
+            return value
+
+        # Compute dynamic defaults
+        # Default for var_model falls back to the configured load sensor
+        default_var_model = params["retrieve_hass_conf"].get(
+            "sensor_power_load_no_var_loads", "sensor.power_load_no_var_loads"
+        )
+
+        # Define Configuration Table
+        # Format: (parameter_name, default_value, cast_function)
+        ml_param_defs = [
+            ("model_type", "long_train_data", None),
+            ("var_model", default_var_model, None),
+            ("sklearn_model", "KNeighborsRegressor", None),
+            ("regression_model", "AdaBoostRegression", None),
+            ("num_lags", 48, None),
+            ("split_date_delta", "48h", None),
+            ("n_trials", 10, int),
+            ("perform_backtest", False, _cast_bool),
+        ]
+
+        # Apply Configuration
+        for name, default, caster in ml_param_defs:
+            params["passed_data"][name] = _get_ml_param(
+                name=name,
+                params=params,
+                runtimeparams=runtimeparams,
+                default=default,
+                cast=caster,
             )
-
+
+        # Other non-dynamic options
         if "model_predict_publish" not in runtimeparams.keys():
             model_predict_publish = False
         else:
```
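The `_get_ml_param` helper encodes a simple precedence rule: runtime parameters override `optim_conf`, which overrides hard defaults. Stripped to its core (illustrative values):

```python
# Runtime value wins, then the configured value, then the default;
# an optional cast is applied best-effort.
def resolve(name, runtimeparams: dict, optim_conf: dict, default=None, cast=None):
    value = runtimeparams[name] if name in runtimeparams else optim_conf.get(name, default)
    if cast is not None and value is not None:
        try:
            value = cast(value)
        except Exception:
            pass
    return value


runtimeparams = {"n_trials": "20"}
optim_conf = {"sklearn_model": "KNeighborsRegressor"}
print(resolve("n_trials", runtimeparams, optim_conf, 10, int))  # 20
print(resolve("sklearn_model", runtimeparams, optim_conf))  # KNeighborsRegressor
print(resolve("num_lags", runtimeparams, optim_conf, 48))  # 48
```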
```diff
@@ -561,12 +1071,15 @@ def treat_runtimeparams(
         else:
             model_predict_entity_id = runtimeparams["model_predict_entity_id"]
         params["passed_data"]["model_predict_entity_id"] = model_predict_entity_id
+        if "model_predict_device_class" not in runtimeparams.keys():
+            model_predict_device_class = "power"
+        else:
+            model_predict_device_class = runtimeparams["model_predict_device_class"]
+        params["passed_data"]["model_predict_device_class"] = model_predict_device_class
         if "model_predict_unit_of_measurement" not in runtimeparams.keys():
             model_predict_unit_of_measurement = "W"
         else:
-            model_predict_unit_of_measurement = runtimeparams[
-                "model_predict_unit_of_measurement"
-            ]
+            model_predict_unit_of_measurement = runtimeparams["model_predict_unit_of_measurement"]
         params["passed_data"]["model_predict_unit_of_measurement"] = (
             model_predict_unit_of_measurement
         )
@@ -574,23 +1087,22 @@ def treat_runtimeparams(
             model_predict_friendly_name = "Load Power Forecast custom ML model"
         else:
             model_predict_friendly_name = runtimeparams["model_predict_friendly_name"]
-        params["passed_data"]["model_predict_friendly_name"] = (
-            model_predict_friendly_name
-        )
+        params["passed_data"]["model_predict_friendly_name"] = model_predict_friendly_name
         if "mlr_predict_entity_id" not in runtimeparams.keys():
             mlr_predict_entity_id = "sensor.mlr_predict"
         else:
             mlr_predict_entity_id = runtimeparams["mlr_predict_entity_id"]
         params["passed_data"]["mlr_predict_entity_id"] = mlr_predict_entity_id
+        if "mlr_predict_device_class" not in runtimeparams.keys():
+            mlr_predict_device_class = "power"
+        else:
+            mlr_predict_device_class = runtimeparams["mlr_predict_device_class"]
+        params["passed_data"]["mlr_predict_device_class"] = mlr_predict_device_class
         if "mlr_predict_unit_of_measurement" not in runtimeparams.keys():
             mlr_predict_unit_of_measurement = None
         else:
-            mlr_predict_unit_of_measurement = runtimeparams[
-                "mlr_predict_unit_of_measurement"
-            ]
-        params["passed_data"]["mlr_predict_unit_of_measurement"] = (
-            mlr_predict_unit_of_measurement
-        )
+            mlr_predict_unit_of_measurement = runtimeparams["mlr_predict_unit_of_measurement"]
+        params["passed_data"]["mlr_predict_unit_of_measurement"] = mlr_predict_unit_of_measurement
         if "mlr_predict_friendly_name" not in runtimeparams.keys():
             mlr_predict_friendly_name = "mlr predictor"
         else:
@@ -621,9 +1133,7 @@ def treat_runtimeparams(
             weather_forecast_cache_only = False
         else:
             weather_forecast_cache_only = runtimeparams["weather_forecast_cache_only"]
-        params["passed_data"]["weather_forecast_cache_only"] = (
-            weather_forecast_cache_only
-        )
+        params["passed_data"]["weather_forecast_cache_only"] = weather_forecast_cache_only
 
         # A condition to manually save entity data under data_path/entities after optimization
         if "entity_save" not in runtimeparams.keys():
@@ -648,22 +1158,14 @@ def treat_runtimeparams(
         # Treat retrieve data from Home Assistant (retrieve_hass_conf) configuration parameters passed at runtime
         # Secrets passed at runtime
         if "solcast_api_key" in runtimeparams.keys():
-            params["retrieve_hass_conf"]["solcast_api_key"] = runtimeparams[
-                "solcast_api_key"
-            ]
+            params["retrieve_hass_conf"]["solcast_api_key"] = runtimeparams["solcast_api_key"]
         if "solcast_rooftop_id" in runtimeparams.keys():
-            params["retrieve_hass_conf"]["solcast_rooftop_id"] = runtimeparams[
-                "solcast_rooftop_id"
-            ]
+            params["retrieve_hass_conf"]["solcast_rooftop_id"] = runtimeparams["solcast_rooftop_id"]
         if "solar_forecast_kwp" in runtimeparams.keys():
-            params["retrieve_hass_conf"]["solar_forecast_kwp"] = runtimeparams[
-                "solar_forecast_kwp"
-            ]
+            params["retrieve_hass_conf"]["solar_forecast_kwp"] = runtimeparams["solar_forecast_kwp"]
         # Treat custom entities id's and friendly names for variables
         if "custom_pv_forecast_id" in runtimeparams.keys():
-            params["passed_data"]["custom_pv_forecast_id"] = runtimeparams[
-                "custom_pv_forecast_id"
-            ]
+            params["passed_data"]["custom_pv_forecast_id"] = runtimeparams["custom_pv_forecast_id"]
         if "custom_load_forecast_id" in runtimeparams.keys():
             params["passed_data"]["custom_load_forecast_id"] = runtimeparams[
                 "custom_load_forecast_id"
@@ -689,9 +1191,7 @@ def treat_runtimeparams(
                 "custom_grid_forecast_id"
             ]
         if "custom_cost_fun_id" in runtimeparams.keys():
-            params["passed_data"]["custom_cost_fun_id"] = runtimeparams[
-                "custom_cost_fun_id"
-            ]
+            params["passed_data"]["custom_cost_fun_id"] = runtimeparams["custom_cost_fun_id"]
         if "custom_optim_status_id" in runtimeparams.keys():
             params["passed_data"]["custom_optim_status_id"] = runtimeparams[
                 "custom_optim_status_id"
@@ -712,6 +1212,10 @@ def treat_runtimeparams(
             params["passed_data"]["custom_predicted_temperature_id"] = runtimeparams[
                 "custom_predicted_temperature_id"
             ]
+        if "custom_heating_demand_id" in runtimeparams.keys():
+            params["passed_data"]["custom_heating_demand_id"] = runtimeparams[
+                "custom_heating_demand_id"
+            ]
 
     # split config categories from params
     retrieve_hass_conf = params["retrieve_hass_conf"]
```
@@ -719,16 +1223,16 @@ def treat_runtimeparams(
|
|
|
719
1223
|
plant_conf = params["plant_conf"]
|
|
720
1224
|
|
|
721
1225
|
# Serialize the final params
|
|
722
|
-
params =
|
|
1226
|
+
params = orjson.dumps(params, default=str).decode()
|
|
723
1227
|
return params, retrieve_hass_conf, optim_conf, plant_conf
|
|
724
1228
|
|
|
725
1229
|
|
|
726
|
-
def get_yaml_parse(params: str, logger: logging.Logger) ->
|
|
1230
|
+
def get_yaml_parse(params: str | dict, logger: logging.Logger) -> tuple[dict, dict, dict]:
|
|
727
1231
|
"""
|
|
728
1232
|
Perform parsing of the params into the configuration catagories
|
|
729
1233
|
|
|
730
1234
|
:param params: Built configuration parameters
|
|
731
|
-
:type params: str
|
|
1235
|
+
:type params: str or dict
|
|
732
1236
|
:param logger: The logger object
|
|
733
1237
|
:type logger: logging.Logger
|
|
734
1238
|
:return: A tuple with the dictionaries containing the parsed data
|
|
@@ -737,7 +1241,7 @@ def get_yaml_parse(params: str, logger: logging.Logger) -> Tuple[dict, dict, dict]:
     """
     if params:
         if type(params) is str:
-            input_conf = json.loads(params)
+            input_conf = orjson.loads(params)
         else:
             input_conf = params
     else:
@@ -751,9 +1255,7 @@ def get_yaml_parse(params: str, logger: logging.Logger) -> Tuple[dict, dict, dict]:

     # Format time parameters
     if optim_conf.get("delta_forecast_daily", None) is not None:
-        optim_conf["delta_forecast_daily"] = pd.Timedelta(
-            days=optim_conf["delta_forecast_daily"]
-        )
+        optim_conf["delta_forecast_daily"] = pd.Timedelta(days=optim_conf["delta_forecast_daily"])
     if retrieve_hass_conf.get("optimization_time_step", None) is not None:
         retrieve_hass_conf["optimization_time_step"] = pd.to_timedelta(
             retrieve_hass_conf["optimization_time_step"], "minutes"
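
A quick worked example of the time-parameter conversion in this hunk, with hypothetical values:

    import pandas as pd

    delta_forecast_daily = pd.Timedelta(days=1)
    optimization_time_step = pd.to_timedelta(30, "minutes")
    # 48.0 optimization timesteps fit in one forecast day
    print(delta_forecast_daily / optimization_time_step)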
@@ -764,7 +1266,7 @@ def get_yaml_parse(params: str, logger: logging.Logger) -> Tuple[dict, dict, dict]:
     return retrieve_hass_conf, optim_conf, plant_conf


-def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dict:
+def get_injection_dict(df: pd.DataFrame, plot_size: int | None = 1366) -> dict:
     """
     Build a dictionary with graphs and tables for the webui.

@@ -785,9 +1287,10 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dict:
     df[cols_p] = df[cols_p].astype(int)
     df[cols_else] = df[cols_else].round(3)
     # Create plots
+    # Figure 0: Systems Powers
     n_colors = len(cols_p)
     colors = px.colors.sample_colorscale(
-        "jet", [n / (n_colors - 1) for n in range(n_colors)]
+        "jet", [n / (n_colors - 1) if n_colors > 1 else 0 for n in range(n_colors)]
    )
     fig_0 = px.line(
         df[cols_p],
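
The `if n_colors > 1 else 0` guard introduced here avoids a ZeroDivisionError when only a single series is plotted; a minimal sketch:

    import plotly.express as px

    n_colors = 1  # e.g. a single power column
    fracs = [n / (n_colors - 1) if n_colors > 1 else 0 for n in range(n_colors)]
    colors = px.colors.sample_colorscale("jet", fracs)  # one color, no crash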
@@ -795,8 +1298,12 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dict:
         template="presentation",
         line_shape="hv",
         color_discrete_sequence=colors,
+        render_mode="svg",
     )
     fig_0.update_layout(xaxis_title="Timestamp", yaxis_title="System powers (W)")
+    image_path_0 = fig_0.to_html(full_html=False, default_width="75%")
+    # Figure 1: Battery SOC (Optional)
+    image_path_1 = None
     if "SOC_opt" in df.columns.to_list():
         fig_1 = px.line(
             df["SOC_opt"],
@@ -804,12 +1311,36 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dict:
             template="presentation",
             line_shape="hv",
             color_discrete_sequence=colors,
+            render_mode="svg",
         )
         fig_1.update_layout(xaxis_title="Timestamp", yaxis_title="Battery SOC (%)")
+        image_path_1 = fig_1.to_html(full_html=False, default_width="75%")
+    # Figure Thermal: Temperatures (Optional)
+    # Detect columns for predicted or target temperatures
+    cols_temp = [
+        i for i in df.columns.to_list() if "predicted_temp_heater" in i or "target_temp_heater" in i
+    ]
+    image_path_temp = None
+    if len(cols_temp) > 0:
+        n_colors = len(cols_temp)
+        colors = px.colors.sample_colorscale(
+            "jet", [n / (n_colors - 1) if n_colors > 1 else 0 for n in range(n_colors)]
+        )
+        fig_temp = px.line(
+            df[cols_temp],
+            title="Thermal loads temperature schedule",
+            template="presentation",
+            line_shape="hv",
+            color_discrete_sequence=colors,
+            render_mode="svg",
+        )
+        fig_temp.update_layout(xaxis_title="Timestamp", yaxis_title="Temperature (°C)")
+        image_path_temp = fig_temp.to_html(full_html=False, default_width="75%")
+    # Figure 2: Costs
     cols_cost = [i for i in df.columns.to_list() if "cost_" in i or "unit_" in i]
     n_colors = len(cols_cost)
     colors = px.colors.sample_colorscale(
-        "jet", [n / (n_colors - 1) for n in range(n_colors)]
+        "jet", [n / (n_colors - 1) if n_colors > 1 else 0 for n in range(n_colors)]
     )
     fig_2 = px.line(
         df[cols_cost],
@@ -817,14 +1348,11 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dict:
         template="presentation",
         line_shape="hv",
         color_discrete_sequence=colors,
+        render_mode="svg",
     )
     fig_2.update_layout(xaxis_title="Timestamp", yaxis_title="System costs (currency)")
-    # Get full path to image
-    image_path_0 = fig_0.to_html(full_html=False, default_width="75%")
-    if "SOC_opt" in df.columns.to_list():
-        image_path_1 = fig_1.to_html(full_html=False, default_width="75%")
     image_path_2 = fig_2.to_html(full_html=False, default_width="75%")
-    #
+    # Tables
     table1 = df.reset_index().to_html(classes="mystyle", index=False)
     cost_cols = [i for i in df.columns if "cost_" in i]
     table2 = df[cost_cols].reset_index().sum(numeric_only=True)
@@ -834,26 +1362,28 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dict:
         .reset_index(names="Variable")
         .to_html(classes="mystyle", index=False)
     )
-    #
+    # Construct Injection Dict
     injection_dict = {}
     injection_dict["title"] = "<h2>EMHASS optimization results</h2>"
     injection_dict["subsubtitle0"] = "<h4>Plotting latest optimization results</h4>"
+    # Add Powers
     injection_dict["figure_0"] = image_path_0
-    if "SOC_opt" in df.columns.to_list():
+    # Add Thermal
+    if image_path_temp is not None:
+        injection_dict["figure_thermal"] = image_path_temp
+    # Add SOC
+    if image_path_1 is not None:
         injection_dict["figure_1"] = image_path_1
+    # Add Costs
     injection_dict["figure_2"] = image_path_2
     injection_dict["subsubtitle1"] = "<h4>Last run optimization results table</h4>"
     injection_dict["table1"] = table1
-    injection_dict["subsubtitle2"] = (
-        "<h4>Summary table for latest optimization results</h4>"
-    )
+    injection_dict["subsubtitle2"] = "<h4>Summary table for latest optimization results</h4>"
     injection_dict["table2"] = table2
     return injection_dict


-def get_injection_dict_forecast_model_fit(
-    df_fit_pred: pd.DataFrame, mlf: MLForecaster
-) -> dict:
+def get_injection_dict_forecast_model_fit(df_fit_pred: pd.DataFrame, mlf: MLForecaster) -> dict:
     """
     Build a dictionary with graphs and tables for the webui for special MLF fit case.

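
For orientation, the injection_dict assembled above roughly takes the following shape once rendered, with the thermal and SOC figures present only when their columns exist (HTML snippets abbreviated):

    injection_dict = {
        "title": "<h2>EMHASS optimization results</h2>",
        "subsubtitle0": "<h4>Plotting latest optimization results</h4>",
        "figure_0": "<div>...system powers plot...</div>",
        "figure_thermal": "<div>...thermal schedule plot...</div>",  # optional
        "figure_1": "<div>...battery SOC plot...</div>",  # optional
        "figure_2": "<div>...costs plot...</div>",
        "subsubtitle1": "<h4>Last run optimization results table</h4>",
        "table1": "<table>...</table>",
        "subsubtitle2": "<h4>Summary table for latest optimization results</h4>",
        "table2": "<table>...</table>",
    }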
@@ -873,18 +1403,18 @@ def get_injection_dict_forecast_model_fit(
     injection_dict = {}
     injection_dict["title"] = "<h2>Custom machine learning forecast model fit</h2>"
     injection_dict["subsubtitle0"] = (
-        "<h4>Plotting train/test forecast model results for "
-        + mlf.model_type
-        + "</h4>"
-        "<h4>Forecasting variable " + mlf.var_model + "</h4>"
+        "<h4>Plotting train/test forecast model results for "
+        + mlf.model_type
+        + "<br>"
+        + "Forecasting variable "
+        + mlf.var_model
+        + "</h4>"
     )
     injection_dict["figure_0"] = image_path_0
     return injection_dict


-def get_injection_dict_forecast_model_tune(
-    df_pred_optim: pd.DataFrame, mlf: MLForecaster
-) -> dict:
+def get_injection_dict_forecast_model_tune(df_pred_optim: pd.DataFrame, mlf: MLForecaster) -> dict:
     """
     Build a dictionary with graphs and tables for the webui for special MLF tune case.

@@ -906,21 +1436,21 @@ def get_injection_dict_forecast_model_tune(
     injection_dict["subsubtitle0"] = (
         "<h4>Performed a tuning routine using bayesian optimization for "
         + mlf.model_type
+        + "<br>"
+        + "Forecasting variable "
+        + mlf.var_model
         + "</h4>"
     )
-    injection_dict["subsubtitle0"] = (
-        "<h4>Forecasting variable " + mlf.var_model + "</h4>"
-    )
     injection_dict["figure_0"] = image_path_0
     return injection_dict


-def build_config(
+async def build_config(
     emhass_conf: dict,
     logger: logging.Logger,
     defaults_path: str,
-    config_path: Optional[str] = None,
-    legacy_config_path: Optional[str] = None,
+    config_path: str | None = None,
+    legacy_config_path: str | None = None,
 ) -> dict:
     """
     Retrieve parameters from configuration files.
@@ -942,32 +1472,33 @@ def build_config(

     # Read default parameters (default root_path/data/config_defaults.json)
     if defaults_path and pathlib.Path(defaults_path).is_file():
-        with open(defaults_path) as data:
-            config = json.load(data)
+        async with aiofiles.open(defaults_path) as data:
+            content = await data.read()
+            config = orjson.loads(content)
     else:
         logger.error("config_defaults.json. does not exist ")
         return False

     # Read user config parameters if provided (default /share/config.json)
     if config_path and pathlib.Path(config_path).is_file():
-        with open(config_path) as data:
+        async with aiofiles.open(config_path) as data:
+            content = await data.read()
             # Set override default parameters (config_defaults) with user given parameters (config.json)
             logger.info("Obtaining parameters from config.json:")
-            config.update(json.load(data))
+            config.update(orjson.loads(content))
     else:
         logger.info(
             "config.json does not exist, or has not been passed. config parameters may default to config_defaults.json"
         )
-        logger.info(
-            "you may like to generate the config.json file on the configuration page"
-        )
+        logger.info("you may like to generate the config.json file on the configuration page")

     # Check to see if legacy config_emhass.yaml was provided (default /app/config_emhass.yaml)
     # Convert legacy parameter definitions/format to match config.json
     if legacy_config_path and pathlib.Path(legacy_config_path).is_file():
-        with open(legacy_config_path, "r") as data:
-            legacy_config = yaml.load(data, Loader=yaml.FullLoader)
-        legacy_config_parameters = build_legacy_config_params(
+        async with aiofiles.open(legacy_config_path) as data:
+            content = await data.read()
+        legacy_config = yaml.safe_load(content)
+        legacy_config_parameters = await build_legacy_config_params(
             emhass_conf, legacy_config, logger
         )
         if type(legacy_config_parameters) is not bool:
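
The read-then-parse pattern above recurs throughout the new async helpers; a self-contained sketch (the path is hypothetical):

    import asyncio

    import aiofiles
    import orjson

    async def read_json_file(path: str) -> dict:
        # Mirrors build_config: read the whole file asynchronously, then parse
        async with aiofiles.open(path) as f:
            content = await f.read()
        return orjson.loads(content)

    # config = asyncio.run(read_json_file("/share/config.json"))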
@@ -979,9 +1510,11 @@ def build_config(
     return config


-def build_legacy_config_params(
-    emhass_conf: dict, legacy_config: dict, logger: logging.Logger
-) -> dict:
+async def build_legacy_config_params(
+    emhass_conf: dict[str, pathlib.Path],
+    legacy_config: dict[str, str],
+    logger: logging.Logger,
+) -> dict[str, str]:
     """
     Build a config dictionary with legacy config_emhass.yaml file.
     Uses the associations file to convert parameter naming conventions (to config.json/config_defaults.json).
@@ -1011,8 +1544,9 @@ def build_legacy_config_params(

     # Use associations list to map legacy parameter name with config.json parameter name
     if emhass_conf["associations_path"].exists():
-        with emhass_conf["associations_path"].open("r") as data:
-            associations = list(csv.reader(data, delimiter=","))
+        async with aiofiles.open(emhass_conf["associations_path"]) as data:
+            content = await data.read()
+            associations = list(csv.reader(content.splitlines(), delimiter=","))
     else:
         logger.error(
             "Cant find associations file (associations.csv) in: "
@@ -1025,36 +1559,30 @@ def build_legacy_config_params(
     for association in associations:
         # if legacy config catagories exists and if legacy parameter exists in config catagories
         if (
-            legacy_config.get(association[0], None) is not None
+            legacy_config.get(association[0]) is not None
             and legacy_config[association[0]].get(association[1], None) is not None
         ):
             config[association[2]] = legacy_config[association[0]][association[1]]

             # If config now has load_peak_hour_periods, extract from list of dict
-            if (
-                association[2] == "load_peak_hour_periods"
-                and type(config[association[2]]) is list
-            ):
-                config[association[2]] = dict(
-                    (key, d[key]) for d in config[association[2]] for key in d
-                )
+            if association[2] == "load_peak_hour_periods" and type(config[association[2]]) is list:
+                config[association[2]] = {key: d[key] for d in config[association[2]] for key in d}

     return config
-    # params['associations_dict'] = associations_dict


-def param_to_config(param: dict, logger: logging.Logger) -> dict:
+def param_to_config(param: dict[str, dict], logger: logging.Logger) -> dict[str, str]:
     """
     A function that extracts the parameters from param back to the config.json format.
     Extracts parameters from config catagories.
     Attempts to exclude secrets hosed in retrieve_hass_conf.

     :param params: Built configuration parameters
-    :type param: dict
+    :type param: dict[str, dict]
     :param logger: The logger object
     :type logger: logging.Logger
     :return: The built config dictionary
-    :rtype: dict
+    :rtype: dict[str, str]
     """
     logger.debug("Converting param to config")

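
To make the association-driven renaming concrete, here is a minimal sketch with hypothetical rows (the real rows live in emhass/data/associations.csv):

    # Each row: (legacy category, legacy parameter name, config.json parameter name)
    associations = [
        ["retrieve_hass_conf", "freq", "optimization_time_step"],
        ["optim_conf", "num_def_loads", "number_of_deferrable_loads"],
    ]
    legacy_config = {"optim_conf": {"num_def_loads": 2}}
    config = {}
    for category, legacy_name, new_name in associations:
        if (
            legacy_config.get(category) is not None
            and legacy_config[category].get(legacy_name) is not None
        ):
            config[new_name] = legacy_config[category][legacy_name]
    print(config)  # {'number_of_deferrable_loads': 2}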
@@ -1083,14 +1611,14 @@ def param_to_config(param: dict, logger: logging.Logger) -> dict:
     return return_config


-def build_secrets(
-    emhass_conf: dict,
+async def build_secrets(
+    emhass_conf: dict[str, pathlib.Path],
     logger: logging.Logger,
-    argument: Optional[dict] = {},
-    options_path: Optional[str] = None,
-    secrets_path: Optional[str] = None,
-    no_response: Optional[bool] = False,
-) -> Tuple[dict, dict]:
+    argument: dict[str, str] | None = None,
+    options_path: str | None = None,
+    secrets_path: str | None = None,
+    no_response: bool = False,
+) -> tuple[dict[str, pathlib.Path], dict[str, str | float]]:
     """
     Retrieve and build parameters from secrets locations (ENV, ARG, Secrets file (secrets_emhass.yaml/options.json) and/or Home Assistant (via API))
     priority order (lwo to high) = Defaults (written in function), ENV, Options json file, Home Assistant API, Secrets yaml file, Arguments
@@ -1110,8 +1638,9 @@ def build_secrets(
     :return: Updated emhass_conf, the built secrets dictionary
     :rtype: Tuple[dict, dict]:
     """
-
     # Set defaults to be overwritten
+    if argument is None:
+        argument = {}
     params_secrets = {
         "hass_url": "https://myhass.duckdns.org/",
         "long_lived_token": "thatverylongtokenhere",
@@ -1138,8 +1667,9 @@ def build_secrets(
     # Use local supervisor API to obtain secrets from Home Assistant if hass_url in options.json is empty and SUPERVISOR_TOKEN ENV exists (provided by Home Assistant when running the container as addon)
     options = {}
     if options_path and pathlib.Path(options_path).is_file():
-        with open(options_path) as data:
-            options = json.load(data)
+        async with aiofiles.open(options_path) as data:
+            content = await data.read()
+            options = orjson.loads(content)

     # Obtain secrets from Home Assistant?
     url_from_options = options.get("hass_url", "empty")
@@ -1147,69 +1677,67 @@ def build_secrets(

     # If data path specified by options.json, overwrite emhass_conf['data_path']
     if (
-        options.get("data_path", None)
+        options.get("data_path", None) is not None
         and pathlib.Path(options["data_path"]).exists()
     ):
         emhass_conf["data_path"] = pathlib.Path(options["data_path"])

     # Check to use Home Assistant local API
-    if (
-        not no_response
-        and (
-            url_from_options == "empty"
-            or url_from_options == ""
-            or url_from_options == "http://supervisor/core/api"
-        )
-        and os.getenv("SUPERVISOR_TOKEN", None) is not None
-    ):
+    if not no_response and os.getenv("SUPERVISOR_TOKEN", None) is not None:
         params_secrets["long_lived_token"] = os.getenv("SUPERVISOR_TOKEN", None)
-        params_secrets["hass_url"] = "http://supervisor/core/api"
+        # Use hass_url from options.json if available, otherwise use supervisor API for addon
+        if url_from_options != "empty" and url_from_options != "":
+            params_secrets["hass_url"] = url_from_options
+        else:
+            # For addons, use supervisor API for both REST and WebSocket access
+            params_secrets["hass_url"] = "http://supervisor/core/api"
         headers = {
             "Authorization": "Bearer " + params_secrets["long_lived_token"],
             "content-type": "application/json",
         }
         # Obtain secrets from Home Assistant via API
         logger.debug("Obtaining secrets from Home Assistant Supervisor API")
-        response = get(params_secrets["hass_url"] + "/config", headers=headers)
-        if response.status_code < 400:
-            config_hass = response.json()
-            params_secrets = {
-                "hass_url": params_secrets["hass_url"],
-                "long_lived_token": params_secrets["long_lived_token"],
-                "time_zone": config_hass["time_zone"],
-                "Latitude": config_hass["latitude"],
-                "Longitude": config_hass["longitude"],
-                "Altitude": config_hass["elevation"],
-            }
-        else:
-            # Obtain the url and key secrets if any from options.json (default /app/options.json)
-            logger.warning("Error obtaining secrets from Home Assistant Supervisor API")
-            logger.debug("Obtaining url and key secrets from options.json")
-            if url_from_options != "empty" and url_from_options != "":
-                params_secrets["hass_url"] = url_from_options
-            if key_from_options != "empty" and key_from_options != "":
-                params_secrets["long_lived_token"] = key_from_options
-            if (
-                options.get("time_zone", "empty") != "empty"
-                and options["time_zone"] != ""
-            ):
-                params_secrets["time_zone"] = options["time_zone"]
-            if options.get("Latitude", None) is not None and bool(options["Latitude"]):
-                params_secrets["Latitude"] = options["Latitude"]
-            if options.get("Longitude", None) is not None and bool(
-                options["Longitude"]
-            ):
-                params_secrets["Longitude"] = options["Longitude"]
-            if options.get("Altitude", None) is not None and bool(options["Altitude"]):
-                params_secrets["Altitude"] = options["Altitude"]
+        async with aiohttp.ClientSession() as session:
+            async with session.get(
+                params_secrets["hass_url"] + "/config", headers=headers
+            ) as response:
+                if response.status < 400:
+                    config_hass = await response.json()
+                    params_secrets = {
+                        "hass_url": params_secrets["hass_url"],
+                        "long_lived_token": params_secrets["long_lived_token"],
+                        "time_zone": config_hass["time_zone"],
+                        "Latitude": config_hass["latitude"],
+                        "Longitude": config_hass["longitude"],
+                        "Altitude": config_hass["elevation"],
+                    }
+                else:
+                    # Obtain the url and key secrets if any from options.json (default /app/options.json)
+                    logger.warning(
+                        "Error obtaining secrets from Home Assistant Supervisor API"
+                    )
+                    logger.debug("Obtaining url and key secrets from options.json")
+                    if url_from_options != "empty" and url_from_options != "":
+                        params_secrets["hass_url"] = url_from_options
+                    if key_from_options != "empty" and key_from_options != "":
+                        params_secrets["long_lived_token"] = key_from_options
+                    if (
+                        options.get("time_zone", "empty") != "empty"
+                        and options["time_zone"] != ""
+                    ):
+                        params_secrets["time_zone"] = options["time_zone"]
+                    if options.get("Latitude", None) is not None and bool(
+                        options["Latitude"]
+                    ):
+                        params_secrets["Latitude"] = options["Latitude"]
+                    if options.get("Longitude", None) is not None and bool(
+                        options["Longitude"]
+                    ):
+                        params_secrets["Longitude"] = options["Longitude"]
+                    if options.get("Altitude", None) is not None and bool(
+                        options["Altitude"]
+                    ):
+                        params_secrets["Altitude"] = options["Altitude"]
     else:
         # Obtain the url and key secrets if any from options.json (default /app/options.json)
         logger.debug("Obtaining url and key secrets from options.json")
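
Note the API difference buried in this hunk: requests exposes response.status_code, while aiohttp exposes response.status and makes json() awaitable. A minimal sketch of the new pattern (URL and token are placeholders):

    import aiohttp

    async def fetch_hass_config(hass_url: str, token: str) -> dict:
        headers = {
            "Authorization": "Bearer " + token,
            "content-type": "application/json",
        }
        async with aiohttp.ClientSession() as session:
            async with session.get(hass_url + "/config", headers=headers) as response:
                if response.status < 400:
                    return await response.json()
                raise RuntimeError(f"HA /config request failed: {response.status}")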
@@ -1217,22 +1745,13 @@ def build_secrets(
             params_secrets["hass_url"] = url_from_options
         if key_from_options != "empty" and key_from_options != "":
             params_secrets["long_lived_token"] = key_from_options
-        if (
-            options.get("time_zone", "empty") != "empty"
-            and options["time_zone"] != ""
-        ):
+        if options.get("time_zone", "empty") != "empty" and options["time_zone"] != "":
             params_secrets["time_zone"] = options["time_zone"]
-        if options.get("Latitude", None) is not None and bool(
-            options["Latitude"]
-        ):
+        if options.get("Latitude", None) is not None and bool(options["Latitude"]):
             params_secrets["Latitude"] = options["Latitude"]
-        if options.get("Longitude", None) is not None and bool(
-            options["Longitude"]
-        ):
+        if options.get("Longitude", None) is not None and bool(options["Longitude"]):
             params_secrets["Longitude"] = options["Longitude"]
-        if options.get("Altitude", None) is not None and bool(
-            options["Altitude"]
-        ):
+        if options.get("Altitude", None) is not None and bool(options["Altitude"]):
             params_secrets["Altitude"] = options["Altitude"]

     # Obtain the forecast secrets (if any) from options.json (default /app/options.json)
@@ -1253,47 +1772,49 @@ def build_secrets(
         and options["solcast_rooftop_id"] != ""
     ):
         params_secrets["solcast_rooftop_id"] = options["solcast_rooftop_id"]
-    if options.get("solar_forecast_kwp", None) and bool(
-        options["solar_forecast_kwp"]
-    ):
+    if options.get("solar_forecast_kwp", None) and bool(options["solar_forecast_kwp"]):
         params_secrets["solar_forecast_kwp"] = options["solar_forecast_kwp"]

     # Obtain secrets from secrets_emhass.yaml? (default /app/secrets_emhass.yaml)
     if secrets_path and pathlib.Path(secrets_path).is_file():
         logger.debug("Obtaining secrets from secrets file")
-        with open(pathlib.Path(secrets_path), "r") as file:
-            params_secrets.update(yaml.load(file, Loader=yaml.FullLoader))
+        async with aiofiles.open(pathlib.Path(secrets_path)) as file:
+            content = await file.read()
+            params_secrets.update(yaml.safe_load(content))

     # Receive key and url from ARG/arguments?
-    if argument.get("url", None) is not None:
+    if argument.get("url") is not None:
         params_secrets["hass_url"] = argument["url"]
         logger.debug("Obtaining url from passed argument")
-    if argument.get("key", None) is not None:
+    if argument.get("key") is not None:
         params_secrets["long_lived_token"] = argument["key"]
         logger.debug("Obtaining long_lived_token from passed argument")

     return emhass_conf, params_secrets


-def build_params(
-    emhass_conf: dict, params_secrets: dict, config: dict, logger: logging.Logger
-) -> dict:
+async def build_params(
+    emhass_conf: dict[str, pathlib.Path],
+    params_secrets: dict[str, str | float],
+    config: dict[str, str],
+    logger: logging.Logger,
+) -> dict[str, dict]:
     """
     Build the main params dictionary from the config and secrets
     Appends configuration catagories used by emhass to the parameters. (with use of the associations file as a reference)

     :param emhass_conf: Dictionary containing the needed emhass paths
-    :type emhass_conf: dict
+    :type emhass_conf: dict[str, pathlib.Path]
     :param params_secrets: The dictionary containing the built secret variables
-    :type params_secrets: dict
+    :type params_secrets: dict[str, str | float]
     :param config: The dictionary of built config parameters
-    :type config: dict
+    :type config: dict[str, str]
     :param logger: The logger object
     :type logger: logging.Logger
     :return: The built param dictionary
-    :rtype: dict
+    :rtype: dict[str, dict]
     """
-    if type(params_secrets) is not dict:
+    if not isinstance(params_secrets, dict):
         params_secrets = {}

     params = {}
@@ -1307,8 +1828,9 @@ def build_params(
     if emhass_conf.get(
         "associations_path", get_root(__file__, num_parent=2) / "data/associations.csv"
     ).exists():
-        with emhass_conf["associations_path"].open("r") as data:
-            associations = list(csv.reader(data, delimiter=","))
+        async with aiofiles.open(emhass_conf["associations_path"]) as data:
+            content = await data.read()
+            associations = list(csv.reader(content.splitlines(), delimiter=","))
     else:
         logger.error(
             "Unable to obtain the associations file (associations.csv) in: "
@@ -1325,7 +1847,7 @@ def build_params(
     for association in associations:
         # If parameter has list_ name and parameter in config is presented with its list name
         # (ie, config parameter is in legacy options.json format)
-        if len(association) == 4 and config.get(association[3], None) is not None:
+        if len(association) == 4 and config.get(association[3]) is not None:
             # Extract lists of dictionaries
             if config[association[3]] and type(config[association[3]][0]) is dict:
                 params[association[0]][association[2]] = [
@@ -1334,22 +1856,21 @@ def build_params(
             else:
                 params[association[0]][association[2]] = config[association[3]]
         # Else, directly set value of config parameter to param
-        elif config.get(association[2], None) is not None:
+        elif config.get(association[2]) is not None:
             params[association[0]][association[2]] = config[association[2]]

     # Check if we need to create `list_hp_periods` from config (ie. legacy options.json format)
     if (
-        params.get("optim_conf", None) is not None
-        and config.get("list_peak_hours_periods_start_hours", None) is not None
-        and config.get("list_peak_hours_periods_end_hours", None) is not None
+        params.get("optim_conf") is not None
+        and config.get("list_peak_hours_periods_start_hours") is not None
+        and config.get("list_peak_hours_periods_end_hours") is not None
     ):
         start_hours_list = [
             i["peak_hours_periods_start_hours"]
             for i in config["list_peak_hours_periods_start_hours"]
         ]
         end_hours_list = [
-            i["peak_hours_periods_end_hours"]
-            for i in config["list_peak_hours_periods_end_hours"]
+            i["peak_hours_periods_end_hours"] for i in config["list_peak_hours_periods_end_hours"]
         ]
         num_peak_hours = len(start_hours_list)
         list_hp_periods_list = {
@@ -1363,32 +1884,26 @@ def build_params(
     else:
         # Else, check param already contains load_peak_hour_periods from config
        if params["optim_conf"].get("load_peak_hour_periods", None) is None:
-            logger.warning(
-                "Unable to detect or create load_peak_hour_periods parameter"
-            )
+            logger.warning("Unable to detect or create load_peak_hour_periods parameter")

     # Format load_peak_hour_periods list to dict if necessary
-    if params["optim_conf"].get(
-        "load_peak_hour_periods", None
-    ) is not None and type(params["optim_conf"]["load_peak_hour_periods"]) is list:
-        params["optim_conf"]["load_peak_hour_periods"] = dict(
-            (key, d[key])
-            for d in params["optim_conf"]["load_peak_hour_periods"]
-            for key in d
-        )
+    if params["optim_conf"].get("load_peak_hour_periods", None) is not None and isinstance(
+        params["optim_conf"]["load_peak_hour_periods"], list
+    ):
+        params["optim_conf"]["load_peak_hour_periods"] = {
+            key: d[key] for d in params["optim_conf"]["load_peak_hour_periods"] for key in d
+        }

     # Call function to check parameter lists that require the same length as deferrable loads
     # If not, set defaults it fill in gaps
     if params["optim_conf"].get("number_of_deferrable_loads", None) is not None:
         num_def_loads = params["optim_conf"]["number_of_deferrable_loads"]
-        params["optim_conf"]["start_timesteps_of_each_deferrable_load"] = (
-            check_def_loads(
-                num_def_loads,
-                params["optim_conf"],
-                0,
-                "start_timesteps_of_each_deferrable_load",
-                logger,
-            )
+        params["optim_conf"]["start_timesteps_of_each_deferrable_load"] = check_def_loads(
+            num_def_loads,
+            params["optim_conf"],
+            0,
+            "start_timesteps_of_each_deferrable_load",
+            logger,
         )
         params["optim_conf"]["end_timesteps_of_each_deferrable_load"] = check_def_loads(
             num_def_loads,
|
|
|
1418
1933
|
"set_deferrable_startup_penalty",
|
|
1419
1934
|
logger,
|
|
1420
1935
|
)
|
|
1421
|
-
params["optim_conf"]["operating_hours_of_each_deferrable_load"] = (
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
logger,
|
|
1428
|
-
)
|
|
1936
|
+
params["optim_conf"]["operating_hours_of_each_deferrable_load"] = check_def_loads(
|
|
1937
|
+
num_def_loads,
|
|
1938
|
+
params["optim_conf"],
|
|
1939
|
+
0,
|
|
1940
|
+
"operating_hours_of_each_deferrable_load",
|
|
1941
|
+
logger,
|
|
1429
1942
|
)
|
|
1430
1943
|
params["optim_conf"]["nominal_power_of_deferrable_loads"] = check_def_loads(
|
|
1431
1944
|
num_def_loads,
|
|
@@ -1448,14 +1961,12 @@ def build_params(

     # Configure secrets, set params to correct config categorie
     # retrieve_hass_conf
-    params["retrieve_hass_conf"]["hass_url"] = params_secrets.get("hass_url", None)
-    params["retrieve_hass_conf"]["long_lived_token"] = params_secrets.get(
-        "long_lived_token", None
-    )
-    params["retrieve_hass_conf"]["time_zone"] = params_secrets.get("time_zone", None)
-    params["retrieve_hass_conf"]["Latitude"] = params_secrets.get("Latitude", None)
-    params["retrieve_hass_conf"]["Longitude"] = params_secrets.get("Longitude", None)
-    params["retrieve_hass_conf"]["Altitude"] = params_secrets.get("Altitude", None)
+    params["retrieve_hass_conf"]["hass_url"] = params_secrets.get("hass_url")
+    params["retrieve_hass_conf"]["long_lived_token"] = params_secrets.get("long_lived_token")
+    params["retrieve_hass_conf"]["time_zone"] = params_secrets.get("time_zone")
+    params["retrieve_hass_conf"]["Latitude"] = params_secrets.get("Latitude")
+    params["retrieve_hass_conf"]["Longitude"] = params_secrets.get("Longitude")
+    params["retrieve_hass_conf"]["Altitude"] = params_secrets.get("Altitude")
     # Update optional param secrets
     if params["optim_conf"].get("weather_forecast_method", None) is not None:
         if params["optim_conf"]["weather_forecast_method"] == "solcast":
@@ -1489,9 +2000,7 @@ def build_params(
         4807.8,
     ]
     if any(x in secret_params for x in params["retrieve_hass_conf"].values()):
-        logger.warning(
-            "Some secret parameters values are still matching their defaults"
-        )
+        logger.warning("Some secret parameters values are still matching their defaults")

     # Set empty dict objects for params passed_data
     # To be latter populated with runtime parameters (treat_runtimeparams)
|
|
|
1514
2023
|
|
|
1515
2024
|
|
|
1516
2025
|
def check_def_loads(
|
|
1517
|
-
num_def_loads: int,
|
|
1518
|
-
|
|
2026
|
+
num_def_loads: int,
|
|
2027
|
+
parameter: list[dict],
|
|
2028
|
+
default: str | float,
|
|
2029
|
+
parameter_name: str,
|
|
2030
|
+
logger: logging.Logger,
|
|
2031
|
+
) -> list[dict]:
|
|
1519
2032
|
"""
|
|
1520
2033
|
Check parameter lists with deferrable loads number, if they do not match, enlarge to fit.
|
|
1521
2034
|
|
|
1522
2035
|
:param num_def_loads: Total number deferrable loads
|
|
1523
2036
|
:type num_def_loads: int
|
|
1524
2037
|
:param parameter: parameter config dict containing paramater
|
|
1525
|
-
:type: list[dict]
|
|
2038
|
+
:type parameter: list[dict]
|
|
1526
2039
|
:param default: default value for parameter to pad missing
|
|
1527
|
-
:type:
|
|
2040
|
+
:type default: str | int | float
|
|
1528
2041
|
:param parameter_name: name of parameter
|
|
1529
|
-
:type
|
|
2042
|
+
:type parameter_name: str
|
|
1530
2043
|
:param logger: The logger object
|
|
1531
2044
|
:type logger: logging.Logger
|
|
1532
|
-
return: parameter list
|
|
2045
|
+
:return: parameter list
|
|
1533
2046
|
:rtype: list[dict]
|
|
1534
|
-
|
|
1535
2047
|
"""
|
|
1536
2048
|
if (
|
|
1537
2049
|
parameter.get(parameter_name, None) is not None
|
|
@@ -1544,27 +2056,76 @@ def check_def_loads(
|
|
|
1544
2056
|
+ str(default)
|
|
1545
2057
|
+ ") to parameter"
|
|
1546
2058
|
)
|
|
1547
|
-
for
|
|
2059
|
+
for _x in range(len(parameter[parameter_name]), num_def_loads):
|
|
1548
2060
|
parameter[parameter_name].append(default)
|
|
1549
2061
|
return parameter[parameter_name]
|
|
1550
2062
|
|
|
1551
2063
|
|
|
1552
|
-
def get_days_list(days_to_retrieve: int) -> pd.
|
|
2064
|
+
def get_days_list(days_to_retrieve: int) -> pd.DatetimeIndex:
|
|
1553
2065
|
"""
|
|
1554
2066
|
Get list of past days from today to days_to_retrieve.
|
|
1555
2067
|
|
|
1556
2068
|
:param days_to_retrieve: Total number of days to retrieve from the past
|
|
1557
2069
|
:type days_to_retrieve: int
|
|
1558
2070
|
:return: The list of days
|
|
1559
|
-
:rtype: pd.
|
|
2071
|
+
:rtype: pd.DatetimeIndex
|
|
1560
2072
|
|
|
1561
2073
|
"""
|
|
1562
|
-
today = datetime.now(
|
|
2074
|
+
today = datetime.now(UTC).replace(minute=0, second=0, microsecond=0)
|
|
1563
2075
|
d = (today - timedelta(days=days_to_retrieve)).isoformat()
|
|
1564
|
-
days_list = pd.date_range(start=d, end=today.isoformat(), freq="D")
|
|
2076
|
+
days_list = pd.date_range(start=d, end=today.isoformat(), freq="D").normalize()
|
|
1565
2077
|
return days_list
|
|
1566
2078
|
|
|
1567
2079
|
|
|
2080
|
+
def add_date_features(
|
|
2081
|
+
data: pd.DataFrame,
|
|
2082
|
+
timestamp: str | None = None,
|
|
2083
|
+
date_features: list[str] | None = None,
|
|
2084
|
+
) -> pd.DataFrame:
|
|
2085
|
+
"""Add date-related features from a DateTimeIndex or a timestamp column.
|
|
2086
|
+
|
|
2087
|
+
:param data: The input DataFrame.
|
|
2088
|
+
:type data: pd.DataFrame
|
|
2089
|
+
:param timestamp: The column containing the timestamp (optional if DataFrame has a DateTimeIndex).
|
|
2090
|
+
:type timestamp: Optional[str]
|
|
2091
|
+
:param date_features: List of date features to extract (default: all).
|
|
2092
|
+
:type date_features: Optional[List[str]]
|
|
2093
|
+
:return: The DataFrame with added date features.
|
|
2094
|
+
:rtype: pd.DataFrame
|
|
2095
|
+
"""
|
|
2096
|
+
|
|
2097
|
+
df = copy.deepcopy(data) # Avoid modifying the original DataFrame
|
|
2098
|
+
|
|
2099
|
+
# If no specific features are requested, extract all by default
|
|
2100
|
+
default_features = ["year", "month", "day_of_week", "day_of_year", "day", "hour"]
|
|
2101
|
+
date_features = date_features or default_features
|
|
2102
|
+
|
|
2103
|
+
# Determine whether to use index or a timestamp column
|
|
2104
|
+
if timestamp:
|
|
2105
|
+
df[timestamp] = pd.to_datetime(df[timestamp], utc=True)
|
|
2106
|
+
source = df[timestamp].dt
|
|
2107
|
+
else:
|
|
2108
|
+
if not isinstance(df.index, pd.DatetimeIndex):
|
|
2109
|
+
raise ValueError("DataFrame must have a DateTimeIndex or a valid timestamp column.")
|
|
2110
|
+
source = df.index
|
|
2111
|
+
|
|
2112
|
+
# Extract date features
|
|
2113
|
+
if "year" in date_features:
|
|
2114
|
+
df["year"] = source.year
|
|
2115
|
+
if "month" in date_features:
|
|
2116
|
+
df["month"] = source.month
|
|
2117
|
+
if "day_of_week" in date_features:
|
|
2118
|
+
df["day_of_week"] = source.dayofweek
|
|
2119
|
+
if "day_of_year" in date_features:
|
|
2120
|
+
df["day_of_year"] = source.dayofyear
|
|
2121
|
+
if "day" in date_features:
|
|
2122
|
+
df["day"] = source.day
|
|
2123
|
+
if "hour" in date_features:
|
|
2124
|
+
df["hour"] = source.hour
|
|
2125
|
+
|
|
2126
|
+
return df
|
|
2127
|
+
|
|
2128
|
+
|
|
1568
2129
|
def set_df_index_freq(df: pd.DataFrame) -> pd.DataFrame:
|
|
1569
2130
|
"""
|
|
1570
2131
|
Set the freq of a DataFrame DateTimeIndex.
|
|
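
A usage sketch for the new add_date_features helper, assuming emhass.utils is importable and using made-up data:

    import pandas as pd

    from emhass.utils import add_date_features

    idx = pd.date_range("2024-06-01", periods=3, freq="h", tz="UTC")
    df = pd.DataFrame({"load": [500.0, 650.0, 400.0]}, index=idx)
    df = add_date_features(df, date_features=["day_of_week", "hour"])
    print(df.columns.to_list())  # ['load', 'day_of_week', 'hour']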
@@ -1581,3 +2142,191 @@ def set_df_index_freq(df: pd.DataFrame) -> pd.DataFrame:
     sampling = pd.to_timedelta(np.median(idx_diff))
     df = df[~df.index.duplicated()]
     return df.asfreq(sampling)
+
+
+def parse_export_time_range(
+    start_time: str,
+    end_time: str | None,
+    time_zone: pd.Timestamp.tz,
+    logger: logging.Logger,
+) -> tuple[pd.Timestamp, pd.Timestamp] | tuple[bool, bool]:
+    """
+    Parse and validate start_time and end_time for export operations.
+
+    :param start_time: Start time string in ISO format
+    :type start_time: str
+    :param end_time: End time string in ISO format (optional)
+    :type end_time: str | None
+    :param time_zone: Timezone for localization
+    :type time_zone: pd.Timestamp.tz
+    :param logger: Logger object
+    :type logger: logging.Logger
+    :return: Tuple of (start_dt, end_dt) or (False, False) on error
+    :rtype: tuple[pd.Timestamp, pd.Timestamp] | tuple[bool, bool]
+    """
+    try:
+        start_dt = pd.to_datetime(start_time)
+        if start_dt.tz is None:
+            start_dt = start_dt.tz_localize(time_zone)
+    except Exception as e:
+        logger.error(f"Invalid start_time format: {start_time}. Error: {e}")
+        logger.error("Use format like '2024-01-01' or '2024-01-01 00:00:00'")
+        return False, False
+
+    if end_time:
+        try:
+            end_dt = pd.to_datetime(end_time)
+            if end_dt.tz is None:
+                end_dt = end_dt.tz_localize(time_zone)
+        except Exception as e:
+            logger.error(f"Invalid end_time format: {end_time}. Error: {e}")
+            return False, False
+    else:
+        end_dt = pd.Timestamp.now(tz=time_zone)
+        logger.info(f"No end_time specified, using current time: {end_dt}")
+
+    return start_dt, end_dt
+
+
+def clean_sensor_column_names(df: pd.DataFrame, timestamp_col: str) -> pd.DataFrame:
+    """
+    Clean sensor column names by removing 'sensor.' prefix.
+
+    :param df: Input DataFrame with sensor columns
+    :type df: pd.DataFrame
+    :param timestamp_col: Name of timestamp column to preserve
+    :type timestamp_col: str
+    :return: DataFrame with cleaned column names
+    :rtype: pd.DataFrame
+    """
+    column_mapping = {}
+    for col in df.columns:
+        if col != timestamp_col and col.startswith("sensor."):
+            column_mapping[col] = col.replace("sensor.", "")
+    return df.rename(columns=column_mapping)
+
+
+def handle_nan_values(
+    df: pd.DataFrame,
+    handle_nan: str,
+    timestamp_col: str,
+    logger: logging.Logger,
+) -> pd.DataFrame:
+    """
+    Handle NaN values in DataFrame according to specified strategy.
+
+    :param df: Input DataFrame
+    :type df: pd.DataFrame
+    :param handle_nan: Strategy for handling NaN values
+    :type handle_nan: str
+    :param timestamp_col: Name of timestamp column to exclude from processing
+    :type timestamp_col: str
+    :param logger: Logger object
+    :type logger: logging.Logger
+    :return: DataFrame with NaN values handled
+    :rtype: pd.DataFrame
+    """
+    nan_count_before = df.isna().sum().sum()
+    if nan_count_before == 0:
+        return df
+
+    logger.info(f"Found {nan_count_before} NaN values, applying handle_nan method: {handle_nan}")
+
+    if handle_nan == "drop":
+        df = df.dropna()
+        logger.info(f"Dropped rows with NaN. Remaining rows: {len(df)}")
+    elif handle_nan == "fill_zero":
+        # Exclude timestamp_col from fillna to avoid unintended changes
+        fill_cols = [col for col in df.columns if col != timestamp_col]
+        df[fill_cols] = df[fill_cols].fillna(0)
+        logger.info("Filled NaN values with 0 (excluding timestamp)")
+    elif handle_nan == "interpolate":
+        numeric_cols = df.select_dtypes(include=[np.number]).columns
+        # Exclude timestamp_col from interpolation
+        interp_cols = [col for col in numeric_cols if col != timestamp_col]
+        df[interp_cols] = df[interp_cols].interpolate(method="linear", limit_direction="both")
+        df[interp_cols] = df[interp_cols].ffill().bfill()
+        logger.info("Interpolated NaN values (excluding timestamp)")
+    elif handle_nan == "forward_fill":
+        # Exclude timestamp_col from forward fill
+        fill_cols = [col for col in df.columns if col != timestamp_col]
+        df[fill_cols] = df[fill_cols].ffill()
+        logger.info("Forward filled NaN values (excluding timestamp)")
+    elif handle_nan == "backward_fill":
+        # Exclude timestamp_col from backward fill
+        fill_cols = [col for col in df.columns if col != timestamp_col]
+        df[fill_cols] = df[fill_cols].bfill()
+        logger.info("Backward filled NaN values (excluding timestamp)")
+    elif handle_nan == "keep":
+        logger.info("Keeping NaN values as-is")
+    else:
+        logger.warning(f"Unknown handle_nan option '{handle_nan}', keeping NaN values")
+
+    return df
+
+
+def resample_and_filter_data(
+    df: pd.DataFrame,
+    start_dt: pd.Timestamp,
+    end_dt: pd.Timestamp,
+    resample_freq: str,
+    logger: logging.Logger,
+) -> pd.DataFrame | bool:
+    """
+    Filter DataFrame to time range and resample to specified frequency.
+
+    :param df: Input DataFrame with datetime index
+    :type df: pd.DataFrame
+    :param start_dt: Start datetime for filtering
+    :type start_dt: pd.Timestamp
+    :param end_dt: End datetime for filtering
+    :type end_dt: pd.Timestamp
+    :param resample_freq: Resampling frequency string (e.g., '1h', '30min')
+    :type resample_freq: str
+    :param logger: Logger object
+    :type logger: logging.Logger
+    :return: Resampled DataFrame or False on error
+    :rtype: pd.DataFrame | bool
+    """
+    # Validate that DataFrame index is datetime and properly localized
+    if not isinstance(df.index, pd.DatetimeIndex):
+        logger.error(f"DataFrame index must be DatetimeIndex, got {type(df.index).__name__}")
+        return False
+
+    # Check if timezone aware and matches expected timezone
+    if df.index.tz is None:
+        logger.warning("DataFrame index is timezone-naive, localizing to match start/end times")
+        df = df.copy()
+        df.index = df.index.tz_localize(start_dt.tz)
+    elif df.index.tz != start_dt.tz:
+        logger.warning(
+            f"DataFrame timezone ({df.index.tz}) differs from filter timezone ({start_dt.tz}), converting"
+        )
+        df = df.copy()
+        df.index = df.index.tz_convert(start_dt.tz)
+
+    # Filter to exact time range
+    df_filtered = df[(df.index >= start_dt) & (df.index <= end_dt)]
+
+    if df_filtered.empty:
+        logger.error("No data in the specified time range after filtering")
+        return False
+
+    logger.info(f"Retrieved {len(df_filtered)} data points")
+
+    # Resample to specified frequency
+    logger.info(f"Resampling data to frequency: {resample_freq}")
+    try:
+        df_resampled = df_filtered.resample(resample_freq).mean()
+        df_resampled = df_resampled.dropna(how="all")
+
+        if df_resampled.empty:
+            logger.error("No data after resampling. Check frequency and data availability.")
+            return False
+
+        logger.info(f"After resampling: {len(df_resampled)} data points")
+        return df_resampled
+
+    except Exception as e:
+        logger.error(f"Error during resampling: {e}")
+        return False