emhass 0.11.2__py3-none-any.whl → 0.11.3__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the registry.
- emhass/command_line.py +702 -373
- emhass/data/associations.csv +1 -1
- emhass/forecast.py +671 -346
- emhass/machine_learning_forecaster.py +204 -105
- emhass/machine_learning_regressor.py +26 -7
- emhass/optimization.py +1017 -471
- emhass/retrieve_hass.py +226 -79
- emhass/static/data/param_definitions.json +5 -4
- emhass/utils.py +689 -455
- emhass/web_server.py +339 -225
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/METADATA +17 -8
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/RECORD +16 -16
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/WHEEL +1 -1
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/LICENSE +0 -0
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/entry_points.txt +0 -0
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/top_level.txt +0 -0
emhass/utils.py
CHANGED
@@ -1,26 +1,26 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-
+import ast
+import copy
 import csv
-import …
-from typing import Tuple, Optional
-from datetime import datetime, timedelta, timezone
+import json
 import logging
+import os
 import pathlib
-import …
-import …
+from datetime import datetime, timedelta, timezone
+from typing import Optional, Tuple
+
 import numpy as np
 import pandas as pd
-
-import yaml
+import plotly.express as px
 import pytz
-import …
+import yaml
+from requests import get
 
-
+from emhass.machine_learning_forecaster import MLForecaster
 
 pd.options.plotting.backend = "plotly"
 
-from emhass.machine_learning_forecaster import MLForecaster
 
 def get_root(file: str, num_parent: Optional[int] = 3) -> str:
     """
@@ -44,8 +44,12 @@ def get_root(file: str, num_parent: Optional[int] = 3) -> str:
     return root
 
 
-def get_logger(fun_name: str, emhass_conf: dict, save_to_file: Optional[bool] = …
-…
+def get_logger(
+    fun_name: str,
+    emhass_conf: dict,
+    save_to_file: Optional[bool] = True,
+    logging_level: Optional[str] = "DEBUG",
+) -> Tuple[logging.Logger, logging.StreamHandler]:
     """
     Create a simple logger object.
 
@@ -64,10 +68,10 @@ def get_logger(fun_name: str, emhass_conf: dict, save_to_file: Optional[bool] =
     logger.propagate = True
     logger.fileSetting = save_to_file
     if save_to_file:
-        if …
-            ch = logging.FileHandler(emhass_conf[…
+        if os.path.isdir(emhass_conf["data_path"]):
+            ch = logging.FileHandler(emhass_conf["data_path"] / "logger_emhass.log")
         else:
-            raise Exception("Unable to access data_path: "+emhass_conf[…
+            raise Exception("Unable to access data_path: " + emhass_conf["data_path"])
     else:
         ch = logging.StreamHandler()
     if logging_level == "DEBUG":
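The two hunks above only reformat `get_logger`; its behavior is unchanged: with `save_to_file=True` it still writes to `logger_emhass.log` under `emhass_conf["data_path"]` and raises if that directory is missing. A minimal usage sketch (the temporary directory is a placeholder, not part of the diff):

    import pathlib
    import tempfile
    from emhass.utils import get_logger

    # Placeholder paths dictionary; get_logger only needs "data_path" here.
    emhass_conf = {"data_path": pathlib.Path(tempfile.mkdtemp())}

    # Returns the logger plus its handler; messages go to data_path/logger_emhass.log.
    logger, ch = get_logger(__name__, emhass_conf, save_to_file=True)
    logger.info("logger ready")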
@@ -94,8 +98,12 @@ def get_logger(fun_name: str, emhass_conf: dict, save_to_file: Optional[bool] =
     return logger, ch
 
 
-def get_forecast_dates(freq: int, delta_forecast: int, time_zone: datetime.tzinf…
-…
+def get_forecast_dates(
+    freq: int,
+    delta_forecast: int,
+    time_zone: datetime.tzinfo,
+    timedelta_days: Optional[int] = 0,
+) -> pd.core.indexes.datetimes.DatetimeIndex:
     """
     Get the date_range list of the needed future dates using the delta_forecast parameter.
 
@@ -110,17 +118,36 @@ def get_forecast_dates(freq: int, delta_forecast: int, time_zone: datetime.tzinf
 
     """
    freq = pd.to_timedelta(freq, "minutes")
-    start_forecast = pd.Timestamp(datetime.now()).replace(…
-    …
-    …
-    …
-    …
+    start_forecast = pd.Timestamp(datetime.now()).replace(
+        hour=0, minute=0, second=0, microsecond=0
+    )
+    end_forecast = (start_forecast + pd.Timedelta(days=delta_forecast)).replace(
+        microsecond=0
+    )
+    forecast_dates = (
+        pd.date_range(
+            start=start_forecast,
+            end=end_forecast + timedelta(days=timedelta_days) - freq,
+            freq=freq,
+            tz=time_zone,
+        )
+        .tz_convert("utc")
+        .round(freq, ambiguous="infer", nonexistent="shift_forward")
+        .tz_convert(time_zone)
+    )
     return forecast_dates
 
 
-def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic…
-…
-…
+def treat_runtimeparams(
+    runtimeparams: str,
+    params: str,
+    retrieve_hass_conf: dict,
+    optim_conf: dict,
+    plant_conf: dict,
+    set_type: str,
+    logger: logging.Logger,
+    emhass_conf: dict,
+) -> Tuple[str, dict]:
     """
     Treat the passed optimization runtime parameters.
 
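Beyond the reformatting, the rewritten `get_forecast_dates` body above changes behavior slightly: the date range is now round-tripped through UTC and rounded with `ambiguous="infer", nonexistent="shift_forward"`, which avoids errors on DST transition days. A sketch of calling it, assuming the package is installed:

    import pytz
    from emhass.utils import get_forecast_dates

    # 30-minute steps over one forecast day in a DST-aware zone.
    time_zone = pytz.timezone("Europe/Paris")
    forecast_dates = get_forecast_dates(freq=30, delta_forecast=1, time_zone=time_zone)

    # Typically 48 half-hour steps; 46 or 50 on the DST switch days.
    print(len(forecast_dates), forecast_dates[0], forecast_dates[-1])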
@@ -128,31 +155,38 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
     :type runtimeparams: str
     :param params: Built configuration parameters
     :type params: str
-    :param retrieve_hass_conf: …
+    :param retrieve_hass_conf: Config dictionary for data retrieving parameters.
     :type retrieve_hass_conf: dict
-    :param optim_conf: …
+    :param optim_conf: Config dictionary for optimization parameters.
     :type optim_conf: dict
-    :param plant_conf: …
+    :param plant_conf: Config dictionary for technical plant parameters.
     :type plant_conf: dict
     :param set_type: The type of action to be performed.
     :type set_type: str
     :param logger: The logger object.
     :type logger: logging.Logger
+    :param emhass_conf: Dictionary containing the needed emhass paths
+    :type emhass_conf: dict
     :return: Returning the params and optimization parameter container.
     :rtype: Tuple[str, dict]
 
     """
-    # …
+    # Check if passed params is a dict
     if (params != None) and (params != "null"):
         if type(params) is str:
             params = json.loads(params)
     else:
         params = {}
 
+    # Merge current config categories to params
+    params["retrieve_hass_conf"].update(retrieve_hass_conf)
+    params["optim_conf"].update(optim_conf)
+    params["plant_conf"].update(plant_conf)
+
     # Some default data needed
     custom_deferrable_forecast_id = []
     custom_predicted_temperature_id = []
-    for k in range(optim_conf[…
+    for k in range(params["optim_conf"]["number_of_deferrable_loads"]):
         custom_deferrable_forecast_id.append(
             {
                 "entity_id": "sensor.p_deferrable{}".format(k),
@@ -233,18 +267,132 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
     else:
         params["passed_data"] = default_passed_dict
 
+    # If any runtime parameters where passed in action call
     if runtimeparams is not None:
         if type(runtimeparams) is str:
             runtimeparams = json.loads(runtimeparams)
-…
-…
-…
-…
-…
+
+        # Loop though parameters stored in association file, Check to see if any stored in runtime
+        # If true, set runtime parameter to params
+        if emhass_conf["associations_path"].exists():
+            with emhass_conf["associations_path"].open("r") as data:
+                associations = list(csv.reader(data, delimiter=","))
+            # Association file key reference
+            # association[0] = config categories
+            # association[1] = legacy parameter name
+            # association[2] = parameter (config.json/config_defaults.json)
+            # association[3] = parameter list name if exists (not used, from legacy options.json)
+            for association in associations:
+                # Check parameter name exists in runtime
+                if runtimeparams.get(association[2], None) is not None:
+                    params[association[0]][association[2]] = runtimeparams[
+                        association[2]
+                    ]
+                # Check Legacy parameter name runtime
+                elif runtimeparams.get(association[1], None) is not None:
+                    params[association[0]][association[2]] = runtimeparams[
+                        association[1]
+                    ]
+        else:
+            logger.warning(
+                "Cant find associations file (associations.csv) in: "
+                + str(emhass_conf["associations_path"])
+            )
+
+        # Generate forecast_dates
+        if (
+            "optimization_time_step" in runtimeparams.keys()
+            or "freq" in runtimeparams.keys()
+        ):
+            optimization_time_step = int(
+                runtimeparams.get("optimization_time_step", runtimeparams.get("freq"))
+            )
+            params["retrieve_hass_conf"]["optimization_time_step"] = pd.to_timedelta(
+                optimization_time_step
+            )
+        else:
+            optimization_time_step = int(
+                params["retrieve_hass_conf"]["optimization_time_step"].seconds / 60.0
+            )
+        if (
+            runtimeparams.get("delta_forecast_daily", None) is not None
+            or runtimeparams.get("delta_forecast", None) is not None
+        ):
+            delta_forecast = int(
+                runtimeparams.get(
+                    "delta_forecast_daily", runtimeparams["delta_forecast"]
+                )
+            )
+            params["optim_conf"]["delta_forecast_daily"] = pd.Timedelta(
+                days=optim_conf["delta_forecast_daily"]
+            )
+        else:
+            delta_forecast = int(params["optim_conf"]["delta_forecast_daily"].days)
+        if runtimeparams.get("time_zone", None) is not None:
+            time_zone = pytz.timezone(params["retrieve_hass_conf"]["time_zone"])
+            params["retrieve_hass_conf"]["time_zone"] = time_zone
+        else:
+            time_zone = params["retrieve_hass_conf"]["time_zone"]
+
         forecast_dates = get_forecast_dates(
-            optimization_time_step, delta_forecast, time_zone…
-…
-…
+            optimization_time_step, delta_forecast, time_zone
+        )
+
+        # Treat passed forecast data lists
+        list_forecast_key = [
+            "pv_power_forecast",
+            "load_power_forecast",
+            "load_cost_forecast",
+            "prod_price_forecast",
+            "outdoor_temperature_forecast",
+        ]
+        forecast_methods = [
+            "weather_forecast_method",
+            "load_forecast_method",
+            "load_cost_forecast_method",
+            "production_price_forecast_method",
+            "outdoor_temperature_forecast_method",
+        ]
+
+        # Loop forecasts, check if value is a list and greater than or equal to forecast_dates
+        for method, forecast_key in enumerate(list_forecast_key):
+            if forecast_key in runtimeparams.keys():
+                if isinstance(runtimeparams[forecast_key], list) and len(
+                    runtimeparams[forecast_key]
+                ) >= len(forecast_dates):
+                    params["passed_data"][forecast_key] = runtimeparams[forecast_key]
+                    params["optim_conf"][forecast_methods[method]] = "list"
+                else:
+                    logger.error(
+                        f"ERROR: The passed data is either not a list or the length is not correct, length should be {str(len(forecast_dates))}"
+                    )
+                    logger.error(
+                        f"Passed type is {str(type(runtimeparams[forecast_key]))} and length is {str(len(runtimeparams[forecast_key]))}"
+                    )
+                # Check if string contains list, if so extract
+                if isinstance(runtimeparams[forecast_key], str):
+                    if isinstance(ast.literal_eval(runtimeparams[forecast_key]), list):
+                        runtimeparams[forecast_key] = ast.literal_eval(
+                            runtimeparams[forecast_key]
+                        )
+                list_non_digits = [
+                    x
+                    for x in runtimeparams[forecast_key]
+                    if not (isinstance(x, int) or isinstance(x, float))
+                ]
+                if len(list_non_digits) > 0:
+                    logger.warning(
+                        f"There are non numeric values on the passed data for {forecast_key}, check for missing values (nans, null, etc)"
+                    )
+                    for x in list_non_digits:
+                        logger.warning(
+                            f"This value in {forecast_key} was detected as non digits: {str(x)}"
+                        )
+            else:
+                params["passed_data"][forecast_key] = None
+
+        # Add runtime exclusive (not in config) parameters to params
+        # regressor-model-fit
         if set_type == "regressor-model-fit":
             if "csv_file" in runtimeparams:
                 csv_file = runtimeparams["csv_file"]
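Two things happen in the large hunk above: runtime parameters are now folded into the merged `params` dict through the associations.csv lookup, and the forecast-list handling moved inside the same block. The acceptance rule for a runtime forecast list is that it must be an actual list at least as long as `forecast_dates`, and accepting it flips the matching `*_forecast_method` to "list". A standalone sketch of that rule (not the library function itself):

    # Sketch of the list-validation rule from the hunk above.
    def accept_runtime_forecast(values, forecast_dates):
        # Accept only a real list that covers every forecast timestep.
        if isinstance(values, list) and len(values) >= len(forecast_dates):
            return values, "list"  # data plus the new forecast method
        return None, None  # rejected; the caller logs an error

    data, method = accept_runtime_forecast([100.0] * 48, range(48))
    assert method == "list"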
@@ -265,7 +413,7 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
         else:
             date_features = runtimeparams["date_features"]
             params["passed_data"]["date_features"] = date_features
-
+
         # regressor-model-predict
         if set_type == "regressor-model-predict":
             if "new_values" in runtimeparams:
@@ -280,101 +428,80 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
             if "target" in runtimeparams:
                 target = runtimeparams["target"]
                 params["passed_data"]["target"] = target
-
-        # …
+
+        # MPC control case
         if set_type == "naive-mpc-optim":
             if "prediction_horizon" not in runtimeparams.keys():
                 prediction_horizon = 10  # 10 time steps by default
             else:
                 prediction_horizon = runtimeparams["prediction_horizon"]
             params["passed_data"]["prediction_horizon"] = prediction_horizon
-            if …
-                soc_init = plant_conf[…
+            if "soc_init" not in runtimeparams.keys():
+                soc_init = params["plant_conf"]["battery_target_state_of_charge"]
             else:
                 soc_init = runtimeparams["soc_init"]
             params["passed_data"]["soc_init"] = soc_init
             if "soc_final" not in runtimeparams.keys():
-                soc_final = plant_conf[…
+                soc_final = params["plant_conf"]["battery_target_state_of_charge"]
             else:
                 soc_final = runtimeparams["soc_final"]
             params["passed_data"]["soc_final"] = soc_final
-            …
-                def_total_hours = optim_conf.get('operating_hours_of_each_deferrable_load')
-            else:
-                def_total_hours = runtimeparams.get(
-                    'operating_hours_of_each_deferrable_load', runtimeparams.get('def_total_hours'))
-            params["passed_data"]['operating_hours_of_each_deferrable_load'] = def_total_hours
-            if 'start_timesteps_of_each_deferrable_load' not in runtimeparams.keys() and 'def_start_timestep' in runtimeparams.keys():
-                def_start_timestep = optim_conf.get('start_timesteps_of_each_deferrable_load')
-            else:
-                def_start_timestep = runtimeparams.get(
-                    'start_timesteps_of_each_deferrable_load', runtimeparams.get('def_start_timestep'))
-            params["passed_data"]['start_timesteps_of_each_deferrable_load'] = def_start_timestep
-            if 'end_timesteps_of_each_deferrable_load' not in runtimeparams.keys() and 'def_end_timestep' not in runtimeparams.keys():
-                def_end_timestep = optim_conf.get('end_timesteps_of_each_deferrable_load')
-            else:
-                def_end_timestep = runtimeparams.get(
-                    'end_timesteps_of_each_deferrable_load', runtimeparams.get('def_end_timestep'))
-            params["passed_data"]['end_timesteps_of_each_deferrable_load'] = def_end_timestep
+
             forecast_dates = copy.deepcopy(forecast_dates)[0:prediction_horizon]
+
             # Load the default config
-            if "def_load_config" in …
-            …
-            …
-            …
-            …
-            …
+            if "def_load_config" in runtimeparams:
+                params["optim_conf"]["def_load_config"] = runtimeparams[
+                    "def_load_config"
+                ]
+            if "def_load_config" in params["optim_conf"]:
+                for k in range(len(params["optim_conf"]["def_load_config"])):
+                    if "thermal_config" in params["optim_conf"]["def_load_config"][k]:
+                        if (
+                            "heater_desired_temperatures" in runtimeparams
+                            and len(runtimeparams["heater_desired_temperatures"]) > k
+                        ):
+                            params["optim_conf"]["def_load_config"][k][
+                                "thermal_config"
+                            ]["desired_temperatures"] = runtimeparams[
+                                "heater_desired_temperatures"
+                            ][k]
+                        if (
+                            "heater_start_temperatures" in runtimeparams
+                            and len(runtimeparams["heater_start_temperatures"]) > k
+                        ):
+                            params["optim_conf"]["def_load_config"][k][
+                                "thermal_config"
+                            ]["start_temperature"] = runtimeparams[
+                                "heater_start_temperatures"
+                            ][k]
         else:
             params["passed_data"]["prediction_horizon"] = None
             params["passed_data"]["soc_init"] = None
             params["passed_data"]["soc_final"] = None
-            …
-            params["passed_data"]['start_timesteps_of_each_deferrable_load'] = None
-            params["passed_data"]['end_timesteps_of_each_deferrable_load'] = None
-        # Treat passed forecast data lists
-        list_forecast_key = ['pv_power_forecast', 'load_power_forecast',
-                             'load_cost_forecast', 'prod_price_forecast', 'outdoor_temperature_forecast']
-        forecast_methods = ['weather_forecast_method', 'load_forecast_method', 'load_cost_forecast_method',
-                            'production_price_forecast_method', 'outdoor_temperature_forecast_method']
-
-        # Loop forecasts, check if value is a list and greater than or equal to forecast_dates
-        for method, forecast_key in enumerate(list_forecast_key):
-            if forecast_key in runtimeparams.keys():
-                if type(runtimeparams[forecast_key]) == list and len(runtimeparams[forecast_key]) >= len(forecast_dates):
-                    params['passed_data'][forecast_key] = runtimeparams[forecast_key]
-                    optim_conf[forecast_methods[method]] = 'list'
-                else:
-                    logger.error(
-                        f"ERROR: The passed data is either not a list or the length is not correct, length should be {str(len(forecast_dates))}")
-                    logger.error(
-                        f"Passed type is {str(type(runtimeparams[forecast_key]))} and length is {str(len(runtimeparams[forecast_key]))}")
-                # Check if string contains list, if so extract
-                if type(runtimeparams[forecast_key]) == str:
-                    if type(ast.literal_eval(runtimeparams[forecast_key])) == list:
-                        runtimeparams[forecast_key] = ast.literal_eval(runtimeparams[forecast_key])
-                list_non_digits = [x for x in runtimeparams[forecast_key] if not (
-                    isinstance(x, int) or isinstance(x, float))]
-                if len(list_non_digits) > 0:
-                    logger.warning(
-                        f"There are non numeric values on the passed data for {forecast_key}, check for missing values (nans, null, etc)")
-                    for x in list_non_digits:
-                        logger.warning(
-                            f"This value in {forecast_key} was detected as non digits: {str(x)}")
-            else:
-                params['passed_data'][forecast_key] = None
-
+
         # Treat passed data for forecast model fit/predict/tune at runtime
-        if …
-…
+        if (
+            params["passed_data"].get("historic_days_to_retrieve", None) is not None
+            and params["passed_data"]["historic_days_to_retrieve"] < 9
+        ):
+            logger.warning(
+                "warning `days_to_retrieve` is set to a value less than 9, this could cause an error with the fit"
+            )
+            logger.warning(
+                "setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune"
+            )
+            params["passed_data"]["historic_days_to_retrieve"] = 9
         else:
-            …
-            …
-            …
-            …
-            …
-            …
-            …
+            if params["retrieve_hass_conf"].get("historic_days_to_retrieve", 0) < 9:
+                logger.debug(
+                    "setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune"
+                )
+                params["passed_data"]["historic_days_to_retrieve"] = 9
+            else:
+                params["passed_data"]["historic_days_to_retrieve"] = params[
+                    "retrieve_hass_conf"
+                ]["historic_days_to_retrieve"]
         if "model_type" not in runtimeparams.keys():
             model_type = "load_forecast"
         else:
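In the MPC branch above, the `soc_init`/`soc_final` defaults now come from the merged `params["plant_conf"]` rather than the raw `plant_conf` argument, and the per-load timestep overrides (`def_total_hours` and friends) are gone; they are handled by the associations loop earlier in the function. A hypothetical runtime payload for a `naive-mpc-optim` call would still look like:

    import json

    # Hypothetical runtime parameters for a naive-mpc-optim action.
    runtimeparams = json.dumps({
        "prediction_horizon": 10,  # defaults to 10 timesteps when omitted
        "soc_init": 0.4,           # defaults to battery_target_state_of_charge
        "soc_final": 0.6,
    })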
@@ -409,13 +536,15 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
             perform_backtest = False
         else:
             perform_backtest = ast.literal_eval(
-                str(runtimeparams["perform_backtest"]).capitalize()…
+                str(runtimeparams["perform_backtest"]).capitalize()
+            )
         params["passed_data"]["perform_backtest"] = perform_backtest
         if "model_predict_publish" not in runtimeparams.keys():
             model_predict_publish = False
         else:
             model_predict_publish = ast.literal_eval(
-                str(runtimeparams["model_predict_publish"]).capitalize()…
+                str(runtimeparams["model_predict_publish"]).capitalize()
+            )
         params["passed_data"]["model_predict_publish"] = model_predict_publish
         if "model_predict_entity_id" not in runtimeparams.keys():
             model_predict_entity_id = "sensor.p_load_forecast_custom_model"
@@ -425,13 +554,19 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
         if "model_predict_unit_of_measurement" not in runtimeparams.keys():
             model_predict_unit_of_measurement = "W"
         else:
-            model_predict_unit_of_measurement = runtimeparams[…
-…
+            model_predict_unit_of_measurement = runtimeparams[
+                "model_predict_unit_of_measurement"
+            ]
+        params["passed_data"]["model_predict_unit_of_measurement"] = (
+            model_predict_unit_of_measurement
+        )
         if "model_predict_friendly_name" not in runtimeparams.keys():
             model_predict_friendly_name = "Load Power Forecast custom ML model"
         else:
             model_predict_friendly_name = runtimeparams["model_predict_friendly_name"]
-        params["passed_data"]["model_predict_friendly_name"] = …
+        params["passed_data"]["model_predict_friendly_name"] = (
+            model_predict_friendly_name
+        )
         if "mlr_predict_entity_id" not in runtimeparams.keys():
             mlr_predict_entity_id = "sensor.mlr_predict"
         else:
@@ -440,14 +575,18 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
         if "mlr_predict_unit_of_measurement" not in runtimeparams.keys():
             mlr_predict_unit_of_measurement = None
         else:
-            mlr_predict_unit_of_measurement = runtimeparams[…
-…
+            mlr_predict_unit_of_measurement = runtimeparams[
+                "mlr_predict_unit_of_measurement"
+            ]
+        params["passed_data"]["mlr_predict_unit_of_measurement"] = (
+            mlr_predict_unit_of_measurement
+        )
         if "mlr_predict_friendly_name" not in runtimeparams.keys():
             mlr_predict_friendly_name = "mlr predictor"
         else:
             mlr_predict_friendly_name = runtimeparams["mlr_predict_friendly_name"]
         params["passed_data"]["mlr_predict_friendly_name"] = mlr_predict_friendly_name
-
+
         # Treat passed data for other parameters
         if "alpha" not in runtimeparams.keys():
             alpha = 0.5
@@ -459,24 +598,30 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
         else:
             beta = runtimeparams["beta"]
         params["passed_data"]["beta"] = beta
+
         # Param to save forecast cache (i.e. Solcast)
         if "weather_forecast_cache" not in runtimeparams.keys():
             weather_forecast_cache = False
         else:
             weather_forecast_cache = runtimeparams["weather_forecast_cache"]
         params["passed_data"]["weather_forecast_cache"] = weather_forecast_cache
+
         # Param to make sure optimization only uses cached data. (else produce error)
         if "weather_forecast_cache_only" not in runtimeparams.keys():
             weather_forecast_cache_only = False
         else:
             weather_forecast_cache_only = runtimeparams["weather_forecast_cache_only"]
-        params["passed_data"]["weather_forecast_cache_only"] = …
+        params["passed_data"]["weather_forecast_cache_only"] = (
+            weather_forecast_cache_only
+        )
+
         # A condition to manually save entity data under data_path/entities after optimization
         if "entity_save" not in runtimeparams.keys():
             entity_save = ""
         else:
             entity_save = runtimeparams["entity_save"]
         params["passed_data"]["entity_save"] = entity_save
+
         # A condition to put a prefix on all published data, or check for saved data under prefix name
         if "publish_prefix" not in runtimeparams.keys():
             publish_prefix = ""
@@ -485,83 +630,25 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
         params["passed_data"]["publish_prefix"] = publish_prefix
 
         # Treat optimization (optim_conf) configuration parameters passed at runtime
-        if 'number_of_deferrable_loads' in runtimeparams.keys() or 'num_def_loads' in runtimeparams.keys():
-            optim_conf['number_of_deferrable_loads'] = runtimeparams.get(
-                'number_of_deferrable_loads', runtimeparams.get('num_def_loads'))
-        if 'nominal_power_of_deferrable_loads' in runtimeparams.keys() or 'P_deferrable_nom' in runtimeparams.keys():
-            optim_conf['nominal_power_of_deferrable_loads'] = runtimeparams.get(
-                'nominal_power_of_deferrable_loads', runtimeparams.get('P_deferrable_nom'))
-        if 'operating_hours_of_each_deferrable_load' in runtimeparams.keys() or 'def_total_hours' in runtimeparams.keys():
-            optim_conf['operating_hours_of_each_deferrable_load'] = runtimeparams.get(
-                'operating_hours_of_each_deferrable_load', runtimeparams.get('def_total_hours'))
-        if 'start_timesteps_of_each_deferrable_load' in runtimeparams.keys() or 'def_start_timestep' in runtimeparams.keys():
-            optim_conf['start_timesteps_of_each_deferrable_load'] = runtimeparams.get(
-                'start_timesteps_of_each_deferrable_load', runtimeparams.get('def_start_timestep'))
-        if 'end_timesteps_of_each_deferrable_load' in runtimeparams.keys() or 'def_end_timestep' in runtimeparams.keys():
-            optim_conf['end_timesteps_of_each_deferrable_load'] = runtimeparams.get(
-                'end_timesteps_of_each_deferrable_load', runtimeparams.get('def_end_timestep'))
         if "def_current_state" in runtimeparams.keys():
-            optim_conf["def_current_state"] = [
-                bool(s) for s in runtimeparams["def_current_state"]
-        if 'treat_deferrable_load_as_semi_cont' in runtimeparams.keys() or 'treat_def_as_semi_cont' in runtimeparams.keys():
-            optim_conf['treat_deferrable_load_as_semi_cont'] = [
-                ast.literal_eval(str(k).capitalize()) for k in runtimeparams.get('treat_deferrable_load_as_semi_cont',runtimeparams.get('treat_def_as_semi_cont'))
-            ]
-        if 'set_deferrable_load_single_constant' in runtimeparams.keys() or 'set_def_constant' in runtimeparams.keys():
-            optim_conf['set_deferrable_load_single_constant'] = [
-                ast.literal_eval(str(k).capitalize()) for k in runtimeparams.get('set_deferrable_load_single_constant',runtimeparams.get('set_def_constant'))
-            ]
-        if 'set_deferrable_startup_penalty' in runtimeparams.keys() or 'def_start_penalty' in runtimeparams.keys():
-            optim_conf['set_deferrable_startup_penalty'] = [
-                ast.literal_eval(str(k).capitalize()) for k in runtimeparams.get('set_deferrable_startup_penalty',runtimeparams.get('def_start_penalty'))
+            params["optim_conf"]["def_current_state"] = [
+                bool(s) for s in runtimeparams["def_current_state"]
             ]
-        if 'def_load_config' in runtimeparams:
-            optim_conf["def_load_config"] = runtimeparams['def_load_config']
-        if 'weight_battery_discharge' in runtimeparams.keys():
-            optim_conf['weight_battery_discharge'] = runtimeparams[
-                'weight_battery_discharge'
-            ]
-        if 'weight_battery_charge' in runtimeparams.keys():
-            optim_conf['weight_battery_charge'] = runtimeparams['weight_battery_charge']
 
         # Treat retrieve data from Home Assistant (retrieve_hass_conf) configuration parameters passed at runtime
-
-        retrieve_hass_conf['optimization_time_step'] = pd.to_timedelta(runtimeparams.get(
-            'optimization_time_step', runtimeparams.get('freq')), "minutes")
-        if 'continual_publish' in runtimeparams.keys():
-            retrieve_hass_conf['continual_publish'] = bool(
-                runtimeparams['continual_publish'])
+        # Secrets passed at runtime
         if "solcast_api_key" in runtimeparams.keys():
-            retrieve_hass_conf["solcast_api_key"] = runtimeparams[…
+            params["retrieve_hass_conf"]["solcast_api_key"] = runtimeparams[
+                "solcast_api_key"
+            ]
         if "solcast_rooftop_id" in runtimeparams.keys():
-            retrieve_hass_conf["solcast_rooftop_id"] = runtimeparams[
+            params["retrieve_hass_conf"]["solcast_rooftop_id"] = runtimeparams[
                 "solcast_rooftop_id"
             ]
-            optim_conf['weather_forecast_method'] = "solcast"
         if "solar_forecast_kwp" in runtimeparams.keys():
-            retrieve_hass_conf["solar_forecast_kwp"] = runtimeparams[
+            params["retrieve_hass_conf"]["solar_forecast_kwp"] = runtimeparams[
                 "solar_forecast_kwp"
             ]
-            optim_conf['weather_forecast_method'] = "solar.forecast"
-
-        # Treat system model parameters (plant) configuration parameters passed at runtime
-        if 'battery_minimum_state_of_charge' in runtimeparams.keys() or 'SOCmin' in runtimeparams.keys():
-            plant_conf['battery_minimum_state_of_charge'] = runtimeparams.get(
-                'battery_minimum_state_of_charge', runtimeparams.get('SOCmin'))
-        if 'battery_maximum_state_of_charge' in runtimeparams.keys() or 'SOCmax' in runtimeparams.keys():
-            plant_conf['battery_maximum_state_of_charge'] = runtimeparams.get(
-                'battery_maximum_state_of_charge', runtimeparams.get('SOCmax'))
-        if 'battery_target_state_of_charge' in runtimeparams.keys() or 'SOCtarget' in runtimeparams.keys():
-            plant_conf['battery_target_state_of_charge'] = runtimeparams.get(
-                'battery_target_state_of_charge', runtimeparams.get('SOCtarget'))
-        if 'battery_discharge_power_max' in runtimeparams.keys() or 'Pd_max' in runtimeparams.keys():
-            plant_conf['battery_discharge_power_max'] = runtimeparams.get(
-                'battery_discharge_power_max', runtimeparams.get('Pd_max'))
-        if 'battery_charge_power_max' in runtimeparams.keys() or 'Pc_max' in runtimeparams.keys():
-            plant_conf['battery_charge_power_max'] = runtimeparams.get(
-                'battery_charge_power_max', runtimeparams.get('Pc_max'))
-
         # Treat custom entities id's and friendly names for variables
         if "custom_pv_forecast_id" in runtimeparams.keys():
             params["passed_data"]["custom_pv_forecast_id"] = runtimeparams[
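The long deletion above is the payoff of the associations file: legacy runtime names (`num_def_loads`, `SOCmin`, `Pd_max`, and so on) no longer need one `if` block each, because every row of associations.csv maps a config category, a legacy name and a current name. A minimal sketch of that lookup, assuming the three-column row layout described in the hunk comments:

    # Sketch of the associations-driven override; the real loop lives in
    # treat_runtimeparams. Each row: (category, legacy_name, current_name).
    associations = [
        ("optim_conf", "num_def_loads", "number_of_deferrable_loads"),
        ("plant_conf", "SOCmin", "battery_minimum_state_of_charge"),
    ]
    params = {"optim_conf": {}, "plant_conf": {}}
    runtimeparams = {"SOCmin": 0.2}

    for category, legacy, current in associations:
        if current in runtimeparams:
            params[category][current] = runtimeparams[current]
        elif legacy in runtimeparams:
            params[category][current] = runtimeparams[legacy]

    assert params["plant_conf"]["battery_minimum_state_of_charge"] == 0.2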
@@ -615,7 +702,12 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
             params["passed_data"]["custom_predicted_temperature_id"] = runtimeparams[
                 "custom_predicted_temperature_id"
             ]
-
+
+    # split config categories from params
+    retrieve_hass_conf = params["retrieve_hass_conf"]
+    optim_conf = params["optim_conf"]
+    plant_conf = params["plant_conf"]
+
     # Serialize the final params
     params = json.dumps(params, default=str)
     return params, retrieve_hass_conf, optim_conf, plant_conf
@@ -623,8 +715,8 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
 
 def get_yaml_parse(params: str, logger: logging.Logger) -> Tuple[dict, dict, dict]:
     """
-    Perform parsing of the params into the configuration catagories 
-    
+    Perform parsing of the params into the configuration catagories
+
     :param params: Built configuration parameters
     :type params: str
     :param logger: The logger object
@@ -648,76 +740,16 @@ def get_yaml_parse(params: str, logger: logging.Logger) -> Tuple[dict, dict, dic
     plant_conf = input_conf.get("plant_conf", {})
 
     # Format time parameters
-    if optim_conf.get(…
-        optim_conf[…
-    …
-    retrieve_hass_conf['optimization_time_step'] = pd.to_timedelta(retrieve_hass_conf['optimization_time_step'], "minutes")
-    if retrieve_hass_conf.get('time_zone',None) is not None:
-        retrieve_hass_conf["time_zone"] = pytz.timezone(retrieve_hass_conf["time_zone"])
-
-    return retrieve_hass_conf, optim_conf, plant_conf
-
-def get_legacy_yaml_parse(emhass_conf: dict, use_secrets: Optional[bool] = True,
-                          params: Optional[str] = None) -> Tuple[dict, dict, dict]:
-    """
-    Perform parsing of the config.yaml file.
-
-    :param emhass_conf: Dictionary containing the needed emhass paths
-    :type emhass_conf: dict
-    :param use_secrets: Indicate if we should use a secrets file or not.
-        Set to False for unit tests.
-    :type use_secrets: bool, optional
-    :param params: Configuration parameters passed from data/options.json
-    :type params: str
-    :return: A tuple with the dictionaries containing the parsed data
-    :rtype: tuple(dict)
-
-    """
-    if params is None:
-        with open(emhass_conf["config_path"], 'r') as file:
-            input_conf = yaml.load(file, Loader=yaml.FullLoader)
-    else:
-        input_conf = json.loads(params)
-    if use_secrets:
-        if params is None:
-            with open(emhass_conf["config_path"].parent / 'secrets_emhass.yaml', 'r') as file:  # Assume secrets and config file paths are the same
-                input_secrets = yaml.load(file, Loader=yaml.FullLoader)
-        else:
-            input_secrets = input_conf.pop("params_secrets", None)
-
-    if type(input_conf["retrieve_hass_conf"]) == list:  # if using old config version
-        retrieve_hass_conf = dict(
-            {key: d[key] for d in input_conf["retrieve_hass_conf"] for key in d}
+    if optim_conf.get("delta_forecast_daily", None) is not None:
+        optim_conf["delta_forecast_daily"] = pd.Timedelta(
+            days=optim_conf["delta_forecast_daily"]
         )
-
-    retrieve_hass_conf = …
-    …
-    …
-    …
-        retrieve_hass_conf["hass_url"] = "http://supervisor/core/api"
-        retrieve_hass_conf["long_lived_token"] = "${SUPERVISOR_TOKEN}"
-        retrieve_hass_conf["time_zone"] = "Europe/Paris"
-        retrieve_hass_conf["lat"] = 45.83
-        retrieve_hass_conf["lon"] = 6.86
-        retrieve_hass_conf["alt"] = 4807.8
-    retrieve_hass_conf["freq"] = pd.to_timedelta(retrieve_hass_conf["freq"], "minutes")
-    retrieve_hass_conf["time_zone"] = pytz.timezone(retrieve_hass_conf["time_zone"])
-
-    if type(input_conf["optim_conf"]) == list:
-        optim_conf = dict({key: d[key] for d in input_conf["optim_conf"] for key in d})
-    else:
-        optim_conf = input_conf.get("optim_conf", {})
-
-    optim_conf["list_hp_periods"] = dict(
-        (key, d[key]) for d in optim_conf["list_hp_periods"] for key in d
-    )
-    optim_conf["delta_forecast"] = pd.Timedelta(days=optim_conf["delta_forecast"])
-
-    if type(input_conf["plant_conf"]) == list:
-        plant_conf = dict({key: d[key] for d in input_conf["plant_conf"] for key in d})
-    else:
-        plant_conf = input_conf.get("plant_conf", {})
+    if retrieve_hass_conf.get("optimization_time_step", None) is not None:
+        retrieve_hass_conf["optimization_time_step"] = pd.to_timedelta(
+            retrieve_hass_conf["optimization_time_step"], "minutes"
+        )
+    if retrieve_hass_conf.get("time_zone", None) is not None:
+        retrieve_hass_conf["time_zone"] = pytz.timezone(retrieve_hass_conf["time_zone"])
 
     return retrieve_hass_conf, optim_conf, plant_conf
 
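With `get_legacy_yaml_parse` deleted, legacy YAML files now enter only through `build_config`/`build_legacy_config_params`, and `get_yaml_parse` is reduced to splitting `params` into its categories and normalizing the three time-typed entries. That normalization is equivalent to:

    import pandas as pd
    import pytz

    # Equivalent of the new time normalization in get_yaml_parse.
    optim_conf = {"delta_forecast_daily": 1}
    retrieve_hass_conf = {"optimization_time_step": 30, "time_zone": "Europe/Paris"}

    optim_conf["delta_forecast_daily"] = pd.Timedelta(
        days=optim_conf["delta_forecast_daily"]
    )
    retrieve_hass_conf["optimization_time_step"] = pd.to_timedelta(
        retrieve_hass_conf["optimization_time_step"], "minutes"
    )
    retrieve_hass_conf["time_zone"] = pytz.timezone(retrieve_hass_conf["time_zone"])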
@@ -809,7 +841,9 @@ def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dic
     return injection_dict
 
 
-def get_injection_dict_forecast_model_fit(df_fit_pred: pd.DataFrame, mlf: MLFore…
+def get_injection_dict_forecast_model_fit(
+    df_fit_pred: pd.DataFrame, mlf: MLForecaster
+) -> dict:
     """
     Build a dictionary with graphs and tables for the webui for special MLF fit case.
 
@@ -838,7 +872,9 @@ def get_injection_dict_forecast_model_fit(df_fit_pred: pd.DataFrame, mlf: MLFore
     return injection_dict
 
 
-def get_injection_dict_forecast_model_tune(df_pred_optim: pd.DataFrame, mlf: MLF…
+def get_injection_dict_forecast_model_tune(
+    df_pred_optim: pd.DataFrame, mlf: MLForecaster
+) -> dict:
     """
     Build a dictionary with graphs and tables for the webui for special MLF tune case.
 
@@ -868,10 +904,16 @@ def get_injection_dict_forecast_model_tune(df_pred_optim: pd.DataFrame, mlf: MLF
     injection_dict["figure_0"] = image_path_0
     return injection_dict
 
-def build_config(emhass_conf: dict, logger: logging.Logger, defaults_path: str,…
-…
+
+def build_config(
+    emhass_conf: dict,
+    logger: logging.Logger,
+    defaults_path: str,
+    config_path: Optional[str] = None,
+    legacy_config_path: Optional[str] = None,
+) -> dict:
     """
-    Retrieve parameters from configuration files. 
+    Retrieve parameters from configuration files.
     priority order (low - high) = defaults_path, config_path legacy_config_path
 
     :param emhass_conf: Dictionary containing the needed emhass paths
@@ -890,39 +932,48 @@ def build_config(emhass_conf: dict, logger: logging.Logger, defaults_path: str,
 
     # Read default parameters (default root_path/data/config_defaults.json)
     if defaults_path and pathlib.Path(defaults_path).is_file():
-        with defaults_path.open(…
+        with defaults_path.open("r") as data:
             config = json.load(data)
     else:
         logger.error("config_defaults.json. does not exist ")
         return False
-
+
     # Read user config parameters if provided (default /share/config.json)
     if config_path and pathlib.Path(config_path).is_file():
-        with config_path.open(…
+        with config_path.open("r") as data:
             # Set override default parameters (config_defaults) with user given parameters (config.json)
             logger.info("Obtaining parameters from config.json:")
             config.update(json.load(data))
     else:
-        logger.info(…
-…
+        logger.info(
+            "config.json does not exist, or has not been passed. config parameters may default to config_defaults.json"
+        )
+        logger.info(
+            "you may like to generate the config.json file on the configuration page"
+        )
 
     # Check to see if legacy config_emhass.yaml was provided (default /app/config_emhass.yaml)
     # Convert legacy parameter definitions/format to match config.json
     if legacy_config_path and pathlib.Path(legacy_config_path).is_file():
-        with open(legacy_config_path,…
+        with open(legacy_config_path, "r") as data:
             legacy_config = yaml.load(data, Loader=yaml.FullLoader)
-        legacy_config_parameters = build_legacy_config_params(…
+        legacy_config_parameters = build_legacy_config_params(
+            emhass_conf, legacy_config, logger
+        )
         if type(legacy_config_parameters) is not bool:
-            logger.info(…
-…
+            logger.info(
+                "Obtaining parameters from config_emhass.yaml: (will overwrite config parameters)"
+            )
+            config.update(legacy_config_parameters)
 
     return config
 
 
-def build_legacy_config_params(emhass_conf: dict, legacy_config: dict,…
-…
+def build_legacy_config_params(
+    emhass_conf: dict, legacy_config: dict, logger: logging.Logger
+) -> dict:
     """
-    Build a config dictionary with legacy config_emhass.yaml file. 
+    Build a config dictionary with legacy config_emhass.yaml file.
     Uses the associations file to convert parameter naming conventions (to config.json/config_defaults.json).
     Extracts the parameter values and formats to match config.json.
 
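`build_config`'s layered priority is unchanged (config_defaults.json, overridden by config.json, overridden by a legacy config_emhass.yaml); this hunk only reformats the file handling. A usage sketch with hypothetical paths — note the function expects pathlib-style objects, since it calls `.open("r")` on them:

    import logging
    import pathlib
    from emhass.utils import build_config

    logger = logging.getLogger(__name__)

    # Hypothetical locations mirroring the defaults named in the comments above.
    root = pathlib.Path("/app")
    emhass_conf = {"associations_path": root / "data" / "associations.csv"}
    config = build_config(
        emhass_conf,
        logger,
        defaults_path=root / "data" / "config_defaults.json",
        config_path=pathlib.Path("/share/config.json"),
        legacy_config_path=root / "config_emhass.yaml",
    )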
@@ -936,76 +987,104 @@ def build_legacy_config_params(emhass_conf: dict, legacy_config: dict,
     :rtype: dict
     """
 
-
     # Association file key reference
     # association[0] = config catagories
     # association[1] = legacy parameter name
     # association[2] = parameter (config.json/config_defaults.json)
-    # association[3] = parameter list name if exists (not used, from legacy options.json) 
+    # association[3] = parameter list name if exists (not used, from legacy options.json)
 
     # Check each config catagories exists, else create blank dict for categories (avoid errors)
-    legacy_config[…
-    legacy_config[…
-    legacy_config[…
+    legacy_config["retrieve_hass_conf"] = legacy_config.get("retrieve_hass_conf", {})
+    legacy_config["optim_conf"] = legacy_config.get("optim_conf", {})
+    legacy_config["plant_conf"] = legacy_config.get("plant_conf", {})
     config = {}
 
     # Use associations list to map legacy parameter name with config.json parameter name
-    if emhass_conf[…
-    …
-    …
+    if emhass_conf["associations_path"].exists():
+        with emhass_conf["associations_path"].open("r") as data:
+            associations = list(csv.reader(data, delimiter=","))
     else:
-        logger.error(…
+        logger.error(
+            "Cant find associations file (associations.csv) in: "
+            + str(emhass_conf["associations_path"])
+        )
         return False
-
+
     # Loop through all parameters in association file
     # Append config with existing legacy config parameters (converting alternative parameter naming conventions with associations list)
     for association in associations:
         # if legacy config catagories exists and if legacy parameter exists in config catagories
-        if …
+        if (
+            legacy_config.get(association[0], None) is not None
+            and legacy_config[association[0]].get(association[1], None) is not None
+        ):
             config[association[2]] = legacy_config[association[0]][association[1]]
-
+
             # If config now has load_peak_hour_periods, extract from list of dict
-            if …
-            …
-            …
+            if (
+                association[2] == "load_peak_hour_periods"
+                and type(config[association[2]]) is list
+            ):
+                config[association[2]] = dict(
+                    (key, d[key]) for d in config[association[2]] for key in d
+                )
+
     return config
     # params['associations_dict'] = associations_dict
 
+
 def param_to_config(param: dict, logger: logging.Logger) -> dict:
     """
     A function that extracts the parameters from param back to the config.json format.
     Extracts parameters from config catagories.
     Attempts to exclude secrets hosed in retrieve_hass_conf.
-
+
     :param params: Built configuration parameters
     :type param: dict
     :param logger: The logger object
     :type logger: logging.Logger
     :return: The built config dictionary
     :rtype: dict
-    """ 
+    """
     logger.debug("Converting param to config")
 
     return_config = {}
 
-    config_catagories = ["retrieve_hass_conf","optim_conf","plant_conf"]
-    secret_params = […
-…
+    config_catagories = ["retrieve_hass_conf", "optim_conf", "plant_conf"]
+    secret_params = [
+        "hass_url",
+        "time_zone",
+        "Latitude",
+        "Longitude",
+        "Altitude",
+        "long_lived_token",
+        "solcast_api_key",
+        "solcast_rooftop_id",
+        "solar_forecast_kwp",
+    ]
+
     # Loop through config catagories that contain config params, and extract
     for config in config_catagories:
         for parameter in param[config]:
-            …
-            …
-            …
-            …
+            # If parameter is not a secret, append to return_config
+            if parameter not in secret_params:
+                return_config[str(parameter)] = param[config][parameter]
+
     return return_config
 
-
-def build_secrets(emhass_conf: dict, logger: logging.Logger, argument: Optional[…
-…
+
+def build_secrets(
+    emhass_conf: dict,
+    logger: logging.Logger,
+    argument: Optional[dict] = {},
+    options_path: Optional[str] = None,
+    secrets_path: Optional[str] = None,
+    no_response: Optional[bool] = False,
+) -> Tuple[dict, dict]:
     """
     Retrieve and build parameters from secrets locations (ENV, ARG, Secrets file (secrets_emhass.yaml/options.json) and/or Home Assistant (via API))
     priority order (lwo to high) = Defaults (written in function), ENV, Options json file, Home Assistant API, Secrets yaml file, Arguments
-
+
     :param emhass_conf: Dictionary containing the needed emhass paths
     :type emhass_conf: dict
     :param logger: The logger object
@@ -1022,7 +1101,7 @@ def build_secrets(emhass_conf: dict, logger: logging.Logger, argument: Optional[
     :rtype: Tuple[dict, dict]:
     """
 
-    #Set defaults to be overwritten
+    # Set defaults to be overwritten
     params_secrets = {
         "hass_url": "https://myhass.duckdns.org/",
         "long_lived_token": "thatverylongtokenhere",
@@ -1032,128 +1111,172 @@ def build_secrets(emhass_conf: dict, logger: logging.Logger, argument: Optional[
|
|
1032
1111
|
"Altitude": 4807.8,
|
1033
1112
|
"solcast_api_key": "yoursecretsolcastapikey",
|
1034
1113
|
"solcast_rooftop_id": "yourrooftopid",
|
1035
|
-
"solar_forecast_kwp": 5
|
1114
|
+
"solar_forecast_kwp": 5,
|
1036
1115
|
}
|
1037
1116
|
|
1038
1117
|
# Obtain Secrets from ENV?
|
1039
|
-
params_secrets[
|
1040
|
-
params_secrets[
|
1041
|
-
|
1042
|
-
|
1043
|
-
params_secrets[
|
1044
|
-
params_secrets[
|
1118
|
+
params_secrets["hass_url"] = os.getenv("EMHASS_URL", params_secrets["hass_url"])
|
1119
|
+
params_secrets["long_lived_token"] = os.getenv(
|
1120
|
+
"SUPERVISOR_TOKEN", params_secrets["long_lived_token"]
|
1121
|
+
)
|
1122
|
+
params_secrets["time_zone"] = os.getenv("TIME_ZONE", params_secrets["time_zone"])
|
1123
|
+
params_secrets["Latitude"] = float(os.getenv("LAT", params_secrets["Latitude"]))
|
1124
|
+
params_secrets["Longitude"] = float(os.getenv("LON", params_secrets["Longitude"]))
|
1125
|
+
params_secrets["Altitude"] = float(os.getenv("ALT", params_secrets["Altitude"]))
|
1045
1126
|
|
1046
1127
|
# Obtain secrets from options.json (Generated from EMHASS-Add-on, Home Assistant addon Configuration page) or Home Assistant API (from local Supervisor API)?
|
1047
1128
|
# Use local supervisor API to obtain secrets from Home Assistant if hass_url in options.json is empty and SUPERVISOR_TOKEN ENV exists (provided by Home Assistant when running the container as addon)
|
1048
1129
|
options = {}
|
1049
1130
|
if options_path and pathlib.Path(options_path).is_file():
|
1050
|
-
with options_path.open(
|
1131
|
+
with options_path.open("r") as data:
|
1051
1132
|
options = json.load(data)
|
1052
|
-
|
1133
|
+
|
1053
1134
|
# Obtain secrets from Home Assistant?
|
1054
|
-
url_from_options = options.get(
|
1055
|
-
key_from_options = options.get(
|
1135
|
+
url_from_options = options.get("hass_url", "empty")
|
1136
|
+
key_from_options = options.get("long_lived_token", "empty")
|
1056
1137
|
|
1057
1138
|
# If data path specified by options.json, overwrite emhass_conf['data_path']
|
1058
|
-
if
|
1059
|
-
|
1060
|
-
|
1139
|
+
if (
|
1140
|
+
options.get("data_path", None) != None
|
1141
|
+
and pathlib.Path(options["data_path"]).exists()
|
1142
|
+
):
|
1143
|
+
emhass_conf["data_path"] = pathlib.Path(options["data_path"])
|
1144
|
+
|
1061
1145
|
# Check to use Home Assistant local API
|
1062
|
-
if
|
1063
|
-
|
1064
|
-
|
1065
|
-
|
1066
|
-
|
1067
|
-
|
1146
|
+
if (
|
1147
|
+
not no_response
|
1148
|
+
and (
|
1149
|
+
url_from_options == "empty"
|
1150
|
+
or url_from_options == ""
|
1151
|
+
or url_from_options == "http://supervisor/core/api"
|
1152
|
+
)
|
1153
|
+
and os.getenv("SUPERVISOR_TOKEN", None) is not None
|
1154
|
+
):
|
1155
|
+
params_secrets["long_lived_token"] = os.getenv("SUPERVISOR_TOKEN", None)
|
1156
|
+
params_secrets["hass_url"] = "http://supervisor/core/api"
|
1068
1157
|
headers = {
|
1069
|
-
|
1070
|
-
|
1158
|
+
"Authorization": "Bearer " + params_secrets["long_lived_token"],
|
1159
|
+
"content-type": "application/json",
|
1071
1160
|
}
|
1072
1161
|
# Obtain secrets from Home Assistant via API
|
1073
1162
|
logger.debug("Obtaining secrets from Home Assistant Supervisor API")
|
1074
|
-
response = get(
|
1163
|
+
response = get(
|
1164
|
+
(params_secrets["hass_url"] + "/config"), headers=headers
|
1165
|
+
)
|
1075
1166
|
if response.status_code < 400:
|
1076
1167
|
config_hass = response.json()
|
1077
1168
|
params_secrets = {
|
1078
|
-
|
1079
|
-
|
1080
|
-
|
1081
|
-
|
1082
|
-
|
1083
|
-
|
1169
|
+
"hass_url": params_secrets["hass_url"],
|
1170
|
+
"long_lived_token": params_secrets["long_lived_token"],
|
1171
|
+
"time_zone": config_hass["time_zone"],
|
1172
|
+
"Latitude": config_hass["latitude"],
|
1173
|
+
"Longitude": config_hass["longitude"],
|
1174
|
+
"Altitude": config_hass["elevation"],
|
1084
1175
|
}
|
1085
|
-
else:
|
1176
|
+
else:
|
1086
1177
|
# Obtain the url and key secrets if any from options.json (default /app/options.json)
|
1087
|
-
logger.warning(
|
1178
|
+
logger.warning(
|
1179
|
+
"Error obtaining secrets from Home Assistant Supervisor API"
|
1180
|
+
)
|
1088
1181
|
logger.debug("Obtaining url and key secrets from options.json")
|
1089
|
-
if url_from_options !=
|
1090
|
-
params_secrets[
|
1091
|
-
if key_from_options !=
|
1092
|
-
params_secrets[
|
1093
|
-
if
|
1094
|
-
|
1095
|
-
|
1096
|
-
|
1097
|
-
|
1098
|
-
|
1099
|
-
|
1100
|
-
|
1182
|
+
if url_from_options != "empty" and url_from_options != "":
|
1183
|
+
params_secrets["hass_url"] = url_from_options
|
1184
|
+
if key_from_options != "empty" and key_from_options != "":
|
1185
|
+
params_secrets["long_lived_token"] = key_from_options
|
1186
|
+
if (
|
1187
|
+
options.get("time_zone", "empty") != "empty"
|
1188
|
+
and options["time_zone"] != ""
|
1189
|
+
):
|
1190
|
+
params_secrets["time_zone"] = options["time_zone"]
|
1191
|
+
if options.get("Latitude", None) is not None and bool(
|
1192
|
+
options["Latitude"]
|
1193
|
+
):
|
1194
|
+
params_secrets["Latitude"] = options["Latitude"]
|
1195
|
+
if options.get("Longitude", None) is not None and bool(
|
1196
|
+
options["Longitude"]
|
1197
|
+
):
|
1198
|
+
params_secrets["Longitude"] = options["Longitude"]
|
1199
|
+
if options.get("Altitude", None) is not None and bool(
|
1200
|
+
options["Altitude"]
|
1201
|
+
):
|
1202
|
+
params_secrets["Altitude"] = options["Altitude"]
     else:
         # Obtain the url and key secrets if any from options.json (default /app/options.json)
         logger.debug("Obtaining url and key secrets from options.json")
-        if url_from_options !=
-            params_secrets[
-        if key_from_options !=
-            params_secrets[
-        if
-
-
-
-
-
-
-
-
+        if url_from_options != "empty" and url_from_options != "":
+            params_secrets["hass_url"] = url_from_options
+        if key_from_options != "empty" and key_from_options != "":
+            params_secrets["long_lived_token"] = key_from_options
+        if (
+            options.get("time_zone", "empty") != "empty"
+            and options["time_zone"] != ""
+        ):
+            params_secrets["time_zone"] = options["time_zone"]
+        if options.get("Latitude", None) is not None and bool(
+            options["Latitude"]
+        ):
+            params_secrets["Latitude"] = options["Latitude"]
+        if options.get("Longitude", None) is not None and bool(
+            options["Longitude"]
+        ):
+            params_secrets["Longitude"] = options["Longitude"]
+        if options.get("Altitude", None) is not None and bool(
+            options["Altitude"]
+        ):
+            params_secrets["Altitude"] = options["Altitude"]
+
     # Obtain the forecast secrets (if any) from options.json (default /app/options.json)
-    forecast_secrets = [
+    forecast_secrets = [
+        "solcast_api_key",
+        "solcast_rooftop_id",
+        "solar_forecast_kwp",
+    ]
     if any(x in forecast_secrets for x in list(options.keys())):
         logger.debug("Obtaining forecast secrets from options.json")
-        if
-
-
-
-
-
+        if (
+            options.get("solcast_api_key", "empty") != "empty"
+            and options["solcast_api_key"] != ""
+        ):
+            params_secrets["solcast_api_key"] = options["solcast_api_key"]
+        if (
+            options.get("solcast_rooftop_id", "empty") != "empty"
+            and options["solcast_rooftop_id"] != ""
+        ):
+            params_secrets["solcast_rooftop_id"] = options["solcast_rooftop_id"]
+        if options.get("solar_forecast_kwp", None) and bool(
+            options["solar_forecast_kwp"]
+        ):
+            params_secrets["solar_forecast_kwp"] = options["solar_forecast_kwp"]
+
     # Obtain secrets from secrets_emhass.yaml? (default /app/secrets_emhass.yaml)
     if secrets_path and pathlib.Path(secrets_path).is_file():
         logger.debug("Obtaining secrets from secrets file")
-        with open(pathlib.Path(secrets_path),
+        with open(pathlib.Path(secrets_path), "r") as file:
             params_secrets.update(yaml.load(file, Loader=yaml.FullLoader))

-    # Receive key and url from ARG/arguments?
-    if argument.get(
-
-
-    if argument.get(
-        params_secrets[
-        logger.debug("Obtaining long_lived_token from passed argument")
-
-    return emhass_conf, params_secrets
-
-
-
-
-
+    # Receive key and url from ARG/arguments?
+    if argument.get("url", None) is not None:
+        params_secrets["hass_url"] = argument["url"]
+        logger.debug("Obtaining url from passed argument")
+    if argument.get("key", None) is not None:
+        params_secrets["long_lived_token"] = argument["key"]
+        logger.debug("Obtaining long_lived_token from passed argument")
+
+    return emhass_conf, params_secrets
+
+
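The precedence implemented by this secrets builder is: options.json values first, then anything found in secrets_emhass.yaml layered on top, with a url/key passed as arguments winning last. A minimal sketch of that layering, with invented names and values (only the ordering mirrors the hunk above):

    # Illustrative only: the layering below mirrors the logic of this hunk.
    params_secrets = {}

    options = {"time_zone": "Europe/Paris", "Latitude": 45.83}    # options.json
    secrets_file = {"hass_url": "https://myhass.duckdns.org/"}    # secrets_emhass.yaml
    argument = {"url": "http://homeassistant.local:8123"}         # passed arguments

    # 1. options.json, skipping "empty" placeholders as the code above does
    for key, value in options.items():
        if value not in ("empty", "", None):
            params_secrets[key] = value
    # 2. the secrets file overrides key by key
    params_secrets.update(secrets_file)
    # 3. explicitly passed arguments win last
    if argument.get("url", None) is not None:
        params_secrets["hass_url"] = argument["url"]

    print(params_secrets["hass_url"])  # http://homeassistant.local:8123

Because each later stage only overwrites the keys it actually carries, a passed URL beats both files while leaving the other collected secrets intact.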
+def build_params(
+    emhass_conf: dict, params_secrets: dict, config: dict, logger: logging.Logger
+) -> dict:
     """
     Build the main params dictionary from the config and secrets
     Appends configuration catagories used by emhass to the parameters. (with use of the associations file as a reference)
-
+
     :param emhass_conf: Dictionary containing the needed emhass paths
     :type emhass_conf: dict
     :param params_secrets: The dictionary containing the built secret variables
     :type params_secrets: dict
-    :param config: The dictionary of built config parameters
+    :param config: The dictionary of built config parameters
     :type config: dict
     :param logger: The logger object
     :type logger: logging.Logger
@@ -1162,104 +1285,203 @@ def build_params(emhass_conf: dict, params_secrets: dict, config: dict,
     """
     if type(params_secrets) is not dict:
         params_secrets = {}
-
+
     params = {}
-    #Start with blank config catagories
-    params[
-    params[
-    params[
-    params[
-
-    # Obtain associations to categorize parameters to their corresponding config catagories
-    if emhass_conf.get(
-
+    # Start with blank config catagories
+    params["retrieve_hass_conf"] = {}
+    params["params_secrets"] = {}
+    params["optim_conf"] = {}
+    params["plant_conf"] = {}
+
+    # Obtain associations to categorize parameters to their corresponding config catagories
+    if emhass_conf.get(
+        "associations_path", get_root(__file__, num_parent=2) / "data/associations.csv"
+    ).exists():
+        with emhass_conf["associations_path"].open("r") as data:
             associations = list(csv.reader(data, delimiter=","))
     else:
-        logger.error(
+        logger.error(
+            "Unable to obtain the associations file (associations.csv) in: "
+            + str(emhass_conf["associations_path"])
+        )
         return False

     # Association file key reference
     # association[0] = config catagories
     # association[1] = legacy parameter name
     # association[2] = parameter (config.json/config_defaults.json)
-    # association[3] = parameter list name if exists (not used, from legacy options.json)
-
+    # association[3] = parameter list name if exists (not used, from legacy options.json)
     # Use association list to append parameters from config into params (with corresponding config catagories)
     for association in associations:
-        # If parameter has list_ name and parameter in config is presented with its list name
+        # If parameter has list_ name and parameter in config is presented with its list name
         # (ie, config parameter is in legacy options.json format)
-        if len(association) == 4 and config.get(association[3],None) is not None:
+        if len(association) == 4 and config.get(association[3], None) is not None:
             # Extract lists of dictionaries
             if config[association[3]] and type(config[association[3]][0]) is dict:
-                params[association[0]][association[2]] = [
+                params[association[0]][association[2]] = [
+                    i[association[2]] for i in config[association[3]]
+                ]
             else:
                 params[association[0]][association[2]] = config[association[3]]
-        # Else, directly set value of config parameter to param
-        elif config.get(association[2],None) is not None:
+        # Else, directly set value of config parameter to param
+        elif config.get(association[2], None) is not None:
             params[association[0]][association[2]] = config[association[2]]

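Each association row drives the loop above: column 0 is the config category, column 1 the legacy parameter name, column 2 the current parameter name, and an optional column 3 a legacy list wrapper. A sketch with one hypothetical three-column row (the real rows ship in emhass/data/associations.csv):

    # Hypothetical association row; real rows come from emhass/data/associations.csv.
    association = ["optim_conf", "num_def_loads", "number_of_deferrable_loads"]

    params = {"retrieve_hass_conf": {}, "params_secrets": {}, "optim_conf": {}, "plant_conf": {}}
    config = {"number_of_deferrable_loads": 2}

    # Only three columns, so the direct elif branch of the loop applies:
    if len(association) == 4 and config.get(association[3], None) is not None:
        pass  # legacy list handling, not exercised by this row
    elif config.get(association[2], None) is not None:
        params[association[0]][association[2]] = config[association[2]]

    print(params["optim_conf"])  # {'number_of_deferrable_loads': 2}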
     # Check if we need to create `list_hp_periods` from config (ie. legacy options.json format)
-    if
-
-
-
-
-
+    if (
+        params.get("optim_conf", None) is not None
+        and config.get("list_peak_hours_periods_start_hours", None) is not None
+        and config.get("list_peak_hours_periods_end_hours", None) is not None
+    ):
+        start_hours_list = [
+            i["peak_hours_periods_start_hours"]
+            for i in config["list_peak_hours_periods_start_hours"]
+        ]
+        end_hours_list = [
+            i["peak_hours_periods_end_hours"]
+            for i in config["list_peak_hours_periods_end_hours"]
+        ]
+        num_peak_hours = len(start_hours_list)
+        list_hp_periods_list = {
+            "period_hp_" + str(i + 1): [
+                {"start": start_hours_list[i]},
+                {"end": end_hours_list[i]},
+            ]
+            for i in range(num_peak_hours)
+        }
+        params["optim_conf"]["load_peak_hour_periods"] = list_hp_periods_list
     else:
         # Else, check param already contains load_peak_hour_periods from config
-        if params[
-            logger.warning(
+        if params["optim_conf"].get("load_peak_hour_periods", None) is None:
+            logger.warning(
+                "Unable to detect or create load_peak_hour_periods parameter"
+            )

     # Format load_peak_hour_periods list to dict if necessary
-    if params[
-
+    if params["optim_conf"].get(
+        "load_peak_hour_periods", None
+    ) is not None and isinstance(params["optim_conf"]["load_peak_hour_periods"], list):
+        params["optim_conf"]["load_peak_hour_periods"] = dict(
+            (key, d[key])
+            for d in params["optim_conf"]["load_peak_hour_periods"]
+            for key in d
+        )

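A worked example of the two conversions above, with invented times: the legacy start/end lists are zipped into period_hp_N entries, and a list-of-dicts form is then flattened into a plain dict:

    # Legacy options.json shape (values invented for illustration)
    config = {
        "list_peak_hours_periods_start_hours": [
            {"peak_hours_periods_start_hours": "02:54"},
            {"peak_hours_periods_start_hours": "17:24"},
        ],
        "list_peak_hours_periods_end_hours": [
            {"peak_hours_periods_end_hours": "15:24"},
            {"peak_hours_periods_end_hours": "20:24"},
        ],
    }
    start = [i["peak_hours_periods_start_hours"] for i in config["list_peak_hours_periods_start_hours"]]
    end = [i["peak_hours_periods_end_hours"] for i in config["list_peak_hours_periods_end_hours"]]
    load_peak_hour_periods = {
        "period_hp_" + str(i + 1): [{"start": start[i]}, {"end": end[i]}]
        for i in range(len(start))
    }
    # {'period_hp_1': [{'start': '02:54'}, {'end': '15:24'}],
    #  'period_hp_2': [{'start': '17:24'}, {'end': '20:24'}]}

    # The list-to-dict normalisation handles the same data arriving as a list
    # of single-key dicts instead of one dict:
    as_list = [{"period_hp_1": [{"start": "02:54"}, {"end": "15:24"}]}]
    as_dict = dict((key, d[key]) for d in as_list for key in d)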
     # Call function to check parameter lists that require the same length as deferrable loads
     # If not, set defaults it fill in gaps
-    if params[
-        num_def_loads = params[
-        params[
-
-
-
-
-
-
+    if params["optim_conf"].get("number_of_deferrable_loads", None) is not None:
+        num_def_loads = params["optim_conf"]["number_of_deferrable_loads"]
+        params["optim_conf"]["start_timesteps_of_each_deferrable_load"] = (
+            check_def_loads(
+                num_def_loads,
+                params["optim_conf"],
+                0,
+                "start_timesteps_of_each_deferrable_load",
+                logger,
+            )
+        )
+        params["optim_conf"]["end_timesteps_of_each_deferrable_load"] = check_def_loads(
+            num_def_loads,
+            params["optim_conf"],
+            0,
+            "end_timesteps_of_each_deferrable_load",
+            logger,
+        )
+        params["optim_conf"]["set_deferrable_load_single_constant"] = check_def_loads(
+            num_def_loads,
+            params["optim_conf"],
+            False,
+            "set_deferrable_load_single_constant",
+            logger,
+        )
+        params["optim_conf"]["treat_deferrable_load_as_semi_cont"] = check_def_loads(
+            num_def_loads,
+            params["optim_conf"],
+            True,
+            "treat_deferrable_load_as_semi_cont",
+            logger,
+        )
+        params["optim_conf"]["set_deferrable_startup_penalty"] = check_def_loads(
+            num_def_loads,
+            params["optim_conf"],
+            0.0,
+            "set_deferrable_startup_penalty",
+            logger,
+        )
+        params["optim_conf"]["operating_hours_of_each_deferrable_load"] = (
+            check_def_loads(
+                num_def_loads,
+                params["optim_conf"],
+                0,
+                "operating_hours_of_each_deferrable_load",
+                logger,
+            )
+        )
+        params["optim_conf"]["nominal_power_of_deferrable_loads"] = check_def_loads(
+            num_def_loads,
+            params["optim_conf"],
+            0,
+            "nominal_power_of_deferrable_loads",
+            logger,
+        )
     else:
         logger.warning("unable to obtain parameter: number_of_deferrable_loads")
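Every per-load list is forced to length number_of_deferrable_loads through check_def_loads, defined at the end of this file. A usage sketch, assuming emhass is installed so the helper can be imported:

    import logging

    from emhass.utils import check_def_loads  # module-level helper in this file

    logger = logging.getLogger(__name__)
    optim_conf = {
        "number_of_deferrable_loads": 3,
        "nominal_power_of_deferrable_loads": [3000.0, 750.0],  # one entry short
    }
    padded = check_def_loads(
        optim_conf["number_of_deferrable_loads"],
        optim_conf,
        0,
        "nominal_power_of_deferrable_loads",
        logger,
    )
    print(padded)  # [3000.0, 750.0, 0], with a mismatch warning on the logger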
     # historic_days_to_retrieve should be no less then 2
-    if params["retrieve_hass_conf"].get(
-        if params["retrieve_hass_conf"][
-            params["retrieve_hass_conf"][
-            logger.warning(
+    if params["retrieve_hass_conf"].get("historic_days_to_retrieve", None) is not None:
+        if params["retrieve_hass_conf"]["historic_days_to_retrieve"] < 2:
+            params["retrieve_hass_conf"]["historic_days_to_retrieve"] = 2
+            logger.warning(
+                "days_to_retrieve should not be lower then 2, setting days_to_retrieve to 2. Make sure your sensors also have at least 2 days of history"
+            )
     else:
         logger.warning("unable to obtain parameter: historic_days_to_retrieve")

     # Configure secrets, set params to correct config categorie
     # retrieve_hass_conf
-    params[
-    params[
-
-
-    params[
-    params[
+    params["retrieve_hass_conf"]["hass_url"] = params_secrets.get("hass_url", None)
+    params["retrieve_hass_conf"]["long_lived_token"] = params_secrets.get(
+        "long_lived_token", None
+    )
+    params["retrieve_hass_conf"]["time_zone"] = params_secrets.get("time_zone", None)
+    params["retrieve_hass_conf"]["Latitude"] = params_secrets.get("Latitude", None)
+    params["retrieve_hass_conf"]["Longitude"] = params_secrets.get("Longitude", None)
+    params["retrieve_hass_conf"]["Altitude"] = params_secrets.get("Altitude", None)
     # Update optional param secrets
-    if params["optim_conf"].get(
-        if params["optim_conf"][
-            params["retrieve_hass_conf"]["solcast_api_key"] = params_secrets.get(
-
-
-            params["params_secrets"]["
-
-
-            params["
+    if params["optim_conf"].get("weather_forecast_method", None) is not None:
+        if params["optim_conf"]["weather_forecast_method"] == "solcast":
+            params["retrieve_hass_conf"]["solcast_api_key"] = params_secrets.get(
+                "solcast_api_key", "123456"
+            )
+            params["params_secrets"]["solcast_api_key"] = params_secrets.get(
+                "solcast_api_key", "123456"
+            )
+            params["retrieve_hass_conf"]["solcast_rooftop_id"] = params_secrets.get(
+                "solcast_rooftop_id", "123456"
+            )
+            params["params_secrets"]["solcast_rooftop_id"] = params_secrets.get(
+                "solcast_rooftop_id", "123456"
+            )
+        elif params["optim_conf"]["weather_forecast_method"] == "solar.forecast":
+            params["retrieve_hass_conf"]["solar_forecast_kwp"] = params_secrets.get(
+                "solar_forecast_kwp", 5
+            )
+            params["params_secrets"]["solar_forecast_kwp"] = params_secrets.get(
+                "solar_forecast_kwp", 5
+            )
     else:
-        logger.warning("Unable to detect weather_forecast_method parameter")
+        logger.warning("Unable to detect weather_forecast_method parameter")
     # Check if secrets parameters still defaults values
-    secret_params = [
-
-
-
+    secret_params = [
+        "https://myhass.duckdns.org/",
+        "thatverylongtokenhere",
+        45.83,
+        6.86,
+        4807.8,
+    ]
+    if any(x in secret_params for x in params["retrieve_hass_conf"].values()):
+        logger.warning(
+            "Some secret parameters values are still matching their defaults"
+        )

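Two behaviours worth noting here: missing forecast secrets fall back to placeholder defaults ("123456", or 5 for solar_forecast_kwp), and the final guard warns whenever a configured value still matches the shipped example secrets. A compact illustration of both, using the defaults visible in this hunk:

    params_secrets = {}  # nothing collected earlier, so the placeholder applies
    retrieve_hass_conf = {
        "hass_url": "https://myhass.duckdns.org/",  # still the template default
        "solcast_api_key": params_secrets.get("solcast_api_key", "123456"),
    }
    secret_params = ["https://myhass.duckdns.org/", "thatverylongtokenhere", 45.83, 6.86, 4807.8]
    if any(x in secret_params for x in retrieve_hass_conf.values()):
        print("Some secret parameters values are still matching their defaults")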
     # Set empty dict objects for params passed_data
     # To be latter populated with runtime parameters (treat_runtimeparams)
@@ -1271,16 +1493,19 @@ def build_params(emhass_conf: dict, params_secrets: dict, config: dict,
         "prediction_horizon": None,
         "soc_init": None,
         "soc_final": None,
-
-
-
+        "operating_hours_of_each_deferrable_load": None,
+        "start_timesteps_of_each_deferrable_load": None,
+        "end_timesteps_of_each_deferrable_load": None,
         "alpha": None,
         "beta": None,
     }

     return params

-
+
+def check_def_loads(
+    num_def_loads: int, parameter: list[dict], default, parameter_name: str, logger
+):
     """
     Check parameter lists with deferrable loads number, if they do not match, enlarge to fit.

@@ -1294,12 +1519,21 @@ def check_def_loads(num_def_loads: int, parameter: list[dict], default, paramete
     :type logger: str
     :param logger: The logger object
     :type logger: logging.Logger
-    return: parameter list
+    return: parameter list
     :rtype: list[dict]

     """
-    if
-
+    if (
+        parameter.get(parameter_name, None) is not None
+        and type(parameter[parameter_name]) is list
+        and num_def_loads > len(parameter[parameter_name])
+    ):
+        logger.warning(
+            parameter_name
+            + " does not match number in num_def_loads, adding default values ("
+            + str(default)
+            + ") to parameter"
+        )
         for x in range(len(parameter[parameter_name]), num_def_loads):
             parameter[parameter_name].append(default)
     return parameter[parameter_name]
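Putting the pieces together, a hedged end-to-end sketch of calling build_params (assumes emhass is installed; every per-load list is supplied because check_def_loads falls through to parameter[parameter_name] and would raise KeyError for a list that is missing entirely; expect logged warnings for anything left unset):

    import logging
    import pathlib

    import emhass
    from emhass.utils import build_params

    logger = logging.getLogger(__name__)
    emhass_conf = {
        "data_path": pathlib.Path("/tmp"),
        "associations_path": pathlib.Path(emhass.__file__).parent / "data" / "associations.csv",
    }
    config = {
        "number_of_deferrable_loads": 1,
        "nominal_power_of_deferrable_loads": [3000.0],
        "operating_hours_of_each_deferrable_load": [4],
        "start_timesteps_of_each_deferrable_load": [0],
        "end_timesteps_of_each_deferrable_load": [0],
        "treat_deferrable_load_as_semi_cont": [True],
        "set_deferrable_load_single_constant": [False],
        "set_deferrable_startup_penalty": [0.0],
        "historic_days_to_retrieve": 2,
    }
    params_secrets = {"hass_url": "http://homeassistant.local:8123", "long_lived_token": "not-a-real-token"}
    params = build_params(emhass_conf, params_secrets, config, logger)
    print(sorted(params.keys()))  # expect the four config categories plus passed_data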