emhass 0.12.4__py3-none-any.whl → 0.12.5__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/METADATA +34 -17
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/RECORD +5 -17
- emhass/__init__.py +0 -0
- emhass/command_line.py +0 -1748
- emhass/data/emhass_inverters.csv +0 -8
- emhass/data/emhass_modules.csv +0 -6
- emhass/forecast.py +0 -1348
- emhass/img/emhass_icon.png +0 -0
- emhass/machine_learning_forecaster.py +0 -397
- emhass/machine_learning_regressor.py +0 -275
- emhass/optimization.py +0 -1504
- emhass/retrieve_hass.py +0 -670
- emhass/utils.py +0 -1678
- emhass/web_server.py +0 -756
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/WHEEL +0 -0
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/entry_points.txt +0 -0
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/licenses/LICENSE +0 -0
emhass/utils.py
DELETED
@@ -1,1678 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import ast
import copy
import csv
import json
import logging
import os
import pathlib
from datetime import datetime, timedelta, timezone
from typing import Optional, Tuple

import numpy as np
import pandas as pd
import plotly.express as px
import pytz
import yaml
from requests import get

from emhass.machine_learning_forecaster import MLForecaster

pd.options.plotting.backend = "plotly"


def get_root(file: str, num_parent: Optional[int] = 3) -> str:
    """
    Get the root absolute path of the working directory.

    :param file: The passed file path with __file__
    :return: The root path
    :param num_parent: The number of parents levels up to desired root folder
    :type num_parent: int, optional
    :rtype: str

    """
    if num_parent == 3:
        root = pathlib.Path(file).resolve().parent.parent.parent
    elif num_parent == 2:
        root = pathlib.Path(file).resolve().parent.parent
    elif num_parent == 1:
        root = pathlib.Path(file).resolve().parent
    else:
        raise ValueError("num_parent value not valid, must be between 1 and 3")
    return root


def get_logger(
    fun_name: str,
    emhass_conf: dict,
    save_to_file: Optional[bool] = True,
    logging_level: Optional[str] = "DEBUG",
) -> Tuple[logging.Logger, logging.StreamHandler]:
    """
    Create a simple logger object.

    :param fun_name: The Python function object name where the logger will be used
    :type fun_name: str
    :param emhass_conf: Dictionary containing the needed emhass paths
    :type emhass_conf: dict
    :param save_to_file: Write log to a file, defaults to True
    :type save_to_file: bool, optional
    :return: The logger object and the handler
    :rtype: object

    """
    # create logger object
    logger = logging.getLogger(fun_name)
    logger.propagate = True
    logger.fileSetting = save_to_file
    if save_to_file:
        if os.path.isdir(emhass_conf["data_path"]):
            ch = logging.FileHandler(emhass_conf["data_path"] / "logger_emhass.log")
        else:
            raise Exception("Unable to access data_path: " + emhass_conf["data_path"])
    else:
        ch = logging.StreamHandler()
    if logging_level == "DEBUG":
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)
    elif logging_level == "INFO":
        logger.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)
    elif logging_level == "WARNING":
        logger.setLevel(logging.WARNING)
        ch.setLevel(logging.WARNING)
    elif logging_level == "ERROR":
        logger.setLevel(logging.ERROR)
        ch.setLevel(logging.ERROR)
    else:
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    return logger, ch
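
For reference, a minimal sketch of how this removed helper was meant to be driven in 0.12.4 (the logger name and data path below are placeholders, not values from the diff):

import pathlib
from emhass.utils import get_logger

# Placeholder config: data_path is only touched when save_to_file=True
emhass_conf = {"data_path": pathlib.Path("/tmp/emhass-data")}
logger, ch = get_logger("emhass.example", emhass_conf, save_to_file=False, logging_level="INFO")
logger.info("stream logger ready")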


def get_forecast_dates(
    freq: int,
    delta_forecast: int,
    time_zone: datetime.tzinfo,
    timedelta_days: Optional[int] = 0,
) -> pd.core.indexes.datetimes.DatetimeIndex:
    """
    Get the date_range list of the needed future dates using the delta_forecast parameter.

    :param freq: Optimization time step.
    :type freq: int
    :param delta_forecast: Number of days to forecast in the future to be used for the optimization.
    :type delta_forecast: int
    :param timedelta_days: Number of truncated days needed for each optimization iteration, defaults to 0
    :type timedelta_days: Optional[int], optional
    :return: A list of future forecast dates.
    :rtype: pd.core.indexes.datetimes.DatetimeIndex

    """
    freq = pd.to_timedelta(freq, "minutes")
    start_forecast = pd.Timestamp(datetime.now()).replace(
        hour=0, minute=0, second=0, microsecond=0
    )
    end_forecast = (start_forecast + pd.Timedelta(days=delta_forecast)).replace(
        microsecond=0
    )
    forecast_dates = (
        pd.date_range(
            start=start_forecast,
            end=end_forecast + timedelta(days=timedelta_days) - freq,
            freq=freq,
            tz=time_zone,
        )
        .tz_convert("utc")
        .round(freq, ambiguous="infer", nonexistent="shift_forward")
        .tz_convert(time_zone)
    )
    return forecast_dates
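
A short sketch of the date-range semantics (the time zone is an arbitrary example):

import pytz
from emhass.utils import get_forecast_dates

# 30-minute steps over a 1-day horizon, anchored at today 00:00 local time
dates = get_forecast_dates(freq=30, delta_forecast=1, time_zone=pytz.timezone("Europe/Paris"))
print(len(dates))  # 48 on a standard day: the range ends at end_forecast - freq, i.e. 23:30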


def update_params_with_ha_config(
    params: str,
    ha_config: dict,
) -> dict:
    """
    Update the params with the Home Assistant configuration.

    Parameters
    ----------
    params : str
        The serialized params.
    ha_config : dict
        The Home Assistant configuration.

    Returns
    -------
    dict
        The updated params.
    """
    # Load serialized params
    params = json.loads(params)
    # Update params
    currency_to_symbol = {
        "EUR": "€",
        "USD": "$",
        "GBP": "£",
        "YEN": "¥",
        "JPY": "¥",
        "AUD": "A$",
        "CAD": "C$",
        "CHF": "CHF",  # Swiss Franc has no special symbol
        "CNY": "¥",
        "INR": "₹",
        "CZK": "Kč",
        "BGN": "лв",
        "DKK": "kr",
        "HUF": "Ft",
        "PLN": "zł",
        "RON": "Leu",
        "SEK": "kr",
        "TRY": "Lira",
        "VEF": "Bolivar",
        "VND": "Dong",
        "THB": "Baht",
        "SGD": "S$",
        "IDR": "Roepia",
        "ZAR": "Rand",
        # Add more as needed
    }
    if "currency" in ha_config.keys():
        ha_config["currency"] = currency_to_symbol.get(ha_config["currency"], "Unknown")
    else:
        ha_config["currency"] = "€"
    if "unit_system" not in ha_config.keys():
        ha_config["unit_system"] = {"temperature": "°C"}

    number_of_deferrable_loads = params["optim_conf"]["number_of_deferrable_loads"]
    if "num_def_loads" in params["passed_data"].keys():
        number_of_deferrable_loads = params["passed_data"]["num_def_loads"]
    if "number_of_deferrable_loads" in params["passed_data"].keys():
        number_of_deferrable_loads = params["passed_data"]["number_of_deferrable_loads"]

    for k in range(number_of_deferrable_loads):
        params["passed_data"]["custom_predicted_temperature_id"][k].update(
            {"unit_of_measurement": ha_config["unit_system"]["temperature"]}
        )
    updated_passed_dict = {
        "custom_cost_fun_id": {
            "unit_of_measurement": ha_config["currency"],
        },
        "custom_unit_load_cost_id": {
            "unit_of_measurement": f"{ha_config['currency']}/kWh",
        },
        "custom_unit_prod_price_id": {
            "unit_of_measurement": f"{ha_config['currency']}/kWh",
        },
    }
    for key, value in updated_passed_dict.items():
        params["passed_data"][key]["unit_of_measurement"] = value["unit_of_measurement"]
    # Serialize the final params
    params = json.dumps(params, default=str)
    return params
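
The function only reads and writes the keys shown below, so a minimal round-trip looks like this (illustrative values; in the package, params is normally produced by treat_runtimeparams):

import json
from emhass.utils import update_params_with_ha_config

params = json.dumps({
    "optim_conf": {"number_of_deferrable_loads": 1},
    "passed_data": {
        "custom_predicted_temperature_id": [{"entity_id": "sensor.temp_predicted0"}],
        "custom_cost_fun_id": {},
        "custom_unit_load_cost_id": {},
        "custom_unit_prod_price_id": {},
    },
})
ha_config = {"currency": "EUR", "unit_system": {"temperature": "°C"}}
params = update_params_with_ha_config(params, ha_config)
# custom_unit_load_cost_id now carries unit_of_measurement "€/kWh"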


def treat_runtimeparams(
    runtimeparams: str,
    params: str,
    retrieve_hass_conf: dict,
    optim_conf: dict,
    plant_conf: dict,
    set_type: str,
    logger: logging.Logger,
    emhass_conf: dict,
) -> Tuple[str, dict]:
    """
    Treat the passed optimization runtime parameters.

    :param runtimeparams: Json string containing the runtime parameters dict.
    :type runtimeparams: str
    :param params: Built configuration parameters
    :type params: str
    :param retrieve_hass_conf: Config dictionary for data retrieving parameters.
    :type retrieve_hass_conf: dict
    :param optim_conf: Config dictionary for optimization parameters.
    :type optim_conf: dict
    :param plant_conf: Config dictionary for technical plant parameters.
    :type plant_conf: dict
    :param set_type: The type of action to be performed.
    :type set_type: str
    :param logger: The logger object.
    :type logger: logging.Logger
    :param emhass_conf: Dictionary containing the needed emhass paths
    :type emhass_conf: dict
    :return: Returning the params and optimization parameter container.
    :rtype: Tuple[str, dict]

    """
    # Check if passed params is a dict
    if (params != None) and (params != "null"):
        if type(params) is str:
            params = json.loads(params)
    else:
        params = {}

    # Merge current config categories to params
    params["retrieve_hass_conf"].update(retrieve_hass_conf)
    params["optim_conf"].update(optim_conf)
    params["plant_conf"].update(plant_conf)

    # Check defaults on HA retrieved config
    default_currency_unit = "€"
    default_temperature_unit = "°C"

    # Some default data needed
    custom_deferrable_forecast_id = []
    custom_predicted_temperature_id = []
    for k in range(params["optim_conf"]["number_of_deferrable_loads"]):
        custom_deferrable_forecast_id.append(
            {
                "entity_id": "sensor.p_deferrable{}".format(k),
                "unit_of_measurement": "W",
                "friendly_name": "Deferrable Load {}".format(k),
            }
        )
        custom_predicted_temperature_id.append(
            {
                "entity_id": "sensor.temp_predicted{}".format(k),
                "unit_of_measurement": default_temperature_unit,
                "friendly_name": "Predicted temperature {}".format(k),
            }
        )
    default_passed_dict = {
        "custom_pv_forecast_id": {
            "entity_id": "sensor.p_pv_forecast",
            "unit_of_measurement": "W",
            "friendly_name": "PV Power Forecast",
        },
        "custom_load_forecast_id": {
            "entity_id": "sensor.p_load_forecast",
            "unit_of_measurement": "W",
            "friendly_name": "Load Power Forecast",
        },
        "custom_pv_curtailment_id": {
            "entity_id": "sensor.p_pv_curtailment",
            "unit_of_measurement": "W",
            "friendly_name": "PV Power Curtailment",
        },
        "custom_hybrid_inverter_id": {
            "entity_id": "sensor.p_hybrid_inverter",
            "unit_of_measurement": "W",
            "friendly_name": "PV Hybrid Inverter",
        },
        "custom_batt_forecast_id": {
            "entity_id": "sensor.p_batt_forecast",
            "unit_of_measurement": "W",
            "friendly_name": "Battery Power Forecast",
        },
        "custom_batt_soc_forecast_id": {
            "entity_id": "sensor.soc_batt_forecast",
            "unit_of_measurement": "%",
            "friendly_name": "Battery SOC Forecast",
        },
        "custom_grid_forecast_id": {
            "entity_id": "sensor.p_grid_forecast",
            "unit_of_measurement": "W",
            "friendly_name": "Grid Power Forecast",
        },
        "custom_cost_fun_id": {
            "entity_id": "sensor.total_cost_fun_value",
            "unit_of_measurement": default_currency_unit,
            "friendly_name": "Total cost function value",
        },
        "custom_optim_status_id": {
            "entity_id": "sensor.optim_status",
            "unit_of_measurement": "",
            "friendly_name": "EMHASS optimization status",
        },
        "custom_unit_load_cost_id": {
            "entity_id": "sensor.unit_load_cost",
            "unit_of_measurement": f"{default_currency_unit}/kWh",
            "friendly_name": "Unit Load Cost",
        },
        "custom_unit_prod_price_id": {
            "entity_id": "sensor.unit_prod_price",
            "unit_of_measurement": f"{default_currency_unit}/kWh",
            "friendly_name": "Unit Prod Price",
        },
        "custom_deferrable_forecast_id": custom_deferrable_forecast_id,
        "custom_predicted_temperature_id": custom_predicted_temperature_id,
        "publish_prefix": "",
    }
    if "passed_data" in params.keys():
        for key, value in default_passed_dict.items():
            params["passed_data"][key] = value
    else:
        params["passed_data"] = default_passed_dict

    # If any runtime parameters where passed in action call
    if runtimeparams is not None:
        if type(runtimeparams) is str:
            runtimeparams = json.loads(runtimeparams)

        # Loop though parameters stored in association file, Check to see if any stored in runtime
        # If true, set runtime parameter to params
        if emhass_conf["associations_path"].exists():
            with emhass_conf["associations_path"].open("r") as data:
                associations = list(csv.reader(data, delimiter=","))
            # Association file key reference
            # association[0] = config categories
            # association[1] = legacy parameter name
            # association[2] = parameter (config.json/config_defaults.json)
            # association[3] = parameter list name if exists (not used, from legacy options.json)
            for association in associations:
                # Check parameter name exists in runtime
                if runtimeparams.get(association[2], None) is not None:
                    params[association[0]][association[2]] = runtimeparams[
                        association[2]
                    ]
                # Check Legacy parameter name runtime
                elif runtimeparams.get(association[1], None) is not None:
                    params[association[0]][association[2]] = runtimeparams[
                        association[1]
                    ]
        else:
            logger.warning(
                "Cant find associations file (associations.csv) in: "
                + str(emhass_conf["associations_path"])
            )

        # Generate forecast_dates
        if (
            "optimization_time_step" in runtimeparams.keys()
            or "freq" in runtimeparams.keys()
        ):
            optimization_time_step = int(
                runtimeparams.get("optimization_time_step", runtimeparams.get("freq"))
            )
            params["retrieve_hass_conf"]["optimization_time_step"] = pd.to_timedelta(
                optimization_time_step, "minutes"
            )
        else:
            optimization_time_step = int(
                params["retrieve_hass_conf"]["optimization_time_step"].seconds / 60.0
            )
        if (
            runtimeparams.get("delta_forecast_daily", None) is not None
            or runtimeparams.get("delta_forecast", None) is not None
        ):
            delta_forecast = int(
                runtimeparams.get(
                    "delta_forecast_daily", runtimeparams["delta_forecast"]
                )
            )
            params["optim_conf"]["delta_forecast_daily"] = pd.Timedelta(
                days=optim_conf["delta_forecast_daily"]
            )
        else:
            delta_forecast = int(params["optim_conf"]["delta_forecast_daily"].days)
        if runtimeparams.get("time_zone", None) is not None:
            time_zone = pytz.timezone(params["retrieve_hass_conf"]["time_zone"])
            params["retrieve_hass_conf"]["time_zone"] = time_zone
        else:
            time_zone = params["retrieve_hass_conf"]["time_zone"]

        forecast_dates = get_forecast_dates(
            optimization_time_step, delta_forecast, time_zone
        )

        # Add runtime exclusive (not in config) parameters to params
        # regressor-model-fit
        if set_type == "regressor-model-fit":
            if "csv_file" in runtimeparams:
                csv_file = runtimeparams["csv_file"]
                params["passed_data"]["csv_file"] = csv_file
            if "features" in runtimeparams:
                features = runtimeparams["features"]
                params["passed_data"]["features"] = features
            if "target" in runtimeparams:
                target = runtimeparams["target"]
                params["passed_data"]["target"] = target
            if "timestamp" not in runtimeparams:
                params["passed_data"]["timestamp"] = None
            else:
                timestamp = runtimeparams["timestamp"]
                params["passed_data"]["timestamp"] = timestamp
            if "date_features" not in runtimeparams:
                params["passed_data"]["date_features"] = []
            else:
                date_features = runtimeparams["date_features"]
                params["passed_data"]["date_features"] = date_features

        # regressor-model-predict
        if set_type == "regressor-model-predict":
            if "new_values" in runtimeparams:
                new_values = runtimeparams["new_values"]
                params["passed_data"]["new_values"] = new_values
            if "csv_file" in runtimeparams:
                csv_file = runtimeparams["csv_file"]
                params["passed_data"]["csv_file"] = csv_file
            if "features" in runtimeparams:
                features = runtimeparams["features"]
                params["passed_data"]["features"] = features
            if "target" in runtimeparams:
                target = runtimeparams["target"]
                params["passed_data"]["target"] = target

        # MPC control case
        if set_type == "naive-mpc-optim":
            if "prediction_horizon" not in runtimeparams.keys():
                prediction_horizon = 10  # 10 time steps by default
            else:
                prediction_horizon = runtimeparams["prediction_horizon"]
            params["passed_data"]["prediction_horizon"] = prediction_horizon
            if "soc_init" not in runtimeparams.keys():
                soc_init = params["plant_conf"]["battery_target_state_of_charge"]
            else:
                soc_init = runtimeparams["soc_init"]
            params["passed_data"]["soc_init"] = soc_init
            if "soc_final" not in runtimeparams.keys():
                soc_final = params["plant_conf"]["battery_target_state_of_charge"]
            else:
                soc_final = runtimeparams["soc_final"]
            params["passed_data"]["soc_final"] = soc_final
            if "operating_timesteps_of_each_deferrable_load" in runtimeparams.keys():
                params["passed_data"]["operating_timesteps_of_each_deferrable_load"] = (
                    runtimeparams["operating_timesteps_of_each_deferrable_load"]
                )
                params["optim_conf"]["operating_timesteps_of_each_deferrable_load"] = (
                    runtimeparams["operating_timesteps_of_each_deferrable_load"]
                )
            if "operating_hours_of_each_deferrable_load" in params["optim_conf"].keys():
                params["passed_data"]["operating_hours_of_each_deferrable_load"] = (
                    params["optim_conf"]["operating_hours_of_each_deferrable_load"]
                )
            params["passed_data"]["start_timesteps_of_each_deferrable_load"] = params[
                "optim_conf"
            ].get("start_timesteps_of_each_deferrable_load", None)
            params["passed_data"]["end_timesteps_of_each_deferrable_load"] = params[
                "optim_conf"
            ].get("end_timesteps_of_each_deferrable_load", None)

            forecast_dates = copy.deepcopy(forecast_dates)[0:prediction_horizon]

            # Load the default config
            if "def_load_config" in runtimeparams:
                params["optim_conf"]["def_load_config"] = runtimeparams[
                    "def_load_config"
                ]
            if "def_load_config" in params["optim_conf"]:
                for k in range(len(params["optim_conf"]["def_load_config"])):
                    if "thermal_config" in params["optim_conf"]["def_load_config"][k]:
                        if (
                            "heater_desired_temperatures" in runtimeparams
                            and len(runtimeparams["heater_desired_temperatures"]) > k
                        ):
                            params["optim_conf"]["def_load_config"][k][
                                "thermal_config"
                            ]["desired_temperatures"] = runtimeparams[
                                "heater_desired_temperatures"
                            ][k]
                        if (
                            "heater_start_temperatures" in runtimeparams
                            and len(runtimeparams["heater_start_temperatures"]) > k
                        ):
                            params["optim_conf"]["def_load_config"][k][
                                "thermal_config"
                            ]["start_temperature"] = runtimeparams[
                                "heater_start_temperatures"
                            ][k]
        else:
            params["passed_data"]["prediction_horizon"] = None
            params["passed_data"]["soc_init"] = None
            params["passed_data"]["soc_final"] = None

        # Treat passed forecast data lists
        list_forecast_key = [
            "pv_power_forecast",
            "load_power_forecast",
            "load_cost_forecast",
            "prod_price_forecast",
            "outdoor_temperature_forecast",
        ]
        forecast_methods = [
            "weather_forecast_method",
            "load_forecast_method",
            "load_cost_forecast_method",
            "production_price_forecast_method",
            "outdoor_temperature_forecast_method",
        ]

        # Loop forecasts, check if value is a list and greater than or equal to forecast_dates
        for method, forecast_key in enumerate(list_forecast_key):
            if forecast_key in runtimeparams.keys():
                if isinstance(runtimeparams[forecast_key], list) and len(
                    runtimeparams[forecast_key]
                ) >= len(forecast_dates):
                    params["passed_data"][forecast_key] = runtimeparams[forecast_key]
                    params["optim_conf"][forecast_methods[method]] = "list"
                else:
                    logger.error(
                        f"ERROR: The passed data is either not a list or the length is not correct, length should be {str(len(forecast_dates))}"
                    )
                    logger.error(
                        f"Passed type is {str(type(runtimeparams[forecast_key]))} and length is {str(len(runtimeparams[forecast_key]))}"
                    )
                # Check if string contains list, if so extract
                if isinstance(runtimeparams[forecast_key], str):
                    if isinstance(ast.literal_eval(runtimeparams[forecast_key]), list):
                        runtimeparams[forecast_key] = ast.literal_eval(
                            runtimeparams[forecast_key]
                        )
                list_non_digits = [
                    x
                    for x in runtimeparams[forecast_key]
                    if not (isinstance(x, int) or isinstance(x, float))
                ]
                if len(list_non_digits) > 0:
                    logger.warning(
                        f"There are non numeric values on the passed data for {forecast_key}, check for missing values (nans, null, etc)"
                    )
                    for x in list_non_digits:
                        logger.warning(
                            f"This value in {forecast_key} was detected as non digits: {str(x)}"
                        )
            else:
                params["passed_data"][forecast_key] = None

        # Treat passed data for forecast model fit/predict/tune at runtime
        if (
            params["passed_data"].get("historic_days_to_retrieve", None) is not None
            and params["passed_data"]["historic_days_to_retrieve"] < 9
        ):
            logger.warning(
                "warning `days_to_retrieve` is set to a value less than 9, this could cause an error with the fit"
            )
            logger.warning(
                "setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune"
            )
            params["passed_data"]["historic_days_to_retrieve"] = 9
        else:
            if params["retrieve_hass_conf"].get("historic_days_to_retrieve", 0) < 9:
                logger.debug(
                    "setting`passed_data:days_to_retrieve` to 9 for fit/predict/tune"
                )
                params["passed_data"]["historic_days_to_retrieve"] = 9
            else:
                params["passed_data"]["historic_days_to_retrieve"] = params[
                    "retrieve_hass_conf"
                ]["historic_days_to_retrieve"]
        if "model_type" not in runtimeparams.keys():
            model_type = "load_forecast"
        else:
            model_type = runtimeparams["model_type"]
        params["passed_data"]["model_type"] = model_type
        if "var_model" not in runtimeparams.keys():
            var_model = "sensor.power_load_no_var_loads"
        else:
            var_model = runtimeparams["var_model"]
        params["passed_data"]["var_model"] = var_model
        if "sklearn_model" not in runtimeparams.keys():
            sklearn_model = "KNeighborsRegressor"
        else:
            sklearn_model = runtimeparams["sklearn_model"]
        params["passed_data"]["sklearn_model"] = sklearn_model
        if "regression_model" not in runtimeparams.keys():
            regression_model = "AdaBoostRegression"
        else:
            regression_model = runtimeparams["regression_model"]
        params["passed_data"]["regression_model"] = regression_model
        if "num_lags" not in runtimeparams.keys():
            num_lags = 48
        else:
            num_lags = runtimeparams["num_lags"]
        params["passed_data"]["num_lags"] = num_lags
        if "split_date_delta" not in runtimeparams.keys():
            split_date_delta = "48h"
        else:
            split_date_delta = runtimeparams["split_date_delta"]
        params["passed_data"]["split_date_delta"] = split_date_delta
        if "perform_backtest" not in runtimeparams.keys():
            perform_backtest = False
        else:
            perform_backtest = ast.literal_eval(
                str(runtimeparams["perform_backtest"]).capitalize()
            )
        params["passed_data"]["perform_backtest"] = perform_backtest
        if "model_predict_publish" not in runtimeparams.keys():
            model_predict_publish = False
        else:
            model_predict_publish = ast.literal_eval(
                str(runtimeparams["model_predict_publish"]).capitalize()
            )
        params["passed_data"]["model_predict_publish"] = model_predict_publish
        if "model_predict_entity_id" not in runtimeparams.keys():
            model_predict_entity_id = "sensor.p_load_forecast_custom_model"
        else:
            model_predict_entity_id = runtimeparams["model_predict_entity_id"]
        params["passed_data"]["model_predict_entity_id"] = model_predict_entity_id
        if "model_predict_unit_of_measurement" not in runtimeparams.keys():
            model_predict_unit_of_measurement = "W"
        else:
            model_predict_unit_of_measurement = runtimeparams[
                "model_predict_unit_of_measurement"
            ]
        params["passed_data"]["model_predict_unit_of_measurement"] = (
            model_predict_unit_of_measurement
        )
        if "model_predict_friendly_name" not in runtimeparams.keys():
            model_predict_friendly_name = "Load Power Forecast custom ML model"
        else:
            model_predict_friendly_name = runtimeparams["model_predict_friendly_name"]
        params["passed_data"]["model_predict_friendly_name"] = (
            model_predict_friendly_name
        )
        if "mlr_predict_entity_id" not in runtimeparams.keys():
            mlr_predict_entity_id = "sensor.mlr_predict"
        else:
            mlr_predict_entity_id = runtimeparams["mlr_predict_entity_id"]
        params["passed_data"]["mlr_predict_entity_id"] = mlr_predict_entity_id
        if "mlr_predict_unit_of_measurement" not in runtimeparams.keys():
            mlr_predict_unit_of_measurement = None
        else:
            mlr_predict_unit_of_measurement = runtimeparams[
                "mlr_predict_unit_of_measurement"
            ]
        params["passed_data"]["mlr_predict_unit_of_measurement"] = (
            mlr_predict_unit_of_measurement
        )
        if "mlr_predict_friendly_name" not in runtimeparams.keys():
            mlr_predict_friendly_name = "mlr predictor"
        else:
            mlr_predict_friendly_name = runtimeparams["mlr_predict_friendly_name"]
        params["passed_data"]["mlr_predict_friendly_name"] = mlr_predict_friendly_name

        # Treat passed data for other parameters
        if "alpha" not in runtimeparams.keys():
            alpha = 0.5
        else:
            alpha = runtimeparams["alpha"]
        params["passed_data"]["alpha"] = alpha
        if "beta" not in runtimeparams.keys():
            beta = 0.5
        else:
            beta = runtimeparams["beta"]
        params["passed_data"]["beta"] = beta

        # Param to save forecast cache (i.e. Solcast)
        if "weather_forecast_cache" not in runtimeparams.keys():
            weather_forecast_cache = False
        else:
            weather_forecast_cache = runtimeparams["weather_forecast_cache"]
        params["passed_data"]["weather_forecast_cache"] = weather_forecast_cache

        # Param to make sure optimization only uses cached data. (else produce error)
        if "weather_forecast_cache_only" not in runtimeparams.keys():
            weather_forecast_cache_only = False
        else:
            weather_forecast_cache_only = runtimeparams["weather_forecast_cache_only"]
        params["passed_data"]["weather_forecast_cache_only"] = (
            weather_forecast_cache_only
        )

        # A condition to manually save entity data under data_path/entities after optimization
        if "entity_save" not in runtimeparams.keys():
            entity_save = ""
        else:
            entity_save = runtimeparams["entity_save"]
        params["passed_data"]["entity_save"] = entity_save

        # A condition to put a prefix on all published data, or check for saved data under prefix name
        if "publish_prefix" not in runtimeparams.keys():
            publish_prefix = ""
        else:
            publish_prefix = runtimeparams["publish_prefix"]
        params["passed_data"]["publish_prefix"] = publish_prefix

        # Treat optimization (optim_conf) configuration parameters passed at runtime
        if "def_current_state" in runtimeparams.keys():
            params["optim_conf"]["def_current_state"] = [
                bool(s) for s in runtimeparams["def_current_state"]
            ]

        # Treat retrieve data from Home Assistant (retrieve_hass_conf) configuration parameters passed at runtime
        # Secrets passed at runtime
        if "solcast_api_key" in runtimeparams.keys():
            params["retrieve_hass_conf"]["solcast_api_key"] = runtimeparams[
                "solcast_api_key"
            ]
        if "solcast_rooftop_id" in runtimeparams.keys():
            params["retrieve_hass_conf"]["solcast_rooftop_id"] = runtimeparams[
                "solcast_rooftop_id"
            ]
        if "solar_forecast_kwp" in runtimeparams.keys():
            params["retrieve_hass_conf"]["solar_forecast_kwp"] = runtimeparams[
                "solar_forecast_kwp"
            ]
        # Treat custom entities id's and friendly names for variables
        if "custom_pv_forecast_id" in runtimeparams.keys():
            params["passed_data"]["custom_pv_forecast_id"] = runtimeparams[
                "custom_pv_forecast_id"
            ]
        if "custom_load_forecast_id" in runtimeparams.keys():
            params["passed_data"]["custom_load_forecast_id"] = runtimeparams[
                "custom_load_forecast_id"
            ]
        if "custom_pv_curtailment_id" in runtimeparams.keys():
            params["passed_data"]["custom_pv_curtailment_id"] = runtimeparams[
                "custom_pv_curtailment_id"
            ]
        if "custom_hybrid_inverter_id" in runtimeparams.keys():
            params["passed_data"]["custom_hybrid_inverter_id"] = runtimeparams[
                "custom_hybrid_inverter_id"
            ]
        if "custom_batt_forecast_id" in runtimeparams.keys():
            params["passed_data"]["custom_batt_forecast_id"] = runtimeparams[
                "custom_batt_forecast_id"
            ]
        if "custom_batt_soc_forecast_id" in runtimeparams.keys():
            params["passed_data"]["custom_batt_soc_forecast_id"] = runtimeparams[
                "custom_batt_soc_forecast_id"
            ]
        if "custom_grid_forecast_id" in runtimeparams.keys():
            params["passed_data"]["custom_grid_forecast_id"] = runtimeparams[
                "custom_grid_forecast_id"
            ]
        if "custom_cost_fun_id" in runtimeparams.keys():
            params["passed_data"]["custom_cost_fun_id"] = runtimeparams[
                "custom_cost_fun_id"
            ]
        if "custom_optim_status_id" in runtimeparams.keys():
            params["passed_data"]["custom_optim_status_id"] = runtimeparams[
                "custom_optim_status_id"
            ]
        if "custom_unit_load_cost_id" in runtimeparams.keys():
            params["passed_data"]["custom_unit_load_cost_id"] = runtimeparams[
                "custom_unit_load_cost_id"
            ]
        if "custom_unit_prod_price_id" in runtimeparams.keys():
            params["passed_data"]["custom_unit_prod_price_id"] = runtimeparams[
                "custom_unit_prod_price_id"
            ]
        if "custom_deferrable_forecast_id" in runtimeparams.keys():
            params["passed_data"]["custom_deferrable_forecast_id"] = runtimeparams[
                "custom_deferrable_forecast_id"
            ]
        if "custom_predicted_temperature_id" in runtimeparams.keys():
            params["passed_data"]["custom_predicted_temperature_id"] = runtimeparams[
                "custom_predicted_temperature_id"
            ]

    # split config categories from params
    retrieve_hass_conf = params["retrieve_hass_conf"]
    optim_conf = params["optim_conf"]
    plant_conf = params["plant_conf"]

    # Serialize the final params
    params = json.dumps(params, default=str)
    return params, retrieve_hass_conf, optim_conf, plant_conf
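
For context, runtimeparams arrives as the JSON body of an EMHASS action call; a representative payload for the naive-mpc-optim branch above could look like this (all values illustrative):

import json

runtimeparams = json.dumps({
    "prediction_horizon": 24,           # read only by the naive-mpc-optim branch
    "soc_init": 0.4,
    "soc_final": 0.6,
    "load_cost_forecast": [0.21] * 48,  # forecast lists must cover every forecast date
    "publish_prefix": "mpc_",
})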


def get_yaml_parse(params: str, logger: logging.Logger) -> Tuple[dict, dict, dict]:
    """
    Perform parsing of the params into the configuration catagories

    :param params: Built configuration parameters
    :type params: str
    :param logger: The logger object
    :type logger: logging.Logger
    :return: A tuple with the dictionaries containing the parsed data
    :rtype: tuple(dict)

    """
    if params:
        if type(params) is str:
            input_conf = json.loads(params)
        else:
            input_conf = params
    else:
        input_conf = {}
        logger.error("No params have been detected for get_yaml_parse")
        return False, False, False

    optim_conf = input_conf.get("optim_conf", {})
    retrieve_hass_conf = input_conf.get("retrieve_hass_conf", {})
    plant_conf = input_conf.get("plant_conf", {})

    # Format time parameters
    if optim_conf.get("delta_forecast_daily", None) is not None:
        optim_conf["delta_forecast_daily"] = pd.Timedelta(
            days=optim_conf["delta_forecast_daily"]
        )
    if retrieve_hass_conf.get("optimization_time_step", None) is not None:
        retrieve_hass_conf["optimization_time_step"] = pd.to_timedelta(
            retrieve_hass_conf["optimization_time_step"], "minutes"
        )
    if retrieve_hass_conf.get("time_zone", None) is not None:
        retrieve_hass_conf["time_zone"] = pytz.timezone(retrieve_hass_conf["time_zone"])

    return retrieve_hass_conf, optim_conf, plant_conf
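
A minimal parse of a serialized params string (a sketch; real params carry many more keys):

import logging
from emhass.utils import get_yaml_parse

logger = logging.getLogger(__name__)
params = (
    '{"retrieve_hass_conf": {"optimization_time_step": 30, "time_zone": "Europe/Paris"},'
    ' "optim_conf": {"delta_forecast_daily": 1}, "plant_conf": {}}'
)
retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params, logger)
# optimization_time_step is now a pd.Timedelta and time_zone a pytz timezone object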


def get_injection_dict(df: pd.DataFrame, plot_size: Optional[int] = 1366) -> dict:
    """
    Build a dictionary with graphs and tables for the webui.

    :param df: The optimization result DataFrame
    :type df: pd.DataFrame
    :param plot_size: Size of the plot figure in pixels, defaults to 1366
    :type plot_size: Optional[int], optional
    :return: A dictionary containing the graphs and tables in html format
    :rtype: dict

    """
    cols_p = [i for i in df.columns.to_list() if "P_" in i]
    # Let's round the data in the DF
    optim_status = df["optim_status"].unique().item()
    df.drop("optim_status", axis=1, inplace=True)
    cols_else = [i for i in df.columns.to_list() if "P_" not in i]
    df = df.apply(pd.to_numeric)
    df[cols_p] = df[cols_p].astype(int)
    df[cols_else] = df[cols_else].round(3)
    # Create plots
    n_colors = len(cols_p)
    colors = px.colors.sample_colorscale(
        "jet", [n / (n_colors - 1) for n in range(n_colors)]
    )
    fig_0 = px.line(
        df[cols_p],
        title="Systems powers schedule after optimization results",
        template="presentation",
        line_shape="hv",
        color_discrete_sequence=colors,
    )
    fig_0.update_layout(xaxis_title="Timestamp", yaxis_title="System powers (W)")
    if "SOC_opt" in df.columns.to_list():
        fig_1 = px.line(
            df["SOC_opt"],
            title="Battery state of charge schedule after optimization results",
            template="presentation",
            line_shape="hv",
            color_discrete_sequence=colors,
        )
        fig_1.update_layout(xaxis_title="Timestamp", yaxis_title="Battery SOC (%)")
    cols_cost = [i for i in df.columns.to_list() if "cost_" in i or "unit_" in i]
    n_colors = len(cols_cost)
    colors = px.colors.sample_colorscale(
        "jet", [n / (n_colors - 1) for n in range(n_colors)]
    )
    fig_2 = px.line(
        df[cols_cost],
        title="Systems costs obtained from optimization results",
        template="presentation",
        line_shape="hv",
        color_discrete_sequence=colors,
    )
    fig_2.update_layout(xaxis_title="Timestamp", yaxis_title="System costs (currency)")
    # Get full path to image
    image_path_0 = fig_0.to_html(full_html=False, default_width="75%")
    if "SOC_opt" in df.columns.to_list():
        image_path_1 = fig_1.to_html(full_html=False, default_width="75%")
    image_path_2 = fig_2.to_html(full_html=False, default_width="75%")
    # The tables
    table1 = df.reset_index().to_html(classes="mystyle", index=False)
    cost_cols = [i for i in df.columns if "cost_" in i]
    table2 = df[cost_cols].reset_index().sum(numeric_only=True)
    table2["optim_status"] = optim_status
    table2 = (
        table2.to_frame(name="Value")
        .reset_index(names="Variable")
        .to_html(classes="mystyle", index=False)
    )
    # The dict of plots
    injection_dict = {}
    injection_dict["title"] = "<h2>EMHASS optimization results</h2>"
    injection_dict["subsubtitle0"] = "<h4>Plotting latest optimization results</h4>"
    injection_dict["figure_0"] = image_path_0
    if "SOC_opt" in df.columns.to_list():
        injection_dict["figure_1"] = image_path_1
    injection_dict["figure_2"] = image_path_2
    injection_dict["subsubtitle1"] = "<h4>Last run optimization results table</h4>"
    injection_dict["table1"] = table1
    injection_dict["subsubtitle2"] = (
        "<h4>Summary table for latest optimization results</h4>"
    )
    injection_dict["table2"] = table2
    return injection_dict
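
A toy input that satisfies the column conventions used above; note that at least two "P_" columns are needed, since the color-scale sampling divides by n_colors - 1:

import pandas as pd
from emhass.utils import get_injection_dict

df = pd.DataFrame({
    "P_PV": [0, 1500, 3000],
    "P_Load": [500, 800, 1200],
    "cost_profit": [0.0, 0.12, 0.25],
    "unit_load_cost": [0.2, 0.2, 0.2],
    "optim_status": ["Optimal"] * 3,  # unique() must yield a single status
})
injection_dict = get_injection_dict(df)  # html fragments for the webui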


def get_injection_dict_forecast_model_fit(
    df_fit_pred: pd.DataFrame, mlf: MLForecaster
) -> dict:
    """
    Build a dictionary with graphs and tables for the webui for special MLF fit case.

    :param df_fit_pred: The fit result DataFrame
    :type df_fit_pred: pd.DataFrame
    :param mlf: The MLForecaster object
    :type mlf: MLForecaster
    :return: A dictionary containing the graphs and tables in html format
    :rtype: dict
    """
    fig = df_fit_pred.plot()
    fig.layout.template = "presentation"
    fig.update_yaxes(title_text=mlf.model_type)
    fig.update_xaxes(title_text="Time")
    image_path_0 = fig.to_html(full_html=False, default_width="75%")
    # The dict of plots
    injection_dict = {}
    injection_dict["title"] = "<h2>Custom machine learning forecast model fit</h2>"
    injection_dict["subsubtitle0"] = (
        "<h4>Plotting train/test forecast model results for " + mlf.model_type + "</h4>"
    )
    injection_dict["subsubtitle0"] = (
        "<h4>Forecasting variable " + mlf.var_model + "</h4>"
    )
    injection_dict["figure_0"] = image_path_0
    return injection_dict


def get_injection_dict_forecast_model_tune(
    df_pred_optim: pd.DataFrame, mlf: MLForecaster
) -> dict:
    """
    Build a dictionary with graphs and tables for the webui for special MLF tune case.

    :param df_pred_optim: The tune result DataFrame
    :type df_pred_optim: pd.DataFrame
    :param mlf: The MLForecaster object
    :type mlf: MLForecaster
    :return: A dictionary containing the graphs and tables in html format
    :rtype: dict
    """
    fig = df_pred_optim.plot()
    fig.layout.template = "presentation"
    fig.update_yaxes(title_text=mlf.model_type)
    fig.update_xaxes(title_text="Time")
    image_path_0 = fig.to_html(full_html=False, default_width="75%")
    # The dict of plots
    injection_dict = {}
    injection_dict["title"] = "<h2>Custom machine learning forecast model tune</h2>"
    injection_dict["subsubtitle0"] = (
        "<h4>Performed a tuning routine using bayesian optimization for "
        + mlf.model_type
        + "</h4>"
    )
    injection_dict["subsubtitle0"] = (
        "<h4>Forecasting variable " + mlf.var_model + "</h4>"
    )
    injection_dict["figure_0"] = image_path_0
    return injection_dict


def build_config(
    emhass_conf: dict,
    logger: logging.Logger,
    defaults_path: str,
    config_path: Optional[str] = None,
    legacy_config_path: Optional[str] = None,
) -> dict:
    """
    Retrieve parameters from configuration files.
    priority order (low - high) = defaults_path, config_path legacy_config_path

    :param emhass_conf: Dictionary containing the needed emhass paths
    :type emhass_conf: dict
    :param logger: The logger object
    :type logger: logging.Logger
    :param defaults_path: path to config file for parameter defaults (config_defaults.json)
    :type defaults_path: str
    :param config_path: path to the main configuration file (config.json)
    :type config_path: str
    :param legacy_config_path: path to legacy config file (config_emhass.yaml)
    :type legacy_config_path: str
    :return: The built config dictionary
    :rtype: dict
    """

    # Read default parameters (default root_path/data/config_defaults.json)
    if defaults_path and pathlib.Path(defaults_path).is_file():
        with defaults_path.open("r") as data:
            config = json.load(data)
    else:
        logger.error("config_defaults.json. does not exist ")
        return False

    # Read user config parameters if provided (default /share/config.json)
    if config_path and pathlib.Path(config_path).is_file():
        with config_path.open("r") as data:
            # Set override default parameters (config_defaults) with user given parameters (config.json)
            logger.info("Obtaining parameters from config.json:")
            config.update(json.load(data))
    else:
        logger.info(
            "config.json does not exist, or has not been passed. config parameters may default to config_defaults.json"
        )
        logger.info(
            "you may like to generate the config.json file on the configuration page"
        )

    # Check to see if legacy config_emhass.yaml was provided (default /app/config_emhass.yaml)
    # Convert legacy parameter definitions/format to match config.json
    if legacy_config_path and pathlib.Path(legacy_config_path).is_file():
        with open(legacy_config_path, "r") as data:
            legacy_config = yaml.load(data, Loader=yaml.FullLoader)
            legacy_config_parameters = build_legacy_config_params(
                emhass_conf, legacy_config, logger
            )
            if type(legacy_config_parameters) is not bool:
                logger.info(
                    "Obtaining parameters from config_emhass.yaml: (will overwrite config parameters)"
                )
                config.update(legacy_config_parameters)

    return config
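
A sketch of the priority chain (paths are illustrative; each path argument is expected to be a pathlib.Path, since the function calls .open() on it directly):

import logging
import pathlib
from emhass.utils import build_config

logger = logging.getLogger(__name__)
emhass_conf = {"associations_path": pathlib.Path("/app/data/associations.csv")}
config = build_config(
    emhass_conf,
    logger,
    defaults_path=pathlib.Path("/app/data/config_defaults.json"),
    config_path=pathlib.Path("/share/config.json"),  # overrides the defaults when present
)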


def build_legacy_config_params(
    emhass_conf: dict, legacy_config: dict, logger: logging.Logger
) -> dict:
    """
    Build a config dictionary with legacy config_emhass.yaml file.
    Uses the associations file to convert parameter naming conventions (to config.json/config_defaults.json).
    Extracts the parameter values and formats to match config.json.

    :param emhass_conf: Dictionary containing the needed emhass paths
    :type emhass_conf: dict
    :param legacy_config: The legacy config dictionary
    :type legacy_config: dict
    :param logger: The logger object
    :type logger: logging.Logger
    :return: The built config dictionary
    :rtype: dict
    """

    # Association file key reference
    # association[0] = config catagories
    # association[1] = legacy parameter name
    # association[2] = parameter (config.json/config_defaults.json)
    # association[3] = parameter list name if exists (not used, from legacy options.json)

    # Check each config catagories exists, else create blank dict for categories (avoid errors)
    legacy_config["retrieve_hass_conf"] = legacy_config.get("retrieve_hass_conf", {})
    legacy_config["optim_conf"] = legacy_config.get("optim_conf", {})
    legacy_config["plant_conf"] = legacy_config.get("plant_conf", {})
    config = {}

    # Use associations list to map legacy parameter name with config.json parameter name
    if emhass_conf["associations_path"].exists():
        with emhass_conf["associations_path"].open("r") as data:
            associations = list(csv.reader(data, delimiter=","))
    else:
        logger.error(
            "Cant find associations file (associations.csv) in: "
            + str(emhass_conf["associations_path"])
        )
        return False

    # Loop through all parameters in association file
    # Append config with existing legacy config parameters (converting alternative parameter naming conventions with associations list)
    for association in associations:
        # if legacy config catagories exists and if legacy parameter exists in config catagories
        if (
            legacy_config.get(association[0], None) is not None
            and legacy_config[association[0]].get(association[1], None) is not None
        ):
            config[association[2]] = legacy_config[association[0]][association[1]]

            # If config now has load_peak_hour_periods, extract from list of dict
            if (
                association[2] == "load_peak_hour_periods"
                and type(config[association[2]]) is list
            ):
                config[association[2]] = dict(
                    (key, d[key]) for d in config[association[2]] for key in d
                )

    return config
    # params['associations_dict'] = associations_dict


def param_to_config(param: dict, logger: logging.Logger) -> dict:
    """
    A function that extracts the parameters from param back to the config.json format.
    Extracts parameters from config catagories.
    Attempts to exclude secrets hosed in retrieve_hass_conf.

    :param params: Built configuration parameters
    :type param: dict
    :param logger: The logger object
    :type logger: logging.Logger
    :return: The built config dictionary
    :rtype: dict
    """
    logger.debug("Converting param to config")

    return_config = {}

    config_catagories = ["retrieve_hass_conf", "optim_conf", "plant_conf"]
    secret_params = [
        "hass_url",
        "time_zone",
        "Latitude",
        "Longitude",
        "Altitude",
        "long_lived_token",
        "solcast_api_key",
        "solcast_rooftop_id",
        "solar_forecast_kwp",
    ]

    # Loop through config catagories that contain config params, and extract
    for config in config_catagories:
        for parameter in param[config]:
            # If parameter is not a secret, append to return_config
            if parameter not in secret_params:
                return_config[str(parameter)] = param[config][parameter]

    return return_config
|
1179
|
-
|
1180
|
-
|
1181
|
-
def build_secrets(
|
1182
|
-
emhass_conf: dict,
|
1183
|
-
logger: logging.Logger,
|
1184
|
-
argument: Optional[dict] = {},
|
1185
|
-
options_path: Optional[str] = None,
|
1186
|
-
secrets_path: Optional[str] = None,
|
1187
|
-
no_response: Optional[bool] = False,
|
1188
|
-
) -> Tuple[dict, dict]:
|
1189
|
-
"""
|
1190
|
-
Retrieve and build parameters from secrets locations (ENV, ARG, Secrets file (secrets_emhass.yaml/options.json) and/or Home Assistant (via API))
|
1191
|
-
priority order (lwo to high) = Defaults (written in function), ENV, Options json file, Home Assistant API, Secrets yaml file, Arguments
|
1192
|
-
|
1193
|
-
:param emhass_conf: Dictionary containing the needed emhass paths
|
1194
|
-
:type emhass_conf: dict
|
1195
|
-
:param logger: The logger object
|
1196
|
-
:type logger: logging.Logger
|
1197
|
-
:param argument: dictionary of secrets arguments passed (url,key)
|
1198
|
-
:type argument: dict
|
1199
|
-
:param options_path: path to the options file (options.json) (usually provided by EMHASS-Add-on)
|
1200
|
-
:type options_path: str
|
1201
|
-
:param secrets_path: path to secrets file (secrets_emhass.yaml)
|
1202
|
-
:type secrets_path: str
|
1203
|
-
:param no_response: bypass get request to Home Assistant (json response errors)
|
1204
|
-
:type no_response: bool
|
1205
|
-
:return: Updated emhass_conf, the built secrets dictionary
|
1206
|
-
:rtype: Tuple[dict, dict]:
|
1207
|
-
"""
|
1208
|
-
|
1209
|
-
# Set defaults to be overwritten
|
1210
|
-
params_secrets = {
|
1211
|
-
"hass_url": "https://myhass.duckdns.org/",
|
1212
|
-
"long_lived_token": "thatverylongtokenhere",
|
1213
|
-
"time_zone": "Europe/Paris",
|
1214
|
-
"Latitude": 45.83,
|
1215
|
-
"Longitude": 6.86,
|
1216
|
-
"Altitude": 4807.8,
|
1217
|
-
"solcast_api_key": "yoursecretsolcastapikey",
|
1218
|
-
"solcast_rooftop_id": "yourrooftopid",
|
1219
|
-
"solar_forecast_kwp": 5,
|
1220
|
-
}
|
1221
|
-
|
1222
|
-
# Obtain Secrets from ENV?
|
1223
|
-
params_secrets["hass_url"] = os.getenv("EMHASS_URL", params_secrets["hass_url"])
|
1224
|
-
params_secrets["long_lived_token"] = os.getenv(
|
1225
|
-
"SUPERVISOR_TOKEN", params_secrets["long_lived_token"]
|
1226
|
-
)
|
1227
|
-
params_secrets["time_zone"] = os.getenv("TIME_ZONE", params_secrets["time_zone"])
|
1228
|
-
params_secrets["Latitude"] = float(os.getenv("LAT", params_secrets["Latitude"]))
|
1229
|
-
params_secrets["Longitude"] = float(os.getenv("LON", params_secrets["Longitude"]))
|
1230
|
-
params_secrets["Altitude"] = float(os.getenv("ALT", params_secrets["Altitude"]))
|
1231
|
-
|
1232
|
-
    # Obtain secrets from options.json (generated from the EMHASS-Add-on, Home Assistant addon Configuration page) or from the Home Assistant API (local Supervisor API)?
    # Use the local Supervisor API to obtain secrets from Home Assistant if hass_url in options.json is empty and the SUPERVISOR_TOKEN ENV exists (provided by Home Assistant when running the container as an addon)
    options = {}
    if options_path and pathlib.Path(options_path).is_file():
        with options_path.open("r") as data:
            options = json.load(data)

    # Obtain secrets from Home Assistant?
    url_from_options = options.get("hass_url", "empty")
    key_from_options = options.get("long_lived_token", "empty")

    # If a data path is specified in options.json, overwrite emhass_conf['data_path']
    if (
        options.get("data_path", None) is not None
        and pathlib.Path(options["data_path"]).exists()
    ):
        emhass_conf["data_path"] = pathlib.Path(options["data_path"])

    # Check whether to use the Home Assistant local API
    if (
        not no_response
        and (
            url_from_options == "empty"
            or url_from_options == ""
            or url_from_options == "http://supervisor/core/api"
        )
        and os.getenv("SUPERVISOR_TOKEN", None) is not None
    ):
        params_secrets["long_lived_token"] = os.getenv("SUPERVISOR_TOKEN", None)
        params_secrets["hass_url"] = "http://supervisor/core/api"
        headers = {
            "Authorization": "Bearer " + params_secrets["long_lived_token"],
            "content-type": "application/json",
        }
        # Obtain secrets from Home Assistant via the API
        logger.debug("Obtaining secrets from Home Assistant Supervisor API")
        response = get(params_secrets["hass_url"] + "/config", headers=headers)
        if response.status_code < 400:
            config_hass = response.json()
            params_secrets = {
                "hass_url": params_secrets["hass_url"],
                "long_lived_token": params_secrets["long_lived_token"],
                "time_zone": config_hass["time_zone"],
                "Latitude": config_hass["latitude"],
                "Longitude": config_hass["longitude"],
                "Altitude": config_hass["elevation"],
            }
        else:
            # Obtain the url and key secrets, if any, from options.json (default /app/options.json)
            logger.warning("Error obtaining secrets from Home Assistant Supervisor API")
            logger.debug("Obtaining url and key secrets from options.json")
            if url_from_options != "empty" and url_from_options != "":
                params_secrets["hass_url"] = url_from_options
            if key_from_options != "empty" and key_from_options != "":
                params_secrets["long_lived_token"] = key_from_options
            if (
                options.get("time_zone", "empty") != "empty"
                and options["time_zone"] != ""
            ):
                params_secrets["time_zone"] = options["time_zone"]
            if options.get("Latitude", None) is not None and bool(options["Latitude"]):
                params_secrets["Latitude"] = options["Latitude"]
            if options.get("Longitude", None) is not None and bool(options["Longitude"]):
                params_secrets["Longitude"] = options["Longitude"]
            if options.get("Altitude", None) is not None and bool(options["Altitude"]):
                params_secrets["Altitude"] = options["Altitude"]
    else:
        # Obtain the url and key secrets, if any, from options.json (default /app/options.json)
        logger.debug("Obtaining url and key secrets from options.json")
        if url_from_options != "empty" and url_from_options != "":
            params_secrets["hass_url"] = url_from_options
        if key_from_options != "empty" and key_from_options != "":
            params_secrets["long_lived_token"] = key_from_options
        if (
            options.get("time_zone", "empty") != "empty"
            and options["time_zone"] != ""
        ):
            params_secrets["time_zone"] = options["time_zone"]
        if options.get("Latitude", None) is not None and bool(options["Latitude"]):
            params_secrets["Latitude"] = options["Latitude"]
        if options.get("Longitude", None) is not None and bool(options["Longitude"]):
            params_secrets["Longitude"] = options["Longitude"]
        if options.get("Altitude", None) is not None and bool(options["Altitude"]):
            params_secrets["Altitude"] = options["Altitude"]
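When running as a Home Assistant add-on, the branch above resolves location and time-zone secrets by calling the Supervisor-proxied /config endpoint with the injected SUPERVISOR_TOKEN. A standalone sketch of that request; note that http://supervisor/core/api only resolves inside a supervised container, and the field names (time_zone, latitude, longitude, elevation) are the ones read by the code above:

import os
from requests import get

token = os.getenv("SUPERVISOR_TOKEN", "")  # injected by Home Assistant for add-ons
headers = {
    "Authorization": "Bearer " + token,
    "content-type": "application/json",
}
try:
    response = get("http://supervisor/core/api/config", headers=headers, timeout=5)
    if response.status_code < 400:
        config_hass = response.json()
        print(config_hass["time_zone"], config_hass["latitude"])
    else:
        print("Supervisor API refused the request; falling back to options.json")
except Exception:
    print("Not running under the Home Assistant Supervisor")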
    # Obtain the forecast secrets (if any) from options.json (default /app/options.json)
    forecast_secrets = [
        "solcast_api_key",
        "solcast_rooftop_id",
        "solar_forecast_kwp",
    ]
    if any(x in forecast_secrets for x in list(options.keys())):
        logger.debug("Obtaining forecast secrets from options.json")
        if (
            options.get("solcast_api_key", "empty") != "empty"
            and options["solcast_api_key"] != ""
        ):
            params_secrets["solcast_api_key"] = options["solcast_api_key"]
        if (
            options.get("solcast_rooftop_id", "empty") != "empty"
            and options["solcast_rooftop_id"] != ""
        ):
            params_secrets["solcast_rooftop_id"] = options["solcast_rooftop_id"]
        if options.get("solar_forecast_kwp", None) and bool(
            options["solar_forecast_kwp"]
        ):
            params_secrets["solar_forecast_kwp"] = options["solar_forecast_kwp"]

    # Obtain secrets from secrets_emhass.yaml? (default /app/secrets_emhass.yaml)
    if secrets_path and pathlib.Path(secrets_path).is_file():
        logger.debug("Obtaining secrets from secrets file")
        with open(pathlib.Path(secrets_path), "r") as file:
            params_secrets.update(yaml.load(file, Loader=yaml.FullLoader))
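Because the secrets file is parsed with yaml.load and merged with dict.update, any key it defines overrides what was collected so far. A sketch of that merge, parsing a hypothetical secrets_emhass.yaml payload from a string (key names are the ones used throughout this function; values are placeholders):

import yaml

secrets_yaml = """
hass_url: https://myhass.duckdns.org/
long_lived_token: thatverylongtokenhere
time_zone: Europe/Paris
Latitude: 45.83
Longitude: 6.86
Altitude: 4807.8
"""

params_secrets = {"hass_url": "http://supervisor/core/api"}
params_secrets.update(yaml.load(secrets_yaml, Loader=yaml.FullLoader))
print(params_secrets["hass_url"])  # the secrets file wins: https://myhass.duckdns.org/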
    # Receive key and url from ARG/arguments?
    if argument.get("url", None) is not None:
        params_secrets["hass_url"] = argument["url"]
        logger.debug("Obtaining url from passed argument")
    if argument.get("key", None) is not None:
        params_secrets["long_lived_token"] = argument["key"]
        logger.debug("Obtaining long_lived_token from passed argument")

    return emhass_conf, params_secrets

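The return above closes the secrets-building helper (its def appears earlier in this file). A hedged usage sketch, assuming the helper is utils.build_secrets taking emhass_conf, a logger, and the optional argument/options_path/secrets_path/no_response parameters seen in its body; with no options file, no secrets file, and no relevant ENV vars set, the returned secrets are the hard-coded defaults:

import logging
import pathlib

from emhass import utils

logger = logging.getLogger(__name__)
emhass_conf = {"data_path": pathlib.Path("/tmp/emhass_data")}  # illustrative path

emhass_conf, params_secrets = utils.build_secrets(
    emhass_conf, logger, secrets_path="/app/secrets_emhass.yaml"
)
print(params_secrets.get("hass_url"))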
def build_params(
    emhass_conf: dict, params_secrets: dict, config: dict, logger: logging.Logger
) -> dict:
    """
    Build the main params dictionary from the config and secrets.
    Appends the configuration categories used by emhass to the parameters (using the associations file as a reference).

    :param emhass_conf: Dictionary containing the needed emhass paths
    :type emhass_conf: dict
    :param params_secrets: The dictionary containing the built secret variables
    :type params_secrets: dict
    :param config: The dictionary of built config parameters
    :type config: dict
    :param logger: The logger object
    :type logger: logging.Logger
    :return: The built param dictionary
    :rtype: dict
    """
    if not isinstance(params_secrets, dict):
        params_secrets = {}

    params = {}
    # Start with blank config categories
    params["retrieve_hass_conf"] = {}
    params["params_secrets"] = {}
    params["optim_conf"] = {}
    params["plant_conf"] = {}

    # Obtain associations to categorize parameters into their corresponding config categories
    associations_path = emhass_conf.get(
        "associations_path", get_root(__file__, num_parent=2) / "data/associations.csv"
    )
    if associations_path.exists():
        with associations_path.open("r") as data:
            associations = list(csv.reader(data, delimiter=","))
    else:
        logger.error(
            "Unable to obtain the associations file (associations.csv) in: "
            + str(associations_path)
        )
        return False

    # Association file key reference
    # association[0] = config category
    # association[1] = legacy parameter name
    # association[2] = parameter (config.json/config_defaults.json)
    # association[3] = parameter list name, if it exists (not used, from legacy options.json)
    # Use the association list to append parameters from config into params (under their corresponding config categories)
    for association in associations:
        # If the parameter has a list_ name and the parameter in config is presented with its list name
        # (i.e., the config parameter is in legacy options.json format)
        if len(association) == 4 and config.get(association[3], None) is not None:
            # Extract lists of dictionaries
            if config[association[3]] and type(config[association[3]][0]) is dict:
                params[association[0]][association[2]] = [
                    i[association[2]] for i in config[association[3]]
                ]
            else:
                params[association[0]][association[2]] = config[association[3]]
        # Else, directly set the value of the config parameter in params
        elif config.get(association[2], None) is not None:
            params[association[0]][association[2]] = config[association[2]]
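Each association row drives the loop above purely by position: row[0] is the target category, row[2] the parameter name, and an optional row[3] the legacy list name. A self-contained sketch of that dispatch with made-up three-column rows (no fourth column, so the elif branch fires):

# Hypothetical association rows: [category, legacy name, parameter]
associations = [
    ["optim_conf", "costfun", "costfun"],
    ["retrieve_hass_conf", "freq", "optimization_time_step"],
]
config = {"costfun": "profit", "optimization_time_step": 30}
params = {"optim_conf": {}, "retrieve_hass_conf": {}}

for association in associations:
    if len(association) == 4 and config.get(association[3], None) is not None:
        params[association[0]][association[2]] = config[association[3]]
    elif config.get(association[2], None) is not None:
        params[association[0]][association[2]] = config[association[2]]

print(params)  # {'optim_conf': {'costfun': 'profit'}, 'retrieve_hass_conf': {'optimization_time_step': 30}}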
    # Check if we need to create `list_hp_periods` from config (i.e., legacy options.json format)
    if (
        params.get("optim_conf", None) is not None
        and config.get("list_peak_hours_periods_start_hours", None) is not None
        and config.get("list_peak_hours_periods_end_hours", None) is not None
    ):
        start_hours_list = [
            i["peak_hours_periods_start_hours"]
            for i in config["list_peak_hours_periods_start_hours"]
        ]
        end_hours_list = [
            i["peak_hours_periods_end_hours"]
            for i in config["list_peak_hours_periods_end_hours"]
        ]
        num_peak_hours = len(start_hours_list)
        list_hp_periods_list = {
            "period_hp_" + str(i + 1): [
                {"start": start_hours_list[i]},
                {"end": end_hours_list[i]},
            ]
            for i in range(num_peak_hours)
        }
        params["optim_conf"]["load_peak_hour_periods"] = list_hp_periods_list
    else:
        # Else, check that params already contains load_peak_hour_periods from config
        if params["optim_conf"].get("load_peak_hour_periods", None) is None:
            logger.warning(
                "Unable to detect or create load_peak_hour_periods parameter"
            )

    # Format the load_peak_hour_periods list into a dict if necessary
    if params["optim_conf"].get(
        "load_peak_hour_periods", None
    ) is not None and isinstance(params["optim_conf"]["load_peak_hour_periods"], list):
        params["optim_conf"]["load_peak_hour_periods"] = dict(
            (key, d[key])
            for d in params["optim_conf"]["load_peak_hour_periods"]
            for key in d
        )
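The conversion above flattens a list of single-key dicts into one dict keyed by period name. A worked example using the same shape the legacy branch builds (times are illustrative):

periods_list = [
    {"period_hp_1": [{"start": "02:54"}, {"end": "15:24"}]},
    {"period_hp_2": [{"start": "17:24"}, {"end": "20:24"}]},
]
periods_dict = dict((key, d[key]) for d in periods_list for key in d)
print(periods_dict["period_hp_1"])  # [{'start': '02:54'}, {'end': '15:24'}]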
    # Call the function that checks parameter lists requiring the same length as the number of deferrable loads
    # If they do not match, pad them with default values to fill the gaps
    if params["optim_conf"].get("number_of_deferrable_loads", None) is not None:
        num_def_loads = params["optim_conf"]["number_of_deferrable_loads"]
        params["optim_conf"]["start_timesteps_of_each_deferrable_load"] = check_def_loads(
            num_def_loads,
            params["optim_conf"],
            0,
            "start_timesteps_of_each_deferrable_load",
            logger,
        )
        params["optim_conf"]["end_timesteps_of_each_deferrable_load"] = check_def_loads(
            num_def_loads,
            params["optim_conf"],
            0,
            "end_timesteps_of_each_deferrable_load",
            logger,
        )
        params["optim_conf"]["set_deferrable_load_single_constant"] = check_def_loads(
            num_def_loads,
            params["optim_conf"],
            False,
            "set_deferrable_load_single_constant",
            logger,
        )
        params["optim_conf"]["treat_deferrable_load_as_semi_cont"] = check_def_loads(
            num_def_loads,
            params["optim_conf"],
            True,
            "treat_deferrable_load_as_semi_cont",
            logger,
        )
        params["optim_conf"]["set_deferrable_startup_penalty"] = check_def_loads(
            num_def_loads,
            params["optim_conf"],
            0.0,
            "set_deferrable_startup_penalty",
            logger,
        )
        params["optim_conf"]["operating_hours_of_each_deferrable_load"] = check_def_loads(
            num_def_loads,
            params["optim_conf"],
            0,
            "operating_hours_of_each_deferrable_load",
            logger,
        )
        params["optim_conf"]["nominal_power_of_deferrable_loads"] = check_def_loads(
            num_def_loads,
            params["optim_conf"],
            0,
            "nominal_power_of_deferrable_loads",
            logger,
        )
    else:
        logger.warning("unable to obtain parameter: number_of_deferrable_loads")
    # historic_days_to_retrieve should be no less than 2
    if params["retrieve_hass_conf"].get("historic_days_to_retrieve", None) is not None:
        if params["retrieve_hass_conf"]["historic_days_to_retrieve"] < 2:
            params["retrieve_hass_conf"]["historic_days_to_retrieve"] = 2
            logger.warning(
                "days_to_retrieve should not be lower than 2, setting days_to_retrieve to 2. Make sure your sensors also have at least 2 days of history"
            )
    else:
        logger.warning("unable to obtain parameter: historic_days_to_retrieve")

    # Configure secrets, setting params in the correct config category
    # retrieve_hass_conf
    params["retrieve_hass_conf"]["hass_url"] = params_secrets.get("hass_url", None)
    params["retrieve_hass_conf"]["long_lived_token"] = params_secrets.get(
        "long_lived_token", None
    )
    params["retrieve_hass_conf"]["time_zone"] = params_secrets.get("time_zone", None)
    params["retrieve_hass_conf"]["Latitude"] = params_secrets.get("Latitude", None)
    params["retrieve_hass_conf"]["Longitude"] = params_secrets.get("Longitude", None)
    params["retrieve_hass_conf"]["Altitude"] = params_secrets.get("Altitude", None)
    # Update optional param secrets
    if params["optim_conf"].get("weather_forecast_method", None) is not None:
        if params["optim_conf"]["weather_forecast_method"] == "solcast":
            params["retrieve_hass_conf"]["solcast_api_key"] = params_secrets.get(
                "solcast_api_key", "123456"
            )
            params["params_secrets"]["solcast_api_key"] = params_secrets.get(
                "solcast_api_key", "123456"
            )
            params["retrieve_hass_conf"]["solcast_rooftop_id"] = params_secrets.get(
                "solcast_rooftop_id", "123456"
            )
            params["params_secrets"]["solcast_rooftop_id"] = params_secrets.get(
                "solcast_rooftop_id", "123456"
            )
        elif params["optim_conf"]["weather_forecast_method"] == "solar.forecast":
            params["retrieve_hass_conf"]["solar_forecast_kwp"] = params_secrets.get(
                "solar_forecast_kwp", 5
            )
            params["params_secrets"]["solar_forecast_kwp"] = params_secrets.get(
                "solar_forecast_kwp", 5
            )
    else:
        logger.warning("Unable to detect weather_forecast_method parameter")
    # Check if any secret parameters are still at their default values
    secret_params = [
        "https://myhass.duckdns.org/",
        "thatverylongtokenhere",
        45.83,
        6.86,
        4807.8,
    ]
    if any(x in secret_params for x in params["retrieve_hass_conf"].values()):
        logger.warning(
            "Some secret parameter values are still matching their defaults"
        )

    # Set empty dict objects for params passed_data
    # To be later populated with runtime parameters (treat_runtimeparams)
    params["passed_data"] = {
        "pv_power_forecast": None,
        "load_power_forecast": None,
        "load_cost_forecast": None,
        "prod_price_forecast": None,
        "prediction_horizon": None,
        "soc_init": None,
        "soc_final": None,
        "operating_hours_of_each_deferrable_load": None,
        "start_timesteps_of_each_deferrable_load": None,
        "end_timesteps_of_each_deferrable_load": None,
        "alpha": None,
        "beta": None,
    }

    return params

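build_params only needs emhass_conf["associations_path"], a config dict, and the secrets to produce the four category dicts plus passed_data. A hedged, runnable sketch against a tiny hand-written associations file (rows and values are illustrative; a few warnings about missing optional parameters are expected):

import csv
import logging
import pathlib
import tempfile

from emhass import utils

logger = logging.getLogger(__name__)

# Hypothetical three-column association rows: category, legacy name, parameter
tmp = pathlib.Path(tempfile.mkdtemp())
with (tmp / "associations.csv").open("w", newline="") as f:
    csv.writer(f).writerows([
        ["optim_conf", "P_deferrable_nom", "nominal_power_of_deferrable_loads"],
        ["retrieve_hass_conf", "days_to_retrieve", "historic_days_to_retrieve"],
    ])

emhass_conf = {"associations_path": tmp / "associations.csv"}
config = {
    "nominal_power_of_deferrable_loads": [3000.0, 750.0],
    "historic_days_to_retrieve": 1,  # too low: build_params bumps it to 2
}
params_secrets = {"hass_url": "http://homeassistant.local:8123"}

params = utils.build_params(emhass_conf, params_secrets, config, logger)
print(params["retrieve_hass_conf"]["historic_days_to_retrieve"])  # 2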
def check_def_loads(
    num_def_loads: int, parameter: dict, default, parameter_name: str, logger
):
    """
    Check a parameter list against the number of deferrable loads; if they do not match, enlarge the list to fit.

    :param num_def_loads: Total number of deferrable loads
    :type num_def_loads: int
    :param parameter: The parameter config dict containing the parameter
    :type parameter: dict
    :param default: Default value used to pad missing entries
    :type default: obj
    :param parameter_name: Name of the parameter
    :type parameter_name: str
    :param logger: The logger object
    :type logger: logging.Logger
    :return: The parameter list
    :rtype: list
    """
    if (
        parameter.get(parameter_name, None) is not None
        and isinstance(parameter[parameter_name], list)
        and num_def_loads > len(parameter[parameter_name])
    ):
        logger.warning(
            parameter_name
            + " does not match the number in num_def_loads, adding default values ("
            + str(default)
            + ") to the parameter"
        )
        for _ in range(len(parameter[parameter_name]), num_def_loads):
            parameter[parameter_name].append(default)
    return parameter[parameter_name]

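check_def_loads leaves a long-enough list untouched and pads a short one in place with the default. A quick worked example (assuming the function is importable from emhass.utils):

import logging

from emhass.utils import check_def_loads

logger = logging.getLogger(__name__)
optim_conf = {"nominal_power_of_deferrable_loads": [3000.0]}

padded = check_def_loads(3, optim_conf, 0, "nominal_power_of_deferrable_loads", logger)
print(padded)  # [3000.0, 0, 0] -- a mismatch warning is logged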
def get_days_list(days_to_retrieve: int) -> pd.DatetimeIndex:
    """
    Get the list of past days, from today back to days_to_retrieve.

    :param days_to_retrieve: Total number of days to retrieve from the past
    :type days_to_retrieve: int
    :return: The list of days
    :rtype: pd.DatetimeIndex
    """
    today = datetime.now(timezone.utc).replace(minute=0, second=0, microsecond=0)
    d = (today - timedelta(days=days_to_retrieve)).isoformat()
    days_list = pd.date_range(start=d, end=today.isoformat(), freq="D")
    return days_list

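Because the anchor is the current UTC time truncated to the hour, get_days_list(n) yields n + 1 daily stamps: n past days plus today. For example:

from emhass.utils import get_days_list

days = get_days_list(2)
print(len(days))      # 3
print(days.freqstr)   # 'D'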
def set_df_index_freq(df: pd.DataFrame) -> pd.DataFrame:
    """
    Set the freq of a DataFrame DateTimeIndex.

    :param df: Input DataFrame
    :type df: pd.DataFrame
    :return: Input DataFrame with freq defined
    :rtype: pd.DataFrame
    """
    idx_diff = np.diff(df.index)
    # Sometimes there are zero values in this list.
    idx_diff = idx_diff[np.nonzero(idx_diff)]
    sampling = pd.to_timedelta(np.median(idx_diff))
    df = df[~df.index.duplicated()]
    return df.asfreq(sampling)
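Taking the median of the non-zero index deltas makes the inferred sampling period robust to a few gaps or duplicated stamps; asfreq then reindexes to that period, inserting NaN where steps are missing. A small demonstration:

import pandas as pd

from emhass.utils import set_df_index_freq

# 30-minute data with one duplicated stamp and one missing step
idx = pd.to_datetime([
    "2024-01-01 00:00", "2024-01-01 00:30", "2024-01-01 00:30",
    "2024-01-01 01:00", "2024-01-01 02:00",
])
df = pd.DataFrame({"power": [1.0, 2.0, 2.0, 3.0, 4.0]}, index=idx)

df_freq = set_df_index_freq(df)
print(df_freq.index.freqstr)               # '30min' ('30T' on older pandas)
print(int(df_freq["power"].isna().sum()))  # 1: the 01:30 step was filled with NaN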