emhass 0.11.2__py3-none-any.whl → 0.11.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emhass/command_line.py +702 -373
- emhass/data/associations.csv +1 -1
- emhass/forecast.py +671 -346
- emhass/machine_learning_forecaster.py +204 -105
- emhass/machine_learning_regressor.py +26 -7
- emhass/optimization.py +1017 -471
- emhass/retrieve_hass.py +226 -79
- emhass/static/data/param_definitions.json +5 -4
- emhass/utils.py +689 -455
- emhass/web_server.py +339 -225
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/METADATA +17 -8
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/RECORD +16 -16
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/WHEEL +1 -1
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/LICENSE +0 -0
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/entry_points.txt +0 -0
- {emhass-0.11.2.dist-info → emhass-0.11.3.dist-info}/top_level.txt +0 -0
emhass/retrieve_hass.py
CHANGED
@@ -1,13 +1,14 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 
-import json
 import copy
-import os
-import pathlib
 import datetime
+import json
 import logging
+import os
+import pathlib
 from typing import Optional
+
 import numpy as np
 import pandas as pd
 from requests import get, post
@@ -32,9 +33,17 @@ class RetrieveHass:
 
     """
 
-    def __init__(self, hass_url: str, long_lived_token: str, freq: pd.Timedelta,
-                 time_zone: datetime.timezone, params: str, emhass_conf: dict, logger: logging.Logger,
-                 get_data_from_file: Optional[bool] = False) -> None:
+    def __init__(
+        self,
+        hass_url: str,
+        long_lived_token: str,
+        freq: pd.Timedelta,
+        time_zone: datetime.timezone,
+        params: str,
+        emhass_conf: dict,
+        logger: logging.Logger,
+        get_data_from_file: Optional[bool] = False,
+    ) -> None:
         """
         Define constructor for RetrieveHass class.
 
@@ -75,19 +84,24 @@ class RetrieveHass:
     def get_ha_config(self):
         """
         Extract some configuration data from HA.
-
+
         """
         headers = {
             "Authorization": "Bearer " + self.long_lived_token,
-            "content-type": "application/json"
-            }
-        url = self.hass_url+"api/config"
+            "content-type": "application/json",
+        }
+        url = self.hass_url + "api/config"
         response_config = get(url, headers=headers)
         self.ha_config = response_config.json()
-
-    def get_data(self, days_list: pd.date_range, var_list: list,
-                 minimal_response: Optional[bool] = False, significant_changes_only: Optional[bool] = False,
-                 test_url: Optional[str] = "empty") -> None:
+
+    def get_data(
+        self,
+        days_list: pd.date_range,
+        var_list: list,
+        minimal_response: Optional[bool] = False,
+        significant_changes_only: Optional[bool] = False,
+        test_url: Optional[str] = "empty",
+    ) -> None:
         r"""
         Retrieve the actual data from hass.
 
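The reformatted get_ha_config amounts to a single authenticated GET against the Home Assistant REST API. A minimal standalone sketch of the same call follows; the instance URL and token below are placeholders, not values from the package:

from requests import get

hass_url = "http://homeassistant.local:8123/"  # hypothetical instance URL
long_lived_token = "YOUR_LONG_LIVED_TOKEN"  # hypothetical token

headers = {
    "Authorization": "Bearer " + long_lived_token,
    "content-type": "application/json",
}
response_config = get(hass_url + "api/config", headers=headers)
ha_config = response_config.json()  # includes fields such as time_zone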
@@ -113,15 +127,17 @@ class RetrieveHass:
         self.logger.info("Retrieve hass get data method initiated...")
         headers = {
             "Authorization": "Bearer " + self.long_lived_token,
-            "content-type": "application/json"
-            }
+            "content-type": "application/json",
+        }
         # Looping on each day from days list
         self.df_final = pd.DataFrame()
         x = 0  # iterate based on days
         for day in days_list:
             for i, var in enumerate(var_list):
                 if test_url == "empty":
-                    if self.hass_url == "http://supervisor/core/api": # If we are using the supervisor API
+                    if (
+                        self.hass_url == "http://supervisor/core/api"
+                    ):  # If we are using the supervisor API
                         url = (
                             self.hass_url
                             + "/history/period/"
@@ -139,7 +155,7 @@ class RetrieveHass:
                         )
                     if minimal_response:  # A support for minimal response
                         url = url + "?minimal_response"
-                    if significant_changes_only: # And for signicant changes only (check the HASS restful API for more info)
+                    if significant_changes_only:  # And for signicant changes only (check the HASS restful API for more info)
                         url = url + "?significant_changes_only"
                 else:
                     url = test_url
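For context, the minimal_response and significant_changes_only toggles map onto query options of the Home Assistant history endpoint (the HASS RESTful API, as the comment above notes). A sketch of the URL shape get_data assembles, with a hypothetical entity id and timestamp; the flags are appended with "?" exactly as in the hunk above:

day_start = "2024-01-01T00:00:00+00:00"  # hypothetical ISO start of day
var = "sensor.power_load_no_var_loads"  # hypothetical entity id
url = (
    "http://homeassistant.local:8123/api"
    + "/history/period/"
    + day_start
    + "?filter_entity_id="
    + var
)
minimal_response = True
significant_changes_only = False
if minimal_response:  # trim attributes from the returned states
    url = url + "?minimal_response"
if significant_changes_only:
    url = url + "?significant_changes_only"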
@@ -172,9 +188,19 @@ class RetrieveHass:
                     data = response.json()[0]
                 except IndexError:
                     if x == 0:
-                        self.logger.error("The retrieved JSON is empty, A sensor:" + var + " may have 0 days of history, passed sensor may not be correct, or days to retrieve is set too heigh")
+                        self.logger.error(
+                            "The retrieved JSON is empty, A sensor:"
+                            + var
+                            + " may have 0 days of history, passed sensor may not be correct, or days to retrieve is set too heigh"
+                        )
                     else:
-                        self.logger.error("The retrieved JSON is empty for day:" + str(day) + ", days_to_retrieve may be larger than the recorded history of sensor:" + var + " (check your recorder settings)")
+                        self.logger.error(
+                            "The retrieved JSON is empty for day:"
+                            + str(day)
+                            + ", days_to_retrieve may be larger than the recorded history of sensor:"
+                            + var
+                            + " (check your recorder settings)"
+                        )
                     return False
                 df_raw = pd.DataFrame.from_dict(data)
                 # self.logger.info(str(df_raw))
@@ -186,17 +212,41 @@ class RetrieveHass:
                             + " may have 0 days of history or passed sensor may not be correct"
                         )
                     else:
-                        self.logger.error("Retrieved empty Dataframe for day:" + str(day) + ", days_to_retrieve may be larger than the recorded history of sensor:" + var + " (check your recorder settings)")
+                        self.logger.error(
+                            "Retrieved empty Dataframe for day:"
+                            + str(day)
+                            + ", days_to_retrieve may be larger than the recorded history of sensor:"
+                            + var
+                            + " (check your recorder settings)"
+                        )
                     return False
                 # self.logger.info(self.freq.seconds)
-                if len(df_raw) < ((60 / (self.freq.seconds / 60)) * 24) and x != len(days_list) - 1: # check if there is enough Dataframes for passed frequency per day (not inc current day)
-                    self.logger.debug("sensor:" + var + " retrieved Dataframe count: " + str(len(df_raw)) + ", on day: " + str(day) + ". This is less than freq value passed: " + str(self.freq))
-                if i == 0: # Defining the DataFrame container
-                    from_date = pd.to_datetime(df_raw["last_changed"], format="ISO8601").min()
-                    to_date = pd.to_datetime(df_raw["last_changed"], format="ISO8601").max()
-                    ts = pd.to_datetime(pd.date_range(start=from_date, end=to_date, freq=self.freq),
-                                        format="%Y-%d-%m %H:%M").round(self.freq, ambiguous="infer", nonexistent="shift_forward")
-                    df_day = pd.DataFrame(index=ts)
+                if (
+                    len(df_raw) < ((60 / (self.freq.seconds / 60)) * 24)
+                    and x != len(days_list) - 1
+                ):  # check if there is enough Dataframes for passed frequency per day (not inc current day)
+                    self.logger.debug(
+                        "sensor:"
+                        + var
+                        + " retrieved Dataframe count: "
+                        + str(len(df_raw))
+                        + ", on day: "
+                        + str(day)
+                        + ". This is less than freq value passed: "
+                        + str(self.freq)
+                    )
+                if i == 0:  # Defining the DataFrame container
+                    from_date = pd.to_datetime(
+                        df_raw["last_changed"], format="ISO8601"
+                    ).min()
+                    to_date = pd.to_datetime(
+                        df_raw["last_changed"], format="ISO8601"
+                    ).max()
+                    ts = pd.to_datetime(
+                        pd.date_range(start=from_date, end=to_date, freq=self.freq),
+                        format="%Y-%d-%m %H:%M",
+                    ).round(self.freq, ambiguous="infer", nonexistent="shift_forward")
+                    df_day = pd.DataFrame(index=ts)
                 # Caution with undefined string data: unknown, unavailable, etc.
                 df_tp = (
                     df_raw.copy()[["state"]]
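The new multi-line condition reads more clearly once the arithmetic is spelled out: (60 / (self.freq.seconds / 60)) * 24 is the expected number of rows per full day at the configured time step. A quick worked check:

import pandas as pd

freq = pd.Timedelta("30min")  # e.g. an optimization time step of 30 minutes
rows_per_hour = 60 / (freq.seconds / 60)  # freq.seconds = 1800 -> 30 min -> 2 rows/hour
expected_rows_per_day = rows_per_hour * 24  # 48 rows for a complete day
assert expected_rows_per_day == 48.0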
@@ -212,16 +262,26 @@ class RetrieveHass:
                 df_tp = df_tp.resample(self.freq).mean()
                 df_day = pd.concat([df_day, df_tp], axis=1)
             self.df_final = pd.concat([self.df_final, df_day], axis=0)
-            x += 1
+            x += 1
         self.df_final = set_df_index_freq(self.df_final)
         if self.df_final.index.freq != self.freq:
-            self.logger.error("The inferred freq:" + str(self.df_final.index.freq) + " from data is not equal to the defined freq in passed:" + str(self.freq))
+            self.logger.error(
+                "The inferred freq:"
+                + str(self.df_final.index.freq)
+                + " from data is not equal to the defined freq in passed:"
+                + str(self.freq)
+            )
             return False
         return True
-
-    def prepare_data(self, var_load: str, load_negative: Optional[bool] = False,
-                     set_zero_min: Optional[bool] = True, var_replace_zero: Optional[list] = None,
-                     var_interp: Optional[list] = None) -> None:
+
+    def prepare_data(
+        self,
+        var_load: str,
+        load_negative: Optional[bool] = False,
+        set_zero_min: Optional[bool] = True,
+        var_replace_zero: Optional[list] = None,
+        var_interp: Optional[list] = None,
+    ) -> None:
         r"""
         Apply some data treatment in preparation for the optimization task.
 
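The resampling step above is plain pandas: irregular sensor state changes are averaged onto the regular self.freq grid before each day is concatenated into df_final. A toy example of the same operation:

import pandas as pd

# Irregular state changes averaged onto a regular 30 min grid,
# as df_tp.resample(self.freq).mean() does in get_data.
idx = pd.to_datetime(["2024-01-01 00:05", "2024-01-01 00:20", "2024-01-01 00:40"])
df_tp = pd.DataFrame({"state": [100.0, 200.0, 300.0]}, index=idx)
# 00:00 bin -> mean(100, 200) = 150.0; 00:30 bin -> 300.0
print(df_tp.resample(pd.Timedelta("30min")).mean())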
@@ -298,8 +358,15 @@ class RetrieveHass:
         return True
 
     @staticmethod
-    def get_attr_data_dict(data_df: pd.DataFrame, idx: int, entity_id: str, unit_of_measurement: str,
-                           friendly_name: str, list_name: str, state: float) -> dict:
+    def get_attr_data_dict(
+        data_df: pd.DataFrame,
+        idx: int,
+        entity_id: str,
+        unit_of_measurement: str,
+        friendly_name: str,
+        list_name: str,
+        state: float,
+    ) -> dict:
         list_df = copy.deepcopy(data_df).loc[data_df.index[idx] :].reset_index()
         list_df.columns = ["timestamps", entity_id]
         ts_list = [str(i) for i in list_df["timestamps"].tolist()]
@@ -320,11 +387,20 @@ class RetrieveHass:
         }
         return data
 
-    def post_data(self, data_df: pd.DataFrame, idx: int, entity_id: str,
-                  unit_of_measurement: str, friendly_name: str, type_var: str,
-                  from_mlforecaster: Optional[bool] = False, publish_prefix: Optional[str] = "",
-                  save_entities: Optional[bool] = False, logger_levels: Optional[str] = "info",
-                  dont_post: Optional[bool] = False) -> None:
+    def post_data(
+        self,
+        data_df: pd.DataFrame,
+        idx: int,
+        entity_id: str,
+        unit_of_measurement: str,
+        friendly_name: str,
+        type_var: str,
+        from_mlforecaster: Optional[bool] = False,
+        publish_prefix: Optional[str] = "",
+        save_entities: Optional[bool] = False,
+        logger_levels: Optional[str] = "info",
+        dont_post: Optional[bool] = False,
+    ) -> None:
         r"""
         Post passed data to hass.
 
@@ -364,10 +440,10 @@ class RetrieveHass:
         headers = {
             "Authorization": "Bearer " + self.long_lived_token,
             "content-type": "application/json",
-            }
+        }
         # Preparing the data dict to be published
         if type_var == "cost_fun":
-            if isinstance(data_df.iloc[0],pd.Series):
+            if isinstance(data_df.iloc[0], pd.Series):  # if Series extract
                 data_df = data_df.iloc[:, 0]
             state = np.round(data_df.sum(), 2)
         elif type_var == "unit_load_cost" or type_var == "unit_prod_price":
@@ -379,29 +455,85 @@ class RetrieveHass:
         else:
             state = np.round(data_df.loc[data_df.index[idx]], 2)
         if type_var == "power":
-            data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                   friendly_name, "forecasts", state)
+            data = RetrieveHass.get_attr_data_dict(
+                data_df,
+                idx,
+                entity_id,
+                unit_of_measurement,
+                friendly_name,
+                "forecasts",
+                state,
+            )
         elif type_var == "deferrable":
-            data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                   friendly_name, "deferrables_schedule", state)
+            data = RetrieveHass.get_attr_data_dict(
+                data_df,
+                idx,
+                entity_id,
+                unit_of_measurement,
+                friendly_name,
+                "deferrables_schedule",
+                state,
+            )
         elif type_var == "temperature":
-            data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                   friendly_name, "predicted_temperatures", state)
+            data = RetrieveHass.get_attr_data_dict(
+                data_df,
+                idx,
+                entity_id,
+                unit_of_measurement,
+                friendly_name,
+                "predicted_temperatures",
+                state,
+            )
         elif type_var == "batt":
-            data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                   friendly_name, "battery_scheduled_power", state)
+            data = RetrieveHass.get_attr_data_dict(
+                data_df,
+                idx,
+                entity_id,
+                unit_of_measurement,
+                friendly_name,
+                "battery_scheduled_power",
+                state,
+            )
         elif type_var == "SOC":
-            data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                   friendly_name, "battery_scheduled_soc", state)
+            data = RetrieveHass.get_attr_data_dict(
+                data_df,
+                idx,
+                entity_id,
+                unit_of_measurement,
+                friendly_name,
+                "battery_scheduled_soc",
+                state,
+            )
         elif type_var == "unit_load_cost":
-            data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                   friendly_name, "unit_load_cost_forecasts", state)
+            data = RetrieveHass.get_attr_data_dict(
+                data_df,
+                idx,
+                entity_id,
+                unit_of_measurement,
+                friendly_name,
+                "unit_load_cost_forecasts",
+                state,
+            )
         elif type_var == "unit_prod_price":
-            data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                   friendly_name, "unit_prod_price_forecasts", state)
+            data = RetrieveHass.get_attr_data_dict(
+                data_df,
+                idx,
+                entity_id,
+                unit_of_measurement,
+                friendly_name,
+                "unit_prod_price_forecasts",
+                state,
+            )
         elif type_var == "mlforecaster":
-            data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                   friendly_name, "scheduled_forecast", state)
+            data = RetrieveHass.get_attr_data_dict(
+                data_df,
+                idx,
+                entity_id,
+                unit_of_measurement,
+                friendly_name,
+                "scheduled_forecast",
+                state,
+            )
         elif type_var == "optim_status":
             data = {
                 "state": state,
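All eight reformatted branches differ only in the attribute list name forwarded to get_attr_data_dict, so the dispatch reduces to this lookup (taken directly from the calls above):

# type_var -> list_name, as passed to RetrieveHass.get_attr_data_dict
TYPE_VAR_TO_LIST_NAME = {
    "power": "forecasts",
    "deferrable": "deferrables_schedule",
    "temperature": "predicted_temperatures",
    "batt": "battery_scheduled_power",
    "SOC": "battery_scheduled_soc",
    "unit_load_cost": "unit_load_cost_forecasts",
    "unit_prod_price": "unit_prod_price_forecasts",
    "mlforecaster": "scheduled_forecast",
}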
@@ -428,8 +560,10 @@ class RetrieveHass:
         }
         # Actually post the data
         if self.get_data_from_file or dont_post:
+
             class response:
                 pass
+
             response.status_code = 200
             response.ok = True
         else:
@@ -437,42 +571,55 @@ class RetrieveHass:
 
         # Treating the response status and posting them on the logger
         if response.ok:
-
             if logger_levels == "DEBUG":
-                self.logger.debug("Successfully posted to " + entity_id + " = " + str(state))
+                self.logger.debug(
+                    "Successfully posted to " + entity_id + " = " + str(state)
+                )
             else:
-                self.logger.info("Successfully posted to " + entity_id + " = " + str(state))
+                self.logger.info(
+                    "Successfully posted to " + entity_id + " = " + str(state)
+                )
 
             # If save entities is set, save entity data to /data_path/entities
-            if save_entities:
-                entities_path = self.emhass_conf[
-                    "data_path"] / "entities"
+            if save_entities:
+                entities_path = self.emhass_conf["data_path"] / "entities"
+
                 # Clarify folder exists
                 pathlib.Path(entities_path).mkdir(parents=True, exist_ok=True)
-
+
                 # Save entity data to json file
-                result = data_df.to_json(index="timestamp", orient="index", date_unit="s", date_format="iso")
+                result = data_df.to_json(
+                    index="timestamp", orient="index", date_unit="s", date_format="iso"
+                )
                 parsed = json.loads(result)
-                with open(entities_path / (entity_id + ".json"), "w") as file:
+                with open(entities_path / (entity_id + ".json"), "w") as file:
                     json.dump(parsed, file, indent=4)
-
+
                 # Save the required metadata to json file
                 if os.path.isfile(entities_path / "metadata.json"):
                     with open(entities_path / "metadata.json", "r") as file:
-                        metadata = json.load(file)
+                        metadata = json.load(file)
                 else:
                     metadata = {}
-                with open(entities_path / "metadata.json", "w") as file:
-                    # Save entity metadata, key = entity_id
-                    metadata[entity_id] = {
-                        "name": data_df.name, "unit_of_measurement": unit_of_measurement, "friendly_name": friendly_name, "type_var": type_var, "optimization_time_step": int(self.freq.seconds / 60)}
+                with open(entities_path / "metadata.json", "w") as file:
+                    # Save entity metadata, key = entity_id
+                    metadata[entity_id] = {
+                        "name": data_df.name,
+                        "unit_of_measurement": unit_of_measurement,
+                        "friendly_name": friendly_name,
+                        "type_var": type_var,
+                        "optimization_time_step": int(self.freq.seconds / 60),
+                    }
+
                     # Find lowest frequency to set for continual loop freq
-                    if metadata.get("lowest_time_step",None) == None or metadata["lowest_time_step"] > int(self.freq.seconds / 60):
+                    if metadata.get("lowest_time_step", None) == None or metadata[
+                        "lowest_time_step"
+                    ] > int(self.freq.seconds / 60):
                         metadata["lowest_time_step"] = int(self.freq.seconds / 60)
-                    json.dump(metadata,file, indent=4)
+                    json.dump(metadata, file, indent=4)
+
+                self.logger.debug("Saved " + entity_id + " to json file")
 
-                self.logger.debug("Saved " + entity_id + " to json file")
-
         else:
             self.logger.warning(
                 "The status code for received curl command response is: "
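With save_entities set, the metadata block above writes one entry per posted entity plus a global lowest_time_step. At a 30 minute freq, the resulting metadata.json would look roughly like this (the entity id and names are hypothetical, for illustration only):

import json

metadata = {
    "sensor.p_pv_forecast": {
        "name": "p_pv_forecast",
        "unit_of_measurement": "W",
        "friendly_name": "PV Power Forecast",
        "type_var": "power",
        "optimization_time_step": 30,
    },
    "lowest_time_step": 30,
}
print(json.dumps(metadata, indent=4))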
emhass/static/data/param_definitions.json
CHANGED
@@ -43,7 +43,7 @@
     },
     "logging_level": {
       "friendly_name": "Logging level",
-      "Description": "
+      "Description": "DEBUG provides detailed diagnostic information, INFO gives general operational messages, WARNING highlights potential issues, and ERROR indicates critical problems that may disrupt functionality.",
       "input": "select",
       "select_options": [
         "INFO",
@@ -102,6 +102,7 @@
       "input": "select",
       "select_options": [
         "naive",
+        "mlforecaster",
         "csv"
       ],
       "default_value": "naive"
@@ -364,13 +365,13 @@
       "friendly_name": "Add cost weight for battery discharge",
       "Description": "An additional weight (currency/ kWh) applied in the cost function to battery usage for discharging",
       "input": "float",
-      "default_value": 
+      "default_value": 0.0
     },
     "weight_battery_charge": {
       "friendly_name": "Add cost weight for battery charge",
       "Description": "An additional weight (currency/ kWh) applied in the cost function to battery usage for charging",
       "input": "float",
-      "default_value": 
+      "default_value": 0.0
     },
     "battery_discharge_power_max": {
       "friendly_name": "Max battery discharge power",
@@ -421,4 +422,4 @@
       "default_value": 0.6
     }
   }
-}
+}