emhass 0.12.4__py3-none-any.whl → 0.12.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/METADATA +34 -17
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/RECORD +5 -17
- emhass/__init__.py +0 -0
- emhass/command_line.py +0 -1748
- emhass/data/emhass_inverters.csv +0 -8
- emhass/data/emhass_modules.csv +0 -6
- emhass/forecast.py +0 -1348
- emhass/img/emhass_icon.png +0 -0
- emhass/machine_learning_forecaster.py +0 -397
- emhass/machine_learning_regressor.py +0 -275
- emhass/optimization.py +0 -1504
- emhass/retrieve_hass.py +0 -670
- emhass/utils.py +0 -1678
- emhass/web_server.py +0 -756
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/WHEEL +0 -0
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/entry_points.txt +0 -0
- {emhass-0.12.4.dist-info → emhass-0.12.5.dist-info}/licenses/LICENSE +0 -0
emhass/retrieve_hass.py
DELETED
@@ -1,670 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import copy
import datetime
import json
import logging
import os
import pathlib
from typing import Optional

import numpy as np
import pandas as pd
from requests import get, post

from emhass.utils import set_df_index_freq

class RetrieveHass:
    r"""
    Retrieve data from Home Assistant using the restful API.

    This class allows the user to retrieve data from a Home Assistant instance \
    using the provided restful API (https://developers.home-assistant.io/docs/api/rest/)

    This class methods are:

    - get_data: to retrieve the actual data from hass

    - prepare_data: to apply some data treatment in preparation for the optimization task

    - post_data: Post passed data to hass

    """

    def __init__(
        self,
        hass_url: str,
        long_lived_token: str,
        freq: pd.Timedelta,
        time_zone: datetime.timezone,
        params: str,
        emhass_conf: dict,
        logger: logging.Logger,
        get_data_from_file: Optional[bool] = False,
    ) -> None:
        """
        Define constructor for RetrieveHass class.

        :param hass_url: The URL of the Home Assistant instance
        :type hass_url: str
        :param long_lived_token: The long lived token retrieved from the configuration pane
        :type long_lived_token: str
        :param freq: The frequency of the data DateTimeIndexes
        :type freq: pd.TimeDelta
        :param time_zone: The time zone
        :type time_zone: datetime.timezone
        :param params: Configuration parameters passed from data/options.json
        :type params: str
        :param emhass_conf: Dictionary containing the needed emhass paths
        :type emhass_conf: dict
        :param logger: The passed logger object
        :type logger: logging object
        :param get_data_from_file: Select if data should be retrieved from a
            previously saved pickle useful for testing or directly from connection to
            hass database
        :type get_data_from_file: bool, optional

        """
        self.hass_url = hass_url
        self.long_lived_token = long_lived_token
        self.freq = freq
        self.time_zone = time_zone
        if (params == None) or (params == "null"):
            self.params = {}
        elif type(params) is dict:
            self.params = params
        else:
            self.params = json.loads(params)
        self.emhass_conf = emhass_conf
        self.logger = logger
        self.get_data_from_file = get_data_from_file
        self.var_list = []

    def get_ha_config(self):
        """
        Extract some configuration data from HA.

        """
        headers = {
            "Authorization": "Bearer " + self.long_lived_token,
            "content-type": "application/json",
        }
        if self.hass_url == "http://supervisor/core/api":
            url = self.hass_url + "/config"
        else:
            url = self.hass_url + "api/config"

        try:
            response_config = get(url, headers=headers)
        except Exception:
            self.logger.error("Unable to access Home Assistance instance, check URL")
            self.logger.error("If using addon, try setting url and token to 'empty'")
            return False

        try:
            self.ha_config = response_config.json()
        except Exception:
            self.logger.error("EMHASS was unable to obtain configuration data from HA")
            return False

    def get_data(
        self,
        days_list: pd.date_range,
        var_list: list,
        minimal_response: Optional[bool] = False,
        significant_changes_only: Optional[bool] = False,
        test_url: Optional[str] = "empty",
    ) -> None:
        r"""
        Retrieve the actual data from hass.

        :param days_list: A list of days to retrieve. The ISO format should be used \
            and the timezone is UTC. The frequency of the data_range should be freq='D'
        :type days_list: pandas.date_range
        :param var_list: The list of variables to retrive from hass. These should \
            be the exact name of the sensor in Home Assistant. \
            For example: ['sensor.home_load', 'sensor.home_pv']
        :type var_list: list
        :param minimal_response: Retrieve a minimal response using the hass \
            restful API, defaults to False
        :type minimal_response: bool, optional
        :param significant_changes_only: Retrieve significant changes only \
            using the hass restful API, defaults to False
        :type significant_changes_only: bool, optional
        :return: The DataFrame populated with the retrieved data from hass
        :rtype: pandas.DataFrame

        .. warning:: The minimal_response and significant_changes_only options \
            are experimental
        """
        self.logger.info("Retrieve hass get data method initiated...")
        headers = {
            "Authorization": "Bearer " + self.long_lived_token,
            "content-type": "application/json",
        }
        # Looping on each day from days list
        self.df_final = pd.DataFrame()
        x = 0  # iterate based on days
        for day in days_list:
            for i, var in enumerate(var_list):
                if test_url == "empty":
                    if (
                        self.hass_url == "http://supervisor/core/api"
                    ):  # If we are using the supervisor API
                        url = (
                            self.hass_url
                            + "/history/period/"
                            + day.isoformat()
                            + "?filter_entity_id="
                            + var
                        )
                    else:  # Otherwise the Home Assistant Core API it is
                        url = (
                            self.hass_url
                            + "api/history/period/"
                            + day.isoformat()
                            + "?filter_entity_id="
                            + var
                        )
                    if minimal_response:  # A support for minimal response
                        url = url + "?minimal_response"
                    if significant_changes_only:  # And for signicant changes only (check the HASS restful API for more info)
                        url = url + "?significant_changes_only"
                else:
                    url = test_url
                try:
                    response = get(url, headers=headers)
                except Exception:
                    self.logger.error(
                        "Unable to access Home Assistance instance, check URL"
                    )
                    self.logger.error(
                        "If using addon, try setting url and token to 'empty'"
                    )
                    return False
                else:
                    if response.status_code == 401:
                        self.logger.error(
                            "Unable to access Home Assistance instance, TOKEN/KEY"
                        )
                        self.logger.error(
                            "If using addon, try setting url and token to 'empty'"
                        )
                        return False
                    if response.status_code > 299:
                        return f"Request Get Error: {response.status_code}"
                """import bz2 # Uncomment to save a serialized data for tests
                import _pickle as cPickle
                with bz2.BZ2File("data/test_response_get_data_get_method.pbz2", "w") as f:
                    cPickle.dump(response, f)"""
                try:  # Sometimes when there are connection problems we need to catch empty retrieved json
                    data = response.json()[0]
                except IndexError:
                    if x == 0:
                        self.logger.error(
                            "The retrieved JSON is empty, A sensor:"
                            + var
                            + " may have 0 days of history, passed sensor may not be correct, or days to retrieve is set too heigh"
                        )
                    else:
                        self.logger.error(
                            "The retrieved JSON is empty for day:"
                            + str(day)
                            + ", days_to_retrieve may be larger than the recorded history of sensor:"
                            + var
                            + " (check your recorder settings)"
                        )
                    return False
                df_raw = pd.DataFrame.from_dict(data)
                # self.logger.info(str(df_raw))
                if len(df_raw) == 0:
                    if x == 0:
                        self.logger.error(
                            "The retrieved Dataframe is empty, A sensor:"
                            + var
                            + " may have 0 days of history or passed sensor may not be correct"
                        )
                    else:
                        self.logger.error(
                            "Retrieved empty Dataframe for day:"
                            + str(day)
                            + ", days_to_retrieve may be larger than the recorded history of sensor:"
                            + var
                            + " (check your recorder settings)"
                        )
                    return False
                # self.logger.info(self.freq.seconds)
                if (
                    len(df_raw) < ((60 / (self.freq.seconds / 60)) * 24)
                    and x != len(days_list) - 1
                ):  # check if there is enough Dataframes for passed frequency per day (not inc current day)
                    self.logger.debug(
                        "sensor:"
                        + var
                        + " retrieved Dataframe count: "
                        + str(len(df_raw))
                        + ", on day: "
                        + str(day)
                        + ". This is less than freq value passed: "
                        + str(self.freq)
                    )
                if i == 0:  # Defining the DataFrame container
                    from_date = pd.to_datetime(
                        df_raw["last_changed"], format="ISO8601"
                    ).min()
                    to_date = pd.to_datetime(
                        df_raw["last_changed"], format="ISO8601"
                    ).max()
                    ts = pd.to_datetime(
                        pd.date_range(start=from_date, end=to_date, freq=self.freq),
                        format="%Y-%d-%m %H:%M",
                    ).round(self.freq, ambiguous="infer", nonexistent="shift_forward")
                    df_day = pd.DataFrame(index=ts)
                # Caution with undefined string data: unknown, unavailable, etc.
                df_tp = (
                    df_raw.copy()[["state"]]
                    .replace(["unknown", "unavailable", ""], np.nan)
                    .astype(float)
                    .rename(columns={"state": var})
                )
                # Setting index, resampling and concatenation
                df_tp.set_index(
                    pd.to_datetime(df_raw["last_changed"], format="ISO8601"),
                    inplace=True,
                )
                df_tp = df_tp.resample(self.freq).mean()
                df_day = pd.concat([df_day, df_tp], axis=1)
            self.df_final = pd.concat([self.df_final, df_day], axis=0)
            x += 1
        self.df_final = set_df_index_freq(self.df_final)
        if self.df_final.index.freq != self.freq:
            self.logger.error(
                "The inferred freq:"
                + str(self.df_final.index.freq)
                + " from data is not equal to the defined freq in passed:"
                + str(self.freq)
            )
            return False
        self.var_list = var_list
        return True

    def prepare_data(
        self,
        var_load: str,
        load_negative: Optional[bool] = False,
        set_zero_min: Optional[bool] = True,
        var_replace_zero: Optional[list] = None,
        var_interp: Optional[list] = None,
    ) -> None:
        r"""
        Apply some data treatment in preparation for the optimization task.

        :param var_load: The name of the variable for the household load consumption.
        :type var_load: str
        :param load_negative: Set to True if the retrived load variable is \
            negative by convention, defaults to False
        :type load_negative: bool, optional
        :param set_zero_min: A special treatment for a minimum value saturation \
            to zero. Values below zero are replaced by nans, defaults to True
        :type set_zero_min: bool, optional
        :param var_replace_zero: A list of retrived variables that we would want \
            to replace nans with zeros, defaults to None
        :type var_replace_zero: list, optional
        :param var_interp: A list of retrived variables that we would want to \
            interpolate nan values using linear interpolation, defaults to None
        :type var_interp: list, optional
        :return: The DataFrame populated with the retrieved data from hass and \
            after the data treatment
        :rtype: pandas.DataFrame

        """
        try:
            if load_negative:  # Apply the correct sign to load power
                self.df_final[var_load + "_positive"] = -self.df_final[var_load]
            else:
                self.df_final[var_load + "_positive"] = self.df_final[var_load]
            self.df_final.drop([var_load], inplace=True, axis=1)
        except KeyError:
            self.logger.error(
                "Variable "
                + var_load
                + " was not found. This is typically because no data could be retrieved from Home Assistant"
            )
            return False
        except ValueError:
            self.logger.error(
                "sensor.power_photovoltaics and sensor.power_load_no_var_loads should not be the same"
            )
            return False
        # Confirm var_replace_zero & var_interp contain only sensors contained in var_list
        if isinstance(var_replace_zero, list) and all(
            item in var_replace_zero for item in self.var_list
        ):
            pass
        else:
            var_replace_zero = []
        if isinstance(var_interp, list) and all(
            item in var_interp for item in self.var_list
        ):
            pass
        else:
            var_interp = []
        # Apply minimum values
        if set_zero_min:
            self.df_final.clip(lower=0.0, inplace=True, axis=1)
            self.df_final.replace(to_replace=0.0, value=np.nan, inplace=True)
        new_var_replace_zero = []
        new_var_interp = []
        # Just changing the names of variables to contain the fact that they are considered positive
        if var_replace_zero is not None:
            for string in var_replace_zero:
                new_string = string.replace(var_load, var_load + "_positive")
                new_var_replace_zero.append(new_string)
        else:
            self.logger.warning(
                "Unable to find all the sensors in sensor_replace_zero parameter"
            )
            self.logger.warning(
                "Confirm sure all sensors in sensor_replace_zero are sensor_power_photovoltaics and/or ensor_power_load_no_var_loads "
            )
            new_var_replace_zero = None
        if var_interp is not None:
            for string in var_interp:
                new_string = string.replace(var_load, var_load + "_positive")
                new_var_interp.append(new_string)
        else:
            new_var_interp = None
            self.logger.warning(
                "Unable to find all the sensors in sensor_linear_interp parameter"
            )
            self.logger.warning(
                "Confirm all sensors in sensor_linear_interp are sensor_power_photovoltaics and/or ensor_power_load_no_var_loads "
            )
        # Treating NaN replacement: either by zeros or by linear interpolation
        if new_var_replace_zero is not None:
            self.df_final[new_var_replace_zero] = self.df_final[
                new_var_replace_zero
            ].fillna(0.0)
        if new_var_interp is not None:
            self.df_final[new_var_interp] = self.df_final[new_var_interp].interpolate(
                method="linear", axis=0, limit=None
            )
            self.df_final[new_var_interp] = self.df_final[new_var_interp].fillna(0.0)
        # Setting the correct time zone on DF index
        if self.time_zone is not None:
            self.df_final.index = self.df_final.index.tz_convert(self.time_zone)
        # Drop datetimeindex duplicates on final DF
        self.df_final = self.df_final[~self.df_final.index.duplicated(keep="first")]
        return True

    @staticmethod
    def get_attr_data_dict(
        data_df: pd.DataFrame,
        idx: int,
        entity_id: str,
        unit_of_measurement: str,
        friendly_name: str,
        list_name: str,
        state: float,
    ) -> dict:
        list_df = copy.deepcopy(data_df).loc[data_df.index[idx] :].reset_index()
        list_df.columns = ["timestamps", entity_id]
        ts_list = [str(i) for i in list_df["timestamps"].tolist()]
        vals_list = [str(np.round(i, 2)) for i in list_df[entity_id].tolist()]
        forecast_list = []
        for i, ts in enumerate(ts_list):
            datum = {}
            datum["date"] = ts
            datum[entity_id.split("sensor.")[1]] = vals_list[i]
            forecast_list.append(datum)
        data = {
            "state": "{:.2f}".format(state),
            "attributes": {
                "unit_of_measurement": unit_of_measurement,
                "friendly_name": friendly_name,
                list_name: forecast_list,
            },
        }
        return data

    def post_data(
        self,
        data_df: pd.DataFrame,
        idx: int,
        entity_id: str,
        unit_of_measurement: str,
        friendly_name: str,
        type_var: str,
        from_mlforecaster: Optional[bool] = False,
        publish_prefix: Optional[str] = "",
        save_entities: Optional[bool] = False,
        logger_levels: Optional[str] = "info",
        dont_post: Optional[bool] = False,
    ) -> None:
        r"""
        Post passed data to hass.

        :param data_df: The DataFrame containing the data that will be posted \
            to hass. This should be a one columns DF or a series.
        :type data_df: pd.DataFrame
        :param idx: The int index of the location of the data within the passed \
            DataFrame. We will post just one value at a time.
        :type idx: int
        :param entity_id: The unique entity_id of the sensor in hass.
        :type entity_id: str
        :param unit_of_measurement: The units of the sensor.
        :type unit_of_measurement: str
        :param friendly_name: The friendly name that will be used in the hass frontend.
        :type friendly_name: str
        :param type_var: A variable to indicate the type of variable: power, SOC, etc.
        :type type_var: str
        :param publish_prefix: A common prefix for all published data entity_id.
        :type publish_prefix: str, optional
        :param save_entities: if entity data should be saved in data_path/entities
        :type save_entities: bool, optional
        :param logger_levels: set logger level, info or debug, to output
        :type logger_levels: str, optional
        :param dont_post: dont post to HA
        :type dont_post: bool, optional

        """
        # Add a possible prefix to the entity ID
        entity_id = entity_id.replace("sensor.", "sensor." + publish_prefix)
        # Set the URL
        if (
            self.hass_url == "http://supervisor/core/api"
        ):  # If we are using the supervisor API
            url = self.hass_url + "/states/" + entity_id
        else:  # Otherwise the Home Assistant Core API it is
            url = self.hass_url + "api/states/" + entity_id
        headers = {
            "Authorization": "Bearer " + self.long_lived_token,
            "content-type": "application/json",
        }
        # Preparing the data dict to be published
        if type_var == "cost_fun":
            if isinstance(data_df.iloc[0], pd.Series):  # if Series extract
                data_df = data_df.iloc[:, 0]
            state = np.round(data_df.sum(), 2)
        elif type_var == "unit_load_cost" or type_var == "unit_prod_price":
            state = np.round(data_df.loc[data_df.index[idx]], 4)
        elif type_var == "optim_status":
            state = data_df.loc[data_df.index[idx]]
        elif type_var == "mlregressor":
            state = data_df[idx]
        else:
            state = np.round(data_df.loc[data_df.index[idx]], 2)
        if type_var == "power":
            data = RetrieveHass.get_attr_data_dict(
                data_df,
                idx,
                entity_id,
                unit_of_measurement,
                friendly_name,
                "forecasts",
                state,
            )
        elif type_var == "deferrable":
            data = RetrieveHass.get_attr_data_dict(
                data_df,
                idx,
                entity_id,
                unit_of_measurement,
                friendly_name,
                "deferrables_schedule",
                state,
            )
        elif type_var == "temperature":
            data = RetrieveHass.get_attr_data_dict(
                data_df,
                idx,
                entity_id,
                unit_of_measurement,
                friendly_name,
                "predicted_temperatures",
                state,
            )
        elif type_var == "batt":
            data = RetrieveHass.get_attr_data_dict(
                data_df,
                idx,
                entity_id,
                unit_of_measurement,
                friendly_name,
                "battery_scheduled_power",
                state,
            )
        elif type_var == "SOC":
            data = RetrieveHass.get_attr_data_dict(
                data_df,
                idx,
                entity_id,
                unit_of_measurement,
                friendly_name,
                "battery_scheduled_soc",
                state,
            )
        elif type_var == "unit_load_cost":
            data = RetrieveHass.get_attr_data_dict(
                data_df,
                idx,
                entity_id,
                unit_of_measurement,
                friendly_name,
                "unit_load_cost_forecasts",
                state,
            )
        elif type_var == "unit_prod_price":
            data = RetrieveHass.get_attr_data_dict(
                data_df,
                idx,
                entity_id,
                unit_of_measurement,
                friendly_name,
                "unit_prod_price_forecasts",
                state,
            )
        elif type_var == "mlforecaster":
            data = RetrieveHass.get_attr_data_dict(
                data_df,
                idx,
                entity_id,
                unit_of_measurement,
                friendly_name,
                "scheduled_forecast",
                state,
            )
        elif type_var == "optim_status":
            data = {
                "state": state,
                "attributes": {
                    "unit_of_measurement": unit_of_measurement,
                    "friendly_name": friendly_name,
                },
            }
        elif type_var == "mlregressor":
            data = {
                "state": state,
                "attributes": {
                    "unit_of_measurement": unit_of_measurement,
                    "friendly_name": friendly_name,
                },
            }
        else:
            data = {
                "state": "{:.2f}".format(state),
                "attributes": {
                    "unit_of_measurement": unit_of_measurement,
                    "friendly_name": friendly_name,
                },
            }
        # Actually post the data
        if self.get_data_from_file or dont_post:

            class response:
                pass

            response.status_code = 200
            response.ok = True
        else:
            response = post(url, headers=headers, data=json.dumps(data))

        # Treating the response status and posting them on the logger
        if response.ok:
            if logger_levels == "DEBUG":
                self.logger.debug(
                    "Successfully posted to " + entity_id + " = " + str(state)
                )
            else:
                self.logger.info(
                    "Successfully posted to " + entity_id + " = " + str(state)
                )

            # If save entities is set, save entity data to /data_path/entities
            if save_entities:
                entities_path = self.emhass_conf["data_path"] / "entities"

                # Clarify folder exists
                pathlib.Path(entities_path).mkdir(parents=True, exist_ok=True)

                # Save entity data to json file
                result = data_df.to_json(
                    index="timestamp", orient="index", date_unit="s", date_format="iso"
                )
                parsed = json.loads(result)
                with open(entities_path / (entity_id + ".json"), "w") as file:
                    json.dump(parsed, file, indent=4)

                # Save the required metadata to json file
                if os.path.isfile(entities_path / "metadata.json"):
                    with open(entities_path / "metadata.json", "r") as file:
                        metadata = json.load(file)
                else:
                    metadata = {}
                with open(entities_path / "metadata.json", "w") as file:
                    # Save entity metadata, key = entity_id
                    metadata[entity_id] = {
                        "name": data_df.name,
                        "unit_of_measurement": unit_of_measurement,
                        "friendly_name": friendly_name,
                        "type_var": type_var,
                        "optimization_time_step": int(self.freq.seconds / 60),
                    }

                    # Find lowest frequency to set for continual loop freq
                    if metadata.get("lowest_time_step", None) == None or metadata[
                        "lowest_time_step"
                    ] > int(self.freq.seconds / 60):
                        metadata["lowest_time_step"] = int(self.freq.seconds / 60)
                    json.dump(metadata, file, indent=4)

                self.logger.debug("Saved " + entity_id + " to json file")

        else:
            self.logger.warning(
                "The status code for received curl command response is: "
                + str(response.status_code)
            )
        return response, data
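For readers skimming the removed module, the minimal sketch below shows one way the RetrieveHass class above could be driven end to end against the 0.12.4 API. It is illustrative only: the Home Assistant URL, long-lived token, sensor names, forecast entity_id and data path are placeholders, not values shipped with the package, and dont_post=True is used so nothing is written back to Home Assistant.

import logging
import pathlib
from datetime import datetime, timezone

import pandas as pd

from emhass.retrieve_hass import RetrieveHass

logger = logging.getLogger(__name__)

# All connection details below are placeholders for illustration.
rh = RetrieveHass(
    hass_url="http://localhost:8123/",          # Home Assistant Core API base URL
    long_lived_token="YOUR_LONG_LIVED_TOKEN",   # placeholder token
    freq=pd.Timedelta("30min"),                 # optimization time step
    time_zone=timezone.utc,
    params=None,
    emhass_conf={"data_path": pathlib.Path("/tmp/emhass")},  # hypothetical paths dict
    logger=logger,
)

# Days to retrieve (UTC, daily frequency) and the sensors to pull.
days_list = pd.date_range(end=datetime.now(timezone.utc), periods=2, freq="D")
var_list = ["sensor.power_load_no_var_loads", "sensor.power_photovoltaics"]

if rh.get_data(days_list, var_list):
    # Columns where NaNs should be replaced by zeros vs. linearly interpolated.
    rh.prepare_data(
        var_load="sensor.power_load_no_var_loads",
        load_negative=False,
        set_zero_min=True,
        var_replace_zero=["sensor.power_photovoltaics"],
        var_interp=var_list,
    )
    # Publish one value back as a new sensor; dont_post=True skips the HTTP call.
    response, data = rh.post_data(
        rh.df_final["sensor.power_load_no_var_loads_positive"],
        idx=0,
        entity_id="sensor.p_load_forecast",
        unit_of_measurement="W",
        friendly_name="Load Power Forecast",
        type_var="power",
        dont_post=True,
    )

The dont_post=True path relies on the stubbed response object defined inside post_data above, which is why the publishing step can run without a live Home Assistant instance; the get_data step itself still requires a reachable instance and a valid token.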