emhass 0.11.1__py3-none-any.whl → 0.11.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emhass/retrieve_hass.py CHANGED
@@ -1,13 +1,14 @@
  #!/usr/bin/env python3
  # -*- coding: utf-8 -*-

- import json
  import copy
- import os
- import pathlib
  import datetime
+ import json
  import logging
+ import os
+ import pathlib
  from typing import Optional
+
  import numpy as np
  import pandas as pd
  from requests import get, post
@@ -32,9 +33,17 @@ class RetrieveHass:

      """

-     def __init__(self, hass_url: str, long_lived_token: str, freq: pd.Timedelta,
-                  time_zone: datetime.timezone, params: str, emhass_conf: dict, logger: logging.Logger,
-                  get_data_from_file: Optional[bool] = False) -> None:
+     def __init__(
+         self,
+         hass_url: str,
+         long_lived_token: str,
+         freq: pd.Timedelta,
+         time_zone: datetime.timezone,
+         params: str,
+         emhass_conf: dict,
+         logger: logging.Logger,
+         get_data_from_file: Optional[bool] = False,
+     ) -> None:
          """
          Define constructor for RetrieveHass class.

@@ -72,9 +81,27 @@ class RetrieveHass:
          self.logger = logger
          self.get_data_from_file = get_data_from_file

-     def get_data(self, days_list: pd.date_range, var_list: list,
-                  minimal_response: Optional[bool] = False, significant_changes_only: Optional[bool] = False,
-                  test_url: Optional[str] = "empty") -> None:
+     def get_ha_config(self):
+         """
+         Extract some configuration data from HA.
+
+         """
+         headers = {
+             "Authorization": "Bearer " + self.long_lived_token,
+             "content-type": "application/json",
+         }
+         url = self.hass_url + "api/config"
+         response_config = get(url, headers=headers)
+         self.ha_config = response_config.json()
+
+     def get_data(
+         self,
+         days_list: pd.date_range,
+         var_list: list,
+         minimal_response: Optional[bool] = False,
+         significant_changes_only: Optional[bool] = False,
+         test_url: Optional[str] = "empty",
+     ) -> None:
          r"""
          Retrieve the actual data from hass.

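Note: the new `get_ha_config` method reuses the same REST pattern as `get_data` and stores the parsed JSON on `self.ha_config`. A minimal sketch of how it might be exercised, assuming a reachable Home Assistant instance; the URL, token, and printed key below are illustrative placeholders, not values shipped with EMHASS:

```python
import datetime
import logging

import pandas as pd

from emhass.retrieve_hass import RetrieveHass

# All connection details here are hypothetical placeholders.
rh = RetrieveHass(
    hass_url="http://homeassistant.local:8123/",  # trailing "/" matters: the code appends "api/config"
    long_lived_token="<long-lived-token>",
    freq=pd.Timedelta("30min"),
    time_zone=datetime.timezone.utc,
    params="{}",
    emhass_conf={},
    logger=logging.getLogger(__name__),
)
rh.get_ha_config()  # GET <hass_url>api/config, parsed JSON stored as a dict
print(rh.ha_config.get("time_zone"))  # /api/config exposes keys such as "time_zone" and "unit_system"
```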
@@ -98,9 +125,13 @@
          are experimental
          """
          self.logger.info("Retrieve hass get data method initiated...")
+         headers = {
+             "Authorization": "Bearer " + self.long_lived_token,
+             "content-type": "application/json",
+         }
+         # Looping on each day from days list
          self.df_final = pd.DataFrame()
          x = 0 # iterate based on days
-         # Looping on each day from days list
          for day in days_list:
              for i, var in enumerate(var_list):
                  if test_url == "empty":
@@ -124,16 +155,10 @@
                      )
                      if minimal_response: # A support for minimal response
                          url = url + "?minimal_response"
-                     if (
-                         significant_changes_only
-                     ): # And for signicant changes only (check the HASS restful API for more info)
+                     if significant_changes_only: # And for signicant changes only (check the HASS restful API for more info)
                          url = url + "?significant_changes_only"
                  else:
                      url = test_url
-                 headers = {
-                     "Authorization": "Bearer " + self.long_lived_token,
-                     "content-type": "application/json",
-                 }
                  try:
                      response = get(url, headers=headers)
                  except Exception:
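For context, a hedged sketch of a typical `get_data` call against this refactored loop, reusing the `rh` instance from the sketch above; the sensor ids and the two-day window are illustrative only:

```python
import pandas as pd

# Hypothetical two-day retrieval window ending today.
days_list = pd.date_range(end=pd.Timestamp.now().date(), periods=2, freq="D")
# Illustrative entity ids; any sensors recorded by HA would do.
var_list = ["sensor.power_load_no_var_loads", "sensor.power_photovoltaics"]

# Fills rh.df_final with the resampled history on success;
# minimal_response / significant_changes_only toggle the URL flags built above.
if rh.get_data(days_list, var_list, minimal_response=False):
    print(rh.df_final.head())
```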
@@ -163,9 +188,19 @@
                          data = response.json()[0]
                      except IndexError:
                          if x == 0:
-                             self.logger.error("The retrieved JSON is empty, A sensor:" + var + " may have 0 days of history, passed sensor may not be correct, or days to retrieve is set too heigh")
+                             self.logger.error(
+                                 "The retrieved JSON is empty, A sensor:"
+                                 + var
+                                 + " may have 0 days of history, passed sensor may not be correct, or days to retrieve is set too heigh"
+                             )
                          else:
-                             self.logger.error("The retrieved JSON is empty for day:"+ str(day) +", days_to_retrieve may be larger than the recorded history of sensor:" + var + " (check your recorder settings)")
+                             self.logger.error(
+                                 "The retrieved JSON is empty for day:"
+                                 + str(day)
+                                 + ", days_to_retrieve may be larger than the recorded history of sensor:"
+                                 + var
+                                 + " (check your recorder settings)"
+                             )
                          return False
                  df_raw = pd.DataFrame.from_dict(data)
                  # self.logger.info(str(df_raw))
@@ -177,17 +212,41 @@
                              + " may have 0 days of history or passed sensor may not be correct"
                          )
                      else:
-                         self.logger.error("Retrieved empty Dataframe for day:"+ str(day) +", days_to_retrieve may be larger than the recorded history of sensor:" + var + " (check your recorder settings)")
+                         self.logger.error(
+                             "Retrieved empty Dataframe for day:"
+                             + str(day)
+                             + ", days_to_retrieve may be larger than the recorded history of sensor:"
+                             + var
+                             + " (check your recorder settings)"
+                         )
                      return False
                  # self.logger.info(self.freq.seconds)
-                 if len(df_raw) < ((60 / (self.freq.seconds / 60)) * 24) and x != len(days_list) -1: #check if there is enough Dataframes for passed frequency per day (not inc current day)
-                     self.logger.debug("sensor:" + var + " retrieved Dataframe count: " + str(len(df_raw)) + ", on day: " + str(day) + ". This is less than freq value passed: " + str(self.freq))
-                 if i == 0: # Defining the DataFrame container
-                     from_date = pd.to_datetime(df_raw['last_changed'], format="ISO8601").min()
-                     to_date = pd.to_datetime(df_raw['last_changed'], format="ISO8601").max()
-                     ts = pd.to_datetime(pd.date_range(start=from_date, end=to_date, freq=self.freq),
-                                         format='%Y-%d-%m %H:%M').round(self.freq, ambiguous='infer', nonexistent='shift_forward')
-                     df_day = pd.DataFrame(index = ts)
+                 if (
+                     len(df_raw) < ((60 / (self.freq.seconds / 60)) * 24)
+                     and x != len(days_list) - 1
+                 ): # check if there is enough Dataframes for passed frequency per day (not inc current day)
+                     self.logger.debug(
+                         "sensor:"
+                         + var
+                         + " retrieved Dataframe count: "
+                         + str(len(df_raw))
+                         + ", on day: "
+                         + str(day)
+                         + ". This is less than freq value passed: "
+                         + str(self.freq)
+                     )
+                 if i == 0: # Defining the DataFrame container
+                     from_date = pd.to_datetime(
+                         df_raw["last_changed"], format="ISO8601"
+                     ).min()
+                     to_date = pd.to_datetime(
+                         df_raw["last_changed"], format="ISO8601"
+                     ).max()
+                     ts = pd.to_datetime(
+                         pd.date_range(start=from_date, end=to_date, freq=self.freq),
+                         format="%Y-%d-%m %H:%M",
+                     ).round(self.freq, ambiguous="infer", nonexistent="shift_forward")
+                     df_day = pd.DataFrame(index=ts)
                  # Caution with undefined string data: unknown, unavailable, etc.
                  df_tp = (
                      df_raw.copy()[["state"]]
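The row-count guard above compares each day's retrieved history against the number of samples a full day should contain at the configured frequency. A quick worked check of that arithmetic (the 30-minute step is just an example):

```python
import pandas as pd

freq = pd.Timedelta("30min")
# 60 / (seconds / 60) = samples per hour; times 24 = samples per full day.
expected_per_day = (60 / (freq.seconds / 60)) * 24
print(expected_per_day)  # 48.0 for a 30-minute step; 96.0 for a 15-minute step
```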
@@ -203,16 +262,26 @@
                  df_tp = df_tp.resample(self.freq).mean()
                  df_day = pd.concat([df_day, df_tp], axis=1)
              self.df_final = pd.concat([self.df_final, df_day], axis=0)
-             x += 1
+             x += 1
          self.df_final = set_df_index_freq(self.df_final)
          if self.df_final.index.freq != self.freq:
-             self.logger.error("The inferred freq:" + str(self.df_final.index.freq) + " from data is not equal to the defined freq in passed:" + str(self.freq))
+             self.logger.error(
+                 "The inferred freq:"
+                 + str(self.df_final.index.freq)
+                 + " from data is not equal to the defined freq in passed:"
+                 + str(self.freq)
+             )
              return False
          return True
-
-
-     def prepare_data(self, var_load: str, load_negative: Optional[bool] = False, set_zero_min: Optional[bool] = True,
-                      var_replace_zero: Optional[list] = None, var_interp: Optional[list] = None) -> None:
+
+     def prepare_data(
+         self,
+         var_load: str,
+         load_negative: Optional[bool] = False,
+         set_zero_min: Optional[bool] = True,
+         var_replace_zero: Optional[list] = None,
+         var_interp: Optional[list] = None,
+     ) -> None:
          r"""
          Apply some data treatment in preparation for the optimization task.

@@ -289,8 +358,15 @@
              return True

      @staticmethod
-     def get_attr_data_dict(data_df: pd.DataFrame, idx: int, entity_id: str, unit_of_measurement: str,
-                            friendly_name: str, list_name: str, state: float) -> dict:
+     def get_attr_data_dict(
+         data_df: pd.DataFrame,
+         idx: int,
+         entity_id: str,
+         unit_of_measurement: str,
+         friendly_name: str,
+         list_name: str,
+         state: float,
+     ) -> dict:
          list_df = copy.deepcopy(data_df).loc[data_df.index[idx] :].reset_index()
          list_df.columns = ["timestamps", entity_id]
          ts_list = [str(i) for i in list_df["timestamps"].tolist()]
@@ -311,11 +387,20 @@
          }
          return data

-
-     def post_data(self, data_df: pd.DataFrame, idx: int, entity_id: str, unit_of_measurement: str,
-                   friendly_name: str, type_var: str, from_mlforecaster: Optional[bool] = False,
-                   publish_prefix: Optional[str] = "", save_entities: Optional[bool] = False,
-                   logger_levels: Optional[str] = "info", dont_post: Optional[bool] = False) -> None:
+     def post_data(
+         self,
+         data_df: pd.DataFrame,
+         idx: int,
+         entity_id: str,
+         unit_of_measurement: str,
+         friendly_name: str,
+         type_var: str,
+         from_mlforecaster: Optional[bool] = False,
+         publish_prefix: Optional[str] = "",
+         save_entities: Optional[bool] = False,
+         logger_levels: Optional[str] = "info",
+         dont_post: Optional[bool] = False,
+     ) -> None:
          r"""
          Post passed data to hass.

@@ -355,10 +440,10 @@
          headers = {
              "Authorization": "Bearer " + self.long_lived_token,
              "content-type": "application/json",
-             }
+         }
          # Preparing the data dict to be published
          if type_var == "cost_fun":
-             if isinstance(data_df.iloc[0],pd.Series): #if Series extract
+             if isinstance(data_df.iloc[0], pd.Series):  # if Series extract
                  data_df = data_df.iloc[:, 0]
              state = np.round(data_df.sum(), 2)
          elif type_var == "unit_load_cost" or type_var == "unit_prod_price":
@@ -370,29 +455,85 @@
          else:
              state = np.round(data_df.loc[data_df.index[idx]], 2)
          if type_var == "power":
-             data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                    friendly_name, "forecasts", state)
+             data = RetrieveHass.get_attr_data_dict(
+                 data_df,
+                 idx,
+                 entity_id,
+                 unit_of_measurement,
+                 friendly_name,
+                 "forecasts",
+                 state,
+             )
          elif type_var == "deferrable":
-             data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                    friendly_name, "deferrables_schedule", state)
+             data = RetrieveHass.get_attr_data_dict(
+                 data_df,
+                 idx,
+                 entity_id,
+                 unit_of_measurement,
+                 friendly_name,
+                 "deferrables_schedule",
+                 state,
+             )
          elif type_var == "temperature":
-             data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                    friendly_name, "predicted_temperatures", state)
+             data = RetrieveHass.get_attr_data_dict(
+                 data_df,
+                 idx,
+                 entity_id,
+                 unit_of_measurement,
+                 friendly_name,
+                 "predicted_temperatures",
+                 state,
+             )
          elif type_var == "batt":
-             data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                    friendly_name, "battery_scheduled_power", state)
+             data = RetrieveHass.get_attr_data_dict(
+                 data_df,
+                 idx,
+                 entity_id,
+                 unit_of_measurement,
+                 friendly_name,
+                 "battery_scheduled_power",
+                 state,
+             )
          elif type_var == "SOC":
-             data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                    friendly_name, "battery_scheduled_soc", state)
+             data = RetrieveHass.get_attr_data_dict(
+                 data_df,
+                 idx,
+                 entity_id,
+                 unit_of_measurement,
+                 friendly_name,
+                 "battery_scheduled_soc",
+                 state,
+             )
          elif type_var == "unit_load_cost":
-             data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                    friendly_name, "unit_load_cost_forecasts", state)
+             data = RetrieveHass.get_attr_data_dict(
+                 data_df,
+                 idx,
+                 entity_id,
+                 unit_of_measurement,
+                 friendly_name,
+                 "unit_load_cost_forecasts",
+                 state,
+             )
          elif type_var == "unit_prod_price":
-             data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                    friendly_name, "unit_prod_price_forecasts", state)
+             data = RetrieveHass.get_attr_data_dict(
+                 data_df,
+                 idx,
+                 entity_id,
+                 unit_of_measurement,
+                 friendly_name,
+                 "unit_prod_price_forecasts",
+                 state,
+             )
          elif type_var == "mlforecaster":
-             data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
-                                                    friendly_name, "scheduled_forecast", state)
+             data = RetrieveHass.get_attr_data_dict(
+                 data_df,
+                 idx,
+                 entity_id,
+                 unit_of_measurement,
+                 friendly_name,
+                 "scheduled_forecast",
+                 state,
+             )
          elif type_var == "optim_status":
              data = {
                  "state": state,
@@ -419,8 +560,10 @@
          }
          # Actually post the data
          if self.get_data_from_file or dont_post:
+
              class response:
                  pass
+
              response.status_code = 200
              response.ok = True
          else:
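The `get_data_from_file or dont_post` branch substitutes a stub for the `requests` response so the logging code below can treat both paths uniformly. A condensed sketch of that pattern (the class name here is hypothetical):

```python
# Stand-in for requests.Response when nothing is actually posted;
# only the attributes read downstream (ok, status_code) are provided.
class FakeResponse:  # hypothetical name, mirrors the inline "class response" above
    status_code = 200
    ok = True

response = FakeResponse()
if response.ok:
    print("success path runs without any network call")
```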
@@ -428,42 +571,55 @@

          # Treating the response status and posting them on the logger
          if response.ok:
-
              if logger_levels == "DEBUG":
-                 self.logger.debug("Successfully posted to " + entity_id + " = " + str(state))
+                 self.logger.debug(
+                     "Successfully posted to " + entity_id + " = " + str(state)
+                 )
              else:
-                 self.logger.info("Successfully posted to " + entity_id + " = " + str(state))
+                 self.logger.info(
+                     "Successfully posted to " + entity_id + " = " + str(state)
+                 )

              # If save entities is set, save entity data to /data_path/entities
-             if (save_entities):
-                 entities_path = self.emhass_conf['data_path'] / "entities"
-
+             if save_entities:
+                 entities_path = self.emhass_conf["data_path"] / "entities"
+
                  # Clarify folder exists
                  pathlib.Path(entities_path).mkdir(parents=True, exist_ok=True)
-
+
                  # Save entity data to json file
-                 result = data_df.to_json(index="timestamp", orient='index', date_unit='s', date_format='iso')
+                 result = data_df.to_json(
+                     index="timestamp", orient="index", date_unit="s", date_format="iso"
+                 )
                  parsed = json.loads(result)
-                 with open(entities_path / (entity_id + ".json"), "w") as file:
+                 with open(entities_path / (entity_id + ".json"), "w") as file:
                      json.dump(parsed, file, indent=4)
-
+
                  # Save the required metadata to json file
                  if os.path.isfile(entities_path / "metadata.json"):
                      with open(entities_path / "metadata.json", "r") as file:
-                         metadata = json.load(file)
+                         metadata = json.load(file)
                  else:
                      metadata = {}
-                 with open(entities_path / "metadata.json", "w") as file:
-                     # Save entity metadata, key = entity_id
-                     metadata[entity_id] = {'name': data_df.name, 'unit_of_measurement': unit_of_measurement,'friendly_name': friendly_name,'type_var': type_var, 'optimization_time_step': int(self.freq.seconds / 60)}
-
+                 with open(entities_path / "metadata.json", "w") as file:
+                     # Save entity metadata, key = entity_id
+                     metadata[entity_id] = {
+                         "name": data_df.name,
+                         "unit_of_measurement": unit_of_measurement,
+                         "friendly_name": friendly_name,
+                         "type_var": type_var,
+                         "optimization_time_step": int(self.freq.seconds / 60),
+                     }
+
                      # Find lowest frequency to set for continual loop freq
-                     if metadata.get("lowest_time_step",None) == None or metadata["lowest_time_step"] > int(self.freq.seconds / 60):
+                     if metadata.get("lowest_time_step", None) == None or metadata[
+                         "lowest_time_step"
+                     ] > int(self.freq.seconds / 60):
                          metadata["lowest_time_step"] = int(self.freq.seconds / 60)
-                     json.dump(metadata,file, indent=4)
+                     json.dump(metadata, file, indent=4)
+
+                 self.logger.debug("Saved " + entity_id + " to json file")

-                 self.logger.debug("Saved " + entity_id + " to json file")
-
          else:
              self.logger.warning(
                  "The status code for received curl command response is: "
emhass/static/configuration_script.js CHANGED
@@ -407,7 +407,7 @@ function buildParamElement(
  else {
    return `
    ${type_specific_html}
-   <input class="param_input" type="${type}" value=${value} placeholder=${parameter_definition_object["default_value"]}>
+   <input class="param_input" type="${type}" placeholder=${parameter_definition_object["default_value"]} value=${value} >
    ${type_specific_html_end}
    `;
  }
@@ -418,9 +418,9 @@ function buildParamElement(
  if (typeof Object.values(value)[0] === "object") {
    for (param of Object.values(value)) {
      for (items of Object.values(param)) {
-       inputs += `<input class="param_input" type="${type}" value=${
+       inputs += `<input class="param_input" type="${type}" placeholder=${Object.values(items)[0]} value=${
          Object.values(items)[0]
-       } placeholder=${Object.values(items)[0]}>`;
+       }>`;
      }
      inputs += `</br>`;
    }
@@ -432,7 +432,7 @@ function buildParamElement(
  for (param of value) {
    inputs += `
    ${type_specific_html}
-   <input class="param_input" type="${type}" value=${param} placeholder=${parameter_definition_object["default_value"]}>
+   <input class="param_input" type="${type}" placeholder=${parameter_definition_object["default_value"]} value=${param}>
    ${type_specific_html_end}
    `;
  }
emhass/static/data/param_definitions.json CHANGED
@@ -43,7 +43,7 @@
    },
    "logging_level": {
      "friendly_name": "Logging level",
-     "Description": "This is the name of the photovoltaic power-produced sensor in Watts from Home Assistant. For example: ‘sensor.power_photovoltaics’.",
+     "Description": "DEBUG provides detailed diagnostic information, INFO gives general operational messages, WARNING highlights potential issues, and ERROR indicates critical problems that may disrupt functionality.",
      "input": "select",
      "select_options": [
        "INFO",
@@ -102,6 +102,7 @@
      "input": "select",
      "select_options": [
        "naive",
+       "mlforecaster",
        "csv"
      ],
      "default_value": "naive"
@@ -364,13 +365,13 @@
      "friendly_name": "Add cost weight for battery discharge",
      "Description": "An additional weight (currency/ kWh) applied in the cost function to battery usage for discharging",
      "input": "float",
-     "default_value": 1.0
+     "default_value": 0.0
    },
    "weight_battery_charge": {
      "friendly_name": "Add cost weight for battery charge",
      "Description": "An additional weight (currency/ kWh) applied in the cost function to battery usage for charging",
      "input": "float",
-     "default_value": 1.0
+     "default_value": 0.0
    },
    "battery_discharge_power_max": {
      "friendly_name": "Max battery discharge power",
@@ -421,4 +422,4 @@
        "default_value": 0.6
      }
    }
- }
+ }