emhass 0.10.2__tar.gz → 0.10.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. {emhass-0.10.2 → emhass-0.10.4}/CHANGELOG.md +10 -0
  2. {emhass-0.10.2 → emhass-0.10.4}/PKG-INFO +2 -2
  3. {emhass-0.10.2 → emhass-0.10.4}/README.md +1 -1
  4. {emhass-0.10.2 → emhass-0.10.4}/setup.py +1 -1
  5. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/command_line.py +6 -11
  6. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/optimization.py +194 -145
  7. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/utils.py +34 -39
  8. {emhass-0.10.2 → emhass-0.10.4}/src/emhass.egg-info/PKG-INFO +2 -2
  9. {emhass-0.10.2 → emhass-0.10.4}/tests/test_optimization.py +37 -12
  10. {emhass-0.10.2 → emhass-0.10.4}/CODE_OF_CONDUCT.md +0 -0
  11. {emhass-0.10.2 → emhass-0.10.4}/CONTRIBUTING.md +0 -0
  12. {emhass-0.10.2 → emhass-0.10.4}/LICENSE +0 -0
  13. {emhass-0.10.2 → emhass-0.10.4}/MANIFEST.in +0 -0
  14. {emhass-0.10.2 → emhass-0.10.4}/data/data_load_cost_forecast.csv +0 -0
  15. {emhass-0.10.2 → emhass-0.10.4}/data/data_load_forecast.csv +0 -0
  16. {emhass-0.10.2 → emhass-0.10.4}/data/data_prod_price_forecast.csv +0 -0
  17. {emhass-0.10.2 → emhass-0.10.4}/data/data_train_load_clustering.pkl +0 -0
  18. {emhass-0.10.2 → emhass-0.10.4}/data/data_train_load_forecast.pkl +0 -0
  19. {emhass-0.10.2 → emhass-0.10.4}/data/data_weather_forecast.csv +0 -0
  20. {emhass-0.10.2 → emhass-0.10.4}/data/heating_prediction.csv +0 -0
  21. {emhass-0.10.2 → emhass-0.10.4}/data/opt_res_latest.csv +0 -0
  22. {emhass-0.10.2 → emhass-0.10.4}/data/opt_res_perfect_optim_cost.csv +0 -0
  23. {emhass-0.10.2 → emhass-0.10.4}/data/opt_res_perfect_optim_profit.csv +0 -0
  24. {emhass-0.10.2 → emhass-0.10.4}/data/opt_res_perfect_optim_self-consumption.csv +0 -0
  25. {emhass-0.10.2 → emhass-0.10.4}/data/test_df_final.pkl +0 -0
  26. {emhass-0.10.2 → emhass-0.10.4}/data/test_response_get_data_get_method.pbz2 +0 -0
  27. {emhass-0.10.2 → emhass-0.10.4}/data/test_response_scrapper_get_method.pbz2 +0 -0
  28. {emhass-0.10.2 → emhass-0.10.4}/data/test_response_solarforecast_get_method.pbz2 +0 -0
  29. {emhass-0.10.2 → emhass-0.10.4}/data/test_response_solcast_get_method.pbz2 +0 -0
  30. {emhass-0.10.2 → emhass-0.10.4}/pyproject.toml +0 -0
  31. {emhass-0.10.2 → emhass-0.10.4}/setup.cfg +0 -0
  32. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/__init__.py +0 -0
  33. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/data/cec_inverters.pbz2 +0 -0
  34. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/data/cec_modules.pbz2 +0 -0
  35. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/forecast.py +0 -0
  36. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/machine_learning_forecaster.py +0 -0
  37. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/machine_learning_regressor.py +0 -0
  38. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/retrieve_hass.py +0 -0
  39. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/static/advanced.html +0 -0
  40. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/static/basic.html +0 -0
  41. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/static/img/emhass_icon.png +0 -0
  42. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/static/img/emhass_logo_short.svg +0 -0
  43. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/static/img/feather-sprite.svg +0 -0
  44. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/static/script.js +0 -0
  45. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/static/style.css +0 -0
  46. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/templates/index.html +0 -0
  47. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/templates/template.html +0 -0
  48. {emhass-0.10.2 → emhass-0.10.4}/src/emhass/web_server.py +0 -0
  49. {emhass-0.10.2 → emhass-0.10.4}/src/emhass.egg-info/SOURCES.txt +0 -0
  50. {emhass-0.10.2 → emhass-0.10.4}/src/emhass.egg-info/dependency_links.txt +0 -0
  51. {emhass-0.10.2 → emhass-0.10.4}/src/emhass.egg-info/entry_points.txt +0 -0
  52. {emhass-0.10.2 → emhass-0.10.4}/src/emhass.egg-info/requires.txt +0 -0
  53. {emhass-0.10.2 → emhass-0.10.4}/src/emhass.egg-info/top_level.txt +0 -0
  54. {emhass-0.10.2 → emhass-0.10.4}/tests/test_command_line_utils.py +0 -0
  55. {emhass-0.10.2 → emhass-0.10.4}/tests/test_forecast.py +0 -0
  56. {emhass-0.10.2 → emhass-0.10.4}/tests/test_machine_learning_forecaster.py +0 -0
  57. {emhass-0.10.2 → emhass-0.10.4}/tests/test_machine_learning_regressor.py +0 -0
  58. {emhass-0.10.2 → emhass-0.10.4}/tests/test_retrieve_hass.py +0 -0
  59. {emhass-0.10.2 → emhass-0.10.4}/tests/test_utils.py +0 -0
{emhass-0.10.2 → emhass-0.10.4}/CHANGELOG.md
@@ -1,5 +1,15 @@
  # Changelog

+ ## 0.10.4 - 2024-07-10
+ ### Improvement
+ - Added a new thermal modeling, see the new section in the documentation for help to implement this of model for thermal deferrable loads
+ - Improved documentation
+
+ ## 0.10.3 - 2024-07-06
+ ### Improvement
+ - Added improved support for `def_start_penalty` option
+ - Improved documentation
+
  ## 0.10.2 - 2024-07-06
  ### Improvement
  - Weather forecast caching and Solcast method fix by @GeoDerp
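The 0.10.4 entry above refers to the thermal deferrable load model introduced in `src/emhass/optimization.py` (see the corresponding hunks further down in this diff). As a minimal sketch, drawn from the `test_thermal_load_optim` test added in this release, a thermal load is declared through a `def_load_config` entry carrying a `thermal_config` block; only the keys actually read by the new constraint code are shown, and the numeric values are illustrative:

```python
# Sketch of a def_load_config list with one plain load and one thermal load,
# mirroring the structure used by the new test_thermal_load_optim test.
def_load_config = [
    {},  # first deferrable load: no thermal model attached
    {"thermal_config": {
        "heating_rate": 5.0,            # degrees gained per hour when running at nominal power
        "cooling_constant": 0.1,        # per-timestep cooling towards the outdoor temperature
        "overshoot_temperature": 24.0,  # upper bound enforced on the predicted temperature
        "start_temperature": 20,        # temperature assumed at the first timestep
        "desired_temperatures": [21] * 48,  # per-timestep minimum temperature targets
    }},
]
```

The optional `sense` key (defaulting to `'heat'`) flips the direction of the temperature constraints, as can be seen in the optimization.py hunks below.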
{emhass-0.10.2 → emhass-0.10.4}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: emhass
- Version: 0.10.2
+ Version: 0.10.4
  Summary: An Energy Management System for Home Assistant
  Home-page: https://github.com/davidusb-geek/emhass
  Author: David HERNANDEZ
@@ -122,7 +122,7 @@ Installation instructions and example Home Assistant automation configurations a

  You must follow these steps to make EMHASS work properly:

- 1) Define all the parameters in the configuration file according to your installation. See the description for each parameter in the **configuration** section.
+ 1) Define all the parameters in the configuration file according to your installation method. For the add-on method you need to use the configuration pane directly on the add-on page. For other installation methods it should be needed to set the variables using the `config_emhass.yaml` file. See below for details on the installation methods. See the description for each parameter in the **configuration** section. If you have a PV installation then this dedicated webapp can be useful to find your inverter and solar panel models: [https://emhass-pvlib-database.streamlit.app/](https://emhass-pvlib-database.streamlit.app/)

  2) You most notably will need to define the main data entering EMHASS. This will be the `sensor.power_photovoltaics` for the name of the your hass variable containing the PV produced power and the variable `sensor.power_load_no_var_loads` for the load power of your household excluding the power of the deferrable loads that you want to optimize.

{emhass-0.10.2 → emhass-0.10.4}/README.md
@@ -87,7 +87,7 @@ Installation instructions and example Home Assistant automation configurations a

  You must follow these steps to make EMHASS work properly:

- 1) Define all the parameters in the configuration file according to your installation. See the description for each parameter in the **configuration** section.
+ 1) Define all the parameters in the configuration file according to your installation method. For the add-on method you need to use the configuration pane directly on the add-on page. For other installation methods it should be needed to set the variables using the `config_emhass.yaml` file. See below for details on the installation methods. See the description for each parameter in the **configuration** section. If you have a PV installation then this dedicated webapp can be useful to find your inverter and solar panel models: [https://emhass-pvlib-database.streamlit.app/](https://emhass-pvlib-database.streamlit.app/)

  2) You most notably will need to define the main data entering EMHASS. This will be the `sensor.power_photovoltaics` for the name of the your hass variable containing the PV produced power and the variable `sensor.power_load_no_var_loads` for the load power of your household excluding the power of the deferrable loads that you want to optimize.

{emhass-0.10.2 → emhass-0.10.4}/setup.py
@@ -19,7 +19,7 @@ long_description = (here / 'README.md').read_text(encoding='utf-8')

  setup(
  name='emhass', # Required
- version='0.10.2', # Required
+ version='0.10.4', # Required
  description='An Energy Management System for Home Assistant', # Optional
  long_description=long_description, # Optional
  long_description_content_type='text/markdown', # Optional (see note above)

{emhass-0.10.2 → emhass-0.10.4}/src/emhass/command_line.py
@@ -291,7 +291,6 @@ def weather_forecast_cache(emhass_conf: dict, params: str,

  return True

-
  def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
  save_data_to_file: Optional[bool] = True,
  debug: Optional[bool] = False) -> pd.DataFrame:
@@ -334,8 +333,6 @@ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
  if not debug:
  opt_res.to_csv(
  input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
-
-
  if not isinstance(input_data_dict["params"],dict):
  params = json.loads(input_data_dict["params"])
  else:
@@ -348,7 +345,6 @@ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,

  return opt_res

-
  def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
  save_data_to_file: Optional[bool] = False,
  debug: Optional[bool] = False) -> pd.DataFrame:
@@ -379,6 +375,9 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
  method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
  if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
  return False
+ if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
+ df_input_data_dayahead["outdoor_temperature_forecast"] = \
+ input_data_dict["params"]["passed_data"]["outdoor_temperature_forecast"]
  opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
  df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
  # Save CSV file for publish_data
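Both `dayahead_forecast_optim` (above) and `naive_mpc_optim` (below) now copy a passed `outdoor_temperature_forecast` into the day-ahead input DataFrame. As a sketch of the runtime-parameter dictionary consumed by `utils.treat_runtimeparams`, the forecast is simply a list with one temperature per optimization timestep; the values and list length here are illustrative only:

```python
# Hypothetical runtime parameters enabling the thermal model for a day-ahead run;
# the forecast list is assumed to cover every timestep of the optimization horizon.
runtimeparams = {
    "outdoor_temperature_forecast": [12.1, 11.8, 11.5, 11.2],  # °C, one value per timestep
    "def_load_config": def_load_config,  # as sketched after the CHANGELOG hunk above
}
```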
@@ -397,7 +396,6 @@
  params = json.loads(input_data_dict["params"])
  else:
  params = input_data_dict["params"]
-

  # if continual_publish, save day_ahead results to data_path/entities json
  if input_data_dict["retrieve_hass_conf"].get("continual_publish",False) or params["passed_data"].get("entity_save",False):
@@ -406,7 +404,6 @@

  return opt_res_dayahead

-
  def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
  save_data_to_file: Optional[bool] = False,
  debug: Optional[bool] = False) -> pd.DataFrame:
@@ -436,6 +433,9 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
  df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
  if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
  return False
+ if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
+ df_input_data_dayahead["outdoor_temperature_forecast"] = \
+ input_data_dict["params"]["passed_data"]["outdoor_temperature_forecast"]
  # The specifics params for the MPC at runtime
  prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
  soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
@@ -471,7 +471,6 @@

  return opt_res_naive_mpc

-
  def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
  debug: Optional[bool] = False) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
  """Perform a forecast model fit from training data retrieved from Home Assistant.
@@ -507,7 +506,6 @@ def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
  pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
  return df_pred, df_pred_backtest, mlf

-
  def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
  use_last_window: Optional[bool] = True,
  debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
@@ -585,7 +583,6 @@ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
  type_var="mlforecaster", publish_prefix=publish_prefix)
  return predictions

-
  def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
  debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
  ) -> Tuple[pd.DataFrame, MLForecaster]:
@@ -626,7 +623,6 @@ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
  pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
  return df_pred_optim, mlf

-
  def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
  debug: Optional[bool] = False) -> MLRegressor:
  """Perform a forecast model fit from training data retrieved from Home Assistant.
@@ -681,7 +677,6 @@ def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
  pickle.dump(mlr, outp, pickle.HIGHEST_PROTOCOL)
  return mlr

-
  def regressor_model_predict(input_data_dict: dict, logger: logging.Logger,
  debug: Optional[bool] = False, mlr: Optional[MLRegressor] = None
  ) -> np.ndarray:
{emhass-0.10.2 → emhass-0.10.4}/src/emhass/optimization.py
@@ -103,9 +103,9 @@ class Optimization:
  r"""
  Perform the actual optimization using linear programming (LP).

- :param data_tp: A DataFrame containing the input data. The results of the \
+ :param data_opt: A DataFrame containing the input data. The results of the \
  optimization will be appended (decision variables, cost function values, etc)
- :type data_tp: pd.DataFrame
+ :type data_opt: pd.DataFrame
  :param P_PV: The photovoltaic power values. This can be real historical \
  values or forecasted values.
  :type P_PV: numpy.array
@@ -327,47 +327,6 @@ class Optimization:
  sense = plp.LpConstraintLE,
  rhs = 0)
  for i in set_I})
-
- # Constraint for sequence of deferrable
- # WARNING: This is experimental, formulation seems correct but feasibility problems.
- # Probably uncomptabile with other constraints
- for k in range(self.optim_conf['num_def_loads']):
- if type(self.optim_conf['P_deferrable_nom'][k]) == list:
- power_sequence = self.optim_conf['P_deferrable_nom'][k]
- sequence_length = len(power_sequence)
- def create_matrix(input_list, n):
- matrix = []
- for i in range(n + 1):
- row = [0] * i + input_list + [0] * (n - i)
- matrix.append(row[:n*2])
- return matrix
- matrix = create_matrix(power_sequence, n-sequence_length)
- y = plp.LpVariable.dicts(f"y{k}", (i for i in range(len(matrix))), cat='Binary')
- constraints.update({f"single_value_constraint_{k}" :
- plp.LpConstraint(
- e = plp.lpSum(y[i] for i in range(len(matrix))) - 1,
- sense = plp.LpConstraintEQ,
- rhs = 0)
- })
- constraints.update({f"pdef{k}_sumconstraint_{i}" :
- plp.LpConstraint(
- e = plp.lpSum(P_deferrable[k][i] for i in set_I) - np.sum(power_sequence),
- sense = plp.LpConstraintEQ,
- rhs = 0)
- })
- constraints.update({f"pdef{k}_positive_constraint_{i}" :
- plp.LpConstraint(
- e = P_deferrable[k][i],
- sense = plp.LpConstraintGE,
- rhs = 0)
- for i in set_I})
- for num, mat in enumerate(matrix):
- constraints.update({f"pdef{k}_value_constraint_{num}_{i}" :
- plp.LpConstraint(
- e = P_deferrable[k][i] - mat[i]*y[num],
- sense = plp.LpConstraintEQ,
- rhs = 0)
- for i in set_I})

  # Two special constraints just for a self-consumption cost function
  if self.costfun == 'self-consumption':
@@ -400,128 +359,215 @@ class Optimization:
  for i in set_I})

  # Treat deferrable loads constraints
+ predicted_temps = {}
  for k in range(self.optim_conf['num_def_loads']):
+
  if type(self.optim_conf['P_deferrable_nom'][k]) == list:
- continue
- else:
- # Total time of deferrable load
- constraints.update({"constraint_defload{}_energy".format(k) :
+ # Constraint for sequence of deferrable
+ # WARNING: This is experimental, formulation seems correct but feasibility problems.
+ # Probably uncomptabile with other constraints
+ power_sequence = self.optim_conf['P_deferrable_nom'][k]
+ sequence_length = len(power_sequence)
+ def create_matrix(input_list, n):
+ matrix = []
+ for i in range(n + 1):
+ row = [0] * i + input_list + [0] * (n - i)
+ matrix.append(row[:n*2])
+ return matrix
+ matrix = create_matrix(power_sequence, n-sequence_length)
+ y = plp.LpVariable.dicts(f"y{k}", (i for i in range(len(matrix))), cat='Binary')
+ constraints.update({f"single_value_constraint_{k}" :
  plp.LpConstraint(
- e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in set_I),
+ e = plp.lpSum(y[i] for i in range(len(matrix))) - 1,
  sense = plp.LpConstraintEQ,
- rhs = def_total_hours[k]*self.optim_conf['P_deferrable_nom'][k])
+ rhs = 0)
  })
- # Ensure deferrable loads consume energy between def_start_timestep & def_end_timestep
- self.logger.debug("Deferrable load {}: Proposed optimization window: {} --> {}".format(
- k, def_start_timestep[k], def_end_timestep[k]))
- def_start, def_end, warning = Optimization.validate_def_timewindow(
- def_start_timestep[k], def_end_timestep[k], ceil(def_total_hours[k]/self.timeStep), n)
- if warning is not None:
- self.logger.warning("Deferrable load {} : {}".format(k, warning))
- self.logger.debug("Deferrable load {}: Validated optimization window: {} --> {}".format(
- k, def_start, def_end))
- if def_start > 0:
- constraints.update({"constraint_defload{}_start_timestep".format(k) :
+ constraints.update({f"pdef{k}_sumconstraint_{i}" :
+ plp.LpConstraint(
+ e = plp.lpSum(P_deferrable[k][i] for i in set_I) - np.sum(power_sequence),
+ sense = plp.LpConstraintEQ,
+ rhs = 0)
+ })
+ constraints.update({f"pdef{k}_positive_constraint_{i}" :
+ plp.LpConstraint(
+ e = P_deferrable[k][i],
+ sense = plp.LpConstraintGE,
+ rhs = 0)
+ for i in set_I})
+ for num, mat in enumerate(matrix):
+ constraints.update({f"pdef{k}_value_constraint_{num}_{i}" :
  plp.LpConstraint(
- e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in range(0, def_start)),
+ e = P_deferrable[k][i] - mat[i]*y[num],
  sense = plp.LpConstraintEQ,
  rhs = 0)
- })
- if def_end > 0:
- constraints.update({"constraint_defload{}_end_timestep".format(k) :
+ for i in set_I})
+
+ elif "def_load_config" in self.optim_conf.keys():
+ if "thermal_config" in self.optim_conf["def_load_config"][k]:
+ # Special case of a thermal deferrable load
+ def_load_config = self.optim_conf['def_load_config'][k]
+ if def_load_config and 'thermal_config' in def_load_config:
+ hc = def_load_config["thermal_config"]
+ start_temperature = hc["start_temperature"]
+ cooling_constant = hc["cooling_constant"]
+ heating_rate = hc["heating_rate"]
+ overshoot_temperature = hc["overshoot_temperature"]
+ outdoor_temperature_forecast = data_opt['outdoor_temperature_forecast']
+ desired_temperatures = hc["desired_temperatures"]
+ sense = hc.get('sense', 'heat')
+ predicted_temp = [start_temperature]
+ for I in set_I:
+ if I == 0:
+ continue
+ predicted_temp.append(
+ predicted_temp[I-1]
+ + (P_deferrable[k][I-1] * (heating_rate * self.timeStep / self.optim_conf['P_deferrable_nom'][k]))
+ - (cooling_constant * (predicted_temp[I-1] - outdoor_temperature_forecast[I-1])))
+ if len(desired_temperatures) > I and desired_temperatures[I]:
+ constraints.update({"constraint_defload{}_temperature_{}".format(k, I):
+ plp.LpConstraint(
+ e = predicted_temp[I],
+ sense = plp.LpConstraintGE if sense == 'heat' else plp.LpConstraintLE,
+ rhs = desired_temperatures[I],
+ )
+ })
+ constraints.update({"constraint_defload{}_overshoot_temp_{}".format(k, I):
+ plp.LpConstraint(
+ e = predicted_temp[I],
+ sense = plp.LpConstraintLE if sense == 'heat' else plp.LpConstraintGE,
+ rhs = overshoot_temperature,
+ )
+ for I in set_I})
+ predicted_temps[k] = predicted_temp
+
+ else:
+
+ if def_total_hours[k] > 0:
+ # Total time of deferrable load
+ constraints.update({"constraint_defload{}_energy".format(k) :
  plp.LpConstraint(
- e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in range(def_end, n)),
+ e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in set_I),
  sense = plp.LpConstraintEQ,
- rhs = 0)
+ rhs = def_total_hours[k]*self.optim_conf['P_deferrable_nom'][k])
  })
- # Treat deferrable load as a semi-continuous variable
- if self.optim_conf['treat_def_as_semi_cont'][k]:
- constraints.update({"constraint_pdef{}_semicont1_{}".format(k, i) :
- plp.LpConstraint(
- e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i],
- sense=plp.LpConstraintGE,
- rhs=0)
- for i in set_I})
- constraints.update({"constraint_pdef{}_semicont2_{}".format(k, i) :
- plp.LpConstraint(
- e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i],
- sense=plp.LpConstraintLE,
- rhs=0)
- for i in set_I})
- # Treat the number of starts for a deferrable load (old method, kept here just in case)
- # if self.optim_conf['set_def_constant'][k]:
- # constraints.update({"constraint_pdef{}_start1_{}".format(k, i) :
- # plp.LpConstraint(
- # e=P_deferrable[k][i] - P_def_bin2[k][i]*M,
- # sense=plp.LpConstraintLE,
- # rhs=0)
- # for i in set_I})
- # constraints.update({"constraint_pdef{}_start2_{}".format(k, i):
- # plp.LpConstraint(
- # e=P_def_start[k][i] - P_def_bin2[k][i] + (P_def_bin2[k][i-1] if i-1 >= 0 else 0),
- # sense=plp.LpConstraintGE,
- # rhs=0)
- # for i in set_I})
- # constraints.update({"constraint_pdef{}_start3".format(k) :
- # plp.LpConstraint(
- # e = plp.lpSum(P_def_start[k][i] for i in set_I),
- # sense = plp.LpConstraintEQ,
- # rhs = 1)
- # })
- # Treat the number of starts for a deferrable load (new method considering current state)
- current_state = 0
- if ("def_current_state" in self.optim_conf and len(self.optim_conf["def_current_state"]) > k):
- current_state = 1 if self.optim_conf["def_current_state"][k] else 0
- # P_deferrable < P_def_bin2 * 1 million
- # P_deferrable must be zero if P_def_bin2 is zero
- constraints.update({"constraint_pdef{}_start1_{}".format(k, i):
+
+ # Ensure deferrable loads consume energy between def_start_timestep & def_end_timestep
+ self.logger.debug("Deferrable load {}: Proposed optimization window: {} --> {}".format(
+ k, def_start_timestep[k], def_end_timestep[k]))
+ def_start, def_end, warning = Optimization.validate_def_timewindow(
+ def_start_timestep[k], def_end_timestep[k], ceil(def_total_hours[k]/self.timeStep), n)
+ if warning is not None:
+ self.logger.warning("Deferrable load {} : {}".format(k, warning))
+ self.logger.debug("Deferrable load {}: Validated optimization window: {} --> {}".format(
+ k, def_start, def_end))
+ if def_start > 0:
+ constraints.update({"constraint_defload{}_start_timestep".format(k) :
  plp.LpConstraint(
- e=P_deferrable[k][i] - P_def_bin2[k][i] * M,
- sense=plp.LpConstraintLE,
- rhs=0)
- for i in set_I})
- # P_deferrable - P_def_bin2 <= 0
- # P_def_bin2 must be zero if P_deferrable is zero
- constraints.update({"constraint_pdef{}_start1a_{}".format(k, i):
+ e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in range(0, def_start)),
+ sense = plp.LpConstraintEQ,
+ rhs = 0)
+ })
+ if def_end > 0:
+ constraints.update({"constraint_defload{}_end_timestep".format(k) :
  plp.LpConstraint(
- e=P_def_bin2[k][i] - P_deferrable[k][i],
- sense=plp.LpConstraintLE,
- rhs=0)
- for i in set_I})
- # P_def_start + P_def_bin2[i-1] >= P_def_bin2[i]
- # If load is on this cycle (P_def_bin2[i] is 1) then P_def_start must be 1 OR P_def_bin2[i-1] must be 1
- # For first timestep, use current state if provided by caller.
- constraints.update({"constraint_pdef{}_start2_{}".format(k, i):
+ e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in range(def_end, n)),
+ sense = plp.LpConstraintEQ,
+ rhs = 0)
+ })
+
+ # Treat the number of starts for a deferrable load (new method considering current state)
+ current_state = 0
+ if ("def_current_state" in self.optim_conf and len(self.optim_conf["def_current_state"]) > k):
+ current_state = 1 if self.optim_conf["def_current_state"][k] else 0
+ # P_deferrable < P_def_bin2 * 1 million
+ # P_deferrable must be zero if P_def_bin2 is zero
+ constraints.update({"constraint_pdef{}_start1_{}".format(k, i):
+ plp.LpConstraint(
+ e=P_deferrable[k][i] - P_def_bin2[k][i] * M,
+ sense=plp.LpConstraintLE,
+ rhs=0)
+ for i in set_I})
+ # P_deferrable - P_def_bin2 <= 0
+ # P_def_bin2 must be zero if P_deferrable is zero
+ constraints.update({"constraint_pdef{}_start1a_{}".format(k, i):
+ plp.LpConstraint(
+ e=P_def_bin2[k][i] - P_deferrable[k][i],
+ sense=plp.LpConstraintLE,
+ rhs=0)
+ for i in set_I})
+ # P_def_start + P_def_bin2[i-1] >= P_def_bin2[i]
+ # If load is on this cycle (P_def_bin2[i] is 1) then P_def_start must be 1 OR P_def_bin2[i-1] must be 1
+ # For first timestep, use current state if provided by caller.
+ constraints.update({"constraint_pdef{}_start2_{}".format(k, i):
+ plp.LpConstraint(
+ e=P_def_start[k][i]
+ - P_def_bin2[k][i]
+ + (P_def_bin2[k][i - 1] if i - 1 >= 0 else current_state),
+ sense=plp.LpConstraintGE,
+ rhs=0)
+ for i in set_I})
+ # P_def_bin2[i-1] + P_def_start <= 1
+ # If load started this cycle (P_def_start[i] is 1) then P_def_bin2[i-1] must be 0
+ constraints.update({"constraint_pdef{}_start3_{}".format(k, i):
+ plp.LpConstraint(
+ e=(P_def_bin2[k][i-1] if i-1 >= 0 else 0) + P_def_start[k][i],
+ sense=plp.LpConstraintLE,
+ rhs=1)
+ for i in set_I})
+
+ # Treat deferrable as a fixed value variable with just one startup
+ if self.optim_conf['set_def_constant'][k]:
+ # P_def_start[i] must be 1 for exactly 1 value of i
+ constraints.update({"constraint_pdef{}_start4".format(k) :
+ plp.LpConstraint(
+ e = plp.lpSum(P_def_start[k][i] for i in set_I),
+ sense = plp.LpConstraintEQ,
+ rhs = 1)
+ })
+ # P_def_bin2 must be 1 for exactly the correct number of timesteps.
+ constraints.update({"constraint_pdef{}_start5".format(k) :
+ plp.LpConstraint(
+ e = plp.lpSum(P_def_bin2[k][i] for i in set_I),
+ sense = plp.LpConstraintEQ,
+ rhs = def_total_hours[k]/self.timeStep)
+ })
+
+ # Treat deferrable load as a semi-continuous variable
+ if self.optim_conf['treat_def_as_semi_cont'][k]:
+ constraints.update({"constraint_pdef{}_semicont1_{}".format(k, i) :
  plp.LpConstraint(
- e=P_def_start[k][i]
- - P_def_bin2[k][i]
- + (P_def_bin2[k][i - 1] if i - 1 >= 0 else current_state),
+ e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i],
  sense=plp.LpConstraintGE,
  rhs=0)
  for i in set_I})
- # P_def_bin2[i-1] + P_def_start <= 1
- # If load started this cycle (P_def_start[i] is 1) then P_def_bin2[i-1] must be 0
- constraints.update({"constraint_pdef{}_start3_{}".format(k, i):
+ constraints.update({"constraint_pdef{}_semicont2_{}".format(k, i) :
  plp.LpConstraint(
- e=(P_def_bin2[k][i-1] if i-1 >= 0 else 0) + P_def_start[k][i],
+ e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i],
  sense=plp.LpConstraintLE,
- rhs=1)
+ rhs=0)
  for i in set_I})
- if self.optim_conf['set_def_constant'][k]:
- # P_def_start[i] must be 1 for exactly 1 value of i
- constraints.update({"constraint_pdef{}_start4".format(k) :
- plp.LpConstraint(
- e = plp.lpSum(P_def_start[k][i] for i in set_I),
- sense = plp.LpConstraintEQ,
- rhs = 1)
- })
- # P_def_bin2 must be 1 for exactly the correct number of timesteps.
- constraints.update({"constraint_pdef{}_start5".format(k) :
- plp.LpConstraint(
- e = plp.lpSum(P_def_bin2[k][i] for i in set_I),
- sense = plp.LpConstraintEQ,
- rhs = def_total_hours[k]/self.timeStep)
- })
+
+
+ # Treat the number of starts for a deferrable load (old method, kept here just in case)
+ # if self.optim_conf['set_def_constant'][k]:
+ # constraints.update({"constraint_pdef{}_start1_{}".format(k, i) :
+ # plp.LpConstraint(
+ # e=P_deferrable[k][i] - P_def_bin2[k][i]*M,
+ # sense=plp.LpConstraintLE,
+ # rhs=0)
+ # for i in set_I})
+ # constraints.update({"constraint_pdef{}_start2_{}".format(k, i):
+ # plp.LpConstraint(
+ # e=P_def_start[k][i] - P_def_bin2[k][i] + (P_def_bin2[k][i-1] if i-1 >= 0 else 0),
+ # sense=plp.LpConstraintGE,
+ # rhs=0)
+ # for i in set_I})
+ # constraints.update({"constraint_pdef{}_start3".format(k) :
+ # plp.LpConstraint(
+ # e = plp.lpSum(P_def_start[k][i] for i in set_I),
+ # sense = plp.LpConstraintEQ,
+ # rhs = 1)
+ # })

  # The battery constraints
  if self.optim_conf['set_use_battery']:
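The hunk above builds the predicted indoor temperature as a linear expression of the deferrable load power, then bounds it from below by `desired_temperatures` and from above by `overshoot_temperature` (directions reversed when `sense` is not `'heat'`). Read with plain numbers instead of LP variables, the recursion is the following sketch; names follow the diff, and `dt` stands for `self.timeStep` expressed in hours:

```python
def predict_temperature(p_heater, p_nom, t_start, t_outdoor,
                        heating_rate, cooling_constant, dt):
    """Plain-number rendering of the recursion used by the new thermal constraints."""
    temps = [t_start]
    for i in range(1, len(t_outdoor)):
        temps.append(
            temps[i - 1]
            + p_heater[i - 1] * (heating_rate * dt / p_nom)           # heating term
            - cooling_constant * (temps[i - 1] - t_outdoor[i - 1]))   # cooling towards outdoor temp
    return temps

# Example: a 1 kW heater running for the first two half-hour steps, 10 °C outside.
print(predict_temperature([1000, 1000, 0, 0], 1000, 20.0, [10.0] * 4, 5.0, 0.1, 0.5))
```

In the next hunk the same `predicted_temps` series is exported to the results DataFrame as `predicted_temp_heater{k}` alongside `target_temp_heater{k}`.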
@@ -685,6 +731,9 @@ class Optimization:
  for k in range(self.optim_conf["num_def_loads"]):
  opt_tp[f"P_def_start_{k}"] = [P_def_start[k][i].varValue for i in set_I]
  opt_tp[f"P_def_bin2_{k}"] = [P_def_bin2[k][i].varValue for i in set_I]
+ for i, predicted_temp in predicted_temps.items():
+ opt_tp[f"predicted_temp_heater{i}"] = pd.Series([round(pt.value(), 2) if isinstance(pt, plp.LpAffineExpression) else pt for pt in predicted_temp], index=opt_tp.index)
+ opt_tp[f"target_temp_heater{i}"] = pd.Series(self.optim_conf["def_load_config"][i]['thermal_config']["desired_temperatures"], index=opt_tp.index)

  return opt_tp

{emhass-0.10.2 → emhass-0.10.4}/src/emhass/utils.py
@@ -285,16 +285,6 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
  else:
  def_end_timestep = runtimeparams["def_end_timestep"]
  params["passed_data"]["def_end_timestep"] = def_end_timestep
- if "alpha" not in runtimeparams.keys():
- alpha = 0.5
- else:
- alpha = runtimeparams["alpha"]
- params["passed_data"]["alpha"] = alpha
- if "beta" not in runtimeparams.keys():
- beta = 0.5
- else:
- beta = runtimeparams["beta"]
- params["passed_data"]["beta"] = beta
  forecast_dates = copy.deepcopy(forecast_dates)[0:prediction_horizon]
  else:
  params["passed_data"]["prediction_horizon"] = None
@@ -303,11 +293,9 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
  params["passed_data"]["def_total_hours"] = None
  params["passed_data"]["def_start_timestep"] = None
  params["passed_data"]["def_end_timestep"] = None
- params["passed_data"]["alpha"] = None
- params["passed_data"]["beta"] = None
  # Treat passed forecast data lists
- list_forecast_key = ['pv_power_forecast', 'load_power_forecast', 'load_cost_forecast', 'prod_price_forecast']
- forecast_methods = ['weather_forecast_method', 'load_forecast_method', 'load_cost_forecast_method', 'prod_price_forecast_method']
+ list_forecast_key = ['pv_power_forecast', 'load_power_forecast', 'load_cost_forecast', 'prod_price_forecast', 'outdoor_temperature_forecast']
+ forecast_methods = ['weather_forecast_method', 'load_forecast_method', 'load_cost_forecast_method', 'prod_price_forecast_method', 'outdoor_temperature_forecast_method']
  # Param to save forecast cache (i.e. Solcast)
  if "weather_forecast_cache" not in runtimeparams.keys():
  weather_forecast_cache = False
@@ -389,19 +377,13 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
  if "model_predict_unit_of_measurement" not in runtimeparams.keys():
  model_predict_unit_of_measurement = "W"
  else:
- model_predict_unit_of_measurement = runtimeparams[
- "model_predict_unit_of_measurement"
- ]
- params["passed_data"][
- "model_predict_unit_of_measurement"
- ] = model_predict_unit_of_measurement
+ model_predict_unit_of_measurement = runtimeparams["model_predict_unit_of_measurement"]
+ params["passed_data"]["model_predict_unit_of_measurement"] = model_predict_unit_of_measurement
  if "model_predict_friendly_name" not in runtimeparams.keys():
  model_predict_friendly_name = "Load Power Forecast custom ML model"
  else:
  model_predict_friendly_name = runtimeparams["model_predict_friendly_name"]
- params["passed_data"][
- "model_predict_friendly_name"
- ] = model_predict_friendly_name
+ params["passed_data"]["model_predict_friendly_name"] = model_predict_friendly_name
  if "mlr_predict_entity_id" not in runtimeparams.keys():
  mlr_predict_entity_id = "sensor.mlr_predict"
  else:
@@ -410,17 +392,24 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
  if "mlr_predict_unit_of_measurement" not in runtimeparams.keys():
  mlr_predict_unit_of_measurement = None
  else:
- mlr_predict_unit_of_measurement = runtimeparams[
- "mlr_predict_unit_of_measurement"
- ]
- params["passed_data"][
- "mlr_predict_unit_of_measurement"
- ] = mlr_predict_unit_of_measurement
+ mlr_predict_unit_of_measurement = runtimeparams["mlr_predict_unit_of_measurement"]
+ params["passed_data"]["mlr_predict_unit_of_measurement"] = mlr_predict_unit_of_measurement
  if "mlr_predict_friendly_name" not in runtimeparams.keys():
  mlr_predict_friendly_name = "mlr predictor"
  else:
  mlr_predict_friendly_name = runtimeparams["mlr_predict_friendly_name"]
  params["passed_data"]["mlr_predict_friendly_name"] = mlr_predict_friendly_name
+ # Treat passed data for other parameters
+ if "alpha" not in runtimeparams.keys():
+ alpha = 0.5
+ else:
+ alpha = runtimeparams["alpha"]
+ params["passed_data"]["alpha"] = alpha
+ if "beta" not in runtimeparams.keys():
+ beta = 0.5
+ else:
+ beta = runtimeparams["beta"]
+ params["passed_data"]["beta"] = beta
  # Treat optimization configuration parameters passed at runtime
  if "num_def_loads" in runtimeparams.keys():
  optim_conf["num_def_loads"] = runtimeparams["num_def_loads"]
@@ -447,6 +436,8 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
  optim_conf["def_start_penalty"] = [
  ast.literal_eval(str(k).capitalize()) for k in runtimeparams["def_start_penalty"]
  ]
+ if 'def_load_config' in runtimeparams:
+ optim_conf["def_load_config"] = runtimeparams['def_load_config']
  if "solcast_api_key" in runtimeparams.keys():
  retrieve_hass_conf["solcast_api_key"] = runtimeparams["solcast_api_key"]
  optim_conf["weather_forecast_method"] = "solcast"
@@ -469,7 +460,7 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
  if 'freq' in runtimeparams.keys():
  retrieve_hass_conf['freq'] = pd.to_timedelta(runtimeparams['freq'], "minutes")
  if 'continual_publish' in runtimeparams.keys():
- retrieve_hass_conf['continual_publish'] = bool(runtimeparams['continual_publish'])
+ retrieve_hass_conf['continual_publish'] = bool(runtimeparams['continual_publish'])
  # Treat plant configuration parameters passed at runtime
  if "SOCmin" in runtimeparams.keys():
  plant_conf["SOCmin"] = runtimeparams["SOCmin"]
@@ -807,6 +798,10 @@ def build_params(params: dict, params_secrets: dict, options: dict, addon: int,
  params["optim_conf"]["def_total_hours"] = [i["operating_hours_of_each_deferrable_load"] for i in options.get("list_operating_hours_of_each_deferrable_load")]
  if options.get("list_treat_deferrable_load_as_semi_cont", None) != None:
  params["optim_conf"]["treat_def_as_semi_cont"] = [i["treat_deferrable_load_as_semi_cont"] for i in options.get("list_treat_deferrable_load_as_semi_cont")]
+ if options.get("list_set_deferrable_load_single_constant", None) != None:
+ params["optim_conf"]["set_def_constant"] = [i["set_deferrable_load_single_constant"] for i in options.get("list_set_deferrable_load_single_constant")]
+ if options.get("list_set_deferrable_startup_penalty", None) != None:
+ params["optim_conf"]["def_start_penalty"] = [i["set_deferrable_startup_penalty"] for i in options.get("list_set_deferrable_startup_penalty")]
  params["optim_conf"]["weather_forecast_method"] = options.get("weather_forecast_method", params["optim_conf"]["weather_forecast_method"])
  # Update optional param secrets
  if params["optim_conf"]["weather_forecast_method"] == "solcast":
@@ -817,12 +812,6 @@ def build_params(params: dict, params_secrets: dict, options: dict, addon: int,
  params["optim_conf"]["load_forecast_method"] = options.get("load_forecast_method", params["optim_conf"]["load_forecast_method"])
  params["optim_conf"]["delta_forecast"] = options.get("delta_forecast_daily", params["optim_conf"]["delta_forecast"])
  params["optim_conf"]["load_cost_forecast_method"] = options.get("load_cost_forecast_method", params["optim_conf"]["load_cost_forecast_method"])
- if options.get("list_set_deferrable_load_single_constant", None) != None:
- params["optim_conf"]["set_def_constant"] = [i["set_deferrable_load_single_constant"] for i in options.get("list_set_deferrable_load_single_constant")]
-
- if options.get("list_set_deferrable_startup_penalty", None) != None:
- params["optim_conf"]["def_start_penalty"] = [i["set_deferrable_startup_penalty"] for i in options.get("list_set_deferrable_startup_penalty")]
-
  if (options.get("list_peak_hours_periods_start_hours", None) != None and options.get("list_peak_hours_periods_end_hours", None) != None):
  start_hours_list = [i["peak_hours_periods_start_hours"] for i in options["list_peak_hours_periods_start_hours"]]
  end_hours_list = [i["peak_hours_periods_end_hours"] for i in options["list_peak_hours_periods_end_hours"]]
@@ -889,7 +878,12 @@ def build_params(params: dict, params_secrets: dict, options: dict, addon: int,
  if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['treat_def_as_semi_cont']):
  logger.warning("treat_def_as_semi_cont / list_treat_deferrable_load_as_semi_cont does not match number in num_def_loads, adding default values to parameter")
  for x in range(len(params['optim_conf']['treat_def_as_semi_cont']), params['optim_conf']['num_def_loads']):
- params['optim_conf']['treat_def_as_semi_cont'].append(True)
+ params['optim_conf']['treat_def_as_semi_cont'].append(True)
+ if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['def_start_penalty']):
+ logger.warning("def_start_penalty / list_set_deferrable_startup_penalty does not match number in num_def_loads, adding default values to parameter")
+ for x in range(len(params['optim_conf']['def_start_penalty']), params['optim_conf']['num_def_loads']):
+ params['optim_conf']['def_start_penalty'].append(0.0)
+ # days_to_retrieve should be no less then 2
  if params['optim_conf']['num_def_loads'] is not len(params['optim_conf']['def_total_hours']):
  logger.warning("def_total_hours / list_operating_hours_of_each_deferrable_load does not match number in num_def_loads, adding default values to parameter")
  for x in range(len(params['optim_conf']['def_total_hours']), params['optim_conf']['num_def_loads']):
@@ -949,7 +943,8 @@ def set_df_index_freq(df: pd.DataFrame) -> pd.DataFrame:

  """
  idx_diff = np.diff(df.index)
+ # Sometimes there are zero values in this list.
+ idx_diff = idx_diff[np.nonzero(idx_diff)]
  sampling = pd.to_timedelta(np.median(idx_diff))
  df = df[~df.index.duplicated()]
- df = df.asfreq(sampling)
- return df
+ return df.asfreq(sampling)
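The `set_df_index_freq` change filters out zero time deltas, which appear when the index contains duplicated timestamps, before taking the median; otherwise duplicated timestamps can skew the inferred sampling frequency (down to zero in the worst case). A standalone sketch of the same idea:

```python
import numpy as np
import pandas as pd

def infer_sampling(index: pd.DatetimeIndex) -> pd.Timedelta:
    """Median time delta of an index, ignoring zero deltas caused by duplicated timestamps."""
    idx_diff = np.diff(index)
    idx_diff = idx_diff[np.nonzero(idx_diff)]  # same filtering as the patched set_df_index_freq
    return pd.to_timedelta(np.median(idx_diff))

# A 30 min series with one duplicated timestamp still yields a 30 min frequency.
idx = pd.DatetimeIndex(["2024-07-10 00:00", "2024-07-10 00:00",
                        "2024-07-10 00:30", "2024-07-10 01:00"])
print(infer_sampling(idx))  # 0 days 00:30:00
```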
{emhass-0.10.2 → emhass-0.10.4}/src/emhass.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: emhass
- Version: 0.10.2
+ Version: 0.10.4
  Summary: An Energy Management System for Home Assistant
  Home-page: https://github.com/davidusb-geek/emhass
  Author: David HERNANDEZ
@@ -122,7 +122,7 @@ Installation instructions and example Home Assistant automation configurations a

  You must follow these steps to make EMHASS work properly:

- 1) Define all the parameters in the configuration file according to your installation. See the description for each parameter in the **configuration** section.
+ 1) Define all the parameters in the configuration file according to your installation method. For the add-on method you need to use the configuration pane directly on the add-on page. For other installation methods it should be needed to set the variables using the `config_emhass.yaml` file. See below for details on the installation methods. See the description for each parameter in the **configuration** section. If you have a PV installation then this dedicated webapp can be useful to find your inverter and solar panel models: [https://emhass-pvlib-database.streamlit.app/](https://emhass-pvlib-database.streamlit.app/)

  2) You most notably will need to define the main data entering EMHASS. This will be the `sensor.power_photovoltaics` for the name of the your hass variable containing the PV produced power and the variable `sensor.power_load_no_var_loads` for the load power of your household excluding the power of the deferrable loads that you want to optimize.

{emhass-0.10.2 → emhass-0.10.4}/tests/test_optimization.py
@@ -7,6 +7,7 @@ import pandas as pd
  import numpy as np
  import pathlib
  import pickle
+ import random
  from datetime import datetime, timezone

  from emhass.retrieve_hass import RetrieveHass
@@ -265,20 +266,44 @@ class TestOptimization(unittest.TestCase):
  self.df_input_data_dayahead, self.P_PV_forecast, self.P_load_forecast, prediction_horizon,
  soc_init=soc_init, soc_final=soc_final, def_total_hours=def_total_hours, def_start_timestep=def_start_timestep, def_end_timestep=def_end_timestep)
  self.assertAlmostEqual(self.opt_res_dayahead.loc[self.opt_res_dayahead.index[-1],'SOC_opt'], soc_final)
+
+ def test_thermal_load_optim(self):
+ self.df_input_data_dayahead = self.fcst.get_load_cost_forecast(self.df_input_data_dayahead)
+ self.df_input_data_dayahead = self.fcst.get_prod_price_forecast(self.df_input_data_dayahead)
+ self.df_input_data_dayahead['outdoor_temperature_forecast'] = [random.normalvariate(10.0, 3.0) for _ in range(48)]
+ runtimeparams = {
+ 'def_load_config': [
+ {},
+ {'thermal_config': {
+ 'heating_rate': 5.0,
+ 'cooling_constant': 0.1,
+ 'overshoot_temperature': 24.0,
+ 'start_temperature': 20,
+ 'desired_temperatures': [21]*48,
+ }
+ }
+ ]
+ }
+ self.optim_conf["def_load_config"] = runtimeparams['def_load_config']
+ self.opt = Optimization(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
+ self.fcst.var_load_cost, self.fcst.var_prod_price,
+ self.costfun, emhass_conf, logger)
+ unit_load_cost = self.df_input_data_dayahead[self.opt.var_load_cost].values # €/kWh
+ unit_prod_price = self.df_input_data_dayahead[self.opt.var_prod_price].values # €/kWh
+ self.opt_res_dayahead = self.opt.perform_optimization(self.df_input_data_dayahead,
+ self.P_PV_forecast.values.ravel(),
+ self.P_load_forecast.values.ravel(),
+ unit_load_cost, unit_prod_price)
+ self.assertIsInstance(self.opt_res_dayahead, type(pd.DataFrame()))
+ self.assertIsInstance(self.opt_res_dayahead.index, pd.core.indexes.datetimes.DatetimeIndex)
+ self.assertIsInstance(self.opt_res_dayahead.index.dtype, pd.core.dtypes.dtypes.DatetimeTZDtype)
+ self.assertTrue('cost_fun_'+self.costfun in self.opt_res_dayahead.columns)
+ self.assertTrue(self.opt.optim_status == 'Optimal')

-
-
  def run_penalty_test_forecast(self):
- self.opt = Optimization(
- self.retrieve_hass_conf,
- self.optim_conf,
- self.plant_conf,
- self.fcst.var_load_cost,
- self.fcst.var_prod_price,
- self.costfun,
- emhass_conf,
- logger,
- )
+ self.opt = Optimization(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
+ self.fcst.var_load_cost, self.fcst.var_prod_price,
+ self.costfun, emhass_conf, logger)
  def_total_hours = [5 * self.retrieve_hass_conf["freq"].seconds / 3600.0]
  def_start_timestep = [0]
  def_end_timestep = [0]
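The new `test_thermal_load_optim` drives the full LP with one plain and one thermal deferrable load and asserts that the solver finishes with an 'Optimal' status. Assuming the test module is importable from the repository root (standard unittest layout), it can presumably be run on its own like this:

```python
# Presumed standalone invocation of the new test via the standard unittest runner.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName(
    "tests.test_optimization.TestOptimization.test_thermal_load_optim")
unittest.TextTestRunner(verbosity=2).run(suite)
```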