emhass 0.10.3__tar.gz → 0.10.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59) hide show
  1. {emhass-0.10.3 → emhass-0.10.5}/CHANGELOG.md +9 -0
  2. {emhass-0.10.3 → emhass-0.10.5}/PKG-INFO +1 -1
  3. {emhass-0.10.3 → emhass-0.10.5}/setup.py +1 -1
  4. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/command_line.py +38 -23
  5. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/optimization.py +194 -145
  6. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/retrieve_hass.py +3 -0
  7. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/utils.py +37 -32
  8. {emhass-0.10.3 → emhass-0.10.5}/src/emhass.egg-info/PKG-INFO +1 -1
  9. {emhass-0.10.3 → emhass-0.10.5}/tests/test_optimization.py +37 -12
  10. {emhass-0.10.3 → emhass-0.10.5}/CODE_OF_CONDUCT.md +0 -0
  11. {emhass-0.10.3 → emhass-0.10.5}/CONTRIBUTING.md +0 -0
  12. {emhass-0.10.3 → emhass-0.10.5}/LICENSE +0 -0
  13. {emhass-0.10.3 → emhass-0.10.5}/MANIFEST.in +0 -0
  14. {emhass-0.10.3 → emhass-0.10.5}/README.md +0 -0
  15. {emhass-0.10.3 → emhass-0.10.5}/data/data_load_cost_forecast.csv +0 -0
  16. {emhass-0.10.3 → emhass-0.10.5}/data/data_load_forecast.csv +0 -0
  17. {emhass-0.10.3 → emhass-0.10.5}/data/data_prod_price_forecast.csv +0 -0
  18. {emhass-0.10.3 → emhass-0.10.5}/data/data_train_load_clustering.pkl +0 -0
  19. {emhass-0.10.3 → emhass-0.10.5}/data/data_train_load_forecast.pkl +0 -0
  20. {emhass-0.10.3 → emhass-0.10.5}/data/data_weather_forecast.csv +0 -0
  21. {emhass-0.10.3 → emhass-0.10.5}/data/heating_prediction.csv +0 -0
  22. {emhass-0.10.3 → emhass-0.10.5}/data/opt_res_latest.csv +0 -0
  23. {emhass-0.10.3 → emhass-0.10.5}/data/opt_res_perfect_optim_cost.csv +0 -0
  24. {emhass-0.10.3 → emhass-0.10.5}/data/opt_res_perfect_optim_profit.csv +0 -0
  25. {emhass-0.10.3 → emhass-0.10.5}/data/opt_res_perfect_optim_self-consumption.csv +0 -0
  26. {emhass-0.10.3 → emhass-0.10.5}/data/test_df_final.pkl +0 -0
  27. {emhass-0.10.3 → emhass-0.10.5}/data/test_response_get_data_get_method.pbz2 +0 -0
  28. {emhass-0.10.3 → emhass-0.10.5}/data/test_response_scrapper_get_method.pbz2 +0 -0
  29. {emhass-0.10.3 → emhass-0.10.5}/data/test_response_solarforecast_get_method.pbz2 +0 -0
  30. {emhass-0.10.3 → emhass-0.10.5}/data/test_response_solcast_get_method.pbz2 +0 -0
  31. {emhass-0.10.3 → emhass-0.10.5}/pyproject.toml +0 -0
  32. {emhass-0.10.3 → emhass-0.10.5}/setup.cfg +0 -0
  33. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/__init__.py +0 -0
  34. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/data/cec_inverters.pbz2 +0 -0
  35. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/data/cec_modules.pbz2 +0 -0
  36. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/forecast.py +0 -0
  37. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/machine_learning_forecaster.py +0 -0
  38. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/machine_learning_regressor.py +0 -0
  39. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/static/advanced.html +0 -0
  40. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/static/basic.html +0 -0
  41. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/static/img/emhass_icon.png +0 -0
  42. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/static/img/emhass_logo_short.svg +0 -0
  43. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/static/img/feather-sprite.svg +0 -0
  44. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/static/script.js +0 -0
  45. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/static/style.css +0 -0
  46. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/templates/index.html +0 -0
  47. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/templates/template.html +0 -0
  48. {emhass-0.10.3 → emhass-0.10.5}/src/emhass/web_server.py +0 -0
  49. {emhass-0.10.3 → emhass-0.10.5}/src/emhass.egg-info/SOURCES.txt +0 -0
  50. {emhass-0.10.3 → emhass-0.10.5}/src/emhass.egg-info/dependency_links.txt +0 -0
  51. {emhass-0.10.3 → emhass-0.10.5}/src/emhass.egg-info/entry_points.txt +0 -0
  52. {emhass-0.10.3 → emhass-0.10.5}/src/emhass.egg-info/requires.txt +0 -0
  53. {emhass-0.10.3 → emhass-0.10.5}/src/emhass.egg-info/top_level.txt +0 -0
  54. {emhass-0.10.3 → emhass-0.10.5}/tests/test_command_line_utils.py +0 -0
  55. {emhass-0.10.3 → emhass-0.10.5}/tests/test_forecast.py +0 -0
  56. {emhass-0.10.3 → emhass-0.10.5}/tests/test_machine_learning_forecaster.py +0 -0
  57. {emhass-0.10.3 → emhass-0.10.5}/tests/test_machine_learning_regressor.py +0 -0
  58. {emhass-0.10.3 → emhass-0.10.5}/tests/test_retrieve_hass.py +0 -0
  59. {emhass-0.10.3 → emhass-0.10.5}/tests/test_utils.py +0 -0
@@ -1,5 +1,14 @@
1
1
  # Changelog
2
2
 
3
+ ## 0.10.5 - 2024-07-12
4
+ ### Improvement
5
+ - Added support for publishing thermal load data, namely the predicted room temperature
6
+
7
+ ## 0.10.4 - 2024-07-10
8
+ ### Improvement
9
+ - Added a new thermal modeling, see the new section in the documentation for help to implement this type of model for thermal deferrable loads
10
+ - Improved documentation
11
+
3
12
  ## 0.10.3 - 2024-07-06
4
13
  ### Improvement
5
14
  - Added improved support for `def_start_penalty` option
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: emhass
3
- Version: 0.10.3
3
+ Version: 0.10.5
4
4
  Summary: An Energy Management System for Home Assistant
5
5
  Home-page: https://github.com/davidusb-geek/emhass
6
6
  Author: David HERNANDEZ
@@ -19,7 +19,7 @@ long_description = (here / 'README.md').read_text(encoding='utf-8')
19
19
 
20
20
  setup(
21
21
  name='emhass', # Required
22
- version='0.10.3', # Required
22
+ version='0.10.5', # Required
23
23
  description='An Energy Management System for Home Assistant', # Optional
24
24
  long_description=long_description, # Optional
25
25
  long_description_content_type='text/markdown', # Optional (see note above)
@@ -291,7 +291,6 @@ def weather_forecast_cache(emhass_conf: dict, params: str,
291
291
 
292
292
  return True
293
293
 
294
-
295
294
  def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
296
295
  save_data_to_file: Optional[bool] = True,
297
296
  debug: Optional[bool] = False) -> pd.DataFrame:
@@ -334,8 +333,6 @@ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
334
333
  if not debug:
335
334
  opt_res.to_csv(
336
335
  input_data_dict['emhass_conf']['data_path'] / filename, index_label='timestamp')
337
-
338
-
339
336
  if not isinstance(input_data_dict["params"],dict):
340
337
  params = json.loads(input_data_dict["params"])
341
338
  else:
@@ -348,7 +345,6 @@ def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
348
345
 
349
346
  return opt_res
350
347
 
351
-
352
348
  def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
353
349
  save_data_to_file: Optional[bool] = False,
354
350
  debug: Optional[bool] = False) -> pd.DataFrame:
@@ -379,6 +375,9 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
379
375
  method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
380
376
  if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
381
377
  return False
378
+ if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
379
+ df_input_data_dayahead["outdoor_temperature_forecast"] = \
380
+ input_data_dict["params"]["passed_data"]["outdoor_temperature_forecast"]
382
381
  opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
383
382
  df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
384
383
  # Save CSV file for publish_data
@@ -397,7 +396,6 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
397
396
  params = json.loads(input_data_dict["params"])
398
397
  else:
399
398
  params = input_data_dict["params"]
400
-
401
399
 
402
400
  # if continual_publish, save day_ahead results to data_path/entities json
403
401
  if input_data_dict["retrieve_hass_conf"].get("continual_publish",False) or params["passed_data"].get("entity_save",False):
@@ -406,7 +404,6 @@ def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
406
404
 
407
405
  return opt_res_dayahead
408
406
 
409
-
410
407
  def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
411
408
  save_data_to_file: Optional[bool] = False,
412
409
  debug: Optional[bool] = False) -> pd.DataFrame:
@@ -436,6 +433,9 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
436
433
  df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
437
434
  if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
438
435
  return False
436
+ if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
437
+ df_input_data_dayahead["outdoor_temperature_forecast"] = \
438
+ input_data_dict["params"]["passed_data"]["outdoor_temperature_forecast"]
439
439
  # The specifics params for the MPC at runtime
440
440
  prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
441
441
  soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
@@ -471,7 +471,6 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
471
471
 
472
472
  return opt_res_naive_mpc
473
473
 
474
-
475
474
  def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
476
475
  debug: Optional[bool] = False) -> Tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
477
476
  """Perform a forecast model fit from training data retrieved from Home Assistant.
@@ -507,7 +506,6 @@ def forecast_model_fit(input_data_dict: dict, logger: logging.Logger,
507
506
  pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
508
507
  return df_pred, df_pred_backtest, mlf
509
508
 
510
-
511
509
  def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
512
510
  use_last_window: Optional[bool] = True,
513
511
  debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
@@ -585,7 +583,6 @@ def forecast_model_predict(input_data_dict: dict, logger: logging.Logger,
585
583
  type_var="mlforecaster", publish_prefix=publish_prefix)
586
584
  return predictions
587
585
 
588
-
589
586
  def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
590
587
  debug: Optional[bool] = False, mlf: Optional[MLForecaster] = None
591
588
  ) -> Tuple[pd.DataFrame, MLForecaster]:
@@ -626,7 +623,6 @@ def forecast_model_tune(input_data_dict: dict, logger: logging.Logger,
626
623
  pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
627
624
  return df_pred_optim, mlf
628
625
 
629
-
630
626
  def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
631
627
  debug: Optional[bool] = False) -> MLRegressor:
632
628
  """Perform a forecast model fit from training data retrieved from Home Assistant.
@@ -681,7 +677,6 @@ def regressor_model_fit(input_data_dict: dict, logger: logging.Logger,
681
677
  pickle.dump(mlr, outp, pickle.HIGHEST_PROTOCOL)
682
678
  return mlr
683
679
 
684
-
685
680
  def regressor_model_predict(input_data_dict: dict, logger: logging.Logger,
686
681
  debug: Optional[bool] = False, mlr: Optional[MLRegressor] = None
687
682
  ) -> np.ndarray:
@@ -901,6 +896,25 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
901
896
  dont_post=dont_post
902
897
  )
903
898
  cols_published = cols_published + ["P_deferrable{}".format(k)]
899
+ # Publish thermal model data (predicted temperature)
900
+ custom_predicted_temperature_id = params["passed_data"][
901
+ "custom_predicted_temperature_id"
902
+ ]
903
+ for k in range(input_data_dict["opt"].optim_conf["num_def_loads"]):
904
+ if "def_load_config" in input_data_dict["opt"].optim_conf.keys():
905
+ if "thermal_config" in input_data_dict["opt"].optim_conf["def_load_config"][k]:
906
+ input_data_dict["rh"].post_data(
907
+ opt_res_latest["P_deferrable{}".format(k)],
908
+ idx_closest,
909
+ custom_predicted_temperature_id[k]["entity_id"],
910
+ custom_predicted_temperature_id[k]["unit_of_measurement"],
911
+ custom_predicted_temperature_id[k]["friendly_name"],
912
+ type_var="temperature",
913
+ publish_prefix=publish_prefix,
914
+ save_entities=entity_save,
915
+ dont_post=dont_post
916
+ )
917
+ cols_published = cols_published + ["predicted_temp_heater{}".format(k)]
904
918
  # Publish battery power
905
919
  if input_data_dict["opt"].optim_conf["set_use_battery"]:
906
920
  if "P_batt" not in opt_res_latest.columns:
@@ -972,18 +986,19 @@ def publish_data(input_data_dict: dict, logger: logging.Logger,
972
986
  logger.warning(
973
987
  "no optim_status in opt_res_latest, run an optimization task first",
974
988
  )
975
- input_data_dict["rh"].post_data(
976
- opt_res_latest["optim_status"],
977
- idx_closest,
978
- custom_cost_fun_id["entity_id"],
979
- custom_cost_fun_id["unit_of_measurement"],
980
- custom_cost_fun_id["friendly_name"],
981
- type_var="optim_status",
982
- publish_prefix=publish_prefix,
983
- save_entities=entity_save,
984
- dont_post=dont_post
985
- )
986
- cols_published = cols_published + ["optim_status"]
989
+ else:
990
+ input_data_dict["rh"].post_data(
991
+ opt_res_latest["optim_status"],
992
+ idx_closest,
993
+ custom_cost_fun_id["entity_id"],
994
+ custom_cost_fun_id["unit_of_measurement"],
995
+ custom_cost_fun_id["friendly_name"],
996
+ type_var="optim_status",
997
+ publish_prefix=publish_prefix,
998
+ save_entities=entity_save,
999
+ dont_post=dont_post
1000
+ )
1001
+ cols_published = cols_published + ["optim_status"]
987
1002
  # Publish unit_load_cost
988
1003
  custom_unit_load_cost_id = params["passed_data"]["custom_unit_load_cost_id"]
989
1004
  input_data_dict["rh"].post_data(
@@ -103,9 +103,9 @@ class Optimization:
103
103
  r"""
104
104
  Perform the actual optimization using linear programming (LP).
105
105
 
106
- :param data_tp: A DataFrame containing the input data. The results of the \
106
+ :param data_opt: A DataFrame containing the input data. The results of the \
107
107
  optimization will be appended (decision variables, cost function values, etc)
108
- :type data_tp: pd.DataFrame
108
+ :type data_opt: pd.DataFrame
109
109
  :param P_PV: The photovoltaic power values. This can be real historical \
110
110
  values or forecasted values.
111
111
  :type P_PV: numpy.array
@@ -327,47 +327,6 @@ class Optimization:
327
327
  sense = plp.LpConstraintLE,
328
328
  rhs = 0)
329
329
  for i in set_I})
330
-
331
- # Constraint for sequence of deferrable
332
- # WARNING: This is experimental, formulation seems correct but feasibility problems.
333
- # Probably uncomptabile with other constraints
334
- for k in range(self.optim_conf['num_def_loads']):
335
- if type(self.optim_conf['P_deferrable_nom'][k]) == list:
336
- power_sequence = self.optim_conf['P_deferrable_nom'][k]
337
- sequence_length = len(power_sequence)
338
- def create_matrix(input_list, n):
339
- matrix = []
340
- for i in range(n + 1):
341
- row = [0] * i + input_list + [0] * (n - i)
342
- matrix.append(row[:n*2])
343
- return matrix
344
- matrix = create_matrix(power_sequence, n-sequence_length)
345
- y = plp.LpVariable.dicts(f"y{k}", (i for i in range(len(matrix))), cat='Binary')
346
- constraints.update({f"single_value_constraint_{k}" :
347
- plp.LpConstraint(
348
- e = plp.lpSum(y[i] for i in range(len(matrix))) - 1,
349
- sense = plp.LpConstraintEQ,
350
- rhs = 0)
351
- })
352
- constraints.update({f"pdef{k}_sumconstraint_{i}" :
353
- plp.LpConstraint(
354
- e = plp.lpSum(P_deferrable[k][i] for i in set_I) - np.sum(power_sequence),
355
- sense = plp.LpConstraintEQ,
356
- rhs = 0)
357
- })
358
- constraints.update({f"pdef{k}_positive_constraint_{i}" :
359
- plp.LpConstraint(
360
- e = P_deferrable[k][i],
361
- sense = plp.LpConstraintGE,
362
- rhs = 0)
363
- for i in set_I})
364
- for num, mat in enumerate(matrix):
365
- constraints.update({f"pdef{k}_value_constraint_{num}_{i}" :
366
- plp.LpConstraint(
367
- e = P_deferrable[k][i] - mat[i]*y[num],
368
- sense = plp.LpConstraintEQ,
369
- rhs = 0)
370
- for i in set_I})
371
330
 
372
331
  # Two special constraints just for a self-consumption cost function
373
332
  if self.costfun == 'self-consumption':
@@ -400,128 +359,215 @@ class Optimization:
400
359
  for i in set_I})
401
360
 
402
361
  # Treat deferrable loads constraints
362
+ predicted_temps = {}
403
363
  for k in range(self.optim_conf['num_def_loads']):
364
+
404
365
  if type(self.optim_conf['P_deferrable_nom'][k]) == list:
405
- continue
406
- else:
407
- # Total time of deferrable load
408
- constraints.update({"constraint_defload{}_energy".format(k) :
366
+ # Constraint for sequence of deferrable
367
+ # WARNING: This is experimental, formulation seems correct but feasibility problems.
368
+ # Probably uncomptabile with other constraints
369
+ power_sequence = self.optim_conf['P_deferrable_nom'][k]
370
+ sequence_length = len(power_sequence)
371
+ def create_matrix(input_list, n):
372
+ matrix = []
373
+ for i in range(n + 1):
374
+ row = [0] * i + input_list + [0] * (n - i)
375
+ matrix.append(row[:n*2])
376
+ return matrix
377
+ matrix = create_matrix(power_sequence, n-sequence_length)
378
+ y = plp.LpVariable.dicts(f"y{k}", (i for i in range(len(matrix))), cat='Binary')
379
+ constraints.update({f"single_value_constraint_{k}" :
409
380
  plp.LpConstraint(
410
- e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in set_I),
381
+ e = plp.lpSum(y[i] for i in range(len(matrix))) - 1,
411
382
  sense = plp.LpConstraintEQ,
412
- rhs = def_total_hours[k]*self.optim_conf['P_deferrable_nom'][k])
383
+ rhs = 0)
413
384
  })
414
- # Ensure deferrable loads consume energy between def_start_timestep & def_end_timestep
415
- self.logger.debug("Deferrable load {}: Proposed optimization window: {} --> {}".format(
416
- k, def_start_timestep[k], def_end_timestep[k]))
417
- def_start, def_end, warning = Optimization.validate_def_timewindow(
418
- def_start_timestep[k], def_end_timestep[k], ceil(def_total_hours[k]/self.timeStep), n)
419
- if warning is not None:
420
- self.logger.warning("Deferrable load {} : {}".format(k, warning))
421
- self.logger.debug("Deferrable load {}: Validated optimization window: {} --> {}".format(
422
- k, def_start, def_end))
423
- if def_start > 0:
424
- constraints.update({"constraint_defload{}_start_timestep".format(k) :
385
+ constraints.update({f"pdef{k}_sumconstraint_{i}" :
386
+ plp.LpConstraint(
387
+ e = plp.lpSum(P_deferrable[k][i] for i in set_I) - np.sum(power_sequence),
388
+ sense = plp.LpConstraintEQ,
389
+ rhs = 0)
390
+ })
391
+ constraints.update({f"pdef{k}_positive_constraint_{i}" :
392
+ plp.LpConstraint(
393
+ e = P_deferrable[k][i],
394
+ sense = plp.LpConstraintGE,
395
+ rhs = 0)
396
+ for i in set_I})
397
+ for num, mat in enumerate(matrix):
398
+ constraints.update({f"pdef{k}_value_constraint_{num}_{i}" :
425
399
  plp.LpConstraint(
426
- e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in range(0, def_start)),
400
+ e = P_deferrable[k][i] - mat[i]*y[num],
427
401
  sense = plp.LpConstraintEQ,
428
402
  rhs = 0)
429
- })
430
- if def_end > 0:
431
- constraints.update({"constraint_defload{}_end_timestep".format(k) :
403
+ for i in set_I})
404
+
405
+ elif "def_load_config" in self.optim_conf.keys():
406
+ if "thermal_config" in self.optim_conf["def_load_config"][k]:
407
+ # Special case of a thermal deferrable load
408
+ def_load_config = self.optim_conf['def_load_config'][k]
409
+ if def_load_config and 'thermal_config' in def_load_config:
410
+ hc = def_load_config["thermal_config"]
411
+ start_temperature = hc["start_temperature"]
412
+ cooling_constant = hc["cooling_constant"]
413
+ heating_rate = hc["heating_rate"]
414
+ overshoot_temperature = hc["overshoot_temperature"]
415
+ outdoor_temperature_forecast = data_opt['outdoor_temperature_forecast']
416
+ desired_temperatures = hc["desired_temperatures"]
417
+ sense = hc.get('sense', 'heat')
418
+ predicted_temp = [start_temperature]
419
+ for I in set_I:
420
+ if I == 0:
421
+ continue
422
+ predicted_temp.append(
423
+ predicted_temp[I-1]
424
+ + (P_deferrable[k][I-1] * (heating_rate * self.timeStep / self.optim_conf['P_deferrable_nom'][k]))
425
+ - (cooling_constant * (predicted_temp[I-1] - outdoor_temperature_forecast[I-1])))
426
+ if len(desired_temperatures) > I and desired_temperatures[I]:
427
+ constraints.update({"constraint_defload{}_temperature_{}".format(k, I):
428
+ plp.LpConstraint(
429
+ e = predicted_temp[I],
430
+ sense = plp.LpConstraintGE if sense == 'heat' else plp.LpConstraintLE,
431
+ rhs = desired_temperatures[I],
432
+ )
433
+ })
434
+ constraints.update({"constraint_defload{}_overshoot_temp_{}".format(k, I):
435
+ plp.LpConstraint(
436
+ e = predicted_temp[I],
437
+ sense = plp.LpConstraintLE if sense == 'heat' else plp.LpConstraintGE,
438
+ rhs = overshoot_temperature,
439
+ )
440
+ for I in set_I})
441
+ predicted_temps[k] = predicted_temp
442
+
443
+ else:
444
+
445
+ if def_total_hours[k] > 0:
446
+ # Total time of deferrable load
447
+ constraints.update({"constraint_defload{}_energy".format(k) :
432
448
  plp.LpConstraint(
433
- e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in range(def_end, n)),
449
+ e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in set_I),
434
450
  sense = plp.LpConstraintEQ,
435
- rhs = 0)
451
+ rhs = def_total_hours[k]*self.optim_conf['P_deferrable_nom'][k])
436
452
  })
437
- # Treat deferrable load as a semi-continuous variable
438
- if self.optim_conf['treat_def_as_semi_cont'][k]:
439
- constraints.update({"constraint_pdef{}_semicont1_{}".format(k, i) :
440
- plp.LpConstraint(
441
- e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i],
442
- sense=plp.LpConstraintGE,
443
- rhs=0)
444
- for i in set_I})
445
- constraints.update({"constraint_pdef{}_semicont2_{}".format(k, i) :
446
- plp.LpConstraint(
447
- e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i],
448
- sense=plp.LpConstraintLE,
449
- rhs=0)
450
- for i in set_I})
451
- # Treat the number of starts for a deferrable load (old method, kept here just in case)
452
- # if self.optim_conf['set_def_constant'][k]:
453
- # constraints.update({"constraint_pdef{}_start1_{}".format(k, i) :
454
- # plp.LpConstraint(
455
- # e=P_deferrable[k][i] - P_def_bin2[k][i]*M,
456
- # sense=plp.LpConstraintLE,
457
- # rhs=0)
458
- # for i in set_I})
459
- # constraints.update({"constraint_pdef{}_start2_{}".format(k, i):
460
- # plp.LpConstraint(
461
- # e=P_def_start[k][i] - P_def_bin2[k][i] + (P_def_bin2[k][i-1] if i-1 >= 0 else 0),
462
- # sense=plp.LpConstraintGE,
463
- # rhs=0)
464
- # for i in set_I})
465
- # constraints.update({"constraint_pdef{}_start3".format(k) :
466
- # plp.LpConstraint(
467
- # e = plp.lpSum(P_def_start[k][i] for i in set_I),
468
- # sense = plp.LpConstraintEQ,
469
- # rhs = 1)
470
- # })
471
- # Treat the number of starts for a deferrable load (new method considering current state)
472
- current_state = 0
473
- if ("def_current_state" in self.optim_conf and len(self.optim_conf["def_current_state"]) > k):
474
- current_state = 1 if self.optim_conf["def_current_state"][k] else 0
475
- # P_deferrable < P_def_bin2 * 1 million
476
- # P_deferrable must be zero if P_def_bin2 is zero
477
- constraints.update({"constraint_pdef{}_start1_{}".format(k, i):
453
+
454
+ # Ensure deferrable loads consume energy between def_start_timestep & def_end_timestep
455
+ self.logger.debug("Deferrable load {}: Proposed optimization window: {} --> {}".format(
456
+ k, def_start_timestep[k], def_end_timestep[k]))
457
+ def_start, def_end, warning = Optimization.validate_def_timewindow(
458
+ def_start_timestep[k], def_end_timestep[k], ceil(def_total_hours[k]/self.timeStep), n)
459
+ if warning is not None:
460
+ self.logger.warning("Deferrable load {} : {}".format(k, warning))
461
+ self.logger.debug("Deferrable load {}: Validated optimization window: {} --> {}".format(
462
+ k, def_start, def_end))
463
+ if def_start > 0:
464
+ constraints.update({"constraint_defload{}_start_timestep".format(k) :
478
465
  plp.LpConstraint(
479
- e=P_deferrable[k][i] - P_def_bin2[k][i] * M,
480
- sense=plp.LpConstraintLE,
481
- rhs=0)
482
- for i in set_I})
483
- # P_deferrable - P_def_bin2 <= 0
484
- # P_def_bin2 must be zero if P_deferrable is zero
485
- constraints.update({"constraint_pdef{}_start1a_{}".format(k, i):
466
+ e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in range(0, def_start)),
467
+ sense = plp.LpConstraintEQ,
468
+ rhs = 0)
469
+ })
470
+ if def_end > 0:
471
+ constraints.update({"constraint_defload{}_end_timestep".format(k) :
486
472
  plp.LpConstraint(
487
- e=P_def_bin2[k][i] - P_deferrable[k][i],
488
- sense=plp.LpConstraintLE,
489
- rhs=0)
490
- for i in set_I})
491
- # P_def_start + P_def_bin2[i-1] >= P_def_bin2[i]
492
- # If load is on this cycle (P_def_bin2[i] is 1) then P_def_start must be 1 OR P_def_bin2[i-1] must be 1
493
- # For first timestep, use current state if provided by caller.
494
- constraints.update({"constraint_pdef{}_start2_{}".format(k, i):
473
+ e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in range(def_end, n)),
474
+ sense = plp.LpConstraintEQ,
475
+ rhs = 0)
476
+ })
477
+
478
+ # Treat the number of starts for a deferrable load (new method considering current state)
479
+ current_state = 0
480
+ if ("def_current_state" in self.optim_conf and len(self.optim_conf["def_current_state"]) > k):
481
+ current_state = 1 if self.optim_conf["def_current_state"][k] else 0
482
+ # P_deferrable < P_def_bin2 * 1 million
483
+ # P_deferrable must be zero if P_def_bin2 is zero
484
+ constraints.update({"constraint_pdef{}_start1_{}".format(k, i):
485
+ plp.LpConstraint(
486
+ e=P_deferrable[k][i] - P_def_bin2[k][i] * M,
487
+ sense=plp.LpConstraintLE,
488
+ rhs=0)
489
+ for i in set_I})
490
+ # P_deferrable - P_def_bin2 <= 0
491
+ # P_def_bin2 must be zero if P_deferrable is zero
492
+ constraints.update({"constraint_pdef{}_start1a_{}".format(k, i):
493
+ plp.LpConstraint(
494
+ e=P_def_bin2[k][i] - P_deferrable[k][i],
495
+ sense=plp.LpConstraintLE,
496
+ rhs=0)
497
+ for i in set_I})
498
+ # P_def_start + P_def_bin2[i-1] >= P_def_bin2[i]
499
+ # If load is on this cycle (P_def_bin2[i] is 1) then P_def_start must be 1 OR P_def_bin2[i-1] must be 1
500
+ # For first timestep, use current state if provided by caller.
501
+ constraints.update({"constraint_pdef{}_start2_{}".format(k, i):
502
+ plp.LpConstraint(
503
+ e=P_def_start[k][i]
504
+ - P_def_bin2[k][i]
505
+ + (P_def_bin2[k][i - 1] if i - 1 >= 0 else current_state),
506
+ sense=plp.LpConstraintGE,
507
+ rhs=0)
508
+ for i in set_I})
509
+ # P_def_bin2[i-1] + P_def_start <= 1
510
+ # If load started this cycle (P_def_start[i] is 1) then P_def_bin2[i-1] must be 0
511
+ constraints.update({"constraint_pdef{}_start3_{}".format(k, i):
512
+ plp.LpConstraint(
513
+ e=(P_def_bin2[k][i-1] if i-1 >= 0 else 0) + P_def_start[k][i],
514
+ sense=plp.LpConstraintLE,
515
+ rhs=1)
516
+ for i in set_I})
517
+
518
+ # Treat deferrable as a fixed value variable with just one startup
519
+ if self.optim_conf['set_def_constant'][k]:
520
+ # P_def_start[i] must be 1 for exactly 1 value of i
521
+ constraints.update({"constraint_pdef{}_start4".format(k) :
522
+ plp.LpConstraint(
523
+ e = plp.lpSum(P_def_start[k][i] for i in set_I),
524
+ sense = plp.LpConstraintEQ,
525
+ rhs = 1)
526
+ })
527
+ # P_def_bin2 must be 1 for exactly the correct number of timesteps.
528
+ constraints.update({"constraint_pdef{}_start5".format(k) :
529
+ plp.LpConstraint(
530
+ e = plp.lpSum(P_def_bin2[k][i] for i in set_I),
531
+ sense = plp.LpConstraintEQ,
532
+ rhs = def_total_hours[k]/self.timeStep)
533
+ })
534
+
535
+ # Treat deferrable load as a semi-continuous variable
536
+ if self.optim_conf['treat_def_as_semi_cont'][k]:
537
+ constraints.update({"constraint_pdef{}_semicont1_{}".format(k, i) :
495
538
  plp.LpConstraint(
496
- e=P_def_start[k][i]
497
- - P_def_bin2[k][i]
498
- + (P_def_bin2[k][i - 1] if i - 1 >= 0 else current_state),
539
+ e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i],
499
540
  sense=plp.LpConstraintGE,
500
541
  rhs=0)
501
542
  for i in set_I})
502
- # P_def_bin2[i-1] + P_def_start <= 1
503
- # If load started this cycle (P_def_start[i] is 1) then P_def_bin2[i-1] must be 0
504
- constraints.update({"constraint_pdef{}_start3_{}".format(k, i):
543
+ constraints.update({"constraint_pdef{}_semicont2_{}".format(k, i) :
505
544
  plp.LpConstraint(
506
- e=(P_def_bin2[k][i-1] if i-1 >= 0 else 0) + P_def_start[k][i],
545
+ e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i],
507
546
  sense=plp.LpConstraintLE,
508
- rhs=1)
547
+ rhs=0)
509
548
  for i in set_I})
510
- if self.optim_conf['set_def_constant'][k]:
511
- # P_def_start[i] must be 1 for exactly 1 value of i
512
- constraints.update({"constraint_pdef{}_start4".format(k) :
513
- plp.LpConstraint(
514
- e = plp.lpSum(P_def_start[k][i] for i in set_I),
515
- sense = plp.LpConstraintEQ,
516
- rhs = 1)
517
- })
518
- # P_def_bin2 must be 1 for exactly the correct number of timesteps.
519
- constraints.update({"constraint_pdef{}_start5".format(k) :
520
- plp.LpConstraint(
521
- e = plp.lpSum(P_def_bin2[k][i] for i in set_I),
522
- sense = plp.LpConstraintEQ,
523
- rhs = def_total_hours[k]/self.timeStep)
524
- })
549
+
550
+
551
+ # Treat the number of starts for a deferrable load (old method, kept here just in case)
552
+ # if self.optim_conf['set_def_constant'][k]:
553
+ # constraints.update({"constraint_pdef{}_start1_{}".format(k, i) :
554
+ # plp.LpConstraint(
555
+ # e=P_deferrable[k][i] - P_def_bin2[k][i]*M,
556
+ # sense=plp.LpConstraintLE,
557
+ # rhs=0)
558
+ # for i in set_I})
559
+ # constraints.update({"constraint_pdef{}_start2_{}".format(k, i):
560
+ # plp.LpConstraint(
561
+ # e=P_def_start[k][i] - P_def_bin2[k][i] + (P_def_bin2[k][i-1] if i-1 >= 0 else 0),
562
+ # sense=plp.LpConstraintGE,
563
+ # rhs=0)
564
+ # for i in set_I})
565
+ # constraints.update({"constraint_pdef{}_start3".format(k) :
566
+ # plp.LpConstraint(
567
+ # e = plp.lpSum(P_def_start[k][i] for i in set_I),
568
+ # sense = plp.LpConstraintEQ,
569
+ # rhs = 1)
570
+ # })
525
571
 
526
572
  # The battery constraints
527
573
  if self.optim_conf['set_use_battery']:
@@ -685,6 +731,9 @@ class Optimization:
685
731
  for k in range(self.optim_conf["num_def_loads"]):
686
732
  opt_tp[f"P_def_start_{k}"] = [P_def_start[k][i].varValue for i in set_I]
687
733
  opt_tp[f"P_def_bin2_{k}"] = [P_def_bin2[k][i].varValue for i in set_I]
734
+ for i, predicted_temp in predicted_temps.items():
735
+ opt_tp[f"predicted_temp_heater{i}"] = pd.Series([round(pt.value(), 2) if isinstance(pt, plp.LpAffineExpression) else pt for pt in predicted_temp], index=opt_tp.index)
736
+ opt_tp[f"target_temp_heater{i}"] = pd.Series(self.optim_conf["def_load_config"][i]['thermal_config']["desired_temperatures"], index=opt_tp.index)
688
737
 
689
738
  return opt_tp
690
739
 
@@ -370,6 +370,9 @@ class RetrieveHass:
370
370
  elif type_var == "deferrable":
371
371
  data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
372
372
  friendly_name, "deferrables_schedule", state)
373
+ elif type_var == "temperature":
374
+ data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
375
+ friendly_name, "predicted_temperatures", state)
373
376
  elif type_var == "batt":
374
377
  data = RetrieveHass.get_attr_data_dict(data_df, idx, entity_id, unit_of_measurement,
375
378
  friendly_name, "battery_scheduled_power", state)
@@ -143,6 +143,7 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
143
143
  params = {}
144
144
  # Some default data needed
145
145
  custom_deferrable_forecast_id = []
146
+ custom_predicted_temperature_id = []
146
147
  for k in range(optim_conf["num_def_loads"]):
147
148
  custom_deferrable_forecast_id.append(
148
149
  {
@@ -151,6 +152,13 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
151
152
  "friendly_name": "Deferrable Load {}".format(k),
152
153
  }
153
154
  )
155
+ custom_predicted_temperature_id.append(
156
+ {
157
+ "entity_id": "sensor.temp_predicted{}".format(k),
158
+ "unit_of_measurement": "°C",
159
+ "friendly_name": "Predicted temperature {}".format(k),
160
+ }
161
+ )
154
162
  default_passed_dict = {
155
163
  "custom_pv_forecast_id": {
156
164
  "entity_id": "sensor.p_pv_forecast",
@@ -208,6 +216,7 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
208
216
  "friendly_name": "Unit Prod Price",
209
217
  },
210
218
  "custom_deferrable_forecast_id": custom_deferrable_forecast_id,
219
+ "custom_predicted_temperature_id": custom_predicted_temperature_id,
211
220
  "publish_prefix": "",
212
221
  }
213
222
  if "passed_data" in params.keys():
@@ -285,16 +294,6 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
285
294
  else:
286
295
  def_end_timestep = runtimeparams["def_end_timestep"]
287
296
  params["passed_data"]["def_end_timestep"] = def_end_timestep
288
- if "alpha" not in runtimeparams.keys():
289
- alpha = 0.5
290
- else:
291
- alpha = runtimeparams["alpha"]
292
- params["passed_data"]["alpha"] = alpha
293
- if "beta" not in runtimeparams.keys():
294
- beta = 0.5
295
- else:
296
- beta = runtimeparams["beta"]
297
- params["passed_data"]["beta"] = beta
298
297
  forecast_dates = copy.deepcopy(forecast_dates)[0:prediction_horizon]
299
298
  else:
300
299
  params["passed_data"]["prediction_horizon"] = None
@@ -303,11 +302,9 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
303
302
  params["passed_data"]["def_total_hours"] = None
304
303
  params["passed_data"]["def_start_timestep"] = None
305
304
  params["passed_data"]["def_end_timestep"] = None
306
- params["passed_data"]["alpha"] = None
307
- params["passed_data"]["beta"] = None
308
305
  # Treat passed forecast data lists
309
- list_forecast_key = ['pv_power_forecast', 'load_power_forecast', 'load_cost_forecast', 'prod_price_forecast']
310
- forecast_methods = ['weather_forecast_method', 'load_forecast_method', 'load_cost_forecast_method', 'prod_price_forecast_method']
306
+ list_forecast_key = ['pv_power_forecast', 'load_power_forecast', 'load_cost_forecast', 'prod_price_forecast', 'outdoor_temperature_forecast']
307
+ forecast_methods = ['weather_forecast_method', 'load_forecast_method', 'load_cost_forecast_method', 'prod_price_forecast_method', 'outdoor_temperature_forecast_method']
311
308
  # Param to save forecast cache (i.e. Solcast)
312
309
  if "weather_forecast_cache" not in runtimeparams.keys():
313
310
  weather_forecast_cache = False
@@ -389,19 +386,13 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
389
386
  if "model_predict_unit_of_measurement" not in runtimeparams.keys():
390
387
  model_predict_unit_of_measurement = "W"
391
388
  else:
392
- model_predict_unit_of_measurement = runtimeparams[
393
- "model_predict_unit_of_measurement"
394
- ]
395
- params["passed_data"][
396
- "model_predict_unit_of_measurement"
397
- ] = model_predict_unit_of_measurement
389
+ model_predict_unit_of_measurement = runtimeparams["model_predict_unit_of_measurement"]
390
+ params["passed_data"]["model_predict_unit_of_measurement"] = model_predict_unit_of_measurement
398
391
  if "model_predict_friendly_name" not in runtimeparams.keys():
399
392
  model_predict_friendly_name = "Load Power Forecast custom ML model"
400
393
  else:
401
394
  model_predict_friendly_name = runtimeparams["model_predict_friendly_name"]
402
- params["passed_data"][
403
- "model_predict_friendly_name"
404
- ] = model_predict_friendly_name
395
+ params["passed_data"]["model_predict_friendly_name"] = model_predict_friendly_name
405
396
  if "mlr_predict_entity_id" not in runtimeparams.keys():
406
397
  mlr_predict_entity_id = "sensor.mlr_predict"
407
398
  else:
@@ -410,17 +401,24 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
410
401
  if "mlr_predict_unit_of_measurement" not in runtimeparams.keys():
411
402
  mlr_predict_unit_of_measurement = None
412
403
  else:
413
- mlr_predict_unit_of_measurement = runtimeparams[
414
- "mlr_predict_unit_of_measurement"
415
- ]
416
- params["passed_data"][
417
- "mlr_predict_unit_of_measurement"
418
- ] = mlr_predict_unit_of_measurement
404
+ mlr_predict_unit_of_measurement = runtimeparams["mlr_predict_unit_of_measurement"]
405
+ params["passed_data"]["mlr_predict_unit_of_measurement"] = mlr_predict_unit_of_measurement
419
406
  if "mlr_predict_friendly_name" not in runtimeparams.keys():
420
407
  mlr_predict_friendly_name = "mlr predictor"
421
408
  else:
422
409
  mlr_predict_friendly_name = runtimeparams["mlr_predict_friendly_name"]
423
410
  params["passed_data"]["mlr_predict_friendly_name"] = mlr_predict_friendly_name
411
+ # Treat passed data for other parameters
412
+ if "alpha" not in runtimeparams.keys():
413
+ alpha = 0.5
414
+ else:
415
+ alpha = runtimeparams["alpha"]
416
+ params["passed_data"]["alpha"] = alpha
417
+ if "beta" not in runtimeparams.keys():
418
+ beta = 0.5
419
+ else:
420
+ beta = runtimeparams["beta"]
421
+ params["passed_data"]["beta"] = beta
424
422
  # Treat optimization configuration parameters passed at runtime
425
423
  if "num_def_loads" in runtimeparams.keys():
426
424
  optim_conf["num_def_loads"] = runtimeparams["num_def_loads"]
@@ -447,6 +445,8 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
447
445
  optim_conf["def_start_penalty"] = [
448
446
  ast.literal_eval(str(k).capitalize()) for k in runtimeparams["def_start_penalty"]
449
447
  ]
448
+ if 'def_load_config' in runtimeparams:
449
+ optim_conf["def_load_config"] = runtimeparams['def_load_config']
450
450
  if "solcast_api_key" in runtimeparams.keys():
451
451
  retrieve_hass_conf["solcast_api_key"] = runtimeparams["solcast_api_key"]
452
452
  optim_conf["weather_forecast_method"] = "solcast"
@@ -469,7 +469,7 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
469
469
  if 'freq' in runtimeparams.keys():
470
470
  retrieve_hass_conf['freq'] = pd.to_timedelta(runtimeparams['freq'], "minutes")
471
471
  if 'continual_publish' in runtimeparams.keys():
472
- retrieve_hass_conf['continual_publish'] = bool(runtimeparams['continual_publish'])
472
+ retrieve_hass_conf['continual_publish'] = bool(runtimeparams['continual_publish'])
473
473
  # Treat plant configuration parameters passed at runtime
474
474
  if "SOCmin" in runtimeparams.keys():
475
475
  plant_conf["SOCmin"] = runtimeparams["SOCmin"]
@@ -530,6 +530,10 @@ def treat_runtimeparams(runtimeparams: str, params: str, retrieve_hass_conf: dic
530
530
  params["passed_data"]["custom_deferrable_forecast_id"] = runtimeparams[
531
531
  "custom_deferrable_forecast_id"
532
532
  ]
533
+ if "custom_predicted_temperature_id" in runtimeparams.keys():
534
+ params["passed_data"]["custom_predicted_temperature_id"] = runtimeparams[
535
+ "custom_predicted_temperature_id"
536
+ ]
533
537
  # A condition to put a prefix on all published data, or check for saved data under prefix name
534
538
  if "publish_prefix" not in runtimeparams.keys():
535
539
  publish_prefix = ""
@@ -952,7 +956,8 @@ def set_df_index_freq(df: pd.DataFrame) -> pd.DataFrame:
952
956
 
953
957
  """
954
958
  idx_diff = np.diff(df.index)
959
+ # Sometimes there are zero values in this list.
960
+ idx_diff = idx_diff[np.nonzero(idx_diff)]
955
961
  sampling = pd.to_timedelta(np.median(idx_diff))
956
962
  df = df[~df.index.duplicated()]
957
- df = df.asfreq(sampling)
958
- return df
963
+ return df.asfreq(sampling)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: emhass
3
- Version: 0.10.3
3
+ Version: 0.10.5
4
4
  Summary: An Energy Management System for Home Assistant
5
5
  Home-page: https://github.com/davidusb-geek/emhass
6
6
  Author: David HERNANDEZ
@@ -7,6 +7,7 @@ import pandas as pd
7
7
  import numpy as np
8
8
  import pathlib
9
9
  import pickle
10
+ import random
10
11
  from datetime import datetime, timezone
11
12
 
12
13
  from emhass.retrieve_hass import RetrieveHass
@@ -265,20 +266,44 @@ class TestOptimization(unittest.TestCase):
265
266
  self.df_input_data_dayahead, self.P_PV_forecast, self.P_load_forecast, prediction_horizon,
266
267
  soc_init=soc_init, soc_final=soc_final, def_total_hours=def_total_hours, def_start_timestep=def_start_timestep, def_end_timestep=def_end_timestep)
267
268
  self.assertAlmostEqual(self.opt_res_dayahead.loc[self.opt_res_dayahead.index[-1],'SOC_opt'], soc_final)
269
+
270
+ def test_thermal_load_optim(self):
271
+ self.df_input_data_dayahead = self.fcst.get_load_cost_forecast(self.df_input_data_dayahead)
272
+ self.df_input_data_dayahead = self.fcst.get_prod_price_forecast(self.df_input_data_dayahead)
273
+ self.df_input_data_dayahead['outdoor_temperature_forecast'] = [random.normalvariate(10.0, 3.0) for _ in range(48)]
274
+ runtimeparams = {
275
+ 'def_load_config': [
276
+ {},
277
+ {'thermal_config': {
278
+ 'heating_rate': 5.0,
279
+ 'cooling_constant': 0.1,
280
+ 'overshoot_temperature': 24.0,
281
+ 'start_temperature': 20,
282
+ 'desired_temperatures': [21]*48,
283
+ }
284
+ }
285
+ ]
286
+ }
287
+ self.optim_conf["def_load_config"] = runtimeparams['def_load_config']
288
+ self.opt = Optimization(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
289
+ self.fcst.var_load_cost, self.fcst.var_prod_price,
290
+ self.costfun, emhass_conf, logger)
291
+ unit_load_cost = self.df_input_data_dayahead[self.opt.var_load_cost].values # €/kWh
292
+ unit_prod_price = self.df_input_data_dayahead[self.opt.var_prod_price].values # €/kWh
293
+ self.opt_res_dayahead = self.opt.perform_optimization(self.df_input_data_dayahead,
294
+ self.P_PV_forecast.values.ravel(),
295
+ self.P_load_forecast.values.ravel(),
296
+ unit_load_cost, unit_prod_price)
297
+ self.assertIsInstance(self.opt_res_dayahead, type(pd.DataFrame()))
298
+ self.assertIsInstance(self.opt_res_dayahead.index, pd.core.indexes.datetimes.DatetimeIndex)
299
+ self.assertIsInstance(self.opt_res_dayahead.index.dtype, pd.core.dtypes.dtypes.DatetimeTZDtype)
300
+ self.assertTrue('cost_fun_'+self.costfun in self.opt_res_dayahead.columns)
301
+ self.assertTrue(self.opt.optim_status == 'Optimal')
268
302
 
269
-
270
-
271
303
  def run_penalty_test_forecast(self):
272
- self.opt = Optimization(
273
- self.retrieve_hass_conf,
274
- self.optim_conf,
275
- self.plant_conf,
276
- self.fcst.var_load_cost,
277
- self.fcst.var_prod_price,
278
- self.costfun,
279
- emhass_conf,
280
- logger,
281
- )
304
+ self.opt = Optimization(self.retrieve_hass_conf, self.optim_conf, self.plant_conf,
305
+ self.fcst.var_load_cost, self.fcst.var_prod_price,
306
+ self.costfun, emhass_conf, logger)
282
307
  def_total_hours = [5 * self.retrieve_hass_conf["freq"].seconds / 3600.0]
283
308
  def_start_timestep = [0]
284
309
  def_end_timestep = [0]
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes