emhass 0.13.3__py3-none-any.whl → 0.13.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emhass/command_line.py +6 -2
- emhass/data/associations.csv +4 -0
- emhass/data/config_defaults.json +4 -0
- emhass/forecast.py +52 -12
- emhass/machine_learning_forecaster.py +0 -2
- emhass/optimization.py +222 -102
- emhass/retrieve_hass.py +7 -5
- emhass/static/data/param_definitions.json +30 -0
- emhass/utils.py +64 -21
- emhass/web_server.py +10 -28
- {emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/METADATA +45 -22
- {emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/RECORD +15 -15
- {emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/WHEEL +0 -0
- {emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/entry_points.txt +0 -0
- {emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/licenses/LICENSE +0 -0
emhass/command_line.py
CHANGED
@@ -72,7 +72,9 @@ def retrieve_home_assistant_data(
     if optim_conf.get("set_use_pv", True):
         var_list.append(retrieve_hass_conf["sensor_power_photovoltaics"])
         if optim_conf.get("set_use_adjusted_pv", True):
-            var_list.append(retrieve_hass_conf["sensor_power_photovoltaics_forecast"])
+            var_list.append(
+                retrieve_hass_conf["sensor_power_photovoltaics_forecast"]
+            )
     if not rh.get_data(
         days_list, var_list, minimal_response=False, significant_changes_only=False
     ):
@@ -302,7 +304,8 @@ def set_input_data_dict(
         else:
             P_PV_forecast = pd.Series(0, index=fcst.forecast_dates)
         P_load_forecast = fcst.get_load_forecast(
-            method=optim_conf["load_forecast_method"]
+            days_min_load_forecast=optim_conf["delta_forecast_daily"].days,
+            method=optim_conf["load_forecast_method"],
         )
         if isinstance(P_load_forecast, bool) and not P_load_forecast:
             logger.error(
@@ -400,6 +403,7 @@ def set_input_data_dict(
         else:
             P_PV_forecast = pd.Series(0, index=fcst.forecast_dates)
         P_load_forecast = fcst.get_load_forecast(
+            days_min_load_forecast=optim_conf["delta_forecast_daily"].days,
             method=optim_conf["load_forecast_method"],
             set_mix_forecast=set_mix_forecast,
             df_now=df_input_data,
emhass/data/associations.csv
CHANGED
@@ -61,6 +61,10 @@ plant_conf,surface_azimuth,surface_azimuth,list_surface_azimuth
 plant_conf,modules_per_string,modules_per_string,list_modules_per_string
 plant_conf,strings_per_inverter,strings_per_inverter,list_strings_per_inverter
 plant_conf,inverter_is_hybrid,inverter_is_hybrid
+plant_conf,inverter_ac_output_max,inverter_ac_output_max
+plant_conf,inverter_ac_input_max,inverter_ac_input_max
+plant_conf,inverter_efficiency_dc_ac,inverter_efficiency_dc_ac
+plant_conf,inverter_efficiency_ac_dc,inverter_efficiency_ac_dc
 plant_conf,compute_curtailment,compute_curtailment
 plant_conf,Pd_max,battery_discharge_power_max
 plant_conf,Pc_max,battery_charge_power_max
emhass/data/config_defaults.json
CHANGED
@@ -116,6 +116,10 @@
         1
     ],
     "inverter_is_hybrid": false,
+    "inverter_ac_output_max": 1000,
+    "inverter_ac_input_max": 1000,
+    "inverter_efficiency_dc_ac": 1.0,
+    "inverter_efficiency_ac_dc": 1.0,
     "compute_curtailment": false,
     "set_use_battery": false,
     "battery_discharge_power_max": 1000,
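The four new plant settings feed the optimizer's DC-side bounds. A minimal sketch of that arithmetic, using the shipped defaults above (the two formulas mirror the hybrid inverter code in emhass/optimization.py further down; the dict literal is just an illustration):

```python
# How the optimizer derives DC-side flow bounds from the new settings
plant_conf = {
    "inverter_ac_output_max": 1000,    # W, AC output limit (PV + battery discharge)
    "inverter_ac_input_max": 1000,     # W, AC input limit (grid charging)
    "inverter_efficiency_dc_ac": 1.0,  # fraction, DC bus -> AC output
    "inverter_efficiency_ac_dc": 1.0,  # fraction, AC input -> DC bus
}

# More DC power may be drawn than the AC output limit when efficiency < 1
# (conversion losses), while grid charging delivers less DC than the AC limit.
P_dc_ac_max = plant_conf["inverter_ac_output_max"] / plant_conf["inverter_efficiency_dc_ac"]
P_ac_dc_max = plant_conf["inverter_ac_input_max"] * plant_conf["inverter_efficiency_ac_dc"]
print(P_dc_ac_max, P_ac_dc_max)  # 1000.0 1000.0 with the unity defaults
```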
emhass/forecast.py
CHANGED
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-
 import bz2
 import copy
 import json
@@ -216,8 +214,7 @@ class Forecast:
     ]

     def get_cached_open_meteo_forecast_json(
-        self,
-        max_age: int | None = 30,
+        self, max_age: int | None = 30, forecast_days: int = 3
     ) -> dict:
         r"""
         Get weather forecast json from Open-Meteo and cache it for re-use.
@@ -235,10 +232,30 @@ class Forecast:
             before it is discarded and a new version fetched from Open-Meteo.
             Defaults to 30 minutes.
         :type max_age: int, optional
+        :param forecast_days: The number of days of forecast data required from Open-Meteo.
+            One additional day is always fetched from Open-Meteo so there is an extra data in the cache.
+            Defaults to 2 days (3 days fetched) to match the prior default.
+        :type forecast_days: int, optional
         :return: The json containing the Open-Meteo forecast data
         :rtype: dict

         """
+
+        # Ensure at least 3 weather forecast days (and 1 more than requested)
+        if forecast_days is None:
+            self.logger.warning(
+                "Open-Meteo forecast_days is missing so defaulting to 3 days"
+            )
+            forecast_days = 3
+        elif forecast_days < 3:
+            self.logger.warning(
+                "Open-Meteo forecast_days is too low (%s) so defaulting to 3 days",
+                forecast_days,
+            )
+            forecast_days = 3
+        else:
+            forecast_days = forecast_days + 1
+
         json_path = os.path.abspath(
             self.emhass_conf["data_path"] / "cached-open-meteo-forecast.json"
         )
@@ -287,10 +304,13 @@ class Forecast:
             + "shortwave_radiation_instant,"
             + "diffuse_radiation_instant,"
             + "direct_normal_irradiance_instant"
+            + "&forecast_days="
+            + str(forecast_days)
             + "&timezone="
             + quote(str(self.time_zone), safe="")
         )
         try:
+            self.logger.debug("Fetching data from Open-Meteo using URL: %s", url)
             response = get(url, headers=headers)
             self.logger.debug("Returned HTTP status code: %s", response.status_code)
             response.raise_for_status()
@@ -349,7 +369,8 @@ class Forecast:
         ):  # The scrapper option is being left here for backward compatibility
             if not os.path.isfile(w_forecast_cache_path):
                 data_raw = self.get_cached_open_meteo_forecast_json(
-                    self.optim_conf["open_meteo_cache_max_age"]
+                    self.optim_conf["open_meteo_cache_max_age"],
+                    self.optim_conf["delta_forecast_daily"].days,
                 )
                 data_15min = pd.DataFrame.from_dict(data_raw["minutely_15"])
                 data_15min["time"] = pd.to_datetime(data_15min["time"])
@@ -671,6 +692,7 @@ class Forecast:
         alpha: float,
         beta: float,
         col: str,
+        ignore_pv_feedback: bool = False,
     ) -> pd.DataFrame:
         """A simple correction method for forecasted data using the current real values of a variable.

@@ -684,9 +706,15 @@ class Forecast:
         :type beta: float
         :param col: The column variable name
         :type col: str
+        :param ignore_pv_feedback: If True, bypass mixing and return original forecast (used during curtailment)
+        :type ignore_pv_feedback: bool
         :return: The output DataFrame with the corrected values
         :rtype: pd.DataFrame
         """
+        # If ignoring PV feedback (e.g., during curtailment), return original forecast
+        if ignore_pv_feedback:
+            return df_forecast
+
         first_fcst = alpha * df_forecast.iloc[0] + beta * df_now[col].iloc[-1]
         df_forecast.iloc[0] = int(round(first_fcst))
         return df_forecast
@@ -787,12 +815,14 @@ class Forecast:
         # Extracting results for AC power
         P_PV_forecast = mc.results.ac
         if set_mix_forecast:
+            ignore_pv_feedback = self.params["passed_data"].get("ignore_pv_feedback_during_curtailment", False)
             P_PV_forecast = Forecast.get_mix_forecast(
                 df_now,
                 P_PV_forecast,
                 self.params["passed_data"]["alpha"],
                 self.params["passed_data"]["beta"],
                 self.var_PV,
+                ignore_pv_feedback,
             )
         P_PV_forecast[P_PV_forecast < 0] = 0  # replace any negative PV values with zero
         self.logger.debug("get_power_from_weather returning:\n%s", P_PV_forecast)
@@ -1410,14 +1440,22 @@ class Forecast:
         forecast_out.index.name = "ts"
         forecast_out = forecast_out.rename(columns={"load": "yhat"})
     elif method == "naive":  # using a naive approach
-        mask_forecast_out = (
-            df.index > days_list[-1] - self.optim_conf["delta_forecast_daily"]
+        # Old code logic (shifted timestamp problem)
+        # mask_forecast_out = (
+        #     df.index > days_list[-1] - self.optim_conf["delta_forecast_daily"]
+        # )
+        # forecast_out = df.copy().loc[mask_forecast_out]
+        # forecast_out = forecast_out.rename(columns={self.var_load_new: "yhat"})
+        # forecast_out = forecast_out.iloc[0 : len(self.forecast_dates)]
+        # forecast_out.index = self.forecast_dates
+        # New code logic
+        forecast_horizon = len(self.forecast_dates)
+        historical_values = df.iloc[-forecast_horizon:]
+        forecast_out = pd.DataFrame(
+            historical_values.values,
+            index=self.forecast_dates,
+            columns=["yhat"]
         )
-        forecast_out = df.copy().loc[mask_forecast_out]
-        forecast_out = forecast_out.rename(columns={self.var_load_new: "yhat"})
-        # Force forecast_out length to avoid mismatches
-        forecast_out = forecast_out.iloc[0 : len(self.forecast_dates)]
-        forecast_out.index = self.forecast_dates
     elif (
         method == "mlforecaster"
     ):  # using a custom forecast model with machine learning
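The rewritten `naive` branch above is a pure persistence forecast: the last `len(self.forecast_dates)` historical samples are re-stamped onto the upcoming forecast dates, which avoids the shifted-timestamp problem of the old masked-window approach. A standalone sketch of the same idea (the function name and inputs are illustrative, not part of the package):

```python
import pandas as pd

def naive_persistence(df: pd.DataFrame, forecast_dates: pd.DatetimeIndex) -> pd.DataFrame:
    # Take the most recent `horizon` samples and re-index them onto the future dates
    horizon = len(forecast_dates)
    return pd.DataFrame(df.iloc[-horizon:].values, index=forecast_dates, columns=["yhat"])
```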
@@ -1506,12 +1544,14 @@ class Forecast:
             return False
         P_Load_forecast = copy.deepcopy(forecast_out["yhat"])
         if set_mix_forecast:
+            # Load forecasts don't need curtailment protection - always use feedback
             P_Load_forecast = Forecast.get_mix_forecast(
                 df_now,
                 P_Load_forecast,
                 self.params["passed_data"]["alpha"],
                 self.params["passed_data"]["beta"],
                 self.var_load_new,
+                False,  # Never ignore feedback for load forecasts
             )
         self.logger.debug("get_load_forecast returning:\n%s", P_Load_forecast)
         return P_Load_forecast
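For context, `get_mix_forecast` blends the first forecast step with the latest measured value as `alpha * forecast + beta * measurement`; the new flag short-circuits that blend. A minimal sketch of the mechanism, assuming a pandas Series forecast (names are illustrative):

```python
import pandas as pd

def mix_first_step(
    df_forecast: pd.Series,
    last_measured: float,
    alpha: float,
    beta: float,
    ignore_pv_feedback: bool = False,
) -> pd.Series:
    # During curtailment the PV sensor reads artificially low, so feeding it
    # back would drag the forecast down (flip-flop); skip the blend instead.
    if ignore_pv_feedback:
        return df_forecast
    df_forecast.iloc[0] = int(round(alpha * df_forecast.iloc[0] + beta * last_measured))
    return df_forecast
```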
emhass/optimization.py
CHANGED
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-
 import bz2
 import copy
 import logging
@@ -114,10 +112,14 @@ class Optimization:
                 "lp_solver=COIN_CMD but lp_solver_path=empty, attempting to use lp_solver_path=/usr/bin/cbc"
             )
             self.lp_solver_path = "/usr/bin/cbc"
-        self.logger.debug(f"Initialized Optimization with retrieve_hass_conf: {retrieve_hass_conf}")
+        self.logger.debug(
+            f"Initialized Optimization with retrieve_hass_conf: {retrieve_hass_conf}"
+        )
         self.logger.debug(f"Optimization configuration: {optim_conf}")
         self.logger.debug(f"Plant configuration: {plant_conf}")
-        self.logger.debug(f"Solver configuration: lp_solver={self.lp_solver}, lp_solver_path={self.lp_solver_path}")
+        self.logger.debug(
+            f"Solver configuration: lp_solver={self.lp_solver}, lp_solver_path={self.lp_solver_path}"
+        )
         self.logger.debug(f"Number of threads: {self.num_threads}")

     def perform_optimization(
@@ -187,10 +189,14 @@ class Optimization:
             soc_final = soc_init
         else:
             soc_final = self.plant_conf["battery_target_state_of_charge"]
-        self.logger.debug(f"Battery usage enabled. Initial SOC: {soc_init}, Final SOC: {soc_final}")
+        self.logger.debug(
+            f"Battery usage enabled. Initial SOC: {soc_init}, Final SOC: {soc_final}"
+        )

         # If def_total_timestep os set, bypass def_total_hours
         if def_total_timestep is not None:
+            if def_total_hours is None:
+                def_total_hours = self.optim_conf["operating_hours_of_each_deferrable_load"]
             def_total_hours = [0 if x != 0 else x for x in def_total_hours]
         elif def_total_hours is None:
             def_total_hours = self.optim_conf["operating_hours_of_each_deferrable_load"]
@@ -205,9 +211,21 @@ class Optimization:

         num_deferrable_loads = self.optim_conf["number_of_deferrable_loads"]

-        def_total_hours = def_total_hours + [0] * (num_deferrable_loads - len(def_total_hours))
-        def_start_timestep = def_start_timestep + [0] * (num_deferrable_loads - len(def_start_timestep))
-        def_end_timestep = def_end_timestep + [0] * (num_deferrable_loads - len(def_end_timestep))
+        # Retrieve the minimum power for each deferrable load, defaulting to 0 if not provided
+        min_power_of_deferrable_loads = self.optim_conf.get("minimum_power_of_deferrable_loads", [0] * num_deferrable_loads)
+        min_power_of_deferrable_loads = min_power_of_deferrable_loads + [0] * (
+            num_deferrable_loads - len(min_power_of_deferrable_loads)
+        )
+
+        def_total_hours = def_total_hours + [0] * (
+            num_deferrable_loads - len(def_total_hours)
+        )
+        def_start_timestep = def_start_timestep + [0] * (
+            num_deferrable_loads - len(def_start_timestep)
+        )
+        def_end_timestep = def_end_timestep + [0] * (
+            num_deferrable_loads - len(def_end_timestep)
+        )

         #### The LP problem using Pulp ####
         opt_model = plp.LpProblem("LP_Model", plp.LpMaximize)
@@ -473,71 +491,145 @@ class Optimization:
             for i in set_I
         }

-        # Constraint for hybrid inverter and curtailment cases
-        if isinstance(self.plant_conf["pv_module_model"], list):
-            P_nom_inverter = 0.0
-            for i in range(len(self.plant_conf["pv_inverter_model"])):
-                if isinstance(self.plant_conf["pv_inverter_model"][i], str):
-                    cec_inverters = bz2.BZ2File(
-                        self.emhass_conf["root_path"] / "data" / "cec_inverters.pbz2",
-                        "rb",
-                    )
-                    cec_inverters = cPickle.load(cec_inverters)
-                    inverter = cec_inverters[self.plant_conf["pv_inverter_model"][i]]
-                    P_nom_inverter += inverter.Paco
-                else:
-                    P_nom_inverter += self.plant_conf["pv_inverter_model"][i]
-        else:
-            if isinstance(self.plant_conf["pv_inverter_model"][i], str):
-                cec_inverters = bz2.BZ2File(
-                    self.emhass_conf["root_path"] / "data" / "cec_inverters.pbz2", "rb"
-                )
-                cec_inverters = cPickle.load(cec_inverters)
-                inverter = cec_inverters[self.plant_conf["pv_inverter_model"]]
-                P_nom_inverter = inverter.Paco
-            else:
-                P_nom_inverter = self.plant_conf["pv_inverter_model"]
         if self.plant_conf["inverter_is_hybrid"]:
+            P_nom_inverter_output = self.plant_conf.get("inverter_ac_output_max", None)
+            P_nom_inverter_input = self.plant_conf.get("inverter_ac_input_max", None)
+
+            # Fallback to legacy pv_inverter_model for output power if new setting is not provided
+            if P_nom_inverter_output is None:
+                if "pv_inverter_model" in self.plant_conf:
+                    if isinstance(self.plant_conf["pv_inverter_model"], list):
+                        P_nom_inverter_output = 0.0
+                        for i in range(len(self.plant_conf["pv_inverter_model"])):
+                            if isinstance(self.plant_conf["pv_inverter_model"][i], str):
+                                cec_inverters = bz2.BZ2File(
+                                    self.emhass_conf["root_path"]
+                                    / "data"
+                                    / "cec_inverters.pbz2",
+                                    "rb",
+                                )
+                                cec_inverters = cPickle.load(cec_inverters)
+                                inverter = cec_inverters[
+                                    self.plant_conf["pv_inverter_model"][i]
+                                ]
+                                P_nom_inverter_output += inverter.Paco
+                            else:
+                                P_nom_inverter_output += self.plant_conf[
+                                    "pv_inverter_model"
+                                ][i]
+                    else:
+                        if isinstance(self.plant_conf["pv_inverter_model"], str):
+                            cec_inverters = bz2.BZ2File(
+                                self.emhass_conf["root_path"]
+                                / "data"
+                                / "cec_inverters.pbz2",
+                                "rb",
+                            )
+                            cec_inverters = cPickle.load(cec_inverters)
+                            inverter = cec_inverters[
+                                self.plant_conf["pv_inverter_model"]
+                            ]
+                            P_nom_inverter_output = inverter.Paco
+                        else:
+                            P_nom_inverter_output = self.plant_conf["pv_inverter_model"]
+
+            if P_nom_inverter_input is None:
+                P_nom_inverter_input = P_nom_inverter_output
+
+            # Get efficiency parameters, defaulting to 100%
+            eff_dc_ac = self.plant_conf.get("inverter_efficiency_dc_ac", 1.0)
+            eff_ac_dc = self.plant_conf.get("inverter_efficiency_ac_dc", 1.0)
+
+            # Calculate the maximum allowed DC power flows based on AC limits and efficiency.
+            P_dc_ac_max = P_nom_inverter_output / eff_dc_ac
+            P_ac_dc_max = P_nom_inverter_input * eff_ac_dc
+
+            # Define unidirectional DC power flow variables with the tight, calculated bounds.
+            P_dc_ac = {
+                (i): plp.LpVariable(
+                    cat="Continuous",
+                    lowBound=0,
+                    upBound=P_dc_ac_max,
+                    name=f"P_dc_ac_{i}",
+                )
+                for i in set_I
+            }
+            P_ac_dc = {
+                (i): plp.LpVariable(
+                    cat="Continuous",
+                    lowBound=0,
+                    upBound=P_ac_dc_max,
+                    name=f"P_ac_dc_{i}",
+                )
+                for i in set_I
+            }
+            # Binary variable to enforce unidirectional flow
+            is_dc_sourcing = {
+                (i): plp.LpVariable(cat="Binary", name=f"is_dc_sourcing_{i}")
+                for i in set_I
+            }
+
+            # Define the core energy balance equations for each timestep
+            for i in set_I:
+                # The net DC power from PV and battery must equal the net DC flow of the inverter
+                constraints.update(
+                    {
+                        f"constraint_dc_bus_balance_{i}": plp.LpConstraint(
+                            e=(
+                                P_PV[i]
+                                - P_PV_curtailment[i]
+                                + P_sto_pos[i]
+                                + P_sto_neg[i]
+                            )
+                            - (P_dc_ac[i] - P_ac_dc[i]),
+                            sense=plp.LpConstraintEQ,
+                            rhs=0,
+                        )
+                    }
+                )
+
+                # The AC power is defined by the efficiency-adjusted DC flows
+                constraints.update(
+                    {
+                        f"constraint_ac_bus_balance_{i}": plp.LpConstraint(
+                            e=P_hybrid_inverter[i]
+                            - ((P_dc_ac[i] * eff_dc_ac) - (P_ac_dc[i] / eff_ac_dc)),
+                            sense=plp.LpConstraintEQ,
+                            rhs=0,
+                        )
+                    }
+                )
+
+                # Use the binary variable to ensure only one direction is active at a time
+                constraints.update(
+                    {
+                        # If is_dc_sourcing = 1 (DC->AC is active), then P_ac_dc must be 0.
+                        f"constraint_enforce_ac_dc_zero_{i}": plp.LpConstraint(
+                            e=P_ac_dc[i] - (1 - is_dc_sourcing[i]) * P_ac_dc_max,
+                            sense=plp.LpConstraintLE,
+                            rhs=0,
+                        ),
+                        # If is_dc_sourcing = 0 (AC->DC is active), then P_dc_ac must be 0.
+                        f"constraint_enforce_dc_ac_zero_{i}": plp.LpConstraint(
+                            e=P_dc_ac[i] - is_dc_sourcing[i] * P_dc_ac_max,
+                            sense=plp.LpConstraintLE,
+                            rhs=0,
+                        ),
+                    }
+                )
+
+        # Apply curtailment constraint if enabled, regardless of inverter type
+        if self.plant_conf["compute_curtailment"]:
             constraints.update(
                 {
-                    f"constraint_hybrid_inverter1_{i}": plp.LpConstraint(
-                        e=P_PV[i]
-                        - P_PV_curtailment[i]
-                        + P_sto_pos[i]
-                        + P_sto_neg[i]
-                        - P_nom_inverter,
+                    f"constraint_curtailment_{i}": plp.LpConstraint(
+                        e=P_PV_curtailment[i] - max(P_PV[i], 0),
                         sense=plp.LpConstraintLE,
                         rhs=0,
                     )
                     for i in set_I
                 }
             )
-            constraints.update(
-                {
-                    f"constraint_hybrid_inverter2_{i}": plp.LpConstraint(
-                        e=P_PV[i]
-                        - P_PV_curtailment[i]
-                        + P_sto_pos[i]
-                        + P_sto_neg[i]
-                        - P_hybrid_inverter[i],
-                        sense=plp.LpConstraintEQ,
-                        rhs=0,
-                    )
-                    for i in set_I
-                }
-            )
-        else:
-            if self.plant_conf["compute_curtailment"]:
-                constraints.update(
-                    {
-                        f"constraint_curtailment_{i}": plp.LpConstraint(
-                            e=P_PV_curtailment[i] - max(P_PV[i], 0),
-                            sense=plp.LpConstraintLE,
-                            rhs=0,
-                        )
-                        for i in set_I
-                    }
-                )

         # Two special constraints just for a self-consumption cost function
         if self.costfun == "self-consumption":
@@ -591,7 +683,9 @@ class Optimization:
             if isinstance(
                 self.optim_conf["nominal_power_of_deferrable_loads"][k], list
             ):
-                self.logger.debug(f"Load {k} is sequence-based. Sequence: {self.optim_conf['nominal_power_of_deferrable_loads'][k]}")
+                self.logger.debug(
+                    f"Load {k} is sequence-based. Sequence: {self.optim_conf['nominal_power_of_deferrable_loads'][k]}"
+                )
                 # Constraint for sequence of deferrable
                 # WARNING: This is experimental, formulation seems correct but feasibility problems.
                 # Probably uncomptabile with other constraints
@@ -609,7 +703,9 @@ class Optimization:
                 y = plp.LpVariable.dicts(
                     f"y{k}", (i for i in range(len(matrix))), cat="Binary"
                 )
-                self.logger.debug(f"Load {k}: Created binary variables for sequence placement: y = {list(y.keys())}")
+                self.logger.debug(
+                    f"Load {k}: Created binary variables for sequence placement: y = {list(y.keys())}"
+                )
                 constraints.update(
                     {
                         f"single_value_constraint_{k}": plp.LpConstraint(
@@ -664,12 +760,16 @@ class Optimization:
                 cooling_constant = hc["cooling_constant"]
                 heating_rate = hc["heating_rate"]
                 overshoot_temperature = hc["overshoot_temperature"]
-                outdoor_temperature_forecast = data_opt["outdoor_temperature_forecast"]
+                outdoor_temperature_forecast = data_opt[
+                    "outdoor_temperature_forecast"
+                ]
                 desired_temperatures = hc["desired_temperatures"]
                 sense = hc.get("sense", "heat")
                 sense_coeff = 1 if sense == "heat" else -1

-                self.logger.debug(f"Load {k}: Thermal parameters: start_temperature={start_temperature}, cooling_constant={cooling_constant}, heating_rate={heating_rate}, overshoot_temperature={overshoot_temperature}")
+                self.logger.debug(
+                    f"Load {k}: Thermal parameters: start_temperature={start_temperature}, cooling_constant={cooling_constant}, heating_rate={heating_rate}, overshoot_temperature={overshoot_temperature}"
+                )

                 predicted_temp = [start_temperature]
                 for Id in set_I:
@@ -682,7 +782,9 @@ class Optimization:
                             * (
                                 heating_rate
                                 * self.timeStep
-                                / self.optim_conf["nominal_power_of_deferrable_loads"][k]
+                                / self.optim_conf[
+                                    "nominal_power_of_deferrable_loads"
+                                ][k]
                             )
                         )
                         - (
@@ -694,9 +796,7 @@ class Optimization:
                         )
                     )

-                    is_overshoot = plp.LpVariable(
-                        f"defload_{k}_overshoot_{Id}"
-                    )
+                    is_overshoot = plp.LpVariable(f"defload_{k}_overshoot_{Id}")
                     constraints.update(
                         {
                             f"constraint_defload{k}_overshoot_{Id}_1": plp.LpConstraint(
@@ -732,9 +832,10 @@ class Optimization:
                             "penalty_factor must be positive, otherwise the problem will become unsolvable"
                         )
                     penalty_value = (
-                        predicted_temp[Id]
-                        - desired_temperatures[Id]
-                    ) * penalty_factor * sense_coeff
+                        (predicted_temp[Id] - desired_temperatures[Id])
+                        * penalty_factor
+                        * sense_coeff
+                    )
                     penalty_var = plp.LpVariable(
                         f"defload_{k}_thermal_penalty_{Id}",
                         cat="Continuous",
@@ -755,13 +856,14 @@ class Optimization:
                     self.logger.debug(f"Load {k}: Thermal constraints set.")

             # --- Standard/non-thermal deferrable load logic comes after thermal ---
-            elif (
-                (def_total_timestep and def_total_timestep[k] > 0)
-                or (len(def_total_hours) > k and def_total_hours[k] > 0)
-            ):
+            elif (def_total_timestep and def_total_timestep[k] > 0) or (
+                len(def_total_hours) > k and def_total_hours[k] > 0
+            ):
                 self.logger.debug(f"Load {k} is standard/non-thermal.")
                 if def_total_timestep and def_total_timestep[k] > 0:
-                    self.logger.debug(f"Load {k}: Using total timesteps constraint: {def_total_timestep[k]}")
+                    self.logger.debug(
+                        f"Load {k}: Using total timesteps constraint: {def_total_timestep[k]}"
+                    )
                     constraints.update(
                         {
                             f"constraint_defload{k}_energy": plp.LpConstraint(
@@ -770,12 +872,16 @@ class Optimization:
                                 ),
                                 sense=plp.LpConstraintEQ,
                                 rhs=(self.timeStep * def_total_timestep[k])
-                                * self.optim_conf["nominal_power_of_deferrable_loads"][k],
+                                * self.optim_conf["nominal_power_of_deferrable_loads"][
+                                    k
+                                ],
                             )
                         }
                     )
                 else:
-                    self.logger.debug(f"Load {k}: Using total hours constraint: {def_total_hours[k]}")
+                    self.logger.debug(
+                        f"Load {k}: Using total hours constraint: {def_total_hours[k]}"
+                    )
                     constraints.update(
                         {
                             f"constraint_defload{k}_energy": plp.LpConstraint(
@@ -784,13 +890,14 @@ class Optimization:
                                 ),
                                 sense=plp.LpConstraintEQ,
                                 rhs=def_total_hours[k]
-                                * self.optim_conf["nominal_power_of_deferrable_loads"][k],
+                                * self.optim_conf["nominal_power_of_deferrable_loads"][
+                                    k
+                                ],
                             )
                         }
                     )
                 self.logger.debug(f"Load {k}: Standard load constraints set.")

-
             # Ensure deferrable loads consume energy between def_start_timestep & def_end_timestep
             self.logger.debug(
                 f"Deferrable load {k}: Proposed optimization window: {def_start_timestep[k]} --> {def_end_timestep[k]}"
@@ -799,10 +906,7 @@ class Optimization:
             def_start, def_end, warning = Optimization.validate_def_timewindow(
                 def_start_timestep[k],
                 def_end_timestep[k],
-                ceil(
-                    (60 / ((self.freq.seconds / 60) * def_total_timestep[k]))
-                    / self.timeStep
-                ),
+                ceil(def_total_timestep[k]),
                 n,
             )
         else:
@@ -844,6 +948,20 @@ class Optimization:
                 }
             )

+            # Constraint for the minimum power of deferrable loads using the big-M method.
+            # This enforces: P_deferrable = 0 OR P_deferrable >= min_power.
+            if min_power_of_deferrable_loads[k] > 0:
+                self.logger.debug(f"Applying minimum power constraint for deferrable load {k}: {min_power_of_deferrable_loads[k]} W")
+                constraints.update(
+                    {
+                        f"constraint_pdef{k}_min_power_{i}": plp.LpConstraint(
+                            e=P_deferrable[k][i] - (min_power_of_deferrable_loads[k] * P_def_bin2[k][i]),
+                            sense=plp.LpConstraintGE,
+                            rhs=0
+                        ) for i in set_I
+                    }
+                )
+
             # Treat the number of starts for a deferrable load (new method considering current state)
             current_state = 0
             if (
@@ -923,16 +1041,7 @@ class Optimization:
                     f"constraint_pdef{k}_start5": plp.LpConstraint(
                         e=plp.lpSum(P_def_bin2[k][i] for i in set_I),
                         sense=plp.LpConstraintEQ,
-                        rhs=(
-                            (
-                                60
-                                / (
-                                    (self.freq.seconds / 60)
-                                    * def_total_timestep[k]
-                                )
-                            )
-                            / self.timeStep
-                        ),
+                        rhs=def_total_timestep[k],
                     )
                 }
             )
@@ -1142,18 +1251,27 @@ class Optimization:
         timeout = self.optim_conf["lp_solver_timeout"]
         # solving with default solver CBC
         if self.lp_solver == "PULP_CBC_CMD":
-            opt_model.solve(PULP_CBC_CMD(msg=0, timeLimit=timeout, threads=self.num_threads))
+            opt_model.solve(
+                PULP_CBC_CMD(msg=0, timeLimit=timeout, threads=self.num_threads)
+            )
         elif self.lp_solver == "GLPK_CMD":
             opt_model.solve(GLPK_CMD(msg=0, timeLimit=timeout))
         elif self.lp_solver == "HiGHS":
             opt_model.solve(HiGHS(msg=0, timeLimit=timeout))
         elif self.lp_solver == "COIN_CMD":
             opt_model.solve(
-                COIN_CMD(msg=0, path=self.lp_solver_path, timeLimit=timeout, threads=self.num_threads)
+                COIN_CMD(
+                    msg=0,
+                    path=self.lp_solver_path,
+                    timeLimit=timeout,
+                    threads=self.num_threads,
+                )
             )
         else:
             self.logger.warning("Solver %s unknown, using default", self.lp_solver)
-            opt_model.solve(PULP_CBC_CMD(msg=0, timeLimit=timeout, threads=self.num_threads))
+            opt_model.solve(
+                PULP_CBC_CMD(msg=0, timeLimit=timeout, threads=self.num_threads)
+            )

         # The status of the solution is printed to the screen
         self.optim_status = plp.LpStatus[opt_model.status]
@@ -1317,7 +1435,9 @@ class Optimization:

         # Battery initialization logging
         if self.optim_conf["set_use_battery"]:
-            self.logger.debug(f"Battery usage enabled. Initial SOC: {soc_init}, Final SOC: {soc_final}")
+            self.logger.debug(
+                f"Battery usage enabled. Initial SOC: {soc_init}, Final SOC: {soc_final}"
+            )

         # Deferrable load initialization logging
         self.logger.debug(f"Deferrable load operating hours: {def_total_hours}")
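The mutual-exclusion trick used for the new inverter flows (and for the minimum-power constraint) is the classic big-M pattern. A self-contained PuLP toy illustrating it, assuming 1000 W limits and unity efficiency rather than any real plant configuration:

```python
import pulp as plp

P_dc_ac_max, P_ac_dc_max = 1000.0, 1000.0  # assumed AC limits, unity efficiency

model = plp.LpProblem("hybrid_inverter_toy", plp.LpMaximize)
p_dc_ac = plp.LpVariable("p_dc_ac", lowBound=0, upBound=P_dc_ac_max)
p_ac_dc = plp.LpVariable("p_ac_dc", lowBound=0, upBound=P_ac_dc_max)
is_dc_sourcing = plp.LpVariable("is_dc_sourcing", cat="Binary")

# is_dc_sourcing == 1 forces p_ac_dc to 0; is_dc_sourcing == 0 forces p_dc_ac to 0
model += p_ac_dc <= (1 - is_dc_sourcing) * P_ac_dc_max
model += p_dc_ac <= is_dc_sourcing * P_dc_ac_max
model += p_dc_ac - p_ac_dc  # toy objective: maximize net export
model.solve(plp.PULP_CBC_CMD(msg=0))
print(plp.value(p_dc_ac), plp.value(p_ac_dc))  # -> 1000.0 0.0
```

Because each flow variable is bounded by its own big-M term, at most one of the two can be nonzero in any feasible solution, which is exactly what the per-timestep `constraint_enforce_*_zero_{i}` pairs above enforce.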
emhass/retrieve_hass.py
CHANGED
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-
 import copy
 import datetime
 import json
@@ -109,7 +107,9 @@ class RetrieveHass:
         try:
             self.ha_config = response_config.json()
         except Exception:
-            self.logger.error("EMHASS was unable to obtain configuration data from Home Assistant")
+            self.logger.error(
+                "EMHASS was unable to obtain configuration data from Home Assistant"
+            )
             return False

     def get_data(
@@ -204,7 +204,9 @@ class RetrieveHass:
             )
             return False
         if response.status_code > 299:
-            self.logger.error(f"Home assistant request GET error: {response.status_code} for var {var}")
+            self.logger.error(
+                f"Home assistant request GET error: {response.status_code} for var {var}"
+            )
             return False
         """import bz2  # Uncomment to save a serialized data for tests
         import _pickle as cPickle
@@ -268,7 +270,7 @@ class RetrieveHass:
                 ).max()
                 ts = pd.to_datetime(
                     pd.date_range(start=from_date, end=to_date, freq=self.freq),
-                    format="%Y-%d-%m %H:%M"
+                    format="%Y-%d-%m %H:%M",
                 ).round(self.freq, ambiguous="infer", nonexistent="shift_forward")
                 df_day = pd.DataFrame(index=ts)
                 # Caution with undefined string data: unknown, unavailable, etc.
emhass/static/data/param_definitions.json
CHANGED
@@ -187,6 +187,30 @@
         "input": "boolean",
         "default_value": false
     },
+    "inverter_ac_output_max": {
+        "friendly_name": "Max hybrid inverter AC output power",
+        "Description": "Maximum hybrid inverter output power from combined PV and battery discharge.",
+        "input": "int",
+        "default_value": 0
+    },
+    "inverter_ac_input_max": {
+        "friendly_name": "Max hybrid inverter AC input power",
+        "Description": "Maximum hybrid inverter input power from grid to charge battery.",
+        "input": "int",
+        "default_value": 0
+    },
+    "inverter_efficiency_dc_ac": {
+        "friendly_name": "Hybrid inverter efficency DC to AC",
+        "Description": "Hybrid inverter efficiency from the DC bus to AC output. (percentage/100)",
+        "input": "float",
+        "default_value": 1.0
+    },
+    "inverter_efficiency_ac_dc": {
+        "friendly_name": "Hybrid inverter efficency AC to DC",
+        "Description": "Hybrid inverter efficiency when charging from the AC input to DC bus. (percentage/100)",
+        "input": "float",
+        "default_value": 1.0
+    },
     "compute_curtailment": {
         "friendly_name": "Set compute curtailment (grid export limit)",
         "Description": "Set to True to compute a special PV curtailment variable (Default False)",
@@ -484,6 +508,12 @@
         "Description": "The desired battery state of charge at the end of each optimization cycle. (percentage/100)",
         "input": "float",
         "default_value": 0.6
+    },
+    "ignore_pv_feedback_during_curtailment": {
+        "friendly_name": "Ignore PV feedback during curtailment",
+        "Description": "When set to true, prevents PV forecast from being updated with real PV data, avoiding flip-flop behavior during curtailment operations",
+        "input": "bool",
+        "default_value": false
     }
 }
}
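The new `ignore_pv_feedback_during_curtailment` entry is a runtime parameter, so it can also be passed per-call instead of being stored in `config.json`. A hypothetical invocation against a default local install (endpoint, port, and the alpha/beta values are assumptions for illustration):

```python
import requests

# Pass the flag at runtime alongside the usual forecast-mixing weights
requests.post(
    "http://localhost:5000/action/naive-mpc-optim",
    json={"ignore_pv_feedback_during_curtailment": True, "alpha": 0.5, "beta": 0.5},
    timeout=60,
)
```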
emhass/utils.py
CHANGED
@@ -1,4 +1,3 @@
-#!/usr/bin/env python3
 from __future__ import annotations

 import ast
@@ -120,12 +119,12 @@ def get_forecast_dates(

     """
     freq = pd.to_timedelta(freq, "minutes")
-    start_forecast = pd.Timestamp(
-        datetime.now(),
-        tz=time_zone,
-    ).floor(freq=freq).replace(
-        microsecond=0
+    start_forecast = (
+        pd.Timestamp(datetime.now(), tz=time_zone)
+        .replace(microsecond=0)
+        .floor(freq=freq)
     )
+    end_forecast = start_forecast + pd.Timedelta(days=delta_forecast)
     forecast_dates = (
         pd.date_range(
             start=start_forecast,
@@ -409,11 +408,29 @@ def treat_runtimeparams(
             runtimeparams.get("delta_forecast_daily", None) is not None
             or runtimeparams.get("delta_forecast", None) is not None
         ):
-            delta_forecast = runtimeparams.get(
-                "delta_forecast_daily",
-                runtimeparams.get("delta_forecast"),
+            # Use old param name delta_forecast (if provided) for backwards compatibility
+            delta_forecast = runtimeparams.get("delta_forecast", None)
+            # Prefer new param name delta_forecast_daily
+            delta_forecast = runtimeparams.get("delta_forecast_daily", delta_forecast)
+            # Ensure delta_forecast is numeric and at least 1 day
+            if delta_forecast is None:
+                logger.warning("delta_forecast_daily is missing so defaulting to 1 day")
+                delta_forecast = 1
+            else:
+                try:
+                    delta_forecast = int(delta_forecast)
+                except ValueError:
+                    logger.warning(
+                        "Invalid delta_forecast_daily value (%s) so defaulting to 1 day",
+                        delta_forecast,
+                    )
+                    delta_forecast = 1
+            if delta_forecast <= 0:
+                logger.warning(
+                    "delta_forecast_daily is too low (%s) so defaulting to 1 day",
+                    delta_forecast,
                 )
-
+                delta_forecast = 1
             params["optim_conf"]["delta_forecast_daily"] = pd.Timedelta(
                 days=delta_forecast
             )
@@ -574,27 +591,49 @@ def treat_runtimeparams(
         # Loop forecasts, check if value is a list and greater than or equal to forecast_dates
         for method, forecast_key in enumerate(list_forecast_key):
             if forecast_key in runtimeparams.keys():
-                if isinstance(runtimeparams[forecast_key], list) and len(
-                    runtimeparams[forecast_key]
-                ) >= len(forecast_dates):
-                    params["passed_data"][forecast_key] = runtimeparams[forecast_key]
+                forecast_input = runtimeparams[forecast_key]
+                if isinstance(forecast_input, dict):
+                    forecast_data_df = pd.DataFrame.from_dict(
+                        forecast_input, orient="index"
+                    ).reset_index()
+                    forecast_data_df.columns = ["time", "value"]
+                    forecast_data_df["time"] = pd.to_datetime(
+                        forecast_data_df["time"], format="ISO8601", utc=True
+                    ).dt.tz_convert(time_zone)
+
+                    # align index with forecast_dates
+                    forecast_data_df = (
+                        forecast_data_df.resample(
+                            pd.to_timedelta(optimization_time_step, "minutes"),
+                            on="time",
+                        )
+                        .aggregate({"value": "mean"})
+                        .reindex(forecast_dates, method="nearest")
+                    )
+                    forecast_data_df["value"] = (
+                        forecast_data_df["value"].ffill().bfill()
+                    )
+                    forecast_input = forecast_data_df["value"].tolist()
+                if isinstance(forecast_input, list) and len(forecast_input) >= len(
+                    forecast_dates
+                ):
+                    params["passed_data"][forecast_key] = forecast_input
                     params["optim_conf"][forecast_methods[method]] = "list"
                 else:
                     logger.error(
-                        f"ERROR: The passed data is either not a list or the length is not correct, length should be {str(len(forecast_dates))}"
+                        f"ERROR: The passed data is either the wrong type or the length is not correct, length should be {str(len(forecast_dates))}"
                     )
                     logger.error(
                         f"Passed type is {str(type(runtimeparams[forecast_key]))} and length is {str(len(runtimeparams[forecast_key]))}"
                     )
                 # Check if string contains list, if so extract
-                if isinstance(runtimeparams[forecast_key], str):
-                    if isinstance(ast.literal_eval(runtimeparams[forecast_key]), list):
-                        runtimeparams[forecast_key] = ast.literal_eval(
-                            runtimeparams[forecast_key]
-                        )
+                if isinstance(forecast_input, str):
+                    if isinstance(ast.literal_eval(forecast_input), list):
+                        forecast_input = ast.literal_eval(forecast_input)
+                        runtimeparams[forecast_key] = forecast_input
                 list_non_digits = [
                     x
-                    for x in runtimeparams[forecast_key]
+                    for x in forecast_input
                     if not (isinstance(x, int) or isinstance(x, float))
                 ]
                 if len(list_non_digits) > 0:
@@ -923,6 +962,7 @@ def get_injection_dict(df: pd.DataFrame, plot_size: int | None = 1366) -> dict:
         template="presentation",
         line_shape="hv",
         color_discrete_sequence=colors,
+        render_mode="svg",
     )
     fig_0.update_layout(xaxis_title="Timestamp", yaxis_title="System powers (W)")
     if "SOC_opt" in df.columns.to_list():
@@ -932,6 +972,7 @@ def get_injection_dict(df: pd.DataFrame, plot_size: int | None = 1366) -> dict:
         template="presentation",
         line_shape="hv",
         color_discrete_sequence=colors,
+        render_mode="svg",
     )
     fig_1.update_layout(xaxis_title="Timestamp", yaxis_title="Battery SOC (%)")
     cols_cost = [i for i in df.columns.to_list() if "cost_" in i or "unit_" in i]
@@ -945,6 +986,7 @@ def get_injection_dict(df: pd.DataFrame, plot_size: int | None = 1366) -> dict:
         template="presentation",
         line_shape="hv",
         color_discrete_sequence=colors,
+        render_mode="svg",
     )
     fig_2.update_layout(xaxis_title="Timestamp", yaxis_title="System costs (currency)")
     # Get full path to image
@@ -1638,6 +1680,7 @@ def build_params(
         "end_timesteps_of_each_deferrable_load": None,
         "alpha": None,
         "beta": None,
+        "ignore_pv_feedback_during_curtailment": None,
     }

     return params
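With the new dict handling above, list-type forecasts can also be passed as ISO8601-timestamped maps and are resampled onto the optimizer's forecast dates. A sketch of the accepted shape (timestamps and values are made up):

```python
# Timestamped dict form; utils resamples it to optimization_time_step, reindexes
# onto forecast_dates (nearest match), then forward/back-fills gaps before use.
runtimeparams = {
    "pv_power_forecast": {
        "2025-06-01T00:00:00+00:00": 0.0,
        "2025-06-01T00:30:00+00:00": 120.5,
        "2025-06-01T01:00:00+00:00": 350.0,
    }
}
```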
emhass/web_server.py
CHANGED
@@ -10,10 +10,10 @@ import threading
 from importlib.metadata import PackageNotFoundError, version
 from pathlib import Path

+import jinja2
 import yaml
 from flask import Flask, make_response, request
 from flask import logging as log
-from jinja2 import Environment, PackageLoader
 from waitress import serve

 from emhass.command_line import (
@@ -50,6 +50,10 @@ params_secrets = {}
 continual_publish_thread = []
 injection_dict = {}

+templates = jinja2.Environment(
+    loader=jinja2.PackageLoader("emhass", "templates"),
+)
+

 def create_app(settings_override=None):
     """
@@ -137,14 +141,6 @@ def index():

     """
     app.logger.info("EMHASS server online, serving index.html...")
-    # Load HTML template
-    file_loader = PackageLoader("emhass", "templates")
-    env = Environment(loader=file_loader)
-    # check if index.html exists
-    if "index.html" not in env.list_templates():
-        app.logger.error("Unable to find index.html in emhass module")
-        return make_response(["ERROR: unable to find index.html in emhass module"], 404)
-    template = env.get_template("index.html")
     # Load cached dict (if exists), to present generated plot tables
     if (emhass_conf["data_path"] / "injection_dict.pkl").exists():
         with open(str(emhass_conf["data_path"] / "injection_dict.pkl"), "rb") as fid:
@@ -159,6 +155,7 @@ def index():
     # basename = request.headers.get("X-Ingress-Path", "")
     # return make_response(template.render(injection_dict=injection_dict, basename=basename))

+    template = templates.get_template("index.html")
     return make_response(template.render(injection_dict=injection_dict))


@@ -174,16 +171,8 @@ def configuration():
     if (emhass_conf["data_path"] / "params.pkl").exists():
         with open(str(emhass_conf["data_path"] / "params.pkl"), "rb") as fid:
             emhass_conf["config_path"], params = pickle.load(fid)
-    # Load HTML template
-    file_loader = PackageLoader("emhass", "templates")
-    env = Environment(loader=file_loader)
-    # check if configuration.html exists
-    if "configuration.html" not in env.list_templates():
-        app.logger.error("Unable to find configuration.html in emhass module")
-        return make_response(
-            ["ERROR: unable to find configuration.html in emhass module"], 404
-        )
-    template = env.get_template("configuration.html")
+
+    template = templates.get_template("configuration.html")
     return make_response(template.render(config=params))


@@ -195,15 +184,6 @@ def template_action():

     """
     app.logger.info(" >> Sending rendered template table data")
-    file_loader = PackageLoader("emhass", "templates")
-    env = Environment(loader=file_loader)
-    # Check if template.html exists
-    if "template.html" not in env.list_templates():
-        app.logger.error("Unable to find template.html in emhass module")
-        return make_response(
-            ["WARNING: unable to find template.html in emhass module"], 404
-        )
-    template = env.get_template("template.html")
     if (emhass_conf["data_path"] / "injection_dict.pkl").exists():
         with open(str(emhass_conf["data_path"] / "injection_dict.pkl"), "rb") as fid:
             injection_dict = pickle.load(fid)
@@ -211,6 +191,8 @@ def template_action():
         app.logger.warning("Unable to obtain plot data from injection_dict.pkl")
         app.logger.warning("Try running an launch an optimization task")
         injection_dict = {}
+
+    template = templates.get_template("template.html")
     return make_response(template.render(injection_dict=injection_dict))

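The refactor above builds a single module-level `jinja2.Environment` instead of re-creating one (and re-checking `list_templates()`) inside every route. An `Environment` caches compiled templates, so `get_template()` becomes cheap after first use, and a missing template now surfaces as a `jinja2.TemplateNotFound` exception rather than a handcrafted 404. A minimal sketch of the pattern (the `render_index` helper is illustrative):

```python
import jinja2

# Built once at import time; compiled templates are cached inside the Environment
templates = jinja2.Environment(loader=jinja2.PackageLoader("emhass", "templates"))

def render_index(injection_dict: dict) -> str:
    # Cheap after the first call thanks to the Environment's template cache
    return templates.get_template("index.html").render(injection_dict=injection_dict)
```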
{emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: emhass
-Version: 0.13.3
+Version: 0.13.5
 Summary: An Energy Management System for Home Assistant
 Project-URL: Homepage, https://github.com/davidusb-geek/emhass
 Project-URL: Source, https://github.com/davidusb-geek/emhass
@@ -118,17 +118,10 @@ Description-Content-Type: text/markdown
 </div>

 <br>
-<p align="center">
-
-</p>
-<p align="center">
-  <a href="https://www.buymeacoffee.com/davidusbgeek" target="_blank">
-    <img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" >
-  </a>
+<p align="left">
+  EMHASS is a Python module designed to optimize your home energy interfacing with Home Assistant.
 </p>

-EHMASS is a Python module designed to optimize your home energy interfacing with Home Assistant.
-
 ## Introduction

 EMHASS (Energy Management for Home Assistant) is an optimization tool designed for residential households. The package uses a Linear Programming approach to optimize energy usage while considering factors such as electricity prices, power generation from solar panels, and energy storage from batteries. EMHASS provides a high degree of configurability, making it easy to integrate with Home Assistant and other smart home systems. Whether you have solar panels, energy storage, or just a controllable load, EMHASS can provide an optimized daily schedule for your devices, allowing you to save money and minimize your environmental impact.
@@ -197,12 +190,13 @@ _Note: Both EMHASS via Docker and EMHASS-Add-on contain the same Docker image. T

 ### Method 2) Running EMHASS in Docker

-You can also install EMHASS using Docker as a container. This can be in the same machine as Home Assistant (if your running Home Assistant as a Docker container) or in a different distant machine.
+You can also install EMHASS using Docker as a container. This can be in the same machine as Home Assistant (if your running Home Assistant as a Docker container) or in a different distant machine. The "share" folder is where EMHASS stores the config.json file. In the examples below adjust the "-v" volume mappings to reflect where your path to the local host directory needs to be mapped to.
+To install first pull the latest image:
 ```bash
 # pull Docker image
 docker pull ghcr.io/davidusb-geek/emhass:latest
-# run Docker image, mounting config.json and secrets_emhass.yaml from host
-docker run --rm -it --restart always -p 5000:5000 --name emhass-container -v
+# run Docker image, mounting the dir storing config.json and secrets_emhass.yaml from host
+docker run --rm -it --restart always -p 5000:5000 --name emhass-container -v /emhass/share:/share/ -v /emhass/secrets_emhass.yaml:/app/secrets_emhass.yaml ghcr.io/davidusb-geek/emhass:latest
 ```
 *Note it is not recommended to install the latest EMHASS image with `:latest` *(as you would likely want to control when you update EMHASS version)*. Instead, find the [latest version tag](https://github.com/davidusb-geek/emhass/pkgs/container/emhass) (E.g: `v0.2.1`) and replace `latest`*
@@ -216,7 +210,7 @@ cd emhass
 # may need to set architecture tag (docker build --build-arg TARGETARCH=amd64 -t emhass-local .)
 docker build -t emhass-local .
 # run built Docker image, mounting config.json and secrets_emhass.yaml from host
-docker run --rm -it -p 5000:5000 --name emhass-container -v
+docker run --rm -it -p 5000:5000 --name emhass-container -v /emhass/share:/share -v /emhass/secrets_emhass.yaml:/app/secrets_emhass.yaml emhass-local
 ```

 Before running the docker container, make sure you have a designated folder for emhass on your host device and a `secrets_emhass.yaml` file. You can get a example of the secrets file from [`secrets_emhass(example).yaml`](https://github.com/davidusb-geek/emhass/blob/master/secrets_emhass(example).yaml) file on this repository.
@@ -232,23 +226,23 @@ Latitude: 45.83
 Longitude: 6.86
 Altitude: 4807.8
 EOT
-docker run --rm -it --restart always -p 5000:5000 --name emhass-container -v
+docker run --rm -it --restart always -p 5000:5000 --name emhass-container -v /emhass/share:/share -v /emhass/secrets_emhass.yaml:/app/secrets_emhass.yaml ghcr.io/davidusb-geek/emhass:latest
 ```

 #### Docker, things to note

-- You can create a `config.json` file prior to running emhass. *(obtain a example from: [config_defaults.json](https://github.com/davidusb-geek/emhass/blob/enhass-standalone-addon-merge/src/emhass/data/config_defaults.json)* Alteratively, you can insert your parameters into the configuration page on the EMHASS web server. (for EMHASS to auto create a config.json) With either option, the volume mount `-v
+- You can create a `config.json` file prior to running emhass. *(obtain a example from: [config_defaults.json](https://github.com/davidusb-geek/emhass/blob/enhass-standalone-addon-merge/src/emhass/data/config_defaults.json)* Alteratively, you can insert your parameters into the configuration page on the EMHASS web server. (for EMHASS to auto create a config.json) With either option, the volume mount `-v /emhass/share:/share` should be applied to make sure your config is stored on the host device. (to be not deleted when the EMHASS container gets removed/image updated)*

 - If you wish to keep a local, semi-persistent copy of the EMHASS-generated data, create a local folder on your device, then mount said folder inside the container.
 ```bash
 #create data folder
 mkdir -p ~/emhass/data
-docker run -it --restart always -p 5000:5000 -e LOCAL_COSTFUN="profit" -v
+docker run -it --restart always -p 5000:5000 -e LOCAL_COSTFUN="profit" -v /emhass/share:/share -v /emhass/data:/data -v /emhass/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS <REPOSITORY:TAG>
 ```

 - If you wish to set the web_server's homepage optimization diagrams to a timezone other than UTC, set `TZ` environment variable on docker run:
 ```bash
-docker run -it --restart always -p 5000:5000 -e TZ="Europe/Paris" -v
+docker run -it --restart always -p 5000:5000 -e TZ="Europe/Paris" -v /emhass/share:/share -v /emhass/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS <REPOSITORY:TAG>
 ```
 ### Method 3) Legacy method using a Python virtual environment *(Legacy CLI)*
 If you wish to run EMHASS optimizations with cli commands. *(no persistent web server session)* you can run EMHASS via the python package alone *(not wrapped in a Docker container)*.
@@ -320,12 +314,22 @@ Additional optimization strategies were developed later, that can be used in com

 ### Dayahead Optimization - Method 1) Add-on and docker standalone

-In `configuration.yaml`:
+We can use the `shell_command` integration in `configuration.yaml`:
 ```yaml
 shell_command:
   dayahead_optim: "curl -i -H \"Content-Type:application/json\" -X POST -d '{}' http://localhost:5000/action/dayahead-optim"
   publish_data: "curl -i -H \"Content-Type:application/json\" -X POST -d '{}' http://localhost:5000/action/publish-data"
 ```
+An alternative that will be useful when passing data at runtime (see dedicated section), we can use the the `rest_command` instead:
+```yaml
+rest_command:
+  url: http://127.0.0.1:5000/action/dayahead-optim
+  method: POST
+  headers:
+    content-type: application/json
+  payload: >-
+    {}
+```
 ### Dayahead Optimization - Method 2) Legacy method using a Python virtual environment

 In `configuration.yaml`:
@@ -388,8 +392,8 @@ In `automations.yaml`:
 ```
 in configuration page/`config.json`
 ```json
-
-
+"method_ts_round": "first"
+"continual_publish": true
 ```
 In this automation, the day-ahead optimization is performed once a day, every day at 5:30am.
 If the `optimization_time_step` parameter is set to `30` *(default)* in the configuration, the results of the day-ahead optimization will generate 48 values *(for each entity)*, a value for every 30 minutes in a day *(i.e. 24 hrs x 2)*.
@@ -543,7 +547,7 @@ For users who wish to have full control of exactly when they would like to run a

 in configuration page/`config.json` :
 ```json
-
+"continual_publish": false
 ```
 POST action :
 ```bash
@@ -667,6 +671,25 @@

 curl -i -H 'Content-Type:application/json' -X POST -d '{"pv_power_forecast":[0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93], "prediction_horizon":10, "soc_init":0.5,"soc_final":0.6,"operating_hours_of_each_deferrable_load":[1,3],"start_timesteps_of_each_deferrable_load":[0,3],"end_timesteps_of_each_deferrable_load":[0,6]}' http://localhost:5000/action/naive-mpc-optim
 ```

+For a more readable option we can use the `rest_command` integration:
+```yaml
+rest_command:
+  url: http://127.0.0.1:5000/action/dayahead-optim
+  method: POST
+  headers:
+    content-type: application/json
+  payload: >-
+    {
+    "pv_power_forecast": [0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93],
+    "prediction_horizon":10,
+    "soc_init":0.5,
+    "soc_final":0.6,
+    "operating_hours_of_each_deferrable_load":[1,3],
+    "start_timesteps_of_each_deferrable_load":[0,3],
+    "end_timesteps_of_each_deferrable_load":[0,6]
+    }
+```
+
 ## A machine learning forecaster

 Starting in v0.4.0 a new machine learning forecaster class was introduced.
{emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/RECORD
CHANGED
@@ -1,16 +1,16 @@
 emhass/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-emhass/command_line.py,sha256=
-emhass/forecast.py,sha256=
-emhass/machine_learning_forecaster.py,sha256=
+emhass/command_line.py,sha256=XrD4uOONtaCDUQ3TMjJwDDIuOjyO_K8BZH3r_TBN1Eg,76048
+emhass/forecast.py,sha256=hLau1KjvwHM6jRQtWyhFLMf3fehdHIQlWG7A_inToKI,85951
+emhass/machine_learning_forecaster.py,sha256=tRC9P94-gMAPJsxWilT_jeful1Xmcev4PfMmuGFdzIk,16258
 emhass/machine_learning_regressor.py,sha256=Ih1q-vUWHWbGFxv9F12omwgyMRp-iaMU_m-yploVbyU,9532
-emhass/optimization.py,sha256=
-emhass/retrieve_hass.py,sha256=
-emhass/utils.py,sha256=
-emhass/web_server.py,sha256=
-emhass/data/associations.csv,sha256=
+emhass/optimization.py,sha256=kZNqC3InaIUD9Oc7kyxUwThpJLWSpSvBATaRmRJL_3k,73417
+emhass/retrieve_hass.py,sha256=FPWK43NOD8Hq-oLbrJ_M1Sh8h5rOLwfkL1zpRWVBLoE,29593
+emhass/utils.py,sha256=nHY8HStgff-kMhTNoo6Gp4p1gbom2qYltfH1pDFsnow,76025
+emhass/web_server.py,sha256=fwdGHPPu3_wJ3bqrQVctK357iW4Gz57-mVSfIluRs88,28075
+emhass/data/associations.csv,sha256=BIQNjKpr-QC3cMJIRzJ7F2eMGNVubDe0544dYWMIJw4,4418
 emhass/data/cec_inverters.pbz2,sha256=ca-dO6sv38_FI2w_6fkAIzcrEqzFBkG8MHKNGbCZPow,189400
 emhass/data/cec_modules.pbz2,sha256=Y639TNqhaIxh2Ec7AUPxy8k4lQugY5rURVVVexj0fMU,1885444
-emhass/data/config_defaults.json,sha256=
+emhass/data/config_defaults.json,sha256=9GUT1zjEIMxijbLXRjO4_VAoi_x0tsq2pczAQoHuwoI,3287
 emhass/img/emhass_icon.png,sha256=Kyx6hXQ1huJLHAq2CaBfjYXR25H9j99PSWHI0lShkaQ,19030
 emhass/static/advanced.html,sha256=gAhsd14elDwh1Ts4lf9wn_ZkczzzObq5qOimi_la3Ic,2067
 emhass/static/basic.html,sha256=ro2WwWgJyoUhqx_nJFzKCEG8FA8863vSHLmrjGYcEgs,677
@@ -18,15 +18,15 @@ emhass/static/configuration_list.html,sha256=i4v83RVduWjdjkjPhA74e-j8NSUpFzqMGU3
 emhass/static/configuration_script.js,sha256=Ek0Ry1Ae6ZGMl28mYxno6bPTwY4rK7AHcL58C6T6qUo,31727
 emhass/static/script.js,sha256=-JYS8fHjchrMi1hYYKMd9p7vZvPcnYiY8NNuRC99fJM,16323
 emhass/static/style.css,sha256=a_8YlGubn1zoF5RTLJ_Qkrb8tAjUY9p7oAKxhCvJY2s,19288
-emhass/static/data/param_definitions.json,sha256=
+emhass/static/data/param_definitions.json,sha256=aKqXH4Cd3cucAIG2gs2AADNDsTxipDHY0SZ9lxRAqHQ,23330
 emhass/static/img/emhass_icon.png,sha256=Kyx6hXQ1huJLHAq2CaBfjYXR25H9j99PSWHI0lShkaQ,19030
 emhass/static/img/emhass_logo_short.svg,sha256=yzMcqtBRCV8rH84-MwnigZh45_f9Eoqwho9P8nCodJA,66736
 emhass/static/img/feather-sprite.svg,sha256=VHjMJQg88wXa9CaeYrKGhNtyK0xdd47zCqwSIa-hxo8,60319
 emhass/templates/configuration.html,sha256=M-_L__juYzcdGDaryGrz6LG2mguW2f1Sx6k01YfG7Dc,2885
 emhass/templates/index.html,sha256=1V44c0yyliu_z8inl0K-zmmmkhQumH3Bqk8Jj1YJPzY,3076
 emhass/templates/template.html,sha256=TkGgMecQEbFUZA4ymPwMUzNjKHsENvCgroUWbPt7G4Y,158
-emhass-0.13.3.dist-info/METADATA,sha256=
-emhass-0.13.3.dist-info/WHEEL,sha256=
-emhass-0.13.3.dist-info/entry_points.txt,sha256=
-emhass-0.13.3.dist-info/licenses/LICENSE,sha256=
-emhass-0.13.3.dist-info/RECORD,,
+emhass-0.13.5.dist-info/METADATA,sha256=hiTlDKqSEsdgc_JapuWUhJurOs-GGrt6ZJAsz9__pM0,52124
+emhass-0.13.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+emhass-0.13.5.dist-info/entry_points.txt,sha256=6Bp1NFOGNv_fSTxYl1ke3K3h3aqAcBxI-bgq5yq-i1M,52
+emhass-0.13.5.dist-info/licenses/LICENSE,sha256=1X3-S1yvOCBDBeox1aK3dq00m7dA8NDtcPrpKPISzbE,1077
+emhass-0.13.5.dist-info/RECORD,,
{emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/WHEEL: file without changes
{emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/entry_points.txt: file without changes
{emhass-0.13.3.dist-info → emhass-0.13.5.dist-info}/licenses/LICENSE: file without changes