emhass 0.13.2__py3-none-any.whl → 0.13.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emhass/command_line.py +6 -2
- emhass/data/associations.csv +6 -1
- emhass/data/config_defaults.json +5 -0
- emhass/forecast.py +52 -12
- emhass/machine_learning_forecaster.py +0 -2
- emhass/optimization.py +262 -126
- emhass/retrieve_hass.py +11 -9
- emhass/static/data/param_definitions.json +39 -2
- emhass/utils.py +61 -21
- emhass/web_server.py +10 -28
- {emhass-0.13.2.dist-info → emhass-0.13.4.dist-info}/METADATA +47 -23
- {emhass-0.13.2.dist-info → emhass-0.13.4.dist-info}/RECORD +15 -15
- {emhass-0.13.2.dist-info → emhass-0.13.4.dist-info}/WHEEL +0 -0
- {emhass-0.13.2.dist-info → emhass-0.13.4.dist-info}/entry_points.txt +0 -0
- {emhass-0.13.2.dist-info → emhass-0.13.4.dist-info}/licenses/LICENSE +0 -0
emhass/optimization.py
CHANGED
@@ -1,15 +1,14 @@
-#!/usr/bin/env python3
-
 import bz2
 import copy
 import logging
+import os
 import pickle as cPickle
 from math import ceil
 
 import numpy as np
 import pandas as pd
 import pulp as plp
-from pulp import COIN_CMD, GLPK_CMD, PULP_CBC_CMD
+from pulp import COIN_CMD, GLPK_CMD, PULP_CBC_CMD, HiGHS
 
 
 class Optimization:
@@ -87,6 +86,13 @@ class Optimization:
         self.var_load_cost = var_load_cost
         self.var_prod_price = var_prod_price
         self.optim_status = None
+        if "num_threads" in optim_conf.keys():
+            if optim_conf["num_threads"] == 0:
+                self.num_threads = int(os.cpu_count())
+            else:
+                self.num_threads = int(optim_conf["num_threads"])
+        else:
+            self.num_threads = int(os.cpu_count())
         if "lp_solver" in optim_conf.keys():
             self.lp_solver = optim_conf["lp_solver"]
         else:
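
Note: the new num_threads handling above maps a configured value of 0 (or a missing key) to all available CPU cores. A minimal standalone sketch of that resolution logic; the helper name resolve_num_threads is illustrative and not part of EMHASS:

import os

def resolve_num_threads(optim_conf: dict) -> int:
    # A value of 0, or a missing "num_threads" key, means "use every core".
    configured = optim_conf.get("num_threads", 0)
    return int(os.cpu_count()) if configured == 0 else int(configured)

print(resolve_num_threads({"num_threads": 0}))  # e.g. 8 on an 8-core machine
print(resolve_num_threads({"num_threads": 4}))  # 4
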
@@ -106,10 +112,15 @@
                 "lp_solver=COIN_CMD but lp_solver_path=empty, attempting to use lp_solver_path=/usr/bin/cbc"
             )
             self.lp_solver_path = "/usr/bin/cbc"
-        self.logger.debug(
+        self.logger.debug(
+            f"Initialized Optimization with retrieve_hass_conf: {retrieve_hass_conf}"
+        )
         self.logger.debug(f"Optimization configuration: {optim_conf}")
         self.logger.debug(f"Plant configuration: {plant_conf}")
-        self.logger.debug(
+        self.logger.debug(
+            f"Solver configuration: lp_solver={self.lp_solver}, lp_solver_path={self.lp_solver_path}"
+        )
+        self.logger.debug(f"Number of threads: {self.num_threads}")
 
     def perform_optimization(
         self,
@@ -178,7 +189,9 @@ class Optimization:
                 soc_final = soc_init
             else:
                 soc_final = self.plant_conf["battery_target_state_of_charge"]
-            self.logger.debug(
+            self.logger.debug(
+                f"Battery usage enabled. Initial SOC: {soc_init}, Final SOC: {soc_final}"
+            )
 
         # If def_total_timestep os set, bypass def_total_hours
         if def_total_timestep is not None:
@@ -196,9 +209,21 @@
 
         num_deferrable_loads = self.optim_conf["number_of_deferrable_loads"]
 
-
-
-
+        # Retrieve the minimum power for each deferrable load, defaulting to 0 if not provided
+        min_power_of_deferrable_loads = self.optim_conf.get("minimum_power_of_deferrable_loads", [0] * num_deferrable_loads)
+        min_power_of_deferrable_loads = min_power_of_deferrable_loads + [0] * (
+            num_deferrable_loads - len(min_power_of_deferrable_loads)
+        )
+
+        def_total_hours = def_total_hours + [0] * (
+            num_deferrable_loads - len(def_total_hours)
+        )
+        def_start_timestep = def_start_timestep + [0] * (
+            num_deferrable_loads - len(def_start_timestep)
+        )
+        def_end_timestep = def_end_timestep + [0] * (
+            num_deferrable_loads - len(def_end_timestep)
+        )
 
         #### The LP problem using Pulp ####
         opt_model = plp.LpProblem("LP_Model", plp.LpMaximize)
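
Note: the padding lines above simply right-pad the per-load configuration lists with zeros so that each list has exactly one entry per deferrable load. A small illustration with made-up values:

num_deferrable_loads = 3
def_total_hours = [5]  # only the first load was configured
def_total_hours = def_total_hours + [0] * (num_deferrable_loads - len(def_total_hours))
print(def_total_hours)  # [5, 0, 0]
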
@@ -464,59 +489,133 @@ class Optimization:
             for i in set_I
         }
 
-        # Constraint for hybrid inverter and curtailment cases
-        if isinstance(self.plant_conf["pv_module_model"], list):
-            P_nom_inverter = 0.0
-            for i in range(len(self.plant_conf["pv_inverter_model"])):
-                if isinstance(self.plant_conf["pv_inverter_model"][i], str):
-                    cec_inverters = bz2.BZ2File(
-                        self.emhass_conf["root_path"] / "data" / "cec_inverters.pbz2",
-                        "rb",
-                    )
-                    cec_inverters = cPickle.load(cec_inverters)
-                    inverter = cec_inverters[self.plant_conf["pv_inverter_model"][i]]
-                    P_nom_inverter += inverter.Paco
-                else:
-                    P_nom_inverter += self.plant_conf["pv_inverter_model"][i]
-        else:
-            if isinstance(self.plant_conf["pv_inverter_model"][i], str):
-                cec_inverters = bz2.BZ2File(
-                    self.emhass_conf["root_path"] / "data" / "cec_inverters.pbz2", "rb"
-                )
-                cec_inverters = cPickle.load(cec_inverters)
-                inverter = cec_inverters[self.plant_conf["pv_inverter_model"]]
-                P_nom_inverter = inverter.Paco
-            else:
-                P_nom_inverter = self.plant_conf["pv_inverter_model"]
         if self.plant_conf["inverter_is_hybrid"]:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            P_nom_inverter_output = self.plant_conf.get("inverter_ac_output_max", None)
+            P_nom_inverter_input = self.plant_conf.get("inverter_ac_input_max", None)
+
+            # Fallback to legacy pv_inverter_model for output power if new setting is not provided
+            if P_nom_inverter_output is None:
+                if "pv_inverter_model" in self.plant_conf:
+                    if isinstance(self.plant_conf["pv_inverter_model"], list):
+                        P_nom_inverter_output = 0.0
+                        for i in range(len(self.plant_conf["pv_inverter_model"])):
+                            if isinstance(self.plant_conf["pv_inverter_model"][i], str):
+                                cec_inverters = bz2.BZ2File(
+                                    self.emhass_conf["root_path"]
+                                    / "data"
+                                    / "cec_inverters.pbz2",
+                                    "rb",
+                                )
+                                cec_inverters = cPickle.load(cec_inverters)
+                                inverter = cec_inverters[
+                                    self.plant_conf["pv_inverter_model"][i]
+                                ]
+                                P_nom_inverter_output += inverter.Paco
+                            else:
+                                P_nom_inverter_output += self.plant_conf[
+                                    "pv_inverter_model"
+                                ][i]
+                    else:
+                        if isinstance(self.plant_conf["pv_inverter_model"], str):
+                            cec_inverters = bz2.BZ2File(
+                                self.emhass_conf["root_path"]
+                                / "data"
+                                / "cec_inverters.pbz2",
+                                "rb",
+                            )
+                            cec_inverters = cPickle.load(cec_inverters)
+                            inverter = cec_inverters[
+                                self.plant_conf["pv_inverter_model"]
+                            ]
+                            P_nom_inverter_output = inverter.Paco
+                        else:
+                            P_nom_inverter_output = self.plant_conf["pv_inverter_model"]
+
+            if P_nom_inverter_input is None:
+                P_nom_inverter_input = P_nom_inverter_output
+
+            # Get efficiency parameters, defaulting to 100%
+            eff_dc_ac = self.plant_conf.get("inverter_efficiency_dc_ac", 1.0)
+            eff_ac_dc = self.plant_conf.get("inverter_efficiency_ac_dc", 1.0)
+
+            # Calculate the maximum allowed DC power flows based on AC limits and efficiency.
+            P_dc_ac_max = P_nom_inverter_output / eff_dc_ac
+            P_ac_dc_max = P_nom_inverter_input * eff_ac_dc
+
+            # Define unidirectional DC power flow variables with the tight, calculated bounds.
+            P_dc_ac = {
+                (i): plp.LpVariable(
+                    cat="Continuous",
+                    lowBound=0,
+                    upBound=P_dc_ac_max,
+                    name=f"P_dc_ac_{i}",
+                )
+                for i in set_I
+            }
+            P_ac_dc = {
+                (i): plp.LpVariable(
+                    cat="Continuous",
+                    lowBound=0,
+                    upBound=P_ac_dc_max,
+                    name=f"P_ac_dc_{i}",
+                )
+                for i in set_I
+            }
+            # Binary variable to enforce unidirectional flow
+            is_dc_sourcing = {
+                (i): plp.LpVariable(cat="Binary", name=f"is_dc_sourcing_{i}")
+                for i in set_I
+            }
+
+            # Define the core energy balance equations for each timestep
+            for i in set_I:
+                # The net DC power from PV and battery must equal the net DC flow of the inverter
+                constraints.update(
+                    {
+                        f"constraint_dc_bus_balance_{i}": plp.LpConstraint(
+                            e=(
+                                P_PV[i]
+                                - P_PV_curtailment[i]
+                                + P_sto_pos[i]
+                                + P_sto_neg[i]
+                            )
+                            - (P_dc_ac[i] - P_ac_dc[i]),
+                            sense=plp.LpConstraintEQ,
+                            rhs=0,
+                        )
+                    }
+                )
+
+                # The AC power is defined by the efficiency-adjusted DC flows
+                constraints.update(
+                    {
+                        f"constraint_ac_bus_balance_{i}": plp.LpConstraint(
+                            e=P_hybrid_inverter[i]
+                            - ((P_dc_ac[i] * eff_dc_ac) - (P_ac_dc[i] / eff_ac_dc)),
+                            sense=plp.LpConstraintEQ,
+                            rhs=0,
+                        )
+                    }
+                )
+
+                # Use the binary variable to ensure only one direction is active at a time
+                constraints.update(
+                    {
+                        # If is_dc_sourcing = 1 (DC->AC is active), then P_ac_dc must be 0.
+                        f"constraint_enforce_ac_dc_zero_{i}": plp.LpConstraint(
+                            e=P_ac_dc[i] - (1 - is_dc_sourcing[i]) * P_ac_dc_max,
+                            sense=plp.LpConstraintLE,
+                            rhs=0,
+                        ),
+                        # If is_dc_sourcing = 0 (AC->DC is active), then P_dc_ac must be 0.
+                        f"constraint_enforce_dc_ac_zero_{i}": plp.LpConstraint(
+                            e=P_dc_ac[i] - is_dc_sourcing[i] * P_dc_ac_max,
+                            sense=plp.LpConstraintLE,
+                            rhs=0,
+                        ),
+                    }
+                )
+
         else:
             if self.plant_conf["compute_curtailment"]:
                 constraints.update(
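
Note: the new hybrid-inverter formulation above models the inverter as a DC bus (PV plus battery) coupled to an AC bus through two non-negative flows, P_dc_ac and P_ac_dc, scaled by the DC->AC and AC->DC efficiencies, with a per-timestep binary that forces at most one direction to be non-zero (a big-M style switch using the flow upper bounds). A self-contained PuLP sketch of just that unidirectional-flow pattern, using illustrative numbers that are not EMHASS defaults:

import pulp as plp

P_dc_ac_max, P_ac_dc_max = 5000.0, 3000.0  # illustrative efficiency-derived bounds
prob = plp.LpProblem("unidirectional_flow_sketch", plp.LpMaximize)
p_dc_ac = plp.LpVariable("p_dc_ac", lowBound=0, upBound=P_dc_ac_max)
p_ac_dc = plp.LpVariable("p_ac_dc", lowBound=0, upBound=P_ac_dc_max)
is_dc_sourcing = plp.LpVariable("is_dc_sourcing", cat="Binary")
# If is_dc_sourcing == 1 the AC->DC flow is forced to zero, and vice versa.
prob += p_ac_dc <= (1 - is_dc_sourcing) * P_ac_dc_max
prob += p_dc_ac <= is_dc_sourcing * P_dc_ac_max
prob += p_dc_ac - p_ac_dc  # toy objective: maximize net export
prob.solve(plp.PULP_CBC_CMD(msg=0))
print(plp.value(p_dc_ac), plp.value(p_ac_dc))  # 5000.0 0.0
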
@@ -582,7 +681,9 @@ class Optimization:
             if isinstance(
                 self.optim_conf["nominal_power_of_deferrable_loads"][k], list
             ):
-                self.logger.debug(
+                self.logger.debug(
+                    f"Load {k} is sequence-based. Sequence: {self.optim_conf['nominal_power_of_deferrable_loads'][k]}"
+                )
                 # Constraint for sequence of deferrable
                 # WARNING: This is experimental, formulation seems correct but feasibility problems.
                 # Probably uncomptabile with other constraints
@@ -600,7 +701,9 @@
                 y = plp.LpVariable.dicts(
                     f"y{k}", (i for i in range(len(matrix))), cat="Binary"
                 )
-                self.logger.debug(
+                self.logger.debug(
+                    f"Load {k}: Created binary variables for sequence placement: y = {list(y.keys())}"
+                )
                 constraints.update(
                     {
                         f"single_value_constraint_{k}": plp.LpConstraint(
@@ -642,54 +745,56 @@ class Optimization:
                 self.logger.debug(f"Load {k}: Sequence-based constraints set.")
 
             # --- Thermal deferrable load logic first ---
-            elif (
+            elif (
+                "def_load_config" in self.optim_conf.keys()
                 and len(self.optim_conf["def_load_config"]) > k
-                and "thermal_config" in self.optim_conf["def_load_config"][k]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                and "thermal_config" in self.optim_conf["def_load_config"][k]
+            ):
+                self.logger.debug(f"Load {k} is a thermal deferrable load.")
+                def_load_config = self.optim_conf["def_load_config"][k]
+                if def_load_config and "thermal_config" in def_load_config:
+                    hc = def_load_config["thermal_config"]
+                    start_temperature = hc["start_temperature"]
+                    cooling_constant = hc["cooling_constant"]
+                    heating_rate = hc["heating_rate"]
+                    overshoot_temperature = hc["overshoot_temperature"]
+                    outdoor_temperature_forecast = data_opt[
+                        "outdoor_temperature_forecast"
+                    ]
+                    desired_temperatures = hc["desired_temperatures"]
+                    sense = hc.get("sense", "heat")
+                    sense_coeff = 1 if sense == "heat" else -1
+
+                    self.logger.debug(
+                        f"Load {k}: Thermal parameters: start_temperature={start_temperature}, cooling_constant={cooling_constant}, heating_rate={heating_rate}, overshoot_temperature={overshoot_temperature}"
+                    )
+
+                    predicted_temp = [start_temperature]
+                    for Id in set_I:
+                        if Id == 0:
+                            continue
+                        predicted_temp.append(
+                            predicted_temp[Id - 1]
+                            + (
+                                P_deferrable[k][Id - 1]
+                                * (
+                                    heating_rate
+                                    * self.timeStep
+                                    / self.optim_conf[
+                                        "nominal_power_of_deferrable_loads"
+                                    ][k]
                                 )
-
-
-
-
-
-
+                            )
+                            - (
+                                cooling_constant
+                                * (
+                                    predicted_temp[Id - 1]
+                                    - outdoor_temperature_forecast.iloc[Id - 1]
                                 )
                             )
-
-                        is_overshoot = plp.LpVariable(
-                            f"defload_{k}_overshoot_{Id}"
                         )
+
+                        is_overshoot = plp.LpVariable(f"defload_{k}_overshoot_{Id}")
                         constraints.update(
                             {
                                 f"constraint_defload{k}_overshoot_{Id}_1": plp.LpConstraint(
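
Note: the predicted_temp recurrence built above (as LP expressions) is a simple first-order thermal model: each step the temperature rises with the delivered deferrable power, scaled by heating_rate * timestep / nominal_power, and relaxes toward the outdoor forecast through the cooling_constant. Evaluated with plain floats and illustrative parameter values, a sketch of the same recurrence looks like this:

def predict_temperatures(power, t_out, start_temperature=20.0, heating_rate=5.0,
                         cooling_constant=0.1, nominal_power=1000.0, dt=0.5):
    # T[i] = T[i-1] + P[i-1] * heating_rate * dt / nominal_power
    #               - cooling_constant * (T[i-1] - T_out[i-1])
    temps = [start_temperature]
    for i in range(1, len(power) + 1):
        temps.append(
            temps[i - 1]
            + power[i - 1] * (heating_rate * dt / nominal_power)
            - cooling_constant * (temps[i - 1] - t_out[i - 1])
        )
    return temps

print(predict_temperatures([1000.0, 0.0, 1000.0], [10.0, 10.0, 10.0]))
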
@@ -725,9 +830,10 @@ class Optimization:
                             "penalty_factor must be positive, otherwise the problem will become unsolvable"
                         )
                     penalty_value = (
-                        predicted_temp[Id]
-
-
+                        (predicted_temp[Id] - desired_temperatures[Id])
+                        * penalty_factor
+                        * sense_coeff
+                    )
                     penalty_var = plp.LpVariable(
                         f"defload_{k}_thermal_penalty_{Id}",
                         cat="Continuous",
@@ -748,13 +854,14 @@ class Optimization:
                    self.logger.debug(f"Load {k}: Thermal constraints set.")
 
            # --- Standard/non-thermal deferrable load logic comes after thermal ---
-           elif (
-               (
-
-
+           elif (def_total_timestep and def_total_timestep[k] > 0) or (
+               len(def_total_hours) > k and def_total_hours[k] > 0
+           ):
                self.logger.debug(f"Load {k} is standard/non-thermal.")
                if def_total_timestep and def_total_timestep[k] > 0:
-                   self.logger.debug(
+                   self.logger.debug(
+                       f"Load {k}: Using total timesteps constraint: {def_total_timestep[k]}"
+                   )
                    constraints.update(
                        {
                            f"constraint_defload{k}_energy": plp.LpConstraint(
@@ -763,12 +870,16 @@ class Optimization:
                                ),
                                sense=plp.LpConstraintEQ,
                                rhs=(self.timeStep * def_total_timestep[k])
-                               * self.optim_conf["nominal_power_of_deferrable_loads"][
+                               * self.optim_conf["nominal_power_of_deferrable_loads"][
+                                   k
+                               ],
                            )
                        }
                    )
                else:
-                   self.logger.debug(
+                   self.logger.debug(
+                       f"Load {k}: Using total hours constraint: {def_total_hours[k]}"
+                   )
                    constraints.update(
                        {
                            f"constraint_defload{k}_energy": plp.LpConstraint(
@@ -777,13 +888,14 @@ class Optimization:
                                ),
                                sense=plp.LpConstraintEQ,
                                rhs=def_total_hours[k]
-                               * self.optim_conf["nominal_power_of_deferrable_loads"][
+                               * self.optim_conf["nominal_power_of_deferrable_loads"][
+                                   k
+                               ],
                            )
                        }
                    )
                    self.logger.debug(f"Load {k}: Standard load constraints set.")
 
-
            # Ensure deferrable loads consume energy between def_start_timestep & def_end_timestep
            self.logger.debug(
                f"Deferrable load {k}: Proposed optimization window: {def_start_timestep[k]} --> {def_end_timestep[k]}"
|
|
837
949
|
}
|
838
950
|
)
|
839
951
|
|
952
|
+
# Constraint for the minimum power of deferrable loads using the big-M method.
|
953
|
+
# This enforces: P_deferrable = 0 OR P_deferrable >= min_power.
|
954
|
+
if min_power_of_deferrable_loads[k] > 0:
|
955
|
+
self.logger.debug(f"Applying minimum power constraint for deferrable load {k}: {min_power_of_deferrable_loads[k]} W")
|
956
|
+
constraints.update(
|
957
|
+
{
|
958
|
+
f"constraint_pdef{k}_min_power_{i}": plp.LpConstraint(
|
959
|
+
e=P_deferrable[k][i] - (min_power_of_deferrable_loads[k] * P_def_bin2[k][i]),
|
960
|
+
sense=plp.LpConstraintGE,
|
961
|
+
rhs=0
|
962
|
+
) for i in set_I
|
963
|
+
}
|
964
|
+
)
|
965
|
+
|
840
966
|
# Treat the number of starts for a deferrable load (new method considering current state)
|
841
967
|
current_state = 0
|
842
968
|
if (
|
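
Note: the new minimum-power constraint above adds the lower half of a semi-continuous pattern: assuming the existing binary P_def_bin2[k][i] already caps the load at its nominal power when the load is off, adding P_deferrable >= min_power * P_def_bin2 makes each timestep's power either exactly 0 or at least the configured minimum. A tiny PuLP sketch of that pattern with made-up numbers:

import pulp as plp

P_nom, P_min = 3000.0, 800.0
prob = plp.LpProblem("min_power_sketch", plp.LpMinimize)
p = plp.LpVariable("p", lowBound=0)
b = plp.LpVariable("b", cat="Binary")
prob += p <= P_nom * b   # upper bound tied to the on/off binary
prob += p >= P_min * b   # new lower bound: off, or at least P_min
prob += p >= 500.0       # some requirement that would otherwise pick 500 W
prob += p                # objective: use as little power as possible
prob.solve(plp.PULP_CBC_CMD(msg=0))
print(plp.value(p))      # 800.0 -> the load runs at its minimum power, not 500 W
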
@@ -1135,16 +1261,27 @@ class Optimization:
        timeout = self.optim_conf["lp_solver_timeout"]
        # solving with default solver CBC
        if self.lp_solver == "PULP_CBC_CMD":
-           opt_model.solve(
+           opt_model.solve(
+               PULP_CBC_CMD(msg=0, timeLimit=timeout, threads=self.num_threads)
+           )
        elif self.lp_solver == "GLPK_CMD":
-           opt_model.solve(GLPK_CMD(msg=0, timeLimit=timeout
+           opt_model.solve(GLPK_CMD(msg=0, timeLimit=timeout))
+       elif self.lp_solver == "HiGHS":
+           opt_model.solve(HiGHS(msg=0, timeLimit=timeout))
        elif self.lp_solver == "COIN_CMD":
            opt_model.solve(
-               COIN_CMD(
+               COIN_CMD(
+                   msg=0,
+                   path=self.lp_solver_path,
+                   timeLimit=timeout,
+                   threads=self.num_threads,
+               )
            )
        else:
            self.logger.warning("Solver %s unknown, using default", self.lp_solver)
-           opt_model.solve(
+           opt_model.solve(
+               PULP_CBC_CMD(msg=0, timeLimit=timeout, threads=self.num_threads)
+           )
 
        # The status of the solution is printed to the screen
        self.optim_status = plp.LpStatus[opt_model.status]
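
Note: with the new HiGHS branch above, the solver can be selected by setting lp_solver to "HiGHS"; PuLP's HiGHS API interface additionally needs the highspy package to be installed. A minimal smoke test, assuming that solver is available and using only the constructor arguments that appear in the diff (msg and timeLimit):

import pulp as plp

prob = plp.LpProblem("solver_smoke_test", plp.LpMaximize)
x = plp.LpVariable("x", lowBound=0, upBound=10)
prob += x  # maximize x subject to its bounds
prob.solve(plp.HiGHS(msg=0, timeLimit=30))
print(plp.LpStatus[prob.status], plp.value(x))  # Optimal 10.0
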
@@ -1308,7 +1445,9 @@ class Optimization:
 
        # Battery initialization logging
        if self.optim_conf["set_use_battery"]:
-           self.logger.debug(
+           self.logger.debug(
+               f"Battery usage enabled. Initial SOC: {soc_init}, Final SOC: {soc_final}"
+           )
 
        # Deferrable load initialization logging
        self.logger.debug(f"Deferrable load operating hours: {def_total_hours}")
@@ -1322,9 +1461,6 @@ class Optimization:
        # Solver execution logging
        self.logger.debug(f"Solver selected: {self.lp_solver}")
        self.logger.info(f"Optimization status: {self.optim_status}")
-
-       # Results logging
-
        return opt_tp
 
    def perform_perfect_forecast_optim(
emhass/retrieve_hass.py
CHANGED
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-
 import copy
 import datetime
 import json
@@ -102,14 +100,16 @@ class RetrieveHass:
        try:
            response_config = get(url, headers=headers)
        except Exception:
-           self.logger.error("Unable to access Home
+           self.logger.error("Unable to access Home Assistant instance, check URL")
            self.logger.error("If using addon, try setting url and token to 'empty'")
            return False
 
        try:
            self.ha_config = response_config.json()
        except Exception:
-           self.logger.error(
+           self.logger.error(
+               "EMHASS was unable to obtain configuration data from Home Assistant"
+           )
            return False
 
    def get_data(
@@ -188,7 +188,7 @@ class RetrieveHass:
            response = get(url, headers=headers)
        except Exception:
            self.logger.error(
-               "Unable to access Home
+               "Unable to access Home Assistant instance, check URL"
            )
            self.logger.error(
                "If using addon, try setting url and token to 'empty'"
|
|
197
197
|
else:
|
198
198
|
if response.status_code == 401:
|
199
199
|
self.logger.error(
|
200
|
-
"Unable to access Home
|
200
|
+
"Unable to access Home Assistant instance, TOKEN/KEY"
|
201
201
|
)
|
202
202
|
self.logger.error(
|
203
203
|
"If using addon, try setting url and token to 'empty'"
|
204
204
|
)
|
205
205
|
return False
|
206
206
|
if response.status_code > 299:
|
207
|
-
self.logger.error(
|
207
|
+
self.logger.error(
|
208
|
+
f"Home assistant request GET error: {response.status_code} for var {var}"
|
209
|
+
)
|
208
210
|
return False
|
209
211
|
"""import bz2 # Uncomment to save a serialized data for tests
|
210
212
|
import _pickle as cPickle
|
@@ -217,7 +219,7 @@ class RetrieveHass:
                    self.logger.error(
                        "The retrieved JSON is empty, A sensor:"
                        + var
-                       + " may have 0 days of history, passed sensor may not be correct, or days to retrieve is set too
+                       + " may have 0 days of history, passed sensor may not be correct, or days to retrieve is set too high. Check your Logger configuration, ensuring the sensors are in the include list."
                    )
                else:
                    self.logger.error(
@@ -268,7 +270,7 @@ class RetrieveHass:
            ).max()
            ts = pd.to_datetime(
                pd.date_range(start=from_date, end=to_date, freq=self.freq),
-               format="%Y-%d-%m %H:%M"
+               format="%Y-%d-%m %H:%M",
            ).round(self.freq, ambiguous="infer", nonexistent="shift_forward")
            df_day = pd.DataFrame(index=ts)
            # Caution with undefined string data: unknown, unavailable, etc.