ecopipeline 1.0.5__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ecopipeline/event_tracking/Alarm.py +317 -0
- ecopipeline/event_tracking/__init__.py +18 -1
- ecopipeline/event_tracking/alarms/AbnormalCOP.py +76 -0
- ecopipeline/event_tracking/alarms/BackupUse.py +94 -0
- ecopipeline/event_tracking/alarms/BalancingValve.py +78 -0
- ecopipeline/event_tracking/alarms/BlownFuse.py +72 -0
- ecopipeline/event_tracking/alarms/Boundary.py +90 -0
- ecopipeline/event_tracking/alarms/HPWHInlet.py +73 -0
- ecopipeline/event_tracking/alarms/HPWHOutage.py +96 -0
- ecopipeline/event_tracking/alarms/HPWHOutlet.py +85 -0
- ecopipeline/event_tracking/alarms/LSInconsist.py +114 -0
- ecopipeline/event_tracking/alarms/PowerRatio.py +111 -0
- ecopipeline/event_tracking/alarms/SOOChange.py +127 -0
- ecopipeline/event_tracking/alarms/ShortCycle.py +59 -0
- ecopipeline/event_tracking/alarms/TMSetpoint.py +127 -0
- ecopipeline/event_tracking/alarms/TempRange.py +84 -0
- ecopipeline/event_tracking/alarms/__init__.py +0 -0
- ecopipeline/event_tracking/event_tracking.py +119 -1177
- ecopipeline/extract/extract.py +51 -0
- ecopipeline/extract/zip_to_lat_long.csv +41490 -0
- ecopipeline/load/__init__.py +2 -2
- ecopipeline/load/load.py +304 -3
- ecopipeline/utils/ConfigManager.py +30 -0
- {ecopipeline-1.0.5.dist-info → ecopipeline-1.1.1.dist-info}/METADATA +1 -1
- ecopipeline-1.1.1.dist-info/RECORD +42 -0
- {ecopipeline-1.0.5.dist-info → ecopipeline-1.1.1.dist-info}/WHEEL +1 -1
- ecopipeline-1.0.5.dist-info/RECORD +0 -25
- {ecopipeline-1.0.5.dist-info → ecopipeline-1.1.1.dist-info}/licenses/LICENSE +0 -0
- {ecopipeline-1.0.5.dist-info → ecopipeline-1.1.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class Boundary(Alarm):
    """
    Detects when variable values fall outside their expected low/high boundary range for a sustained period.
    An alarm triggers when a value stays below the low_alarm threshold or above the high_alarm threshold
    for fault_time consecutive minutes.

    Variable_Names.csv columns:
        variable_name - Name of the variable to monitor
        low_alarm - Lower bound threshold. Alarm triggers if value stays below this for fault_time minutes.
        high_alarm - Upper bound threshold. Alarm triggers if value stays above this for fault_time minutes.
        fault_time - (Optional) Number of consecutive minutes for this variable. Overrides default_fault_time.
        pretty_name - (Optional) Display name for the variable in alarm messages. Defaults to variable_name.

    Parameters
    ----------
    default_fault_time : int
        Number of consecutive minutes that a value must be outside bounds before triggering an alarm (default 15).
        Can be overridden per-variable using the fault_time column in Variable_Names.csv.
    """
    def __init__(self, bounds_df : pd.DataFrame, default_fault_time : int = 15):
        self.default_fault_time = default_fault_time
        super().__init__(bounds_df, None, {}, alarm_db_type='BOUNDARY')

    def _process_bounds_df_alarm_codes(self, og_bounds_df : pd.DataFrame) -> pd.DataFrame:
        """Normalize the raw Variable_Names dataframe into the bounds table used by this alarm."""
        bounds_df = og_bounds_df.copy()
        required_columns = ["variable_name", "high_alarm", "low_alarm"]
        for required_column in required_columns:
            if required_column not in bounds_df.columns:
                raise Exception(f"{required_column} is not present in Variable_Names.csv")
        # Fall back to the raw variable name when no display name is provided.
        if 'pretty_name' not in bounds_df.columns:
            bounds_df['pretty_name'] = bounds_df['variable_name']
        else:
            bounds_df['pretty_name'] = bounds_df['pretty_name'].fillna(bounds_df['variable_name'])
        if 'fault_time' not in bounds_df.columns:
            bounds_df['fault_time'] = self.default_fault_time

        bounds_df = bounds_df.loc[:, ["variable_name", "high_alarm", "low_alarm", "fault_time", "pretty_name"]]
        # Keep rows that have at least two non-null values (variable name plus a bound).
        bounds_df.dropna(axis=0, thresh=2, inplace=True)
        bounds_df.set_index(['variable_name'], inplace=True)
        # ensure that lower and upper bounds are numbers; non-numeric entries become NaN
        bounds_df['high_alarm'] = pd.to_numeric(bounds_df['high_alarm'], errors='coerce').astype(float)
        bounds_df['low_alarm'] = pd.to_numeric(bounds_df['low_alarm'], errors='coerce').astype(float)
        bounds_df['fault_time'] = pd.to_numeric(bounds_df['fault_time'], errors='coerce').astype('Int64')
        bounds_df = bounds_df[bounds_df.index.notnull()]
        return bounds_df

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """Scan each monitored variable day-by-day and record alarms for sustained out-of-bounds streaks."""
        idx = df.index
        full_days = pd.to_datetime(pd.Series(idx).dt.normalize().unique())
        for bound_var, bounds in self.bounds_df.iterrows():
            if bound_var in df.columns:
                lower_mask = df[bound_var] < bounds["low_alarm"]
                upper_mask = df[bound_var] > bounds["high_alarm"]
                # Resolve the per-variable fault time, falling back to the class default.
                fault_time = bounds['fault_time']
                if pd.isna(fault_time):
                    fault_time = self.default_fault_time
                fault_time = int(fault_time)
                # BUG FIX: previously a fault_time < 1 printed a warning (once per day)
                # but still fell through to the rolling-window check, which cannot
                # operate with a window below 1. Skip the variable entirely instead.
                if fault_time < 1:
                    print(f"Could not process alarm for {bound_var}. Fault time must be greater than or equal to 1 minute.")
                    continue
                for day in full_days:
                    self._check_and_add_alarm(lower_mask, day, fault_time, bound_var, bounds['pretty_name'])
                    self._check_and_add_alarm(upper_mask, day, fault_time, bound_var, bounds['pretty_name'])

    def _check_and_add_alarm(self, mask : pd.Series, day, fault_time : int, var_name : str, pretty_name : str):
        """Add one alarm per streak of fault_time consecutive True values within the given day."""
        next_day = day + pd.Timedelta(days=1)
        filtered_df = mask.loc[(mask.index >= day) & (mask.index < next_day)]
        # True wherever the preceding fault_time samples were all out of bounds.
        consecutive_condition = filtered_df.rolling(window=fault_time).min() == 1
        if consecutive_condition.any():
            group = (consecutive_condition != consecutive_condition.shift()).cumsum()

            # Iterate through each streak and add an alarm for each
            for group_id in consecutive_condition.groupby(group).first()[lambda x: x].index:
                streak_indices = consecutive_condition[group == group_id].index

                # Adjust start time because first (fault_time-1) minutes don't count in window
                start_time = streak_indices[0] - pd.Timedelta(minutes=fault_time-1)
                end_time = streak_indices[-1]

                alarm_string = f"Boundary alarm for {pretty_name}"

                self._add_an_alarm(start_time, end_time, var_name, alarm_string)
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class HPWHInlet(Alarm):
    """
    Detects high heat pump inlet temperature while the heat pump is running.
    An alarm triggers when both the heat pump power and the inlet temperature stay above their
    thresholds for fault_time consecutive minutes.

    VarNames syntax:
        HPINLET_POW_[OPTIONAL ID]:### - Indicates a power variable for the heat pump. ### is the power threshold (default 1.0) above which
            the heat pump is considered 'on'
        HPINLET_T_[OPTIONAL ID]:### - Indicates heat pump inlet temperature variable. ### is the temperature threshold (default 115.0)
            that should not be exceeded while the heat pump is on

    Parameters
    ----------
    default_power_threshold : float
        Default power threshold for POW alarm codes when no custom bound is specified (default 1.0). Heat pump is considered 'on'
        when power exceeds this value.
    default_temp_threshold : float
        Default temperature threshold for T alarm codes when no custom bound is specified (default 115.0). Alarm triggers when
        temperature exceeds this value while heat pump is on.
    fault_time : int
        Number of consecutive minutes that both power and temperature must exceed their thresholds before triggering an alarm (default 5).
    """
    def __init__(self, bounds_df : pd.DataFrame, default_power_threshold : float = 1.0, default_temp_threshold : float = 115.0, fault_time : int = 5):
        alarm_tag = 'HPINLET'
        type_default_dict = {
            'POW' : default_power_threshold,
            'T' : default_temp_threshold
        }
        self.fault_time = fault_time
        super().__init__(bounds_df, alarm_tag, type_default_dict, two_part_tag = True, alarm_db_type='HPWH_INLET')

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """Check each alarm-code id, day by day, for sustained high inlet temperature while the HP is on."""
        for alarm_id in self.bounds_df['alarm_code_id'].unique():
            id_group = self.bounds_df[self.bounds_df['alarm_code_id'] == alarm_id]
            pow_codes = id_group[id_group['alarm_code_type'] == 'POW']
            t_codes = id_group[id_group['alarm_code_type'] == 'T']
            # BUG FIX: validate before iloc[0]; an empty code group previously raised a
            # bare IndexError instead of this descriptive error, and the message
            # referred to "balancing valve" (copy-paste from another alarm class).
            if len(t_codes) != 1 or len(pow_codes) != 1:
                raise Exception(f"Improper alarm codes for heat pump inlet with id {alarm_id}")
            pow_var_name = pow_codes.iloc[0]['variable_name']
            pow_thresh = pow_codes.iloc[0]['bound']
            t_var_name = t_codes.iloc[0]['variable_name']
            t_pretty_name = t_codes.iloc[0]['pretty_name']
            t_thresh = t_codes.iloc[0]['bound']
            # Columns do not vary by day, so check once per id rather than per day.
            if not (pow_var_name in df.columns and t_var_name in df.columns):
                continue
            for day in daily_df.index:
                next_day = day + pd.Timedelta(days=1)
                filtered_df = df.loc[(df.index >= day) & (df.index < next_day)]
                # Check for consecutive minutes where both power and temp exceed thresholds
                power_mask = filtered_df[pow_var_name] > pow_thresh
                temp_mask = filtered_df[t_var_name] > t_thresh
                combined_mask = power_mask & temp_mask

                # Check for fault_time consecutive minutes
                consecutive_condition = combined_mask.rolling(window=self.fault_time).min() == 1
                if consecutive_condition.any():
                    group = (consecutive_condition != consecutive_condition.shift()).cumsum()
                    for group_id in consecutive_condition.groupby(group).first()[lambda x: x].index:
                        streak_indices = consecutive_condition[group == group_id].index
                        # First fault_time-1 minutes are consumed by the rolling window.
                        start_time = streak_indices[0] - pd.Timedelta(minutes=self.fault_time - 1)
                        end_time = streak_indices[-1]
                        streak_length = len(streak_indices) + self.fault_time - 1
                        self._add_an_alarm(start_time, end_time, t_var_name,
                                           f"High heat pump inlet temperature: {t_pretty_name} was above {t_thresh:.1f} F for {streak_length} minutes while HP was ON starting at {start_time}.")
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class HPWHOutage(Alarm):
    """
    Detects possible heat pump failures or outages by checking if heat pump power consumption falls below
    an expected ratio of total system power over a rolling period, or by checking for non-zero values in
    a direct alarm variable from the heat pump controller.

    VarNames syntax:
        HPOUTGE_POW_[OPTIONAL ID]:### - Heat pump power variable. ### is the minimum expected ratio of HP power to total power
            (default 0.3 for 30%). Must be in same power units as total system power.
        HPOUTGE_TP_[OPTIONAL ID] - Total system power variable for ratio comparison. Required when using POW codes.
        HPOUTGE_ALRM_[OPTIONAL ID] - Direct alarm variable from HP controller. Alarm triggers if any non-zero value is detected.

    Parameters
    ----------
    day_table_name : str
        Name of the site's daily aggregate value table in the database
    default_power_ratio : float
        Default minimum power ratio threshold (as decimal, e.g., 0.3 for 30%) for POW alarm codes when no custom bound is specified (default 0.3).
        An alarm triggers if HP power falls below this ratio of total power over the rolling period.
    ratio_period_days : int
        Number of days to use for the rolling power ratio calculation (default 7). Must be greater than 1.
    """
    def __init__(self, bounds_df : pd.DataFrame, day_table_name : str, default_power_ratio : float = 0.3,
                 ratio_period_days : int = 7):
        alarm_tag = 'HPOUTGE'
        type_default_dict = {
            'POW': default_power_ratio,
            'TP': None,
            'ALRM': None
        }
        self.day_table_name = day_table_name # TODO this could be a security issue. Swap it for config manager
        self.default_power_ratio = default_power_ratio
        self.ratio_period_days = ratio_period_days
        # NOTE(review): alarm_db_type is 'HPWH_INLET', which looks like a copy-paste
        # from HPWHInlet — confirm whether this should be a dedicated outage type
        # before changing it (existing DB rows may already carry this value).
        super().__init__(bounds_df, alarm_tag, type_default_dict, two_part_tag = True, alarm_db_type='HPWH_INLET')

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """Check each alarm-code id for controller-reported alarms (ALRM) or a sustained low HP/total power ratio (POW vs TP)."""
        for alarm_id in self.bounds_df['alarm_code_id'].unique():
            id_group = self.bounds_df[self.bounds_df['alarm_code_id'] == alarm_id]
            # Get POW, TP and ALRM alarm codes for this ID
            pow_codes = id_group[id_group['alarm_code_type'] == 'POW']
            tp_codes = id_group[id_group['alarm_code_type'] == 'TP']
            alrm_codes = id_group[id_group['alarm_code_type'] == 'ALRM']
            if len(alrm_codes) > 0:
                # Direct controller alarms take precedence: when an ALRM variable is
                # configured, the POW/TP ratio check is not run for this id.
                for i in range(len(alrm_codes)):
                    alrm_var_name = alrm_codes.iloc[i]['variable_name']
                    alrm_pretty_name = alrm_codes.iloc[i]['pretty_name']
                    if alrm_var_name in df.columns:
                        for day in daily_df.index:
                            next_day = day + pd.Timedelta(days=1)
                            filtered_df = df.loc[(df.index >= day) & (df.index < next_day)]
                            if not filtered_df.empty:
                                # Find all consecutive blocks where alarm variable is non-zero
                                alarm_mask = filtered_df[alrm_var_name] != 0
                                if alarm_mask.any():
                                    # Find consecutive groups
                                    group = (alarm_mask != alarm_mask.shift()).cumsum()

                                    # Iterate through each consecutive block of non-zero values
                                    for group_id in alarm_mask.groupby(group).first()[lambda x: x].index:
                                        streak_indices = alarm_mask[group == group_id].index
                                        start_time = streak_indices[0]
                                        end_time = streak_indices[-1]
                                        streak_length = len(streak_indices)
                                        alarm_value = filtered_df.loc[start_time, alrm_var_name]

                                        self._add_an_alarm(start_time, end_time, alrm_var_name,
                                                           f"Heat pump alarm triggered: {alrm_pretty_name} was {alarm_value} for {streak_length} minutes starting at {start_time}.")
            elif len(pow_codes) > 0 and len(tp_codes) != 1:
                raise Exception(f"Improper alarm codes for heat pump outage with id {alarm_id}. Requires 1 total power (TP) variable.")
            elif len(pow_codes) > 0 and len(tp_codes) == 1:
                if self.ratio_period_days <= 1:
                    print("HP Outage alarm period, ratio_period_days, must be more than 1")
                else:
                    tp_var_name = tp_codes.iloc[0]['variable_name']
                    daily_df_copy = daily_df.copy()
                    # Pull in enough history so the first day of daily_df has a full block behind it.
                    daily_df_copy = self._append_previous_days_to_df(daily_df_copy, config, self.ratio_period_days, self.day_table_name)
                    # Slide a ratio_period_days window over the daily data, summing each block.
                    for i in range(self.ratio_period_days - 1, len(daily_df_copy)):
                        start_idx = i - self.ratio_period_days + 1
                        end_idx = i + 1
                        day = daily_df_copy.index[i]
                        block_data = daily_df_copy.iloc[start_idx:end_idx].sum()
                        for j in range(len(pow_codes)):
                            pow_var_name = pow_codes.iloc[j]['variable_name']
                            pow_var_bound = pow_codes.iloc[j]['bound']
                            if block_data[pow_var_name] < block_data[tp_var_name] * pow_var_bound:
                                self._add_an_alarm(day, day + timedelta(1), pow_var_name, "Possible Heat Pump failure or outage.", False,
                                                   certainty='med')
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class HPWHOutlet(Alarm):
    """
    Detects low heat pump outlet temperature by checking if the outlet temperature falls below a threshold
    while the heat pump is running. The first 10 minutes after each HP turn-on are excluded as a warmup
    period. An alarm triggers if the temperature stays below the threshold for `fault_time` consecutive
    minutes after the warmup period.

    VarNames syntax:
        HPOUTLT_POW_[OPTIONAL ID]:### - Indicates a power variable for the heat pump. ### is the power threshold (default 1.0) above which
            the heat pump is considered 'on'.
        HPOUTLT_T_[OPTIONAL ID]:### - Indicates heat pump outlet temperature variable. ### is the temperature threshold (default 140.0)
            that should always be exceeded while the heat pump is on after the 10-minute warmup period.

    Parameters
    ----------
    default_power_threshold : float
        Default power threshold for POW alarm codes when no custom bound is specified (default 1.0). Heat pump is considered 'on'
        when power exceeds this value.
    default_temp_threshold : float
        Default temperature threshold for T alarm codes when no custom bound is specified (default 140.0). Alarm triggers when
        temperature falls BELOW this value while heat pump is on (after warmup period).
    fault_time : int
        Number of consecutive minutes that temperature must be below threshold (after warmup) before triggering an alarm (default 5).
    """
    def __init__(self, bounds_df : pd.DataFrame, default_power_threshold : float = 1.0, default_temp_threshold : float = 140.0, fault_time : int = 5):
        alarm_tag = 'HPOUTLT'
        type_default_dict = {
            'POW' : default_power_threshold,
            'T' : default_temp_threshold
        }
        self.fault_time = fault_time
        super().__init__(bounds_df, alarm_tag, type_default_dict, two_part_tag = True, alarm_db_type='HPWH_OUTLET')

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """Check each alarm-code id, day by day, for sustained low outlet temperature while the HP is on (post-warmup)."""
        for alarm_id in self.bounds_df['alarm_code_id'].unique():
            id_group = self.bounds_df[self.bounds_df['alarm_code_id'] == alarm_id]
            pow_codes = id_group[id_group['alarm_code_type'] == 'POW']
            t_codes = id_group[id_group['alarm_code_type'] == 'T']
            # BUG FIX: validate before iloc[0]; an empty code group previously raised a
            # bare IndexError instead of this descriptive error, and the message
            # referred to "balancing valve" (copy-paste from another alarm class).
            if len(t_codes) != 1 or len(pow_codes) != 1:
                raise Exception(f"Improper alarm codes for heat pump outlet with id {alarm_id}")
            pow_var_name = pow_codes.iloc[0]['variable_name']
            pow_thresh = pow_codes.iloc[0]['bound']
            t_var_name = t_codes.iloc[0]['variable_name']
            t_pretty_name = t_codes.iloc[0]['pretty_name']
            t_thresh = t_codes.iloc[0]['bound']
            # Columns do not vary by day, so check once per id rather than per day.
            if not (pow_var_name in df.columns and t_var_name in df.columns):
                continue
            for day in daily_df.index:
                next_day = day + pd.Timedelta(days=1)
                filtered_df = df.loc[(df.index >= day) & (df.index < next_day)]
                power_mask = filtered_df[pow_var_name] > pow_thresh
                temp_mask = filtered_df[t_var_name] < t_thresh

                # Exclude first 10 minutes after each HP turn-on (warmup period)
                warmup_minutes = 10
                mask_changes = power_mask != power_mask.shift(1)
                run_groups = mask_changes.cumsum()
                # Position (1-based) of each sample within its on/off run.
                cumcount_in_run = power_mask.groupby(run_groups).cumcount() + 1
                past_warmup_mask = power_mask & (cumcount_in_run > warmup_minutes)

                combined_mask = past_warmup_mask & temp_mask

                # Check for fault_time consecutive minutes
                consecutive_condition = combined_mask.rolling(window=self.fault_time).min() == 1
                if consecutive_condition.any():
                    # Find all consecutive groups where condition is true
                    group = (consecutive_condition != consecutive_condition.shift()).cumsum()
                    for group_id in consecutive_condition.groupby(group).first()[lambda x: x].index:
                        streak_indices = consecutive_condition[group == group_id].index
                        # First fault_time-1 minutes are consumed by the rolling window.
                        start_time = streak_indices[0] - pd.Timedelta(minutes=self.fault_time - 1)
                        end_time = streak_indices[-1]
                        streak_length = len(streak_indices) + self.fault_time - 1
                        actual_temp = filtered_df.loc[streak_indices[0], t_var_name]
                        self._add_an_alarm(start_time, end_time, t_var_name,
                                           f"Low heat pump outlet temperature: {t_pretty_name} was {actual_temp:.1f} F (below {t_thresh:.1f} F) for {streak_length} minutes while HP was ON starting at {start_time}.")
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class LSInconsist(Alarm):
    """
    Detects when reported loadshift mode does not match its expected value during a load shifting event.
    An alarm is triggered if the variable value does not equal the expected value during the
    time periods defined in the load shifting schedule for that mode.

    VarNames syntax:
        SOOSCHD_[mode]:### - Indicates a variable that should equal ### during [mode] load shifting events.
            [mode] can be: normal, loadUp, shed, criticalPeak, gridEmergency, advLoadUp
            ### is the expected value (e.g., SOOSCHD_loadUp:1 means the variable should be 1 during loadUp events)
    """
    def __init__(self, bounds_df : pd.DataFrame):
        alarm_tag = 'SOOSCHD'
        type_default_dict = {}
        super().__init__(bounds_df, alarm_tag, type_default_dict, two_part_tag=True, alarm_db_type='LS_INCONSIST')

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """Compare each configured variable against its expected value inside (or, for 'normal', outside) load shifting events."""
        ls_df = config.get_ls_df()
        if ls_df.empty:
            return # no load shifting events to check

        valid_modes = ['loadUp', 'shed', 'criticalPeak', 'gridEmergency', 'advLoadUp']

        for _, row in self.bounds_df.iterrows():
            mode = row['alarm_code_type']
            if mode not in valid_modes and mode != 'normal':
                continue

            var_name = row['variable_name']
            pretty_name = row['pretty_name']
            expected_value = row['bound']

            if var_name not in df.columns:
                continue

            for day in daily_df.index:
                next_day = day + pd.Timedelta(days=1)
                filtered_df = df.loc[(df.index >= day) & (df.index < next_day)]

                if filtered_df.empty:
                    continue

                if mode == 'normal':
                    # For 'normal' mode, check periods NOT covered by any load shifting events
                    mask = pd.Series(True, index=filtered_df.index)
                    for _, event_row in ls_df.iterrows():
                        event_start = event_row['startDateTime']
                        event_end = event_row['endDateTime']
                        mask &= ~((filtered_df.index >= event_start) & (filtered_df.index < event_end))
                    normal_df = filtered_df[mask]

                    if normal_df.empty:
                        continue

                    self._report_mismatch_streaks(normal_df, var_name, pretty_name, expected_value,
                                                  "during normal operation")
                else:
                    # For load shifting modes, check periods covered by those specific events
                    mode_events = ls_df[ls_df['event'] == mode]
                    if mode_events.empty:
                        continue

                    # Check each load shifting event for this mode on this day
                    for _, event_row in mode_events.iterrows():
                        event_start = event_row['startDateTime']
                        event_end = event_row['endDateTime']

                        # Filter for data during this event
                        event_df = filtered_df.loc[(filtered_df.index >= event_start) & (filtered_df.index < event_end)]

                        if event_df.empty:
                            continue

                        self._report_mismatch_streaks(event_df, var_name, pretty_name, expected_value,
                                                      f"during {mode} event")

    def _report_mismatch_streaks(self, period_df : pd.DataFrame, var_name : str, pretty_name : str, expected_value, context : str):
        """Add one alarm per consecutive streak where var_name differs from expected_value within period_df.

        Extracted helper: this streak detection was previously duplicated verbatim
        in both the 'normal' and per-event branches. `context` is the phrase
        inserted into the alarm message (e.g. "during normal operation").
        """
        mismatch_mask = period_df[var_name] != expected_value

        if not mismatch_mask.any():
            return

        # Find all consecutive streaks of mismatches
        group = (mismatch_mask != mismatch_mask.shift()).cumsum()

        for group_id in mismatch_mask.groupby(group).first()[lambda x: x].index:
            streak_indices = mismatch_mask[group == group_id].index
            start_time = streak_indices[0]
            end_time = streak_indices[-1]
            streak_length = len(streak_indices)
            actual_value = period_df.loc[start_time, var_name]

            self._add_an_alarm(start_time, end_time, var_name,
                               f"Load shift mode inconsistency: {pretty_name} was {actual_value} for {streak_length} minutes starting at {start_time} {context} (expected {expected_value}).")
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class PowerRatio(Alarm):
    """
    Detects when power variables fall outside their expected ratio of total power over a rolling period.
    Variables are grouped by alarm ID, and each variable's ratio is checked against its expected low-high
    range as a percentage of the group total.

    VarNames syntax:
        POWRRAT_[ID]:###-### - Power variable to track. [ID] groups variables together for ratio calculation.
            ###-### is the expected low-high percentage range (e.g., PR_HPWH:60-80 means this variable
            should account for 60-80% of the HPWH group total).

    Parameters
    ----------
    day_table_name : str
        Name of the site's daily aggregate value table in the database for fetching historical data.
    ratio_period_days : int
        Number of days to use for the rolling power ratio calculation (default 7). Each block sums
        the values over this many days before calculating ratios.
    """
    def __init__(self, bounds_df : pd.DataFrame, day_table_name : str, ratio_period_days : int = 7):
        alarm_tag = 'POWRRAT'
        type_default_dict = {}
        self.ratio_period_days = ratio_period_days
        self.day_table_name = day_table_name # TODO this could be a security issue. Swap it for config manager
        super().__init__(bounds_df, alarm_tag, type_default_dict, two_part_tag = False, range_bounds=True, alarm_db_type='POWER_RATIO', daily_only=True)

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """Sum daily data into ratio_period_days blocks and alarm when a variable's share of its group total is out of range."""
        daily_df_copy = daily_df.copy()
        if self.ratio_period_days > 1:
            # Pull in enough history so the first day of daily_df has a full block behind it.
            daily_df_copy = self._append_previous_days_to_df(daily_df_copy, config, self.ratio_period_days, self.day_table_name)
        elif self.ratio_period_days < 1:
            # BUG FIX: the old message said "must be more than 1", but a period of
            # exactly 1 (single-day blocks) is accepted by this guard; only < 1 is rejected.
            print("power ratio alarm period, ratio_period_days, must be at least 1")
            return

        # Create blocks of ratio_period_days
        blocks_df = self._create_period_blocks(daily_df_copy)

        if blocks_df.empty:
            print("No complete blocks available for analysis")
            return

        for alarm_id in self.bounds_df['alarm_code_id'].unique():
            # Calculate total for each block; missing variables contribute 0.
            var_list = self.bounds_df[self.bounds_df['alarm_code_id'] == alarm_id]['variable_name'].unique()
            for var in var_list:
                if var not in blocks_df.columns:
                    blocks_df[var] = 0.0

            blocks_df[alarm_id] = blocks_df[var_list].sum(axis=1)
            for variable in var_list:
                # Calculate ratio for each block. If a block's group total is 0 the
                # ratio is NaN/inf; NaN fails both comparisons below, so no alarm fires.
                blocks_df[f"{variable}_{alarm_id}"] = (blocks_df[variable]/blocks_df[alarm_id]) * 100

                # Get bounds from bounds_df for this variable and alarm_id
                var_row = self.bounds_df[(self.bounds_df['variable_name'] == variable) & (self.bounds_df['alarm_code_id'] == alarm_id)]
                if var_row.empty:
                    continue
                low_bound = var_row.iloc[0]['bound']
                high_bound = var_row.iloc[0]['bound2']
                pretty_name = var_row.iloc[0]['pretty_name']

                alarm_blocks_df = blocks_df.loc[(blocks_df[f"{variable}_{alarm_id}"] < low_bound) | (blocks_df[f"{variable}_{alarm_id}"] > high_bound)]
                if not alarm_blocks_df.empty:
                    for block_end_date, values in alarm_blocks_df.iterrows():
                        block_start_date = block_end_date - timedelta(days=self.ratio_period_days - 1)
                        actual_ratio = values[f'{variable}_{alarm_id}']
                        self._add_an_alarm(block_start_date, block_end_date + timedelta(1), variable,
                                           f"Power ratio alarm ({self.ratio_period_days}-day block ending {block_end_date.strftime('%Y-%m-%d')}): {pretty_name} accounted for {actual_ratio:.1f}% of {alarm_id} energy use. {low_bound:.1f}-{high_bound:.1f}% expected.", add_one_minute_to_end=False)

    def _create_period_blocks(self, daily_df: pd.DataFrame, verbose: bool = False) -> pd.DataFrame:
        """
        Create blocks of ratio_period_days by summing values within each block.
        Each block will be represented by its end date.
        """
        if len(daily_df) < self.ratio_period_days:
            if verbose:
                print(f"Not enough data for {self.ratio_period_days}-day blocks. Need at least {self.ratio_period_days} days but only have {len(daily_df)} days in data")
            return pd.DataFrame()

        blocks = []
        block_dates = []

        # Create blocks by summing consecutive groups of ratio_period_days
        for i in range(self.ratio_period_days - 1, len(daily_df)):
            start_idx = i - self.ratio_period_days + 1
            end_idx = i + 1

            block_data = daily_df.iloc[start_idx:end_idx].sum()
            blocks.append(block_data)
            # Use the end date of the block as the identifier
            block_dates.append(daily_df.index[i])

        if not blocks:
            return pd.DataFrame()

        blocks_df = pd.DataFrame(blocks, index=block_dates)

        if verbose:
            print(f"Created {len(blocks_df)} blocks of {self.ratio_period_days} days each")
            print(f"Block date range: {blocks_df.index.min()} to {blocks_df.index.max()}")

        return blocks_df
|