ecopipeline 1.0.5__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ecopipeline/event_tracking/Alarm.py +317 -0
- ecopipeline/event_tracking/__init__.py +18 -1
- ecopipeline/event_tracking/alarms/AbnormalCOP.py +76 -0
- ecopipeline/event_tracking/alarms/BackupUse.py +94 -0
- ecopipeline/event_tracking/alarms/BalancingValve.py +78 -0
- ecopipeline/event_tracking/alarms/BlownFuse.py +72 -0
- ecopipeline/event_tracking/alarms/Boundary.py +90 -0
- ecopipeline/event_tracking/alarms/HPWHInlet.py +73 -0
- ecopipeline/event_tracking/alarms/HPWHOutage.py +96 -0
- ecopipeline/event_tracking/alarms/HPWHOutlet.py +85 -0
- ecopipeline/event_tracking/alarms/LSInconsist.py +114 -0
- ecopipeline/event_tracking/alarms/PowerRatio.py +111 -0
- ecopipeline/event_tracking/alarms/SOOChange.py +127 -0
- ecopipeline/event_tracking/alarms/ShortCycle.py +59 -0
- ecopipeline/event_tracking/alarms/TMSetpoint.py +127 -0
- ecopipeline/event_tracking/alarms/TempRange.py +84 -0
- ecopipeline/event_tracking/alarms/__init__.py +0 -0
- ecopipeline/event_tracking/event_tracking.py +119 -1177
- ecopipeline/extract/extract.py +51 -0
- ecopipeline/extract/zip_to_lat_long.csv +41490 -0
- ecopipeline/load/__init__.py +2 -2
- ecopipeline/load/load.py +304 -3
- ecopipeline/utils/ConfigManager.py +30 -0
- {ecopipeline-1.0.5.dist-info → ecopipeline-1.1.1.dist-info}/METADATA +1 -1
- ecopipeline-1.1.1.dist-info/RECORD +42 -0
- {ecopipeline-1.0.5.dist-info → ecopipeline-1.1.1.dist-info}/WHEEL +1 -1
- ecopipeline-1.0.5.dist-info/RECORD +0 -25
- {ecopipeline-1.0.5.dist-info → ecopipeline-1.1.1.dist-info}/licenses/LICENSE +0 -0
- {ecopipeline-1.0.5.dist-info → ecopipeline-1.1.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,317 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
|
|
9
|
+
class Alarm:
    """
    Base class for event-tracking alarms.

    Subclasses override ``specific_alarm_function`` to inspect minute and/or
    daily data and record events via ``_add_an_alarm``. Alarm configuration is
    read from a bounds dataframe (typically Variable_Names.csv) whose
    "alarm_codes" column holds semicolon-separated codes of the form
    [TAG]_[TYPE]_[OPTIONAL_ID]:[BOUND].

    Parameters
    ----------
    bounds_df : pd.DataFrame
        Must contain "variable_name" and "alarm_codes" columns. An optional
        "pretty_name" column supplies display names (falls back to
        variable_name where missing).
    alarm_tag : str
        Tag prefix (e.g. 'IMBCKUP') identifying this alarm's codes.
    type_default_dict : dict
        Maps alarm code TYPE -> default bound (or a (low, high) pair when
        ``range_bounds`` is True) used when a code carries no explicit bound.
    two_part_tag : bool
        True when codes are TAG_TYPE[_ID]; False when codes are TAG[_ID].
    range_bounds : bool
        True when the bound portion is a 'low-high' range instead of a single
        number.
    alarm_db_type : str
        Value written to the 'alarm_type' column of emitted events.
    daily_only : bool
        True when the alarm only inspects daily data.
    """
    def __init__(self, bounds_df : pd.DataFrame, alarm_tag : str = None, type_default_dict : dict = None,
                 two_part_tag : bool = True, range_bounds : bool = False, alarm_db_type : str = 'SILENT_ALARM',
                 daily_only : bool = False):
        # None sentinel instead of a mutable {} default: the previous default
        # was a single dict object shared across every instantiation.
        if type_default_dict is None:
            type_default_dict = {}
        self.daily_only = daily_only
        self.alarm_tag = alarm_tag
        self.two_part_tag = two_part_tag
        self.range_bounds = range_bounds
        self.type_default_dict = type_default_dict
        self.alarm_db_type = alarm_db_type
        # Parallel lists accumulated by _add_an_alarm; one entry per event.
        self.triggered_alarms = {
            'start_time_pt' : [],
            'end_time_pt' : [],
            'alarm_type' : [],
            'event_detail' : [],
            'variable_name' : [],
            'certainty' : []
        }
        self.bounds_df = self._process_bounds_df_alarm_codes(bounds_df)

    def find_alarms(self, df: pd.DataFrame, daily_data : pd.DataFrame, config : 'ConfigManager') -> pd.DataFrame:
        """
        Parameters
        ----------
        df: pd.DataFrame
            Post-transformed dataframe for minute data. It should be noted that this function expects consecutive, in order minutes. If minutes
            are out of order or have gaps, the function may return erroneous alarms.
        daily_data: pd.DataFrame
            Post-transformed dataframe for daily data.
        config : ecopipeline.ConfigManager
            The ConfigManager object that holds configuration data for the pipeline. Among other things, this object will point to a file
            called Variable_Names.csv in the input folder of the pipeline (e.g. "full/path/to/pipeline/input/Variable_Names.csv").
            The file must have at least two columns which must be titled "variable_name" and "alarm_codes" which should contain the
            name of each variable in the dataframe that requires alarming and the appropriate alarm codes.

        Returns
        -------
        pd.DataFrame:
            Pandas dataframe with alarm events
        """
        if self.bounds_df.empty:
            return self._convert_silent_alarm_dict_to_df({}) # no alarms to look into
        if self.daily_only:
            if daily_data.empty:
                print(f"cannot flag {self.alarm_tag} alarms. Dataframe is empty")
                return pd.DataFrame()
        elif df.empty:
            print(f"cannot flag {self.alarm_tag} alarms. Dataframe is empty")
            return pd.DataFrame()
        self.specific_alarm_function(df, daily_data, config)
        return self._convert_silent_alarm_dict_to_df(self.triggered_alarms)

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : 'ConfigManager'):
        """Hook for subclasses: inspect data and populate self.triggered_alarms.

        The base implementation records nothing (empty dict yields an empty
        event dataframe from find_alarms).
        """
        self.triggered_alarms = {}

    def _add_an_alarm(self, start_time : datetime, end_time : datetime, var_name : str, alarm_string : str, add_one_minute_to_end : bool = True, certainty : str = "high"):
        """Append one alarm event to self.triggered_alarms.

        certainty is one of "high"/"med"/"low" and is stored as 3/2/1.
        add_one_minute_to_end extends minute-resolution events to cover the
        full final minute.
        """
        certainty_dict = {
            "high" : 3,
            "med" : 2,
            "low" : 1
        }
        if certainty not in certainty_dict.keys():
            raise Exception(f"{certainty} is not a valid certainty key. Valid keys are {certainty_dict.keys()}")
        else:
            certainty = certainty_dict[certainty]
        if add_one_minute_to_end:
            end_time = end_time + timedelta(minutes=1)
        self.triggered_alarms['start_time_pt'].append(start_time)
        self.triggered_alarms['end_time_pt'].append(end_time)
        self.triggered_alarms['alarm_type'].append(self.alarm_db_type)
        self.triggered_alarms['event_detail'].append(alarm_string)
        self.triggered_alarms['variable_name'].append(var_name)
        self.triggered_alarms['certainty'].append(certainty)

    def _convert_silent_alarm_dict_to_df(self, alarm_dict : dict) -> pd.DataFrame:
        """Convert the accumulated alarm dict into a compressed event dataframe."""
        alarm_df = pd.DataFrame(alarm_dict)
        alarm_df = self._compress_alarm_df(alarm_df)
        return alarm_df

    def _compress_alarm_df(self, alarm_df: pd.DataFrame) -> pd.DataFrame:
        """
        Compresses consecutive alarms of the same variable_name and alarm_type into single rows.
        If one alarm's start_time_pt is within one minute of another alarm's end_time_pt,
        they are merged into one row with the earliest start_time_pt and latest end_time_pt.
        Overlapping events of different certainty are split so that the
        higher-certainty interval is preserved intact.

        Parameters
        ----------
        alarm_df : pd.DataFrame
            DataFrame with columns: start_time_pt, end_time_pt, alarm_type, variable_name, event_detail

        Returns
        -------
        pd.DataFrame
            Compressed DataFrame with consecutive alarms merged
        """
        if alarm_df.empty:
            return alarm_df

        # Sort entire DataFrame by start_time_pt before processing
        alarm_df = alarm_df.sort_values('start_time_pt').reset_index(drop=True)

        compressed_rows = []

        # Group by variable_name and alarm_type
        for (var_name, alarm_type), group in alarm_df.groupby(['variable_name', 'alarm_type'], sort=False):
            # Group is already sorted since we sorted the whole DataFrame above
            group = group.reset_index(drop=True)

            # Accumulator for the interval currently being merged.
            current_start = None
            current_end = None
            current_detail = None
            current_certainty = None

            for _, row in group.iterrows():
                row_start = row['start_time_pt']
                row_end = row['end_time_pt']

                if current_start is None:
                    # First row in group
                    current_start = row_start
                    current_end = row_end
                    current_detail = row['event_detail']
                    current_certainty = row['certainty']
                elif row_start <= current_end + timedelta(minutes=1):
                    # This row is within 1 minute of current end - merge it after checking
                    row_certainty = row['certainty']
                    if row_certainty > current_certainty:
                        # Higher certainty wins: emit the lower-certainty prefix
                        # (if any) before adopting/emitting the new interval.
                        if row_start > current_start:
                            compressed_rows.append({
                                'start_time_pt': current_start,
                                'end_time_pt': row_start,
                                'alarm_type': alarm_type,
                                'event_detail': current_detail,
                                'variable_name': var_name,
                                'certainty': current_certainty
                            })
                        if row_end >= current_end:
                            current_start = row_start
                            current_end = row_end
                            current_detail = row['event_detail']
                            current_certainty = row_certainty
                        else:
                            # encompassed: emit the higher-certainty interval now and
                            # keep merging the remaining lower-certainty tail.
                            compressed_rows.append({
                                'start_time_pt': row_start,
                                'end_time_pt': row_end,
                                'alarm_type': alarm_type,
                                'event_detail': row['event_detail'],
                                'variable_name': var_name,
                                'certainty': row_certainty
                            })
                            current_start = row_end

                    elif row_certainty < current_certainty:
                        # Lower certainty only contributes the portion that extends
                        # past the current (higher certainty) interval.
                        if row_end > current_end:
                            compressed_rows.append({
                                'start_time_pt': current_start,
                                'end_time_pt': current_end,
                                'alarm_type': alarm_type,
                                'event_detail': current_detail,
                                'variable_name': var_name,
                                'certainty': current_certainty
                            })
                            current_start = current_end
                            current_end = row_end
                            current_detail = row['event_detail']
                            current_certainty = row_certainty

                    else:
                        # Same certainty: simple extension.
                        current_end = max(current_end, row_end)
                else:
                    # Gap is more than 1 minute - save current and start new
                    compressed_rows.append({
                        'start_time_pt': current_start,
                        'end_time_pt': current_end,
                        'alarm_type': alarm_type,
                        'event_detail': current_detail,
                        'variable_name': var_name,
                        'certainty': current_certainty
                    })
                    current_start = row_start
                    current_end = row_end
                    current_detail = row['event_detail']
                    current_certainty = row['certainty']

            # Don't forget the last accumulated row
            if current_start is not None:
                compressed_rows.append({
                    'start_time_pt': current_start,
                    'end_time_pt': current_end,
                    'alarm_type': alarm_type,
                    'event_detail': current_detail,
                    'variable_name': var_name,
                    'certainty': current_certainty
                })
        return pd.DataFrame(compressed_rows)

    def _process_bounds_df_alarm_codes(self, og_bounds_df : pd.DataFrame) -> pd.DataFrame:
        """Extract this alarm's codes from the bounds dataframe.

        Returns one row per matching alarm code with parsed 'alarm_code_type',
        'alarm_code_id', and numeric 'bound' (and 'bound2' when range_bounds)
        columns, or an empty DataFrame when nothing matches.
        """
        # Should only do for alarm codes of format: [TAG]_[TYPE]_[OPTIONAL_ID]:[BOUND]
        bounds_df = og_bounds_df.copy()
        required_columns = ["variable_name", "alarm_codes"]
        for required_column in required_columns:
            if not required_column in bounds_df.columns:
                raise Exception(f"{required_column} is not present in Variable_Names.csv")
        if not 'pretty_name' in bounds_df.columns:
            bounds_df['pretty_name'] = bounds_df['variable_name']
        else:
            bounds_df['pretty_name'] = bounds_df['pretty_name'].fillna(bounds_df['variable_name'])

        bounds_df = bounds_df.loc[:, ["variable_name", "alarm_codes", "pretty_name"]]
        bounds_df.dropna(axis=0, thresh=2, inplace=True)

        # Check if all alarm_codes are null or if dataframe is empty
        if bounds_df.empty or bounds_df['alarm_codes'].isna().all():
            return pd.DataFrame()

        # NOTE: alarm_tag is treated as a regex by str.contains; tags are
        # expected to be plain alphanumeric strings.
        bounds_df = bounds_df[bounds_df['alarm_codes'].str.contains(self.alarm_tag, na=False)]

        # Split alarm_codes by semicolons and create a row for each matching code
        expanded_rows = []
        for idx, row in bounds_df.iterrows():
            alarm_codes = str(row['alarm_codes']).split(';')
            tag_codes = [code.strip() for code in alarm_codes if code.strip().startswith(self.alarm_tag)]

            if tag_codes: # Only process if there are codes for this tag
                for tag_code in tag_codes:
                    new_row = row.copy()
                    if ":" in tag_code:
                        tag_parts = tag_code.split(':')
                        if len(tag_parts) > 2:
                            raise Exception(f"Improperly formatted alarm code : {tag_code}")
                        if self.range_bounds:
                            bounds = tag_parts[1]
                            bound_range = bounds.split('-')
                            if len(bound_range) != 2:
                                raise Exception(f"Improperly formatted alarm code : {tag_code}. Expected bound range in form '[number]-[number]' but received '{bounds}'.")
                            new_row['bound'] = bound_range[0]
                            new_row['bound2'] = bound_range[1]
                        else:
                            new_row['bound'] = tag_parts[1]
                        tag_code = tag_parts[0]
                    else:
                        new_row['bound'] = None
                        if self.range_bounds:
                            new_row['bound2'] = None
                    new_row['alarm_codes'] = tag_code

                    expanded_rows.append(new_row)

        if expanded_rows:
            bounds_df = pd.DataFrame(expanded_rows)
        else:
            return pd.DataFrame()# no tagged alarms to look into

        # Split TAG_TYPE_ID codes into (type, id) pairs.
        alarm_code_parts = []
        for idx, row in bounds_df.iterrows():
            parts = row['alarm_codes'].split('_')
            if self.two_part_tag:
                if len(parts) == 2:
                    alarm_code_parts.append([parts[1], "No ID"])
                elif len(parts) == 3:
                    alarm_code_parts.append([parts[1], parts[2]])
                else:
                    raise Exception(f"improper {self.alarm_tag} alarm code format for {row['variable_name']}")
            else:
                if len(parts) == 1:
                    alarm_code_parts.append(["default", "No ID"])
                elif len(parts) == 2:
                    alarm_code_parts.append(["default", parts[1]])
                else:
                    raise Exception(f"improper {self.alarm_tag} alarm code format for {row['variable_name']}")
        if alarm_code_parts:
            bounds_df[['alarm_code_type', 'alarm_code_id']] = pd.DataFrame(alarm_code_parts, index=bounds_df.index)

        # Replace None bounds with appropriate defaults based on alarm_code_type
        for idx, row in bounds_df.iterrows():
            if pd.isna(row['bound']) or row['bound'] is None:
                if row['alarm_code_type'] in self.type_default_dict.keys():
                    if self.range_bounds:
                        bounds_df.at[idx, 'bound'] = self.type_default_dict[row['alarm_code_type']][0]
                        bounds_df.at[idx, 'bound2'] = self.type_default_dict[row['alarm_code_type']][1]
                    else:
                        bounds_df.at[idx, 'bound'] = self.type_default_dict[row['alarm_code_type']]
        # Coerce bound column to float
        bounds_df['bound'] = pd.to_numeric(bounds_df['bound'], errors='coerce').astype(float)
        if self.range_bounds:
            bounds_df['bound2'] = pd.to_numeric(bounds_df['bound2'], errors='coerce').astype(float)

        return bounds_df

    def _append_previous_days_to_df(self, daily_df: pd.DataFrame, config : 'ConfigManager', ratio_period_days : int, day_table_name : str, primary_key : str = "time_pt") -> pd.DataFrame:
        """Prepend up to ratio_period_days of prior rows from the database.

        Returns daily_df unchanged (aside from sort) when the table has no
        data. NOTE(review): day_table_name/primary_key are interpolated into
        the SQL and must come from trusted configuration, not user input.
        """
        db_connection, cursor = config.connect_db()
        period_start = daily_df.index.min() - timedelta(ratio_period_days)
        try:
            # Timestamps are passed as bound parameters rather than being
            # formatted into the SQL string (identifiers cannot be
            # parameterized, so the table/column names remain interpolated).
            cursor.execute(
                f"SELECT * FROM {day_table_name} WHERE {primary_key} < %s AND {primary_key} >= %s",
                (daily_df.index.min(), period_start))
            result = cursor.fetchall()
            column_names = [desc[0] for desc in cursor.description]
            old_days_df = pd.DataFrame(result, columns=column_names)
            old_days_df = old_days_df.set_index(primary_key)
            daily_df = pd.concat([daily_df, old_days_df])
            daily_df = daily_df.sort_index(ascending=True)
        except mysqlerrors.Error:
            print(f"Table {day_table_name} has no data.")

        # Close the cursor before its owning connection (original closed the
        # connection first).
        cursor.close()
        db_connection.close()
        return daily_df
|
|
@@ -1,4 +1,21 @@
|
|
|
1
1
|
from .event_tracking import *
|
|
2
|
+
from .Alarm import Alarm
|
|
3
|
+
from .alarms.ShortCycle import ShortCycle
|
|
4
|
+
from .alarms.TempRange import TempRange
|
|
5
|
+
from .alarms.LSInconsist import LSInconsist
|
|
6
|
+
from .alarms.SOOChange import SOOChange
|
|
7
|
+
from .alarms.BlownFuse import BlownFuse
|
|
8
|
+
from .alarms.HPWHOutage import HPWHOutage
|
|
9
|
+
from .alarms.BackupUse import BackupUse
|
|
10
|
+
from .alarms.HPWHOutlet import HPWHOutlet
|
|
11
|
+
from .alarms.HPWHInlet import HPWHInlet
|
|
12
|
+
from .alarms.BalancingValve import BalancingValve
|
|
13
|
+
from .alarms.TMSetpoint import TMSetpoint
|
|
14
|
+
from .alarms.AbnormalCOP import AbnormalCOP
|
|
15
|
+
from .alarms.PowerRatio import PowerRatio
|
|
16
|
+
from .alarms.Boundary import Boundary
|
|
17
|
+
|
|
2
18
|
__all__ = ['central_alarm_df_creator','flag_boundary_alarms','power_ratio_alarm','flag_abnormal_COP','flag_high_tm_setpoint',
|
|
3
19
|
'flag_recirc_balance_valve','flag_hp_inlet_temp','flag_backup_use','flag_blown_fuse','flag_unexpected_soo_change','flag_shortcycle',
|
|
4
|
-
'flag_hp_outlet_temp','flag_HP_outage','flag_unexpected_temp','flag_ls_mode_inconsistancy'
|
|
20
|
+
'flag_hp_outlet_temp','flag_HP_outage','flag_unexpected_temp','flag_ls_mode_inconsistancy','Alarm','ShortCycle','TempRange','LSInconsist',
|
|
21
|
+
'SOOChange','BlownFuse','HPWHOutage','BackupUse','HPWHOutlet','HPWHInlet','BalancingValve','TMSetpoint','AbnormalCOP','PowerRatio','Boundary']
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class AbnormalCOP(Alarm):
    """
    Flags days on which a daily COP (coefficient of performance) value falls
    outside a configured [low_alarm, high_alarm] range.

    Unlike most alarms, this one does not use alarm codes: bounds come from
    optional "high_alarm" and "low_alarm" columns of Variable_Names.csv, and
    only columns of the daily dataframe whose names match COP* or SystemCOP*
    are checked. One full-day ABNORMAL_COP event is recorded per out-of-range
    column per day.

    Parameters
    ----------
    bounds_df : pd.DataFrame
        Must contain "variable_name"; optional "pretty_name", "high_alarm",
        and "low_alarm" columns.
    default_high_bound : float
        High bound used for rows with no "high_alarm" value (default 4.5).
    default_low_bound : float
        Low bound used for rows with no "low_alarm" value (default 0).
    """
    def __init__(self, bounds_df : pd.DataFrame, default_high_bound : float = 4.5, default_low_bound : float = 0):
        self.default_high_bound = default_high_bound
        self.default_low_bound = default_low_bound

        # alarm_tag is None because _process_bounds_df_alarm_codes is
        # overridden and never consults it; daily_only because COP is a
        # per-day metric.
        super().__init__(bounds_df, None, {}, alarm_db_type='ABNORMAL_COP', daily_only=True)

    def _process_bounds_df_alarm_codes(self, og_bounds_df : pd.DataFrame) -> pd.DataFrame:
        """Build a variable_name-indexed table of high/low COP bounds."""
        bounds_df = og_bounds_df.copy()
        if not "variable_name" in bounds_df.columns:
            raise Exception(f"variable_name is not present in Variable_Names.csv")
        if not 'pretty_name' in bounds_df.columns:
            bounds_df['pretty_name'] = bounds_df['variable_name']
        else:
            bounds_df['pretty_name'] = bounds_df['pretty_name'].fillna(bounds_df['variable_name'])
        if not 'high_alarm' in bounds_df.columns:
            bounds_df['high_alarm'] = self.default_high_bound
        else:
            bounds_df['high_alarm'] = bounds_df['high_alarm'].fillna(self.default_high_bound)
        if not 'low_alarm' in bounds_df.columns:
            bounds_df['low_alarm'] = self.default_low_bound
        else:
            bounds_df['low_alarm'] = bounds_df['low_alarm'].fillna(self.default_low_bound)

        bounds_df = bounds_df.loc[:, ["variable_name", "high_alarm", "low_alarm", "pretty_name"]]
        bounds_df.dropna(axis=0, thresh=2, inplace=True)
        bounds_df.set_index(['variable_name'], inplace=True)

        return bounds_df

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """Record a full-day event for every COP column/day outside its bounds."""
        cop_pattern = re.compile(r'^(COP\w*|SystemCOP\w*)$')
        cop_columns = [col for col in daily_df.columns if cop_pattern.match(col)]

        if not daily_df.empty and len(cop_columns) > 0:
            for bound_var, bounds in self.bounds_df.iterrows():
                if bound_var in cop_columns:
                    for day, day_values in daily_df.iterrows():
                        value = day_values[bound_var]
                        # pd.notna skips both None and NaN (NaN could never
                        # satisfy the comparisons anyway, but a bare None
                        # would raise a TypeError on comparison).
                        if pd.notna(value) and (value > bounds['high_alarm'] or value < bounds['low_alarm']):
                            alarm_str = f"Unexpected COP Value detected: {bounds['pretty_name']} = {round(value,2)}"
                            self._add_an_alarm(day, day + timedelta(1), bound_var, alarm_str, add_one_minute_to_end=False)
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class BackupUse(Alarm):
    """
    Flags improper use of back-up heating equipment.

    Two checks are performed for each day of daily data:
      * ST codes: the back-up setpoint variable must equal its configured
        value; an alarm fires when it differs for 10+ consecutive minutes.
      * TP + POW codes: the summed daily back-up power (POW variables) must
        stay below the configured fraction of total system power (TP
        variable); a medium-certainty alarm fires otherwise.

    VarNames syntax:
        IMBCKUP_POW_[ID] - Back up tank power variable. Must be in same power units as total system power.
        IMBCKUP_TP_[ID]:### - Total system power; alarms when back up power is more than ### (default 0.1, i.e. 10%) of usage.
        IMBCKUP_ST_[ID]:### - Back up setpoint that should not change at all from ### (default 130).

    Parameters
    ----------
    default_setpoint : float
        Default temperature setpoint in degrees for ST alarm codes when no custom bound is specified (default 130.0).
    default_power_ratio : float
        Default power ratio threshold (as a decimal) for TP alarm codes when
        no custom bound is specified (default 0.1, i.e. 10%).
    """
    def __init__(self, bounds_df : pd.DataFrame, default_setpoint : float = 130.0, default_power_ratio : float = 0.1):
        alarm_tag = 'IMBCKUP'
        # POW codes carry no bound of their own; they are summed and compared
        # against the TP variable scaled by its ratio bound.
        type_default_dict = {
            'POW': None,
            'TP': default_power_ratio,
            'ST': default_setpoint
        }
        super().__init__(bounds_df, alarm_tag, type_default_dict, alarm_db_type='BACKUP_USE')

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """Run the setpoint-change and power-ratio checks for every day/alarm id."""
        for day in daily_df.index:
            next_day = day + pd.Timedelta(days=1)
            # Minute data restricted to this day.
            filtered_df = df.loc[(df.index >= day) & (df.index < next_day)]
            for alarm_id in self.bounds_df['alarm_code_id'].unique():
                id_group = self.bounds_df[self.bounds_df['alarm_code_id'] == alarm_id]

                # Get POW, TP and ST alarm codes for this ID
                pow_codes = id_group[id_group['alarm_code_type'] == 'POW']
                tp_codes = id_group[id_group['alarm_code_type'] == 'TP']
                st_codes = id_group[id_group['alarm_code_type'] == 'ST']

                # At most one total-power variable is allowed per ID.
                if len(tp_codes) > 1:
                    raise Exception(f"Improper alarm codes for back up total power with id {alarm_id}")

                if len(st_codes) >= 1:
                    # Check each ST code against its individual bound
                    for idx, st_row in st_codes.iterrows():
                        st_var_name = st_row['variable_name']
                        st_pretty_name = st_row['pretty_name']
                        st_setpoint = st_row['bound']
                        # Check if st_var_name exists in filtered_df
                        if st_var_name in filtered_df.columns:
                            # Check if setpoint was altered for over 10 minutes
                            altered_mask = filtered_df[st_var_name] != st_setpoint
                            # rolling(10).min()==1 marks minutes ending a run of
                            # 10 consecutive altered readings.
                            consecutive_condition = altered_mask.rolling(window=10).min() == 1
                            if consecutive_condition.any():
                                # Find all consecutive groups where condition is true
                                group = (consecutive_condition != consecutive_condition.shift()).cumsum()
                                for group_id in consecutive_condition.groupby(group).first()[lambda x: x].index:
                                    streak_indices = consecutive_condition[group == group_id].index
                                    # Back up 9 minutes so the event covers the whole
                                    # altered run, not just where the window completed.
                                    start_time = streak_indices[0] - pd.Timedelta(minutes=9)
                                    end_time = streak_indices[-1]
                                    streak_length = len(streak_indices) + 9
                                    actual_value = filtered_df.loc[streak_indices[0], st_var_name]
                                    self._add_an_alarm(start_time, end_time, st_var_name,
                                        f"Swing tank setpoint was altered: {st_pretty_name} was {actual_value} for {streak_length} minutes starting at {start_time} (expected {st_setpoint}).")

                if len(tp_codes) == 1 and len(pow_codes) >= 1:
                    tp_var_name = tp_codes.iloc[0]['variable_name']
                    tp_bound = tp_codes.iloc[0]['bound']
                    if tp_var_name in daily_df.columns:
                        # Get list of back-up power variable names
                        bu_pow_names = pow_codes['variable_name'].tolist()

                        # Check if all back-up power variables exist in daily_df
                        if all(var in daily_df.columns for var in bu_pow_names):
                            # Sum all back-up power variables for this day
                            bu_pow_sum = daily_df.loc[day, bu_pow_names].sum()
                            tp_value = daily_df.loc[day, tp_var_name]

                            # Alarm when back-up power reaches the allowed fraction of total power
                            if bu_pow_sum >= tp_value*tp_bound:
                                self._add_an_alarm(day, day + timedelta(1), tp_var_name,
                                    f"Improper Back Up Use: Sum of back up equipment ({bu_pow_sum:.2f}) exceeds {(tp_bound * 100):.2f}% of total power.", certainty="med")
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class BalancingValve(Alarm):
    """
    Detects recirculation balance issues by comparing sum of ER (equipment recirculation) heater
    power to either total power or heating output.

    VarNames syntax:
        BALVALV_ER_[OPTIONAL ID] - Indicates a power variable for an ER heater (equipment recirculation).
            Multiple ER variables with the same ID will be summed together.
        BALVALV_TP_[OPTIONAL ID]:### - Indicates the Total Power of the system. Optional ### for the percentage
            threshold that should not be crossed by the ER elements (default 0.4 for 40%).
            Alarm triggers when sum of ER >= total_power * threshold.
        BALVALV_OUT_[OPTIONAL ID] - Indicates the heating output variable the ER heating contributes to.
            Alarm triggers when sum of ER > sum of OUT.
            Multiple OUT variables with the same ID will be summed together.

    Note: Each alarm ID requires at least one ER code AND either one TP code OR at least one OUT code.
    If a TP code exists for an ID (and its variable is present in the daily data), it takes
    precedence over OUT codes.

    Parameters
    ----------
    default_power_ratio : float
        Default power ratio threshold (as decimal, e.g., 0.4 for 40%) for TP alarm codes when no custom bound is specified (default 0.4).
    """
    def __init__(self, bounds_df : pd.DataFrame, default_power_ratio : float = 0.4):
        alarm_tag = 'BALVALV'
        type_default_dict = {'TP' : default_power_ratio}
        super().__init__(bounds_df, alarm_tag, type_default_dict, two_part_tag = True, alarm_db_type='BALANCING_VALVE', daily_only=True)

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """Check each alarm id's ER power sum against its TP threshold or OUT sum per day."""
        for alarm_id in self.bounds_df['alarm_code_id'].unique():
            id_group = self.bounds_df[self.bounds_df['alarm_code_id'] == alarm_id]
            out_codes = id_group[id_group['alarm_code_type'] == 'OUT']
            tp_codes = id_group[id_group['alarm_code_type'] == 'TP']
            er_codes = id_group[id_group['alarm_code_type'] == 'ER']
            # Every id needs ER variables plus either one TP or some OUT variables.
            if len(er_codes) < 1 or (len(out_codes) < 1 and len(tp_codes) != 1):
                raise Exception(f"Improper alarm codes for balancing valve with id {alarm_id}")
            er_var_names = er_codes['variable_name'].tolist()
            if len(tp_codes) == 1 and tp_codes.iloc[0]['variable_name'] in daily_df.columns:
                tp_var_name = tp_codes.iloc[0]['variable_name']
                tp_bound = tp_codes.iloc[0]['bound']
                for day in daily_df.index:

                    # Check if all ER variables exist in daily_df
                    if all(var in daily_df.columns for var in er_var_names):
                        # Sum all ER variables for this day
                        er_sum = daily_df.loc[day, er_var_names].sum()
                        tp_value = daily_df.loc[day, tp_var_name]

                        # Alarm when ER power reaches the allowed fraction of total power
                        if er_sum >= tp_value*tp_bound:
                            self._add_an_alarm(day, day + timedelta(1), tp_var_name,
                                f"Recirculation imbalance: Sum of recirculation equipment ({er_sum:.2f}) exceeds or equals {(tp_bound * 100):.2f}% of total power.",
                                add_one_minute_to_end=False, certainty="low")
            elif len(out_codes) >= 1:
                out_var_names = out_codes['variable_name'].tolist()
                for day in daily_df.index:

                    # Check if all ER and OUT variables exist in daily_df
                    if all(var in daily_df.columns for var in er_var_names) and all(var in daily_df.columns for var in out_var_names):
                        # Sum all ER and OUT variables for this day
                        er_sum = daily_df.loc[day, er_var_names].sum()
                        out_sum = daily_df.loc[day, out_var_names].sum()

                        # Alarm when ER power exceeds the heating output it feeds
                        if er_sum > out_sum:
                            self._add_an_alarm(day, day + timedelta(1), out_codes.iloc[0]['variable_name'],
                                f"Recirculation imbalance: Sum of recirculation equipment power ({er_sum:.2f} kW) exceeds TM heating output ({out_sum:.2f} kW).",
                                add_one_minute_to_end=False, certainty="low")
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import numpy as np
|
|
3
|
+
import datetime as datetime
|
|
4
|
+
from ecopipeline import ConfigManager
|
|
5
|
+
import re
|
|
6
|
+
import mysql.connector.errors as mysqlerrors
|
|
7
|
+
from datetime import timedelta
|
|
8
|
+
from ecopipeline.event_tracking.Alarm import Alarm
|
|
9
|
+
|
|
10
|
+
class BlownFuse(Alarm):
    """
    Detects blown fuse alarms for heating elements by identifying when an element is drawing power
    but significantly less than expected, which may indicate a blown fuse.

    VarNames syntax:
    BLWNFSE_[OPTIONAL ID]:### - Indicates a blown fuse alarm for an element. ### is the expected kW input when the element is on.

    Parameters
    ----------
    default_power_threshold : float
        Power threshold to determine if the element is "on" (default 1.0). Element is considered on when power exceeds this value.
    default_power_range : float
        Allowable variance below the expected power draw (default 2.0). An alarm triggers when the actual power draw is less than
        (expected_power_draw - default_power_range) while the element is on.
    default_power_draw : float
        Default expected power draw in kW when no custom bound is specified in the alarm code (default 30).
    fault_time : int
        Number of consecutive minutes that the fault condition must persist before triggering an alarm (default 3).
    """
    def __init__(self, bounds_df : pd.DataFrame, default_power_threshold : float = 1.0, default_power_range : float = 2.0, default_power_draw : float = 30, fault_time : int = 3):
        alarm_tag = 'BLWNFSE'
        # 'default' bound used when an alarm code carries no explicit ### value
        type_default_dict = {'default' : default_power_draw}
        self.default_power_threshold = default_power_threshold
        self.default_power_range = default_power_range
        self.fault_time = fault_time
        super().__init__(bounds_df, alarm_tag,type_default_dict, two_part_tag = False, alarm_db_type='BLOWN_FUSE')

    def specific_alarm_function(self, df: pd.DataFrame, daily_df : pd.DataFrame, config : ConfigManager):
        """
        Scan minute-level data for each configured element and raise a blown-fuse alarm
        whenever the element is "on" (power above default_power_threshold) yet drawing less
        than (expected_power_draw - default_power_range) for at least fault_time consecutive
        minutes. One alarm is added per qualifying streak, spanning the full streak.

        Parameters
        ----------
        df : pd.DataFrame
            Minute-level sensor data indexed by timestamp (assumed 1-minute spacing —
            the streak start is back-dated by fault_time-1 minutes).
        daily_df : pd.DataFrame
            Daily data whose index provides the days to scan.
        config : ConfigManager
            Pipeline configuration (unused here; part of the Alarm interface).

        Raises
        ------
        Exception
            If a variable has other than exactly one blown fuse alarm code configured.
        """
        for var_name in self.bounds_df['variable_name'].unique():
            # The alarm-code lookup is per-variable, not per-day: hoist it out of the
            # day loop and validate BEFORE touching iloc[0] so a missing/duplicate
            # code raises the intended message rather than a raw IndexError.
            rows = self.bounds_df[self.bounds_df['variable_name'] == var_name]
            if len(rows) != 1:
                raise Exception(f"Multiple blown fuse alarm codes for {var_name}")
            expected_power_draw = rows.iloc[0]['bound']
            for day in daily_df.index:
                next_day = day + pd.Timedelta(days=1)
                filtered_df = df.loc[(df.index >= day) & (df.index < next_day)]
                if var_name in filtered_df.columns:
                    # Fault condition: element is on but power draw is below the
                    # expected draw minus the allowed range.
                    power_on_mask = filtered_df[var_name] > self.default_power_threshold
                    unexpected_power_mask = filtered_df[var_name] < expected_power_draw - self.default_power_range
                    combined_mask = power_on_mask & unexpected_power_mask

                    # True only where the fault has held for fault_time consecutive minutes
                    # (rolling min over the boolean mask is 1 iff the whole window is True).
                    consecutive_condition = combined_mask.rolling(window=self.fault_time).min() == 1
                    if consecutive_condition.any():
                        # Label alternating True/False runs; each change of value starts a new group.
                        group = (consecutive_condition != consecutive_condition.shift()).cumsum()

                        # Iterate only the True runs and add one alarm per streak.
                        for group_id in consecutive_condition.groupby(group).first()[lambda x: x].index:
                            streak_indices = consecutive_condition[group == group_id].index
                            # Back-date the start so the alarm covers the whole fault window,
                            # not just the minute the rolling condition first fired.
                            start_time = streak_indices[0] - pd.Timedelta(minutes=self.fault_time-1)
                            end_time = streak_indices[-1]

                            self._add_an_alarm(start_time, end_time, var_name,
                                            f"Blown Fuse: {var_name} had a power draw less than {expected_power_draw - self.default_power_range:.1f} while element was ON starting at {start_time}.",
                                            certainty="high")
|