tsadmetrics 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- entorno/bin/activate_this.py +32 -0
- entorno/bin/rst2html.py +23 -0
- entorno/bin/rst2html4.py +26 -0
- entorno/bin/rst2html5.py +33 -0
- entorno/bin/rst2latex.py +26 -0
- entorno/bin/rst2man.py +27 -0
- entorno/bin/rst2odt.py +28 -0
- entorno/bin/rst2odt_prepstyles.py +20 -0
- entorno/bin/rst2pseudoxml.py +23 -0
- entorno/bin/rst2s5.py +24 -0
- entorno/bin/rst2xetex.py +27 -0
- entorno/bin/rst2xml.py +23 -0
- entorno/bin/rstpep2html.py +25 -0
- experiments/scripts/compute_metrics.py +187 -0
- experiments/scripts/metrics_complexity_analysis.py +109 -0
- experiments/scripts/metro_experiment.py +133 -0
- experiments/scripts/opt_metro_experiment.py +343 -0
- tests/__init__.py +0 -0
- tests/test_binary.py +759 -0
- tests/test_non_binary.py +371 -0
- tsadmetrics/_tsadeval/affiliation/__init__.py +0 -0
- tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +86 -0
- tsadmetrics/_tsadeval/affiliation/_integral_interval.py +464 -0
- tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +68 -0
- tsadmetrics/_tsadeval/affiliation/generics.py +135 -0
- tsadmetrics/_tsadeval/affiliation/metrics.py +114 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +175 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +50 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +184 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/__init__.py +0 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +386 -0
- tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +362 -0
- tsadmetrics/_tsadeval/prts/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +165 -0
- tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +121 -0
- tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
- tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +61 -0
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +86 -0
- tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +21 -0
- tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +85 -0
- {tsadmetrics-0.1.3.dist-info → tsadmetrics-0.1.5.dist-info}/METADATA +1 -1
- tsadmetrics-0.1.5.dist-info/RECORD +62 -0
- tsadmetrics-0.1.5.dist-info/top_level.txt +4 -0
- tsadmetrics-0.1.3.dist-info/RECORD +0 -20
- tsadmetrics-0.1.3.dist-info/top_level.txt +0 -1
- {tsadmetrics-0.1.3.dist-info → tsadmetrics-0.1.5.dist-info}/WHEEL +0 -0
tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py
@@ -0,0 +1,386 @@
import argparse
import math
from typing import Tuple

import numpy as np

from .tapr import *
from .DataManage import File_IO  # Time_Plot is imported lazily in draw_graph()
from .DataManage import Range as rng


class eTaPR(TaPR):
    def __init__(self, theta_p: float, theta_r: float, delta: float = 0.0):
        super(eTaPR, self).__init__(0.0, 0)
        self._predictions_weight = []
        self._predictions_total_weight = 0.0
        self._prune_predictions = []

        self._theta_p = theta_p
        self._theta_r = theta_r
        self._delta_ratio = delta

        self._overlap_score_mat_org = np.zeros(1)
        self._overlap_score_mat_elm = np.zeros(1)  # entries eliminated by pruning
        self._max_anomaly_score = []
        self._max_prediction_score = []

        self._weight_func = math.sqrt

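    # Prediction weights: each predicted range contributes
    # _weight_func(length) = sqrt(length) to the precision-side averages,
    # so one long prediction cannot outweigh many short, accurate ones.
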
    def _gen_ambiguous(self):
        for i in range(len(self._anomalies)):
            start_id = self._anomalies[i].get_time()[1] + 1
            end_id = start_id + int(self._delta_ratio * (self._anomalies[i].get_time()[1] - self._anomalies[i].get_time()[0]))

            # if the next anomaly starts within the delta window, shorten end_id
            if i + 1 < len(self._anomalies) and end_id > self._anomalies[i + 1].get_time()[0]:
                end_id = self._anomalies[i + 1].get_time()[0] - 1

            if start_id > end_id:
                start_id = -2
                end_id = -1

            self._ambiguous_inst.append(rng.Range(start_id, end_id, str(i)))

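    # Worked example: with delta=0.5, an anomaly spanning [100, 120] gets
    # start_id = 121 and end_id = 121 + int(0.5 * 20) = 131, i.e. the
    # ambiguous window [121, 131]; the empty marker (-2, -1) is used when
    # the window is squeezed out by the next anomaly.
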
    # load data -> build the score matrix -> do pruning
    def set(self, anomalies: list, predictions: list) -> None:
        # loading data
        self.set_anomalies(anomalies)
        self.set_predictions(predictions)

        # computing weights
        for a_prediction in self._predictions:
            first, last = a_prediction.get_time()
            temp_weight = math.sqrt(last - first + 1)
            self._predictions_weight.append(temp_weight)
            self._predictions_total_weight += temp_weight

        # computing the score matrix
        self._overlap_score_mat_org = np.zeros((self.get_n_anomalies(), self.get_n_predictions()))
        for anomaly_id in range(self.get_n_anomalies()):
            for prediction_id in range(self.get_n_predictions()):
                self._overlap_score_mat_org[anomaly_id, prediction_id] = \
                    float(self._overlap_and_subsequent_score(self._anomalies[anomaly_id], self._ambiguous_inst[anomaly_id], self._predictions[prediction_id]))

        # computing the maximum scores for each anomaly or prediction
        for an_anomaly in self._anomalies:
            start, end = an_anomaly.get_time()
            self._max_anomaly_score.append(float(self._sum_of_func(start, end, start, end, self._uniform_func)))
        for a_prediction in self._predictions:
            self._max_prediction_score.append(a_prediction.get_len())

        # pruning
        self._pruning()

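    # Pruning alternately zeroes rows (anomalies whose accumulated recall
    # score is positive but below theta_r) and columns (predictions whose
    # precision score is positive but below theta_p) of the overlap matrix
    # until a fixed point is reached.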
    def _pruning(self):
        self._overlap_score_mat_elm = self._overlap_score_mat_org.copy()

        while True:
            tars = self._overlap_score_mat_elm.sum(axis=1) / self._max_anomaly_score
            elem_anomaly_ids = set(np.where(tars < self._theta_r)[0]) - set(np.where(tars == 0.0)[0])
            for anomaly_id in elem_anomaly_ids:
                self._overlap_score_mat_elm[anomaly_id] = np.zeros(self.get_n_predictions())

            taps = self._overlap_score_mat_elm.sum(axis=0) / self._max_prediction_score
            elem_prediction_ids = set(np.where(taps < self._theta_p)[0]) - set(np.where(taps == 0.0)[0])
            for prediction_id in elem_prediction_ids:
                self._overlap_score_mat_elm[:, prediction_id] = np.zeros(self.get_n_anomalies())

            if len(elem_anomaly_ids) == 0 and len(elem_prediction_ids) == 0:
                break

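    # Recall side: _etar_d produces per-anomaly detection scores (1.0 once
    # the covered fraction reaches theta, else 0.0) and _etar_p produces
    # per-anomaly portion scores (the covered fraction itself, capped at 1.0).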
    def _etar_d(self, theta: float) -> Tuple[np.ndarray, list]:
        if self.get_n_anomalies() == 0 or self.get_n_predictions() == 0:
            return np.zeros(self.get_n_anomalies()), []

        scores = self._overlap_score_mat_elm.sum(axis=1) / self._max_anomaly_score
        scores = np.where(scores >= theta, 1.0, scores)
        scores = np.where(scores < theta, 0.0, scores)
        detected_id_list = np.where(scores >= theta)[0]

        return scores, detected_id_list

    def eTaR_d(self) -> Tuple[float, list]:
        _, detected_id_list = self._etar_d(self._theta_r)
        return len(detected_id_list) / self.get_n_anomalies(), detected_id_list

    def _etar_p(self) -> np.ndarray:
        if self.get_n_anomalies() == 0 or self.get_n_predictions() == 0:
            return 0.0

        scores = self._overlap_score_mat_elm.sum(axis=1) / self._max_anomaly_score
        scores = np.where(scores > 1.0, 1.0, scores)
        return scores

    def eTaR_p(self) -> float:
        scores = self._etar_p()
        return scores.mean()

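    # eTaR combines both parts per anomaly as (d + d * p) / 2 = d * (1 + p) / 2:
    # an undetected anomaly scores 0, while a detected one scores between 0.5
    # and 1.0 depending on how much of it was covered.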
    def eTaR(self) -> Tuple[float, float, float, list]:
        detection_scores, detected_id_list = self._etar_d(self._theta_r)
        portion_scores = self._etar_p()

        return (((detection_scores + detection_scores * portion_scores) / 2).mean(),
                portion_scores.mean(),
                len(detected_id_list) / self.get_n_anomalies(),
                detected_id_list)

    def _etap_d(self, theta: float) -> Tuple[np.ndarray, list]:
        if self.get_n_anomalies() == 0 or self.get_n_predictions() == 0:
            return 0.0, []

        scores = self._overlap_score_mat_elm.sum(axis=0) / self._max_prediction_score
        scores = np.where(scores >= theta, 1.0, scores)
        scores = np.where(scores < theta, 0.0, scores)
        correct_id_list = np.where(scores >= theta)[0]

        return scores, correct_id_list

    def eTaP_d(self) -> Tuple[float, list]:
        _, correct_id_list = self._etap_d(self._theta_p)

        tapd = 0.0
        for correct_id in correct_id_list:
            tapd += self._predictions_weight[correct_id]
        tapd /= float(self._predictions_total_weight)

        return tapd, correct_id_list

    def _etap_p(self) -> np.ndarray:
        if self.get_n_anomalies() == 0 or self.get_n_predictions() == 0:
            return 0.0

        scores = self._overlap_score_mat_elm.sum(axis=0) / self._max_prediction_score
        return scores

    def eTaP_p(self) -> float:
        scores = self._etap_p()

        final_score = 0.0
        for i in range(len(scores)):
            final_score += float(self._predictions_weight[i]) * scores[i]
        final_score /= float(self._predictions_total_weight)
        return final_score

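    # eTaP mirrors eTaR on the precision side but weights each prediction by
    # its sqrt-length weight; as an optimization it computes the detection and
    # portion pieces in a single pass and caches them in eTaP_d_value /
    # eTaP_p_value for evaluate_w_ranges().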
    def eTaP(self) -> float:
        # computing etap_d and etap_p in one pass to optimize performance
        if self.get_n_anomalies() == 0 or self.get_n_predictions() == 0:
            self.eTaP_d_value = 0.0
            self.eTaP_p_value = 0.0
            self.corrected_id_list = []
            return 0.0

        portion_scores = self._overlap_score_mat_elm.sum(axis=0) / self._max_prediction_score
        detection_scores = np.where(portion_scores >= self._theta_p, 1.0, 0.0)
        corrected_id_list = np.where(detection_scores >= self._theta_p)[0]

        scores = (detection_scores + detection_scores * portion_scores) / 2

        final_score = 0.0
        etap_d = 0.0
        etap_p = 0.0
        for i in range(len(scores)):
            final_score += float(self._predictions_weight[i]) * scores[i]
            etap_p += float(self._predictions_weight[i]) * portion_scores[i]
        for correct_id in corrected_id_list:
            etap_d += self._predictions_weight[correct_id]

        final_score /= float(self._predictions_total_weight)
        etap_d /= float(self._predictions_total_weight)
        etap_p /= float(self._predictions_total_weight)

        self.eTaP_d_value = etap_d
        self.eTaP_p_value = etap_p
        self.corrected_id_list = corrected_id_list
        return final_score

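    # The conventional metrics below use the unpruned matrix
    # (_overlap_score_mat_org), so every overlap counts regardless of the
    # theta thresholds.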
    # conventional precision
    def precision(self) -> float:
        if self.get_n_anomalies() == 0 or self.get_n_predictions() == 0:
            return 0.0

        return self._overlap_score_mat_org.sum() / sum(self._max_prediction_score)

    # conventional recall
    def recall(self) -> float:
        if self.get_n_anomalies() == 0 or self.get_n_predictions() == 0:
            return 0.0

        return self._overlap_score_mat_org.sum() / sum(self._max_anomaly_score)

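    # Point-adjust counts an anomaly as fully hit once any prediction covers
    # at least a theta fraction of it, then credits its whole length as true
    # positives; the helpers below reuse _TaR_d with empty ambiguous windows
    # for that test.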
    # point-adjust precision
    def point_adjust_precision(self, theta: float) -> float:
        if self.get_n_anomalies() == 0 or self.get_n_predictions() == 0:
            return 0.0

        _, detected_id_list = self._TaR_d(self._anomalies,
                                          [rng.Range(-2, -1, '') for _ in range(len(self._anomalies))],
                                          self._predictions, theta)

        hit_cnt = 0
        for detected_id in detected_id_list:
            hit_cnt += self._anomalies[detected_id].get_len()

        extended_predictions_len = sum(self._max_prediction_score) + hit_cnt - self._overlap_score_mat_org.sum()

        return hit_cnt / extended_predictions_len

    # point-adjust recall
    def point_adjust_recall(self, theta: float) -> float:
        if self.get_n_anomalies() == 0 or self.get_n_predictions() == 0:
            return 0.0

        _, detected_id_list = self._TaR_d(self._anomalies,
                                          [rng.Range(-2, -1, '') for _ in range(len(self._anomalies))],
                                          self._predictions, theta)
        hit_cnt = 0
        for detected_id in detected_id_list:
            hit_cnt += self._anomalies[detected_id].get_len()
        return hit_cnt / sum(self._max_anomaly_score)

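# Module-level entry points. Each returns a dict with the keys 'eTaR',
# 'eTaRd', 'eTaRp', 'eTaP', 'eTaPd', 'eTaPp' and 'f1' (the harmonic mean of
# eTaR and eTaP).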
def evaluate_w_ranges(anomalies: list, predictions: list, theta_p: float, theta_r: float, delta: float = 0.0) -> dict:
    assert 0.0 <= theta_p <= 1.0
    assert 0.0 <= theta_r <= 1.0
    assert 0.0 <= delta <= 1.0

    ev = eTaPR(theta_p, theta_r, delta)
    ev.set(anomalies, predictions)

    tar_value, tarp_value, tard_value, detected_id_list = ev.eTaR()

    tap_value = ev.eTaP()
    tapd_value = ev.eTaP_d_value
    tapp_value = ev.eTaP_p_value

    result = {}
    result['eTaR'] = tar_value
    result['eTaRd'] = tard_value
    result['eTaRp'] = tarp_value

    result['eTaP'] = tap_value
    result['eTaPd'] = tapd_value
    result['eTaPp'] = tapp_value

    # consumed by print_results(verbose=True)
    result['Detected_Anomalies'] = [anomalies[i] for i in detected_id_list]
    result['Correct_Predictions'] = [predictions[i] for i in ev.corrected_id_list]

    if tar_value + tap_value == 0:
        result['f1'] = 0.0
    else:
        result['f1'] = (2 * tar_value * tap_value) / (tar_value + tap_value)

    # false_alarm = 0
    # false_alarm_cnt = 0
    # for id in range(len(predictions)):
    #     if id not in ev.corrected_id_list:
    #         false_alarm += predictions[id].get_len()
    #         false_alarm_cnt += 1
    # result['False Alarm'] = false_alarm
    # result['N False Alarm'] = false_alarm_cnt

    # result['precision'] = ev.precision()
    # result['recall'] = ev.recall()
    # result['point_adjust_precision'] = ev.point_adjust_precision(1e-10)
    # result['point_adjust_recall'] = ev.point_adjust_recall(1e-10)
    return result

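# Stream input: per-point label sequences instead of explicit ranges;
# File_IO.load_stream_2_range converts them (called here with labels 0 and 1)
# into Range objects before delegating to evaluate_w_ranges.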
def evaluate_w_streams(anomalies: list, predictions: list, theta_p: float = 0.7, theta_r: float = 0.1, delta: float = 0.0) -> dict:
    assert 0.0 <= theta_p <= 1.0
    assert 0.0 <= theta_r <= 1.0
    assert 0.0 <= delta <= 1.0

    anomalous_ranges = File_IO.load_stream_2_range(anomalies, 0, 1, True)
    predicted_ranges = File_IO.load_stream_2_range(predictions, 0, 1, True)

    return evaluate_w_ranges(anomalies=anomalous_ranges,
                             predictions=predicted_ranges,
                             theta_p=theta_p,
                             theta_r=theta_r,
                             delta=delta)

def evaluate_w_files(anomaly_file: str, prediction_file: str, file_type: str, theta_p: float, theta_r: float, delta: float = 0.0) -> dict:
    assert 0.0 <= theta_p <= 1.0
    assert 0.0 <= theta_r <= 1.0
    assert 0.0 <= delta <= 1.0

    anomalies = File_IO.load_file(anomaly_file, file_type)
    predictions = File_IO.load_file(prediction_file, file_type)

    return evaluate_w_ranges(anomalies, predictions, theta_p, theta_r, delta)

def print_results(result: dict, verbose: bool) -> None:
    print('\n[eTaR]:', "%0.5f" % result['eTaR'])
    print("\t* Detection score:", "%0.5f" % result['eTaRd'])
    print("\t* Portion score:", "%0.5f" % result['eTaRp'])
    if verbose:
        buf = '\t\tdetected anomalies: '
        if len(result['Detected_Anomalies']) == 0:
            buf += "None "
        else:
            for value in result['Detected_Anomalies']:
                buf += value.get_name() + '(' + str(value.get_time()[0]) + ':' + str(value.get_time()[1]) + '), '
        print(buf[:-2])

    print('\n[eTaP]:', "%0.5f" % result['eTaP'])
    print("\t* Detection score:", "%0.5f" % result['eTaPd'])
    print("\t* Portion score:", "%0.5f" % result['eTaPp'])
    if verbose:
        buf = '\t\tcorrect predictions: '
        if len(result['Correct_Predictions']) == 0:
            buf += "None "
        else:
            for value in result['Correct_Predictions']:
                buf += value.get_name() + '(' + str(value.get_time()[0]) + ':' + str(value.get_time()[1]) + '), '
        print(buf[:-2])

def draw_graph(anomalies: list, predictions: list, graph_dst: str) -> None:
    assert graph_dst in ('screen', 'file', 'none', 'all')
    if graph_dst in ('screen', 'file', 'all'):
        # imported here so the plotting dependencies are only required
        # when a graph is actually requested
        from .DataManage import Time_Plot
        Time_Plot.draw_graphs(anomalies, predictions, graph_dst)

if __name__ == '__main__':
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument("--anomalies", help="anomaly file name (ground truth)", required=True)
    argument_parser.add_argument("--predictions", help="prediction file name", required=True)
    argument_parser.add_argument("--filetype", help="choose the file type between range and stream", required=True)
    argument_parser.add_argument("--graph", help="show graph of results")

    argument_parser.add_argument("--verbose", help="show detailed results", action='store_true')
    argument_parser.add_argument("--theta_r", help="set parameter theta_r")
    argument_parser.add_argument("--theta_p", help="set parameter theta_p")
    argument_parser.add_argument("--delta", help="set parameter delta")

    arguments = argument_parser.parse_args()
    theta_p, theta_r, delta, graph = 0.5, 0.1, 0.0, 'none'  # default values
    if arguments.theta_p is not None:
        theta_p = float(arguments.theta_p)
    if arguments.theta_r is not None:
        theta_r = float(arguments.theta_r)
    if arguments.delta is not None:
        delta = float(arguments.delta)
    if arguments.graph is not None:
        graph = arguments.graph

    assert graph in ('screen', 'file', 'none', 'all')

    anomalies = File_IO.load_file(arguments.anomalies, arguments.filetype)
    predictions = File_IO.load_file(arguments.predictions, arguments.filetype)
    results = evaluate_w_ranges(anomalies, predictions, theta_p, theta_r, delta)

    print_results(results, arguments.verbose)
    draw_graph(anomalies, predictions, graph)
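
For orientation, here is a minimal usage sketch against the file above. It is not part of the released package; it assumes the stream inputs are plain 0/1 label lists (the exact label convention is whatever `File_IO.load_stream_2_range` expects) and uses the import path shown in the file list.

```python
# Hypothetical usage sketch, not part of the diff. Assumes binary 0/1 label
# streams; the precise stream format is defined by File_IO.load_stream_2_range.
from tsadmetrics._tsadeval.eTaPR_pkg import etapr

anomalies   = [0, 0, 1, 1, 1, 0, 0, 1, 1, 0]  # ground-truth labels per time point
predictions = [0, 0, 0, 1, 1, 1, 0, 1, 0, 0]  # detector output per time point

result = etapr.evaluate_w_streams(anomalies, predictions, theta_p=0.7, theta_r=0.1)
print(result['eTaP'], result['eTaR'], result['f1'])
```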