tsadmetrics 0.1.16__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. docs/api_doc/conf.py +67 -0
  2. docs/{conf.py → full_doc/conf.py} +1 -1
  3. docs/manual_doc/conf.py +67 -0
  4. examples/example_direct_data.py +28 -0
  5. examples/example_direct_single_data.py +25 -0
  6. examples/example_file_reference.py +24 -0
  7. examples/example_global_config_file.py +13 -0
  8. examples/example_metric_config_file.py +19 -0
  9. examples/example_simple_metric.py +8 -0
  10. examples/specific_examples/AbsoluteDetectionDistance_example.py +24 -0
  11. examples/specific_examples/AffiliationbasedFScore_example.py +24 -0
  12. examples/specific_examples/AverageDetectionCount_example.py +24 -0
  13. examples/specific_examples/CompositeFScore_example.py +24 -0
  14. examples/specific_examples/DelayThresholdedPointadjustedFScore_example.py +24 -0
  15. examples/specific_examples/DetectionAccuracyInRange_example.py +24 -0
  16. examples/specific_examples/EnhancedTimeseriesAwareFScore_example.py +24 -0
  17. examples/specific_examples/LatencySparsityawareFScore_example.py +24 -0
  18. examples/specific_examples/MeanTimeToDetect_example.py +24 -0
  19. examples/specific_examples/NabScore_example.py +24 -0
  20. examples/specific_examples/PateFScore_example.py +24 -0
  21. examples/specific_examples/Pate_example.py +24 -0
  22. examples/specific_examples/PointadjustedAtKFScore_example.py +24 -0
  23. examples/specific_examples/PointadjustedAucPr_example.py +24 -0
  24. examples/specific_examples/PointadjustedAucRoc_example.py +24 -0
  25. examples/specific_examples/PointadjustedFScore_example.py +24 -0
  26. examples/specific_examples/RangebasedFScore_example.py +24 -0
  27. examples/specific_examples/SegmentwiseFScore_example.py +24 -0
  28. examples/specific_examples/TemporalDistance_example.py +24 -0
  29. examples/specific_examples/TimeTolerantFScore_example.py +24 -0
  30. examples/specific_examples/TimeseriesAwareFScore_example.py +24 -0
  31. examples/specific_examples/TotalDetectedInRange_example.py +24 -0
  32. examples/specific_examples/VusPr_example.py +24 -0
  33. examples/specific_examples/VusRoc_example.py +24 -0
  34. examples/specific_examples/WeightedDetectionDifference_example.py +24 -0
  35. tests/test_dpm.py +212 -0
  36. tests/test_ptdm.py +366 -0
  37. tests/test_registry.py +58 -0
  38. tests/test_runner.py +185 -0
  39. tests/test_spm.py +213 -0
  40. tests/test_tmem.py +198 -0
  41. tests/test_tpdm.py +369 -0
  42. tests/test_tstm.py +338 -0
  43. tsadmetrics/__init__.py +0 -21
  44. tsadmetrics/base/Metric.py +188 -0
  45. tsadmetrics/evaluation/Report.py +25 -0
  46. tsadmetrics/evaluation/Runner.py +253 -0
  47. tsadmetrics/metrics/Registry.py +141 -0
  48. tsadmetrics/metrics/__init__.py +2 -0
  49. tsadmetrics/metrics/spm/PointwiseAucPr.py +62 -0
  50. tsadmetrics/metrics/spm/PointwiseAucRoc.py +63 -0
  51. tsadmetrics/metrics/spm/PointwiseFScore.py +86 -0
  52. tsadmetrics/metrics/spm/PrecisionAtK.py +81 -0
  53. tsadmetrics/metrics/spm/__init__.py +9 -0
  54. tsadmetrics/metrics/tem/dpm/DelayThresholdedPointadjustedFScore.py +83 -0
  55. tsadmetrics/metrics/tem/dpm/LatencySparsityawareFScore.py +76 -0
  56. tsadmetrics/metrics/tem/dpm/MeanTimeToDetect.py +47 -0
  57. tsadmetrics/metrics/tem/dpm/NabScore.py +60 -0
  58. tsadmetrics/metrics/tem/dpm/__init__.py +11 -0
  59. tsadmetrics/metrics/tem/ptdm/AverageDetectionCount.py +53 -0
  60. tsadmetrics/metrics/tem/ptdm/DetectionAccuracyInRange.py +66 -0
  61. tsadmetrics/metrics/tem/ptdm/PointadjustedAtKFScore.py +80 -0
  62. tsadmetrics/metrics/tem/ptdm/TimeseriesAwareFScore.py +248 -0
  63. tsadmetrics/metrics/tem/ptdm/TotalDetectedInRange.py +65 -0
  64. tsadmetrics/metrics/tem/ptdm/WeightedDetectionDifference.py +97 -0
  65. tsadmetrics/metrics/tem/ptdm/__init__.py +12 -0
  66. tsadmetrics/metrics/tem/tmem/AbsoluteDetectionDistance.py +48 -0
  67. tsadmetrics/metrics/tem/tmem/EnhancedTimeseriesAwareFScore.py +252 -0
  68. tsadmetrics/metrics/tem/tmem/TemporalDistance.py +68 -0
  69. tsadmetrics/metrics/tem/tmem/__init__.py +9 -0
  70. tsadmetrics/metrics/tem/tpdm/CompositeFScore.py +104 -0
  71. tsadmetrics/metrics/tem/tpdm/PointadjustedAucPr.py +123 -0
  72. tsadmetrics/metrics/tem/tpdm/PointadjustedAucRoc.py +119 -0
  73. tsadmetrics/metrics/tem/tpdm/PointadjustedFScore.py +96 -0
  74. tsadmetrics/metrics/tem/tpdm/RangebasedFScore.py +236 -0
  75. tsadmetrics/metrics/tem/tpdm/SegmentwiseFScore.py +73 -0
  76. tsadmetrics/metrics/tem/tpdm/__init__.py +12 -0
  77. tsadmetrics/metrics/tem/tstm/AffiliationbasedFScore.py +68 -0
  78. tsadmetrics/metrics/tem/tstm/Pate.py +62 -0
  79. tsadmetrics/metrics/tem/tstm/PateFScore.py +61 -0
  80. tsadmetrics/metrics/tem/tstm/TimeTolerantFScore.py +85 -0
  81. tsadmetrics/metrics/tem/tstm/VusPr.py +51 -0
  82. tsadmetrics/metrics/tem/tstm/VusRoc.py +55 -0
  83. tsadmetrics/metrics/tem/tstm/__init__.py +15 -0
  84. tsadmetrics/{_tsadeval/affiliation/_integral_interval.py → utils/functions_affiliation.py} +377 -9
  85. tsadmetrics/utils/functions_auc.py +393 -0
  86. tsadmetrics/utils/functions_conversion.py +63 -0
  87. tsadmetrics/utils/functions_counting_metrics.py +26 -0
  88. tsadmetrics/{_tsadeval/latency_sparsity_aware.py → utils/functions_latency_sparsity_aware.py} +1 -1
  89. tsadmetrics/{_tsadeval/nabscore.py → utils/functions_nabscore.py} +15 -1
  90. tsadmetrics-1.0.0.dist-info/METADATA +69 -0
  91. tsadmetrics-1.0.0.dist-info/RECORD +99 -0
  92. {tsadmetrics-0.1.16.dist-info → tsadmetrics-1.0.0.dist-info}/top_level.txt +1 -1
  93. entorno/bin/activate_this.py +0 -32
  94. entorno/bin/rst2html.py +0 -23
  95. entorno/bin/rst2html4.py +0 -26
  96. entorno/bin/rst2html5.py +0 -33
  97. entorno/bin/rst2latex.py +0 -26
  98. entorno/bin/rst2man.py +0 -27
  99. entorno/bin/rst2odt.py +0 -28
  100. entorno/bin/rst2odt_prepstyles.py +0 -20
  101. entorno/bin/rst2pseudoxml.py +0 -23
  102. entorno/bin/rst2s5.py +0 -24
  103. entorno/bin/rst2xetex.py +0 -27
  104. entorno/bin/rst2xml.py +0 -23
  105. entorno/bin/rstpep2html.py +0 -25
  106. tests/test_binary.py +0 -946
  107. tests/test_non_binary.py +0 -420
  108. tests/test_utils.py +0 -49
  109. tsadmetrics/_tsadeval/affiliation/_affiliation_zone.py +0 -86
  110. tsadmetrics/_tsadeval/affiliation/_single_ground_truth_event.py +0 -68
  111. tsadmetrics/_tsadeval/affiliation/generics.py +0 -135
  112. tsadmetrics/_tsadeval/affiliation/metrics.py +0 -114
  113. tsadmetrics/_tsadeval/auc_roc_pr_plot.py +0 -295
  114. tsadmetrics/_tsadeval/discontinuity_graph.py +0 -109
  115. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/File_IO.py +0 -175
  116. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Range.py +0 -50
  117. tsadmetrics/_tsadeval/eTaPR_pkg/DataManage/Time_Plot.py +0 -184
  118. tsadmetrics/_tsadeval/eTaPR_pkg/__init__.py +0 -0
  119. tsadmetrics/_tsadeval/eTaPR_pkg/etapr.py +0 -386
  120. tsadmetrics/_tsadeval/eTaPR_pkg/tapr.py +0 -362
  121. tsadmetrics/_tsadeval/metrics.py +0 -698
  122. tsadmetrics/_tsadeval/prts/__init__.py +0 -0
  123. tsadmetrics/_tsadeval/prts/base/__init__.py +0 -0
  124. tsadmetrics/_tsadeval/prts/base/time_series_metrics.py +0 -165
  125. tsadmetrics/_tsadeval/prts/basic_metrics_ts.py +0 -121
  126. tsadmetrics/_tsadeval/prts/time_series_metrics/__init__.py +0 -0
  127. tsadmetrics/_tsadeval/prts/time_series_metrics/fscore.py +0 -61
  128. tsadmetrics/_tsadeval/prts/time_series_metrics/precision.py +0 -86
  129. tsadmetrics/_tsadeval/prts/time_series_metrics/precision_recall.py +0 -21
  130. tsadmetrics/_tsadeval/prts/time_series_metrics/recall.py +0 -85
  131. tsadmetrics/_tsadeval/tests.py +0 -376
  132. tsadmetrics/_tsadeval/threshold_plt.py +0 -30
  133. tsadmetrics/_tsadeval/time_tolerant.py +0 -33
  134. tsadmetrics/binary_metrics.py +0 -1652
  135. tsadmetrics/metric_utils.py +0 -98
  136. tsadmetrics/non_binary_metrics.py +0 -398
  137. tsadmetrics/scripts/__init__.py +0 -0
  138. tsadmetrics/scripts/compute_metrics.py +0 -42
  139. tsadmetrics/utils.py +0 -122
  140. tsadmetrics/validation.py +0 -35
  141. tsadmetrics-0.1.16.dist-info/METADATA +0 -23
  142. tsadmetrics-0.1.16.dist-info/RECORD +0 -64
  143. tsadmetrics-0.1.16.dist-info/entry_points.txt +0 -2
  144. /tsadmetrics/{_tsadeval → base}/__init__.py +0 -0
  145. /tsadmetrics/{_tsadeval/affiliation → evaluation}/__init__.py +0 -0
  146. /tsadmetrics/{_tsadeval/eTaPR_pkg/DataManage → metrics/tem}/__init__.py +0 -0
  147. /tsadmetrics/{_tsadeval/vus_utils.py → utils/functions_vus.py} +0 -0
  148. {tsadmetrics-0.1.16.dist-info → tsadmetrics-1.0.0.dist-info}/WHEEL +0 -0
@@ -1,362 +0,0 @@
1
- import argparse
2
- from typing import Callable
3
- import math
4
- import copy
5
- from .DataManage import File_IO, Range as rng#, Time_Plot
6
-
7
-
8
- class TaPR:
9
- def __init__(self, theta: float, delta: int, past_range: bool = False):
10
- self._past_range = past_range
11
- self._predictions = [] # list of Ranges
12
- self._anomalies = [] # list of Ranges
13
- self._ambiguous_inst = [] # list of Ranges
14
-
15
- self._set_predictions = False
16
- self._set_anomalies = False
17
-
18
- #self._rho = theta
19
- #self._pi = theta
20
- self._theta = theta
21
- self._delta = delta
22
-
23
- pass
24
-
25
- def set_anomalies(self, anomaly_list: list) -> None:
26
- self._anomalies = copy.deepcopy(anomaly_list)
27
- self._gen_ambiguous()
28
- self._set_anomalies = True
29
-
30
- def set_predictions(self, prediction_list: list) -> None:
31
- self._predictions = copy.deepcopy(prediction_list)
32
- self._set_predictions = True
33
-
34
- def _gen_ambiguous(self):
35
- for i in range(len(self._anomalies)):
36
- start_id = self._anomalies[i].get_time()[1] + 1
37
- end_id = end_id = start_id + self._delta
38
-
39
- if self._past_range:
40
- end_id = start_id + int(self._delta * (self._anomalies[i].get_time()[1] - self._anomalies[i].get_time()[0]))
41
-
42
- #if the next anomaly occurs during the theta, update the end_id
43
- if i+1 < len(self._anomalies) and end_id > self._anomalies[i+1].get_time()[0]:
44
- end_id = self._anomalies[i+1].get_time()[0] - 1
45
-
46
- if start_id > end_id:
47
- start_id = -2
48
- end_id = -1
49
-
50
- self._ambiguous_inst.append(rng.Range(start_id, end_id, str(i)))
51
-
52
- def get_n_predictions(self):
53
- return len(self._predictions)
54
-
55
- def get_n_anomalies(self):
56
- return len(self._anomalies)
57
-
58
- def _ids_2_objects(self, id_list, range_list):
59
- result = []
60
- for id in id_list:
61
- result.append(range_list[id])
62
- return result
63
-
64
- def TaR_d(self) -> float and list:
65
- score, detected_id_list = self._TaR_d(self._anomalies, self._ambiguous_inst, self._predictions, self._theta)
66
- return score, self._ids_2_objects(detected_id_list, self._anomalies)
67
-
68
- def _TaR_d(self, anomalies: list, ambiguous_inst: list, predictions: list, threshold: float) -> float and list:
69
- total_score = 0.0
70
- detected_anomalies = []
71
- total_score_p = 0.0
72
- for anomaly_id in range(len(anomalies)):
73
- anomaly = anomalies[anomaly_id]
74
- ambiguous = ambiguous_inst[anomaly_id]
75
-
76
- max_score = self._sum_of_func(anomaly.get_time()[0], anomaly.get_time()[1],
77
- anomaly.get_time()[0], anomaly.get_time()[1], self._uniform_func)
78
-
79
- score = 0.0
80
- for prediction in predictions:
81
- score += self._overlap_and_subsequent_score(anomaly, ambiguous, prediction)
82
-
83
- total_score_p += min(1.0, score/max_score)
84
- if min(1.0, score / max_score) >= threshold:
85
- total_score += 1.0
86
- detected_anomalies.append(anomaly_id)
87
-
88
- if len(anomalies) == 0:
89
- self.TaR_p_value = 0
90
- return 0.0, []
91
- else:
92
- self.TaR_p_value = total_score_p / len(anomalies)
93
- return total_score / len(anomalies), detected_anomalies
94
-
95
- def TaR_p(self) -> float:
96
- total_score = 0.0
97
- for anomaly_id in range(len(self._anomalies)):
98
- anomaly = self._anomalies[anomaly_id]
99
- ambiguous = self._ambiguous_inst[anomaly_id]
100
-
101
- max_score = self._sum_of_func(anomaly.get_time()[0], anomaly.get_time()[1],
102
- anomaly.get_time()[0], anomaly.get_time()[1], self._uniform_func)
103
-
104
- score = 0.0
105
- for prediction in self._predictions:
106
- score += self._overlap_and_subsequent_score(anomaly, ambiguous, prediction)
107
-
108
- total_score += min(1.0, score/max_score)
109
-
110
- if len(self._anomalies) == 0:
111
- return 0.0
112
- else:
113
- return total_score / len(self._anomalies)
114
-
115
-
116
- def TaP_d(self) -> float and list:
117
- score, correct_id_list = self._TaP_d(self._anomalies, self._ambiguous_inst, self._predictions, self._theta)
118
- return score, self._ids_2_objects(correct_id_list, self._predictions)
119
-
120
- def _TaP_d(self, anomalies, ambiguous_inst, predictions, threshold):
121
- #Compute TaP_d and TaP_p in one function to optimize the performance
122
- correct_predictions = []
123
- total_score = 0.0
124
- total_score_p = 0.0
125
- for prediction_id in range(len(predictions)):
126
- max_score = predictions[prediction_id].get_time()[1] - predictions[prediction_id].get_time()[0] + 1
127
-
128
- score = 0.0
129
- for anomaly_id in range(len(anomalies)):
130
- anomaly = anomalies[anomaly_id]
131
- ambiguous = ambiguous_inst[anomaly_id]
132
-
133
- score += self._overlap_and_subsequent_score(anomaly, ambiguous, predictions[prediction_id])
134
- total_score_p += score / max_score
135
- if (score/max_score) >= threshold:
136
- total_score += 1.0
137
- correct_predictions.append(prediction_id)
138
-
139
- if len(predictions) == 0:
140
- self.TaP_p_value = 0
141
- return 0.0, []
142
-
143
- else:
144
- self.TaP_p_value = total_score_p / len(predictions)
145
- return total_score / len(predictions), correct_predictions
146
-
147
-
148
-
149
- def _detect(self, src_range: rng.Range, ranges: list, theta: int) -> bool:
150
- rest_len = src_range.get_time()[1] - src_range.get_time()[0] + 1
151
- for dst_range in ranges:
152
- len = self._overlapped_len(src_range, dst_range)
153
- if len != -1:
154
- rest_len -= len
155
- return (float)(rest_len) / (src_range.get_time()[1] - src_range.get_time()[0] + 1) <= (1.0 - theta)
156
-
157
- def _overlapped_len(self, range1: rng.Range, range2: rng.Range) -> int:
158
- detected_start = max(range1.get_time()[0], range2.get_time()[0])
159
- detected_end = min(range1.get_time()[1], range2.get_time()[1])
160
-
161
- if detected_end < detected_start:
162
- return 0
163
- else:
164
- return detected_end - detected_start + 1
165
-
166
- def _min_max_norm(self, value: int, org_min: int, org_max: int, new_min: int, new_max: int) -> float:
167
- if org_min == org_max:
168
- return new_min
169
- else:
170
- return (float)(new_min) + (float)(value - org_min) * (new_max - new_min) / (org_max - org_min)
171
-
172
- def _decaying_func(self, val: float) -> float:
173
- assert (-6 <= val <= 6)
174
- return 1 / (1 + math.exp(val))
175
-
176
- def _ascending_func(self, val: float) -> float:
177
- assert (-6 <= val <= 6)
178
- return 1 / (1 + math.exp(val * -1))
179
-
180
- def _uniform_func(self, val: float) -> float:
181
- return 1.0
182
-
183
- def _sum_of_func(self, start_time: int, end_time: int, org_start: int, org_end: int,
184
- func: Callable[[float], float]) -> float:
185
- val = 0.0
186
- for timestamp in range(start_time, end_time + 1):
187
- val += func(self._min_max_norm(timestamp, org_start, org_end, -6, 6))
188
- return val
189
-
190
- def _overlap_and_subsequent_score(self, anomaly: rng.Range, ambiguous: rng.Range, prediction: rng.Range) -> float:
191
- score = 0.0
192
-
193
- detected_start = max(anomaly.get_time()[0], prediction.get_time()[0])
194
- detected_end = min(anomaly.get_time()[1], prediction.get_time()[1])
195
-
196
- score += self._sum_of_func(detected_start, detected_end,
197
- anomaly.get_time()[0], anomaly.get_time()[1], self._uniform_func)
198
-
199
- if ambiguous.get_time()[0] < ambiguous.get_time()[1]:
200
- detected_start = max(ambiguous.get_time()[0], prediction.get_time()[0])
201
- detected_end = min(ambiguous.get_time()[1], prediction.get_time()[1])
202
-
203
- score += self._sum_of_func(detected_start, detected_end,
204
- ambiguous.get_time()[0], ambiguous.get_time()[1], self._decaying_func)
205
-
206
- return score
207
-
208
- def TaR_p(self) -> float:
209
- total_score = 0.0
210
- for anomaly_id in range(len(self._anomalies)):
211
- anomaly = self._anomalies[anomaly_id]
212
- ambiguous = self._ambiguous_inst[anomaly_id]
213
-
214
- max_score = self._sum_of_func(anomaly.get_time()[0], anomaly.get_time()[1],
215
- anomaly.get_time()[0], anomaly.get_time()[1], self._uniform_func)
216
-
217
- score = 0.0
218
- for prediction in self._predictions:
219
- score += self._overlap_and_subsequent_score(anomaly, ambiguous, prediction)
220
-
221
- total_score += min(1.0, score/max_score)
222
-
223
- if len(self._anomalies) == 0:
224
- return 0.0
225
- else:
226
- return total_score / len(self._anomalies)
227
-
228
- def TaP_p(self) -> float:
229
- total_score = 0.0
230
- for prediction in self._predictions:
231
- max_score = prediction.get_time()[1] - prediction.get_time()[0] + 1
232
-
233
- score = 0.0
234
- for anomaly_id in range(len(self._anomalies)):
235
- anomaly = self._anomalies[anomaly_id]
236
- ambiguous = self._ambiguous_inst[anomaly_id]
237
-
238
- score += self._overlap_and_subsequent_score(anomaly, ambiguous, prediction)
239
-
240
- total_score += score/max_score
241
-
242
- if len(self._predictions) == 0:
243
- return 0.0
244
- else:
245
- return total_score / len(self._predictions)
246
-
247
-
248
- def compute(anomalies: list, predictions: list, alpha: float, theta: float, delta: int) -> dict:
249
- ev = TaPR(theta, delta)
250
-
251
- ev.set_anomalies(anomalies)
252
- ev.set_predictions(predictions)
253
-
254
- tard_value, detected_list = ev.TaR_d()
255
- tarp_value = ev.TaR_p()
256
-
257
- tapd_value, correct_list = ev.TaP_d()
258
- tapp_value = ev.TaP_p()
259
-
260
- result = {}
261
- tar_value = alpha * tard_value + (1 - alpha) * tarp_value
262
- result['TaR'] = tar_value
263
- result['TaRd'] = tard_value
264
- result['TaRp'] = tarp_value
265
-
266
- tap_value = alpha * tapd_value + (1 - alpha) * tapp_value
267
- result['TaP'] = tap_value
268
- result['TaPd'] = tapd_value
269
- result['TaPp'] = tapp_value
270
-
271
- detected_anomalies = []
272
- for value in detected_list:
273
- detected_anomalies.append(value.get_name())
274
-
275
- result['Detected_Anomalies'] = detected_anomalies
276
- result['Detected_Anomalies_Ranges'] = detected_list
277
- result['Correct_Predictions_Ranges'] = correct_list
278
-
279
- if tar_value + tap_value == 0:
280
- result['f1'] = 0.0
281
- else:
282
- result['f1'] = (2 * tar_value * tap_value) / (tar_value + tap_value)
283
-
284
- return result
285
-
286
-
287
- def compute_with_load(anomaly_file: str, prediction_file: str, file_type: str, alpha: float, theta: float, delta: int) -> dict:
288
- anomalies = File_IO.load_file(anomaly_file, file_type)
289
- predictions = File_IO.load_file(prediction_file, file_type)
290
- return compute(anomalies, predictions, alpha, theta, delta)
291
-
292
-
293
- def print_result(anomalies: list, predictions: list, alpha: float, theta: float, delta: int, verbose: bool, graph: str) -> None:
294
- org_predictions = copy.deepcopy(predictions)
295
- result = compute(anomalies, predictions, alpha, theta, delta)
296
-
297
- print("The parameters (alpha, theta, delta) are set as %g, %g, and %d." % (alpha, theta, delta))
298
-
299
- print('\n[TaR]:', "%0.5f" % result['TaR'])
300
- print("\t* Detection score:", "%0.5f" % result['TaRd'])
301
- print("\t* Portion score:", "%0.5f" % result['TaRp'])
302
- if verbose:
303
- buf = '\t\tdetected anomalies: '
304
- if len(result['Detected_Anomalies_Ranges']) == 0:
305
- buf += "None "
306
- else:
307
- for value in result['Detected_Anomalies_Ranges']:
308
- buf += value.get_name() + '(' + str(value.get_time()[0]) + ':' + str(value.get_time()[1]) + '), '
309
- print(buf[:-2])
310
-
311
-
312
- print('\n[TaP]:', "%0.5f" % result['TaP'])
313
- print("\t* Detection score:", "%0.5f" % result['TaPd'])
314
- print("\t* Portion score:", "%0.5f" % result['TaPp'])
315
- if verbose:
316
- buf = '\t\tcorrect predictions: '
317
- if len(result['Correct_Predictions_Ranges']) == 0:
318
- buf += "None "
319
- else:
320
- for value in result['Correct_Predictions_Ranges']:
321
- buf += value.get_name() + '(' + str(value.get_time()[0]) + ':' + str(value.get_time()[1]) + '), '
322
- print(buf[:-2])
323
-
324
-
325
- assert(graph == 'screen' or graph == 'file' or graph == 'none' or graph == 'all')
326
- if graph == 'screen' or graph == 'file' or graph == 'all':
327
- Time_Plot.draw_graphs(anomalies, org_predictions, graph)
328
-
329
-
330
- if __name__ == '__main__':
331
- argument_parser = argparse.ArgumentParser()
332
- argument_parser.add_argument("--anomalies", help="anomaly file name (ground truth)", required=True)
333
- argument_parser.add_argument("--predictions", help="prediction file name", required=True)
334
- argument_parser.add_argument("--filetype", help="choose the file type between range and stream", required=True)
335
- argument_parser.add_argument("--graph", help="show graph of results")
336
-
337
- argument_parser.add_argument("--verbose", help="show detail results", action='store_true')
338
- argument_parser.add_argument("--theta", help="set parameter theta")
339
- argument_parser.add_argument("--alpha", help="set parameter alpha")
340
- argument_parser.add_argument("--delta", help="set parameter delta")
341
- arguments = argument_parser.parse_args()
342
-
343
- arguments = argument_parser.parse_args()
344
- theta, alpha, delta, graph = 0.5, 0.8, 600, 'none' #default values
345
- if arguments.theta is not None:
346
- theta = float(arguments.theta)
347
- if arguments.alpha is not None:
348
- alpha = float(arguments.alpha)
349
- if arguments.delta is not None:
350
- delta = int(arguments.delta)
351
- if arguments.graph is not None:
352
- graph = arguments.graph
353
-
354
- assert(0.0 <= theta <= 1.0)
355
- assert(0.0 <= alpha <= 1.0)
356
- assert(isinstance(delta, int))
357
- assert(graph == 'screen' or graph == 'file' or graph == 'none' or graph == 'all')
358
-
359
- anomalies = File_IO.load_file(arguments.anomalies, arguments.filetype)
360
- predictions = File_IO.load_file(arguments.predictions, arguments.filetype)
361
-
362
- print_result(anomalies, predictions, alpha, theta, delta, arguments.verbose, graph)