tsadmetrics 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,311 @@
+ # ----------------------------------------------------------------------
+ # Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
+ # with Numenta, Inc., for a separate license for this software code, the
+ # following terms and conditions apply:
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero Public License version 3 as
+ # published by the Free Software Foundation.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ # See the GNU Affero Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero Public License
+ # along with this program. If not, see http://www.gnu.org/licenses.
+ #
+ # http://numenta.org/licenses/
+ # ----------------------------------------------------------------------
+ from collections import namedtuple
+ import logging
+ import math
+
+ logger = logging.getLogger(__name__)
+ AnomalyPoint = namedtuple(
+     "AnomalyPoint",
+     ["timestamp", "anomalyScore", "sweepScore", "windowName"]
+ )
+ ThresholdScore = namedtuple(
+     "ThresholdScore",
+     ["threshold", "score", "tp", "tn", "fp", "fn", "total"]
+ )
+
+
+ def sigmoid(x):
+     """Standard sigmoid function."""
+     return 1 / (1 + math.exp(-x))
+
+
+ def scaledSigmoid(relativePositionInWindow):
+     """Return a scaled sigmoid function given a relative position within a
+     labeled window. The function is computed as follows:
+     A relative position of -1.0 is the far left edge of the anomaly window and
+     corresponds to S = 2*sigmoid(5) - 1.0 = 0.98661. This is the earliest point
+     to be counted as a true positive.
+     A relative position of -0.5 is halfway into the anomaly window and
+     corresponds to S = 2*sigmoid(0.5*5) - 1.0 = 0.84828.
+     A relative position of 0.0 is the right edge of the window and
+     corresponds to S = 2*sigmoid(0) - 1 = 0.0.
+     Relative positions > 0 correspond to false positives increasingly far away
+     from the right edge of the window. A relative position of 1.0 is past the
+     right edge of the window and corresponds to a score of 2*sigmoid(-5) - 1.0 =
+     -0.98661.
+     @param relativePositionInWindow (float) A relative position
+                                             within a window calculated per the
+                                             rules above.
+     @return (float)
+     """
+     if relativePositionInWindow > 3.0:
+         # FP well behind window
+         val = -1.0
+     else:
+         val = 2*sigmoid(-5*relativePositionInWindow) - 1.0
+
+     return val
+
+
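The endpoint values quoted in the docstring are easy to check. A minimal sketch (editor's illustration only, not part of the package, assuming the `sigmoid` and `scaledSigmoid` functions above are in scope):

    for pos in (-1.0, -0.5, 0.0, 1.0, 4.0):
        print(pos, round(scaledSigmoid(pos), 5))
    # prints approximately:
    # -1.0 0.98661
    # -0.5 0.84828
    # 0.0 0.0
    # 1.0 -0.98661
    # 4.0 -1.0   (relative positions past 3.0 are clamped to -1.0)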
+ def prepAnomalyListForScoring(inputAnomalyList):
+     """
+     Sort by anomaly score and filter all rows with 'probationary' window name
+     """
+     return sorted(
+         [x for x in inputAnomalyList if x.windowName != 'probationary'],
+         key=lambda x: x.anomalyScore,
+         reverse=True)
+
+ class Sweeper(object):
+     """Class used to iterate over all anomaly scores in a data set, generating
+     threshold-score pairs for use in threshold optimization or dataset scoring.
+     """
+
+     def __init__(self, probationPercent=0.15, costMatrix=None):
+         self.probationPercent = probationPercent
+
+         self.tpWeight = 0
+         self.fpWeight = 0
+         self.fnWeight = 0
+
+         if costMatrix is not None:
+             self.setCostMatrix(costMatrix)
+
+
+     def setCostMatrix(self, costMatrix):
+         self.tpWeight = costMatrix["tpWeight"]
+         self.fpWeight = costMatrix["fpWeight"]
+         self.fnWeight = costMatrix["fnWeight"]
+
+
+     def _getProbationaryLength(self, numRows):
+         return min(
+             math.floor(self.probationPercent * numRows),
+             self.probationPercent * 5000
+         )
+
+
+     def _prepareScoreByThresholdParts(self, inputAnomalyList):
+         scoreParts = {"fp": 0}
+         for row in inputAnomalyList:
+             if row.windowName not in ('probationary', None):
+                 scoreParts[row.windowName] = -self.fnWeight
+         return scoreParts
+
+
+     def calcSweepScore(
+             self, timestamps, anomalyScores, windowLimits, dataSetName):
+         """
+         Given a single file's rows, return a list of AnomalyPoints.
+         Each AnomalyPoint contains the row's timestamp, anomaly score,
+         calculated NAB score, and window name. These lists may be passed
+         to `calcScoreByThreshold()` directly in order to score or optimize
+         a single file, or combined together prior to being passed to
+         `calcScoreByThreshold()` in order to score / calculate multiple
+         files / an entire corpus.
+         @param timestamps: (list) `datetime` objects
+         @param anomalyScores: (list) `float` objects in the range [0.0, 1.0]
+         @param windowLimits: (list) `tuple` objects of window limits
+         @param dataSetName: (string) name of dataset, often filename
+         @return (list) List of AnomalyPoint objects
+         """
+         assert len(timestamps) == len(anomalyScores), \
+             "timestamps and anomalyScores should not be different lengths!"
+         timestamps = list(timestamps)
+         windowLimits = list(windowLimits)  # Copy because we mutate this list
+         # The final list of anomaly points returned from this function.
+         # Used for threshold optimization and scoring in other functions.
+         anomalyList = []
+
+         # One-time config variables
+         maxTP = scaledSigmoid(-1.0)
+         probationaryLength = self._getProbationaryLength(len(timestamps))
+
+         # Iteration variables - these update as we iterate through the data
+         curWindowLimits = None
+         curWindowName = None
+         curWindowWidth = None
+         curWindowRightIndex = None
+         prevWindowWidth = None
+         prevWindowRightIndex = None
+
+         for i, (curTime, curAnomaly) in enumerate(zip(timestamps, anomalyScores)):
+             unweightedScore = None
+             weightedScore = None
+
+             # If not in a window, check if we've just entered one
+             if windowLimits and curTime == windowLimits[0][0]:
+                 curWindowLimits = windowLimits.pop(0)
+                 curWindowName = "%s|%s" % (dataSetName, curWindowLimits[0])
+                 curWindowRightIndex = timestamps.index(curWindowLimits[1])
+                 curWindowWidth = float(curWindowRightIndex -
+                                        timestamps.index(curWindowLimits[0]) + 1)
+
+                 logger.debug(
+                     "Entering window: %s (%s)", curWindowName, str(curWindowLimits))
+
+             # If in a window, score as if true positive
+             if curWindowLimits is not None:
+                 positionInWindow = -(curWindowRightIndex - i + 1) / curWindowWidth
+                 unweightedScore = scaledSigmoid(positionInWindow)
+                 weightedScore = unweightedScore * self.tpWeight / maxTP
+
+             # If outside a window, score as if false positive
+             else:
+                 if prevWindowRightIndex is None:
+                     # No preceding window, so return score as if we were just
+                     # really far away from the nearest window.
+                     unweightedScore = -1.0
+                 else:
+                     numerator = abs(prevWindowRightIndex - i)
+                     denominator = float(prevWindowWidth - 1)
+                     positionPastWindow = numerator / denominator
+                     unweightedScore = scaledSigmoid(positionPastWindow)
+
+                 weightedScore = unweightedScore * self.fpWeight
+
+             if i >= probationaryLength:
+                 pointWindowName = curWindowName
+             else:
+                 pointWindowName = "probationary"
+
+             point = AnomalyPoint(curTime, curAnomaly, weightedScore, pointWindowName)
+
+             anomalyList.append(point)
+
+             # If at right-edge of window, exit window.
+             # This happens after processing the current point and appending it
+             # to the list.
+             if curWindowLimits is not None and curTime == curWindowLimits[1]:
+                 logger.debug("Exiting window: %s", curWindowName)
+                 prevWindowRightIndex = i
+                 prevWindowWidth = curWindowWidth
+                 curWindowLimits = None
+                 curWindowName = None
+                 curWindowWidth = None
+                 curWindowRightIndex = None
+
+         return anomalyList
+
+
+     def calcScoreByThreshold(self, anomalyList):
+         """
+         Find NAB scores for each threshold in `anomalyList`.
+         @param anomalyList (list) `AnomalyPoint` objects from `calcSweepScore()`
+         @return (list) List of `ThresholdScore` objects
+         """
+         scorableList = prepAnomalyListForScoring(anomalyList)
+         scoreParts = self._prepareScoreByThresholdParts(scorableList)
+         scoresByThreshold = []  # The final list we return
+
+         # The current threshold above which an anomaly score is considered
+         # an anomaly prediction. This starts above 1.0 so that all points
+         # are skipped, which gives us a full false-negative score.
+         curThreshold = 1.1
+
+         # Initialize counts:
+         # * every point in a window is a false negative
+         # * every point outside a window is a true negative
+         tn = sum(1 if x.windowName is None else 0 for x in scorableList)
+         fn = sum(1 if x.windowName is not None else 0 for x in scorableList)
+         tp = 0
+         fp = 0
+
+         # Iterate through every data point, starting with highest anomaly scores
+         # and working down. Whenever we reach a new anomaly score, we save the
+         # current score and begin calculating the score for the new, lower
+         # threshold. Every data point we iterate over is 'active' for the current
+         # threshold level, so the point is either:
+         # * a true positive (has a `windowName`)
+         # * a false positive (`windowName is None`).
+         for dataPoint in scorableList:
+             # If we've reached a new anomaly threshold, store the current
+             # threshold+score pair.
+             if dataPoint.anomalyScore != curThreshold:
+                 curScore = sum(scoreParts.values())
+                 totalCount = tp + tn + fp + fn
+                 s = ThresholdScore(curThreshold, curScore, tp, tn, fp, fn, totalCount)
+                 scoresByThreshold.append(s)
+                 curThreshold = dataPoint.anomalyScore
+
+             # Adjust counts
+             if dataPoint.windowName is not None:
+                 tp += 1
+                 fn -= 1
+             else:
+                 fp += 1
+                 tn -= 1
+
+             if dataPoint.windowName is None:
+                 scoreParts["fp"] += dataPoint.sweepScore
+             else:
+                 scoreParts[dataPoint.windowName] = max(
+                     scoreParts[dataPoint.windowName],
+                     dataPoint.sweepScore
+                 )
+
+         # Make sure to save the score for the last threshold
+         curScore = sum(scoreParts.values())
+         totalCount = tp + tn + fp + fn
+         s = ThresholdScore(curThreshold, curScore, tp, tn, fp, fn, totalCount)
+         scoresByThreshold.append(s)
+
+         return scoresByThreshold
+
+
+     def scoreDataSet(
+             self, timestamps, anomalyScores, windowLimits, dataSetName, threshold):
+         """Function called to score each dataset in the corpus.
+         @param timestamps (tuple) tuple of timestamps
+         @param anomalyScores (tuple) tuple of anomaly scores (floats [0, 1.0])
+         @param windowLimits (tuple) tuple of window limit tuples
+         @param dataSetName (string) name of this dataset, usually a file path.
+             Used to name the windows in this dataset, which is important when
+             scoring more than one data set, as each window in all data sets
+             needs to be uniquely named.
+         @param threshold (float) the threshold at which an anomaly score is
+             considered to be an anomaly prediction.
+         @return (tuple) Contains:
+             scores (list) List of per-row scores, to be saved in score file
+             matchingRow (ThresholdScore)
+         """
+         anomalyList = self.calcSweepScore(
+             timestamps, anomalyScores, windowLimits, dataSetName)
+         scoresByThreshold = self.calcScoreByThreshold(anomalyList)
+
+         matchingRow = None
+         prevRow = None
+         for thresholdScore in scoresByThreshold:
+             if thresholdScore.threshold == threshold:
+                 matchingRow = thresholdScore
+                 break
+             elif thresholdScore.threshold < threshold:
+                 matchingRow = prevRow
+                 break
+
+             prevRow = thresholdScore
+
+         # Return sweepScore for each row, to be added to score file
+         return (
+             [x.sweepScore for x in anomalyList],
+             matchingRow
+         )
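Taken together, the class above turns a labeled series into per-row sweep scores and threshold/score pairs. A minimal usage sketch (editor's illustration, not part of the package; the toy timestamps, scores, window and dataset name below are hypothetical, and the cost matrix uses the usual NAB standard-profile weights), assuming the `Sweeper` class above is in scope:

    from datetime import datetime, timedelta

    start = datetime(2020, 1, 1)
    timestamps = [start + timedelta(minutes=i) for i in range(10)]
    anomalyScores = [0.1, 0.1, 0.2, 0.1, 0.9, 0.3, 0.1, 0.1, 0.1, 0.1]
    windowLimits = [(timestamps[4], timestamps[6])]  # one labeled anomaly window

    sweeper = Sweeper(probationPercent=0.15,
                      costMatrix={"tpWeight": 1.0, "fpWeight": 0.11, "fnWeight": 1.0})

    # Score this dataset at a fixed detection threshold.
    rowScores, matchingRow = sweeper.scoreDataSet(
        timestamps, anomalyScores, windowLimits, "toy.csv", threshold=0.5)

    # Or sweep all thresholds, e.g. to pick the best one for this data.
    anomalyList = sweeper.calcSweepScore(
        timestamps, anomalyScores, windowLimits, "toy.csv")
    best = max(sweeper.calcScoreByThreshold(anomalyList), key=lambda t: t.score)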
@@ -0,0 +1,376 @@
+ from .metrics import *
+
+ import unittest
+
+ import numpy as np  # used directly by the tests below
+
+
+ class Binary_detection_tester(unittest.TestCase):
+     def test_unsorted(self):
+         self.assertRaises(AssertionError, Binary_detection, 10, [2, 3, 4], [3, 4, 2])
+         self.assertRaises(AssertionError, Binary_detection, 10, [3, 4, 2], [2, 3, 4])
+         self.assertRaises(AssertionError, Binary_detection, 10, [[1, 8]], [[5, 6], [1, 2]])
+         self.assertRaises(AssertionError, Binary_detection, 10, [[5, 6], [1, 2]], [[1, 8]])
+
+     def test_nonunique(self):
+         self.assertRaises(AssertionError, Binary_detection, 10, [2, 4, 4], [2, 3, 4])
+         self.assertRaises(AssertionError, Binary_detection, 10, [2, 3, 4], [2, 4, 4])
+
+     def test_long_anom(self):
+         self.assertRaises(AssertionError, Binary_detection, 4, [1], [2, 3, 4])
+         self.assertRaises(AssertionError, Binary_detection, 4, [[2, 4]], [1])
+         self.assertRaises(AssertionError, Binary_detection, 4, [-1], [1])
+
+     def test_point_to_seq(self):
+         anom1 = [3, 4, 5, 7, 8, 11]
+         anom2 = [[3, 5], [7, 8], [11, 11]]
+         d = Binary_detection(12, anom1, anom2)
+
+         self.assertTrue(np.array_equal(np.array(anom1), d.get_predicted_anomalies_ptwise()))
+         self.assertTrue(np.array_equal(np.array(anom2), d.get_gt_anomalies_segmentwise()))
+
+     def test_anomaly_full_series(self):
+         anom1 = [3, 4, 5, 7, 8, 11]
+         d = Binary_detection(12, anom1, anom1)
+
+         self.assertTrue(
+             np.array_equal(d.get_gt_anomalies_full_series(), np.array([0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1]))
+         )
+         self.assertTrue(
+             np.array_equal(d.get_predicted_anomalies_full_series(), np.array([0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1]))
+         )
+
+     def test_empty_anom(self):
+         anom1 = [3, 4, 5, 7, 8]
+         anom2 = []
+
+         d = Binary_detection(12, anom1, anom2)
+         self.assertEqual(0, len(d.get_predicted_anomalies_ptwise()))
+         self.assertEqual(0, len(d.get_predicted_anomalies_segmentwise()))
+
+
+ class Confusion_metrics_tester(unittest.TestCase):
+     def test_metrics(self):
+         self.assertEqual(0.75, recall(tp=3, fn=1))
+         self.assertEqual(0.75, precision(tp=3, fp=1))
+         self.assertEqual(0.6, f1_score(tp=3, fn=1, fp=3))
+         self.assertEqual(0.625, f1_from_pr(p=1, r=0.25, beta=0.5))
+         self.assertAlmostEqual(5 / 9, f1_from_pr(p=1, r=0.5, beta=2))
+
+     def test_requires_names(self):
+         self.assertRaises(TypeError, recall, 3, 4)
+         self.assertRaises(TypeError, precision, 3, 4)
+         self.assertRaises(TypeError, f1_score, 3, 4, 5)
+
+     def test_zerodivision(self):
+         self.assertEqual(0, recall(tp=0, fn=0))
+         self.assertEqual(0, precision(tp=0, fp=0))
+         self.assertEqual(0, f1_score(tp=0, fp=1, fn=1))
+
+
+ class Metrics_tester(unittest.TestCase):
+     def test_PW(self):
+         pw = Pointwise_metrics(10, [1, 2, 3, 4], [4, 5, 6])
+
+         self.assertEqual(pw.tp, 1)
+         self.assertEqual(pw.fp, 2)
+         self.assertEqual(pw.fn, 3)
+
+     def test_PA(self):
+         pa = PointAdjust(10, [1, 2, 3, 4, 9], [4, 5, 6])
+         self.assertEqual(pa.tp, 4)
+         self.assertEqual(pa.fp, 2)
+         self.assertEqual(pa.fn, 1)
+
+         pa = PointAdjust(10, [1, 2, 3, 4, 5, 6, 7], [4])
+         self.assertEqual(pa.get_score(), 1)
+
+         pa = PointAdjust(10, [1, 2, 3, 4, 5, 6, 7], [9])
+         self.assertEqual(pa.get_score(), 0)
+
+     def test_dtPA(self):
+         pa = DelayThresholdedPointAdjust(10, [1, 2, 3, 4, 9], [4, 5, 6], k=3)
+         self.assertEqual(pa.tp, 4)
+         self.assertEqual(pa.fp, 2)
+         self.assertEqual(pa.fn, 1)
+
+         pa = DelayThresholdedPointAdjust(10, [1, 2, 3, 4, 9], [4, 5, 6], k=2)
+         self.assertEqual(pa.tp, 0)
+         self.assertEqual(pa.fp, 2)
+         self.assertEqual(pa.fn, 5)
+
+         pa = DelayThresholdedPointAdjust(10, [1, 2, 3, 4, 5, 6, 7], [4], k=3)
+         self.assertEqual(pa.get_score(), 1)
+
+         pa = DelayThresholdedPointAdjust(10, [1, 2, 3, 4, 5, 6, 7], [4], k=2)
+         self.assertEqual(pa.get_score(), 0)
+
+     def test_pakf(self):
+         pa = PointAdjustKPercent(10, [1, 2, 3, 4, 9], [4, 5, 6], k=0.5)
+         self.assertEqual(pa.tp, 1)
+         self.assertEqual(pa.fp, 2)
+         self.assertEqual(pa.fn, 4)
+
+         pa = PointAdjustKPercent(10, [1, 2, 3, 4, 9], [4, 5, 6], k=0.1)
+         self.assertEqual(pa.tp, 4)
+         self.assertEqual(pa.fp, 2)
+         self.assertEqual(pa.fn, 1)
+
+     def test_lspa(self):
+         pa = LatencySparsityAware(10, [2, 3, 4, 5, 9], [4, 7], tw=1)
+         self.assertAlmostEqual(pa.get_score(), f1_score(tp=pa.tp, fn=pa.fn, fp=pa.fp), 4)
+         self.assertEqual(pa.tp, 2)
+         self.assertEqual(pa.fp, 1)
+         self.assertEqual(pa.fn, 3)
+
+         pa = LatencySparsityAware(10, [2, 3, 4, 5, 9], [4, 7], tw=2)
+         self.assertAlmostEqual(pa.get_score(), f1_score(tp=pa.tp, fn=pa.fn, fp=pa.fp), 4)
+         self.assertEqual(pa.tp, 1)
+         self.assertEqual(pa.fp, 1)
+         self.assertEqual(pa.fn, 2)
+
+     def test_Segment(self):
+         s = Segmentwise_metrics(10, [[1, 2], [4, 4], [7, 9]], [[0, 6]])
+         self.assertEqual(s.tp, 2)
+         self.assertEqual(s.fp, 0)
+         self.assertEqual(s.fn, 1)
+
+         s = Segmentwise_metrics(10, [[1, 2], [4, 4], [7, 9]], [[6, 6], [8, 8]])
+         self.assertEqual(s.tp, 1)
+         self.assertEqual(s.fp, 1)
+         self.assertEqual(s.fn, 2)
+
+         s = Segmentwise_metrics(10, [[1, 2], [4, 4], [7, 9]], [])
+         self.assertEqual(s.tp, 0)
+         self.assertEqual(s.fp, 0)
+         self.assertEqual(s.fn, 3)
+
+         s = Segmentwise_metrics(10, [[1, 2], [4, 4], [7, 9]], [[0, 9]])
+         self.assertEqual(s.tp, 3)
+         self.assertEqual(s.fp, 0)
+         self.assertEqual(s.fn, 0)
+
+     def test_CF(self):
+         c = Composite_f(10, [0, 2, 3, 5, 7, 9], [3, 6])
+         f = c.get_score()
+         self.assertEqual(c.p, 0.5)
+         self.assertEqual(c.r, 0.2)
+
+     def test_affiliation(self):
+         a = Affiliation(10, [2, 3], [2])
+         f = a.get_score()
+         self.assertEqual(a.p, 1)
+         self.assertTrue(a.r < 1)
+
+         a = Affiliation(10, [2, 3], [2, 3, 4])
+         f = a.get_score()
+         self.assertTrue(a.p < 1)
+         self.assertEqual(a.r, 1)
+
+     def test_range_pr(self):
+         r = Range_PR(10, [2, 3], [2])
+         f = r.get_score()
+         self.assertEqual(r.p, 1)
+         self.assertTrue(r.r < 1)
+
+         r2 = Range_PR(10, [2, 3], [2, 3])
+         f2 = r2.get_score()
+         self.assertTrue(f2 > f)
+
+         r = Range_PR(10, [2, 3], [2, 3, 4])
+         f = r.get_score()
+         self.assertTrue(r.p < 1)
+         self.assertEqual(r.r, 1)
+
+     def test_NAB(self):
+         n = NAB_score(10, [[3, 6]], [3])
+         self.assertAlmostEqual(n.get_score(), 100)
+
+         n = NAB_score(10, [[3, 6]], [])
+         self.assertAlmostEqual(n.get_score(), 0)
+
+         n = NAB_score(10, [[3, 6]], [1])
+         self.assertAlmostEqual(n.get_score(), -100 * 0.11 / 2)
+
+         n = NAB_score(10, [3, 6], [1])
+         self.assertTrue(np.isnan(n.get_score()))
+
+     def test_ttol(self):
+         t = Time_Tolerant(10, [3, 4, 8], [1, 2, 3], d=2)
+         self.assertAlmostEqual(t.recall(), 2 / 3)
+         self.assertAlmostEqual(t.precision(), 1)
+
+         t = Time_Tolerant(10, [4, 5], [6], d=1)
+         self.assertAlmostEqual(t.recall(), 1 / 2)
+         self.assertAlmostEqual(t.precision(), 1)
+
+     def test_TaF(self):
+         t = TaF(10, [4, 5, 6], [4, 5, 6])
+         self.assertEqual(t.get_score(), 1)
+
+         t = TaF(10, [4, 5, 6], [1, 2, 3])
+         self.assertEqual(t.get_score(), 0)
+
+         t = TaF(10, [4, 5, 6], [7, 8, 9])
+         self.assertEqual(t.get_score(), 0)
+         t = TaF(10, [4, 5, 6], [7, 8, 9], delta=1)
+         self.assertTrue(t.get_score() > 0)
+
+         t1 = TaF(10, [4, 5, 8, 9], [4, 5])
+         t2 = TaF(10, [4, 5, 8, 9], [5, 8])
+         self.assertTrue(t1.get_score() < t2.get_score())
+
+     def test_eTaF(self):
+         t = eTaF(10, [4, 5, 6], [4, 5, 6])
+         self.assertEqual(t.get_score(), 1)
+
+         t = eTaF(10, [4, 5, 6], [1, 2, 3])
+         self.assertEqual(t.get_score(), 0)
+
+         t = eTaF(10, [4, 5, 6], [7, 8, 9])
+         self.assertTrue(t.get_score() == 0)
+
+         t1 = eTaF(10, [4, 5, 8, 9], [4, 5])
+         t2 = eTaF(10, [4, 5, 8, 9], [5, 8])
+         self.assertTrue(t1.get_score() < t2.get_score())
+
+     def test_temp_dist(self):
+         t = Temporal_Distance(10, [4, 5, 6], [4, 5, 6])
+         self.assertEqual(t.get_score(), 0)
+
+         t = Temporal_Distance(10, [4, 6], [4, 5, 6])
+         self.assertEqual(t.get_score(), 1)
+
+         t = Temporal_Distance(10, [4], [4, 5, 6])
+         self.assertEqual(t.get_score(), 3)
+
+         t = Temporal_Distance(10, [4, 5, 6], [8])
+         self.assertEqual(t.get_score(), 11)
+
+         t = Temporal_Distance(10, [4, 5, 6], [])
+         self.assertEqual(t.get_score(), 30)
+
+
+ class Threshold_metric_tester(unittest.TestCase):
+     # def test_roc(self):
+     #     a = aucroc(true = [0,0,1,1], score = [0.1,0.4,0.35,0.8])
+     def test_auc_pr(self):
+         gt = [[2, 3]]
+         anomaly_score = [1, 3, 2, 4]
+         auc_pr = AUC_PR_pw(gt, anomaly_score)
+
+         score = auc_pr.get_score()
+         self.assertAlmostEqual(score, 0.83, 2)
+
+         anomaly_score = [1, 2, 3, 4]
+         auc_pr = AUC_PR_pw(gt, anomaly_score)
+         score = auc_pr.get_score()
+         self.assertEqual(score, 1)
+
+         anomaly_score = [4, 3, 1, 1]
+         auc_pr = AUC_PR_pw(gt, anomaly_score)
+         score = auc_pr.get_score()
+         self.assertEqual(score, 0.5)
+
+     def test_auc_roc(self):
+         gt = [[2, 3]]
+         anomaly_score = [1, 3, 2, 4]
+         auc_roc = AUC_ROC(gt, anomaly_score)
+
+         score = auc_roc.get_score()
+         self.assertAlmostEqual(score, 0.75, 2)
+
+         anomaly_score = [1, 2, 3, 4]
+         auc_roc = AUC_ROC(gt, anomaly_score)
+         score = auc_roc.get_score()
+         self.assertEqual(score, 1)
+
+         anomaly_score = [4, 4, 4, 4]
+         auc_roc = AUC_ROC(gt, anomaly_score)
+         score = auc_roc.get_score()
+         self.assertEqual(score, 0.5)
+
+     def test_vus_pr(self):
+         gt = [[0, 1]]
+         anomaly_score = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+         vus_pr = VUS_PR(gt, anomaly_score, max_window=4)
+
+         score = vus_pr.get_score()
+         self.assertTrue(score <= 0.2)
+
+         gt = [[1, 3]]
+         anomaly_score = [8, 0, 9, 1, 7, 2, 3, 4, 5, 6]
+         vus_pr = VUS_PR(gt, anomaly_score, max_window=4)
+         score = vus_pr.get_score()
+         self.assertTrue(score > 0.5)
+         vus_pr = VUS_PR(gt, anomaly_score, max_window=0)
+         score = vus_pr.get_score()
+         self.assertTrue(score < 0.5)
+
+     def test_vus_roc(self):
+         gt = [[0, 1]]
+         anomaly_score = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+         vus = VUS_ROC(gt, anomaly_score, max_window=4)
+
+         score = vus.get_score()
+         self.assertTrue(score <= 0.1)
+
+         gt = [[1, 3]]
+         anomaly_score = [8, 0, 9, 1, 7, 2, 3, 4, 5, 6]
+         vus = VUS_ROC(gt, anomaly_score, max_window=4)
+         score = vus.get_score()
+         self.assertTrue(score > 0.4)
+         vus = VUS_ROC(gt, anomaly_score, max_window=0)
+         score = vus.get_score()
+         self.assertTrue(score < 0.4)
+
+     def test_PatK(self):
+         gt = [[2, 3]]
+
+         anomaly_score = [1, 4, 2, 3]
+         patk = PatK_pw(gt, anomaly_score)
+         score = patk.get_score()
+         self.assertEqual(score, 0.5)
+
+         anomaly_score = [1, 2, 3, 4]
+         patk = PatK_pw(gt, anomaly_score)
+         score = patk.get_score()
+         self.assertEqual(score, 1)
+
+         anomaly_score = [3, 4, 1, 2]
+         patk = PatK_pw(gt, anomaly_score)
+         score = patk.get_score()
+         self.assertEqual(score, 0)
+
+         anomaly_score = [3, 4, 1, 2]
+         patk = PatK_pw([1, 2, 3], anomaly_score)
+         score = patk.get_score()
+         self.assertAlmostEqual(score, 2 / 3)
+
+         anomaly_score = [2, 1, 1, 0]
+         patk = PatK_pw([0, 1], anomaly_score)
+         score = patk.get_score()
+         self.assertAlmostEqual(score, 2 / 3)
+
+         patk = PatK_pw([], [0, 1, 2, 4])
+         self.assertRaises(AssertionError, patk.get_score)
+
+     def test_best_threshold_pw(self):
+         gt = [[2, 3]]
+
+         anomaly_score = [1, 3, 2, 4]
+         metric = Best_threshold_pw(gt, anomaly_score)
+         score = metric.get_score()
+         self.assertAlmostEqual(score, 2 * 2 / 3 * 1 / (1 + 2 / 3))
+
+         anomaly_score = [2, 3, 1, 4]
+         metric = Best_threshold_pw(gt, anomaly_score)
+         score = metric.get_score()
+         self.assertAlmostEqual(score, 2 * 1 / 2 * 1 / (1 + 1 / 2))
+
+         anomaly_score = [4, 3, 1, 2]
+         metric = Best_threshold_pw(gt, anomaly_score)
+         score = metric.get_score()
+         self.assertAlmostEqual(score, 2 * 1 / 2 * 1 / (1 + 1 / 2))
+
+
+ if __name__ == "__main__":
+     unittest.main()