tsadmetrics 0.1.15__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/test_non_binary.py CHANGED
@@ -148,8 +148,7 @@ class TestAUCPRPW(unittest.TestCase):
 
 
 
-
-class TestAUCPRPA(unittest.TestCase):
+class TestAUCROCPA(unittest.TestCase):
 
     def setUp(self):
         """
@@ -165,64 +164,64 @@ class TestAUCPRPA(unittest.TestCase):
 
         self.y_pred3 = np.array([4, 4, 4, 4])
 
-        self.y_true2 = [0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0
-        ,1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1
-        ,1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1]
+        self.y_true2 = np.array([0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0
+        ,1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1
+        ,1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1])
 
 
         self.y_pred4 = [0.1280475, 0.12059283 ,0.29936968 ,0.85866402 ,0.74071874 ,0.22310849
-        ,0.11281839 ,0.26133246 ,0.33696106 ,0.01442675 ,0.51962876 ,0.07828833
-        ,0.45337844 ,0.09444483 ,0.91216588 ,0.18847595 ,0.26828481 ,0.65248919
-        ,0.46291981 ,0.43730757 ,0.78087553 ,0.45031043 ,0.88661033 ,0.56209352
-        ,0.45029423 ,0.17638205 ,0.9261279 ,0.58830652 ,0.01602648 ,0.73903379
-        ,0.61831379 ,0.74779903 ,0.42682106 ,0.82583519 ,0.19709012 ,0.44925962
-        ,0.62752415 ,0.52458327 ,0.46291768 ,0.33937527 ,0.34868777 ,0.12293847
-        ,0.84477504 ,0.10225254 ,0.37048167 ,0.04476031 ,0.36680499 ,0.11346155
-        ,0.10583112 ,0.09493136 ,0.54878736 ,0.68514489 ,0.5940307 ,0.14526962
-        ,0.69385728 ,0.38888727 ,0.61495304 ,0.06795402 ,0.02894603 ,0.08293609
-        ,0.22865685 ,0.63531487 ,0.97966126 ,0.31418622 ,0.8943095 ,0.22974177
-        ,0.94402929 ,0.13140625 ,0.80539267 ,0.40160344 ,0.38151339 ,0.65011626
-        ,0.71657942 ,0.93297398 ,0.32043329 ,0.54667941 ,0.90645979 ,0.98730183
-        ,0.82351336 ,0.10404812 ,0.6962921 ,0.72890752 ,0.49700666 ,0.47461103
-        ,0.59696079 ,0.85876179 ,0.247344 ,0.38187879 ,0.23906861 ,0.5266315
-        ,0.08171512 ,0.27903375 ,0.61112439 ,0.20784267 ,0.90652453 ,0.87575255
-        ,0.26972245 ,0.78780138 ,0.37649185 ,0.08467683]
-
-
-    def test_auc_pr_pa(self):
+                ,0.11281839 ,0.26133246 ,0.33696106 ,0.01442675 ,0.51962876 ,0.07828833
+                ,0.45337844 ,0.09444483 ,0.91216588 ,0.18847595 ,0.26828481 ,0.65248919
+                ,0.46291981 ,0.43730757 ,0.78087553 ,0.45031043 ,0.88661033 ,0.56209352
+                ,0.45029423 ,0.17638205 ,0.9261279 ,0.58830652 ,0.01602648 ,0.73903379
+                ,0.61831379 ,0.74779903 ,0.42682106 ,0.82583519 ,0.19709012 ,0.44925962
+                ,0.62752415 ,0.52458327 ,0.46291768 ,0.33937527 ,0.34868777 ,0.12293847
+                ,0.84477504 ,0.10225254 ,0.37048167 ,0.04476031 ,0.36680499 ,0.11346155
+                ,0.10583112 ,0.09493136 ,0.54878736 ,0.68514489 ,0.5940307 ,0.14526962
+                ,0.69385728 ,0.38888727 ,0.61495304 ,0.06795402 ,0.02894603 ,0.08293609
+                ,0.22865685 ,0.63531487 ,0.97966126 ,0.31418622 ,0.8943095 ,0.22974177
+                ,0.94402929 ,0.13140625 ,0.80539267 ,0.40160344 ,0.38151339 ,0.65011626
+                ,0.71657942 ,0.93297398 ,0.32043329 ,0.54667941 ,0.90645979 ,0.98730183
+                ,0.82351336 ,0.10404812 ,0.6962921 ,0.72890752 ,0.49700666 ,0.47461103
+                ,0.59696079 ,0.85876179 ,0.247344 ,0.38187879 ,0.23906861 ,0.5266315
+                ,0.08171512 ,0.27903375 ,0.61112439 ,0.20784267 ,0.90652453 ,0.87575255
+                ,0.26972245 ,0.78780138 ,0.37649185 ,0.08467683]
+
+
+    def test_auc_roc_pa(self):
         """
         Prueba para la función auc_pr_pa.
         """
-        score = round(auc_pr_pa(self.y_true1, self.y_pred1),2)
-        expected_score = 1.0
+        score = round(auc_roc_pa(self.y_true1, self.y_pred1),2)
+        expected_score = 0.5
         self.assertAlmostEqual(score, expected_score, places=4)
 
-        score = round(auc_pr_pa(self.y_true1, self.y_pred2),2)
-        expected_score = 1.0
+        score = round(auc_roc_pa(self.y_true1, self.y_pred2),2)
+        expected_score = 0.5
         self.assertAlmostEqual(score, expected_score, places=4)
 
-        score = round(auc_pr_pa(self.y_true1, self.y_pred3),2)
-        expected_score = 0.75
+        score = round(auc_roc_pa(self.y_true1, self.y_pred3),2)
+        expected_score = 0.25
         self.assertAlmostEqual(score, expected_score, places=4)
 
-        if len(self.y_true2) == len(self.y_pred4):
-            score = round(auc_pr_pa(self.y_true2, self.y_pred4),2)
-            expected_score = 0.78
-            self.assertAlmostEqual(score, expected_score, places=4)
+
+        score = round(auc_roc_pa(self.y_true2, self.y_pred4),2)
+        expected_score = 0.33
+        self.assertAlmostEqual(score, expected_score, places=4)
 
 
-    def test_auc_pr_pa_consistency(self):
+    def test_auc_roc_pa_consistency(self):
         y_true, y_pred = [],[]
         try:
             for _ in range(100):
                 y_true = np.random.choice([0, 1], size=(100,))
                 y_pred = np.random.random( size=(100,))
-                score = auc_pr_pa(y_true, y_pred)
+                score = auc_roc_pa(y_true, y_pred)
         except Exception as e:
-            self.fail(f"auc_roc_pr_pa raised an exception {e}")
-
+            self.fail(f"auc_roc_pa raised an exception {e}")
 
-class TestAUCPRSW(unittest.TestCase):
+
+class TestAUCPRPA(unittest.TestCase):
 
     def setUp(self):
         """
@@ -237,34 +236,65 @@ class TestAUCPRSW(unittest.TestCase):
         self.y_pred2 = np.array([1, 2, 3, 4])
 
         self.y_pred3 = np.array([4, 4, 4, 4])
-
 
-    def test_auc_pr_sw(self):
+        self.y_true2 = np.array([0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0
+        ,1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1
+        ,1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1])
+
+
+        self.y_pred4 = [0.1280475, 0.12059283 ,0.29936968 ,0.85866402 ,0.74071874 ,0.22310849
+                ,0.11281839 ,0.26133246 ,0.33696106 ,0.01442675 ,0.51962876 ,0.07828833
+                ,0.45337844 ,0.09444483 ,0.91216588 ,0.18847595 ,0.26828481 ,0.65248919
+                ,0.46291981 ,0.43730757 ,0.78087553 ,0.45031043 ,0.88661033 ,0.56209352
+                ,0.45029423 ,0.17638205 ,0.9261279 ,0.58830652 ,0.01602648 ,0.73903379
+                ,0.61831379 ,0.74779903 ,0.42682106 ,0.82583519 ,0.19709012 ,0.44925962
+                ,0.62752415 ,0.52458327 ,0.46291768 ,0.33937527 ,0.34868777 ,0.12293847
+                ,0.84477504 ,0.10225254 ,0.37048167 ,0.04476031 ,0.36680499 ,0.11346155
+                ,0.10583112 ,0.09493136 ,0.54878736 ,0.68514489 ,0.5940307 ,0.14526962
+                ,0.69385728 ,0.38888727 ,0.61495304 ,0.06795402 ,0.02894603 ,0.08293609
+                ,0.22865685 ,0.63531487 ,0.97966126 ,0.31418622 ,0.8943095 ,0.22974177
+                ,0.94402929 ,0.13140625 ,0.80539267 ,0.40160344 ,0.38151339 ,0.65011626
+                ,0.71657942 ,0.93297398 ,0.32043329 ,0.54667941 ,0.90645979 ,0.98730183
+                ,0.82351336 ,0.10404812 ,0.6962921 ,0.72890752 ,0.49700666 ,0.47461103
+                ,0.59696079 ,0.85876179 ,0.247344 ,0.38187879 ,0.23906861 ,0.5266315
+                ,0.08171512 ,0.27903375 ,0.61112439 ,0.20784267 ,0.90652453 ,0.87575255
+                ,0.26972245 ,0.78780138 ,0.37649185 ,0.08467683]
+
+
+    def test_auc_pr_pa(self):
         """
-        Prueba para la función auc_pr_sw.
+        Prueba para la función auc_pr_pa.
         """
-        score = round(auc_pr_sw(self.y_true1, self.y_pred1),2)
+        score = round(auc_pr_pa(self.y_true1, self.y_pred1),2)
         expected_score = 1.0
         self.assertAlmostEqual(score, expected_score, places=4)
 
-        score = round(auc_pr_sw(self.y_true1, self.y_pred2),2)
-        expected_score = 1
+        score = round(auc_pr_pa(self.y_true1, self.y_pred2),2)
+        expected_score = 1.0
         self.assertAlmostEqual(score, expected_score, places=4)
 
-        score = round(auc_pr_sw(self.y_true1, self.y_pred3),2)
-        expected_score = 1
+        score = round(auc_pr_pa(self.y_true1, self.y_pred3),2)
+        expected_score = 0.75
         self.assertAlmostEqual(score, expected_score, places=4)
 
 
-    # def test_auc_pr_sw_consistency(self):
-    #     try:
-    #         for _ in range(100):
-    #             y_true = np.random.choice([0, 1], size=(100,))
-    #             y_pred = np.random.random( size=(100,))
+        score = round(auc_pr_pa(self.y_true2, self.y_pred4),2)
+        expected_score = 0.78
+        self.assertAlmostEqual(score, expected_score, places=4)
+
+
+    def test_auc_pr_pa_consistency(self):
+        y_true, y_pred = [],[]
+        try:
+            for _ in range(100):
+                y_true = np.random.choice([0, 1], size=(100,))
+                y_pred = np.random.random( size=(100,))
+                score = auc_pr_pa(y_true, y_pred)
+        except Exception as e:
+            self.fail(f"auc_roc_pr_pa raised an exception {e}")
+
+
 
-    #             score = auc_pr_sw(y_true, y_pred)
-    #         except Exception as e:
-    #             self.fail(f"auc_pr_sw raised an exception {e}")
 
 
 class TestVUSROC(unittest.TestCase):
@@ -355,16 +385,65 @@ class TestNonBinaryPATE(unittest.TestCase):
         """
         Configuración inicial para las pruebas.
         """
-        pass
+        self.y_true1 = np.array([0,0,1,1])
 
-
+
+        self.y_pred1 = np.array([1, 3, 2, 4])
+
+        self.y_pred2 = np.array([1, 2, 3, 4])
+
+        self.y_pred3 = np.array([4, 4, 4, 4])
+
+        self.y_true2 = np.array([0,1,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,0,1,1,0
+        ,1,1,1,0,0,1,0,0,1,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,1,1,0,1,1
+        ,1,1,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1,0,0,1,0,0,1,0,1,1])
+
+
+        self.y_pred4 = [0.1280475, 0.12059283 ,0.29936968 ,0.85866402 ,0.74071874 ,0.22310849
+                ,0.11281839 ,0.26133246 ,0.33696106 ,0.01442675 ,0.51962876 ,0.07828833
+                ,0.45337844 ,0.09444483 ,0.91216588 ,0.18847595 ,0.26828481 ,0.65248919
+                ,0.46291981 ,0.43730757 ,0.78087553 ,0.45031043 ,0.88661033 ,0.56209352
+                ,0.45029423 ,0.17638205 ,0.9261279 ,0.58830652 ,0.01602648 ,0.73903379
+                ,0.61831379 ,0.74779903 ,0.42682106 ,0.82583519 ,0.19709012 ,0.44925962
+                ,0.62752415 ,0.52458327 ,0.46291768 ,0.33937527 ,0.34868777 ,0.12293847
+                ,0.84477504 ,0.10225254 ,0.37048167 ,0.04476031 ,0.36680499 ,0.11346155
+                ,0.10583112 ,0.09493136 ,0.54878736 ,0.68514489 ,0.5940307 ,0.14526962
+                ,0.69385728 ,0.38888727 ,0.61495304 ,0.06795402 ,0.02894603 ,0.08293609
+                ,0.22865685 ,0.63531487 ,0.97966126 ,0.31418622 ,0.8943095 ,0.22974177
+                ,0.94402929 ,0.13140625 ,0.80539267 ,0.40160344 ,0.38151339 ,0.65011626
+                ,0.71657942 ,0.93297398 ,0.32043329 ,0.54667941 ,0.90645979 ,0.98730183
+                ,0.82351336 ,0.10404812 ,0.6962921 ,0.72890752 ,0.49700666 ,0.47461103
+                ,0.59696079 ,0.85876179 ,0.247344 ,0.38187879 ,0.23906861 ,0.5266315
+                ,0.08171512 ,0.27903375 ,0.61112439 ,0.20784267 ,0.90652453 ,0.87575255
+                ,0.26972245 ,0.78780138 ,0.37649185 ,0.08467683]
+
+    def test_real_pate(self):
+        """
+        Prueba para la función real_pate.
+        """
+        score = round(real_pate(self.y_true1, self.y_pred1,early=1, delay=1),2)
+        expected_score = 0.79
+        self.assertAlmostEqual(score, expected_score, places=4)
+
+        score = round(real_pate(self.y_true1, self.y_pred2,early=1, delay=1),2)
+        expected_score = 1.0
+        self.assertAlmostEqual(score, expected_score, places=4)
+
+        score = round(real_pate(self.y_true1, self.y_pred3,early=1, delay=1),2)
+        expected_score = 0.75
+        self.assertAlmostEqual(score, expected_score, places=4)
 
 
-    def test_pate_consistency(self):
+        score = round(real_pate(self.y_true2, self.y_pred4,early=5, delay=5),2)
+        expected_score = 0.67
+        self.assertAlmostEqual(score, expected_score, places=4)
+
+
+    def test_real_pate_consistency(self):
         try:
             for _ in range(10):
                 y_true = np.random.choice([0, 1], size=(100,))
-                y_pred = np.random.choice([0, 1], size=(100,))
+                y_pred = np.random.random( size=(100,))
 
                 score = real_pate(y_true, y_pred, early=5, delay=5)
         except Exception as e:
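
Editor's note (not part of the diff): the hunks above rename the old TestAUCPRPA class to TestAUCROCPA for the new auc_roc_pa metric, re-add a TestAUCPRPA class for auc_pr_pa, convert fixtures from Python lists to np.array, and extend TestNonBinaryPATE with real-valued real_pate cases. Based solely on the call signatures exercised in these tests, the metrics are plain functions over binary labels and continuous scores; the input values below are illustrative placeholders, not values taken from the tests.

    import numpy as np
    from tsadmetrics import auc_roc_pa, auc_pr_pa, real_pate

    y_true = np.array([0, 0, 1, 1])            # binary ground-truth labels, as in the fixtures above
    y_score = np.array([0.2, 0.1, 0.7, 0.9])   # continuous anomaly scores (illustrative values)

    # Point-adjusted AUC metrics, called exactly as the updated tests call them.
    roc_pa = auc_roc_pa(y_true, y_score)
    pr_pa = auc_pr_pa(y_true, y_score)

    # real_pate additionally takes early/delay buffer sizes, per TestNonBinaryPATE.
    pate = real_pate(y_true, y_score, early=1, delay=1)
    print(roc_pa, pr_pa, pate)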
tests/test_utils.py ADDED
@@ -0,0 +1,49 @@
+import unittest
+from tsadmetrics import *
+import os
+import numpy as np
+import random
+
+class TestComputeMetrics(unittest.TestCase):
+    def setUp(self):
+        self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred_binary = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.y_pred_non_binary = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.12, 0.11, 0.21, 0.13, 0.4, 0.3, 0.2, 0.1, 0.32, 0.98, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
+
+
+    def test_compute_metrics_binary(self):
+        metrics = [
+            ('point_wise_f_score', point_wise_f_score),
+            ('segment_wise_f_score', segment_wise_f_score),
+        ]
+        metrics_params = {}
+
+        results = compute_metrics(self.y_true, self.y_pred_binary, metrics, metrics_params)
+
+        self.assertTrue('point_wise_f_score' in results['metric_name'].values)
+        self.assertTrue('segment_wise_f_score' in results['metric_name'].values)
+
+    def test_compute_metrics_non_binary(self):
+        metrics = [
+            ('vus_roc', vus_roc),
+            ('vus_pr', vus_pr),
+        ]
+        metrics_params = {
+            'vus_roc': {'window': 3},
+            'vus_pr': {'window': 3}}
+
+        results = compute_metrics(self.y_true, self.y_pred_non_binary, metrics, metrics_params, is_anomaly_score=True)
+
+        self.assertTrue('vus_roc' in results['metric_name'].values)
+        self.assertTrue('vus_pr' in results['metric_name'].values)
+
+class TestComputeMetricsFromFile(unittest.TestCase):
+    def setUp(self):
+        self.y_true = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
+        self.y_pred_binary = np.array([0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
+        self.results_file = 'tests/test_data/results.csv'
+        self.conf_file = 'tests/test_data/config.json'
+
+    def test_compute_metrics_from_file(self):
+        results_df = compute_metrics_from_file(self.results_file, self.conf_file, output_dir='tests/test_data')
+        assert os.path.exists('tests/test_data/computed_metrics.csv'), f"Error: The file 'computed_metrics.csv' was not created."
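
Editor's note (not part of the diff): the new test module documents the compute_metrics call pattern, a list of (name, callable) pairs plus a per-metric parameter dict, with is_anomaly_score=True when predictions are continuous scores, and it reads the result via results['metric_name'].values, which suggests a DataFrame-like return value. The sketch below mirrors the non-binary test; only the argument structure is taken from the test, the data values are illustrative.

    import numpy as np
    from tsadmetrics import compute_metrics, vus_roc, vus_pr

    y_true = np.array([0, 0, 1, 1, 1, 0, 0, 1])
    y_score = np.array([0.1, 0.2, 0.8, 0.9, 0.4, 0.1, 0.3, 0.7])

    # (metric name, metric callable) pairs, with parameters keyed by metric name.
    metrics = [('vus_roc', vus_roc), ('vus_pr', vus_pr)]
    metrics_params = {'vus_roc': {'window': 3}, 'vus_pr': {'window': 3}}

    # is_anomaly_score=True marks y_score as continuous scores rather than 0/1 labels.
    results = compute_metrics(y_true, y_score, metrics, metrics_params, is_anomaly_score=True)
    print(results['metric_name'].values)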
tsadmetrics/__init__.py CHANGED
@@ -18,4 +18,4 @@ __all__ = ['point_wise_recall', 'point_wise_precision', 'point_wise_f_score','po
            'affiliation_based_recall','affiliation_based_precision','affiliation_based_f_score','nab_score','temporal_distance',
            'average_detection_count','absolute_detection_distance','total_detected_in_range','detection_accuracy_in_range','weighted_detection_difference',
            'binary_pate','real_pate','mean_time_to_detect',
-           'precision_at_k','auc_roc_pw','auc_pr_pw','auc_pr_pa','auc_pr_sw','vus_roc','vus_pr', 'compute_metrics',]
+           'precision_at_k','auc_roc_pw','auc_pr_pw','auc_roc_pa','auc_pr_pa','vus_roc','vus_pr', 'compute_metrics', 'compute_metrics_from_file']
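
Editor's note (not part of the diff): the net effect of the __all__ change is that auc_pr_sw is no longer exported, while auc_roc_pa and compute_metrics_from_file join the public surface. A minimal sketch of the file-based entry point follows; the paths are placeholders, and the output file name comes from the assertion in tests/test_utils.py.

    from tsadmetrics import compute_metrics_from_file

    # Placeholder paths; the test uses tests/test_data/results.csv and a JSON config file.
    results_df = compute_metrics_from_file('results.csv', 'config.json', output_dir='.')
    # Expected side effect, per the new test: '<output_dir>/computed_metrics.csv' is created.
    print(results_df)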