valor-lite 0.33.10__tar.gz → 0.33.11__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of valor-lite might be problematic; review the file changes listed below for details.

Files changed (79)
  1. {valor_lite-0.33.10 → valor_lite-0.33.11}/PKG-INFO +1 -1
  2. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_stability.py +22 -2
  3. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/object_detection/computation.py +55 -44
  4. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite.egg-info/PKG-INFO +1 -1
  5. {valor_lite-0.33.10 → valor_lite-0.33.11}/LICENSE +0 -0
  6. {valor_lite-0.33.10 → valor_lite-0.33.11}/README.md +0 -0
  7. {valor_lite-0.33.10 → valor_lite-0.33.11}/benchmarks/.gitignore +0 -0
  8. {valor_lite-0.33.10 → valor_lite-0.33.11}/benchmarks/benchmark_classification.py +0 -0
  9. {valor_lite-0.33.10 → valor_lite-0.33.11}/benchmarks/benchmark_objdet.py +0 -0
  10. {valor_lite-0.33.10 → valor_lite-0.33.11}/examples/.gitignore +0 -0
  11. {valor_lite-0.33.10 → valor_lite-0.33.11}/examples/object-detection.ipynb +0 -0
  12. {valor_lite-0.33.10 → valor_lite-0.33.11}/examples/tabular_classification.ipynb +0 -0
  13. {valor_lite-0.33.10 → valor_lite-0.33.11}/pyproject.toml +0 -0
  14. {valor_lite-0.33.10 → valor_lite-0.33.11}/setup.cfg +0 -0
  15. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/__init__.py +0 -0
  16. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/__init__.py +0 -0
  17. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/conftest.py +0 -0
  18. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_accuracy.py +0 -0
  19. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_confusion_matrix.py +0 -0
  20. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_counts.py +0 -0
  21. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_dataloader.py +0 -0
  22. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_evaluator.py +0 -0
  23. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_f1.py +0 -0
  24. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_filtering.py +0 -0
  25. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_precision.py +0 -0
  26. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_recall.py +0 -0
  27. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_rocauc.py +0 -0
  28. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_schemas.py +0 -0
  29. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/classification/test_stability.py +0 -0
  30. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/__init__.py +0 -0
  31. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/conftest.py +0 -0
  32. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_average_precision.py +0 -0
  33. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_average_recall.py +0 -0
  34. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_confusion_matrix.py +0 -0
  35. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_counts.py +0 -0
  36. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_dataloader.py +0 -0
  37. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_evaluator.py +0 -0
  38. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_filtering.py +0 -0
  39. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_iou.py +0 -0
  40. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_pr_curve.py +0 -0
  41. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_precision.py +0 -0
  42. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_recall.py +0 -0
  43. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/object_detection/test_schemas.py +0 -0
  44. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/__init__.py +0 -0
  45. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/conftest.py +0 -0
  46. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_accuracy.py +0 -0
  47. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_annotation.py +0 -0
  48. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_confusion_matrix.py +0 -0
  49. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_dataloader.py +0 -0
  50. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_evaluator.py +0 -0
  51. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_f1.py +0 -0
  52. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_filtering.py +0 -0
  53. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_iou.py +0 -0
  54. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_precision.py +0 -0
  55. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_recall.py +0 -0
  56. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/semantic_segmentation/test_stability.py +0 -0
  57. {valor_lite-0.33.10 → valor_lite-0.33.11}/tests/text_generation/__init__.py +0 -0
  58. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/LICENSE +0 -0
  59. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/__init__.py +0 -0
  60. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/classification/__init__.py +0 -0
  61. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/classification/annotation.py +0 -0
  62. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/classification/computation.py +0 -0
  63. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/classification/manager.py +0 -0
  64. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/classification/metric.py +0 -0
  65. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/object_detection/__init__.py +0 -0
  66. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/object_detection/annotation.py +0 -0
  67. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/object_detection/manager.py +0 -0
  68. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/object_detection/metric.py +0 -0
  69. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/schemas.py +0 -0
  70. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/semantic_segmentation/__init__.py +0 -0
  71. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/semantic_segmentation/annotation.py +0 -0
  72. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/semantic_segmentation/computation.py +0 -0
  73. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/semantic_segmentation/manager.py +0 -0
  74. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/semantic_segmentation/metric.py +0 -0
  75. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite/text_generation/__init__.py +0 -0
  76. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite.egg-info/SOURCES.txt +0 -0
  77. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite.egg-info/dependency_links.txt +0 -0
  78. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite.egg-info/requires.txt +0 -0
  79. {valor_lite-0.33.10 → valor_lite-0.33.11}/valor_lite.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: valor-lite
3
- Version: 0.33.10
3
+ Version: 0.33.11
4
4
  Summary: Compute valor metrics locally.
5
5
  License: MIT License
6
6
 
@@ -7,8 +7,9 @@ def _generate_random_detections(
7
7
  n_detections: int, n_boxes: int, labels: str
8
8
  ) -> list[Detection]:
9
9
  def bbox(is_prediction):
10
- xmin, ymin = uniform(0, 10), uniform(0, 10)
11
- xmax, ymax = uniform(xmin, 15), uniform(ymin, 15)
10
+ width, height = 50, 50
11
+ xmin, ymin = uniform(0, 1000), uniform(0, 1000)
12
+ xmax, ymax = uniform(xmin, xmin + width), uniform(ymin, ymin + height)
12
13
  kw = {"scores": [uniform(0, 1)]} if is_prediction else {}
13
14
  return BoundingBox(
14
15
  xmin,
@@ -81,3 +82,22 @@ def test_fuzz_detections_with_filtering():
81
82
  score_thresholds=[0.25, 0.75],
82
83
  filter_=filter_,
83
84
  )
85
+
86
+
87
+ def test_fuzz_confusion_matrix():
88
+ dets = _generate_random_detections(1000, 30, "abcde")
89
+ loader = DataLoader()
90
+ loader.add_bounding_boxes(dets)
91
+ evaluator = loader.finalize()
92
+ assert evaluator.metadata == {
93
+ "ignored_prediction_labels": [],
94
+ "missing_prediction_labels": [],
95
+ "n_datums": 1000,
96
+ "n_groundtruths": 30000,
97
+ "n_predictions": 30000,
98
+ "n_labels": 5,
99
+ }
100
+ evaluator.evaluate(
101
+ iou_thresholds=[0.25, 0.75],
102
+ score_thresholds=[0.5],
103
+ )
@@ -184,7 +184,7 @@ def _compute_ranked_pairs_for_datum(
184
184
 
185
185
  # find best fits for prediction
186
186
  mask_label_match = data[:, 4] == data[:, 5]
187
- matched_predicitons = np.unique(data[mask_label_match, 2].astype(int))
187
+ matched_predicitons = np.unique(data[mask_label_match, 2].astype(np.int32))
188
188
  mask_unmatched_predictions = ~np.isin(data[:, 2], matched_predicitons)
189
189
  data = data[mask_label_match | mask_unmatched_predictions]
190
190
 
@@ -333,7 +333,7 @@ def compute_metrics(
333
333
  average_recall = np.zeros((n_scores, n_labels))
334
334
  counts = np.zeros((n_ious, n_scores, n_labels, 7))
335
335
 
336
- pd_labels = data[:, 5].astype(int)
336
+ pd_labels = data[:, 5].astype(np.int32)
337
337
  scores = data[:, 6]
338
338
  unique_pd_labels, unique_pd_indices = np.unique(
339
339
  pd_labels, return_index=True
@@ -383,17 +383,19 @@ def compute_metrics(
383
383
  true_positives_mask[mask_tp_inner] = mask_gt_unique
384
384
 
385
385
  # calculate intermediates
386
- pd_count = np.bincount(pd_labels, minlength=n_labels).astype(float)
386
+ pd_count = np.bincount(pd_labels, minlength=n_labels).astype(
387
+ np.float64
388
+ )
387
389
  tp_count = np.bincount(
388
390
  pd_labels,
389
391
  weights=true_positives_mask,
390
392
  minlength=n_labels,
391
- ).astype(float)
393
+ ).astype(np.float64)
392
394
 
393
395
  fp_count = np.bincount(
394
396
  pd_labels[mask_fp_inner],
395
397
  minlength=n_labels,
396
- ).astype(float)
398
+ ).astype(np.float64)
397
399
 
398
400
  fn_count = np.bincount(
399
401
  pd_labels[mask_fn_inner],
@@ -476,7 +478,7 @@ def compute_metrics(
476
478
  where=running_gt_count > 1e-9,
477
479
  out=recall,
478
480
  )
479
- recall_index = np.floor(recall * 100.0).astype(int)
481
+ recall_index = np.floor(recall * 100.0).astype(np.int32)
480
482
 
481
483
  # bin precision-recall curve
482
484
  pr_curve = np.zeros((n_ious, n_labels, 101, 2))
@@ -582,7 +584,7 @@ def _count_with_examples(
582
584
  Counts for each unique label index.
583
585
  """
584
586
  unique_rows, indices = np.unique(
585
- data.astype(int)[:, unique_idx],
587
+ data.astype(np.int32)[:, unique_idx],
586
588
  return_index=True,
587
589
  axis=0,
588
590
  )
@@ -593,6 +595,35 @@ def _count_with_examples(
593
595
  return examples, labels, counts
594
596
 
595
597
 
598
+ def _isin(
599
+ data: NDArray[np.int32],
600
+ subset: NDArray[np.int32],
601
+ ) -> NDArray[np.bool_]:
602
+ """
603
+ Creates a mask of rows that exist within the subset.
604
+
605
+ Parameters
606
+ ----------
607
+ data : NDArray[np.int32]
608
+ An array with shape (N, 2).
609
+ subset : NDArray[np.int32]
610
+ An array with shape (M, 2) where N >= M.
611
+
612
+ Returns
613
+ -------
614
+ NDArray[np.bool_]
615
+ Returns a bool mask with shape (N,).
616
+ """
617
+ combined_data = (data[:, 0].astype(np.int64) << 32) | data[:, 1].astype(
618
+ np.uint32
619
+ )
620
+ combined_subset = (subset[:, 0].astype(np.int64) << 32) | subset[
621
+ :, 1
622
+ ].astype(np.uint32)
623
+ mask = np.isin(combined_data, combined_subset, assume_unique=False)
624
+ return mask
625
+
626
+
596
627
  def compute_confusion_matrix(
597
628
  data: NDArray[np.float64],
598
629
  label_metadata: NDArray[np.int32],
@@ -666,20 +697,16 @@ def compute_confusion_matrix(
666
697
  mask_gt_pd_match = mask_gt_pd_exists & mask_label_match
667
698
  mask_gt_pd_mismatch = mask_gt_pd_exists & ~mask_label_match
668
699
 
669
- groundtruths = data[:, [0, 1]].astype(int)
670
- predictions = data[:, [0, 2]].astype(int)
700
+ groundtruths = data[:, [0, 1]].astype(np.int32)
701
+ predictions = data[:, [0, 2]].astype(np.int32)
671
702
  for iou_idx in range(n_ious):
672
703
  mask_iou_threshold = data[:, 3] >= iou_thresholds[iou_idx]
673
704
  mask_iou = mask_iou_nonzero & mask_iou_threshold
674
705
 
675
706
  groundtruths_passing_ious = np.unique(groundtruths[mask_iou], axis=0)
676
- mask_groundtruths_with_passing_ious = (
677
- (
678
- groundtruths.reshape(-1, 1, 2)
679
- == groundtruths_passing_ious.reshape(1, -1, 2)
680
- )
681
- .all(axis=2)
682
- .any(axis=1)
707
+ mask_groundtruths_with_passing_ious = _isin(
708
+ data=groundtruths,
709
+ subset=groundtruths_passing_ious,
683
710
  )
684
711
  mask_groundtruths_without_passing_ious = (
685
712
  ~mask_groundtruths_with_passing_ious & mask_gt_exists
@@ -688,13 +715,9 @@ def compute_confusion_matrix(
688
715
  predictions_with_passing_ious = np.unique(
689
716
  predictions[mask_iou], axis=0
690
717
  )
691
- mask_predictions_with_passing_ious = (
692
- (
693
- predictions.reshape(-1, 1, 2)
694
- == predictions_with_passing_ious.reshape(1, -1, 2)
695
- )
696
- .all(axis=2)
697
- .any(axis=1)
718
+ mask_predictions_with_passing_ious = _isin(
719
+ data=predictions,
720
+ subset=predictions_with_passing_ious,
698
721
  )
699
722
  mask_predictions_without_passing_ious = (
700
723
  ~mask_predictions_with_passing_ious & mask_pd_exists
@@ -707,13 +730,9 @@ def compute_confusion_matrix(
707
730
  groundtruths_with_passing_score = np.unique(
708
731
  groundtruths[mask_iou & mask_score], axis=0
709
732
  )
710
- mask_groundtruths_with_passing_score = (
711
- (
712
- groundtruths.reshape(-1, 1, 2)
713
- == groundtruths_with_passing_score.reshape(1, -1, 2)
714
- )
715
- .all(axis=2)
716
- .any(axis=1)
733
+ mask_groundtruths_with_passing_score = _isin(
734
+ data=groundtruths,
735
+ subset=groundtruths_with_passing_score,
717
736
  )
718
737
  mask_groundtruths_without_passing_score = (
719
738
  ~mask_groundtruths_with_passing_score & mask_gt_exists
@@ -736,21 +755,13 @@ def compute_confusion_matrix(
736
755
  )
737
756
 
738
757
  # filter out true-positives from misclf and misprd
739
- mask_gts_with_tp_override = (
740
- (
741
- data[mask_misclf][:, [0, 1]].reshape(-1, 1, 2)
742
- == data[mask_tp][:, [0, 1]].reshape(1, -1, 2)
743
- )
744
- .all(axis=2)
745
- .any(axis=1)
758
+ mask_gts_with_tp_override = _isin(
759
+ data=groundtruths[mask_misclf],
760
+ subset=groundtruths[mask_tp],
746
761
  )
747
- mask_pds_with_tp_override = (
748
- (
749
- data[mask_misclf][:, [0, 2]].reshape(-1, 1, 2)
750
- == data[mask_tp][:, [0, 2]].reshape(1, -1, 2)
751
- )
752
- .all(axis=2)
753
- .any(axis=1)
762
+ mask_pds_with_tp_override = _isin(
763
+ data=predictions[mask_misclf],
764
+ subset=predictions[mask_tp],
754
765
  )
755
766
  mask_misprd[mask_misclf] |= (
756
767
  ~mask_gts_with_tp_override & mask_pds_with_tp_override
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: valor-lite
3
- Version: 0.33.10
3
+ Version: 0.33.11
4
4
  Summary: Compute valor metrics locally.
5
5
  License: MIT License
6
6
 
File without changes
File without changes
File without changes