valor-lite 0.33.9__py3-none-any.whl → 0.33.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -184,7 +184,7 @@ def _compute_ranked_pairs_for_datum(
 
     # find best fits for prediction
     mask_label_match = data[:, 4] == data[:, 5]
-    matched_predicitons = np.unique(data[mask_label_match, 2].astype(int))
+    matched_predicitons = np.unique(data[mask_label_match, 2].astype(np.int32))
     mask_unmatched_predictions = ~np.isin(data[:, 2], matched_predicitons)
     data = data[mask_label_match | mask_unmatched_predictions]
 
@@ -333,8 +333,11 @@ def compute_metrics(
     average_recall = np.zeros((n_scores, n_labels))
     counts = np.zeros((n_ious, n_scores, n_labels, 7))
 
-    pd_labels = data[:, 5].astype(int)
-    unique_pd_labels = np.unique(pd_labels)
+    pd_labels = data[:, 5].astype(np.int32)
+    scores = data[:, 6]
+    unique_pd_labels, unique_pd_indices = np.unique(
+        pd_labels, return_index=True
+    )
     gt_count = label_metadata[:, 0]
     running_total_count = np.zeros(
         (n_ious, n_rows),
@@ -342,7 +345,6 @@ def compute_metrics(
     )
     running_tp_count = np.zeros_like(running_total_count)
     running_gt_count = np.zeros_like(running_total_count)
-    pr_curve = np.zeros((n_ious, n_labels, 101))
 
     mask_score_nonzero = data[:, 6] > 1e-9
     mask_gt_exists = data[:, 1] >= 0.0
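
A note on the dtype changes in the hunks above and below: .astype(int) and .astype(float) defer to NumPy's default integer and float types, and the default integer width is platform- and version-dependent, whereas np.int32 / np.float64 pin the width explicitly. A minimal standalone illustration, not taken from the package:

    import numpy as np

    labels = np.array([0.0, 3.0, 3.0, 7.0])

    # NumPy's default integer: 64-bit on most platforms, but 32-bit on
    # Windows builds of NumPy 1.x, so the width is not guaranteed.
    print(labels.astype(int).dtype)

    # Explicitly 32-bit everywhere.
    print(labels.astype(np.int32).dtype)  # int32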
@@ -381,17 +383,19 @@ def compute_metrics(
         true_positives_mask[mask_tp_inner] = mask_gt_unique
 
         # calculate intermediates
-        pd_count = np.bincount(pd_labels, minlength=n_labels).astype(float)
+        pd_count = np.bincount(pd_labels, minlength=n_labels).astype(
+            np.float64
+        )
         tp_count = np.bincount(
             pd_labels,
             weights=true_positives_mask,
             minlength=n_labels,
-        ).astype(float)
+        ).astype(np.float64)
 
         fp_count = np.bincount(
             pd_labels[mask_fp_inner],
             minlength=n_labels,
-        ).astype(float)
+        ).astype(np.float64)
 
         fn_count = np.bincount(
             pd_labels[mask_fn_inner],
@@ -474,21 +478,43 @@ def compute_metrics(
         where=running_gt_count > 1e-9,
         out=recall,
     )
-    recall_index = np.floor(recall * 100.0).astype(int)
+    recall_index = np.floor(recall * 100.0).astype(np.int32)
+
+    # bin precision-recall curve
+    pr_curve = np.zeros((n_ious, n_labels, 101, 2))
     for iou_idx in range(n_ious):
         p = precision[iou_idx]
         r = recall_index[iou_idx]
-        pr_curve[iou_idx, pd_labels, r] = np.maximum(
-            pr_curve[iou_idx, pd_labels, r], p
+        pr_curve[iou_idx, pd_labels, r, 0] = np.maximum(
+            pr_curve[iou_idx, pd_labels, r, 0],
+            p,
+        )
+        pr_curve[iou_idx, pd_labels, r, 1] = np.maximum(
+            pr_curve[iou_idx, pd_labels, r, 1],
+            scores,
         )
 
     # calculate average precision
-    running_max = np.zeros((n_ious, n_labels))
+    running_max_precision = np.zeros((n_ious, n_labels))
+    running_max_score = np.zeros((n_labels))
     for recall in range(100, -1, -1):
-        precision = pr_curve[:, :, recall]
-        running_max = np.maximum(precision, running_max)
-        average_precision += running_max
-        pr_curve[:, :, recall] = running_max
+
+        # running max precision
+        running_max_precision = np.maximum(
+            pr_curve[:, :, recall, 0],
+            running_max_precision,
+        )
+        pr_curve[:, :, recall, 0] = running_max_precision
+
+        # running max score
+        running_max_score = np.maximum(
+            pr_curve[:, :, recall, 1],
+            running_max_score,
+        )
+        pr_curve[:, :, recall, 1] = running_max_score
+
+        average_precision += running_max_precision
+
     average_precision = average_precision / 101.0
 
     # calculate average recall
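
For orientation, the hunk above now stores a (precision, score) pair per recall bin: detections are bucketed into 101 recall bins (0.00 to 1.00 in steps of 0.01), and a sweep from high recall to low carries a running maximum of both quantities so the stored curve is non-increasing in recall; average precision is then the mean over the 101 interpolated precisions. A minimal standalone sketch of that interpolation step, using made-up array names rather than the package's internals:

    import numpy as np

    def interpolate_pr_curve(precisions, recalls, scores):
        """101-point interpolation: each recall bin keeps the max precision
        (and max score) observed at that recall or any higher recall."""
        curve = np.zeros((101, 2))  # [:, 0] = precision, [:, 1] = score
        bins = np.floor(np.clip(recalls, 0.0, 1.0) * 100.0).astype(np.int32)
        np.maximum.at(curve[:, 0], bins, precisions)
        np.maximum.at(curve[:, 1], bins, scores)

        # sweep from recall 1.00 down to 0.00 carrying a running maximum
        running_p = running_s = 0.0
        for r in range(100, -1, -1):
            running_p = max(running_p, curve[r, 0])
            running_s = max(running_s, curve[r, 1])
            curve[r, 0], curve[r, 1] = running_p, running_s

        average_precision = curve[:, 0].sum() / 101.0
        return curve, average_precision

The actual compute_metrics keeps one such curve per IoU threshold and label, which is why pr_curve is now allocated as (n_ious, n_labels, 101, 2).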
@@ -558,7 +584,7 @@ def _count_with_examples(
         Counts for each unique label index.
     """
     unique_rows, indices = np.unique(
-        data.astype(int)[:, unique_idx],
+        data.astype(np.int32)[:, unique_idx],
         return_index=True,
         axis=0,
     )
@@ -569,6 +595,35 @@ def _count_with_examples(
     return examples, labels, counts
 
 
+def _isin(
+    data: NDArray[np.int32],
+    subset: NDArray[np.int32],
+) -> NDArray[np.bool_]:
+    """
+    Creates a mask of rows that exist within the subset.
+
+    Parameters
+    ----------
+    data : NDArray[np.int32]
+        An array with shape (N, 2).
+    subset : NDArray[np.int32]
+        An array with shape (M, 2) where N >= M.
+
+    Returns
+    -------
+    NDArray[np.bool_]
+        Returns a bool mask with shape (N,).
+    """
+    combined_data = (data[:, 0].astype(np.int64) << 32) | data[:, 1].astype(
+        np.uint32
+    )
+    combined_subset = (subset[:, 0].astype(np.int64) << 32) | subset[
+        :, 1
+    ].astype(np.uint32)
+    mask = np.isin(combined_data, combined_subset, assume_unique=False)
+    return mask
+
+
 def compute_confusion_matrix(
     data: NDArray[np.float64],
     label_metadata: NDArray[np.int32],
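
The new _isin helper above replaces the reshape(-1, 1, 2) == reshape(1, -1, 2) broadcast (which materializes an N x M x 2 boolean array) used inside compute_confusion_matrix below: each (datum id, annotation id) pair is packed into a single int64 key, so row membership reduces to a 1-D np.isin. A rough standalone equivalence check, assuming both id columns are int32 as in the package; the test arrays below are made up:

    import numpy as np

    rng = np.random.default_rng(0)
    data = rng.integers(0, 1_000, size=(500, 2), dtype=np.int32)
    subset = np.unique(data[rng.integers(0, 500, size=50)], axis=0)

    # packed-key approach, mirroring _isin
    keys_data = (data[:, 0].astype(np.int64) << 32) | data[:, 1].astype(np.uint32)
    keys_subset = (subset[:, 0].astype(np.int64) << 32) | subset[:, 1].astype(np.uint32)
    fast = np.isin(keys_data, keys_subset)

    # broadcast approach used prior to this release
    slow = (
        (data.reshape(-1, 1, 2) == subset.reshape(1, -1, 2))
        .all(axis=2)
        .any(axis=1)
    )

    assert np.array_equal(fast, slow)

The packing is exact for int32 inputs because each column occupies its own 32-bit half of the key, so distinct pairs always map to distinct keys.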
@@ -642,20 +697,16 @@ def compute_confusion_matrix(
     mask_gt_pd_match = mask_gt_pd_exists & mask_label_match
     mask_gt_pd_mismatch = mask_gt_pd_exists & ~mask_label_match
 
-    groundtruths = data[:, [0, 1]].astype(int)
-    predictions = data[:, [0, 2]].astype(int)
+    groundtruths = data[:, [0, 1]].astype(np.int32)
+    predictions = data[:, [0, 2]].astype(np.int32)
     for iou_idx in range(n_ious):
         mask_iou_threshold = data[:, 3] >= iou_thresholds[iou_idx]
         mask_iou = mask_iou_nonzero & mask_iou_threshold
 
         groundtruths_passing_ious = np.unique(groundtruths[mask_iou], axis=0)
-        mask_groundtruths_with_passing_ious = (
-            (
-                groundtruths.reshape(-1, 1, 2)
-                == groundtruths_passing_ious.reshape(1, -1, 2)
-            )
-            .all(axis=2)
-            .any(axis=1)
+        mask_groundtruths_with_passing_ious = _isin(
+            data=groundtruths,
+            subset=groundtruths_passing_ious,
         )
         mask_groundtruths_without_passing_ious = (
             ~mask_groundtruths_with_passing_ious & mask_gt_exists
@@ -664,13 +715,9 @@ def compute_confusion_matrix(
         predictions_with_passing_ious = np.unique(
             predictions[mask_iou], axis=0
         )
-        mask_predictions_with_passing_ious = (
-            (
-                predictions.reshape(-1, 1, 2)
-                == predictions_with_passing_ious.reshape(1, -1, 2)
-            )
-            .all(axis=2)
-            .any(axis=1)
+        mask_predictions_with_passing_ious = _isin(
+            data=predictions,
+            subset=predictions_with_passing_ious,
         )
         mask_predictions_without_passing_ious = (
             ~mask_predictions_with_passing_ious & mask_pd_exists
@@ -683,13 +730,9 @@ def compute_confusion_matrix(
             groundtruths_with_passing_score = np.unique(
                 groundtruths[mask_iou & mask_score], axis=0
             )
-            mask_groundtruths_with_passing_score = (
-                (
-                    groundtruths.reshape(-1, 1, 2)
-                    == groundtruths_with_passing_score.reshape(1, -1, 2)
-                )
-                .all(axis=2)
-                .any(axis=1)
+            mask_groundtruths_with_passing_score = _isin(
+                data=groundtruths,
+                subset=groundtruths_with_passing_score,
             )
             mask_groundtruths_without_passing_score = (
                 ~mask_groundtruths_with_passing_score & mask_gt_exists
@@ -712,21 +755,13 @@ def compute_confusion_matrix(
             )
 
             # filter out true-positives from misclf and misprd
-            mask_gts_with_tp_override = (
-                (
-                    data[mask_misclf][:, [0, 1]].reshape(-1, 1, 2)
-                    == data[mask_tp][:, [0, 1]].reshape(1, -1, 2)
-                )
-                .all(axis=2)
-                .any(axis=1)
+            mask_gts_with_tp_override = _isin(
+                data=groundtruths[mask_misclf],
+                subset=groundtruths[mask_tp],
             )
-            mask_pds_with_tp_override = (
-                (
-                    data[mask_misclf][:, [0, 2]].reshape(-1, 1, 2)
-                    == data[mask_tp][:, [0, 2]].reshape(1, -1, 2)
-                )
-                .all(axis=2)
-                .any(axis=1)
+            mask_pds_with_tp_override = _isin(
+                data=predictions[mask_misclf],
+                subset=predictions[mask_tp],
             )
             mask_misprd[mask_misclf] |= (
                 ~mask_gts_with_tp_override & mask_pds_with_tp_override
@@ -595,7 +595,12 @@ class Evaluator:
 
         metrics[MetricType.PrecisionRecallCurve] = [
             PrecisionRecallCurve(
-                precision=pr_curves[iou_idx][label_idx].astype(float).tolist(),
+                precisions=pr_curves[iou_idx, label_idx, :, 0]
+                .astype(float)
+                .tolist(),
+                scores=pr_curves[iou_idx, label_idx, :, 1]
+                .astype(float)
+                .tolist(),
                 iou_threshold=iou_threshold,
                 label=label,
             )
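
With the Evaluator now passing both precisions and scores, and PrecisionRecallCurve.to_metric returning a dict value (see the metric.py hunks below), a serialized PrecisionRecallCurve would look roughly like the hypothetical Python dict here; the numbers are invented and real curves carry 101 entries per list:

    {
        "type": "PrecisionRecallCurve",
        "value": {
            "precisions": [1.0, 1.0, 0.91],  # truncated; indexed by recall 0.00..1.00
            "scores": [0.98, 0.98, 0.87],    # truncated; running max score per recall bin
        },
        "parameters": {"iou_threshold": 0.5, "label": "cat"},
    }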
@@ -591,8 +591,10 @@ class PrecisionRecallCurve:
 
     Attributes
     ----------
-    precision : list[float]
+    precisions : list[float]
         Interpolated precision values corresponding to recalls at 0.0, 0.01, ..., 1.0.
+    scores : list[float]
+        Maximum prediction score for each point on the interpolated curve.
     iou_threshold : float
         The Intersection over Union (IoU) threshold used to determine true positives.
     label : str
@@ -606,14 +608,18 @@ class PrecisionRecallCurve:
         Converts the instance to a dictionary representation.
     """
 
-    precision: list[float]
+    precisions: list[float]
+    scores: list[float]
     iou_threshold: float
     label: str
 
     def to_metric(self) -> Metric:
         return Metric(
             type=type(self).__name__,
-            value=self.precision,
+            value={
+                "precisions": self.precisions,
+                "scores": self.scores,
+            },
             parameters={
                 "iou_threshold": self.iou_threshold,
                 "label": self.label,
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: valor-lite
-Version: 0.33.9
+Version: 0.33.11
 Summary: Compute valor metrics locally.
 License: MIT License
 
@@ -8,17 +8,17 @@ valor_lite/classification/manager.py,sha256=7NKk4syQHH5hBEUDWTD0zIFkJSNdOMzJn8a8
 valor_lite/classification/metric.py,sha256=m9_zD82YGl0QhuMql943YNKg67NZ6bsrR8ggs6_JZms,11728
 valor_lite/object_detection/__init__.py,sha256=PiKfemo8FkZRzBhPSjhil8ahGURLy0Vk_iV25CB4UBU,1139
 valor_lite/object_detection/annotation.py,sha256=o6VfiRobiB0ljqsNBLAYMXgi32RSIR7uTA-dgxq6zBI,8248
-valor_lite/object_detection/computation.py,sha256=7rOfVlYDadXcJ1_S0FJRF3IPigcsR7guk_0rXeIdAOE,26919
-valor_lite/object_detection/manager.py,sha256=k8VRqmlfWGKj1IuijbG49jXkMelE8v59pTQTCwkSMKk,38833
-valor_lite/object_detection/metric.py,sha256=nWSqIQSBQrpl3Stz_xe2-AYoo2nrATeMuFVFmREjSNA,23833
+valor_lite/object_detection/computation.py,sha256=ZW83XT-aemRg-5ZdISmrj0bRD9wWmYCU3gkSlfXlNZc,27747
+valor_lite/object_detection/manager.py,sha256=vb4JpynNF0JcnFwNmReFjls9UGAquigN2hpEbG89J04,38991
+valor_lite/object_detection/metric.py,sha256=tHRVnpBqw_w1VwnNkTCmu1yv7Max9FRlf5uh0wYew4s,24046
 valor_lite/semantic_segmentation/__init__.py,sha256=IdarTHKUuUMDvMBmInQu12Mm_NMCbql6Hf0nL5b56Ak,424
 valor_lite/semantic_segmentation/annotation.py,sha256=CujYFdHS3fgr4Y7mEDs_u1XBmbPJzNU2CdqvjCT_d_A,2938
 valor_lite/semantic_segmentation/computation.py,sha256=iJkEmTNmw9HwQCxSnpJkQsAdVcFriGhhu_WMks6D7tU,5122
 valor_lite/semantic_segmentation/manager.py,sha256=aJk6edWZWKqrzl6hVmEUSZVYhHLuyihxWgAIXsCXkZ0,17361
 valor_lite/semantic_segmentation/metric.py,sha256=Y8M3z92SaABEe9TwBUN37TFsh9DR5WoIxO-TfXVwz8I,6289
 valor_lite/text_generation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-valor_lite-0.33.9.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
-valor_lite-0.33.9.dist-info/METADATA,sha256=dXS7Nt_WHKBaIARWZ3Ek27i26-pWyatewb3eFEnYor8,5631
-valor_lite-0.33.9.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-valor_lite-0.33.9.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
-valor_lite-0.33.9.dist-info/RECORD,,
+valor_lite-0.33.11.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+valor_lite-0.33.11.dist-info/METADATA,sha256=QniFV86iMnaqPtJElufV4tkF3-kI1sS6EXKRzupWavc,5632
+valor_lite-0.33.11.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+valor_lite-0.33.11.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+valor_lite-0.33.11.dist-info/RECORD,,