valor-lite 0.34.2__py3-none-any.whl → 0.35.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of valor-lite might be problematic. Click here for more details.

@@ -6,7 +6,6 @@ from valor_lite.schemas import BaseMetric
6
6
 
7
7
  class MetricType(str, Enum):
8
8
  Counts = "Counts"
9
- Accuracy = "Accuracy"
10
9
  Precision = "Precision"
11
10
  Recall = "Recall"
12
11
  F1 = "F1"
@@ -175,41 +174,6 @@ class Metric(BaseMetric):
175
174
  },
176
175
  )
177
176
 
178
- @classmethod
179
- def accuracy(
180
- cls,
181
- value: float,
182
- iou_threshold: float,
183
- score_threshold: float,
184
- ):
185
- """
186
- Accuracy metric for the object detection task type.
187
-
188
- This class encapsulates a metric value at a specific Intersection
189
- over Union (IOU) threshold and confidence score threshold.
190
-
191
- Parameters
192
- ----------
193
- value : float
194
- The metric value.
195
- iou_threshold : float
196
- The IOU threshold used to determine matches between predicted and ground truth boxes.
197
- score_threshold : float
198
- The confidence score threshold above which predictions are considered.
199
-
200
- Returns
201
- -------
202
- Metric
203
- """
204
- return cls(
205
- type=MetricType.Accuracy.value,
206
- value=value,
207
- parameters={
208
- "iou_threshold": iou_threshold,
209
- "score_threshold": score_threshold,
210
- },
211
- )
212
-
213
177
  @classmethod
214
178
  def average_precision(
215
179
  cls,
@@ -608,12 +572,8 @@ class Metric(BaseMetric):
608
572
  int
609
573
  | list[
610
574
  dict[
611
- str, # either `datum`, `groundtruth`, `prediction` or score
612
- str # datum uid
613
- | dict[
614
- str, float
615
- ] # bounding box (xmin, xmax, ymin, ymax)
616
- | float, # prediction score
575
+ str, # either `datum_id`, `ground_truth_id`, `prediction_id`
576
+ str, # string identifier
617
577
  ]
618
578
  ],
619
579
  ],
@@ -626,12 +586,8 @@ class Metric(BaseMetric):
626
586
  int
627
587
  | list[
628
588
  dict[
629
- str, # either `datum`, `prediction` or score
630
- str # datum uid
631
- | float # prediction score
632
- | dict[
633
- str, float
634
- ], # bounding box (xmin, xmax, ymin, ymax)
589
+ str, # either `datum_id` or `prediction_id`
590
+ str, # string identifier
635
591
  ]
636
592
  ],
637
593
  ],
@@ -643,18 +599,14 @@ class Metric(BaseMetric):
643
599
  int
644
600
  | list[
645
601
  dict[
646
- str, # either `datum` or `groundtruth`
647
- str # datum uid
648
- | dict[
649
- str, float
650
- ], # bounding box (xmin, xmax, ymin, ymax)
602
+ str, # either `datum_id` or `ground_truth_id`
603
+ str, # string identifier
651
604
  ]
652
605
  ],
653
606
  ],
654
607
  ],
655
608
  score_threshold: float,
656
609
  iou_threshold: float,
657
- maximum_number_of_examples: int,
658
610
  ):
659
611
  """
660
612
  Confusion matrix for object detection tasks.
@@ -670,10 +622,9 @@ class Metric(BaseMetric):
670
622
  'count': int,
671
623
  'examples': [
672
624
  {
673
- 'datum': str,
674
- 'groundtruth': dict, # {'xmin': float, 'xmax': float, 'ymin': float, 'ymax': float}
675
- 'prediction': dict, # {'xmin': float, 'xmax': float, 'ymin': float, 'ymax': float}
676
- 'score': float,
625
+ 'datum_id': str,
626
+ 'groundtruth_id': str,
627
+ 'prediction_id': str
677
628
  },
678
629
  ...
679
630
  ],
@@ -689,9 +640,8 @@ class Metric(BaseMetric):
689
640
  'count': int,
690
641
  'examples': [
691
642
  {
692
- 'datum': str,
693
- 'prediction': dict, # {'xmin': float, 'xmax': float, 'ymin': float, 'ymax': float}
694
- 'score': float,
643
+ 'datum_id': str,
644
+ 'prediction_id': str
695
645
  },
696
646
  ...
697
647
  ],
@@ -705,8 +655,8 @@ class Metric(BaseMetric):
705
655
  'count': int,
706
656
  'examples': [
707
657
  {
708
- 'datum': str,
709
- 'groundtruth': dict, # {'xmin': float, 'xmax': float, 'ymin': float, 'ymax': float}
658
+ 'datum_id': str,
659
+ 'groundtruth_id': str
710
660
  },
711
661
  ...
712
662
  ],
@@ -719,22 +669,19 @@ class Metric(BaseMetric):
719
669
  confusion_matrix : dict
720
670
  A nested dictionary where the first key is the ground truth label value, the second key
721
671
  is the prediction label value, and the innermost dictionary contains either a `count`
722
- or a list of `examples`. Each example includes the datum UID, ground truth bounding box,
723
- predicted bounding box, and prediction scores.
672
+ or a list of `examples`. Each example includes annotation and datum identifiers.
724
673
  unmatched_predictions : dict
725
674
  A dictionary where each key is a prediction label value with no corresponding ground truth
726
675
  (subset of false positives). The value is a dictionary containing either a `count` or a list of
727
- `examples`. Each example includes the datum UID, predicted bounding box, and prediction score.
676
+ `examples`. Each example includes annotation and datum identifiers.
728
677
  unmatched_ground_truths : dict
729
678
  A dictionary where each key is a ground truth label value for which the model failed to predict
730
679
  (subset of false negatives). The value is a dictionary containing either a `count` or a list of `examples`.
731
- Each example includes the datum UID and ground truth bounding box.
680
+ Each example includes annotation and datum identifiers.
732
681
  score_threshold : float
733
682
  The confidence score threshold used to filter predictions.
734
683
  iou_threshold : float
735
684
  The Intersection over Union (IOU) threshold used to determine true positives.
736
- maximum_number_of_examples : int
737
- The maximum number of examples per element.
738
685
 
739
686
  Returns
740
687
  -------
@@ -750,6 +697,5 @@ class Metric(BaseMetric):
750
697
  parameters={
751
698
  "score_threshold": score_threshold,
752
699
  "iou_threshold": iou_threshold,
753
- "maximum_number_of_examples": maximum_number_of_examples,
754
700
  },
755
701
  )