valor-lite 0.33.3__py3-none-any.whl → 0.33.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of valor-lite has been flagged as potentially problematic.

@@ -1,11 +1,10 @@
- from .annotation import Bitmask, BoundingBox, Detection
+ from .annotation import Bitmask, BoundingBox, Detection, Polygon
  from .computation import (
      compute_detailed_counts,
-     compute_iou,
      compute_metrics,
      compute_ranked_pairs,
  )
- from .manager import DataLoader, Evaluator
+ from .manager import DataLoader, Evaluator, compute_iou
  from .metric import (
      AP,
      AR,
@@ -29,6 +28,7 @@ __all__ = [
      "Bitmask",
      "BoundingBox",
      "Detection",
+     "Polygon",
      "MetricType",
      "Counts",
      "Precision",
@@ -2,6 +2,7 @@ from dataclasses import dataclass, field
  
  import numpy as np
  from numpy.typing import NDArray
+ from shapely.geometry import Polygon as ShapelyPolygon
  
  
  @dataclass
@@ -24,6 +25,37 @@ class BoundingBox:
          return (self.xmin, self.xmax, self.ymin, self.ymax)
  
  
+ @dataclass
+ class Polygon:
+     shape: ShapelyPolygon
+     labels: list[tuple[str, str]]
+     scores: list[float] = field(default_factory=list)
+
+     def __post_init__(self):
+         if not isinstance(self.shape, ShapelyPolygon):
+             raise TypeError("shape must be of type shapely.geometry.Polygon.")
+         if len(self.scores) > 0 and len(self.labels) != len(self.scores):
+             raise ValueError(
+                 "If scores are defined, there must be a 1:1 pairing with labels."
+             )
+
+     def to_box(self) -> BoundingBox | None:
+
+         if self.shape.is_empty:
+             return None
+
+         xmin, ymin, xmax, ymax = self.shape.bounds
+
+         return BoundingBox(
+             xmin=xmin,
+             xmax=xmax,
+             ymin=ymin,
+             ymax=ymax,
+             labels=self.labels,
+             scores=self.scores,
+         )
+
+
  @dataclass
  class Bitmask:
      mask: NDArray[np.bool_]
@@ -55,8 +87,8 @@ class Bitmask:
  @dataclass
  class Detection:
      uid: str
-     groundtruths: list[BoundingBox]
-     predictions: list[BoundingBox]
+     groundtruths: list[BoundingBox] | list[Bitmask] | list[Polygon]
+     predictions: list[BoundingBox] | list[Bitmask] | list[Polygon]
  
      def __post_init__(self):
          for prediction in self.predictions:
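
For orientation, here is a minimal sketch (not taken from the package source) of how the new Polygon annotation can be built and converted to an axis-aligned box via to_box(); the label tuple and coordinates are invented for illustration.

    from shapely.geometry import Polygon as ShapelyPolygon
    from valor_lite.detection import Polygon

    # hypothetical triangular groundtruth labeled ("class", "dog")
    triangle = Polygon(
        shape=ShapelyPolygon([(0, 0), (4, 0), (0, 3)]),
        labels=[("class", "dog")],
    )

    # to_box() wraps shape.bounds in a BoundingBox, or returns None
    # when the shape is empty
    box = triangle.to_box()
    print(box.extrema if box is not None else None)  # (0.0, 4.0, 0.0, 3.0)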
@@ -2,7 +2,7 @@ import numpy as np
  from numpy.typing import NDArray
  
  
- def compute_iou(data: NDArray[np.floating]) -> NDArray[np.floating]:
+ def compute_bbox_iou(data: NDArray[np.floating]) -> NDArray[np.floating]:
      """
      Computes intersection-over-union (IoU) for axis-aligned bounding boxes.
  
@@ -24,14 +24,12 @@ def compute_iou(data: NDArray[np.floating]) -> NDArray[np.floating]:
      Parameters
      ----------
      data : NDArray[np.floating]
-         A sorted array of classification pairs.
-     label_metadata : NDArray[np.int32]
-         An array containing metadata related to labels.
+         A sorted array of bounding box pairs.
  
      Returns
      -------
      NDArray[np.floating]
-         Compute IoU's.
+         Computed IoU's.
      """
  
      xmin1, xmax1, ymin1, ymax1 = (
@@ -69,6 +67,73 @@ def compute_iou(data: NDArray[np.floating]) -> NDArray[np.floating]:
      return iou
  
  
+ def compute_bitmask_iou(data: NDArray[np.floating]) -> NDArray[np.floating]:
+     """
+     Computes intersection-over-union (IoU) for bitmasks.
+
+     Takes data with shape (N, 2):
+
+     Index 0 - first bitmask
+     Index 1 - second bitmask
+
+     Returns data with shape (N, 1):
+
+     Index 0 - IoU
+
+     Parameters
+     ----------
+     data : NDArray[np.floating]
+         A sorted array of bitmask pairs.
+
+     Returns
+     -------
+     NDArray[np.floating]
+         Computed IoU's.
+     """
+     intersection_ = np.array([np.logical_and(x, y).sum() for x, y in data])
+     union_ = np.array([np.logical_or(x, y).sum() for x, y in data])
+
+     return intersection_ / union_
+
+
+ def compute_polygon_iou(
+     data: NDArray[np.floating],
+ ) -> NDArray[np.floating]:
+     """
+     Computes intersection-over-union (IoU) for shapely polygons.
+
+     Takes data with shape (N, 2):
+
+     Index 0 - first polygon
+     Index 1 - second polygon
+
+     Returns data with shape (N, 1):
+
+     Index 0 - IoU
+
+     Parameters
+     ----------
+     data : NDArray[np.floating]
+         A sorted array of polygon pairs.
+
+     Returns
+     -------
+     NDArray[np.floating]
+         Computed IoU's.
+     """
+     intersection_ = np.array(
+         [poly1.intersection(poly2).area for poly1, poly2 in data]
+     )
+     union_ = np.array(
+         [
+             poly1.area + poly2.area - intersection_[i]
+             for i, (poly1, poly2) in enumerate(data)
+         ]
+     )
+
+     return intersection_ / union_
+
+
  def _compute_ranked_pairs_for_datum(
      data: NDArray[np.floating],
      label_metadata: NDArray[np.int32],
@@ -133,7 +198,7 @@ def compute_ranked_pairs(
      Parameters
      ----------
      data : NDArray[np.floating]
-         A sorted array of classification pairs.
+         A sorted array summarizing the IOU calculations of one or more pairs.
      label_metadata : NDArray[np.int32]
          An array containing metadata related to labels.
  
@@ -161,10 +226,10 @@ def compute_ranked_pairs(
  
  
  def compute_metrics(
-     data: np.ndarray,
-     label_metadata: np.ndarray,
-     iou_thresholds: np.ndarray,
-     score_thresholds: np.ndarray,
+     data: NDArray[np.floating],
+     label_metadata: NDArray[np.int32],
+     iou_thresholds: NDArray[np.floating],
+     score_thresholds: NDArray[np.floating],
  ) -> tuple[
      tuple[
          NDArray[np.floating],
@@ -197,7 +262,7 @@ def compute_metrics(
      Parameters
      ----------
      data : NDArray[np.floating]
-         A sorted array of classification pairs.
+         A sorted array summarizing the IOU calculations of one or more pairs.
      label_metadata : NDArray[np.int32]
          An array containing metadata related to labels.
      iou_thresholds : NDArray[np.floating]
@@ -463,7 +528,7 @@ def compute_detailed_counts(
      Parameters
      ----------
      data : NDArray[np.floating]
-         A sorted array of classification pairs.
+         A sorted array summarizing the IOU calculations of one or more pairs.
      label_metadata : NDArray[np.int32]
          An array containing metadata related to labels.
      iou_thresholds : NDArray[np.floating]
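
A rough sketch of calling the two new kernels directly; the pair packing below (an object array of shapely geometries, and a stacked boolean array for masks) is an assumption based on how the loader in manager.py appears to build its data arrays.

    import numpy as np
    from shapely.geometry import Polygon as ShapelyPolygon
    from valor_lite.detection.computation import (
        compute_bitmask_iou,
        compute_polygon_iou,
    )

    # two unit squares overlapping over half their area: IoU = 0.5 / 1.5 = 1/3
    poly_pairs = np.array(
        [[
            ShapelyPolygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
            ShapelyPolygon([(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)]),
        ]],
        dtype=object,
    )
    print(compute_polygon_iou(poly_pairs))  # ~[0.3333]

    # two 4x4 masks with 8 pixels each and 4 shared: IoU = 4 / 12 = 1/3
    mask_a = np.zeros((4, 4), dtype=np.bool_)
    mask_a[:, :2] = True
    mask_b = np.zeros((4, 4), dtype=np.bool_)
    mask_b[:, 1:3] = True
    bitmask_pairs = np.array([[mask_a, mask_b]])  # shape (1, 2, 4, 4)
    print(compute_bitmask_iou(bitmask_pairs))  # ~[0.3333]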
@@ -3,12 +3,20 @@ from dataclasses import dataclass
  
  import numpy as np
  from numpy.typing import NDArray
+ from shapely.geometry import Polygon as ShapelyPolygon
  from tqdm import tqdm
- from valor_lite.detection.annotation import Detection
+ from valor_lite.detection.annotation import (
+     Bitmask,
+     BoundingBox,
+     Detection,
+     Polygon,
+ )
  from valor_lite.detection.computation import (
+     compute_bbox_iou,
+     compute_bitmask_iou,
      compute_detailed_counts,
-     compute_iou,
      compute_metrics,
+     compute_polygon_iou,
      compute_ranked_pairs,
  )
  from valor_lite.detection.metric import (
@@ -35,7 +43,7 @@ Usage
  -----
  
  loader = DataLoader()
- loader.add_data(
+ loader.add_bounding_boxes(
      groundtruths=groundtruths,
      predictions=predictions,
  )
@@ -51,6 +59,103 @@ filtered_metrics = evaluator.evaluate(iou_thresholds=[0.5], filter_mask=filter_m
  """
  
  
+ def _get_valor_dict_annotation_key(
+     annotation_type: type[BoundingBox] | type[Polygon] | type[Bitmask],
+ ) -> str:
+     """Get the correct JSON key to extract a given annotation type."""
+
+     if issubclass(annotation_type, BoundingBox):
+         return "bounding_box"
+     if issubclass(annotation_type, Polygon):
+         return "polygon"
+     else:
+         return "raster"
+
+
+ def _get_annotation_representation(
+     annotation_type: type[BoundingBox] | type[Polygon] | type[Bitmask],
+ ) -> str:
+     """Get the correct representation of an annotation object."""
+
+     representation = (
+         "extrema"
+         if issubclass(annotation_type, BoundingBox)
+         else ("mask" if issubclass(annotation_type, Bitmask) else "shape")
+     )
+
+     return representation
+
+
+ def _get_annotation_representation_from_valor_dict(
+     data: list,
+     annotation_type: type[BoundingBox] | type[Polygon] | type[Bitmask],
+ ) -> tuple[float, float, float, float] | ShapelyPolygon | NDArray[np.bool_]:
+     """Get the correct representation of an annotation object from a valor dictionary."""
+
+     if issubclass(annotation_type, BoundingBox):
+         x = [point[0] for shape in data for point in shape]
+         y = [point[1] for shape in data for point in shape]
+         return (min(x), max(x), min(y), max(y))
+     if issubclass(annotation_type, Polygon):
+         return ShapelyPolygon(data)
+     else:
+         return np.array(data)
+
+
+ def _get_annotation_data(
+     keyed_groundtruths: dict,
+     keyed_predictions: dict,
+     annotation_type: type[BoundingBox] | type[Polygon] | type[Bitmask] | None,
+     key=int,
+ ) -> np.ndarray:
+     """Create an array of annotation pairs for use when calculating IOU. Needed because we unpack bounding box representations, but not bitmask or polygon representations."""
+     if annotation_type == BoundingBox:
+         return np.array(
+             [
+                 np.array([*gextrema, *pextrema])
+                 for _, _, _, pextrema in keyed_predictions[key]
+                 for _, _, gextrema in keyed_groundtruths[key]
+             ]
+         )
+     else:
+         return np.array(
+             [
+                 np.array([groundtruth_obj, prediction_obj])
+                 for _, _, _, prediction_obj in keyed_predictions[key]
+                 for _, _, groundtruth_obj in keyed_groundtruths[key]
+             ]
+         )
+
+
+ def compute_iou(
+     data: NDArray[np.floating],
+     annotation_type: type[BoundingBox] | type[Polygon] | type[Bitmask],
+ ) -> NDArray[np.floating]:
+     """
+     Computes intersection-over-union (IoU) calculations for various annotation types.
+
+     Parameters
+     ----------
+     data : NDArray[np.floating]
+         A sorted array of bounding box, bitmask, or polygon pairs.
+     annotation_type: type[BoundingBox] | type[Polygon] | type[Bitmask]
+         The type of annotation contained in the data.
+
+
+     Returns
+     -------
+     NDArray[np.floating]
+         Computed IoU's.
+     """
+
+     if annotation_type == BoundingBox:
+         return compute_bbox_iou(data=data)
+     elif annotation_type == Bitmask:
+         return compute_bitmask_iou(data=data)
+     else:
+         return compute_polygon_iou(data=data)
+
+
  @dataclass
  class Filter:
      indices: NDArray[np.int32]
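
The compute_iou re-exported from the manager (and from valor_lite.detection) is now a dispatcher over those kernels. A hedged usage sketch, under the same pair-packing assumption as the earlier example:

    import numpy as np
    from shapely.geometry import Polygon as ShapelyPolygon
    from valor_lite.detection import Polygon, compute_iou

    # two 2x2 squares overlapping over a 1x2 strip: IoU = 2 / 6 = 1/3
    pairs = np.array(
        [[
            ShapelyPolygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
            ShapelyPolygon([(1, 0), (3, 0), (3, 2), (1, 2)]),
        ]],
        dtype=object,
    )

    # annotation_type selects the kernel: BoundingBox, Bitmask, or Polygon
    print(compute_iou(data=pairs, annotation_type=Polygon))  # ~[0.3333]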
@@ -167,46 +272,35 @@ class Evaluator:
                      [self.uid_to_index[uid] for uid in datum_uids],
                      dtype=np.int32,
                  )
-             mask = np.zeros_like(mask_pairs, dtype=np.bool_)
-             mask[
-                 np.isin(self._ranked_pairs[:, 0].astype(int), datum_uids)
-             ] = True
-             mask_pairs &= mask
-
-             mask = np.zeros_like(mask_datums, dtype=np.bool_)
-             mask[datum_uids] = True
-             mask_datums &= mask
+             mask_pairs[
+                 ~np.isin(self._ranked_pairs[:, 0].astype(int), datum_uids)
+             ] = False
+             mask_datums[~np.isin(np.arange(n_datums), datum_uids)] = False
  
          if labels is not None:
              if isinstance(labels, list):
                  labels = np.array(
                      [self.label_to_index[label] for label in labels]
                  )
-             mask = np.zeros_like(mask_pairs, dtype=np.bool_)
-             mask[np.isin(self._ranked_pairs[:, 4].astype(int), labels)] = True
-             mask_pairs &= mask
-
-             mask = np.zeros_like(mask_labels, dtype=np.bool_)
-             mask[labels] = True
-             mask_labels &= mask
+             mask_pairs[
+                 ~np.isin(self._ranked_pairs[:, 4].astype(int), labels)
+             ] = False
+             mask_labels[~np.isin(np.arange(n_labels), labels)] = False
  
          if label_keys is not None:
              if isinstance(label_keys, list):
                  label_keys = np.array(
                      [self.label_key_to_index[key] for key in label_keys]
                  )
-             label_indices = np.where(
-                 np.isclose(self._label_metadata[:, 2], label_keys)
-             )[0]
-             mask = np.zeros_like(mask_pairs, dtype=np.bool_)
-             mask[
-                 np.isin(self._ranked_pairs[:, 4].astype(int), label_indices)
-             ] = True
-             mask_pairs &= mask
-
-             mask = np.zeros_like(mask_labels, dtype=np.bool_)
-             mask[label_indices] = True
-             mask_labels &= mask
+             label_indices = (
+                 np.where(np.isclose(self._label_metadata[:, 2], label_keys))[0]
+                 if label_keys.size > 0
+                 else np.array([])
+             )
+             mask_pairs[
+                 ~np.isin(self._ranked_pairs[:, 4].astype(int), label_indices)
+             ] = False
+             mask_labels[~np.isin(np.arange(n_labels), label_indices)] = False
  
          mask = mask_datums[:, np.newaxis] & mask_labels[np.newaxis, :]
          label_metadata_per_datum = self._label_metadata_per_datum.copy()
@@ -294,7 +388,7 @@ class Evaluator:
              )
              for iou_idx in range(average_precision.shape[0])
              for label_idx in range(average_precision.shape[1])
-             if int(label_metadata[label_idx][0]) > 0
+             if int(label_metadata[label_idx, 0]) > 0
          ]
  
          metrics[MetricType.mAP] = [
@@ -314,7 +408,7 @@ class Evaluator:
                  label=self.index_to_label[label_idx],
              )
              for label_idx in range(self.n_labels)
-             if int(label_metadata[label_idx][0]) > 0
+             if int(label_metadata[label_idx, 0]) > 0
          ]
  
          metrics[MetricType.mAPAveragedOverIOUs] = [
@@ -337,7 +431,7 @@ class Evaluator:
              )
              for score_idx in range(average_recall.shape[0])
              for label_idx in range(average_recall.shape[1])
-             if int(label_metadata[label_idx][0]) > 0
+             if int(label_metadata[label_idx, 0]) > 0
          ]
  
          metrics[MetricType.mAR] = [
@@ -359,7 +453,7 @@ class Evaluator:
                  label=self.index_to_label[label_idx],
              )
              for label_idx in range(self.n_labels)
-             if int(label_metadata[label_idx][0]) > 0
+             if int(label_metadata[label_idx, 0]) > 0
          ]
  
          metrics[MetricType.mARAveragedOverScores] = [
@@ -382,16 +476,17 @@ class Evaluator:
              )
              for iou_idx, iou_threshold in enumerate(iou_thresholds)
              for label_idx, label in self.index_to_label.items()
-             if int(label_metadata[label_idx][0]) > 0
+             if int(label_metadata[label_idx, 0]) > 0
          ]
  
          for label_idx, label in self.index_to_label.items():
+
+             if label_metadata[label_idx, 0] == 0:
+                 continue
+
              for score_idx, score_threshold in enumerate(score_thresholds):
                  for iou_idx, iou_threshold in enumerate(iou_thresholds):
  
-                     if label_metadata[label_idx, 0] == 0:
-                         continue
-
                      row = precision_recall[iou_idx][score_idx][label_idx]
                      kwargs = {
                          "label": label,
@@ -475,7 +570,7 @@ class Evaluator:
              return list()
  
          metrics = compute_detailed_counts(
-             self._detailed_pairs,
+             data=self._detailed_pairs,
              label_metadata=self._label_metadata,
              iou_thresholds=np.array(iou_thresholds),
              score_thresholds=np.array(score_thresholds),
@@ -661,12 +756,27 @@ class DataLoader:
              self._evaluator.label_key_to_index[label[0]],
          )
  
-     def _add_data(
+     def _compute_ious_and_cache_pairs(
          self,
          uid_index: int,
          keyed_groundtruths: dict,
         keyed_predictions: dict,
-     ):
+         annotation_type: type[BoundingBox] | type[Polygon] | type[Bitmask],
+     ) -> None:
+         """
+         Compute IOUs between groundtruths and preditions before storing as pairs.
+
+         Parameters
+         ----------
+         uid_index: int
+             The index of the detection.
+         keyed_groundtruths: dict
+             A dictionary of groundtruths.
+         keyed_predictions: dict
+             A dictionary of predictions.
+         annotation_type: type[BoundingBox] | type[Polygon] | type[Bitmask]
+             The type of annotation to compute IOUs for.
+         """
          gt_keys = set(keyed_groundtruths.keys())
          pd_keys = set(keyed_predictions.keys())
          joint_keys = gt_keys.intersection(pd_keys)
@@ -677,14 +787,13 @@ class DataLoader:
          for key in joint_keys:
              n_predictions = len(keyed_predictions[key])
              n_groundtruths = len(keyed_groundtruths[key])
-             boxes = np.array(
-                 [
-                     np.array([*gextrema, *pextrema])
-                     for _, _, _, pextrema in keyed_predictions[key]
-                     for _, _, gextrema in keyed_groundtruths[key]
-                 ]
+             data = _get_annotation_data(
+                 keyed_groundtruths=keyed_groundtruths,
+                 keyed_predictions=keyed_predictions,
+                 key=key,
+                 annotation_type=annotation_type,
              )
-             ious = compute_iou(boxes)
+             ious = compute_iou(data=data, annotation_type=annotation_type)
              mask_nonzero_iou = (ious > 1e-9).reshape(
                  (n_predictions, n_groundtruths)
              )
@@ -780,9 +889,10 @@ class DataLoader:
  
          self.pairs.append(np.array(pairs))
  
-     def add_data(
+     def _add_data(
          self,
          detections: list[Detection],
+         annotation_type: type[Bitmask] | type[BoundingBox] | type[Polygon],
          show_progress: bool = False,
      ):
          """
@@ -792,6 +902,8 @@ class DataLoader:
          ----------
          detections : list[Detection]
              A list of Detection objects.
+         annotation_type : type[Bitmask] | type[BoundingBox] | type[Polygon]
+             The annotation type to process.
          show_progress : bool, default=False
              Toggle for tqdm progress bar.
          """
@@ -817,45 +929,146 @@ class DataLoader:
              # cache labels and annotations
              keyed_groundtruths = defaultdict(list)
              keyed_predictions = defaultdict(list)
+
+             representation_property = _get_annotation_representation(
+                 annotation_type=annotation_type
+             )
+
              for gidx, gann in enumerate(detection.groundtruths):
-                 self._evaluator.groundtruth_examples[uid_index][
-                     gidx
-                 ] = np.array(gann.extrema)
+                 if not isinstance(gann, annotation_type):
+                     raise ValueError(
+                         f"Expected {annotation_type}, but annotation is of type {type(gann)}."
+                     )
+
+                 if isinstance(gann, BoundingBox):
+                     self._evaluator.groundtruth_examples[uid_index][
+                         gidx
+                     ] = getattr(gann, representation_property)
+                 else:
+                     converted_box = gann.to_box()
+                     self._evaluator.groundtruth_examples[uid_index][gidx] = (
+                         getattr(converted_box, "extrema")
+                         if converted_box is not None
+                         else None
+                     )
                  for glabel in gann.labels:
                      label_idx, label_key_idx = self._add_label(glabel)
                      self.groundtruth_count[label_idx][uid_index] += 1
+                     representation = getattr(gann, representation_property)
                      keyed_groundtruths[label_key_idx].append(
                          (
                              gidx,
                              label_idx,
-                             gann.extrema,
+                             representation,
                          )
                      )
+
              for pidx, pann in enumerate(detection.predictions):
-                 self._evaluator.prediction_examples[uid_index][
-                     pidx
-                 ] = np.array(pann.extrema)
+                 if not isinstance(pann, annotation_type):
+                     raise ValueError(
+                         f"Expected {annotation_type}, but annotation is of type {type(pann)}."
+                     )
+
+                 if isinstance(pann, BoundingBox):
+                     self._evaluator.prediction_examples[uid_index][
+                         pidx
+                     ] = getattr(pann, representation_property)
+                 else:
+                     converted_box = pann.to_box()
+                     self._evaluator.prediction_examples[uid_index][pidx] = (
+                         getattr(converted_box, "extrema")
+                         if converted_box is not None
+                         else None
+                     )
                  for plabel, pscore in zip(pann.labels, pann.scores):
                      label_idx, label_key_idx = self._add_label(plabel)
                      self.prediction_count[label_idx][uid_index] += 1
+                     representation = representation = getattr(
+                         pann, representation_property
+                     )
                      keyed_predictions[label_key_idx].append(
                          (
                              pidx,
                              label_idx,
                              pscore,
-                             pann.extrema,
+                             representation,
                          )
                      )
  
-             self._add_data(
+             self._compute_ious_and_cache_pairs(
                  uid_index=uid_index,
                  keyed_groundtruths=keyed_groundtruths,
                  keyed_predictions=keyed_predictions,
+                 annotation_type=annotation_type,
              )
  
-     def add_data_from_valor_dict(
+     def add_bounding_boxes(
+         self,
+         detections: list[Detection],
+         show_progress: bool = False,
+     ):
+         """
+         Adds bounding box detections to the cache.
+
+         Parameters
+         ----------
+         detections : list[Detection]
+             A list of Detection objects.
+         show_progress : bool, default=False
+             Toggle for tqdm progress bar.
+         """
+         return self._add_data(
+             detections=detections,
+             show_progress=show_progress,
+             annotation_type=BoundingBox,
+         )
+
+     def add_polygons(
+         self,
+         detections: list[Detection],
+         show_progress: bool = False,
+     ):
+         """
+         Adds polygon detections to the cache.
+
+         Parameters
+         ----------
+         detections : list[Detection]
+             A list of Detection objects.
+         show_progress : bool, default=False
+             Toggle for tqdm progress bar.
+         """
+         return self._add_data(
+             detections=detections,
+             show_progress=show_progress,
+             annotation_type=Polygon,
+         )
+
+     def add_bitmasks(
+         self,
+         detections: list[Detection],
+         show_progress: bool = False,
+     ):
+         """
+         Adds bitmask detections to the cache.
+
+         Parameters
+         ----------
+         detections : list[Detection]
+             A list of Detection objects.
+         show_progress : bool, default=False
+             Toggle for tqdm progress bar.
+         """
+         return self._add_data(
+             detections=detections,
+             show_progress=show_progress,
+             annotation_type=Bitmask,
+         )
+
+     def _add_data_from_valor_dict(
          self,
          detections: list[tuple[dict, dict]],
+         annotation_type: type[Bitmask] | type[BoundingBox] | type[Polygon],
          show_progress: bool = False,
      ):
          """
@@ -865,20 +1078,14 @@ class DataLoader:
          ----------
          detections : list[tuple[dict, dict]]
              A list of groundtruth, prediction pairs in Valor-format dictionaries.
+         annotation_type : type[Bitmask] | type[BoundingBox] | type[Polygon]
+             The annotation type to process.
          show_progress : bool, default=False
              Toggle for tqdm progress bar.
          """
  
-         def _get_bbox_extrema(
-             data: list[list[list[float]]],
-         ) -> tuple[float, float, float, float]:
-             x = [point[0] for shape in data for point in shape]
-             y = [point[1] for shape in data for point in shape]
-             return (min(x), max(x), min(y), max(y))
-
          disable_tqdm = not show_progress
          for groundtruth, prediction in tqdm(detections, disable=disable_tqdm):
-
              # update metadata
              self._evaluator.n_datums += 1
              self._evaluator.n_groundtruths += len(groundtruth["annotations"])
@@ -898,10 +1105,34 @@ class DataLoader:
              # cache labels and annotations
              keyed_groundtruths = defaultdict(list)
              keyed_predictions = defaultdict(list)
+
+             annotation_key = _get_valor_dict_annotation_key(
+                 annotation_type=annotation_type
+             )
+             invalid_keys = list(
+                 filter(
+                     lambda x: x != annotation_key,
+                     ["bounding_box", "raster", "polygon"],
+                 )
+             )
+
              for gidx, gann in enumerate(groundtruth["annotations"]):
-                 self._evaluator.groundtruth_examples[uid_index][
-                     gidx
-                 ] = np.array(_get_bbox_extrema(gann["bounding_box"]))
+                 if (gann[annotation_key] is None) or any(
+                     [gann[k] is not None for k in invalid_keys]
+                 ):
+                     raise ValueError(
+                         f"Input JSON doesn't contain {annotation_type} data, or contains data for multiple annotation types."
+                     )
+                 if annotation_type == BoundingBox:
+                     self._evaluator.groundtruth_examples[uid_index][
+                         gidx
+                     ] = np.array(
+                         _get_annotation_representation_from_valor_dict(
+                             gann[annotation_key],
+                             annotation_type=annotation_type,
+                         ),
+                     )
+
                  for valor_label in gann["labels"]:
                      glabel = (valor_label["key"], valor_label["value"])
                      label_idx, label_key_idx = self._add_label(glabel)
@@ -910,13 +1141,29 @@ class DataLoader:
                          (
                              gidx,
                              label_idx,
-                             _get_bbox_extrema(gann["bounding_box"]),
+                             _get_annotation_representation_from_valor_dict(
+                                 gann[annotation_key],
+                                 annotation_type=annotation_type,
+                             ),
                          )
                      )
              for pidx, pann in enumerate(prediction["annotations"]):
-                 self._evaluator.prediction_examples[uid_index][
-                     pidx
-                 ] = np.array(_get_bbox_extrema(pann["bounding_box"]))
+                 if (pann[annotation_key] is None) or any(
+                     [pann[k] is not None for k in invalid_keys]
+                 ):
+                     raise ValueError(
+                         f"Input JSON doesn't contain {annotation_type} data, or contains data for multiple annotation types."
+                     )
+
+                 if annotation_type == BoundingBox:
+                     self._evaluator.prediction_examples[uid_index][
+                         pidx
+                     ] = np.array(
+                         _get_annotation_representation_from_valor_dict(
+                             pann[annotation_key],
+                             annotation_type=annotation_type,
+                         )
+                     )
                  for valor_label in pann["labels"]:
                      plabel = (valor_label["key"], valor_label["value"])
                      pscore = valor_label["score"]
@@ -927,16 +1174,41 @@ class DataLoader:
                              pidx,
                              label_idx,
                              pscore,
-                             _get_bbox_extrema(pann["bounding_box"]),
+                             _get_annotation_representation_from_valor_dict(
+                                 pann[annotation_key],
+                                 annotation_type=annotation_type,
+                             ),
                          )
                      )
  
-             self._add_data(
+             self._compute_ious_and_cache_pairs(
                  uid_index=uid_index,
                  keyed_groundtruths=keyed_groundtruths,
                  keyed_predictions=keyed_predictions,
+                 annotation_type=annotation_type,
              )
  
+     def add_bounding_boxes_from_valor_dict(
+         self,
+         detections: list[tuple[dict, dict]],
+         show_progress: bool = False,
+     ):
+         """
+         Adds Valor-format bounding box detections to the cache.
+
+         Parameters
+         ----------
+         detections : list[tuple[dict, dict]]
+             A list of groundtruth, prediction pairs in Valor-format dictionaries.
+         show_progress : bool, default=False
+             Toggle for tqdm progress bar.
+         """
+         return self._add_data_from_valor_dict(
+             detections=detections,
+             show_progress=show_progress,
+             annotation_type=BoundingBox,
+         )
+
      def finalize(self) -> Evaluator:
          """
          Performs data finalization and some preprocessing steps.
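
Putting the loader changes together, an end-to-end polygon evaluation might look roughly like the sketch below. The labels, geometry, and threshold are invented for illustration, and evaluate()'s full signature is not visible in this diff, so only the iou_thresholds argument shown in the module docstring is used.

    from shapely.geometry import Polygon as ShapelyPolygon
    from valor_lite.detection import DataLoader, Detection, Polygon

    # one datum with a single groundtruth/prediction polygon pair
    detection = Detection(
        uid="datum0",
        groundtruths=[
            Polygon(
                shape=ShapelyPolygon([(0, 0), (10, 0), (10, 10), (0, 10)]),
                labels=[("class", "dog")],
            )
        ],
        predictions=[
            Polygon(
                shape=ShapelyPolygon([(1, 1), (11, 1), (11, 11), (1, 11)]),
                labels=[("class", "dog")],
                scores=[0.9],
            )
        ],
    )

    loader = DataLoader()
    loader.add_polygons(detections=[detection])  # new in 0.33.4
    evaluator = loader.finalize()

    # per the module docstring, evaluate() accepts IoU thresholds;
    # remaining keyword arguments are assumed to keep their defaults
    metrics = evaluator.evaluate(iou_thresholds=[0.5])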
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: valor-lite
- Version: 0.33.3
+ Version: 0.33.4
  Summary: Compute valor metrics directly in your client.
  License: MIT License
  
@@ -32,6 +32,7 @@ Requires-Dist: Pillow >=9.1.0
  Requires-Dist: tqdm
  Requires-Dist: requests
  Requires-Dist: numpy
+ Requires-Dist: shapely
  Requires-Dist: importlib-metadata ; python_version < "3.8"
  Provides-Extra: test
  Requires-Dist: pytest ; extra == 'test'
@@ -0,0 +1,12 @@
+ valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
+ valor_lite/detection/__init__.py,sha256=taEB7NQBsyCSsMtvDA7E_FhDxMfJB1rax-Rl1ZtRMoE,1017
+ valor_lite/detection/annotation.py,sha256=BspLc3SjWXj6qYlGGpzDPHEZ8j7CiFzIL5cNlk0WCAM,2732
+ valor_lite/detection/computation.py,sha256=AsF9zb_c7XQ7z3LfOAtMPZDkmuCZmB8HeAMZJlCaO6U,24696
+ valor_lite/detection/manager.py,sha256=vnouYdx_Ul9jz_pOYt8xfvdPrNy0S4SB838KXvtS1Bw,45301
+ valor_lite/detection/metric.py,sha256=DLqpODJZOG7SCqt7TCgR4am68PQORRCIQW_SXiTb1IA,9473
+ valor_lite-0.33.4.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
+ valor_lite-0.33.4.dist-info/METADATA,sha256=Eqb7KlTizDcjIV7eWM67zgdbbbVICGURdGrbben2NrI,1865
+ valor_lite-0.33.4.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ valor_lite-0.33.4.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+ valor_lite-0.33.4.dist-info/RECORD,,
@@ -1,12 +0,0 @@
- valor_lite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- valor_lite/schemas.py,sha256=r4cC10w1xYsA785KmGE4ePeOX3wzEs846vT7QAiVg_I,293
- valor_lite/detection/__init__.py,sha256=WHLHwHoKzXTBjkjC6E1_lhqB7gRWkiGWVWPqkKn-yK8,997
- valor_lite/detection/annotation.py,sha256=c45pZD1Pp2vf5GeyW_6Kl9JCx5FoaaktCaaa4q3QDUo,1758
- valor_lite/detection/computation.py,sha256=7PttK0VuOWlhRN92wpLVrGzB7RAdfdZyT3b1aTm_WaI,23214
- valor_lite/detection/manager.py,sha256=ziVnukGs-WrkyBEBBO3LVSv4LTbaWFaWqLWarVosj2c,35807
- valor_lite/detection/metric.py,sha256=DLqpODJZOG7SCqt7TCgR4am68PQORRCIQW_SXiTb1IA,9473
- valor_lite-0.33.3.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
- valor_lite-0.33.3.dist-info/METADATA,sha256=FBpd-wMWv-m37EK5BfFuiVmnJXg4GNzCaJrTDHv4-gE,1842
- valor_lite-0.33.3.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- valor_lite-0.33.3.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
- valor_lite-0.33.3.dist-info/RECORD,,