valor-lite 0.34.3__py3-none-any.whl → 0.35.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -572,12 +572,8 @@ class Metric(BaseMetric):
                     int
                     | list[
                         dict[
-                            str,  # either `datum`, `groundtruth`, `prediction` or score
-                            str  # datum uid
-                            | dict[
-                                str, float
-                            ]  # bounding box (xmin, xmax, ymin, ymax)
-                            | float,  # prediction score
+                            str,  # either `datum_id`, `ground_truth_id`, `prediction_id`
+                            str,  # string identifier
                         ]
                     ],
                 ],
@@ -590,12 +586,8 @@ class Metric(BaseMetric):
                 int
                 | list[
                     dict[
-                        str,  # either `datum`, `prediction` or score
-                        str  # datum uid
-                        | float  # prediction score
-                        | dict[
-                            str, float
-                        ],  # bounding box (xmin, xmax, ymin, ymax)
+                        str,  # either `datum_id` or `prediction_id``
+                        str,  # string identifier
                     ]
                 ],
             ],
@@ -607,18 +599,14 @@ class Metric(BaseMetric):
                 int
                 | list[
                     dict[
-                        str,  # either `datum` or `groundtruth`
-                        str  # datum uid
-                        | dict[
-                            str, float
-                        ],  # bounding box (xmin, xmax, ymin, ymax)
+                        str,  # either `datum_id` or `ground_truth_id`
+                        str,  # string identifier
                     ]
                 ],
             ],
         ],
         score_threshold: float,
         iou_threshold: float,
-        maximum_number_of_examples: int,
     ):
         """
         Confusion matrix for object detection tasks.
@@ -634,10 +622,9 @@ class Metric(BaseMetric):
                     'count': int,
                     'examples': [
                         {
-                            'datum': str,
-                            'groundtruth': dict,  # {'xmin': float, 'xmax': float, 'ymin': float, 'ymax': float}
-                            'prediction': dict,  # {'xmin': float, 'xmax': float, 'ymin': float, 'ymax': float}
-                            'score': float,
+                            'datum_id': str,
+                            'groundtruth_id': str,
+                            'prediction_id': str
                         },
                         ...
                     ],
@@ -653,9 +640,8 @@ class Metric(BaseMetric):
                 'count': int,
                 'examples': [
                     {
-                        'datum': str,
-                        'prediction': dict,  # {'xmin': float, 'xmax': float, 'ymin': float, 'ymax': float}
-                        'score': float,
+                        'datum_id': str,
+                        'prediction_id': str
                     },
                     ...
                 ],
@@ -669,8 +655,8 @@ class Metric(BaseMetric):
                 'count': int,
                 'examples': [
                     {
-                        'datum': str,
-                        'groundtruth': dict,  # {'xmin': float, 'xmax': float, 'ymin': float, 'ymax': float}
+                        'datum_id': str,
+                        'groundtruth_id': str
                     },
                     ...
                 ],
@@ -683,22 +669,19 @@ class Metric(BaseMetric):
         confusion_matrix : dict
             A nested dictionary where the first key is the ground truth label value, the second key
             is the prediction label value, and the innermost dictionary contains either a `count`
-            or a list of `examples`. Each example includes the datum UID, ground truth bounding box,
-            predicted bounding box, and prediction scores.
+            or a list of `examples`. Each example includes annotation and datum identifers.
         unmatched_predictions : dict
             A dictionary where each key is a prediction label value with no corresponding ground truth
             (subset of false positives). The value is a dictionary containing either a `count` or a list of
-            `examples`. Each example includes the datum UID, predicted bounding box, and prediction score.
+            `examples`. Each example includes annotation and datum identifers.
         unmatched_ground_truths : dict
             A dictionary where each key is a ground truth label value for which the model failed to predict
             (subset of false negatives). The value is a dictionary containing either a `count` or a list of `examples`.
-            Each example includes the datum UID and ground truth bounding box.
+            Each example includes annotation and datum identifers.
         score_threshold : float
             The confidence score threshold used to filter predictions.
         iou_threshold : float
             The Intersection over Union (IOU) threshold used to determine true positives.
-        maximum_number_of_examples : int
-            The maximum number of examples per element.

         Returns
         -------
@@ -714,6 +697,5 @@ class Metric(BaseMetric):
             parameters={
                 "score_threshold": score_threshold,
                 "iou_threshold": iou_threshold,
-                "maximum_number_of_examples": maximum_number_of_examples,
             },
         )
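
Under the new schema, confusion matrix examples no longer embed bounding boxes or prediction scores; each example carries only string identifiers that can be joined back to the original annotations, and the maximum_number_of_examples cap is gone since the unpacking now reports every unique pair. Note that the docstring above lists 'groundtruth_id' while the unpacking code further down emits 'ground_truth_id'. A hypothetical confusion_matrix value under the new format (labels and identifiers are illustrative only):

    {
        "cat": {                          # ground truth label
            "dog": {                      # prediction label
                "count": 1,
                "examples": [
                    {
                        "datum_id": "img_001",
                        "ground_truth_id": "gt_17",
                        "prediction_id": "pd_42",
                    }
                ],
            }
        }
    }
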
@@ -3,6 +3,7 @@ from collections import defaultdict
 import numpy as np
 from numpy.typing import NDArray

+from valor_lite.object_detection.computation import PairClassification
 from valor_lite.object_detection.metric import Metric, MetricType


@@ -11,35 +12,27 @@ def unpack_precision_recall_into_metric_lists(
         tuple[
             NDArray[np.float64],
             NDArray[np.float64],
-            NDArray[np.float64],
-            float,
         ],
         tuple[
             NDArray[np.float64],
             NDArray[np.float64],
-            NDArray[np.float64],
-            float,
         ],
         NDArray[np.float64],
         NDArray[np.float64],
     ],
     iou_thresholds: list[float],
     score_thresholds: list[float],
-    index_to_label: dict[int, str],
+    index_to_label: list[str],
     label_metadata: NDArray[np.int32],
 ):
     (
         (
             average_precision,
             mean_average_precision,
-            average_precision_average_over_ious,
-            mean_average_precision_average_over_ious,
         ),
         (
             average_recall,
             mean_average_recall,
-            average_recall_averaged_over_scores,
-            mean_average_recall_averaged_over_scores,
         ),
         precision_recall,
         pr_curves,
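
index_to_label changes from a dict keyed by integer index to a plain list, so every per-label loop below switches from .items() to enumerate(). A minimal sketch of the equivalence, with hypothetical labels:

    index_to_label_old = {0: "cat", 1: "dog"}   # 0.34.x shape
    index_to_label_new = ["cat", "dog"]         # 0.35.0 shape

    assert list(index_to_label_old.items()) == list(enumerate(index_to_label_new))
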
@@ -54,7 +47,7 @@ def unpack_precision_recall_into_metric_lists(
             label=label,
         )
         for iou_idx, iou_threshold in enumerate(iou_thresholds)
-        for label_idx, label in index_to_label.items()
+        for label_idx, label in enumerate(index_to_label)
         if int(label_metadata[label_idx, 0]) > 0
     ]

@@ -66,19 +59,21 @@ def unpack_precision_recall_into_metric_lists(
         for iou_idx, iou_threshold in enumerate(iou_thresholds)
     ]

+    # TODO - (c.zaloom) will be removed in the future
     metrics[MetricType.APAveragedOverIOUs] = [
         Metric.average_precision_averaged_over_IOUs(
-            value=float(average_precision_average_over_ious[label_idx]),
+            value=float(average_precision.mean(axis=0)[label_idx]),
             iou_thresholds=iou_thresholds,
             label=label,
         )
-        for label_idx, label in index_to_label.items()
+        for label_idx, label in enumerate(index_to_label)
         if int(label_metadata[label_idx, 0]) > 0
     ]

+    # TODO - (c.zaloom) will be removed in the future
     metrics[MetricType.mAPAveragedOverIOUs] = [
         Metric.mean_average_precision_averaged_over_IOUs(
-            value=float(mean_average_precision_average_over_ious),
+            value=float(mean_average_precision.mean()),
             iou_thresholds=iou_thresholds,
         )
     ]
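
The precision/recall computation no longer returns pre-averaged arrays; the averaged-over-IOUs values (and, further down, the averaged-over-scores values) are now derived at unpacking time by averaging the per-threshold results. A minimal sketch, assuming average_precision is shaped (n_ious, n_labels) and mean_average_precision is shaped (n_ious,), which is what the indexing above implies:

    import numpy as np

    average_precision = np.array([[0.50, 0.80],   # IoU threshold 0.50
                                  [0.30, 0.60]])  # IoU threshold 0.75
    mean_average_precision = average_precision.mean(axis=1)        # per-IoU mAP

    ap_averaged_over_ious = average_precision.mean(axis=0)         # [0.40, 0.70], per label
    map_averaged_over_ious = float(mean_average_precision.mean())  # 0.55
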
@@ -91,7 +86,7 @@ def unpack_precision_recall_into_metric_lists(
             label=label,
         )
         for score_idx, score_threshold in enumerate(score_thresholds)
-        for label_idx, label in index_to_label.items()
+        for label_idx, label in enumerate(index_to_label)
         if int(label_metadata[label_idx, 0]) > 0
     ]

@@ -104,20 +99,22 @@ def unpack_precision_recall_into_metric_lists(
         for score_idx, score_threshold in enumerate(score_thresholds)
     ]

+    # TODO - (c.zaloom) will be removed in the future
     metrics[MetricType.ARAveragedOverScores] = [
         Metric.average_recall_averaged_over_scores(
-            value=float(average_recall_averaged_over_scores[label_idx]),
+            value=float(average_recall.mean(axis=0)[label_idx]),
             score_thresholds=score_thresholds,
             iou_thresholds=iou_thresholds,
             label=label,
         )
-        for label_idx, label in index_to_label.items()
+        for label_idx, label in enumerate(index_to_label)
         if int(label_metadata[label_idx, 0]) > 0
     ]

+    # TODO - (c.zaloom) will be removed in the future
     metrics[MetricType.mARAveragedOverScores] = [
         Metric.mean_average_recall_averaged_over_scores(
-            value=float(mean_average_recall_averaged_over_scores),
+            value=float(mean_average_recall.mean()),
             score_thresholds=score_thresholds,
             iou_thresholds=iou_thresholds,
         )
@@ -131,12 +128,11 @@ def unpack_precision_recall_into_metric_lists(
             label=label,
         )
         for iou_idx, iou_threshold in enumerate(iou_thresholds)
-        for label_idx, label in index_to_label.items()
+        for label_idx, label in enumerate(index_to_label)
         if label_metadata[label_idx, 0] > 0
     ]

-    for label_idx, label in index_to_label.items():
-
+    for label_idx, label in enumerate(index_to_label):
         if label_metadata[label_idx, 0] == 0:
             continue

@@ -180,315 +176,148 @@ def unpack_precision_recall_into_metric_lists(
     return metrics


-def _convert_example_to_dict(box: NDArray[np.float16]) -> dict[str, float]:
-    """
-    Converts a cached bounding box example to dictionary format.
-    """
-    return {
-        "xmin": float(box[0]),
-        "xmax": float(box[1]),
-        "ymin": float(box[2]),
-        "ymax": float(box[3]),
-    }
-
-
-def _unpack_confusion_matrix_value(
-    confusion_matrix: NDArray[np.float64],
-    number_of_labels: int,
-    number_of_examples: int,
-    index_to_uid: dict[int, str],
-    index_to_label: dict[int, str],
-    groundtruth_examples: dict[int, NDArray[np.float16]],
-    prediction_examples: dict[int, NDArray[np.float16]],
-) -> dict[
-    str,
-    dict[
-        str,
-        dict[
-            str,
-            int
-            | list[
-                dict[
-                    str,
-                    str | dict[str, float] | float,
-                ]
-            ],
-        ],
-    ],
-]:
-    """
-    Unpacks a numpy array of confusion matrix counts and examples.
-    """
-
-    datum_idx = lambda gt_label_idx, pd_label_idx, example_idx: int(  # noqa: E731 - lambda fn
-        confusion_matrix[
-            gt_label_idx,
-            pd_label_idx,
-            example_idx * 4 + 1,
-        ]
+def _create_empty_confusion_matrix(index_to_labels: list[str]):
+    unmatched_ground_truths = dict()
+    unmatched_predictions = dict()
+    confusion_matrix = dict()
+    for label in index_to_labels:
+        unmatched_ground_truths[label] = {"count": 0, "examples": []}
+        unmatched_predictions[label] = {"count": 0, "examples": []}
+        confusion_matrix[label] = {}
+        for plabel in index_to_labels:
+            confusion_matrix[label][plabel] = {"count": 0, "examples": []}
+    return (
+        confusion_matrix,
+        unmatched_predictions,
+        unmatched_ground_truths,
     )

-    groundtruth_idx = lambda gt_label_idx, pd_label_idx, example_idx: int(  # noqa: E731 - lambda fn
-        confusion_matrix[
-            gt_label_idx,
-            pd_label_idx,
-            example_idx * 4 + 2,
-        ]
-    )

-    prediction_idx = lambda gt_label_idx, pd_label_idx, example_idx: int(  # noqa: E731 - lambda fn
-        confusion_matrix[
-            gt_label_idx,
-            pd_label_idx,
-            example_idx * 4 + 3,
-        ]
-    )
+def _unpack_confusion_matrix(
+    ids: NDArray[np.int32],
+    mask_matched: NDArray[np.bool_],
+    mask_fp_unmatched: NDArray[np.bool_],
+    mask_fn_unmatched: NDArray[np.bool_],
+    index_to_datum_id: list[str],
+    index_to_groundtruth_id: list[str],
+    index_to_prediction_id: list[str],
+    index_to_label: list[str],
+    iou_threhsold: float,
+    score_threshold: float,
+):
+    (
+        confusion_matrix,
+        unmatched_predictions,
+        unmatched_ground_truths,
+    ) = _create_empty_confusion_matrix(index_to_label)

-    score_idx = lambda gt_label_idx, pd_label_idx, example_idx: float(  # noqa: E731 - lambda fn
-        confusion_matrix[
-            gt_label_idx,
-            pd_label_idx,
-            example_idx * 4 + 4,
-        ]
+    unique_matches = np.unique(
+        ids[np.ix_(mask_matched, (0, 1, 2, 3, 4))], axis=0  # type: ignore - numpy ix_ typing
     )
-
-    return {
-        index_to_label[gt_label_idx]: {
-            index_to_label[pd_label_idx]: {
-                "count": max(
-                    int(confusion_matrix[gt_label_idx, pd_label_idx, 0]),
-                    0,
-                ),
-                "examples": [
-                    {
-                        "datum": index_to_uid[
-                            datum_idx(gt_label_idx, pd_label_idx, example_idx)
-                        ],
-                        "groundtruth": _convert_example_to_dict(
-                            groundtruth_examples[
-                                datum_idx(
-                                    gt_label_idx,
-                                    pd_label_idx,
-                                    example_idx,
-                                )
-                            ][
-                                groundtruth_idx(
-                                    gt_label_idx,
-                                    pd_label_idx,
-                                    example_idx,
-                                )
-                            ]
-                        ),
-                        "prediction": _convert_example_to_dict(
-                            prediction_examples[
-                                datum_idx(
-                                    gt_label_idx,
-                                    pd_label_idx,
-                                    example_idx,
-                                )
-                            ][
-                                prediction_idx(
-                                    gt_label_idx,
-                                    pd_label_idx,
-                                    example_idx,
-                                )
-                            ]
-                        ),
-                        "score": score_idx(
-                            gt_label_idx, pd_label_idx, example_idx
-                        ),
-                    }
-                    for example_idx in range(number_of_examples)
-                    if datum_idx(gt_label_idx, pd_label_idx, example_idx) >= 0
-                ],
-            }
-            for pd_label_idx in range(number_of_labels)
-        }
-        for gt_label_idx in range(number_of_labels)
-    }
-
-
-def _unpack_unmatched_predictions_value(
-    unmatched_predictions: NDArray[np.float64],
-    number_of_labels: int,
-    number_of_examples: int,
-    index_to_uid: dict[int, str],
-    index_to_label: dict[int, str],
-    prediction_examples: dict[int, NDArray[np.float16]],
-) -> dict[
-    str,
-    dict[
-        str,
-        int | list[dict[str, str | float | dict[str, float]]],
-    ],
-]:
-    """
-    Unpacks a numpy array of unmatched_prediction counts and examples.
-    """
-
-    datum_idx = (
-        lambda pd_label_idx, example_idx: int(  # noqa: E731 - lambda fn
-            unmatched_predictions[
-                pd_label_idx,
-                example_idx * 3 + 1,
-            ]
-        )
+    unique_unmatched_predictions = np.unique(
+        ids[np.ix_(mask_fp_unmatched, (0, 2, 4))], axis=0  # type: ignore - numpy ix_ typing
     )
-
-    prediction_idx = (
-        lambda pd_label_idx, example_idx: int(  # noqa: E731 - lambda fn
-            unmatched_predictions[
-                pd_label_idx,
-                example_idx * 3 + 2,
-            ]
-        )
+    unique_unmatched_groundtruths = np.unique(
+        ids[np.ix_(mask_fn_unmatched, (0, 1, 3))], axis=0  # type: ignore - numpy ix_ typing
     )

-    score_idx = (
-        lambda pd_label_idx, example_idx: float(  # noqa: E731 - lambda fn
-            unmatched_predictions[
-                pd_label_idx,
-                example_idx * 3 + 3,
-            ]
-        )
-    )
+    n_matched = unique_matches.shape[0]
+    n_unmatched_predictions = unique_unmatched_predictions.shape[0]
+    n_unmatched_groundtruths = unique_unmatched_groundtruths.shape[0]
+    n_max = max(n_matched, n_unmatched_groundtruths, n_unmatched_predictions)

-    return {
-        index_to_label[pd_label_idx]: {
-            "count": max(
-                int(unmatched_predictions[pd_label_idx, 0]),
-                0,
-            ),
-            "examples": [
+    for idx in range(n_max):
+        if idx < n_unmatched_groundtruths:
+            label = index_to_label[unique_unmatched_groundtruths[idx, 2]]
+            unmatched_ground_truths[label]["count"] += 1
+            unmatched_ground_truths[label]["examples"].append(
                 {
-                    "datum": index_to_uid[
-                        datum_idx(pd_label_idx, example_idx)
+                    "datum_id": index_to_datum_id[
+                        unique_unmatched_groundtruths[idx, 0]
+                    ],
+                    "ground_truth_id": index_to_groundtruth_id[
+                        unique_unmatched_groundtruths[idx, 1]
                     ],
-                    "prediction": _convert_example_to_dict(
-                        prediction_examples[
-                            datum_idx(pd_label_idx, example_idx)
-                        ][prediction_idx(pd_label_idx, example_idx)]
-                    ),
-                    "score": score_idx(pd_label_idx, example_idx),
                 }
-                for example_idx in range(number_of_examples)
-                if datum_idx(pd_label_idx, example_idx) >= 0
-            ],
-        }
-        for pd_label_idx in range(number_of_labels)
-    }
-
-
-def _unpack_unmatched_ground_truths_value(
-    unmatched_ground_truths: NDArray[np.int32],
-    number_of_labels: int,
-    number_of_examples: int,
-    index_to_uid: dict[int, str],
-    index_to_label: dict[int, str],
-    groundtruth_examples: dict[int, NDArray[np.float16]],
-) -> dict[str, dict[str, int | list[dict[str, str | dict[str, float]]]]]:
-    """
-    Unpacks a numpy array of unmatched ground truth counts and examples.
-    """
-
-    datum_idx = (
-        lambda gt_label_idx, example_idx: int(  # noqa: E731 - lambda fn
-            unmatched_ground_truths[
-                gt_label_idx,
-                example_idx * 2 + 1,
-            ]
-        )
-    )
-
-    groundtruth_idx = (
-        lambda gt_label_idx, example_idx: int(  # noqa: E731 - lambda fn
-            unmatched_ground_truths[
-                gt_label_idx,
-                example_idx * 2 + 2,
-            ]
-        )
-    )
-
-    return {
-        index_to_label[gt_label_idx]: {
-            "count": max(
-                int(unmatched_ground_truths[gt_label_idx, 0]),
-                0,
-            ),
-            "examples": [
+            )
+        if idx < n_unmatched_predictions:
+            label = index_to_label[unique_unmatched_predictions[idx, 2]]
+            unmatched_predictions[label]["count"] += 1
+            unmatched_predictions[label]["examples"].append(
                 {
-                    "datum": index_to_uid[
-                        datum_idx(gt_label_idx, example_idx)
+                    "datum_id": index_to_datum_id[
+                        unique_unmatched_predictions[idx, 0]
+                    ],
+                    "prediction_id": index_to_prediction_id[
+                        unique_unmatched_predictions[idx, 1]
                     ],
-                    "groundtruth": _convert_example_to_dict(
-                        groundtruth_examples[
-                            datum_idx(gt_label_idx, example_idx)
-                        ][groundtruth_idx(gt_label_idx, example_idx)]
-                    ),
                 }
-                for example_idx in range(number_of_examples)
-                if datum_idx(gt_label_idx, example_idx) >= 0
-            ],
-        }
-        for gt_label_idx in range(number_of_labels)
-    }
+            )
+        if idx < n_matched:
+            glabel = index_to_label[unique_matches[idx, 3]]
+            plabel = index_to_label[unique_matches[idx, 4]]
+            confusion_matrix[glabel][plabel]["count"] += 1
+            confusion_matrix[glabel][plabel]["examples"].append(
+                {
+                    "datum_id": index_to_datum_id[unique_matches[idx, 0]],
+                    "ground_truth_id": index_to_groundtruth_id[
+                        unique_matches[idx, 1]
+                    ],
+                    "prediction_id": index_to_prediction_id[
+                        unique_matches[idx, 2]
+                    ],
+                }
+            )
+
+    return Metric.confusion_matrix(
+        confusion_matrix=confusion_matrix,
+        unmatched_ground_truths=unmatched_ground_truths,
+        unmatched_predictions=unmatched_predictions,
+        iou_threshold=iou_threhsold,
+        score_threshold=score_threshold,
+    )


 def unpack_confusion_matrix_into_metric_list(
-    results: tuple[
-        NDArray[np.float64],
-        NDArray[np.float64],
-        NDArray[np.int32],
-    ],
+    results: NDArray[np.uint8],
+    detailed_pairs: NDArray[np.float64],
     iou_thresholds: list[float],
     score_thresholds: list[float],
-    number_of_examples: int,
-    index_to_label: dict[int, str],
-    index_to_uid: dict[int, str],
-    groundtruth_examples: dict[int, NDArray[np.float16]],
-    prediction_examples: dict[int, NDArray[np.float16]],
+    index_to_datum_id: list[str],
+    index_to_groundtruth_id: list[str],
+    index_to_prediction_id: list[str],
+    index_to_label: list[str],
 ) -> list[Metric]:
-    (
-        confusion_matrix,
-        unmatched_predictions,
-        unmatched_ground_truths,
-    ) = results
-    n_labels = len(index_to_label)
+
+    ids = detailed_pairs[:, :5].astype(np.int32)
+
+    mask_matched = (
+        np.bitwise_and(
+            results, PairClassification.TP | PairClassification.FP_FN_MISCLF
+        )
+        > 0
+    )
+    mask_fp_unmatched = (
+        np.bitwise_and(results, PairClassification.FP_UNMATCHED) > 0
+    )
+    mask_fn_unmatched = (
+        np.bitwise_and(results, PairClassification.FN_UNMATCHED) > 0
+    )
+
     return [
-        Metric.confusion_matrix(
-            iou_threshold=iou_threshold,
+        _unpack_confusion_matrix(
+            ids=ids,
+            mask_matched=mask_matched[iou_idx, score_idx],
+            mask_fp_unmatched=mask_fp_unmatched[iou_idx, score_idx],
+            mask_fn_unmatched=mask_fn_unmatched[iou_idx, score_idx],
+            index_to_datum_id=index_to_datum_id,
+            index_to_groundtruth_id=index_to_groundtruth_id,
+            index_to_prediction_id=index_to_prediction_id,
+            index_to_label=index_to_label,
+            iou_threhsold=iou_threshold,
             score_threshold=score_threshold,
-            maximum_number_of_examples=number_of_examples,
-            confusion_matrix=_unpack_confusion_matrix_value(
-                confusion_matrix=confusion_matrix[iou_idx, score_idx, :, :, :],
-                number_of_labels=n_labels,
-                number_of_examples=number_of_examples,
-                index_to_label=index_to_label,
-                index_to_uid=index_to_uid,
-                groundtruth_examples=groundtruth_examples,
-                prediction_examples=prediction_examples,
-            ),
-            unmatched_predictions=_unpack_unmatched_predictions_value(
-                unmatched_predictions=unmatched_predictions[
-                    iou_idx, score_idx, :, :
-                ],
-                number_of_labels=n_labels,
-                number_of_examples=number_of_examples,
-                index_to_label=index_to_label,
-                index_to_uid=index_to_uid,
-                prediction_examples=prediction_examples,
-            ),
-            unmatched_ground_truths=_unpack_unmatched_ground_truths_value(
-                unmatched_ground_truths=unmatched_ground_truths[
-                    iou_idx, score_idx, :, :
-                ],
-                number_of_labels=n_labels,
-                number_of_examples=number_of_examples,
-                index_to_label=index_to_label,
-                index_to_uid=index_to_uid,
-                groundtruth_examples=groundtruth_examples,
-            ),
         )
         for iou_idx, iou_threshold in enumerate(iou_thresholds)
         for score_idx, score_threshold in enumerate(score_thresholds)
+        if (results[iou_idx, score_idx] != -1).any()
     ]
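
The rewritten unpacking path classifies each detection pair with bit flags and filters with boolean masks instead of decoding packed example arrays. A minimal sketch of the masking idea, assuming PairClassification is an IntFlag whose members carry distinct bits (the flag values below are hypothetical; the real ones live in valor_lite.object_detection.computation):

    from enum import IntFlag

    import numpy as np


    class PairClassification(IntFlag):  # hypothetical stand-in for the real enum
        TP = 1
        FP_FN_MISCLF = 2
        FP_UNMATCHED = 4
        FN_UNMATCHED = 8


    # one classification per pair for a single (iou, score) threshold combination
    results = np.array([1, 2, 4, 8, 0], dtype=np.uint8)

    # matched pairs are either true positives or misclassifications
    mask_matched = (
        np.bitwise_and(
            results, PairClassification.TP | PairClassification.FP_FN_MISCLF
        )
        > 0
    )
    mask_fp_unmatched = np.bitwise_and(results, PairClassification.FP_UNMATCHED) > 0
    mask_fn_unmatched = np.bitwise_and(results, PairClassification.FN_UNMATCHED) > 0

    print(mask_matched)       # [ True  True False False False]
    print(mask_fp_unmatched)  # [False False  True False False]
    print(mask_fn_unmatched)  # [False False False  True False]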