valor-lite 0.33.17__py3-none-any.whl → 0.33.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of valor-lite might be problematic.
- valor_lite/classification/computation.py +6 -6
- valor_lite/classification/metric.py +6 -6
- valor_lite/classification/utilities.py +10 -8
- valor_lite/object_detection/computation.py +14 -14
- valor_lite/object_detection/manager.py +6 -2
- valor_lite/object_detection/metric.py +12 -12
- valor_lite/object_detection/utilities.py +21 -19
- valor_lite/profiling.py +374 -0
- valor_lite/semantic_segmentation/__init__.py +2 -1
- valor_lite/semantic_segmentation/annotation.py +84 -1
- valor_lite/semantic_segmentation/benchmark.py +151 -0
- valor_lite/semantic_segmentation/computation.py +20 -33
- valor_lite/semantic_segmentation/manager.py +6 -2
- valor_lite/semantic_segmentation/metric.py +10 -10
- valor_lite/semantic_segmentation/utilities.py +6 -6
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/METADATA +9 -9
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/RECORD +20 -18
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/WHEEL +1 -1
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/LICENSE +0 -0
- {valor_lite-0.33.17.dist-info → valor_lite-0.33.19.dist-info}/top_level.txt +0 -0
valor_lite/classification/computation.py

@@ -282,7 +282,7 @@ def compute_confusion_matrix(
  NDArray[np.float64]
  Confusion matrix.
  NDArray[np.int32]
- Ground
+ Unmatched Ground Truths.
  """

  n_labels = label_metadata.shape[0]
@@ -292,7 +292,7 @@ def compute_confusion_matrix(
  (n_scores, n_labels, n_labels, 2 * n_examples + 1),
  dtype=np.float32,
  )
-
+ unmatched_ground_truths = -1 * np.ones(
  (n_scores, n_labels, n_examples + 1),
  dtype=np.int32,
  )
@@ -339,7 +339,7 @@ def compute_confusion_matrix(
  score_idx, misclf_labels[:, 0], misclf_labels[:, 1], 0
  ] = misclf_counts

-
+ unmatched_ground_truths[score_idx, misprd_labels, 0] = misprd_counts

  if n_examples > 0:
  for label_idx in range(n_labels):
@@ -375,16 +375,16 @@ def compute_confusion_matrix(
  1 : 2 * misclf_label_examples.shape[0] + 1,
  ] = misclf_label_examples[:, [0, 3]].flatten()

- #
+ # unmatched ground truth examples
  mask_misprd_label = misprd_examples[:, 1] == label_idx
  if misprd_examples.size > 0:
  misprd_label_examples = misprd_examples[mask_misprd_label][
  :n_examples
  ]
-
+ unmatched_ground_truths[
  score_idx,
  label_idx,
  1 : misprd_label_examples.shape[0] + 1,
  ] = misprd_label_examples[:, 0].flatten()

- return confusion_matrix,
+ return confusion_matrix, unmatched_ground_truths
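For context on the layout above: `unmatched_ground_truths` is allocated as a `(n_scores, n_labels, n_examples + 1)` array filled with -1, slot 0 of the last axis receives the per-label count, and the remaining slots receive example datum indices. A minimal sketch of reading one row back out under that assumption (the `decode_row` helper is illustrative, not part of valor-lite):

import numpy as np

def decode_row(row: np.ndarray) -> tuple[int, list[int]]:
    # slot 0: count (-1 fill means no unmatched ground truths for this label)
    count = max(int(row[0]), 0)
    # slots 1..n_examples: datum indices of stored examples, -1 where unused
    examples = [int(v) for v in row[1:] if v >= 0]
    return count, examples

row = np.array([2, 7, 11, -1], dtype=np.int32)  # toy row with n_examples = 3
print(decode_row(row))  # (2, [7, 11])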
valor_lite/classification/metric.py

@@ -321,7 +321,7 @@ class Metric(BaseMetric):
  ],
  ],
  ],
-
+ unmatched_ground_truths: dict[
  str, # ground truth label value
  dict[
  str, # either `count` or `examples`
@@ -335,8 +335,8 @@ class Metric(BaseMetric):
  The confusion matrix and related metrics for the classification task.

  This class encapsulates detailed information about the model's performance, including correct
- predictions, misclassifications,
- (false negatives). It provides counts and examples for each category to facilitate in-depth analysis.
+ predictions, misclassifications, unmatched predictions (subset of false positives), and unmatched ground truths
+ (subset of false negatives). It provides counts and examples for each category to facilitate in-depth analysis.

  Confusion Matrix Structure:
  {
@@ -358,7 +358,7 @@ class Metric(BaseMetric):
  ...
  }

-
+ Unmatched Ground Truths Structure:
  {
  ground_truth_label: {
  'count': int,
@@ -379,7 +379,7 @@ class Metric(BaseMetric):
  A nested dictionary where the first key is the ground truth label value, the second key
  is the prediction label value, and the innermost dictionary contains either a `count`
  or a list of `examples`. Each example includes the datum UID and prediction score.
-
+ unmatched_ground_truths : dict
  A dictionary where each key is a ground truth label value for which the model failed to predict
  (false negatives). The value is a dictionary containing either a `count` or a list of `examples`.
  Each example includes the datum UID.
@@ -396,7 +396,7 @@ class Metric(BaseMetric):
  type=MetricType.ConfusionMatrix.value,
  value={
  "confusion_matrix": confusion_matrix,
- "
+ "unmatched_ground_truths": unmatched_ground_truths,
  },
  parameters={
  "score_threshold": score_threshold,
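Based on the docstring structure shown above, a classification ConfusionMatrix metric emitted after this change would look roughly like the following; the label names, UIDs, and example field names are invented for illustration:

metric_value = {
    "confusion_matrix": {
        "cat": {                      # ground truth label
            "dog": {                  # prediction label
                "count": 3,
                "examples": [{"datum": "uid_12", "score": 0.81}],
            },
        },
    },
    "unmatched_ground_truths": {      # key introduced here (the old key name is truncated above)
        "cat": {
            "count": 2,
            "examples": [{"datum": "uid_33"}],
        },
    },
}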
valor_lite/classification/utilities.py

@@ -153,20 +153,20 @@ def _unpack_confusion_matrix_value(
  }


- def
-
+ def _unpack_unmatched_ground_truths_value(
+ unmatched_ground_truths: NDArray[np.int32],
  number_of_labels: int,
  number_of_examples: int,
  index_to_uid: dict[int, str],
  index_to_label: dict[int, str],
  ) -> dict[str, dict[str, int | list[dict[str, str]]]]:
  """
- Unpacks a numpy array of
+ Unpacks a numpy array of unmatched ground truth counts and examples.
  """

  datum_idx = (
  lambda gt_label_idx, example_idx: int( # noqa: E731 - lambda fn
-
+ unmatched_ground_truths[
  gt_label_idx,
  example_idx + 1,
  ]
@@ -176,7 +176,7 @@ def _unpack_missing_predictions_value(
  return {
  index_to_label[gt_label_idx]: {
  "count": max(
- int(
+ int(unmatched_ground_truths[gt_label_idx, 0]),
  0,
  ),
  "examples": [
@@ -197,7 +197,7 @@ def unpack_confusion_matrix_into_metric_list(
  index_to_label: dict[int, str],
  ) -> list[Metric]:

- (confusion_matrix,
+ (confusion_matrix, unmatched_ground_truths) = results
  n_scores, n_labels, _, _ = confusion_matrix.shape
  return [
  Metric.confusion_matrix(
@@ -210,8 +210,10 @@ def unpack_confusion_matrix_into_metric_list(
  index_to_label=index_to_label,
  index_to_uid=index_to_uid,
  ),
-
-
+ unmatched_ground_truths=_unpack_unmatched_ground_truths_value(
+ unmatched_ground_truths=unmatched_ground_truths[
+ score_idx, :, :
+ ],
  number_of_labels=n_labels,
  number_of_examples=number_of_examples,
  index_to_label=index_to_label,
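Two details in the unpacking code above are easy to miss: the count slot is clamped with `max(int(...), 0)` because untouched slots keep the -1 fill value, and the helper receives a per-score-threshold slice of the packed array. A rough sketch of that flow, with invented shapes and indices:

import numpy as np

n_scores, n_labels, n_examples = 2, 3, 2
unmatched_ground_truths = -1 * np.ones(
    (n_scores, n_labels, n_examples + 1), dtype=np.int32
)
unmatched_ground_truths[0, 1, 0] = 1   # one unmatched ground truth for label 1 at threshold 0
unmatched_ground_truths[0, 1, 1] = 5   # datum index of the stored example

for score_idx in range(n_scores):
    per_threshold = unmatched_ground_truths[score_idx, :, :]  # slice passed to the helper
    counts = {
        label_idx: max(int(per_threshold[label_idx, 0]), 0)
        for label_idx in range(n_labels)
    }
    print(score_idx, counts)  # 0 {0: 0, 1: 1, 2: 0} then 1 {0: 0, 1: 0, 2: 0}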
valor_lite/object_detection/computation.py

@@ -669,9 +669,9 @@ def compute_confusion_matrix(
  NDArray[np.float64]
  Confusion matrix.
  NDArray[np.float64]
-
+ Unmatched Predictions.
  NDArray[np.int32]
-
+ Unmatched Ground Truths.
  """

  n_labels = label_metadata.shape[0]
@@ -683,12 +683,12 @@ def compute_confusion_matrix(
  (n_ious, n_scores, n_labels, n_labels, 4 * n_examples + 1),
  dtype=np.float32,
  )
-
+ unmatched_predictions = -1 * np.ones(
  # (datum idx, pd idx, pd score) * n_examples + count
  (n_ious, n_scores, n_labels, 3 * n_examples + 1),
  dtype=np.float32,
  )
-
+ unmatched_ground_truths = -1 * np.ones(
  # (datum idx, gt idx) * n_examples + count
  (n_ious, n_scores, n_labels, 2 * n_examples + 1),
  dtype=np.int32,
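The allocations above (object_detection/computation.py) give each output a packed last axis: slot 0 holds a count (see the `[..., 0] = ..._counts` assignments in the next hunks) and the remaining slots hold flattened example tuples, as the inline comments note. A sketch that mirrors the two "unmatched" arrays under that assumption, with invented dimensions:

import numpy as np

n_ious, n_scores, n_labels, n_examples = 2, 3, 5, 4

# packs (datum idx, pd idx, pd score) per example, plus the leading count slot
unmatched_predictions = -1 * np.ones(
    (n_ious, n_scores, n_labels, 3 * n_examples + 1), dtype=np.float32
)
# packs (datum idx, gt idx) per example, plus the leading count slot
unmatched_ground_truths = -1 * np.ones(
    (n_ious, n_scores, n_labels, 2 * n_examples + 1), dtype=np.int32
)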
@@ -793,7 +793,7 @@ def compute_confusion_matrix(
  data[mask_misclf], unique_idx=[0, 1, 2, 4, 5], label_idx=[3, 4]
  )

- # count
+ # count unmatched predictions
  (
  halluc_examples,
  halluc_labels,
@@ -802,7 +802,7 @@ def compute_confusion_matrix(
  data[mask_halluc], unique_idx=[0, 2, 5], label_idx=2
  )

- # count
+ # count unmatched ground truths
  (
  misprd_examples,
  misprd_labels,
@@ -822,13 +822,13 @@ def compute_confusion_matrix(
  misclf_labels[:, 1],
  0,
  ] = misclf_counts
-
+ unmatched_predictions[
  iou_idx,
  score_idx,
  halluc_labels,
  0,
  ] = halluc_counts
-
+ unmatched_ground_truths[
  iou_idx,
  score_idx,
  misprd_labels,
@@ -877,26 +877,26 @@ def compute_confusion_matrix(
  :, [0, 1, 2, 6]
  ].flatten()

- #
+ # unmatched prediction examples
  mask_halluc_label = halluc_examples[:, 5] == label_idx
  if mask_halluc_label.sum() > 0:
  halluc_label_examples = halluc_examples[
  mask_halluc_label
  ][:n_examples]
-
+ unmatched_predictions[
  iou_idx,
  score_idx,
  label_idx,
  1 : 3 * halluc_label_examples.shape[0] + 1,
  ] = halluc_label_examples[:, [0, 2, 6]].flatten()

- #
+ # unmatched ground truth examples
  mask_misprd_label = misprd_examples[:, 4] == label_idx
  if misprd_examples.size > 0:
  misprd_label_examples = misprd_examples[
  mask_misprd_label
  ][:n_examples]
-
+ unmatched_ground_truths[
  iou_idx,
  score_idx,
  label_idx,
@@ -905,6 +905,6 @@ def compute_confusion_matrix(

  return (
  confusion_matrix,
-
-
+ unmatched_predictions,
+ unmatched_ground_truths,
  )
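The function still returns three arrays, now named for the confusion matrix plus the two packed "unmatched" outputs. A small sketch of decoding one `unmatched_predictions` row, assuming the `(datum idx, pd idx, pd score) * n_examples + count` packing noted above (the helper name is invented):

import numpy as np

def read_unmatched_predictions_row(row: np.ndarray):
    # slot 0: count; then (datum idx, pd idx, score) triples; -1 marks unused slots
    count = max(int(row[0]), 0)
    triples = []
    for start in range(1, row.shape[0], 3):
        datum_idx, pd_idx, score = row[start:start + 3]
        if datum_idx < 0:
            break
        triples.append((int(datum_idx), int(pd_idx), float(score)))
    return count, triples

row = np.array([1.0, 4.0, 2.0, 0.77, -1.0, -1.0, -1.0], dtype=np.float32)  # n_examples = 2
print(read_unmatched_predictions_row(row))  # one stored example: datum 4, prediction 2, score ~0.77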
valor_lite/object_detection/manager.py

@@ -334,6 +334,10 @@ class Evaluator:
  return metrics


+ def defaultdict_int():
+ return defaultdict(int)
+
+
  class DataLoader:
  """
  Object Detection DataLoader
@@ -342,8 +346,8 @@ class DataLoader:
  def __init__(self):
  self._evaluator = Evaluator()
  self.pairs: list[NDArray[np.float64]] = list()
- self.groundtruth_count = defaultdict(
- self.prediction_count = defaultdict(
+ self.groundtruth_count = defaultdict(defaultdict_int)
+ self.prediction_count = defaultdict(defaultdict_int)

  def _add_datum(self, uid: str) -> int:
  """
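The new module-level `defaultdict_int` keeps the counters behaving exactly as before; a plausible reason for pulling it out of the `defaultdict(...)` call (not stated in this diff) is that a named module-level factory can be pickled, while a lambda or locally defined factory cannot. A quick sketch of that difference:

import pickle
from collections import defaultdict

def defaultdict_int():
    return defaultdict(int)

counts = defaultdict(defaultdict_int)   # nested counter, e.g. counts[uid][label] += 1
counts["uid_0"]["cat"] += 1

pickle.dumps(counts)                    # fine: the factory is importable by name
# pickle.dumps(defaultdict(lambda: defaultdict(int)))  # would raise: a lambda cannot be pickled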
valor_lite/object_detection/metric.py

@@ -619,7 +619,7 @@ class Metric(BaseMetric):
  ],
  ],
  ],
-
+ unmatched_predictions: dict[
  str, # prediction label value
  dict[
  str, # either `count` or `examples`
@@ -636,7 +636,7 @@ class Metric(BaseMetric):
  ],
  ],
  ],
-
+ unmatched_ground_truths: dict[
  str, # ground truth label value
  dict[
  str, # either `count` or `examples`
@@ -660,8 +660,8 @@ class Metric(BaseMetric):
  Confusion matrix for object detection tasks.

  This class encapsulates detailed information about the model's performance, including correct
- predictions, misclassifications,
- (false negatives). It provides counts and examples for each category to facilitate in-depth analysis.
+ predictions, misclassifications, unmatched_predictions (subset of false positives), and unmatched ground truths
+ (subset of false negatives). It provides counts and examples for each category to facilitate in-depth analysis.

  Confusion Matrix Format:
  {
@@ -683,7 +683,7 @@ class Metric(BaseMetric):
  ...
  }

-
+ Unmatched Predictions Format:
  {
  <prediction label>: {
  'count': int,
@@ -699,7 +699,7 @@ class Metric(BaseMetric):
  ...
  }

-
+ Unmatched Ground Truths Format:
  {
  <ground truth label>: {
  'count': int,
@@ -721,13 +721,13 @@ class Metric(BaseMetric):
  is the prediction label value, and the innermost dictionary contains either a `count`
  or a list of `examples`. Each example includes the datum UID, ground truth bounding box,
  predicted bounding box, and prediction scores.
-
+ unmatched_predictions : dict
  A dictionary where each key is a prediction label value with no corresponding ground truth
- (false positives). The value is a dictionary containing either a `count` or a list of
+ (subset of false positives). The value is a dictionary containing either a `count` or a list of
  `examples`. Each example includes the datum UID, predicted bounding box, and prediction score.
-
+ unmatched_ground_truths : dict
  A dictionary where each key is a ground truth label value for which the model failed to predict
- (false negatives). The value is a dictionary containing either a `count` or a list of `examples`.
+ (subset of false negatives). The value is a dictionary containing either a `count` or a list of `examples`.
  Each example includes the datum UID and ground truth bounding box.
  score_threshold : float
  The confidence score threshold used to filter predictions.
@@ -744,8 +744,8 @@ class Metric(BaseMetric):
  type=MetricType.ConfusionMatrix.value,
  value={
  "confusion_matrix": confusion_matrix,
- "
- "
+ "unmatched_predictions": unmatched_predictions,
+ "unmatched_ground_truths": unmatched_ground_truths,
  },
  parameters={
  "score_threshold": score_threshold,
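Based on the "Format" docstrings above, an object-detection ConfusionMatrix metric now carries three top-level keys; the hunk headers suggest the two new keys replace the earlier hallucination / missing-prediction naming. Labels and counts below are invented, and the example lists are elided:

metric_value = {
    "confusion_matrix": {
        "car": {"truck": {"count": 1, "examples": []}},  # ground truth label -> prediction label
    },
    "unmatched_predictions": {
        "truck": {"count": 3, "examples": []},           # predictions with no matching ground truth
    },
    "unmatched_ground_truths": {
        "car": {"count": 2, "examples": []},             # ground truths the model failed to predict
    },
}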
valor_lite/object_detection/utilities.py

@@ -321,8 +321,8 @@ def _unpack_confusion_matrix_value(
  }


- def
-
+ def _unpack_unmatched_predictions_value(
+ unmatched_predictions: NDArray[np.float64],
  number_of_labels: int,
  number_of_examples: int,
  index_to_uid: dict[int, str],
@@ -336,12 +336,12 @@ def _unpack_hallucinations_value(
  ],
  ]:
  """
- Unpacks a numpy array of
+ Unpacks a numpy array of unmatched_prediction counts and examples.
  """

  datum_idx = (
  lambda pd_label_idx, example_idx: int( # noqa: E731 - lambda fn
-
+ unmatched_predictions[
  pd_label_idx,
  example_idx * 3 + 1,
  ]
@@ -350,7 +350,7 @@ def _unpack_hallucinations_value(

  prediction_idx = (
  lambda pd_label_idx, example_idx: int( # noqa: E731 - lambda fn
-
+ unmatched_predictions[
  pd_label_idx,
  example_idx * 3 + 2,
  ]
@@ -359,7 +359,7 @@ def _unpack_hallucinations_value(

  score_idx = (
  lambda pd_label_idx, example_idx: float( # noqa: E731 - lambda fn
-
+ unmatched_predictions[
  pd_label_idx,
  example_idx * 3 + 3,
  ]
@@ -369,7 +369,7 @@ def _unpack_hallucinations_value(
  return {
  index_to_label[pd_label_idx]: {
  "count": max(
- int(
+ int(unmatched_predictions[pd_label_idx, 0]),
  0,
  ),
  "examples": [
@@ -392,8 +392,8 @@ def _unpack_hallucinations_value(
  }


- def
-
+ def _unpack_unmatched_ground_truths_value(
+ unmatched_ground_truths: NDArray[np.int32],
  number_of_labels: int,
  number_of_examples: int,
  index_to_uid: dict[int, str],
@@ -401,12 +401,12 @@ def _unpack_missing_predictions_value(
  groundtruth_examples: dict[int, NDArray[np.float16]],
  ) -> dict[str, dict[str, int | list[dict[str, str | dict[str, float]]]]]:
  """
- Unpacks a numpy array of
+ Unpacks a numpy array of unmatched ground truth counts and examples.
  """

  datum_idx = (
  lambda gt_label_idx, example_idx: int( # noqa: E731 - lambda fn
-
+ unmatched_ground_truths[
  gt_label_idx,
  example_idx * 2 + 1,
  ]
@@ -415,7 +415,7 @@ def _unpack_missing_predictions_value(

  groundtruth_idx = (
  lambda gt_label_idx, example_idx: int( # noqa: E731 - lambda fn
-
+ unmatched_ground_truths[
  gt_label_idx,
  example_idx * 2 + 2,
  ]
@@ -425,7 +425,7 @@ def _unpack_missing_predictions_value(
  return {
  index_to_label[gt_label_idx]: {
  "count": max(
- int(
+ int(unmatched_ground_truths[gt_label_idx, 0]),
  0,
  ),
  "examples": [
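The strides used by the lambdas above (`example_idx * 3 + offset` for predictions, `example_idx * 2 + offset` for ground truths) mirror the packing comments in object_detection/computation.py. Per the docstrings, the unpacked unmatched-ground-truths dictionary ends up shaped roughly like this; field names beyond "datum UID and ground truth bounding box" are assumptions, and all values are invented:

unmatched_ground_truths = {
    "person": {
        "count": 2,
        "examples": [
            {
                "datum": "img_0042",
                "ground_truth": {"xmin": 12.0, "xmax": 60.0, "ymin": 8.0, "ymax": 90.0},
            },
        ],
    },
}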
@@ -463,8 +463,8 @@ def unpack_confusion_matrix_into_metric_list(
  ) -> list[Metric]:
  (
  confusion_matrix,
-
-
+ unmatched_predictions,
+ unmatched_ground_truths,
  ) = results
  n_labels = len(index_to_label)
  return [
@@ -481,16 +481,18 @@ def unpack_confusion_matrix_into_metric_list(
  groundtruth_examples=groundtruth_examples,
  prediction_examples=prediction_examples,
  ),
-
-
+ unmatched_predictions=_unpack_unmatched_predictions_value(
+ unmatched_predictions=unmatched_predictions[
+ iou_idx, score_idx, :, :
+ ],
  number_of_labels=n_labels,
  number_of_examples=number_of_examples,
  index_to_label=index_to_label,
  index_to_uid=index_to_uid,
  prediction_examples=prediction_examples,
  ),
-
-
+ unmatched_ground_truths=_unpack_unmatched_ground_truths_value(
+ unmatched_ground_truths=unmatched_ground_truths[
  iou_idx, score_idx, :, :
  ],
  number_of_labels=n_labels,