valor-lite 0.33.13__py3-none-any.whl → 0.33.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valor_lite/classification/__init__.py +8 -21
- valor_lite/classification/computation.py +2 -2
- valor_lite/classification/manager.py +32 -244
- valor_lite/classification/metric.py +331 -372
- valor_lite/classification/utilities.py +222 -0
- valor_lite/object_detection/__init__.py +4 -35
- valor_lite/object_detection/computation.py +25 -22
- valor_lite/object_detection/manager.py +38 -497
- valor_lite/object_detection/metric.py +633 -706
- valor_lite/object_detection/utilities.py +505 -0
- valor_lite/schemas.py +10 -8
- valor_lite/semantic_segmentation/__init__.py +2 -17
- valor_lite/semantic_segmentation/computation.py +1 -1
- valor_lite/semantic_segmentation/manager.py +13 -116
- valor_lite/semantic_segmentation/metric.py +216 -239
- valor_lite/semantic_segmentation/utilities.py +104 -0
- {valor_lite-0.33.13.dist-info → valor_lite-0.33.15.dist-info}/METADATA +1 -1
- valor_lite-0.33.15.dist-info/RECORD +27 -0
- valor_lite-0.33.13.dist-info/RECORD +0 -24
- {valor_lite-0.33.13.dist-info → valor_lite-0.33.15.dist-info}/LICENSE +0 -0
- {valor_lite-0.33.13.dist-info → valor_lite-0.33.15.dist-info}/WHEEL +0 -0
- {valor_lite-0.33.13.dist-info → valor_lite-0.33.15.dist-info}/top_level.txt +0 -0
valor_lite/classification/utilities.py

@@ -0,0 +1,222 @@
+from collections import defaultdict
+
+import numpy as np
+from numpy.typing import NDArray
+from valor_lite.classification.metric import Metric, MetricType
+
+
+def unpack_precision_recall_rocauc_into_metric_lists(
+    results: tuple[
+        NDArray[np.int32],
+        NDArray[np.float64],
+        NDArray[np.float64],
+        NDArray[np.float64],
+        NDArray[np.float64],
+        NDArray[np.float64],
+        float,
+    ],
+    score_thresholds: list[float],
+    hardmax: bool,
+    label_metadata: NDArray[np.int32],
+    index_to_label: dict[int, str],
+) -> dict[MetricType, list[Metric]]:
+    (
+        counts,
+        precision,
+        recall,
+        accuracy,
+        f1_score,
+        rocauc,
+        mean_rocauc,
+    ) = results
+
+    metrics = defaultdict(list)
+
+    metrics[MetricType.ROCAUC] = [
+        Metric.roc_auc(
+            value=float(rocauc[label_idx]),
+            label=label,
+        )
+        for label_idx, label in index_to_label.items()
+        if label_metadata[label_idx, 0] > 0
+    ]
+
+    metrics[MetricType.mROCAUC] = [
+        Metric.mean_roc_auc(
+            value=float(mean_rocauc),
+        )
+    ]
+
+    metrics[MetricType.Accuracy] = [
+        Metric.accuracy(
+            value=float(accuracy[score_idx]),
+            score_threshold=score_threshold,
+            hardmax=hardmax,
+        )
+        for score_idx, score_threshold in enumerate(score_thresholds)
+    ]
+
+    for label_idx, label in index_to_label.items():
+        for score_idx, score_threshold in enumerate(score_thresholds):
+
+            kwargs = {
+                "label": label,
+                "hardmax": hardmax,
+                "score_threshold": score_threshold,
+            }
+            row = counts[:, label_idx]
+            metrics[MetricType.Counts].append(
+                Metric.counts(
+                    tp=int(row[score_idx, 0]),
+                    fp=int(row[score_idx, 1]),
+                    fn=int(row[score_idx, 2]),
+                    tn=int(row[score_idx, 3]),
+                    **kwargs,
+                )
+            )
+
+            # if no groundtruths exists for a label, skip it.
+            if label_metadata[label_idx, 0] == 0:
+                continue
+
+            metrics[MetricType.Precision].append(
+                Metric.precision(
+                    value=float(precision[score_idx, label_idx]),
+                    **kwargs,
+                )
+            )
+            metrics[MetricType.Recall].append(
+                Metric.recall(
+                    value=float(recall[score_idx, label_idx]),
+                    **kwargs,
+                )
+            )
+            metrics[MetricType.F1].append(
+                Metric.f1_score(
+                    value=float(f1_score[score_idx, label_idx]),
+                    **kwargs,
+                )
+            )
+    return metrics
+
+
+def _unpack_confusion_matrix_value(
+    confusion_matrix: NDArray[np.float64],
+    number_of_labels: int,
+    number_of_examples: int,
+    index_to_uid: dict[int, str],
+    index_to_label: dict[int, str],
+) -> dict[str, dict[str, dict[str, int | list[dict[str, str | float]]]]]:
+    """
+    Unpacks a numpy array of confusion matrix counts and examples.
+    """
+
+    datum_idx = lambda gt_label_idx, pd_label_idx, example_idx: int(  # noqa: E731 - lambda fn
+        confusion_matrix[
+            gt_label_idx,
+            pd_label_idx,
+            example_idx * 2 + 1,
+        ]
+    )
+
+    score_idx = lambda gt_label_idx, pd_label_idx, example_idx: float(  # noqa: E731 - lambda fn
+        confusion_matrix[
+            gt_label_idx,
+            pd_label_idx,
+            example_idx * 2 + 2,
+        ]
+    )
+
+    return {
+        index_to_label[gt_label_idx]: {
+            index_to_label[pd_label_idx]: {
+                "count": max(
+                    int(confusion_matrix[gt_label_idx, pd_label_idx, 0]),
+                    0,
+                ),
+                "examples": [
+                    {
+                        "datum": index_to_uid[
+                            datum_idx(gt_label_idx, pd_label_idx, example_idx)
+                        ],
+                        "score": score_idx(
+                            gt_label_idx, pd_label_idx, example_idx
+                        ),
+                    }
+                    for example_idx in range(number_of_examples)
+                    if datum_idx(gt_label_idx, pd_label_idx, example_idx) >= 0
+                ],
+            }
+            for pd_label_idx in range(number_of_labels)
+        }
+        for gt_label_idx in range(number_of_labels)
+    }
+
+
+def _unpack_missing_predictions_value(
+    missing_predictions: NDArray[np.int32],
+    number_of_labels: int,
+    number_of_examples: int,
+    index_to_uid: dict[int, str],
+    index_to_label: dict[int, str],
+) -> dict[str, dict[str, int | list[dict[str, str]]]]:
+    """
+    Unpacks a numpy array of missing prediction counts and examples.
+    """
+
+    datum_idx = (
+        lambda gt_label_idx, example_idx: int(  # noqa: E731 - lambda fn
+            missing_predictions[
+                gt_label_idx,
+                example_idx + 1,
+            ]
+        )
+    )
+
+    return {
+        index_to_label[gt_label_idx]: {
+            "count": max(
+                int(missing_predictions[gt_label_idx, 0]),
+                0,
+            ),
+            "examples": [
+                {"datum": index_to_uid[datum_idx(gt_label_idx, example_idx)]}
+                for example_idx in range(number_of_examples)
+                if datum_idx(gt_label_idx, example_idx) >= 0
+            ],
+        }
+        for gt_label_idx in range(number_of_labels)
+    }
+
+
+def unpack_confusion_matrix_into_metric_list(
+    results: tuple[NDArray[np.float64], NDArray[np.int32]],
+    score_thresholds: list[float],
+    number_of_examples: int,
+    index_to_uid: dict[int, str],
+    index_to_label: dict[int, str],
+) -> list[Metric]:
+
+    (confusion_matrix, missing_predictions) = results
+    n_scores, n_labels, _, _ = confusion_matrix.shape
+    return [
+        Metric.confusion_matrix(
+            score_threshold=score_threshold,
+            maximum_number_of_examples=number_of_examples,
+            confusion_matrix=_unpack_confusion_matrix_value(
+                confusion_matrix=confusion_matrix[score_idx, :, :, :],
+                number_of_labels=n_labels,
+                number_of_examples=number_of_examples,
+                index_to_label=index_to_label,
+                index_to_uid=index_to_uid,
+            ),
+            missing_predictions=_unpack_missing_predictions_value(
+                missing_predictions=missing_predictions[score_idx, :, :],
+                number_of_labels=n_labels,
+                number_of_examples=number_of_examples,
+                index_to_label=index_to_label,
+                index_to_uid=index_to_uid,
+            ),
+        )
+        for score_idx, score_threshold in enumerate(score_thresholds)
+    ]
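The centerpiece of the new utilities module is `unpack_precision_recall_rocauc_into_metric_lists`, which converts the raw numpy outputs of the computation layer into per-type `Metric` lists. Below is a minimal sketch of a call, assuming valor-lite 0.33.15 is installed; the array values are illustrative and the shapes are inferred from how the helper indexes its inputs (they are not documented in this diff):

```python
import numpy as np

from valor_lite.classification.utilities import (
    unpack_precision_recall_rocauc_into_metric_lists,
)

n_scores, n_labels = 1, 2  # one score threshold, two labels

# Inferred shapes: counts is (n_scores, n_labels, 4) holding tp/fp/fn/tn,
# precision/recall/f1 are (n_scores, n_labels), accuracy is (n_scores,),
# rocauc is (n_labels,), and mean_rocauc is a plain float.
counts = np.zeros((n_scores, n_labels, 4), dtype=np.int32)
precision = np.ones((n_scores, n_labels), dtype=np.float64)
recall = np.ones((n_scores, n_labels), dtype=np.float64)
accuracy = np.ones(n_scores, dtype=np.float64)
f1 = np.ones((n_scores, n_labels), dtype=np.float64)
rocauc = np.ones(n_labels, dtype=np.float64)

# Column 0 of label_metadata is the ground-truth count per label; labels
# with zero ground truths are skipped for ROCAUC/precision/recall/F1.
label_metadata = np.array([[5, 5], [0, 0]], dtype=np.int32)

metrics = unpack_precision_recall_rocauc_into_metric_lists(
    results=(counts, precision, recall, accuracy, f1, rocauc, 1.0),
    score_thresholds=[0.5],
    hardmax=True,
    label_metadata=label_metadata,
    index_to_label={0: "cat", 1: "dog"},
)
```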
valor_lite/object_detection/__init__.py

@@ -3,56 +3,25 @@ from .computation import (
     compute_bbox_iou,
     compute_bitmask_iou,
     compute_confusion_matrix,
-    compute_metrics,
     compute_polygon_iou,
+    compute_precion_recall,
     compute_ranked_pairs,
 )
 from .manager import DataLoader, Evaluator
-from .metric import (
-    AP,
-    AR,
-    F1,
-    Accuracy,
-    APAveragedOverIOUs,
-    ARAveragedOverScores,
-    ConfusionMatrix,
-    Counts,
-    MetricType,
-    Precision,
-    PrecisionRecallCurve,
-    Recall,
-    mAP,
-    mAPAveragedOverIOUs,
-    mAR,
-    mARAveragedOverScores,
-)
+from .metric import Metric, MetricType
 
 __all__ = [
     "Bitmask",
     "BoundingBox",
     "Detection",
     "Polygon",
+    "Metric",
     "MetricType",
-    "Counts",
-    "Precision",
-    "Recall",
-    "Accuracy",
-    "F1",
-    "AP",
-    "mAP",
-    "AR",
-    "mAR",
-    "APAveragedOverIOUs",
-    "mAPAveragedOverIOUs",
-    "ARAveragedOverScores",
-    "mARAveragedOverScores",
-    "PrecisionRecallCurve",
-    "ConfusionMatrix",
     "compute_bbox_iou",
     "compute_bitmask_iou",
     "compute_polygon_iou",
     "compute_ranked_pairs",
-    "compute_metrics",
+    "compute_precion_recall",
    "compute_confusion_matrix",
    "DataLoader",
    "Evaluator",
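For downstream code, the effect of this hunk is that the per-metric classes (`AP`, `Counts`, `PrecisionRecallCurve`, and so on) are no longer part of the public surface; results are handled through the generic `Metric` container and distinguished by `MetricType`. A hedged sketch of the import change follows; the filter assumes `Metric` exposes its type as an attribute, which this diff does not show:

```python
# 0.33.13 style (no longer importable):
# from valor_lite.object_detection import AP, Counts, MetricType

# 0.33.15 style:
from valor_lite.object_detection import Metric, MetricType


def only_average_precision(metrics: list[Metric]) -> list[Metric]:
    # hypothetical filter; assumes a `type` attribute on Metric
    return [m for m in metrics if getattr(m, "type", None) == MetricType.AP]
```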
valor_lite/object_detection/computation.py

@@ -5,7 +5,7 @@ from numpy.typing import NDArray
 
 def compute_bbox_iou(data: NDArray[np.float64]) -> NDArray[np.float64]:
     """
-    Computes intersection-over-union (
+    Computes intersection-over-union (IOU) for axis-aligned bounding boxes.
 
     Takes data with shape (N, 8):
 
@@ -20,7 +20,7 @@ def compute_bbox_iou(data: NDArray[np.float64]) -> NDArray[np.float64]:
 
     Returns data with shape (N, 1):
 
-    Index 0 -
+    Index 0 - IOU
 
     Parameters
     ----------
@@ -30,7 +30,7 @@ def compute_bbox_iou(data: NDArray[np.float64]) -> NDArray[np.float64]:
     Returns
     -------
     NDArray[np.float64]
-        Computed
+        Computed IOU's.
     """
     if data.size == 0:
         return np.array([], dtype=np.float64)
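For reference, the quantity these docstrings describe is intersection area divided by union area. A self-contained numpy sketch of the formula for two axis-aligned boxes; the (xmin, ymin, xmax, ymax) layout here is for illustration only and is not the library's (N, 8) column packing:

```python
import numpy as np


def bbox_iou(a: np.ndarray, b: np.ndarray) -> float:
    """IOU of two boxes given as (xmin, ymin, xmax, ymax)."""
    iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))  # intersection width
    ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))  # intersection height
    intersection = iw * ih
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - intersection
    return intersection / union if union > 0 else 0.0


# two unit squares offset by 0.5 in x: intersection 0.5, union 1.5, IOU 1/3
print(bbox_iou(np.array([0.0, 0.0, 1.0, 1.0]), np.array([0.5, 0.0, 1.5, 1.0])))
```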
@@ -76,7 +76,7 @@ def compute_bbox_iou(data: NDArray[np.float64]) -> NDArray[np.float64]:
 
 def compute_bitmask_iou(data: NDArray[np.bool_]) -> NDArray[np.float64]:
     """
-    Computes intersection-over-union (
+    Computes intersection-over-union (IOU) for bitmasks.
 
     Takes data with shape (N, 2):
 
@@ -85,7 +85,7 @@ def compute_bitmask_iou(data: NDArray[np.bool_]) -> NDArray[np.float64]:
 
     Returns data with shape (N, 1):
 
-    Index 0 -
+    Index 0 - IOU
 
     Parameters
     ----------
@@ -95,7 +95,7 @@ def compute_bitmask_iou(data: NDArray[np.bool_]) -> NDArray[np.float64]:
     Returns
     -------
     NDArray[np.float64]
-        Computed
+        Computed IOU's.
     """
 
     if data.size == 0:
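For bitmasks the same ratio reduces to boolean set operations: the intersection is a pixelwise logical AND and the union a logical OR. A minimal sketch, independent of the library's (N, 2) packing:

```python
import numpy as np

a = np.zeros((4, 4), dtype=np.bool_)
b = np.zeros((4, 4), dtype=np.bool_)
a[:2, :] = True  # top half of the frame
b[:, :2] = True  # left half of the frame

intersection = np.logical_and(a, b).sum()  # 4 pixels
union = np.logical_or(a, b).sum()          # 12 pixels
print(intersection / union)                # 1/3
```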
@@ -125,7 +125,7 @@ def compute_polygon_iou(
     data: NDArray[np.float64],
 ) -> NDArray[np.float64]:
     """
-    Computes intersection-over-union (
+    Computes intersection-over-union (IOU) for shapely polygons.
 
     Takes data with shape (N, 2):
 
@@ -134,7 +134,7 @@ def compute_polygon_iou(
 
     Returns data with shape (N, 1):
 
-    Index 0 -
+    Index 0 - IOU
 
     Parameters
     ----------
@@ -144,7 +144,7 @@ def compute_polygon_iou(
     Returns
     -------
     NDArray[np.float64]
-        Computed
+        Computed IOU's.
     """
 
     if data.size == 0:
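The docstring refers to shapely polygons, and with shapely the same ratio can be written directly from polygon areas. A minimal sketch, assuming shapely is available:

```python
from shapely.geometry import Polygon

a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
b = Polygon([(1, 0), (3, 0), (3, 2), (1, 2)])

# intersection area 2, union area 6
print(a.intersection(b).area / a.union(b).area)  # 1/3
```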
@@ -225,7 +225,7 @@ def compute_ranked_pairs(
     Index 0 - Datum Index
     Index 1 - GroundTruth Index
     Index 2 - Prediction Index
-    Index 3 -
+    Index 3 - IOU
     Index 4 - GroundTruth Label Index
     Index 5 - Prediction Label Index
     Index 6 - Score
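The seven-column layout documented above packs one groundtruth/prediction pairing per row of a flat float array. A toy row under that layout (values are illustrative):

```python
import numpy as np

# columns: datum idx, groundtruth idx, prediction idx, IOU,
#          groundtruth label idx, prediction label idx, score
pair = np.array([[0.0, 0.0, 1.0, 0.83, 2.0, 2.0, 0.91]])
assert pair.shape == (1, 7)
```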
@@ -262,7 +262,7 @@ def compute_ranked_pairs(
     return ranked_pairs[indices]
 
 
-def compute_metrics(
+def compute_precion_recall(
     data: NDArray[np.float64],
     label_metadata: NDArray[np.int32],
     iou_thresholds: NDArray[np.float64],
@@ -292,7 +292,7 @@ def compute_metrics(
     Index 0 - Datum Index
     Index 1 - GroundTruth Index
     Index 2 - Prediction Index
-    Index 3 -
+    Index 3 - IOU
     Index 4 - GroundTruth Label Index
     Index 5 - Prediction Label Index
     Index 6 - Score
@@ -304,7 +304,7 @@ def compute_metrics(
     label_metadata : NDArray[np.int32]
         An array containing metadata related to labels.
     iou_thresholds : NDArray[np.float64]
-        A 1-D array containing
+        A 1-D array containing IOU thresholds.
     score_thresholds : NDArray[np.float64]
         A 1-D array containing score thresholds.
 
@@ -328,7 +328,7 @@ def compute_metrics(
     n_scores = score_thresholds.shape[0]
 
     if n_ious == 0:
-        raise ValueError("At least one
+        raise ValueError("At least one IOU threshold must be passed.")
     elif n_scores == 0:
         raise ValueError("At least one score threshold must be passed.")
 
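Both threshold arguments must therefore be non-empty 1-D float64 arrays, or the guards above raise. The values below are purely illustrative, not library defaults:

```python
import numpy as np

iou_thresholds = np.arange(0.5, 1.0, 0.05, dtype=np.float64)  # 0.50, 0.55, ..., 0.95
score_thresholds = np.array([0.0, 0.25, 0.5, 0.75], dtype=np.float64)
```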
@@ -408,17 +408,20 @@ def compute_metrics(
 
             # calculate component metrics
             recall = np.zeros_like(tp_count)
-            precision = np.zeros_like(tp_count)
             np.divide(tp_count, gt_count, where=gt_count > 1e-9, out=recall)
+
+            precision = np.zeros_like(tp_count)
             np.divide(tp_count, pd_count, where=pd_count > 1e-9, out=precision)
+
             fn_count = gt_count - tp_count
 
             f1_score = np.zeros_like(precision)
             np.divide(
-                np.multiply(precision, recall),
+                2 * np.multiply(precision, recall),
                 (precision + recall),
                 where=(precision + recall) > 1e-9,
                 out=f1_score,
+                dtype=np.float64,
             )
 
             counts[iou_idx][score_idx] = np.concatenate(
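The `2 *` in this hunk is a genuine bug fix: 0.33.13 computed precision * recall / (precision + recall), which is exactly half of F1 = 2 * precision * recall / (precision + recall). A quick numeric check:

```python
import numpy as np

precision = np.array([0.5])
recall = np.array([0.5])

old = precision * recall / (precision + recall)      # 0.25, half of F1
new = 2 * precision * recall / (precision + recall)  # 0.50, correct F1
assert np.allclose(new, 2 * old)
```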
@@ -532,18 +535,18 @@ def compute_metrics(
     mAR = np.zeros(n_scores, dtype=np.float64)
 
     # calculate AR and AR averaged over thresholds
-
+    APAveragedOverIOUs = average_precision.mean(axis=0)
     ARAveragedOverScores = average_recall.mean(axis=0)
 
     # calculate mAP and mAR averaged over thresholds
-
+    mAPAveragedOverIOUs = mAP.mean(axis=0)
     mARAveragedOverScores = mAR.mean(axis=0)
 
     ap_results = (
         average_precision,
         mAP,
-
-
+        APAveragedOverIOUs,
+        mAPAveragedOverIOUs,
     )
     ar_results = (
         average_recall,
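The restored aggregations average over the IOU-threshold axis (axis 0), collapsing per-threshold AP values to one value per label. A toy illustration with hypothetical AP values:

```python
import numpy as np

# rows: IOU thresholds (e.g. 0.5 and 0.75); columns: labels
average_precision = np.array([
    [0.9, 0.6],
    [0.7, 0.4],
])
print(average_precision.mean(axis=0))  # [0.8 0.5]
```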
@@ -643,7 +646,7 @@ def compute_confusion_matrix(
     Index 0 - Datum Index
     Index 1 - GroundTruth Index
     Index 2 - Prediction Index
-    Index 3 -
+    Index 3 - IOU
     Index 4 - GroundTruth Label Index
     Index 5 - Prediction Label Index
     Index 6 - Score
@@ -655,7 +658,7 @@ def compute_confusion_matrix(
     label_metadata : NDArray[np.int32]
         An array containing metadata related to labels.
     iou_thresholds : NDArray[np.float64]
-        A 1-D array containing
+        A 1-D array containing IOU thresholds.
     score_thresholds : NDArray[np.float64]
         A 1-D array containing score thresholds.
     n_examples : int