valor-lite 0.33.12__py3-none-any.whl → 0.33.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of valor-lite might be problematic.
- valor_lite/classification/__init__.py +8 -21
- valor_lite/classification/computation.py +2 -2
- valor_lite/classification/manager.py +32 -244
- valor_lite/classification/metric.py +331 -372
- valor_lite/classification/utilities.py +222 -0
- valor_lite/object_detection/__init__.py +4 -35
- valor_lite/object_detection/computation.py +41 -37
- valor_lite/object_detection/manager.py +38 -492
- valor_lite/object_detection/metric.py +636 -696
- valor_lite/object_detection/utilities.py +505 -0
- valor_lite/schemas.py +10 -8
- valor_lite/semantic_segmentation/__init__.py +2 -17
- valor_lite/semantic_segmentation/computation.py +1 -1
- valor_lite/semantic_segmentation/manager.py +13 -116
- valor_lite/semantic_segmentation/metric.py +216 -239
- valor_lite/semantic_segmentation/utilities.py +104 -0
- {valor_lite-0.33.12.dist-info → valor_lite-0.33.14.dist-info}/METADATA +1 -1
- valor_lite-0.33.14.dist-info/RECORD +27 -0
- {valor_lite-0.33.12.dist-info → valor_lite-0.33.14.dist-info}/WHEEL +1 -1
- valor_lite-0.33.12.dist-info/RECORD +0 -24
- {valor_lite-0.33.12.dist-info → valor_lite-0.33.14.dist-info}/LICENSE +0 -0
- {valor_lite-0.33.12.dist-info → valor_lite-0.33.14.dist-info}/top_level.txt +0 -0
valor_lite/classification/utilities.py
@@ -0,0 +1,222 @@
+from collections import defaultdict
+
+import numpy as np
+from numpy.typing import NDArray
+from valor_lite.classification.metric import Metric, MetricType
+
+
+def unpack_precision_recall_rocauc_into_metric_lists(
+    results: tuple[
+        NDArray[np.int32],
+        NDArray[np.float64],
+        NDArray[np.float64],
+        NDArray[np.float64],
+        NDArray[np.float64],
+        NDArray[np.float64],
+        float,
+    ],
+    score_thresholds: list[float],
+    hardmax: bool,
+    label_metadata: NDArray[np.int32],
+    index_to_label: dict[int, str],
+) -> dict[MetricType, list[Metric]]:
+    (
+        counts,
+        precision,
+        recall,
+        accuracy,
+        f1_score,
+        rocauc,
+        mean_rocauc,
+    ) = results
+
+    metrics = defaultdict(list)
+
+    metrics[MetricType.ROCAUC] = [
+        Metric.roc_auc(
+            value=float(rocauc[label_idx]),
+            label=label,
+        )
+        for label_idx, label in index_to_label.items()
+        if label_metadata[label_idx, 0] > 0
+    ]
+
+    metrics[MetricType.mROCAUC] = [
+        Metric.mean_roc_auc(
+            value=float(mean_rocauc),
+        )
+    ]
+
+    metrics[MetricType.Accuracy] = [
+        Metric.accuracy(
+            value=float(accuracy[score_idx]),
+            score_threshold=score_threshold,
+            hardmax=hardmax,
+        )
+        for score_idx, score_threshold in enumerate(score_thresholds)
+    ]
+
+    for label_idx, label in index_to_label.items():
+        for score_idx, score_threshold in enumerate(score_thresholds):
+
+            kwargs = {
+                "label": label,
+                "hardmax": hardmax,
+                "score_threshold": score_threshold,
+            }
+            row = counts[:, label_idx]
+            metrics[MetricType.Counts].append(
+                Metric.counts(
+                    tp=int(row[score_idx, 0]),
+                    fp=int(row[score_idx, 1]),
+                    fn=int(row[score_idx, 2]),
+                    tn=int(row[score_idx, 3]),
+                    **kwargs,
+                )
+            )
+
+            # if no groundtruths exists for a label, skip it.
+            if label_metadata[label_idx, 0] == 0:
+                continue
+
+            metrics[MetricType.Precision].append(
+                Metric.precision(
+                    value=float(precision[score_idx, label_idx]),
+                    **kwargs,
+                )
+            )
+            metrics[MetricType.Recall].append(
+                Metric.recall(
+                    value=float(recall[score_idx, label_idx]),
+                    **kwargs,
+                )
+            )
+            metrics[MetricType.F1].append(
+                Metric.f1_score(
+                    value=float(f1_score[score_idx, label_idx]),
+                    **kwargs,
+                )
+            )
+    return metrics
+
+
+def _unpack_confusion_matrix_value(
+    confusion_matrix: NDArray[np.float64],
+    number_of_labels: int,
+    number_of_examples: int,
+    index_to_uid: dict[int, str],
+    index_to_label: dict[int, str],
+) -> dict[str, dict[str, dict[str, int | list[dict[str, str | float]]]]]:
+    """
+    Unpacks a numpy array of confusion matrix counts and examples.
+    """
+
+    datum_idx = lambda gt_label_idx, pd_label_idx, example_idx: int(  # noqa: E731 - lambda fn
+        confusion_matrix[
+            gt_label_idx,
+            pd_label_idx,
+            example_idx * 2 + 1,
+        ]
+    )
+
+    score_idx = lambda gt_label_idx, pd_label_idx, example_idx: float(  # noqa: E731 - lambda fn
+        confusion_matrix[
+            gt_label_idx,
+            pd_label_idx,
+            example_idx * 2 + 2,
+        ]
+    )
+
+    return {
+        index_to_label[gt_label_idx]: {
+            index_to_label[pd_label_idx]: {
+                "count": max(
+                    int(confusion_matrix[gt_label_idx, pd_label_idx, 0]),
+                    0,
+                ),
+                "examples": [
+                    {
+                        "datum": index_to_uid[
+                            datum_idx(gt_label_idx, pd_label_idx, example_idx)
+                        ],
+                        "score": score_idx(
+                            gt_label_idx, pd_label_idx, example_idx
+                        ),
+                    }
+                    for example_idx in range(number_of_examples)
+                    if datum_idx(gt_label_idx, pd_label_idx, example_idx) >= 0
+                ],
+            }
+            for pd_label_idx in range(number_of_labels)
+        }
+        for gt_label_idx in range(number_of_labels)
+    }
+
+
+def _unpack_missing_predictions_value(
+    missing_predictions: NDArray[np.int32],
+    number_of_labels: int,
+    number_of_examples: int,
+    index_to_uid: dict[int, str],
+    index_to_label: dict[int, str],
+) -> dict[str, dict[str, int | list[dict[str, str]]]]:
+    """
+    Unpacks a numpy array of missing prediction counts and examples.
+    """
+
+    datum_idx = (
+        lambda gt_label_idx, example_idx: int(  # noqa: E731 - lambda fn
+            missing_predictions[
+                gt_label_idx,
+                example_idx + 1,
+            ]
+        )
+    )
+
+    return {
+        index_to_label[gt_label_idx]: {
+            "count": max(
+                int(missing_predictions[gt_label_idx, 0]),
+                0,
+            ),
+            "examples": [
+                {"datum": index_to_uid[datum_idx(gt_label_idx, example_idx)]}
+                for example_idx in range(number_of_examples)
+                if datum_idx(gt_label_idx, example_idx) >= 0
+            ],
+        }
+        for gt_label_idx in range(number_of_labels)
+    }
+
+
+def unpack_confusion_matrix_into_metric_list(
+    results: tuple[NDArray[np.float64], NDArray[np.int32]],
+    score_thresholds: list[float],
+    number_of_examples: int,
+    index_to_uid: dict[int, str],
+    index_to_label: dict[int, str],
+) -> list[Metric]:
+
+    (confusion_matrix, missing_predictions) = results
+    n_scores, n_labels, _, _ = confusion_matrix.shape
+    return [
+        Metric.confusion_matrix(
+            score_threshold=score_threshold,
+            maximum_number_of_examples=number_of_examples,
+            confusion_matrix=_unpack_confusion_matrix_value(
+                confusion_matrix=confusion_matrix[score_idx, :, :, :],
+                number_of_labels=n_labels,
+                number_of_examples=number_of_examples,
+                index_to_label=index_to_label,
+                index_to_uid=index_to_uid,
+            ),
+            missing_predictions=_unpack_missing_predictions_value(
+                missing_predictions=missing_predictions[score_idx, :, :],
+                number_of_labels=n_labels,
+                number_of_examples=number_of_examples,
+                index_to_label=index_to_label,
+                index_to_uid=index_to_uid,
+            ),
+        )
+        for score_idx, score_threshold in enumerate(score_thresholds)
+    ]
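The new module converts the raw numpy outputs of the classification computation into typed Metric objects keyed by MetricType. A minimal usage sketch with dummy arrays; the shapes are inferred from how the unpacker indexes its inputs, and the values and label names are illustrative only, not part of the diff:

import numpy as np

from valor_lite.classification.metric import MetricType
from valor_lite.classification.utilities import (
    unpack_precision_recall_rocauc_into_metric_lists,
)

n_scores, n_labels = 2, 2
raw_results = (
    np.zeros((n_scores, n_labels, 4), dtype=np.int32),  # counts (tp, fp, fn, tn)
    np.zeros((n_scores, n_labels), dtype=np.float64),    # precision
    np.zeros((n_scores, n_labels), dtype=np.float64),    # recall
    np.zeros(n_scores, dtype=np.float64),                 # accuracy
    np.zeros((n_scores, n_labels), dtype=np.float64),    # f1_score
    np.zeros(n_labels, dtype=np.float64),                  # rocauc
    0.0,                                                    # mean_rocauc
)
label_metadata = np.array([[5, 5], [3, 3]], dtype=np.int32)  # per-label groundtruth/prediction counts

metrics = unpack_precision_recall_rocauc_into_metric_lists(
    results=raw_results,
    score_thresholds=[0.5, 0.75],
    hardmax=True,
    label_metadata=label_metadata,
    index_to_label={0: "cat", 1: "dog"},
)
print(len(metrics[MetricType.Precision]))  # one entry per (score threshold, label with groundtruths)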
valor_lite/object_detection/__init__.py
@@ -3,56 +3,25 @@ from .computation import (
     compute_bbox_iou,
     compute_bitmask_iou,
     compute_confusion_matrix,
-    compute_metrics,
     compute_polygon_iou,
+    compute_precion_recall,
     compute_ranked_pairs,
 )
 from .manager import DataLoader, Evaluator
-from .metric import (
-    AP,
-    AR,
-    F1,
-    Accuracy,
-    APAveragedOverIOUs,
-    ARAveragedOverScores,
-    ConfusionMatrix,
-    Counts,
-    MetricType,
-    Precision,
-    PrecisionRecallCurve,
-    Recall,
-    mAP,
-    mAPAveragedOverIOUs,
-    mAR,
-    mARAveragedOverScores,
-)
+from .metric import Metric, MetricType
 
 __all__ = [
     "Bitmask",
     "BoundingBox",
     "Detection",
     "Polygon",
+    "Metric",
     "MetricType",
-    "Counts",
-    "Precision",
-    "Recall",
-    "Accuracy",
-    "F1",
-    "AP",
-    "mAP",
-    "AR",
-    "mAR",
-    "APAveragedOverIOUs",
-    "mAPAveragedOverIOUs",
-    "ARAveragedOverScores",
-    "mARAveragedOverScores",
-    "PrecisionRecallCurve",
-    "ConfusionMatrix",
     "compute_bbox_iou",
     "compute_bitmask_iou",
     "compute_polygon_iou",
     "compute_ranked_pairs",
-    "compute_metrics",
+    "compute_precion_recall",
     "compute_confusion_matrix",
     "DataLoader",
     "Evaluator",
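Note the consolidation: the per-metric classes removed above (AP, mAP, Counts, PrecisionRecallCurve, ConfusionMatrix, ...) are replaced by a single Metric container plus the MetricType enum, and the computation entry point is now exported as compute_precion_recall. A minimal sketch of the import change for downstream code:

# Before (0.33.12): one exported class per metric kind.
# from valor_lite.object_detection import AP, mAP, Counts, PrecisionRecallCurve

# After (0.33.14): a single Metric container plus the MetricType enum.
from valor_lite.object_detection import Metric, MetricType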
valor_lite/object_detection/computation.py
@@ -5,7 +5,7 @@ from numpy.typing import NDArray
 
 def compute_bbox_iou(data: NDArray[np.float64]) -> NDArray[np.float64]:
     """
-    Computes intersection-over-union (
+    Computes intersection-over-union (IOU) for axis-aligned bounding boxes.
 
     Takes data with shape (N, 8):
 
@@ -20,7 +20,7 @@ def compute_bbox_iou(data: NDArray[np.float64]) -> NDArray[np.float64]:
 
     Returns data with shape (N, 1):
 
-        Index 0 -
+        Index 0 - IOU
 
     Parameters
     ----------
@@ -30,7 +30,7 @@ def compute_bbox_iou(data: NDArray[np.float64]) -> NDArray[np.float64]:
     Returns
     -------
     NDArray[np.float64]
-        Computed
+        Computed IOU's.
     """
     if data.size == 0:
         return np.array([], dtype=np.float64)
@@ -76,7 +76,7 @@ def compute_bitmask_iou(data: NDArray[np.bool_]) -> NDArray[np.float64]:
 
 def compute_bitmask_iou(data: NDArray[np.bool_]) -> NDArray[np.float64]:
     """
-    Computes intersection-over-union (
+    Computes intersection-over-union (IOU) for bitmasks.
 
     Takes data with shape (N, 2):
 
@@ -85,7 +85,7 @@ def compute_bitmask_iou(data: NDArray[np.bool_]) -> NDArray[np.float64]:
 
     Returns data with shape (N, 1):
 
-        Index 0 -
+        Index 0 - IOU
 
     Parameters
     ----------
@@ -95,7 +95,7 @@ def compute_bitmask_iou(data: NDArray[np.bool_]) -> NDArray[np.float64]:
     Returns
     -------
     NDArray[np.float64]
-        Computed
+        Computed IOU's.
     """
 
     if data.size == 0:
@@ -125,7 +125,7 @@ def compute_polygon_iou(
     data: NDArray[np.float64],
 ) -> NDArray[np.float64]:
     """
-    Computes intersection-over-union (
+    Computes intersection-over-union (IOU) for shapely polygons.
 
     Takes data with shape (N, 2):
 
@@ -134,7 +134,7 @@ def compute_polygon_iou(
 
     Returns data with shape (N, 1):
 
-        Index 0 -
+        Index 0 - IOU
 
     Parameters
     ----------
@@ -144,7 +144,7 @@ def compute_polygon_iou(
     Returns
     -------
     NDArray[np.float64]
-        Computed
+        Computed IOU's.
     """
 
     if data.size == 0:
@@ -225,7 +225,7 @@ def compute_ranked_pairs(
         Index 0 - Datum Index
         Index 1 - GroundTruth Index
         Index 2 - Prediction Index
-        Index 3 -
+        Index 3 - IOU
         Index 4 - GroundTruth Label Index
         Index 5 - Prediction Label Index
         Index 6 - Score
@@ -262,7 +262,7 @@ def compute_ranked_pairs(
     return ranked_pairs[indices]
 
 
-def compute_metrics(
+def compute_precion_recall(
     data: NDArray[np.float64],
     label_metadata: NDArray[np.int32],
     iou_thresholds: NDArray[np.float64],
@@ -282,6 +282,7 @@ def compute_metrics(
     ],
     NDArray[np.float64],
     NDArray[np.float64],
+    NDArray[np.float64],
 ]:
     """
     Computes Object Detection metrics.
@@ -291,7 +292,7 @@ def compute_metrics(
         Index 0 - Datum Index
         Index 1 - GroundTruth Index
         Index 2 - Prediction Index
-        Index 3 -
+        Index 3 - IOU
         Index 4 - GroundTruth Label Index
         Index 5 - Prediction Label Index
         Index 6 - Score
@@ -303,19 +304,21 @@ def compute_metrics(
     label_metadata : NDArray[np.int32]
         An array containing metadata related to labels.
     iou_thresholds : NDArray[np.float64]
-        A 1-D array containing
+        A 1-D array containing IOU thresholds.
     score_thresholds : NDArray[np.float64]
        A 1-D array containing score thresholds.
 
     Returns
     -------
-    tuple[NDArray, NDArray, NDArray, float]
+    tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], float]
         Average Precision results.
-    tuple[NDArray, NDArray, NDArray, float]
+    tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], float]
         Average Recall results.
-    np.
-
-    np.
+    NDArray[np.float64]
+        Accuracy.
+    NDArray[np.float64]
+        Precision, Recall, TP, FP, FN, F1 Score.
+    NDArray[np.float64]
         Interpolated Precision-Recall Curves.
     """
 
@@ -325,13 +328,14 @@ def compute_metrics(
     n_scores = score_thresholds.shape[0]
 
     if n_ious == 0:
-        raise ValueError("At least one
+        raise ValueError("At least one IOU threshold must be passed.")
     elif n_scores == 0:
         raise ValueError("At least one score threshold must be passed.")
 
-    average_precision = np.zeros((n_ious, n_labels))
-    average_recall = np.zeros((n_scores, n_labels))
-
+    average_precision = np.zeros((n_ious, n_labels), dtype=np.float64)
+    average_recall = np.zeros((n_scores, n_labels), dtype=np.float64)
+    accuracy = np.zeros((n_ious, n_scores), dtype=np.float64)
+    counts = np.zeros((n_ious, n_scores, n_labels, 6), dtype=np.float64)
 
     pd_labels = data[:, 5].astype(np.int32)
     scores = data[:, 6]
@@ -417,14 +421,6 @@ def compute_metrics(
                 out=f1_score,
             )
 
-            accuracy = np.zeros_like(tp_count)
-            np.divide(
-                tp_count,
-                (gt_count + pd_count),
-                where=(gt_count + pd_count) > 1e-9,
-                out=accuracy,
-            )
-
             counts[iou_idx][score_idx] = np.concatenate(
                 (
                     tp_count[:, np.newaxis],
@@ -433,11 +429,18 @@ def compute_metrics(
                    precision[:, np.newaxis],
                    recall[:, np.newaxis],
                    f1_score[:, np.newaxis],
-                    accuracy[:, np.newaxis],
                ),
                axis=1,
            )
 
+            # caluculate accuracy
+            total_pd_count = label_metadata[:, 1].sum()
+            accuracy[iou_idx, score_idx] = (
+                (tp_count.sum() / total_pd_count)
+                if total_pd_count > 1e-9
+                else 0.0
+            )
+
            # calculate recall for AR
            average_recall[score_idx] += recall
 
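The added block above changes how accuracy is computed: instead of the removed per-label tp / (groundtruth count + prediction count) ratio, accuracy at each (IOU threshold, score threshold) pair is now the summed true-positive count divided by the total prediction count. A small numeric sketch of the new formula, with illustrative values only:

import numpy as np

# Three labels with 3, 1 and 2 true positives at some threshold pair,
# and 10 predictions overall (label_metadata[:, 1].sum() in the diff).
tp_count = np.array([3.0, 1.0, 2.0])
total_pd_count = 10

accuracy = (tp_count.sum() / total_pd_count) if total_pd_count > 1e-9 else 0.0
print(accuracy)  # 0.6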
@@ -529,18 +532,18 @@ def compute_metrics(
     mAR = np.zeros(n_scores, dtype=np.float64)
 
     # calculate AR and AR averaged over thresholds
-
+    APAveragedOverIOUs = average_precision.mean(axis=0)
     ARAveragedOverScores = average_recall.mean(axis=0)
 
     # calculate mAP and mAR averaged over thresholds
-
+    mAPAveragedOverIOUs = mAP.mean(axis=0)
     mARAveragedOverScores = mAR.mean(axis=0)
 
     ap_results = (
         average_precision,
         mAP,
-
-
+        APAveragedOverIOUs,
+        mAPAveragedOverIOUs,
     )
     ar_results = (
         average_recall,
@@ -552,6 +555,7 @@ def compute_metrics(
     return (
         ap_results,
         ar_results,
+        accuracy,
         counts,
         pr_curve,
     )
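The renamed aggregates above are plain means over the threshold axes (APAveragedOverIOUs averages AP over IOU thresholds, mAPAveragedOverIOUs does the same for mAP), and accuracy is now returned as its own element of the result tuple. A short numpy sketch of the averaging step, with illustrative values:

import numpy as np

# AP for 2 IOU thresholds x 3 labels; averaging over the IOU axis (axis=0)
# yields one AP-averaged-over-IOUs value per label, matching the
# APAveragedOverIOUs assignment above.
average_precision = np.array(
    [[0.50, 0.80, 0.90],
     [0.30, 0.60, 0.70]]
)
ap_averaged_over_ious = average_precision.mean(axis=0)
print(ap_averaged_over_ious)  # [0.4 0.7 0.8]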
@@ -639,7 +643,7 @@ def compute_confusion_matrix(
         Index 0 - Datum Index
         Index 1 - GroundTruth Index
         Index 2 - Prediction Index
-        Index 3 -
+        Index 3 - IOU
         Index 4 - GroundTruth Label Index
         Index 5 - Prediction Label Index
         Index 6 - Score
@@ -651,7 +655,7 @@ def compute_confusion_matrix(
     label_metadata : NDArray[np.int32]
         An array containing metadata related to labels.
     iou_thresholds : NDArray[np.float64]
-        A 1-D array containing
+        A 1-D array containing IOU thresholds.
     score_thresholds : NDArray[np.float64]
         A 1-D array containing score thresholds.
     n_examples : int
|