valor-lite 0.36.5__py3-none-any.whl → 0.37.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valor_lite/cache/__init__.py +11 -0
- valor_lite/cache/compute.py +211 -0
- valor_lite/cache/ephemeral.py +302 -0
- valor_lite/cache/persistent.py +536 -0
- valor_lite/classification/__init__.py +5 -10
- valor_lite/classification/annotation.py +4 -0
- valor_lite/classification/computation.py +233 -251
- valor_lite/classification/evaluator.py +882 -0
- valor_lite/classification/loader.py +97 -0
- valor_lite/classification/metric.py +141 -4
- valor_lite/classification/shared.py +184 -0
- valor_lite/classification/utilities.py +221 -118
- valor_lite/exceptions.py +5 -0
- valor_lite/object_detection/__init__.py +5 -4
- valor_lite/object_detection/annotation.py +13 -1
- valor_lite/object_detection/computation.py +367 -304
- valor_lite/object_detection/evaluator.py +804 -0
- valor_lite/object_detection/loader.py +292 -0
- valor_lite/object_detection/metric.py +152 -3
- valor_lite/object_detection/shared.py +206 -0
- valor_lite/object_detection/utilities.py +182 -109
- valor_lite/semantic_segmentation/__init__.py +5 -4
- valor_lite/semantic_segmentation/annotation.py +7 -0
- valor_lite/semantic_segmentation/computation.py +20 -110
- valor_lite/semantic_segmentation/evaluator.py +414 -0
- valor_lite/semantic_segmentation/loader.py +205 -0
- valor_lite/semantic_segmentation/shared.py +149 -0
- valor_lite/semantic_segmentation/utilities.py +6 -23
- {valor_lite-0.36.5.dist-info → valor_lite-0.37.5.dist-info}/METADATA +3 -1
- valor_lite-0.37.5.dist-info/RECORD +49 -0
- {valor_lite-0.36.5.dist-info → valor_lite-0.37.5.dist-info}/WHEEL +1 -1
- valor_lite/classification/manager.py +0 -545
- valor_lite/object_detection/manager.py +0 -865
- valor_lite/profiling.py +0 -374
- valor_lite/semantic_segmentation/benchmark.py +0 -237
- valor_lite/semantic_segmentation/manager.py +0 -446
- valor_lite-0.36.5.dist-info/RECORD +0 -41
- {valor_lite-0.36.5.dist-info → valor_lite-0.37.5.dist-info}/top_level.txt +0 -0
valor_lite/object_detection/utilities.py:

@@ -1,43 +1,24 @@
 from collections import defaultdict

 import numpy as np
+import pyarrow as pa
 from numpy.typing import NDArray

-from valor_lite.object_detection.computation import PairClassification
 from valor_lite.object_detection.metric import Metric, MetricType


 def unpack_precision_recall_into_metric_lists(
-
-
-
-
-
-
-
-            NDArray[np.float64],
-        ],
-        NDArray[np.float64],
-        NDArray[np.float64],
-    ],
+    counts: NDArray[np.uint64],
+    precision_recall_f1: NDArray[np.float64],
+    average_precision: NDArray[np.float64],
+    mean_average_precision: NDArray[np.float64],
+    average_recall: NDArray[np.float64],
+    mean_average_recall: NDArray[np.float64],
+    pr_curve: NDArray[np.float64],
     iou_thresholds: list[float],
     score_thresholds: list[float],
-    index_to_label:
-    label_metadata: NDArray[np.int32],
+    index_to_label: dict[int, str],
 ):
-    (
-        (
-            average_precision,
-            mean_average_precision,
-        ),
-        (
-            average_recall,
-            mean_average_recall,
-        ),
-        precision_recall,
-        pr_curves,
-    ) = results
-
     metrics = defaultdict(list)

     metrics[MetricType.AP] = [
@@ -47,8 +28,7 @@ def unpack_precision_recall_into_metric_lists(
             label=label,
         )
         for iou_idx, iou_threshold in enumerate(iou_thresholds)
-        for label_idx, label in
-        if int(label_metadata[label_idx, 0]) > 0
+        for label_idx, label in index_to_label.items()
     ]

     metrics[MetricType.mAP] = [
@@ -66,8 +46,7 @@ def unpack_precision_recall_into_metric_lists(
             iou_thresholds=iou_thresholds,
             label=label,
         )
-        for label_idx, label in
-        if int(label_metadata[label_idx, 0]) > 0
+        for label_idx, label in index_to_label.items()
     ]

     # TODO - (c.zaloom) will be removed in the future
@@ -86,8 +65,7 @@ def unpack_precision_recall_into_metric_lists(
             label=label,
         )
         for score_idx, score_threshold in enumerate(score_thresholds)
-        for label_idx, label in
-        if int(label_metadata[label_idx, 0]) > 0
+        for label_idx, label in index_to_label.items()
     ]

     metrics[MetricType.mAR] = [
@@ -107,8 +85,7 @@ def unpack_precision_recall_into_metric_lists(
             iou_thresholds=iou_thresholds,
             label=label,
         )
-        for label_idx, label in
-        if int(label_metadata[label_idx, 0]) > 0
+        for label_idx, label in index_to_label.items()
     ]

     # TODO - (c.zaloom) will be removed in the future
@@ -122,24 +99,20 @@ def unpack_precision_recall_into_metric_lists(

     metrics[MetricType.PrecisionRecallCurve] = [
         Metric.precision_recall_curve(
-            precisions=
-            scores=
+            precisions=pr_curve[iou_idx, label_idx, :, 0].tolist(),
+            scores=pr_curve[iou_idx, label_idx, :, 1].tolist(),
             iou_threshold=iou_threshold,
             label=label,
         )
         for iou_idx, iou_threshold in enumerate(iou_thresholds)
-        for label_idx, label in
-        if label_metadata[label_idx, 0] > 0
+        for label_idx, label in index_to_label.items()
     ]

-    for label_idx, label in
-        if label_metadata[label_idx, 0] == 0:
-            continue
-
+    for label_idx, label in index_to_label.items():
        for score_idx, score_threshold in enumerate(score_thresholds):
            for iou_idx, iou_threshold in enumerate(iou_thresholds):

-                row =
+                row = counts[iou_idx, score_idx, :, label_idx]
                 kwargs = {
                     "label": label,
                     "iou_threshold": iou_threshold,
@@ -154,21 +127,22 @@ def unpack_precision_recall_into_metric_lists(
                     )
                 )

+                row = precision_recall_f1[iou_idx, score_idx, :, label_idx]
                 metrics[MetricType.Precision].append(
                     Metric.precision(
-                        value=float(row[
+                        value=float(row[0]),
                         **kwargs,
                     )
                 )
                 metrics[MetricType.Recall].append(
                     Metric.recall(
-                        value=float(row[
+                        value=float(row[1]),
                         **kwargs,
                     )
                 )
                 metrics[MetricType.F1].append(
                     Metric.f1_score(
-                        value=float(row[
+                        value=float(row[2]),
                         **kwargs,
                     )
                 )
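The hunks above drop the packed `results` tuple in favor of named arrays. A minimal sketch of the indexing they imply; this is an assumption inferred from the diff, not code from the package:

import numpy as np

# Assumed layout: precision_recall_f1 is indexed as
# [iou_idx, score_idx, :, label_idx], and row[0]/row[1]/row[2] feed
# Metric.precision / Metric.recall / Metric.f1_score, which implies a
# (n_ious, n_scores, 3, n_labels) shape.
n_ious, n_scores, n_labels = 2, 4, 3
precision_recall_f1 = np.zeros((n_ious, n_scores, 3, n_labels), dtype=np.float64)
row = precision_recall_f1[0, 0, :, 1]
precision, recall, f1 = float(row[0]), float(row[1]), float(row[2])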
@@ -176,40 +150,153 @@ def unpack_precision_recall_into_metric_lists(
     return metrics


-def
-
+def unpack_confusion_matrix(
+    confusion_matrices: NDArray[np.uint64],
+    unmatched_groundtruths: NDArray[np.uint64],
+    unmatched_predictions: NDArray[np.uint64],
+    index_to_label: dict[int, str],
+    iou_thresholds: list[float],
+    score_thresholds: list[float],
+) -> list[Metric]:
+    metrics = []
+    for iou_idx, iou_thresh in enumerate(iou_thresholds):
+        for score_idx, score_thresh in enumerate(score_thresholds):
+            cm_dict = {}
+            ugt_dict = {}
+            upd_dict = {}
+            for idx, label in index_to_label.items():
+                ugt_dict[label] = int(
+                    unmatched_groundtruths[iou_idx, score_idx, idx]
+                )
+                upd_dict[label] = int(
+                    unmatched_predictions[iou_idx, score_idx, idx]
+                )
+                for pidx, plabel in index_to_label.items():
+                    if label not in cm_dict:
+                        cm_dict[label] = {}
+                    cm_dict[label][plabel] = int(
+                        confusion_matrices[iou_idx, score_idx, idx, pidx]
+                    )
+            metrics.append(
+                Metric.confusion_matrix(
+                    confusion_matrix=cm_dict,
+                    unmatched_ground_truths=ugt_dict,
+                    unmatched_predictions=upd_dict,
+                    iou_threshold=iou_thresh,
+                    score_threshold=score_thresh,
+                )
+            )
+    return metrics
+
+
+def create_mapping(
+    tbl: pa.Table,
+    pairs: NDArray[np.float64],
+    index: int,
+    id_col: str,
+    uid_col: str,
+) -> dict[int, str]:
+    col = pairs[:, index].astype(np.int64)
+    values, indices = np.unique(col, return_index=True)
+    indices = indices[values >= 0]
+    return {
+        tbl[id_col][idx].as_py(): tbl[uid_col][idx].as_py() for idx in indices
+    }
+
+
+def unpack_examples(
+    detailed_pairs: NDArray[np.float64],
+    mask_tp: NDArray[np.bool_],
+    mask_fn: NDArray[np.bool_],
+    mask_fp: NDArray[np.bool_],
+    iou_thresholds: list[float],
+    score_thresholds: list[float],
+    index_to_datum_id: dict[int, str],
+    index_to_groundtruth_id: dict[int, str],
+    index_to_prediction_id: dict[int, str],
+) -> list[Metric]:
+    metrics = []
+    ids = detailed_pairs[:, :5].astype(np.int64)
+    unique_datums = np.unique(detailed_pairs[:, 0].astype(np.int64))
+    for datum_index in unique_datums:
+        mask_datum = detailed_pairs[:, 0] == datum_index
+        mask_datum_tp = mask_tp & mask_datum
+        mask_datum_fp = mask_fp & mask_datum
+        mask_datum_fn = mask_fn & mask_datum
+
+        datum_id = index_to_datum_id[datum_index]
+        for iou_idx, iou_thresh in enumerate(iou_thresholds):
+            for score_idx, score_thresh in enumerate(score_thresholds):
+
+                unique_tp = np.unique(
+                    ids[np.ix_(mask_datum_tp[iou_idx, score_idx], (0, 1, 2, 3, 4))], axis=0  # type: ignore - numpy ix_ typing
+                )
+                unique_fp = np.unique(
+                    ids[np.ix_(mask_datum_fp[iou_idx, score_idx], (0, 2, 4))], axis=0  # type: ignore - numpy ix_ typing
+                )
+                unique_fn = np.unique(
+                    ids[np.ix_(mask_datum_fn[iou_idx, score_idx], (0, 1, 3))], axis=0  # type: ignore - numpy ix_ typing
+                )
+
+                tp = [
+                    (
+                        index_to_groundtruth_id[row[1]],
+                        index_to_prediction_id[row[2]],
+                    )
+                    for row in unique_tp
+                ]
+                fp = [index_to_prediction_id[row[1]] for row in unique_fp]
+                fn = [index_to_groundtruth_id[row[1]] for row in unique_fn]
+                metrics.append(
+                    Metric.examples(
+                        datum_id=datum_id,
+                        true_positives=tp,
+                        false_negatives=fn,
+                        false_positives=fp,
+                        iou_threshold=iou_thresh,
+                        score_threshold=score_thresh,
+                    )
+                )
+    return metrics
+
+
+def create_empty_confusion_matrix_with_examples(
+    iou_threhsold: float,
+    score_threshold: float,
+    index_to_label: dict[int, str],
+) -> Metric:
+    unmatched_groundtruths = dict()
     unmatched_predictions = dict()
     confusion_matrix = dict()
-    for label in
-
+    for label in index_to_label.values():
+        unmatched_groundtruths[label] = {"count": 0, "examples": []}
         unmatched_predictions[label] = {"count": 0, "examples": []}
         confusion_matrix[label] = {}
-        for plabel in
+        for plabel in index_to_label.values():
             confusion_matrix[label][plabel] = {"count": 0, "examples": []}
-
-
-
-        unmatched_ground_truths,
+
+    return Metric.confusion_matrix_with_examples(
+        confusion_matrix=confusion_matrix,
+        unmatched_ground_truths=unmatched_groundtruths,
+        unmatched_predictions=unmatched_predictions,
+        iou_threshold=iou_threhsold,
+        score_threshold=score_threshold,
     )


-def
+def _unpack_confusion_matrix_with_examples(
+    metric: Metric,
     ids: NDArray[np.int32],
     mask_matched: NDArray[np.bool_],
     mask_fp_unmatched: NDArray[np.bool_],
     mask_fn_unmatched: NDArray[np.bool_],
-    index_to_datum_id:
-    index_to_groundtruth_id:
-    index_to_prediction_id:
-    index_to_label:
-    iou_threhsold: float,
-    score_threshold: float,
+    index_to_datum_id: dict[int, str],
+    index_to_groundtruth_id: dict[int, str],
+    index_to_prediction_id: dict[int, str],
+    index_to_label: dict[int, str],
 ):
-    (
-
-        unmatched_predictions,
-        unmatched_ground_truths,
-    ) = _create_empty_confusion_matrix(index_to_label)
+    if not isinstance(metric.value, dict):
+        raise TypeError("expected metric to contain a dictionary value")

     unique_matches = np.unique(
         ids[np.ix_(mask_matched, (0, 1, 2, 3, 4))], axis=0  # type: ignore - numpy ix_ typing
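For reference, a standalone sketch of the np.unique(..., return_index=True) pattern used by the new create_mapping helper above. The table contents and column names here are made up for illustration; only the mechanics mirror the diff:

import numpy as np
import pyarrow as pa

# One table row per pair row, with an integer id column and a string uid column.
tbl = pa.table({"id": [0, 1, 1, -1, 2], "uid": ["d0", "d1", "d1", "", "d2"]})
pairs = np.array([[0.0], [1.0], [1.0], [-1.0], [2.0]])

col = pairs[:, 0].astype(np.int64)
values, indices = np.unique(col, return_index=True)  # first occurrence of each value
indices = indices[values >= 0]                       # drop rows with the -1 sentinel
mapping = {tbl["id"][i].as_py(): tbl["uid"][i].as_py() for i in indices}
print(mapping)  # {0: 'd0', 1: 'd1', 2: 'd2'}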
@@ -229,8 +316,8 @@ def _unpack_confusion_matrix(
     for idx in range(n_max):
         if idx < n_unmatched_groundtruths:
             label = index_to_label[unique_unmatched_groundtruths[idx, 2]]
-            unmatched_ground_truths[label]["count"] += 1
-            unmatched_ground_truths[label]["examples"].append(
+            metric.value["unmatched_ground_truths"][label]["count"] += 1
+            metric.value["unmatched_ground_truths"][label]["examples"].append(
                 {
                     "datum_id": index_to_datum_id[
                         unique_unmatched_groundtruths[idx, 0]
|
|
|
241
328
|
}
|
|
242
329
|
)
|
|
243
330
|
if idx < n_unmatched_predictions:
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
unmatched_predictions[label]["
|
|
331
|
+
label_id = unique_unmatched_predictions[idx, 2]
|
|
332
|
+
label = index_to_label[label_id]
|
|
333
|
+
metric.value["unmatched_predictions"][label]["count"] += 1
|
|
334
|
+
metric.value["unmatched_predictions"][label]["examples"].append(
|
|
247
335
|
{
|
|
248
336
|
"datum_id": index_to_datum_id[
|
|
249
337
|
unique_unmatched_predictions[idx, 0]
|
|
@@ -256,8 +344,10 @@ def _unpack_confusion_matrix(
|
|
|
256
344
|
if idx < n_matched:
|
|
257
345
|
glabel = index_to_label[unique_matches[idx, 3]]
|
|
258
346
|
plabel = index_to_label[unique_matches[idx, 4]]
|
|
259
|
-
confusion_matrix[glabel][plabel]["count"] += 1
|
|
260
|
-
confusion_matrix[glabel][plabel][
|
|
347
|
+
metric.value["confusion_matrix"][glabel][plabel]["count"] += 1
|
|
348
|
+
metric.value["confusion_matrix"][glabel][plabel][
|
|
349
|
+
"examples"
|
|
350
|
+
].append(
|
|
261
351
|
{
|
|
262
352
|
"datum_id": index_to_datum_id[unique_matches[idx, 0]],
|
|
263
353
|
"ground_truth_id": index_to_groundtruth_id[
|
|
@@ -269,43 +359,29 @@ def _unpack_confusion_matrix(
                 }
             )

-    return
-        confusion_matrix=confusion_matrix,
-        unmatched_ground_truths=unmatched_ground_truths,
-        unmatched_predictions=unmatched_predictions,
-        iou_threshold=iou_threhsold,
-        score_threshold=score_threshold,
-    )
+    return metric


-def
-
+def unpack_confusion_matrix_with_examples(
+    metrics: dict[int, dict[int, Metric]],
     detailed_pairs: NDArray[np.float64],
-
-
-
-
-
-
+    mask_tp: NDArray[np.bool_],
+    mask_fp_fn_misclf: NDArray[np.bool_],
+    mask_fp_unmatched: NDArray[np.bool_],
+    mask_fn_unmatched: NDArray[np.bool_],
+    index_to_datum_id: dict[int, str],
+    index_to_groundtruth_id: dict[int, str],
+    index_to_prediction_id: dict[int, str],
+    index_to_label: dict[int, str],
 ) -> list[Metric]:

     ids = detailed_pairs[:, :5].astype(np.int32)

-    mask_matched =
-        np.bitwise_and(
-            results, PairClassification.TP | PairClassification.FP_FN_MISCLF
-        )
-        > 0
-    )
-    mask_fp_unmatched = (
-        np.bitwise_and(results, PairClassification.FP_UNMATCHED) > 0
-    )
-    mask_fn_unmatched = (
-        np.bitwise_and(results, PairClassification.FN_UNMATCHED) > 0
-    )
+    mask_matched = mask_tp | mask_fp_fn_misclf

     return [
-
+        _unpack_confusion_matrix_with_examples(
+            metric=metric,
             ids=ids,
             mask_matched=mask_matched[iou_idx, score_idx],
             mask_fp_unmatched=mask_fp_unmatched[iou_idx, score_idx],
|
|
|
314
390
|
index_to_groundtruth_id=index_to_groundtruth_id,
|
|
315
391
|
index_to_prediction_id=index_to_prediction_id,
|
|
316
392
|
index_to_label=index_to_label,
|
|
317
|
-
iou_threhsold=iou_threshold,
|
|
318
|
-
score_threshold=score_threshold,
|
|
319
393
|
)
|
|
320
|
-
for iou_idx,
|
|
321
|
-
for score_idx,
|
|
322
|
-
if (results[iou_idx, score_idx] != -1).any()
|
|
394
|
+
for iou_idx, inner in metrics.items()
|
|
395
|
+
for score_idx, metric in inner.items()
|
|
323
396
|
]
|
|
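The last two hunks above replace on-the-fly decoding of PairClassification bit flags with precomputed boolean masks. A small self-contained sketch of the equivalence; the flag values are placeholders rather than the package's actual PairClassification members, and it assumes each pair carries exactly one classification flag:

import numpy as np
from enum import IntFlag

class PairClassification(IntFlag):  # placeholder values for illustration only
    TP = 1
    FP_FN_MISCLF = 2
    FP_UNMATCHED = 4

results = np.array([1, 2, 4, 1], dtype=np.uint8)

# old style: decode the flags at unpacking time
mask_matched_old = (
    np.bitwise_and(results, PairClassification.TP | PairClassification.FP_FN_MISCLF) > 0
)

# new style: combine boolean masks computed once upstream
mask_tp = results == PairClassification.TP
mask_fp_fn_misclf = results == PairClassification.FP_FN_MISCLF
mask_matched_new = mask_tp | mask_fp_fn_misclf

assert np.array_equal(mask_matched_old, mask_matched_new)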
valor_lite/semantic_segmentation/__init__.py:

@@ -1,14 +1,15 @@
 from .annotation import Bitmask, Segmentation
-from .
+from .evaluator import Builder, Evaluator, EvaluatorInfo
+from .loader import Loader
 from .metric import Metric, MetricType

 __all__ = [
-    "
+    "Builder",
+    "Loader",
     "Evaluator",
     "Segmentation",
     "Bitmask",
     "Metric",
     "MetricType",
-    "
-    "Metadata",
+    "EvaluatorInfo",
 ]
valor_lite/semantic_segmentation/annotation.py:

@@ -1,5 +1,6 @@
 import warnings
 from dataclasses import dataclass, field
+from typing import Any

 import numpy as np
 from numpy.typing import NDArray
@@ -16,6 +17,8 @@ class Bitmask:
         A NumPy array of boolean values representing the mask.
     label : str
         The semantic label associated with the mask.
+    metadata : dict[str, Any], optional
+        A dictionary containing any metadata to be used within filtering operations.

     Examples
     --------
@@ -26,6 +29,7 @@ class Bitmask:

     mask: NDArray[np.bool_]
     label: str
+    metadata: dict[str, Any] | None = None

     def __post_init__(self):
         if self.mask.dtype != np.bool_:
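Since the metadata field on Bitmask is new in this release, here is a minimal construction sketch; the metadata keys are arbitrary examples, not part of the API, and the Segmentation dataclass gains the same field in the hunks that follow:

import numpy as np
from valor_lite.semantic_segmentation import Bitmask

mask = np.zeros((32, 32), dtype=np.bool_)
mask[8:16, 8:16] = True

# metadata is optional and defaults to None; the key shown here is arbitrary.
bm = Bitmask(mask=mask, label="tree", metadata={"sensor": "rgb"})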
@@ -51,6 +55,8 @@ class Segmentation:
         The shape of the segmentation masks. This is set automatically after initialization.
     size : int, optional
         The total number of pixels in the masks. This is set automatically after initialization.
+    metadata : dict[str, Any], optional
+        A dictionary containing any metadata to be used within filtering operations.

     Examples
     --------
@@ -71,6 +77,7 @@ class Segmentation:
     predictions: list[Bitmask]
     shape: tuple[int, ...]
     size: int = field(default=0)
+    metadata: dict[str, Any] | None = None

     def __post_init__(self):
