eye_cv-1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eye/__init__.py +115 -0
- eye/__init___supervision_original.py +120 -0
- eye/annotators/__init__.py +0 -0
- eye/annotators/base.py +22 -0
- eye/annotators/core.py +2699 -0
- eye/annotators/line.py +107 -0
- eye/annotators/modern.py +529 -0
- eye/annotators/trace.py +142 -0
- eye/annotators/utils.py +177 -0
- eye/assets/__init__.py +2 -0
- eye/assets/downloader.py +95 -0
- eye/assets/list.py +83 -0
- eye/classification/__init__.py +0 -0
- eye/classification/core.py +188 -0
- eye/config.py +2 -0
- eye/core/__init__.py +0 -0
- eye/core/trackers/__init__.py +1 -0
- eye/core/trackers/botsort_tracker.py +336 -0
- eye/core/trackers/bytetrack_tracker.py +284 -0
- eye/core/trackers/sort_tracker.py +200 -0
- eye/core/tracking.py +146 -0
- eye/dataset/__init__.py +0 -0
- eye/dataset/core.py +919 -0
- eye/dataset/formats/__init__.py +0 -0
- eye/dataset/formats/coco.py +258 -0
- eye/dataset/formats/pascal_voc.py +279 -0
- eye/dataset/formats/yolo.py +272 -0
- eye/dataset/utils.py +259 -0
- eye/detection/__init__.py +0 -0
- eye/detection/auto_convert.py +155 -0
- eye/detection/core.py +1529 -0
- eye/detection/detections_enhanced.py +392 -0
- eye/detection/line_zone.py +859 -0
- eye/detection/lmm.py +184 -0
- eye/detection/overlap_filter.py +270 -0
- eye/detection/tools/__init__.py +0 -0
- eye/detection/tools/csv_sink.py +181 -0
- eye/detection/tools/inference_slicer.py +288 -0
- eye/detection/tools/json_sink.py +142 -0
- eye/detection/tools/polygon_zone.py +202 -0
- eye/detection/tools/smoother.py +123 -0
- eye/detection/tools/smoothing.py +179 -0
- eye/detection/tools/smoothing_config.py +202 -0
- eye/detection/tools/transformers.py +247 -0
- eye/detection/utils.py +1175 -0
- eye/draw/__init__.py +0 -0
- eye/draw/color.py +154 -0
- eye/draw/utils.py +374 -0
- eye/filters.py +112 -0
- eye/geometry/__init__.py +0 -0
- eye/geometry/core.py +128 -0
- eye/geometry/utils.py +47 -0
- eye/keypoint/__init__.py +0 -0
- eye/keypoint/annotators.py +442 -0
- eye/keypoint/core.py +687 -0
- eye/keypoint/skeletons.py +2647 -0
- eye/metrics/__init__.py +21 -0
- eye/metrics/core.py +72 -0
- eye/metrics/detection.py +843 -0
- eye/metrics/f1_score.py +648 -0
- eye/metrics/mean_average_precision.py +628 -0
- eye/metrics/mean_average_recall.py +697 -0
- eye/metrics/precision.py +653 -0
- eye/metrics/recall.py +652 -0
- eye/metrics/utils/__init__.py +0 -0
- eye/metrics/utils/object_size.py +158 -0
- eye/metrics/utils/utils.py +9 -0
- eye/py.typed +0 -0
- eye/quick.py +104 -0
- eye/tracker/__init__.py +0 -0
- eye/tracker/byte_tracker/__init__.py +0 -0
- eye/tracker/byte_tracker/core.py +386 -0
- eye/tracker/byte_tracker/kalman_filter.py +205 -0
- eye/tracker/byte_tracker/matching.py +69 -0
- eye/tracker/byte_tracker/single_object_track.py +178 -0
- eye/tracker/byte_tracker/utils.py +18 -0
- eye/utils/__init__.py +0 -0
- eye/utils/conversion.py +132 -0
- eye/utils/file.py +159 -0
- eye/utils/image.py +794 -0
- eye/utils/internal.py +200 -0
- eye/utils/iterables.py +84 -0
- eye/utils/notebook.py +114 -0
- eye/utils/video.py +307 -0
- eye/utils_eye/__init__.py +1 -0
- eye/utils_eye/geometry.py +71 -0
- eye/utils_eye/nms.py +55 -0
- eye/validators/__init__.py +140 -0
- eye/web.py +271 -0
- eye_cv-1.0.0.dist-info/METADATA +319 -0
- eye_cv-1.0.0.dist-info/RECORD +94 -0
- eye_cv-1.0.0.dist-info/WHEEL +5 -0
- eye_cv-1.0.0.dist-info/licenses/LICENSE +21 -0
- eye_cv-1.0.0.dist-info/top_level.txt +1 -0
eye/metrics/mean_average_precision.py

@@ -0,0 +1,628 @@
```python
from __future__ import annotations

from copy import deepcopy
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np
from matplotlib import pyplot as plt

from eye.config import ORIENTED_BOX_COORDINATES
from eye.detection.core import Detections
from eye.detection.utils import (
    box_iou_batch,
    mask_iou_batch,
    oriented_box_iou_batch,
)
from eye.draw.color import LEGACY_COLOR_PALETTE
from eye.metrics.core import Metric, MetricTarget
from eye.metrics.utils.object_size import (
    ObjectSizeCategory,
    get_detection_size_category,
)
from eye.metrics.utils.utils import ensure_pandas_installed

if TYPE_CHECKING:
    import pandas as pd
```
````python
class MeanAveragePrecision(Metric):
    """
    Mean Average Precision (mAP) is a metric used to evaluate object detection
    models. It is the mean, taken over classes and IoU thresholds, of the
    average precision: the area under each class's precision-recall curve.

    Example:
        ```python
        import eye as sv
        from eye.metrics import MeanAveragePrecision

        predictions = sv.Detections(...)
        targets = sv.Detections(...)

        map_metric = MeanAveragePrecision()
        map_result = map_metric.update(predictions, targets).compute()

        print(map_result.map50_95)
        # 0.4674

        print(map_result)
        # MeanAveragePrecisionResult:
        # Metric target: MetricTarget.BOXES
        # Class agnostic: False
        # mAP @ 50:95: 0.4674
        # mAP @ 50: 0.5048
        # mAP @ 75: 0.4796
        # mAP scores: [0.50485 0.50377 0.50377 ...]
        # IoU thresh: [0.5 0.55 0.6 ...]
        # AP per class:
        # 0: [0.67699 0.67699 0.67699 ...]
        # ...
        # Small objects: ...
        # Medium objects: ...
        # Large objects: ...

        map_result.plot()
        ```

    { align=center width="800" }
    """
````
```python
    def __init__(
        self,
        metric_target: MetricTarget = MetricTarget.BOXES,
        class_agnostic: bool = False,
    ):
        """
        Initialize the Mean Average Precision metric.

        Args:
            metric_target (MetricTarget): The type of detection data to use.
            class_agnostic (bool): Whether to treat all data as a single class.
        """
        self._metric_target = metric_target
        self._class_agnostic = class_agnostic

        self._predictions_list: List[Detections] = []
        self._targets_list: List[Detections] = []

    def reset(self) -> None:
        """
        Reset the metric to its initial state, clearing all stored data.
        """
        self._predictions_list = []
        self._targets_list = []
```
```python
    def update(
        self,
        predictions: Union[Detections, List[Detections]],
        targets: Union[Detections, List[Detections]],
    ) -> MeanAveragePrecision:
        """
        Add new predictions and targets to the metric, but do not compute the result.

        Args:
            predictions (Union[Detections, List[Detections]]): The predicted detections.
            targets (Union[Detections, List[Detections]]): The ground-truth detections.

        Returns:
            (MeanAveragePrecision): The updated metric instance.
        """
        if not isinstance(predictions, list):
            predictions = [predictions]
        if not isinstance(targets, list):
            targets = [targets]

        if len(predictions) != len(targets):
            raise ValueError(
                f"The number of predictions ({len(predictions)}) and"
                f" targets ({len(targets)}) during the update must be the same."
            )

        if self._class_agnostic:
            predictions = deepcopy(predictions)
            targets = deepcopy(targets)

            for prediction in predictions:
                prediction.class_id[:] = -1
            for target in targets:
                target.class_id[:] = -1

        self._predictions_list.extend(predictions)
        self._targets_list.extend(targets)

        return self
```
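The `update` / `compute` split makes streaming evaluation straightforward: `update` only buffers detections, so it can be called once per image. A minimal sketch, assuming a hypothetical `model` callable and `dataset` iterable (neither is part of this package), with `MetricTarget` imported from its path in this module:

```python
import eye as sv
from eye.metrics import MeanAveragePrecision
from eye.metrics.core import MetricTarget

map_metric = MeanAveragePrecision(metric_target=MetricTarget.BOXES)

# `dataset` and `model` are hypothetical placeholders: each iteration
# yields one image with its ground-truth sv.Detections, and the model
# returns predicted sv.Detections for that image.
for image, ground_truth in dataset:
    predictions = model(image)
    map_metric.update(predictions, ground_truth)  # buffers, computes nothing

map_result = map_metric.compute()  # aggregates over everything buffered
print(f"{map_result.map50_95:.4f}")
```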
```python
    def compute(
        self,
    ) -> MeanAveragePrecisionResult:
        """
        Calculate Mean Average Precision based on predicted and ground-truth
        detections at different thresholds.

        Returns:
            (MeanAveragePrecisionResult): The Mean Average Precision result.
        """
        result = self._compute(self._predictions_list, self._targets_list)

        small_predictions = []
        small_targets = []
        for predictions, targets in zip(self._predictions_list, self._targets_list):
            small_predictions.append(
                self._filter_detections_by_size(predictions, ObjectSizeCategory.SMALL)
            )
            small_targets.append(
                self._filter_detections_by_size(targets, ObjectSizeCategory.SMALL)
            )
        result.small_objects = self._compute(small_predictions, small_targets)

        medium_predictions = []
        medium_targets = []
        for predictions, targets in zip(self._predictions_list, self._targets_list):
            medium_predictions.append(
                self._filter_detections_by_size(predictions, ObjectSizeCategory.MEDIUM)
            )
            medium_targets.append(
                self._filter_detections_by_size(targets, ObjectSizeCategory.MEDIUM)
            )
        result.medium_objects = self._compute(medium_predictions, medium_targets)

        large_predictions = []
        large_targets = []
        for predictions, targets in zip(self._predictions_list, self._targets_list):
            large_predictions.append(
                self._filter_detections_by_size(predictions, ObjectSizeCategory.LARGE)
            )
            large_targets.append(
                self._filter_detections_by_size(targets, ObjectSizeCategory.LARGE)
            )
        result.large_objects = self._compute(large_predictions, large_targets)

        return result
```
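Besides the overall score, `compute` re-runs `_compute` on size-filtered copies of the buffered detections, giving COCO-style size buckets. A short sketch continuing the example above (the area thresholds come from the `MeanAveragePrecisionResult` docstring further down):

```python
map_result = map_metric.compute()

# Each size bucket is itself a MeanAveragePrecisionResult.
print(map_result.map50_95)                 # all objects
print(map_result.small_objects.map50_95)   # area < 32 * 32
print(map_result.medium_objects.map50_95)  # 32 * 32 <= area < 96 * 96
print(map_result.large_objects.map50_95)   # area >= 96 * 96
```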
```python
    def _compute(
        self,
        predictions_list: List[Detections],
        targets_list: List[Detections],
    ) -> MeanAveragePrecisionResult:
        iou_thresholds = np.linspace(0.5, 0.95, 10)
        stats = []

        for predictions, targets in zip(predictions_list, targets_list):
            prediction_contents = self._detections_content(predictions)
            target_contents = self._detections_content(targets)

            if len(targets) > 0:
                if len(predictions) == 0:
                    stats.append(
                        (
                            np.zeros((0, iou_thresholds.size), dtype=bool),
                            np.zeros((0,), dtype=np.float32),
                            np.zeros((0,), dtype=int),
                            targets.class_id,
                        )
                    )

                else:
                    if self._metric_target == MetricTarget.BOXES:
                        iou = box_iou_batch(target_contents, prediction_contents)
                    elif self._metric_target == MetricTarget.MASKS:
                        iou = mask_iou_batch(target_contents, prediction_contents)
                    elif self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES:
                        iou = oriented_box_iou_batch(
                            target_contents, prediction_contents
                        )
                    else:
                        raise ValueError(
                            "Unsupported metric target for IoU calculation"
                        )

                    matches = self._match_detection_batch(
                        predictions.class_id, targets.class_id, iou, iou_thresholds
                    )

                    stats.append(
                        (
                            matches,
                            predictions.confidence,
                            predictions.class_id,
                            targets.class_id,
                        )
                    )

        # Compute average precisions if any matches exist
        if stats:
            concatenated_stats = [np.concatenate(items, 0) for items in zip(*stats)]
            average_precisions, unique_classes = self._average_precisions_per_class(
                *concatenated_stats
            )
            mAP_scores = np.mean(average_precisions, axis=0)
        else:
            mAP_scores = np.zeros((10,), dtype=np.float32)
            unique_classes = np.empty((0,), dtype=int)
            average_precisions = np.empty((0, len(iou_thresholds)), dtype=np.float32)

        return MeanAveragePrecisionResult(
            metric_target=self._metric_target,
            is_class_agnostic=self._class_agnostic,
            mAP_scores=mAP_scores,
            iou_thresholds=iou_thresholds,
            matched_classes=unique_classes,
            ap_per_class=average_precisions,
        )
```
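`_compute` evaluates matches on a fixed grid of ten IoU thresholds; the `map50` and `map75` properties of the result simply index into this grid. A quick check of the grid itself:

```python
import numpy as np

iou_thresholds = np.linspace(0.5, 0.95, 10)
print(iou_thresholds)
# [0.5  0.55 0.6  0.65 0.7  0.75 0.8  0.85 0.9  0.95]
# index 0 -> mAP@50, index 5 -> mAP@75; mAP@50:95 is the mean of all ten
```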
```python
    @staticmethod
    def _compute_average_precision(recall: np.ndarray, precision: np.ndarray) -> float:
        """
        Compute the average precision using 101-point interpolation (COCO), given
        the recall and precision curves.

        Args:
            recall (np.ndarray): The recall curve.
            precision (np.ndarray): The precision curve.

        Returns:
            (float): Average precision.
        """
        if len(recall) == 0 and len(precision) == 0:
            return 0.0

        recall_levels = np.linspace(0, 1, 101)
        precision_levels = np.zeros_like(recall_levels)
        for r, p in zip(recall[::-1], precision[::-1]):
            precision_levels[recall_levels <= r] = p

        average_precision = (1 / 101 * precision_levels).sum()
        return average_precision
```
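To see the 101-point interpolation at work, here is a toy call against the private helper (illustration only; the expected value is hand-computed for this monotone curve):

```python
import numpy as np
from eye.metrics import MeanAveragePrecision

recall = np.array([0.2, 0.6, 1.0])
precision = np.array([1.0, 0.8, 0.5])

ap = MeanAveragePrecision._compute_average_precision(recall, precision)
# 21 of the 101 recall levels lie at or below 0.2 (precision 1.0),
# 40 lie in (0.2, 0.6] (precision 0.8), 40 in (0.6, 1.0] (precision 0.5):
# (21 * 1.0 + 40 * 0.8 + 40 * 0.5) / 101 ~= 0.7228
print(f"{ap:.4f}")
```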
```python
    @staticmethod
    def _match_detection_batch(
        predictions_classes: np.ndarray,
        target_classes: np.ndarray,
        iou: np.ndarray,
        iou_thresholds: np.ndarray,
    ) -> np.ndarray:
        num_predictions, num_iou_levels = (
            predictions_classes.shape[0],
            iou_thresholds.shape[0],
        )
        correct = np.zeros((num_predictions, num_iou_levels), dtype=bool)
        correct_class = target_classes[:, None] == predictions_classes

        for i, iou_level in enumerate(iou_thresholds):
            matched_indices = np.where((iou >= iou_level) & correct_class)

            if matched_indices[0].shape[0]:
                combined_indices = np.stack(matched_indices, axis=1)
                iou_values = iou[matched_indices][:, None]
                matches = np.hstack([combined_indices, iou_values])

                if matched_indices[0].shape[0] > 1:
                    matches = matches[matches[:, 2].argsort()[::-1]]
                    matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                    matches = matches[np.unique(matches[:, 0], return_index=True)[1]]

                correct[matches[:, 1].astype(int), i] = True

        return correct
```
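The matcher is greedy: within each IoU threshold it sorts candidate same-class pairs by IoU and keeps at most one prediction per target (and one target per prediction). A toy call, again against the private helper for illustration only:

```python
import numpy as np
from eye.metrics import MeanAveragePrecision

# One ground-truth box, two same-class predictions;
# `iou` is shaped (num_targets, num_predictions).
correct = MeanAveragePrecision._match_detection_batch(
    predictions_classes=np.array([0, 0]),
    target_classes=np.array([0]),
    iou=np.array([[0.9, 0.6]]),
    iou_thresholds=np.array([0.5, 0.75]),
)
print(correct)
# [[ True  True]    <- prediction 0 (IoU 0.9) wins the target at both levels
#  [False False]]   <- prediction 1 loses the greedy match, so it is a FP
```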
```python
    @staticmethod
    def _average_precisions_per_class(
        matches: np.ndarray,
        prediction_confidence: np.ndarray,
        prediction_class_ids: np.ndarray,
        true_class_ids: np.ndarray,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Compute the average precision, given the recall and precision curves.
        Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.

        Args:
            matches (np.ndarray): True positives.
            prediction_confidence (np.ndarray): Objectness value from 0-1.
            prediction_class_ids (np.ndarray): Predicted object classes.
            true_class_ids (np.ndarray): True object classes.

        Returns:
            (Tuple[np.ndarray, np.ndarray]): Average precision for different
                IoU levels, and an array of class IDs that were matched.
        """
        eps = 1e-16  # fixed internally; keeps the recall denominator non-zero

        sorted_indices = np.argsort(-prediction_confidence)
        matches = matches[sorted_indices]
        prediction_class_ids = prediction_class_ids[sorted_indices]

        unique_classes, class_counts = np.unique(true_class_ids, return_counts=True)
        num_classes = unique_classes.shape[0]

        average_precisions = np.zeros((num_classes, matches.shape[1]))

        for class_idx, class_id in enumerate(unique_classes):
            is_class = prediction_class_ids == class_id
            total_true = class_counts[class_idx]
            total_prediction = is_class.sum()

            if total_prediction == 0 or total_true == 0:
                continue

            false_positives = (1 - matches[is_class]).cumsum(0)
            true_positives = matches[is_class].cumsum(0)
            false_negatives = total_true - true_positives

            recall = true_positives / (true_positives + false_negatives + eps)
            precision = true_positives / (true_positives + false_positives)

            for iou_level_idx in range(matches.shape[1]):
                average_precisions[class_idx, iou_level_idx] = (
                    MeanAveragePrecision._compute_average_precision(
                        recall[:, iou_level_idx], precision[:, iou_level_idx]
                    )
                )

        return average_precisions, unique_classes
```

(The docstring previously listed an `eps` argument that the signature does not have; `eps` is set inside the method, as noted at the assignment.)
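Putting the two previous helpers together, `_average_precisions_per_class` walks the confidence-sorted predictions of each class, accumulates TP/FP counts into precision-recall curves, and interpolates each one. A toy call for a single class (illustration only; the expected value is approximate):

```python
import numpy as np
from eye.metrics import MeanAveragePrecision

# Three class-0 predictions at one IoU level, two ground-truth boxes;
# `matches` holds the per-prediction TP flags from the matcher above.
ap, classes = MeanAveragePrecision._average_precisions_per_class(
    matches=np.array([[True], [False], [True]]),
    prediction_confidence=np.array([0.9, 0.8, 0.7]),
    prediction_class_ids=np.array([0, 0, 0]),
    true_class_ids=np.array([0, 0]),
)
print(classes)  # [0]
print(ap)       # roughly [[0.835]] for this curve
```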
```python
    def _detections_content(self, detections: Detections) -> np.ndarray:
        """Return boxes, masks or oriented bounding boxes from detections."""
        if self._metric_target == MetricTarget.BOXES:
            return detections.xyxy
        if self._metric_target == MetricTarget.MASKS:
            return (
                detections.mask
                if detections.mask is not None
                else self._make_empty_content()
            )
        if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES:
            obb = detections.data.get(ORIENTED_BOX_COORDINATES)
            if obb is not None and len(obb) > 0:
                return np.array(obb, dtype=np.float32)
            return self._make_empty_content()
        raise ValueError(f"Invalid metric target: {self._metric_target}")

    def _make_empty_content(self) -> np.ndarray:
        if self._metric_target == MetricTarget.BOXES:
            return np.empty((0, 4), dtype=np.float32)
        if self._metric_target == MetricTarget.MASKS:
            return np.empty((0, 0, 0), dtype=bool)
        if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES:
            return np.empty((0, 4, 2), dtype=np.float32)
        raise ValueError(f"Invalid metric target: {self._metric_target}")

    def _filter_detections_by_size(
        self, detections: Detections, size_category: ObjectSizeCategory
    ) -> Detections:
        """Return a copy of detections with contents filtered by object size."""
        new_detections = deepcopy(detections)
        if detections.is_empty() or size_category == ObjectSizeCategory.ANY:
            return new_detections

        sizes = get_detection_size_category(new_detections, self._metric_target)
        size_mask = sizes == size_category.value

        new_detections.xyxy = new_detections.xyxy[size_mask]
        if new_detections.mask is not None:
            new_detections.mask = new_detections.mask[size_mask]
        if new_detections.class_id is not None:
            new_detections.class_id = new_detections.class_id[size_mask]
        if new_detections.confidence is not None:
            new_detections.confidence = new_detections.confidence[size_mask]
        if new_detections.tracker_id is not None:
            new_detections.tracker_id = new_detections.tracker_id[size_mask]
        if new_detections.data is not None:
            for key, value in new_detections.data.items():
                new_detections.data[key] = np.array(value)[size_mask]

        return new_detections
```
```python
@dataclass
class MeanAveragePrecisionResult:
    """
    The result of the Mean Average Precision calculation.

    Defaults to `0` when no detections or targets are present.

    Attributes:
        metric_target (MetricTarget): the type of data used for the metric -
            boxes, masks or oriented bounding boxes.
        is_class_agnostic (bool): When computing class-agnostic results, class ID
            is set to `-1`.
        map50_95 (float): the mAP score averaged over IoU thresholds from `0.5`
            to `0.95`.
        map50 (float): the mAP score at IoU threshold of `0.5`.
        map75 (float): the mAP score at IoU threshold of `0.75`.
        mAP_scores (np.ndarray): the mAP scores at each IoU threshold.
            Shape: `(num_iou_thresholds,)`
        ap_per_class (np.ndarray): the average precision scores per
            class and IoU threshold. Shape: `(num_target_classes, num_iou_thresholds)`
        iou_thresholds (np.ndarray): the IoU thresholds used in the calculations.
        matched_classes (np.ndarray): the class IDs of all matched classes.
            Corresponds to the rows of `ap_per_class`.
        small_objects (Optional[MeanAveragePrecisionResult]): the mAP results
            for small objects (area < 32²).
        medium_objects (Optional[MeanAveragePrecisionResult]): the mAP results
            for medium objects (32² ≤ area < 96²).
        large_objects (Optional[MeanAveragePrecisionResult]): the mAP results
            for large objects (area ≥ 96²).
    """

    metric_target: MetricTarget
    is_class_agnostic: bool
    mAP_scores: np.ndarray
    ap_per_class: np.ndarray
    iou_thresholds: np.ndarray
    matched_classes: np.ndarray
    small_objects: Optional[MeanAveragePrecisionResult] = None
    medium_objects: Optional[MeanAveragePrecisionResult] = None
    large_objects: Optional[MeanAveragePrecisionResult] = None

    @property
    def map50_95(self) -> float:
        return self.mAP_scores.mean()

    @property
    def map50(self) -> float:
        return self.mAP_scores[0]

    @property
    def map75(self) -> float:
        return self.mAP_scores[5]
```

(The attribute docs previously referred to `class_agnostic`, `mAP_map50_95`, `mAP_map50`, and `mAP_map75`, which do not exist; the actual names are `is_class_agnostic`, `map50_95`, `map50`, and `map75`. The properties, which dataclasses ignore as unannotated class attributes, are grouped after the field declarations; the field order, and hence the generated `__init__`, is unchanged.)
````python
    def __str__(self) -> str:
        """
        Format as a pretty string.

        Example:
            ```python
            print(map_result)
            # MeanAveragePrecisionResult:
            # Metric target: MetricTarget.BOXES
            # Class agnostic: False
            # mAP @ 50:95: 0.4674
            # mAP @ 50: 0.5048
            # mAP @ 75: 0.4796
            # mAP scores: [0.50485 0.50377 0.50377 ...]
            # IoU thresh: [0.5 0.55 0.6 ...]
            # AP per class:
            # 0: [0.67699 0.67699 0.67699 ...]
            # ...
            # Small objects: ...
            # Medium objects: ...
            # Large objects: ...
            ```
        """

        out_str = (
            f"{self.__class__.__name__}:\n"
            f"Metric target: {self.metric_target}\n"
            f"Class agnostic: {self.is_class_agnostic}\n"
            f"mAP @ 50:95: {self.map50_95:.4f}\n"
            f"mAP @ 50: {self.map50:.4f}\n"
            f"mAP @ 75: {self.map75:.4f}\n"
            f"mAP scores: {self.mAP_scores}\n"
            f"IoU thresh: {self.iou_thresholds}\n"
            f"AP per class:\n"
        )
        if self.ap_per_class.size == 0:
            out_str += "  No results\n"
        for class_id, ap_of_class in zip(self.matched_classes, self.ap_per_class):
            out_str += f"  {class_id}: {ap_of_class}\n"

        indent = "  "
        if self.small_objects is not None:
            indented = indent + str(self.small_objects).replace("\n", f"\n{indent}")
            out_str += f"\nSmall objects:\n{indented}"
        if self.medium_objects is not None:
            indented = indent + str(self.medium_objects).replace("\n", f"\n{indent}")
            out_str += f"\nMedium objects:\n{indented}"
        if self.large_objects is not None:
            indented = indent + str(self.large_objects).replace("\n", f"\n{indent}")
            out_str += f"\nLarge objects:\n{indented}"

        return out_str
````
```python
    def to_pandas(self) -> "pd.DataFrame":
        """
        Convert the result to a pandas DataFrame.

        Returns:
            (pd.DataFrame): The result as a DataFrame.
        """
        ensure_pandas_installed()
        import pandas as pd

        pandas_data = {
            "mAP@50:95": self.map50_95,
            "mAP@50": self.map50,
            "mAP@75": self.map75,
        }

        if self.small_objects is not None:
            small_objects_df = self.small_objects.to_pandas()
            for key, value in small_objects_df.items():
                pandas_data[f"small_objects_{key}"] = value
        if self.medium_objects is not None:
            medium_objects_df = self.medium_objects.to_pandas()
            for key, value in medium_objects_df.items():
                pandas_data[f"medium_objects_{key}"] = value
        if self.large_objects is not None:
            large_objects_df = self.large_objects.to_pandas()
            for key, value in large_objects_df.items():
                pandas_data[f"large_objects_{key}"] = value

        # Average precisions are currently not included in the DataFrame.

        return pd.DataFrame(
            pandas_data,
            index=[0],
        )
```
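`to_pandas` flattens the result, including any size-scoped sub-results, into a single-row DataFrame with prefixed bucket columns, which is handy for logging runs side by side. A short sketch continuing the earlier example, assuming pandas is installed:

```python
df = map_result.to_pandas()
print(df.columns.tolist())
# ['mAP@50:95', 'mAP@50', 'mAP@75',
#  'small_objects_mAP@50:95', ..., 'large_objects_mAP@75']

df.to_csv("map_result.csv", index=False)  # e.g. persist for experiment tracking
```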
```python
    def plot(self):
        """
        Plot the mAP results.

        { align=center width="800" }
        """

        labels = ["mAP@50:95", "mAP@50", "mAP@75"]
        values = [self.map50_95, self.map50, self.map75]
        colors = [LEGACY_COLOR_PALETTE[0]] * 3

        if self.small_objects is not None:
            labels += ["Small: mAP@50:95", "Small: mAP@50", "Small: mAP@75"]
            values += [
                self.small_objects.map50_95,
                self.small_objects.map50,
                self.small_objects.map75,
            ]
            colors += [LEGACY_COLOR_PALETTE[3]] * 3

        if self.medium_objects is not None:
            labels += ["Medium: mAP@50:95", "Medium: mAP@50", "Medium: mAP@75"]
            values += [
                self.medium_objects.map50_95,
                self.medium_objects.map50,
                self.medium_objects.map75,
            ]
            colors += [LEGACY_COLOR_PALETTE[2]] * 3

        if self.large_objects is not None:
            labels += ["Large: mAP@50:95", "Large: mAP@50", "Large: mAP@75"]
            values += [
                self.large_objects.map50_95,
                self.large_objects.map50,
                self.large_objects.map75,
            ]
            colors += [LEGACY_COLOR_PALETTE[4]] * 3

        plt.rcParams["font.family"] = "monospace"

        _, ax = plt.subplots(figsize=(10, 6))
        ax.set_ylim(0, 1)
        ax.set_ylabel("Value", fontweight="bold")
        ax.set_title("Mean Average Precision", fontweight="bold")

        x_positions = range(len(labels))
        bars = ax.bar(x_positions, values, color=colors, align="center")

        ax.set_xticks(x_positions)
        ax.set_xticklabels(labels, rotation=45, ha="right")

        for bar in bars:
            y_value = bar.get_height()
            ax.text(
                bar.get_x() + bar.get_width() / 2,
                y_value + 0.02,
                f"{y_value:.2f}",
                ha="center",
                va="bottom",
            )

        plt.rcParams["font.family"] = "sans-serif"

        plt.tight_layout()
        plt.show()
```