eye_cv-1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eye/__init__.py +115 -0
- eye/__init___supervision_original.py +120 -0
- eye/annotators/__init__.py +0 -0
- eye/annotators/base.py +22 -0
- eye/annotators/core.py +2699 -0
- eye/annotators/line.py +107 -0
- eye/annotators/modern.py +529 -0
- eye/annotators/trace.py +142 -0
- eye/annotators/utils.py +177 -0
- eye/assets/__init__.py +2 -0
- eye/assets/downloader.py +95 -0
- eye/assets/list.py +83 -0
- eye/classification/__init__.py +0 -0
- eye/classification/core.py +188 -0
- eye/config.py +2 -0
- eye/core/__init__.py +0 -0
- eye/core/trackers/__init__.py +1 -0
- eye/core/trackers/botsort_tracker.py +336 -0
- eye/core/trackers/bytetrack_tracker.py +284 -0
- eye/core/trackers/sort_tracker.py +200 -0
- eye/core/tracking.py +146 -0
- eye/dataset/__init__.py +0 -0
- eye/dataset/core.py +919 -0
- eye/dataset/formats/__init__.py +0 -0
- eye/dataset/formats/coco.py +258 -0
- eye/dataset/formats/pascal_voc.py +279 -0
- eye/dataset/formats/yolo.py +272 -0
- eye/dataset/utils.py +259 -0
- eye/detection/__init__.py +0 -0
- eye/detection/auto_convert.py +155 -0
- eye/detection/core.py +1529 -0
- eye/detection/detections_enhanced.py +392 -0
- eye/detection/line_zone.py +859 -0
- eye/detection/lmm.py +184 -0
- eye/detection/overlap_filter.py +270 -0
- eye/detection/tools/__init__.py +0 -0
- eye/detection/tools/csv_sink.py +181 -0
- eye/detection/tools/inference_slicer.py +288 -0
- eye/detection/tools/json_sink.py +142 -0
- eye/detection/tools/polygon_zone.py +202 -0
- eye/detection/tools/smoother.py +123 -0
- eye/detection/tools/smoothing.py +179 -0
- eye/detection/tools/smoothing_config.py +202 -0
- eye/detection/tools/transformers.py +247 -0
- eye/detection/utils.py +1175 -0
- eye/draw/__init__.py +0 -0
- eye/draw/color.py +154 -0
- eye/draw/utils.py +374 -0
- eye/filters.py +112 -0
- eye/geometry/__init__.py +0 -0
- eye/geometry/core.py +128 -0
- eye/geometry/utils.py +47 -0
- eye/keypoint/__init__.py +0 -0
- eye/keypoint/annotators.py +442 -0
- eye/keypoint/core.py +687 -0
- eye/keypoint/skeletons.py +2647 -0
- eye/metrics/__init__.py +21 -0
- eye/metrics/core.py +72 -0
- eye/metrics/detection.py +843 -0
- eye/metrics/f1_score.py +648 -0
- eye/metrics/mean_average_precision.py +628 -0
- eye/metrics/mean_average_recall.py +697 -0
- eye/metrics/precision.py +653 -0
- eye/metrics/recall.py +652 -0
- eye/metrics/utils/__init__.py +0 -0
- eye/metrics/utils/object_size.py +158 -0
- eye/metrics/utils/utils.py +9 -0
- eye/py.typed +0 -0
- eye/quick.py +104 -0
- eye/tracker/__init__.py +0 -0
- eye/tracker/byte_tracker/__init__.py +0 -0
- eye/tracker/byte_tracker/core.py +386 -0
- eye/tracker/byte_tracker/kalman_filter.py +205 -0
- eye/tracker/byte_tracker/matching.py +69 -0
- eye/tracker/byte_tracker/single_object_track.py +178 -0
- eye/tracker/byte_tracker/utils.py +18 -0
- eye/utils/__init__.py +0 -0
- eye/utils/conversion.py +132 -0
- eye/utils/file.py +159 -0
- eye/utils/image.py +794 -0
- eye/utils/internal.py +200 -0
- eye/utils/iterables.py +84 -0
- eye/utils/notebook.py +114 -0
- eye/utils/video.py +307 -0
- eye/utils_eye/__init__.py +1 -0
- eye/utils_eye/geometry.py +71 -0
- eye/utils_eye/nms.py +55 -0
- eye/validators/__init__.py +140 -0
- eye/web.py +271 -0
- eye_cv-1.0.0.dist-info/METADATA +319 -0
- eye_cv-1.0.0.dist-info/RECORD +94 -0
- eye_cv-1.0.0.dist-info/WHEEL +5 -0
- eye_cv-1.0.0.dist-info/licenses/LICENSE +21 -0
- eye_cv-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,392 @@
+"""Core detection management classes with universal auto-conversion."""
+
+import numpy as np
+from typing import Optional, Dict, List, Tuple, Any, Union
+from dataclasses import dataclass, field
+
+
+@dataclass
+class Detections:
+    """Container for object detections with optimized operations.
+
+    Design notes:
+    - Immutable operations (returns new instance instead of modifying)
+    - Built-in caching for expensive computations
+    - Native support for segmentation masks
+    - Better indexing and slicing
+    """
+
+    xyxy: np.ndarray  # Shape: (N, 4) - [x1, y1, x2, y2]
+    confidence: Optional[np.ndarray] = None  # Shape: (N,)
+    class_id: Optional[np.ndarray] = None  # Shape: (N,)
+    tracker_id: Optional[np.ndarray] = None  # Shape: (N,)
+    mask: Optional[np.ndarray] = None  # Shape: (N, H, W) - segmentation masks
+    data: Dict[str, Any] = field(default_factory=dict)  # Additional metadata
+
+    # Cache for expensive computations
+    _area_cache: Optional[np.ndarray] = field(default=None, repr=False, compare=False)
+    _center_cache: Optional[np.ndarray] = field(default=None, repr=False, compare=False)
+
+    def __len__(self) -> int:
+        """Return number of detections."""
+        return len(self.xyxy)
+
+    def __getitem__(self, index) -> 'Detections':
+        """Support indexing and slicing."""
+        if isinstance(index, (int, slice, list, np.ndarray)):
+            return Detections(
+                xyxy=self.xyxy[index],
+                confidence=self.confidence[index] if self.confidence is not None else None,
+                class_id=self.class_id[index] if self.class_id is not None else None,
+                tracker_id=self.tracker_id[index] if self.tracker_id is not None else None,
+                mask=self.mask[index] if self.mask is not None else None,
+                data={k: v[index] if isinstance(v, np.ndarray) else v for k, v in self.data.items()},
+            )
+        raise TypeError(f"Invalid index type: {type(index)}")
+
+    @property
+    def area(self) -> np.ndarray:
+        """Get area of each bounding box (cached)."""
+        if self._area_cache is None:
+            widths = self.xyxy[:, 2] - self.xyxy[:, 0]
+            heights = self.xyxy[:, 3] - self.xyxy[:, 1]
+            self._area_cache = widths * heights
+        return self._area_cache
+
+    @property
+    def center(self) -> np.ndarray:
+        """Get center point of each bounding box (cached). Shape: (N, 2)"""
+        if self._center_cache is None:
+            self._center_cache = np.column_stack([
+                (self.xyxy[:, 0] + self.xyxy[:, 2]) / 2,
+                (self.xyxy[:, 1] + self.xyxy[:, 3]) / 2
+            ])
+        return self._center_cache
+
+    @property
+    def width(self) -> np.ndarray:
+        """Get width of each bounding box."""
+        return self.xyxy[:, 2] - self.xyxy[:, 0]
+
+    @property
+    def height(self) -> np.ndarray:
+        """Get height of each bounding box."""
+        return self.xyxy[:, 3] - self.xyxy[:, 1]
+
+    @property
+    def aspect_ratio(self) -> np.ndarray:
+        """Get aspect ratio (width/height) of each box."""
+        return self.width / np.maximum(self.height, 1e-6)
+
+    def filter(self, mask: np.ndarray) -> 'Detections':
+        """Filter detections using boolean mask."""
+        return self[mask]
+
+    def with_confidence(self, confidence: np.ndarray) -> 'Detections':
+        """Return new Detections with updated confidence."""
+        return Detections(
+            xyxy=self.xyxy,
+            confidence=confidence,
+            class_id=self.class_id,
+            tracker_id=self.tracker_id,
+            mask=self.mask,
+            data=self.data.copy()
+        )
+
+    def with_class_id(self, class_id: np.ndarray) -> 'Detections':
+        """Return new Detections with updated class IDs."""
+        return Detections(
+            xyxy=self.xyxy,
+            confidence=self.confidence,
+            class_id=class_id,
+            tracker_id=self.tracker_id,
+            mask=self.mask,
+            data=self.data.copy()
+        )
+
+    def with_tracker_id(self, tracker_id: np.ndarray) -> 'Detections':
+        """Return new Detections with updated tracker IDs."""
+        return Detections(
+            xyxy=self.xyxy,
+            confidence=self.confidence,
+            class_id=self.class_id,
+            tracker_id=tracker_id,
+            mask=self.mask,
+            data=self.data.copy()
+        )
+
+    @staticmethod
+    def empty() -> 'Detections':
+        """Create empty Detections object."""
+        return Detections(
+            xyxy=np.empty((0, 4), dtype=np.float32),
+            confidence=np.empty(0, dtype=np.float32),
+            class_id=np.empty(0, dtype=int),
+            tracker_id=np.empty(0, dtype=int)
+        )
+
+    @classmethod
+    def from_yolo(cls, results) -> 'Detections':
+        """Create from YOLO/Ultralytics results."""
+        if hasattr(results, 'boxes'):
+            boxes = results.boxes
+            xyxy = boxes.xyxy.cpu().numpy()
+            confidence = boxes.conf.cpu().numpy()
+            class_id = boxes.cls.cpu().numpy().astype(int)
+
+            return cls(
+                xyxy=xyxy,
+                confidence=confidence,
+                class_id=class_id
+            )
+        return cls.empty()
+
+    def merge(self, other: 'Detections') -> 'Detections':
+        """Merge with another Detections object."""
+        return Detections(
+            xyxy=np.vstack([self.xyxy, other.xyxy]),
+            confidence=np.concatenate([self.confidence, other.confidence]) if self.confidence is not None else None,
+            class_id=np.concatenate([self.class_id, other.class_id]) if self.class_id is not None else None,
+            tracker_id=np.concatenate([self.tracker_id, other.tracker_id]) if self.tracker_id is not None else None,
+        )
+
+
+class DetectionManager:
+    """Manages detection history and counting across zones.
+
+    Innovation: Thread-safe, supports multiple metrics, fast lookups.
+    """
+
+    def __init__(self, class_names: Optional[Dict[int, str]] = None):
+        self.class_names = class_names or {}
+        self.tracker_to_zone: Dict[int, Tuple[int, int]] = {}  # tracker_id -> (zone_id, class_id)
+        self.tracker_to_metadata: Dict[int, Dict[str, Any]] = {}
+        self.zone_counts: Dict[int, int] = {}
+
+    def update(
+        self,
+        detections: Detections,
+        zone_detections: List[Detections],
+        metadata: Optional[Dict[str, Any]] = None
+    ) -> Detections:
+        """Update tracking information with zone crossings."""
+        for zone_id, zone_det in enumerate(zone_detections):
+            if len(zone_det) == 0:
+                continue
+
+            for tracker_id, class_id in zip(zone_det.tracker_id, zone_det.class_id):
+                if tracker_id not in self.tracker_to_zone:
+                    self.tracker_to_zone[tracker_id] = (zone_id, class_id)
+                    self.zone_counts[zone_id] = self.zone_counts.get(zone_id, 0) + 1
+
+                    if metadata:
+                        self.tracker_to_metadata[tracker_id] = metadata.copy()
+
+        # Update class IDs based on zone assignment
+        if len(detections) > 0 and detections.tracker_id is not None:
+            new_class_ids = np.array([
+                self.tracker_to_zone.get(tid, (-1, -1))[1]
+                for tid in detections.tracker_id
+            ])
+            detections = detections.with_class_id(new_class_ids)
+            detections = detections.filter(new_class_ids != -1)
+
+        return detections
+
+    def get_zone_count(self, zone_id: int) -> int:
+        """Get count for specific zone."""
+        return self.zone_counts.get(zone_id, 0)
+
+    def get_tracker_info(self, tracker_id: int) -> Optional[Tuple[int, int, Dict]]:
+        """Get zone, class, and metadata for tracker."""
+        if tracker_id in self.tracker_to_zone:
+            zone_id, class_id = self.tracker_to_zone[tracker_id]
+            metadata = self.tracker_to_metadata.get(tracker_id, {})
+            return zone_id, class_id, metadata
+        return None
+
+    def export_csv(self, filepath: str):
+        """Export tracking data to CSV."""
+        import csv
+        with open(filepath, 'w', newline='') as f:
+            writer = csv.writer(f)
+            writer.writerow(['tracker_id', 'zone_id', 'class_id', 'class_name', *self.tracker_to_metadata.get(list(self.tracker_to_metadata.keys())[0], {}).keys()])
+
+            for tracker_id, (zone_id, class_id) in self.tracker_to_zone.items():
+                class_name = self.class_names.get(class_id, str(class_id))
+                metadata = self.tracker_to_metadata.get(tracker_id, {})
+                writer.writerow([tracker_id, zone_id, class_id, class_name, *metadata.values()])
+
+    def clear(self):
+        """Clear all tracking history."""
+        self.tracker_to_zone.clear()
+        self.tracker_to_metadata.clear()
+        self.zone_counts.clear()
+
+
+# Universal auto-conversion methods
+def from_yolo(results: Any) -> Detections:
+    """Convert YOLO (Ultralytics) results to Detections.
+
+    Args:
+        results: YOLO results object or list
+
+    Returns:
+        Detections
+
+    Example:
+        >>> from ultralytics import YOLO
+        >>> model = YOLO("yolo11n.pt")
+        >>> results = model("image.jpg")
+        >>> detections = eye.from_yolo(results)
+    """
+    # Handle list
+    if isinstance(results, list):
+        if len(results) == 0:
+            return Detections.empty()
+        results = results[0]
+
+    # Extract boxes
+    boxes = results.boxes.xyxy.cpu().numpy()
+    conf = results.boxes.conf.cpu().numpy() if results.boxes.conf is not None else None
+    cls = results.boxes.cls.cpu().numpy().astype(int) if results.boxes.cls is not None else None
+
+    # Extract masks if available
+    masks = None
+    if hasattr(results, 'masks') and results.masks is not None:
+        masks = results.masks.data.cpu().numpy()
+
+    det = Detections(xyxy=boxes, confidence=conf, class_id=cls)
+    if masks is not None:
+        det.data['masks'] = masks
+
+    return det
+
+
+def from_pytorch(results: Union[Dict, List[Dict]], conf_threshold: float = 0.0) -> Detections:
+    """Convert PyTorch/torchvision results to Detections.
+
+    Args:
+        results: Dict with 'boxes', 'scores', 'labels' or list of dicts
+        conf_threshold: Confidence threshold
+
+    Returns:
+        Detections
+
+    Example:
+        >>> import torchvision
+        >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
+        >>> results = model(image)
+        >>> detections = eye.from_pytorch(results[0])
+    """
+    # Handle list
+    if isinstance(results, list):
+        if len(results) == 0:
+            return Detections.empty()
+        results = results[0]
+
+    # Extract
+    boxes = results['boxes'].cpu().numpy() if hasattr(results['boxes'], 'cpu') else results['boxes']
+    scores = results['scores'].cpu().numpy() if hasattr(results['scores'], 'cpu') else results['scores']
+    labels = results['labels'].cpu().numpy() if hasattr(results['labels'], 'cpu') else results['labels']
+
+    # Filter by confidence
+    mask = scores >= conf_threshold
+
+    return Detections(
+        xyxy=boxes[mask],
+        confidence=scores[mask],
+        class_id=labels[mask].astype(int)
+    )
+
+
+def from_tensorflow(boxes: np.ndarray, scores: np.ndarray, classes: np.ndarray,
+                    conf_threshold: float = 0.0) -> Detections:
+    """Convert TensorFlow Object Detection API results to Detections.
+
+    Args:
+        boxes: Detection boxes (normalized [0-1] or absolute)
+        scores: Detection scores
+        classes: Detection classes
+        conf_threshold: Confidence threshold
+
+    Returns:
+        Detections
+
+    Example:
+        >>> boxes, scores, classes, num = model(image)
+        >>> detections = eye.from_tensorflow(boxes[0], scores[0], classes[0])
+    """
+    # Filter by confidence
+    mask = scores >= conf_threshold
+    boxes = boxes[mask]
+    scores = scores[mask]
+    classes = classes[mask].astype(int)
+
+    # Convert normalized to absolute if needed
+    if boxes.max() <= 1.0:
+        print("Warning: Boxes appear normalized. Consider denormalizing.")
+
+    return Detections(xyxy=boxes, confidence=scores, class_id=classes)
+
+
+def from_opencv(results: np.ndarray, conf_threshold: float = 0.0) -> Detections:
+    """Convert OpenCV DNN results to Detections.
+
+    Args:
+        results: OpenCV detection output (shape: (1, 1, N, 7))
+        conf_threshold: Confidence threshold
+
+    Returns:
+        Detections
+
+    Example:
+        >>> net = cv2.dnn.readNet("model.caffemodel", "deploy.prototxt")
+        >>> blob = cv2.dnn.blobFromImage(image)
+        >>> net.setInput(blob)
+        >>> results = net.forward()
+        >>> detections = eye.from_opencv(results)
+    """
+    # Reshape to (N, 7)
+    if results.ndim == 4:
+        results = results[0, 0]
+
+    # Filter by confidence
+    mask = results[:, 2] >= conf_threshold
+    filtered = results[mask]
+
+    # Extract [image_id, label, confidence, x1, y1, x2, y2]
+    boxes = filtered[:, 3:7]
+    scores = filtered[:, 2]
+    classes = filtered[:, 1].astype(int)
+
+    return Detections(xyxy=boxes, confidence=scores, class_id=classes)
+
+
+def from_numpy(boxes: np.ndarray, scores: Optional[np.ndarray] = None,
+               classes: Optional[np.ndarray] = None) -> Detections:
+    """Convert raw numpy arrays to Detections.
+
+    Args:
+        boxes: Bounding boxes in xyxy format
+        scores: Confidence scores (optional)
+        classes: Class IDs (optional)
+
+    Returns:
+        Detections
+
+    Example:
+        >>> boxes = np.array([[100, 50, 200, 150], [300, 200, 400, 300]])
+        >>> scores = np.array([0.9, 0.85])
+        >>> classes = np.array([0, 1])
+        >>> detections = eye.from_numpy(boxes, scores, classes)
+    """
+    return Detections(xyxy=boxes, confidence=scores, class_id=classes)
+
+
+# Add class methods to Detections
+Detections.from_yolo = staticmethod(from_yolo)
+Detections.from_pytorch = staticmethod(from_pytorch)
+Detections.from_tensorflow = staticmethod(from_tensorflow)
+Detections.from_opencv = staticmethod(from_opencv)
+Detections.from_numpy = staticmethod(from_numpy)
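
For orientation: the hunk above appears to correspond to eye/detection/detections_enhanced.py, the only file in the listing with +392 lines. The sketch below is illustrative and not part of the diff; it exercises the Detections, DetectionManager, and from_numpy APIs shown above. It assumes the package imports as "eye" (per top_level.txt) and that these names are re-exported at the top level, as the in-code docstring examples (eye.from_numpy(...)) suggest.

    # Illustrative sketch only -- assumes top-level re-exports by the eye package.
    import numpy as np
    import eye

    boxes = np.array([[100, 50, 200, 150], [300, 200, 400, 300]], dtype=np.float32)
    scores = np.array([0.90, 0.85], dtype=np.float32)
    classes = np.array([0, 1])

    detections = eye.from_numpy(boxes, scores, classes)  # raw arrays -> Detections
    print(len(detections))    # 2
    print(detections.area)    # per-box areas, cached after first access
    print(detections.center)  # (N, 2) box centers

    # Operations return new Detections instances rather than mutating in place.
    confident = detections.filter(detections.confidence > 0.87)
    merged = confident.merge(detections)
    first = detections[:1]    # slicing also yields a Detections

    # Zone counting with DetectionManager, given detections that carry
    # tracker_id and class_id (e.g. after running one of the trackers).
    manager = eye.DetectionManager(class_names={0: "person", 1: "car"})
    tracked = eye.Detections(
        xyxy=boxes,
        confidence=scores,
        class_id=classes,
        tracker_id=np.array([7, 8]),
    )
    tracked = manager.update(tracked, zone_detections=[tracked])  # single zone
    print(manager.get_zone_count(0))    # 2
    print(manager.get_tracker_info(7))  # (0, 0, {})

Note that filter, merge, and the with_* methods all build fresh Detections objects, matching the "immutable operations" design note in the class docstring, while area and center memoize their results in the private cache fields.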