eye-cv 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eye/__init__.py +115 -0
- eye/__init___supervision_original.py +120 -0
- eye/annotators/__init__.py +0 -0
- eye/annotators/base.py +22 -0
- eye/annotators/core.py +2699 -0
- eye/annotators/line.py +107 -0
- eye/annotators/modern.py +529 -0
- eye/annotators/trace.py +142 -0
- eye/annotators/utils.py +177 -0
- eye/assets/__init__.py +2 -0
- eye/assets/downloader.py +95 -0
- eye/assets/list.py +83 -0
- eye/classification/__init__.py +0 -0
- eye/classification/core.py +188 -0
- eye/config.py +2 -0
- eye/core/__init__.py +0 -0
- eye/core/trackers/__init__.py +1 -0
- eye/core/trackers/botsort_tracker.py +336 -0
- eye/core/trackers/bytetrack_tracker.py +284 -0
- eye/core/trackers/sort_tracker.py +200 -0
- eye/core/tracking.py +146 -0
- eye/dataset/__init__.py +0 -0
- eye/dataset/core.py +919 -0
- eye/dataset/formats/__init__.py +0 -0
- eye/dataset/formats/coco.py +258 -0
- eye/dataset/formats/pascal_voc.py +279 -0
- eye/dataset/formats/yolo.py +272 -0
- eye/dataset/utils.py +259 -0
- eye/detection/__init__.py +0 -0
- eye/detection/auto_convert.py +155 -0
- eye/detection/core.py +1529 -0
- eye/detection/detections_enhanced.py +392 -0
- eye/detection/line_zone.py +859 -0
- eye/detection/lmm.py +184 -0
- eye/detection/overlap_filter.py +270 -0
- eye/detection/tools/__init__.py +0 -0
- eye/detection/tools/csv_sink.py +181 -0
- eye/detection/tools/inference_slicer.py +288 -0
- eye/detection/tools/json_sink.py +142 -0
- eye/detection/tools/polygon_zone.py +202 -0
- eye/detection/tools/smoother.py +123 -0
- eye/detection/tools/smoothing.py +179 -0
- eye/detection/tools/smoothing_config.py +202 -0
- eye/detection/tools/transformers.py +247 -0
- eye/detection/utils.py +1175 -0
- eye/draw/__init__.py +0 -0
- eye/draw/color.py +154 -0
- eye/draw/utils.py +374 -0
- eye/filters.py +112 -0
- eye/geometry/__init__.py +0 -0
- eye/geometry/core.py +128 -0
- eye/geometry/utils.py +47 -0
- eye/keypoint/__init__.py +0 -0
- eye/keypoint/annotators.py +442 -0
- eye/keypoint/core.py +687 -0
- eye/keypoint/skeletons.py +2647 -0
- eye/metrics/__init__.py +21 -0
- eye/metrics/core.py +72 -0
- eye/metrics/detection.py +843 -0
- eye/metrics/f1_score.py +648 -0
- eye/metrics/mean_average_precision.py +628 -0
- eye/metrics/mean_average_recall.py +697 -0
- eye/metrics/precision.py +653 -0
- eye/metrics/recall.py +652 -0
- eye/metrics/utils/__init__.py +0 -0
- eye/metrics/utils/object_size.py +158 -0
- eye/metrics/utils/utils.py +9 -0
- eye/py.typed +0 -0
- eye/quick.py +104 -0
- eye/tracker/__init__.py +0 -0
- eye/tracker/byte_tracker/__init__.py +0 -0
- eye/tracker/byte_tracker/core.py +386 -0
- eye/tracker/byte_tracker/kalman_filter.py +205 -0
- eye/tracker/byte_tracker/matching.py +69 -0
- eye/tracker/byte_tracker/single_object_track.py +178 -0
- eye/tracker/byte_tracker/utils.py +18 -0
- eye/utils/__init__.py +0 -0
- eye/utils/conversion.py +132 -0
- eye/utils/file.py +159 -0
- eye/utils/image.py +794 -0
- eye/utils/internal.py +200 -0
- eye/utils/iterables.py +84 -0
- eye/utils/notebook.py +114 -0
- eye/utils/video.py +307 -0
- eye/utils_eye/__init__.py +1 -0
- eye/utils_eye/geometry.py +71 -0
- eye/utils_eye/nms.py +55 -0
- eye/validators/__init__.py +140 -0
- eye/web.py +271 -0
- eye_cv-1.0.0.dist-info/METADATA +319 -0
- eye_cv-1.0.0.dist-info/RECORD +94 -0
- eye_cv-1.0.0.dist-info/WHEEL +5 -0
- eye_cv-1.0.0.dist-info/licenses/LICENSE +21 -0
- eye_cv-1.0.0.dist-info/top_level.txt +1 -0
eye/annotators/core.py
ADDED
@@ -0,0 +1,2699 @@
from functools import lru_cache
from math import sqrt
from typing import List, Optional, Tuple, Union

import cv2
import numpy as np
import numpy.typing as npt
from PIL import Image, ImageDraw, ImageFont

from eye.annotators.base import BaseAnnotator, ImageType
from eye.annotators.utils import (
    ColorLookup,
    Trace,
    resolve_color,
    resolve_text_background_xyxy,
)
from eye.config import CLASS_NAME_DATA_FIELD, ORIENTED_BOX_COORDINATES
from eye.detection.core import Detections
from eye.detection.utils import clip_boxes, mask_to_polygons, spread_out_boxes
from eye.draw.color import Color, ColorPalette
from eye.draw.utils import draw_polygon
from eye.geometry.core import Position
from eye.utils.conversion import (
    ensure_cv2_image_for_annotation,
    ensure_pil_image_for_annotation,
)
from eye.utils.image import (
    crop_image,
    letterbox_image,
    overlay_image,
    scale_image,
)
from eye.utils.internal import deprecated

CV2_FONT = cv2.FONT_HERSHEY_SIMPLEX

class BoxAnnotator(BaseAnnotator):
    """
    A class for drawing bounding boxes on an image using provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 2,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            thickness (int): Thickness of the bounding box lines.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.color_lookup: ColorLookup = color_lookup

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with bounding boxes based on the provided detections.

        Args:
            scene (ImageType): The image where bounding boxes will be drawn. `ImageType`
                is a flexible type, accepting either `numpy.ndarray` or
                `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            box_annotator = sv.BoxAnnotator()
            annotated_frame = box_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        for detection_idx in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[detection_idx].astype(int)
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            cv2.rectangle(
                img=scene,
                pt1=(x1, y1),
                pt2=(x2, y2),
                color=color.as_bgr(),
                thickness=self.thickness,
            )
        return scene

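Every annotator in this file resolves its per-detection color through `resolve_color`, substituting `custom_color_lookup` for the configured `ColorLookup` strategy when one is supplied. A minimal sketch of coloring detections by an arbitrary condition — assuming, as the docstring examples suggest, that `Detections` accepts `xyxy` and `confidence` keyword arrays and that an integer lookup array indexes the annotator's palette:

```python
import numpy as np
import eye as sv

# Hypothetical detections: two boxes, confidence drives the palette index.
detections = sv.Detections(
    xyxy=np.array([[10, 10, 80, 80], [90, 40, 160, 120]]),
    confidence=np.array([0.91, 0.42]),
)

# Palette index 0 for confident detections, 1 for the rest.
custom_lookup = (detections.confidence < 0.5).astype(int)

annotated = sv.BoxAnnotator().annotate(
    scene=np.zeros((200, 200, 3), dtype=np.uint8),
    detections=detections,
    custom_color_lookup=custom_lookup,
)
```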
@deprecated(
    "`BoundingBoxAnnotator` is deprecated and has been renamed to `BoxAnnotator`."
    " `BoundingBoxAnnotator` will be removed in eye-0.26.0."
)
class BoundingBoxAnnotator(BaseAnnotator):
    """
    A class for drawing bounding boxes on an image using provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 2,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            thickness (int): Thickness of the bounding box lines.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.color_lookup: ColorLookup = color_lookup

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with bounding boxes based on the provided detections.

        Args:
            scene (ImageType): The image where bounding boxes will be drawn. `ImageType`
                is a flexible type, accepting either `numpy.ndarray` or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            bounding_box_annotator = sv.BoundingBoxAnnotator()
            annotated_frame = bounding_box_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        for detection_idx in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[detection_idx].astype(int)
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            cv2.rectangle(
                img=scene,
                pt1=(x1, y1),
                pt2=(x2, y2),
                color=color.as_bgr(),
                thickness=self.thickness,
            )
        return scene

class OrientedBoxAnnotator(BaseAnnotator):
    """
    A class for drawing oriented bounding boxes on an image using provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 2,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            thickness (int): Thickness of the bounding box lines.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.color_lookup: ColorLookup = color_lookup

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with oriented bounding boxes based on the provided detections.

        Args:
            scene (ImageType): The image where bounding boxes will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import cv2
            import eye as sv
            from ultralytics import YOLO

            image = cv2.imread(<SOURCE_IMAGE_PATH>)
            model = YOLO("yolov8n-obb.pt")

            result = model(image)[0]
            detections = sv.Detections.from_ultralytics(result)

            oriented_box_annotator = sv.OrientedBoxAnnotator()
            annotated_frame = oriented_box_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```
        """  # noqa E501 // docs
        assert isinstance(scene, np.ndarray)
        if detections.data is None or ORIENTED_BOX_COORDINATES not in detections.data:
            return scene
        obb_boxes = np.array(detections.data[ORIENTED_BOX_COORDINATES]).astype(int)

        for detection_idx in range(len(detections)):
            obb = obb_boxes[detection_idx]
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )

            cv2.drawContours(scene, [obb], 0, color.as_bgr(), self.thickness)

        return scene

class MaskAnnotator(BaseAnnotator):
    """
    A class for drawing masks on an image using provided detections.

    !!! warning

        This annotator uses `sv.Detections.mask`.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        opacity: float = 0.5,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            opacity (float): Opacity of the overlay mask. Must be between `0` and `1`.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.color: Union[Color, ColorPalette] = color
        self.opacity = opacity
        self.color_lookup: ColorLookup = color_lookup

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with masks based on the provided detections.

        Args:
            scene (ImageType): The image where masks will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            mask_annotator = sv.MaskAnnotator()
            annotated_frame = mask_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        if detections.mask is None:
            return scene

        colored_mask = np.array(scene, copy=True, dtype=np.uint8)

        for detection_idx in np.flip(np.argsort(detections.area)):
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            mask = detections.mask[detection_idx]
            colored_mask[mask] = color.as_bgr()

        cv2.addWeighted(
            colored_mask, self.opacity, scene, 1 - self.opacity, 0, dst=scene
        )
        return scene

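Note the draw order above: `np.flip(np.argsort(detections.area))` iterates detections from largest to smallest mask area, so a small mask overlapping a large one is painted last and stays visible; a single `cv2.addWeighted` call then blends the fully painted overlay with the scene in one pass. A numpy-only sketch of that ordering:

```python
import numpy as np

areas = np.array([120.0, 900.0, 45.0])  # per-detection mask areas
order = np.flip(np.argsort(areas))      # -> [1, 0, 2]: largest area first
# Index 2 (the smallest mask) is painted last, so it is never hidden
# underneath a larger overlapping mask.
print(order)
```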
class PolygonAnnotator(BaseAnnotator):
    """
    A class for drawing polygons on an image using provided detections.

    !!! warning

        This annotator uses `sv.Detections.mask`.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 2,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            thickness (int): Thickness of the polygon lines.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.color_lookup: ColorLookup = color_lookup

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with polygons based on the provided detections.

        Args:
            scene (ImageType): The image where polygons will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            polygon_annotator = sv.PolygonAnnotator()
            annotated_frame = polygon_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        if detections.mask is None:
            return scene

        for detection_idx in range(len(detections)):
            mask = detections.mask[detection_idx]
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            for polygon in mask_to_polygons(mask=mask):
                scene = draw_polygon(
                    scene=scene,
                    polygon=polygon,
                    color=color,
                    thickness=self.thickness,
                )

        return scene

class ColorAnnotator(BaseAnnotator):
    """
    A class for drawing box masks on an image using provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        opacity: float = 0.5,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            opacity (float): Opacity of the overlay mask. Must be between `0` and `1`.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.color: Union[Color, ColorPalette] = color
        self.color_lookup: ColorLookup = color_lookup
        self.opacity = opacity

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with box masks based on the provided detections.

        Args:
            scene (ImageType): The image where bounding boxes will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            color_annotator = sv.ColorAnnotator()
            annotated_frame = color_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        scene_with_boxes = scene.copy()
        for detection_idx in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[detection_idx].astype(int)
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            cv2.rectangle(
                img=scene_with_boxes,
                pt1=(x1, y1),
                pt2=(x2, y2),
                color=color.as_bgr(),
                thickness=-1,
            )

        cv2.addWeighted(
            scene_with_boxes, self.opacity, scene, 1 - self.opacity, gamma=0, dst=scene
        )
        return scene

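The blend step used by both `MaskAnnotator` and `ColorAnnotator` is a straight alpha composite: `cv2.addWeighted(a, α, b, 1 - α, 0, dst=scene)` computes `α·a + (1 - α)·b` per pixel, written in place into `scene`. A tiny sketch of the arithmetic:

```python
import cv2
import numpy as np

scene = np.full((4, 4, 3), 200, dtype=np.uint8)
overlay = np.zeros_like(scene)  # filled boxes would be painted here
cv2.addWeighted(overlay, 0.5, scene, 0.5, 0, dst=scene)  # blend in place
print(scene[0, 0])  # -> [100 100 100], i.e. 0.5 * 0 + 0.5 * 200
```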
class HaloAnnotator(BaseAnnotator):
    """
    A class for drawing Halos on an image using provided detections.

    !!! warning

        This annotator uses `sv.Detections.mask`.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        opacity: float = 0.8,
        kernel_size: int = 40,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            opacity (float): Opacity of the overlay mask. Must be between `0` and `1`.
            kernel_size (int): The size of the average pooling kernel used for creating
                the halo.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.color: Union[Color, ColorPalette] = color
        self.opacity = opacity
        self.color_lookup: ColorLookup = color_lookup
        self.kernel_size: int = kernel_size

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with halos based on the provided detections.

        Args:
            scene (ImageType): The image where masks will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            halo_annotator = sv.HaloAnnotator()
            annotated_frame = halo_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        if detections.mask is None:
            return scene
        colored_mask = np.zeros_like(scene, dtype=np.uint8)
        fmask = np.array([False] * scene.shape[0] * scene.shape[1]).reshape(
            scene.shape[0], scene.shape[1]
        )

        for detection_idx in np.flip(np.argsort(detections.area)):
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            mask = detections.mask[detection_idx]
            fmask = np.logical_or(fmask, mask)
            color_bgr = color.as_bgr()
            colored_mask[mask] = color_bgr

        colored_mask = cv2.blur(colored_mask, (self.kernel_size, self.kernel_size))
        colored_mask[fmask] = [0, 0, 0]
        gray = cv2.cvtColor(colored_mask, cv2.COLOR_BGR2GRAY)
        alpha = self.opacity * gray / gray.max()
        alpha_mask = alpha[:, :, np.newaxis]
        blended_scene = np.uint8(scene * (1 - alpha_mask) + colored_mask * self.opacity)
        np.copyto(scene, blended_scene)
        return scene

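The halo above is built in three steps: paint each mask solid, box-blur the painted image so the color bleeds outward, then zero the pixels inside the union mask (`fmask`) so only the bleed outside the objects survives. The per-pixel alpha is the blurred image's grayscale intensity scaled by `opacity`, so the glow fades with distance from the mask. A toy numpy sketch of the alpha computation (inside-mask zeroing omitted):

```python
import cv2
import numpy as np

colored = np.zeros((5, 5, 3), dtype=np.uint8)
colored[2, 2] = (0, 0, 255)                 # one painted mask pixel (BGR red)
blurred = cv2.blur(colored, (3, 3))         # color bleeds into the 3x3 block
gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
alpha = 0.8 * gray / gray.max()             # strongest nearest the mask, 0 far away
print(alpha.round(2))
```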
class EllipseAnnotator(BaseAnnotator):
    """
    A class for drawing ellipses on an image using provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 2,
        start_angle: int = -45,
        end_angle: int = 235,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            thickness (int): Thickness of the ellipse lines.
            start_angle (int): Starting angle of the ellipse.
            end_angle (int): Ending angle of the ellipse.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.start_angle: int = start_angle
        self.end_angle: int = end_angle
        self.color_lookup: ColorLookup = color_lookup

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with ellipses based on the provided detections.

        Args:
            scene (ImageType): The image where ellipses will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            ellipse_annotator = sv.EllipseAnnotator()
            annotated_frame = ellipse_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        for detection_idx in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[detection_idx].astype(int)
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            center = (int((x1 + x2) / 2), y2)
            width = x2 - x1
            cv2.ellipse(
                scene,
                center=center,
                axes=(int(width), int(0.35 * width)),
                angle=0.0,
                startAngle=self.start_angle,
                endAngle=self.end_angle,
                color=color.as_bgr(),
                thickness=self.thickness,
                lineType=cv2.LINE_4,
            )
        return scene

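Geometry of the marker above: the ellipse is anchored at the bottom-center of each box, and the default -45° to 235° sweep leaves an 80° gap at the top of the arc, producing the open, broadcast-style ground marker familiar from sports tracking. Note that `cv2.ellipse` interprets `axes` as half-axes, so the drawn arc spans twice the box width. The same arithmetic, stand-alone:

```python
x1, y1, x2, y2 = 40, 30, 120, 150        # example box
center = (int((x1 + x2) / 2), y2)        # bottom-center anchor -> (80, 150)
width = x2 - x1                          # 80
axes = (int(width), int(0.35 * width))   # half-axes -> (80, 28)
# Full arc footprint: 160 px wide, 56 px tall, open at the top
# (the 235..315 degree range is never drawn).
```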
class BoxCornerAnnotator(BaseAnnotator):
    """
    A class for drawing box corners on an image using provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 4,
        corner_length: int = 15,
        color_lookup: ColorLookup = ColorLookup.CLASS,
        adaptive: bool = True,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            thickness (int): Thickness of the corner lines.
            corner_length (int): Length of each corner line (used as max when adaptive=True).
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
            adaptive (bool): If True, corner length scales with box size (15-30% of shortest side).
        """
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.corner_length: int = corner_length
        self.color_lookup: ColorLookup = color_lookup
        self.adaptive: bool = adaptive

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with box corners based on the provided detections.

        Args:
            scene (ImageType): The image where box corners will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            corner_annotator = sv.BoxCornerAnnotator()
            annotated_frame = corner_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        for detection_idx in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[detection_idx].astype(int)
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )

            # Adaptive corner length based on box size
            if self.adaptive:
                box_width = x2 - x1
                box_height = y2 - y1
                min_side = min(box_width, box_height)
                # 20-30% of shortest side, minimum 8px, maximum self.corner_length
                corner_length = int(np.clip(min_side * 0.25, 8, self.corner_length))
            else:
                corner_length = self.corner_length

            corners = [(x1, y1), (x2, y1), (x1, y2), (x2, y2)]

            for x, y in corners:
                x_end = x + corner_length if x == x1 else x - corner_length
                cv2.line(
                    scene, (x, y), (x_end, y), color.as_bgr(), thickness=self.thickness
                )

                y_end = y + corner_length if y == y1 else y - corner_length
                cv2.line(
                    scene, (x, y), (x, y_end), color.as_bgr(), thickness=self.thickness
                )
        return scene

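Despite the "15-30%" and "20-30%" wording in the docstring and comment above, the adaptive factor the code actually applies is a flat 25% of the box's shorter side, clipped to `[8, corner_length]`: a 120x40 box with the default `corner_length=15` gets 10 px corners, while anything 60 px or larger on its shorter side saturates at 15 px. The same arithmetic, stand-alone:

```python
import numpy as np

def adaptive_corner_length(w: int, h: int, max_length: int = 15) -> int:
    # 25% of the shorter side, never below 8 px, never above max_length.
    return int(np.clip(min(w, h) * 0.25, 8, max_length))

print(adaptive_corner_length(120, 40))   # -> 10
print(adaptive_corner_length(200, 200))  # -> 15 (saturated at corner_length)
```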
class CircleAnnotator(BaseAnnotator):
    """
    A class for drawing circles on an image using provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 2,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            thickness (int): Thickness of the circle line.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """

        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.color_lookup: ColorLookup = color_lookup

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with circles based on the provided detections.

        Args:
            scene (ImageType): The image where circles will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            circle_annotator = sv.CircleAnnotator()
            annotated_frame = circle_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        for detection_idx in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[detection_idx].astype(int)
            center = ((x1 + x2) // 2, (y1 + y2) // 2)
            distance = sqrt((x1 - center[0]) ** 2 + (y1 - center[1]) ** 2)
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            cv2.circle(
                img=scene,
                center=center,
                radius=int(distance),
                color=color.as_bgr(),
                thickness=self.thickness,
            )

        return scene

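The radius computed above is the distance from the box center to a corner, i.e. half the box diagonal, so each circle exactly circumscribes its bounding box:

```python
from math import sqrt

x1, y1, x2, y2 = 10, 20, 70, 100
center = ((x1 + x2) // 2, (y1 + y2) // 2)  # (40, 60)
radius = sqrt((x1 - center[0]) ** 2 + (y1 - center[1]) ** 2)
print(radius)  # half-diagonal of a 60x80 box: sqrt(30**2 + 40**2) = 50.0
```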
class DotAnnotator(BaseAnnotator):
    """
    A class for drawing dots on an image at specific coordinates based on provided
    detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        radius: int = 4,
        position: Position = Position.CENTER,
        color_lookup: ColorLookup = ColorLookup.CLASS,
        outline_thickness: int = 0,
        outline_color: Union[Color, ColorPalette] = Color.BLACK,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            radius (int): Radius of the drawn dots.
            position (Position): The anchor position for placing the dot.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
            outline_thickness (int): Thickness of the outline of the dot.
            outline_color (Union[Color, ColorPalette]): The color or color palette to
                use for outline. It is activated by setting outline_thickness to a value
                greater than 0.
        """
        self.color: Union[Color, ColorPalette] = color
        self.radius: int = radius
        self.position: Position = position
        self.color_lookup: ColorLookup = color_lookup
        self.outline_thickness = outline_thickness
        self.outline_color: Union[Color, ColorPalette] = outline_color

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with dots based on the provided detections.

        Args:
            scene (ImageType): The image where dots will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            dot_annotator = sv.DotAnnotator()
            annotated_frame = dot_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```

        
        """
        assert isinstance(scene, np.ndarray)
        xy = detections.get_anchors_coordinates(anchor=self.position)
        for detection_idx in range(len(detections)):
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            center = (int(xy[detection_idx, 0]), int(xy[detection_idx, 1]))

            cv2.circle(scene, center, self.radius, color.as_bgr(), -1)
            if self.outline_thickness:
                outline_color = resolve_color(
                    color=self.outline_color,
                    detections=detections,
                    detection_idx=detection_idx,
                    color_lookup=self.color_lookup
                    if custom_color_lookup is None
                    else custom_color_lookup,
                )
                cv2.circle(
                    scene,
                    center,
                    self.radius,
                    outline_color.as_bgr(),
                    self.outline_thickness,
                )
        return scene

class LabelAnnotator(BaseAnnotator):
    """
    A class for annotating labels on an image using provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        text_color: Union[Color, ColorPalette] = Color.WHITE,
        text_scale: float = 0.5,
        text_thickness: int = 1,
        text_padding: int = 10,
        text_position: Position = Position.TOP_LEFT,
        color_lookup: ColorLookup = ColorLookup.CLASS,
        border_radius: int = 0,
        smart_position: bool = False,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating the text background.
            text_color (Union[Color, ColorPalette]): The color or color palette to use
                for the text.
            text_scale (float): Font scale for the text.
            text_thickness (int): Thickness of the text characters.
            text_padding (int): Padding around the text within its background box.
            text_position (Position): Position of the text relative to the detection.
                Possible values are defined in the `Position` enum.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
            border_radius (int): The radius to apply round edges. If the selected
                value is higher than the lower dimension, width or height, is clipped.
            smart_position (bool): Spread out the labels to avoid overlapping.
        """
        self.border_radius: int = border_radius
        self.color: Union[Color, ColorPalette] = color
        self.text_color: Union[Color, ColorPalette] = text_color
        self.text_scale: float = text_scale
        self.text_thickness: int = text_thickness
        self.text_padding: int = text_padding
        self.text_anchor: Position = text_position
        self.color_lookup: ColorLookup = color_lookup
        self.smart_position = smart_position

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        labels: Optional[List[str]] = None,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with labels based on the provided detections.

        Args:
            scene (ImageType): The image where labels will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            labels (Optional[List[str]]): Custom labels for each detection.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows to override the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
                or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            labels = [
                f"{class_name} {confidence:.2f}"
                for class_name, confidence
                in zip(detections['class_name'], detections.confidence)
            ]

            label_annotator = sv.LabelAnnotator(text_position=sv.Position.CENTER)
            annotated_frame = label_annotator.annotate(
                scene=image.copy(),
                detections=detections,
                labels=labels
            )
            ```

        
        """

        assert isinstance(scene, np.ndarray)
        self._validate_labels(labels, detections)

        labels = self._get_labels_text(detections, labels)
        label_properties = self._get_label_properties(detections, labels)

        if self.smart_position:
            xyxy = label_properties[:, :4]
            xyxy = spread_out_boxes(xyxy)
            label_properties[:, :4] = xyxy

        self._draw_labels(
            scene=scene,
            labels=labels,
            label_properties=label_properties,
            detections=detections,
            custom_color_lookup=custom_color_lookup,
        )

        return scene

    def _validate_labels(self, labels: Optional[List[str]], detections: Detections):
        if labels is not None and len(labels) != len(detections):
            raise ValueError(
                f"The number of labels ({len(labels)}) does not match the "
                f"number of detections ({len(detections)}). Each detection "
                f"should have exactly 1 label."
            )

    def _get_label_properties(
        self,
        detections: Detections,
        labels: List[str],
    ) -> np.ndarray:
        """
        Calculate the numerical properties required to draw the labels on the image.

        Returns:
            (np.ndarray): An array of label properties, containing columns:
                `min_x`, `min_y`, `max_x`, `max_y`, `padded_text_height`.
        """
        label_properties = []
        anchors_coordinates = detections.get_anchors_coordinates(
            anchor=self.text_anchor
        ).astype(int)

        for label, center_coords in zip(labels, anchors_coordinates):
            (text_w, text_h) = cv2.getTextSize(
                text=label,
                fontFace=CV2_FONT,
                fontScale=self.text_scale,
                thickness=self.text_thickness,
            )[0]

            width_padded = text_w + 2 * self.text_padding
            height_padded = text_h + 2 * self.text_padding

            text_background_xyxy = resolve_text_background_xyxy(
                center_coordinates=tuple(center_coords),
                text_wh=(width_padded, height_padded),
                position=self.text_anchor,
            )

            label_properties.append(
                [
                    *text_background_xyxy,
                    text_h,
                ]
            )

        return np.array(label_properties).reshape(-1, 5)

    @staticmethod
    def _get_labels_text(
        detections: Detections, custom_labels: Optional[List[str]]
    ) -> List[str]:
        if custom_labels is not None:
            return custom_labels

        labels = []
        for idx in range(len(detections)):
            if CLASS_NAME_DATA_FIELD in detections.data:
                labels.append(detections.data[CLASS_NAME_DATA_FIELD][idx])
            elif detections.class_id is not None:
                labels.append(str(detections.class_id[idx]))
            else:
                labels.append(str(idx))
        return labels

    def _draw_labels(
        self,
        scene: np.ndarray,
        labels: List[str],
        label_properties: np.ndarray,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray],
    ) -> None:
        assert len(labels) == len(label_properties) == len(detections), (
            f"Number of label properties ({len(label_properties)}), "
            f"labels ({len(labels)}) and detections ({len(detections)}) "
            "do not match."
        )

        color_lookup = (
            custom_color_lookup
            if custom_color_lookup is not None
            else self.color_lookup
        )

        for idx, label_property in enumerate(label_properties):
            background_color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=idx,
                color_lookup=color_lookup,
            )
            text_color = resolve_color(
                color=self.text_color,
                detections=detections,
                detection_idx=idx,
                color_lookup=color_lookup,
            )

            box_xyxy = label_property[:4]
            text_height_padded = label_property[4]
            self.draw_rounded_rectangle(
                scene=scene,
                xyxy=box_xyxy,
                color=background_color.as_bgr(),
                border_radius=self.border_radius,
            )

            text_x = box_xyxy[0] + self.text_padding
            text_y = box_xyxy[1] + self.text_padding + text_height_padded
            cv2.putText(
                img=scene,
                text=labels[idx],
                org=(text_x, text_y),
                fontFace=CV2_FONT,
                fontScale=self.text_scale,
                color=text_color.as_bgr(),
                thickness=self.text_thickness,
                lineType=cv2.LINE_AA,
            )

    @staticmethod
    def draw_rounded_rectangle(
        scene: np.ndarray,
        xyxy: Tuple[int, int, int, int],
        color: Tuple[int, int, int],
        border_radius: int,
    ) -> np.ndarray:
        x1, y1, x2, y2 = xyxy
        width = x2 - x1
        height = y2 - y1

        border_radius = min(border_radius, min(width, height) // 2)

        rectangle_coordinates = [
            ((x1 + border_radius, y1), (x2 - border_radius, y2)),
            ((x1, y1 + border_radius), (x2, y2 - border_radius)),
        ]
        circle_centers = [
            (x1 + border_radius, y1 + border_radius),
            (x2 - border_radius, y1 + border_radius),
            (x1 + border_radius, y2 - border_radius),
            (x2 - border_radius, y2 - border_radius),
        ]

        for coordinates in rectangle_coordinates:
            cv2.rectangle(
                img=scene,
                pt1=coordinates[0],
                pt2=coordinates[1],
                color=color,
                thickness=-1,
            )
        for center in circle_centers:
            cv2.circle(
                img=scene,
                center=center,
                radius=border_radius,
                color=color,
                thickness=-1,
            )
        return scene

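`draw_rounded_rectangle` composes the rounded background from two overlapping filled rectangles (one inset horizontally, one vertically) plus four filled corner circles of radius `border_radius`, with the radius first clamped to half the smaller dimension so the corners can never overshoot. A minimal standalone rendering of the same idea:

```python
import cv2
import numpy as np

canvas = np.zeros((60, 120, 3), dtype=np.uint8)
x1, y1, x2, y2, r = 10, 10, 110, 50, 12
r = min(r, min(x2 - x1, y2 - y1) // 2)  # clamp, as in the annotator
# Two overlapping rectangles leave notches only at the four corners...
cv2.rectangle(canvas, (x1 + r, y1), (x2 - r, y2), (255, 255, 255), -1)
cv2.rectangle(canvas, (x1, y1 + r), (x2, y2 - r), (255, 255, 255), -1)
# ...which the four corner circles then round off.
for cx, cy in [(x1 + r, y1 + r), (x2 - r, y1 + r),
               (x1 + r, y2 - r), (x2 - r, y2 - r)]:
    cv2.circle(canvas, (cx, cy), r, (255, 255, 255), -1)
```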
|
|
1339
|
+
class RichLabelAnnotator(BaseAnnotator):
    """
    A class for annotating labels on an image using provided detections,
    with support for Unicode characters by using a custom font.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        text_color: Union[Color, ColorPalette] = Color.WHITE,
        font_path: Optional[str] = None,
        font_size: int = 10,
        text_padding: int = 10,
        text_position: Position = Position.TOP_LEFT,
        color_lookup: ColorLookup = ColorLookup.CLASS,
        border_radius: int = 0,
        smart_position: bool = False,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating the text background.
            text_color (Union[Color, ColorPalette]): The color to use for the text.
            font_path (Optional[str]): Path to the font file (e.g., ".ttf" or ".otf")
                to use for rendering text. If `None`, the default PIL font will be used.
            font_size (int): Font size for the text.
            text_padding (int): Padding around the text within its background box.
            text_position (Position): Position of the text relative to the detection.
                Possible values are defined in the `Position` enum.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
            border_radius (int): The radius of the label's rounded corners. If the
                value exceeds the smaller of the label's width or height, it is
                clipped to that dimension.
            smart_position (bool): Spread out the labels to avoid overlapping.
        """
        self.color = color
        self.text_color = text_color
        self.text_padding = text_padding
        self.text_anchor = text_position
        self.color_lookup = color_lookup
        self.border_radius = border_radius
        self.smart_position = smart_position
        self.font = self._load_font(font_size, font_path)

    @ensure_pil_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        labels: Optional[List[str]] = None,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with labels based on the provided
        detections, with support for Unicode characters.

        Args:
            scene (ImageType): The image where labels will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            labels (Optional[List[str]]): Custom labels for each detection.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows overriding the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            labels = [
                f"{class_name} {confidence:.2f}"
                for class_name, confidence
                in zip(detections['class_name'], detections.confidence)
            ]

            rich_label_annotator = sv.RichLabelAnnotator(font_path="path/to/font.ttf")
            annotated_frame = rich_label_annotator.annotate(
                scene=image.copy(),
                detections=detections,
                labels=labels
            )
            ```
        """
        assert isinstance(scene, Image.Image)
        self._validate_labels(labels, detections)

        draw = ImageDraw.Draw(scene)
        labels = self._get_labels_text(detections, labels)
        label_properties = self._get_label_properties(draw, detections, labels)

        if self.smart_position:
            xyxy = label_properties[:, :4]
            xyxy = spread_out_boxes(xyxy)
            label_properties[:, :4] = xyxy

        self._draw_labels(
            draw=draw,
            labels=labels,
            label_properties=label_properties,
            detections=detections,
            custom_color_lookup=custom_color_lookup,
        )

        return scene

    def _validate_labels(self, labels: Optional[List[str]], detections: Detections):
        if labels is not None and len(labels) != len(detections):
            raise ValueError(
                f"The number of labels ({len(labels)}) does not match the "
                f"number of detections ({len(detections)}). Each detection "
                f"should have exactly 1 label."
            )

    def _get_label_properties(
        self, draw, detections: Detections, labels: List[str]
    ) -> np.ndarray:
        """
        Calculate the numerical properties required to draw the labels on the image.

        Returns:
            (np.ndarray): An array of label properties, containing columns:
                `min_x`, `min_y`, `max_x`, `max_y`, `text_left_coordinate`,
                `text_top_coordinate`. The first 4 values are already padded
                with `text_padding`.
        """
        label_properties = []

        anchor_coordinates = detections.get_anchors_coordinates(
            anchor=self.text_anchor
        ).astype(int)

        for label, center_coords in zip(labels, anchor_coordinates):
            text_left, text_top, text_right, text_bottom = draw.textbbox(
                (0, 0), label, font=self.font
            )
            text_width = text_right - text_left
            text_height = text_bottom - text_top
            width_padded = text_width + 2 * self.text_padding
            height_padded = text_height + 2 * self.text_padding

            text_background_xyxy = resolve_text_background_xyxy(
                center_coordinates=tuple(center_coords),
                text_wh=(width_padded, height_padded),
                position=self.text_anchor,
            )

            label_properties.append([*text_background_xyxy, text_left, text_top])

        return np.array(label_properties).reshape(-1, 6)

    @staticmethod
    def _get_labels_text(
        detections: Detections, custom_labels: Optional[List[str]]
    ) -> List[str]:
        if custom_labels is not None:
            return custom_labels

        labels = []
        for idx in range(len(detections)):
            if CLASS_NAME_DATA_FIELD in detections.data:
                labels.append(detections.data[CLASS_NAME_DATA_FIELD][idx])
            elif detections.class_id is not None:
                labels.append(str(detections.class_id[idx]))
            else:
                labels.append(str(idx))
        return labels

    def _draw_labels(
        self,
        draw,
        labels: List[str],
        label_properties: np.ndarray,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray],
    ) -> None:
        assert len(labels) == len(label_properties) == len(detections), (
            f"Number of label properties ({len(label_properties)}), "
            f"labels ({len(labels)}) and detections ({len(detections)}) "
            "do not match."
        )
        color_lookup = (
            custom_color_lookup
            if custom_color_lookup is not None
            else self.color_lookup
        )

        for idx, label_property in enumerate(label_properties):
            background_color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=idx,
                color_lookup=color_lookup,
            )
            text_color = resolve_color(
                color=self.text_color,
                detections=detections,
                detection_idx=idx,
                color_lookup=color_lookup,
            )

            box_xyxy = label_property[:4]
            text_left = label_property[4]
            text_top = label_property[5]
            label_x_position = box_xyxy[0] + self.text_padding - text_left
            label_y_position = box_xyxy[1] + self.text_padding - text_top

            draw.rounded_rectangle(
                tuple(box_xyxy),
                radius=self.border_radius,
                fill=background_color.as_rgb(),
                outline=None,
            )
            draw.text(
                xy=(label_x_position, label_y_position),
                text=labels[idx],
                font=self.font,
                fill=text_color.as_rgb(),
            )

    @staticmethod
    def _load_font(font_size: int, font_path: Optional[str]):
        def load_default_font(size):
            try:
                return ImageFont.load_default(size)
            except TypeError:
                return ImageFont.load_default()

        if font_path is None:
            return load_default_font(font_size)

        try:
            return ImageFont.truetype(font_path, font_size)
        except OSError:
            print(f"Font path '{font_path}' not found. Using PIL's default font.")
            return load_default_font(font_size)

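`_load_font` wraps `ImageFont.load_default(size)` in a `TypeError` guard because the `size` argument was only added to Pillow's `load_default` in recent releases (10.1+); older versions take no arguments and return a single fixed-size bitmap font. A sketch of the same probe in isolation:

```python
from PIL import ImageFont

def load_default_font(size: int):
    try:
        # Newer Pillow (>= 10.1): returns a scalable default font at `size`.
        return ImageFont.load_default(size)
    except TypeError:
        # Older Pillow: load_default() takes no arguments (fixed-size bitmap font).
        return ImageFont.load_default()

font = load_default_font(24)
```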
class IconAnnotator(BaseAnnotator):
    """
    A class for drawing an icon on an image, using provided detections.
    """

    def __init__(
        self,
        icon_resolution_wh: Tuple[int, int] = (64, 64),
        icon_position: Position = Position.TOP_CENTER,
        offset_xy: Tuple[int, int] = (0, 0),
    ):
        """
        Args:
            icon_resolution_wh (Tuple[int, int]): The size of drawn icons.
                All icons will be resized to this resolution, keeping the aspect ratio.
            icon_position (Position): The position of the icon.
            offset_xy (Tuple[int, int]): The offset to apply to the icon position,
                in pixels. Can be both positive and negative.
        """
        self.icon_resolution_wh = icon_resolution_wh
        self.position = icon_position
        self.offset_xy = offset_xy

    @ensure_cv2_image_for_annotation
    def annotate(
        self, scene: ImageType, detections: Detections, icon_path: Union[str, List[str]]
    ) -> ImageType:
        """
        Annotates the given scene with given icons.

        Args:
            scene (ImageType): The image where labels will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            icon_path (Union[str, List[str]]): The path to the PNG image to use as an
                icon. Must be a single path or a list of paths, one for each detection.
                Pass an empty string `""` to draw nothing.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            available_icons = ["roboflow.png", "lenny.png"]
            icon_paths = [np.random.choice(available_icons) for _ in detections]

            icon_annotator = sv.IconAnnotator()
            annotated_frame = icon_annotator.annotate(
                scene=image.copy(),
                detections=detections,
                icon_path=icon_paths
            )
            ```
        """
        assert isinstance(scene, np.ndarray)
        if isinstance(icon_path, list) and len(icon_path) != len(detections):
            raise ValueError(
                f"The number of icon paths provided ({len(icon_path)}) does not match "
                f"the number of detections ({len(detections)}). Either provide a single"
                f" icon path or one for each detection."
            )

        xy = detections.get_anchors_coordinates(anchor=self.position).astype(int)

        for detection_idx in range(len(detections)):
            current_path = (
                icon_path if isinstance(icon_path, str) else icon_path[detection_idx]
            )
            if current_path == "":
                continue
            icon = self._load_icon(current_path)
            icon_h, icon_w = icon.shape[:2]

            x = int(xy[detection_idx, 0] - icon_w / 2 + self.offset_xy[0])
            y = int(xy[detection_idx, 1] - icon_h / 2 + self.offset_xy[1])

            scene[:] = overlay_image(scene, icon, (x, y))
        return scene

    @lru_cache
    def _load_icon(self, icon_path: str) -> np.ndarray:
        icon = cv2.imread(icon_path, cv2.IMREAD_UNCHANGED)
        if icon is None:
            raise FileNotFoundError(
                f"Error: Couldn't load the icon image from {icon_path}"
            )
        icon = letterbox_image(image=icon, resolution_wh=self.icon_resolution_wh)
        return icon

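Note that `@lru_cache` on the bound `_load_icon` method keys the cache on the `(self, icon_path)` pair, so each annotator instance keeps its own icon cache and is itself held alive by that cache. If that mattered for a long-running process, one alternative is a module-level cached loader keyed only by path and resolution; a sketch, under the assumption that the same `letterbox_image` helper is importable from the package's image utilities:

```python
from functools import lru_cache
from typing import Tuple

import cv2
import numpy as np

from eye.utils.image import letterbox_image  # assumed import path

@lru_cache(maxsize=64)
def load_icon_cached(icon_path: str, resolution_wh: Tuple[int, int]) -> np.ndarray:
    # Keyed by (path, resolution) only; holds no reference to any annotator.
    icon = cv2.imread(icon_path, cv2.IMREAD_UNCHANGED)
    if icon is None:
        raise FileNotFoundError(f"Couldn't load the icon image from {icon_path}")
    return letterbox_image(image=icon, resolution_wh=resolution_wh)
```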
class BlurAnnotator(BaseAnnotator):
    """
    A class for blurring regions in an image using provided detections.
    """

    def __init__(self, kernel_size: int = 15):
        """
        Args:
            kernel_size (int): The size of the average pooling kernel used for blurring.
        """
        self.kernel_size: int = kernel_size

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
    ) -> ImageType:
        """
        Annotates the given scene by blurring regions based on the provided detections.

        Args:
            scene (ImageType): The image where blurring will be applied.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            blur_annotator = sv.BlurAnnotator()
            annotated_frame = blur_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```
        """
        assert isinstance(scene, np.ndarray)
        image_height, image_width = scene.shape[:2]
        clipped_xyxy = clip_boxes(
            xyxy=detections.xyxy, resolution_wh=(image_width, image_height)
        ).astype(int)

        for x1, y1, x2, y2 in clipped_xyxy:
            roi = scene[y1:y2, x1:x2]
            roi = cv2.blur(roi, (self.kernel_size, self.kernel_size))
            scene[y1:y2, x1:x2] = roi

        return scene

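`clip_boxes` runs before the slicing loop because detections can extend past the frame, and a negative coordinate would not fail loudly: numpy interprets it as counting from the end of the axis, which typically yields an empty (and silently skipped) slice. A minimal illustration with hypothetical numbers:

```python
import numpy as np

frame = np.arange(100).reshape(10, 10)

# Unclipped box with x1 = -3: numpy reads -3 as "third column from the right",
# so the slice is empty instead of covering columns 0..3.
assert frame[2:5, -3:4].size == 0

# Clipping to the frame bounds first recovers the intended region.
x1, y1, x2, y2 = np.clip([-3, 2, 4, 5], 0, 10)
assert frame[y1:y2, x1:x2].shape == (3, 4)
```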
class TraceAnnotator(BaseAnnotator):
    """
    A class for drawing trace paths on an image based on detection coordinates.

    !!! warning

        This annotator uses the `sv.Detections.tracker_id`. Read
        [here](/latest/trackers/) to learn how to plug
        tracking into your inference pipeline.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        position: Position = Position.CENTER,
        trace_length: int = 30,
        thickness: int = 2,
        color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color to draw the trace, can be
                a single color or a color palette.
            position (Position): The position of the trace.
                Defaults to `CENTER`.
            trace_length (int): The maximum length of the trace in terms of historical
                points. Defaults to `30`.
            thickness (int): The thickness of the trace lines. Defaults to `2`.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.color: Union[Color, ColorPalette] = color
        self.trace = Trace(max_size=trace_length, anchor=position)
        self.thickness = thickness
        self.color_lookup: ColorLookup = color_lookup

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Draws trace paths on the frame based on the detection coordinates provided.

        Args:
            scene (ImageType): The image on which the traces will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): The detections which include coordinates for
                which the traces will be drawn.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows overriding the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv
            from ultralytics import YOLO

            model = YOLO('yolov8x.pt')
            trace_annotator = sv.TraceAnnotator()

            video_info = sv.VideoInfo.from_video_path(video_path='...')
            frames_generator = sv.get_video_frames_generator(source_path='...')
            tracker = sv.ByteTrack()

            with sv.VideoSink(target_path='...', video_info=video_info) as sink:
                for frame in frames_generator:
                    result = model(frame)[0]
                    detections = sv.Detections.from_ultralytics(result)
                    detections = tracker.update_with_detections(detections)
                    annotated_frame = trace_annotator.annotate(
                        scene=frame.copy(),
                        detections=detections)
                    sink.write_frame(frame=annotated_frame)
            ```
        """
        assert isinstance(scene, np.ndarray)
        if detections.tracker_id is None:
            raise ValueError(
                "The `tracker_id` field is missing in the provided detections."
                " See more: https://eye.roboflow.com/latest/how_to/track_objects"
            )

        self.trace.put(detections)
        for detection_idx in range(len(detections)):
            tracker_id = int(detections.tracker_id[detection_idx])
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            xy = self.trace.get(tracker_id=tracker_id)
            if len(xy) > 1:
                scene = cv2.polylines(
                    scene,
                    [xy.astype(np.int32)],
                    False,
                    color=color.as_bgr(),
                    thickness=self.thickness,
                )
        return scene

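The `Trace` object used above lives in `eye/annotators/trace.py`; as a mental model it is a bounded, per-tracker history of anchor points, so each call to `put` appends the current anchors and old points fall off once `max_size` is reached. A rough behavioral sketch (illustrative only, not the package's actual implementation):

```python
from collections import defaultdict, deque

import numpy as np

class TraceSketch:
    """Bounded per-tracker history of anchor points (illustrative)."""

    def __init__(self, max_size: int = 30):
        self.points = defaultdict(lambda: deque(maxlen=max_size))

    def put(self, tracker_ids, anchors) -> None:
        # One (x, y) anchor appended per tracked detection.
        for tracker_id, xy in zip(tracker_ids, anchors):
            self.points[int(tracker_id)].append(tuple(xy))

    def get(self, tracker_id: int) -> np.ndarray:
        return np.array(self.points[tracker_id], dtype=np.float32).reshape(-1, 2)

trace = TraceSketch(max_size=3)
for point in [(10, 10), (12, 11), (15, 13), (18, 16)]:
    trace.put([7], [point])
assert len(trace.get(7)) == 3  # the oldest point (10, 10) has been dropped
```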
class HeatMapAnnotator(BaseAnnotator):
    """
    A class for drawing heatmaps on an image based on provided detections.
    Heat accumulates over time and is drawn as a semi-transparent overlay
    of blurred circles.
    """

    def __init__(
        self,
        position: Position = Position.BOTTOM_CENTER,
        opacity: float = 0.2,
        radius: int = 40,
        kernel_size: int = 25,
        top_hue: int = 0,
        low_hue: int = 125,
    ):
        """
        Args:
            position (Position): The position of the heatmap. Defaults to
                `BOTTOM_CENTER`.
            opacity (float): Opacity of the overlay mask, between 0 and 1.
            radius (int): Radius of the heat circle.
            kernel_size (int): Kernel size for blurring the heatmap.
            top_hue (int): Hue at the top of the heatmap. Defaults to 0 (red).
            low_hue (int): Hue at the bottom of the heatmap. Defaults to 125 (blue).
        """
        self.position = position
        self.opacity = opacity
        self.radius = radius
        self.kernel_size = kernel_size
        self.top_hue = top_hue
        self.low_hue = low_hue
        self.heat_mask: Optional[npt.NDArray[np.float32]] = None

    @ensure_cv2_image_for_annotation
    def annotate(self, scene: ImageType, detections: Detections) -> ImageType:
        """
        Annotates the scene with a heatmap based on the provided detections.

        Args:
            scene (ImageType): The image where the heatmap will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv
            from ultralytics import YOLO

            model = YOLO('yolov8x.pt')

            heat_map_annotator = sv.HeatMapAnnotator()

            video_info = sv.VideoInfo.from_video_path(video_path='...')
            frames_generator = sv.get_video_frames_generator(source_path='...')

            with sv.VideoSink(target_path='...', video_info=video_info) as sink:
                for frame in frames_generator:
                    result = model(frame)[0]
                    detections = sv.Detections.from_ultralytics(result)
                    annotated_frame = heat_map_annotator.annotate(
                        scene=frame.copy(),
                        detections=detections)
                    sink.write_frame(frame=annotated_frame)
            ```
        """
        assert isinstance(scene, np.ndarray)
        if self.heat_mask is None:
            self.heat_mask = np.zeros(scene.shape[:2], dtype=np.float32)

        mask = np.zeros(scene.shape[:2])
        for xy in detections.get_anchors_coordinates(self.position):
            x, y = int(xy[0]), int(xy[1])
            cv2.circle(
                img=mask,
                center=(x, y),
                radius=self.radius,
                color=(1,),
                thickness=-1,  # fill
            )
        self.heat_mask = mask + self.heat_mask
        temp = self.heat_mask.copy()
        temp = self.low_hue - temp / temp.max() * (self.low_hue - self.top_hue)
        temp = temp.astype(np.uint8)
        if self.kernel_size is not None:
            temp = cv2.blur(temp, (self.kernel_size, self.kernel_size))
        hsv = np.zeros(scene.shape)
        hsv[..., 0] = temp
        hsv[..., 1] = 255
        hsv[..., 2] = 255
        temp = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
        mask = cv2.cvtColor(self.heat_mask.astype(np.uint8), cv2.COLOR_GRAY2BGR) > 0
        scene[mask] = cv2.addWeighted(temp, self.opacity, scene, 1 - self.opacity, 0)[
            mask
        ]
        return scene

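The line `temp = self.low_hue - temp / temp.max() * (self.low_hue - self.top_hue)` linearly maps accumulated heat onto OpenCV's hue scale: a pixel at maximum heat lands exactly on `top_hue`, and a pixel with zero heat on `low_hue`. With the defaults (`top_hue=0`, `low_hue=125`), half the maximum heat maps to hue `125 - 0.5 * 125 = 62.5` (greenish), and the maximum to `0` (red). A standalone check of that mapping:

```python
import numpy as np

top_hue, low_hue = 0, 125
heat = np.array([0.0, 5.0, 10.0])  # accumulated visit counts per pixel

hue = low_hue - heat / heat.max() * (low_hue - top_hue)
# zero heat -> low_hue (blue end), max heat -> top_hue (red end)
assert hue[0] == 125.0 and hue[1] == 62.5 and hue[2] == 0.0
```

Note that `temp.max()` is zero until the first anchor is drawn, so annotating before any detections arrive makes the division produce NaNs (numpy emits a runtime warning rather than raising).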
class PixelateAnnotator(BaseAnnotator):
    """
    A class for pixelating regions in an image using provided detections.
    """

    def __init__(self, pixel_size: int = 20):
        """
        Args:
            pixel_size (int): The size of the pixelation.
        """
        self.pixel_size: int = pixel_size

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
    ) -> ImageType:
        """
        Annotates the given scene by pixelating regions based on the provided
        detections.

        Args:
            scene (ImageType): The image where pixelating will be applied.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            pixelate_annotator = sv.PixelateAnnotator()
            annotated_frame = pixelate_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```
        """
        assert isinstance(scene, np.ndarray)
        image_height, image_width = scene.shape[:2]
        clipped_xyxy = clip_boxes(
            xyxy=detections.xyxy, resolution_wh=(image_width, image_height)
        ).astype(int)

        for x1, y1, x2, y2 in clipped_xyxy:
            roi = scene[y1:y2, x1:x2]
            # Shrink the ROI by `pixel_size`, then scale it back up with
            # nearest-neighbor interpolation to produce the blocky effect.
            downscaled_roi = cv2.resize(
                src=roi, dsize=None, fx=1 / self.pixel_size, fy=1 / self.pixel_size
            )
            pixelated_roi = cv2.resize(
                src=downscaled_roi,
                dsize=(roi.shape[1], roi.shape[0]),
                interpolation=cv2.INTER_NEAREST,
            )

            scene[y1:y2, x1:x2] = pixelated_roi

        return scene

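The pixelation is a round trip through a lower resolution: shrinking the ROI by `1 / pixel_size` collapses each `pixel_size x pixel_size` neighborhood to one pixel, and resizing back with `INTER_NEAREST` replicates that value across the whole block. For a 100x100 ROI with `pixel_size=20`, the intermediate image is 5x5 and each restored block is a flat 20x20 square:

```python
import cv2
import numpy as np

pixel_size = 20
roi = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)

small = cv2.resize(roi, dsize=None, fx=1 / pixel_size, fy=1 / pixel_size)
assert small.shape[:2] == (5, 5)

pixelated = cv2.resize(
    small, dsize=(roi.shape[1], roi.shape[0]), interpolation=cv2.INTER_NEAREST
)
# Every 20x20 block is now a single flat color.
assert (pixelated[:20, :20] == pixelated[0, 0]).all()
```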
class TriangleAnnotator(BaseAnnotator):
    """
    A class for drawing triangle markers on an image at specific coordinates based on
    provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        base: int = 10,
        height: int = 10,
        position: Position = Position.TOP_CENTER,
        color_lookup: ColorLookup = ColorLookup.CLASS,
        outline_thickness: int = 0,
        outline_color: Union[Color, ColorPalette] = Color.BLACK,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            base (int): The base width of the triangle.
            height (int): The height of the triangle.
            position (Position): The anchor position for placing the triangle.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
            outline_thickness (int): Thickness of the outline of the triangle.
            outline_color (Union[Color, ColorPalette]): The color or color palette to
                use for the outline. The outline is only drawn when
                `outline_thickness` is greater than 0.
        """
        self.color: Union[Color, ColorPalette] = color
        self.base: int = base
        self.height: int = height
        self.position: Position = position
        self.color_lookup: ColorLookup = color_lookup
        self.outline_thickness: int = outline_thickness
        self.outline_color: Union[Color, ColorPalette] = outline_color

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with triangles based on the provided detections.

        Args:
            scene (ImageType): The image where triangles will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows overriding the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            triangle_annotator = sv.TriangleAnnotator()
            annotated_frame = triangle_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```
        """
        assert isinstance(scene, np.ndarray)
        xy = detections.get_anchors_coordinates(anchor=self.position)
        for detection_idx in range(len(detections)):
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            tip_x, tip_y = int(xy[detection_idx, 0]), int(xy[detection_idx, 1])
            vertices = np.array(
                [
                    [tip_x - self.base // 2, tip_y - self.height],
                    [tip_x + self.base // 2, tip_y - self.height],
                    [tip_x, tip_y],
                ],
                np.int32,
            )

            cv2.fillPoly(scene, [vertices], color.as_bgr())
            if self.outline_thickness:
                outline_color = resolve_color(
                    color=self.outline_color,
                    detections=detections,
                    detection_idx=detection_idx,
                    color_lookup=self.color_lookup
                    if custom_color_lookup is None
                    else custom_color_lookup,
                )
                cv2.polylines(
                    scene,
                    [vertices],
                    True,
                    outline_color.as_bgr(),
                    thickness=self.outline_thickness,
                )
        return scene

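The vertex arithmetic places a downward-pointing marker whose tip sits exactly on the chosen anchor. For example, with the default `base=10`, `height=10` and an anchor at `(100, 50)`:

```python
tip_x, tip_y = 100, 50  # anchor, e.g. TOP_CENTER of a bounding box
base, height = 10, 10

vertices = [
    (tip_x - base // 2, tip_y - height),  # (95, 40): left end of the base
    (tip_x + base // 2, tip_y - height),  # (105, 40): right end of the base
    (tip_x, tip_y),                       # (100, 50): tip, touching the anchor
]
```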
class RoundBoxAnnotator(BaseAnnotator):
    """
    A class for drawing bounding boxes with round edges on an image
    using provided detections.
    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 2,
        color_lookup: ColorLookup = ColorLookup.CLASS,
        roundness: float = 0.6,
    ):
        """
        Args:
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            thickness (int): Thickness of the bounding box lines.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
            roundness (float): Fraction of edge roundness for the bounding box.
                Must be a float in the range 0 < roundness <= 1.0. The corner
                radius is derived from this fraction of the shorter side
                (width or height).
        """
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.color_lookup: ColorLookup = color_lookup
        if not 0 < roundness <= 1.0:
            raise ValueError("roundness attribute must be float between (0, 1.0]")
        self.roundness: float = roundness

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with bounding boxes with rounded edges
        based on the provided detections.

        Args:
            scene (ImageType): The image where rounded bounding boxes will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows overriding the default color mapping strategy.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            round_box_annotator = sv.RoundBoxAnnotator()
            annotated_frame = round_box_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```
        """
        assert isinstance(scene, np.ndarray)
        for detection_idx in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[detection_idx].astype(int)
            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )

            radius = (
                int((x2 - x1) // 2 * self.roundness)
                if abs(x1 - x2) < abs(y1 - y2)
                else int((y2 - y1) // 2 * self.roundness)
            )

            circle_coordinates = [
                ((x1 + radius), (y1 + radius)),
                ((x2 - radius), (y1 + radius)),
                ((x2 - radius), (y2 - radius)),
                ((x1 + radius), (y2 - radius)),
            ]

            line_coordinates = [
                ((x1 + radius, y1), (x2 - radius, y1)),
                ((x2, y1 + radius), (x2, y2 - radius)),
                ((x1 + radius, y2), (x2 - radius, y2)),
                ((x1, y1 + radius), (x1, y2 - radius)),
            ]

            start_angles = (180, 270, 0, 90)
            end_angles = (270, 360, 90, 180)

            for center_coordinates, line, start_angle, end_angle in zip(
                circle_coordinates, line_coordinates, start_angles, end_angles
            ):
                cv2.ellipse(
                    img=scene,
                    center=center_coordinates,
                    axes=(radius, radius),
                    angle=0,
                    startAngle=start_angle,
                    endAngle=end_angle,
                    color=color.as_bgr(),
                    thickness=self.thickness,
                )

                cv2.line(
                    img=scene,
                    pt1=line[0],
                    pt2=line[1],
                    color=color.as_bgr(),
                    thickness=self.thickness,
                )

        return scene

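Each rounded box is assembled from four 90-degree elliptical arcs joined by four straight edges. OpenCV measures arc angles clockwise from the positive x-axis, which is why the `(start, end)` pairs map to corners as: top-left `(180, 270)`, top-right `(270, 360)`, bottom-right `(0, 90)`, bottom-left `(90, 180)`. The corner radius comes from the shorter side of the box, for example:

```python
x1, y1, x2, y2 = 0, 0, 100, 60  # box 100 wide, 60 tall
roundness = 0.6

# Same rule as above: half the shorter side, scaled by `roundness`.
if abs(x1 - x2) < abs(y1 - y2):
    radius = int((x2 - x1) // 2 * roundness)
else:
    radius = int((y2 - y1) // 2 * roundness)

assert radius == 18  # int(60 // 2 * 0.6)
```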
class PercentageBarAnnotator(BaseAnnotator):
    """
    A class for drawing percentage bars on an image using provided detections.
    """

    def __init__(
        self,
        height: int = 16,
        width: int = 80,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        border_color: Color = Color.BLACK,
        position: Position = Position.TOP_CENTER,
        color_lookup: ColorLookup = ColorLookup.CLASS,
        border_thickness: Optional[int] = None,
    ):
        """
        Args:
            height (int): The height in pixels of the percentage bar.
            width (int): The width in pixels of the percentage bar.
            color (Union[Color, ColorPalette]): The color or color palette to use for
                annotating detections.
            border_color (Color): The color of the border lines.
            position (Position): The anchor position of drawing the percentage bar.
            color_lookup (ColorLookup): Strategy for mapping colors to annotations.
                Options are `INDEX`, `CLASS`, `TRACK`.
            border_thickness (Optional[int]): The thickness of the border lines.
                If `None`, it defaults to 15% of the bar height.
        """
        self.height: int = height
        self.width: int = width
        self.color: Union[Color, ColorPalette] = color
        self.border_color: Color = border_color
        self.position: Position = position
        self.color_lookup: ColorLookup = color_lookup

        if border_thickness is None:
            self.border_thickness = int(0.15 * self.height)
        else:
            self.border_thickness = border_thickness

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
        custom_values: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the given scene with percentage bars based on the provided
        detections. The percentage bars visually represent the confidence or custom
        values associated with each detection.

        Args:
            scene (ImageType): The image where percentage bars will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows overriding the default color mapping strategy.
            custom_values (Optional[np.ndarray]): Custom values array to use instead
                of the default detection confidences. This array should have the
                same length as the number of detections and contain a value between
                0 and 1 (inclusive) for each detection, representing the percentage
                to be displayed.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            percentage_bar_annotator = sv.PercentageBarAnnotator()
            annotated_frame = percentage_bar_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```
        """
        assert isinstance(scene, np.ndarray)
        self.validate_custom_values(custom_values=custom_values, detections=detections)

        anchors = detections.get_anchors_coordinates(anchor=self.position)
        for detection_idx in range(len(detections)):
            anchor = anchors[detection_idx]
            border_coordinates = self.calculate_border_coordinates(
                anchor_xy=(int(anchor[0]), int(anchor[1])),
                border_wh=(self.width, self.height),
                position=self.position,
            )
            border_width = border_coordinates[1][0] - border_coordinates[0][0]

            if custom_values is not None:
                value = custom_values[detection_idx]
            else:
                assert detections.confidence is not None  # MyPy type hint
                value = detections.confidence[detection_idx]

            color = resolve_color(
                color=self.color,
                detections=detections,
                detection_idx=detection_idx,
                color_lookup=self.color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            cv2.rectangle(
                img=scene,
                pt1=border_coordinates[0],
                pt2=(
                    border_coordinates[0][0] + int(border_width * value),
                    border_coordinates[1][1],
                ),
                color=color.as_bgr(),
                thickness=-1,
            )
            cv2.rectangle(
                img=scene,
                pt1=border_coordinates[0],
                pt2=border_coordinates[1],
                color=self.border_color.as_bgr(),
                thickness=self.border_thickness,
            )
        return scene

    @staticmethod
    def calculate_border_coordinates(
        anchor_xy: Tuple[int, int], border_wh: Tuple[int, int], position: Position
    ) -> Tuple[Tuple[int, int], Tuple[int, int]]:
        cx, cy = anchor_xy
        width, height = border_wh

        if position == Position.TOP_LEFT:
            return (cx - width, cy - height), (cx, cy)
        elif position == Position.TOP_CENTER:
            return (cx - width // 2, cy), (cx + width // 2, cy - height)
        elif position == Position.TOP_RIGHT:
            return (cx, cy), (cx + width, cy - height)
        elif position == Position.CENTER_LEFT:
            return (cx - width, cy - height // 2), (cx, cy + height // 2)
        elif position == Position.CENTER or position == Position.CENTER_OF_MASS:
            return (
                (cx - width // 2, cy - height // 2),
                (cx + width // 2, cy + height // 2),
            )
        elif position == Position.CENTER_RIGHT:
            return (cx, cy - height // 2), (cx + width, cy + height // 2)
        elif position == Position.BOTTOM_LEFT:
            return (cx - width, cy), (cx, cy + height)
        elif position == Position.BOTTOM_CENTER:
            return (cx - width // 2, cy), (cx + width // 2, cy + height)
        elif position == Position.BOTTOM_RIGHT:
            return (cx, cy), (cx + width, cy + height)

    @staticmethod
    def validate_custom_values(
        custom_values: Optional[Union[np.ndarray, List[float]]], detections: Detections
    ) -> None:
        if custom_values is None:
            if detections.confidence is None:
                raise ValueError(
                    "The provided detections do not contain confidence values. "
                    "Please provide `custom_values` or ensure that the detections "
                    "contain confidence values (e.g. by using a different model)."
                )

        else:
            if not isinstance(custom_values, (np.ndarray, list)):
                raise TypeError(
                    "custom_values must be either a numpy array or a list of floats."
                )

            if len(custom_values) != len(detections):
                raise ValueError(
                    "The length of custom_values must match the number of detections."
                )

            if not all(0 <= value <= 1 for value in custom_values):
                raise ValueError("All values in custom_values must be between 0 and 1.")

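`custom_values` lets the bar display any per-detection quantity once it is normalized into `[0, 1]`, not just model confidence. A hedged usage sketch; the relative-area normalization is an illustrative choice, and `box_area` is assumed to be available on `Detections` as in the upstream supervision API:

```python
import numpy as np
import eye as sv

image = ...
detections = sv.Detections(...)

# Show each box's area relative to the largest detection (illustrative).
areas = detections.box_area  # assumed property, one value per detection
custom_values = areas / areas.max()

percentage_bar_annotator = sv.PercentageBarAnnotator()
annotated_frame = percentage_bar_annotator.annotate(
    scene=image.copy(),
    detections=detections,
    custom_values=custom_values,
)
```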
class CropAnnotator(BaseAnnotator):
    """
    A class for drawing scaled up crops of detections on the scene.
    """

    def __init__(
        self,
        position: Position = Position.TOP_CENTER,
        scale_factor: float = 2.0,
        border_color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        border_thickness: int = 2,
        border_color_lookup: ColorLookup = ColorLookup.CLASS,
    ):
        """
        Args:
            position (Position): The anchor position for placing the cropped and scaled
                part of the detection in the scene.
            scale_factor (float): The factor by which to scale the cropped image part. A
                factor of 2, for example, would double the size of the cropped area,
                allowing for a closer view of the detection.
            border_color (Union[Color, ColorPalette]): The color or color palette to
                use for annotating border around the cropped area.
            border_thickness (int): The thickness of the border around the cropped area.
            border_color_lookup (ColorLookup): Strategy for mapping colors to
                annotations. Options are `INDEX`, `CLASS`, `TRACK`.
        """
        self.position: Position = position
        self.scale_factor: float = scale_factor
        self.border_color: Union[Color, ColorPalette] = border_color
        self.border_thickness: int = border_thickness
        self.border_color_lookup: ColorLookup = border_color_lookup

    @ensure_cv2_image_for_annotation
    def annotate(
        self,
        scene: ImageType,
        detections: Detections,
        custom_color_lookup: Optional[np.ndarray] = None,
    ) -> ImageType:
        """
        Annotates the provided scene with scaled and cropped parts of the image based
        on the provided detections. Each detection is cropped from the original scene
        and scaled according to the annotator's scale factor before being placed back
        onto the scene at the specified position.

        Args:
            scene (ImageType): The image where cropped detection will be placed.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.
            custom_color_lookup (Optional[np.ndarray]): Custom color lookup array.
                Allows overriding the default color mapping strategy.

        Returns:
            The annotated image.

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            crop_annotator = sv.CropAnnotator()
            annotated_frame = crop_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```
        """
        assert isinstance(scene, np.ndarray)
        crops = [
            crop_image(image=scene, xyxy=xyxy) for xyxy in detections.xyxy.astype(int)
        ]
        resized_crops = [
            scale_image(image=crop, scale_factor=self.scale_factor) for crop in crops
        ]
        anchors = detections.get_anchors_coordinates(anchor=self.position).astype(int)

        for idx, (resized_crop, anchor) in enumerate(zip(resized_crops, anchors)):
            crop_wh = resized_crop.shape[1], resized_crop.shape[0]
            (x1, y1), (x2, y2) = self.calculate_crop_coordinates(
                anchor=anchor, crop_wh=crop_wh, position=self.position
            )
            scene = overlay_image(image=scene, overlay=resized_crop, anchor=(x1, y1))
            color = resolve_color(
                color=self.border_color,
                detections=detections,
                detection_idx=idx,
                color_lookup=self.border_color_lookup
                if custom_color_lookup is None
                else custom_color_lookup,
            )
            cv2.rectangle(
                img=scene,
                pt1=(x1, y1),
                pt2=(x2, y2),
                color=color.as_bgr(),
                thickness=self.border_thickness,
            )

        return scene

    @staticmethod
    def calculate_crop_coordinates(
        anchor: Tuple[int, int], crop_wh: Tuple[int, int], position: Position
    ) -> Tuple[Tuple[int, int], Tuple[int, int]]:
        anchor_x, anchor_y = anchor
        width, height = crop_wh

        if position == Position.TOP_LEFT:
            return (anchor_x - width, anchor_y - height), (anchor_x, anchor_y)
        elif position == Position.TOP_CENTER:
            return (
                (anchor_x - width // 2, anchor_y - height),
                (anchor_x + width // 2, anchor_y),
            )
        elif position == Position.TOP_RIGHT:
            return (anchor_x, anchor_y - height), (anchor_x + width, anchor_y)
        elif position == Position.CENTER_LEFT:
            return (
                (anchor_x - width, anchor_y - height // 2),
                (anchor_x, anchor_y + height // 2),
            )
        elif position == Position.CENTER or position == Position.CENTER_OF_MASS:
            return (
                (anchor_x - width // 2, anchor_y - height // 2),
                (anchor_x + width // 2, anchor_y + height // 2),
            )
        elif position == Position.CENTER_RIGHT:
            return (
                (anchor_x, anchor_y - height // 2),
                (anchor_x + width, anchor_y + height // 2),
            )
        elif position == Position.BOTTOM_LEFT:
            return (anchor_x - width, anchor_y), (anchor_x, anchor_y + height)
        elif position == Position.BOTTOM_CENTER:
            return (
                (anchor_x - width // 2, anchor_y),
                (anchor_x + width // 2, anchor_y + height),
            )
        elif position == Position.BOTTOM_RIGHT:
            return (anchor_x, anchor_y), (anchor_x + width, anchor_y + height)

class BackgroundOverlayAnnotator(BaseAnnotator):
    """
    A class for drawing a colored overlay on the background of an image outside
    the region of detections.

    If masks are provided, the background is colored outside the masks.
    If masks are not provided, the background is colored outside the bounding boxes.

    You can use the `force_box` parameter to force the annotator to use bounding boxes.

    !!! warning

        This annotator uses `sv.Detections.mask`.
    """

    def __init__(
        self,
        color: Color = Color.BLACK,
        opacity: float = 0.5,
        force_box: bool = False,
    ):
        """
        Args:
            color (Color): The color to use for annotating detections.
            opacity (float): Opacity of the overlay mask. Must be between `0` and `1`.
            force_box (bool): If `True`, forces the annotator to use bounding boxes when
                masks are provided in the supplied sv.Detections.
        """
        self.color: Color = color
        self.opacity = opacity
        self.force_box = force_box

    @ensure_cv2_image_for_annotation
    def annotate(self, scene: ImageType, detections: Detections) -> ImageType:
        """
        Applies a colored overlay to the scene outside of the detected regions.

        Args:
            scene (ImageType): The image where masks will be drawn.
                `ImageType` is a flexible type, accepting either `numpy.ndarray`
                or `PIL.Image.Image`.
            detections (Detections): Object detections to annotate.

        Returns:
            The annotated image, matching the type of `scene` (`numpy.ndarray`
            or `PIL.Image.Image`)

        Example:
            ```python
            import eye as sv

            image = ...
            detections = sv.Detections(...)

            background_overlay_annotator = sv.BackgroundOverlayAnnotator()
            annotated_frame = background_overlay_annotator.annotate(
                scene=image.copy(),
                detections=detections
            )
            ```
        """
        assert isinstance(scene, np.ndarray)
        colored_mask = np.full_like(scene, self.color.as_bgr(), dtype=np.uint8)

        cv2.addWeighted(
            scene, 1 - self.opacity, colored_mask, self.opacity, 0, dst=colored_mask
        )

        if detections.mask is None or self.force_box:
            for x1, y1, x2, y2 in detections.xyxy.astype(int):
                colored_mask[y1:y2, x1:x2] = scene[y1:y2, x1:x2]
        else:
            for mask in detections.mask:
                colored_mask[mask] = scene[mask]

        np.copyto(scene, colored_mask)
        return scene
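Because the method ends with `np.copyto(scene, colored_mask)`, the overlay is written back into the very array the caller passed in, which is why the docstring example annotates `image.copy()` rather than `image`. A quick sketch confirming the in-place behavior (constructing `Detections` from a bare `xyxy` array is assumed to work here, as it does in the upstream supervision API):

```python
import numpy as np
import eye as sv

image = np.full((480, 640, 3), 255, dtype=np.uint8)  # white frame
detections = sv.Detections(xyxy=np.array([[100, 100, 200, 200]], dtype=float))

annotator = sv.BackgroundOverlayAnnotator(color=sv.Color.BLACK, opacity=0.5)

frame = image.copy()
annotator.annotate(scene=frame, detections=detections)

# The buffer passed as `scene` was modified in place: the background darkened,
# while the detected region kept its original pixels.
assert not np.array_equal(frame, image)
assert np.array_equal(frame[100:200, 100:200], image[100:200, 100:200])
```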