eye_cv-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. eye/__init__.py +115 -0
  2. eye/__init___supervision_original.py +120 -0
  3. eye/annotators/__init__.py +0 -0
  4. eye/annotators/base.py +22 -0
  5. eye/annotators/core.py +2699 -0
  6. eye/annotators/line.py +107 -0
  7. eye/annotators/modern.py +529 -0
  8. eye/annotators/trace.py +142 -0
  9. eye/annotators/utils.py +177 -0
  10. eye/assets/__init__.py +2 -0
  11. eye/assets/downloader.py +95 -0
  12. eye/assets/list.py +83 -0
  13. eye/classification/__init__.py +0 -0
  14. eye/classification/core.py +188 -0
  15. eye/config.py +2 -0
  16. eye/core/__init__.py +0 -0
  17. eye/core/trackers/__init__.py +1 -0
  18. eye/core/trackers/botsort_tracker.py +336 -0
  19. eye/core/trackers/bytetrack_tracker.py +284 -0
  20. eye/core/trackers/sort_tracker.py +200 -0
  21. eye/core/tracking.py +146 -0
  22. eye/dataset/__init__.py +0 -0
  23. eye/dataset/core.py +919 -0
  24. eye/dataset/formats/__init__.py +0 -0
  25. eye/dataset/formats/coco.py +258 -0
  26. eye/dataset/formats/pascal_voc.py +279 -0
  27. eye/dataset/formats/yolo.py +272 -0
  28. eye/dataset/utils.py +259 -0
  29. eye/detection/__init__.py +0 -0
  30. eye/detection/auto_convert.py +155 -0
  31. eye/detection/core.py +1529 -0
  32. eye/detection/detections_enhanced.py +392 -0
  33. eye/detection/line_zone.py +859 -0
  34. eye/detection/lmm.py +184 -0
  35. eye/detection/overlap_filter.py +270 -0
  36. eye/detection/tools/__init__.py +0 -0
  37. eye/detection/tools/csv_sink.py +181 -0
  38. eye/detection/tools/inference_slicer.py +288 -0
  39. eye/detection/tools/json_sink.py +142 -0
  40. eye/detection/tools/polygon_zone.py +202 -0
  41. eye/detection/tools/smoother.py +123 -0
  42. eye/detection/tools/smoothing.py +179 -0
  43. eye/detection/tools/smoothing_config.py +202 -0
  44. eye/detection/tools/transformers.py +247 -0
  45. eye/detection/utils.py +1175 -0
  46. eye/draw/__init__.py +0 -0
  47. eye/draw/color.py +154 -0
  48. eye/draw/utils.py +374 -0
  49. eye/filters.py +112 -0
  50. eye/geometry/__init__.py +0 -0
  51. eye/geometry/core.py +128 -0
  52. eye/geometry/utils.py +47 -0
  53. eye/keypoint/__init__.py +0 -0
  54. eye/keypoint/annotators.py +442 -0
  55. eye/keypoint/core.py +687 -0
  56. eye/keypoint/skeletons.py +2647 -0
  57. eye/metrics/__init__.py +21 -0
  58. eye/metrics/core.py +72 -0
  59. eye/metrics/detection.py +843 -0
  60. eye/metrics/f1_score.py +648 -0
  61. eye/metrics/mean_average_precision.py +628 -0
  62. eye/metrics/mean_average_recall.py +697 -0
  63. eye/metrics/precision.py +653 -0
  64. eye/metrics/recall.py +652 -0
  65. eye/metrics/utils/__init__.py +0 -0
  66. eye/metrics/utils/object_size.py +158 -0
  67. eye/metrics/utils/utils.py +9 -0
  68. eye/py.typed +0 -0
  69. eye/quick.py +104 -0
  70. eye/tracker/__init__.py +0 -0
  71. eye/tracker/byte_tracker/__init__.py +0 -0
  72. eye/tracker/byte_tracker/core.py +386 -0
  73. eye/tracker/byte_tracker/kalman_filter.py +205 -0
  74. eye/tracker/byte_tracker/matching.py +69 -0
  75. eye/tracker/byte_tracker/single_object_track.py +178 -0
  76. eye/tracker/byte_tracker/utils.py +18 -0
  77. eye/utils/__init__.py +0 -0
  78. eye/utils/conversion.py +132 -0
  79. eye/utils/file.py +159 -0
  80. eye/utils/image.py +794 -0
  81. eye/utils/internal.py +200 -0
  82. eye/utils/iterables.py +84 -0
  83. eye/utils/notebook.py +114 -0
  84. eye/utils/video.py +307 -0
  85. eye/utils_eye/__init__.py +1 -0
  86. eye/utils_eye/geometry.py +71 -0
  87. eye/utils_eye/nms.py +55 -0
  88. eye/validators/__init__.py +140 -0
  89. eye/web.py +271 -0
  90. eye_cv-1.0.0.dist-info/METADATA +319 -0
  91. eye_cv-1.0.0.dist-info/RECORD +94 -0
  92. eye_cv-1.0.0.dist-info/WHEEL +5 -0
  93. eye_cv-1.0.0.dist-info/licenses/LICENSE +21 -0
  94. eye_cv-1.0.0.dist-info/top_level.txt +1 -0
eye/geometry/core.py ADDED
@@ -0,0 +1,128 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from enum import Enum
+ from math import sqrt
+ from typing import Tuple
+
+
+ class Position(Enum):
+     """
+     Enum representing the position of an anchor point.
+     """
+
+     CENTER = "CENTER"
+     CENTER_LEFT = "CENTER_LEFT"
+     CENTER_RIGHT = "CENTER_RIGHT"
+     TOP_CENTER = "TOP_CENTER"
+     TOP_LEFT = "TOP_LEFT"
+     TOP_RIGHT = "TOP_RIGHT"
+     BOTTOM_LEFT = "BOTTOM_LEFT"
+     BOTTOM_CENTER = "BOTTOM_CENTER"
+     BOTTOM_RIGHT = "BOTTOM_RIGHT"
+     CENTER_OF_MASS = "CENTER_OF_MASS"
+
+     @classmethod
+     def list(cls):
+         return list(map(lambda c: c.value, cls))
+
+
+ @dataclass
+ class Point:
+     x: float
+     y: float
+
+     def as_xy_int_tuple(self) -> Tuple[int, int]:
+         return int(self.x), int(self.y)
+
+     def as_xy_float_tuple(self) -> Tuple[float, float]:
+         return self.x, self.y
+
+
+ @dataclass
+ class Vector:
+     start: Point
+     end: Point
+
+     @property
+     def magnitude(self) -> float:
+         """
+         Calculate the magnitude (length) of the vector.
+
+         Returns:
+             float: The magnitude of the vector.
+         """
+         dx = self.end.x - self.start.x
+         dy = self.end.y - self.start.y
+         return sqrt(dx**2 + dy**2)
+
+     @property
+     def center(self) -> Point:
+         """
+         Calculate the center point of the vector.
+
+         Returns:
+             Point: The center point of the vector.
+         """
+         return Point(
+             x=(self.start.x + self.end.x) / 2,
+             y=(self.start.y + self.end.y) / 2,
+         )
+
+     def cross_product(self, point: Point) -> float:
+         """
+         Calculate the 2D cross product (also known as the vector product or outer
+         product) of the vector and a point, treated as vectors in 2D space.
+
+         Args:
+             point (Point): The point to be evaluated, treated as the endpoint of a
+                 vector originating from the 'start' of the main vector.
+
+         Returns:
+             float: The scalar value of the cross product. It is positive if 'point'
+                 lies to the left of the vector (when moving from 'start' to 'end'),
+                 negative if it lies to the right, and 0 if it is collinear with the
+                 vector.
+         """
+         dx_vector = self.end.x - self.start.x
+         dy_vector = self.end.y - self.start.y
+         dx_point = point.x - self.start.x
+         dy_point = point.y - self.start.y
+         return (dx_vector * dy_point) - (dy_vector * dx_point)
+
+
+ @dataclass
+ class Rect:
+     x: float
+     y: float
+     width: float
+     height: float
+
+     @classmethod
+     def from_xyxy(cls, xyxy: Tuple[float, float, float, float]) -> Rect:
+         x1, y1, x2, y2 = xyxy
+         return cls(x=x1, y=y1, width=x2 - x1, height=y2 - y1)
+
+     @property
+     def top_left(self) -> Point:
+         return Point(x=self.x, y=self.y)
+
+     @property
+     def bottom_right(self) -> Point:
+         return Point(x=self.x + self.width, y=self.y + self.height)
+
+     def pad(self, padding) -> Rect:
+         return Rect(
+             x=self.x - padding,
+             y=self.y - padding,
+             width=self.width + 2 * padding,
+             height=self.height + 2 * padding,
+         )
+
+     def as_xyxy_int_tuple(self) -> Tuple[int, int, int, int]:
+         return (
+             int(self.x),
+             int(self.y),
+             int(self.x + self.width),
+             int(self.y + self.height),
+         )
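
A rough usage sketch for the geometry primitives added above. It assumes the module is importable as `eye.geometry.core` (the path this package itself uses in `eye/geometry/utils.py`); the values are illustrative, not part of the package.

```python
# Minimal sketch of Point / Vector / Rect behavior, assuming the import path
# `eye.geometry.core` used elsewhere in this package.
from eye.geometry.core import Point, Rect, Vector

v = Vector(start=Point(x=0, y=0), end=Point(x=4, y=0))
print(v.magnitude)  # 4.0
print(v.center)     # Point(x=2.0, y=0.0)

# cross_product sign convention from the docstring: positive if the point lies
# to the left of the vector (moving from start to end), negative to the right,
# and 0 if collinear.
print(v.cross_product(Point(x=2, y=1)))   # 4.0
print(v.cross_product(Point(x=2, y=-1)))  # -4.0
print(v.cross_product(Point(x=2, y=0)))   # 0.0

# Rect round-trips between (x, y, width, height) and xyxy representations.
rect = Rect.from_xyxy((10.0, 20.0, 30.0, 60.0))
print(rect.pad(5).as_xyxy_int_tuple())  # (5, 15, 35, 65)
```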
eye/geometry/utils.py ADDED
@@ -0,0 +1,47 @@
+ import numpy as np
+
+ from eye.geometry.core import Point
+
+
+ def get_polygon_center(polygon: np.ndarray) -> Point:
+     """
+     Calculate the center of a polygon. The center is calculated as the center
+     of the solid figure formed by the points of the polygon.
+
+     Parameters:
+         polygon (np.ndarray): A 2-dimensional numpy ndarray representing the
+             vertices of the polygon.
+
+     Returns:
+         Point: The center of the polygon, represented as a
+             Point object with x and y attributes.
+
+     Raises:
+         ValueError: If the polygon has no vertices.
+
+     Examples:
+         ```python
+         import numpy as np
+         import eye as sv
+
+         polygon = np.array([[0, 0], [0, 2], [2, 2], [2, 0]])
+         sv.get_polygon_center(polygon=polygon)
+         # Point(x=1, y=1)
+         ```
+     """
+
+     # This is one of the 3 candidate algorithms considered for centroid calculation.
+     # For a more detailed discussion, see PR #1084 and commit eb33176
+
+     if len(polygon) == 0:
+         raise ValueError("Polygon must have at least one vertex.")
+
+     shift_polygon = np.roll(polygon, -1, axis=0)
+     signed_areas = np.cross(polygon, shift_polygon) / 2
+     if signed_areas.sum() == 0:
+         center = np.mean(polygon, axis=0).round()
+         return Point(x=center[0], y=center[1])
+     centroids = (polygon + shift_polygon) / 3.0
+     center = np.average(centroids, axis=0, weights=signed_areas).round()
+
+     return Point(x=center[0], y=center[1])
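
The centroid logic above decomposes the polygon into triangles anchored at the origin, takes each triangle's centroid `(p_i + p_{i+1}) / 3`, and averages them weighted by signed area. A small, self-contained numpy check of that arithmetic (no package import required; the polygon is the one from the docstring example):

```python
# Replicates the weighted-centroid computation from get_polygon_center above
# using plain numpy, to show what the intermediate arrays look like.
import numpy as np

polygon = np.array([[0, 0], [0, 2], [2, 2], [2, 0]], dtype=float)
shifted = np.roll(polygon, -1, axis=0)

signed_areas = np.cross(polygon, shifted) / 2   # per-triangle signed area
centroids = (polygon + shifted) / 3.0           # per-triangle centroid (third vertex at origin)
center = np.average(centroids, axis=0, weights=signed_areas)
print(center)  # [1. 1.]

# If the total signed area is zero (e.g. all vertices collinear), the package
# function falls back to the plain vertex mean instead of this weighted average.
```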
eye/keypoint/annotators.py ADDED
@@ -0,0 +1,442 @@
+ from abc import ABC, abstractmethod
+ from logging import warn
+ from typing import List, Optional, Tuple, Union
+
+ import cv2
+ import numpy as np
+
+ from eye.annotators.base import ImageType
+ from eye.detection.utils import pad_boxes, spread_out_boxes
+ from eye.draw.color import Color
+ from eye.draw.utils import draw_rounded_rectangle
+ from eye.geometry.core import Rect
+ from eye.keypoint.core import KeyPoints
+ from eye.keypoint.skeletons import SKELETONS_BY_VERTEX_COUNT
+ from eye.utils.conversion import ensure_cv2_image_for_annotation
+
+
+ class BaseKeyPointAnnotator(ABC):
+     @abstractmethod
+     def annotate(self, scene: ImageType, key_points: KeyPoints) -> ImageType:
+         pass
+
+
+ class VertexAnnotator(BaseKeyPointAnnotator):
+     """
+     A class that specializes in drawing skeleton vertices on images. It uses
+     specified key points to determine the locations where the vertices should be
+     drawn.
+     """
+
+     def __init__(
+         self,
+         color: Color = Color.EYE_ORANGE,
+         radius: int = 4,
+     ) -> None:
+         """
+         Args:
+             color (Color): The color to use for annotating key points.
+             radius (int): The radius of the circles used to represent the key
+                 points.
+         """
+         self.color = color
+         self.radius = radius
+
+     @ensure_cv2_image_for_annotation
+     def annotate(self, scene: ImageType, key_points: KeyPoints) -> ImageType:
+         """
+         Annotates the given scene with skeleton vertices based on the provided key
+         points. It draws circles at each key point location.
+
+         Args:
+             scene (ImageType): The image where skeleton vertices will be drawn.
+                 `ImageType` is a flexible type, accepting either `numpy.ndarray` or
+                 `PIL.Image.Image`.
+             key_points (KeyPoints): A collection of key points where each key point
+                 consists of x and y coordinates.
+
+         Returns:
+             The annotated image, matching the type of `scene` (`numpy.ndarray`
+             or `PIL.Image.Image`)
+
+         Example:
+             ```python
+             import eye as sv
+
+             image = ...
+             key_points = sv.KeyPoints(...)
+
+             vertex_annotator = sv.VertexAnnotator(
+                 color=sv.Color.GREEN,
+                 radius=10
+             )
+             annotated_frame = vertex_annotator.annotate(
+                 scene=image.copy(),
+                 key_points=key_points
+             )
+             ```
+
+         ![vertex-annotator-example](https://media.roboflow.com/
+         eye-annotator-examples/vertex-annotator-example.png)
+         """
+         assert isinstance(scene, np.ndarray)
+         if len(key_points) == 0:
+             return scene
+
+         for xy in key_points.xy:
+             for x, y in xy:
+                 cv2.circle(
+                     img=scene,
+                     center=(int(x), int(y)),
+                     radius=self.radius,
+                     color=self.color.as_bgr(),
+                     thickness=-1,
+                 )
+
+         return scene
+
+
+ class EdgeAnnotator(BaseKeyPointAnnotator):
+     """
+     A class that specializes in drawing skeleton edges on images using specified key
+     points. It connects key points with lines to form the skeleton structure.
+     """
+
+     def __init__(
+         self,
+         color: Color = Color.ROBOFLOW,
+         thickness: int = 2,
+         edges: Optional[List[Tuple[int, int]]] = None,
+     ) -> None:
+         """
+         Args:
+             color (Color): The color to use for the edges.
+             thickness (int): The thickness of the edges.
+             edges (Optional[List[Tuple[int, int]]]): The edges to draw.
+                 If set to `None`, will attempt to select automatically.
+         """
+         self.color = color
+         self.thickness = thickness
+         self.edges = edges
+
+     @ensure_cv2_image_for_annotation
+     def annotate(self, scene: ImageType, key_points: KeyPoints) -> ImageType:
+         """
+         Annotates the given scene by drawing lines between specified key points to form
+         edges.
+
+         Args:
+             scene (ImageType): The image where skeleton edges will be drawn. `ImageType`
+                 is a flexible type, accepting either `numpy.ndarray` or
+                 `PIL.Image.Image`.
+             key_points (KeyPoints): A collection of key points where each key point
+                 consists of x and y coordinates.
+
+         Returns:
+             The annotated image, matching the type of `scene` (`numpy.ndarray`
+             or `PIL.Image.Image`)
+
+         Example:
+             ```python
+             import eye as sv
+
+             image = ...
+             key_points = sv.KeyPoints(...)
+
+             edge_annotator = sv.EdgeAnnotator(
+                 color=sv.Color.GREEN,
+                 thickness=5
+             )
+             annotated_frame = edge_annotator.annotate(
+                 scene=image.copy(),
+                 key_points=key_points
+             )
+             ```
+
+         ![edge-annotator-example](https://media.roboflow.com/
+         eye-annotator-examples/edge-annotator-example.png)
+         """
+         assert isinstance(scene, np.ndarray)
+         if len(key_points) == 0:
+             return scene
+
+         for xy in key_points.xy:
+             edges = self.edges
+             if not edges:
+                 edges = SKELETONS_BY_VERTEX_COUNT.get(len(xy))
+             if not edges:
+                 warn(f"No skeleton found with {len(xy)} vertices")
+                 return scene
+
+             for class_a, class_b in edges:
+                 xy_a = xy[class_a - 1]
+                 xy_b = xy[class_b - 1]
+                 missing_a = np.allclose(xy_a, 0)
+                 missing_b = np.allclose(xy_b, 0)
+                 if missing_a or missing_b:
+                     continue
+
+                 cv2.line(
+                     img=scene,
+                     pt1=(int(xy_a[0]), int(xy_a[1])),
+                     pt2=(int(xy_b[0]), int(xy_b[1])),
+                     color=self.color.as_bgr(),
+                     thickness=self.thickness,
+                 )
+
+         return scene
+
+
+ class VertexLabelAnnotator:
+     """
+     A class that draws labels of skeleton vertices on images. It uses specified key
+     points to determine the locations where the vertices should be drawn.
+     """
+
+     def __init__(
+         self,
+         color: Union[Color, List[Color]] = Color.EYE_ORANGE,
+         text_color: Union[Color, List[Color]] = Color.WHITE,
+         text_scale: float = 0.5,
+         text_thickness: int = 1,
+         text_padding: int = 10,
+         border_radius: int = 0,
+         smart_position: bool = False,
+     ):
+         """
+         Args:
+             color (Union[Color, List[Color]]): The color to use for each
+                 keypoint label. If a list is provided, the colors will be used in order
+                 for each keypoint.
+             text_color (Union[Color, List[Color]]): The color to use
+                 for the labels. If a list is provided, the colors will be used in order
+                 for each keypoint.
+             text_scale (float): The scale of the text.
+             text_thickness (int): The thickness of the text.
+             text_padding (int): The padding around the text.
+             border_radius (int): The radius of the rounded corners of the
+                 boxes. Set to a high value to produce circles.
+             smart_position (bool): Spread out the labels to avoid overlap.
+         """
+         self.border_radius: int = border_radius
+         self.color: Union[Color, List[Color]] = color
+         self.text_color: Union[Color, List[Color]] = text_color
+         self.text_scale: float = text_scale
+         self.text_thickness: int = text_thickness
+         self.text_padding: int = text_padding
+         self.smart_position = smart_position
+
+     def annotate(
+         self,
+         scene: ImageType,
+         key_points: KeyPoints,
+         labels: Optional[List[str]] = None,
+     ) -> ImageType:
+         """
+         Annotates the given scene with labels for skeleton vertices, using the
+         provided key points to determine where each label should be drawn.
+
+         Args:
+             scene (ImageType): The image where vertex labels will be drawn. `ImageType`
+                 is a flexible type, accepting either `numpy.ndarray` or
+                 `PIL.Image.Image`.
+             key_points (KeyPoints): A collection of key points where each key point
+                 consists of x and y coordinates.
+             labels (Optional[List[str]]): A list of labels to be displayed on the
+                 annotated image. If not provided, keypoint indices will be used.
+
+         Returns:
+             The annotated image, matching the type of `scene` (`numpy.ndarray`
+             or `PIL.Image.Image`)
+
+         Example:
+             ```python
+             import eye as sv
+
+             image = ...
+             key_points = sv.KeyPoints(...)
+
+             vertex_label_annotator = sv.VertexLabelAnnotator(
+                 color=sv.Color.GREEN,
+                 text_color=sv.Color.BLACK,
+                 border_radius=5
+             )
+             annotated_frame = vertex_label_annotator.annotate(
+                 scene=image.copy(),
+                 key_points=key_points
+             )
+             ```
+
+         ![vertex-label-annotator-example](https://media.roboflow.com/
+         eye-annotator-examples/vertex-label-annotator-example.png)
+
+         !!! tip
+
+             `VertexLabelAnnotator` lets you customize the color of each keypoint label.
+
+             Example:
+             ```python
+             import eye as sv
+
+             image = ...
+             key_points = sv.KeyPoints(...)
+
+             LABELS = [
+                 "nose", "left eye", "right eye", "left ear",
+                 "right ear", "left shoulder", "right shoulder", "left elbow",
+                 "right elbow", "left wrist", "right wrist", "left hip",
+                 "right hip", "left knee", "right knee", "left ankle",
+                 "right ankle"
+             ]
+
+             COLORS = [
+                 "#FF6347", "#FF6347", "#FF6347", "#FF6347",
+                 "#FF6347", "#FF1493", "#00FF00", "#FF1493",
+                 "#00FF00", "#FF1493", "#00FF00", "#FFD700",
+                 "#00BFFF", "#FFD700", "#00BFFF", "#FFD700",
+                 "#00BFFF"
+             ]
+             COLORS = [sv.Color.from_hex(color_hex=c) for c in COLORS]
+
+             vertex_label_annotator = sv.VertexLabelAnnotator(
+                 color=COLORS,
+                 text_color=sv.Color.BLACK,
+                 border_radius=5
+             )
+             annotated_frame = vertex_label_annotator.annotate(
+                 scene=image.copy(),
+                 key_points=key_points,
+                 labels=LABELS
+             )
+             ```
+         ![vertex-label-annotator-custom-example](https://media.roboflow.com/
+         eye-annotator-examples/vertex-label-annotator-custom-example.png)
+         """
+         assert isinstance(scene, np.ndarray)
+         font = cv2.FONT_HERSHEY_SIMPLEX
+
+         skeletons_count, points_count, _ = key_points.xy.shape
+         if skeletons_count == 0:
+             return scene
+
+         anchors = key_points.xy.reshape(points_count * skeletons_count, 2).astype(int)
+         mask = np.all(anchors != 0, axis=1)
+
+         if not np.any(mask):
+             return scene
+
+         colors = self.preprocess_and_validate_colors(
+             colors=self.color,
+             points_count=points_count,
+             skeletons_count=skeletons_count,
+         )
+
+         text_colors = self.preprocess_and_validate_colors(
+             colors=self.text_color,
+             points_count=points_count,
+             skeletons_count=skeletons_count,
+         )
+
+         labels = self.preprocess_and_validate_labels(
+             labels=labels, points_count=points_count, skeletons_count=skeletons_count
+         )
+
+         anchors = anchors[mask]
+         colors = colors[mask]
+         text_colors = text_colors[mask]
+         labels = labels[mask]
+
+         xyxy = np.array(
+             [
+                 self.get_text_bounding_box(
+                     text=label,
+                     font=font,
+                     text_scale=self.text_scale,
+                     text_thickness=self.text_thickness,
+                     center_coordinates=tuple(anchor),
+                 )
+                 for anchor, label in zip(anchors, labels)
+             ]
+         )
+         xyxy_padded = pad_boxes(xyxy=xyxy, px=self.text_padding)
+
+         if self.smart_position:
+             xyxy_padded = spread_out_boxes(xyxy_padded)
+             xyxy = pad_boxes(xyxy=xyxy_padded, px=-self.text_padding)
+
+         for text, color, text_color, box, box_padded in zip(
+             labels, colors, text_colors, xyxy, xyxy_padded
+         ):
+             draw_rounded_rectangle(
+                 scene=scene,
+                 rect=Rect.from_xyxy(box_padded),
+                 color=color,
+                 border_radius=self.border_radius,
+             )
+             cv2.putText(
+                 img=scene,
+                 text=text,
+                 org=(box[0], box[3]),
+                 fontFace=font,
+                 fontScale=self.text_scale,
+                 color=text_color.as_bgr(),
+                 thickness=self.text_thickness,
+                 lineType=cv2.LINE_AA,
+             )
+
+         return scene
+
+     @staticmethod
+     def get_text_bounding_box(
+         text: str,
+         font: int,
+         text_scale: float,
+         text_thickness: int,
+         center_coordinates: Tuple[int, int],
+     ) -> Tuple[int, int, int, int]:
+         text_w, text_h = cv2.getTextSize(
+             text=text,
+             fontFace=font,
+             fontScale=text_scale,
+             thickness=text_thickness,
+         )[0]
+         center_x, center_y = center_coordinates
+         return (
+             center_x - text_w // 2,
+             center_y - text_h // 2,
+             center_x + text_w // 2,
+             center_y + text_h // 2,
+         )
+
+     @staticmethod
+     def preprocess_and_validate_labels(
+         labels: Optional[List[str]], points_count: int, skeletons_count: int
+     ) -> np.ndarray:
+         if labels and len(labels) != points_count:
+             raise ValueError(
+                 f"Number of labels ({len(labels)}) must match number of key points "
+                 f"({points_count})."
+             )
+         if labels is None:
+             labels = [str(i) for i in range(points_count)]
+
+         return np.array(labels * skeletons_count)
+
+     @staticmethod
+     def preprocess_and_validate_colors(
+         colors: Optional[Union[Color, List[Color]]],
+         points_count: int,
+         skeletons_count: int,
+     ) -> np.ndarray:
+         if isinstance(colors, list) and len(colors) != points_count:
+             raise ValueError(
+                 f"Number of colors ({len(colors)}) must match number of key points "
+                 f"({points_count})."
+             )
+         return (
+             np.array(colors * skeletons_count)
+             if isinstance(colors, list)
+             else np.array([colors] * points_count * skeletons_count)
+         )
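
A minimal end-to-end sketch of the three annotators added above, run on a blank image with a hand-made 3-point "skeleton". It assumes, based on the docstring examples in this file, that the classes, `KeyPoints`, and `Color` are re-exported at the package top level (`import eye as sv`) and that `KeyPoints` accepts an `xy` array of shape `(num_skeletons, num_points, 2)`; treat it as illustrative rather than guaranteed API.

```python
# Illustrative only: class names follow the docstring examples above; the
# KeyPoints constructor signature is an assumption.
import numpy as np
import eye as sv

image = np.zeros((240, 320, 3), dtype=np.uint8)

# One skeleton with three keypoints, all away from (0, 0) so none are treated as missing.
xy = np.array([[[60.0, 60.0], [160.0, 120.0], [260.0, 60.0]]])
key_points = sv.KeyPoints(xy=xy)

# Edges are 1-indexed pairs (see `xy[class_a - 1]` above); passing them explicitly
# avoids the SKELETONS_BY_VERTEX_COUNT lookup, which only knows standard vertex counts.
edge_annotator = sv.EdgeAnnotator(color=sv.Color.GREEN, thickness=2, edges=[(1, 2), (2, 3)])
vertex_annotator = sv.VertexAnnotator(radius=5)
label_annotator = sv.VertexLabelAnnotator(smart_position=True)

annotated = edge_annotator.annotate(scene=image.copy(), key_points=key_points)
annotated = vertex_annotator.annotate(scene=annotated, key_points=key_points)
annotated = label_annotator.annotate(
    scene=annotated, key_points=key_points, labels=["a", "b", "c"]
)
```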