eye-cv 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. eye/__init__.py +115 -0
  2. eye/__init___supervision_original.py +120 -0
  3. eye/annotators/__init__.py +0 -0
  4. eye/annotators/base.py +22 -0
  5. eye/annotators/core.py +2699 -0
  6. eye/annotators/line.py +107 -0
  7. eye/annotators/modern.py +529 -0
  8. eye/annotators/trace.py +142 -0
  9. eye/annotators/utils.py +177 -0
  10. eye/assets/__init__.py +2 -0
  11. eye/assets/downloader.py +95 -0
  12. eye/assets/list.py +83 -0
  13. eye/classification/__init__.py +0 -0
  14. eye/classification/core.py +188 -0
  15. eye/config.py +2 -0
  16. eye/core/__init__.py +0 -0
  17. eye/core/trackers/__init__.py +1 -0
  18. eye/core/trackers/botsort_tracker.py +336 -0
  19. eye/core/trackers/bytetrack_tracker.py +284 -0
  20. eye/core/trackers/sort_tracker.py +200 -0
  21. eye/core/tracking.py +146 -0
  22. eye/dataset/__init__.py +0 -0
  23. eye/dataset/core.py +919 -0
  24. eye/dataset/formats/__init__.py +0 -0
  25. eye/dataset/formats/coco.py +258 -0
  26. eye/dataset/formats/pascal_voc.py +279 -0
  27. eye/dataset/formats/yolo.py +272 -0
  28. eye/dataset/utils.py +259 -0
  29. eye/detection/__init__.py +0 -0
  30. eye/detection/auto_convert.py +155 -0
  31. eye/detection/core.py +1529 -0
  32. eye/detection/detections_enhanced.py +392 -0
  33. eye/detection/line_zone.py +859 -0
  34. eye/detection/lmm.py +184 -0
  35. eye/detection/overlap_filter.py +270 -0
  36. eye/detection/tools/__init__.py +0 -0
  37. eye/detection/tools/csv_sink.py +181 -0
  38. eye/detection/tools/inference_slicer.py +288 -0
  39. eye/detection/tools/json_sink.py +142 -0
  40. eye/detection/tools/polygon_zone.py +202 -0
  41. eye/detection/tools/smoother.py +123 -0
  42. eye/detection/tools/smoothing.py +179 -0
  43. eye/detection/tools/smoothing_config.py +202 -0
  44. eye/detection/tools/transformers.py +247 -0
  45. eye/detection/utils.py +1175 -0
  46. eye/draw/__init__.py +0 -0
  47. eye/draw/color.py +154 -0
  48. eye/draw/utils.py +374 -0
  49. eye/filters.py +112 -0
  50. eye/geometry/__init__.py +0 -0
  51. eye/geometry/core.py +128 -0
  52. eye/geometry/utils.py +47 -0
  53. eye/keypoint/__init__.py +0 -0
  54. eye/keypoint/annotators.py +442 -0
  55. eye/keypoint/core.py +687 -0
  56. eye/keypoint/skeletons.py +2647 -0
  57. eye/metrics/__init__.py +21 -0
  58. eye/metrics/core.py +72 -0
  59. eye/metrics/detection.py +843 -0
  60. eye/metrics/f1_score.py +648 -0
  61. eye/metrics/mean_average_precision.py +628 -0
  62. eye/metrics/mean_average_recall.py +697 -0
  63. eye/metrics/precision.py +653 -0
  64. eye/metrics/recall.py +652 -0
  65. eye/metrics/utils/__init__.py +0 -0
  66. eye/metrics/utils/object_size.py +158 -0
  67. eye/metrics/utils/utils.py +9 -0
  68. eye/py.typed +0 -0
  69. eye/quick.py +104 -0
  70. eye/tracker/__init__.py +0 -0
  71. eye/tracker/byte_tracker/__init__.py +0 -0
  72. eye/tracker/byte_tracker/core.py +386 -0
  73. eye/tracker/byte_tracker/kalman_filter.py +205 -0
  74. eye/tracker/byte_tracker/matching.py +69 -0
  75. eye/tracker/byte_tracker/single_object_track.py +178 -0
  76. eye/tracker/byte_tracker/utils.py +18 -0
  77. eye/utils/__init__.py +0 -0
  78. eye/utils/conversion.py +132 -0
  79. eye/utils/file.py +159 -0
  80. eye/utils/image.py +794 -0
  81. eye/utils/internal.py +200 -0
  82. eye/utils/iterables.py +84 -0
  83. eye/utils/notebook.py +114 -0
  84. eye/utils/video.py +307 -0
  85. eye/utils_eye/__init__.py +1 -0
  86. eye/utils_eye/geometry.py +71 -0
  87. eye/utils_eye/nms.py +55 -0
  88. eye/validators/__init__.py +140 -0
  89. eye/web.py +271 -0
  90. eye_cv-1.0.0.dist-info/METADATA +319 -0
  91. eye_cv-1.0.0.dist-info/RECORD +94 -0
  92. eye_cv-1.0.0.dist-info/WHEEL +5 -0
  93. eye_cv-1.0.0.dist-info/licenses/LICENSE +21 -0
  94. eye_cv-1.0.0.dist-info/top_level.txt +1 -0
eye/detection/tools/inference_slicer.py
@@ -0,0 +1,288 @@
+ import warnings
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from typing import Callable, Optional, Tuple, Union
+
+ import numpy as np
+
+ from eye.config import ORIENTED_BOX_COORDINATES
+ from eye.detection.core import Detections
+ from eye.detection.overlap_filter import OverlapFilter
+ from eye.detection.utils import move_boxes, move_masks, move_oriented_boxes
+ from eye.utils.image import crop_image
+ from eye.utils.internal import (
+     EyeWarnings,
+     warn_deprecated,
+ )
+
+
+ def move_detections(
+     detections: Detections,
+     offset: np.ndarray,
+     resolution_wh: Optional[Tuple[int, int]] = None,
+ ) -> Detections:
+     """
+     Move detections by the given offset.
+
+     Args:
+         detections (sv.Detections): Detections object to be moved.
+         offset (np.ndarray): An array of shape `(2,)` containing offset values
+             in the format `[dx, dy]`.
+         resolution_wh (Optional[Tuple[int, int]]): The width and height of the
+             desired mask resolution. Required for segmentation detections.
+
+     Returns:
+         (sv.Detections) The repositioned Detections object.
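+
+     Example:
+         A minimal sketch (hypothetical values; a single box, no masks or
+         oriented boxes):
+
+         ```python
+         import numpy as np
+         import eye as sv
+
+         detections = sv.Detections(xyxy=np.array([[10.0, 10.0, 50.0, 50.0]]))
+         # Shift by a slice origin of (100, 200); the box lands at
+         # [110, 210, 150, 250].
+         detections = move_detections(detections, offset=np.array([100, 200]))
+         ```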
+     """
+     detections.xyxy = move_boxes(xyxy=detections.xyxy, offset=offset)
+     if ORIENTED_BOX_COORDINATES in detections.data:
+         detections.data[ORIENTED_BOX_COORDINATES] = move_oriented_boxes(
+             xyxyxyxy=detections.data[ORIENTED_BOX_COORDINATES], offset=offset
+         )
+     if detections.mask is not None:
+         if resolution_wh is None:
+             raise ValueError(
+                 "Resolution width and height are required for moving segmentation "
+                 "detections. This should be the same as the (width, height) of "
+                 "the image."
+             )
+         detections.mask = move_masks(
+             masks=detections.mask, offset=offset, resolution_wh=resolution_wh
+         )
+     return detections
+
+
+ class InferenceSlicer:
+     """
+     InferenceSlicer performs slicing-based inference for small target detection.
+     This method, often referred to as
+     [Slicing Aided Hyper Inference (SAHI)](https://ieeexplore.ieee.org/document/9897990),
+     involves dividing a larger image into smaller slices, performing inference on
+     each slice, and then merging the detections.
+
+     Args:
+         slice_wh (Tuple[int, int]): Dimensions of each slice measured in pixels.
+             The tuple should be in the format `(width, height)`.
+         overlap_ratio_wh (Optional[Tuple[float, float]]): [⚠️ Deprecated: please
+             set to `None` and use `overlap_wh`] A tuple representing the desired
+             overlap ratio for width and height between consecutive slices. Each
+             value should be in the range [0, 1), where 0 means no overlap and a
+             value close to 1 means high overlap.
+         overlap_wh (Optional[Tuple[int, int]]): A tuple representing the desired
+             overlap for width and height between consecutive slices measured in
+             pixels. Each value should be greater than or equal to 0. Takes
+             precedence over `overlap_ratio_wh`.
+         overlap_filter (Union[OverlapFilter, str]): Strategy for filtering or
+             merging overlapping detections in slices.
+         iou_threshold (float): Intersection over Union (IoU) threshold used when
+             filtering by overlap.
+         callback (Callable): A function that performs inference on a given image
+             slice and returns detections.
+         thread_workers (int): Number of threads for parallel execution.
+
+     Note:
+         The class ensures that slices do not exceed the boundaries of the original
+         image. As a result, the final slices in the row and column dimensions might
+         be smaller than the specified slice dimensions if the image's width or
+         height is not a multiple of the slice's width or height minus the overlap.
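+
+     Example:
+         A sketch of the non-deprecated overlap configuration (assumes
+         `callback` is any function mapping an image slice to `sv.Detections`):
+
+         ```python
+         import eye as sv
+
+         slicer = sv.InferenceSlicer(
+             callback=callback,
+             slice_wh=(640, 640),
+             overlap_ratio_wh=None,
+             overlap_wh=(128, 128),
+         )
+         ```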
+     """
+
+     def __init__(
+         self,
+         callback: Callable[[np.ndarray], Detections],
+         slice_wh: Tuple[int, int] = (320, 320),
+         overlap_ratio_wh: Optional[Tuple[float, float]] = (0.2, 0.2),
+         overlap_wh: Optional[Tuple[int, int]] = None,
+         overlap_filter: Union[OverlapFilter, str] = OverlapFilter.NON_MAX_SUPPRESSION,
+         iou_threshold: float = 0.5,
+         thread_workers: int = 1,
+     ):
+         if overlap_ratio_wh is not None:
+             warn_deprecated(
+                 "`overlap_ratio_wh` in `InferenceSlicer.__init__` is deprecated and "
+                 "will be removed in `eye-0.27.0`. Please manually set it to "
+                 "`None` and use `overlap_wh` instead."
+             )
+
+         self._validate_overlap(overlap_ratio_wh, overlap_wh)
+         self.overlap_ratio_wh = overlap_ratio_wh
+         self.overlap_wh = overlap_wh
+
+         self.slice_wh = slice_wh
+         self.iou_threshold = iou_threshold
+         self.overlap_filter = OverlapFilter.from_value(overlap_filter)
+         self.callback = callback
+         self.thread_workers = thread_workers
+
+     def __call__(self, image: np.ndarray) -> Detections:
+         """
+         Performs slicing-based inference on the provided image using the
+         specified callback.
+
+         Args:
+             image (np.ndarray): The input image on which inference needs to be
+                 performed. The image should be in the format
+                 `(height, width, channels)`.
+
+         Returns:
+             Detections: A collection of detections for the entire image after
+                 merging results from all slices and applying the configured
+                 overlap filter.
+
+         Example:
+             ```python
+             import cv2
+             import numpy as np
+             import eye as sv
+             from ultralytics import YOLO
+
+             image = cv2.imread(<SOURCE_IMAGE_PATH>)
+             model = YOLO(...)
+
+             def callback(image_slice: np.ndarray) -> sv.Detections:
+                 result = model(image_slice)[0]
+                 return sv.Detections.from_ultralytics(result)
+
+             slicer = sv.InferenceSlicer(
+                 callback=callback,
+                 overlap_filter=sv.OverlapFilter.NON_MAX_SUPPRESSION,
+             )
+
+             detections = slicer(image)
+             ```
+         """
+         detections_list = []
+         resolution_wh = (image.shape[1], image.shape[0])
+         offsets = self._generate_offset(
+             resolution_wh=resolution_wh,
+             slice_wh=self.slice_wh,
+             overlap_ratio_wh=self.overlap_ratio_wh,
+             overlap_wh=self.overlap_wh,
+         )
+
+         with ThreadPoolExecutor(max_workers=self.thread_workers) as executor:
+             futures = [
+                 executor.submit(self._run_callback, image, offset)
+                 for offset in offsets
+             ]
+             for future in as_completed(futures):
+                 detections_list.append(future.result())
+
+         merged = Detections.merge(detections_list=detections_list)
+         if self.overlap_filter == OverlapFilter.NONE:
+             return merged
+         elif self.overlap_filter == OverlapFilter.NON_MAX_SUPPRESSION:
+             return merged.with_nms(threshold=self.iou_threshold)
+         elif self.overlap_filter == OverlapFilter.NON_MAX_MERGE:
+             return merged.with_nmm(threshold=self.iou_threshold)
+         else:
+             warnings.warn(
+                 f"Invalid overlap filter strategy: {self.overlap_filter}",
+                 category=EyeWarnings,
+             )
+             return merged
+
+     def _run_callback(self, image: np.ndarray, offset: np.ndarray) -> Detections:
+         """
+         Run the provided callback on a slice of an image.
+
+         Args:
+             image (np.ndarray): The input image on which inference needs to run.
+             offset (np.ndarray): An array of shape `(4,)` containing coordinates
+                 for the slice.
+
+         Returns:
+             Detections: A collection of detections for the slice.
+         """
+         image_slice = crop_image(image=image, xyxy=offset)
+         detections = self.callback(image_slice)
+         resolution_wh = (image.shape[1], image.shape[0])
+         detections = move_detections(
+             detections=detections, offset=offset[:2], resolution_wh=resolution_wh
+         )
+
+         return detections
+
+     @staticmethod
+     def _generate_offset(
+         resolution_wh: Tuple[int, int],
+         slice_wh: Tuple[int, int],
+         overlap_ratio_wh: Optional[Tuple[float, float]],
+         overlap_wh: Optional[Tuple[int, int]],
+     ) -> np.ndarray:
+         """
+         Generate offset coordinates for slicing an image based on the given
+         resolution, slice dimensions, and overlap.
+
+         Args:
+             resolution_wh (Tuple[int, int]): A tuple representing the width and
+                 height of the image to be sliced.
+             slice_wh (Tuple[int, int]): Dimensions of each slice measured in
+                 pixels. The tuple should be in the format `(width, height)`.
+             overlap_ratio_wh (Optional[Tuple[float, float]]): A tuple representing
+                 the desired overlap ratio for width and height between consecutive
+                 slices. Each value should be in the range [0, 1), where 0 means no
+                 overlap and a value close to 1 means high overlap.
+             overlap_wh (Optional[Tuple[int, int]]): A tuple representing the
+                 desired overlap for width and height between consecutive slices
+                 measured in pixels. Each value should be greater than or equal
+                 to 0.
+
+         Returns:
+             np.ndarray: An array of shape `(n, 4)` containing coordinates for each
+                 slice in the format `[xmin, ymin, xmax, ymax]`.
+
+         Note:
+             The function ensures that slices do not exceed the boundaries of the
+             original image. As a result, the final slices in the row and column
+             dimensions might be smaller than the specified slice dimensions if the
+             image's width or height is not a multiple of the slice's width or
+             height minus the overlap.
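+
+         Example:
+             A small worked sketch of the stride arithmetic below
+             (hypothetical values):
+
+             ```python
+             offsets = InferenceSlicer._generate_offset(
+                 resolution_wh=(200, 150),
+                 slice_wh=(100, 100),
+                 overlap_ratio_wh=None,
+                 overlap_wh=(20, 20),
+             )
+             # Strides are 100 - 20 = 80 in each direction, so x starts at
+             # [0, 80, 160] and y starts at [0, 80]: six slices, with the
+             # right and bottom slices clipped to the image boundary,
+             # e.g. [160, 80, 200, 150].
+             ```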
+         """
+         slice_width, slice_height = slice_wh
+         image_width, image_height = resolution_wh
+         overlap_width = (
+             overlap_wh[0]
+             if overlap_wh is not None
+             else int(overlap_ratio_wh[0] * slice_width)
+         )
+         overlap_height = (
+             overlap_wh[1]
+             if overlap_wh is not None
+             else int(overlap_ratio_wh[1] * slice_height)
+         )
+
+         width_stride = slice_width - overlap_width
+         height_stride = slice_height - overlap_height
+
+         ws = np.arange(0, image_width, width_stride)
+         hs = np.arange(0, image_height, height_stride)
+
+         xmin, ymin = np.meshgrid(ws, hs)
+         xmax = np.clip(xmin + slice_width, 0, image_width)
+         ymax = np.clip(ymin + slice_height, 0, image_height)
+
+         offsets = np.stack([xmin, ymin, xmax, ymax], axis=-1).reshape(-1, 4)
+
+         return offsets
+
+     @staticmethod
+     def _validate_overlap(
+         overlap_ratio_wh: Optional[Tuple[float, float]],
+         overlap_wh: Optional[Tuple[int, int]],
+     ) -> None:
+         if overlap_ratio_wh is not None and overlap_wh is not None:
+             raise ValueError(
+                 "Both `overlap_ratio_wh` and `overlap_wh` cannot be provided. "
+                 "Please provide only one of them."
+             )
+         if overlap_ratio_wh is None and overlap_wh is None:
+             raise ValueError(
+                 "Either `overlap_ratio_wh` or `overlap_wh` must be provided. "
+                 "Please provide one of them."
+             )
+
+         if overlap_ratio_wh is not None:
+             if not (0 <= overlap_ratio_wh[0] < 1 and 0 <= overlap_ratio_wh[1] < 1):
+                 raise ValueError(
+                     "Overlap ratios must be in the range [0, 1). "
+                     f"Received: {overlap_ratio_wh}"
+                 )
+         if overlap_wh is not None:
+             if not (overlap_wh[0] >= 0 and overlap_wh[1] >= 0):
+                 raise ValueError(
+                     "Overlap values must be greater than or equal to 0. "
+                     f"Received: {overlap_wh}"
+                 )
eye/detection/tools/json_sink.py
@@ -0,0 +1,142 @@
+ from __future__ import annotations
+
+ import json
+ import os
+ from typing import Any, Dict, List, Optional, TextIO
+
+ from eye.detection.core import Detections
+
+
+ class JSONSink:
+     """
+     A utility class for saving detection data to a JSON file. This class is
+     designed to efficiently serialize detection objects into a JSON format,
+     allowing for the inclusion of bounding box coordinates and additional
+     attributes like `confidence`, `class_id`, and `tracker_id`.
+
+     !!! tip
+
+         JSONSink allows you to pass custom data alongside the detection fields,
+         providing flexibility for logging various types of information.
+
+     Args:
+         file_name (str): The name of the JSON file where the detections will be
+             stored. Defaults to `'output.json'`.
+
+     Example:
+         ```python
+         import eye as sv
+         from ultralytics import YOLO
+
+         model = YOLO(<SOURCE_MODEL_PATH>)
+         json_sink = sv.JSONSink(<RESULT_JSON_FILE_PATH>)
+         frames_generator = sv.get_video_frames_generator(<SOURCE_VIDEO_PATH>)
+
+         with json_sink as sink:
+             for frame in frames_generator:
+                 result = model(frame)[0]
+                 detections = sv.Detections.from_ultralytics(result)
+                 sink.append(detections, custom_data={'<CUSTOM_LABEL>': '<CUSTOM_DATA>'})
+         ```
+     """
+
+     def __init__(self, file_name: str = "output.json") -> None:
+         """
+         Initialize the JSONSink instance.
+
+         Args:
+             file_name (str): The name of the JSON file.
+
+         Returns:
+             None
+         """
+         self.file_name = file_name
+         self.file: Optional[TextIO] = None
+         self.data: List[Dict[str, Any]] = []
+
+     def __enter__(self) -> JSONSink:
+         self.open()
+         return self
+
+     def __exit__(
+         self,
+         exc_type: Optional[type],
+         exc_val: Optional[Exception],
+         exc_tb: Optional[Any],
+     ) -> None:
+         self.write_and_close()
+
+     def open(self) -> None:
+         """
+         Open the JSON file for writing.
+
+         Returns:
+             None
+         """
+         parent_directory = os.path.dirname(self.file_name)
+         if parent_directory and not os.path.exists(parent_directory):
+             os.makedirs(parent_directory)
+
+         self.file = open(self.file_name, "w")
+
+     def write_and_close(self) -> None:
+         """
+         Write and close the JSON file.
+
+         Returns:
+             None
+         """
+         if self.file:
+             json.dump(self.data, self.file, indent=4)
+             self.file.close()
+
+     @staticmethod
+     def parse_detection_data(
+         detections: Detections, custom_data: Optional[Dict[str, Any]] = None
+     ) -> List[Dict[str, Any]]:
+         """
+         Convert a `Detections` object into a list of per-detection dictionaries,
+         using empty strings for missing `class_id`, `confidence`, or `tracker_id`
+         values and including any entries from `detections.data` and `custom_data`.
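+
+         Example:
+             A minimal sketch (hypothetical values):
+
+             ```python
+             import numpy as np
+             import eye as sv
+
+             detections = sv.Detections(
+                 xyxy=np.array([[10.0, 20.0, 30.0, 40.0]]),
+                 class_id=np.array([2]),
+                 confidence=np.array([0.9]),
+             )
+             rows = sv.JSONSink.parse_detection_data(detections)
+             # rows[0] -> {'x_min': 10.0, 'y_min': 20.0, 'x_max': 30.0,
+             #             'y_max': 40.0, 'class_id': 2, 'confidence': 0.9,
+             #             'tracker_id': ''}
+             ```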
+         """
+         parsed_rows = []
+         for i in range(len(detections.xyxy)):
+             row = {
+                 "x_min": float(detections.xyxy[i][0]),
+                 "y_min": float(detections.xyxy[i][1]),
+                 "x_max": float(detections.xyxy[i][2]),
+                 "y_max": float(detections.xyxy[i][3]),
+                 "class_id": ""
+                 if detections.class_id is None
+                 else int(detections.class_id[i]),
+                 "confidence": ""
+                 if detections.confidence is None
+                 else float(detections.confidence[i]),
+                 "tracker_id": ""
+                 if detections.tracker_id is None
+                 else int(detections.tracker_id[i]),
+             }
+
+             if hasattr(detections, "data"):
+                 for key, value in detections.data.items():
+                     row[key] = (
+                         str(value[i])
+                         if hasattr(value, "__getitem__") and value.ndim != 0
+                         else str(value)
+                     )
+
+             if custom_data:
+                 row.update(custom_data)
+             parsed_rows.append(row)
+         return parsed_rows
+
+     def append(
+         self, detections: Detections, custom_data: Optional[Dict[str, Any]] = None
+     ) -> None:
+         """
+         Append detection data to the JSON file.
+
+         Args:
+             detections (Detections): The detection data.
+             custom_data (Dict[str, Any]): Custom data to include.
+
+         Returns:
+             None
+         """
+         parsed_rows = JSONSink.parse_detection_data(detections, custom_data)
+         self.data.extend(parsed_rows)
eye/detection/tools/polygon_zone.py
@@ -0,0 +1,202 @@
+ from dataclasses import replace
+ from typing import Iterable, Optional
+
+ import cv2
+ import numpy as np
+ import numpy.typing as npt
+
+ from eye import Detections
+ from eye.detection.utils import clip_boxes, polygon_to_mask
+ from eye.draw.color import Color
+ from eye.draw.utils import draw_filled_polygon, draw_polygon, draw_text
+ from eye.geometry.core import Position
+ from eye.geometry.utils import get_polygon_center
+
+
+ class PolygonZone:
+     """
+     A class for defining a polygon-shaped zone within a frame for detecting
+     objects.
+
+     !!! warning
+
+         PolygonZone uses the `tracker_id`. Read
+         [here](/latest/trackers/) to learn how to plug
+         tracking into your inference pipeline.
+
+     Attributes:
+         polygon (np.ndarray): A polygon represented by a numpy array of shape
+             `(N, 2)`, containing the `x`, `y` coordinates of the points.
+         triggering_anchors (Iterable[sv.Position]): A list of positions specifying
+             which anchors of the detection's bounding box to consider when
+             deciding whether the detection fits within the PolygonZone
+             (default: `(sv.Position.BOTTOM_CENTER,)`).
+         current_count (int): The current count of detected objects within the zone.
+         mask (np.ndarray): The 2D bool mask for the polygon zone.
+
+     Example:
+         ```python
+         import eye as sv
+         from ultralytics import YOLO
+         import numpy as np
+         import cv2
+
+         image = cv2.imread(<SOURCE_IMAGE_PATH>)
+         model = YOLO("yolo11s")
+         tracker = sv.ByteTrack()
+
+         polygon = np.array([[100, 200], [200, 100], [300, 200], [200, 300]])
+         polygon_zone = sv.PolygonZone(polygon=polygon)
+
+         result = model(image)[0]
+         detections = sv.Detections.from_ultralytics(result)
+         detections = tracker.update_with_detections(detections)
+
+         is_detections_in_zone = polygon_zone.trigger(detections)
+         print(polygon_zone.current_count)
+         ```
+     """
+
+     def __init__(
+         self,
+         polygon: npt.NDArray[np.int64],
+         triggering_anchors: Iterable[Position] = (Position.BOTTOM_CENTER,),
+     ):
+         self.polygon = polygon.astype(int)
+         self.triggering_anchors = triggering_anchors
+         if not list(self.triggering_anchors):
+             raise ValueError("Triggering anchors cannot be empty.")
+
+         self.current_count = 0
+
+         x_max, y_max = np.max(polygon, axis=0)
+         self.frame_resolution_wh = (x_max + 1, y_max + 1)
+         # The mask is one pixel larger than frame_resolution_wh so that anchors
+         # clipped to the frame boundary (coordinate x_max + 1 or y_max + 1)
+         # still index into the mask safely.
+         self.mask = polygon_to_mask(
+             polygon=polygon, resolution_wh=(x_max + 2, y_max + 2)
+         )
+
+     def trigger(self, detections: Detections) -> npt.NDArray[np.bool_]:
+         """
+         Determines if the detections are within the polygon zone.
+
+         Parameters:
+             detections (Detections): The detections to be checked against the
+                 polygon zone.
+
+         Returns:
+             np.ndarray: A boolean numpy array indicating whether each detection
+                 is within the polygon zone.
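+
+         Example:
+             A brief sketch, reusing the objects from the class-level example:
+
+             ```python
+             in_zone = polygon_zone.trigger(detections=detections)
+             detections_in_zone = detections[in_zone]
+             ```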
+         """
+         clipped_xyxy = clip_boxes(
+             xyxy=detections.xyxy, resolution_wh=self.frame_resolution_wh
+         )
+         clipped_detections = replace(detections, xyxy=clipped_xyxy)
+         all_clipped_anchors = np.array(
+             [
+                 np.ceil(clipped_detections.get_anchors_coordinates(anchor)).astype(int)
+                 for anchor in self.triggering_anchors
+             ]
+         )
+
+         is_in_zone: npt.NDArray[np.bool_] = (
+             self.mask[all_clipped_anchors[:, :, 1], all_clipped_anchors[:, :, 0]]
+             .transpose()
+             .astype(bool)
+         )
+
+         # A detection counts as in the zone only if all of its triggering
+         # anchors fall inside the polygon.
+         is_in_zone = np.all(is_in_zone, axis=1)
+         self.current_count = int(np.sum(is_in_zone))
+         return is_in_zone.astype(bool)
+
+
+ class PolygonZoneAnnotator:
+     """
+     A class for annotating a polygon-shaped zone within a frame with a count of
+     detected objects.
+
+     Attributes:
+         zone (PolygonZone): The polygon zone to be annotated.
+         color (Color): The color to draw the polygon lines, default is white.
+         thickness (int): The thickness of the polygon lines, default is 2.
+         text_color (Color): The color of the text on the polygon, default is black.
+         text_scale (float): The scale of the text on the polygon, default is 0.5.
+         text_thickness (int): The thickness of the text on the polygon, default is 1.
+         text_padding (int): The padding around the text on the polygon, default is 10.
+         font (int): The font type for the text on the polygon,
+             default is `cv2.FONT_HERSHEY_SIMPLEX`.
+         center (Tuple[int, int]): The center of the polygon for text placement.
+         display_in_zone_count (bool): Whether to display the in-zone count (or the
+             provided label) on the polygon. Default is True.
+         opacity (float): The opacity of the zone fill when drawn on the scene.
+             Default is 0.
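+
+     Example:
+         A minimal sketch (assumes `frame` is a BGR image and that
+         `zone.trigger` has already been called so `current_count` is
+         populated):
+
+         ```python
+         import numpy as np
+         import eye as sv
+
+         polygon = np.array([[100, 200], [200, 100], [300, 200], [200, 300]])
+         zone = sv.PolygonZone(polygon=polygon)
+         zone_annotator = sv.PolygonZoneAnnotator(zone=zone, color=sv.Color.WHITE)
+
+         annotated_frame = zone_annotator.annotate(scene=frame)
+         ```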
+     """
+
+     def __init__(
+         self,
+         zone: PolygonZone,
+         color: Color = Color.WHITE,
+         thickness: int = 2,
+         text_color: Color = Color.BLACK,
+         text_scale: float = 0.5,
+         text_thickness: int = 1,
+         text_padding: int = 10,
+         display_in_zone_count: bool = True,
+         opacity: float = 0,
+     ):
+         self.zone = zone
+         self.color = color
+         self.thickness = thickness
+         self.text_color = text_color
+         self.text_scale = text_scale
+         self.text_thickness = text_thickness
+         self.text_padding = text_padding
+         self.font = cv2.FONT_HERSHEY_SIMPLEX
+         self.center = get_polygon_center(polygon=zone.polygon)
+         self.display_in_zone_count = display_in_zone_count
+         self.opacity = opacity
+
+     def annotate(self, scene: np.ndarray, label: Optional[str] = None) -> np.ndarray:
+         """
+         Annotates the polygon zone within a frame with a count of detected objects.
+
+         Parameters:
+             scene (np.ndarray): The image on which the polygon zone will be
+                 annotated.
+             label (Optional[str]): A label for the count of detected objects
+                 within the polygon zone (default: None).
+
+         Returns:
+             np.ndarray: The image with the polygon zone and count of detected
+                 objects.
+         """
+         if self.opacity == 0:
+             annotated_frame = draw_polygon(
+                 scene=scene,
+                 polygon=self.zone.polygon,
+                 color=self.color,
+                 thickness=self.thickness,
+             )
+         else:
+             annotated_frame = draw_filled_polygon(
+                 scene=scene.copy(),
+                 polygon=self.zone.polygon,
+                 color=self.color,
+                 opacity=self.opacity,
+             )
+             annotated_frame = draw_polygon(
+                 scene=annotated_frame,
+                 polygon=self.zone.polygon,
+                 color=self.color,
+                 thickness=self.thickness,
+             )
+
+         if self.display_in_zone_count:
+             annotated_frame = draw_text(
+                 scene=annotated_frame,
+                 text=str(self.zone.current_count) if label is None else label,
+                 text_anchor=self.center,
+                 background_color=self.color,
+                 text_color=self.text_color,
+                 text_scale=self.text_scale,
+                 text_thickness=self.text_thickness,
+                 text_padding=self.text_padding,
+                 text_font=self.font,
+             )
+
+         return annotated_frame