eye-cv 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. eye/__init__.py +115 -0
  2. eye/__init___supervision_original.py +120 -0
  3. eye/annotators/__init__.py +0 -0
  4. eye/annotators/base.py +22 -0
  5. eye/annotators/core.py +2699 -0
  6. eye/annotators/line.py +107 -0
  7. eye/annotators/modern.py +529 -0
  8. eye/annotators/trace.py +142 -0
  9. eye/annotators/utils.py +177 -0
  10. eye/assets/__init__.py +2 -0
  11. eye/assets/downloader.py +95 -0
  12. eye/assets/list.py +83 -0
  13. eye/classification/__init__.py +0 -0
  14. eye/classification/core.py +188 -0
  15. eye/config.py +2 -0
  16. eye/core/__init__.py +0 -0
  17. eye/core/trackers/__init__.py +1 -0
  18. eye/core/trackers/botsort_tracker.py +336 -0
  19. eye/core/trackers/bytetrack_tracker.py +284 -0
  20. eye/core/trackers/sort_tracker.py +200 -0
  21. eye/core/tracking.py +146 -0
  22. eye/dataset/__init__.py +0 -0
  23. eye/dataset/core.py +919 -0
  24. eye/dataset/formats/__init__.py +0 -0
  25. eye/dataset/formats/coco.py +258 -0
  26. eye/dataset/formats/pascal_voc.py +279 -0
  27. eye/dataset/formats/yolo.py +272 -0
  28. eye/dataset/utils.py +259 -0
  29. eye/detection/__init__.py +0 -0
  30. eye/detection/auto_convert.py +155 -0
  31. eye/detection/core.py +1529 -0
  32. eye/detection/detections_enhanced.py +392 -0
  33. eye/detection/line_zone.py +859 -0
  34. eye/detection/lmm.py +184 -0
  35. eye/detection/overlap_filter.py +270 -0
  36. eye/detection/tools/__init__.py +0 -0
  37. eye/detection/tools/csv_sink.py +181 -0
  38. eye/detection/tools/inference_slicer.py +288 -0
  39. eye/detection/tools/json_sink.py +142 -0
  40. eye/detection/tools/polygon_zone.py +202 -0
  41. eye/detection/tools/smoother.py +123 -0
  42. eye/detection/tools/smoothing.py +179 -0
  43. eye/detection/tools/smoothing_config.py +202 -0
  44. eye/detection/tools/transformers.py +247 -0
  45. eye/detection/utils.py +1175 -0
  46. eye/draw/__init__.py +0 -0
  47. eye/draw/color.py +154 -0
  48. eye/draw/utils.py +374 -0
  49. eye/filters.py +112 -0
  50. eye/geometry/__init__.py +0 -0
  51. eye/geometry/core.py +128 -0
  52. eye/geometry/utils.py +47 -0
  53. eye/keypoint/__init__.py +0 -0
  54. eye/keypoint/annotators.py +442 -0
  55. eye/keypoint/core.py +687 -0
  56. eye/keypoint/skeletons.py +2647 -0
  57. eye/metrics/__init__.py +21 -0
  58. eye/metrics/core.py +72 -0
  59. eye/metrics/detection.py +843 -0
  60. eye/metrics/f1_score.py +648 -0
  61. eye/metrics/mean_average_precision.py +628 -0
  62. eye/metrics/mean_average_recall.py +697 -0
  63. eye/metrics/precision.py +653 -0
  64. eye/metrics/recall.py +652 -0
  65. eye/metrics/utils/__init__.py +0 -0
  66. eye/metrics/utils/object_size.py +158 -0
  67. eye/metrics/utils/utils.py +9 -0
  68. eye/py.typed +0 -0
  69. eye/quick.py +104 -0
  70. eye/tracker/__init__.py +0 -0
  71. eye/tracker/byte_tracker/__init__.py +0 -0
  72. eye/tracker/byte_tracker/core.py +386 -0
  73. eye/tracker/byte_tracker/kalman_filter.py +205 -0
  74. eye/tracker/byte_tracker/matching.py +69 -0
  75. eye/tracker/byte_tracker/single_object_track.py +178 -0
  76. eye/tracker/byte_tracker/utils.py +18 -0
  77. eye/utils/__init__.py +0 -0
  78. eye/utils/conversion.py +132 -0
  79. eye/utils/file.py +159 -0
  80. eye/utils/image.py +794 -0
  81. eye/utils/internal.py +200 -0
  82. eye/utils/iterables.py +84 -0
  83. eye/utils/notebook.py +114 -0
  84. eye/utils/video.py +307 -0
  85. eye/utils_eye/__init__.py +1 -0
  86. eye/utils_eye/geometry.py +71 -0
  87. eye/utils_eye/nms.py +55 -0
  88. eye/validators/__init__.py +140 -0
  89. eye/web.py +271 -0
  90. eye_cv-1.0.0.dist-info/METADATA +319 -0
  91. eye_cv-1.0.0.dist-info/RECORD +94 -0
  92. eye_cv-1.0.0.dist-info/WHEEL +5 -0
  93. eye_cv-1.0.0.dist-info/licenses/LICENSE +21 -0
  94. eye_cv-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,247 @@
1
+ import io
2
+ from typing import Any, Dict, Optional
3
+
4
+ import numpy as np
5
+ from PIL import Image
6
+
7
+ from eye.config import CLASS_NAME_DATA_FIELD
8
+ from eye.detection.utils import mask_to_xyxy
9
+
10
+
11
def process_transformers_detection_result(
    detection_result: dict, id2label: Optional[Dict[int, str]]
) -> dict:
    """
    Convert the output of Transformers object-detection post-processing
    functions (`post_process` in v4, `post_process_detection` in v5) into
    plain numpy-backed dictionaries.

    Args:
        detection_result (dict): Detection output holding 'boxes', 'labels',
            and 'scores' tensors.
        id2label (Optional[Dict[int, str]]): Mapping from class IDs to class
            names, typically taken from the `transformers` model config. When
            provided, class names are included in the returned data dict.

    Returns:
        dict: Keys `xyxy`, `confidence`, `class_id`, and `data` with the
            tensors detached and converted to numpy arrays.
    """
    labels = detection_result["labels"]
    class_ids = labels.cpu().detach().numpy().astype(int)

    return dict(
        xyxy=detection_result["boxes"].cpu().detach().numpy(),
        confidence=detection_result["scores"].cpu().detach().numpy(),
        class_id=class_ids,
        data=append_class_names_to_data(class_ids, id2label, {}),
    )
38
+
39
+
40
def process_transformers_v4_segmentation_result(
    segmentation_result: dict, id2label: Optional[Dict[int, str]]
) -> dict:
    """
    Convert the output of Transformers v4 segmentation post-processing
    (`post_process_panoptic`, `post_process_segmentation`,
    `post_process_instance`) into plain numpy-backed dictionaries.

    Args:
        segmentation_result (dict): Segmentation output with 'masks',
            'labels', and 'scores' tensors (and optionally 'boxes' or a
            panoptic 'png_string').
        id2label (Optional[Dict[int, str]]): Mapping from class IDs to class
            names, typically taken from the `transformers` model config. When
            provided, class names are included in the returned data dict.

    Returns:
        dict: Keys `xyxy`, `mask`, `confidence`, `class_id`, and `data`.
    """
    if "png_string" in segmentation_result:
        # v4 panoptic results ship masks as an encoded PNG byte string;
        # delegate to the dedicated decoder path.
        return process_transformers_v4_panoptic_segmentation_result(
            segmentation_result, id2label
        )

    masks = segmentation_result["masks"].cpu().detach().numpy().astype(bool)
    class_ids = segmentation_result["labels"].cpu().detach().numpy().astype(int)

    if "boxes" in segmentation_result:
        xyxy = segmentation_result["boxes"].cpu().detach().numpy()
        # With boxes present the masks carry a singleton channel axis; drop it.
        mask = np.squeeze(masks, axis=1)
    else:
        xyxy = mask_to_xyxy(masks)
        mask = masks

    return dict(
        xyxy=xyxy,
        mask=mask,
        confidence=segmentation_result["scores"].cpu().detach().numpy(),
        class_id=class_ids,
        data=append_class_names_to_data(class_ids, id2label, {}),
    )
77
+
78
+
79
def process_transformers_v5_segmentation_result(
    segmentation_result: dict, id2label: Optional[Dict[int, str]]
) -> dict:
    """
    Convert the output of Transformers v5 segmentation post-processing
    (`post_process_semantic_segmentation`,
    `post_process_instance_segmentation`,
    `post_process_panoptic_segmentation`) into plain numpy-backed
    dictionaries.

    Args:
        segmentation_result (Union[dict, np.ndarray]): Either a dictionary of
            segmentation results or a tensor representing a segmentation map.
        id2label (Optional[Dict[int, str]]): Mapping from class IDs to class
            names, typically taken from the `transformers` model config. When
            provided, class names are included in the returned data dict.

    Returns:
        dict: Keys `xyxy`, `mask`, `class_id`, `data` (plus `confidence` for
            the semantic/instance path).
    """
    # Comparing the class name avoids importing torch just for isinstance().
    if segmentation_result.__class__.__name__ != "Tensor":
        return process_transformers_v5_semantic_or_instance_segmentation_result(
            segmentation_result, id2label
        )

    segmentation_array = segmentation_result.cpu().detach().numpy()
    return process_transformers_v5_panoptic_segmentation_result(
        segmentation_array, id2label
    )
107
+
108
+
109
def process_transformers_v5_semantic_or_instance_segmentation_result(
    segmentation_result: dict, id2label: Optional[Dict[int, str]]
) -> dict:
    """
    Convert the output of Transformers v5 `post_process_semantic_segmentation`
    and `post_process_instance_segmentation` into plain numpy-backed
    dictionaries.

    Args:
        segmentation_result (dict): Segmentation output with keys
            `segments_info` and `segmentation`.
        id2label (Optional[Dict[int, str]]): Mapping from class IDs to class
            names, typically taken from the `transformers` model config. When
            provided, class names are included in the returned data dict.

    Returns:
        dict: Keys `xyxy`, `mask`, `confidence`, `class_id`, and `data`.
    """
    segments = segmentation_result["segments_info"]
    label_map = segmentation_result["segmentation"].cpu().detach().numpy()

    scores = np.array([segment["score"] for segment in segments])
    class_ids = np.array([segment["label_id"] for segment in segments])
    # One boolean mask per segment, selected by its id in the label map.
    masks = np.array(
        [label_map == segment["id"] for segment in segments]
    ).astype(bool)

    return dict(
        xyxy=mask_to_xyxy(masks),
        mask=masks,
        confidence=scores,
        class_id=class_ids,
        data=append_class_names_to_data(class_ids, id2label, {}),
    )
143
+
144
+
145
def process_transformers_v4_panoptic_segmentation_result(
    segmentation_result: dict, id2label: Optional[Dict[int, str]]
) -> dict:
    """
    Convert the output of the Transformers v4 `post_process_panoptic`
    function into plain numpy-backed dictionaries.

    Args:
        segmentation_result (dict): Segmentation output with keys such as
            'png_string' and 'segments_info'.
        id2label (Optional[Dict[int, str]]): Mapping from class IDs to class
            names, typically taken from the `transformers` model config. When
            provided, class names are included in the returned data dict.

    Returns:
        dict: Keys `xyxy`, `mask`, `class_id`, and `data` (no confidence
            scores are available on this path).
    """
    segments = segmentation_result["segments_info"]
    class_ids = np.array([segment["category_id"] for segment in segments])
    # The panoptic label map is delivered as an encoded PNG byte string.
    label_map = png_string_to_segmentation_array(
        png_string=segmentation_result["png_string"]
    )
    # One boolean mask per segment, selected by its id in the label map.
    masks = np.array(
        [label_map == segment["id"] for segment in segments]
    ).astype(bool)

    return dict(
        xyxy=mask_to_xyxy(masks),
        mask=masks,
        class_id=class_ids,
        data=append_class_names_to_data(class_ids, id2label, {}),
    )
177
+
178
+
179
def process_transformers_v5_panoptic_segmentation_result(
    segmentation_array: np.ndarray, id2label: Optional[Dict[int, str]]
) -> dict:
    """
    Convert the output of the Transformers v5
    `post_process_panoptic_segmentation` function into plain numpy-backed
    dictionaries.

    Args:
        segmentation_array (np.ndarray): Label map where each value marks a
            distinct class.
        id2label (Optional[Dict[int, str]]): Mapping from class IDs to class
            names, typically taken from the `transformers` model config. When
            provided, class names are included in the returned data dict.

    Returns:
        dict: Keys `xyxy`, `mask`, `class_id`, and `data` (no confidence
            scores are available on this path).
    """
    # One boolean mask per unique label value present in the map.
    class_ids = np.unique(segmentation_array)
    masks = np.stack(
        [segmentation_array == class_id for class_id in class_ids],
        axis=0,
    ).astype(bool)

    return dict(
        xyxy=mask_to_xyxy(masks),
        mask=masks,
        class_id=class_ids,
        data=append_class_names_to_data(class_ids, id2label, {}),
    )
202
+
203
+
204
def png_string_to_segmentation_array(png_string: bytes) -> np.ndarray:
    """
    Decode a PNG byte string into a 2-D label mask.

    Args:
        png_string (bytes): A byte string representing the PNG image.

    Returns:
        np.ndarray: A uint8 label mask of shape (H, W); each unique value
            marks a different object or category.
    """
    with io.BytesIO(png_string) as buffer:
        decoded = np.array(Image.open(buffer), dtype=np.uint8)
    # Only the first channel is kept. NOTE(review): this assumes segment ids
    # fit in a single 8-bit channel — confirm against the PNG encoder used
    # upstream.
    return decoded[:, :, 0]
219
+
220
+
221
def append_class_names_to_data(
    class_ids: np.ndarray,
    id2label: Optional[Dict[int, str]],
    data: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """
    Create or extend a data dictionary with class names when a label map is
    available.

    Args:
        class_ids (np.ndarray): Array of class IDs to look up.
        id2label (Optional[Dict[int, str]]): Mapping from class IDs to class
            names, typically taken from the `transformers` model config. When
            `None`, the dictionary is returned without a class-name entry.
        data (Optional[Dict[str, Any]]): Existing dictionary to extend in
            place; a fresh empty dict is created when omitted.

    Returns:
        Dict[str, Any]: The (possibly newly created) dictionary, holding a
            class-name array under `CLASS_NAME_DATA_FIELD` when `id2label`
            was provided. A KeyError propagates if an id is missing from
            `id2label`.
    """
    result = {} if data is None else data

    if id2label is None:
        return result

    result[CLASS_NAME_DATA_FIELD] = np.array(
        [id2label[class_id] for class_id in class_ids]
    )
    return result