dgenerate-ultralytics-headless 8.3.222__py3-none-any.whl → 8.3.225__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (158)
  1. {dgenerate_ultralytics_headless-8.3.222.dist-info → dgenerate_ultralytics_headless-8.3.225.dist-info}/METADATA +2 -2
  2. dgenerate_ultralytics_headless-8.3.225.dist-info/RECORD +286 -0
  3. tests/conftest.py +5 -8
  4. tests/test_cli.py +1 -8
  5. tests/test_python.py +1 -2
  6. ultralytics/__init__.py +1 -1
  7. ultralytics/cfg/__init__.py +34 -49
  8. ultralytics/cfg/datasets/ImageNet.yaml +1 -1
  9. ultralytics/cfg/datasets/kitti.yaml +27 -0
  10. ultralytics/cfg/datasets/lvis.yaml +5 -5
  11. ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
  12. ultralytics/data/annotator.py +3 -4
  13. ultralytics/data/augment.py +244 -323
  14. ultralytics/data/base.py +12 -22
  15. ultralytics/data/build.py +47 -40
  16. ultralytics/data/converter.py +32 -42
  17. ultralytics/data/dataset.py +43 -71
  18. ultralytics/data/loaders.py +22 -34
  19. ultralytics/data/split.py +5 -6
  20. ultralytics/data/split_dota.py +8 -15
  21. ultralytics/data/utils.py +27 -36
  22. ultralytics/engine/exporter.py +49 -116
  23. ultralytics/engine/model.py +144 -180
  24. ultralytics/engine/predictor.py +18 -29
  25. ultralytics/engine/results.py +165 -231
  26. ultralytics/engine/trainer.py +11 -19
  27. ultralytics/engine/tuner.py +13 -23
  28. ultralytics/engine/validator.py +6 -10
  29. ultralytics/hub/__init__.py +7 -12
  30. ultralytics/hub/auth.py +6 -12
  31. ultralytics/hub/google/__init__.py +7 -10
  32. ultralytics/hub/session.py +15 -25
  33. ultralytics/hub/utils.py +3 -6
  34. ultralytics/models/fastsam/model.py +6 -8
  35. ultralytics/models/fastsam/predict.py +5 -10
  36. ultralytics/models/fastsam/utils.py +1 -2
  37. ultralytics/models/fastsam/val.py +2 -4
  38. ultralytics/models/nas/model.py +5 -8
  39. ultralytics/models/nas/predict.py +7 -9
  40. ultralytics/models/nas/val.py +1 -2
  41. ultralytics/models/rtdetr/model.py +5 -8
  42. ultralytics/models/rtdetr/predict.py +15 -18
  43. ultralytics/models/rtdetr/train.py +10 -13
  44. ultralytics/models/rtdetr/val.py +13 -20
  45. ultralytics/models/sam/amg.py +12 -18
  46. ultralytics/models/sam/build.py +6 -9
  47. ultralytics/models/sam/model.py +16 -23
  48. ultralytics/models/sam/modules/blocks.py +62 -84
  49. ultralytics/models/sam/modules/decoders.py +17 -24
  50. ultralytics/models/sam/modules/encoders.py +40 -56
  51. ultralytics/models/sam/modules/memory_attention.py +10 -16
  52. ultralytics/models/sam/modules/sam.py +41 -47
  53. ultralytics/models/sam/modules/tiny_encoder.py +64 -83
  54. ultralytics/models/sam/modules/transformer.py +17 -27
  55. ultralytics/models/sam/modules/utils.py +31 -42
  56. ultralytics/models/sam/predict.py +172 -209
  57. ultralytics/models/utils/loss.py +14 -26
  58. ultralytics/models/utils/ops.py +13 -17
  59. ultralytics/models/yolo/classify/predict.py +8 -11
  60. ultralytics/models/yolo/classify/train.py +8 -16
  61. ultralytics/models/yolo/classify/val.py +13 -20
  62. ultralytics/models/yolo/detect/predict.py +4 -8
  63. ultralytics/models/yolo/detect/train.py +11 -20
  64. ultralytics/models/yolo/detect/val.py +38 -48
  65. ultralytics/models/yolo/model.py +35 -47
  66. ultralytics/models/yolo/obb/predict.py +5 -8
  67. ultralytics/models/yolo/obb/train.py +11 -14
  68. ultralytics/models/yolo/obb/val.py +20 -28
  69. ultralytics/models/yolo/pose/predict.py +5 -8
  70. ultralytics/models/yolo/pose/train.py +4 -8
  71. ultralytics/models/yolo/pose/val.py +31 -39
  72. ultralytics/models/yolo/segment/predict.py +9 -14
  73. ultralytics/models/yolo/segment/train.py +3 -6
  74. ultralytics/models/yolo/segment/val.py +16 -26
  75. ultralytics/models/yolo/world/train.py +8 -14
  76. ultralytics/models/yolo/world/train_world.py +11 -16
  77. ultralytics/models/yolo/yoloe/predict.py +16 -23
  78. ultralytics/models/yolo/yoloe/train.py +30 -43
  79. ultralytics/models/yolo/yoloe/train_seg.py +5 -10
  80. ultralytics/models/yolo/yoloe/val.py +15 -20
  81. ultralytics/nn/autobackend.py +10 -18
  82. ultralytics/nn/modules/activation.py +4 -6
  83. ultralytics/nn/modules/block.py +99 -185
  84. ultralytics/nn/modules/conv.py +45 -90
  85. ultralytics/nn/modules/head.py +44 -98
  86. ultralytics/nn/modules/transformer.py +44 -76
  87. ultralytics/nn/modules/utils.py +14 -19
  88. ultralytics/nn/tasks.py +86 -146
  89. ultralytics/nn/text_model.py +25 -40
  90. ultralytics/solutions/ai_gym.py +10 -16
  91. ultralytics/solutions/analytics.py +7 -10
  92. ultralytics/solutions/config.py +4 -5
  93. ultralytics/solutions/distance_calculation.py +9 -12
  94. ultralytics/solutions/heatmap.py +7 -13
  95. ultralytics/solutions/instance_segmentation.py +5 -8
  96. ultralytics/solutions/object_blurrer.py +7 -10
  97. ultralytics/solutions/object_counter.py +8 -12
  98. ultralytics/solutions/object_cropper.py +5 -8
  99. ultralytics/solutions/parking_management.py +12 -14
  100. ultralytics/solutions/queue_management.py +4 -6
  101. ultralytics/solutions/region_counter.py +7 -10
  102. ultralytics/solutions/security_alarm.py +14 -19
  103. ultralytics/solutions/similarity_search.py +7 -12
  104. ultralytics/solutions/solutions.py +31 -53
  105. ultralytics/solutions/speed_estimation.py +6 -9
  106. ultralytics/solutions/streamlit_inference.py +2 -4
  107. ultralytics/solutions/trackzone.py +7 -10
  108. ultralytics/solutions/vision_eye.py +5 -8
  109. ultralytics/trackers/basetrack.py +2 -4
  110. ultralytics/trackers/bot_sort.py +6 -11
  111. ultralytics/trackers/byte_tracker.py +10 -15
  112. ultralytics/trackers/track.py +3 -6
  113. ultralytics/trackers/utils/gmc.py +6 -12
  114. ultralytics/trackers/utils/kalman_filter.py +35 -43
  115. ultralytics/trackers/utils/matching.py +6 -10
  116. ultralytics/utils/__init__.py +61 -100
  117. ultralytics/utils/autobatch.py +2 -4
  118. ultralytics/utils/autodevice.py +11 -13
  119. ultralytics/utils/benchmarks.py +25 -35
  120. ultralytics/utils/callbacks/base.py +8 -10
  121. ultralytics/utils/callbacks/clearml.py +2 -4
  122. ultralytics/utils/callbacks/comet.py +30 -44
  123. ultralytics/utils/callbacks/dvc.py +13 -18
  124. ultralytics/utils/callbacks/mlflow.py +4 -5
  125. ultralytics/utils/callbacks/neptune.py +4 -6
  126. ultralytics/utils/callbacks/raytune.py +3 -4
  127. ultralytics/utils/callbacks/tensorboard.py +4 -6
  128. ultralytics/utils/callbacks/wb.py +10 -13
  129. ultralytics/utils/checks.py +29 -56
  130. ultralytics/utils/cpu.py +1 -2
  131. ultralytics/utils/dist.py +8 -12
  132. ultralytics/utils/downloads.py +17 -27
  133. ultralytics/utils/errors.py +6 -8
  134. ultralytics/utils/events.py +2 -4
  135. ultralytics/utils/export/__init__.py +4 -239
  136. ultralytics/utils/export/engine.py +237 -0
  137. ultralytics/utils/export/imx.py +11 -17
  138. ultralytics/utils/export/tensorflow.py +217 -0
  139. ultralytics/utils/files.py +10 -15
  140. ultralytics/utils/git.py +5 -7
  141. ultralytics/utils/instance.py +30 -51
  142. ultralytics/utils/logger.py +11 -15
  143. ultralytics/utils/loss.py +8 -14
  144. ultralytics/utils/metrics.py +98 -138
  145. ultralytics/utils/nms.py +13 -16
  146. ultralytics/utils/ops.py +47 -74
  147. ultralytics/utils/patches.py +11 -18
  148. ultralytics/utils/plotting.py +29 -42
  149. ultralytics/utils/tal.py +25 -39
  150. ultralytics/utils/torch_utils.py +45 -73
  151. ultralytics/utils/tqdm.py +6 -8
  152. ultralytics/utils/triton.py +9 -12
  153. ultralytics/utils/tuner.py +1 -2
  154. dgenerate_ultralytics_headless-8.3.222.dist-info/RECORD +0 -283
  155. {dgenerate_ultralytics_headless-8.3.222.dist-info → dgenerate_ultralytics_headless-8.3.225.dist-info}/WHEEL +0 -0
  156. {dgenerate_ultralytics_headless-8.3.222.dist-info → dgenerate_ultralytics_headless-8.3.225.dist-info}/entry_points.txt +0 -0
  157. {dgenerate_ultralytics_headless-8.3.222.dist-info → dgenerate_ultralytics_headless-8.3.225.dist-info}/licenses/LICENSE +0 -0
  158. {dgenerate_ultralytics_headless-8.3.222.dist-info → dgenerate_ultralytics_headless-8.3.225.dist-info}/top_level.txt +0 -0
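The hunks below come from the Python sources listed above. Nearly every one applies the same mechanical docstring reformat: the summary sentence moves from its own line up onto the line with the opening triple quotes (the pydocstyle D212 style), and overlong wrapped lines are re-flowed. A minimal before/after sketch of the pattern, using a generic function rather than code from this package:

# Before: summary on its own line below the opening quotes.
def process(im0):
    """
    Process the input frame to detect and count objects.
    """

# After: summary starts on the same line as the opening quotes.
def process(im0):
    """Process the input frame to detect and count objects."""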
ultralytics/solutions/region_counter.py

@@ -11,8 +11,7 @@ from ultralytics.utils.plotting import colors
 
 
 class RegionCounter(BaseSolution):
-    """
-    A class for real-time counting of objects within user-defined regions in a video stream.
+    """A class for real-time counting of objects within user-defined regions in a video stream.
 
     This class inherits from `BaseSolution` and provides functionality to define polygonal regions in a video frame,
     track objects, and count those objects that pass through each defined region. Useful for applications requiring
@@ -21,8 +20,8 @@ class RegionCounter(BaseSolution):
     Attributes:
         region_template (dict): Template for creating new counting regions with default attributes including name,
             polygon coordinates, and display colors.
-        counting_regions (list): List storing all defined regions, where each entry is based on `region_template`
-            and includes specific region settings like name, coordinates, and color.
+        counting_regions (list): List storing all defined regions, where each entry is based on `region_template` and
+            includes specific region settings like name, coordinates, and color.
         region_counts (dict): Dictionary storing the count of objects for each named region.
 
     Methods:
@@ -59,8 +58,7 @@ class RegionCounter(BaseSolution):
         region_color: tuple[int, int, int],
         text_color: tuple[int, int, int],
     ) -> dict[str, Any]:
-        """
-        Add a new region to the counting list based on the provided template with specific attributes.
+        """Add a new region to the counting list based on the provided template with specific attributes.
 
         Args:
             name (str): Name assigned to the new region.
@@ -94,15 +92,14 @@ class RegionCounter(BaseSolution):
             region["prepared_polygon"] = self.prep(region["polygon"])
 
     def process(self, im0: np.ndarray) -> SolutionResults:
-        """
-        Process the input frame to detect and count objects within each defined region.
+        """Process the input frame to detect and count objects within each defined region.
 
         Args:
             im0 (np.ndarray): Input image frame where objects and regions are annotated.
 
         Returns:
-            (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (int, total number of tracked objects),
-                and 'region_counts' (dict, counts of objects per region).
+            (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (int, total number of tracked
+                objects), and 'region_counts' (dict, counts of objects per region).
         """
         self.extract_tracks(im0)
         annotator = SolutionAnnotator(im0, line_width=self.line_width)
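The `region["prepared_polygon"] = self.prep(region["polygon"])` context above suggests prepared geometries (as in Shapely) are used to speed up repeated point-in-region tests. A minimal sketch of that counting pattern, assuming Shapely is available; the polygon and detection centers are made-up values:

from shapely.geometry import Point, Polygon
from shapely.prepared import prep

# Hypothetical counting region in pixel coordinates.
polygon = Polygon([(50, 80), (250, 20), (450, 80), (400, 350)])
prepared = prep(polygon)  # Prepared geometry accelerates repeated contains() checks.

# Hypothetical detection centers (cx, cy) from one frame.
centers = [(120, 100), (600, 40), (300, 200)]
count = sum(prepared.contains(Point(cx, cy)) for cx, cy in centers)
print(f"objects in region: {count}")  # -> objects in region: 2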
ultralytics/solutions/security_alarm.py

@@ -8,8 +8,7 @@ from ultralytics.utils.plotting import colors
 
 
 class SecurityAlarm(BaseSolution):
-    """
-    A class to manage security alarm functionalities for real-time monitoring.
+    """A class to manage security alarm functionalities for real-time monitoring.
 
     This class extends the BaseSolution class and provides features to monitor objects in a frame, send email
     notifications when specific thresholds are exceeded for total detections, and annotate the output frame for
@@ -35,8 +34,7 @@ class SecurityAlarm(BaseSolution):
     """
 
     def __init__(self, **kwargs: Any) -> None:
-        """
-        Initialize the SecurityAlarm class with parameters for real-time object monitoring.
+        """Initialize the SecurityAlarm class with parameters for real-time object monitoring.
 
         Args:
             **kwargs (Any): Additional keyword arguments passed to the parent class.
@@ -49,16 +47,15 @@ class SecurityAlarm(BaseSolution):
         self.from_email = ""
 
     def authenticate(self, from_email: str, password: str, to_email: str) -> None:
-        """
-        Authenticate the email server for sending alert notifications.
+        """Authenticate the email server for sending alert notifications.
+
+        This method initializes a secure connection with the SMTP server and logs in using the provided credentials.
 
         Args:
             from_email (str): Sender's email address.
             password (str): Password for the sender's email account.
             to_email (str): Recipient's email address.
 
-        This method initializes a secure connection with the SMTP server and logs in using the provided credentials.
-
         Examples:
             >>> alarm = SecurityAlarm()
             >>> alarm.authenticate("sender@example.com", "password123", "recipient@example.com")
@@ -72,16 +69,15 @@ class SecurityAlarm(BaseSolution):
         self.from_email = from_email
 
     def send_email(self, im0, records: int = 5) -> None:
-        """
-        Send an email notification with an image attachment indicating the number of objects detected.
+        """Send an email notification with an image attachment indicating the number of objects detected.
+
+        This method encodes the input image, composes the email message with details about the detection, and sends it
+        to the specified recipient.
 
         Args:
             im0 (np.ndarray): The input image or frame to be attached to the email.
             records (int, optional): The number of detected objects to be included in the email message.
 
-        This method encodes the input image, composes the email message with details about the detection, and sends it
-        to the specified recipient.
-
         Examples:
             >>> alarm = SecurityAlarm()
             >>> frame = cv2.imread("path/to/image.jpg")
@@ -117,8 +113,11 @@
             LOGGER.error(f"Failed to send email: {e}")
 
     def process(self, im0) -> SolutionResults:
-        """
-        Monitor the frame, process object detections, and trigger alerts if thresholds are exceeded.
+        """Monitor the frame, process object detections, and trigger alerts if thresholds are exceeded.
+
+        This method processes the input frame, extracts detections, annotates the frame with bounding boxes, and sends
+        an email notification if the number of detected objects surpasses the specified threshold and an alert has not
+        already been sent.
 
         Args:
             im0 (np.ndarray): The input image or frame to be processed and annotated.
@@ -127,10 +126,6 @@
             (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (total number of tracked objects) and
                 'email_sent' (whether an email alert was triggered).
 
-        This method processes the input frame, extracts detections, annotates the frame with bounding boxes, and sends
-        an email notification if the number of detected objects surpasses the specified threshold and an alert has not
-        already been sent.
-
         Examples:
             >>> alarm = SecurityAlarm()
             >>> frame = cv2.imread("path/to/image.jpg")
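The `authenticate` and `send_email` docstrings above describe a standard SMTP flow: open a secure connection, log in, encode the frame, and send it as an attachment. A self-contained sketch of that flow using only the standard library and OpenCV; the host, port, and message layout are illustrative assumptions, not the package's exact implementation:

import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

import cv2


def send_alert(im0, records, from_email, password, to_email):
    """Encode a frame and email it as a security-alert attachment."""
    ok, buf = cv2.imencode(".jpg", im0)  # JPEG-encode the frame to bytes.
    if not ok:
        return
    msg = MIMEMultipart()
    msg["From"], msg["To"], msg["Subject"] = from_email, to_email, "Security Alert"
    msg.attach(MIMEText(f"Alert: {records} objects detected.", "plain"))
    msg.attach(MIMEImage(buf.tobytes(), name="frame.jpg"))
    with smtplib.SMTP_SSL("smtp.example.com", 465) as server:  # Placeholder host/port.
        server.login(from_email, password)
        server.send_message(msg)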
ultralytics/solutions/similarity_search.py

@@ -18,8 +18,7 @@ os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # Avoid OpenMP conflict on some sys
 
 
 class VisualAISearch:
-    """
-    A semantic image search system that leverages OpenCLIP for generating high-quality image and text embeddings and
+    """A semantic image search system that leverages OpenCLIP for generating high-quality image and text embeddings and
     FAISS for fast similarity-based retrieval.
 
     This class aligns image and text embeddings in a shared semantic space, enabling users to search large collections
@@ -85,8 +84,7 @@ class VisualAISearch:
         return self.model.encode_text(self.model.tokenize([text])).detach().cpu().numpy()
 
     def load_or_build_index(self) -> None:
-        """
-        Load existing FAISS index or build a new one from image features.
+        """Load existing FAISS index or build a new one from image features.
 
         Checks if FAISS index and image paths exist on disk. If found, loads them directly. Otherwise, builds a new
         index by extracting features from all images in the data directory, normalizes the features, and saves both the
@@ -130,8 +128,7 @@ class VisualAISearch:
         LOGGER.info(f"Indexed {len(self.image_paths)} images.")
 
     def search(self, query: str, k: int = 30, similarity_thresh: float = 0.1) -> list[str]:
-        """
-        Return top-k semantically similar images to the given query.
+        """Return top-k semantically similar images to the given query.
 
         Args:
             query (str): Natural language text query to search for.
@@ -167,11 +164,10 @@ class VisualAISearch:
 
 
 class SearchApp:
-    """
-    A Flask-based web interface for semantic image search with natural language queries.
+    """A Flask-based web interface for semantic image search with natural language queries.
 
-    This class provides a clean, responsive frontend that enables users to input natural language queries and
-    instantly view the most relevant images retrieved from the indexed database.
+    This class provides a clean, responsive frontend that enables users to input natural language queries and instantly
+    view the most relevant images retrieved from the indexed database.
 
     Attributes:
         render_template: Flask template rendering function.
@@ -190,8 +186,7 @@ class SearchApp:
     """
 
    def __init__(self, data: str = "images", device: str | None = None) -> None:
-        """
-        Initialize the SearchApp with VisualAISearch backend.
+        """Initialize the SearchApp with VisualAISearch backend.
 
         Args:
             data (str, optional): Path to directory containing images to index and search.
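The `VisualAISearch` docstrings describe the usual recipe for embedding search: L2-normalize the image and text embeddings so that inner product equals cosine similarity, index them with FAISS, then threshold the top-k hits. A compact sketch of that recipe, with random vectors standing in for real OpenCLIP embeddings and the dimension and threshold chosen as illustrative assumptions:

import faiss
import numpy as np

d = 512  # Embedding dimension; CLIP ViT-B/32, for example, produces 512-d vectors.
rng = np.random.default_rng(0)

feats = rng.standard_normal((100, d)).astype(np.float32)  # Stand-in image embeddings.
feats /= np.linalg.norm(feats, axis=1, keepdims=True)  # L2-normalize rows.

index = faiss.IndexFlatIP(d)  # Inner product == cosine similarity on unit vectors.
index.add(feats)

query = rng.standard_normal((1, d)).astype(np.float32)  # Stand-in text embedding.
query /= np.linalg.norm(query)
scores, ids = index.search(query, 30)  # Top-k, mirroring search(query, k=30).
hits = [(int(i), float(s)) for i, s in zip(ids[0], scores[0]) if s >= 0.1]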
ultralytics/solutions/solutions.py

@@ -18,8 +18,7 @@ from ultralytics.utils.plotting import Annotator
 
 
 class BaseSolution:
-    """
-    A base class for managing Ultralytics Solutions.
+    """A base class for managing Ultralytics Solutions.
 
     This class provides core functionality for various Ultralytics Solutions, including model loading, object tracking,
     and region initialization. It serves as the foundation for implementing specific computer vision solutions such as
@@ -73,8 +72,7 @@ class BaseSolution:
     """
 
     def __init__(self, is_cli: bool = False, **kwargs: Any) -> None:
-        """
-        Initialize the BaseSolution class with configuration settings and YOLO model.
+        """Initialize the BaseSolution class with configuration settings and YOLO model.
 
         Args:
             is_cli (bool): Enable CLI mode if set to True.
@@ -138,12 +136,11 @@ class BaseSolution:
         )
 
     def adjust_box_label(self, cls: int, conf: float, track_id: int | None = None) -> str | None:
-        """
-        Generate a formatted label for a bounding box.
+        """Generate a formatted label for a bounding box.
 
-        This method constructs a label string for a bounding box using the class index and confidence score.
-        Optionally includes the track ID if provided. The label format adapts based on the display settings
-        defined in `self.show_conf` and `self.show_labels`.
+        This method constructs a label string for a bounding box using the class index and confidence score. Optionally
+        includes the track ID if provided. The label format adapts based on the display settings defined in
+        `self.show_conf` and `self.show_labels`.
 
         Args:
             cls (int): The class index of the detected object.
@@ -157,8 +154,7 @@ class BaseSolution:
         return (f"{name} {conf:.2f}" if self.show_conf else name) if self.show_labels else None
 
     def extract_tracks(self, im0: np.ndarray) -> None:
-        """
-        Apply object tracking and extract tracks from an input image or frame.
+        """Apply object tracking and extract tracks from an input image or frame.
 
         Args:
             im0 (np.ndarray): The input image or frame.
@@ -185,11 +181,10 @@ class BaseSolution:
         self.boxes, self.clss, self.track_ids, self.confs = [], [], [], []
 
     def store_tracking_history(self, track_id: int, box) -> None:
-        """
-        Store the tracking history of an object.
+        """Store the tracking history of an object.
 
-        This method updates the tracking history for a given object by appending the center point of its
-        bounding box to the track line. It maintains a maximum of 30 points in the tracking history.
+        This method updates the tracking history for a given object by appending the center point of its bounding box to
+        the track line. It maintains a maximum of 30 points in the tracking history.
 
         Args:
             track_id (int): The unique identifier for the tracked object.
@@ -214,8 +209,8 @@ class BaseSolution:
         )  # region or line
 
     def display_output(self, plot_im: np.ndarray) -> None:
-        """
-        Display the results of the processing, which could involve showing frames, printing counts, or saving results.
+        """Display the results of the processing, which could involve showing frames, printing counts, or saving
+        results.
 
         This method is responsible for visualizing the output of the object detection and tracking process. It displays
         the processed frame with annotations, and allows for user interaction to close the display.
@@ -264,8 +259,7 @@
 
 
 class SolutionAnnotator(Annotator):
-    """
-    A specialized annotator class for visualizing and analyzing computer vision tasks.
+    """A specialized annotator class for visualizing and analyzing computer vision tasks.
 
     This class extends the base Annotator class, providing additional methods for drawing regions, centroids, tracking
     trails, and visual annotations for Ultralytics Solutions. It offers comprehensive visualization capabilities for
@@ -310,8 +304,7 @@ class SolutionAnnotator(Annotator):
         pil: bool = False,
         example: str = "abc",
     ):
-        """
-        Initialize the SolutionAnnotator class with an image for annotation.
+        """Initialize the SolutionAnnotator class with an image for annotation.
 
         Args:
             im (np.ndarray): The image to be annotated.
@@ -329,8 +322,7 @@ class SolutionAnnotator(Annotator):
         color: tuple[int, int, int] = (0, 255, 0),
         thickness: int = 5,
     ):
-        """
-        Draw a region or line on the image.
+        """Draw a region or line on the image.
 
         Args:
             reg_pts (list[tuple[int, int]], optional): Region points (for line 2 points, for region 4+ points).
@@ -350,8 +342,7 @@ class SolutionAnnotator(Annotator):
         region_color: tuple[int, int, int] = (255, 255, 255),
         txt_color: tuple[int, int, int] = (0, 0, 0),
     ):
-        """
-        Display queue counts on an image centered at the points with customizable font size and colors.
+        """Display queue counts on an image centered at the points with customizable font size and colors.
 
         Args:
             label (str): Queue counts label.
@@ -397,8 +388,7 @@ class SolutionAnnotator(Annotator):
         bg_color: tuple[int, int, int],
         margin: int,
     ):
-        """
-        Display the overall statistics for parking lots, object counter etc.
+        """Display the overall statistics for parking lots, object counter etc.
 
         Args:
             im0 (np.ndarray): Inference image.
@@ -428,8 +418,7 @@ class SolutionAnnotator(Annotator):
     @staticmethod
     @lru_cache(maxsize=256)
     def estimate_pose_angle(a: list[float], b: list[float], c: list[float]) -> float:
-        """
-        Calculate the angle between three points for workout monitoring.
+        """Calculate the angle between three points for workout monitoring.
 
         Args:
             a (list[float]): The coordinates of the first point.
@@ -450,8 +439,7 @@ class SolutionAnnotator(Annotator):
         radius: int = 2,
         conf_thresh: float = 0.25,
     ) -> np.ndarray:
-        """
-        Draw specific keypoints for gym steps counting.
+        """Draw specific keypoints for gym steps counting.
 
         Args:
             keypoints (list[list[float]]): Keypoints data to be plotted, each in format [x, y, confidence].
@@ -486,8 +474,7 @@ class SolutionAnnotator(Annotator):
         color: tuple[int, int, int] = (104, 31, 17),
         txt_color: tuple[int, int, int] = (255, 255, 255),
     ) -> int:
-        """
-        Draw workout text with a background on the image.
+        """Draw workout text with a background on the image.
 
         Args:
             display_text (str): The text to be displayed.
@@ -522,8 +509,7 @@ class SolutionAnnotator(Annotator):
         color: tuple[int, int, int] = (104, 31, 17),
         txt_color: tuple[int, int, int] = (255, 255, 255),
     ):
-        """
-        Plot the pose angle, count value, and step stage for workout monitoring.
+        """Plot the pose angle, count value, and step stage for workout monitoring.
 
         Args:
             angle_text (str): Angle value for workout monitoring.
@@ -554,8 +540,7 @@ class SolutionAnnotator(Annotator):
         line_color: tuple[int, int, int] = (104, 31, 17),
         centroid_color: tuple[int, int, int] = (255, 0, 255),
     ):
-        """
-        Plot the distance and line between two centroids on the frame.
+        """Plot the distance and line between two centroids on the frame.
 
         Args:
             pixels_distance (float): Pixels distance between two bbox centroids.
@@ -597,8 +582,7 @@ class SolutionAnnotator(Annotator):
         y_center: float,
         margin: int,
     ):
-        """
-        Display the bounding boxes labels in parking management app.
+        """Display the bounding boxes labels in parking management app.
 
         Args:
             im0 (np.ndarray): Inference image.
@@ -644,8 +628,7 @@ class SolutionAnnotator(Annotator):
         color: tuple[int, int, int] = (221, 0, 186),
         txt_color: tuple[int, int, int] = (255, 255, 255),
    ):
-        """
-        Draw a sweep annotation line and an optional label.
+        """Draw a sweep annotation line and an optional label.
 
         Args:
             line_x (int): The x-coordinate of the sweep line.
@@ -684,8 +667,7 @@ class SolutionAnnotator(Annotator):
         color: tuple[int, int, int] = (235, 219, 11),
         pin_color: tuple[int, int, int] = (255, 0, 255),
     ):
-        """
-        Perform pinpoint human-vision eye mapping and plotting.
+        """Perform pinpoint human-vision eye mapping and plotting.
 
         Args:
             box (list[float]): Bounding box coordinates in format [x1, y1, x2, y2].
@@ -707,8 +689,7 @@ class SolutionAnnotator(Annotator):
         shape: str = "rect",
         margin: int = 5,
     ):
-        """
-        Draw a label with a background rectangle or circle centered within a given bounding box.
+        """Draw a label with a background rectangle or circle centered within a given bounding box.
 
         Args:
             box (tuple[float, float, float, float]): The bounding box coordinates (x1, y1, x2, y2).
@@ -757,12 +738,11 @@
 
 
 class SolutionResults:
-    """
-    A class to encapsulate the results of Ultralytics Solutions.
+    """A class to encapsulate the results of Ultralytics Solutions.
 
     This class is designed to store and manage various outputs generated by the solution pipeline, including counts,
-    angles, workout stages, and other analytics data. It provides a structured way to access and manipulate results
-    from different computer vision solutions such as object counting, pose estimation, and tracking analytics.
+    angles, workout stages, and other analytics data. It provides a structured way to access and manipulate results from
+    different computer vision solutions such as object counting, pose estimation, and tracking analytics.
 
     Attributes:
         plot_im (np.ndarray): Processed image with counts, blurred, or other effects from solutions.
@@ -785,8 +765,7 @@
     """
 
     def __init__(self, **kwargs):
-        """
-        Initialize a SolutionResults object with default or user-specified values.
+        """Initialize a SolutionResults object with default or user-specified values.
 
         Args:
             **kwargs (Any): Optional arguments to override default attribute values.
@@ -813,8 +792,7 @@
         self.__dict__.update(kwargs)
 
     def __str__(self) -> str:
-        """
-        Return a formatted string representation of the SolutionResults object.
+        """Return a formatted string representation of the SolutionResults object.
 
         Returns:
             (str): A string representation listing non-null attributes.
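`estimate_pose_angle(a, b, c)` in the solutions.py hunks above computes a joint angle from three keypoints for workout monitoring. The standard formulation is the angle at vertex `b` via `atan2`, folded into [0, 180] degrees; a sketch under that assumption, not necessarily the package's exact code:

import math


def estimate_pose_angle(a, b, c):
    """Angle in degrees at vertex b formed by points a-b-c, folded into [0, 180]."""
    angle = math.degrees(
        math.atan2(c[1] - b[1], c[0] - b[0]) - math.atan2(a[1] - b[1], a[0] - b[0])
    )
    angle = abs(angle)
    return 360 - angle if angle > 180 else angle


# Example: a right angle at the elbow, from shoulder, elbow, and wrist keypoints.
print(estimate_pose_angle((0.0, 0.0), (1.0, 0.0), (1.0, 1.0)))  # 90.0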
ultralytics/solutions/speed_estimation.py

@@ -9,12 +9,11 @@ from ultralytics.utils.plotting import colors
 
 
 class SpeedEstimator(BaseSolution):
-    """
-    A class to estimate the speed of objects in a real-time video stream based on their tracks.
+    """A class to estimate the speed of objects in a real-time video stream based on their tracks.
 
-    This class extends the BaseSolution class and provides functionality for estimating object speeds using
-    tracking data in video streams. Speed is calculated based on pixel displacement over time and converted
-    to real-world units using a configurable meters-per-pixel scale factor.
+    This class extends the BaseSolution class and provides functionality for estimating object speeds using tracking
+    data in video streams. Speed is calculated based on pixel displacement over time and converted to real-world units
+    using a configurable meters-per-pixel scale factor.
 
     Attributes:
         fps (float): Video frame rate for time calculations.
@@ -42,8 +41,7 @@ class SpeedEstimator(BaseSolution):
     """
 
     def __init__(self, **kwargs: Any) -> None:
-        """
-        Initialize the SpeedEstimator object with speed estimation parameters and data structures.
+        """Initialize the SpeedEstimator object with speed estimation parameters and data structures.
 
         Args:
             **kwargs (Any): Additional keyword arguments passed to the parent class.
@@ -61,8 +59,7 @@
         self.max_speed = self.CFG["max_speed"]  # Maximum speed adjustment
 
     def process(self, im0) -> SolutionResults:
-        """
-        Process an input frame to estimate object speeds based on tracking data.
+        """Process an input frame to estimate object speeds based on tracking data.
 
         Args:
             im0 (np.ndarray): Input image for processing with shape (H, W, C) for RGB images.
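The `SpeedEstimator` docstring spells out the estimation model: pixel displacement between frames, scaled by a meters-per-pixel calibration factor and the frame rate. The arithmetic, with illustrative values:

# A track center moved 12 px between two consecutive frames at 30 FPS,
# with a calibration of 0.05 m per pixel (all values illustrative).
pixel_displacement = 12.0
fps = 30.0
meters_per_pixel = 0.05

speed_mps = pixel_displacement * meters_per_pixel * fps  # 18.0 m/s
speed_kmh = speed_mps * 3.6  # 64.8 km/h
print(f"{speed_kmh:.1f} km/h")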
ultralytics/solutions/streamlit_inference.py

@@ -16,8 +16,7 @@ torch.classes.__path__ = [] # Torch module __path__._path issue: https://github
 
 
 class Inference:
-    """
-    A class to perform object detection, image classification, image segmentation and pose estimation inference.
+    """A class to perform object detection, image classification, image segmentation and pose estimation inference.
 
     This class provides functionalities for loading models, configuring settings, uploading video files, and performing
     real-time inference using Streamlit and Ultralytics YOLO models.
@@ -54,8 +53,7 @@ class Inference:
     """
 
     def __init__(self, **kwargs: Any) -> None:
-        """
-        Initialize the Inference class, checking Streamlit requirements and setting up the model path.
+        """Initialize the Inference class, checking Streamlit requirements and setting up the model path.
 
         Args:
             **kwargs (Any): Additional keyword arguments for model configuration.
ultralytics/solutions/trackzone.py

@@ -10,8 +10,7 @@ from ultralytics.utils.plotting import colors
 
 
 class TrackZone(BaseSolution):
-    """
-    A class to manage region-based object tracking in a video stream.
+    """A class to manage region-based object tracking in a video stream.
 
     This class extends the BaseSolution class and provides functionality for tracking objects within a specific region
     defined by a polygonal area. Objects outside the region are excluded from tracking.
@@ -37,8 +36,7 @@ class TrackZone(BaseSolution):
     """
 
     def __init__(self, **kwargs: Any) -> None:
-        """
-        Initialize the TrackZone class for tracking objects within a defined region in video streams.
+        """Initialize the TrackZone class for tracking objects within a defined region in video streams.
 
         Args:
             **kwargs (Any): Additional keyword arguments passed to the parent class.
@@ -49,18 +47,17 @@ class TrackZone(BaseSolution):
         self.mask = None
 
     def process(self, im0: np.ndarray) -> SolutionResults:
-        """
-        Process the input frame to track objects within a defined region.
+        """Process the input frame to track objects within a defined region.
 
-        This method initializes the annotator, creates a mask for the specified region, extracts tracks
-        only from the masked area, and updates tracking information. Objects outside the region are ignored.
+        This method initializes the annotator, creates a mask for the specified region, extracts tracks only from the
+        masked area, and updates tracking information. Objects outside the region are ignored.
 
         Args:
             im0 (np.ndarray): The input image or frame to be processed.
 
         Returns:
-            (SolutionResults): Contains processed image `plot_im` and `total_tracks` (int) representing the
-                total number of tracked objects within the defined region.
+            (SolutionResults): Contains processed image `plot_im` and `total_tracks` (int) representing the total number
+                of tracked objects within the defined region.
 
         Examples:
             >>> tracker = TrackZone()
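`TrackZone.process` above describes masking the frame so that the tracker only sees the region polygon. A minimal sketch of that masking step with OpenCV; the frame and region points are placeholders:

import cv2
import numpy as np

im0 = np.zeros((480, 640, 3), dtype=np.uint8)  # Stand-in frame.
region = np.array([(100, 100), (540, 100), (540, 380), (100, 380)], dtype=np.int32)

mask = np.zeros(im0.shape[:2], dtype=np.uint8)
cv2.fillPoly(mask, [region], 255)  # White inside the zone, black outside.
masked = cv2.bitwise_and(im0, im0, mask=mask)  # Zero out pixels outside the zone,
# so detections (and therefore tracks) can only originate inside the region.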
ultralytics/solutions/vision_eye.py

@@ -7,11 +7,10 @@ from ultralytics.utils.plotting import colors
 
 
 class VisionEye(BaseSolution):
-    """
-    A class to manage object detection and vision mapping in images or video streams.
+    """A class to manage object detection and vision mapping in images or video streams.
 
-    This class extends the BaseSolution class and provides functionality for detecting objects,
-    mapping vision points, and annotating results with bounding boxes and labels.
+    This class extends the BaseSolution class and provides functionality for detecting objects, mapping vision points,
+    and annotating results with bounding boxes and labels.
 
     Attributes:
         vision_point (tuple[int, int]): Coordinates (x, y) where vision will view objects and draw tracks.
@@ -27,8 +26,7 @@ class VisionEye(BaseSolution):
     """
 
     def __init__(self, **kwargs: Any) -> None:
-        """
-        Initialize the VisionEye class for detecting objects and applying vision mapping.
+        """Initialize the VisionEye class for detecting objects and applying vision mapping.
 
         Args:
             **kwargs (Any): Keyword arguments passed to the parent class and for configuring vision_point.
@@ -38,8 +36,7 @@
         self.vision_point = self.CFG["vision_point"]
 
     def process(self, im0) -> SolutionResults:
-        """
-        Perform object detection, vision mapping, and annotation on the input image.
+        """Perform object detection, vision mapping, and annotation on the input image.
 
         Args:
             im0 (np.ndarray): The input image for detection and annotation.
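The `VisionEye` annotation reduces to drawing a ray from a fixed `vision_point` to each detected object's centroid, pinning the centroid along the way. A sketch with placeholder boxes; the two BGR colors are the defaults visible in the `visioneye` hunk of solutions.py above:

import cv2
import numpy as np

im0 = np.zeros((480, 640, 3), dtype=np.uint8)  # Stand-in frame.
vision_point = (20, 460)  # Placeholder (x, y) "eye" position.
boxes = [(100, 80, 200, 180), (400, 200, 500, 320)]  # Placeholder x1, y1, x2, y2 boxes.

for x1, y1, x2, y2 in boxes:
    centroid = ((x1 + x2) // 2, (y1 + y2) // 2)
    cv2.circle(im0, centroid, 4, (255, 0, 255), -1)  # Pin the object center (pin_color).
    cv2.line(im0, vision_point, centroid, (235, 219, 11), 2)  # Eye-to-object ray (color).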
ultralytics/trackers/basetrack.py

@@ -8,8 +8,7 @@ import numpy as np
 
 
 class TrackState:
-    """
-    Enumeration class representing the possible states of an object being tracked.
+    """Enumeration class representing the possible states of an object being tracked.
 
     Attributes:
         New (int): State when the object is newly detected.
@@ -30,8 +29,7 @@
 
 
 class BaseTrack:
-    """
-    Base class for object tracking, providing foundational attributes and methods.
+    """Base class for object tracking, providing foundational attributes and methods.
 
     Attributes:
         _count (int): Class-level counter for unique track IDs.
ultralytics/trackers/bot_sort.py

@@ -19,8 +19,7 @@ from .utils.kalman_filter import KalmanFilterXYWH
 
 
 class BOTrack(STrack):
-    """
-    An extended version of the STrack class for YOLO, adding object tracking features.
+    """An extended version of the STrack class for YOLO, adding object tracking features.
 
     This class extends the STrack class to include additional functionalities for object tracking, such as feature
     smoothing, Kalman filter prediction, and reactivation of tracks.
@@ -57,8 +56,7 @@
     def __init__(
         self, xywh: np.ndarray, score: float, cls: int, feat: np.ndarray | None = None, feat_history: int = 50
     ):
-        """
-        Initialize a BOTrack object with temporal parameters, such as feature history, alpha, and current features.
+        """Initialize a BOTrack object with temporal parameters, such as feature history, alpha, and current features.
 
         Args:
             xywh (np.ndarray): Bounding box coordinates in xywh format (center x, center y, width, height).
@@ -154,8 +152,7 @@
 
 
 class BOTSORT(BYTETracker):
-    """
-    An extended version of the BYTETracker class for YOLO, designed for object tracking with ReID and GMC algorithm.
+    """An extended version of the BYTETracker class for YOLO, designed for object tracking with ReID and GMC algorithm.
 
     Attributes:
         proximity_thresh (float): Threshold for spatial proximity (IoU) between tracks and detections.
@@ -177,13 +174,12 @@
         >>> bot_sort.init_track(dets, scores, cls, img)
         >>> bot_sort.multi_predict(tracks)
 
-    Note:
+    Notes:
         The class is designed to work with a YOLO object detection model and supports ReID only if enabled via args.
     """
 
     def __init__(self, args: Any, frame_rate: int = 30):
-        """
-        Initialize BOTSORT object with ReID module and GMC algorithm.
+        """Initialize BOTSORT object with ReID module and GMC algorithm.
 
         Args:
             args (Any): Parsed command-line arguments containing tracking parameters.
@@ -253,8 +249,7 @@ class ReID:
     """YOLO model as encoder for re-identification."""
 
     def __init__(self, model: str):
-        """
-        Initialize encoder for re-identification.
+        """Initialize encoder for re-identification.
 
         Args:
             model (str): Path to the YOLO model for re-identification.
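The "feature smoothing" that `BOTrack` adds is, in BoT-SORT-style trackers, an exponential moving average over per-frame ReID embeddings controlled by an alpha coefficient. A sketch of that update rule; the alpha value and feature size are illustrative, and this is not the package's exact code:

import numpy as np

alpha = 0.9  # Smoothing coefficient (illustrative); higher values adapt more slowly.
rng = np.random.default_rng(0)

smooth_feat = None
for _ in range(50):  # A feat_history-length stream of per-frame ReID embeddings.
    feat = rng.standard_normal(128).astype(np.float32)
    feat /= np.linalg.norm(feat)  # ReID features are typically L2-normalized.
    smooth_feat = feat if smooth_feat is None else alpha * smooth_feat + (1 - alpha) * feat

smooth_feat /= np.linalg.norm(smooth_feat)  # Re-normalize after averaging.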