dgenerate-ultralytics-headless 8.3.154__py3-none-any.whl → 8.3.155__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {dgenerate_ultralytics_headless-8.3.154.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.154.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/RECORD +27 -27
  3. ultralytics/__init__.py +1 -1
  4. ultralytics/nn/tasks.py +0 -1
  5. ultralytics/solutions/ai_gym.py +3 -2
  6. ultralytics/solutions/analytics.py +2 -2
  7. ultralytics/solutions/config.py +2 -2
  8. ultralytics/solutions/distance_calculation.py +1 -1
  9. ultralytics/solutions/heatmap.py +5 -3
  10. ultralytics/solutions/instance_segmentation.py +4 -2
  11. ultralytics/solutions/object_blurrer.py +4 -2
  12. ultralytics/solutions/object_counter.py +5 -5
  13. ultralytics/solutions/object_cropper.py +3 -2
  14. ultralytics/solutions/parking_management.py +9 -9
  15. ultralytics/solutions/queue_management.py +4 -2
  16. ultralytics/solutions/region_counter.py +13 -5
  17. ultralytics/solutions/security_alarm.py +6 -4
  18. ultralytics/solutions/similarity_search.py +6 -6
  19. ultralytics/solutions/solutions.py +7 -7
  20. ultralytics/solutions/speed_estimation.py +3 -2
  21. ultralytics/solutions/streamlit_inference.py +6 -6
  22. ultralytics/solutions/trackzone.py +4 -2
  23. ultralytics/solutions/vision_eye.py +4 -2
  24. {dgenerate_ultralytics_headless-8.3.154.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/WHEEL +0 -0
  25. {dgenerate_ultralytics_headless-8.3.154.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/entry_points.txt +0 -0
  26. {dgenerate_ultralytics_headless-8.3.154.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/licenses/LICENSE +0 -0
  27. {dgenerate_ultralytics_headless-8.3.154.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.3.154.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.154
+ Version: 8.3.155
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
{dgenerate_ultralytics_headless-8.3.154.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/RECORD
@@ -1,4 +1,4 @@
- dgenerate_ultralytics_headless-8.3.154.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.155.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=JjgKSs36ZaGmmtqGmAapmFSoFF1YwyV3IZsOgqt2IVM,2593
  tests/test_cli.py,sha256=Kpfxq_RlbKK1Z8xNScDUbre6GB7neZhXZAYGI1tiDS8,5660
@@ -8,7 +8,7 @@ tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
  tests/test_integrations.py,sha256=cQfgueFhEZ8Xs-tF0uiIEhvn0DlhOH-Wqrx96LXp3D0,6303
  tests/test_python.py,sha256=nOoaPDg-0j7ZPRz9-uGFny3uocxjUM1ze5wA3BpGxKQ,27865
  tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
- ultralytics/__init__.py,sha256=0ApCLr1tPm8ret7Z-tWq7TY1z-OfOAvVVTXYnqjw5Fk,730
+ ultralytics/__init__.py,sha256=JK10bt4193n9_LeWJynhzdNkGFtjw86QgWQWbAr1cRs,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=ds63URbbeRj5UxkCSyl62OrNw6HQy7xeit5-0wGDEKg,39699
@@ -196,7 +196,7 @@ ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYA
  ultralytics/models/yolo/yoloe/val.py,sha256=yebPkxwKKt__cY05Zbh1YXg4_BKzzpcDc3Cv3FJ5SAA,9769
  ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
  ultralytics/nn/autobackend.py,sha256=smyYoozUOtXPNKW9Rd24dZX-EY36CDvXMr7xH-uLEs0,41256
- ultralytics/nn/tasks.py,sha256=epmYC6psquUnmsAantY9j7O6EnIyeSVjbqkQtKSnpCQ,72484
+ ultralytics/nn/tasks.py,sha256=aCXYmWan2LTznH3i_-2OwMagG3ZwnVL1gjKtY-3oShM,72456
  ultralytics/nn/text_model.py,sha256=m4jDB5bzOLOS8XNmFi9oQk-skzRHiIpJy4K-_SIARR0,13498
  ultralytics/nn/modules/__init__.py,sha256=2nY0X69Z5DD5SWt6v3CUTZa5gXSzC9TQr3VTVqhyGho,3158
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
@@ -206,25 +206,25 @@ ultralytics/nn/modules/head.py,sha256=zTXFXc46ljPdP3mjgH7B3y2bPIjvbVPtgTu_rQCV8x
  ultralytics/nn/modules/transformer.py,sha256=PW5-6gzOP3_rZ_uAkmxvI42nU5bkrgbgLKCy5PC5px4,31415
  ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
  ultralytics/solutions/__init__.py,sha256=ZoeAQavTLp8aClnhZ9tbl6lxy86GxofyGvZWTx2aWkI,1209
- ultralytics/solutions/ai_gym.py,sha256=FM2JCRtqAIfOgUpvSsSER2XZsccZt0NmhUp2Si-_2gk,5162
- ultralytics/solutions/analytics.py,sha256=IfYlXV4vufpaOZz9h8cT1Vx9RjsqQYTCB7SbDlR0zv0,12784
- ultralytics/solutions/config.py,sha256=1HZvgWPt7duDxqAaOTyu4-TOBeRJeWx5EQgUwnyyO50,5394
- ultralytics/solutions/distance_calculation.py,sha256=e2Xa7dVOqiuk43JNakoxQlX48evEgZiEtxdtHTdlAsk,5931
- ultralytics/solutions/heatmap.py,sha256=IVnTOyIbxKrhmnzGbkncIqPakPHeJe4nrwQkOPJ00wY,5421
- ultralytics/solutions/instance_segmentation.py,sha256=HBWkCwmRa0jk84q4fhANzGpyirAtiCkAKRt0j9ED_Cw,3739
- ultralytics/solutions/object_blurrer.py,sha256=UVd9EGpyb_fJXFnPg3lbnhWxY1ntHVWmIJ2ragbZ6eY,3942
- ultralytics/solutions/object_counter.py,sha256=1iPJW_59iIw8DZedYdjw7HIQINpQtEBCd190g6TosNA,9353
- ultralytics/solutions/object_cropper.py,sha256=SVB9fflB7-juZWUARpi-kndSZDVI-oXjHg4WUnOuA9A,3470
- ultralytics/solutions/parking_management.py,sha256=8J9xfvg3kBVGVeyJkonfkOub8AmIxZXdtCBt6xn-o18,13541
- ultralytics/solutions/queue_management.py,sha256=_K6ugLMDfpp37S-LFV36K3QXf3vqjfxji8BPP_-6iqc,4337
- ultralytics/solutions/region_counter.py,sha256=8vNrr0SnEBJ7ngD_whWpD7jMlrzuYGWxUuZx3WOv0ys,5739
- ultralytics/solutions/security_alarm.py,sha256=HXoPFlTOVp5eUecPuGIl_DXLKuN8-M32BCvCOd_vRac,6279
- ultralytics/solutions/similarity_search.py,sha256=GdrPEpfBwLpM5Mx4XQiTrahgdQgiSIeGdHWWTLQl5xU,9926
- ultralytics/solutions/solutions.py,sha256=_o7J31k-RNjyaLs_4GniHTAXGwBEJos7oAr7kAuTiuM,37467
- ultralytics/solutions/speed_estimation.py,sha256=_4tIfWPI7O_hYRQAvNrALMzdy2sBR5_0BxnPdJb0Gks,5823
- ultralytics/solutions/streamlit_inference.py,sha256=menjJLsuP7AsQJSnBo7gRHfMlYE8HzMp0YNGqCU64n0,9986
- ultralytics/solutions/trackzone.py,sha256=C51IgbNG_kGsTi04ZKUThLPYZXthP7Rad0ImSjKwa0g,3873
- ultralytics/solutions/vision_eye.py,sha256=LCb-2YPVvEks9e7xqZtNGftpAXNaZhEUb5yb3N0ni_U,2952
+ ultralytics/solutions/ai_gym.py,sha256=wwfTqX7G3mZXneMwiibEfYbVYaJF_JUX3SQdsdQUvBM,5217
+ ultralytics/solutions/analytics.py,sha256=aHwKjSEW_3y47LrzugJbPB3VQGTDQCIb5goiPuxnmrc,12802
+ ultralytics/solutions/config.py,sha256=CevL8lzeSbiSAAA514CTiduCg2_Wh04P0RaB_kmwJa8,5404
+ ultralytics/solutions/distance_calculation.py,sha256=r05_ufxb2Mpw3EIX8X32PIWlh9rYMADypGhVIPoZYV4,5939
+ ultralytics/solutions/heatmap.py,sha256=vEdzLSYCNIFC9CsBWYSnCLiM8xNuYLJ-1i7enjQgOQw,5516
+ ultralytics/solutions/instance_segmentation.py,sha256=qsIQkvuR1Ur2bdEsCCJP2IEO1Hz2l0wfR2KUBo247xE,3795
+ ultralytics/solutions/object_blurrer.py,sha256=wHbfrudh6li_JADc-dTHGGMI8GU-MvesoTvVlX6YuYc,3998
+ ultralytics/solutions/object_counter.py,sha256=Zt6FNfPSPN3L69zks1u4DSPM3A6mdl7p29im4O-2QFQ,9406
+ ultralytics/solutions/object_cropper.py,sha256=mS3iT_CgqfqG9ldM_AM5ptq5bfYFyTycPQY5DxxMlSA,3525
+ ultralytics/solutions/parking_management.py,sha256=IfPUn15aelxz6YZNo9WYkVEl5IOVSw8VD0OrpKtExPE,13613
+ ultralytics/solutions/queue_management.py,sha256=u0VFzRqa0OxIWY7xXItsXEm073CzkQGFhhXG-6VK3SI,4393
+ ultralytics/solutions/region_counter.py,sha256=j6f5VAaE1JWGdWOecZpWMFp6yF1GdCnHjftN6CRybjQ,5967
+ ultralytics/solutions/security_alarm.py,sha256=U6FTbg3cthKLfWeLunsFhOJvB6GGmwYDDxZ3K0GCx-Q,6351
+ ultralytics/solutions/similarity_search.py,sha256=ZzC1SKjNSXX_wYE5ldQvkY4d7pI0pcUmM9D7_BOLXxY,9975
+ ultralytics/solutions/solutions.py,sha256=N5t1DgZpuFBbDvLVZ7wRkafmgu8SS1VC9VNjuupglwQ,37532
+ ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
+ ultralytics/solutions/streamlit_inference.py,sha256=lqHh0UDCVmWIeh3yzpvoV7j9K6Ipx7pJBkOsb0ZpZes,10034
+ ultralytics/solutions/trackzone.py,sha256=kIS94rNfL3yVPAtSbnW8F-aLMxXowQtsfKNB-jLezz8,3941
+ ultralytics/solutions/vision_eye.py,sha256=nlIdXhfM5EwJh4vqVhz3AEOoHXIELMo1OG8Cr1tMQRw,3008
  ultralytics/solutions/templates/similarity-search.html,sha256=vdz9XCH6VHbksvSW_sSg6Z2xVp82_EanaS_rY7xjZBE,4743
  ultralytics/trackers/__init__.py,sha256=Zlu_Ig5osn7hqch_g5Be_e4pwZUkeeTQiesJCi0pFGI,255
  ultralytics/trackers/basetrack.py,sha256=-skBFFatzgJFAPN9Frm1u1h_RDUg3WOlxG6eHQxp2Gw,4384
@@ -266,8 +266,8 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
  ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
- dgenerate_ultralytics_headless-8.3.154.dist-info/METADATA,sha256=ko2uatk1ToiDrkEyV6zjKLPkoWhSLJzcw9tEVLDkePU,38296
- dgenerate_ultralytics_headless-8.3.154.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.154.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.154.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.154.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.155.dist-info/METADATA,sha256=9K9e8yepMKHtpJWJOrF3DcVjP2LzN1YaJwuIgEXcnAA,38296
+ dgenerate_ultralytics_headless-8.3.155.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.155.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.155.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.155.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.154"
+ __version__ = "8.3.155"

  import os

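The only functional change here is the version string. A quick way to confirm which build is installed after upgrading, assuming the package imports as ultralytics exactly as shown above:

import ultralytics

print(ultralytics.__version__)  # expected to print "8.3.155" with this release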
ultralytics/nn/tasks.py CHANGED
@@ -880,7 +880,6 @@ class WorldModel(DetectionModel):
  self.txt_feats = self.get_text_pe(text, batch=batch, cache_clip_model=cache_clip_model)
  self.model[-1].nc = len(text)

- @smart_inference_mode()
  def get_text_pe(self, text, batch=80, cache_clip_model=True):
  """
  Set classes in advance so that model could do offline-inference without clip model.
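For context, smart_inference_mode is an Ultralytics decorator that runs the wrapped call under PyTorch's inference (no-grad) context; dropping it from get_text_pe means the method no longer forces that context itself. A rough, hypothetical sketch of what such a decorator generally does, not the actual Ultralytics implementation:

import torch


def no_grad_call(fn):
    """Run fn with gradient tracking disabled (illustrative sketch only)."""

    def wrapper(*args, **kwargs):
        with torch.inference_mode():  # disables autograd for the wrapped call
            return fn(*args, **kwargs)

    return wrapper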
ultralytics/solutions/ai_gym.py CHANGED
@@ -1,6 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

  from collections import defaultdict
+ from typing import Any

  from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults

@@ -30,7 +31,7 @@ class AIGym(BaseSolution):
  >>> cv2.waitKey(0)
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize AIGym for workout monitoring using pose estimation and predefined angles.

@@ -47,7 +48,7 @@ class AIGym(BaseSolution):
  self.down_angle = float(self.CFG["down_angle"]) # Pose down predefined angle to consider down pose
  self.kpts = self.CFG["kpts"] # User selected kpts of workouts storage for further usage

- def process(self, im0):
+ def process(self, im0) -> SolutionResults:
  """
  Monitor workouts using Ultralytics YOLO Pose Model.

ultralytics/solutions/analytics.py CHANGED
@@ -1,7 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

  from itertools import cycle
- from typing import Dict, Optional
+ from typing import Any, Dict, Optional

  import cv2
  import numpy as np
@@ -45,7 +45,7 @@ class Analytics(BaseSolution):
  >>> cv2.imshow("Analytics", results.plot_im)
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """Initialize Analytics class with various chart types for visual data representation."""
  super().__init__(**kwargs)

ultralytics/solutions/config.py CHANGED
@@ -1,7 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

  from dataclasses import dataclass, field
- from typing import List, Optional, Tuple
+ from typing import Any, List, Optional, Tuple

  import cv2

@@ -94,7 +94,7 @@ class SolutionConfig:
  verbose: bool = True
  data: str = "images"

- def update(self, **kwargs):
+ def update(self, **kwargs: Any):
  """Update configuration parameters with new values provided as keyword arguments."""
  for key, value in kwargs.items():
  if hasattr(self, key):
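The update hunk above stops at the hasattr check, so the rest of the loop body is not shown. A minimal sketch of the same keyword-validating pattern on a hypothetical dataclass (DemoConfig and its error message are invented for illustration; this is not the real SolutionConfig):

from dataclasses import dataclass


@dataclass
class DemoConfig:
    """Hypothetical stand-in with two of the fields shown in the diff."""

    verbose: bool = True
    data: str = "images"

    def update(self, **kwargs):
        """Set only attributes that already exist; reject unknown keys."""
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise ValueError(f"{key} is not a valid DemoConfig field")
        return self


cfg = DemoConfig().update(verbose=False, data="videos")
print(cfg)  # DemoConfig(verbose=False, data='videos')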
ultralytics/solutions/distance_calculation.py CHANGED
@@ -33,7 +33,7 @@ class DistanceCalculation(BaseSolution):
  >>> cv2.waitKey(0)
  """

- def __init__(self, **kwargs: Any):
+ def __init__(self, **kwargs: Any) -> None:
  """Initialize the DistanceCalculation class for measuring object distances in video streams."""
  super().__init__(**kwargs)

ultralytics/solutions/heatmap.py CHANGED
@@ -1,5 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from typing import Any, List
+
  import cv2
  import numpy as np

@@ -31,7 +33,7 @@ class Heatmap(ObjectCounter):
  >>> processed_frame = heatmap.process(frame)
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize the Heatmap class for real-time video stream heatmap generation based on object tracks.

@@ -48,7 +50,7 @@ class Heatmap(ObjectCounter):
  self.colormap = self.CFG["colormap"]
  self.heatmap = None

- def heatmap_effect(self, box):
+ def heatmap_effect(self, box: List[float]) -> None:
  """
  Efficiently calculate heatmap area and effect location for applying colormap.

@@ -70,7 +72,7 @@ class Heatmap(ObjectCounter):
  # Update only the values within the bounding box in a single vectorized operation
  self.heatmap[y0:y1, x0:x1][within_radius] += 2

- def process(self, im0):
+ def process(self, im0: np.ndarray) -> SolutionResults:
  """
  Generate heatmap for each frame using Ultralytics tracking.

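The heatmap_effect context lines above increment only the pixels that fall inside a circular footprint of the box, in one vectorized NumPy operation. A standalone sketch of that masking idea with invented box coordinates (not the exact Heatmap code):

import numpy as np

heatmap = np.zeros((480, 640), dtype=np.float32)
x0, y0, x1, y1 = 100, 120, 180, 220  # example bounding box
radius_sq = (min(x1 - x0, y1 - y0) // 2) ** 2

# Squared distance of every pixel in the box from the box centre, computed on a grid
xs, ys = np.meshgrid(np.arange(x0, x1), np.arange(y0, y1))
dist_sq = (xs - (x0 + x1) // 2) ** 2 + (ys - (y0 + y1) // 2) ** 2
within_radius = dist_sq <= radius_sq

# Single vectorized update, mirroring the "+= 2" line shown in the diff
heatmap[y0:y1, x0:x1][within_radius] += 2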
ultralytics/solutions/instance_segmentation.py CHANGED
@@ -1,5 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from typing import Any
+
  from ultralytics.engine.results import Results
  from ultralytics.solutions.solutions import BaseSolution, SolutionResults

@@ -33,7 +35,7 @@ class InstanceSegmentation(BaseSolution):
  >>> print(f"Total segmented instances: {results.total_tracks}")
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize the InstanceSegmentation class for detecting and annotating segmented instances.

@@ -48,7 +50,7 @@ class InstanceSegmentation(BaseSolution):
  self.show_labels = self.CFG.get("show_labels", True)
  self.show_boxes = self.CFG.get("show_boxes", True)

- def process(self, im0):
+ def process(self, im0) -> SolutionResults:
  """
  Perform instance segmentation on the input image and annotate the results.

ultralytics/solutions/object_blurrer.py CHANGED
@@ -1,5 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from typing import Any
+
  import cv2

  from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
@@ -31,7 +33,7 @@ class ObjectBlurrer(BaseSolution):
  >>> print(f"Total blurred objects: {processed_results.total_tracks}")
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize the ObjectBlurrer class for applying a blur effect to objects detected in video streams or images.

@@ -46,7 +48,7 @@ class ObjectBlurrer(BaseSolution):
  blur_ratio = 0.5
  self.blur_ratio = int(blur_ratio * 100)

- def process(self, im0):
+ def process(self, im0) -> SolutionResults:
  """
  Apply a blurring effect to detected objects in the input image.

ultralytics/solutions/object_counter.py CHANGED
@@ -1,7 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

  from collections import defaultdict
- from typing import Optional, Tuple
+ from typing import Any, Optional, Tuple

  from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
  from ultralytics.utils.plotting import colors
@@ -36,7 +36,7 @@ class ObjectCounter(BaseSolution):
  >>> print(f"Inward count: {counter.in_count}, Outward count: {counter.out_count}")
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """Initialize the ObjectCounter class for real-time object counting in video streams."""
  super().__init__(**kwargs)

@@ -56,7 +56,7 @@ class ObjectCounter(BaseSolution):
  track_id: int,
  prev_position: Optional[Tuple[float, float]],
  cls: int,
- ):
+ ) -> None:
  """
  Count objects within a polygonal or linear region based on their tracks.

@@ -117,7 +117,7 @@ class ObjectCounter(BaseSolution):
  self.classwise_counts[self.names[cls]]["OUT"] += 1
  self.counted_ids.append(track_id)

- def display_counts(self, plot_im):
+ def display_counts(self, plot_im) -> None:
  """
  Display object counts on the input image or frame.

@@ -138,7 +138,7 @@ class ObjectCounter(BaseSolution):
  if labels_dict:
  self.annotator.display_analytics(plot_im, labels_dict, (104, 31, 17), (255, 255, 255), self.margin)

- def process(self, im0):
+ def process(self, im0) -> SolutionResults:
  """
  Process input data (frames or object tracks) and update object counts.

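The count_objects and process annotations above describe the per-frame API; typical usage goes through process(), as in the class docstring. A short sketch, where the weights file, video path, and counting line are placeholders:

import cv2

from ultralytics import solutions

counter = solutions.ObjectCounter(
    model="yolo11n.pt",               # placeholder weights
    region=[(20, 400), (1260, 400)],  # placeholder counting line
    show=False,
)

cap = cv2.VideoCapture("traffic.mp4")  # placeholder video path
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    results = counter.process(frame)  # returns SolutionResults, per the new annotation

print(f"Inward count: {counter.in_count}, Outward count: {counter.out_count}")
cap.release()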
ultralytics/solutions/object_cropper.py CHANGED
@@ -2,6 +2,7 @@

  import os
  from pathlib import Path
+ from typing import Any

  from ultralytics.solutions.solutions import BaseSolution, SolutionResults
  from ultralytics.utils.plotting import save_one_box
@@ -30,7 +31,7 @@ class ObjectCropper(BaseSolution):
  >>> print(f"Total cropped objects: {cropper.crop_idx}")
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize the ObjectCropper class for cropping objects from detected bounding boxes.

@@ -51,7 +52,7 @@ class ObjectCropper(BaseSolution):
  self.iou = self.CFG["iou"]
  self.conf = self.CFG["conf"]

- def process(self, im0):
+ def process(self, im0) -> SolutionResults:
  """
  Crop detected objects from the input image and save them as separate images.

ultralytics/solutions/parking_management.py CHANGED
@@ -47,7 +47,7 @@ class ParkingPtsSelection:
  >>> # Use the GUI to upload an image, select parking zones, and save the data
  """

- def __init__(self):
+ def __init__(self) -> None:
  """Initialize the ParkingPtsSelection class, setting up UI and properties for parking zone point selection."""
  try: # Check if tkinter is installed
  import tkinter as tk
@@ -99,14 +99,14 @@ class ParkingPtsSelection:
  self.initialize_properties()
  self.master.mainloop()

- def initialize_properties(self):
+ def initialize_properties(self) -> None:
  """Initialize properties for image, canvas, bounding boxes, and dimensions."""
  self.image = self.canvas_image = None
  self.rg_data, self.current_box = [], []
  self.imgw = self.imgh = 0
  self.canvas_max_width, self.canvas_max_height = 1280, 720

- def upload_image(self):
+ def upload_image(self) -> None:
  """Upload and display an image on the canvas, resizing it to fit within specified dimensions."""
  from PIL import Image, ImageTk # Scoped import because ImageTk requires tkinter package

@@ -132,7 +132,7 @@ class ParkingPtsSelection:

  self.rg_data.clear(), self.current_box.clear()

- def on_canvas_click(self, event):
+ def on_canvas_click(self, event) -> None:
  """Handle mouse clicks to add points for bounding boxes on the canvas."""
  self.current_box.append((event.x, event.y))
  self.canvas.create_oval(event.x - 3, event.y - 3, event.x + 3, event.y + 3, fill="red")
@@ -141,12 +141,12 @@ class ParkingPtsSelection:
  self.draw_box(self.current_box)
  self.current_box.clear()

- def draw_box(self, box: List[Tuple[int, int]]):
+ def draw_box(self, box: List[Tuple[int, int]]) -> None:
  """Draw a bounding box on the canvas using the provided coordinates."""
  for i in range(4):
  self.canvas.create_line(box[i], box[(i + 1) % 4], fill="blue", width=2)

- def remove_last_bounding_box(self):
+ def remove_last_bounding_box(self) -> None:
  """Remove the last bounding box from the list and redraw the canvas."""
  if not self.rg_data:
  self.messagebox.showwarning("Warning", "No bounding boxes to remove.")
@@ -154,14 +154,14 @@ class ParkingPtsSelection:
  self.rg_data.pop()
  self.redraw_canvas()

- def redraw_canvas(self):
+ def redraw_canvas(self) -> None:
  """Redraw the canvas with the image and all bounding boxes."""
  self.canvas.delete("all")
  self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
  for box in self.rg_data:
  self.draw_box(box)

- def save_to_json(self):
+ def save_to_json(self) -> None:
  """Save the selected parking zone points to a JSON file with scaled coordinates."""
  scale_w, scale_h = self.imgw / self.canvas.winfo_width(), self.imgh / self.canvas.winfo_height()
  data = [{"points": [(int(x * scale_w), int(y * scale_h)) for x, y in box]} for box in self.rg_data]
@@ -200,7 +200,7 @@ class ParkingManagement(BaseSolution):
  >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
  """

- def __init__(self, **kwargs: Any):
+ def __init__(self, **kwargs: Any) -> None:
  """Initialize the parking management system with a YOLO model and visualization settings."""
  super().__init__(**kwargs)

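save_to_json (context lines above) rescales points clicked on the resized canvas back to the original image resolution before writing JSON. A self-contained sketch of that scaling step, with invented dimensions, points, and output file name:

import json

imgw, imgh = 1920, 1080          # original image size (example values)
canvas_w, canvas_h = 1280, 720   # canvas size the points were picked on (example values)
rg_data = [[(100, 100), (200, 100), (200, 180), (100, 180)]]  # one example parking box

scale_w, scale_h = imgw / canvas_w, imgh / canvas_h
data = [{"points": [(int(x * scale_w), int(y * scale_h)) for x, y in box]} for box in rg_data]

with open("parking_zones.json", "w", encoding="utf-8") as f:  # illustrative file name
    json.dump(data, f, indent=4)

print(data[0]["points"][0])  # (150, 150)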
ultralytics/solutions/queue_management.py CHANGED
@@ -1,5 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from typing import Any
+
  from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
  from ultralytics.utils.plotting import colors

@@ -35,7 +37,7 @@ class QueueManager(BaseSolution):
  >>> results = queue_manager.process(im0)
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """Initialize the QueueManager with parameters for tracking and counting objects in a video stream."""
  super().__init__(**kwargs)
  self.initialize_region()
@@ -43,7 +45,7 @@ class QueueManager(BaseSolution):
  self.rect_color = (255, 255, 255) # Rectangle color for visualization
  self.region_length = len(self.region) # Store region length for further usage

- def process(self, im0):
+ def process(self, im0) -> SolutionResults:
  """
  Process queue management for a single frame of video.

ultralytics/solutions/region_counter.py CHANGED
@@ -1,5 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from typing import Any, List, Tuple
+
  import numpy as np

  from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
@@ -33,7 +35,7 @@ class RegionCounter(BaseSolution):
  >>> print(f"Total tracks: {results.total_tracks}")
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """Initialize the RegionCounter for real-time object counting in user-defined regions."""
  super().__init__(**kwargs)
  self.region_template = {
@@ -47,15 +49,21 @@ class RegionCounter(BaseSolution):
  self.region_counts = {}
  self.counting_regions = []

- def add_region(self, name, polygon_points, region_color, text_color):
+ def add_region(
+ self,
+ name: str,
+ polygon_points: List[Tuple],
+ region_color: Tuple[int, int, int],
+ text_color: Tuple[int, int, int],
+ ) -> None:
  """
  Add a new region to the counting list based on the provided template with specific attributes.

  Args:
  name (str): Name assigned to the new region.
  polygon_points (List[Tuple]): List of (x, y) coordinates defining the region's polygon.
- region_color (tuple): BGR color for region visualization.
- text_color (tuple): BGR color for the text within the region.
+ region_color (Tuple[int, int, int]): BGR color for region visualization.
+ text_color (Tuple[int, int, int]): BGR color for the text within the region.
  """
  region = self.region_template.copy()
  region.update(
@@ -68,7 +76,7 @@ class RegionCounter(BaseSolution):
  )
  self.counting_regions.append(region)

- def process(self, im0):
+ def process(self, im0: np.ndarray) -> SolutionResults:
  """
  Process the input frame to detect and count objects within each defined region.

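add_region now spells out its parameter types: a region name, the polygon, and two BGR colour tuples. A hypothetical call matching that signature, with coordinates and colours invented for illustration, on an already-constructed RegionCounter named region_counter:

region_counter.add_region(
    name="Zone A",
    polygon_points=[(50, 50), (250, 50), (250, 250), (50, 250)],  # example polygon
    region_color=(255, 0, 0),    # BGR blue outline
    text_color=(255, 255, 255),  # white label text
)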
ultralytics/solutions/security_alarm.py CHANGED
@@ -1,5 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from typing import Any
+
  from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
  from ultralytics.utils import LOGGER
  from ultralytics.utils.plotting import colors
@@ -32,7 +34,7 @@ class SecurityAlarm(BaseSolution):
  >>> results = security.process(frame)
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize the SecurityAlarm class with parameters for real-time object monitoring.

@@ -46,7 +48,7 @@ class SecurityAlarm(BaseSolution):
  self.to_email = ""
  self.from_email = ""

- def authenticate(self, from_email: str, password: str, to_email: str):
+ def authenticate(self, from_email: str, password: str, to_email: str) -> None:
  """
  Authenticate the email server for sending alert notifications.

@@ -69,7 +71,7 @@ class SecurityAlarm(BaseSolution):
  self.to_email = to_email
  self.from_email = from_email

- def send_email(self, im0, records: int = 5):
+ def send_email(self, im0, records: int = 5) -> None:
  """
  Send an email notification with an image attachment indicating the number of objects detected.

@@ -114,7 +116,7 @@ class SecurityAlarm(BaseSolution):
  except Exception as e:
  LOGGER.error(f"Failed to send email: {e}")

- def process(self, im0):
+ def process(self, im0) -> SolutionResults:
  """
  Monitor the frame, process object detections, and trigger alerts if thresholds are exceeded.

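authenticate stores the sender, password, and recipient and logs in to the mail server; the server setup itself lies outside this hunk. A generic smtplib sketch of that kind of login, with a placeholder host and credentials rather than the Ultralytics implementation:

import smtplib


def smtp_login(from_email: str, password: str) -> smtplib.SMTP:
    """Open an SMTP connection and authenticate (generic sketch, placeholder host)."""
    server = smtplib.SMTP("smtp.example.com", 587)  # placeholder host/port
    server.starttls()  # upgrade the connection before sending credentials
    server.login(from_email, password)
    return server


# server = smtp_login("alerts@example.com", "app-password")  # placeholder credentials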
ultralytics/solutions/similarity_search.py CHANGED
@@ -2,7 +2,7 @@

  import os
  from pathlib import Path
- from typing import List
+ from typing import Any, List

  import numpy as np
  import torch
@@ -48,7 +48,7 @@ class VisualAISearch(BaseSolution):
  >>> results = searcher.search("a cat sitting on a chair", k=10)
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """Initialize the VisualAISearch class with FAISS index and CLIP model."""
  super().__init__(**kwargs)
  check_requirements(["git+https://github.com/ultralytics/CLIP.git", "faiss-cpu"])
@@ -90,7 +90,7 @@ class VisualAISearch(BaseSolution):
  with torch.no_grad():
  return self.model.encode_text(tokens).cpu().numpy()

- def load_or_build_index(self):
+ def load_or_build_index(self) -> None:
  """
  Load existing FAISS index or build a new one from image features.

@@ -195,7 +195,7 @@ class SearchApp:
  >>> app.run(debug=True)
  """

- def __init__(self, data: str = "images", device: str = None):
+ def __init__(self, data: str = "images", device: str = None) -> None:
  """
  Initialize the SearchApp with VisualAISearch backend.

@@ -217,7 +217,7 @@ class SearchApp:
  )
  self.app.add_url_rule("/", view_func=self.index, methods=["GET", "POST"])

- def index(self):
+ def index(self) -> str:
  """Process user query and display search results in the web interface."""
  results = []
  if self.request.method == "POST":
@@ -225,6 +225,6 @@ class SearchApp:
  results = self.searcher(query)
  return self.render_template("similarity-search.html", results=results)

- def run(self, debug: bool = False):
+ def run(self, debug: bool = False) -> None:
  """Start the Flask web application server."""
  self.app.run(debug=debug)
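The docstring context above already shows the intended entry points for both classes; gathered into one hedged usage sketch (the images directory is a placeholder, and the extra dependencies pulled in by check_requirements must be installed first):

from ultralytics.solutions.similarity_search import SearchApp, VisualAISearch

# Semantic image search over a local folder, per the VisualAISearch docstring
searcher = VisualAISearch(data="images")                     # placeholder image directory
results = searcher.search("a cat sitting on a chair", k=10)  # top matching images

# Or serve the same index behind the small Flask UI wrapped by SearchApp
app = SearchApp(data="images", device="cpu")
# app.run(debug=True)  # commented out: starts a blocking web server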
ultralytics/solutions/solutions.py CHANGED
@@ -70,7 +70,7 @@ class BaseSolution:
  >>> solution.display_output(image)
  """

- def __init__(self, is_cli: bool = False, **kwargs):
+ def __init__(self, is_cli: bool = False, **kwargs: Any) -> None:
  """
  Initialize the BaseSolution class with configuration settings and YOLO model.

@@ -155,7 +155,7 @@ class BaseSolution:
  name = ("" if track_id is None else f"{track_id} ") + self.names[cls]
  return (f"{name} {conf:.2f}" if self.show_conf else name) if self.show_labels else None

- def extract_tracks(self, im0: np.ndarray):
+ def extract_tracks(self, im0: np.ndarray) -> None:
  """
  Apply object tracking and extract tracks from an input image or frame.

@@ -183,7 +183,7 @@ class BaseSolution:
  self.LOGGER.warning("no tracks found!")
  self.boxes, self.clss, self.track_ids, self.confs = [], [], [], []

- def store_tracking_history(self, track_id: int, box):
+ def store_tracking_history(self, track_id: int, box) -> None:
  """
  Store the tracking history of an object.

@@ -204,7 +204,7 @@ class BaseSolution:
  if len(self.track_line) > 30:
  self.track_line.pop(0)

- def initialize_region(self):
+ def initialize_region(self) -> None:
  """Initialize the counting region and line segment based on configuration settings."""
  if self.region is None:
  self.region = [(10, 200), (540, 200), (540, 180), (10, 180)]
@@ -212,7 +212,7 @@ class BaseSolution:
  self.Polygon(self.region) if len(self.region) >= 3 else self.LineString(self.region)
  ) # region or line

- def display_output(self, plot_im: np.ndarray):
+ def display_output(self, plot_im: np.ndarray) -> None:
  """
  Display the results of the processing, which could involve showing frames, printing counts, or saving results.

@@ -238,10 +238,10 @@ class BaseSolution:
  cv2.destroyAllWindows() # Closes current frame window
  return

- def process(self, *args, **kwargs):
+ def process(self, *args: Any, **kwargs: Any):
  """Process method should be implemented by each Solution subclass."""

- def __call__(self, *args, **kwargs):
+ def __call__(self, *args: Any, **kwargs: Any):
  """Allow instances to be called like a function with flexible arguments."""
  with self.profilers[1]:
  result = self.process(*args, **kwargs) # Call the subclass-specific process method
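The final hunk shows BaseSolution.__call__ timing the subclass process() call with one of its profilers. A stripped-down sketch of that wrapper pattern, using a plain timer in place of the Ultralytics Profile objects:

import time


class TimedSolution:
    """Minimal stand-in for the BaseSolution __call__/process split."""

    def __init__(self) -> None:
        self.last_runtime = 0.0

    def process(self, *args, **kwargs):
        """Subclasses override this with the real per-frame work."""
        raise NotImplementedError

    def __call__(self, *args, **kwargs):
        start = time.perf_counter()
        result = self.process(*args, **kwargs)  # delegate to the subclass-specific process method
        self.last_runtime = time.perf_counter() - start
        return result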
ultralytics/solutions/speed_estimation.py CHANGED
@@ -2,6 +2,7 @@

  from collections import deque
  from math import sqrt
+ from typing import Any

  from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
  from ultralytics.utils.plotting import colors
@@ -40,7 +41,7 @@ class SpeedEstimator(BaseSolution):
  >>> cv2.imshow("Speed Estimation", results.plot_im)
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize the SpeedEstimator object with speed estimation parameters and data structures.

@@ -59,7 +60,7 @@ class SpeedEstimator(BaseSolution):
  self.meter_per_pixel = self.CFG["meter_per_pixel"] # Scene scale, depends on camera details
  self.max_speed = self.CFG["max_speed"] # Maximum speed adjustment

- def process(self, im0):
+ def process(self, im0) -> SolutionResults:
  """
  Process an input frame to estimate object speeds based on tracking data.

ultralytics/solutions/streamlit_inference.py CHANGED
@@ -49,7 +49,7 @@ class Inference:
  >>> inf.inference()
  """

- def __init__(self, **kwargs: Any):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize the Inference class, checking Streamlit requirements and setting up the model path.

@@ -77,7 +77,7 @@ class Inference:

  LOGGER.info(f"Ultralytics Solutions: ✅ {self.temp_dict}")

- def web_ui(self):
+ def web_ui(self) -> None:
  """Set up the Streamlit web interface with custom HTML elements."""
  menu_style_cfg = """<style>MainMenu {visibility: hidden;}</style>""" # Hide main menu style

@@ -96,7 +96,7 @@ class Inference:
  self.st.markdown(main_title_cfg, unsafe_allow_html=True)
  self.st.markdown(sub_title_cfg, unsafe_allow_html=True)

- def sidebar(self):
+ def sidebar(self) -> None:
  """Configure the Streamlit sidebar for model and inference settings."""
  with self.st.sidebar: # Add Ultralytics LOGO
  logo = "https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg"
@@ -117,7 +117,7 @@ class Inference:
  self.org_frame = col1.empty() # Container for original frame
  self.ann_frame = col2.empty() # Container for annotated frame

- def source_upload(self):
+ def source_upload(self) -> None:
  """Handle video file uploads through the Streamlit interface."""
  self.vid_file_name = ""
  if self.source == "video":
@@ -130,7 +130,7 @@ class Inference:
  elif self.source == "webcam":
  self.vid_file_name = 0 # Use webcam index 0

- def configure(self):
+ def configure(self) -> None:
  """Configure the model and load selected classes for inference."""
  # Add dropdown menu for model selection
  available_models = [x.replace("yolo", "YOLO") for x in GITHUB_ASSETS_STEMS if x.startswith("yolo11")]
@@ -150,7 +150,7 @@ class Inference:
  if not isinstance(self.selected_ind, list): # Ensure selected_options is a list
  self.selected_ind = list(self.selected_ind)

- def inference(self):
+ def inference(self) -> None:
  """Perform real-time object detection inference on video or webcam feed."""
  self.web_ui() # Initialize the web interface
  self.sidebar() # Create the sidebar
ultralytics/solutions/trackzone.py CHANGED
@@ -1,5 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from typing import Any
+
  import cv2
  import numpy as np

@@ -34,7 +36,7 @@ class TrackZone(BaseSolution):
  >>> cv2.imshow("Tracked Frame", results.plot_im)
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize the TrackZone class for tracking objects within a defined region in video streams.

@@ -46,7 +48,7 @@ class TrackZone(BaseSolution):
  self.region = cv2.convexHull(np.array(self.region or default_region, dtype=np.int32))
  self.mask = None

- def process(self, im0):
+ def process(self, im0: np.ndarray) -> SolutionResults:
  """
  Process the input frame to track objects within a defined region.

ultralytics/solutions/vision_eye.py CHANGED
@@ -1,5 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from typing import Any
+
  from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
  from ultralytics.utils.plotting import colors

@@ -24,7 +26,7 @@ class VisionEye(BaseSolution):
  >>> print(f"Total detected instances: {results.total_tracks}")
  """

- def __init__(self, **kwargs):
+ def __init__(self, **kwargs: Any) -> None:
  """
  Initialize the VisionEye class for detecting objects and applying vision mapping.

@@ -35,7 +37,7 @@ class VisionEye(BaseSolution):
  # Set the vision point where the system will view objects and draw tracks
  self.vision_point = self.CFG["vision_point"]

- def process(self, im0):
+ def process(self, im0) -> SolutionResults:
  """
  Perform object detection, vision mapping, and annotation on the input image.
