dgenerate-ultralytics-headless 8.3.218__py3-none-any.whl → 8.3.221__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/RECORD +77 -77
  3. tests/__init__.py +5 -7
  4. tests/conftest.py +3 -7
  5. tests/test_cli.py +9 -2
  6. tests/test_engine.py +1 -1
  7. tests/test_exports.py +37 -9
  8. tests/test_integrations.py +4 -4
  9. tests/test_python.py +37 -44
  10. tests/test_solutions.py +154 -145
  11. ultralytics/__init__.py +1 -1
  12. ultralytics/cfg/__init__.py +7 -5
  13. ultralytics/cfg/default.yaml +1 -1
  14. ultralytics/data/__init__.py +4 -4
  15. ultralytics/data/augment.py +10 -10
  16. ultralytics/data/base.py +1 -1
  17. ultralytics/data/build.py +1 -1
  18. ultralytics/data/converter.py +3 -3
  19. ultralytics/data/dataset.py +3 -3
  20. ultralytics/data/loaders.py +2 -2
  21. ultralytics/data/utils.py +2 -2
  22. ultralytics/engine/exporter.py +73 -20
  23. ultralytics/engine/model.py +1 -1
  24. ultralytics/engine/predictor.py +1 -0
  25. ultralytics/engine/trainer.py +5 -3
  26. ultralytics/engine/tuner.py +4 -4
  27. ultralytics/hub/__init__.py +9 -7
  28. ultralytics/hub/utils.py +2 -2
  29. ultralytics/models/__init__.py +1 -1
  30. ultralytics/models/fastsam/__init__.py +1 -1
  31. ultralytics/models/fastsam/predict.py +10 -16
  32. ultralytics/models/nas/__init__.py +1 -1
  33. ultralytics/models/rtdetr/__init__.py +1 -1
  34. ultralytics/models/sam/__init__.py +1 -1
  35. ultralytics/models/sam/amg.py +2 -2
  36. ultralytics/models/sam/modules/blocks.py +1 -1
  37. ultralytics/models/sam/modules/transformer.py +1 -1
  38. ultralytics/models/sam/predict.py +1 -1
  39. ultralytics/models/yolo/__init__.py +1 -1
  40. ultralytics/models/yolo/pose/__init__.py +1 -1
  41. ultralytics/models/yolo/segment/val.py +1 -1
  42. ultralytics/models/yolo/yoloe/__init__.py +7 -7
  43. ultralytics/nn/__init__.py +7 -7
  44. ultralytics/nn/autobackend.py +32 -5
  45. ultralytics/nn/modules/__init__.py +60 -60
  46. ultralytics/nn/modules/block.py +26 -26
  47. ultralytics/nn/modules/conv.py +7 -7
  48. ultralytics/nn/modules/head.py +1 -1
  49. ultralytics/nn/modules/transformer.py +7 -7
  50. ultralytics/nn/modules/utils.py +1 -1
  51. ultralytics/nn/tasks.py +3 -3
  52. ultralytics/solutions/__init__.py +12 -12
  53. ultralytics/solutions/object_counter.py +3 -6
  54. ultralytics/solutions/queue_management.py +1 -1
  55. ultralytics/solutions/similarity_search.py +3 -3
  56. ultralytics/trackers/__init__.py +1 -1
  57. ultralytics/trackers/byte_tracker.py +2 -2
  58. ultralytics/trackers/utils/matching.py +1 -1
  59. ultralytics/utils/__init__.py +2 -2
  60. ultralytics/utils/benchmarks.py +4 -4
  61. ultralytics/utils/callbacks/comet.py +2 -2
  62. ultralytics/utils/checks.py +2 -2
  63. ultralytics/utils/downloads.py +2 -2
  64. ultralytics/utils/export/__init__.py +1 -1
  65. ultralytics/utils/files.py +1 -1
  66. ultralytics/utils/git.py +1 -1
  67. ultralytics/utils/logger.py +1 -1
  68. ultralytics/utils/metrics.py +13 -9
  69. ultralytics/utils/ops.py +8 -8
  70. ultralytics/utils/plotting.py +2 -1
  71. ultralytics/utils/torch_utils.py +5 -4
  72. ultralytics/utils/triton.py +2 -2
  73. ultralytics/utils/tuner.py +4 -2
  74. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/WHEEL +0 -0
  75. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/entry_points.txt +0 -0
  76. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/licenses/LICENSE +0 -0
  77. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/top_level.txt +0 -0
tests/test_python.py CHANGED
@@ -12,7 +12,7 @@ import pytest
 import torch
 from PIL import Image

-from tests import CFG, MODEL, MODELS, SOURCE, SOURCES_LIST, TASK_MODEL_DATA, TMP
+from tests import CFG, MODEL, MODELS, SOURCE, SOURCES_LIST, TASK_MODEL_DATA
 from ultralytics import RTDETR, YOLO
 from ultralytics.cfg import TASK2DATA, TASKS
 from ultralytics.data.build import load_inference_source
@@ -33,14 +33,11 @@ from ultralytics.utils import (
     WINDOWS,
     YAML,
     checks,
-    is_dir_writeable,
     is_github_action_running,
 )
 from ultralytics.utils.downloads import download
 from ultralytics.utils.torch_utils import TORCH_1_11, TORCH_1_13

-IS_TMP_WRITEABLE = is_dir_writeable(TMP)  # WARNING: must be run once tests start as TMP does not exist on tests/init
-

 def test_model_forward():
     """Test the forward pass of the YOLO model."""
@@ -77,10 +74,9 @@ def test_model_profile():
     _ = model.predict(im, profile=True)


-@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
-def test_predict_txt():
+def test_predict_txt(tmp_path):
     """Test YOLO predictions with file, directory, and pattern sources listed in a text file."""
-    file = TMP / "sources_multi_row.txt"
+    file = tmp_path / "sources_multi_row.txt"
     with open(file, "w") as f:
         for src in SOURCES_LIST:
             f.write(f"{src}\n")
@@ -89,10 +85,9 @@ def test_predict_txt():


 @pytest.mark.skipif(True, reason="disabled for testing")
-@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
-def test_predict_csv_multi_row():
+def test_predict_csv_multi_row(tmp_path):
     """Test YOLO predictions with sources listed in multiple rows of a CSV file."""
-    file = TMP / "sources_multi_row.csv"
+    file = tmp_path / "sources_multi_row.csv"
     with open(file, "w", newline="") as f:
         writer = csv.writer(f)
         writer.writerow(["source"])
@@ -102,10 +97,9 @@ def test_predict_csv_multi_row():


 @pytest.mark.skipif(True, reason="disabled for testing")
-@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
-def test_predict_csv_single_row():
+def test_predict_csv_single_row(tmp_path):
     """Test YOLO predictions with sources listed in a single row of a CSV file."""
-    file = TMP / "sources_single_row.csv"
+    file = tmp_path / "sources_single_row.csv"
     with open(file, "w", newline="") as f:
         writer = csv.writer(f)
         writer.writerow(SOURCES_LIST)
@@ -142,16 +136,14 @@ def test_predict_visualize(model):
     YOLO(WEIGHTS_DIR / model)(SOURCE, imgsz=32, visualize=True)


-def test_predict_grey_and_4ch():
+def test_predict_grey_and_4ch(tmp_path):
     """Test YOLO prediction on SOURCE converted to greyscale and 4-channel images with various filenames."""
     im = Image.open(SOURCE)
-    directory = TMP / "im4"
-    directory.mkdir(parents=True, exist_ok=True)

-    source_greyscale = directory / "greyscale.jpg"
-    source_rgba = directory / "4ch.png"
-    source_non_utf = directory / "non_UTF_测试文件_tést_image.jpg"
-    source_spaces = directory / "image with spaces.jpg"
+    source_greyscale = tmp_path / "greyscale.jpg"
+    source_rgba = tmp_path / "4ch.png"
+    source_non_utf = tmp_path / "non_UTF_测试文件_tést_image.jpg"
+    source_spaces = tmp_path / "image with spaces.jpg"

     im.convert("L").save(source_greyscale)  # greyscale
     im.convert("RGBA").save(source_rgba)  # 4-ch PNG with alpha
@@ -181,9 +173,8 @@ def test_youtube():


 @pytest.mark.skipif(not ONLINE, reason="environment is offline")
-@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
 @pytest.mark.parametrize("model", MODELS)
-def test_track_stream(model):
+def test_track_stream(model, tmp_path):
     """
     Test streaming tracking on a short 10 frame video using ByteTrack tracker and different GMC methods.

@@ -199,7 +190,7 @@ def test_track_stream(model):
     # Test Global Motion Compensation (GMC) methods and ReID
     for gmc, reidm in zip(["orb", "sift", "ecc"], ["auto", "auto", "yolo11n-cls.pt"]):
         default_args = YAML.load(ROOT / "cfg/trackers/botsort.yaml")
-        custom_yaml = TMP / f"botsort-{gmc}.yaml"
+        custom_yaml = tmp_path / f"botsort-{gmc}.yaml"
         YAML.save(custom_yaml, {**default_args, "gmc_method": gmc, "with_reid": True, "model": reidm})
         model.track(video_url, imgsz=160, tracker=custom_yaml)

@@ -278,7 +269,7 @@ def test_predict_callback_and_setup():
     model.add_callback("on_predict_batch_end", on_predict_batch_end)

     dataset = load_inference_source(source=SOURCE)
-    bs = dataset.bs  # noqa access predictor properties
+    bs = dataset.bs  # access predictor properties
     results = model.predict(dataset, stream=True, imgsz=160)  # source already setup
     for r, im0, bs in results:
         print("test_callback", im0.shape)
@@ -288,7 +279,7 @@ def test_predict_callback_and_setup():


 @pytest.mark.parametrize("model", MODELS)
-def test_results(model: str):
+def test_results(model: str, tmp_path):
     """Test YOLO model results processing and output in various formats."""
     im = f"{ASSETS_URL}/boats.jpg" if model == "yolo11n-obb.pt" else SOURCE
     results = YOLO(WEIGHTS_DIR / model)([im, im], imgsz=160)
@@ -297,12 +288,12 @@ def test_results(model: str):
         r = r.cpu().numpy()
         print(r, len(r), r.path)  # print numpy attributes
         r = r.to(device="cpu", dtype=torch.float32)
-        r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
-        r.save_crop(save_dir=TMP / "runs/tests/crops/")
+        r.save_txt(txt_file=tmp_path / "runs/tests/label.txt", save_conf=True)
+        r.save_crop(save_dir=tmp_path / "runs/tests/crops/")
         r.to_df(decimals=3)  # Align to_ methods: https://docs.ultralytics.com/modes/predict/#working-with-results
         r.to_csv()
         r.to_json(normalize=True)
-        r.plot(pil=True, save=True, filename=TMP / "results_plot_save.jpg")
+        r.plot(pil=True, save=True, filename=tmp_path / "results_plot_save.jpg")
         r.plot(conf=True, boxes=True)
         print(r, len(r), r.path)  # print after methods

@@ -332,7 +323,7 @@ def test_labels_and_crops():


 @pytest.mark.skipif(not ONLINE, reason="environment is offline")
-def test_data_utils():
+def test_data_utils(tmp_path):
     """Test utility functions in ultralytics/data/utils.py, including dataset stats and auto-splitting."""
     from ultralytics.data.split import autosplit
     from ultralytics.data.utils import HUBDatasetStats
@@ -343,26 +334,28 @@

     for task in TASKS:
         file = Path(TASK2DATA[task]).with_suffix(".zip")  # i.e. coco8.zip
-        download(f"https://github.com/ultralytics/hub/raw/main/example_datasets/{file}", unzip=False, dir=TMP)
-        stats = HUBDatasetStats(TMP / file, task=task)
+        download(f"https://github.com/ultralytics/hub/raw/main/example_datasets/{file}", unzip=False, dir=tmp_path)
+        stats = HUBDatasetStats(tmp_path / file, task=task)
         stats.get_json(save=True)
         stats.process_images()

-    autosplit(TMP / "coco8")
-    zip_directory(TMP / "coco8/images/val")  # zip
+    autosplit(tmp_path / "coco8")
+    zip_directory(tmp_path / "coco8/images/val")  # zip


 @pytest.mark.skipif(not ONLINE, reason="environment is offline")
-def test_data_converter():
+def test_data_converter(tmp_path):
     """Test dataset conversion functions from COCO to YOLO format and class mappings."""
     from ultralytics.data.converter import coco80_to_coco91_class, convert_coco

-    download(f"{ASSETS_URL}/instances_val2017.json", dir=TMP)
-    convert_coco(labels_dir=TMP, save_dir=TMP / "yolo_labels", use_segments=True, use_keypoints=False, cls91to80=True)
+    download(f"{ASSETS_URL}/instances_val2017.json", dir=tmp_path)
+    convert_coco(
+        labels_dir=tmp_path, save_dir=tmp_path / "yolo_labels", use_segments=True, use_keypoints=False, cls91to80=True
+    )
     coco80_to_coco91_class()


-def test_data_annotator():
+def test_data_annotator(tmp_path):
     """Test automatic annotation of data using detection and segmentation models."""
     from ultralytics.data.annotator import auto_annotate

@@ -370,7 +363,7 @@ def test_data_annotator():
         ASSETS,
         det_model=WEIGHTS_DIR / "yolo11n.pt",
         sam_model=WEIGHTS_DIR / "mobile_sam.pt",
-        output_dir=TMP / "auto_annotate_labels",
+        output_dir=tmp_path / "auto_annotate_labels",
     )


@@ -464,7 +457,7 @@ def test_utils_ops():
     torch.allclose(boxes, xyxyxyxy2xywhr(xywhr2xyxyxyxy(boxes)), rtol=1e-3)


-def test_utils_files():
+def test_utils_files(tmp_path):
     """Test file handling utilities including file age, date, and paths with spaces."""
     from ultralytics.utils.files import file_age, file_date, get_latest_run, spaces_in_path

@@ -472,14 +465,14 @@ def test_utils_files():
     file_date(SOURCE)
     get_latest_run(ROOT / "runs")

-    path = TMP / "path/with spaces"
+    path = tmp_path / "path/with spaces"
     path.mkdir(parents=True, exist_ok=True)
     with spaces_in_path(path) as new_path:
         print(new_path)


 @pytest.mark.slow
-def test_utils_patches_torch_save():
+def test_utils_patches_torch_save(tmp_path):
     """Test torch_save backoff when _torch_save raises RuntimeError."""
     from unittest.mock import MagicMock, patch

@@ -489,7 +482,7 @@ def test_utils_patches_torch_save():

     with patch("ultralytics.utils.patches._torch_save", new=mock):
         with pytest.raises(RuntimeError):
-            torch_save(torch.zeros(1), TMP / "test.pt")
+            torch_save(torch.zeros(1), tmp_path / "test.pt")

     assert mock.call_count == 4, "torch_save was not attempted the expected number of times"

@@ -722,11 +715,11 @@ def test_multichannel():


 @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
-def test_grayscale(task: str, model: str, data: str) -> None:
+def test_grayscale(task: str, model: str, data: str, tmp_path) -> None:
     """Test YOLO model grayscale training, validation, and prediction functionality."""
     if task == "classify":  # not support grayscale classification yet
         return
-    grayscale_data = Path(TMP) / f"{Path(data).stem}-grayscale.yaml"
+    grayscale_data = tmp_path / f"{Path(data).stem}-grayscale.yaml"
     data = check_det_dataset(data)
     data["channels"] = 1  # add additional channels key for grayscale
     YAML.save(grayscale_data, data)
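Note: the rewritten tests above replace the module-level TMP directory and the IS_TMP_WRITEABLE skip marker with pytest's built-in tmp_path fixture. A minimal standalone sketch of that fixture pattern (illustrative only, not code from this package):

def test_writes_to_temp_dir(tmp_path):
    # pytest injects a unique, writable pathlib.Path per test; no manual mkdir, writeability check, or cleanup needed
    file = tmp_path / "sources.txt"
    file.write_text("image1.jpg\nimage2.jpg\n")
    assert file.read_text().splitlines() == ["image1.jpg", "image2.jpg"]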
tests/test_solutions.py CHANGED
@@ -10,7 +10,7 @@ import cv2
 import numpy as np
 import pytest

-from tests import MODEL, TMP
+from tests import MODEL
 from ultralytics import solutions
 from ultralytics.utils import ASSETS_URL, IS_RASPBERRYPI, TORCH_VERSION, checks
 from ultralytics.utils.downloads import safe_download
@@ -29,139 +29,6 @@ REGION = [(10, 200), (540, 200), (540, 180), (10, 180)]  # for object counting,
 HORIZONTAL_LINE = [(10, 200), (540, 200)]  # for object counting
 VERTICAL_LINE = [(320, 0), (320, 400)]  # for object counting

-# Test configs for each solution : (name, class, needs_frame_count, video, kwargs)
-SOLUTIONS = [
-    (
-        "ObjectCounter",
-        solutions.ObjectCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "ObjectCounter",
-        solutions.ObjectCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": HORIZONTAL_LINE, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "ObjectCounterVertical",
-        solutions.ObjectCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": VERTICAL_LINE, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "ObjectCounterwithOBB",
-        solutions.ObjectCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": "yolo11n-obb.pt", "show": SHOW},
-    ),
-    (
-        "Heatmap",
-        solutions.Heatmap,
-        False,
-        DEMO_VIDEO,
-        {"colormap": cv2.COLORMAP_PARULA, "model": MODEL, "show": SHOW, "region": None},
-    ),
-    (
-        "HeatmapWithRegion",
-        solutions.Heatmap,
-        False,
-        DEMO_VIDEO,
-        {"colormap": cv2.COLORMAP_PARULA, "region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "SpeedEstimator",
-        solutions.SpeedEstimator,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "QueueManager",
-        solutions.QueueManager,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "LineAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "line", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
-    ),
-    (
-        "PieAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "pie", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
-    ),
-    (
-        "BarAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "bar", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
-    ),
-    (
-        "AreaAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "area", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
-    ),
-    ("TrackZone", solutions.TrackZone, False, DEMO_VIDEO, {"region": REGION, "model": MODEL, "show": SHOW}),
-    (
-        "ObjectCropper",
-        solutions.ObjectCropper,
-        False,
-        CROP_VIDEO,
-        {"crop_dir": str(TMP / "cropped-detections"), "model": MODEL, "show": SHOW},
-    ),
-    (
-        "ObjectBlurrer",
-        solutions.ObjectBlurrer,
-        False,
-        DEMO_VIDEO,
-        {"blur_ratio": 0.02, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "InstanceSegmentation",
-        solutions.InstanceSegmentation,
-        False,
-        DEMO_VIDEO,
-        {"model": "yolo11n-seg.pt", "show": SHOW},
-    ),
-    ("VisionEye", solutions.VisionEye, False, DEMO_VIDEO, {"model": MODEL, "show": SHOW}),
-    (
-        "RegionCounter",
-        solutions.RegionCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    ("AIGym", solutions.AIGym, False, POSE_VIDEO, {"kpts": [6, 8, 10], "show": SHOW}),
-    (
-        "ParkingManager",
-        solutions.ParkingManagement,
-        False,
-        PARKING_VIDEO,
-        {"model": str(TMP / PARKING_MODEL), "show": SHOW, "json_file": str(TMP / PARKING_AREAS_JSON)},
-    ),
-    (
-        "StreamlitInference",
-        solutions.Inference,
-        False,
-        None,  # streamlit application doesn't require video file
-        {},  # streamlit application doesn't accept arguments
-    ),
-]
-

 def process_video(solution, video_path: str, needs_frame_count: bool = False):
     """Process video with solution, feeding frames and optional frame count to the solution instance."""
@@ -182,26 +49,168 @@ def process_video(solution, video_path: str, needs_frame_count: bool = False):


 @pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled for testing due to --slow test errors after YOLOE PR.")
-@pytest.mark.parametrize("name, solution_class, needs_frame_count, video, kwargs", SOLUTIONS)
-def test_solution(name, solution_class, needs_frame_count, video, kwargs):
+@pytest.mark.parametrize(
+    "name, solution_class, needs_frame_count, video, kwargs",
+    [
+        (
+            "ObjectCounter",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectCounter",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": HORIZONTAL_LINE, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectCounterVertical",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": VERTICAL_LINE, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectCounterwithOBB",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": "yolo11n-obb.pt", "show": SHOW},
+        ),
+        (
+            "Heatmap",
+            solutions.Heatmap,
+            False,
+            DEMO_VIDEO,
+            {"colormap": cv2.COLORMAP_PARULA, "model": MODEL, "show": SHOW, "region": None},
+        ),
+        (
+            "HeatmapWithRegion",
+            solutions.Heatmap,
+            False,
+            DEMO_VIDEO,
+            {"colormap": cv2.COLORMAP_PARULA, "region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "SpeedEstimator",
+            solutions.SpeedEstimator,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "QueueManager",
+            solutions.QueueManager,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "LineAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "line", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        (
+            "PieAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "pie", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        (
+            "BarAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "bar", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        (
+            "AreaAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "area", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        ("TrackZone", solutions.TrackZone, False, DEMO_VIDEO, {"region": REGION, "model": MODEL, "show": SHOW}),
+        (
+            "ObjectCropper",
+            solutions.ObjectCropper,
+            False,
+            CROP_VIDEO,
+            {"temp_crop_dir": "cropped-detections", "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectBlurrer",
+            solutions.ObjectBlurrer,
+            False,
+            DEMO_VIDEO,
+            {"blur_ratio": 0.02, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "InstanceSegmentation",
+            solutions.InstanceSegmentation,
+            False,
+            DEMO_VIDEO,
+            {"model": "yolo11n-seg.pt", "show": SHOW},
+        ),
+        ("VisionEye", solutions.VisionEye, False, DEMO_VIDEO, {"model": MODEL, "show": SHOW}),
+        (
+            "RegionCounter",
+            solutions.RegionCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        ("AIGym", solutions.AIGym, False, POSE_VIDEO, {"kpts": [6, 8, 10], "show": SHOW}),
+        (
+            "ParkingManager",
+            solutions.ParkingManagement,
+            False,
+            PARKING_VIDEO,
+            {"temp_model": str(PARKING_MODEL), "show": SHOW, "temp_json_file": str(PARKING_AREAS_JSON)},
+        ),
+        (
+            "StreamlitInference",
+            solutions.Inference,
+            False,
+            None,  # streamlit application doesn't require video file
+            {},  # streamlit application doesn't accept arguments
+        ),
+    ],
+)
+def test_solution(name, solution_class, needs_frame_count, video, kwargs, tmp_path):
     """Test individual Ultralytics solution with video processing and parameter validation."""
     if video:
         if name != "ObjectCounterVertical":
-            safe_download(url=f"{ASSETS_URL}/{video}", dir=TMP)
+            safe_download(url=f"{ASSETS_URL}/{video}", dir=tmp_path)
         else:
-            safe_download(url=f"{ASSETS_URL}/{VERTICAL_VIDEO}", dir=TMP)
+            safe_download(url=f"{ASSETS_URL}/{VERTICAL_VIDEO}", dir=tmp_path)
         if name == "ParkingManager":
-            safe_download(url=f"{ASSETS_URL}/{PARKING_AREAS_JSON}", dir=TMP)
-            safe_download(url=f"{ASSETS_URL}/{PARKING_MODEL}", dir=TMP)
+            safe_download(url=f"{ASSETS_URL}/{PARKING_AREAS_JSON}", dir=tmp_path)
+            safe_download(url=f"{ASSETS_URL}/{PARKING_MODEL}", dir=tmp_path)
+
     elif name == "StreamlitInference":
         if checks.check_imshow():  # do not merge with elif above
             solution_class(**kwargs).inference()  # requires interactive GUI environment
         return

+    # Update kwargs to use tmp_path
+    kwargs_updated = {}
+    for key in kwargs:
+        if key.startswith("temp_"):
+            kwargs_updated[key.replace("temp_", "")] = str(tmp_path / kwargs[key])
+        else:
+            kwargs_updated[key] = kwargs[key]
+
     video = VERTICAL_VIDEO if name == "ObjectCounterVertical" else video
     process_video(
-        solution=solution_class(**kwargs),
-        video_path=str(TMP / video),
+        solution=solution_class(**kwargs_updated),
+        video_path=str(tmp_path / video),
         needs_frame_count=needs_frame_count,
     )

@@ -291,10 +300,10 @@ def test_streamlit_handle_video_upload_creates_file():

 @pytest.mark.skipif(not TORCH_2_4, reason=f"VisualAISearch requires torch>=2.4 (found torch=={TORCH_VERSION})")
 @pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
-def test_similarity_search():
+def test_similarity_search(tmp_path):
     """Test similarity search solution with sample images and text query."""
-    safe_download(f"{ASSETS_URL}/4-imgs-similaritysearch.zip", dir=TMP)  # 4 dog images for testing in a zip file
-    searcher = solutions.VisualAISearch(data=str(TMP / "4-imgs-similaritysearch"))
+    safe_download(f"{ASSETS_URL}/4-imgs-similaritysearch.zip", dir=tmp_path)  # 4 dog images for testing in a zip file
+    searcher = solutions.VisualAISearch(data=str(tmp_path / "4-imgs-similaritysearch"))
     _ = searcher("a dog sitting on a bench")  # Returns the results in format "- img name | similarity score"

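Note: the test_solution signature above mixes @pytest.mark.parametrize arguments with the tmp_path fixture; pytest fills the parametrized names from the decorator and resolves the remaining argument through its fixture system. A small illustrative sketch of that mechanism (not code from this package):

import pytest

@pytest.mark.parametrize("name, value", [("a", 1), ("b", 2)])
def test_parametrized_with_fixture(name, value, tmp_path):
    # name/value come from parametrize; tmp_path is injected as a built-in fixture
    out = tmp_path / f"{name}.txt"
    out.write_text(str(value))
    assert out.read_text() == str(value)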
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

-__version__ = "8.3.218"
+__version__ = "8.3.221"

 import importlib
 import os
ultralytics/cfg/__init__.py CHANGED
@@ -77,7 +77,7 @@ TASK2METRIC = {

 ARGV = sys.argv or ["", ""]  # sometimes sys.argv = []
 SOLUTIONS_HELP_MSG = f"""
-    Arguments received: {str(["yolo"] + ARGV[1:])}. Ultralytics 'yolo solutions' usage overview:
+    Arguments received: {["yolo", *ARGV[1:]]!s}. Ultralytics 'yolo solutions' usage overview:

        yolo solutions SOLUTION ARGS

@@ -107,7 +107,7 @@ SOLUTIONS_HELP_MSG = f"""
        yolo streamlit-predict
    """
 CLI_HELP_MSG = f"""
-    Arguments received: {str(["yolo"] + ARGV[1:])}. Ultralytics 'yolo' commands use the following syntax:
+    Arguments received: {["yolo", *ARGV[1:]]!s}. Ultralytics 'yolo' commands use the following syntax:

        yolo TASK MODE ARGS

@@ -276,7 +276,9 @@ def cfg2dict(cfg: str | Path | dict | SimpleNamespace) -> dict:
     return cfg


-def get_cfg(cfg: str | Path | dict | SimpleNamespace = DEFAULT_CFG_DICT, overrides: dict = None) -> SimpleNamespace:
+def get_cfg(
+    cfg: str | Path | dict | SimpleNamespace = DEFAULT_CFG_DICT, overrides: dict | None = None
+) -> SimpleNamespace:
     """
     Load and merge configuration data from a file or dictionary, with optional overrides.

@@ -386,7 +388,7 @@ def check_cfg(cfg: dict, hard: bool = True) -> None:
                cfg[k] = bool(v)


-def get_save_dir(args: SimpleNamespace, name: str = None) -> Path:
+def get_save_dir(args: SimpleNamespace, name: str | None = None) -> Path:
     """
     Return the directory path for saving outputs, derived from arguments or default settings.

@@ -462,7 +464,7 @@ def _handle_deprecation(custom: dict) -> dict:
     return custom


-def check_dict_alignment(base: dict, custom: dict, e: Exception = None) -> None:
+def check_dict_alignment(base: dict, custom: dict, e: Exception | None = None) -> None:
     """
     Check alignment between custom and base configuration dictionaries, handling deprecated keys and providing error
     messages for mismatched keys.
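Note: the two help-message edits above swap str(["yolo"] + ARGV[1:]) for an f-string that uses list unpacking with an explicit !s conversion; both produce the same text. A quick standalone check (illustrative only):

import sys

ARGV = sys.argv or ["", ""]
old = str(["yolo"] + ARGV[1:])      # previous spelling: list concatenation wrapped in str()
new = f"{['yolo', *ARGV[1:]]!s}"    # new spelling: unpacking plus !s conversion inside the f-string
assert old == new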
ultralytics/cfg/default.yaml CHANGED
@@ -80,7 +80,7 @@ show_boxes: True # (bool) draw bounding boxes on images
 line_width: # (int, optional) line width of boxes; auto-scales with image size if not set

 # Export settings ------------------------------------------------------------------------------------------------------
-format: torchscript # (str) target format, e.g. torchscript|onnx|openvino|engine|coreml|saved_model|pb|tflite|edgetpu|tfjs|paddle|mnn|ncnn|imx|rknn
+format: torchscript # (str) target format, e.g. torchscript|onnx|openvino|engine|coreml|saved_model|pb|tflite|edgetpu|tfjs|paddle|mnn|ncnn|imx|rknn|executorch
 keras: False # (bool) TF SavedModel only (format=saved_model); enable Keras layers during export
 optimize: False # (bool) TorchScript only; apply mobile optimizations to the scripted model
 int8: False # (bool) INT8/PTQ where supported (openvino, tflite, tfjs, engine, imx); needs calibration data/fraction
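Note: this default.yaml change adds executorch to the documented export formats, in line with the ultralytics/engine/exporter.py changes listed above. Assuming it plugs into the existing export API like the other format strings, usage would presumably look like this (untested sketch; only the format name is taken from the comment above, the rest is the standard Ultralytics export workflow):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")           # any supported weights
model.export(format="executorch")    # format string newly listed in default.yaml for this version range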
ultralytics/data/__init__.py CHANGED
@@ -14,13 +14,13 @@ from .dataset import (
 __all__ = (
     "BaseDataset",
     "ClassificationDataset",
+    "GroundingDataset",
     "SemanticDataset",
+    "YOLOConcatDataset",
     "YOLODataset",
     "YOLOMultiModalDataset",
-    "YOLOConcatDataset",
-    "GroundingDataset",
-    "build_yolo_dataset",
-    "build_grounding",
     "build_dataloader",
+    "build_grounding",
+    "build_yolo_dataset",
     "load_inference_source",
 )