ultralytics-opencv-headless 8.3.246__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (298) hide show
  1. tests/__init__.py +23 -0
  2. tests/conftest.py +59 -0
  3. tests/test_cli.py +131 -0
  4. tests/test_cuda.py +216 -0
  5. tests/test_engine.py +157 -0
  6. tests/test_exports.py +309 -0
  7. tests/test_integrations.py +151 -0
  8. tests/test_python.py +777 -0
  9. tests/test_solutions.py +371 -0
  10. ultralytics/__init__.py +48 -0
  11. ultralytics/assets/bus.jpg +0 -0
  12. ultralytics/assets/zidane.jpg +0 -0
  13. ultralytics/cfg/__init__.py +1026 -0
  14. ultralytics/cfg/datasets/Argoverse.yaml +78 -0
  15. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  16. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  17. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  18. ultralytics/cfg/datasets/HomeObjects-3K.yaml +32 -0
  19. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  20. ultralytics/cfg/datasets/Objects365.yaml +447 -0
  21. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  22. ultralytics/cfg/datasets/VOC.yaml +102 -0
  23. ultralytics/cfg/datasets/VisDrone.yaml +87 -0
  24. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  25. ultralytics/cfg/datasets/brain-tumor.yaml +22 -0
  26. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  27. ultralytics/cfg/datasets/coco-pose.yaml +64 -0
  28. ultralytics/cfg/datasets/coco.yaml +118 -0
  29. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  30. ultralytics/cfg/datasets/coco128.yaml +101 -0
  31. ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
  32. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  33. ultralytics/cfg/datasets/coco8-pose.yaml +47 -0
  34. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  35. ultralytics/cfg/datasets/coco8.yaml +101 -0
  36. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  37. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  38. ultralytics/cfg/datasets/dog-pose.yaml +52 -0
  39. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  40. ultralytics/cfg/datasets/dota8.yaml +35 -0
  41. ultralytics/cfg/datasets/hand-keypoints.yaml +50 -0
  42. ultralytics/cfg/datasets/kitti.yaml +27 -0
  43. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  44. ultralytics/cfg/datasets/medical-pills.yaml +21 -0
  45. ultralytics/cfg/datasets/open-images-v7.yaml +663 -0
  46. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  47. ultralytics/cfg/datasets/signature.yaml +21 -0
  48. ultralytics/cfg/datasets/tiger-pose.yaml +41 -0
  49. ultralytics/cfg/datasets/xView.yaml +155 -0
  50. ultralytics/cfg/default.yaml +130 -0
  51. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  52. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  53. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  54. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  55. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  56. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  57. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  58. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  59. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  60. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  61. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  62. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  63. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  64. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  65. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  66. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  68. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  69. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  70. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  71. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  74. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  75. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  76. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  77. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  78. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  79. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  80. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +48 -0
  81. ultralytics/cfg/models/v8/yoloe-v8.yaml +48 -0
  82. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  83. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  84. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  85. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  86. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  87. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  88. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  89. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  90. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  91. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  92. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  93. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  94. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  95. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  96. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  97. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  98. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  99. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  100. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  101. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  102. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  103. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  105. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  106. ultralytics/cfg/trackers/botsort.yaml +21 -0
  107. ultralytics/cfg/trackers/bytetrack.yaml +12 -0
  108. ultralytics/data/__init__.py +26 -0
  109. ultralytics/data/annotator.py +66 -0
  110. ultralytics/data/augment.py +2801 -0
  111. ultralytics/data/base.py +435 -0
  112. ultralytics/data/build.py +437 -0
  113. ultralytics/data/converter.py +855 -0
  114. ultralytics/data/dataset.py +834 -0
  115. ultralytics/data/loaders.py +704 -0
  116. ultralytics/data/scripts/download_weights.sh +18 -0
  117. ultralytics/data/scripts/get_coco.sh +61 -0
  118. ultralytics/data/scripts/get_coco128.sh +18 -0
  119. ultralytics/data/scripts/get_imagenet.sh +52 -0
  120. ultralytics/data/split.py +138 -0
  121. ultralytics/data/split_dota.py +344 -0
  122. ultralytics/data/utils.py +798 -0
  123. ultralytics/engine/__init__.py +1 -0
  124. ultralytics/engine/exporter.py +1578 -0
  125. ultralytics/engine/model.py +1124 -0
  126. ultralytics/engine/predictor.py +508 -0
  127. ultralytics/engine/results.py +1522 -0
  128. ultralytics/engine/trainer.py +974 -0
  129. ultralytics/engine/tuner.py +448 -0
  130. ultralytics/engine/validator.py +384 -0
  131. ultralytics/hub/__init__.py +166 -0
  132. ultralytics/hub/auth.py +151 -0
  133. ultralytics/hub/google/__init__.py +174 -0
  134. ultralytics/hub/session.py +422 -0
  135. ultralytics/hub/utils.py +162 -0
  136. ultralytics/models/__init__.py +9 -0
  137. ultralytics/models/fastsam/__init__.py +7 -0
  138. ultralytics/models/fastsam/model.py +79 -0
  139. ultralytics/models/fastsam/predict.py +169 -0
  140. ultralytics/models/fastsam/utils.py +23 -0
  141. ultralytics/models/fastsam/val.py +38 -0
  142. ultralytics/models/nas/__init__.py +7 -0
  143. ultralytics/models/nas/model.py +98 -0
  144. ultralytics/models/nas/predict.py +56 -0
  145. ultralytics/models/nas/val.py +38 -0
  146. ultralytics/models/rtdetr/__init__.py +7 -0
  147. ultralytics/models/rtdetr/model.py +63 -0
  148. ultralytics/models/rtdetr/predict.py +88 -0
  149. ultralytics/models/rtdetr/train.py +89 -0
  150. ultralytics/models/rtdetr/val.py +216 -0
  151. ultralytics/models/sam/__init__.py +25 -0
  152. ultralytics/models/sam/amg.py +275 -0
  153. ultralytics/models/sam/build.py +365 -0
  154. ultralytics/models/sam/build_sam3.py +377 -0
  155. ultralytics/models/sam/model.py +169 -0
  156. ultralytics/models/sam/modules/__init__.py +1 -0
  157. ultralytics/models/sam/modules/blocks.py +1067 -0
  158. ultralytics/models/sam/modules/decoders.py +495 -0
  159. ultralytics/models/sam/modules/encoders.py +794 -0
  160. ultralytics/models/sam/modules/memory_attention.py +298 -0
  161. ultralytics/models/sam/modules/sam.py +1160 -0
  162. ultralytics/models/sam/modules/tiny_encoder.py +979 -0
  163. ultralytics/models/sam/modules/transformer.py +344 -0
  164. ultralytics/models/sam/modules/utils.py +512 -0
  165. ultralytics/models/sam/predict.py +3940 -0
  166. ultralytics/models/sam/sam3/__init__.py +3 -0
  167. ultralytics/models/sam/sam3/decoder.py +546 -0
  168. ultralytics/models/sam/sam3/encoder.py +529 -0
  169. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  170. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  171. ultralytics/models/sam/sam3/model_misc.py +199 -0
  172. ultralytics/models/sam/sam3/necks.py +129 -0
  173. ultralytics/models/sam/sam3/sam3_image.py +339 -0
  174. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  175. ultralytics/models/sam/sam3/vitdet.py +547 -0
  176. ultralytics/models/sam/sam3/vl_combiner.py +160 -0
  177. ultralytics/models/utils/__init__.py +1 -0
  178. ultralytics/models/utils/loss.py +466 -0
  179. ultralytics/models/utils/ops.py +315 -0
  180. ultralytics/models/yolo/__init__.py +7 -0
  181. ultralytics/models/yolo/classify/__init__.py +7 -0
  182. ultralytics/models/yolo/classify/predict.py +90 -0
  183. ultralytics/models/yolo/classify/train.py +202 -0
  184. ultralytics/models/yolo/classify/val.py +216 -0
  185. ultralytics/models/yolo/detect/__init__.py +7 -0
  186. ultralytics/models/yolo/detect/predict.py +122 -0
  187. ultralytics/models/yolo/detect/train.py +227 -0
  188. ultralytics/models/yolo/detect/val.py +507 -0
  189. ultralytics/models/yolo/model.py +430 -0
  190. ultralytics/models/yolo/obb/__init__.py +7 -0
  191. ultralytics/models/yolo/obb/predict.py +56 -0
  192. ultralytics/models/yolo/obb/train.py +79 -0
  193. ultralytics/models/yolo/obb/val.py +302 -0
  194. ultralytics/models/yolo/pose/__init__.py +7 -0
  195. ultralytics/models/yolo/pose/predict.py +65 -0
  196. ultralytics/models/yolo/pose/train.py +110 -0
  197. ultralytics/models/yolo/pose/val.py +248 -0
  198. ultralytics/models/yolo/segment/__init__.py +7 -0
  199. ultralytics/models/yolo/segment/predict.py +109 -0
  200. ultralytics/models/yolo/segment/train.py +69 -0
  201. ultralytics/models/yolo/segment/val.py +307 -0
  202. ultralytics/models/yolo/world/__init__.py +5 -0
  203. ultralytics/models/yolo/world/train.py +173 -0
  204. ultralytics/models/yolo/world/train_world.py +178 -0
  205. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  206. ultralytics/models/yolo/yoloe/predict.py +162 -0
  207. ultralytics/models/yolo/yoloe/train.py +287 -0
  208. ultralytics/models/yolo/yoloe/train_seg.py +122 -0
  209. ultralytics/models/yolo/yoloe/val.py +206 -0
  210. ultralytics/nn/__init__.py +27 -0
  211. ultralytics/nn/autobackend.py +958 -0
  212. ultralytics/nn/modules/__init__.py +182 -0
  213. ultralytics/nn/modules/activation.py +54 -0
  214. ultralytics/nn/modules/block.py +1947 -0
  215. ultralytics/nn/modules/conv.py +669 -0
  216. ultralytics/nn/modules/head.py +1183 -0
  217. ultralytics/nn/modules/transformer.py +793 -0
  218. ultralytics/nn/modules/utils.py +159 -0
  219. ultralytics/nn/tasks.py +1768 -0
  220. ultralytics/nn/text_model.py +356 -0
  221. ultralytics/py.typed +1 -0
  222. ultralytics/solutions/__init__.py +41 -0
  223. ultralytics/solutions/ai_gym.py +108 -0
  224. ultralytics/solutions/analytics.py +264 -0
  225. ultralytics/solutions/config.py +107 -0
  226. ultralytics/solutions/distance_calculation.py +123 -0
  227. ultralytics/solutions/heatmap.py +125 -0
  228. ultralytics/solutions/instance_segmentation.py +86 -0
  229. ultralytics/solutions/object_blurrer.py +89 -0
  230. ultralytics/solutions/object_counter.py +190 -0
  231. ultralytics/solutions/object_cropper.py +87 -0
  232. ultralytics/solutions/parking_management.py +280 -0
  233. ultralytics/solutions/queue_management.py +93 -0
  234. ultralytics/solutions/region_counter.py +133 -0
  235. ultralytics/solutions/security_alarm.py +151 -0
  236. ultralytics/solutions/similarity_search.py +219 -0
  237. ultralytics/solutions/solutions.py +828 -0
  238. ultralytics/solutions/speed_estimation.py +114 -0
  239. ultralytics/solutions/streamlit_inference.py +260 -0
  240. ultralytics/solutions/templates/similarity-search.html +156 -0
  241. ultralytics/solutions/trackzone.py +88 -0
  242. ultralytics/solutions/vision_eye.py +67 -0
  243. ultralytics/trackers/__init__.py +7 -0
  244. ultralytics/trackers/basetrack.py +115 -0
  245. ultralytics/trackers/bot_sort.py +257 -0
  246. ultralytics/trackers/byte_tracker.py +469 -0
  247. ultralytics/trackers/track.py +116 -0
  248. ultralytics/trackers/utils/__init__.py +1 -0
  249. ultralytics/trackers/utils/gmc.py +339 -0
  250. ultralytics/trackers/utils/kalman_filter.py +482 -0
  251. ultralytics/trackers/utils/matching.py +154 -0
  252. ultralytics/utils/__init__.py +1450 -0
  253. ultralytics/utils/autobatch.py +118 -0
  254. ultralytics/utils/autodevice.py +205 -0
  255. ultralytics/utils/benchmarks.py +728 -0
  256. ultralytics/utils/callbacks/__init__.py +5 -0
  257. ultralytics/utils/callbacks/base.py +233 -0
  258. ultralytics/utils/callbacks/clearml.py +146 -0
  259. ultralytics/utils/callbacks/comet.py +625 -0
  260. ultralytics/utils/callbacks/dvc.py +197 -0
  261. ultralytics/utils/callbacks/hub.py +110 -0
  262. ultralytics/utils/callbacks/mlflow.py +134 -0
  263. ultralytics/utils/callbacks/neptune.py +126 -0
  264. ultralytics/utils/callbacks/platform.py +313 -0
  265. ultralytics/utils/callbacks/raytune.py +42 -0
  266. ultralytics/utils/callbacks/tensorboard.py +123 -0
  267. ultralytics/utils/callbacks/wb.py +188 -0
  268. ultralytics/utils/checks.py +1006 -0
  269. ultralytics/utils/cpu.py +85 -0
  270. ultralytics/utils/dist.py +123 -0
  271. ultralytics/utils/downloads.py +529 -0
  272. ultralytics/utils/errors.py +35 -0
  273. ultralytics/utils/events.py +113 -0
  274. ultralytics/utils/export/__init__.py +7 -0
  275. ultralytics/utils/export/engine.py +237 -0
  276. ultralytics/utils/export/imx.py +315 -0
  277. ultralytics/utils/export/tensorflow.py +231 -0
  278. ultralytics/utils/files.py +219 -0
  279. ultralytics/utils/git.py +137 -0
  280. ultralytics/utils/instance.py +484 -0
  281. ultralytics/utils/logger.py +501 -0
  282. ultralytics/utils/loss.py +849 -0
  283. ultralytics/utils/metrics.py +1563 -0
  284. ultralytics/utils/nms.py +337 -0
  285. ultralytics/utils/ops.py +664 -0
  286. ultralytics/utils/patches.py +201 -0
  287. ultralytics/utils/plotting.py +1045 -0
  288. ultralytics/utils/tal.py +403 -0
  289. ultralytics/utils/torch_utils.py +984 -0
  290. ultralytics/utils/tqdm.py +440 -0
  291. ultralytics/utils/triton.py +112 -0
  292. ultralytics/utils/tuner.py +160 -0
  293. ultralytics_opencv_headless-8.3.246.dist-info/METADATA +374 -0
  294. ultralytics_opencv_headless-8.3.246.dist-info/RECORD +298 -0
  295. ultralytics_opencv_headless-8.3.246.dist-info/WHEEL +5 -0
  296. ultralytics_opencv_headless-8.3.246.dist-info/entry_points.txt +3 -0
  297. ultralytics_opencv_headless-8.3.246.dist-info/licenses/LICENSE +661 -0
  298. ultralytics_opencv_headless-8.3.246.dist-info/top_level.txt +1 -0
tests/test_python.py ADDED
@@ -0,0 +1,777 @@
1
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
+
3
+ import contextlib
4
+ import csv
5
+ import urllib
6
+ from copy import copy
7
+ from pathlib import Path
8
+
9
+ import cv2
10
+ import numpy as np
11
+ import pytest
12
+ import torch
13
+ from PIL import Image
14
+
15
+ from tests import CFG, MODEL, MODELS, SOURCE, SOURCES_LIST, TASK_MODEL_DATA
16
+ from ultralytics import RTDETR, YOLO
17
+ from ultralytics.cfg import TASK2DATA, TASKS
18
+ from ultralytics.data.build import load_inference_source
19
+ from ultralytics.data.utils import check_det_dataset
20
+ from ultralytics.utils import (
21
+ ARM64,
22
+ ASSETS,
23
+ ASSETS_URL,
24
+ DEFAULT_CFG,
25
+ DEFAULT_CFG_PATH,
26
+ IS_JETSON,
27
+ IS_RASPBERRYPI,
28
+ LINUX,
29
+ LOGGER,
30
+ ONLINE,
31
+ ROOT,
32
+ WEIGHTS_DIR,
33
+ WINDOWS,
34
+ YAML,
35
+ checks,
36
+ is_github_action_running,
37
+ )
38
+ from ultralytics.utils.downloads import download
39
+ from ultralytics.utils.torch_utils import TORCH_1_11, TORCH_1_13
40
+
41
+
42
def test_model_forward():
    """Run a forward pass of a YOLO model built from a YAML config."""
    yolo = YOLO(CFG)
    # source=None falls back to the default asset; augment=True exercises test-time augmentation
    yolo(source=None, imgsz=32, augment=True)
46
+
47
+
48
def test_model_methods():
    """Exercise core YOLO model methods and properties for basic API coverage."""
    model = YOLO(MODEL)

    # Methods: info, weight reset/reload, device move, layer fusing, callback management
    model.info(verbose=True, detailed=True)
    model = model.reset_weights()
    model = model.load(MODEL)
    model.to("cpu")
    model.fuse()
    model.clear_callback("on_train_start")
    model.reset_callbacks()

    # Properties: each access should succeed without raising
    for prop in ("names", "device", "transforms", "task_map"):
        getattr(model, prop)
66
+
67
+
68
def test_model_profile():
    """Profile a DetectionModel forward pass with profile=True to check timing instrumentation."""
    from ultralytics.nn.tasks import DetectionModel

    detector = DetectionModel()  # build model from default config
    image = torch.randn(1, 3, 64, 64)  # requires min imgsz=64
    detector.predict(image, profile=True)
75
+
76
+
77
def test_predict_txt(tmp_path):
    """Predict from a text file listing one source (file, directory, or pattern) per line."""
    listing = tmp_path / "sources_multi_row.txt"
    listing.write_text("".join(f"{src}\n" for src in SOURCES_LIST))
    predictions = YOLO(MODEL)(source=listing, imgsz=32)
    assert len(predictions) == 7  # 1 + 2 + 2 + 2 = 7 images
85
+
86
+
87
@pytest.mark.skipif(True, reason="disabled for testing")
def test_predict_csv_multi_row(tmp_path):
    """Predict from a CSV file with a header row and one source per subsequent row."""
    csv_path = tmp_path / "sources_multi_row.csv"
    with open(csv_path, "w", newline="") as fh:
        rows = csv.writer(fh)
        rows.writerow(["source"])
        for src in SOURCES_LIST:
            rows.writerow([src])
    predictions = YOLO(MODEL)(source=csv_path, imgsz=32)
    assert len(predictions) == 7  # 1 + 2 + 2 + 2 = 7 images
97
+
98
+
99
@pytest.mark.skipif(True, reason="disabled for testing")
def test_predict_csv_single_row(tmp_path):
    """Predict from a CSV file where all sources occupy a single row."""
    csv_path = tmp_path / "sources_single_row.csv"
    with open(csv_path, "w", newline="") as fh:
        csv.writer(fh).writerow(SOURCES_LIST)
    predictions = YOLO(MODEL)(source=csv_path, imgsz=32)
    assert len(predictions) == 7  # 1 + 2 + 2 + 2 = 7 images
108
+
109
+
110
@pytest.mark.parametrize("model_name", MODELS)
def test_predict_img(model_name):
    """Test YOLO model predictions on various image input types and sources, including online images.

    Covers PIL images, NumPy arrays, torch Tensors, lists, stream mode, and a mixed-type batch.
    """
    # The grayscale checkpoint expects single-channel input; every other weight is 3-channel
    channels = 1 if model_name == "yolo11n-grayscale.pt" else 3
    model = YOLO(WEIGHTS_DIR / model_name)
    im = cv2.imread(str(SOURCE), flags=cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR)  # uint8 NumPy array
    assert len(model(source=Image.open(SOURCE), save=True, verbose=True, imgsz=32)) == 1  # PIL
    assert len(model(source=im, save=True, save_txt=True, imgsz=32)) == 1  # ndarray
    assert len(model(torch.rand((2, channels, 32, 32)), imgsz=32)) == 2  # batch-size 2 Tensor, FP32 0.0-1.0 RGB order
    assert len(model(source=[im, im], save=True, save_txt=True, imgsz=32)) == 2  # batch
    assert len(list(model(source=[im, im], save=True, stream=True, imgsz=32))) == 2  # stream
    assert len(model(torch.zeros(320, 640, channels).numpy().astype(np.uint8), imgsz=32)) == 1  # tensor to numpy
    # Mixed batch: every supported source type in one call
    batch = [
        str(SOURCE),  # filename
        Path(SOURCE),  # Path
        f"{ASSETS_URL}/zidane.jpg?token=123" if ONLINE else SOURCE,  # URI
        im,  # OpenCV
        Image.open(SOURCE),  # PIL
        np.zeros((320, 640, channels), dtype=np.uint8),  # numpy
    ]
    assert len(model(batch, imgsz=32, classes=0)) == len(batch)  # multiple sources in a batch
131
+
132
+
133
@pytest.mark.parametrize("model", MODELS)
def test_predict_visualize(model):
    """Run prediction with visualize=True so feature-map visualizations are generated."""
    detector = YOLO(WEIGHTS_DIR / model)
    detector(SOURCE, imgsz=32, visualize=True)
137
+
138
+
139
def test_predict_gray_and_4ch(tmp_path):
    """Predict on SOURCE re-saved as 4-channel, grayscale, non-UTF, and space-containing files."""
    base = Image.open(SOURCE)

    # (destination path, PIL mode to convert to; None keeps the original mode)
    variants = [
        (tmp_path / "4ch.png", "RGBA"),  # 4-ch PNG with alpha
        (tmp_path / "grayscale.jpg", "L"),  # grayscale
        (tmp_path / "non_UTF_测试文件_tést_image.jpg", None),  # non-UTF characters in filename
        (tmp_path / "image with spaces.jpg", None),  # spaces in filename
    ]
    for path, mode in variants:
        (base.convert(mode) if mode else base).save(path)

    # Inference: each variant as PIL image, OpenCV array, and raw path
    model = YOLO(MODEL)
    for path, _ in variants:
        for source in Image.open(path), cv2.imread(str(path)), path:
            assert len(model(source, save=True, verbose=True, imgsz=32)) == 1  # verify that an image was run
        path.unlink()  # cleanup
160
+
161
+
162
@pytest.mark.slow
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
@pytest.mark.skipif(is_github_action_running(), reason="No auth https://github.com/JuanBindez/pytubefix/issues/166")
def test_youtube():
    """Test YOLO model on a YouTube video stream, handling potential network-related errors."""
    yolo = YOLO(MODEL)
    try:
        yolo.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
    except (urllib.error.HTTPError, ConnectionError) as err:
        # Tolerate flaky network, e.g. 'urllib.error.HTTPError: HTTP Error 429: Too Many Requests'
        LOGGER.error(f"YouTube Test Error: {err}")
173
+
174
+
175
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
@pytest.mark.parametrize("model", MODELS)
def test_track_stream(model, tmp_path):
    """Test streaming tracking on a short 10 frame video using ByteTrack tracker and different GMC methods.

    Note imgsz=160 required for tracking for higher confidence and better matches.
    """
    if model == "yolo11n-cls.pt":  # classification model not supported for tracking
        return
    video_url = f"{ASSETS_URL}/decelera_portrait_min.mov"
    model = YOLO(model)
    model.track(video_url, imgsz=160, tracker="bytetrack.yaml")
    model.track(video_url, imgsz=160, tracker="botsort.yaml", save_frames=True)  # test frame saving also

    # Test Global Motion Compensation (GMC) methods and ReID
    # "orb"/"sift" use the "auto" ReID model; "ecc" is paired with an explicit classification checkpoint
    for gmc, reidm in zip(["orb", "sift", "ecc"], ["auto", "auto", "yolo11n-cls.pt"]):
        default_args = YAML.load(ROOT / "cfg/trackers/botsort.yaml")
        custom_yaml = tmp_path / f"botsort-{gmc}.yaml"
        # Write a per-GMC tracker config overriding only the GMC method and ReID settings
        YAML.save(custom_yaml, {**default_args, "gmc_method": gmc, "with_reid": True, "model": reidm})
        model.track(video_url, imgsz=160, tracker=custom_yaml)
195
+
196
+
197
@pytest.mark.parametrize("task,weight,data", TASK_MODEL_DATA)
def test_val(task: str, weight: str, data: str) -> None:
    """Validate a model for each task and exercise metrics and confusion-matrix export helpers."""
    model = YOLO(weight)
    for plots in {True, False}:  # cover both plots=True and plots=False
        metrics = model.val(data=data, imgsz=32, plots=plots)
        # Both the metrics object and its confusion matrix expose the same export API
        for exportable in (metrics, metrics.confusion_matrix):
            exportable.to_df()
            exportable.to_csv()
            exportable.to_json()
210
+
211
+
212
@pytest.mark.skipif(IS_JETSON or IS_RASPBERRYPI, reason="Edge devices not intended for training")
def test_train_scratch():
    """Train a YOLO model from a YAML config (no pretrained weights), then run inference."""
    trainee = YOLO(CFG)
    trainee.train(data="coco8.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
    trainee(SOURCE)
218
+
219
+
220
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_train_ndjson():
    """Train for one epoch from an NDJSON-format dataset fetched over HTTP."""
    YOLO(WEIGHTS_DIR / "yolo11n.pt").train(data=f"{ASSETS_URL}/coco8-ndjson.ndjson", epochs=1, imgsz=32)
225
+
226
+
227
@pytest.mark.parametrize("scls", [False, True])
def test_train_pretrained(scls):
    """Fine-tune a pretrained segmentation checkpoint, with and without single-class mode."""
    seg_model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
    train_args = dict(
        data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0, single_cls=scls
    )
    seg_model.train(**train_args)
    seg_model(SOURCE)
235
+
236
+
237
def test_all_model_yamls():
    """Instantiate every model YAML under cfg/models; run RT-DETR inference where torch supports it."""
    for cfg_path in (ROOT / "cfg" / "models").rglob("*.yaml"):
        if "rtdetr" not in cfg_path.name:
            YOLO(cfg_path.name)
        elif TORCH_1_11:
            # RT-DETR requires torch>=1.11 and a fixed input size
            RTDETR(cfg_path.name)(SOURCE, imgsz=640)  # must be 640
245
+
246
+
247
@pytest.mark.skipif(WINDOWS, reason="Windows slow CI export bug https://github.com/ultralytics/ultralytics/pull/16003")
def test_workflow():
    """Run the train -> val -> predict -> export pipeline end to end on a single model."""
    pipeline = YOLO(MODEL)
    pipeline.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
    pipeline.val(imgsz=32)
    pipeline.predict(SOURCE, imgsz=32)
    pipeline.export(format="torchscript")  # WARNING: Windows slow CI export bug
255
+
256
+
257
def test_predict_callback_and_setup():
    """Verify a prediction callback can attach extra per-image data to predictor results."""

    def on_predict_batch_end(predictor):
        """Zip each result with its original image and the dataset batch size."""
        path, im0s, _ = predictor.batch
        images = im0s if isinstance(im0s, list) else [im0s]
        batch_sizes = [predictor.dataset.bs for _ in range(len(path))]
        predictor.results = zip(predictor.results, images, batch_sizes)  # results is list[batch_size]

    model = YOLO(MODEL)
    model.add_callback("on_predict_batch_end", on_predict_batch_end)

    dataset = load_inference_source(source=SOURCE)
    bs = dataset.bs  # access predictor properties
    # Stream over a pre-built dataset; each item is the (result, image, bs) triple from the callback
    for result, im0, batch_size in model.predict(dataset, stream=True, imgsz=160):  # source already setup
        print("test_callback", im0.shape)
        print("test_callback", batch_size)
        print(result.boxes)  # Boxes object for bbox outputs
278
+
279
+
280
@pytest.mark.parametrize("model", MODELS)
def test_results(model: str, tmp_path):
    """Test YOLO model results processing and output in various formats."""
    # OBB weights get an aerial-style image; all other models use the default SOURCE
    im = f"{ASSETS_URL}/boats.jpg" if model == "yolo11n-obb.pt" else SOURCE
    results = YOLO(WEIGHTS_DIR / model)([im, im], imgsz=160)
    for r in results:
        assert len(r), f"'{model}' results should not be empty!"
        # Round-trip: CPU numpy conversion, then back to torch float32 on CPU
        r = r.cpu().numpy()
        print(r, len(r), r.path)  # print numpy attributes
        r = r.to(device="cpu", dtype=torch.float32)
        # Persistence helpers: label txt files and cropped detections
        r.save_txt(txt_file=tmp_path / "runs/tests/label.txt", save_conf=True)
        r.save_crop(save_dir=tmp_path / "runs/tests/crops/")
        # Tabular/JSON export helpers
        r.to_df(decimals=3)  # Align to_ methods: https://docs.ultralytics.com/modes/predict/#working-with-results
        r.to_csv()
        r.to_json(normalize=True)
        # Plotting: save a PIL-rendered image, then render with confidences and boxes
        r.plot(pil=True, save=True, filename=tmp_path / "results_plot_save.jpg")
        r.plot(conf=True, boxes=True)
        print(r, len(r), r.path)  # print after methods
298
+
299
+
300
def test_labels_and_crops():
    """Test output from prediction args for saving YOLO detection labels and crops.

    Runs detection on two known images and verifies that saved label files and crop
    directories are consistent with the in-memory detections.
    """
    imgs = [SOURCE, ASSETS / "zidane.jpg"]
    results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
    save_path = Path(results[0].save_dir)
    for r in results:
        im_name = Path(r.path).stem
        cls_idxs = r.boxes.cls.int().tolist()
        # Check correct detections
        assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0])  # bus.jpg and zidane.jpg classes
        # Check label path
        labels = save_path / f"labels/{im_name}.txt"
        assert labels.exists()
        # Check detections match label count (ignoring blank lines)
        assert len(r.boxes.data) == len([line for line in labels.read_text().splitlines() if line])
        # Check crops path and files
        crop_dirs = list((save_path / "crops").iterdir())
        crop_files = [f for p in crop_dirs for f in p.glob("*")]
        # Crop directories match detections: one directory per detected class name
        assert all(r.names.get(c) in {d.name for d in crop_dirs} for c in cls_idxs)
        # Same number of crops as detections for this image
        assert len([f for f in crop_files if im_name in f.name]) == len(r.boxes.data)
322
+
323
+
324
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_data_utils(tmp_path):
    """Exercise dataset utilities: HUB dataset stats, auto-splitting, and directory zipping."""
    from ultralytics.data.split import autosplit
    from ultralytics.data.utils import HUBDatasetStats
    from ultralytics.utils.downloads import zip_directory

    for task in TASKS:
        archive = Path(TASK2DATA[task]).with_suffix(".zip")  # i.e. coco8.zip
        download(f"https://github.com/ultralytics/hub/raw/main/example_datasets/{archive}", unzip=False, dir=tmp_path)
        stats = HUBDatasetStats(tmp_path / archive, task=task)
        stats.get_json(save=True)
        stats.process_images()

    autosplit(tmp_path / "coco8")
    zip_directory(tmp_path / "coco8/images/val")  # zip
343
+
344
+
345
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_data_converter(tmp_path):
    """Convert a downloaded COCO annotation file to YOLO format and map class indices."""
    from ultralytics.data.converter import coco80_to_coco91_class, convert_coco

    download(f"{ASSETS_URL}/instances_val2017.json", dir=tmp_path)
    convert_kwargs = dict(save_dir=tmp_path / "yolo_labels", use_segments=True, use_keypoints=False, cls91to80=True)
    convert_coco(labels_dir=tmp_path, **convert_kwargs)
    coco80_to_coco91_class()
355
+
356
+
357
def test_data_annotator(tmp_path):
    """Auto-annotate the assets directory using a detector plus SAM for segmentation masks."""
    from ultralytics.data.annotator import auto_annotate

    annotate_args = {
        "det_model": WEIGHTS_DIR / "yolo11n.pt",
        "sam_model": WEIGHTS_DIR / "mobile_sam.pt",
        "output_dir": tmp_path / "auto_annotate_labels",
    }
    auto_annotate(ASSETS, **annotate_args)
367
+
368
+
369
def test_events():
    """Send a test analytics event through the Events mechanism."""
    from ultralytics.utils.events import Events

    dispatcher = Events()
    dispatcher.enabled = True
    test_cfg = copy(DEFAULT_CFG)  # shallow copy is sufficient here
    test_cfg.mode = "test"
    dispatcher(test_cfg)
378
+
379
+
380
def test_cfg_init():
    """Test configuration initialization utilities from the 'ultralytics.cfg' module."""
    from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value

    # Mismatched keys raise SyntaxError; suppress it so the call is still exercised
    with contextlib.suppress(SyntaxError):
        check_dict_alignment({"a": 1}, {"b": 2})
    copy_default_cfg()
    (Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")).unlink(missing_ok=False)

    # smart_value(): None and boolean conversions are case-insensitive
    for text in ("none", "None", "NONE"):
        assert smart_value(text) is None
    for text in ("true", "True", "TRUE"):
        assert smart_value(text) is True
    for text in ("false", "False", "FALSE"):
        assert smart_value(text) is False

    # Numeric conversion (ast.literal_eval)
    for text, expected in (("42", 42), ("-42", -42), ("3.14", 3.14), ("-3.14", -3.14), ("1e-3", 0.001)):
        assert smart_value(text) == expected

    # List/tuple/dict conversion (ast.literal_eval)
    assert smart_value("[1, 2, 3]") == [1, 2, 3]
    assert smart_value("(1, 2, 3)") == (1, 2, 3)
    assert smart_value("[640, 640]") == [640, 640]
    assert smart_value("{'a': 1, 'b': 2}") == {"a": 1, "b": 2}

    # Strings that fail ast.literal_eval are returned unchanged
    for text in ("some_string", "path/to/file", "hello world"):
        assert smart_value(text) == text

    # ast.literal_eval refuses to execute code, so injection attempts stay plain strings
    for payload in ("__import__('os').system('ls')", "eval('1+1')", "exec('x=1')"):
        assert smart_value(payload) == payload
428
+
429
+
430
def test_utils_init():
    """Smoke-test environment-introspection helpers from ultralytics.utils."""
    from ultralytics.utils import get_ubuntu_version, is_github_action_running

    _ = get_ubuntu_version()  # result unused; we only verify the call succeeds
    _ = is_github_action_running()
436
+
437
+
438
def test_utils_checks():
    """Test various utility checks for filenames, git status, requirements, image sizes, and versions."""
    checks.check_yolov5u_filename("yolov5n.pt")  # legacy YOLOv5 filename handling
    checks.check_requirements("numpy")  # check requirements.txt
    checks.check_imgsz([600, 600], max_dim=1)  # max_dim=1 — presumably collapses imgsz to one value; verify
    checks.check_imshow(warn=True)  # warn=True — presumably warns instead of raising without a display
    checks.check_version("ultralytics", "8.0.0")  # version-string comparison
    checks.print_args()
446
+
447
+
448
@pytest.mark.skipif(WINDOWS, reason="Windows profiling is extremely slow (cause unknown)")
def test_utils_benchmarks():
    """Profile a small model config with ProfileModels as a benchmarking smoke test."""
    from ultralytics.utils.benchmarks import ProfileModels

    # Tiny image size and minimal run counts keep this fast
    profiler = ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1)
    profiler.run()
454
+
455
+
456
def test_utils_torchutils():
    """Exercise torch utility helpers: op profiling, profiler-based FLOP counting, time sync."""
    from ultralytics.nn.modules.conv import Conv
    from ultralytics.utils.torch_utils import get_flops_with_torch_profiler, profile_ops, time_sync

    sample = torch.randn(1, 64, 20, 20)  # BCHW input
    conv = Conv(64, 64, k=1, s=2)

    profile_ops(sample, [conv], n=3)
    get_flops_with_torch_profiler(conv)
    time_sync()
467
+
468
+
469
def test_utils_ops():
    """Test utility operations for coordinate transformations and normalizations.

    Round-trips each axis-aligned box-format conversion pair and asserts the result
    matches the input. Previously the `torch.allclose` return values were discarded,
    so a broken conversion could never fail this test.
    """
    from ultralytics.utils.ops import (
        ltwh2xywh,
        ltwh2xyxy,
        make_divisible,
        xywh2ltwh,
        xywh2xyxy,
        xywhn2xyxy,
        xywhr2xyxyxyxy,
        xyxy2ltwh,
        xyxy2xywh,
        xyxy2xywhn,
        xyxyxyxy2xywhr,
    )

    make_divisible(17, torch.tensor([8]))

    boxes = torch.rand(10, 4)  # xywh
    assert torch.allclose(boxes, xyxy2xywh(xywh2xyxy(boxes)))
    assert torch.allclose(boxes, xyxy2xywhn(xywhn2xyxy(boxes)))
    assert torch.allclose(boxes, ltwh2xywh(xywh2ltwh(boxes)))
    assert torch.allclose(boxes, xyxy2ltwh(ltwh2xyxy(boxes)))

    boxes = torch.rand(10, 5)  # xywhr for OBB
    boxes[:, 4] = torch.randn(10) * 30
    # NOTE(review): the OBB round-trip is run as a smoke test only. With arbitrary
    # angles (randn * 30), xyxyxyxy2xywhr may return an angle-canonicalized
    # representation, so exact equality of raw xywhr parameters is not guaranteed —
    # asserting allclose here could fail on correct code. Confirm canonicalization
    # behavior before tightening this to an assert.
    xyxyxyxy2xywhr(xywhr2xyxyxyxy(boxes))
496
+
497
+
498
def test_utils_files(tmp_path):
    """Exercise file utilities: age/date queries, latest-run lookup, and space-safe paths."""
    from ultralytics.utils.files import file_age, file_date, get_latest_run, spaces_in_path

    file_age(SOURCE)
    file_date(SOURCE)
    get_latest_run(ROOT / "runs")

    # Build a directory whose path contains a space, then request a safe alias for it
    spaced_dir = tmp_path / "path/with spaces"
    spaced_dir.mkdir(parents=True, exist_ok=True)
    with spaces_in_path(spaced_dir) as safe_path:
        print(safe_path)
510
+
511
+
512
@pytest.mark.slow
def test_utils_patches_torch_save(tmp_path):
    """Verify torch_save retries and ultimately re-raises when _torch_save always fails."""
    from unittest.mock import MagicMock, patch

    from ultralytics.utils.patches import torch_save

    failing_save = MagicMock(side_effect=RuntimeError)

    with patch("ultralytics.utils.patches._torch_save", new=failing_save), pytest.raises(RuntimeError):
        torch_save(torch.zeros(1), tmp_path / "test.pt")

    # The patched saver must have been attempted exactly 4 times before re-raising
    assert failing_save.call_count == 4, "torch_save was not attempted the expected number of times"
526
+
527
+
528
def test_nn_modules_conv():
    """Forward-pass smoke test for conv-family modules not covered elsewhere, plus Conv2 fusion."""
    from ultralytics.nn.modules.conv import CBAM, Conv2, ConvTranspose, DWConvTranspose2d, Focus

    in_ch, out_ch = 8, 16  # input and output channels
    batch = torch.zeros(4, in_ch, 10, 10)  # BCHW

    # Run every module not exercised by other tests
    for module in (DWConvTranspose2d(in_ch, out_ch), ConvTranspose(in_ch, out_ch), Focus(in_ch, out_ch), CBAM(in_ch)):
        module(batch)

    # Conv2 fuse path: fuse the parallel convs, then forward through the fused module
    fused = Conv2(in_ch, out_ch)
    fused.fuse_convs()
    fused(batch)
545
+
546
+
547
def test_nn_modules_block():
    """Forward-pass smoke test for block modules not covered by other tests."""
    from ultralytics.nn.modules.block import C1, C3TR, BottleneckCSP, C3Ghost, C3x

    in_ch, out_ch = 8, 16  # input and output channels
    batch = torch.zeros(4, in_ch, 10, 10)  # BCHW

    # Instantiate each block and push one batch through it
    for block_cls in (C1, C3x, C3TR, C3Ghost, BottleneckCSP):
        block_cls(in_ch, out_ch)(batch)
560
+
561
+
562
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_hub():
    """Test Ultralytics HUB functionalities.

    Smoke-tests the HUB export-format listing, the logout helper, and one
    GET request issued through smart_request with progress enabled.
    """
    from ultralytics.hub import export_fmts_hub, logout
    from ultralytics.hub.utils import smart_request

    export_fmts_hub()
    logout()
    smart_request("GET", "https://github.com", progress=True)
571
+
572
+
573
@pytest.fixture
def image():
    """Read the default test image from disk as an OpenCV BGR array."""
    bgr = cv2.imread(str(SOURCE))
    return bgr
577
+
578
+
579
@pytest.mark.parametrize(
    "auto_augment, erasing, force_color_jitter",
    [
        (None, 0.0, False),
        ("randaugment", 0.5, True),
        ("augmix", 0.2, False),
        ("autoaugment", 0.0, True),
    ],
)
def test_classify_transforms_train(image, auto_augment, erasing, force_color_jitter):
    """Check classification training transforms produce a float32 CHW tensor across augment settings."""
    from ultralytics.data.augment import classify_augmentations

    aug_kwargs = dict(
        size=224,
        mean=(0.5, 0.5, 0.5),
        std=(0.5, 0.5, 0.5),
        scale=(0.08, 1.0),
        ratio=(3.0 / 4.0, 4.0 / 3.0),
        hflip=0.5,
        vflip=0.5,
        auto_augment=auto_augment,
        hsv_h=0.015,
        hsv_s=0.4,
        hsv_v=0.4,
        force_color_jitter=force_color_jitter,
        erasing=erasing,
    )
    transform = classify_augmentations(**aug_kwargs)

    # The fixture yields BGR; convert to RGB before wrapping in a PIL image
    rgb_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    result = transform(rgb_pil)

    # Output must be a float32 tensor at the requested CHW size
    assert result.shape == (3, 224, 224)
    assert torch.is_tensor(result)
    assert result.dtype == torch.float32
613
+
614
+
615
@pytest.mark.slow
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_model_tune():
    """Run short hyperparameter-tuning loops on pose and classification models."""
    tune_kwargs = dict(plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
    YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", **tune_kwargs)
    YOLO("yolo11n-cls.pt").tune(data="imagenet10", **tune_kwargs)
621
+
622
+
623
def test_model_embeddings():
    """Verify embed() yields one embedding per source image for detect and segment models."""
    detector = YOLO(MODEL)
    segmenter = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")

    for sources in ([SOURCE], [SOURCE, SOURCE]):  # batch sizes 1 and 2
        assert len(detector.embed(source=sources, imgsz=32)) == len(sources)
        assert len(segmenter.embed(source=sources, imgsz=32)) == len(sources)
631
+
632
+
633
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
@pytest.mark.skipif(
    checks.IS_PYTHON_3_8 and LINUX and ARM64,
    reason="YOLOWorld with CLIP is not supported in Python 3.8 and aarch64 Linux",
)
def test_yolo_world():
    """Test YOLO world models with CLIP support.

    Covers open-vocabulary prediction with custom classes, fine-tuning a
    pretrained world model, and training from scratch with WorldTrainerFromScratch.
    """
    model = YOLO(WEIGHTS_DIR / "yolov8s-world.pt")  # no YOLO11n-world model yet
    model.set_classes(["tree", "window"])  # open-vocabulary class prompts
    model(SOURCE, conf=0.01)  # low conf — presumably to keep some detections on a single image

    model = YOLO(WEIGHTS_DIR / "yolov8s-worldv2.pt")  # no YOLO11n-world model yet
    # Training from a pretrained model. Eval is included at the final stage of training.
    # Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
    model.train(
        data="dota8.yaml",
        epochs=1,
        imgsz=32,
        cache="disk",
        close_mosaic=1,
    )

    # Test WorldTrainerFromScratch with the dict-style multi-dataset spec
    from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch

    model = YOLO("yolov8s-worldv2.yaml")  # no YOLO11n-world model yet
    model.train(
        data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
        epochs=1,
        imgsz=32,
        cache="disk",
        close_mosaic=1,
        trainer=WorldTrainerFromScratch,
    )
667
+
668
+
669
@pytest.mark.skipif(not TORCH_1_13, reason="YOLOE with CLIP requires torch>=1.13")
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOE with CLIP is not supported in Python 3.12")
@pytest.mark.skipif(
    checks.IS_PYTHON_3_8 and LINUX and ARM64,
    reason="YOLOE with CLIP is not supported in Python 3.8 and aarch64 Linux",
)
def test_yoloe():
    """Test YOLOE models with MobileClip support.

    Exercises text-prompt and visual-prompt prediction, validation in both
    prompt modes, fine-tuning and from-scratch training, and the prompt-free
    model variant.
    """
    # Predict with text prompts: derive class embeddings from names, then set them
    model = YOLO(WEIGHTS_DIR / "yoloe-11s-seg.pt")
    names = ["person", "bus"]
    model.set_classes(names, model.get_text_pe(names))
    model(SOURCE, conf=0.01)

    from ultralytics import YOLOE
    from ultralytics.models.yolo.yoloe import YOLOEVPSegPredictor

    # Predict with visual prompts: example boxes plus per-box class ids
    # (bboxes appear to be pixel xyxy coordinates — TODO confirm against predictor docs)
    visuals = dict(
        bboxes=np.array([[221.52, 405.8, 344.98, 857.54], [120, 425, 160, 445]]),
        cls=np.array([0, 1]),
    )
    model.predict(
        SOURCE,
        visual_prompts=visuals,
        predictor=YOLOEVPSegPredictor,
    )

    # Validate with text prompts (default) ...
    model = YOLOE(WEIGHTS_DIR / "yoloe-11s-seg.pt")
    model.val(data="coco128-seg.yaml", imgsz=32)
    # ... and with visual prompts loaded (load_vp=True)
    model.val(data="coco128-seg.yaml", load_vp=True, imgsz=32)

    # Train: fine-tune from pretrained weights with the PE segmentation trainer
    from ultralytics.models.yolo.yoloe import YOLOEPESegTrainer, YOLOESegTrainerFromScratch

    model = YOLOE("yoloe-11s-seg.pt")
    model.train(
        data="coco128-seg.yaml",
        epochs=1,
        close_mosaic=1,
        trainer=YOLOEPESegTrainer,
        imgsz=32,
    )
    # Train: from scratch (YAML config) with the dict-style multi-dataset spec
    model = YOLOE("yoloe-11s-seg.yaml")
    model.train(
        data=dict(train=dict(yolo_data=["coco128-seg.yaml"]), val=dict(yolo_data=["coco128-seg.yaml"])),
        epochs=1,
        close_mosaic=1,
        trainer=YOLOESegTrainerFromScratch,
        imgsz=32,
    )

    # Prompt-free variant: predict ...
    model = YOLOE(WEIGHTS_DIR / "yoloe-11s-seg-pf.pt")
    model.predict(SOURCE)
    # ... and validate
    model = YOLOE("yoloe-11s-seg.pt")  # or select yoloe-m/l-seg.pt for different sizes
    model.val(data="coco128-seg.yaml", imgsz=32)
733
+
734
+
735
def test_yolov10():
    """End-to-end smoke test of a YOLOv10 model: short train, val, and predict runs."""
    yolov10 = YOLO("yolov10n.yaml")
    # Train briefly, then validate and predict with the same instance
    yolov10.train(data="coco8.yaml", epochs=1, imgsz=32, close_mosaic=1, cache="disk")
    yolov10.val(data="coco8.yaml", imgsz=32)
    yolov10.predict(imgsz=32, save_txt=True, save_crop=True, augment=True)
    yolov10(SOURCE)
743
+
744
+
745
def test_multichannel():
    """Smoke-test training, validation, prediction, and ONNX export on 10-channel imagery."""
    multispectral = YOLO("yolo11n.pt")
    multispectral.train(data="coco8-multispectral.yaml", epochs=1, imgsz=32, close_mosaic=1, cache="disk")
    multispectral.val(data="coco8-multispectral.yaml")
    frame = np.zeros((32, 32, 10), dtype=np.uint8)  # HWC array with 10 channels
    multispectral.predict(source=frame, imgsz=32, save_txt=True, save_crop=True, augment=True)
    multispectral.export(format="onnx")
753
+
754
+
755
+ @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
756
+ def test_grayscale(task: str, model: str, data: str, tmp_path) -> None:
757
+ """Test YOLO model grayscale training, validation, and prediction functionality."""
758
+ if task == "classify": # not support grayscale classification yet
759
+ return
760
+ grayscale_data = tmp_path / f"{Path(data).stem}-grayscale.yaml"
761
+ data = check_det_dataset(data)
762
+ data["channels"] = 1 # add additional channels key for grayscale
763
+ YAML.save(data=data, file=grayscale_data)
764
+ # remove npy files in train/val splits if exists, might be created by previous tests
765
+ for split in {"train", "val"}:
766
+ for npy_file in (Path(data["path"]) / data[split]).glob("*.npy"):
767
+ npy_file.unlink()
768
+
769
+ model = YOLO(model)
770
+ model.train(data=grayscale_data, epochs=1, imgsz=32, close_mosaic=1)
771
+ model.val(data=grayscale_data)
772
+ im = np.zeros((32, 32, 1), dtype=np.uint8)
773
+ model.predict(source=im, imgsz=32, save_txt=True, save_crop=True, augment=True)
774
+ export_model = model.export(format="onnx")
775
+
776
+ model = YOLO(export_model, task=task)
777
+ model.predict(source=im, imgsz=32)