ultralytics 8.3.116-py3-none-any.whl → 8.3.118-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. tests/__init__.py +22 -0
  2. tests/conftest.py +83 -0
  3. tests/test_cli.py +128 -0
  4. tests/test_cuda.py +164 -0
  5. tests/test_engine.py +131 -0
  6. tests/test_exports.py +231 -0
  7. tests/test_integrations.py +154 -0
  8. tests/test_python.py +695 -0
  9. tests/test_solutions.py +176 -0
  10. ultralytics/__init__.py +1 -1
  11. ultralytics/cfg/__init__.py +1 -1
  12. ultralytics/data/augment.py +3 -0
  13. ultralytics/data/base.py +11 -3
  14. ultralytics/data/dataset.py +3 -4
  15. ultralytics/data/loaders.py +2 -1
  16. ultralytics/engine/exporter.py +18 -11
  17. ultralytics/engine/trainer.py +2 -2
  18. ultralytics/hub/session.py +3 -2
  19. ultralytics/hub/utils.py +1 -1
  20. ultralytics/models/yolo/detect/predict.py +2 -2
  21. ultralytics/models/yolo/detect/val.py +1 -1
  22. ultralytics/models/yolo/model.py +2 -3
  23. ultralytics/models/yolo/obb/train.py +1 -1
  24. ultralytics/models/yolo/pose/predict.py +1 -1
  25. ultralytics/models/yolo/pose/train.py +1 -1
  26. ultralytics/models/yolo/pose/val.py +1 -1
  27. ultralytics/models/yolo/segment/train.py +3 -3
  28. ultralytics/models/yolo/yoloe/val.py +1 -1
  29. ultralytics/nn/autobackend.py +10 -9
  30. ultralytics/nn/text_model.py +97 -15
  31. ultralytics/utils/__init__.py +1 -1
  32. ultralytics/utils/benchmarks.py +4 -5
  33. ultralytics/utils/checks.py +4 -2
  34. ultralytics/utils/downloads.py +1 -0
  35. ultralytics/utils/torch_utils.py +4 -3
  36. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/METADATA +3 -4
  37. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/RECORD +41 -32
  38. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/WHEEL +0 -0
  39. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/entry_points.txt +0 -0
  40. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/licenses/LICENSE +0 -0
  41. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/top_level.txt +0 -0
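The most visible change in the list above is that the tests/ package (items 1–9) now ships with the distribution, alongside the version bump to 8.3.118 (presumably the one-line change in ultralytics/__init__.py). A minimal sketch, assuming the new wheel is installed, of confirming the upgrade at runtime:

import ultralytics

# ultralytics/__init__.py (item 10 above, +1 -1) holds the package version string.
print(ultralytics.__version__)  # expected to print "8.3.118" after upgrading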
tests/test_exports.py ADDED
@@ -0,0 +1,231 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ import io
+ import shutil
+ import uuid
+ from contextlib import redirect_stderr, redirect_stdout
+ from itertools import product
+ from pathlib import Path
+
+ import pytest
+
+ from tests import MODEL, SOURCE
+ from ultralytics import YOLO
+ from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+ from ultralytics.utils import (
+     ARM64,
+     IS_RASPBERRYPI,
+     LINUX,
+     MACOS,
+     WINDOWS,
+     checks,
+ )
+ from ultralytics.utils.torch_utils import TORCH_1_9, TORCH_1_13
+
+
+ def test_export_torchscript():
+     """Test YOLO model exporting to TorchScript format for compatibility and correctness."""
+     file = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32)
+     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+ def test_export_onnx():
+     """Test YOLO model export to ONNX format with dynamic axes."""
+     file = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32)
+     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+ @pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
+ def test_export_openvino():
+     """Test YOLO exports to OpenVINO format for model inference compatibility."""
+     file = YOLO(MODEL).export(format="openvino", imgsz=32)
+     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+ @pytest.mark.slow
+ @pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
+ @pytest.mark.parametrize(
+     "task, dynamic, int8, half, batch, nms",
+     [  # generate all combinations except for exclusion cases
+         (task, dynamic, int8, half, batch, nms)
+         for task, dynamic, int8, half, batch, nms in product(
+             TASKS, [True, False], [True, False], [True, False], [1, 2], [True, False]
+         )
+         if not ((int8 and half) or (task == "classify" and nms))
+     ],
+ )
+ def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms):
+     """Test YOLO model exports to OpenVINO under various configuration matrix conditions."""
+     file = YOLO(TASK2MODEL[task]).export(
+         format="openvino",
+         imgsz=32,
+         dynamic=dynamic,
+         int8=int8,
+         half=half,
+         batch=batch,
+         data=TASK2DATA[task],
+         nms=nms,
+     )
+     if WINDOWS:
+         # Use unique filenames due to Windows file permissions bug possibly due to latent threaded use
+         # See https://github.com/ultralytics/ultralytics/actions/runs/8957949304/job/24601616830?pr=10423
+         file = Path(file)
+         file = file.rename(file.with_stem(f"{file.stem}-{uuid.uuid4()}"))
+     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+     shutil.rmtree(file, ignore_errors=True)  # retry in case of potential lingering multi-threaded file usage errors
+
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize(
+     "task, dynamic, int8, half, batch, simplify, nms",
+     [  # generate all combinations except for exclusion cases
+         (task, dynamic, int8, half, batch, simplify, nms)
+         for task, dynamic, int8, half, batch, simplify, nms in product(
+             TASKS, [True, False], [False], [False], [1, 2], [True, False], [True, False]
+         )
+         if not ((int8 and half) or (task == "classify" and nms) or (task == "obb" and nms and not TORCH_1_13))
+     ],
+ )
+ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
+     """Test YOLO exports to ONNX format with various configurations and parameters."""
+     file = YOLO(TASK2MODEL[task]).export(
+         format="onnx", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, simplify=simplify, nms=nms
+     )
+     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+     Path(file).unlink()  # cleanup
+
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize(
+     "task, dynamic, int8, half, batch, nms",
+     [  # generate all combinations except for exclusion cases
+         (task, dynamic, int8, half, batch, nms)
+         for task, dynamic, int8, half, batch, nms in product(TASKS, [False], [False], [False], [1, 2], [True, False])
+         if not (task == "classify" and nms)
+     ],
+ )
+ def test_export_torchscript_matrix(task, dynamic, int8, half, batch, nms):
+     """Tests YOLO model exports to TorchScript format under varied configurations."""
+     file = YOLO(TASK2MODEL[task]).export(
+         format="torchscript", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, nms=nms
+     )
+     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+     Path(file).unlink()  # cleanup
+
+
+ @pytest.mark.slow
+ @pytest.mark.skipif(not MACOS, reason="CoreML inference only supported on macOS")
+ @pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
+ @pytest.mark.skipif(checks.IS_PYTHON_3_13, reason="CoreML not supported in Python 3.13")
+ @pytest.mark.parametrize(
+     "task, dynamic, int8, half, batch",
+     [  # generate all combinations except for exclusion cases
+         (task, dynamic, int8, half, batch)
+         for task, dynamic, int8, half, batch in product(TASKS, [False], [True, False], [True, False], [1])
+         if not (int8 and half)
+     ],
+ )
+ def test_export_coreml_matrix(task, dynamic, int8, half, batch):
+     """Test YOLO exports to CoreML format with various parameter configurations."""
+     file = YOLO(TASK2MODEL[task]).export(
+         format="coreml",
+         imgsz=32,
+         dynamic=dynamic,
+         int8=int8,
+         half=half,
+         batch=batch,
+     )
+     YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
+     shutil.rmtree(file)  # cleanup
+
+
+ @pytest.mark.slow
+ @pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
+ @pytest.mark.skipif(
+     not LINUX or IS_RASPBERRYPI,
+     reason="Test disabled as TF suffers from install conflicts on Windows, macOS and Raspberry Pi",
+ )
+ @pytest.mark.parametrize(
+     "task, dynamic, int8, half, batch, nms",
+     [  # generate all combinations except for exclusion cases
+         (task, dynamic, int8, half, batch, nms)
+         for task, dynamic, int8, half, batch, nms in product(
+             TASKS, [False], [True, False], [True, False], [1], [True, False]
+         )
+         if not ((int8 and half) or (task == "classify" and nms) or (ARM64 and nms))
+     ],
+ )
+ def test_export_tflite_matrix(task, dynamic, int8, half, batch, nms):
+     """Test YOLO exports to TFLite format considering various export configurations."""
+     file = YOLO(TASK2MODEL[task]).export(
+         format="tflite", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, nms=nms
+     )
+     YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
+     Path(file).unlink()  # cleanup
+
+
+ @pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
+ @pytest.mark.skipif(WINDOWS, reason="CoreML not supported on Windows")  # RuntimeError: BlobWriter not loaded
+ @pytest.mark.skipif(LINUX and ARM64, reason="CoreML not supported on aarch64 Linux")
+ @pytest.mark.skipif(checks.IS_PYTHON_3_13, reason="CoreML not supported in Python 3.13")
+ def test_export_coreml():
+     """Test YOLO exports to CoreML format and check for errors."""
+     # Capture stdout and stderr
+     stdout, stderr = io.StringIO(), io.StringIO()
+     with redirect_stdout(stdout), redirect_stderr(stderr):
+         YOLO(MODEL).export(format="coreml", nms=True, imgsz=32)
+         if MACOS:
+             file = YOLO(MODEL).export(format="coreml", imgsz=32)
+             YOLO(file)(SOURCE, imgsz=32)  # model prediction only supported on macOS for nms=False models
+
+     # Check captured output for errors
+     output = stdout.getvalue() + stderr.getvalue()
+     assert "Error" not in output, f"CoreML export produced errors: {output}"
+     assert "You will not be able to run predict()" not in output, "CoreML export has predict() error"
+
+
+ @pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
+ @pytest.mark.skipif(not LINUX, reason="Test disabled as TF suffers from install conflicts on Windows and macOS")
+ def test_export_tflite():
+     """Test YOLO exports to TFLite format under specific OS and Python version conditions."""
+     model = YOLO(MODEL)
+     file = model.export(format="tflite", imgsz=32)
+     YOLO(file)(SOURCE, imgsz=32)
+
+
+ @pytest.mark.skipif(True, reason="Test disabled")
+ @pytest.mark.skipif(not LINUX, reason="TF suffers from install conflicts on Windows and macOS")
+ def test_export_pb():
+     """Test YOLO exports to TensorFlow's Protobuf (*.pb) format."""
+     model = YOLO(MODEL)
+     file = model.export(format="pb", imgsz=32)
+     YOLO(file)(SOURCE, imgsz=32)
+
+
+ @pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirements conflict.")
+ def test_export_paddle():
+     """Test YOLO exports to Paddle format, noting protobuf conflicts with ONNX."""
+     YOLO(MODEL).export(format="paddle", imgsz=32)
+
+
+ @pytest.mark.slow
+ def test_export_mnn():
+     """Test YOLO exports to MNN format (WARNING: MNN test must precede NCNN test or CI error on Windows)."""
+     file = YOLO(MODEL).export(format="mnn", imgsz=32)
+     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+ @pytest.mark.slow
+ def test_export_ncnn():
+     """Test YOLO exports to NCNN format."""
+     file = YOLO(MODEL).export(format="ncnn", imgsz=32)
+     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+ @pytest.mark.skipif(True, reason="Test disabled as keras and tensorflow versions conflict with TFLite export.")
+ @pytest.mark.skipif(not LINUX or MACOS, reason="Skipping test on Windows and macOS")
+ def test_export_imx():
+     """Test YOLO exports to IMX format."""
+     model = YOLO("yolov8n.pt")
+     file = model.export(format="imx", imgsz=32)
+     YOLO(file)(SOURCE, imgsz=32)
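Every test above follows the same export → reload → predict round trip; the matrix variants only sweep task, dynamic, int8, half, batch and nms over it, filtering out unsupported combinations (e.g. int8 together with half) in the parametrize list comprehensions. A minimal standalone sketch of that round trip, replacing the suite's MODEL and SOURCE constants (defined in tests/__init__.py, not shown here) with an assumed yolo11n.pt checkpoint and a bundled sample image:

from ultralytics import YOLO
from ultralytics.utils import ASSETS  # directory of bundled sample images; assumes it contains "bus.jpg"

model = YOLO("yolo11n.pt")  # stand-in for tests.MODEL
onnx_file = model.export(format="onnx", dynamic=True, imgsz=32)  # export() returns the path of the exported file
YOLO(onnx_file)(ASSETS / "bus.jpg", imgsz=32)  # reload the exported model and run inference, as the tests do with SOURCE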
tests/test_integrations.py ADDED
@@ -0,0 +1,154 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ import contextlib
+ import os
+ import subprocess
+ import time
+ from pathlib import Path
+
+ import pytest
+
+ from tests import MODEL, SOURCE, TMP
+ from ultralytics import YOLO, download
+ from ultralytics.utils import DATASETS_DIR, SETTINGS
+ from ultralytics.utils.checks import check_requirements
+
+
+ @pytest.mark.slow
+ def test_tensorboard():
+     """Test training with TensorBoard logging enabled."""
+     SETTINGS["tensorboard"] = True
+     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
+     SETTINGS["tensorboard"] = False
+
+
+ @pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
+ def test_model_ray_tune():
+     """Tune YOLO model using Ray for hyperparameter optimization."""
+     YOLO("yolo11n-cls.yaml").tune(
+         use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
+     )
+
+
+ @pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
+ def test_mlflow():
+     """Test training with MLflow tracking enabled (see https://mlflow.org/ for details)."""
+     SETTINGS["mlflow"] = True
+     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
+     SETTINGS["mlflow"] = False
+
+
+ @pytest.mark.skipif(True, reason="Test failing in scheduled CI https://github.com/ultralytics/ultralytics/pull/8868")
+ @pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
+ def test_mlflow_keep_run_active():
+     """Ensure MLflow run status matches MLFLOW_KEEP_RUN_ACTIVE environment variable settings."""
+     import mlflow
+
+     SETTINGS["mlflow"] = True
+     run_name = "Test Run"
+     os.environ["MLFLOW_RUN"] = run_name
+
+     # Test with MLFLOW_KEEP_RUN_ACTIVE=True
+     os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
+     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+     status = mlflow.active_run().info.status
+     assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"
+
+     run_id = mlflow.active_run().info.run_id
+
+     # Test with MLFLOW_KEEP_RUN_ACTIVE=False
+     os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
+     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+     status = mlflow.get_run(run_id=run_id).info.status
+     assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"
+
+     # Test with MLFLOW_KEEP_RUN_ACTIVE not set
+     os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
+     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+     status = mlflow.get_run(run_id=run_id).info.status
+     assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
+     SETTINGS["mlflow"] = False
+
+
+ @pytest.mark.skipif(not check_requirements("tritonclient", install=False), reason="tritonclient[all] not installed")
+ def test_triton():
+     """Test NVIDIA Triton Server functionalities with YOLO model."""
+     check_requirements("tritonclient[all]")
+     from tritonclient.http import InferenceServerClient  # noqa
+
+     # Create variables
+     model_name = "yolo"
+     triton_repo = TMP / "triton_repo"  # Triton repo path
+     triton_model = triton_repo / model_name  # Triton model path
+
+     # Export model to ONNX
+     f = YOLO(MODEL).export(format="onnx", dynamic=True)
+
+     # Prepare Triton repo
+     (triton_model / "1").mkdir(parents=True, exist_ok=True)
+     Path(f).rename(triton_model / "1" / "model.onnx")
+     (triton_model / "config.pbtxt").touch()
+
+     # Define image https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
+     tag = "nvcr.io/nvidia/tritonserver:23.09-py3"  # 6.4 GB
+
+     # Pull the image
+     subprocess.call(f"docker pull {tag}", shell=True)
+
+     # Run the Triton server and capture the container ID
+     container_id = (
+         subprocess.check_output(
+             f"docker run -d --rm -v {triton_repo}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models",
+             shell=True,
+         )
+         .decode("utf-8")
+         .strip()
+     )
+
+     # Wait for the Triton server to start
+     triton_client = InferenceServerClient(url="localhost:8000", verbose=False, ssl=False)
+
+     # Wait until model is ready
+     for _ in range(10):
+         with contextlib.suppress(Exception):
+             assert triton_client.is_model_ready(model_name)
+             break
+         time.sleep(1)
+
+     # Check Triton inference
+     YOLO(f"http://localhost:8000/{model_name}", "detect")(SOURCE)  # exported model inference
+
+     # Kill and remove the container at the end of the test
+     subprocess.call(f"docker kill {container_id}", shell=True)
+
+
+ @pytest.mark.skipif(not check_requirements("pycocotools", install=False), reason="pycocotools not installed")
+ def test_pycocotools():
+     """Validate YOLO model predictions on COCO dataset using pycocotools."""
+     from ultralytics.models.yolo.detect import DetectionValidator
+     from ultralytics.models.yolo.pose import PoseValidator
+     from ultralytics.models.yolo.segment import SegmentationValidator
+
+     # Download reference annotations after each dataset has been downloaded
+     url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
+
+     args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
+     validator = DetectionValidator(args=args)
+     validator()
+     validator.is_coco = True
+     download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
+     _ = validator.eval_json(validator.stats)
+
+     args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
+     validator = SegmentationValidator(args=args)
+     validator()
+     validator.is_coco = True
+     download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
+     _ = validator.eval_json(validator.stats)
+
+     args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
+     validator = PoseValidator(args=args)
+     validator()
+     validator.is_coco = True
+     download(f"{url}person_keypoints_val2017.json", dir=DATASETS_DIR / "coco8-pose/annotations")
+     _ = validator.eval_json(validator.stats)
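test_triton above assembles a throwaway Triton model repository (yolo/1/model.onnx plus an empty config.pbtxt), starts the server in Docker, then polls until the model reports ready before predicting against http://localhost:8000/yolo. A standalone sketch of that readiness poll, factored into a helper; the name wait_until_ready and the ten-attempt default are illustrative, not part of the package:

import time
from contextlib import suppress

from tritonclient.http import InferenceServerClient  # requires tritonclient[all]


def wait_until_ready(client: InferenceServerClient, model_name: str, attempts: int = 10) -> bool:
    """Poll a Triton server until the named model reports ready, mirroring the loop in test_triton."""
    for _ in range(attempts):
        with suppress(Exception):  # the server may not be reachable yet; ignore connection errors
            if client.is_model_ready(model_name):
                return True
        time.sleep(1)
    return False


# Usage, assuming a Triton container is already serving a model repository on localhost:8000:
# client = InferenceServerClient(url="localhost:8000", verbose=False, ssl=False)
# wait_until_ready(client, "yolo")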