ultralytics 8.3.104__py3-none-any.whl → 8.3.106__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/test_cuda.py DELETED
@@ -1,164 +0,0 @@
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
- from itertools import product
- from pathlib import Path
-
- import pytest
- import torch
-
- from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
- from ultralytics import YOLO
- from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
- from ultralytics.utils import ASSETS, WEIGHTS_DIR
- from ultralytics.utils.checks import check_amp
-
-
- def test_checks():
-     """Validate CUDA settings against torch CUDA functions."""
-     assert torch.cuda.is_available() == CUDA_IS_AVAILABLE
-     assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
-
-
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
- def test_amp():
-     """Test AMP training checks."""
-     model = YOLO("yolo11n.pt").model.cuda()
-     assert check_amp(model)
-
-
- @pytest.mark.slow
- @pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
- @pytest.mark.parametrize(
-     "task, dynamic, int8, half, batch",
-     [  # generate all combinations but exclude those where both int8 and half are True
-         (task, dynamic, int8, half, batch)
-         # Note: tests reduced below pending compute availability expansion as GPU CI runner utilization is high
-         # for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
-         for task, dynamic, int8, half, batch in product(TASKS, [True], [True], [False], [2])
-         if not (int8 and half)  # exclude cases where both int8 and half are True
-     ],
- )
- def test_export_engine_matrix(task, dynamic, int8, half, batch):
-     """
-     Test YOLO model export to TensorRT format for various configurations and run inference.
-
-     Args:
-         task (str): Task type like 'detect', 'segment', etc.
-         dynamic (bool): Whether to use dynamic input size.
-         int8 (bool): Whether to use INT8 precision.
-         half (bool): Whether to use FP16 precision.
-         batch (int): Batch size for export.
-     """
-     file = YOLO(TASK2MODEL[task]).export(
-         format="engine",
-         imgsz=32,
-         dynamic=dynamic,
-         int8=int8,
-         half=half,
-         batch=batch,
-         data=TASK2DATA[task],
-         workspace=1,  # reduce workspace GB for less resource utilization during testing
-         simplify=True,  # use 'onnxslim'
-     )
-     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
-     Path(file).unlink()  # cleanup
-     Path(file).with_suffix(".cache").unlink() if int8 else None  # cleanup INT8 cache
-
-
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
- def test_train():
-     """Test model training on a minimal dataset using available CUDA devices."""
-     device = 0 if CUDA_DEVICE_COUNT == 1 else [0, 1]
-     YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)  # requires imgsz>=64
-
-
- @pytest.mark.slow
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
- def test_predict_multiple_devices():
-     """Validate model prediction consistency across CPU and CUDA devices."""
-     model = YOLO("yolo11n.pt")
-     model = model.cpu()
-     assert str(model.device) == "cpu"
-     _ = model(SOURCE)  # CPU inference
-     assert str(model.device) == "cpu"
-
-     model = model.to("cuda:0")
-     assert str(model.device) == "cuda:0"
-     _ = model(SOURCE)  # CUDA inference
-     assert str(model.device) == "cuda:0"
-
-     model = model.cpu()
-     assert str(model.device) == "cpu"
-     _ = model(SOURCE)  # CPU inference
-     assert str(model.device) == "cpu"
-
-     model = model.cuda()
-     assert str(model.device) == "cuda:0"
-     _ = model(SOURCE)  # CUDA inference
-     assert str(model.device) == "cuda:0"
-
-
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
- def test_autobatch():
-     """Check optimal batch size for YOLO model training using autobatch utility."""
-     from ultralytics.utils.autobatch import check_train_batch_size
-
-     check_train_batch_size(YOLO(MODEL).model.cuda(), imgsz=128, amp=True)
-
-
- @pytest.mark.slow
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
- def test_utils_benchmarks():
-     """Profile YOLO models for performance benchmarks."""
-     from ultralytics.utils.benchmarks import ProfileModels
-
-     # Pre-export a dynamic engine model to use dynamic inference
-     YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
-     ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
-
-
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
- def test_predict_sam():
-     """Test SAM model predictions using different prompts, including bounding boxes and point annotations."""
-     from ultralytics import SAM
-     from ultralytics.models.sam import Predictor as SAMPredictor
-
-     # Load a model
-     model = SAM(WEIGHTS_DIR / "sam2.1_b.pt")
-
-     # Display model information (optional)
-     model.info()
-
-     # Run inference
-     model(SOURCE, device=0)
-
-     # Run inference with bboxes prompt
-     model(SOURCE, bboxes=[439, 437, 524, 709], device=0)
-
-     # Run inference with no labels
-     model(ASSETS / "zidane.jpg", points=[900, 370], device=0)
-
-     # Run inference with 1D points and 1D labels
-     model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=0)
-
-     # Run inference with 2D points and 1D labels
-     model(ASSETS / "zidane.jpg", points=[[900, 370]], labels=[1], device=0)
-
-     # Run inference with multiple 2D points and 1D labels
-     model(ASSETS / "zidane.jpg", points=[[400, 370], [900, 370]], labels=[1, 1], device=0)
-
-     # Run inference with 3D points and 2D labels (multiple points per object)
-     model(ASSETS / "zidane.jpg", points=[[[900, 370], [1000, 100]]], labels=[[1, 1]], device=0)
-
-     # Create SAMPredictor
-     overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
-     predictor = SAMPredictor(overrides=overrides)
-
-     # Set image
-     predictor.set_image(ASSETS / "zidane.jpg")  # set with image file
-     # predictor(bboxes=[439, 437, 524, 709])
-     # predictor(points=[900, 370], labels=[1])
-
-     # Reset image
-     predictor.reset_image()
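Note: the deleted CUDA tests above all follow the same export-then-reload pattern: export a model, reopen the exported artifact with YOLO(), run inference on it, then delete the file. A minimal sketch of that pattern, assuming only a local ultralytics install (ONNX stands in for the TensorRT "engine" format so no GPU is required, and the image URL is illustrative):

```python
from pathlib import Path

from ultralytics import YOLO

# Export, reload the exported file, and run inference on it,
# mirroring the cleanup step the deleted tests performed afterwards.
file = YOLO("yolo11n.pt").export(format="onnx", dynamic=True, imgsz=32)
YOLO(file)("https://ultralytics.com/images/bus.jpg", imgsz=64)  # dynamic export accepts a larger imgsz
Path(file).unlink()  # cleanup
```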
tests/test_engine.py DELETED
@@ -1,131 +0,0 @@
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
- import sys
- from unittest import mock
-
- from tests import MODEL
- from ultralytics import YOLO
- from ultralytics.cfg import get_cfg
- from ultralytics.engine.exporter import Exporter
- from ultralytics.models.yolo import classify, detect, segment
- from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
-
-
- def test_func(*args):  # noqa
-     """Test function callback for evaluating YOLO model performance metrics."""
-     print("callback test passed")
-
-
- def test_export():
-     """Tests the model exporting function by adding a callback and asserting its execution."""
-     exporter = Exporter()
-     exporter.add_callback("on_export_start", test_func)
-     assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
-     f = exporter(model=YOLO("yolo11n.yaml").model)
-     YOLO(f)(ASSETS)  # exported model inference
-
-
- def test_detect():
-     """Test YOLO object detection training, validation, and prediction functionality."""
-     overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False}
-     cfg = get_cfg(DEFAULT_CFG)
-     cfg.data = "coco8.yaml"
-     cfg.imgsz = 32
-
-     # Trainer
-     trainer = detect.DetectionTrainer(overrides=overrides)
-     trainer.add_callback("on_train_start", test_func)
-     assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
-     trainer.train()
-
-     # Validator
-     val = detect.DetectionValidator(args=cfg)
-     val.add_callback("on_val_start", test_func)
-     assert test_func in val.callbacks["on_val_start"], "callback test failed"
-     val(model=trainer.best)  # validate best.pt
-
-     # Predictor
-     pred = detect.DetectionPredictor(overrides={"imgsz": [64, 64]})
-     pred.add_callback("on_predict_start", test_func)
-     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
-     # Confirm there is no issue with sys.argv being empty.
-     with mock.patch.object(sys, "argv", []):
-         result = pred(source=ASSETS, model=MODEL)
-         assert len(result), "predictor test failed"
-
-     overrides["resume"] = trainer.last
-     trainer = detect.DetectionTrainer(overrides=overrides)
-     try:
-         trainer.train()
-     except Exception as e:
-         print(f"Expected exception caught: {e}")
-         return
-
-     Exception("Resume test failed!")
-
-
- def test_segment():
-     """Tests image segmentation training, validation, and prediction pipelines using YOLO models."""
-     overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
-     cfg = get_cfg(DEFAULT_CFG)
-     cfg.data = "coco8-seg.yaml"
-     cfg.imgsz = 32
-     # YOLO(CFG_SEG).train(**overrides)  # works
-
-     # Trainer
-     trainer = segment.SegmentationTrainer(overrides=overrides)
-     trainer.add_callback("on_train_start", test_func)
-     assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
-     trainer.train()
-
-     # Validator
-     val = segment.SegmentationValidator(args=cfg)
-     val.add_callback("on_val_start", test_func)
-     assert test_func in val.callbacks["on_val_start"], "callback test failed"
-     val(model=trainer.best)  # validate best.pt
-
-     # Predictor
-     pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
-     pred.add_callback("on_predict_start", test_func)
-     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
-     result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt")
-     assert len(result), "predictor test failed"
-
-     # Test resume
-     overrides["resume"] = trainer.last
-     trainer = segment.SegmentationTrainer(overrides=overrides)
-     try:
-         trainer.train()
-     except Exception as e:
-         print(f"Expected exception caught: {e}")
-         return
-
-     Exception("Resume test failed!")
-
-
- def test_classify():
-     """Test image classification including training, validation, and prediction phases."""
-     overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
-     cfg = get_cfg(DEFAULT_CFG)
-     cfg.data = "imagenet10"
-     cfg.imgsz = 32
-     # YOLO(CFG_SEG).train(**overrides)  # works
-
-     # Trainer
-     trainer = classify.ClassificationTrainer(overrides=overrides)
-     trainer.add_callback("on_train_start", test_func)
-     assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
-     trainer.train()
-
-     # Validator
-     val = classify.ClassificationValidator(args=cfg)
-     val.add_callback("on_val_start", test_func)
-     assert test_func in val.callbacks["on_val_start"], "callback test failed"
-     val(model=trainer.best)
-
-     # Predictor
-     pred = classify.ClassificationPredictor(overrides={"imgsz": [64, 64]})
-     pred.add_callback("on_predict_start", test_func)
-     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
-     result = pred(source=ASSETS, model=trainer.best)
-     assert len(result), "predictor test failed"
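Note: every trainer, validator, and predictor exercised above shares one callback mechanism: add_callback() registers a function under an event name, and the engine fires it at that stage with the engine object as the argument. A minimal sketch of the same mechanism through the public YOLO wrapper (the event name is taken from the tests above; the callback body and image URL are illustrative):

```python
from ultralytics import YOLO


def announce(predictor):
    # Fired before prediction begins; receives the predictor instance.
    print(f"callback fired by {type(predictor).__name__}")


model = YOLO("yolo11n.pt")
model.add_callback("on_predict_start", announce)
model("https://ultralytics.com/images/bus.jpg", imgsz=64)  # prints before inference runs
```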
tests/test_exports.py DELETED
@@ -1,231 +0,0 @@
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
- import io
- import shutil
- import uuid
- from contextlib import redirect_stderr, redirect_stdout
- from itertools import product
- from pathlib import Path
-
- import pytest
-
- from tests import MODEL, SOURCE
- from ultralytics import YOLO
- from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
- from ultralytics.utils import (
-     ARM64,
-     IS_RASPBERRYPI,
-     LINUX,
-     MACOS,
-     WINDOWS,
-     checks,
- )
- from ultralytics.utils.torch_utils import TORCH_1_9, TORCH_1_13
-
-
- def test_export_torchscript():
-     """Test YOLO model exporting to TorchScript format for compatibility and correctness."""
-     file = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32)
-     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
-
-
- def test_export_onnx():
-     """Test YOLO model export to ONNX format with dynamic axes."""
-     file = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32)
-     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
-
-
- @pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
- def test_export_openvino():
-     """Test YOLO exports to OpenVINO format for model inference compatibility."""
-     file = YOLO(MODEL).export(format="openvino", imgsz=32)
-     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
-
-
- @pytest.mark.slow
- @pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
- @pytest.mark.parametrize(
-     "task, dynamic, int8, half, batch, nms",
-     [  # generate all combinations except for exclusion cases
-         (task, dynamic, int8, half, batch, nms)
-         for task, dynamic, int8, half, batch, nms in product(
-             TASKS, [True, False], [True, False], [True, False], [1, 2], [True, False]
-         )
-         if not ((int8 and half) or (task == "classify" and nms))
-     ],
- )
- def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms):
-     """Test YOLO model exports to OpenVINO under various configuration matrix conditions."""
-     file = YOLO(TASK2MODEL[task]).export(
-         format="openvino",
-         imgsz=32,
-         dynamic=dynamic,
-         int8=int8,
-         half=half,
-         batch=batch,
-         data=TASK2DATA[task],
-         nms=nms,
-     )
-     if WINDOWS:
-         # Use unique filenames due to Windows file permissions bug possibly due to latent threaded use
-         # See https://github.com/ultralytics/ultralytics/actions/runs/8957949304/job/24601616830?pr=10423
-         file = Path(file)
-         file = file.rename(file.with_stem(f"{file.stem}-{uuid.uuid4()}"))
-     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
-     shutil.rmtree(file, ignore_errors=True)  # retry in case of potential lingering multi-threaded file usage errors
-
-
- @pytest.mark.slow
- @pytest.mark.parametrize(
-     "task, dynamic, int8, half, batch, simplify, nms",
-     [  # generate all combinations except for exclusion cases
-         (task, dynamic, int8, half, batch, simplify, nms)
-         for task, dynamic, int8, half, batch, simplify, nms in product(
-             TASKS, [True, False], [False], [False], [1, 2], [True, False], [True, False]
-         )
-         if not ((int8 and half) or (task == "classify" and nms) or (task == "obb" and nms and not TORCH_1_13))
-     ],
- )
- def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
-     """Test YOLO exports to ONNX format with various configurations and parameters."""
-     file = YOLO(TASK2MODEL[task]).export(
-         format="onnx", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, simplify=simplify, nms=nms
-     )
-     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
-     Path(file).unlink()  # cleanup
-
-
- @pytest.mark.slow
- @pytest.mark.parametrize(
-     "task, dynamic, int8, half, batch, nms",
-     [  # generate all combinations except for exclusion cases
-         (task, dynamic, int8, half, batch, nms)
-         for task, dynamic, int8, half, batch, nms in product(TASKS, [False], [False], [False], [1, 2], [True, False])
-         if not (task == "classify" and nms)
-     ],
- )
- def test_export_torchscript_matrix(task, dynamic, int8, half, batch, nms):
-     """Tests YOLO model exports to TorchScript format under varied configurations."""
-     file = YOLO(TASK2MODEL[task]).export(
-         format="torchscript", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, nms=nms
-     )
-     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
-     Path(file).unlink()  # cleanup
-
-
- @pytest.mark.slow
- @pytest.mark.skipif(not MACOS, reason="CoreML inference only supported on macOS")
- @pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
- @pytest.mark.skipif(checks.IS_PYTHON_3_13, reason="CoreML not supported in Python 3.13")
- @pytest.mark.parametrize(
-     "task, dynamic, int8, half, batch",
-     [  # generate all combinations except for exclusion cases
-         (task, dynamic, int8, half, batch)
-         for task, dynamic, int8, half, batch in product(TASKS, [False], [True, False], [True, False], [1])
-         if not (int8 and half)
-     ],
- )
- def test_export_coreml_matrix(task, dynamic, int8, half, batch):
-     """Test YOLO exports to CoreML format with various parameter configurations."""
-     file = YOLO(TASK2MODEL[task]).export(
-         format="coreml",
-         imgsz=32,
-         dynamic=dynamic,
-         int8=int8,
-         half=half,
-         batch=batch,
-     )
-     YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference at batch=3
-     shutil.rmtree(file)  # cleanup
-
-
- @pytest.mark.slow
- @pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
- @pytest.mark.skipif(
-     not LINUX or IS_RASPBERRYPI,
-     reason="Test disabled as TF suffers from install conflicts on Windows, macOS and Raspberry Pi",
- )
- @pytest.mark.parametrize(
-     "task, dynamic, int8, half, batch, nms",
-     [  # generate all combinations except for exclusion cases
-         (task, dynamic, int8, half, batch, nms)
-         for task, dynamic, int8, half, batch, nms in product(
-             TASKS, [False], [True, False], [True, False], [1], [True, False]
-         )
-         if not ((int8 and half) or (task == "classify" and nms) or (ARM64 and nms))
-     ],
- )
- def test_export_tflite_matrix(task, dynamic, int8, half, batch, nms):
-     """Test YOLO exports to TFLite format considering various export configurations."""
-     file = YOLO(TASK2MODEL[task]).export(
-         format="tflite", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, nms=nms
-     )
-     YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference at batch=3
-     Path(file).unlink()  # cleanup
-
-
- @pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
- @pytest.mark.skipif(WINDOWS, reason="CoreML not supported on Windows")  # RuntimeError: BlobWriter not loaded
- @pytest.mark.skipif(LINUX and ARM64, reason="CoreML not supported on aarch64 Linux")
- @pytest.mark.skipif(checks.IS_PYTHON_3_13, reason="CoreML not supported in Python 3.13")
- def test_export_coreml():
-     """Test YOLO exports to CoreML format and check for errors."""
-     # Capture stdout and stderr
-     stdout, stderr = io.StringIO(), io.StringIO()
-     with redirect_stdout(stdout), redirect_stderr(stderr):
-         YOLO(MODEL).export(format="coreml", nms=True, imgsz=32)
-         if MACOS:
-             file = YOLO(MODEL).export(format="coreml", imgsz=32)
-             YOLO(file)(SOURCE, imgsz=32)  # model prediction only supported on macOS for nms=False models
-
-     # Check captured output for errors
-     output = stdout.getvalue() + stderr.getvalue()
-     assert "Error" not in output, f"CoreML export produced errors: {output}"
-     assert "You will not be able to run predict()" not in output, "CoreML export has predict() error"
-
-
- @pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
- @pytest.mark.skipif(not LINUX, reason="Test disabled as TF suffers from install conflicts on Windows and macOS")
- def test_export_tflite():
-     """Test YOLO exports to TFLite format under specific OS and Python version conditions."""
-     model = YOLO(MODEL)
-     file = model.export(format="tflite", imgsz=32)
-     YOLO(file)(SOURCE, imgsz=32)
-
-
- @pytest.mark.skipif(True, reason="Test disabled")
- @pytest.mark.skipif(not LINUX, reason="TF suffers from install conflicts on Windows and macOS")
- def test_export_pb():
-     """Test YOLO exports to TensorFlow's Protobuf (*.pb) format."""
-     model = YOLO(MODEL)
-     file = model.export(format="pb", imgsz=32)
-     YOLO(file)(SOURCE, imgsz=32)
-
-
- @pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirements conflict.")
- def test_export_paddle():
-     """Test YOLO exports to Paddle format, noting protobuf conflicts with ONNX."""
-     YOLO(MODEL).export(format="paddle", imgsz=32)
-
-
- @pytest.mark.slow
- def test_export_mnn():
-     """Test YOLO exports to MNN format (WARNING: MNN test must precede NCNN test or CI error on Windows)."""
-     file = YOLO(MODEL).export(format="mnn", imgsz=32)
-     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
-
-
- @pytest.mark.slow
- def test_export_ncnn():
-     """Test YOLO exports to NCNN format."""
-     file = YOLO(MODEL).export(format="ncnn", imgsz=32)
-     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
-
-
- @pytest.mark.skipif(True, reason="Test disabled as keras and tensorflow version conflicts with TFlite export.")
- @pytest.mark.skipif(not LINUX or MACOS, reason="Skipping test on Windows and Macos")
- def test_export_imx():
-     """Test YOLO exports to IMX format."""
-     model = YOLO("yolov8n.pt")
-     file = model.export(format="imx", imgsz=32)
-     YOLO(file)(SOURCE, imgsz=32)
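Note: the export-matrix tests above all build their parameter grids the same way: itertools.product enumerates every combination, and a comprehension filter drops unsupported pairings such as INT8 together with FP16. A self-contained sketch of that grid construction (the task list mirrors ultralytics.cfg.TASKS):

```python
from itertools import product

TASKS = ["detect", "segment", "classify", "pose", "obb"]  # as in ultralytics.cfg

# Enumerate all (task, int8, half) combinations, then filter out
# the mutually exclusive int8+half quantization pairing.
cases = [
    (task, int8, half)
    for task, int8, half in product(TASKS, [True, False], [True, False])
    if not (int8 and half)
]
print(len(cases))  # 15 = 5 tasks x 3 allowed precision combinations
```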
tests/test_integrations.py DELETED
@@ -1,146 +0,0 @@
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
- import contextlib
- import os
- import subprocess
- import time
- from pathlib import Path
-
- import pytest
-
- from tests import MODEL, SOURCE, TMP
- from ultralytics import YOLO, download
- from ultralytics.utils import DATASETS_DIR, SETTINGS
- from ultralytics.utils.checks import check_requirements
-
-
- @pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
- def test_model_ray_tune():
-     """Tune YOLO model using Ray for hyperparameter optimization."""
-     YOLO("yolo11n-cls.yaml").tune(
-         use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
-     )
-
-
- @pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
- def test_mlflow():
-     """Test training with MLflow tracking enabled (see https://mlflow.org/ for details)."""
-     SETTINGS["mlflow"] = True
-     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
-     SETTINGS["mlflow"] = False
-
-
- @pytest.mark.skipif(True, reason="Test failing in scheduled CI https://github.com/ultralytics/ultralytics/pull/8868")
- @pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
- def test_mlflow_keep_run_active():
-     """Ensure MLflow run status matches MLFLOW_KEEP_RUN_ACTIVE environment variable settings."""
-     import mlflow
-
-     SETTINGS["mlflow"] = True
-     run_name = "Test Run"
-     os.environ["MLFLOW_RUN"] = run_name
-
-     # Test with MLFLOW_KEEP_RUN_ACTIVE=True
-     os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
-     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
-     status = mlflow.active_run().info.status
-     assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"
-
-     run_id = mlflow.active_run().info.run_id
-
-     # Test with MLFLOW_KEEP_RUN_ACTIVE=False
-     os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
-     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
-     status = mlflow.get_run(run_id=run_id).info.status
-     assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"
-
-     # Test with MLFLOW_KEEP_RUN_ACTIVE not set
-     os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
-     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
-     status = mlflow.get_run(run_id=run_id).info.status
-     assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
-     SETTINGS["mlflow"] = False
-
-
- @pytest.mark.skipif(not check_requirements("tritonclient", install=False), reason="tritonclient[all] not installed")
- def test_triton():
-     """Test NVIDIA Triton Server functionalities with YOLO model."""
-     check_requirements("tritonclient[all]")
-     from tritonclient.http import InferenceServerClient  # noqa
-
-     # Create variables
-     model_name = "yolo"
-     triton_repo = TMP / "triton_repo"  # Triton repo path
-     triton_model = triton_repo / model_name  # Triton model path
-
-     # Export model to ONNX
-     f = YOLO(MODEL).export(format="onnx", dynamic=True)
-
-     # Prepare Triton repo
-     (triton_model / "1").mkdir(parents=True, exist_ok=True)
-     Path(f).rename(triton_model / "1" / "model.onnx")
-     (triton_model / "config.pbtxt").touch()
-
-     # Define image https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
-     tag = "nvcr.io/nvidia/tritonserver:23.09-py3"  # 6.4 GB
-
-     # Pull the image
-     subprocess.call(f"docker pull {tag}", shell=True)
-
-     # Run the Triton server and capture the container ID
-     container_id = (
-         subprocess.check_output(
-             f"docker run -d --rm -v {triton_repo}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models",
-             shell=True,
-         )
-         .decode("utf-8")
-         .strip()
-     )
-
-     # Wait for the Triton server to start
-     triton_client = InferenceServerClient(url="localhost:8000", verbose=False, ssl=False)
-
-     # Wait until model is ready
-     for _ in range(10):
-         with contextlib.suppress(Exception):
-             assert triton_client.is_model_ready(model_name)
-             break
-         time.sleep(1)
-
-     # Check Triton inference
-     YOLO(f"http://localhost:8000/{model_name}", "detect")(SOURCE)  # exported model inference
-
-     # Kill and remove the container at the end of the test
-     subprocess.call(f"docker kill {container_id}", shell=True)
-
-
- @pytest.mark.skipif(not check_requirements("pycocotools", install=False), reason="pycocotools not installed")
- def test_pycocotools():
-     """Validate YOLO model predictions on COCO dataset using pycocotools."""
-     from ultralytics.models.yolo.detect import DetectionValidator
-     from ultralytics.models.yolo.pose import PoseValidator
-     from ultralytics.models.yolo.segment import SegmentationValidator
-
-     # Download annotations after each dataset downloads first
-     url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-
-     args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
-     validator = DetectionValidator(args=args)
-     validator()
-     validator.is_coco = True
-     download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
-     _ = validator.eval_json(validator.stats)
-
-     args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
-     validator = SegmentationValidator(args=args)
-     validator()
-     validator.is_coco = True
-     download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
-     _ = validator.eval_json(validator.stats)
-
-     args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
-     validator = PoseValidator(args=args)
-     validator()
-     validator.is_coco = True
-     download(f"{url}person_keypoints_val2017.json", dir=DATASETS_DIR / "coco8-pose/annotations")
-     _ = validator.eval_json(validator.stats)
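Note: test_triton above encodes the model-repository layout Triton expects: <repo>/<model_name>/<version>/model.onnx plus a config.pbtxt (left empty so Triton auto-configures from the ONNX model). A sketch of just the repository preparation, with an illustrative path standing in for the test's temporary directory:

```python
from pathlib import Path

from ultralytics import YOLO

triton_repo = Path("/tmp/triton_repo")  # illustrative location
triton_model = triton_repo / "yolo"  # one directory per served model

# Export to ONNX and move the file into the versioned layout Triton requires.
onnx_file = YOLO("yolo11n.pt").export(format="onnx", dynamic=True)
(triton_model / "1").mkdir(parents=True, exist_ok=True)  # "1" is the model version
Path(onnx_file).rename(triton_model / "1" / "model.onnx")
(triton_model / "config.pbtxt").touch()  # empty config: Triton infers it from the model
# Then start the server, as in the deleted test:
#   docker run -d --rm -v /tmp/triton_repo:/models -p 8000:8000 \
#       nvcr.io/nvidia/tritonserver:23.09-py3 tritonserver --model-repository=/models
```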