ultralytics 8.3.117__py3-none-any.whl → 8.3.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/__init__.py +22 -0
- tests/conftest.py +83 -0
- tests/test_cli.py +128 -0
- tests/test_cuda.py +164 -0
- tests/test_engine.py +131 -0
- tests/test_exports.py +231 -0
- tests/test_integrations.py +154 -0
- tests/test_python.py +695 -0
- tests/test_solutions.py +176 -0
- ultralytics/__init__.py +1 -1
- ultralytics/data/augment.py +3 -0
- ultralytics/data/base.py +9 -2
- ultralytics/data/dataset.py +1 -1
- ultralytics/engine/exporter.py +1 -4
- ultralytics/models/yolo/detect/predict.py +1 -1
- ultralytics/models/yolo/model.py +2 -3
- ultralytics/models/yolo/obb/train.py +1 -1
- ultralytics/models/yolo/pose/predict.py +1 -1
- ultralytics/models/yolo/pose/train.py +1 -1
- ultralytics/models/yolo/pose/val.py +1 -1
- ultralytics/models/yolo/segment/train.py +3 -3
- ultralytics/nn/autobackend.py +2 -5
- ultralytics/nn/text_model.py +97 -13
- ultralytics/utils/benchmarks.py +1 -1
- ultralytics/utils/downloads.py +1 -0
- {ultralytics-8.3.117.dist-info → ultralytics-8.3.118.dist-info}/METADATA +1 -1
- {ultralytics-8.3.117.dist-info → ultralytics-8.3.118.dist-info}/RECORD +31 -22
- {ultralytics-8.3.117.dist-info → ultralytics-8.3.118.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.117.dist-info → ultralytics-8.3.118.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.117.dist-info → ultralytics-8.3.118.dist-info}/licenses/LICENSE +0 -0
- {ultralytics-8.3.117.dist-info → ultralytics-8.3.118.dist-info}/top_level.txt +0 -0
tests/__init__.py
ADDED
@@ -0,0 +1,22 @@
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks

# Constants used in tests
MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt"  # test spaces in path
CFG = "yolo11n.yaml"  # model YAML config filename
SOURCE = ASSETS / "bus.jpg"  # single-image inference source
SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]  # file, dir, glob, recursive glob
TMP = (ROOT / "../tests/tmp").resolve()  # temp directory for test files
CUDA_IS_AVAILABLE = checks.cuda_is_available()  # computed once at import so all test modules share one answer
CUDA_DEVICE_COUNT = checks.cuda_device_count()

# Names exported to the individual test modules via `from tests import ...`
__all__ = (
    "MODEL",
    "CFG",
    "SOURCE",
    "SOURCES_LIST",
    "TMP",
    "CUDA_IS_AVAILABLE",
    "CUDA_DEVICE_COUNT",
)
|
tests/conftest.py
ADDED
@@ -0,0 +1,83 @@
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
2
|
+
|
3
|
+
import shutil
|
4
|
+
from pathlib import Path
|
5
|
+
|
6
|
+
from tests import TMP
|
7
|
+
|
8
|
+
|
9
|
+
def pytest_addoption(parser):
    """
    Register custom command-line flags with pytest.

    Args:
        parser (pytest.config.Parser): Parser used to declare additional command-line options.

    Returns:
        (None)
    """
    # Opt-in flag so slow tests are skipped by default (see pytest_collection_modifyitems)
    parser.addoption("--slow", action="store_true", default=False, help="Run slow tests")
|
20
|
+
|
21
|
+
|
22
|
+
def pytest_collection_modifyitems(config, items):
    """
    Filter collected tests, dropping those marked 'slow' unless --slow was passed.

    Args:
        config (pytest.config.Config): Config object exposing command-line options.
        items (list): Collected pytest item objects; filtered in place.

    Returns:
        (None): The 'items' list is modified in place.
    """
    if config.getoption("--slow"):
        return  # --slow given: keep everything
    # Replace the collection contents in place so pytest sees the reduced set
    kept = [it for it in items if "slow" not in it.keywords]
    items[:] = kept
|
36
|
+
|
37
|
+
|
38
|
+
def pytest_sessionstart(session):
    """
    Prepare the session before test collection begins.

    Called automatically by pytest once the Session object exists. Seeds RNGs for
    reproducibility and recreates an empty tests/tmp working directory.

    Args:
        session (pytest.Session): The pytest session object.

    Returns:
        (None)
    """
    from ultralytics.utils.torch_utils import init_seeds

    init_seeds()
    # Start every session from a clean, empty tests/tmp directory
    shutil.rmtree(TMP, ignore_errors=True)
    TMP.mkdir(parents=True, exist_ok=True)
|
56
|
+
|
57
|
+
|
58
|
+
def pytest_terminal_summary(terminalreporter, exitstatus, config):
    """
    Clean up files and directories created during the test session.

    Called automatically by pytest at the end of the entire test session.

    Args:
        terminalreporter (pytest.terminal.TerminalReporter): Terminal reporter used for output.
        exitstatus (int): The exit status of the test run.
        config (pytest.config.Config): The pytest config object.

    Returns:
        (None)
    """
    from ultralytics.utils import WEIGHTS_DIR

    # Delete stray files produced during testing (missing files are ignored)
    exported_files = [p for pattern in ["*.onnx", "*.torchscript"] for p in WEIGHTS_DIR.rglob(pattern)]
    leftovers = ["decelera_portrait_min.mov", "bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"]
    for f in leftovers + exported_files:
        Path(f).unlink(missing_ok=True)

    # Delete export directories plus the temp and pytest cache directories
    exported_dirs = [p for pattern in ["*.mlpackage", "*_openvino_model"] for p in WEIGHTS_DIR.rglob(pattern)]
    for d in [WEIGHTS_DIR / "path with spaces", TMP.parents[1] / ".pytest_cache", TMP] + exported_dirs:
        shutil.rmtree(d, ignore_errors=True)
|
tests/test_cli.py
ADDED
@@ -0,0 +1,128 @@
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
2
|
+
|
3
|
+
import subprocess
|
4
|
+
|
5
|
+
import pytest
|
6
|
+
from PIL import Image
|
7
|
+
|
8
|
+
from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE
|
9
|
+
from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
|
10
|
+
from ultralytics.utils import ARM64, ASSETS, LINUX, WEIGHTS_DIR, checks
|
11
|
+
from ultralytics.utils.torch_utils import TORCH_1_9
|
12
|
+
|
13
|
+
# Constants
|
14
|
+
TASK_MODEL_DATA = [(task, WEIGHTS_DIR / TASK2MODEL[task], TASK2DATA[task]) for task in TASKS]
|
15
|
+
MODELS = [WEIGHTS_DIR / TASK2MODEL[task] for task in TASKS]
|
16
|
+
|
17
|
+
|
18
|
+
def run(cmd: str) -> None:
    """Execute a whitespace-delimited command via subprocess, raising CalledProcessError on nonzero exit."""
    args = cmd.split()  # no shell=True: the command is passed as an argv list
    subprocess.run(args, check=True)
|
21
|
+
|
22
|
+
|
23
|
+
def test_special_modes() -> None:
    """Test various special command-line modes for YOLO functionality."""
    # Each of these subcommands must exit cleanly
    for mode in ("help", "checks", "version", "settings reset", "cfg"):
        run(f"yolo {mode}")
|
30
|
+
|
31
|
+
|
32
|
+
@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
def test_train(task: str, model: str, data: str) -> None:
    """Test YOLO training for different tasks, models, and datasets."""
    cmd = f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 cache=disk"  # tiny imgsz for speed
    run(cmd)
|
36
|
+
|
37
|
+
|
38
|
+
@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
def test_val(task: str, model: str, data: str) -> None:
    """Test YOLO validation process for specified task, model, and data using a shell command."""
    args = ["yolo", "val", task, f"model={model}", f"data={data}", "imgsz=32", "save_txt", "save_json"]
    run(" ".join(args))
|
42
|
+
|
43
|
+
|
44
|
+
@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
def test_predict(task: str, model: str, data: str) -> None:
    """Test YOLO prediction on provided sample assets for specified task and model."""
    # 'task' and 'data' come from the parametrize fixture; the predict CLI only needs the model
    parts = ["yolo", "predict", f"model={model}", f"source={ASSETS}", "imgsz=32", "save", "save_crop", "save_txt"]
    run(" ".join(parts))
|
48
|
+
|
49
|
+
|
50
|
+
@pytest.mark.parametrize("model", MODELS)
def test_export(model: str) -> None:
    """Test exporting a YOLO model to TorchScript format."""
    cmd = " ".join(["yolo", "export", f"model={model}", "format=torchscript", "imgsz=32"])
    run(cmd)
|
54
|
+
|
55
|
+
|
56
|
+
def test_rtdetr(task: str = "detect", model: str = "yolov8n-rtdetr.yaml", data: str = "coco8.yaml") -> None:
    """Test the RTDETR functionality within Ultralytics for detection tasks using specified model and data."""
    bus = ASSETS / "bus.jpg"
    # Warning: must use imgsz=640 (note also add coma, spaces, fraction=0.25 args to test single-image training)
    # The deliberately malformed spacing below exercises the CLI argument parser
    run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")  # spaces
    run(f"yolo predict {task} model={model} source={bus} imgsz=160 save save_crop save_txt")
    if TORCH_1_9:
        weights = WEIGHTS_DIR / "rtdetr-l.pt"
        run(f"yolo predict {task} model={weights} source={bus} imgsz=160 save save_crop save_txt")
|
64
|
+
|
65
|
+
|
66
|
+
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
@pytest.mark.skipif(
    checks.IS_PYTHON_3_8 and LINUX and ARM64,
    reason="MobileSAM with CLIP is not supported in Python 3.8 and aarch64 Linux",
)
def test_fastsam(
    task: str = "segment", model: str = WEIGHTS_DIR / "FastSAM-s.pt", data: str = "coco8-seg.yaml"
) -> None:
    """Test FastSAM model for segmenting objects in images using various prompts within Ultralytics."""
    source = ASSETS / "bus.jpg"

    # Exercise the CLI entry points first
    run(f"yolo segment val {task} model={model} data={data} imgsz=32")
    run(f"yolo segment predict model={model} source={source} imgsz=32 save save_crop save_txt")

    from ultralytics import FastSAM
    from ultralytics.models.sam import Predictor

    # Create a FastSAM model
    fastsam = FastSAM(model)  # or FastSAM-x.pt

    # Run inference on both a file path and a PIL image
    for img in (source, Image.open(source)):
        results = fastsam(img, device="cpu", retina_masks=True, imgsz=320, conf=0.4, iou=0.9)

        # Remove small regions from the predicted masks
        new_masks, _ = Predictor.remove_small_regions(results[0].masks.data, min_area=20)

    # Run inference with bboxes and points and texts prompt at the same time
    fastsam(source, bboxes=[439, 437, 524, 709], points=[[200, 200]], labels=[1], texts="a photo of a dog")
|
95
|
+
|
96
|
+
|
97
|
+
def test_mobilesam() -> None:
    """Test MobileSAM segmentation with point prompts using Ultralytics."""
    from ultralytics import SAM

    # Load the model and pick a sample image
    sam = SAM(WEIGHTS_DIR / "mobile_sam.pt")
    source = ASSETS / "zidane.jpg"

    # Predict a segment based on a 1D point prompt and 1D labels.
    sam.predict(source, points=[900, 370], labels=[1])

    # Predict a segment based on 3D points and 2D labels (multiple points per object).
    sam.predict(source, points=[[[900, 370], [1000, 100]]], labels=[[1, 1]])

    # Predict a segment based on a box prompt
    sam.predict(source, bboxes=[439, 437, 524, 709], save=True)

    # Predict all
    # sam(source)
|
118
|
+
|
119
|
+
|
120
|
+
# Slow Tests -----------------------------------------------------------------------------------------------------------
|
121
|
+
@pytest.mark.slow
@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
@pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason="DDP is not available")
def test_train_gpu(task: str, model: str, data: str) -> None:
    """Test YOLO training on GPU(s) for various tasks and models."""
    # Single GPU first, then multi-GPU (DDP) with the same settings
    for device in ("0", "0,1"):
        run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device={device}")
|
tests/test_cuda.py
ADDED
@@ -0,0 +1,164 @@
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
2
|
+
|
3
|
+
from itertools import product
|
4
|
+
from pathlib import Path
|
5
|
+
|
6
|
+
import pytest
|
7
|
+
import torch
|
8
|
+
|
9
|
+
from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
|
10
|
+
from ultralytics import YOLO
|
11
|
+
from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
|
12
|
+
from ultralytics.utils import ASSETS, WEIGHTS_DIR
|
13
|
+
from ultralytics.utils.checks import check_amp
|
14
|
+
|
15
|
+
|
16
|
+
def test_checks():
    """Validate CUDA settings against torch CUDA functions."""
    # The values cached at import time in tests/__init__ must agree with live torch queries
    assert torch.cuda.is_available() == CUDA_IS_AVAILABLE
    assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
|
20
|
+
|
21
|
+
|
22
|
+
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_amp():
    """Test AMP training checks."""
    # Move the underlying torch module to the GPU before probing AMP support
    net = YOLO("yolo11n.pt").model.cuda()
    assert check_amp(net)
|
27
|
+
|
28
|
+
|
29
|
+
@pytest.mark.slow
@pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
@pytest.mark.parametrize(
    "task, dynamic, int8, half, batch",
    [  # generate all combinations but exclude those where both int8 and half are True
        (task, dynamic, int8, half, batch)
        # Note: tests reduced below pending compute availability expansion as GPU CI runner utilization is high
        # for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
        for task, dynamic, int8, half, batch in product(TASKS, [True], [True], [False], [2])
        if not (int8 and half)  # exclude cases where both int8 and half are True
    ],
)
def test_export_engine_matrix(task, dynamic, int8, half, batch):
    """
    Test YOLO model export to TensorRT format for various configurations and run inference.

    Args:
        task (str): Task type like 'detect', 'segment', etc.
        dynamic (bool): Whether to use dynamic input size.
        int8 (bool): Whether to use INT8 precision.
        half (bool): Whether to use FP16 precision.
        batch (int): Batch size for export.
    """
    file = YOLO(TASK2MODEL[task]).export(
        format="engine",
        imgsz=32,
        dynamic=dynamic,
        int8=int8,
        half=half,
        batch=batch,
        data=TASK2DATA[task],
        workspace=1,  # reduce workspace GB for less resource utilization during testing
        simplify=True,  # use 'onnxslim'
    )
    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
    Path(file).unlink()  # cleanup engine file
    if int8:
        # Fix: explicit 'if' instead of a conditional-expression statement, and missing_ok=True so
        # cleanup does not raise FileNotFoundError if no INT8 calibration cache was written
        Path(file).with_suffix(".cache").unlink(missing_ok=True)
|
67
|
+
|
68
|
+
|
69
|
+
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_train():
    """Test model training on a minimal dataset using available CUDA devices."""
    device = [0, 1] if CUDA_DEVICE_COUNT != 1 else 0  # use both GPUs when more than one is present
    YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)  # requires imgsz>=64
|
74
|
+
|
75
|
+
|
76
|
+
@pytest.mark.slow
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_multiple_devices():
    """Validate model prediction consistency across CPU and CUDA devices."""
    model = YOLO("yolo11n.pt")
    # Bounce the model between CPU and GPU, checking the device both before and after each inference
    moves = (
        (lambda m: m.cpu(), "cpu"),
        (lambda m: m.to("cuda:0"), "cuda:0"),
        (lambda m: m.cpu(), "cpu"),
        (lambda m: m.cuda(), "cuda:0"),
    )
    for move, expected in moves:
        model = move(model)
        assert str(model.device) == expected
        _ = model(SOURCE)  # inference on the current device
        assert str(model.device) == expected  # device must be unchanged by inference
|
100
|
+
|
101
|
+
|
102
|
+
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_autobatch():
    """Check optimal batch size for YOLO model training using autobatch utility."""
    from ultralytics.utils.autobatch import check_train_batch_size

    net = YOLO(MODEL).model.cuda()
    check_train_batch_size(net, imgsz=128, amp=True)
|
108
|
+
|
109
|
+
|
110
|
+
@pytest.mark.slow
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_utils_benchmarks():
    """Profile YOLO models for performance benchmarks."""
    from ultralytics.utils.benchmarks import ProfileModels

    # Pre-export a dynamic engine model to use dynamic inference
    YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
    profiler = ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1)
    profiler.profile()
|
119
|
+
|
120
|
+
|
121
|
+
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_sam():
    """Test SAM model predictions using different prompts, including bounding boxes and point annotations."""
    from ultralytics import SAM
    from ultralytics.models.sam import Predictor as SAMPredictor

    zidane = ASSETS / "zidane.jpg"

    # Load a model and display model information (optional)
    sam = SAM(WEIGHTS_DIR / "sam2.1_b.pt")
    sam.info()

    # Plain inference, then inference with a bboxes prompt
    sam(SOURCE, device=0)
    sam(SOURCE, bboxes=[439, 437, 524, 709], device=0)

    # Point prompts of increasing dimensionality:
    sam(zidane, points=[900, 370], device=0)  # no labels
    sam(zidane, points=[900, 370], labels=[1], device=0)  # 1D points, 1D labels
    sam(zidane, points=[[900, 370]], labels=[1], device=0)  # 2D points, 1D labels
    sam(zidane, points=[[400, 370], [900, 370]], labels=[1, 1], device=0)  # multiple 2D points
    sam(zidane, points=[[[900, 370], [1000, 100]]], labels=[[1, 1]], device=0)  # 3D points, 2D labels

    # Create SAMPredictor
    overrides = {"conf": 0.25, "task": "segment", "mode": "predict", "imgsz": 1024, "model": WEIGHTS_DIR / "mobile_sam.pt"}
    predictor = SAMPredictor(overrides=overrides)

    # Set image
    predictor.set_image(zidane)  # set with image file
    # predictor(bboxes=[439, 437, 524, 709])
    # predictor(points=[900, 370], labels=[1])

    # Reset image
    predictor.reset_image()
|
tests/test_engine.py
ADDED
@@ -0,0 +1,131 @@
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
2
|
+
|
3
|
+
import sys
|
4
|
+
from unittest import mock
|
5
|
+
|
6
|
+
from tests import MODEL
|
7
|
+
from ultralytics import YOLO
|
8
|
+
from ultralytics.cfg import get_cfg
|
9
|
+
from ultralytics.engine.exporter import Exporter
|
10
|
+
from ultralytics.models.yolo import classify, detect, segment
|
11
|
+
from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
|
12
|
+
|
13
|
+
|
14
|
+
def test_func(*args):  # noqa
    """Test function callback for evaluating YOLO model performance metrics."""
    message = "callback test passed"
    print(message)
|
17
|
+
|
18
|
+
|
19
|
+
def test_export():
    """Tests the model exporting function by adding a callback and asserting its execution."""
    exp = Exporter()
    exp.add_callback("on_export_start", test_func)
    assert test_func in exp.callbacks["on_export_start"], "callback test failed"
    exported = exp(model=YOLO("yolo11n.yaml").model)
    YOLO(exported)(ASSETS)  # exported model inference
|
26
|
+
|
27
|
+
|
28
|
+
def test_detect():
    """Test YOLO object detection training, validation, and prediction functionality."""
    overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False}
    cfg = get_cfg(DEFAULT_CFG)
    cfg.data = "coco8.yaml"
    cfg.imgsz = 32

    # Trainer with a callback attached before training
    trainer = detect.DetectionTrainer(overrides=overrides)
    trainer.add_callback("on_train_start", test_func)
    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
    trainer.train()

    # Validator runs against the best checkpoint
    val = detect.DetectionValidator(args=cfg)
    val.add_callback("on_val_start", test_func)
    assert test_func in val.callbacks["on_val_start"], "callback test failed"
    val(model=trainer.best)  # validate best.pt

    # Predictor
    pred = detect.DetectionPredictor(overrides={"imgsz": [64, 64]})
    pred.add_callback("on_predict_start", test_func)
    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
    # Confirm there is no issue with sys.argv being empty.
    with mock.patch.object(sys, "argv", []):
        result = pred(source=ASSETS, model=MODEL)
    assert len(result), "predictor test failed"

    # Resuming a completed run is expected to raise; reaching the end without an exception is a failure
    overrides["resume"] = trainer.last
    trainer = detect.DetectionTrainer(overrides=overrides)
    try:
        trainer.train()
    except Exception as e:
        print(f"Expected exception caught: {e}")
        return

    raise Exception("Resume test failed!")  # fix: original constructed the Exception but never raised it
|
65
|
+
|
66
|
+
|
67
|
+
def test_segment():
    """Tests image segmentation training, validation, and prediction pipelines using YOLO models."""
    overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
    cfg = get_cfg(DEFAULT_CFG)
    cfg.data = "coco8-seg.yaml"
    cfg.imgsz = 32

    # Trainer with a callback attached before training
    trainer = segment.SegmentationTrainer(overrides=overrides)
    trainer.add_callback("on_train_start", test_func)
    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
    trainer.train()

    # Validator runs against the best checkpoint
    val = segment.SegmentationValidator(args=cfg)
    val.add_callback("on_val_start", test_func)
    assert test_func in val.callbacks["on_val_start"], "callback test failed"
    val(model=trainer.best)  # validate best.pt

    # Predictor
    pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
    pred.add_callback("on_predict_start", test_func)
    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
    result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt")
    assert len(result), "predictor test failed"

    # Test resume: resuming a completed run is expected to raise; no exception means the test failed
    overrides["resume"] = trainer.last
    trainer = segment.SegmentationTrainer(overrides=overrides)
    try:
        trainer.train()
    except Exception as e:
        print(f"Expected exception caught: {e}")
        return

    raise Exception("Resume test failed!")  # fix: original constructed the Exception but never raised it
|
104
|
+
|
105
|
+
|
106
|
+
def test_classify():
    """Test image classification including training, validation, and prediction phases."""
    overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
    cfg = get_cfg(DEFAULT_CFG)
    cfg.data = "imagenet10"
    cfg.imgsz = 32

    # Trainer with a callback attached before training
    trainer = classify.ClassificationTrainer(overrides=overrides)
    trainer.add_callback("on_train_start", test_func)
    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
    trainer.train()

    # Validator runs against the best checkpoint
    validator = classify.ClassificationValidator(args=cfg)
    validator.add_callback("on_val_start", test_func)
    assert test_func in validator.callbacks["on_val_start"], "callback test failed"
    validator(model=trainer.best)

    # Predictor on the sample assets
    predictor = classify.ClassificationPredictor(overrides={"imgsz": [64, 64]})
    predictor.add_callback("on_predict_start", test_func)
    assert test_func in predictor.callbacks["on_predict_start"], "callback test failed"
    result = predictor(source=ASSETS, model=trainer.best)
    assert len(result), "predictor test failed"
|