dgenerate-ultralytics-headless 8.3.196-py3-none-any.whl → 8.3.248-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/METADATA +33 -34
- dgenerate_ultralytics_headless-8.3.248.dist-info/RECORD +298 -0
- tests/__init__.py +5 -7
- tests/conftest.py +8 -15
- tests/test_cli.py +8 -10
- tests/test_cuda.py +9 -10
- tests/test_engine.py +29 -2
- tests/test_exports.py +69 -21
- tests/test_integrations.py +8 -11
- tests/test_python.py +109 -71
- tests/test_solutions.py +170 -159
- ultralytics/__init__.py +27 -9
- ultralytics/cfg/__init__.py +57 -64
- ultralytics/cfg/datasets/Argoverse.yaml +7 -6
- ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
- ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
- ultralytics/cfg/datasets/ImageNet.yaml +1 -1
- ultralytics/cfg/datasets/Objects365.yaml +19 -15
- ultralytics/cfg/datasets/SKU-110K.yaml +1 -1
- ultralytics/cfg/datasets/VOC.yaml +19 -21
- ultralytics/cfg/datasets/VisDrone.yaml +5 -5
- ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
- ultralytics/cfg/datasets/coco-pose.yaml +24 -2
- ultralytics/cfg/datasets/coco.yaml +2 -2
- ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
- ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
- ultralytics/cfg/datasets/dog-pose.yaml +28 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
- ultralytics/cfg/datasets/kitti.yaml +27 -0
- ultralytics/cfg/datasets/lvis.yaml +7 -7
- ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
- ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
- ultralytics/cfg/datasets/xView.yaml +16 -16
- ultralytics/cfg/default.yaml +96 -94
- ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
- ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
- ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
- ultralytics/cfg/models/v6/yolov6.yaml +1 -1
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
- ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
- ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
- ultralytics/cfg/trackers/botsort.yaml +16 -17
- ultralytics/cfg/trackers/bytetrack.yaml +9 -11
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +3 -4
- ultralytics/data/augment.py +286 -476
- ultralytics/data/base.py +18 -26
- ultralytics/data/build.py +151 -26
- ultralytics/data/converter.py +38 -50
- ultralytics/data/dataset.py +47 -75
- ultralytics/data/loaders.py +42 -49
- ultralytics/data/split.py +5 -6
- ultralytics/data/split_dota.py +8 -15
- ultralytics/data/utils.py +41 -45
- ultralytics/engine/exporter.py +462 -462
- ultralytics/engine/model.py +150 -191
- ultralytics/engine/predictor.py +30 -40
- ultralytics/engine/results.py +177 -311
- ultralytics/engine/trainer.py +193 -120
- ultralytics/engine/tuner.py +77 -63
- ultralytics/engine/validator.py +39 -22
- ultralytics/hub/__init__.py +16 -19
- ultralytics/hub/auth.py +6 -12
- ultralytics/hub/google/__init__.py +7 -10
- ultralytics/hub/session.py +15 -25
- ultralytics/hub/utils.py +5 -8
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +8 -10
- ultralytics/models/fastsam/predict.py +19 -30
- ultralytics/models/fastsam/utils.py +1 -2
- ultralytics/models/fastsam/val.py +5 -7
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +5 -8
- ultralytics/models/nas/predict.py +7 -9
- ultralytics/models/nas/val.py +1 -2
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +7 -8
- ultralytics/models/rtdetr/predict.py +15 -19
- ultralytics/models/rtdetr/train.py +10 -13
- ultralytics/models/rtdetr/val.py +21 -23
- ultralytics/models/sam/__init__.py +15 -2
- ultralytics/models/sam/amg.py +14 -20
- ultralytics/models/sam/build.py +26 -19
- ultralytics/models/sam/build_sam3.py +377 -0
- ultralytics/models/sam/model.py +29 -32
- ultralytics/models/sam/modules/blocks.py +83 -144
- ultralytics/models/sam/modules/decoders.py +22 -40
- ultralytics/models/sam/modules/encoders.py +44 -101
- ultralytics/models/sam/modules/memory_attention.py +16 -30
- ultralytics/models/sam/modules/sam.py +206 -79
- ultralytics/models/sam/modules/tiny_encoder.py +64 -83
- ultralytics/models/sam/modules/transformer.py +18 -28
- ultralytics/models/sam/modules/utils.py +174 -50
- ultralytics/models/sam/predict.py +2268 -366
- ultralytics/models/sam/sam3/__init__.py +3 -0
- ultralytics/models/sam/sam3/decoder.py +546 -0
- ultralytics/models/sam/sam3/encoder.py +529 -0
- ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
- ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
- ultralytics/models/sam/sam3/model_misc.py +199 -0
- ultralytics/models/sam/sam3/necks.py +129 -0
- ultralytics/models/sam/sam3/sam3_image.py +339 -0
- ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
- ultralytics/models/sam/sam3/vitdet.py +547 -0
- ultralytics/models/sam/sam3/vl_combiner.py +160 -0
- ultralytics/models/utils/loss.py +14 -26
- ultralytics/models/utils/ops.py +13 -17
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +9 -12
- ultralytics/models/yolo/classify/train.py +15 -41
- ultralytics/models/yolo/classify/val.py +34 -32
- ultralytics/models/yolo/detect/predict.py +8 -11
- ultralytics/models/yolo/detect/train.py +13 -32
- ultralytics/models/yolo/detect/val.py +75 -63
- ultralytics/models/yolo/model.py +37 -53
- ultralytics/models/yolo/obb/predict.py +5 -14
- ultralytics/models/yolo/obb/train.py +11 -14
- ultralytics/models/yolo/obb/val.py +42 -39
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +7 -22
- ultralytics/models/yolo/pose/train.py +10 -22
- ultralytics/models/yolo/pose/val.py +40 -59
- ultralytics/models/yolo/segment/predict.py +16 -20
- ultralytics/models/yolo/segment/train.py +3 -12
- ultralytics/models/yolo/segment/val.py +106 -56
- ultralytics/models/yolo/world/train.py +12 -16
- ultralytics/models/yolo/world/train_world.py +11 -34
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +16 -23
- ultralytics/models/yolo/yoloe/train.py +31 -56
- ultralytics/models/yolo/yoloe/train_seg.py +5 -10
- ultralytics/models/yolo/yoloe/val.py +16 -21
- ultralytics/nn/__init__.py +7 -7
- ultralytics/nn/autobackend.py +152 -80
- ultralytics/nn/modules/__init__.py +60 -60
- ultralytics/nn/modules/activation.py +4 -6
- ultralytics/nn/modules/block.py +133 -217
- ultralytics/nn/modules/conv.py +52 -97
- ultralytics/nn/modules/head.py +64 -116
- ultralytics/nn/modules/transformer.py +79 -89
- ultralytics/nn/modules/utils.py +16 -21
- ultralytics/nn/tasks.py +111 -156
- ultralytics/nn/text_model.py +40 -67
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +11 -17
- ultralytics/solutions/analytics.py +15 -16
- ultralytics/solutions/config.py +5 -6
- ultralytics/solutions/distance_calculation.py +10 -13
- ultralytics/solutions/heatmap.py +7 -13
- ultralytics/solutions/instance_segmentation.py +5 -8
- ultralytics/solutions/object_blurrer.py +7 -10
- ultralytics/solutions/object_counter.py +12 -19
- ultralytics/solutions/object_cropper.py +8 -14
- ultralytics/solutions/parking_management.py +33 -31
- ultralytics/solutions/queue_management.py +10 -12
- ultralytics/solutions/region_counter.py +9 -12
- ultralytics/solutions/security_alarm.py +15 -20
- ultralytics/solutions/similarity_search.py +13 -17
- ultralytics/solutions/solutions.py +75 -74
- ultralytics/solutions/speed_estimation.py +7 -10
- ultralytics/solutions/streamlit_inference.py +4 -7
- ultralytics/solutions/templates/similarity-search.html +7 -18
- ultralytics/solutions/trackzone.py +7 -10
- ultralytics/solutions/vision_eye.py +5 -8
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +3 -5
- ultralytics/trackers/bot_sort.py +10 -27
- ultralytics/trackers/byte_tracker.py +14 -30
- ultralytics/trackers/track.py +3 -6
- ultralytics/trackers/utils/gmc.py +11 -22
- ultralytics/trackers/utils/kalman_filter.py +37 -48
- ultralytics/trackers/utils/matching.py +12 -15
- ultralytics/utils/__init__.py +116 -116
- ultralytics/utils/autobatch.py +2 -4
- ultralytics/utils/autodevice.py +17 -18
- ultralytics/utils/benchmarks.py +70 -70
- ultralytics/utils/callbacks/base.py +8 -10
- ultralytics/utils/callbacks/clearml.py +5 -13
- ultralytics/utils/callbacks/comet.py +32 -46
- ultralytics/utils/callbacks/dvc.py +13 -18
- ultralytics/utils/callbacks/mlflow.py +4 -5
- ultralytics/utils/callbacks/neptune.py +7 -15
- ultralytics/utils/callbacks/platform.py +314 -38
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +23 -31
- ultralytics/utils/callbacks/wb.py +10 -13
- ultralytics/utils/checks.py +151 -87
- ultralytics/utils/cpu.py +3 -8
- ultralytics/utils/dist.py +19 -15
- ultralytics/utils/downloads.py +29 -41
- ultralytics/utils/errors.py +6 -14
- ultralytics/utils/events.py +2 -4
- ultralytics/utils/export/__init__.py +7 -0
- ultralytics/utils/{export.py → export/engine.py} +16 -16
- ultralytics/utils/export/imx.py +325 -0
- ultralytics/utils/export/tensorflow.py +231 -0
- ultralytics/utils/files.py +24 -28
- ultralytics/utils/git.py +9 -11
- ultralytics/utils/instance.py +30 -51
- ultralytics/utils/logger.py +212 -114
- ultralytics/utils/loss.py +15 -24
- ultralytics/utils/metrics.py +131 -160
- ultralytics/utils/nms.py +21 -30
- ultralytics/utils/ops.py +107 -165
- ultralytics/utils/patches.py +33 -21
- ultralytics/utils/plotting.py +122 -119
- ultralytics/utils/tal.py +28 -44
- ultralytics/utils/torch_utils.py +70 -187
- ultralytics/utils/tqdm.py +20 -20
- ultralytics/utils/triton.py +13 -19
- ultralytics/utils/tuner.py +17 -5
- dgenerate_ultralytics_headless-8.3.196.dist-info/RECORD +0 -281
- {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/top_level.txt +0 -0
tests/test_cli.py
CHANGED
@@ -1,13 +1,14 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
 import subprocess
+from pathlib import Path
 
 import pytest
 from PIL import Image
 
 from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODELS, TASK_MODEL_DATA
 from ultralytics.utils import ARM64, ASSETS, LINUX, WEIGHTS_DIR, checks
-from ultralytics.utils.torch_utils import
+from ultralytics.utils.torch_utils import TORCH_1_11
 
 
 def run(cmd: str) -> None:
@@ -33,7 +34,7 @@ def test_train(task: str, model: str, data: str) -> None:
 @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
 def test_val(task: str, model: str, data: str) -> None:
     """Test YOLO validation process for specified task, model, and data using a shell command."""
-    run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json")
+    run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json visualize")
 
 
 @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
@@ -48,15 +49,12 @@ def test_export(model: str) -> None:
     run(f"yolo export model={model} format=torchscript imgsz=32")
 
 
-
+@pytest.mark.skipif(not TORCH_1_11, reason="RTDETR requires torch>=1.11")
+def test_rtdetr(task: str = "detect", model: Path = WEIGHTS_DIR / "rtdetr-l.pt", data: str = "coco8.yaml") -> None:
     """Test the RTDETR functionality within Ultralytics for detection tasks using specified model and data."""
-    #
-    run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")  # spaces
+    # Add comma, spaces, fraction=0.25 args to test single-image training
     run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
-
-    weights = WEIGHTS_DIR / "rtdetr-l.pt"
-    run(f"yolo predict {task} model={weights} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
-    run(f"yolo train {task} model={weights} epochs=1 imgsz=160 cache=disk data=coco8.yaml")
+    run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")
 
 
 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
@@ -84,7 +82,7 @@ def test_fastsam(
     everything_results = sam_model(s, device="cpu", retina_masks=True, imgsz=320, conf=0.4, iou=0.9)
 
     # Remove small regions
-
+    _new_masks, _ = Predictor.remove_small_regions(everything_results[0].masks.data, min_area=20)
 
     # Run inference with bboxes and points and texts prompt at the same time
     sam_model(source, bboxes=[439, 437, 524, 709], points=[[200, 200]], labels=[1], texts="a photo of a dog")
tests/test_cuda.py
CHANGED
@@ -1,5 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+import os
 from itertools import product
 from pathlib import Path
 
@@ -68,15 +69,15 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
         half=half,
         batch=batch,
         simplify=simplify,
-        nms=nms
+        nms=nms,
         device=DEVICES[0],
+        # opset=20 if nms else None,  # fix ONNX Runtime errors with NMS
     )
     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, device=DEVICES[0])  # exported model inference
     Path(file).unlink()  # cleanup
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
 @pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 @pytest.mark.parametrize(
     "task, dynamic, int8, half, batch",
@@ -104,23 +105,21 @@ def test_export_engine_matrix(task, dynamic, int8, half, batch):
     )
     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, device=DEVICES[0])  # exported model inference
     Path(file).unlink()  # cleanup
-    Path(file).with_suffix(".cache").unlink() if int8 else None  # cleanup INT8 cache
+    Path(file).with_suffix(".cache").unlink(missing_ok=True) if int8 else None  # cleanup INT8 cache
 
 
 @pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_train():
     """Test model training on a minimal dataset using available CUDA devices."""
-    import os
-
     device = tuple(DEVICES) if len(DEVICES) > 1 else DEVICES[0]
     # NVIDIA Jetson only has one GPU and therefore skipping checks
     if not IS_JETSON:
-        results = YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)
+        results = YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device, batch=15)
+        results = YOLO(MODEL).train(data="coco128.yaml", imgsz=64, epochs=1, device=device, batch=15, val=False)
         visible = eval(os.environ["CUDA_VISIBLE_DEVICES"])
         assert visible == device, f"Passed GPUs '{device}', but used GPUs '{visible}'"
-
-
-        )  # DDP returns None, single-GPU returns metrics
+        # Note DDP training returns None, single-GPU returns metrics
+        assert (results is None) if len(DEVICES) > 1 else (results is not None)
 
 
 @pytest.mark.slow
@@ -164,7 +163,7 @@ def test_autobatch():
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_utils_benchmarks():
     """Profile YOLO models for performance benchmarks."""
     from ultralytics.utils.benchmarks import ProfileModels
tests/test_engine.py
CHANGED
@@ -3,6 +3,8 @@
 import sys
 from unittest import mock
 
+import torch
+
 from tests import MODEL
 from ultralytics import YOLO
 from ultralytics.cfg import get_cfg
@@ -11,7 +13,7 @@ from ultralytics.models.yolo import classify, detect, segment
 from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
 
 
-def test_func(*args):
+def test_func(*args, **kwargs):
     """Test function callback for evaluating YOLO model performance metrics."""
     print("callback test passed")
 
@@ -67,7 +69,15 @@ def test_detect():
 
 def test_segment():
     """Test image segmentation training, validation, and prediction pipelines using YOLO models."""
-    overrides = {
+    overrides = {
+        "data": "coco8-seg.yaml",
+        "model": "yolo11n-seg.yaml",
+        "imgsz": 32,
+        "epochs": 1,
+        "save": False,
+        "mask_ratio": 1,
+        "overlap_mask": False,
+    }
     cfg = get_cfg(DEFAULT_CFG)
     cfg.data = "coco8-seg.yaml"
     cfg.imgsz = 32
@@ -128,3 +138,20 @@ def test_classify():
     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
     result = pred(source=ASSETS, model=trainer.best)
     assert len(result), "predictor test failed"
+
+
+def test_nan_recovery():
+    """Test NaN loss detection and recovery during training."""
+    nan_injected = [False]
+
+    def inject_nan(trainer):
+        """Inject NaN into loss during batch processing to test recovery mechanism."""
+        if trainer.epoch == 1 and trainer.tloss is not None and not nan_injected[0]:
+            trainer.tloss *= torch.tensor(float("nan"))
+            nan_injected[0] = True
+
+    overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 3}
+    trainer = detect.DetectionTrainer(overrides=overrides)
+    trainer.add_callback("on_train_batch_end", inject_nan)
+    trainer.train()
+    assert nan_injected[0], "NaN injection failed"
tests/test_exports.py
CHANGED
@@ -12,15 +12,8 @@ import pytest
 from tests import MODEL, SOURCE
 from ultralytics import YOLO
 from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
-from ultralytics.utils import
-
-    IS_RASPBERRYPI,
-    LINUX,
-    MACOS,
-    WINDOWS,
-    checks,
-)
-from ultralytics.utils.torch_utils import TORCH_1_9, TORCH_1_13
+from ultralytics.utils import ARM64, IS_RASPBERRYPI, LINUX, MACOS, WINDOWS, checks
+from ultralytics.utils.torch_utils import TORCH_1_10, TORCH_1_11, TORCH_1_13, TORCH_2_1, TORCH_2_8, TORCH_2_9
 
 
 def test_export_torchscript():
@@ -35,7 +28,7 @@ def test_export_onnx():
     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not TORCH_2_1, reason="OpenVINO requires torch>=2.1")
 def test_export_openvino():
     """Test YOLO export to OpenVINO format for model inference compatibility."""
     file = YOLO(MODEL).export(format="openvino", imgsz=32)
@@ -43,7 +36,7 @@ def test_export_openvino():
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not TORCH_2_1, reason="OpenVINO requires torch>=2.1")
 @pytest.mark.parametrize(
     "task, dynamic, int8, half, batch, nms",
     [  # generate all combinations except for exclusion cases
@@ -83,7 +76,7 @@ def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms):
         for task, dynamic, int8, half, batch, simplify, nms in product(
             TASKS, [True, False], [False], [False], [1, 2], [True, False], [True, False]
         )
-        if not ((int8 and half) or (task == "classify" and nms) or (
+        if not ((int8 and half) or (task == "classify" and nms) or (nms and not TORCH_1_13))
     ],
 )
 def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
@@ -101,7 +94,7 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
     [  # generate all combinations except for exclusion cases
         (task, dynamic, int8, half, batch, nms)
         for task, dynamic, int8, half, batch, nms in product(
-            TASKS, [False, True], [False], [False], [1, 2], [True, False]
+            TASKS, [False, True], [False], [False, True], [1, 2], [True, False]
        )
         if not (task == "classify" and nms)
     ],
@@ -117,16 +110,19 @@ def test_export_torchscript_matrix(task, dynamic, int8, half, batch, nms):
 
 @pytest.mark.slow
 @pytest.mark.skipif(not MACOS, reason="CoreML inference only supported on macOS")
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not TORCH_1_11, reason="CoreML export requires torch>=1.11")
 @pytest.mark.skipif(checks.IS_PYTHON_3_13, reason="CoreML not supported in Python 3.13")
 @pytest.mark.parametrize(
     "task, dynamic, int8, half, nms, batch",
     [  # generate all combinations except for exclusion cases
         (task, dynamic, int8, half, nms, batch)
         for task, dynamic, int8, half, nms, batch in product(
-            TASKS, [False], [True, False], [True, False], [True, False], [1]
+            TASKS, [True, False], [True, False], [True, False], [True, False], [1]
         )
-        if not (int8 and half)
+        if not (int8 and half)
+        and not (task != "detect" and nms)
+        and not (dynamic and nms)
+        and not (task == "classify" and dynamic)
     ],
 )
 def test_export_coreml_matrix(task, dynamic, int8, half, nms, batch):
@@ -157,7 +153,7 @@ def test_export_coreml_matrix(task, dynamic, int8, half, nms, batch):
         for task, dynamic, int8, half, batch, nms in product(
             TASKS, [False], [True, False], [True, False], [1], [True, False]
         )
-        if not ((int8 and half) or (task == "classify" and nms) or (ARM64 and nms))
+        if not ((int8 and half) or (task == "classify" and nms) or (ARM64 and nms) or (nms and not TORCH_1_13))
     ],
 )
 def test_export_tflite_matrix(task, dynamic, int8, half, batch, nms):
@@ -169,7 +165,7 @@ def test_export_tflite_matrix(task, dynamic, int8, half, batch, nms):
     Path(file).unlink()  # cleanup
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not TORCH_1_11, reason="CoreML export requires torch>=1.11")
 @pytest.mark.skipif(WINDOWS, reason="CoreML not supported on Windows")  # RuntimeError: BlobWriter not loaded
 @pytest.mark.skipif(LINUX and ARM64, reason="CoreML not supported on aarch64 Linux")
 @pytest.mark.skipif(checks.IS_PYTHON_3_13, reason="CoreML not supported in Python 3.13")
@@ -214,6 +210,7 @@ def test_export_paddle():
 
 
 @pytest.mark.slow
+@pytest.mark.skipif(not TORCH_1_10, reason="MNN export requires torch>=1.10")
 def test_export_mnn():
     """Test YOLO export to MNN format (WARNING: MNN test must precede NCNN test or CI error on Windows)."""
     file = YOLO(MODEL).export(format="mnn", imgsz=32)
@@ -221,6 +218,7 @@ def test_export_mnn():
 
 
 @pytest.mark.slow
+@pytest.mark.skipif(not TORCH_1_10, reason="MNN export requires torch>=1.10")
 @pytest.mark.parametrize(
     "task, int8, half, batch",
     [  # generate all combinations except for exclusion cases
@@ -252,10 +250,60 @@ def test_export_ncnn_matrix(task, half, batch):
     shutil.rmtree(file, ignore_errors=True)  # retry in case of potential lingering multi-threaded file usage errors
 
 
-@pytest.mark.skipif(
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not TORCH_2_9, reason="IMX export requires torch>=2.9.0")
+@pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_9, reason="Requires Python>=3.9")
+@pytest.mark.skipif(WINDOWS or MACOS, reason="Skipping test on Windows and Macos")
+@pytest.mark.skipif(ARM64, reason="IMX export is not supported on ARM64 architectures.")
 def test_export_imx():
     """Test YOLO export to IMX format."""
-    model = YOLO(
+    model = YOLO(MODEL)
     file = model.export(format="imx", imgsz=32)
     YOLO(file)(SOURCE, imgsz=32)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not TORCH_2_8, reason="Axelera export requires torch>=2.8.0")
+@pytest.mark.skipif(not LINUX, reason="Axelera export only supported on Linux")
+@pytest.mark.skipif(not checks.IS_PYTHON_3_10, reason="Axelera export requires Python 3.10")
+def test_export_axelera():
+    """Test YOLO export to Axelera format."""
+    # For faster testing, use a smaller calibration dataset (32 image size crashes axelera export, so 64 is used)
+    file = YOLO(MODEL).export(format="axelera", imgsz=64, data="coco8.yaml")
+    assert Path(file).exists(), f"Axelera export failed, directory not found: {file}"
+    shutil.rmtree(file, ignore_errors=True)  # cleanup
+
+
+# @pytest.mark.skipif(True, reason="Disabled for debugging ruamel.yaml installation required by executorch")
+@pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10 or not TORCH_2_9, reason="Requires Python>=3.10 and Torch>=2.9.0")
+@pytest.mark.skipif(WINDOWS, reason="Skipping test on Windows")
+def test_export_executorch():
+    """Test YOLO model export to ExecuTorch format."""
+    file = YOLO(MODEL).export(format="executorch", imgsz=32)
+    assert Path(file).exists(), f"ExecuTorch export failed, directory not found: {file}"
+    # Check that .pte file exists in the exported directory
+    pte_file = Path(file) / Path(MODEL).with_suffix(".pte").name
+    assert pte_file.exists(), f"ExecuTorch .pte file not found: {pte_file}"
+    # Check that metadata.yaml exists
+    metadata_file = Path(file) / "metadata.yaml"
+    assert metadata_file.exists(), f"ExecuTorch metadata.yaml not found: {metadata_file}"
+    # Note: Inference testing skipped as ExecuTorch requires special runtime setup
+    shutil.rmtree(file, ignore_errors=True)  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10 or not TORCH_2_9, reason="Requires Python>=3.10 and Torch>=2.9.0")
+@pytest.mark.skipif(WINDOWS, reason="Skipping test on Windows")
+@pytest.mark.parametrize("task", TASKS)
+def test_export_executorch_matrix(task):
+    """Test YOLO export to ExecuTorch format for various task types."""
+    file = YOLO(TASK2MODEL[task]).export(format="executorch", imgsz=32)
+    assert Path(file).exists(), f"ExecuTorch export failed for task '{task}', directory not found: {file}"
+    # Check that .pte file exists in the exported directory
+    model_name = Path(TASK2MODEL[task]).with_suffix(".pte").name
+    pte_file = Path(file) / model_name
+    assert pte_file.exists(), f"ExecuTorch .pte file not found for task '{task}': {pte_file}"
+    # Check that metadata.yaml exists
+    metadata_file = Path(file) / "metadata.yaml"
+    assert metadata_file.exists(), f"ExecuTorch metadata.yaml not found for task '{task}': {metadata_file}"
+    # Note: Inference testing skipped as ExecuTorch requires special runtime setup
+    shutil.rmtree(file, ignore_errors=True)  # cleanup
tests/test_integrations.py
CHANGED
@@ -8,9 +8,9 @@ from pathlib import Path
 
 import pytest
 
-from tests import MODEL, SOURCE
+from tests import MODEL, SOURCE
 from ultralytics import YOLO, download
-from ultralytics.utils import DATASETS_DIR, SETTINGS
+from ultralytics.utils import ASSETS_URL, DATASETS_DIR, SETTINGS
 from ultralytics.utils.checks import check_requirements
 
 
@@ -71,14 +71,14 @@ def test_mlflow_keep_run_active():
 
 
 @pytest.mark.skipif(not check_requirements("tritonclient", install=False), reason="tritonclient[all] not installed")
-def test_triton():
+def test_triton(tmp_path):
     """Test NVIDIA Triton Server functionalities with YOLO model."""
     check_requirements("tritonclient[all]")
-    from tritonclient.http import InferenceServerClient
+    from tritonclient.http import InferenceServerClient
 
     # Create variables
     model_name = "yolo"
-    triton_repo =
+    triton_repo = tmp_path / "triton_repo"  # Triton repo path
     triton_model = triton_repo / model_name  # Triton model path
 
     # Export model to ONNX
@@ -129,26 +129,23 @@ def test_faster_coco_eval():
     from ultralytics.models.yolo.pose import PoseValidator
     from ultralytics.models.yolo.segment import SegmentationValidator
 
-    # Download annotations after each dataset downloads first
-    url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-
     args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
     validator = DetectionValidator(args=args)
     validator()
     validator.is_coco = True
-    download(f"{
+    download(f"{ASSETS_URL}/instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
     _ = validator.eval_json(validator.stats)
 
     args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
    validator = SegmentationValidator(args=args)
     validator()
     validator.is_coco = True
-    download(f"{
+    download(f"{ASSETS_URL}/instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
     _ = validator.eval_json(validator.stats)
 
     args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
     validator = PoseValidator(args=args)
     validator()
     validator.is_coco = True
-    download(f"{
+    download(f"{ASSETS_URL}/person_keypoints_val2017.json", dir=DATASETS_DIR / "coco8-pose/annotations")
     _ = validator.eval_json(validator.stats)