dgenerate-ultralytics-headless 8.3.214__py3-none-any.whl → 8.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/METADATA +64 -74
- dgenerate_ultralytics_headless-8.4.7.dist-info/RECORD +311 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/WHEEL +1 -1
- tests/__init__.py +7 -9
- tests/conftest.py +8 -15
- tests/test_cli.py +1 -1
- tests/test_cuda.py +13 -10
- tests/test_engine.py +9 -9
- tests/test_exports.py +65 -13
- tests/test_integrations.py +13 -13
- tests/test_python.py +125 -69
- tests/test_solutions.py +161 -152
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +86 -92
- ultralytics/cfg/datasets/Argoverse.yaml +7 -6
- ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
- ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
- ultralytics/cfg/datasets/ImageNet.yaml +1 -1
- ultralytics/cfg/datasets/TT100K.yaml +346 -0
- ultralytics/cfg/datasets/VOC.yaml +15 -16
- ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
- ultralytics/cfg/datasets/coco-pose.yaml +21 -0
- ultralytics/cfg/datasets/coco12-formats.yaml +101 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
- ultralytics/cfg/datasets/dog-pose.yaml +28 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
- ultralytics/cfg/datasets/kitti.yaml +27 -0
- ultralytics/cfg/datasets/lvis.yaml +5 -5
- ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
- ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
- ultralytics/cfg/datasets/xView.yaml +16 -16
- ultralytics/cfg/default.yaml +4 -2
- ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
- ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
- ultralytics/cfg/models/26/yolo26-cls.yaml +33 -0
- ultralytics/cfg/models/26/yolo26-obb.yaml +52 -0
- ultralytics/cfg/models/26/yolo26-p2.yaml +60 -0
- ultralytics/cfg/models/26/yolo26-p6.yaml +62 -0
- ultralytics/cfg/models/26/yolo26-pose.yaml +53 -0
- ultralytics/cfg/models/26/yolo26-seg.yaml +52 -0
- ultralytics/cfg/models/26/yolo26.yaml +52 -0
- ultralytics/cfg/models/26/yoloe-26-seg.yaml +53 -0
- ultralytics/cfg/models/26/yoloe-26.yaml +53 -0
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
- ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
- ultralytics/cfg/models/v6/yolov6.yaml +1 -1
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
- ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
- ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +5 -6
- ultralytics/data/augment.py +300 -475
- ultralytics/data/base.py +18 -26
- ultralytics/data/build.py +147 -25
- ultralytics/data/converter.py +108 -87
- ultralytics/data/dataset.py +47 -75
- ultralytics/data/loaders.py +42 -49
- ultralytics/data/split.py +5 -6
- ultralytics/data/split_dota.py +8 -15
- ultralytics/data/utils.py +36 -45
- ultralytics/engine/exporter.py +351 -263
- ultralytics/engine/model.py +186 -225
- ultralytics/engine/predictor.py +45 -54
- ultralytics/engine/results.py +198 -325
- ultralytics/engine/trainer.py +165 -106
- ultralytics/engine/tuner.py +41 -43
- ultralytics/engine/validator.py +55 -38
- ultralytics/hub/__init__.py +16 -19
- ultralytics/hub/auth.py +6 -12
- ultralytics/hub/google/__init__.py +7 -10
- ultralytics/hub/session.py +15 -25
- ultralytics/hub/utils.py +5 -8
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +8 -10
- ultralytics/models/fastsam/predict.py +18 -30
- ultralytics/models/fastsam/utils.py +1 -2
- ultralytics/models/fastsam/val.py +5 -7
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +5 -8
- ultralytics/models/nas/predict.py +7 -9
- ultralytics/models/nas/val.py +1 -2
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +5 -8
- ultralytics/models/rtdetr/predict.py +15 -19
- ultralytics/models/rtdetr/train.py +10 -13
- ultralytics/models/rtdetr/val.py +21 -23
- ultralytics/models/sam/__init__.py +15 -2
- ultralytics/models/sam/amg.py +14 -20
- ultralytics/models/sam/build.py +26 -19
- ultralytics/models/sam/build_sam3.py +377 -0
- ultralytics/models/sam/model.py +29 -32
- ultralytics/models/sam/modules/blocks.py +83 -144
- ultralytics/models/sam/modules/decoders.py +19 -37
- ultralytics/models/sam/modules/encoders.py +44 -101
- ultralytics/models/sam/modules/memory_attention.py +16 -30
- ultralytics/models/sam/modules/sam.py +200 -73
- ultralytics/models/sam/modules/tiny_encoder.py +64 -83
- ultralytics/models/sam/modules/transformer.py +18 -28
- ultralytics/models/sam/modules/utils.py +174 -50
- ultralytics/models/sam/predict.py +2248 -350
- ultralytics/models/sam/sam3/__init__.py +3 -0
- ultralytics/models/sam/sam3/decoder.py +546 -0
- ultralytics/models/sam/sam3/encoder.py +529 -0
- ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
- ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
- ultralytics/models/sam/sam3/model_misc.py +199 -0
- ultralytics/models/sam/sam3/necks.py +129 -0
- ultralytics/models/sam/sam3/sam3_image.py +339 -0
- ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
- ultralytics/models/sam/sam3/vitdet.py +547 -0
- ultralytics/models/sam/sam3/vl_combiner.py +160 -0
- ultralytics/models/utils/loss.py +14 -26
- ultralytics/models/utils/ops.py +13 -17
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +10 -13
- ultralytics/models/yolo/classify/train.py +12 -33
- ultralytics/models/yolo/classify/val.py +30 -29
- ultralytics/models/yolo/detect/predict.py +9 -12
- ultralytics/models/yolo/detect/train.py +17 -23
- ultralytics/models/yolo/detect/val.py +77 -59
- ultralytics/models/yolo/model.py +43 -60
- ultralytics/models/yolo/obb/predict.py +7 -16
- ultralytics/models/yolo/obb/train.py +14 -17
- ultralytics/models/yolo/obb/val.py +40 -37
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +7 -22
- ultralytics/models/yolo/pose/train.py +13 -16
- ultralytics/models/yolo/pose/val.py +39 -58
- ultralytics/models/yolo/segment/predict.py +17 -21
- ultralytics/models/yolo/segment/train.py +7 -10
- ultralytics/models/yolo/segment/val.py +95 -47
- ultralytics/models/yolo/world/train.py +8 -14
- ultralytics/models/yolo/world/train_world.py +11 -34
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +16 -23
- ultralytics/models/yolo/yoloe/train.py +36 -44
- ultralytics/models/yolo/yoloe/train_seg.py +11 -11
- ultralytics/models/yolo/yoloe/val.py +15 -20
- ultralytics/nn/__init__.py +7 -7
- ultralytics/nn/autobackend.py +159 -85
- ultralytics/nn/modules/__init__.py +68 -60
- ultralytics/nn/modules/activation.py +4 -6
- ultralytics/nn/modules/block.py +260 -224
- ultralytics/nn/modules/conv.py +52 -97
- ultralytics/nn/modules/head.py +831 -299
- ultralytics/nn/modules/transformer.py +76 -88
- ultralytics/nn/modules/utils.py +16 -21
- ultralytics/nn/tasks.py +180 -195
- ultralytics/nn/text_model.py +45 -69
- ultralytics/optim/__init__.py +5 -0
- ultralytics/optim/muon.py +338 -0
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +13 -19
- ultralytics/solutions/analytics.py +15 -16
- ultralytics/solutions/config.py +6 -7
- ultralytics/solutions/distance_calculation.py +10 -13
- ultralytics/solutions/heatmap.py +8 -14
- ultralytics/solutions/instance_segmentation.py +6 -9
- ultralytics/solutions/object_blurrer.py +7 -10
- ultralytics/solutions/object_counter.py +12 -19
- ultralytics/solutions/object_cropper.py +8 -14
- ultralytics/solutions/parking_management.py +34 -32
- ultralytics/solutions/queue_management.py +10 -12
- ultralytics/solutions/region_counter.py +9 -12
- ultralytics/solutions/security_alarm.py +15 -20
- ultralytics/solutions/similarity_search.py +10 -15
- ultralytics/solutions/solutions.py +77 -76
- ultralytics/solutions/speed_estimation.py +7 -10
- ultralytics/solutions/streamlit_inference.py +2 -4
- ultralytics/solutions/templates/similarity-search.html +7 -18
- ultralytics/solutions/trackzone.py +7 -10
- ultralytics/solutions/vision_eye.py +5 -8
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +3 -5
- ultralytics/trackers/bot_sort.py +10 -27
- ultralytics/trackers/byte_tracker.py +21 -37
- ultralytics/trackers/track.py +4 -7
- ultralytics/trackers/utils/gmc.py +11 -22
- ultralytics/trackers/utils/kalman_filter.py +37 -48
- ultralytics/trackers/utils/matching.py +12 -15
- ultralytics/utils/__init__.py +124 -124
- ultralytics/utils/autobatch.py +2 -4
- ultralytics/utils/autodevice.py +17 -18
- ultralytics/utils/benchmarks.py +57 -71
- ultralytics/utils/callbacks/base.py +8 -10
- ultralytics/utils/callbacks/clearml.py +5 -13
- ultralytics/utils/callbacks/comet.py +32 -46
- ultralytics/utils/callbacks/dvc.py +13 -18
- ultralytics/utils/callbacks/mlflow.py +4 -5
- ultralytics/utils/callbacks/neptune.py +7 -15
- ultralytics/utils/callbacks/platform.py +423 -38
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +25 -31
- ultralytics/utils/callbacks/wb.py +16 -14
- ultralytics/utils/checks.py +127 -85
- ultralytics/utils/cpu.py +3 -8
- ultralytics/utils/dist.py +9 -12
- ultralytics/utils/downloads.py +25 -33
- ultralytics/utils/errors.py +6 -14
- ultralytics/utils/events.py +2 -4
- ultralytics/utils/export/__init__.py +4 -236
- ultralytics/utils/export/engine.py +246 -0
- ultralytics/utils/export/imx.py +117 -63
- ultralytics/utils/export/tensorflow.py +231 -0
- ultralytics/utils/files.py +26 -30
- ultralytics/utils/git.py +9 -11
- ultralytics/utils/instance.py +30 -51
- ultralytics/utils/logger.py +212 -114
- ultralytics/utils/loss.py +601 -215
- ultralytics/utils/metrics.py +128 -156
- ultralytics/utils/nms.py +13 -16
- ultralytics/utils/ops.py +117 -166
- ultralytics/utils/patches.py +75 -21
- ultralytics/utils/plotting.py +75 -80
- ultralytics/utils/tal.py +125 -59
- ultralytics/utils/torch_utils.py +53 -79
- ultralytics/utils/tqdm.py +24 -21
- ultralytics/utils/triton.py +13 -19
- ultralytics/utils/tuner.py +19 -10
- dgenerate_ultralytics_headless-8.3.214.dist-info/RECORD +0 -283
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/top_level.txt +0 -0
ultralytics/utils/autodevice.py
CHANGED
```diff
@@ -9,24 +9,23 @@ from ultralytics.utils.checks import check_requirements
 
 
 class GPUInfo:
-    """
-    Manages NVIDIA GPU information via pynvml with robust error handling.
+    """Manages NVIDIA GPU information via pynvml with robust error handling.
 
-    Provides methods to query detailed GPU statistics (utilization, memory, temp, power) and select the most idle
-
-
+    Provides methods to query detailed GPU statistics (utilization, memory, temp, power) and select the most idle GPUs
+    based on configurable criteria. It safely handles the absence or initialization failure of the pynvml library by
+    logging warnings and disabling related features, preventing application crashes.
 
     Includes fallback logic using `torch.cuda` for basic device counting if NVML is unavailable during GPU
     selection. Manages NVML initialization and shutdown internally.
 
     Attributes:
         pynvml (module | None): The `pynvml` module if successfully imported and initialized, otherwise `None`.
-        nvml_available (bool): Indicates if `pynvml` is ready for use. True if import and `nvmlInit()` succeeded,
-
-        gpu_stats (list[dict[str, Any]]): A list of dictionaries, each holding stats for one GPU
-
-            '
-
+        nvml_available (bool): Indicates if `pynvml` is ready for use. True if import and `nvmlInit()` succeeded, False
+            otherwise.
+        gpu_stats (list[dict[str, Any]]): A list of dictionaries, each holding stats for one GPU, populated on
+            initialization and by `refresh_stats()`. Keys include: 'index', 'name', 'utilization' (%), 'memory_used' (MiB),
+            'memory_total' (MiB), 'memory_free' (MiB), 'temperature' (C), 'power_draw' (W), 'power_limit' (W or 'N/A').
+            Empty if NVML is unavailable or queries fail.
 
     Methods:
         refresh_stats: Refresh the internal gpu_stats list by querying NVML.
@@ -137,8 +136,7 @@ class GPUInfo:
     def select_idle_gpu(
         self, count: int = 1, min_memory_fraction: float = 0, min_util_fraction: float = 0
     ) -> list[int]:
-        """
-        Select the most idle GPUs based on utilization and free memory.
+        """Select the most idle GPUs based on utilization and free memory.
 
         Args:
             count (int): The number of idle GPUs to select.
@@ -154,9 +152,10 @@ class GPUInfo:
        """
        assert min_memory_fraction <= 1.0, f"min_memory_fraction must be <= 1.0, got {min_memory_fraction}"
        assert min_util_fraction <= 1.0, f"min_util_fraction must be <= 1.0, got {min_util_fraction}"
-
-            f"
+        criteria = (
+            f"free memory >= {min_memory_fraction * 100:.1f}% and free utilization >= {min_util_fraction * 100:.1f}%"
         )
+        LOGGER.info(f"Searching for {count} idle GPUs with {criteria}...")
 
        if count <= 0:
            return []
@@ -179,11 +178,11 @@ class GPUInfo:
        selected = [gpu["index"] for gpu in eligible_gpus[:count]]
 
        if selected:
+            if len(selected) < count:
+                LOGGER.warning(f"Requested {count} GPUs but only {len(selected)} met the idle criteria.")
            LOGGER.info(f"Selected idle CUDA devices {selected}")
        else:
-            LOGGER.warning(
-                f"No GPUs met criteria (Free Mem >= {min_memory_fraction * 100:.1f}% and Free Util >= {min_util_fraction * 100:.1f}%)."
-            )
+            LOGGER.warning(f"No GPUs met criteria ({criteria}).")
 
        return selected
 
```
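The reworked `select_idle_gpu` builds a shared `criteria` string, logs what it is searching for, and now warns when fewer GPUs than requested qualify. A minimal usage sketch, assuming `GPUInfo()` takes no constructor arguments and that NVML (or the `torch.cuda` fallback) is available; the thresholds shown are illustrative:

```python
from ultralytics.utils.autodevice import GPUInfo

# Query per-GPU stats via pynvml; degrades gracefully if NVML is unavailable.
gpu_info = GPUInfo()
gpu_info.refresh_stats()  # re-query NVML (stats are also populated on init)

# Ask for two GPUs with >= 20% free memory and >= 80% free utilization.
# With the new logic, a warning is logged if fewer than two GPUs qualify.
idle = gpu_info.select_idle_gpu(count=2, min_memory_fraction=0.2, min_util_fraction=0.8)
print(f"Idle CUDA devices: {idle}")  # e.g. [0, 1], or [] if none met the criteria
```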
ultralytics/utils/benchmarks.py
CHANGED
```diff
@@ -1,30 +1,31 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """
-Benchmark
+Benchmark YOLO model formats for speed and accuracy.
 
 Usage:
     from ultralytics.utils.benchmarks import ProfileModels, benchmark
-    ProfileModels(['
-    benchmark(model='
+    ProfileModels(['yolo26n.yaml', 'yolov8s.yaml']).run()
+    benchmark(model='yolo26n.pt', imgsz=160)
 
 Format | `format=argument` | Model
 --- | --- | ---
-PyTorch | - |
-TorchScript | `torchscript` |
-ONNX | `onnx` |
-OpenVINO | `openvino` |
-TensorRT | `engine` |
-CoreML | `coreml` |
-TensorFlow SavedModel | `saved_model` |
-TensorFlow GraphDef | `pb` |
-TensorFlow Lite | `tflite` |
-TensorFlow Edge TPU | `edgetpu` |
-TensorFlow.js | `tfjs` |
-PaddlePaddle | `paddle` |
-MNN | `mnn` |
-NCNN | `ncnn` |
-IMX | `imx` |
-RKNN | `rknn` |
+PyTorch | - | yolo26n.pt
+TorchScript | `torchscript` | yolo26n.torchscript
+ONNX | `onnx` | yolo26n.onnx
+OpenVINO | `openvino` | yolo26n_openvino_model/
+TensorRT | `engine` | yolo26n.engine
+CoreML | `coreml` | yolo26n.mlpackage
+TensorFlow SavedModel | `saved_model` | yolo26n_saved_model/
+TensorFlow GraphDef | `pb` | yolo26n.pb
+TensorFlow Lite | `tflite` | yolo26n.tflite
+TensorFlow Edge TPU | `edgetpu` | yolo26n_edgetpu.tflite
+TensorFlow.js | `tfjs` | yolo26n_web_model/
+PaddlePaddle | `paddle` | yolo26n_paddle_model/
+MNN | `mnn` | yolo26n.mnn
+NCNN | `ncnn` | yolo26n_ncnn_model/
+IMX | `imx` | yolo26n_imx_model/
+RKNN | `rknn` | yolo26n_rknn_model/
+ExecuTorch | `executorch` | yolo26n_executorch_model/
 """
 
 from __future__ import annotations
@@ -51,7 +52,7 @@ from ultralytics.utils.torch_utils import get_cpu_info, select_device
 
 
 def benchmark(
-    model=WEIGHTS_DIR / "
+    model=WEIGHTS_DIR / "yolo26n.pt",
     data=None,
     imgsz=160,
     half=False,
@@ -62,8 +63,7 @@ def benchmark(
     format="",
     **kwargs,
 ):
-    """
-    Benchmark a YOLO model across different formats for speed and accuracy.
+    """Benchmark a YOLO model across different formats for speed and accuracy.
 
     Args:
         model (str | Path): Path to the model file or directory.
@@ -78,13 +78,13 @@ def benchmark(
         **kwargs (Any): Additional keyword arguments for exporter.
 
     Returns:
-        (polars.DataFrame): A
-
+        (polars.DataFrame): A Polars DataFrame with benchmark results for each format, including file size, metric, and
+            inference time.
 
     Examples:
         Benchmark a YOLO model with default settings:
         >>> from ultralytics.utils.benchmarks import benchmark
-        >>> benchmark(model="
+        >>> benchmark(model="yolo26n.pt", imgsz=640)
     """
     imgsz = check_imgsz(imgsz)
     assert imgsz[0] == imgsz[1] if isinstance(imgsz, list) else True, "benchmark() only supports square imgsz."
@@ -144,17 +144,24 @@ def benchmark(
             if format == "imx":
                 assert not is_end2end
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 IMX exports not supported"
-                assert model.task
+                assert model.task in {"detect", "classify", "pose"}, (
+                    "IMX export is only supported for detection, classification and pose estimation tasks"
+                )
                 assert "C2f" in model.__str__(), "IMX only supported for YOLOv8n and YOLO11n"
             if format == "rknn":
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 RKNN exports not supported yet"
                 assert not is_end2end, "End-to-end models not supported by RKNN yet"
                 assert LINUX, "RKNN only supported on Linux"
                 assert not is_rockchip(), "RKNN Inference only supported on Rockchip devices"
+            if format == "executorch":
+                assert not isinstance(model, YOLOWorld), "YOLOWorldv2 ExecuTorch exports not supported yet"
+                assert not is_end2end, "End-to-end models not supported by ExecuTorch yet"
             if "cpu" in device.type:
                 assert cpu, "inference not supported on CPU"
             if "cuda" in device.type:
                 assert gpu, "inference not supported on GPU"
+            if format == "ncnn":
+                assert not is_end2end, "End-to-end torch.topk operation is not supported for NCNN prediction yet"
 
             # Export
             if format == "-":
@@ -170,10 +177,9 @@ def benchmark(
 
             # Predict
             assert model.task != "pose" or format != "pb", "GraphDef Pose inference is not supported"
+            assert model.task != "pose" or format != "executorch", "ExecuTorch Pose inference is not supported"
             assert format not in {"edgetpu", "tfjs"}, "inference not supported"
             assert format != "coreml" or platform.system() == "Darwin", "inference only supported on macOS>=10.13"
-            if format == "ncnn":
-                assert not is_end2end, "End-to-end torch.topk operation is not supported for NCNN prediction yet"
             exported_model.predict(ASSETS / "bus.jpg", imgsz=imgsz, device=device, half=half, verbose=False)
 
             # Validate
@@ -220,8 +226,7 @@ def benchmark(
 
 
 class RF100Benchmark:
-    """
-    Benchmark YOLO model performance across various formats for speed and accuracy.
+    """Benchmark YOLO model performance across various formats for speed and accuracy.
 
     This class provides functionality to benchmark YOLO models on the RF100 dataset collection.
 
@@ -246,8 +251,7 @@ class RF100Benchmark:
         self.val_metrics = ["class", "images", "targets", "precision", "recall", "map50", "map95"]
 
     def set_key(self, api_key: str):
-        """
-        Set Roboflow API key for processing.
+        """Set Roboflow API key for processing.
 
         Args:
             api_key (str): The API key.
@@ -263,8 +267,7 @@ class RF100Benchmark:
         self.rf = Roboflow(api_key=api_key)
 
     def parse_dataset(self, ds_link_txt: str = "datasets_links.txt"):
-        """
-        Parse dataset links and download datasets.
+        """Parse dataset links and download datasets.
 
         Args:
             ds_link_txt (str): Path to the file containing dataset links.
@@ -286,7 +289,7 @@ class RF100Benchmark:
         with open(ds_link_txt, encoding="utf-8") as file:
             for line in file:
                 try:
-                    _,
+                    _, _url, workspace, project, version = re.split("/+", line.strip())
                     self.ds_names.append(project)
                     proj_version = f"{project}-{version}"
                     if not Path(proj_version).exists():
@@ -308,8 +311,7 @@ class RF100Benchmark:
             YAML.dump(yaml_data, path)
 
     def evaluate(self, yaml_path: str, val_log_file: str, eval_log_file: str, list_ind: int):
-        """
-        Evaluate model performance on validation results.
+        """Evaluate model performance on validation results.
 
         Args:
             yaml_path (str): Path to the YAML configuration file.
@@ -357,7 +359,7 @@ class RF100Benchmark:
             map_val = lst["map50"]
         else:
             LOGGER.info("Single dict found")
-            map_val =
+            map_val = next(res["map50"] for res in eval_lines)
 
         with open(eval_log_file, "a", encoding="utf-8") as f:
             f.write(f"{self.ds_names[list_ind]}: {map_val}\n")
@@ -366,8 +368,7 @@ class RF100Benchmark:
 
 
 class ProfileModels:
-    """
-    ProfileModels class for profiling different models on ONNX and TensorRT.
+    """ProfileModels class for profiling different models on ONNX and TensorRT.
 
     This class profiles the performance of different models, returning results such as model speed and FLOPs.
 
@@ -395,7 +396,7 @@ class ProfileModels:
     Examples:
         Profile models and print results
         >>> from ultralytics.utils.benchmarks import ProfileModels
-        >>> profiler = ProfileModels(["
+        >>> profiler = ProfileModels(["yolo26n.yaml", "yolov8s.yaml"], imgsz=640)
         >>> profiler.run()
     """
 
@@ -410,8 +411,7 @@ class ProfileModels:
         trt: bool = True,
         device: torch.device | str | None = None,
     ):
-        """
-        Initialize the ProfileModels class for profiling models.
+        """Initialize the ProfileModels class for profiling models.
 
         Args:
             paths (list[str]): List of paths of the models to be profiled.
@@ -425,12 +425,6 @@ class ProfileModels:
 
         Notes:
             FP16 'half' argument option removed for ONNX as slower on CPU than FP32.
-
-        Examples:
-            Initialize and profile models
-            >>> from ultralytics.utils.benchmarks import ProfileModels
-            >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"], imgsz=640)
-            >>> profiler.run()
         """
         self.paths = paths
         self.num_timed_runs = num_timed_runs
@@ -442,8 +436,7 @@ class ProfileModels:
         self.device = device if isinstance(device, torch.device) else select_device(device)
 
     def run(self):
-        """
-        Profile YOLO models for speed and accuracy across various formats including ONNX and TensorRT.
+        """Profile YOLO models for speed and accuracy across various formats including ONNX and TensorRT.
 
         Returns:
             (list[dict]): List of dictionaries containing profiling results for each model.
@@ -451,7 +444,7 @@ class ProfileModels:
         Examples:
             Profile models and print results
            >>> from ultralytics.utils.benchmarks import ProfileModels
-            >>> profiler = ProfileModels(["
+            >>> profiler = ProfileModels(["yolo26n.yaml", "yolo11s.yaml"])
            >>> results = profiler.run()
        """
        files = self.get_files()
@@ -467,7 +460,7 @@ class ProfileModels:
            if file.suffix in {".pt", ".yaml", ".yml"}:
                model = YOLO(str(file))
                model.fuse()  # to report correct params and GFLOPs in model.info()
-                model_info = model.info()
+                model_info = model.info(imgsz=self.imgsz)
                if self.trt and self.device.type != "cpu" and not engine_file.is_file():
                    engine_file = model.export(
                        format="engine",
@@ -497,8 +490,7 @@ class ProfileModels:
        return output
 
    def get_files(self):
-        """
-        Return a list of paths for all relevant model files given by the user.
+        """Return a list of paths for all relevant model files given by the user.
 
        Returns:
            (list[Path]): List of Path objects for the model files.
@@ -524,8 +516,7 @@ class ProfileModels:
 
    @staticmethod
    def iterative_sigma_clipping(data: np.ndarray, sigma: float = 2, max_iters: int = 3):
-        """
-        Apply iterative sigma clipping to data to remove outliers.
+        """Apply iterative sigma clipping to data to remove outliers.
 
        Args:
            data (np.ndarray): Input data array.
@@ -545,8 +536,7 @@ class ProfileModels:
        return data
 
    def profile_tensorrt_model(self, engine_file: str, eps: float = 1e-3):
-        """
-        Profile YOLO model performance with TensorRT, measuring average run time and standard deviation.
+        """Profile YOLO model performance with TensorRT, measuring average run time and standard deviation.
 
        Args:
            engine_file (str): Path to the TensorRT engine file.
@@ -589,8 +579,7 @@ class ProfileModels:
        return not all(isinstance(dim, int) and dim >= 0 for dim in tensor_shape)
 
    def profile_onnx_model(self, onnx_file: str, eps: float = 1e-3):
-        """
-        Profile an ONNX model, measuring average inference time and standard deviation across multiple runs.
+        """Profile an ONNX model, measuring average inference time and standard deviation across multiple runs.
 
        Args:
            onnx_file (str): Path to the ONNX model file.
@@ -609,7 +598,7 @@ class ProfileModels:
        sess_options.intra_op_num_threads = 8  # Limit the number of threads
        sess = ort.InferenceSession(onnx_file, sess_options, providers=["CPUExecutionProvider"])
 
-        input_data_dict =
+        input_data_dict = {}
        for input_tensor in sess.get_inputs():
            input_type = input_tensor.type
            if self.check_dynamic(input_tensor.shape):
@@ -637,7 +626,7 @@ class ProfileModels:
 
            input_data = np.random.rand(*input_shape).astype(input_dtype)
            input_name = input_tensor.name
-            input_data_dict
+            input_data_dict[input_name] = input_data
 
        output_name = sess.get_outputs()[0].name
 
@@ -669,8 +658,7 @@ class ProfileModels:
        t_engine: tuple[float, float],
        model_info: tuple[float, float, float, float],
    ):
-        """
-        Generate a table row string with model performance metrics.
+        """Generate a table row string with model performance metrics.
 
        Args:
            model_name (str): Name of the model.
@@ -681,7 +669,7 @@ class ProfileModels:
        Returns:
            (str): Formatted table row string with model metrics.
        """
-
+        _layers, params, _gradients, flops = model_info
        return (
            f"| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.1f}±{t_onnx[1]:.1f} ms | {t_engine[0]:.1f}±"
            f"{t_engine[1]:.1f} ms | {params / 1e6:.1f} | {flops:.1f} |"
@@ -694,8 +682,7 @@ class ProfileModels:
        t_engine: tuple[float, float],
        model_info: tuple[float, float, float, float],
    ):
-        """
-        Generate a dictionary of profiling results.
+        """Generate a dictionary of profiling results.
 
        Args:
            model_name (str): Name of the model.
@@ -706,7 +693,7 @@ class ProfileModels:
        Returns:
            (dict): Dictionary containing profiling results.
        """
-
+        _layers, params, _gradients, flops = model_info
        return {
            "model/name": model_name,
            "model/parameters": params,
@@ -717,8 +704,7 @@ class ProfileModels:
 
    @staticmethod
    def print_table(table_rows: list[str]):
-        """
-        Print a formatted table of model profiling results.
+        """Print a formatted table of model profiling results.
 
        Args:
            table_rows (list[str]): List of formatted table row strings.
```
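The new module docstring spells out both entry points with the `yolo26n` defaults; reproduced below as a runnable sketch (weights and YAML configs are resolved or downloaded by Ultralytics at call time, and a full benchmark run exercises every export format in the table above, so it can take a while):

```python
from ultralytics.utils.benchmarks import ProfileModels, benchmark

# Benchmark yolo26n.pt across the supported export formats (now including ExecuTorch)
# and return a polars DataFrame of file size, metric, and inference time per format.
benchmark(model="yolo26n.pt", imgsz=160)

# Profile ONNX and TensorRT speed plus parameters/FLOPs for several model configs.
ProfileModels(["yolo26n.yaml", "yolov8s.yaml"], imgsz=640).run()
```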
ultralytics/utils/callbacks/base.py
CHANGED
```diff
@@ -175,13 +175,12 @@ default_callbacks = {
 
 
 def get_default_callbacks():
-    """
-    Get the default callbacks for Ultralytics training, validation, prediction, and export processes.
+    """Get the default callbacks for Ultralytics training, validation, prediction, and export processes.
 
     Returns:
         (dict): Dictionary of default callbacks for various training events. Each key represents an event during the
-            training process, and the corresponding value is a list of callback functions executed when that
-            occurs.
+            training process, and the corresponding value is a list of callback functions executed when that
+            event occurs.
 
     Examples:
         >>> callbacks = get_default_callbacks()
@@ -192,17 +191,16 @@ def get_default_callbacks():
 
 
 def add_integration_callbacks(instance):
-    """
-    Add integration callbacks to the instance's callbacks dictionary.
+    """Add integration callbacks to the instance's callbacks dictionary.
 
     This function loads and adds various integration callbacks to the provided instance. The specific callbacks added
     depend on the type of instance provided. All instances receive HUB callbacks, while Trainer instances also receive
-    additional callbacks for various integrations like ClearML, Comet, DVC, MLflow, Neptune, Ray Tune, TensorBoard,
-
+    additional callbacks for various integrations like ClearML, Comet, DVC, MLflow, Neptune, Ray Tune, TensorBoard, and
+    Weights & Biases.
 
     Args:
-        instance (Trainer | Predictor | Validator | Exporter): The object instance to which callbacks will be added.
-
+        instance (Trainer | Predictor | Validator | Exporter): The object instance to which callbacks will be added. The
+            type of instance determines which callbacks are loaded.
 
     Examples:
         >>> from ultralytics.engine.trainer import BaseTrainer
```
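The docstring examples above imply the following pattern for inspecting the default callback registry; a minimal sketch, assuming only that `get_default_callbacks()` is importable from `ultralytics.utils.callbacks.base` as the diff shows:

```python
from ultralytics.utils.callbacks.base import get_default_callbacks

# Dict mapping each training event to the list of callback functions run when it fires.
callbacks = get_default_callbacks()
for event, fns in list(callbacks.items())[:3]:
    print(event, [fn.__name__ for fn in fns])
```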
ultralytics/utils/callbacks/clearml.py
CHANGED
```diff
@@ -15,8 +15,7 @@ except (ImportError, AssertionError):
 
 
 def _log_debug_samples(files, title: str = "Debug Samples") -> None:
-    """
-    Log files (images) as debug samples in the ClearML task.
+    """Log files (images) as debug samples in the ClearML task.
 
     Args:
         files (list[Path]): A list of file paths in PosixPath format.
@@ -35,8 +34,7 @@ def _log_debug_samples(files, title: str = "Debug Samples") -> None:
 
 
 def _log_plot(title: str, plot_path: str) -> None:
-    """
-    Log an image as a plot in the plot section of ClearML.
+    """Log an image as a plot in the plot section of ClearML.
 
     Args:
         title (str): The title of the plot.
@@ -125,15 +123,9 @@ def on_train_end(trainer) -> None:
     """Log final model and training results on training completion."""
     if task := Task.current_task():
         # Log final results, confusion matrix and PR plots
-
-            "
-
-            "confusion_matrix_normalized.png",
-            *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
-        ]
-        files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter existing files
-        for f in files:
-            _log_plot(title=f.stem, plot_path=f)
+        for f in [*trainer.plots.keys(), *trainer.validator.plots.keys()]:
+            if "batch" not in f.name:
+                _log_plot(title=f.stem, plot_path=f)
         # Report final metrics
         for k, v in trainer.validator.metrics.results_dict.items():
             task.get_logger().report_single_value(k, v)
```