dgenerate-ultralytics-headless 8.3.214__py3-none-any.whl → 8.3.248__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/METADATA +13 -14
- dgenerate_ultralytics_headless-8.3.248.dist-info/RECORD +298 -0
- tests/__init__.py +5 -7
- tests/conftest.py +8 -15
- tests/test_cli.py +1 -1
- tests/test_cuda.py +5 -8
- tests/test_engine.py +1 -1
- tests/test_exports.py +57 -12
- tests/test_integrations.py +4 -4
- tests/test_python.py +84 -53
- tests/test_solutions.py +160 -151
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +56 -62
- ultralytics/cfg/datasets/Argoverse.yaml +7 -6
- ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
- ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
- ultralytics/cfg/datasets/ImageNet.yaml +1 -1
- ultralytics/cfg/datasets/VOC.yaml +15 -16
- ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
- ultralytics/cfg/datasets/coco-pose.yaml +21 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
- ultralytics/cfg/datasets/dog-pose.yaml +28 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
- ultralytics/cfg/datasets/kitti.yaml +27 -0
- ultralytics/cfg/datasets/lvis.yaml +5 -5
- ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
- ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
- ultralytics/cfg/datasets/xView.yaml +16 -16
- ultralytics/cfg/default.yaml +1 -1
- ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
- ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
- ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
- ultralytics/cfg/models/v6/yolov6.yaml +1 -1
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
- ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
- ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +3 -4
- ultralytics/data/augment.py +285 -475
- ultralytics/data/base.py +18 -26
- ultralytics/data/build.py +147 -25
- ultralytics/data/converter.py +36 -46
- ultralytics/data/dataset.py +46 -74
- ultralytics/data/loaders.py +42 -49
- ultralytics/data/split.py +5 -6
- ultralytics/data/split_dota.py +8 -15
- ultralytics/data/utils.py +34 -43
- ultralytics/engine/exporter.py +319 -237
- ultralytics/engine/model.py +148 -188
- ultralytics/engine/predictor.py +29 -38
- ultralytics/engine/results.py +177 -311
- ultralytics/engine/trainer.py +83 -59
- ultralytics/engine/tuner.py +23 -34
- ultralytics/engine/validator.py +39 -22
- ultralytics/hub/__init__.py +16 -19
- ultralytics/hub/auth.py +6 -12
- ultralytics/hub/google/__init__.py +7 -10
- ultralytics/hub/session.py +15 -25
- ultralytics/hub/utils.py +5 -8
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +8 -10
- ultralytics/models/fastsam/predict.py +17 -29
- ultralytics/models/fastsam/utils.py +1 -2
- ultralytics/models/fastsam/val.py +5 -7
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +5 -8
- ultralytics/models/nas/predict.py +7 -9
- ultralytics/models/nas/val.py +1 -2
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +5 -8
- ultralytics/models/rtdetr/predict.py +15 -19
- ultralytics/models/rtdetr/train.py +10 -13
- ultralytics/models/rtdetr/val.py +21 -23
- ultralytics/models/sam/__init__.py +15 -2
- ultralytics/models/sam/amg.py +14 -20
- ultralytics/models/sam/build.py +26 -19
- ultralytics/models/sam/build_sam3.py +377 -0
- ultralytics/models/sam/model.py +29 -32
- ultralytics/models/sam/modules/blocks.py +83 -144
- ultralytics/models/sam/modules/decoders.py +19 -37
- ultralytics/models/sam/modules/encoders.py +44 -101
- ultralytics/models/sam/modules/memory_attention.py +16 -30
- ultralytics/models/sam/modules/sam.py +200 -73
- ultralytics/models/sam/modules/tiny_encoder.py +64 -83
- ultralytics/models/sam/modules/transformer.py +18 -28
- ultralytics/models/sam/modules/utils.py +174 -50
- ultralytics/models/sam/predict.py +2248 -350
- ultralytics/models/sam/sam3/__init__.py +3 -0
- ultralytics/models/sam/sam3/decoder.py +546 -0
- ultralytics/models/sam/sam3/encoder.py +529 -0
- ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
- ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
- ultralytics/models/sam/sam3/model_misc.py +199 -0
- ultralytics/models/sam/sam3/necks.py +129 -0
- ultralytics/models/sam/sam3/sam3_image.py +339 -0
- ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
- ultralytics/models/sam/sam3/vitdet.py +547 -0
- ultralytics/models/sam/sam3/vl_combiner.py +160 -0
- ultralytics/models/utils/loss.py +14 -26
- ultralytics/models/utils/ops.py +13 -17
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +9 -12
- ultralytics/models/yolo/classify/train.py +11 -32
- ultralytics/models/yolo/classify/val.py +29 -28
- ultralytics/models/yolo/detect/predict.py +7 -10
- ultralytics/models/yolo/detect/train.py +11 -20
- ultralytics/models/yolo/detect/val.py +70 -58
- ultralytics/models/yolo/model.py +36 -53
- ultralytics/models/yolo/obb/predict.py +5 -14
- ultralytics/models/yolo/obb/train.py +11 -14
- ultralytics/models/yolo/obb/val.py +39 -36
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +6 -21
- ultralytics/models/yolo/pose/train.py +10 -15
- ultralytics/models/yolo/pose/val.py +38 -57
- ultralytics/models/yolo/segment/predict.py +14 -18
- ultralytics/models/yolo/segment/train.py +3 -6
- ultralytics/models/yolo/segment/val.py +93 -45
- ultralytics/models/yolo/world/train.py +8 -14
- ultralytics/models/yolo/world/train_world.py +11 -34
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +16 -23
- ultralytics/models/yolo/yoloe/train.py +30 -43
- ultralytics/models/yolo/yoloe/train_seg.py +5 -10
- ultralytics/models/yolo/yoloe/val.py +15 -20
- ultralytics/nn/__init__.py +7 -7
- ultralytics/nn/autobackend.py +145 -77
- ultralytics/nn/modules/__init__.py +60 -60
- ultralytics/nn/modules/activation.py +4 -6
- ultralytics/nn/modules/block.py +132 -216
- ultralytics/nn/modules/conv.py +52 -97
- ultralytics/nn/modules/head.py +50 -103
- ultralytics/nn/modules/transformer.py +76 -88
- ultralytics/nn/modules/utils.py +16 -21
- ultralytics/nn/tasks.py +94 -154
- ultralytics/nn/text_model.py +40 -67
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +11 -17
- ultralytics/solutions/analytics.py +15 -16
- ultralytics/solutions/config.py +5 -6
- ultralytics/solutions/distance_calculation.py +10 -13
- ultralytics/solutions/heatmap.py +7 -13
- ultralytics/solutions/instance_segmentation.py +5 -8
- ultralytics/solutions/object_blurrer.py +7 -10
- ultralytics/solutions/object_counter.py +12 -19
- ultralytics/solutions/object_cropper.py +8 -14
- ultralytics/solutions/parking_management.py +33 -31
- ultralytics/solutions/queue_management.py +10 -12
- ultralytics/solutions/region_counter.py +9 -12
- ultralytics/solutions/security_alarm.py +15 -20
- ultralytics/solutions/similarity_search.py +10 -15
- ultralytics/solutions/solutions.py +75 -74
- ultralytics/solutions/speed_estimation.py +7 -10
- ultralytics/solutions/streamlit_inference.py +2 -4
- ultralytics/solutions/templates/similarity-search.html +7 -18
- ultralytics/solutions/trackzone.py +7 -10
- ultralytics/solutions/vision_eye.py +5 -8
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +3 -5
- ultralytics/trackers/bot_sort.py +10 -27
- ultralytics/trackers/byte_tracker.py +14 -30
- ultralytics/trackers/track.py +3 -6
- ultralytics/trackers/utils/gmc.py +11 -22
- ultralytics/trackers/utils/kalman_filter.py +37 -48
- ultralytics/trackers/utils/matching.py +12 -15
- ultralytics/utils/__init__.py +116 -116
- ultralytics/utils/autobatch.py +2 -4
- ultralytics/utils/autodevice.py +17 -18
- ultralytics/utils/benchmarks.py +32 -46
- ultralytics/utils/callbacks/base.py +8 -10
- ultralytics/utils/callbacks/clearml.py +5 -13
- ultralytics/utils/callbacks/comet.py +32 -46
- ultralytics/utils/callbacks/dvc.py +13 -18
- ultralytics/utils/callbacks/mlflow.py +4 -5
- ultralytics/utils/callbacks/neptune.py +7 -15
- ultralytics/utils/callbacks/platform.py +314 -38
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +23 -31
- ultralytics/utils/callbacks/wb.py +10 -13
- ultralytics/utils/checks.py +99 -76
- ultralytics/utils/cpu.py +3 -8
- ultralytics/utils/dist.py +8 -12
- ultralytics/utils/downloads.py +20 -30
- ultralytics/utils/errors.py +6 -14
- ultralytics/utils/events.py +2 -4
- ultralytics/utils/export/__init__.py +4 -236
- ultralytics/utils/export/engine.py +237 -0
- ultralytics/utils/export/imx.py +91 -55
- ultralytics/utils/export/tensorflow.py +231 -0
- ultralytics/utils/files.py +24 -28
- ultralytics/utils/git.py +9 -11
- ultralytics/utils/instance.py +30 -51
- ultralytics/utils/logger.py +212 -114
- ultralytics/utils/loss.py +14 -22
- ultralytics/utils/metrics.py +126 -155
- ultralytics/utils/nms.py +13 -16
- ultralytics/utils/ops.py +107 -165
- ultralytics/utils/patches.py +33 -21
- ultralytics/utils/plotting.py +72 -80
- ultralytics/utils/tal.py +25 -39
- ultralytics/utils/torch_utils.py +52 -78
- ultralytics/utils/tqdm.py +20 -20
- ultralytics/utils/triton.py +13 -19
- ultralytics/utils/tuner.py +17 -5
- dgenerate_ultralytics_headless-8.3.214.dist-info/RECORD +0 -283
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/top_level.txt +0 -0
ultralytics/utils/autodevice.py
CHANGED

@@ -9,24 +9,23 @@ from ultralytics.utils.checks import check_requirements


 class GPUInfo:
-    """
-    Manages NVIDIA GPU information via pynvml with robust error handling.
+    """Manages NVIDIA GPU information via pynvml with robust error handling.

-    Provides methods to query detailed GPU statistics (utilization, memory, temp, power) and select the most idle
-    GPUs based on configurable criteria. It safely handles the absence or initialization failure of the pynvml
-    library by logging warnings and disabling related features, preventing application crashes.
+    Provides methods to query detailed GPU statistics (utilization, memory, temp, power) and select the most idle GPUs
+    based on configurable criteria. It safely handles the absence or initialization failure of the pynvml library by
+    logging warnings and disabling related features, preventing application crashes.

     Includes fallback logic using `torch.cuda` for basic device counting if NVML is unavailable during GPU
     selection. Manages NVML initialization and shutdown internally.

     Attributes:
         pynvml (module | None): The `pynvml` module if successfully imported and initialized, otherwise `None`.
-        nvml_available (bool): Indicates if `pynvml` is ready for use. True if import and `nvmlInit()` succeeded,
-            False otherwise.
-        gpu_stats (list[dict[str, Any]]): A list of dictionaries, each holding stats for one GPU, populated on
-            initialization and by `refresh_stats()`. Keys include: 'index', 'name', 'utilization' (%),
-            'memory_used' (MiB), 'memory_total' (MiB), 'memory_free' (MiB), 'temperature' (C), 'power_draw' (W),
-            'power_limit' (W or 'N/A'). Empty if NVML is unavailable or queries fail.
+        nvml_available (bool): Indicates if `pynvml` is ready for use. True if import and `nvmlInit()` succeeded, False
+            otherwise.
+        gpu_stats (list[dict[str, Any]]): A list of dictionaries, each holding stats for one GPU, populated on
+            initialization and by `refresh_stats()`. Keys include: 'index', 'name', 'utilization' (%), 'memory_used' (MiB),
+            'memory_total' (MiB), 'memory_free' (MiB), 'temperature' (C), 'power_draw' (W), 'power_limit' (W or 'N/A').
+            Empty if NVML is unavailable or queries fail.

     Methods:
         refresh_stats: Refresh the internal gpu_stats list by querying NVML.

@@ -137,8 +136,7 @@ class GPUInfo:
     def select_idle_gpu(
         self, count: int = 1, min_memory_fraction: float = 0, min_util_fraction: float = 0
     ) -> list[int]:
-        """
-        Select the most idle GPUs based on utilization and free memory.
+        """Select the most idle GPUs based on utilization and free memory.

         Args:
             count (int): The number of idle GPUs to select.

@@ -154,9 +152,10 @@
         """
         assert min_memory_fraction <= 1.0, f"min_memory_fraction must be <= 1.0, got {min_memory_fraction}"
         assert min_util_fraction <= 1.0, f"min_util_fraction must be <= 1.0, got {min_util_fraction}"
-        LOGGER.info(
-            f"Searching for {count} idle GPUs with free memory >= {min_memory_fraction * 100:.1f}% and free utilization >= {min_util_fraction * 100:.1f}%..."
+        criteria = (
+            f"free memory >= {min_memory_fraction * 100:.1f}% and free utilization >= {min_util_fraction * 100:.1f}%"
         )
+        LOGGER.info(f"Searching for {count} idle GPUs with {criteria}...")

         if count <= 0:
             return []

@@ -179,11 +178,11 @@
         selected = [gpu["index"] for gpu in eligible_gpus[:count]]

         if selected:
+            if len(selected) < count:
+                LOGGER.warning(f"Requested {count} GPUs but only {len(selected)} met the idle criteria.")
             LOGGER.info(f"Selected idle CUDA devices {selected}")
         else:
-            LOGGER.warning(
-                f"No GPUs met criteria (Free Mem >= {min_memory_fraction * 100:.1f}% and Free Util >= {min_util_fraction * 100:.1f}%)."
-            )
+            LOGGER.warning(f"No GPUs met criteria ({criteria}).")

         return selected
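Net effect of the autodevice changes: the idle-search criteria string is now built once and shared by the info and warning messages, and a partial selection triggers an explicit warning instead of silently returning fewer devices. A minimal usage sketch based on the API shown above (thresholds and printed output are illustrative):

    from ultralytics.utils.autodevice import GPUInfo

    gpu_info = GPUInfo()  # NVML failures are logged and disable stats rather than crash
    gpu_info.refresh_stats()  # re-query utilization/memory for all visible GPUs
    # Request two GPUs with >=20% free memory and >=80% free utilization; with this
    # release, a warning is logged when fewer than `count` GPUs meet the criteria.
    idle = gpu_info.select_idle_gpu(count=2, min_memory_fraction=0.2, min_util_fraction=0.8)
    print(idle)  # e.g. [0, 1], or [] when no GPU qualifies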
ultralytics/utils/benchmarks.py
CHANGED

@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """
-Benchmark a YOLO model formats for speed and accuracy.
+Benchmark YOLO model formats for speed and accuracy.

 Usage:
     from ultralytics.utils.benchmarks import ProfileModels, benchmark

@@ -25,6 +25,7 @@ MNN | `mnn` | yolo11n.mnn
 NCNN | `ncnn` | yolo11n_ncnn_model/
 IMX | `imx` | yolo11n_imx_model/
 RKNN | `rknn` | yolo11n_rknn_model/
+ExecuTorch | `executorch` | yolo11n_executorch_model/
 """

 from __future__ import annotations

@@ -62,8 +63,7 @@ def benchmark(
     format="",
     **kwargs,
 ):
-    """
-    Benchmark a YOLO model across different formats for speed and accuracy.
+    """Benchmark a YOLO model across different formats for speed and accuracy.

     Args:
         model (str | Path): Path to the model file or directory.

@@ -78,8 +78,8 @@
         **kwargs (Any): Additional keyword arguments for exporter.

     Returns:
-        (polars.DataFrame): A Polars DataFrame with benchmark results for each format, including file size,
-            metric, and inference time.
+        (polars.DataFrame): A Polars DataFrame with benchmark results for each format, including file size, metric, and
+            inference time.

     Examples:
         Benchmark a YOLO model with default settings:

@@ -144,13 +144,18 @@
        if format == "imx":
            assert not is_end2end
            assert not isinstance(model, YOLOWorld), "YOLOWorldv2 IMX exports not supported"
-           assert model.task == "detect", "IMX only supported for detection task"
+           assert model.task in {"detect", "classify", "pose"}, (
+               "IMX export is only supported for detection, classification and pose estimation tasks"
+           )
            assert "C2f" in model.__str__(), "IMX only supported for YOLOv8n and YOLO11n"
        if format == "rknn":
            assert not isinstance(model, YOLOWorld), "YOLOWorldv2 RKNN exports not supported yet"
            assert not is_end2end, "End-to-end models not supported by RKNN yet"
            assert LINUX, "RKNN only supported on Linux"
            assert not is_rockchip(), "RKNN Inference only supported on Rockchip devices"
+       if format == "executorch":
+           assert not isinstance(model, YOLOWorld), "YOLOWorldv2 ExecuTorch exports not supported yet"
+           assert not is_end2end, "End-to-end models not supported by ExecuTorch yet"
        if "cpu" in device.type:
            assert cpu, "inference not supported on CPU"
        if "cuda" in device.type:

@@ -170,6 +175,7 @@

        # Predict
        assert model.task != "pose" or format != "pb", "GraphDef Pose inference is not supported"
+       assert model.task != "pose" or format != "executorch", "ExecuTorch Pose inference is not supported"
        assert format not in {"edgetpu", "tfjs"}, "inference not supported"
        assert format != "coreml" or platform.system() == "Darwin", "inference only supported on macOS>=10.13"
        if format == "ncnn":

@@ -220,8 +226,7 @@


 class RF100Benchmark:
-    """
-    Benchmark YOLO model performance across various formats for speed and accuracy.
+    """Benchmark YOLO model performance across various formats for speed and accuracy.

     This class provides functionality to benchmark YOLO models on the RF100 dataset collection.

@@ -246,8 +251,7 @@ class RF100Benchmark:
         self.val_metrics = ["class", "images", "targets", "precision", "recall", "map50", "map95"]

     def set_key(self, api_key: str):
-        """
-        Set Roboflow API key for processing.
+        """Set Roboflow API key for processing.

         Args:
             api_key (str): The API key.

@@ -263,8 +267,7 @@ class RF100Benchmark:
         self.rf = Roboflow(api_key=api_key)

     def parse_dataset(self, ds_link_txt: str = "datasets_links.txt"):
-        """
-        Parse dataset links and download datasets.
+        """Parse dataset links and download datasets.

         Args:
             ds_link_txt (str): Path to the file containing dataset links.

@@ -286,7 +289,7 @@ class RF100Benchmark:
         with open(ds_link_txt, encoding="utf-8") as file:
             for line in file:
                 try:
-                    _, url, workspace, project, version = re.split("/+", line.strip())
+                    _, _url, workspace, project, version = re.split("/+", line.strip())
                     self.ds_names.append(project)
                     proj_version = f"{project}-{version}"
                     if not Path(proj_version).exists():

@@ -308,8 +311,7 @@ class RF100Benchmark:
             YAML.dump(yaml_data, path)

     def evaluate(self, yaml_path: str, val_log_file: str, eval_log_file: str, list_ind: int):
-        """
-        Evaluate model performance on validation results.
+        """Evaluate model performance on validation results.

         Args:
             yaml_path (str): Path to the YAML configuration file.

@@ -357,7 +359,7 @@ class RF100Benchmark:
                 map_val = lst["map50"]
         else:
             LOGGER.info("Single dict found")
-            map_val = [res["map50"] for res in eval_lines][0]
+            map_val = next(res["map50"] for res in eval_lines)

         with open(eval_log_file, "a", encoding="utf-8") as f:
             f.write(f"{self.ds_names[list_ind]}: {map_val}\n")

@@ -366,8 +368,7 @@


 class ProfileModels:
-    """
-    ProfileModels class for profiling different models on ONNX and TensorRT.
+    """ProfileModels class for profiling different models on ONNX and TensorRT.

     This class profiles the performance of different models, returning results such as model speed and FLOPs.

@@ -410,8 +411,7 @@ class ProfileModels:
         trt: bool = True,
         device: torch.device | str | None = None,
     ):
-        """
-        Initialize the ProfileModels class for profiling models.
+        """Initialize the ProfileModels class for profiling models.

         Args:
             paths (list[str]): List of paths of the models to be profiled.

@@ -425,12 +425,6 @@ class ProfileModels:

         Notes:
             FP16 'half' argument option removed for ONNX as slower on CPU than FP32.
-
-        Examples:
-            Initialize and profile models
-            >>> from ultralytics.utils.benchmarks import ProfileModels
-            >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"], imgsz=640)
-            >>> profiler.run()
         """
         self.paths = paths
         self.num_timed_runs = num_timed_runs

@@ -442,8 +436,7 @@ class ProfileModels:
         self.device = device if isinstance(device, torch.device) else select_device(device)

     def run(self):
-        """
-        Profile YOLO models for speed and accuracy across various formats including ONNX and TensorRT.
+        """Profile YOLO models for speed and accuracy across various formats including ONNX and TensorRT.

         Returns:
             (list[dict]): List of dictionaries containing profiling results for each model.

@@ -497,8 +490,7 @@ class ProfileModels:
         return output

     def get_files(self):
-        """
-        Return a list of paths for all relevant model files given by the user.
+        """Return a list of paths for all relevant model files given by the user.

         Returns:
             (list[Path]): List of Path objects for the model files.

@@ -524,8 +516,7 @@ class ProfileModels:

     @staticmethod
     def iterative_sigma_clipping(data: np.ndarray, sigma: float = 2, max_iters: int = 3):
-        """
-        Apply iterative sigma clipping to data to remove outliers.
+        """Apply iterative sigma clipping to data to remove outliers.

         Args:
             data (np.ndarray): Input data array.

@@ -545,8 +536,7 @@ class ProfileModels:
         return data

     def profile_tensorrt_model(self, engine_file: str, eps: float = 1e-3):
-        """
-        Profile YOLO model performance with TensorRT, measuring average run time and standard deviation.
+        """Profile YOLO model performance with TensorRT, measuring average run time and standard deviation.

         Args:
             engine_file (str): Path to the TensorRT engine file.

@@ -589,8 +579,7 @@ class ProfileModels:
         return not all(isinstance(dim, int) and dim >= 0 for dim in tensor_shape)

     def profile_onnx_model(self, onnx_file: str, eps: float = 1e-3):
-        """
-        Profile an ONNX model, measuring average inference time and standard deviation across multiple runs.
+        """Profile an ONNX model, measuring average inference time and standard deviation across multiple runs.

         Args:
             onnx_file (str): Path to the ONNX model file.

@@ -609,7 +598,7 @@ class ProfileModels:
         sess_options.intra_op_num_threads = 8  # Limit the number of threads
         sess = ort.InferenceSession(onnx_file, sess_options, providers=["CPUExecutionProvider"])

-        input_data_dict =
+        input_data_dict = {}
         for input_tensor in sess.get_inputs():
             input_type = input_tensor.type
             if self.check_dynamic(input_tensor.shape):

@@ -637,7 +626,7 @@ class ProfileModels:

             input_data = np.random.rand(*input_shape).astype(input_dtype)
             input_name = input_tensor.name
-            input_data_dict
+            input_data_dict[input_name] = input_data

         output_name = sess.get_outputs()[0].name

@@ -669,8 +658,7 @@ class ProfileModels:
         t_engine: tuple[float, float],
         model_info: tuple[float, float, float, float],
     ):
-        """
-        Generate a table row string with model performance metrics.
+        """Generate a table row string with model performance metrics.

         Args:
             model_name (str): Name of the model.

@@ -681,7 +669,7 @@ class ProfileModels:
         Returns:
             (str): Formatted table row string with model metrics.
         """
-        layers, params, gradients, flops = model_info
+        _layers, params, _gradients, flops = model_info
         return (
             f"| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.1f}±{t_onnx[1]:.1f} ms | {t_engine[0]:.1f}±"
             f"{t_engine[1]:.1f} ms | {params / 1e6:.1f} | {flops:.1f} |"

@@ -694,8 +682,7 @@ class ProfileModels:
         t_engine: tuple[float, float],
         model_info: tuple[float, float, float, float],
     ):
-        """
-        Generate a dictionary of profiling results.
+        """Generate a dictionary of profiling results.

         Args:
             model_name (str): Name of the model.

@@ -706,7 +693,7 @@ class ProfileModels:
         Returns:
             (dict): Dictionary containing profiling results.
         """
-        layers, params, gradients, flops = model_info
+        _layers, params, _gradients, flops = model_info
         return {
             "model/name": model_name,
             "model/parameters": params,

@@ -717,8 +704,7 @@ class ProfileModels:

     @staticmethod
     def print_table(table_rows: list[str]):
-        """
-        Print a formatted table of model profiling results.
+        """Print a formatted table of model profiling results.

         Args:
             table_rows (list[str]): List of formatted table row strings.
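The substantive benchmarks.py changes add ExecuTorch as a benchmarkable format, guarded by the same family of assertions used for RKNN; the rest are docstring merges and small lint-style rewrites. A hedged sketch of exercising the new format through the public benchmark API (the model path and image size below are illustrative):

    from ultralytics.utils.benchmarks import benchmark

    # "executorch" is newly listed in the format table above; the added assertions
    # reject YOLOWorld, end-to-end, and (at inference time) pose models for this format.
    df = benchmark(model="yolo11n.pt", imgsz=160, device="cpu", format="executorch")
    print(df)  # polars.DataFrame with file size, metric, and inference time per format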
ultralytics/utils/callbacks/base.py
CHANGED

@@ -175,13 +175,12 @@ default_callbacks = {


 def get_default_callbacks():
-    """
-    Get the default callbacks for Ultralytics training, validation, prediction, and export processes.
+    """Get the default callbacks for Ultralytics training, validation, prediction, and export processes.

     Returns:
         (dict): Dictionary of default callbacks for various training events. Each key represents an event during the
-            training process, and the corresponding value is a list of callback functions executed when that
-            occurs.
+            training process, and the corresponding value is a list of callback functions executed when that
+            event occurs.

     Examples:
         >>> callbacks = get_default_callbacks()

@@ -192,17 +191,16 @@ def get_default_callbacks():


 def add_integration_callbacks(instance):
-    """
-    Add integration callbacks to the instance's callbacks dictionary.
+    """Add integration callbacks to the instance's callbacks dictionary.

     This function loads and adds various integration callbacks to the provided instance. The specific callbacks added
     depend on the type of instance provided. All instances receive HUB callbacks, while Trainer instances also receive
-    additional callbacks for various integrations like ClearML, Comet, DVC, MLflow, Neptune, Ray Tune, TensorBoard,
-    and Weights & Biases.
+    additional callbacks for various integrations like ClearML, Comet, DVC, MLflow, Neptune, Ray Tune, TensorBoard, and
+    Weights & Biases.

     Args:
-        instance (Trainer | Predictor | Validator | Exporter): The object instance to which callbacks will be added.
-            The type of instance determines which callbacks are loaded.
+        instance (Trainer | Predictor | Validator | Exporter): The object instance to which callbacks will be added. The
+            type of instance determines which callbacks are loaded.

     Examples:
         >>> from ultralytics.engine.trainer import BaseTrainer
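Both hunks in callbacks/base.py only merge and re-wrap docstrings; the callback mechanics are unchanged. For orientation, a short sketch of the pattern those docstrings describe (the custom hook below is illustrative):

    from ultralytics.utils.callbacks.base import get_default_callbacks

    callbacks = get_default_callbacks()  # {event name: [callback functions], ...}

    def announce(trainer):  # each callback receives the triggering instance
        print(f"training run writing to {trainer.save_dir}")

    callbacks["on_train_start"].append(announce)  # runs when that event occurs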
ultralytics/utils/callbacks/clearml.py
CHANGED

@@ -15,8 +15,7 @@ except (ImportError, AssertionError):


 def _log_debug_samples(files, title: str = "Debug Samples") -> None:
-    """
-    Log files (images) as debug samples in the ClearML task.
+    """Log files (images) as debug samples in the ClearML task.

     Args:
         files (list[Path]): A list of file paths in PosixPath format.

@@ -35,8 +34,7 @@ def _log_debug_samples(files, title: str = "Debug Samples") -> None:


 def _log_plot(title: str, plot_path: str) -> None:
-    """
-    Log an image as a plot in the plot section of ClearML.
+    """Log an image as a plot in the plot section of ClearML.

     Args:
         title (str): The title of the plot.

@@ -125,15 +123,9 @@ def on_train_end(trainer) -> None:
     """Log final model and training results on training completion."""
     if task := Task.current_task():
         # Log final results, confusion matrix and PR plots
-        files = [
-            "results.png",
-            "confusion_matrix.png",
-            "confusion_matrix_normalized.png",
-            *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
-        ]
-        files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter existing files
-        for f in files:
-            _log_plot(title=f.stem, plot_path=f)
+        for f in [*trainer.plots.keys(), *trainer.validator.plots.keys()]:
+            if "batch" not in f.name:
+                _log_plot(title=f.stem, plot_path=f)
         # Report final metrics
         for k, v in trainer.validator.metrics.results_dict.items():
             task.get_logger().report_single_value(k, v)
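The on_train_end rewrite is the behavioral change in clearml.py: instead of probing the save directory for a hard-coded list of result PNGs, it now walks the plot registries kept by the trainer and validator and skips per-batch mosaics. A small sketch of just that filter in isolation (the paths are illustrative; the registry keys are Path objects, hence the f.name/f.stem calls):

    from pathlib import Path

    plots = [Path("results.png"), Path("train_batch0.jpg"), Path("PR_curve.png")]
    for f in plots:  # same test as the new callback body
        if "batch" not in f.name:  # drops train_batch*/val_batch* images
            print(f.stem)  # "results" and "PR_curve" would be logged as ClearML plots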
ultralytics/utils/callbacks/comet.py
CHANGED

@@ -88,8 +88,7 @@ def _should_log_image_predictions() -> bool:


 def _resume_or_create_experiment(args: SimpleNamespace) -> None:
-    """
-    Resume CometML experiment or create a new experiment based on args.
+    """Resume CometML experiment or create a new experiment based on args.

     Ensures that the experiment object is only created in a single process during distributed training.

@@ -124,8 +123,7 @@ def _resume_or_create_experiment(args: SimpleNamespace) -> None:


 def _fetch_trainer_metadata(trainer) -> dict:
-    """
-    Return metadata for YOLO training including epoch and asset saving status.
+    """Return metadata for YOLO training including epoch and asset saving status.

     Args:
         trainer (ultralytics.engine.trainer.BaseTrainer): The YOLO trainer object containing training state and config.

@@ -150,11 +148,10 @@ def _fetch_trainer_metadata(trainer) -> dict:
 def _scale_bounding_box_to_original_image_shape(
     box, resized_image_shape, original_image_shape, ratio_pad
 ) -> list[float]:
-    """
-    Scale bounding box from resized image coordinates to original image coordinates.
+    """Scale bounding box from resized image coordinates to original image coordinates.

-    YOLO resizes images during training and the label values are normalized based on this resized shape.
-    This function rescales the bounding box labels to the original image shape.
+    YOLO resizes images during training and the label values are normalized based on this resized shape. This function
+    rescales the bounding box labels to the original image shape.

     Args:
         box (torch.Tensor): Bounding box in normalized xywh format.

@@ -181,8 +178,7 @@ def _scale_bounding_box_to_original_image_shape(


 def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None) -> dict | None:
-    """
-    Format ground truth annotations for object detection.
+    """Format ground truth annotations for object detection.

     This function processes ground truth annotations from a batch of images for object detection tasks. It extracts
     bounding boxes, class labels, and other metadata for a specific image in the batch, and formats them for

@@ -205,7 +201,7 @@ def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None) -> dict | None:
             - 'boxes': List of box coordinates [x, y, width, height]
             - 'label': Label string with format "gt_{class_name}"
             - 'score': Confidence score (always 1.0, scaled by _scale_confidence_score)
-            Returns None if no bounding boxes are found for the image.
+        Returns None if no bounding boxes are found for the image.
     """
     indices = batch["batch_idx"] == img_idx
     bboxes = batch["bboxes"][indices]

@@ -236,8 +232,7 @@ def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None) -> dict | None:


 def _format_prediction_annotations(image_path, metadata, class_label_map=None, class_map=None) -> dict | None:
-    """
-    Format YOLO predictions for object detection visualization.
+    """Format YOLO predictions for object detection visualization.

     Args:
         image_path (Path): Path to the image file.

@@ -261,7 +256,7 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, class_map=None) -> dict | None:
         class_label_map = {class_map[k]: v for k, v in class_label_map.items()}
     try:
         # import pycotools utilities to decompress annotations for various tasks, e.g. segmentation
-        from faster_coco_eval.core.mask import decode  # noqa
+        from faster_coco_eval.core.mask import decode
     except ImportError:
         decode = None

@@ -289,8 +284,7 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, class_map=None) -> dict | None:


 def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) -> list[list[Any]] | None:
-    """
-    Extract segmentation annotation from compressed segmentations as list of polygons.
+    """Extract segmentation annotation from compressed segmentations as list of polygons.

     Args:
         segmentation_raw (str): Raw segmentation data in compressed format.

@@ -310,8 +304,7 @@ def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) -> list[list[Any]] | None:


 def _fetch_annotations(img_idx, image_path, batch, prediction_metadata_map, class_label_map, class_map) -> list | None:
-    """
-    Join the ground truth and prediction annotations if they exist.
+    """Join the ground truth and prediction annotations if they exist.

     Args:
         img_idx (int): Index of the image in the batch.

@@ -350,25 +343,24 @@ def _create_prediction_metadata_map(model_predictions) -> dict:
 def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) -> None:
     """Log the confusion matrix to Comet experiment."""
     conf_mat = trainer.validator.confusion_matrix.matrix
-    names = list(trainer.data["names"].values())
+    names = [*list(trainer.data["names"].values()), "background"]
     experiment.log_confusion_matrix(
         matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
     )


 def _log_images(experiment, image_paths, curr_step: int | None, annotations=None) -> None:
-    """
-    Log images to the experiment with optional annotations.
+    """Log images to the experiment with optional annotations.

-    This function logs images to a Comet ML experiment, optionally including annotation data for visualization
-    such as bounding boxes or segmentation masks.
+    This function logs images to a Comet ML experiment, optionally including annotation data for visualization such as
+    bounding boxes or segmentation masks.

     Args:
         experiment (comet_ml.CometExperiment): The Comet ML experiment to log images to.
         image_paths (list[Path]): List of paths to images that will be logged.
         curr_step (int): Current training step/iteration for tracking in the experiment timeline.
-        annotations (list[list[dict]], optional): Nested list of annotation dictionaries for each image. Each
-            annotation contains visualization data like bounding boxes, labels, and confidence scores.
+        annotations (list[list[dict]], optional): Nested list of annotation dictionaries for each image. Each annotation
+            contains visualization data like bounding boxes, labels, and confidence scores.
     """
     if annotations:
         for image_path, annotation in zip(image_paths, annotations):

@@ -380,11 +372,10 @@ def _log_images(experiment, image_paths, curr_step: int | None, annotations=None) -> None:


 def _log_image_predictions(experiment, validator, curr_step) -> None:
-    """
-    Log predicted boxes for a single image during training.
+    """Log predicted boxes for a single image during training.

-    This function logs image predictions to a Comet ML experiment during model validation. It processes
-    validation data and formats both ground truth and prediction annotations for visualization in the Comet
+    This function logs image predictions to a Comet ML experiment during model validation. It processes validation data
+    and formats both ground truth and prediction annotations for visualization in the Comet
     dashboard. The function respects configured limits on the number of images to log.

     Args:

@@ -443,12 +434,11 @@ def _log_image_predictions(experiment, validator, curr_step) -> None:


 def _log_plots(experiment, trainer) -> None:
-    """
-    Log evaluation plots and label plots for the experiment.
+    """Log evaluation plots and label plots for the experiment.

     This function logs various evaluation plots and confusion matrices to the experiment tracking system. It handles
-    different types of metrics (SegmentMetrics, PoseMetrics, DetMetrics, OBBMetrics) and logs the appropriate plots
-    for each type.
+    different types of metrics (SegmentMetrics, PoseMetrics, DetMetrics, OBBMetrics) and logs the appropriate plots for
+    each type.

     Args:
         experiment (comet_ml.CometExperiment): The Comet ML experiment to log plots to.

@@ -503,8 +493,7 @@ def _log_image_batches(experiment, trainer, curr_step: int) -> None:


 def _log_asset(experiment, asset_path) -> None:
-    """
-    Logs a specific asset file to the given experiment.
+    """Logs a specific asset file to the given experiment.

     This function facilitates logging an asset, such as a file, to the provided
     experiment. It enables integration with experiment tracking platforms.

@@ -517,11 +506,9 @@ def _log_asset(experiment, asset_path) -> None:


 def _log_table(experiment, table_path) -> None:
-    """
-    Logs a table to the provided experiment.
+    """Logs a table to the provided experiment.

-    This function is used to log a table file to the given experiment. The table
-    is identified by its file path.
+    This function is used to log a table file to the given experiment. The table is identified by its file path.

     Args:
         experiment (comet_ml.CometExperiment): The experiment object where the table file will be logged.

@@ -549,16 +536,15 @@ def on_train_epoch_end(trainer) -> None:


 def on_fit_epoch_end(trainer) -> None:
-    """
-    Log model assets at the end of each epoch during training.
+    """Log model assets at the end of each epoch during training.

-    This function is called at the end of each training epoch to log metrics, learning rates, and model information
-    to a Comet ML experiment. It also logs model assets, confusion matrices, and image predictions based on
-    configuration settings.
+    This function is called at the end of each training epoch to log metrics, learning rates, and model information to a
+    Comet ML experiment. It also logs model assets, confusion matrices, and image predictions based on configuration
+    settings.

     The function retrieves the current Comet ML experiment and logs various training metrics. If it's the first epoch,
-    it also logs model information. On specified save intervals, it logs the model, confusion matrix (if enabled),
-    and image predictions (if enabled).
+    it also logs model information. On specified save intervals, it logs the model, confusion matrix (if enabled), and
+    image predictions (if enabled).

     Args:
         trainer (BaseTrainer): The YOLO trainer object containing training state, metrics, and configuration.