dgenerate-ultralytics-headless 8.3.214__py3-none-any.whl → 8.4.7__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. The information is provided for informational purposes only.
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/METADATA +64 -74
- dgenerate_ultralytics_headless-8.4.7.dist-info/RECORD +311 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/WHEEL +1 -1
- tests/__init__.py +7 -9
- tests/conftest.py +8 -15
- tests/test_cli.py +1 -1
- tests/test_cuda.py +13 -10
- tests/test_engine.py +9 -9
- tests/test_exports.py +65 -13
- tests/test_integrations.py +13 -13
- tests/test_python.py +125 -69
- tests/test_solutions.py +161 -152
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +86 -92
- ultralytics/cfg/datasets/Argoverse.yaml +7 -6
- ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
- ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
- ultralytics/cfg/datasets/ImageNet.yaml +1 -1
- ultralytics/cfg/datasets/TT100K.yaml +346 -0
- ultralytics/cfg/datasets/VOC.yaml +15 -16
- ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
- ultralytics/cfg/datasets/coco-pose.yaml +21 -0
- ultralytics/cfg/datasets/coco12-formats.yaml +101 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
- ultralytics/cfg/datasets/dog-pose.yaml +28 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
- ultralytics/cfg/datasets/kitti.yaml +27 -0
- ultralytics/cfg/datasets/lvis.yaml +5 -5
- ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
- ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
- ultralytics/cfg/datasets/xView.yaml +16 -16
- ultralytics/cfg/default.yaml +4 -2
- ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
- ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
- ultralytics/cfg/models/26/yolo26-cls.yaml +33 -0
- ultralytics/cfg/models/26/yolo26-obb.yaml +52 -0
- ultralytics/cfg/models/26/yolo26-p2.yaml +60 -0
- ultralytics/cfg/models/26/yolo26-p6.yaml +62 -0
- ultralytics/cfg/models/26/yolo26-pose.yaml +53 -0
- ultralytics/cfg/models/26/yolo26-seg.yaml +52 -0
- ultralytics/cfg/models/26/yolo26.yaml +52 -0
- ultralytics/cfg/models/26/yoloe-26-seg.yaml +53 -0
- ultralytics/cfg/models/26/yoloe-26.yaml +53 -0
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
- ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
- ultralytics/cfg/models/v6/yolov6.yaml +1 -1
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
- ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
- ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +5 -6
- ultralytics/data/augment.py +300 -475
- ultralytics/data/base.py +18 -26
- ultralytics/data/build.py +147 -25
- ultralytics/data/converter.py +108 -87
- ultralytics/data/dataset.py +47 -75
- ultralytics/data/loaders.py +42 -49
- ultralytics/data/split.py +5 -6
- ultralytics/data/split_dota.py +8 -15
- ultralytics/data/utils.py +36 -45
- ultralytics/engine/exporter.py +351 -263
- ultralytics/engine/model.py +186 -225
- ultralytics/engine/predictor.py +45 -54
- ultralytics/engine/results.py +198 -325
- ultralytics/engine/trainer.py +165 -106
- ultralytics/engine/tuner.py +41 -43
- ultralytics/engine/validator.py +55 -38
- ultralytics/hub/__init__.py +16 -19
- ultralytics/hub/auth.py +6 -12
- ultralytics/hub/google/__init__.py +7 -10
- ultralytics/hub/session.py +15 -25
- ultralytics/hub/utils.py +5 -8
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +8 -10
- ultralytics/models/fastsam/predict.py +18 -30
- ultralytics/models/fastsam/utils.py +1 -2
- ultralytics/models/fastsam/val.py +5 -7
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +5 -8
- ultralytics/models/nas/predict.py +7 -9
- ultralytics/models/nas/val.py +1 -2
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +5 -8
- ultralytics/models/rtdetr/predict.py +15 -19
- ultralytics/models/rtdetr/train.py +10 -13
- ultralytics/models/rtdetr/val.py +21 -23
- ultralytics/models/sam/__init__.py +15 -2
- ultralytics/models/sam/amg.py +14 -20
- ultralytics/models/sam/build.py +26 -19
- ultralytics/models/sam/build_sam3.py +377 -0
- ultralytics/models/sam/model.py +29 -32
- ultralytics/models/sam/modules/blocks.py +83 -144
- ultralytics/models/sam/modules/decoders.py +19 -37
- ultralytics/models/sam/modules/encoders.py +44 -101
- ultralytics/models/sam/modules/memory_attention.py +16 -30
- ultralytics/models/sam/modules/sam.py +200 -73
- ultralytics/models/sam/modules/tiny_encoder.py +64 -83
- ultralytics/models/sam/modules/transformer.py +18 -28
- ultralytics/models/sam/modules/utils.py +174 -50
- ultralytics/models/sam/predict.py +2248 -350
- ultralytics/models/sam/sam3/__init__.py +3 -0
- ultralytics/models/sam/sam3/decoder.py +546 -0
- ultralytics/models/sam/sam3/encoder.py +529 -0
- ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
- ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
- ultralytics/models/sam/sam3/model_misc.py +199 -0
- ultralytics/models/sam/sam3/necks.py +129 -0
- ultralytics/models/sam/sam3/sam3_image.py +339 -0
- ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
- ultralytics/models/sam/sam3/vitdet.py +547 -0
- ultralytics/models/sam/sam3/vl_combiner.py +160 -0
- ultralytics/models/utils/loss.py +14 -26
- ultralytics/models/utils/ops.py +13 -17
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +10 -13
- ultralytics/models/yolo/classify/train.py +12 -33
- ultralytics/models/yolo/classify/val.py +30 -29
- ultralytics/models/yolo/detect/predict.py +9 -12
- ultralytics/models/yolo/detect/train.py +17 -23
- ultralytics/models/yolo/detect/val.py +77 -59
- ultralytics/models/yolo/model.py +43 -60
- ultralytics/models/yolo/obb/predict.py +7 -16
- ultralytics/models/yolo/obb/train.py +14 -17
- ultralytics/models/yolo/obb/val.py +40 -37
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +7 -22
- ultralytics/models/yolo/pose/train.py +13 -16
- ultralytics/models/yolo/pose/val.py +39 -58
- ultralytics/models/yolo/segment/predict.py +17 -21
- ultralytics/models/yolo/segment/train.py +7 -10
- ultralytics/models/yolo/segment/val.py +95 -47
- ultralytics/models/yolo/world/train.py +8 -14
- ultralytics/models/yolo/world/train_world.py +11 -34
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +16 -23
- ultralytics/models/yolo/yoloe/train.py +36 -44
- ultralytics/models/yolo/yoloe/train_seg.py +11 -11
- ultralytics/models/yolo/yoloe/val.py +15 -20
- ultralytics/nn/__init__.py +7 -7
- ultralytics/nn/autobackend.py +159 -85
- ultralytics/nn/modules/__init__.py +68 -60
- ultralytics/nn/modules/activation.py +4 -6
- ultralytics/nn/modules/block.py +260 -224
- ultralytics/nn/modules/conv.py +52 -97
- ultralytics/nn/modules/head.py +831 -299
- ultralytics/nn/modules/transformer.py +76 -88
- ultralytics/nn/modules/utils.py +16 -21
- ultralytics/nn/tasks.py +180 -195
- ultralytics/nn/text_model.py +45 -69
- ultralytics/optim/__init__.py +5 -0
- ultralytics/optim/muon.py +338 -0
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +13 -19
- ultralytics/solutions/analytics.py +15 -16
- ultralytics/solutions/config.py +6 -7
- ultralytics/solutions/distance_calculation.py +10 -13
- ultralytics/solutions/heatmap.py +8 -14
- ultralytics/solutions/instance_segmentation.py +6 -9
- ultralytics/solutions/object_blurrer.py +7 -10
- ultralytics/solutions/object_counter.py +12 -19
- ultralytics/solutions/object_cropper.py +8 -14
- ultralytics/solutions/parking_management.py +34 -32
- ultralytics/solutions/queue_management.py +10 -12
- ultralytics/solutions/region_counter.py +9 -12
- ultralytics/solutions/security_alarm.py +15 -20
- ultralytics/solutions/similarity_search.py +10 -15
- ultralytics/solutions/solutions.py +77 -76
- ultralytics/solutions/speed_estimation.py +7 -10
- ultralytics/solutions/streamlit_inference.py +2 -4
- ultralytics/solutions/templates/similarity-search.html +7 -18
- ultralytics/solutions/trackzone.py +7 -10
- ultralytics/solutions/vision_eye.py +5 -8
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +3 -5
- ultralytics/trackers/bot_sort.py +10 -27
- ultralytics/trackers/byte_tracker.py +21 -37
- ultralytics/trackers/track.py +4 -7
- ultralytics/trackers/utils/gmc.py +11 -22
- ultralytics/trackers/utils/kalman_filter.py +37 -48
- ultralytics/trackers/utils/matching.py +12 -15
- ultralytics/utils/__init__.py +124 -124
- ultralytics/utils/autobatch.py +2 -4
- ultralytics/utils/autodevice.py +17 -18
- ultralytics/utils/benchmarks.py +57 -71
- ultralytics/utils/callbacks/base.py +8 -10
- ultralytics/utils/callbacks/clearml.py +5 -13
- ultralytics/utils/callbacks/comet.py +32 -46
- ultralytics/utils/callbacks/dvc.py +13 -18
- ultralytics/utils/callbacks/mlflow.py +4 -5
- ultralytics/utils/callbacks/neptune.py +7 -15
- ultralytics/utils/callbacks/platform.py +423 -38
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +25 -31
- ultralytics/utils/callbacks/wb.py +16 -14
- ultralytics/utils/checks.py +127 -85
- ultralytics/utils/cpu.py +3 -8
- ultralytics/utils/dist.py +9 -12
- ultralytics/utils/downloads.py +25 -33
- ultralytics/utils/errors.py +6 -14
- ultralytics/utils/events.py +2 -4
- ultralytics/utils/export/__init__.py +4 -236
- ultralytics/utils/export/engine.py +246 -0
- ultralytics/utils/export/imx.py +117 -63
- ultralytics/utils/export/tensorflow.py +231 -0
- ultralytics/utils/files.py +26 -30
- ultralytics/utils/git.py +9 -11
- ultralytics/utils/instance.py +30 -51
- ultralytics/utils/logger.py +212 -114
- ultralytics/utils/loss.py +601 -215
- ultralytics/utils/metrics.py +128 -156
- ultralytics/utils/nms.py +13 -16
- ultralytics/utils/ops.py +117 -166
- ultralytics/utils/patches.py +75 -21
- ultralytics/utils/plotting.py +75 -80
- ultralytics/utils/tal.py +125 -59
- ultralytics/utils/torch_utils.py +53 -79
- ultralytics/utils/tqdm.py +24 -21
- ultralytics/utils/triton.py +13 -19
- ultralytics/utils/tuner.py +19 -10
- dgenerate_ultralytics_headless-8.3.214.dist-info/RECORD +0 -283
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/top_level.txt +0 -0
ultralytics/utils/callbacks/comet.py

@@ -88,8 +88,7 @@ def _should_log_image_predictions() -> bool:
 
 
 def _resume_or_create_experiment(args: SimpleNamespace) -> None:
-    """
-    Resume CometML experiment or create a new experiment based on args.
+    """Resume CometML experiment or create a new experiment based on args.
 
     Ensures that the experiment object is only created in a single process during distributed training.
 
@@ -124,8 +123,7 @@ def _resume_or_create_experiment(args: SimpleNamespace) -> None:
 
 
 def _fetch_trainer_metadata(trainer) -> dict:
-    """
-    Return metadata for YOLO training including epoch and asset saving status.
+    """Return metadata for YOLO training including epoch and asset saving status.
 
     Args:
         trainer (ultralytics.engine.trainer.BaseTrainer): The YOLO trainer object containing training state and config.
@@ -150,11 +148,10 @@ def _fetch_trainer_metadata(trainer) -> dict:
 def _scale_bounding_box_to_original_image_shape(
     box, resized_image_shape, original_image_shape, ratio_pad
 ) -> list[float]:
-    """
-    Scale bounding box from resized image coordinates to original image coordinates.
+    """Scale bounding box from resized image coordinates to original image coordinates.
 
-    YOLO resizes images during training and the label values are normalized based on this resized shape.
-
+    YOLO resizes images during training and the label values are normalized based on this resized shape. This function
+    rescales the bounding box labels to the original image shape.
 
     Args:
         box (torch.Tensor): Bounding box in normalized xywh format.
@@ -181,8 +178,7 @@ def _scale_bounding_box_to_original_image_shape(
 
 
 def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None) -> dict | None:
-    """
-    Format ground truth annotations for object detection.
+    """Format ground truth annotations for object detection.
 
     This function processes ground truth annotations from a batch of images for object detection tasks. It extracts
     bounding boxes, class labels, and other metadata for a specific image in the batch, and formats them for
@@ -205,7 +201,7 @@ def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, c
             - 'boxes': List of box coordinates [x, y, width, height]
             - 'label': Label string with format "gt_{class_name}"
             - 'score': Confidence score (always 1.0, scaled by _scale_confidence_score)
-
+        Returns None if no bounding boxes are found for the image.
     """
     indices = batch["batch_idx"] == img_idx
     bboxes = batch["bboxes"][indices]
@@ -236,8 +232,7 @@ def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, c
 
 
 def _format_prediction_annotations(image_path, metadata, class_label_map=None, class_map=None) -> dict | None:
-    """
-    Format YOLO predictions for object detection visualization.
+    """Format YOLO predictions for object detection visualization.
 
     Args:
         image_path (Path): Path to the image file.
@@ -261,7 +256,7 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c
         class_label_map = {class_map[k]: v for k, v in class_label_map.items()}
     try:
         # import pycotools utilities to decompress annotations for various tasks, e.g. segmentation
-        from faster_coco_eval.core.mask import decode
+        from faster_coco_eval.core.mask import decode
     except ImportError:
         decode = None
 
@@ -289,8 +284,7 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c
 
 
 def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) -> list[list[Any]] | None:
-    """
-    Extract segmentation annotation from compressed segmentations as list of polygons.
+    """Extract segmentation annotation from compressed segmentations as list of polygons.
 
     Args:
         segmentation_raw (str): Raw segmentation data in compressed format.
@@ -310,8 +304,7 @@ def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) ->
 
 
 def _fetch_annotations(img_idx, image_path, batch, prediction_metadata_map, class_label_map, class_map) -> list | None:
-    """
-    Join the ground truth and prediction annotations if they exist.
+    """Join the ground truth and prediction annotations if they exist.
 
     Args:
         img_idx (int): Index of the image in the batch.
@@ -350,25 +343,24 @@ def _create_prediction_metadata_map(model_predictions) -> dict:
 def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) -> None:
     """Log the confusion matrix to Comet experiment."""
     conf_mat = trainer.validator.confusion_matrix.matrix
-    names = list(trainer.data["names"].values())
+    names = [*list(trainer.data["names"].values()), "background"]
     experiment.log_confusion_matrix(
         matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
     )
 
 
 def _log_images(experiment, image_paths, curr_step: int | None, annotations=None) -> None:
-    """
-    Log images to the experiment with optional annotations.
+    """Log images to the experiment with optional annotations.
 
-    This function logs images to a Comet ML experiment, optionally including annotation data for visualization
-
+    This function logs images to a Comet ML experiment, optionally including annotation data for visualization such as
+    bounding boxes or segmentation masks.
 
     Args:
         experiment (comet_ml.CometExperiment): The Comet ML experiment to log images to.
         image_paths (list[Path]): List of paths to images that will be logged.
        curr_step (int): Current training step/iteration for tracking in the experiment timeline.
-        annotations (list[list[dict]], optional): Nested list of annotation dictionaries for each image. Each
-
+        annotations (list[list[dict]], optional): Nested list of annotation dictionaries for each image. Each annotation
+            contains visualization data like bounding boxes, labels, and confidence scores.
     """
     if annotations:
         for image_path, annotation in zip(image_paths, annotations):
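The `_log_confusion_matrix` change in the hunk above appends a "background" entry to the label list. A minimal sketch of why the extra label is needed, assuming (as the change implies) that the validator's confusion matrix carries one extra row and column for background; the class names are hypothetical stand-ins for `trainer.data["names"]`:

```python
import numpy as np

class_names = {0: "person", 1: "bicycle", 2: "car"}  # stand-in for trainer.data["names"]
conf_mat = np.zeros((len(class_names) + 1, len(class_names) + 1))  # assumed (nc + 1) x (nc + 1) matrix

names = [*list(class_names.values()), "background"]  # mirrors the updated callback line
assert len(names) == conf_mat.shape[0]  # label count now matches the matrix dimension
print(names)  # ['person', 'bicycle', 'car', 'background']
```

Without the extra entry, the label list passed to `experiment.log_confusion_matrix` would be one short of the matrix size.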
@@ -380,11 +372,10 @@ def _log_images(experiment, image_paths, curr_step: int | None, annotations=None
 
 
 def _log_image_predictions(experiment, validator, curr_step) -> None:
-    """
-    Log predicted boxes for a single image during training.
+    """Log predicted boxes for a single image during training.
 
-    This function logs image predictions to a Comet ML experiment during model validation. It processes
-
+    This function logs image predictions to a Comet ML experiment during model validation. It processes validation data
+    and formats both ground truth and prediction annotations for visualization in the Comet
     dashboard. The function respects configured limits on the number of images to log.
 
     Args:
@@ -443,12 +434,11 @@ def _log_image_predictions(experiment, validator, curr_step) -> None:
 
 
 def _log_plots(experiment, trainer) -> None:
-    """
-    Log evaluation plots and label plots for the experiment.
+    """Log evaluation plots and label plots for the experiment.
 
     This function logs various evaluation plots and confusion matrices to the experiment tracking system. It handles
-    different types of metrics (SegmentMetrics, PoseMetrics, DetMetrics, OBBMetrics) and logs the appropriate plots
-
+    different types of metrics (SegmentMetrics, PoseMetrics, DetMetrics, OBBMetrics) and logs the appropriate plots for
+    each type.
 
     Args:
         experiment (comet_ml.CometExperiment): The Comet ML experiment to log plots to.
@@ -503,8 +493,7 @@ def _log_image_batches(experiment, trainer, curr_step: int) -> None:
 
 
 def _log_asset(experiment, asset_path) -> None:
-    """
-    Logs a specific asset file to the given experiment.
+    """Logs a specific asset file to the given experiment.
 
     This function facilitates logging an asset, such as a file, to the provided
     experiment. It enables integration with experiment tracking platforms.
@@ -517,11 +506,9 @@ def _log_asset(experiment, asset_path) -> None:
 
 
 def _log_table(experiment, table_path) -> None:
-    """
-    Logs a table to the provided experiment.
+    """Logs a table to the provided experiment.
 
-    This function is used to log a table file to the given experiment. The table
-    is identified by its file path.
+    This function is used to log a table file to the given experiment. The table is identified by its file path.
 
     Args:
         experiment (comet_ml.CometExperiment): The experiment object where the table file will be logged.
@@ -549,16 +536,15 @@ def on_train_epoch_end(trainer) -> None:
 
 
 def on_fit_epoch_end(trainer) -> None:
-    """
-    Log model assets at the end of each epoch during training.
+    """Log model assets at the end of each epoch during training.
 
-    This function is called at the end of each training epoch to log metrics, learning rates, and model information
-
-
+    This function is called at the end of each training epoch to log metrics, learning rates, and model information to a
+    Comet ML experiment. It also logs model assets, confusion matrices, and image predictions based on configuration
+    settings.
 
     The function retrieves the current Comet ML experiment and logs various training metrics. If it's the first epoch,
-    it also logs model information. On specified save intervals, it logs the model, confusion matrix (if enabled),
-
+    it also logs model information. On specified save intervals, it logs the model, confusion matrix (if enabled), and
+    image predictions (if enabled).
 
     Args:
         trainer (BaseTrainer): The YOLO trainer object containing training state, metrics, and configuration.
ultralytics/utils/callbacks/dvc.py

@@ -27,8 +27,7 @@ except (ImportError, AssertionError, TypeError):
 
 
 def _log_images(path: Path, prefix: str = "") -> None:
-    """
-    Log images at specified path with an optional prefix using DVCLive.
+    """Log images at specified path with an optional prefix using DVCLive.
 
     This function logs images found at the given path to DVCLive, organizing them by batch to enable slider
     functionality in the UI. It processes image filenames to extract batch information and restructures the path
@@ -55,8 +54,7 @@ def _log_images(path: Path, prefix: str = "") -> None:
 
 
 def _log_plots(plots: dict, prefix: str = "") -> None:
-    """
-    Log plot images for training progress if they have not been previously processed.
+    """Log plot images for training progress if they have not been previously processed.
 
     Args:
         plots (dict): Dictionary containing plot information with timestamps.
@@ -70,15 +68,14 @@ def _log_plots(plots: dict, prefix: str = "") -> None:
 
 
 def _log_confusion_matrix(validator) -> None:
-    """
-    Log confusion matrix for a validator using DVCLive.
+    """Log confusion matrix for a validator using DVCLive.
 
-    This function processes the confusion matrix from a validator object and logs it to DVCLive by converting
-
+    This function processes the confusion matrix from a validator object and logs it to DVCLive by converting the matrix
+    into lists of target and prediction labels.
 
     Args:
         validator (BaseValidator): The validator object containing the confusion matrix and class names. Must have
-            attributes
+            attributes confusion_matrix.matrix, confusion_matrix.task, and names.
     """
     targets = []
     preds = []
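The reworded `_log_confusion_matrix` docstring above describes converting the matrix into lists of target and prediction labels. A hedged sketch of that conversion, with hypothetical class names and an assumed rows-are-predictions orientation (the actual dvc.py function body is not shown in this diff):

```python
import numpy as np

names = ["person", "car", "background"]  # hypothetical class names plus background
matrix = np.array([
    [5, 1, 0],
    [0, 3, 2],
    [1, 0, 0],
])  # assumed orientation: rows = predictions, columns = ground truth

targets, preds = [], []
for pred_idx, pred_name in enumerate(names):
    for true_idx, true_name in enumerate(names):
        count = int(matrix[pred_idx, true_idx])
        targets.extend([true_name] * count)  # one ground-truth label per counted pair
        preds.extend([pred_name] * count)    # matching predicted label

# The flat lists can then be handed to DVCLive, e.g. live.log_sklearn_plot("confusion_matrix", targets, preds).
```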
@@ -123,11 +120,10 @@ def on_train_epoch_start(trainer) -> None:
 
 
 def on_fit_epoch_end(trainer) -> None:
-    """
-    Log training metrics, model info, and advance to next step at the end of each fit epoch.
+    """Log training metrics, model info, and advance to next step at the end of each fit epoch.
 
-    This function is called at the end of each fit epoch during training. It logs various metrics including
-
+    This function is called at the end of each fit epoch during training. It logs various metrics including training
+    loss items, validation metrics, and learning rates. On the first epoch, it also logs model
     information. Additionally, it logs training and validation plots and advances the DVCLive step counter.
 
     Args:
@@ -157,12 +153,11 @@ def on_fit_epoch_end(trainer) -> None:
 
 
 def on_train_end(trainer) -> None:
-    """
-    Log best metrics, plots, and confusion matrix at the end of training.
+    """Log best metrics, plots, and confusion matrix at the end of training.
 
-    This function is called at the conclusion of the training process to log final metrics, visualizations, and
-
-
+    This function is called at the conclusion of the training process to log final metrics, visualizations, and model
+    artifacts if DVCLive logging is active. It captures the best model performance metrics, training plots, validation
+    plots, and confusion matrix for later analysis.
 
     Args:
         trainer (BaseTrainer): The trainer object containing training state, metrics, and validation results.
ultralytics/utils/callbacks/mlflow.py

@@ -45,17 +45,16 @@ def sanitize_dict(x: dict) -> dict:
 
 
 def on_pretrain_routine_end(trainer):
-    """
-    Log training parameters to MLflow at the end of the pretraining routine.
+    """Log training parameters to MLflow at the end of the pretraining routine.
 
     This function sets up MLflow logging based on environment variables and trainer arguments. It sets the tracking URI,
-    experiment name, and run name, then starts the MLflow run if not already active. It finally logs the parameters
-
+    experiment name, and run name, then starts the MLflow run if not already active. It finally logs the parameters from
+    the trainer.
 
     Args:
         trainer (ultralytics.engine.trainer.BaseTrainer): The training object with arguments and parameters to log.
 
-
+    Notes:
         MLFLOW_TRACKING_URI: The URI for MLflow tracking. If not set, defaults to 'runs/mlflow'.
         MLFLOW_EXPERIMENT_NAME: The name of the MLflow experiment. If not set, defaults to trainer.args.project.
         MLFLOW_RUN: The name of the MLflow run. If not set, defaults to trainer.args.name.
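The `on_pretrain_routine_end` docstring above lists the environment variables and their fallbacks. A small illustrative sketch of that lookup order, with "my-project" and "exp" as hypothetical stand-ins for `trainer.args.project` and `trainer.args.name`:

```python
import os

tracking_uri = os.environ.get("MLFLOW_TRACKING_URI") or "runs/mlflow"  # default noted in the docstring
experiment_name = os.environ.get("MLFLOW_EXPERIMENT_NAME") or "my-project"  # falls back to trainer.args.project
run_name = os.environ.get("MLFLOW_RUN") or "exp"  # falls back to trainer.args.name
print(tracking_uri, experiment_name, run_name)
```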
ultralytics/utils/callbacks/neptune.py

@@ -18,8 +18,7 @@ except (ImportError, AssertionError):
 
 
 def _log_scalars(scalars: dict, step: int = 0) -> None:
-    """
-    Log scalars to the NeptuneAI experiment logger.
+    """Log scalars to the NeptuneAI experiment logger.
 
     Args:
         scalars (dict): Dictionary of scalar values to log to NeptuneAI.
@@ -35,11 +34,10 @@ def _log_scalars(scalars: dict, step: int = 0) -> None:
 
 
 def _log_images(imgs_dict: dict, group: str = "") -> None:
-    """
-    Log images to the NeptuneAI experiment logger.
+    """Log images to the NeptuneAI experiment logger.
 
-    This function logs image data to Neptune.ai when a valid Neptune run is active. Images are organized
-
+    This function logs image data to Neptune.ai when a valid Neptune run is active. Images are organized under the
+    specified group name.
 
     Args:
         imgs_dict (dict): Dictionary of images to log, with keys as image names and values as image data.
@@ -108,15 +106,9 @@ def on_train_end(trainer) -> None:
     """Log final results, plots, and model weights at the end of training."""
     if run:
         # Log final results, CM matrix + PR plots
-        files = [
-            "results.png",
-            "confusion_matrix.png",
-            "confusion_matrix_normalized.png",
-            *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
-        ]
-        files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter
-        for f in files:
-            _log_plot(title=f.stem, plot_path=f)
+        for f in [*trainer.plots.keys(), *trainer.validator.plots.keys()]:
+            if "batch" not in f.name:
+                _log_plot(title=f.stem, plot_path=f)
         # Log the final model
         run[f"weights/{trainer.args.name or trainer.args.task}/{trainer.best.name}"].upload(File(str(trainer.best)))
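The new `on_train_end` body above switches from a hard-coded file list to iterating the trainer's and validator's plot registries, skipping per-batch mosaics. A minimal sketch of that selection logic, assuming the registries are dicts keyed by `Path` objects as `trainer.plots` / `trainer.validator.plots` are:

```python
from pathlib import Path

train_plots = {Path("results.png"): {}, Path("train_batch0.jpg"): {}}  # stand-in for trainer.plots
val_plots = {Path("confusion_matrix.png"): {}, Path("val_batch0_pred.jpg"): {}}  # stand-in for trainer.validator.plots

for f in [*train_plots.keys(), *val_plots.keys()]:
    if "batch" not in f.name:  # skip per-batch image mosaics, keep summary plots
        print(f"log plot: title={f.stem!r} path={f}")
```

This picks up whatever plots were actually produced during the run instead of probing a fixed set of filenames on disk.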