dgenerate-ultralytics-headless 8.3.253__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dgenerate_ultralytics_headless-8.3.253.dist-info/METADATA +405 -0
- dgenerate_ultralytics_headless-8.3.253.dist-info/RECORD +299 -0
- dgenerate_ultralytics_headless-8.3.253.dist-info/WHEEL +5 -0
- dgenerate_ultralytics_headless-8.3.253.dist-info/entry_points.txt +3 -0
- dgenerate_ultralytics_headless-8.3.253.dist-info/licenses/LICENSE +661 -0
- dgenerate_ultralytics_headless-8.3.253.dist-info/top_level.txt +1 -0
- tests/__init__.py +23 -0
- tests/conftest.py +59 -0
- tests/test_cli.py +131 -0
- tests/test_cuda.py +216 -0
- tests/test_engine.py +157 -0
- tests/test_exports.py +309 -0
- tests/test_integrations.py +151 -0
- tests/test_python.py +777 -0
- tests/test_solutions.py +371 -0
- ultralytics/__init__.py +48 -0
- ultralytics/assets/bus.jpg +0 -0
- ultralytics/assets/zidane.jpg +0 -0
- ultralytics/cfg/__init__.py +1028 -0
- ultralytics/cfg/datasets/Argoverse.yaml +78 -0
- ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
- ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
- ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
- ultralytics/cfg/datasets/HomeObjects-3K.yaml +32 -0
- ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
- ultralytics/cfg/datasets/Objects365.yaml +447 -0
- ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
- ultralytics/cfg/datasets/TT100K.yaml +346 -0
- ultralytics/cfg/datasets/VOC.yaml +102 -0
- ultralytics/cfg/datasets/VisDrone.yaml +87 -0
- ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
- ultralytics/cfg/datasets/brain-tumor.yaml +22 -0
- ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
- ultralytics/cfg/datasets/coco-pose.yaml +64 -0
- ultralytics/cfg/datasets/coco.yaml +118 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco128.yaml +101 -0
- ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
- ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
- ultralytics/cfg/datasets/coco8-pose.yaml +47 -0
- ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco8.yaml +101 -0
- ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
- ultralytics/cfg/datasets/crack-seg.yaml +22 -0
- ultralytics/cfg/datasets/dog-pose.yaml +52 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
- ultralytics/cfg/datasets/dota8.yaml +35 -0
- ultralytics/cfg/datasets/hand-keypoints.yaml +50 -0
- ultralytics/cfg/datasets/kitti.yaml +27 -0
- ultralytics/cfg/datasets/lvis.yaml +1240 -0
- ultralytics/cfg/datasets/medical-pills.yaml +21 -0
- ultralytics/cfg/datasets/open-images-v7.yaml +663 -0
- ultralytics/cfg/datasets/package-seg.yaml +22 -0
- ultralytics/cfg/datasets/signature.yaml +21 -0
- ultralytics/cfg/datasets/tiger-pose.yaml +41 -0
- ultralytics/cfg/datasets/xView.yaml +155 -0
- ultralytics/cfg/default.yaml +130 -0
- ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
- ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
- ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
- ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
- ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
- ultralytics/cfg/models/11/yolo11.yaml +50 -0
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
- ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
- ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
- ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
- ultralytics/cfg/models/12/yolo12.yaml +48 -0
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
- ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
- ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
- ultralytics/cfg/models/v3/yolov3.yaml +49 -0
- ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
- ultralytics/cfg/models/v5/yolov5.yaml +51 -0
- ultralytics/cfg/models/v6/yolov6.yaml +56 -0
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +48 -0
- ultralytics/cfg/models/v8/yoloe-v8.yaml +48 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
- ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8.yaml +49 -0
- ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
- ultralytics/cfg/trackers/botsort.yaml +21 -0
- ultralytics/cfg/trackers/bytetrack.yaml +12 -0
- ultralytics/data/__init__.py +26 -0
- ultralytics/data/annotator.py +66 -0
- ultralytics/data/augment.py +2801 -0
- ultralytics/data/base.py +435 -0
- ultralytics/data/build.py +437 -0
- ultralytics/data/converter.py +855 -0
- ultralytics/data/dataset.py +834 -0
- ultralytics/data/loaders.py +704 -0
- ultralytics/data/scripts/download_weights.sh +18 -0
- ultralytics/data/scripts/get_coco.sh +61 -0
- ultralytics/data/scripts/get_coco128.sh +18 -0
- ultralytics/data/scripts/get_imagenet.sh +52 -0
- ultralytics/data/split.py +138 -0
- ultralytics/data/split_dota.py +344 -0
- ultralytics/data/utils.py +798 -0
- ultralytics/engine/__init__.py +1 -0
- ultralytics/engine/exporter.py +1580 -0
- ultralytics/engine/model.py +1125 -0
- ultralytics/engine/predictor.py +508 -0
- ultralytics/engine/results.py +1522 -0
- ultralytics/engine/trainer.py +977 -0
- ultralytics/engine/tuner.py +449 -0
- ultralytics/engine/validator.py +387 -0
- ultralytics/hub/__init__.py +166 -0
- ultralytics/hub/auth.py +151 -0
- ultralytics/hub/google/__init__.py +174 -0
- ultralytics/hub/session.py +422 -0
- ultralytics/hub/utils.py +162 -0
- ultralytics/models/__init__.py +9 -0
- ultralytics/models/fastsam/__init__.py +7 -0
- ultralytics/models/fastsam/model.py +79 -0
- ultralytics/models/fastsam/predict.py +169 -0
- ultralytics/models/fastsam/utils.py +23 -0
- ultralytics/models/fastsam/val.py +38 -0
- ultralytics/models/nas/__init__.py +7 -0
- ultralytics/models/nas/model.py +98 -0
- ultralytics/models/nas/predict.py +56 -0
- ultralytics/models/nas/val.py +38 -0
- ultralytics/models/rtdetr/__init__.py +7 -0
- ultralytics/models/rtdetr/model.py +63 -0
- ultralytics/models/rtdetr/predict.py +88 -0
- ultralytics/models/rtdetr/train.py +89 -0
- ultralytics/models/rtdetr/val.py +216 -0
- ultralytics/models/sam/__init__.py +25 -0
- ultralytics/models/sam/amg.py +275 -0
- ultralytics/models/sam/build.py +365 -0
- ultralytics/models/sam/build_sam3.py +377 -0
- ultralytics/models/sam/model.py +169 -0
- ultralytics/models/sam/modules/__init__.py +1 -0
- ultralytics/models/sam/modules/blocks.py +1067 -0
- ultralytics/models/sam/modules/decoders.py +495 -0
- ultralytics/models/sam/modules/encoders.py +794 -0
- ultralytics/models/sam/modules/memory_attention.py +298 -0
- ultralytics/models/sam/modules/sam.py +1160 -0
- ultralytics/models/sam/modules/tiny_encoder.py +979 -0
- ultralytics/models/sam/modules/transformer.py +344 -0
- ultralytics/models/sam/modules/utils.py +512 -0
- ultralytics/models/sam/predict.py +3940 -0
- ultralytics/models/sam/sam3/__init__.py +3 -0
- ultralytics/models/sam/sam3/decoder.py +546 -0
- ultralytics/models/sam/sam3/encoder.py +529 -0
- ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
- ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
- ultralytics/models/sam/sam3/model_misc.py +199 -0
- ultralytics/models/sam/sam3/necks.py +129 -0
- ultralytics/models/sam/sam3/sam3_image.py +339 -0
- ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
- ultralytics/models/sam/sam3/vitdet.py +547 -0
- ultralytics/models/sam/sam3/vl_combiner.py +160 -0
- ultralytics/models/utils/__init__.py +1 -0
- ultralytics/models/utils/loss.py +466 -0
- ultralytics/models/utils/ops.py +315 -0
- ultralytics/models/yolo/__init__.py +7 -0
- ultralytics/models/yolo/classify/__init__.py +7 -0
- ultralytics/models/yolo/classify/predict.py +90 -0
- ultralytics/models/yolo/classify/train.py +202 -0
- ultralytics/models/yolo/classify/val.py +216 -0
- ultralytics/models/yolo/detect/__init__.py +7 -0
- ultralytics/models/yolo/detect/predict.py +122 -0
- ultralytics/models/yolo/detect/train.py +227 -0
- ultralytics/models/yolo/detect/val.py +507 -0
- ultralytics/models/yolo/model.py +430 -0
- ultralytics/models/yolo/obb/__init__.py +7 -0
- ultralytics/models/yolo/obb/predict.py +56 -0
- ultralytics/models/yolo/obb/train.py +79 -0
- ultralytics/models/yolo/obb/val.py +302 -0
- ultralytics/models/yolo/pose/__init__.py +7 -0
- ultralytics/models/yolo/pose/predict.py +65 -0
- ultralytics/models/yolo/pose/train.py +110 -0
- ultralytics/models/yolo/pose/val.py +248 -0
- ultralytics/models/yolo/segment/__init__.py +7 -0
- ultralytics/models/yolo/segment/predict.py +109 -0
- ultralytics/models/yolo/segment/train.py +69 -0
- ultralytics/models/yolo/segment/val.py +307 -0
- ultralytics/models/yolo/world/__init__.py +5 -0
- ultralytics/models/yolo/world/train.py +173 -0
- ultralytics/models/yolo/world/train_world.py +178 -0
- ultralytics/models/yolo/yoloe/__init__.py +22 -0
- ultralytics/models/yolo/yoloe/predict.py +162 -0
- ultralytics/models/yolo/yoloe/train.py +287 -0
- ultralytics/models/yolo/yoloe/train_seg.py +122 -0
- ultralytics/models/yolo/yoloe/val.py +206 -0
- ultralytics/nn/__init__.py +27 -0
- ultralytics/nn/autobackend.py +964 -0
- ultralytics/nn/modules/__init__.py +182 -0
- ultralytics/nn/modules/activation.py +54 -0
- ultralytics/nn/modules/block.py +1947 -0
- ultralytics/nn/modules/conv.py +669 -0
- ultralytics/nn/modules/head.py +1183 -0
- ultralytics/nn/modules/transformer.py +793 -0
- ultralytics/nn/modules/utils.py +159 -0
- ultralytics/nn/tasks.py +1768 -0
- ultralytics/nn/text_model.py +356 -0
- ultralytics/py.typed +1 -0
- ultralytics/solutions/__init__.py +41 -0
- ultralytics/solutions/ai_gym.py +108 -0
- ultralytics/solutions/analytics.py +264 -0
- ultralytics/solutions/config.py +107 -0
- ultralytics/solutions/distance_calculation.py +123 -0
- ultralytics/solutions/heatmap.py +125 -0
- ultralytics/solutions/instance_segmentation.py +86 -0
- ultralytics/solutions/object_blurrer.py +89 -0
- ultralytics/solutions/object_counter.py +190 -0
- ultralytics/solutions/object_cropper.py +87 -0
- ultralytics/solutions/parking_management.py +280 -0
- ultralytics/solutions/queue_management.py +93 -0
- ultralytics/solutions/region_counter.py +133 -0
- ultralytics/solutions/security_alarm.py +151 -0
- ultralytics/solutions/similarity_search.py +219 -0
- ultralytics/solutions/solutions.py +828 -0
- ultralytics/solutions/speed_estimation.py +114 -0
- ultralytics/solutions/streamlit_inference.py +260 -0
- ultralytics/solutions/templates/similarity-search.html +156 -0
- ultralytics/solutions/trackzone.py +88 -0
- ultralytics/solutions/vision_eye.py +67 -0
- ultralytics/trackers/__init__.py +7 -0
- ultralytics/trackers/basetrack.py +115 -0
- ultralytics/trackers/bot_sort.py +257 -0
- ultralytics/trackers/byte_tracker.py +469 -0
- ultralytics/trackers/track.py +116 -0
- ultralytics/trackers/utils/__init__.py +1 -0
- ultralytics/trackers/utils/gmc.py +339 -0
- ultralytics/trackers/utils/kalman_filter.py +482 -0
- ultralytics/trackers/utils/matching.py +154 -0
- ultralytics/utils/__init__.py +1450 -0
- ultralytics/utils/autobatch.py +118 -0
- ultralytics/utils/autodevice.py +205 -0
- ultralytics/utils/benchmarks.py +728 -0
- ultralytics/utils/callbacks/__init__.py +5 -0
- ultralytics/utils/callbacks/base.py +233 -0
- ultralytics/utils/callbacks/clearml.py +146 -0
- ultralytics/utils/callbacks/comet.py +625 -0
- ultralytics/utils/callbacks/dvc.py +197 -0
- ultralytics/utils/callbacks/hub.py +110 -0
- ultralytics/utils/callbacks/mlflow.py +134 -0
- ultralytics/utils/callbacks/neptune.py +126 -0
- ultralytics/utils/callbacks/platform.py +453 -0
- ultralytics/utils/callbacks/raytune.py +42 -0
- ultralytics/utils/callbacks/tensorboard.py +123 -0
- ultralytics/utils/callbacks/wb.py +188 -0
- ultralytics/utils/checks.py +1020 -0
- ultralytics/utils/cpu.py +85 -0
- ultralytics/utils/dist.py +123 -0
- ultralytics/utils/downloads.py +529 -0
- ultralytics/utils/errors.py +35 -0
- ultralytics/utils/events.py +113 -0
- ultralytics/utils/export/__init__.py +7 -0
- ultralytics/utils/export/engine.py +237 -0
- ultralytics/utils/export/imx.py +325 -0
- ultralytics/utils/export/tensorflow.py +231 -0
- ultralytics/utils/files.py +219 -0
- ultralytics/utils/git.py +137 -0
- ultralytics/utils/instance.py +484 -0
- ultralytics/utils/logger.py +506 -0
- ultralytics/utils/loss.py +849 -0
- ultralytics/utils/metrics.py +1563 -0
- ultralytics/utils/nms.py +337 -0
- ultralytics/utils/ops.py +664 -0
- ultralytics/utils/patches.py +201 -0
- ultralytics/utils/plotting.py +1047 -0
- ultralytics/utils/tal.py +404 -0
- ultralytics/utils/torch_utils.py +984 -0
- ultralytics/utils/tqdm.py +443 -0
- ultralytics/utils/triton.py +112 -0
- ultralytics/utils/tuner.py +168 -0
|
@@ -0,0 +1,453 @@
|
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import platform
|
|
5
|
+
import re
|
|
6
|
+
import socket
|
|
7
|
+
import sys
|
|
8
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from time import time
|
|
11
|
+
|
|
12
|
+
from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING, colorstr
|
|
13
|
+
|
|
14
|
+
PREFIX = colorstr("Platform: ")
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def slugify(text):
    """Convert text into a URL-safe slug (e.g., 'My Project 1' -> 'my-project-1')."""
    if not text:
        return text  # pass through None/empty unchanged
    lowered = str(text).lower()
    cleaned = re.sub(r"[^a-z0-9\s-]", "", lowered)  # keep only alphanumerics, whitespace, and dashes
    dashed = re.sub(r"-+", "-", cleaned.replace(" ", "-"))  # spaces -> dashes, collapse dash runs
    return dashed.strip("-")[:128]  # trim edge dashes and cap length at 128 chars
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# Platform logging is active only outside pytest and when an API key is available;
# on any failed precondition, _api_key stays None and the callbacks become no-ops.
try:
    assert not TESTS_RUNNING  # never stream logs from pytest runs
    assert SETTINGS.get("platform", False) is True or os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
    _api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")  # env var wins over settings
    assert _api_key  # an API key is mandatory for streaming

    import requests

    from ultralytics.utils.logger import ConsoleLogger, SystemLogger
    from ultralytics.utils.torch_utils import model_info_for_loggers

    # Shared bounded pool so webhook posts and checkpoint uploads never block training
    _executor = ThreadPoolExecutor(max_workers=10)

except (AssertionError, ImportError):
    _api_key = None
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def resolve_platform_uri(uri, hard=True):
    """Resolve ul:// URIs to signed URLs by authenticating with Ultralytics Platform.

    Formats:
        ul://username/datasets/slug -> Returns signed URL to NDJSON file
        ul://username/project/model -> Returns signed URL to .pt file

    Args:
        uri (str): Platform URI starting with "ul://".
        hard (bool): Whether to raise an error if resolution fails (FileNotFoundError only).

    Returns:
        (str | None): Signed URL on success, None if not found and hard=False.

    Raises:
        ValueError: If API key is missing/invalid or URI format is wrong.
        PermissionError: If access is denied.
        RuntimeError: If resource is not ready (e.g., dataset still processing).
        FileNotFoundError: If resource not found and hard=True.
        ConnectionError: If network request fails and hard=True.
    """
    import requests

    api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
    if not api_key:
        raise ValueError(f"ULTRALYTICS_API_KEY required for '{uri}'. Get key at https://alpha.ultralytics.com/settings")

    segments = uri[5:].split("/")  # strip the "ul://" scheme prefix, then split path components
    base = "https://alpha.ultralytics.com/api/webhooks"
    if len(segments) != 3:
        raise ValueError(f"Invalid platform URI: {uri}. Use ul://user/datasets/name or ul://user/project/model")
    owner, middle, leaf = segments
    if middle == "datasets":
        url = f"{base}/datasets/{owner}/{leaf}/export"  # ul://username/datasets/slug
    else:
        url = f"{base}/models/{owner}/{middle}/{leaf}/download"  # ul://username/project/model

    try:
        r = requests.head(url, headers={"Authorization": f"Bearer {api_key}"}, allow_redirects=False, timeout=30)
        status = r.status_code

        # A redirect (301/302/303/307/308) carries the signed URL in its Location header
        if 300 <= status < 400 and "location" in r.headers:
            return r.headers["location"]

        # Map known error statuses to specific exception types
        if status == 401:
            raise ValueError(f"Invalid ULTRALYTICS_API_KEY for '{uri}'")
        if status == 403:
            raise PermissionError(f"Access denied for '{uri}'. Check dataset/model visibility settings.")
        if status == 404:
            if hard:
                raise FileNotFoundError(f"Not found on platform: {uri}")
            LOGGER.warning(f"Not found on platform: {uri}")
            return None
        if status == 409:
            raise RuntimeError(f"Resource not ready: {uri}. Dataset may still be processing.")

        r.raise_for_status()  # surface remaining 4xx/5xx as HTTPError (caught below)
        raise RuntimeError(f"Unexpected response from platform for '{uri}': {status}")

    except requests.exceptions.RequestException as e:
        if hard:
            raise ConnectionError(f"Failed to resolve {uri}: {e}") from e
        LOGGER.warning(f"Failed to resolve {uri}: {e}")
        return None
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def _interp_plot(plot, n=101):
|
|
119
|
+
"""Interpolate plot curve data from 1000 to n points to reduce storage size."""
|
|
120
|
+
import numpy as np
|
|
121
|
+
|
|
122
|
+
if not plot.get("x") or not plot.get("y"):
|
|
123
|
+
return plot # No interpolation needed (e.g., confusion_matrix)
|
|
124
|
+
|
|
125
|
+
x, y = np.array(plot["x"]), np.array(plot["y"])
|
|
126
|
+
if len(x) <= n:
|
|
127
|
+
return plot # Already small enough
|
|
128
|
+
|
|
129
|
+
# New x values (101 points gives clean 0.01 increments: 0, 0.01, 0.02, ..., 1.0)
|
|
130
|
+
x_new = np.linspace(x[0], x[-1], n)
|
|
131
|
+
|
|
132
|
+
# Interpolate y values (handle both 1D and 2D arrays)
|
|
133
|
+
if y.ndim == 1:
|
|
134
|
+
y_new = np.interp(x_new, x, y)
|
|
135
|
+
else:
|
|
136
|
+
y_new = np.array([np.interp(x_new, x, yi) for yi in y])
|
|
137
|
+
|
|
138
|
+
# Also interpolate ap if present (for PR curves)
|
|
139
|
+
result = {**plot, "x": x_new.tolist(), "y": y_new.tolist()}
|
|
140
|
+
if "ap" in plot:
|
|
141
|
+
result["ap"] = plot["ap"] # Keep AP values as-is (per-class scalars)
|
|
142
|
+
|
|
143
|
+
return result
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def _send(event, data, project, name, model_id=None):
    """Send event to Platform endpoint. Returns response JSON on success, None on any failure."""
    try:
        body = {"event": event, "project": project, "name": name, "data": data}
        if model_id:
            body["modelId"] = model_id  # attach once the Platform has assigned a model id
        response = requests.post(
            "https://alpha.ultralytics.com/api/webhooks/training/metrics",
            json=body,
            headers={"Authorization": f"Bearer {_api_key}"},
            timeout=10,
        )
        response.raise_for_status()
        return response.json()
    except Exception as e:
        # Best-effort telemetry: never let a webhook failure interrupt training
        LOGGER.debug(f"Platform: Failed to send {event}: {e}")
        return None
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def _send_async(event, data, project, name, model_id=None):
    """Fire-and-forget variant of _send, scheduled on the bounded thread pool."""
    _executor.submit(_send, event, data, project, name, model_id)
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def _upload_model(model_path, project, name):
    """Upload a model checkpoint to the Platform via a signed URL.

    Args:
        model_path (str | Path): Path to the checkpoint file to upload.
        project (str): Slugified project identifier.
        name (str): Slugified run name.

    Returns:
        (str | None): GCS path of the uploaded checkpoint, or None on any failure.
    """
    try:
        checkpoint = Path(model_path)
        if not checkpoint.exists():
            return None

        # Step 1: ask the Platform for a signed upload URL
        r = requests.post(
            "https://alpha.ultralytics.com/api/webhooks/models/upload",
            json={"project": project, "name": name, "filename": checkpoint.name},
            headers={"Authorization": f"Bearer {_api_key}"},
            timeout=10,
        )
        r.raise_for_status()
        info = r.json()

        # Step 2: stream the checkpoint straight to GCS
        with open(checkpoint, "rb") as f:
            requests.put(
                info["uploadUrl"],
                data=f,
                headers={"Content-Type": "application/octet-stream"},
                timeout=600,  # 10 min timeout for large models
            ).raise_for_status()

        return info.get("gcsPath")

    except Exception as e:
        # Best-effort upload: log and continue rather than interrupting training
        LOGGER.debug(f"Platform: Failed to upload model: {e}")
        return None
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
def _upload_model_async(model_path, project, name):
    """Fire-and-forget variant of _upload_model, scheduled on the bounded thread pool."""
    _executor.submit(_upload_model, model_path, project, name)
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def _get_environment_info():
    """Collect comprehensive environment info using existing ultralytics utilities."""
    import shutil

    import psutil
    import torch

    from ultralytics import __version__
    from ultralytics.utils.torch_utils import get_cpu_info, get_gpu_info

    ram = psutil.virtual_memory()
    disk = shutil.disk_usage("/")

    env = {
        "ultralyticsVersion": __version__,
        "hostname": socket.gethostname(),
        "os": platform.platform(),
        "environment": ENVIRONMENT,
        "pythonVersion": PYTHON_VERSION,
        "pythonExecutable": sys.executable,
        "cpuCount": os.cpu_count() or 0,
        "cpu": get_cpu_info(),
        "command": " ".join(sys.argv),
        "totalRamGb": round(ram.total / (1 << 30), 1),  # total RAM in GB
        "totalDiskGb": round(disk.total / (1 << 30), 1),  # total disk in GB
    }

    # Git metadata from the cached GIT singleton (no subprocess calls); best-effort
    try:
        if GIT.is_repo:
            if GIT.origin:
                env["gitRepository"] = GIT.origin
            if GIT.branch:
                env["gitBranch"] = GIT.branch
            if GIT.commit:
                env["gitCommit"] = GIT.commit[:12]  # short hash
    except Exception:
        pass

    # GPU details are optional; never let probing failures break logging
    try:
        if torch.cuda.is_available():
            count = torch.cuda.device_count()
            env["gpuCount"] = count
            env["gpuType"] = get_gpu_info(0) if count > 0 else None
    except Exception:
        pass

    return env
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
def _get_project_name(trainer):
    """Get slugified project and name from trainer args.

    A project of the form "owner/sub" keeps the owner segment verbatim and slugifies
    only the part after the first slash; plain projects are slugified whole.
    """
    raw = str(trainer.args.project)
    if "/" in raw:
        owner, rest = raw.split("/", 1)
        project = f"{owner}/{slugify(rest)}"
    else:
        project = slugify(raw)
    return project, slugify(str(trainer.args.name or "train"))
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
def on_pretrain_routine_start(trainer):
    """Initialize Platform logging at training start."""
    if RANK not in {-1, 0} or not trainer.args.project:
        return  # only the main process streams, and only when a project is configured

    # Per-trainer state so concurrent training runs stay isolated
    trainer._platform_model_id = None
    trainer._platform_last_upload = time()

    project, name = _get_project_name(trainer)
    url = f"https://alpha.ultralytics.com/{project}/{name}"
    LOGGER.info(f"{PREFIX}Streaming to {url}")

    def send_console_output(content, line_count, chunk_id):
        """Send batched console output to Platform webhook."""
        _send_async(
            "console_output",
            {"chunkId": chunk_id, "content": content, "lineCount": line_count},
            project,
            name,
            getattr(trainer, "_platform_model_id", None),
        )

    # Capture console output, batched at 5 lines or 5 seconds per flush
    trainer._platform_console_logger = ConsoleLogger(batch_size=5, flush_interval=5.0, on_flush=send_console_output)
    trainer._platform_console_logger.start_capture()

    # This callback runs before get_dataset(), so args.data still holds the original
    # value (e.g., ul:// URIs); model_info follows in on_fit_epoch_end at epoch 0.
    # training_started is sent synchronously so the modelId is known to later webhooks.
    response = _send(
        "training_started",
        {
            "trainArgs": {k: str(v) for k, v in vars(trainer.args).items()},
            "epochs": trainer.epochs,
            "device": str(trainer.device),
            "environment": _get_environment_info(),  # W&B-style host/env metadata
        },
        project,
        name,
    )
    if response and response.get("modelId"):
        trainer._platform_model_id = response["modelId"]
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
def on_fit_epoch_end(trainer):
    """Log training and system metrics at epoch end."""
    if RANK not in {-1, 0} or not trainer.args.project:
        return

    project, name = _get_project_name(trainer)

    # Merge training losses with validation metrics into one flat dict
    metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics}
    if trainer.optimizer and trainer.optimizer.param_groups:
        metrics["lr"] = trainer.optimizer.param_groups[0]["lr"]

    # Model info is extracted once, at epoch 0, and sent as a separate field (not in metrics)
    model_info = None
    if trainer.epoch == 0:
        try:
            info = model_info_for_loggers(trainer)
            model_info = {
                "parameters": info.get("model/parameters", 0),
                "gflops": info.get("model/GFLOPs", 0),
                "speedMs": info.get("model/speed_PyTorch(ms)", 0),
            }
        except Exception:
            pass  # best-effort: profiling may fail, telemetry must not break training

    # System metrics via a SystemLogger cached on the trainer for efficiency
    system = {}
    try:
        if not hasattr(trainer, "_platform_system_logger"):
            trainer._platform_system_logger = SystemLogger()
        system = trainer._platform_system_logger.get_metrics(rates=True)
    except Exception:
        pass

    payload = dict(
        epoch=trainer.epoch,
        metrics=metrics,
        system=system,
        fitness=trainer.fitness,
        best_fitness=trainer.best_fitness,
    )
    if model_info:
        payload["modelInfo"] = model_info

    _send_async("epoch_end", payload, project, name, getattr(trainer, "_platform_model_id", None))
|
|
370
|
+
|
|
371
|
+
|
|
372
|
+
def on_model_save(trainer):
    """Upload model checkpoint (rate limited to every 15 min)."""
    if RANK not in {-1, 0} or not trainer.args.project:
        return

    # Rate limit: at most one checkpoint upload every 900 seconds (15 minutes)
    if time() - getattr(trainer, "_platform_last_upload", 0) < 900:
        return

    # Prefer best.pt when it exists on disk, otherwise fall back to last.pt
    checkpoint = trainer.best if trainer.best and Path(trainer.best).exists() else trainer.last
    if not checkpoint:
        return

    project, name = _get_project_name(trainer)
    _upload_model_async(checkpoint, project, name)
    trainer._platform_last_upload = time()
|
|
388
|
+
|
|
389
|
+
|
|
390
|
+
def on_train_end(trainer):
    """Log final results, upload best model, and send validation plot data."""
    # Only the main process reports, and only when a project is configured
    if RANK not in {-1, 0} or not trainer.args.project:
        return

    project, name = _get_project_name(trainer)

    # Tear down console capture if it was started earlier in training
    console = getattr(trainer, "_platform_console_logger", None) if hasattr(trainer, "_platform_console_logger") else None
    if console:
        console.stop_capture()
        trainer._platform_console_logger = None

    # Upload the best checkpoint synchronously so it completes before exit
    best_path = None
    best_size = None
    if trainer.best and Path(trainer.best).exists():
        best_size = Path(trainer.best).stat().st_size
        best_path = _upload_model(trainer.best, project, name)

    # Gather plots, deduplicated by plot type; trainer plots take precedence
    unique_plots = {}
    for entry in getattr(trainer, "plots", {}).values():
        data = entry.get("data")
        if data and data.get("type"):
            unique_plots[data["type"]] = data
    for entry in getattr(getattr(trainer, "validator", None), "plots", {}).values():
        data = entry.get("data")
        if data and data.get("type"):
            unique_plots.setdefault(data["type"], data)  # never overwrite a trainer plot
    plots = [_interp_plot(p) for p in unique_plots.values()]  # downsample curves to reduce payload size

    # Resolve class names from the validator first, then the dataset dict
    names = getattr(getattr(trainer, "validator", None), "names", None) or (trainer.data or {}).get("names")
    if isinstance(names, dict):
        class_names = list(names.values())
    elif names:
        class_names = list(names)
    else:
        class_names = None

    payload = {
        "results": {
            "metrics": {**trainer.metrics, "fitness": trainer.fitness},
            "bestEpoch": getattr(trainer, "best_epoch", trainer.epoch),
            "bestFitness": trainer.best_fitness,
            "modelPath": best_path or (str(trainer.best) if trainer.best else None),
            "modelSize": best_size,
        },
        "classNames": class_names,
        "plots": plots,
    }
    _send(
        "training_complete",
        payload,
        project,
        name,
        getattr(trainer, "_platform_model_id", None),
    )
    url = f"https://alpha.ultralytics.com/{project}/{name}"
    LOGGER.info(f"{PREFIX}View results at {url}")
|
|
442
|
+
|
|
443
|
+
|
|
444
|
+
# Register the platform callbacks only when an API key is configured;
# otherwise the integration is a no-op.
callbacks = {}
if _api_key:
    callbacks = {
        "on_pretrain_routine_start": on_pretrain_routine_start,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_model_save": on_model_save,
        "on_train_end": on_train_end,
    }
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
|
2
|
+
|
|
3
|
+
from ultralytics.utils import SETTINGS

# Ray Tune support is optional: it is only wired up when the integration is
# enabled in Ultralytics settings AND the `ray` package is importable.
try:
    assert SETTINGS["raytune"] is True  # verify integration is enabled
    import ray
    from ray import tune
    from ray.air import session

except (ImportError, AssertionError):
    # `ray` missing or integration disabled: `tune = None` makes the
    # `callbacks` mapping at the bottom of this module empty.
    tune = None
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def on_fit_epoch_end(trainer):
    """Report training metrics to Ray Tune at epoch end when a Ray session is active.

    Captures metrics from the trainer object and sends them to Ray Tune with the current epoch number, enabling
    hyperparameter tuning optimization. Only executes when within an active Ray Tune session.

    Args:
        trainer (ultralytics.engine.trainer.BaseTrainer): The Ultralytics trainer object containing metrics and epochs.

    Examples:
        >>> # Called automatically by the Ultralytics training loop
        >>> on_fit_epoch_end(trainer)

    References:
        Ray Tune docs: https://docs.ray.io/en/latest/tune/index.html
    """
    # NOTE(review): `ray.train._internal` is a private Ray API — may break on
    # Ray upgrades; confirm against the pinned Ray version.
    in_tune_session = ray.train._internal.session.get_session()
    if in_tune_session:
        # Report metrics plus a 1-based epoch counter for the Tune scheduler
        session.report({**trainer.metrics, "epoch": trainer.epoch + 1})
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
# Expose the Ray Tune hook only when the integration imported successfully.
callbacks = {"on_fit_epoch_end": on_fit_epoch_end} if tune else {}
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
|
2
|
+
|
|
3
|
+
from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr, torch_utils

# TensorBoard support is optional: all setup is guarded so a missing or broken
# install degrades to a no-op module instead of crashing training.
try:
    assert not TESTS_RUNNING  # do not log pytest
    assert SETTINGS["tensorboard"] is True  # verify integration is enabled
    WRITER = None  # TensorBoard SummaryWriter instance
    PREFIX = colorstr("TensorBoard: ")

    # Imports below only required if TensorBoard enabled
    from copy import deepcopy

    import torch
    from torch.utils.tensorboard import SummaryWriter

except (ImportError, AssertionError, TypeError, AttributeError):
    # TypeError for handling 'Descriptors cannot not be created directly.' protobuf errors in Windows
    # AttributeError: module 'tensorflow' has no attribute 'io' if 'tensorflow' not installed
    # SummaryWriter = None disables every callback registered by this module.
    SummaryWriter = None
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _log_scalars(scalars: dict, step: int = 0) -> None:
    """Write a dictionary of scalar values to TensorBoard.

    Args:
        scalars (dict): Mapping of scalar names to scalar values to record.
        step (int): Global step recorded with each value; used as the x-axis in TensorBoard graphs.

    Examples:
        Log training metrics
        >>> metrics = {"loss": 0.5, "accuracy": 0.95}
        >>> _log_scalars(metrics, step=100)
    """
    if not WRITER:  # writer is None until on_pretrain_routine_start succeeds
        return
    for tag, value in scalars.items():
        WRITER.add_scalar(tag, value, step)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _log_tensorboard_graph(trainer) -> None:
    """Log model graph to TensorBoard.

    This function attempts to visualize the model architecture in TensorBoard by tracing the model with a dummy input
    tensor. It first tries a simple method suitable for YOLO models, and if that fails, falls back to a more complex
    approach for models like RTDETR that may require special handling.

    Args:
        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing the model to visualize. Must
            have attributes model and args with imgsz.

    Notes:
        This function requires TensorBoard integration to be enabled and the global WRITER to be initialized.
        It handles potential warnings from the PyTorch JIT tracer and attempts to gracefully handle different
        model architectures.
    """
    # Input image: normalize imgsz to an (h, w) pair and match model device/dtype
    imgsz = trainer.args.imgsz
    imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
    p = next(trainer.model.parameters())  # for device, type
    im = torch.zeros((1, 3, *imgsz), device=p.device, dtype=p.dtype)  # input image (must be zeros, not empty)

    # Try simple method first (YOLO)
    try:
        trainer.model.eval()  # place in .eval() mode to avoid BatchNorm statistics changes
        WRITER.add_graph(torch.jit.trace(torch_utils.unwrap_model(trainer.model), im, strict=False), [])
        LOGGER.info(f"{PREFIX}model graph visualization added ✅")
        return
    except Exception as e1:
        # Fallback to TorchScript export steps (RTDETR)
        try:
            # deepcopy so fuse()/export flags don't mutate the training model
            model = deepcopy(torch_utils.unwrap_model(trainer.model))
            model.eval()
            model = model.fuse(verbose=False)
            for m in model.modules():
                if hasattr(m, "export"):  # Detect, RTDETRDecoder (Segment and Pose use Detect base class)
                    m.export = True
                    m.format = "torchscript"
            model(im)  # dry run
            WRITER.add_graph(torch.jit.trace(model, im, strict=False), [])
            LOGGER.info(f"{PREFIX}model graph visualization added ✅")
        except Exception as e2:
            # Both strategies failed: log both causes, never raise into training
            LOGGER.warning(f"{PREFIX}TensorBoard graph visualization failure: {e1} -> {e2}")
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def on_pretrain_routine_start(trainer) -> None:
    """Initialize TensorBoard logging with SummaryWriter."""
    if not SummaryWriter:  # import guard failed; integration disabled
        return
    global WRITER
    try:
        WRITER = SummaryWriter(str(trainer.save_dir))
        LOGGER.info(f"{PREFIX}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
    except Exception as e:
        # Leave WRITER unset so the remaining callbacks become no-ops
        LOGGER.warning(f"{PREFIX}TensorBoard not initialized correctly, not logging this run. {e}")
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def on_train_start(trainer) -> None:
    """Log TensorBoard graph."""
    if not WRITER:
        return
    _log_tensorboard_graph(trainer)
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def on_train_epoch_end(trainer) -> None:
    """Log scalar statistics at the end of a training epoch."""
    step = trainer.epoch + 1  # 1-based step for TensorBoard x-axis
    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), step)
    _log_scalars(trainer.lr, step)
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def on_fit_epoch_end(trainer) -> None:
    """Log epoch metrics at end of training epoch."""
    step = trainer.epoch + 1  # 1-based step for TensorBoard x-axis
    _log_scalars(trainer.metrics, step)
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
# Register TensorBoard hooks only when SummaryWriter imported successfully;
# otherwise this module contributes no callbacks.
callbacks = {}
if SummaryWriter:
    callbacks = {
        "on_pretrain_routine_start": on_pretrain_routine_start,
        "on_train_start": on_train_start,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_train_epoch_end": on_train_epoch_end,
    }
|