dgenerate-ultralytics-headless 8.3.214__py3-none-any.whl → 8.3.248__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/METADATA +13 -14
- dgenerate_ultralytics_headless-8.3.248.dist-info/RECORD +298 -0
- tests/__init__.py +5 -7
- tests/conftest.py +8 -15
- tests/test_cli.py +1 -1
- tests/test_cuda.py +5 -8
- tests/test_engine.py +1 -1
- tests/test_exports.py +57 -12
- tests/test_integrations.py +4 -4
- tests/test_python.py +84 -53
- tests/test_solutions.py +160 -151
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +56 -62
- ultralytics/cfg/datasets/Argoverse.yaml +7 -6
- ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
- ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
- ultralytics/cfg/datasets/ImageNet.yaml +1 -1
- ultralytics/cfg/datasets/VOC.yaml +15 -16
- ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
- ultralytics/cfg/datasets/coco-pose.yaml +21 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
- ultralytics/cfg/datasets/dog-pose.yaml +28 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
- ultralytics/cfg/datasets/kitti.yaml +27 -0
- ultralytics/cfg/datasets/lvis.yaml +5 -5
- ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
- ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
- ultralytics/cfg/datasets/xView.yaml +16 -16
- ultralytics/cfg/default.yaml +1 -1
- ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
- ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
- ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
- ultralytics/cfg/models/v6/yolov6.yaml +1 -1
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
- ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
- ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +3 -4
- ultralytics/data/augment.py +285 -475
- ultralytics/data/base.py +18 -26
- ultralytics/data/build.py +147 -25
- ultralytics/data/converter.py +36 -46
- ultralytics/data/dataset.py +46 -74
- ultralytics/data/loaders.py +42 -49
- ultralytics/data/split.py +5 -6
- ultralytics/data/split_dota.py +8 -15
- ultralytics/data/utils.py +34 -43
- ultralytics/engine/exporter.py +319 -237
- ultralytics/engine/model.py +148 -188
- ultralytics/engine/predictor.py +29 -38
- ultralytics/engine/results.py +177 -311
- ultralytics/engine/trainer.py +83 -59
- ultralytics/engine/tuner.py +23 -34
- ultralytics/engine/validator.py +39 -22
- ultralytics/hub/__init__.py +16 -19
- ultralytics/hub/auth.py +6 -12
- ultralytics/hub/google/__init__.py +7 -10
- ultralytics/hub/session.py +15 -25
- ultralytics/hub/utils.py +5 -8
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +8 -10
- ultralytics/models/fastsam/predict.py +17 -29
- ultralytics/models/fastsam/utils.py +1 -2
- ultralytics/models/fastsam/val.py +5 -7
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +5 -8
- ultralytics/models/nas/predict.py +7 -9
- ultralytics/models/nas/val.py +1 -2
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +5 -8
- ultralytics/models/rtdetr/predict.py +15 -19
- ultralytics/models/rtdetr/train.py +10 -13
- ultralytics/models/rtdetr/val.py +21 -23
- ultralytics/models/sam/__init__.py +15 -2
- ultralytics/models/sam/amg.py +14 -20
- ultralytics/models/sam/build.py +26 -19
- ultralytics/models/sam/build_sam3.py +377 -0
- ultralytics/models/sam/model.py +29 -32
- ultralytics/models/sam/modules/blocks.py +83 -144
- ultralytics/models/sam/modules/decoders.py +19 -37
- ultralytics/models/sam/modules/encoders.py +44 -101
- ultralytics/models/sam/modules/memory_attention.py +16 -30
- ultralytics/models/sam/modules/sam.py +200 -73
- ultralytics/models/sam/modules/tiny_encoder.py +64 -83
- ultralytics/models/sam/modules/transformer.py +18 -28
- ultralytics/models/sam/modules/utils.py +174 -50
- ultralytics/models/sam/predict.py +2248 -350
- ultralytics/models/sam/sam3/__init__.py +3 -0
- ultralytics/models/sam/sam3/decoder.py +546 -0
- ultralytics/models/sam/sam3/encoder.py +529 -0
- ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
- ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
- ultralytics/models/sam/sam3/model_misc.py +199 -0
- ultralytics/models/sam/sam3/necks.py +129 -0
- ultralytics/models/sam/sam3/sam3_image.py +339 -0
- ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
- ultralytics/models/sam/sam3/vitdet.py +547 -0
- ultralytics/models/sam/sam3/vl_combiner.py +160 -0
- ultralytics/models/utils/loss.py +14 -26
- ultralytics/models/utils/ops.py +13 -17
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +9 -12
- ultralytics/models/yolo/classify/train.py +11 -32
- ultralytics/models/yolo/classify/val.py +29 -28
- ultralytics/models/yolo/detect/predict.py +7 -10
- ultralytics/models/yolo/detect/train.py +11 -20
- ultralytics/models/yolo/detect/val.py +70 -58
- ultralytics/models/yolo/model.py +36 -53
- ultralytics/models/yolo/obb/predict.py +5 -14
- ultralytics/models/yolo/obb/train.py +11 -14
- ultralytics/models/yolo/obb/val.py +39 -36
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +6 -21
- ultralytics/models/yolo/pose/train.py +10 -15
- ultralytics/models/yolo/pose/val.py +38 -57
- ultralytics/models/yolo/segment/predict.py +14 -18
- ultralytics/models/yolo/segment/train.py +3 -6
- ultralytics/models/yolo/segment/val.py +93 -45
- ultralytics/models/yolo/world/train.py +8 -14
- ultralytics/models/yolo/world/train_world.py +11 -34
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +16 -23
- ultralytics/models/yolo/yoloe/train.py +30 -43
- ultralytics/models/yolo/yoloe/train_seg.py +5 -10
- ultralytics/models/yolo/yoloe/val.py +15 -20
- ultralytics/nn/__init__.py +7 -7
- ultralytics/nn/autobackend.py +145 -77
- ultralytics/nn/modules/__init__.py +60 -60
- ultralytics/nn/modules/activation.py +4 -6
- ultralytics/nn/modules/block.py +132 -216
- ultralytics/nn/modules/conv.py +52 -97
- ultralytics/nn/modules/head.py +50 -103
- ultralytics/nn/modules/transformer.py +76 -88
- ultralytics/nn/modules/utils.py +16 -21
- ultralytics/nn/tasks.py +94 -154
- ultralytics/nn/text_model.py +40 -67
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +11 -17
- ultralytics/solutions/analytics.py +15 -16
- ultralytics/solutions/config.py +5 -6
- ultralytics/solutions/distance_calculation.py +10 -13
- ultralytics/solutions/heatmap.py +7 -13
- ultralytics/solutions/instance_segmentation.py +5 -8
- ultralytics/solutions/object_blurrer.py +7 -10
- ultralytics/solutions/object_counter.py +12 -19
- ultralytics/solutions/object_cropper.py +8 -14
- ultralytics/solutions/parking_management.py +33 -31
- ultralytics/solutions/queue_management.py +10 -12
- ultralytics/solutions/region_counter.py +9 -12
- ultralytics/solutions/security_alarm.py +15 -20
- ultralytics/solutions/similarity_search.py +10 -15
- ultralytics/solutions/solutions.py +75 -74
- ultralytics/solutions/speed_estimation.py +7 -10
- ultralytics/solutions/streamlit_inference.py +2 -4
- ultralytics/solutions/templates/similarity-search.html +7 -18
- ultralytics/solutions/trackzone.py +7 -10
- ultralytics/solutions/vision_eye.py +5 -8
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +3 -5
- ultralytics/trackers/bot_sort.py +10 -27
- ultralytics/trackers/byte_tracker.py +14 -30
- ultralytics/trackers/track.py +3 -6
- ultralytics/trackers/utils/gmc.py +11 -22
- ultralytics/trackers/utils/kalman_filter.py +37 -48
- ultralytics/trackers/utils/matching.py +12 -15
- ultralytics/utils/__init__.py +116 -116
- ultralytics/utils/autobatch.py +2 -4
- ultralytics/utils/autodevice.py +17 -18
- ultralytics/utils/benchmarks.py +32 -46
- ultralytics/utils/callbacks/base.py +8 -10
- ultralytics/utils/callbacks/clearml.py +5 -13
- ultralytics/utils/callbacks/comet.py +32 -46
- ultralytics/utils/callbacks/dvc.py +13 -18
- ultralytics/utils/callbacks/mlflow.py +4 -5
- ultralytics/utils/callbacks/neptune.py +7 -15
- ultralytics/utils/callbacks/platform.py +314 -38
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +23 -31
- ultralytics/utils/callbacks/wb.py +10 -13
- ultralytics/utils/checks.py +99 -76
- ultralytics/utils/cpu.py +3 -8
- ultralytics/utils/dist.py +8 -12
- ultralytics/utils/downloads.py +20 -30
- ultralytics/utils/errors.py +6 -14
- ultralytics/utils/events.py +2 -4
- ultralytics/utils/export/__init__.py +4 -236
- ultralytics/utils/export/engine.py +237 -0
- ultralytics/utils/export/imx.py +91 -55
- ultralytics/utils/export/tensorflow.py +231 -0
- ultralytics/utils/files.py +24 -28
- ultralytics/utils/git.py +9 -11
- ultralytics/utils/instance.py +30 -51
- ultralytics/utils/logger.py +212 -114
- ultralytics/utils/loss.py +14 -22
- ultralytics/utils/metrics.py +126 -155
- ultralytics/utils/nms.py +13 -16
- ultralytics/utils/ops.py +107 -165
- ultralytics/utils/patches.py +33 -21
- ultralytics/utils/plotting.py +72 -80
- ultralytics/utils/tal.py +25 -39
- ultralytics/utils/torch_utils.py +52 -78
- ultralytics/utils/tqdm.py +20 -20
- ultralytics/utils/triton.py +13 -19
- ultralytics/utils/tuner.py +17 -5
- dgenerate_ultralytics_headless-8.3.214.dist-info/RECORD +0 -283
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/top_level.txt +0 -0
ultralytics/utils/callbacks/dvc.py
@@ -27,8 +27,7 @@ except (ImportError, AssertionError, TypeError):


 def _log_images(path: Path, prefix: str = "") -> None:
-    """
-    Log images at specified path with an optional prefix using DVCLive.
+    """Log images at specified path with an optional prefix using DVCLive.

     This function logs images found at the given path to DVCLive, organizing them by batch to enable slider
     functionality in the UI. It processes image filenames to extract batch information and restructures the path
@@ -55,8 +54,7 @@ def _log_images(path: Path, prefix: str = "") -> None:


 def _log_plots(plots: dict, prefix: str = "") -> None:
-    """
-    Log plot images for training progress if they have not been previously processed.
+    """Log plot images for training progress if they have not been previously processed.

     Args:
         plots (dict): Dictionary containing plot information with timestamps.
@@ -70,15 +68,14 @@ def _log_plots(plots: dict, prefix: str = "") -> None:


 def _log_confusion_matrix(validator) -> None:
-    """
-    Log confusion matrix for a validator using DVCLive.
+    """Log confusion matrix for a validator using DVCLive.

-    This function processes the confusion matrix from a validator object and logs it to DVCLive by converting
-    the matrix into lists of target and prediction labels.
+    This function processes the confusion matrix from a validator object and logs it to DVCLive by converting the matrix
+    into lists of target and prediction labels.

     Args:
         validator (BaseValidator): The validator object containing the confusion matrix and class names. Must have
-            attributes
+            attributes confusion_matrix.matrix, confusion_matrix.task, and names.
     """
     targets = []
     preds = []
@@ -123,11 +120,10 @@ def on_train_epoch_start(trainer) -> None:


 def on_fit_epoch_end(trainer) -> None:
-    """
-    Log training metrics, model info, and advance to next step at the end of each fit epoch.
+    """Log training metrics, model info, and advance to next step at the end of each fit epoch.

-    This function is called at the end of each fit epoch during training. It logs various metrics including
-    training loss items, validation metrics, and learning rates. On the first epoch, it also logs model
+    This function is called at the end of each fit epoch during training. It logs various metrics including training
+    loss items, validation metrics, and learning rates. On the first epoch, it also logs model
     information. Additionally, it logs training and validation plots and advances the DVCLive step counter.

     Args:
@@ -157,12 +153,11 @@ def on_fit_epoch_end(trainer) -> None:


 def on_train_end(trainer) -> None:
-    """
-    Log best metrics, plots, and confusion matrix at the end of training.
+    """Log best metrics, plots, and confusion matrix at the end of training.

-    This function is called at the conclusion of the training process to log final metrics, visualizations, and
-    model artifacts if DVCLive logging is active. It captures the best model performance metrics, training plots,
-    validation plots, and confusion matrix for later analysis.
+    This function is called at the conclusion of the training process to log final metrics, visualizations, and model
+    artifacts if DVCLive logging is active. It captures the best model performance metrics, training plots, validation
+    plots, and confusion matrix for later analysis.

     Args:
         trainer (BaseTrainer): The trainer object containing training state, metrics, and validation results.
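
For reference, these DVCLive callbacks activate automatically once dvclive is installed; a minimal hedged sketch (model and dataset choices are illustrative):

# Hedged sketch: with `pip install dvclive`, the dvc.py callbacks above run during training.
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # illustrative checkpoint
model.train(data="coco8.yaml", epochs=3, imgsz=640)  # plots and the confusion matrix are logged via DVCLive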

ultralytics/utils/callbacks/mlflow.py
@@ -45,17 +45,16 @@ def sanitize_dict(x: dict) -> dict:


 def on_pretrain_routine_end(trainer):
-    """
-    Log training parameters to MLflow at the end of the pretraining routine.
+    """Log training parameters to MLflow at the end of the pretraining routine.

     This function sets up MLflow logging based on environment variables and trainer arguments. It sets the tracking URI,
-    experiment name, and run name, then starts the MLflow run if not already active. It finally logs the parameters
-    from the trainer.
+    experiment name, and run name, then starts the MLflow run if not already active. It finally logs the parameters from
+    the trainer.

     Args:
         trainer (ultralytics.engine.trainer.BaseTrainer): The training object with arguments and parameters to log.

-
+    Notes:
         MLFLOW_TRACKING_URI: The URI for MLflow tracking. If not set, defaults to 'runs/mlflow'.
         MLFLOW_EXPERIMENT_NAME: The name of the MLflow experiment. If not set, defaults to trainer.args.project.
         MLFLOW_RUN: The name of the MLflow run. If not set, defaults to trainer.args.name.
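
The environment variables named in the docstring above can be set before training; a minimal hedged sketch (tracking URI, experiment, and run values are placeholders):

# Hedged sketch of the MLflow variables described above; values are illustrative.
import os

os.environ["MLFLOW_TRACKING_URI"] = "http://127.0.0.1:5000"  # defaults to 'runs/mlflow' if unset
os.environ["MLFLOW_EXPERIMENT_NAME"] = "detect-experiments"  # defaults to trainer.args.project
os.environ["MLFLOW_RUN"] = "baseline"  # defaults to trainer.args.name

from ultralytics import YOLO

YOLO("yolo11n.pt").train(data="coco8.yaml", epochs=3)  # parameters are logged at the end of the pretrain routine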

ultralytics/utils/callbacks/neptune.py
@@ -18,8 +18,7 @@ except (ImportError, AssertionError):


 def _log_scalars(scalars: dict, step: int = 0) -> None:
-    """
-    Log scalars to the NeptuneAI experiment logger.
+    """Log scalars to the NeptuneAI experiment logger.

     Args:
         scalars (dict): Dictionary of scalar values to log to NeptuneAI.
@@ -35,11 +34,10 @@ def _log_scalars(scalars: dict, step: int = 0) -> None:


 def _log_images(imgs_dict: dict, group: str = "") -> None:
-    """
-    Log images to the NeptuneAI experiment logger.
+    """Log images to the NeptuneAI experiment logger.

-    This function logs image data to Neptune.ai when a valid Neptune run is active. Images are organized
-    under the specified group name.
+    This function logs image data to Neptune.ai when a valid Neptune run is active. Images are organized under the
+    specified group name.

     Args:
         imgs_dict (dict): Dictionary of images to log, with keys as image names and values as image data.
@@ -108,15 +106,9 @@ def on_train_end(trainer) -> None:
     """Log final results, plots, and model weights at the end of training."""
     if run:
         # Log final results, CM matrix + PR plots
-        files = [
-            "results.png",
-            "confusion_matrix.png",
-            "confusion_matrix_normalized.png",
-            *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
-        ]
-        files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter
-        for f in files:
-            _log_plot(title=f.stem, plot_path=f)
+        for f in [*trainer.plots.keys(), *trainer.validator.plots.keys()]:
+            if "batch" not in f.name:
+                _log_plot(title=f.stem, plot_path=f)
         # Log the final model
         run[f"weights/{trainer.args.name or trainer.args.task}/{trainer.best.name}"].upload(File(str(trainer.best)))


ultralytics/utils/callbacks/platform.py
@@ -1,73 +1,349 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

-
+import os
+import platform
+import socket
+import sys
+from concurrent.futures import ThreadPoolExecutor
+from pathlib import Path
+from time import time
+
+from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING, colorstr
+
+PREFIX = colorstr("Platform: ")
+_last_upload = 0  # Rate limit model uploads
+_console_logger = None  # Global console logger instance
+_system_logger = None  # Cached system logger instance
+
+try:
+    assert not TESTS_RUNNING  # do not log pytest
+    assert SETTINGS.get("platform", False) is True or os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
+    _api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
+    assert _api_key  # verify API key is present
+
+    import requests
+
+    from ultralytics.utils.logger import ConsoleLogger, SystemLogger
+    from ultralytics.utils.torch_utils import model_info_for_loggers
+
+    _executor = ThreadPoolExecutor(max_workers=10)  # Bounded thread pool for async operations
+
+except (AssertionError, ImportError):
+    _api_key = None
+
+
+def _interp_plot(plot, n=101):
+    """Interpolate plot curve data from 1000 to n points to reduce storage size."""
+    import numpy as np
+
+    if not plot.get("x") or not plot.get("y"):
+        return plot  # No interpolation needed (e.g., confusion_matrix)
+
+    x, y = np.array(plot["x"]), np.array(plot["y"])
+    if len(x) <= n:
+        return plot  # Already small enough
+
+    # New x values (101 points gives clean 0.01 increments: 0, 0.01, 0.02, ..., 1.0)
+    x_new = np.linspace(x[0], x[-1], n)
+
+    # Interpolate y values (handle both 1D and 2D arrays)
+    if y.ndim == 1:
+        y_new = np.interp(x_new, x, y)
+    else:
+        y_new = np.array([np.interp(x_new, x, yi) for yi in y])
+
+    # Also interpolate ap if present (for PR curves)
+    result = {**plot, "x": x_new.tolist(), "y": y_new.tolist()}
+    if "ap" in plot:
+        result["ap"] = plot["ap"]  # Keep AP values as-is (per-class scalars)
+
+    return result
+
+
+def _send(event, data, project, name):
+    """Send event to Platform endpoint."""
+    try:
+        requests.post(
+            "https://alpha.ultralytics.com/api/webhooks/training/metrics",
+            json={"event": event, "project": project, "name": name, "data": data},
+            headers={"Authorization": f"Bearer {_api_key}"},
+            timeout=10,
+        ).raise_for_status()
+    except Exception as e:
+        LOGGER.debug(f"Platform: Failed to send {event}: {e}")
+
+
+def _send_async(event, data, project, name):
+    """Send event asynchronously using bounded thread pool."""
+    _executor.submit(_send, event, data, project, name)
+
+
+def _upload_model(model_path, project, name):
+    """Upload model checkpoint to Platform via signed URL."""
+    try:
+        model_path = Path(model_path)
+        if not model_path.exists():
+            return None
+
+        # Get signed upload URL
+        response = requests.post(
+            "https://alpha.ultralytics.com/api/webhooks/models/upload",
+            json={"project": project, "name": name, "filename": model_path.name},
+            headers={"Authorization": f"Bearer {_api_key}"},
+            timeout=10,
+        )
+        response.raise_for_status()
+        data = response.json()
+
+        # Upload to GCS
+        with open(model_path, "rb") as f:
+            requests.put(
+                data["uploadUrl"],
+                data=f,
+                headers={"Content-Type": "application/octet-stream"},
+                timeout=600,  # 10 min timeout for large models
+            ).raise_for_status()
+
+        # url = f"https://alpha.ultralytics.com/{project}/{name}"
+        # LOGGER.info(f"{PREFIX}Model uploaded to {url}")
+        return data.get("gcsPath")
+
+    except Exception as e:
+        LOGGER.debug(f"Platform: Failed to upload model: {e}")
+        return None
+
+
+def _upload_model_async(model_path, project, name):
+    """Upload model asynchronously using bounded thread pool."""
+    _executor.submit(_upload_model, model_path, project, name)
+
+
+def _get_environment_info():
+    """Collect comprehensive environment info using existing ultralytics utilities."""
+    import shutil
+
+    import psutil
+    import torch
+
+    from ultralytics import __version__
+    from ultralytics.utils.torch_utils import get_cpu_info, get_gpu_info
+
+    # Get RAM and disk totals
+    memory = psutil.virtual_memory()
+    disk_usage = shutil.disk_usage("/")
+
+    env = {
+        "ultralyticsVersion": __version__,
+        "hostname": socket.gethostname(),
+        "os": platform.platform(),
+        "environment": ENVIRONMENT,
+        "pythonVersion": PYTHON_VERSION,
+        "pythonExecutable": sys.executable,
+        "cpuCount": os.cpu_count() or 0,
+        "cpu": get_cpu_info(),
+        "command": " ".join(sys.argv),
+        "totalRamGb": round(memory.total / (1 << 30), 1),  # Total RAM in GB
+        "totalDiskGb": round(disk_usage.total / (1 << 30), 1),  # Total disk in GB
+    }
+
+    # Git info using cached GIT singleton (no subprocess calls)
+    try:
+        if GIT.is_repo:
+            if GIT.origin:
+                env["gitRepository"] = GIT.origin
+            if GIT.branch:
+                env["gitBranch"] = GIT.branch
+            if GIT.commit:
+                env["gitCommit"] = GIT.commit[:12]  # Short hash
+    except Exception:
+        pass
+
+    # GPU info
+    try:
+        if torch.cuda.is_available():
+            env["gpuCount"] = torch.cuda.device_count()
+            env["gpuType"] = get_gpu_info(0) if torch.cuda.device_count() > 0 else None
+    except Exception:
+        pass
+
+    return env


 def on_pretrain_routine_start(trainer):
-    """Initialize
-
-
+    """Initialize Platform logging at training start."""
+    global _console_logger, _last_upload
+
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
+
+    # Initialize upload timer to now so first checkpoint waits 15 min from training start
+    _last_upload = time()

-
-
-
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+    url = f"https://alpha.ultralytics.com/{project}/{name}"
+    LOGGER.info(f"{PREFIX}Streaming to {url}")

+    # Create callback to send console output to Platform
+    def send_console_output(content, line_count, chunk_id):
+        """Send batched console output to Platform webhook."""
+        _send_async("console_output", {"chunkId": chunk_id, "content": content, "lineCount": line_count}, project, name)

-
-
-
+    # Start console capture with batching (5 lines or 5 seconds)
+    _console_logger = ConsoleLogger(batch_size=5, flush_interval=5.0, on_flush=send_console_output)
+    _console_logger.start_capture()
+
+    # Gather model info for richer metadata
+    model_info = {}
+    try:
+        info = model_info_for_loggers(trainer)
+        model_info = {
+            "parameters": info.get("model/parameters", 0),
+            "gflops": info.get("model/GFLOPs", 0),
+            "classes": getattr(trainer.model, "yaml", {}).get("nc", 0),  # number of classes
+        }
+    except Exception:
+        pass
+
+    # Collect environment info (W&B-style metadata)
+    environment = _get_environment_info()
+
+    _send_async(
+        "training_started",
+        {
+            "trainArgs": {k: str(v) for k, v in vars(trainer.args).items()},
+            "epochs": trainer.epochs,
+            "device": str(trainer.device),
+            "modelInfo": model_info,
+            "environment": environment,
+        },
+        project,
+        name,
+    )


 def on_fit_epoch_end(trainer):
-    """
-
-
-
+    """Log training and system metrics at epoch end."""
+    global _system_logger
+
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
+
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+    metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics}
+
+    if trainer.optimizer and trainer.optimizer.param_groups:
+        metrics["lr"] = trainer.optimizer.param_groups[0]["lr"]
+    if trainer.epoch == 0:
+        try:
+            metrics.update(model_info_for_loggers(trainer))
+        except Exception:
+            pass
+
+    # Get system metrics (cache SystemLogger for efficiency)
+    system = {}
+    try:
+        if _system_logger is None:
+            _system_logger = SystemLogger()
+        system = _system_logger.get_metrics(rates=True)
+    except Exception:
+        pass
+
+    _send_async(
+        "epoch_end",
+        {
+            "epoch": trainer.epoch,
+            "metrics": metrics,
+            "system": system,
+            "fitness": trainer.fitness,
+            "best_fitness": trainer.best_fitness,
+        },
+        project,
+        name,
+    )


 def on_model_save(trainer):
-    """
-
+    """Upload model checkpoint (rate limited to every 15 min)."""
+    global _last_upload

+    if RANK not in {-1, 0} or not trainer.args.project:
+        return

-
-
-
-
+    # Rate limit to every 15 minutes (900 seconds)
+    if time() - _last_upload < 900:
+        return
+
+    model_path = trainer.best if trainer.best and Path(trainer.best).exists() else trainer.last
+    if not model_path:
+        return

+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+    _upload_model_async(model_path, project, name)
+    _last_upload = time()
+
+
+def on_train_end(trainer):
+    """Log final results, upload best model, and send validation plot data."""
+    global _console_logger

-
-
-    pass
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return

+    project, name = str(trainer.args.project), str(trainer.args.name or "train")

-
-
-
+    # Stop console capture
+    if _console_logger:
+        _console_logger.stop_capture()
+        _console_logger = None

+    # Upload best model (blocking to ensure it completes)
+    model_path = None
+    model_size = None
+    if trainer.best and Path(trainer.best).exists():
+        model_size = Path(trainer.best).stat().st_size
+        model_path = _upload_model(trainer.best, project, name)

-
-
-
+    # Collect plots from trainer and validator, deduplicating by type
+    plots_by_type = {}
+    for info in getattr(trainer, "plots", {}).values():
+        if info.get("data") and info["data"].get("type"):
+            plots_by_type[info["data"]["type"]] = info["data"]
+    for info in getattr(getattr(trainer, "validator", None), "plots", {}).values():
+        if info.get("data") and info["data"].get("type"):
+            plots_by_type.setdefault(info["data"]["type"], info["data"])  # Don't overwrite trainer plots
+    plots = [_interp_plot(p) for p in plots_by_type.values()]  # Interpolate curves to reduce size

+    # Get class names
+    names = getattr(getattr(trainer, "validator", None), "names", None) or (trainer.data or {}).get("names")
+    class_names = list(names.values()) if isinstance(names, dict) else list(names) if names else None

-
-
-
+    _send(
+        "training_complete",
+        {
+            "results": {
+                "metrics": {**trainer.metrics, "fitness": trainer.fitness},
+                "bestEpoch": getattr(trainer, "best_epoch", trainer.epoch),
+                "bestFitness": trainer.best_fitness,
+                "modelPath": model_path or (str(trainer.best) if trainer.best else None),
+                "modelSize": model_size,
+            },
+            "classNames": class_names,
+            "plots": plots,
+        },
+        project,
+        name,
+    )
+    url = f"https://alpha.ultralytics.com/{project}/{name}"
+    LOGGER.info(f"{PREFIX}View results at {url}")


 callbacks = (
     {
         "on_pretrain_routine_start": on_pretrain_routine_start,
-        "on_pretrain_routine_end": on_pretrain_routine_end,
         "on_fit_epoch_end": on_fit_epoch_end,
         "on_model_save": on_model_save,
         "on_train_end": on_train_end,
-        "on_train_start": on_train_start,
-        "on_val_start": on_val_start,
-        "on_predict_start": on_predict_start,
-        "on_export_start": on_export_start,
     }
-    if
+    if _api_key
     else {}
 )
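
As a standalone illustration of the curve downsampling performed by `_interp_plot` above, a minimal sketch with a synthetic 1000-point curve:

# Hedged sketch: downsample a dense curve to 101 evenly spaced points with numpy, as _interp_plot does.
import numpy as np

x = np.linspace(0.0, 1.0, 1000)  # synthetic confidence/recall axis with 1000 samples
y = 1.0 - x**2  # synthetic metric values
x_new = np.linspace(x[0], x[-1], 101)  # 101 points -> clean 0.01 increments from 0 to 1
y_new = np.interp(x_new, x, y)  # linear interpolation onto the coarse grid
print(len(x_new), len(y_new))  # 101 101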

ultralytics/utils/callbacks/raytune.py
@@ -13,11 +13,10 @@ except (ImportError, AssertionError):


 def on_fit_epoch_end(trainer):
-    """
-    Report training metrics to Ray Tune at epoch end when a Ray session is active.
+    """Report training metrics to Ray Tune at epoch end when a Ray session is active.

-    Captures metrics from the trainer object and sends them to Ray Tune with the current epoch number,
-    enabling hyperparameter tuning optimization. Only executes when within an active Ray Tune session.
+    Captures metrics from the trainer object and sends them to Ray Tune with the current epoch number, enabling
+    hyperparameter tuning optimization. Only executes when within an active Ray Tune session.

     Args:
         trainer (ultralytics.engine.trainer.BaseTrainer): The Ultralytics trainer object containing metrics and epochs.
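
For context, this callback fires when tuning is driven through Ray; a hedged sketch of how such a session is typically started (dataset and settings are illustrative):

# Hedged sketch: launch Ray Tune-based hyperparameter search, which reports metrics via on_fit_epoch_end.
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
model.tune(data="coco8.yaml", epochs=5, use_ray=True)  # requires `pip install "ray[tune]"`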

ultralytics/utils/callbacks/tensorboard.py
@@ -9,7 +9,6 @@ try:
     PREFIX = colorstr("TensorBoard: ")

     # Imports below only required if TensorBoard enabled
-    import warnings
     from copy import deepcopy

     import torch
@@ -22,8 +21,7 @@ except (ImportError, AssertionError, TypeError, AttributeError):


 def _log_scalars(scalars: dict, step: int = 0) -> None:
-    """
-    Log scalar values to TensorBoard.
+    """Log scalar values to TensorBoard.

     Args:
         scalars (dict): Dictionary of scalar values to log to TensorBoard. Keys are scalar names and values are the
@@ -41,16 +39,15 @@ def _log_scalars(scalars: dict, step: int = 0) -> None:


 def _log_tensorboard_graph(trainer) -> None:
-    """
-    Log model graph to TensorBoard.
+    """Log model graph to TensorBoard.

     This function attempts to visualize the model architecture in TensorBoard by tracing the model with a dummy input
     tensor. It first tries a simple method suitable for YOLO models, and if that fails, falls back to a more complex
     approach for models like RTDETR that may require special handling.

     Args:
-        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing the model to visualize.
-            Must have attributes model and args with imgsz.
+        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing the model to visualize. Must
+            have attributes model and args with imgsz.

     Notes:
         This function requires TensorBoard integration to be enabled and the global WRITER to be initialized.
@@ -63,32 +60,27 @@ def _log_tensorboard_graph(trainer) -> None:
     p = next(trainer.model.parameters())  # for device, type
     im = torch.zeros((1, 3, *imgsz), device=p.device, dtype=p.dtype)  # input image (must be zeros, not empty)

-
-
-
-
-
+    # Try simple method first (YOLO)
+    try:
+        trainer.model.eval()  # place in .eval() mode to avoid BatchNorm statistics changes
+        WRITER.add_graph(torch.jit.trace(torch_utils.unwrap_model(trainer.model), im, strict=False), [])
+        LOGGER.info(f"{PREFIX}model graph visualization added ✅")
+        return
+    except Exception as e1:
+        # Fallback to TorchScript export steps (RTDETR)
         try:
-
-
+            model = deepcopy(torch_utils.unwrap_model(trainer.model))
+            model.eval()
+            model = model.fuse(verbose=False)
+            for m in model.modules():
+                if hasattr(m, "export"):  # Detect, RTDETRDecoder (Segment and Pose use Detect base class)
+                    m.export = True
+                    m.format = "torchscript"
+            model(im)  # dry run
+            WRITER.add_graph(torch.jit.trace(model, im, strict=False), [])
             LOGGER.info(f"{PREFIX}model graph visualization added ✅")
-
-
-        except Exception:
-            # Fallback to TorchScript export steps (RTDETR)
-            try:
-                model = deepcopy(torch_utils.unwrap_model(trainer.model))
-                model.eval()
-                model = model.fuse(verbose=False)
-                for m in model.modules():
-                    if hasattr(m, "export"):  # Detect, RTDETRDecoder (Segment and Pose use Detect base class)
-                        m.export = True
-                        m.format = "torchscript"
-                model(im)  # dry run
-                WRITER.add_graph(torch.jit.trace(model, im, strict=False), [])
-                LOGGER.info(f"{PREFIX}model graph visualization added ✅")
-            except Exception as e:
-                LOGGER.warning(f"{PREFIX}TensorBoard graph visualization failure {e}")
+        except Exception as e2:
+            LOGGER.warning(f"{PREFIX}TensorBoard graph visualization failure: {e1} -> {e2}")


 def on_pretrain_routine_start(trainer) -> None:
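
Outside the trainer, the same graph-logging idea can be reproduced with the standard TensorBoard writer; a minimal hedged sketch using a toy module:

# Hedged sketch: trace a small model and add its graph to TensorBoard, mirroring the simple path above.
import torch
from torch.utils.tensorboard import SummaryWriter  # requires the tensorboard package

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
im = torch.zeros(1, 3, 64, 64)  # dummy input (zeros, matching the callback)
writer = SummaryWriter("runs/graph_demo")
writer.add_graph(torch.jit.trace(model, im, strict=False), [])
writer.close()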