dgenerate-ultralytics-headless 8.3.134__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272)
  1. dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
  2. dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
  3. dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
  4. dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
  5. dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
  6. dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
  7. tests/__init__.py +22 -0
  8. tests/conftest.py +83 -0
  9. tests/test_cli.py +138 -0
  10. tests/test_cuda.py +215 -0
  11. tests/test_engine.py +131 -0
  12. tests/test_exports.py +236 -0
  13. tests/test_integrations.py +154 -0
  14. tests/test_python.py +694 -0
  15. tests/test_solutions.py +187 -0
  16. ultralytics/__init__.py +30 -0
  17. ultralytics/assets/bus.jpg +0 -0
  18. ultralytics/assets/zidane.jpg +0 -0
  19. ultralytics/cfg/__init__.py +1023 -0
  20. ultralytics/cfg/datasets/Argoverse.yaml +77 -0
  21. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  22. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  23. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  24. ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
  25. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  26. ultralytics/cfg/datasets/Objects365.yaml +443 -0
  27. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  28. ultralytics/cfg/datasets/VOC.yaml +106 -0
  29. ultralytics/cfg/datasets/VisDrone.yaml +77 -0
  30. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  31. ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
  32. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  33. ultralytics/cfg/datasets/coco-pose.yaml +42 -0
  34. ultralytics/cfg/datasets/coco.yaml +118 -0
  35. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  36. ultralytics/cfg/datasets/coco128.yaml +101 -0
  37. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  38. ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
  39. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  40. ultralytics/cfg/datasets/coco8.yaml +101 -0
  41. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  42. ultralytics/cfg/datasets/dog-pose.yaml +24 -0
  43. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  44. ultralytics/cfg/datasets/dota8.yaml +35 -0
  45. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
  46. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  47. ultralytics/cfg/datasets/medical-pills.yaml +22 -0
  48. ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
  49. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  50. ultralytics/cfg/datasets/signature.yaml +21 -0
  51. ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
  52. ultralytics/cfg/datasets/xView.yaml +155 -0
  53. ultralytics/cfg/default.yaml +127 -0
  54. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  55. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  56. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  57. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  58. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  59. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  60. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  61. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  62. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  63. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  64. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  65. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  66. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  68. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  69. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  70. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  71. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  74. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  75. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  76. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  77. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  78. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  79. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  80. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  81. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  82. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  83. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
  84. ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
  85. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  86. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  87. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  88. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  89. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  90. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  91. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  92. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  93. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  94. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  95. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  96. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  97. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  98. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  99. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  100. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  101. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  102. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  103. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  105. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  106. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  107. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  108. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  109. ultralytics/cfg/trackers/botsort.yaml +22 -0
  110. ultralytics/cfg/trackers/bytetrack.yaml +14 -0
  111. ultralytics/data/__init__.py +26 -0
  112. ultralytics/data/annotator.py +66 -0
  113. ultralytics/data/augment.py +2945 -0
  114. ultralytics/data/base.py +438 -0
  115. ultralytics/data/build.py +258 -0
  116. ultralytics/data/converter.py +754 -0
  117. ultralytics/data/dataset.py +834 -0
  118. ultralytics/data/loaders.py +676 -0
  119. ultralytics/data/scripts/download_weights.sh +18 -0
  120. ultralytics/data/scripts/get_coco.sh +61 -0
  121. ultralytics/data/scripts/get_coco128.sh +18 -0
  122. ultralytics/data/scripts/get_imagenet.sh +52 -0
  123. ultralytics/data/split.py +125 -0
  124. ultralytics/data/split_dota.py +325 -0
  125. ultralytics/data/utils.py +777 -0
  126. ultralytics/engine/__init__.py +1 -0
  127. ultralytics/engine/exporter.py +1519 -0
  128. ultralytics/engine/model.py +1156 -0
  129. ultralytics/engine/predictor.py +502 -0
  130. ultralytics/engine/results.py +1840 -0
  131. ultralytics/engine/trainer.py +853 -0
  132. ultralytics/engine/tuner.py +243 -0
  133. ultralytics/engine/validator.py +377 -0
  134. ultralytics/hub/__init__.py +168 -0
  135. ultralytics/hub/auth.py +137 -0
  136. ultralytics/hub/google/__init__.py +176 -0
  137. ultralytics/hub/session.py +446 -0
  138. ultralytics/hub/utils.py +248 -0
  139. ultralytics/models/__init__.py +9 -0
  140. ultralytics/models/fastsam/__init__.py +7 -0
  141. ultralytics/models/fastsam/model.py +61 -0
  142. ultralytics/models/fastsam/predict.py +181 -0
  143. ultralytics/models/fastsam/utils.py +24 -0
  144. ultralytics/models/fastsam/val.py +40 -0
  145. ultralytics/models/nas/__init__.py +7 -0
  146. ultralytics/models/nas/model.py +102 -0
  147. ultralytics/models/nas/predict.py +58 -0
  148. ultralytics/models/nas/val.py +39 -0
  149. ultralytics/models/rtdetr/__init__.py +7 -0
  150. ultralytics/models/rtdetr/model.py +63 -0
  151. ultralytics/models/rtdetr/predict.py +84 -0
  152. ultralytics/models/rtdetr/train.py +85 -0
  153. ultralytics/models/rtdetr/val.py +191 -0
  154. ultralytics/models/sam/__init__.py +6 -0
  155. ultralytics/models/sam/amg.py +260 -0
  156. ultralytics/models/sam/build.py +358 -0
  157. ultralytics/models/sam/model.py +170 -0
  158. ultralytics/models/sam/modules/__init__.py +1 -0
  159. ultralytics/models/sam/modules/blocks.py +1129 -0
  160. ultralytics/models/sam/modules/decoders.py +515 -0
  161. ultralytics/models/sam/modules/encoders.py +854 -0
  162. ultralytics/models/sam/modules/memory_attention.py +299 -0
  163. ultralytics/models/sam/modules/sam.py +1006 -0
  164. ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
  165. ultralytics/models/sam/modules/transformer.py +351 -0
  166. ultralytics/models/sam/modules/utils.py +394 -0
  167. ultralytics/models/sam/predict.py +1605 -0
  168. ultralytics/models/utils/__init__.py +1 -0
  169. ultralytics/models/utils/loss.py +455 -0
  170. ultralytics/models/utils/ops.py +268 -0
  171. ultralytics/models/yolo/__init__.py +7 -0
  172. ultralytics/models/yolo/classify/__init__.py +7 -0
  173. ultralytics/models/yolo/classify/predict.py +88 -0
  174. ultralytics/models/yolo/classify/train.py +233 -0
  175. ultralytics/models/yolo/classify/val.py +215 -0
  176. ultralytics/models/yolo/detect/__init__.py +7 -0
  177. ultralytics/models/yolo/detect/predict.py +124 -0
  178. ultralytics/models/yolo/detect/train.py +217 -0
  179. ultralytics/models/yolo/detect/val.py +451 -0
  180. ultralytics/models/yolo/model.py +354 -0
  181. ultralytics/models/yolo/obb/__init__.py +7 -0
  182. ultralytics/models/yolo/obb/predict.py +66 -0
  183. ultralytics/models/yolo/obb/train.py +81 -0
  184. ultralytics/models/yolo/obb/val.py +283 -0
  185. ultralytics/models/yolo/pose/__init__.py +7 -0
  186. ultralytics/models/yolo/pose/predict.py +79 -0
  187. ultralytics/models/yolo/pose/train.py +154 -0
  188. ultralytics/models/yolo/pose/val.py +394 -0
  189. ultralytics/models/yolo/segment/__init__.py +7 -0
  190. ultralytics/models/yolo/segment/predict.py +113 -0
  191. ultralytics/models/yolo/segment/train.py +123 -0
  192. ultralytics/models/yolo/segment/val.py +428 -0
  193. ultralytics/models/yolo/world/__init__.py +5 -0
  194. ultralytics/models/yolo/world/train.py +119 -0
  195. ultralytics/models/yolo/world/train_world.py +176 -0
  196. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  197. ultralytics/models/yolo/yoloe/predict.py +169 -0
  198. ultralytics/models/yolo/yoloe/train.py +298 -0
  199. ultralytics/models/yolo/yoloe/train_seg.py +124 -0
  200. ultralytics/models/yolo/yoloe/val.py +191 -0
  201. ultralytics/nn/__init__.py +29 -0
  202. ultralytics/nn/autobackend.py +842 -0
  203. ultralytics/nn/modules/__init__.py +182 -0
  204. ultralytics/nn/modules/activation.py +53 -0
  205. ultralytics/nn/modules/block.py +1966 -0
  206. ultralytics/nn/modules/conv.py +712 -0
  207. ultralytics/nn/modules/head.py +880 -0
  208. ultralytics/nn/modules/transformer.py +713 -0
  209. ultralytics/nn/modules/utils.py +164 -0
  210. ultralytics/nn/tasks.py +1627 -0
  211. ultralytics/nn/text_model.py +351 -0
  212. ultralytics/solutions/__init__.py +41 -0
  213. ultralytics/solutions/ai_gym.py +116 -0
  214. ultralytics/solutions/analytics.py +252 -0
  215. ultralytics/solutions/config.py +106 -0
  216. ultralytics/solutions/distance_calculation.py +124 -0
  217. ultralytics/solutions/heatmap.py +127 -0
  218. ultralytics/solutions/instance_segmentation.py +84 -0
  219. ultralytics/solutions/object_blurrer.py +90 -0
  220. ultralytics/solutions/object_counter.py +195 -0
  221. ultralytics/solutions/object_cropper.py +84 -0
  222. ultralytics/solutions/parking_management.py +273 -0
  223. ultralytics/solutions/queue_management.py +93 -0
  224. ultralytics/solutions/region_counter.py +120 -0
  225. ultralytics/solutions/security_alarm.py +154 -0
  226. ultralytics/solutions/similarity_search.py +172 -0
  227. ultralytics/solutions/solutions.py +724 -0
  228. ultralytics/solutions/speed_estimation.py +110 -0
  229. ultralytics/solutions/streamlit_inference.py +196 -0
  230. ultralytics/solutions/templates/similarity-search.html +160 -0
  231. ultralytics/solutions/trackzone.py +88 -0
  232. ultralytics/solutions/vision_eye.py +68 -0
  233. ultralytics/trackers/__init__.py +7 -0
  234. ultralytics/trackers/basetrack.py +124 -0
  235. ultralytics/trackers/bot_sort.py +260 -0
  236. ultralytics/trackers/byte_tracker.py +480 -0
  237. ultralytics/trackers/track.py +125 -0
  238. ultralytics/trackers/utils/__init__.py +1 -0
  239. ultralytics/trackers/utils/gmc.py +376 -0
  240. ultralytics/trackers/utils/kalman_filter.py +493 -0
  241. ultralytics/trackers/utils/matching.py +157 -0
  242. ultralytics/utils/__init__.py +1435 -0
  243. ultralytics/utils/autobatch.py +106 -0
  244. ultralytics/utils/autodevice.py +174 -0
  245. ultralytics/utils/benchmarks.py +695 -0
  246. ultralytics/utils/callbacks/__init__.py +5 -0
  247. ultralytics/utils/callbacks/base.py +234 -0
  248. ultralytics/utils/callbacks/clearml.py +153 -0
  249. ultralytics/utils/callbacks/comet.py +552 -0
  250. ultralytics/utils/callbacks/dvc.py +205 -0
  251. ultralytics/utils/callbacks/hub.py +108 -0
  252. ultralytics/utils/callbacks/mlflow.py +138 -0
  253. ultralytics/utils/callbacks/neptune.py +140 -0
  254. ultralytics/utils/callbacks/raytune.py +43 -0
  255. ultralytics/utils/callbacks/tensorboard.py +132 -0
  256. ultralytics/utils/callbacks/wb.py +185 -0
  257. ultralytics/utils/checks.py +897 -0
  258. ultralytics/utils/dist.py +119 -0
  259. ultralytics/utils/downloads.py +499 -0
  260. ultralytics/utils/errors.py +43 -0
  261. ultralytics/utils/export.py +219 -0
  262. ultralytics/utils/files.py +221 -0
  263. ultralytics/utils/instance.py +499 -0
  264. ultralytics/utils/loss.py +813 -0
  265. ultralytics/utils/metrics.py +1356 -0
  266. ultralytics/utils/ops.py +885 -0
  267. ultralytics/utils/patches.py +143 -0
  268. ultralytics/utils/plotting.py +1011 -0
  269. ultralytics/utils/tal.py +416 -0
  270. ultralytics/utils/torch_utils.py +990 -0
  271. ultralytics/utils/triton.py +116 -0
  272. ultralytics/utils/tuner.py +159 -0
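The listing above covers everything shipped in the wheel. As a quick post-install sanity check — a hypothetical sketch assuming the package has been installed under the name in the title, e.g. via pip install dgenerate-ultralytics-headless==8.3.134 — the distribution version and the bundled ultralytics package can be verified like this:

    # Hypothetical post-install check; the names used here are taken from the file listing above.
    from importlib.metadata import version

    import ultralytics  # top-level package shipped by this wheel (see top_level.txt)

    print(version("dgenerate-ultralytics-headless"))  # expected to report 8.3.134
    print(ultralytics.__version__)  # version string defined in ultralytics/__init__.py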
ultralytics/utils/callbacks/comet.py
@@ -0,0 +1,552 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ from collections.abc import Callable
+ from types import SimpleNamespace
+ from typing import Any, List, Optional
+
+ import cv2
+ import numpy as np
+
+ from ultralytics.utils import LOGGER, RANK, SETTINGS, TESTS_RUNNING, ops
+ from ultralytics.utils.metrics import ClassifyMetrics, DetMetrics, OBBMetrics, PoseMetrics, SegmentMetrics
+
+ try:
+     assert not TESTS_RUNNING  # do not log pytest
+     assert SETTINGS["comet"] is True  # verify integration is enabled
+     import comet_ml
+
+     assert hasattr(comet_ml, "__version__")  # verify package is not directory
+
+     import os
+     from pathlib import Path
+
+     # Ensures certain logging functions only run for supported tasks
+     COMET_SUPPORTED_TASKS = ["detect", "segment"]
+
+     # Names of plots created by Ultralytics that are logged to Comet
+     CONFUSION_MATRIX_PLOT_NAMES = "confusion_matrix", "confusion_matrix_normalized"
+     EVALUATION_PLOT_NAMES = "F1_curve", "P_curve", "R_curve", "PR_curve"
+     LABEL_PLOT_NAMES = "labels", "labels_correlogram"
+     SEGMENT_METRICS_PLOT_PREFIX = "Box", "Mask"
+     POSE_METRICS_PLOT_PREFIX = "Box", "Pose"
+
+     _comet_image_prediction_count = 0
+
+ except (ImportError, AssertionError):
+     comet_ml = None
+
+
+ def _get_comet_mode() -> str:
+     """Returns the mode of comet set in the environment variables, defaults to 'online' if not set."""
+     comet_mode = os.getenv("COMET_MODE")
+     if comet_mode is not None:
+         LOGGER.warning(
+             "The COMET_MODE environment variable is deprecated. "
+             "Please use COMET_START_ONLINE to set the Comet experiment mode. "
+             "To start an offline Comet experiment, use 'export COMET_START_ONLINE=0'. "
+             "If COMET_START_ONLINE is not set or is set to '1', an online Comet experiment will be created."
+         )
+         return comet_mode
+
+     return "online"
+
+
+ def _get_comet_model_name() -> str:
+     """Returns the model name for Comet from the environment variable COMET_MODEL_NAME or defaults to 'Ultralytics'."""
+     return os.getenv("COMET_MODEL_NAME", "Ultralytics")
+
+
+ def _get_eval_batch_logging_interval() -> int:
+     """Get the evaluation batch logging interval from environment variable or use default value 1."""
+     return int(os.getenv("COMET_EVAL_BATCH_LOGGING_INTERVAL", 1))
+
+
+ def _get_max_image_predictions_to_log() -> int:
+     """Get the maximum number of image predictions to log from the environment variables."""
+     return int(os.getenv("COMET_MAX_IMAGE_PREDICTIONS", 100))
+
+
+ def _scale_confidence_score(score: float) -> float:
+     """Scales the given confidence score by a factor specified in an environment variable."""
+     scale = float(os.getenv("COMET_MAX_CONFIDENCE_SCORE", 100.0))
+     return score * scale
+
+
+ def _should_log_confusion_matrix() -> bool:
+     """Determines if the confusion matrix should be logged based on the environment variable settings."""
+     return os.getenv("COMET_EVAL_LOG_CONFUSION_MATRIX", "false").lower() == "true"
+
+
+ def _should_log_image_predictions() -> bool:
+     """Determines whether to log image predictions based on a specified environment variable."""
+     return os.getenv("COMET_EVAL_LOG_IMAGE_PREDICTIONS", "true").lower() == "true"
+
+
+ def _resume_or_create_experiment(args: SimpleNamespace) -> None:
+     """
+     Resumes CometML experiment or creates a new experiment based on args.
+
+     Ensures that the experiment object is only created in a single process during distributed training.
+     """
+     if RANK not in {-1, 0}:
+         return
+
+     # Set environment variable (if not set by the user) to configure the Comet experiment's online mode under the hood.
+     # IF COMET_START_ONLINE is set by the user it will override COMET_MODE value.
+     if os.getenv("COMET_START_ONLINE") is None:
+         comet_mode = _get_comet_mode()
+         os.environ["COMET_START_ONLINE"] = "1" if comet_mode != "offline" else "0"
+
+     try:
+         _project_name = os.getenv("COMET_PROJECT_NAME", args.project)
+         experiment = comet_ml.start(project_name=_project_name)
+         experiment.log_parameters(vars(args))
+         experiment.log_others(
+             {
+                 "eval_batch_logging_interval": _get_eval_batch_logging_interval(),
+                 "log_confusion_matrix_on_eval": _should_log_confusion_matrix(),
+                 "log_image_predictions": _should_log_image_predictions(),
+                 "max_image_predictions": _get_max_image_predictions_to_log(),
+             }
+         )
+         experiment.log_other("Created from", "ultralytics")
+
+     except Exception as e:
+         LOGGER.warning(f"Comet installed but not initialized correctly, not logging this run. {e}")
+
+
+ def _fetch_trainer_metadata(trainer) -> dict:
+     """Returns metadata for YOLO training including epoch and asset saving status."""
+     curr_epoch = trainer.epoch + 1
+
+     train_num_steps_per_epoch = len(trainer.train_loader.dataset) // trainer.batch_size
+     curr_step = curr_epoch * train_num_steps_per_epoch
+     final_epoch = curr_epoch == trainer.epochs
+
+     save = trainer.args.save
+     save_period = trainer.args.save_period
+     save_interval = curr_epoch % save_period == 0
+     save_assets = save and save_period > 0 and save_interval and not final_epoch
+
+     return dict(curr_epoch=curr_epoch, curr_step=curr_step, save_assets=save_assets, final_epoch=final_epoch)
+
+
+ def _scale_bounding_box_to_original_image_shape(
+     box, resized_image_shape, original_image_shape, ratio_pad
+ ) -> List[float]:
+     """
+     YOLO resizes images during training and the label values are normalized based on this resized shape.
+
+     This function rescales the bounding box labels to the original image shape.
+     """
+     resized_image_height, resized_image_width = resized_image_shape
+
+     # Convert normalized xywh format predictions to xyxy in resized scale format
+     box = ops.xywhn2xyxy(box, h=resized_image_height, w=resized_image_width)
+     # Scale box predictions from resized image scale back to original image scale
+     box = ops.scale_boxes(resized_image_shape, box, original_image_shape, ratio_pad)
+     # Convert bounding box format from xyxy to xywh for Comet logging
+     box = ops.xyxy2xywh(box)
+     # Adjust xy center to correspond to the top-left corner
+     box[:2] -= box[2:] / 2
+     box = box.tolist()
+
+     return box
+
+
+ def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None) -> Optional[dict]:
+     """
+     Format ground truth annotations for object detection.
+
+     This function processes ground truth annotations from a batch of images for object detection tasks. It extracts
+     bounding boxes, class labels, and other metadata for a specific image in the batch, and formats them for
+     visualization or evaluation.
+
+     Args:
+         img_idx (int): Index of the image in the batch to process.
+         image_path (str | Path): Path to the image file.
+         batch (dict): Batch dictionary containing detection data with keys:
+             - 'batch_idx': Tensor of batch indices
+             - 'bboxes': Tensor of bounding boxes in normalized xywh format
+             - 'cls': Tensor of class labels
+             - 'ori_shape': Original image shapes
+             - 'resized_shape': Resized image shapes
+             - 'ratio_pad': Ratio and padding information
+         class_name_map (dict | None, optional): Mapping from class indices to class names.
+
+     Returns:
+         (dict | None): Formatted ground truth annotations with the following structure:
+             - 'boxes': List of box coordinates [x, y, width, height]
+             - 'label': Label string with format "gt_{class_name}"
+             - 'score': Confidence score (always 1.0, scaled by _scale_confidence_score)
+             Returns None if no bounding boxes are found for the image.
+     """
+     indices = batch["batch_idx"] == img_idx
+     bboxes = batch["bboxes"][indices]
+     if len(bboxes) == 0:
+         LOGGER.debug(f"Comet Image: {image_path} has no bounding box labels")
+         return None
+
+     cls_labels = batch["cls"][indices].squeeze(1).tolist()
+     if class_name_map:
+         cls_labels = [str(class_name_map[label]) for label in cls_labels]
+
+     original_image_shape = batch["ori_shape"][img_idx]
+     resized_image_shape = batch["resized_shape"][img_idx]
+     ratio_pad = batch["ratio_pad"][img_idx]
+
+     data = []
+     for box, label in zip(bboxes, cls_labels):
+         box = _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad)
+         data.append(
+             {
+                 "boxes": [box],
+                 "label": f"gt_{label}",
+                 "score": _scale_confidence_score(1.0),
+             }
+         )
+
+     return {"name": "ground_truth", "data": data}
+
+
+ def _format_prediction_annotations(image_path, metadata, class_label_map=None, class_map=None) -> Optional[dict]:
+     """Format YOLO predictions for object detection visualization."""
+     stem = image_path.stem
+     image_id = int(stem) if stem.isnumeric() else stem
+
+     predictions = metadata.get(image_id)
+     if not predictions:
+         LOGGER.debug(f"Comet Image: {image_path} has no bounding box predictions")
+         return None
+
+     # apply the mapping that was used to map the predicted classes when the JSON was created
+     if class_label_map and class_map:
+         class_label_map = {class_map[k]: v for k, v in class_label_map.items()}
+     try:
+         # import pycocotools utilities to decompress annotations for various tasks, e.g. segmentation
+         from pycocotools.mask import decode  # noqa
+     except ImportError:
+         decode = None
+
+     data = []
+     for prediction in predictions:
+         boxes = prediction["bbox"]
+         score = _scale_confidence_score(prediction["score"])
+         cls_label = prediction["category_id"]
+         if class_label_map:
+             cls_label = str(class_label_map[cls_label])
+
+         annotation_data = {"boxes": [boxes], "label": cls_label, "score": score}
+
+         if decode is not None:
+             # do segmentation processing only if we are able to decode it
+             segments = prediction.get("segmentation", None)
+             if segments is not None:
+                 segments = _extract_segmentation_annotation(segments, decode)
+                 if segments is not None:
+                     annotation_data["points"] = segments
+
+         data.append(annotation_data)
+
+     return {"name": "prediction", "data": data}
+
+
+ def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) -> Optional[List[List[Any]]]:
+     """
+     Extracts segmentation annotation from compressed segmentations as list of polygons.
+
+     Args:
+         segmentation_raw: Raw segmentation data in compressed format.
+         decode: Function to decode the compressed segmentation data.
+
+     Returns:
+         (Optional[List[List[Any]]]): List of polygon points or None if extraction fails.
+     """
+     try:
+         mask = decode(segmentation_raw)
+         contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
+         annotations = [np.array(polygon).squeeze() for polygon in contours if len(polygon) >= 3]
+         return [annotation.ravel().tolist() for annotation in annotations]
+     except Exception as e:
+         LOGGER.warning(f"Comet Failed to extract segmentation annotation: {e}")
+     return None
+
+
+ def _fetch_annotations(
+     img_idx, image_path, batch, prediction_metadata_map, class_label_map, class_map
+ ) -> Optional[List]:
+     """Join the ground truth and prediction annotations if they exist."""
+     ground_truth_annotations = _format_ground_truth_annotations_for_detection(
+         img_idx, image_path, batch, class_label_map
+     )
+     prediction_annotations = _format_prediction_annotations(
+         image_path, prediction_metadata_map, class_label_map, class_map
+     )
+
+     annotations = [
+         annotation for annotation in [ground_truth_annotations, prediction_annotations] if annotation is not None
+     ]
+     return [annotations] if annotations else None
+
+
+ def _create_prediction_metadata_map(model_predictions) -> dict:
+     """Create metadata map for model predictions by grouping them based on image ID."""
+     pred_metadata_map = {}
+     for prediction in model_predictions:
+         pred_metadata_map.setdefault(prediction["image_id"], [])
+         pred_metadata_map[prediction["image_id"]].append(prediction)
+
+     return pred_metadata_map
+
+
+ def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) -> None:
+     """Log the confusion matrix to Comet experiment."""
+     conf_mat = trainer.validator.confusion_matrix.matrix
+     names = list(trainer.data["names"].values()) + ["background"]
+     experiment.log_confusion_matrix(
+         matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
+     )
+
+
+ def _log_images(experiment, image_paths, curr_step, annotations=None) -> None:
+     """
+     Log images to the experiment with optional annotations.
+
+     This function logs images to a Comet ML experiment, optionally including annotation data for visualization
+     such as bounding boxes or segmentation masks.
+
+     Args:
+         experiment (comet_ml.Experiment): The Comet ML experiment to log images to.
+         image_paths (List[Path]): List of paths to images that will be logged.
+         curr_step (int): Current training step/iteration for tracking in the experiment timeline.
+         annotations (List[List[dict]], optional): Nested list of annotation dictionaries for each image. Each annotation
+             contains visualization data like bounding boxes, labels, and confidence scores.
+
+     Returns:
+         None
+     """
+     if annotations:
+         for image_path, annotation in zip(image_paths, annotations):
+             experiment.log_image(image_path, name=image_path.stem, step=curr_step, annotations=annotation)
+
+     else:
+         for image_path in image_paths:
+             experiment.log_image(image_path, name=image_path.stem, step=curr_step)
+
+
+ def _log_image_predictions(experiment, validator, curr_step) -> None:
+     """
+     Log predicted boxes for a single image during training.
+
+     This function logs image predictions to a Comet ML experiment during model validation. It processes
+     validation data and formats both ground truth and prediction annotations for visualization in the Comet
+     dashboard. The function respects configured limits on the number of images to log.
+
+     Args:
+         experiment (comet_ml.Experiment): The Comet ML experiment to log to.
+         validator (BaseValidator): The validator instance containing validation data and predictions.
+         curr_step (int): The current training step for logging timeline.
+
+     Notes:
+         This function uses global state to track the number of logged predictions across calls.
+         It only logs predictions for supported tasks defined in COMET_SUPPORTED_TASKS.
+         The number of logged images is limited by the COMET_MAX_IMAGE_PREDICTIONS environment variable.
+     """
+     global _comet_image_prediction_count
+
+     task = validator.args.task
+     if task not in COMET_SUPPORTED_TASKS:
+         return
+
+     jdict = validator.jdict
+     if not jdict:
+         return
+
+     predictions_metadata_map = _create_prediction_metadata_map(jdict)
+     dataloader = validator.dataloader
+     class_label_map = validator.names
+     class_map = getattr(validator, "class_map", None)
+
+     batch_logging_interval = _get_eval_batch_logging_interval()
+     max_image_predictions = _get_max_image_predictions_to_log()
+
+     for batch_idx, batch in enumerate(dataloader):
+         if (batch_idx + 1) % batch_logging_interval != 0:
+             continue
+
+         image_paths = batch["im_file"]
+         for img_idx, image_path in enumerate(image_paths):
+             if _comet_image_prediction_count >= max_image_predictions:
+                 return
+
+             image_path = Path(image_path)
+             annotations = _fetch_annotations(
+                 img_idx,
+                 image_path,
+                 batch,
+                 predictions_metadata_map,
+                 class_label_map,
+                 class_map=class_map,
+             )
+             _log_images(
+                 experiment,
+                 [image_path],
+                 curr_step,
+                 annotations=annotations,
+             )
+             _comet_image_prediction_count += 1
+
+
+ def _log_plots(experiment, trainer) -> None:
+     """
+     Log evaluation plots and label plots for the experiment.
+
+     This function logs various evaluation plots and confusion matrices to the experiment tracking system. It handles
+     different types of metrics (SegmentMetrics, PoseMetrics, DetMetrics, OBBMetrics) and logs the appropriate plots
+     for each type.
+
+     Args:
+         experiment (comet_ml.Experiment): The Comet ML experiment to log plots to.
+         trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing validation metrics and save
+             directory information.
+
+     Examples:
+         >>> from ultralytics.utils.callbacks.comet import _log_plots
+         >>> _log_plots(experiment, trainer)
+     """
+     plot_filenames = None
+     if isinstance(trainer.validator.metrics, SegmentMetrics) and trainer.validator.metrics.task == "segment":
+         plot_filenames = [
+             trainer.save_dir / f"{prefix}{plots}.png"
+             for plots in EVALUATION_PLOT_NAMES
+             for prefix in SEGMENT_METRICS_PLOT_PREFIX
+         ]
+     elif isinstance(trainer.validator.metrics, PoseMetrics):
+         plot_filenames = [
+             trainer.save_dir / f"{prefix}{plots}.png"
+             for plots in EVALUATION_PLOT_NAMES
+             for prefix in POSE_METRICS_PLOT_PREFIX
+         ]
+     elif isinstance(trainer.validator.metrics, (DetMetrics, OBBMetrics)):
+         plot_filenames = [trainer.save_dir / f"{plots}.png" for plots in EVALUATION_PLOT_NAMES]
+
+     if plot_filenames is not None:
+         _log_images(experiment, plot_filenames, None)
+
+     confusion_matrix_filenames = [trainer.save_dir / f"{plots}.png" for plots in CONFUSION_MATRIX_PLOT_NAMES]
+     _log_images(experiment, confusion_matrix_filenames, None)
+
+     if not isinstance(trainer.validator.metrics, ClassifyMetrics):
+         label_plot_filenames = [trainer.save_dir / f"{labels}.jpg" for labels in LABEL_PLOT_NAMES]
+         _log_images(experiment, label_plot_filenames, None)
+
+
+ def _log_model(experiment, trainer) -> None:
+     """Log the best-trained model to Comet.ml."""
+     model_name = _get_comet_model_name()
+     experiment.log_model(model_name, file_or_folder=str(trainer.best), file_name="best.pt", overwrite=True)
+
+
+ def _log_image_batches(experiment, trainer, curr_step: int) -> None:
+     """Log samples of image batches for train, validation, and test."""
+     _log_images(experiment, trainer.save_dir.glob("train_batch*.jpg"), curr_step)
+     _log_images(experiment, trainer.save_dir.glob("val_batch*.jpg"), curr_step)
+
+
+ def on_pretrain_routine_start(trainer) -> None:
+     """Creates or resumes a CometML experiment at the start of a YOLO pre-training routine."""
+     _resume_or_create_experiment(trainer.args)
+
+
+ def on_train_epoch_end(trainer) -> None:
+     """Log metrics and save batch images at the end of training epochs."""
+     experiment = comet_ml.get_running_experiment()
+     if not experiment:
+         return
+
+     metadata = _fetch_trainer_metadata(trainer)
+     curr_epoch = metadata["curr_epoch"]
+     curr_step = metadata["curr_step"]
+
+     experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix="train"), step=curr_step, epoch=curr_epoch)
+
+
+ def on_fit_epoch_end(trainer) -> None:
+     """
+     Log model assets at the end of each epoch during training.
+
+     This function is called at the end of each training epoch to log metrics, learning rates, and model information
+     to a Comet ML experiment. It also logs model assets, confusion matrices, and image predictions based on
+     configuration settings.
+
+     The function retrieves the current Comet ML experiment and logs various training metrics. If it's the first epoch,
+     it also logs model information. On specified save intervals, it logs the model, confusion matrix (if enabled),
+     and image predictions (if enabled).
+
+     Args:
+         trainer (BaseTrainer): The YOLO trainer object containing training state, metrics, and configuration.
+
+     Examples:
+         >>> # Inside a training loop
+         >>> on_fit_epoch_end(trainer)  # Log metrics and assets to Comet ML
+     """
+     experiment = comet_ml.get_running_experiment()
+     if not experiment:
+         return
+
+     metadata = _fetch_trainer_metadata(trainer)
+     curr_epoch = metadata["curr_epoch"]
+     curr_step = metadata["curr_step"]
+     save_assets = metadata["save_assets"]
+
+     experiment.log_metrics(trainer.metrics, step=curr_step, epoch=curr_epoch)
+     experiment.log_metrics(trainer.lr, step=curr_step, epoch=curr_epoch)
+     if curr_epoch == 1:
+         from ultralytics.utils.torch_utils import model_info_for_loggers
+
+         experiment.log_metrics(model_info_for_loggers(trainer), step=curr_step, epoch=curr_epoch)
+
+     if not save_assets:
+         return
+
+     _log_model(experiment, trainer)
+     if _should_log_confusion_matrix():
+         _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch)
+     if _should_log_image_predictions():
+         _log_image_predictions(experiment, trainer.validator, curr_step)
+
+
+ def on_train_end(trainer) -> None:
+     """Perform operations at the end of training."""
+     experiment = comet_ml.get_running_experiment()
+     if not experiment:
+         return
+
+     metadata = _fetch_trainer_metadata(trainer)
+     curr_epoch = metadata["curr_epoch"]
+     curr_step = metadata["curr_step"]
+     plots = trainer.args.plots
+
+     _log_model(experiment, trainer)
+     if plots:
+         _log_plots(experiment, trainer)
+
+     _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch)
+     _log_image_predictions(experiment, trainer.validator, curr_step)
+     _log_image_batches(experiment, trainer, curr_step)
+     experiment.end()
+
+     global _comet_image_prediction_count
+     _comet_image_prediction_count = 0
+
+
+ callbacks = (
+     {
+         "on_pretrain_routine_start": on_pretrain_routine_start,
+         "on_train_epoch_end": on_train_epoch_end,
+         "on_fit_epoch_end": on_fit_epoch_end,
+         "on_train_end": on_train_end,
+     }
+     if comet_ml
+     else {}
+ )
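
The module above only activates when the Comet integration flag in the Ultralytics settings is enabled and comet_ml imports successfully; its behavior is then tuned through the environment variables read by the helper functions (COMET_MODE/COMET_START_ONLINE, COMET_PROJECT_NAME, COMET_EVAL_BATCH_LOGGING_INTERVAL, COMET_MAX_IMAGE_PREDICTIONS, COMET_EVAL_LOG_CONFUSION_MATRIX, COMET_EVAL_LOG_IMAGE_PREDICTIONS). A minimal sketch of how a training run might be wired up to exercise these callbacks — the model weights, dataset, and project name below are illustrative, and a COMET_API_KEY is assumed to be configured for online logging — could look like:

    import os

    from ultralytics import YOLO, settings

    # Enable the integration gate checked in the try/except block above (SETTINGS["comet"]).
    settings.update({"comet": True})

    # Optional knobs read by the helper functions in this module.
    os.environ["COMET_PROJECT_NAME"] = "yolo-experiments"  # otherwise falls back to args.project
    os.environ["COMET_EVAL_LOG_CONFUSION_MATRIX"] = "true"  # confusion matrix logging is off by default
    os.environ["COMET_MAX_IMAGE_PREDICTIONS"] = "50"  # default is 100
    os.environ["COMET_EVAL_BATCH_LOGGING_INTERVAL"] = "2"  # default is 1

    # Training triggers on_pretrain_routine_start, on_train_epoch_end, on_fit_epoch_end and on_train_end.
    model = YOLO("yolo11n.pt")
    model.train(data="coco8.yaml", epochs=3, imgsz=640)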