ultralytics_opencv_headless-8.3.246-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (298)
  1. tests/__init__.py +23 -0
  2. tests/conftest.py +59 -0
  3. tests/test_cli.py +131 -0
  4. tests/test_cuda.py +216 -0
  5. tests/test_engine.py +157 -0
  6. tests/test_exports.py +309 -0
  7. tests/test_integrations.py +151 -0
  8. tests/test_python.py +777 -0
  9. tests/test_solutions.py +371 -0
  10. ultralytics/__init__.py +48 -0
  11. ultralytics/assets/bus.jpg +0 -0
  12. ultralytics/assets/zidane.jpg +0 -0
  13. ultralytics/cfg/__init__.py +1026 -0
  14. ultralytics/cfg/datasets/Argoverse.yaml +78 -0
  15. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  16. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  17. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  18. ultralytics/cfg/datasets/HomeObjects-3K.yaml +32 -0
  19. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  20. ultralytics/cfg/datasets/Objects365.yaml +447 -0
  21. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  22. ultralytics/cfg/datasets/VOC.yaml +102 -0
  23. ultralytics/cfg/datasets/VisDrone.yaml +87 -0
  24. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  25. ultralytics/cfg/datasets/brain-tumor.yaml +22 -0
  26. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  27. ultralytics/cfg/datasets/coco-pose.yaml +64 -0
  28. ultralytics/cfg/datasets/coco.yaml +118 -0
  29. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  30. ultralytics/cfg/datasets/coco128.yaml +101 -0
  31. ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
  32. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  33. ultralytics/cfg/datasets/coco8-pose.yaml +47 -0
  34. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  35. ultralytics/cfg/datasets/coco8.yaml +101 -0
  36. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  37. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  38. ultralytics/cfg/datasets/dog-pose.yaml +52 -0
  39. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  40. ultralytics/cfg/datasets/dota8.yaml +35 -0
  41. ultralytics/cfg/datasets/hand-keypoints.yaml +50 -0
  42. ultralytics/cfg/datasets/kitti.yaml +27 -0
  43. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  44. ultralytics/cfg/datasets/medical-pills.yaml +21 -0
  45. ultralytics/cfg/datasets/open-images-v7.yaml +663 -0
  46. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  47. ultralytics/cfg/datasets/signature.yaml +21 -0
  48. ultralytics/cfg/datasets/tiger-pose.yaml +41 -0
  49. ultralytics/cfg/datasets/xView.yaml +155 -0
  50. ultralytics/cfg/default.yaml +130 -0
  51. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  52. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  53. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  54. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  55. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  56. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  57. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  58. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  59. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  60. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  61. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  62. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  63. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  64. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  65. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  66. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  68. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  69. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  70. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  71. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  74. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  75. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  76. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  77. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  78. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  79. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  80. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +48 -0
  81. ultralytics/cfg/models/v8/yoloe-v8.yaml +48 -0
  82. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  83. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  84. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  85. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  86. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  87. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  88. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  89. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  90. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  91. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  92. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  93. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  94. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  95. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  96. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  97. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  98. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  99. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  100. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  101. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  102. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  103. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  105. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  106. ultralytics/cfg/trackers/botsort.yaml +21 -0
  107. ultralytics/cfg/trackers/bytetrack.yaml +12 -0
  108. ultralytics/data/__init__.py +26 -0
  109. ultralytics/data/annotator.py +66 -0
  110. ultralytics/data/augment.py +2801 -0
  111. ultralytics/data/base.py +435 -0
  112. ultralytics/data/build.py +437 -0
  113. ultralytics/data/converter.py +855 -0
  114. ultralytics/data/dataset.py +834 -0
  115. ultralytics/data/loaders.py +704 -0
  116. ultralytics/data/scripts/download_weights.sh +18 -0
  117. ultralytics/data/scripts/get_coco.sh +61 -0
  118. ultralytics/data/scripts/get_coco128.sh +18 -0
  119. ultralytics/data/scripts/get_imagenet.sh +52 -0
  120. ultralytics/data/split.py +138 -0
  121. ultralytics/data/split_dota.py +344 -0
  122. ultralytics/data/utils.py +798 -0
  123. ultralytics/engine/__init__.py +1 -0
  124. ultralytics/engine/exporter.py +1578 -0
  125. ultralytics/engine/model.py +1124 -0
  126. ultralytics/engine/predictor.py +508 -0
  127. ultralytics/engine/results.py +1522 -0
  128. ultralytics/engine/trainer.py +974 -0
  129. ultralytics/engine/tuner.py +448 -0
  130. ultralytics/engine/validator.py +384 -0
  131. ultralytics/hub/__init__.py +166 -0
  132. ultralytics/hub/auth.py +151 -0
  133. ultralytics/hub/google/__init__.py +174 -0
  134. ultralytics/hub/session.py +422 -0
  135. ultralytics/hub/utils.py +162 -0
  136. ultralytics/models/__init__.py +9 -0
  137. ultralytics/models/fastsam/__init__.py +7 -0
  138. ultralytics/models/fastsam/model.py +79 -0
  139. ultralytics/models/fastsam/predict.py +169 -0
  140. ultralytics/models/fastsam/utils.py +23 -0
  141. ultralytics/models/fastsam/val.py +38 -0
  142. ultralytics/models/nas/__init__.py +7 -0
  143. ultralytics/models/nas/model.py +98 -0
  144. ultralytics/models/nas/predict.py +56 -0
  145. ultralytics/models/nas/val.py +38 -0
  146. ultralytics/models/rtdetr/__init__.py +7 -0
  147. ultralytics/models/rtdetr/model.py +63 -0
  148. ultralytics/models/rtdetr/predict.py +88 -0
  149. ultralytics/models/rtdetr/train.py +89 -0
  150. ultralytics/models/rtdetr/val.py +216 -0
  151. ultralytics/models/sam/__init__.py +25 -0
  152. ultralytics/models/sam/amg.py +275 -0
  153. ultralytics/models/sam/build.py +365 -0
  154. ultralytics/models/sam/build_sam3.py +377 -0
  155. ultralytics/models/sam/model.py +169 -0
  156. ultralytics/models/sam/modules/__init__.py +1 -0
  157. ultralytics/models/sam/modules/blocks.py +1067 -0
  158. ultralytics/models/sam/modules/decoders.py +495 -0
  159. ultralytics/models/sam/modules/encoders.py +794 -0
  160. ultralytics/models/sam/modules/memory_attention.py +298 -0
  161. ultralytics/models/sam/modules/sam.py +1160 -0
  162. ultralytics/models/sam/modules/tiny_encoder.py +979 -0
  163. ultralytics/models/sam/modules/transformer.py +344 -0
  164. ultralytics/models/sam/modules/utils.py +512 -0
  165. ultralytics/models/sam/predict.py +3940 -0
  166. ultralytics/models/sam/sam3/__init__.py +3 -0
  167. ultralytics/models/sam/sam3/decoder.py +546 -0
  168. ultralytics/models/sam/sam3/encoder.py +529 -0
  169. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  170. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  171. ultralytics/models/sam/sam3/model_misc.py +199 -0
  172. ultralytics/models/sam/sam3/necks.py +129 -0
  173. ultralytics/models/sam/sam3/sam3_image.py +339 -0
  174. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  175. ultralytics/models/sam/sam3/vitdet.py +547 -0
  176. ultralytics/models/sam/sam3/vl_combiner.py +160 -0
  177. ultralytics/models/utils/__init__.py +1 -0
  178. ultralytics/models/utils/loss.py +466 -0
  179. ultralytics/models/utils/ops.py +315 -0
  180. ultralytics/models/yolo/__init__.py +7 -0
  181. ultralytics/models/yolo/classify/__init__.py +7 -0
  182. ultralytics/models/yolo/classify/predict.py +90 -0
  183. ultralytics/models/yolo/classify/train.py +202 -0
  184. ultralytics/models/yolo/classify/val.py +216 -0
  185. ultralytics/models/yolo/detect/__init__.py +7 -0
  186. ultralytics/models/yolo/detect/predict.py +122 -0
  187. ultralytics/models/yolo/detect/train.py +227 -0
  188. ultralytics/models/yolo/detect/val.py +507 -0
  189. ultralytics/models/yolo/model.py +430 -0
  190. ultralytics/models/yolo/obb/__init__.py +7 -0
  191. ultralytics/models/yolo/obb/predict.py +56 -0
  192. ultralytics/models/yolo/obb/train.py +79 -0
  193. ultralytics/models/yolo/obb/val.py +302 -0
  194. ultralytics/models/yolo/pose/__init__.py +7 -0
  195. ultralytics/models/yolo/pose/predict.py +65 -0
  196. ultralytics/models/yolo/pose/train.py +110 -0
  197. ultralytics/models/yolo/pose/val.py +248 -0
  198. ultralytics/models/yolo/segment/__init__.py +7 -0
  199. ultralytics/models/yolo/segment/predict.py +109 -0
  200. ultralytics/models/yolo/segment/train.py +69 -0
  201. ultralytics/models/yolo/segment/val.py +307 -0
  202. ultralytics/models/yolo/world/__init__.py +5 -0
  203. ultralytics/models/yolo/world/train.py +173 -0
  204. ultralytics/models/yolo/world/train_world.py +178 -0
  205. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  206. ultralytics/models/yolo/yoloe/predict.py +162 -0
  207. ultralytics/models/yolo/yoloe/train.py +287 -0
  208. ultralytics/models/yolo/yoloe/train_seg.py +122 -0
  209. ultralytics/models/yolo/yoloe/val.py +206 -0
  210. ultralytics/nn/__init__.py +27 -0
  211. ultralytics/nn/autobackend.py +958 -0
  212. ultralytics/nn/modules/__init__.py +182 -0
  213. ultralytics/nn/modules/activation.py +54 -0
  214. ultralytics/nn/modules/block.py +1947 -0
  215. ultralytics/nn/modules/conv.py +669 -0
  216. ultralytics/nn/modules/head.py +1183 -0
  217. ultralytics/nn/modules/transformer.py +793 -0
  218. ultralytics/nn/modules/utils.py +159 -0
  219. ultralytics/nn/tasks.py +1768 -0
  220. ultralytics/nn/text_model.py +356 -0
  221. ultralytics/py.typed +1 -0
  222. ultralytics/solutions/__init__.py +41 -0
  223. ultralytics/solutions/ai_gym.py +108 -0
  224. ultralytics/solutions/analytics.py +264 -0
  225. ultralytics/solutions/config.py +107 -0
  226. ultralytics/solutions/distance_calculation.py +123 -0
  227. ultralytics/solutions/heatmap.py +125 -0
  228. ultralytics/solutions/instance_segmentation.py +86 -0
  229. ultralytics/solutions/object_blurrer.py +89 -0
  230. ultralytics/solutions/object_counter.py +190 -0
  231. ultralytics/solutions/object_cropper.py +87 -0
  232. ultralytics/solutions/parking_management.py +280 -0
  233. ultralytics/solutions/queue_management.py +93 -0
  234. ultralytics/solutions/region_counter.py +133 -0
  235. ultralytics/solutions/security_alarm.py +151 -0
  236. ultralytics/solutions/similarity_search.py +219 -0
  237. ultralytics/solutions/solutions.py +828 -0
  238. ultralytics/solutions/speed_estimation.py +114 -0
  239. ultralytics/solutions/streamlit_inference.py +260 -0
  240. ultralytics/solutions/templates/similarity-search.html +156 -0
  241. ultralytics/solutions/trackzone.py +88 -0
  242. ultralytics/solutions/vision_eye.py +67 -0
  243. ultralytics/trackers/__init__.py +7 -0
  244. ultralytics/trackers/basetrack.py +115 -0
  245. ultralytics/trackers/bot_sort.py +257 -0
  246. ultralytics/trackers/byte_tracker.py +469 -0
  247. ultralytics/trackers/track.py +116 -0
  248. ultralytics/trackers/utils/__init__.py +1 -0
  249. ultralytics/trackers/utils/gmc.py +339 -0
  250. ultralytics/trackers/utils/kalman_filter.py +482 -0
  251. ultralytics/trackers/utils/matching.py +154 -0
  252. ultralytics/utils/__init__.py +1450 -0
  253. ultralytics/utils/autobatch.py +118 -0
  254. ultralytics/utils/autodevice.py +205 -0
  255. ultralytics/utils/benchmarks.py +728 -0
  256. ultralytics/utils/callbacks/__init__.py +5 -0
  257. ultralytics/utils/callbacks/base.py +233 -0
  258. ultralytics/utils/callbacks/clearml.py +146 -0
  259. ultralytics/utils/callbacks/comet.py +625 -0
  260. ultralytics/utils/callbacks/dvc.py +197 -0
  261. ultralytics/utils/callbacks/hub.py +110 -0
  262. ultralytics/utils/callbacks/mlflow.py +134 -0
  263. ultralytics/utils/callbacks/neptune.py +126 -0
  264. ultralytics/utils/callbacks/platform.py +313 -0
  265. ultralytics/utils/callbacks/raytune.py +42 -0
  266. ultralytics/utils/callbacks/tensorboard.py +123 -0
  267. ultralytics/utils/callbacks/wb.py +188 -0
  268. ultralytics/utils/checks.py +1006 -0
  269. ultralytics/utils/cpu.py +85 -0
  270. ultralytics/utils/dist.py +123 -0
  271. ultralytics/utils/downloads.py +529 -0
  272. ultralytics/utils/errors.py +35 -0
  273. ultralytics/utils/events.py +113 -0
  274. ultralytics/utils/export/__init__.py +7 -0
  275. ultralytics/utils/export/engine.py +237 -0
  276. ultralytics/utils/export/imx.py +315 -0
  277. ultralytics/utils/export/tensorflow.py +231 -0
  278. ultralytics/utils/files.py +219 -0
  279. ultralytics/utils/git.py +137 -0
  280. ultralytics/utils/instance.py +484 -0
  281. ultralytics/utils/logger.py +501 -0
  282. ultralytics/utils/loss.py +849 -0
  283. ultralytics/utils/metrics.py +1563 -0
  284. ultralytics/utils/nms.py +337 -0
  285. ultralytics/utils/ops.py +664 -0
  286. ultralytics/utils/patches.py +201 -0
  287. ultralytics/utils/plotting.py +1045 -0
  288. ultralytics/utils/tal.py +403 -0
  289. ultralytics/utils/torch_utils.py +984 -0
  290. ultralytics/utils/tqdm.py +440 -0
  291. ultralytics/utils/triton.py +112 -0
  292. ultralytics/utils/tuner.py +160 -0
  293. ultralytics_opencv_headless-8.3.246.dist-info/METADATA +374 -0
  294. ultralytics_opencv_headless-8.3.246.dist-info/RECORD +298 -0
  295. ultralytics_opencv_headless-8.3.246.dist-info/WHEEL +5 -0
  296. ultralytics_opencv_headless-8.3.246.dist-info/entry_points.txt +3 -0
  297. ultralytics_opencv_headless-8.3.246.dist-info/licenses/LICENSE +661 -0
  298. ultralytics_opencv_headless-8.3.246.dist-info/top_level.txt +1 -0
ultralytics/utils/callbacks/platform.py
@@ -0,0 +1,313 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import os
+import platform
+import socket
+import sys
+from concurrent.futures import ThreadPoolExecutor
+from pathlib import Path
+from time import time
+
+from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING
+
+_last_upload = 0  # Rate limit model uploads
+_console_logger = None  # Global console logger instance
+_system_logger = None  # Cached system logger instance
+
+try:
+    assert not TESTS_RUNNING  # do not log pytest
+    assert SETTINGS.get("platform", False) is True or os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
+    _api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
+    assert _api_key  # verify API key is present
+
+    import requests
+
+    from ultralytics.utils.logger import ConsoleLogger, SystemLogger
+    from ultralytics.utils.torch_utils import model_info_for_loggers
+
+    _executor = ThreadPoolExecutor(max_workers=10)  # Bounded thread pool for async operations
+
+except (AssertionError, ImportError):
+    _api_key = None
+
+
+def _send(event, data, project, name):
+    """Send event to Platform endpoint."""
+    try:
+        requests.post(
+            "https://alpha.ultralytics.com/api/webhooks/training/metrics",
+            json={"event": event, "project": project, "name": name, "data": data},
+            headers={"Authorization": f"Bearer {_api_key}"},
+            timeout=10,
+        ).raise_for_status()
+    except Exception as e:
+        LOGGER.debug(f"Platform: Failed to send {event}: {e}")
+
+
+def _send_async(event, data, project, name):
+    """Send event asynchronously using bounded thread pool."""
+    _executor.submit(_send, event, data, project, name)
+
+
+def _upload_model(model_path, project, name):
+    """Upload model checkpoint to Platform via signed URL."""
+    try:
+        model_path = Path(model_path)
+        if not model_path.exists():
+            return None
+
+        # Get signed upload URL
+        response = requests.post(
+            "https://alpha.ultralytics.com/api/webhooks/models/upload",
+            json={"project": project, "name": name, "filename": model_path.name},
+            headers={"Authorization": f"Bearer {_api_key}"},
+            timeout=10,
+        )
+        response.raise_for_status()
+        data = response.json()
+
+        # Upload to GCS
+        with open(model_path, "rb") as f:
+            requests.put(
+                data["uploadUrl"],
+                data=f,
+                headers={"Content-Type": "application/octet-stream"},
+                timeout=600,  # 10 min timeout for large models
+            ).raise_for_status()
+
+        LOGGER.info(f"Platform: Model uploaded to '{project}'")
+        return data.get("gcsPath")
+
+    except Exception as e:
+        LOGGER.debug(f"Platform: Failed to upload model: {e}")
+        return None
+
+
+def _upload_model_async(model_path, project, name):
+    """Upload model asynchronously using bounded thread pool."""
+    _executor.submit(_upload_model, model_path, project, name)
+
+
+def _get_environment_info():
+    """Collect comprehensive environment info using existing ultralytics utilities."""
+    import shutil
+
+    import psutil
+    import torch
+
+    from ultralytics import __version__
+    from ultralytics.utils.torch_utils import get_cpu_info, get_gpu_info
+
+    # Get RAM and disk totals
+    memory = psutil.virtual_memory()
+    disk_usage = shutil.disk_usage("/")
+
+    env = {
+        "ultralyticsVersion": __version__,
+        "hostname": socket.gethostname(),
+        "os": platform.platform(),
+        "environment": ENVIRONMENT,
+        "pythonVersion": PYTHON_VERSION,
+        "pythonExecutable": sys.executable,
+        "cpuCount": os.cpu_count() or 0,
+        "cpu": get_cpu_info(),
+        "command": " ".join(sys.argv),
+        "totalRamGb": round(memory.total / (1 << 30), 1),  # Total RAM in GB
+        "totalDiskGb": round(disk_usage.total / (1 << 30), 1),  # Total disk in GB
+    }
+
+    # Git info using cached GIT singleton (no subprocess calls)
+    try:
+        if GIT.is_repo:
+            if GIT.origin:
+                env["gitRepository"] = GIT.origin
+            if GIT.branch:
+                env["gitBranch"] = GIT.branch
+            if GIT.commit:
+                env["gitCommit"] = GIT.commit[:12]  # Short hash
+    except Exception:
+        pass
+
+    # GPU info
+    try:
+        if torch.cuda.is_available():
+            env["gpuCount"] = torch.cuda.device_count()
+            env["gpuType"] = get_gpu_info(0) if torch.cuda.device_count() > 0 else None
+    except Exception:
+        pass
+
+    return env
+
+
+def on_pretrain_routine_start(trainer):
+    """Initialize Platform logging at training start."""
+    global _console_logger, _last_upload
+
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
+
+    # Initialize upload timer to now so first checkpoint waits 15 min from training start
+    _last_upload = time()
+
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+    LOGGER.info(f"Platform: Streaming to project '{project}' as '{name}'")
+
+    # Create callback to send console output to Platform
+    def send_console_output(content, line_count, chunk_id):
+        """Send batched console output to Platform webhook."""
+        _send_async("console_output", {"chunkId": chunk_id, "content": content, "lineCount": line_count}, project, name)
+
+    # Start console capture with batching (5 lines or 5 seconds)
+    _console_logger = ConsoleLogger(batch_size=5, flush_interval=5.0, on_flush=send_console_output)
+    _console_logger.start_capture()
+
+    # Gather model info for richer metadata
+    model_info = {}
+    try:
+        info = model_info_for_loggers(trainer)
+        model_info = {
+            "parameters": info.get("model/parameters", 0),
+            "gflops": info.get("model/GFLOPs", 0),
+            "classes": getattr(trainer.model, "yaml", {}).get("nc", 0),  # number of classes
+        }
+    except Exception:
+        pass
+
+    # Collect environment info (W&B-style metadata)
+    environment = _get_environment_info()
+
+    _send_async(
+        "training_started",
+        {
+            "trainArgs": {k: str(v) for k, v in vars(trainer.args).items()},
+            "epochs": trainer.epochs,
+            "device": str(trainer.device),
+            "modelInfo": model_info,
+            "environment": environment,
+        },
+        project,
+        name,
+    )
+
+
+def on_fit_epoch_end(trainer):
+    """Log training and system metrics at epoch end."""
+    global _system_logger
+
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
+
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+    metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics}
+
+    if trainer.optimizer and trainer.optimizer.param_groups:
+        metrics["lr"] = trainer.optimizer.param_groups[0]["lr"]
+    if trainer.epoch == 0:
+        try:
+            metrics.update(model_info_for_loggers(trainer))
+        except Exception:
+            pass
+
+    # Get system metrics (cache SystemLogger for efficiency)
+    system = {}
+    try:
+        if _system_logger is None:
+            _system_logger = SystemLogger()
+        system = _system_logger.get_metrics(rates=True)
+    except Exception:
+        pass
+
+    _send_async(
+        "epoch_end",
+        {
+            "epoch": trainer.epoch,
+            "metrics": metrics,
+            "system": system,
+            "fitness": trainer.fitness,
+            "best_fitness": trainer.best_fitness,
+        },
+        project,
+        name,
+    )
+
+
+def on_model_save(trainer):
+    """Upload model checkpoint (rate limited to every 15 min)."""
+    global _last_upload
+
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
+
+    # Rate limit to every 15 minutes (900 seconds)
+    if time() - _last_upload < 900:
+        return
+
+    model_path = trainer.best if trainer.best and Path(trainer.best).exists() else trainer.last
+    if not model_path:
+        return
+
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+    _upload_model_async(model_path, project, name)
+    _last_upload = time()
+
+
+def on_train_end(trainer):
+    """Log final results, upload best model, and send validation plot data."""
+    global _console_logger
+
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
+
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+
+    # Stop console capture
+    if _console_logger:
+        _console_logger.stop_capture()
+        _console_logger = None
+
+    # Upload best model (blocking to ensure it completes)
+    model_path = None
+    model_size = None
+    if trainer.best and Path(trainer.best).exists():
+        model_size = Path(trainer.best).stat().st_size
+        model_path = _upload_model(trainer.best, project, name)
+
+    # Collect plots from trainer and validator
+    plots = [info["data"] for info in getattr(trainer, "plots", {}).values() if info.get("data")]
+    plots += [
+        info["data"] for info in getattr(getattr(trainer, "validator", None), "plots", {}).values() if info.get("data")
+    ]
+
+    # Get class names
+    names = getattr(getattr(trainer, "validator", None), "names", None) or (trainer.data or {}).get("names")
+    class_names = list(names.values()) if isinstance(names, dict) else list(names) if names else None
+
+    _send(
+        "training_complete",
+        {
+            "results": {
+                "metrics": {**trainer.metrics, "fitness": trainer.fitness},
+                "bestEpoch": getattr(trainer, "best_epoch", trainer.epoch),
+                "bestFitness": trainer.best_fitness,
+                "modelPath": model_path or (str(trainer.best) if trainer.best else None),
+                "modelSize": model_size,
+            },
+            "classNames": class_names,
+            "plots": plots,
+        },
+        project,
+        name,
+    )
+    LOGGER.info(f"Platform: Training complete, results uploaded to '{project}' ({len(plots)} plots)")
+
+
+callbacks = (
+    {
+        "on_pretrain_routine_start": on_pretrain_routine_start,
+        "on_fit_epoch_end": on_fit_epoch_end,
+        "on_model_save": on_model_save,
+        "on_train_end": on_train_end,
+    }
+    if _api_key
+    else {}
+)
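The module only activates when an Ultralytics API key is present, and it exposes its hooks through the callbacks dict at the bottom, the same pattern the other callback modules below follow. A minimal usage sketch, assuming the standard YOLO training API and that ULTRALYTICS_API_KEY is set in the environment (model, dataset, project, and run names here are illustrative only):

    # Hypothetical sketch: stream a training run to the Ultralytics Platform.
    # Assumes ULTRALYTICS_API_KEY (or SETTINGS["api_key"]) is set; otherwise the module
    # above sets _api_key = None and exports an empty callbacks dict, so nothing is sent.
    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")  # illustrative model
    # trainer.args.project / trainer.args.name become the Platform project and run name used by _send()
    model.train(data="coco8.yaml", epochs=3, project="my-platform-project", name="exp1")

Note that checkpoint uploads from on_model_save() are rate limited to one every 15 minutes, so short runs typically only upload the best model once, at on_train_end().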
ultralytics/utils/callbacks/raytune.py
@@ -0,0 +1,42 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from ultralytics.utils import SETTINGS
+
+try:
+    assert SETTINGS["raytune"] is True  # verify integration is enabled
+    import ray
+    from ray import tune
+    from ray.air import session
+
+except (ImportError, AssertionError):
+    tune = None
+
+
+def on_fit_epoch_end(trainer):
+    """Report training metrics to Ray Tune at epoch end when a Ray session is active.
+
+    Captures metrics from the trainer object and sends them to Ray Tune with the current epoch number, enabling
+    hyperparameter tuning optimization. Only executes when within an active Ray Tune session.
+
+    Args:
+        trainer (ultralytics.engine.trainer.BaseTrainer): The Ultralytics trainer object containing metrics and epochs.
+
+    Examples:
+        >>> # Called automatically by the Ultralytics training loop
+        >>> on_fit_epoch_end(trainer)
+
+    References:
+        Ray Tune docs: https://docs.ray.io/en/latest/tune/index.html
+    """
+    if ray.train._internal.session.get_session():  # check if Ray Tune session is active
+        metrics = trainer.metrics
+        session.report({**metrics, **{"epoch": trainer.epoch + 1}})
+
+
+callbacks = (
+    {
+        "on_fit_epoch_end": on_fit_epoch_end,
+    }
+    if tune
+    else {}
+)
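This callback reports only when a Ray Tune session is active. A hedged sketch of the usual entry point, model.tune(..., use_ray=True), which runs trials under Ray Tune so that session.report() above receives per-epoch metrics (dataset and argument values are illustrative):

    # Hypothetical sketch: drive hyperparameter tuning through Ray Tune.
    # Requires `pip install "ray[tune]"` and SETTINGS["raytune"] enabled so the imports above succeed.
    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    # use_ray=True routes tuning through Ray Tune; each trial's epochs are reported via session.report()
    result_grid = model.tune(data="coco8.yaml", epochs=10, use_ray=True)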
ultralytics/utils/callbacks/tensorboard.py
@@ -0,0 +1,123 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr, torch_utils
+
+try:
+    assert not TESTS_RUNNING  # do not log pytest
+    assert SETTINGS["tensorboard"] is True  # verify integration is enabled
+    WRITER = None  # TensorBoard SummaryWriter instance
+    PREFIX = colorstr("TensorBoard: ")
+
+    # Imports below only required if TensorBoard enabled
+    from copy import deepcopy
+
+    import torch
+    from torch.utils.tensorboard import SummaryWriter
+
+except (ImportError, AssertionError, TypeError, AttributeError):
+    # TypeError for handling 'Descriptors cannot not be created directly.' protobuf errors in Windows
+    # AttributeError: module 'tensorflow' has no attribute 'io' if 'tensorflow' not installed
+    SummaryWriter = None
+
+
+def _log_scalars(scalars: dict, step: int = 0) -> None:
+    """Log scalar values to TensorBoard.
+
+    Args:
+        scalars (dict): Dictionary of scalar values to log to TensorBoard. Keys are scalar names and values are the
+            corresponding scalar values.
+        step (int): Global step value to record with the scalar values. Used for x-axis in TensorBoard graphs.
+
+    Examples:
+        Log training metrics
+        >>> metrics = {"loss": 0.5, "accuracy": 0.95}
+        >>> _log_scalars(metrics, step=100)
+    """
+    if WRITER:
+        for k, v in scalars.items():
+            WRITER.add_scalar(k, v, step)
+
+
+def _log_tensorboard_graph(trainer) -> None:
+    """Log model graph to TensorBoard.
+
+    This function attempts to visualize the model architecture in TensorBoard by tracing the model with a dummy input
+    tensor. It first tries a simple method suitable for YOLO models, and if that fails, falls back to a more complex
+    approach for models like RTDETR that may require special handling.
+
+    Args:
+        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing the model to visualize. Must
+            have attributes model and args with imgsz.
+
+    Notes:
+        This function requires TensorBoard integration to be enabled and the global WRITER to be initialized.
+        It handles potential warnings from the PyTorch JIT tracer and attempts to gracefully handle different
+        model architectures.
+    """
+    # Input image
+    imgsz = trainer.args.imgsz
+    imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
+    p = next(trainer.model.parameters())  # for device, type
+    im = torch.zeros((1, 3, *imgsz), device=p.device, dtype=p.dtype)  # input image (must be zeros, not empty)
+
+    # Try simple method first (YOLO)
+    try:
+        trainer.model.eval()  # place in .eval() mode to avoid BatchNorm statistics changes
+        WRITER.add_graph(torch.jit.trace(torch_utils.unwrap_model(trainer.model), im, strict=False), [])
+        LOGGER.info(f"{PREFIX}model graph visualization added ✅")
+        return
+    except Exception as e1:
+        # Fallback to TorchScript export steps (RTDETR)
+        try:
+            model = deepcopy(torch_utils.unwrap_model(trainer.model))
+            model.eval()
+            model = model.fuse(verbose=False)
+            for m in model.modules():
+                if hasattr(m, "export"):  # Detect, RTDETRDecoder (Segment and Pose use Detect base class)
+                    m.export = True
+                    m.format = "torchscript"
+            model(im)  # dry run
+            WRITER.add_graph(torch.jit.trace(model, im, strict=False), [])
+            LOGGER.info(f"{PREFIX}model graph visualization added ✅")
+        except Exception as e2:
+            LOGGER.warning(f"{PREFIX}TensorBoard graph visualization failure: {e1} -> {e2}")
+
+
+def on_pretrain_routine_start(trainer) -> None:
+    """Initialize TensorBoard logging with SummaryWriter."""
+    if SummaryWriter:
+        try:
+            global WRITER
+            WRITER = SummaryWriter(str(trainer.save_dir))
+            LOGGER.info(f"{PREFIX}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
+        except Exception as e:
+            LOGGER.warning(f"{PREFIX}TensorBoard not initialized correctly, not logging this run. {e}")
+
+
+def on_train_start(trainer) -> None:
+    """Log TensorBoard graph."""
+    if WRITER:
+        _log_tensorboard_graph(trainer)
+
+
+def on_train_epoch_end(trainer) -> None:
+    """Log scalar statistics at the end of a training epoch."""
+    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), trainer.epoch + 1)
+    _log_scalars(trainer.lr, trainer.epoch + 1)
+
+
+def on_fit_epoch_end(trainer) -> None:
+    """Log epoch metrics at end of training epoch."""
+    _log_scalars(trainer.metrics, trainer.epoch + 1)
+
+
+callbacks = (
+    {
+        "on_pretrain_routine_start": on_pretrain_routine_start,
+        "on_train_start": on_train_start,
+        "on_fit_epoch_end": on_fit_epoch_end,
+        "on_train_epoch_end": on_train_epoch_end,
+    }
+    if SummaryWriter
+    else {}
+)
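A short sketch of enabling the integration and viewing the resulting logs, assuming the documented settings API (the save directory shown is the typical default and is illustrative):

    # Hypothetical sketch: enable TensorBoard logging, train, then inspect the run.
    from ultralytics import YOLO, settings

    settings.update({"tensorboard": True})  # the SETTINGS["tensorboard"] gate checked at import time above

    model = YOLO("yolo11n.pt")
    model.train(data="coco8.yaml", epochs=3)  # scalars (and the traced model graph) are written to save_dir

    # Then, from a shell:
    #   tensorboard --logdir runs/detect/train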
ultralytics/utils/callbacks/wb.py
@@ -0,0 +1,188 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from ultralytics.utils import SETTINGS, TESTS_RUNNING
+from ultralytics.utils.torch_utils import model_info_for_loggers
+
+try:
+    assert not TESTS_RUNNING  # do not log pytest
+    assert SETTINGS["wandb"] is True  # verify integration is enabled
+    import wandb as wb
+
+    assert hasattr(wb, "__version__")  # verify package is not directory
+    _processed_plots = {}
+
+except (ImportError, AssertionError):
+    wb = None
+
+
+def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall", y_title="Precision"):
+    """Create and log a custom metric visualization to wandb.plot.pr_curve.
+
+    This function crafts a custom metric visualization that mimics the behavior of the default wandb precision-recall
+    curve while allowing for enhanced customization. The visual metric is useful for monitoring model performance across
+    different classes.
+
+    Args:
+        x (list): Values for the x-axis; expected to have length N.
+        y (list): Corresponding values for the y-axis; also expected to have length N.
+        classes (list): Labels identifying the class of each point; length N.
+        title (str, optional): Title for the plot.
+        x_title (str, optional): Label for the x-axis.
+        y_title (str, optional): Label for the y-axis.
+
+    Returns:
+        (wandb.Object): A wandb object suitable for logging, showcasing the crafted metric visualization.
+    """
+    import polars as pl  # scope for faster 'import ultralytics'
+    import polars.selectors as cs
+
+    df = pl.DataFrame({"class": classes, "y": y, "x": x}).with_columns(cs.numeric().round(3))
+    data = df.select(["class", "y", "x"]).rows()
+
+    fields = {"x": "x", "y": "y", "class": "class"}
+    string_fields = {"title": title, "x-axis-title": x_title, "y-axis-title": y_title}
+    return wb.plot_table(
+        "wandb/area-under-curve/v0",
+        wb.Table(data=data, columns=["class", "y", "x"]),
+        fields=fields,
+        string_fields=string_fields,
+    )
+
+
+def _plot_curve(
+    x,
+    y,
+    names=None,
+    id="precision-recall",
+    title="Precision Recall Curve",
+    x_title="Recall",
+    y_title="Precision",
+    num_x=100,
+    only_mean=False,
+):
+    """Log a metric curve visualization.
+
+    This function generates a metric curve based on input data and logs the visualization to wandb. The curve can
+    represent aggregated data (mean) or individual class data, depending on the 'only_mean' flag.
+
+    Args:
+        x (np.ndarray): Data points for the x-axis with length N.
+        y (np.ndarray): Corresponding data points for the y-axis with shape (C, N), where C is the number of classes.
+        names (list, optional): Names of the classes corresponding to the y-axis data; length C.
+        id (str, optional): Unique identifier for the logged data in wandb.
+        title (str, optional): Title for the visualization plot.
+        x_title (str, optional): Label for the x-axis.
+        y_title (str, optional): Label for the y-axis.
+        num_x (int, optional): Number of interpolated data points for visualization.
+        only_mean (bool, optional): Flag to indicate if only the mean curve should be plotted.
+
+    Notes:
+        The function leverages the '_custom_table' function to generate the actual visualization.
+    """
+    import numpy as np
+
+    # Create new x
+    if names is None:
+        names = []
+    x_new = np.linspace(x[0], x[-1], num_x).round(5)
+
+    # Create arrays for logging
+    x_log = x_new.tolist()
+    y_log = np.interp(x_new, x, np.mean(y, axis=0)).round(3).tolist()
+
+    if only_mean:
+        table = wb.Table(data=list(zip(x_log, y_log)), columns=[x_title, y_title])
+        wb.run.log({title: wb.plot.line(table, x_title, y_title, title=title)})
+    else:
+        classes = ["mean"] * len(x_log)
+        for i, yi in enumerate(y):
+            x_log.extend(x_new)  # add new x
+            y_log.extend(np.interp(x_new, x, yi))  # interpolate y to new x
+            classes.extend([names[i]] * len(x_new))  # add class names
+        wb.log({id: _custom_table(x_log, y_log, classes, title, x_title, y_title)}, commit=False)
+
+
+def _log_plots(plots, step):
+    """Log plots to WandB at a specific step if they haven't been logged already.
+
+    This function checks each plot in the input dictionary against previously processed plots and logs new or updated
+    plots to WandB at the specified step.
+
+    Args:
+        plots (dict): Dictionary of plots to log, where keys are plot names and values are dictionaries containing plot
+            metadata including timestamps.
+        step (int): The step/epoch at which to log the plots in the WandB run.
+
+    Notes:
+        The function uses a shallow copy of the plots dictionary to prevent modification during iteration.
+        Plots are identified by their stem name (filename without extension).
+        Each plot is logged as a WandB Image object.
+    """
+    for name, params in plots.copy().items():  # shallow copy to prevent plots dict changing during iteration
+        timestamp = params["timestamp"]
+        if _processed_plots.get(name) != timestamp:
+            wb.run.log({name.stem: wb.Image(str(name))}, step=step)
+            _processed_plots[name] = timestamp
+
+
+def on_pretrain_routine_start(trainer):
+    """Initialize and start wandb project if module is present."""
+    if not wb.run:
+        wb.init(
+            project=str(trainer.args.project).replace("/", "-") if trainer.args.project else "Ultralytics",
+            name=str(trainer.args.name).replace("/", "-"),
+            config=vars(trainer.args),
+        )
+
+
+def on_fit_epoch_end(trainer):
+    """Log training metrics and model information at the end of an epoch."""
+    _log_plots(trainer.plots, step=trainer.epoch + 1)
+    _log_plots(trainer.validator.plots, step=trainer.epoch + 1)
+    if trainer.epoch == 0:
+        wb.run.log(model_info_for_loggers(trainer), step=trainer.epoch + 1)
+    wb.run.log(trainer.metrics, step=trainer.epoch + 1, commit=True)  # commit forces sync
+
+
+def on_train_epoch_end(trainer):
+    """Log metrics and save images at the end of each training epoch."""
+    wb.run.log(trainer.label_loss_items(trainer.tloss, prefix="train"), step=trainer.epoch + 1)
+    wb.run.log(trainer.lr, step=trainer.epoch + 1)
+    if trainer.epoch == 1:
+        _log_plots(trainer.plots, step=trainer.epoch + 1)
+
+
+def on_train_end(trainer):
+    """Save the best model as an artifact and log final plots at the end of training."""
+    _log_plots(trainer.validator.plots, step=trainer.epoch + 1)
+    _log_plots(trainer.plots, step=trainer.epoch + 1)
+    art = wb.Artifact(type="model", name=f"run_{wb.run.id}_model")
+    if trainer.best.exists():
+        art.add_file(trainer.best)
+        wb.run.log_artifact(art, aliases=["best"])
+    # Check if we actually have plots to save
+    if trainer.args.plots and hasattr(trainer.validator.metrics, "curves_results"):
+        for curve_name, curve_values in zip(trainer.validator.metrics.curves, trainer.validator.metrics.curves_results):
+            x, y, x_title, y_title = curve_values
+            _plot_curve(
+                x,
+                y,
+                names=list(trainer.validator.metrics.names.values()),
+                id=f"curves/{curve_name}",
+                title=curve_name,
+                x_title=x_title,
+                y_title=y_title,
+            )
+    wb.run.finish()  # required or run continues on dashboard
+
+
+callbacks = (
+    {
+        "on_pretrain_routine_start": on_pretrain_routine_start,
+        "on_train_epoch_end": on_train_epoch_end,
+        "on_fit_epoch_end": on_fit_epoch_end,
+        "on_train_end": on_train_end,
+    }
+    if wb
+    else {}
+)
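And a comparable sketch for the Weights & Biases callbacks, assuming wandb is installed and authenticated (project and run names are illustrative):

    # Hypothetical sketch: enable W&B logging for a run.
    # Requires `pip install wandb` and `wandb login`; otherwise `wb` resolves to None above
    # and the module exports an empty callbacks dict.
    from ultralytics import YOLO, settings

    settings.update({"wandb": True})  # the SETTINGS["wandb"] gate checked at import time above

    model = YOLO("yolo11n.pt")
    model.train(data="coco8.yaml", epochs=3, project="ultralytics-demo", name="wandb-run")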