ultralytics 8.1.29__py3-none-any.whl → 8.3.62__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (247)
  1. tests/__init__.py +22 -0
  2. tests/conftest.py +83 -0
  3. tests/test_cli.py +122 -0
  4. tests/test_cuda.py +155 -0
  5. tests/test_engine.py +131 -0
  6. tests/test_exports.py +216 -0
  7. tests/test_integrations.py +150 -0
  8. tests/test_python.py +615 -0
  9. tests/test_solutions.py +94 -0
  10. ultralytics/__init__.py +11 -8
  11. ultralytics/cfg/__init__.py +569 -131
  12. ultralytics/cfg/datasets/Argoverse.yaml +2 -1
  13. ultralytics/cfg/datasets/DOTAv1.5.yaml +3 -2
  14. ultralytics/cfg/datasets/DOTAv1.yaml +3 -2
  15. ultralytics/cfg/datasets/GlobalWheat2020.yaml +3 -2
  16. ultralytics/cfg/datasets/ImageNet.yaml +2 -1
  17. ultralytics/cfg/datasets/Objects365.yaml +5 -4
  18. ultralytics/cfg/datasets/SKU-110K.yaml +2 -1
  19. ultralytics/cfg/datasets/VOC.yaml +3 -2
  20. ultralytics/cfg/datasets/VisDrone.yaml +6 -5
  21. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  22. ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
  23. ultralytics/cfg/datasets/carparts-seg.yaml +3 -2
  24. ultralytics/cfg/datasets/coco-pose.yaml +7 -6
  25. ultralytics/cfg/datasets/coco.yaml +3 -2
  26. ultralytics/cfg/datasets/coco128-seg.yaml +4 -3
  27. ultralytics/cfg/datasets/coco128.yaml +4 -3
  28. ultralytics/cfg/datasets/coco8-pose.yaml +3 -2
  29. ultralytics/cfg/datasets/coco8-seg.yaml +3 -2
  30. ultralytics/cfg/datasets/coco8.yaml +3 -2
  31. ultralytics/cfg/datasets/crack-seg.yaml +3 -2
  32. ultralytics/cfg/datasets/dog-pose.yaml +24 -0
  33. ultralytics/cfg/datasets/dota8.yaml +3 -2
  34. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
  35. ultralytics/cfg/datasets/lvis.yaml +1236 -0
  36. ultralytics/cfg/datasets/medical-pills.yaml +22 -0
  37. ultralytics/cfg/datasets/open-images-v7.yaml +2 -1
  38. ultralytics/cfg/datasets/package-seg.yaml +5 -4
  39. ultralytics/cfg/datasets/signature.yaml +21 -0
  40. ultralytics/cfg/datasets/tiger-pose.yaml +3 -2
  41. ultralytics/cfg/datasets/xView.yaml +2 -1
  42. ultralytics/cfg/default.yaml +14 -11
  43. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +24 -0
  44. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  45. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  46. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  47. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  48. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  49. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +5 -2
  50. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +5 -2
  51. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +5 -2
  52. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +5 -2
  53. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  54. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  55. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  56. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  57. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  58. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  59. ultralytics/cfg/models/v3/yolov3-spp.yaml +5 -2
  60. ultralytics/cfg/models/v3/yolov3-tiny.yaml +5 -2
  61. ultralytics/cfg/models/v3/yolov3.yaml +5 -2
  62. ultralytics/cfg/models/v5/yolov5-p6.yaml +5 -2
  63. ultralytics/cfg/models/v5/yolov5.yaml +5 -2
  64. ultralytics/cfg/models/v6/yolov6.yaml +5 -2
  65. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +5 -2
  66. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +5 -2
  67. ultralytics/cfg/models/v8/yolov8-cls.yaml +5 -2
  68. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +6 -2
  69. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +6 -2
  70. ultralytics/cfg/models/v8/yolov8-ghost.yaml +5 -2
  71. ultralytics/cfg/models/v8/yolov8-obb.yaml +5 -2
  72. ultralytics/cfg/models/v8/yolov8-p2.yaml +5 -2
  73. ultralytics/cfg/models/v8/yolov8-p6.yaml +10 -7
  74. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +5 -2
  75. ultralytics/cfg/models/v8/yolov8-pose.yaml +5 -2
  76. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +5 -2
  77. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +5 -2
  78. ultralytics/cfg/models/v8/yolov8-seg.yaml +5 -2
  79. ultralytics/cfg/models/v8/yolov8-world.yaml +5 -2
  80. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +5 -2
  81. ultralytics/cfg/models/v8/yolov8.yaml +5 -2
  82. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  83. ultralytics/cfg/models/v9/yolov9c.yaml +30 -25
  84. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  85. ultralytics/cfg/models/v9/yolov9e.yaml +46 -42
  86. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  87. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  88. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  89. ultralytics/cfg/solutions/default.yaml +24 -0
  90. ultralytics/cfg/trackers/botsort.yaml +8 -5
  91. ultralytics/cfg/trackers/bytetrack.yaml +8 -5
  92. ultralytics/data/__init__.py +14 -3
  93. ultralytics/data/annotator.py +37 -15
  94. ultralytics/data/augment.py +1783 -289
  95. ultralytics/data/base.py +62 -27
  96. ultralytics/data/build.py +36 -8
  97. ultralytics/data/converter.py +196 -36
  98. ultralytics/data/dataset.py +233 -94
  99. ultralytics/data/loaders.py +199 -96
  100. ultralytics/data/split_dota.py +39 -29
  101. ultralytics/data/utils.py +110 -40
  102. ultralytics/engine/__init__.py +1 -1
  103. ultralytics/engine/exporter.py +569 -242
  104. ultralytics/engine/model.py +604 -252
  105. ultralytics/engine/predictor.py +22 -11
  106. ultralytics/engine/results.py +1228 -218
  107. ultralytics/engine/trainer.py +190 -129
  108. ultralytics/engine/tuner.py +18 -18
  109. ultralytics/engine/validator.py +18 -15
  110. ultralytics/hub/__init__.py +31 -13
  111. ultralytics/hub/auth.py +11 -7
  112. ultralytics/hub/google/__init__.py +159 -0
  113. ultralytics/hub/session.py +128 -94
  114. ultralytics/hub/utils.py +20 -21
  115. ultralytics/models/__init__.py +4 -2
  116. ultralytics/models/fastsam/__init__.py +2 -3
  117. ultralytics/models/fastsam/model.py +26 -4
  118. ultralytics/models/fastsam/predict.py +127 -63
  119. ultralytics/models/fastsam/utils.py +1 -44
  120. ultralytics/models/fastsam/val.py +1 -1
  121. ultralytics/models/nas/__init__.py +1 -1
  122. ultralytics/models/nas/model.py +21 -10
  123. ultralytics/models/nas/predict.py +3 -6
  124. ultralytics/models/nas/val.py +4 -4
  125. ultralytics/models/rtdetr/__init__.py +1 -1
  126. ultralytics/models/rtdetr/model.py +1 -1
  127. ultralytics/models/rtdetr/predict.py +6 -8
  128. ultralytics/models/rtdetr/train.py +6 -2
  129. ultralytics/models/rtdetr/val.py +3 -3
  130. ultralytics/models/sam/__init__.py +3 -3
  131. ultralytics/models/sam/amg.py +29 -23
  132. ultralytics/models/sam/build.py +211 -13
  133. ultralytics/models/sam/model.py +91 -30
  134. ultralytics/models/sam/modules/__init__.py +1 -1
  135. ultralytics/models/sam/modules/blocks.py +1129 -0
  136. ultralytics/models/sam/modules/decoders.py +381 -53
  137. ultralytics/models/sam/modules/encoders.py +515 -324
  138. ultralytics/models/sam/modules/memory_attention.py +237 -0
  139. ultralytics/models/sam/modules/sam.py +969 -21
  140. ultralytics/models/sam/modules/tiny_encoder.py +425 -154
  141. ultralytics/models/sam/modules/transformer.py +159 -60
  142. ultralytics/models/sam/modules/utils.py +293 -0
  143. ultralytics/models/sam/predict.py +1263 -132
  144. ultralytics/models/utils/__init__.py +1 -1
  145. ultralytics/models/utils/loss.py +36 -24
  146. ultralytics/models/utils/ops.py +3 -7
  147. ultralytics/models/yolo/__init__.py +3 -3
  148. ultralytics/models/yolo/classify/__init__.py +1 -1
  149. ultralytics/models/yolo/classify/predict.py +7 -8
  150. ultralytics/models/yolo/classify/train.py +17 -22
  151. ultralytics/models/yolo/classify/val.py +8 -4
  152. ultralytics/models/yolo/detect/__init__.py +1 -1
  153. ultralytics/models/yolo/detect/predict.py +3 -5
  154. ultralytics/models/yolo/detect/train.py +11 -4
  155. ultralytics/models/yolo/detect/val.py +90 -52
  156. ultralytics/models/yolo/model.py +14 -9
  157. ultralytics/models/yolo/obb/__init__.py +1 -1
  158. ultralytics/models/yolo/obb/predict.py +2 -2
  159. ultralytics/models/yolo/obb/train.py +5 -3
  160. ultralytics/models/yolo/obb/val.py +41 -23
  161. ultralytics/models/yolo/pose/__init__.py +1 -1
  162. ultralytics/models/yolo/pose/predict.py +3 -5
  163. ultralytics/models/yolo/pose/train.py +2 -2
  164. ultralytics/models/yolo/pose/val.py +51 -17
  165. ultralytics/models/yolo/segment/__init__.py +1 -1
  166. ultralytics/models/yolo/segment/predict.py +3 -5
  167. ultralytics/models/yolo/segment/train.py +2 -2
  168. ultralytics/models/yolo/segment/val.py +60 -19
  169. ultralytics/models/yolo/world/__init__.py +5 -0
  170. ultralytics/models/yolo/world/train.py +92 -0
  171. ultralytics/models/yolo/world/train_world.py +109 -0
  172. ultralytics/nn/__init__.py +1 -1
  173. ultralytics/nn/autobackend.py +228 -93
  174. ultralytics/nn/modules/__init__.py +39 -14
  175. ultralytics/nn/modules/activation.py +21 -0
  176. ultralytics/nn/modules/block.py +526 -66
  177. ultralytics/nn/modules/conv.py +24 -7
  178. ultralytics/nn/modules/head.py +177 -34
  179. ultralytics/nn/modules/transformer.py +6 -5
  180. ultralytics/nn/modules/utils.py +1 -2
  181. ultralytics/nn/tasks.py +225 -77
  182. ultralytics/solutions/__init__.py +30 -1
  183. ultralytics/solutions/ai_gym.py +96 -143
  184. ultralytics/solutions/analytics.py +247 -0
  185. ultralytics/solutions/distance_calculation.py +78 -135
  186. ultralytics/solutions/heatmap.py +93 -247
  187. ultralytics/solutions/object_counter.py +184 -259
  188. ultralytics/solutions/parking_management.py +246 -0
  189. ultralytics/solutions/queue_management.py +112 -0
  190. ultralytics/solutions/region_counter.py +116 -0
  191. ultralytics/solutions/security_alarm.py +144 -0
  192. ultralytics/solutions/solutions.py +178 -0
  193. ultralytics/solutions/speed_estimation.py +86 -174
  194. ultralytics/solutions/streamlit_inference.py +190 -0
  195. ultralytics/solutions/trackzone.py +68 -0
  196. ultralytics/trackers/__init__.py +1 -1
  197. ultralytics/trackers/basetrack.py +32 -13
  198. ultralytics/trackers/bot_sort.py +61 -28
  199. ultralytics/trackers/byte_tracker.py +83 -51
  200. ultralytics/trackers/track.py +21 -6
  201. ultralytics/trackers/utils/__init__.py +1 -1
  202. ultralytics/trackers/utils/gmc.py +62 -48
  203. ultralytics/trackers/utils/kalman_filter.py +166 -35
  204. ultralytics/trackers/utils/matching.py +40 -21
  205. ultralytics/utils/__init__.py +511 -239
  206. ultralytics/utils/autobatch.py +40 -22
  207. ultralytics/utils/benchmarks.py +266 -85
  208. ultralytics/utils/callbacks/__init__.py +1 -1
  209. ultralytics/utils/callbacks/base.py +1 -3
  210. ultralytics/utils/callbacks/clearml.py +7 -6
  211. ultralytics/utils/callbacks/comet.py +39 -17
  212. ultralytics/utils/callbacks/dvc.py +1 -1
  213. ultralytics/utils/callbacks/hub.py +16 -16
  214. ultralytics/utils/callbacks/mlflow.py +28 -24
  215. ultralytics/utils/callbacks/neptune.py +6 -2
  216. ultralytics/utils/callbacks/raytune.py +3 -4
  217. ultralytics/utils/callbacks/tensorboard.py +18 -18
  218. ultralytics/utils/callbacks/wb.py +27 -20
  219. ultralytics/utils/checks.py +160 -100
  220. ultralytics/utils/dist.py +2 -1
  221. ultralytics/utils/downloads.py +40 -34
  222. ultralytics/utils/errors.py +1 -1
  223. ultralytics/utils/files.py +72 -38
  224. ultralytics/utils/instance.py +41 -19
  225. ultralytics/utils/loss.py +83 -55
  226. ultralytics/utils/metrics.py +61 -56
  227. ultralytics/utils/ops.py +94 -89
  228. ultralytics/utils/patches.py +30 -14
  229. ultralytics/utils/plotting.py +600 -269
  230. ultralytics/utils/tal.py +67 -26
  231. ultralytics/utils/torch_utils.py +302 -102
  232. ultralytics/utils/triton.py +2 -1
  233. ultralytics/utils/tuner.py +21 -12
  234. ultralytics-8.3.62.dist-info/METADATA +370 -0
  235. ultralytics-8.3.62.dist-info/RECORD +241 -0
  236. {ultralytics-8.1.29.dist-info → ultralytics-8.3.62.dist-info}/WHEEL +1 -1
  237. ultralytics/data/explorer/__init__.py +0 -5
  238. ultralytics/data/explorer/explorer.py +0 -472
  239. ultralytics/data/explorer/gui/__init__.py +0 -1
  240. ultralytics/data/explorer/gui/dash.py +0 -268
  241. ultralytics/data/explorer/utils.py +0 -166
  242. ultralytics/models/fastsam/prompt.py +0 -357
  243. ultralytics-8.1.29.dist-info/METADATA +0 -373
  244. ultralytics-8.1.29.dist-info/RECORD +0 -197
  245. {ultralytics-8.1.29.dist-info → ultralytics-8.3.62.dist-info}/LICENSE +0 -0
  246. {ultralytics-8.1.29.dist-info → ultralytics-8.3.62.dist-info}/entry_points.txt +0 -0
  247. {ultralytics-8.1.29.dist-info → ultralytics-8.3.62.dist-info}/top_level.txt +0 -0
ultralytics/engine/tuner.py CHANGED
@@ -1,7 +1,7 @@
- # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
  """
- This module provides functionalities for hyperparameter tuning of the Ultralytics YOLO models for object detection,
- instance segmentation, image classification, pose estimation, and multi-object tracking.
+ Module provides functionalities for hyperparameter tuning of the Ultralytics YOLO models for object detection, instance
+ segmentation, image classification, pose estimation, and multi-object tracking.
  
  Hyperparameter tuning is the process of systematically searching for the optimal set of hyperparameters
  that yield the best model performance. This is particularly crucial in deep learning models like YOLO,
@@ -12,8 +12,8 @@ Example:
      ```python
      from ultralytics import YOLO
  
-     model = YOLO('yolov8n.pt')
-     model.tune(data='coco8.yaml', epochs=10, iterations=300, optimizer='AdamW', plots=False, save=False, val=False)
+     model = YOLO("yolo11n.pt")
+     model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
      ```
  """
  
@@ -54,15 +54,15 @@ class Tuner:
          ```python
          from ultralytics import YOLO
  
-         model = YOLO('yolov8n.pt')
-         model.tune(data='coco8.yaml', epochs=10, iterations=300, optimizer='AdamW', plots=False, save=False, val=False)
+         model = YOLO("yolo11n.pt")
+         model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
          ```
  
          Tune with custom search space.
          ```python
          from ultralytics import YOLO
  
-         model = YOLO('yolov8n.pt')
+         model = YOLO("yolo11n.pt")
          model.tune(space={key1: val1, key2: val2})  # custom search space dictionary
          ```
      """
@@ -95,6 +95,7 @@ class Tuner:
              "perspective": (0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
              "flipud": (0.0, 1.0),  # image flip up-down (probability)
              "fliplr": (0.0, 1.0),  # image flip left-right (probability)
+             "bgr": (0.0, 1.0),  # image channel bgr (probability)
              "mosaic": (0.0, 1.0),  # image mixup (probability)
              "mixup": (0.0, 1.0),  # image mixup (probability)
              "copy_paste": (0.0, 1.0),  # segment copy-paste (probability)
@@ -139,7 +140,7 @@ class Tuner:
          # Mutate
          r = np.random  # method
          r.seed(int(time.time()))
-         g = np.array([v[2] if len(v) == 3 else 1.0 for k, v in self.space.items()])  # gains 0-1
+         g = np.array([v[2] if len(v) == 3 else 1.0 for v in self.space.values()])  # gains 0-1
          ng = len(self.space)
          v = np.ones(ng)
          while all(v == 1):  # mutate until a change occurs (prevent duplicates)
@@ -175,7 +176,6 @@
          The method utilizes the `self.tune_csv` Path object to read and log hyperparameters and fitness scores.
          Ensure this path is set correctly in the Tuner instance.
          """
-
          t0 = time.time()
          best_save_dir, best_metrics = None, None
          (self.tune_dir / "weights").mkdir(parents=True, exist_ok=True)
@@ -191,7 +191,7 @@
              try:
                  # Train YOLO model with mutated hyperparameters (run in subprocess to avoid dataloader hang)
                  cmd = ["yolo", "train", *(f"{k}={v}" for k, v in train_args.items())]
-                 return_code = subprocess.run(cmd, check=True).returncode
+                 return_code = subprocess.run(" ".join(cmd), check=True, shell=True).returncode
                  ckpt_file = weights_dir / ("best.pt" if (weights_dir / "best.pt").exists() else "last.pt")
                  metrics = torch.load(ckpt_file)["train_metrics"]
                  assert return_code == 0, "training failed"
@@ -217,19 +217,19 @@
                  for ckpt in weights_dir.glob("*.pt"):
                      shutil.copy2(ckpt, self.tune_dir / "weights")
              elif cleanup:
-                 shutil.rmtree(ckpt_file.parent)  # remove iteration weights/ dir to reduce storage space
+                 shutil.rmtree(weights_dir, ignore_errors=True)  # remove iteration weights/ dir to reduce storage space
  
              # Plot tune results
              plot_tune_results(self.tune_csv)
  
              # Save and print tune results
              header = (
-                 f'{self.prefix}{i + 1}/{iterations} iterations complete ✅ ({time.time() - t0:.2f}s)\n'
-                 f'{self.prefix}Results saved to {colorstr("bold", self.tune_dir)}\n'
-                 f'{self.prefix}Best fitness={fitness[best_idx]} observed at iteration {best_idx + 1}\n'
-                 f'{self.prefix}Best fitness metrics are {best_metrics}\n'
-                 f'{self.prefix}Best fitness model is {best_save_dir}\n'
-                 f'{self.prefix}Best fitness hyperparameters are printed below.\n'
+                 f"{self.prefix}{i + 1}/{iterations} iterations complete ✅ ({time.time() - t0:.2f}s)\n"
+                 f"{self.prefix}Results saved to {colorstr('bold', self.tune_dir)}\n"
+                 f"{self.prefix}Best fitness={fitness[best_idx]} observed at iteration {best_idx + 1}\n"
+                 f"{self.prefix}Best fitness metrics are {best_metrics}\n"
+                 f"{self.prefix}Best fitness model is {best_save_dir}\n"
+                 f"{self.prefix}Best fitness hyperparameters are printed below.\n"
              )
              LOGGER.info("\n" + header)
              data = {k: float(x[best_idx, i + 1]) for i, k in enumerate(self.space.keys())}
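The hunks above add a `bgr` channel-swap probability to the Tuner search space and tighten the mutation and logging code. A minimal sketch of exercising the updated search space from the Python API is shown below; the specific ranges and iteration count are illustrative assumptions, not values from this diff.

```python
from ultralytics import YOLO

# Sketch based on the Tuner docstring above; ranges and iteration count are illustrative.
model = YOLO("yolo11n.pt")
model.tune(
    data="coco8.yaml",
    epochs=10,
    iterations=50,  # illustrative, the docstring example uses 300
    space={"lr0": (1e-5, 1e-1), "bgr": (0.0, 1.0)},  # custom search space, including the new "bgr" gain
    plots=False,
    save=False,
    val=False,
)
```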
ultralytics/engine/validator.py CHANGED
@@ -1,9 +1,9 @@
- # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
  """
  Check a model's accuracy on a test or val split of a dataset.
  
  Usage:
-     $ yolo mode=val model=yolov8n.pt data=coco128.yaml imgsz=640
+     $ yolo mode=val model=yolov8n.pt data=coco8.yaml imgsz=640
  
  Usage - formats:
      $ yolo mode=val model=yolov8n.pt                 # PyTorch
@@ -17,6 +17,7 @@ Usage - formats:
                            yolov8n.tflite             # TensorFlow Lite
                            yolov8n_edgetpu.tflite     # TensorFlow Edge TPU
                            yolov8n_paddle_model       # PaddlePaddle
+                           yolov8n.mnn                # MNN
                            yolov8n_ncnn_model         # NCNN
  """
  
@@ -104,15 +105,14 @@ class BaseValidator:
  
      @smart_inference_mode()
      def __call__(self, trainer=None, model=None):
-         """Supports validation of a pre-trained model if passed or a model being trained if trainer is passed (trainer
-         gets priority).
-         """
+         """Executes validation process, running inference on dataloader and computing performance metrics."""
          self.training = trainer is not None
          augment = self.args.augment and (not self.training)
          if self.training:
              self.device = trainer.device
              self.data = trainer.data
-             self.args.half = self.device.type != "cpu"  # force FP16 val during training
+             # force FP16 val during training
+             self.args.half = self.device.type != "cpu" and trainer.amp
              model = trainer.ema.ema or trainer.model
              model = model.half() if self.args.half else model.float()
              # self.model = model
@@ -120,6 +120,8 @@ class BaseValidator:
              self.args.plots &= trainer.stopper.possible_stop or (trainer.epoch == trainer.epochs - 1)
              model.eval()
          else:
+             if str(self.args.model).endswith(".yaml") and model is None:
+                 LOGGER.warning("WARNING ⚠️ validating an untrained model YAML will result in 0 mAP.")
              callbacks.add_integration_callbacks(self)
              model = AutoBackend(
                  weights=model or self.args.model,
@@ -136,17 +138,17 @@ class BaseValidator:
              if engine:
                  self.args.batch = model.batch_size
              elif not pt and not jit:
-                 self.args.batch = 1  # export.py models default to batch-size 1
-                 LOGGER.info(f"Forcing batch=1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")
+                 self.args.batch = model.metadata.get("batch", 1)  # export.py models default to batch-size 1
+                 LOGGER.info(f"Setting batch={self.args.batch} input of shape ({self.args.batch}, 3, {imgsz}, {imgsz})")
  
-             if str(self.args.data).split(".")[-1] in ("yaml", "yml"):
+             if str(self.args.data).split(".")[-1] in {"yaml", "yml"}:
                  self.data = check_det_dataset(self.args.data)
              elif self.args.task == "classify":
                  self.data = check_cls_dataset(self.args.data, split=self.args.split)
              else:
                  raise FileNotFoundError(emojis(f"Dataset '{self.args.data}' for task={self.args.task} not found ❌"))
  
-             if self.device.type in ("cpu", "mps"):
+             if self.device.type in {"cpu", "mps"}:
                  self.args.workers = 0  # faster CPU val as time dominated by inference, not dataloading
              if not pt:
                  self.args.rect = False
@@ -204,8 +206,9 @@ class BaseValidator:
              return {k: round(float(v), 5) for k, v in results.items()}  # return results as 5 decimal place floats
          else:
              LOGGER.info(
-                 "Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image"
-                 % tuple(self.speed.values())
+                 "Speed: {:.1f}ms preprocess, {:.1f}ms inference, {:.1f}ms loss, {:.1f}ms postprocess per image".format(
+                     *tuple(self.speed.values())
+                 )
              )
              if self.args.save_json and self.jdict:
                  with open(str(self.save_dir / "predictions.json"), "w") as f:
@@ -242,7 +245,7 @@
  
          cost_matrix = iou * (iou >= threshold)
          if cost_matrix.any():
-             labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix, maximize=True)
+             labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix)
              valid = cost_matrix[labels_idx, detections_idx] > 0
              if valid.any():
                  correct[detections_idx[valid], i] = True
@@ -280,7 +283,7 @@
          return batch
  
      def postprocess(self, preds):
-         """Describes and summarizes the purpose of 'postprocess()' but no details mentioned."""
+         """Preprocesses the predictions."""
          return preds
  
      def init_metrics(self, model):
@@ -317,7 +320,7 @@
          return []
  
      def on_plot(self, name, data=None):
-         """Registers plots (e.g. to be consumed in callbacks)"""
+         """Registers plots (e.g. to be consumed in callbacks)."""
          self.plots[Path(name)] = {"data": data, "timestamp": time.time()}
  
      # TODO: may need to put these following functions into callback
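The usage header above lists the CLI form of validation; a rough Python-API equivalent is sketched below, assuming the standard `YOLO(...).val()` entry point rather than anything introduced in this diff.

```python
from ultralytics import YOLO

# Rough Python equivalent of `yolo mode=val model=yolov8n.pt data=coco8.yaml imgsz=640`.
model = YOLO("yolov8n.pt")
metrics = model.val(data="coco8.yaml", imgsz=640)  # runs the BaseValidator under the hood
print(metrics.box.map)  # mAP50-95, assuming a detection model
```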
ultralytics/hub/__init__.py CHANGED
@@ -1,12 +1,27 @@
- # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
  
  import requests
  
  from ultralytics.data.utils import HUBDatasetStats
  from ultralytics.hub.auth import Auth
- from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX
+ from ultralytics.hub.session import HUBTrainingSession
+ from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, events
  from ultralytics.utils import LOGGER, SETTINGS, checks
  
+ __all__ = (
+     "PREFIX",
+     "HUB_WEB_ROOT",
+     "HUBTrainingSession",
+     "login",
+     "logout",
+     "reset_model",
+     "export_fmts_hub",
+     "export_model",
+     "get_export",
+     "check_dataset",
+     "events",
+ )
+
  
  def login(api_key: str = None, save=True) -> bool:
      """
@@ -23,7 +38,7 @@ def login(api_key: str = None, save=True) -> bool:
      Returns:
          (bool): True if authentication is successful, False otherwise.
      """
-     checks.check_requirements("hub-sdk>=0.0.2")
+     checks.check_requirements("hub-sdk>=0.0.12")
      from hub_sdk import HUBClient
  
      api_key_url = f"{HUB_WEB_ROOT}/settings?tab=api+keys"  # set the redirect URL
@@ -48,13 +63,13 @@ def login(api_key: str = None, save=True) -> bool:
          return True
      else:
          # Failed to authenticate with HUB
-         LOGGER.info(f"{PREFIX}Get API key from {api_key_url} and then run 'yolo hub login API_KEY'")
+         LOGGER.info(f"{PREFIX}Get API key from {api_key_url} and then run 'yolo login API_KEY'")
          return False
  
  
  def logout():
      """
-     Log out of Ultralytics HUB by removing the API key from the settings file. To log in again, use 'yolo hub login'.
+     Log out of Ultralytics HUB by removing the API key from the settings file. To log in again, use 'yolo login'.
  
      Example:
          ```python
@@ -64,8 +79,7 @@ def logout():
          ```
      """
      SETTINGS["api_key"] = ""
-     SETTINGS.save()
-     LOGGER.info(f"{PREFIX}logged out ✅. To log in again, use 'yolo hub login'.")
+     LOGGER.info(f"{PREFIX}logged out ✅. To log in again, use 'yolo login'.")
  
  
  def reset_model(model_id=""):
@@ -106,22 +120,26 @@ def get_export(model_id="", format="torchscript"):
      return r.json()
  
  
- def check_dataset(path="", task="detect"):
+ def check_dataset(path: str, task: str) -> None:
      """
      Function for error-checking HUB dataset Zip file before upload. It checks a dataset for errors before it is uploaded
      to the HUB. Usage examples are given below.
  
      Args:
-         path (str, optional): Path to data.zip (with data.yaml inside data.zip). Defaults to ''.
-         task (str, optional): Dataset task. Options are 'detect', 'segment', 'pose', 'classify'. Defaults to 'detect'.
+         path (str): Path to data.zip (with data.yaml inside data.zip).
+         task (str): Dataset task. Options are 'detect', 'segment', 'pose', 'classify', 'obb'.
  
      Example:
+         Download *.zip files from https://github.com/ultralytics/hub/tree/main/example_datasets
+         i.e. https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip for coco8.zip.
          ```python
          from ultralytics.hub import check_dataset
  
-         check_dataset('path/to/coco8.zip', task='detect')  # detect dataset
-         check_dataset('path/to/coco8-seg.zip', task='segment')  # segment dataset
-         check_dataset('path/to/coco8-pose.zip', task='pose')  # pose dataset
+         check_dataset("path/to/coco8.zip", task="detect")  # detect dataset
+         check_dataset("path/to/coco8-seg.zip", task="segment")  # segment dataset
+         check_dataset("path/to/coco8-pose.zip", task="pose")  # pose dataset
+         check_dataset("path/to/dota8.zip", task="obb")  # OBB dataset
+         check_dataset("path/to/imagenet10.zip", task="classify")  # classification dataset
          ```
      """
      HUBDatasetStats(path=path, task=task).get_json()
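For reference, a short sketch of the `ultralytics.hub` surface exported by the new `__all__` above; the API key and dataset path are placeholders, not values from the diff.

```python
from ultralytics import hub

# Sketch of the hub functions listed in __all__ above; key and path are placeholders.
hub.login("API_KEY")  # same effect as the CLI `yolo login API_KEY`

# Error-check a dataset zip before uploading it to Ultralytics HUB.
hub.check_dataset("path/to/coco8.zip", task="detect")

hub.logout()  # removes the saved API key; log in again with `yolo login`
```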
ultralytics/hub/auth.py CHANGED
@@ -1,9 +1,9 @@
- # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
  
  import requests
  
  from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, request_with_credentials
- from ultralytics.utils import LOGGER, SETTINGS, emojis, is_colab
+ from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, emojis
  
  API_KEY_URL = f"{HUB_WEB_ROOT}/settings?tab=api+keys"
  
@@ -27,10 +27,14 @@ class Auth:
  
      def __init__(self, api_key="", verbose=False):
          """
-         Initialize the Auth class with an optional API key.
+         Initialize Auth class and authenticate user.
+
+         Handles API key validation, Google Colab authentication, and new key requests. Updates SETTINGS upon successful
+         authentication.
  
          Args:
-             api_key (str, optional): May be an API key or a combination API key and model ID, i.e. key_id
+             api_key (str): API key or combined key_id format.
+             verbose (bool): Enable verbose logging.
          """
          # Split the input API key in case it contains a combined key_model and keep only the API key part
          api_key = api_key.split("_")[0]
@@ -50,7 +54,7 @@ class Auth:
              # Attempt to authenticate with the provided API key
              success = self.authenticate()
          # If the API key is not provided and the environment is a Google Colab notebook
-         elif is_colab():
+         elif IS_COLAB:
              # Attempt to authenticate using browser cookies
              success = self.auth_with_cookies()
          else:
@@ -64,7 +68,7 @@ class Auth:
              if verbose:
                  LOGGER.info(f"{PREFIX}New authentication successful ✅")
          elif verbose:
-             LOGGER.info(f"{PREFIX}Get API key from {API_KEY_URL} and then run 'yolo hub login API_KEY'")
+             LOGGER.info(f"{PREFIX}Get API key from {API_KEY_URL} and then run 'yolo login API_KEY'")
  
      def request_api_key(self, max_attempts=3):
          """
@@ -109,7 +113,7 @@ class Auth:
          Returns:
              (bool): True if authentication is successful, False otherwise.
          """
-         if not is_colab():
+         if not IS_COLAB:
              return False  # Currently only works with Colab
          try:
              authn = request_with_credentials(f"{HUB_API_ROOT}/v1/auth/auto")
ultralytics/hub/google/__init__.py ADDED
@@ -0,0 +1,159 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ import concurrent.futures
+ import statistics
+ import time
+ from typing import List, Optional, Tuple
+
+ import requests
+
+
+ class GCPRegions:
+     """
+     A class for managing and analyzing Google Cloud Platform (GCP) regions.
+
+     This class provides functionality to initialize, categorize, and analyze GCP regions based on their
+     geographical location, tier classification, and network latency.
+
+     Attributes:
+         regions (Dict[str, Tuple[int, str, str]]): A dictionary of GCP regions with their tier, city, and country.
+
+     Methods:
+         tier1: Returns a list of tier 1 GCP regions.
+         tier2: Returns a list of tier 2 GCP regions.
+         lowest_latency: Determines the GCP region(s) with the lowest network latency.
+
+     Examples:
+         >>> from ultralytics.hub.google import GCPRegions
+         >>> regions = GCPRegions()
+         >>> lowest_latency_region = regions.lowest_latency(verbose=True, attempts=3)
+         >>> print(f"Lowest latency region: {lowest_latency_region[0][0]}")
+     """
+
+     def __init__(self):
+         """Initializes the GCPRegions class with predefined Google Cloud Platform regions and their details."""
+         self.regions = {
+             "asia-east1": (1, "Taiwan", "China"),
+             "asia-east2": (2, "Hong Kong", "China"),
+             "asia-northeast1": (1, "Tokyo", "Japan"),
+             "asia-northeast2": (1, "Osaka", "Japan"),
+             "asia-northeast3": (2, "Seoul", "South Korea"),
+             "asia-south1": (2, "Mumbai", "India"),
+             "asia-south2": (2, "Delhi", "India"),
+             "asia-southeast1": (2, "Jurong West", "Singapore"),
+             "asia-southeast2": (2, "Jakarta", "Indonesia"),
+             "australia-southeast1": (2, "Sydney", "Australia"),
+             "australia-southeast2": (2, "Melbourne", "Australia"),
+             "europe-central2": (2, "Warsaw", "Poland"),
+             "europe-north1": (1, "Hamina", "Finland"),
+             "europe-southwest1": (1, "Madrid", "Spain"),
+             "europe-west1": (1, "St. Ghislain", "Belgium"),
+             "europe-west10": (2, "Berlin", "Germany"),
+             "europe-west12": (2, "Turin", "Italy"),
+             "europe-west2": (2, "London", "United Kingdom"),
+             "europe-west3": (2, "Frankfurt", "Germany"),
+             "europe-west4": (1, "Eemshaven", "Netherlands"),
+             "europe-west6": (2, "Zurich", "Switzerland"),
+             "europe-west8": (1, "Milan", "Italy"),
+             "europe-west9": (1, "Paris", "France"),
+             "me-central1": (2, "Doha", "Qatar"),
+             "me-west1": (1, "Tel Aviv", "Israel"),
+             "northamerica-northeast1": (2, "Montreal", "Canada"),
+             "northamerica-northeast2": (2, "Toronto", "Canada"),
+             "southamerica-east1": (2, "São Paulo", "Brazil"),
+             "southamerica-west1": (2, "Santiago", "Chile"),
+             "us-central1": (1, "Iowa", "United States"),
+             "us-east1": (1, "South Carolina", "United States"),
+             "us-east4": (1, "Northern Virginia", "United States"),
+             "us-east5": (1, "Columbus", "United States"),
+             "us-south1": (1, "Dallas", "United States"),
+             "us-west1": (1, "Oregon", "United States"),
+             "us-west2": (2, "Los Angeles", "United States"),
+             "us-west3": (2, "Salt Lake City", "United States"),
+             "us-west4": (2, "Las Vegas", "United States"),
+         }
+
+     def tier1(self) -> List[str]:
+         """Returns a list of GCP regions classified as tier 1 based on predefined criteria."""
+         return [region for region, info in self.regions.items() if info[0] == 1]
+
+     def tier2(self) -> List[str]:
+         """Returns a list of GCP regions classified as tier 2 based on predefined criteria."""
+         return [region for region, info in self.regions.items() if info[0] == 2]
+
+     @staticmethod
+     def _ping_region(region: str, attempts: int = 1) -> Tuple[str, float, float, float, float]:
+         """Pings a specified GCP region and returns latency statistics: mean, min, max, and standard deviation."""
+         url = f"https://{region}-docker.pkg.dev"
+         latencies = []
+         for _ in range(attempts):
+             try:
+                 start_time = time.time()
+                 _ = requests.head(url, timeout=5)
+                 latency = (time.time() - start_time) * 1000  # convert latency to milliseconds
+                 if latency != float("inf"):
+                     latencies.append(latency)
+             except requests.RequestException:
+                 pass
+         if not latencies:
+             return region, float("inf"), float("inf"), float("inf"), float("inf")
+
+         std_dev = statistics.stdev(latencies) if len(latencies) > 1 else 0
+         return region, statistics.mean(latencies), std_dev, min(latencies), max(latencies)
+
+     def lowest_latency(
+         self,
+         top: int = 1,
+         verbose: bool = False,
+         tier: Optional[int] = None,
+         attempts: int = 1,
+     ) -> List[Tuple[str, float, float, float, float]]:
+         """
+         Determines the GCP regions with the lowest latency based on ping tests.
+
+         Args:
+             top (int): Number of top regions to return.
+             verbose (bool): If True, prints detailed latency information for all tested regions.
+             tier (int | None): Filter regions by tier (1 or 2). If None, all regions are tested.
+             attempts (int): Number of ping attempts per region.
+
+         Returns:
+             (List[Tuple[str, float, float, float, float]]): List of tuples containing region information and
+                 latency statistics. Each tuple contains (region, mean_latency, std_dev, min_latency, max_latency).
+
+         Examples:
+             >>> regions = GCPRegions()
+             >>> results = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=2)
+             >>> print(results[0][0])  # Print the name of the lowest latency region
+         """
+         if verbose:
+             print(f"Testing GCP regions for latency (with {attempts} {'retry' if attempts == 1 else 'attempts'})...")
+
+         regions_to_test = [k for k, v in self.regions.items() if v[0] == tier] if tier else list(self.regions.keys())
+         with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
+             results = list(executor.map(lambda r: self._ping_region(r, attempts), regions_to_test))
+
+         sorted_results = sorted(results, key=lambda x: x[1])
+
+         if verbose:
+             print(f"{'Region':<25} {'Location':<35} {'Tier':<5} Latency (ms)")
+             for region, mean, std, min_, max_ in sorted_results:
+                 tier, city, country = self.regions[region]
+                 location = f"{city}, {country}"
+                 if mean == float("inf"):
+                     print(f"{region:<25} {location:<35} {tier:<5} Timeout")
+                 else:
+                     print(f"{region:<25} {location:<35} {tier:<5} {mean:.0f} ± {std:.0f} ({min_:.0f} - {max_:.0f})")
+             print(f"\nLowest latency region{'s' if top > 1 else ''}:")
+             for region, mean, std, min_, max_ in sorted_results[:top]:
+                 tier, city, country = self.regions[region]
+                 location = f"{city}, {country}"
+                 print(f"{region} ({location}, {mean:.0f} ± {std:.0f} ms ({min_:.0f} - {max_:.0f}))")
+
+         return sorted_results[:top]
+
+
+ # Usage example
+ if __name__ == "__main__":
+     regions = GCPRegions()
+     top_3_latency_tier1 = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=3)