dgenerate_ultralytics_headless-8.3.249-py3-none-any.whl → dgenerate_ultralytics_headless-8.3.252-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.249
+ Version: 8.3.252
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,4 +1,4 @@
- dgenerate_ultralytics_headless-8.3.249.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.252.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=bCox_hLdGRFYGLb2kd722VdNP2zEXNYNuLLYtqZSrbw,804
  tests/conftest.py,sha256=mOy9lGpNp7lk1hHl6_pVE0f9cU-72gnkoSm4TO-CNZU,2318
  tests/test_cli.py,sha256=GhIFHi-_WIJpDgoGNRi0DnjbfwP1wHbklBMnkCM-P_4,5464
@@ -8,11 +8,11 @@ tests/test_exports.py,sha256=5G5EgDmars6d-N7TVnJdDFWId0IJs-yw03DvdQIjrNU,14246
  tests/test_integrations.py,sha256=6QgSh9n0J04RdUYz08VeVOnKmf4S5MDEQ0chzS7jo_c,6220
  tests/test_python.py,sha256=viMvRajIbDZdm64hRRg9i8qZ1sU9frwB69e56mxwEXk,29266
  tests/test_solutions.py,sha256=CIaphpmOXgz9AE9xcm1RWODKrwGfZLCc84IggGXArNM,14122
- ultralytics/__init__.py,sha256=nbR9xzuCbfetnkDGSTE5sxiBO0svWjIEk40CpLGaTJo,1302
+ ultralytics/__init__.py,sha256=_RqBO8Tb9tCa2x-0NyaxY_pMTxp9NZgmgfraktQZQNQ,1302
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=msy7NE7le831rv5-eRdLFkHRkf8o7m07HRkY4810n9s,40208
+ ultralytics/cfg/__init__.py,sha256=sJfreQYmFkCaW9eWex-Um1tG-4zRpC2Q7GuJAWBrFpk,40401
  ultralytics/cfg/default.yaml,sha256=KKENSHolDSto1HJVGjBvTXvz9ae-XMcYRzKrjU3QfZc,8912
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=QGpdh3Hj5dFrvbsaE_8rAVj9BO4XpKTB7uhXaTTnE-o,3364
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=KE7VC-ZMDSei1pLPm-pdk_ZAMRU_gLwGgtIQNbwp6dA,1212
@@ -22,6 +22,7 @@ ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256=xEtSqEad-rtfGuIrERjjhdISggmP
  ultralytics/cfg/datasets/ImageNet.yaml,sha256=N9NHhIgnlNIBqZZbzQZAW3aCnz6RSXQABnopaDs5BmE,42529
  ultralytics/cfg/datasets/Objects365.yaml,sha256=8Bl-NAm0mlMW8EfMsz39JZo-HCvmp0ejJXaMeoHTpqw,9649
  ultralytics/cfg/datasets/SKU-110K.yaml,sha256=xvRkq3SdDOwBA91U85bln7HTXkod5MvFX6pt1PxTjJE,2609
+ ultralytics/cfg/datasets/TT100K.yaml,sha256=qrJ6nrZdvrMy5ov9FaHn-pFI8hJn_WLYaB60vhtCOxs,6918
  ultralytics/cfg/datasets/VOC.yaml,sha256=XpaegRHjp7xZnenOuA9zgg2lQURSL-o7mLQwzIKKuqM,3803
  ultralytics/cfg/datasets/VisDrone.yaml,sha256=PfudojW5av_5q-dC9VsG_xhvuv9cTGEpRp4loXCJ4Ng,3397
  ultralytics/cfg/datasets/african-wildlife.yaml,sha256=6UfO_gnwJEDVq05p72IMJfkTIKZlXKNLSeKru-JyTrQ,915
@@ -123,12 +124,12 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=Ncf5GK5xAqSu0DH-6z5V53qZB7LstDJFTMF5a-7VQfs,72639
- ultralytics/engine/model.py,sha256=61ea1rB0wmL0CCaEr8p5gzneH0eL55OOMaTcFt8fR80,53079
+ ultralytics/engine/exporter.py,sha256=jbA_twaFBOjJ3Gx2pDuwEE8brduAWD8tFdPJ8rlVuCk,72718
+ ultralytics/engine/model.py,sha256=1Ex0Q7XOwWWtTsTMk-7O4wWiA2cYGayKJwB3zDC1XTg,53223
  ultralytics/engine/predictor.py,sha256=neYmNDX27Vv3ggk9xqaKlH6XzB2vlFIghU5o7ZC0zFo,22838
  ultralytics/engine/results.py,sha256=DomI01voqR_i7v8LhDGb6jWCprWB4H6I436GSO2NMBY,68030
- ultralytics/engine/trainer.py,sha256=mqVrhL8xnJwwKJVjxDEiiwu0WH48Ne5dB4SXxlxyHh4,45479
- ultralytics/engine/tuner.py,sha256=qiozSxYC-Hk1TQgyftrYTKkqLrrwFzjjkT6mOYR3Vjc,21460
+ ultralytics/engine/trainer.py,sha256=riVwjf_4uhrkH5TYjAvRQmIerNT7pxPBM8jWA60oF-A,45851
+ ultralytics/engine/tuner.py,sha256=xZGIYwpQVdnzQcdEmLc70eQy7G7swQQEgdDGxoBLmHY,21570
  ultralytics/engine/validator.py,sha256=2rqdVt4hB9ruMJq-L7PbaCNFwuERS7ZHdVSg91RM3wk,17761
  ultralytics/hub/__init__.py,sha256=Z0K_E00jzQh90b18q3IDChwVmTvyIYp6C00sCV-n2F8,6709
  ultralytics/hub/auth.py,sha256=ANzCeZA7lUzTWc_sFHbDuuyBh1jLl2sTpHkoUbIkFYE,6254
@@ -253,8 +254,8 @@ ultralytics/trackers/utils/matching.py,sha256=x6uZOIx0O9oVmAcfY6tYMTJQE2cDTUlRR6
  ultralytics/utils/__init__.py,sha256=JfvODTB4mG_JOhTeCiPtq0iCEgiCh14hJf195rnOhLQ,55145
  ultralytics/utils/autobatch.py,sha256=jiE4m_--H9UkXFDm_FqzcZk_hSTCGpS72XdVEKgZwAo,5114
  ultralytics/utils/autodevice.py,sha256=rXlPuo-iX-vZ4BabmMGEGh9Uxpau4R7Zlt1KCo9Xfyc,8892
- ultralytics/utils/benchmarks.py,sha256=S_W4S4pe2ktSRdSuWb6m09UEFQmZhmjl943bbo67hOI,32277
- ultralytics/utils/checks.py,sha256=NFtryEVFsmY35OsTDS-iEFKmU7nT9TVf_5qkUOF6f1U,38997
+ ultralytics/utils/benchmarks.py,sha256=KOFm2AZPehrJajbUu6NTdZoVOFjTpLhUUnfL59sC60w,32293
+ ultralytics/utils/checks.py,sha256=DheB1ip9ba7ZW_fjPieNdx98vZpwUDbnCKmavAIzJL4,39411
  ultralytics/utils/cpu.py,sha256=OksKOlX93AsbSsFuoYvLXRXgpkOibrZSwQyW6lipt4Q,3493
  ultralytics/utils/dist.py,sha256=hOuY1-unhQAY-uWiZw3LWw36d1mqJuYK75NdlwB4oKE,4131
  ultralytics/utils/downloads.py,sha256=IyiGjjXqOyf1B0qLMk7vE6sSQ8s232OhKS8aj9XbTgs,22883
@@ -265,16 +266,16 @@ ultralytics/utils/git.py,sha256=UdqeIiiEzg1qkerAZrg5YtTYPuJYwrpxW9N_6Pq6s8U,5501
  ultralytics/utils/instance.py,sha256=11mhefvTI9ftMqSirXuiViAi0Fxlo6v84qvNxfRNUoE,18862
  ultralytics/utils/logger.py,sha256=T5iaNnaqbCvx_FZf1dhVkr5FVxyxb4vO17t4SJfCIhg,19132
  ultralytics/utils/loss.py,sha256=t-z7qkvqF8OtuRHrj2wmvClZV2CCumIRi9jnqkc9i_A,39573
- ultralytics/utils/metrics.py,sha256=dpS9jSPf3dqozcrkiraKhYBI03U2t-_lt8pWNCijGww,69152
+ ultralytics/utils/metrics.py,sha256=SpyMGnuRwwmorJqSdUsDQquVpGmgfj1X3PNDiw_ZZWM,69152
  ultralytics/utils/nms.py,sha256=zv1rOzMF6WU8Kdk41VzNf1H1EMt_vZHcbDFbg3mnN2o,14248
  ultralytics/utils/ops.py,sha256=nWvTLJSBeW_XrxCy5Ytxl7sZJHp2sRqyCv4mm8QwYnw,25797
  ultralytics/utils/patches.py,sha256=mD3slAMAhcezzP42_fOWmacNMU6zXB68Br4_EBCyIjs,7117
- ultralytics/utils/plotting.py,sha256=SmKGGNYcd3cKaa5nQGqKUSEG2eG23QR1EdZh-9bB9ls,48301
- ultralytics/utils/tal.py,sha256=w7oi6fp0NmL6hHh-yvCCX1cBuuB4JuX7w1wiR4_SMZs,20678
+ ultralytics/utils/plotting.py,sha256=_iXs4gs8tzMSgiKxCriD4un-MJkOsC3lGSy0wn7qZGk,48433
+ ultralytics/utils/tal.py,sha256=iabLTij-MVyKxrkwhIOC1ouRB5Iy80Zp5H8aoYjvJJY,20773
  ultralytics/utils/torch_utils.py,sha256=zOPUQlorTiEPSkqlSEPyaQhpmzmgOIKF7f3xJb0UjdQ,40268
- ultralytics/utils/tqdm.py,sha256=4kL_nczykHu6VxRzRSbvUSJknrCZydoS_ZegZkFXpsg,16197
+ ultralytics/utils/tqdm.py,sha256=f2W608Qpvgu6tFi28qylaZpcRv3IX8wTGY_8lgicaqY,16343
  ultralytics/utils/triton.py,sha256=BQu3CD3OlT76d1OtmnX5slQU37VC1kzRvEtfI2saIQA,5211
- ultralytics/utils/tuner.py,sha256=NOh0CDAqD1IvTLB5UglIgSS5RXP7lmiyrWKU4uJ0I74,7355
+ ultralytics/utils/tuner.py,sha256=1PM7G89X95Yfmhskk8LBXU8T-Bfiln1Ajbnz2lkgvAI,7303
  ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
  ultralytics/utils/callbacks/base.py,sha256=floD31JHqHpiVabQiE76_hzC_j7KjtL4w_czkD1bLKc,6883
  ultralytics/utils/callbacks/clearml.py,sha256=LjfNe4mswceCOpEGVLxqGXjkl_XGbef4awdcp4502RU,5831
@@ -283,16 +284,16 @@ ultralytics/utils/callbacks/dvc.py,sha256=YT0Sa5P8Huj8Fn9jM2P6MYzUY3PIVxsa5BInVi
  ultralytics/utils/callbacks/hub.py,sha256=fVLqqr3ZM6hoYFlVMEeejfq1MWDrkWCskPFOG3HGILQ,4159
  ultralytics/utils/callbacks/mlflow.py,sha256=wCXjQgdufp9LYujqMzLZOmIOur6kvrApHNeo9dA7t_g,5323
  ultralytics/utils/callbacks/neptune.py,sha256=_vt3cMwDHCR-LyT3KtRikGpj6AG11oQ-skUUUUdZ74o,4391
- ultralytics/utils/callbacks/platform.py,sha256=EbKwGV0kVX1ZfwR6MBOPSeDKWpd4-nVO14uydMpJlRs,14798
+ ultralytics/utils/callbacks/platform.py,sha256=eFPP5vgwGhGb0lHbJgaU24JDz8l6vEO9qQuzUIYhSsU,15977
  ultralytics/utils/callbacks/raytune.py,sha256=Y0dFyNZVRuFovSh7nkgUIHTQL3xIXOACElgHuYbg_5I,1278
  ultralytics/utils/callbacks/tensorboard.py,sha256=PTJYvD2gqRUN8xw5VoTjvKnu2adukLfvhMlDgTnTiFU,4952
  ultralytics/utils/callbacks/wb.py,sha256=ghmL3gigOa-z_F54-TzMraKw9MAaYX-Wk4H8dLoRvX8,7705
  ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqdFfmTZ30,333
  ultralytics/utils/export/engine.py,sha256=23-lC6dNsmz5vprSJzaN7UGNXrFlVedNcqhlOH_IXes,9956
- ultralytics/utils/export/imx.py,sha256=2_mcNzqRIk5LB92JofqNYLN0kkQke1UgKT2jWmEy_l4,13300
+ ultralytics/utils/export/imx.py,sha256=0TNooKXzMagOMQxGxj90kEOAHrycQNNSLMdRQH-SJ30,13299
  ultralytics/utils/export/tensorflow.py,sha256=igYzwbdblb9YgfV4Jgl5lMvynuVRcF51dAzI7j-BBI0,9966
- dgenerate_ultralytics_headless-8.3.249.dist-info/METADATA,sha256=msxmb2FNZ3CXXLbjpCua4wN7TWRuuvQA8wtdINrhQ4k,38799
- dgenerate_ultralytics_headless-8.3.249.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.249.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.249.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.249.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.252.dist-info/METADATA,sha256=zxy224qyGwtRKlPacAxNEMy_xHvyeRHg4Wni-dd5ULw,38799
+ dgenerate_ultralytics_headless-8.3.252.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.252.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.252.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.252.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.249"
+ __version__ = "8.3.252"

  import importlib
  import os
ultralytics/cfg/__init__.py CHANGED
@@ -410,9 +410,11 @@ def get_save_dir(args: SimpleNamespace, name: str | None = None) -> Path:
  else:
  from ultralytics.utils.files import increment_path

- project = args.project or (ROOT.parent / "tests/tmp/runs" if TESTS_RUNNING else RUNS_DIR) / args.task
+ runs = (ROOT.parent / "tests/tmp/runs" if TESTS_RUNNING else RUNS_DIR) / args.task
+ nested = args.project and len(Path(args.project).parts) > 1 # e.g. "user/project" or "org\repo"
+ project = runs / args.project if nested else args.project or runs
  name = name or args.name or f"{args.mode}"
- save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in {-1, 0} else True)
+ save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in {-1, 0} else True, mkdir=True)

  return Path(save_dir).resolve() # resolve to display full path in console

@@ -725,8 +727,8 @@ def handle_yolo_solutions(args: list[str]) -> None:
  )
  if solution_name == "analytics": # analytical graphs follow fixed shape for output i.e w=1920, h=1080
  w, h = 1280, 720
- save_dir = get_save_dir(SimpleNamespace(project="runs/solutions", name="exp", exist_ok=False))
- save_dir.mkdir(parents=True) # create the output directory i.e. runs/solutions/exp
+ save_dir = get_save_dir(SimpleNamespace(task="solutions", name="exp", exist_ok=False, project=None))
+ save_dir.mkdir(parents=True, exist_ok=True) # create the output directory i.e. runs/solutions/exp
  vw = cv2.VideoWriter(str(save_dir / f"{solution_name}.avi"), cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

  try: # Process video frames
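Note on the get_save_dir() change above: a project value containing a path separator (e.g. "user/project") is now nested under the runs directory instead of being used as a literal top-level path, and increment_path() now creates the directory (mkdir=True). A minimal illustrative sketch of the new resolution logic, assuming RUNS_DIR="runs", task="detect" and mode="train" (the helper below is hypothetical, not part of the library):

    from pathlib import Path

    RUNS_DIR = Path("runs")  # assumed default runs directory

    def resolve_save_dir(project, name, task="detect", mode="train"):
        """Illustrative re-implementation of the nested-project branch shown in the diff."""
        runs = RUNS_DIR / task
        nested = project and len(Path(project).parts) > 1  # e.g. "user/project"
        base = runs / project if nested else (project or runs)
        return Path(base) / (name or mode)

    print(resolve_save_dir(None, None))             # runs/detect/train
    print(resolve_save_dir("my_project", "exp"))    # my_project/exp
    print(resolve_save_dir("user/project", "exp"))  # runs/detect/user/project/exp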
ultralytics/cfg/datasets/TT100K.yaml ADDED
@@ -0,0 +1,346 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Tsinghua-Tencent 100K (TT100K) dataset https://cg.cs.tsinghua.edu.cn/traffic-sign/ by Tsinghua University
+ # Documentation: https://cg.cs.tsinghua.edu.cn/traffic-sign/tutorial.html
+ # Paper: Traffic-Sign Detection and Classification in the Wild (CVPR 2016)
+ # License: CC BY-NC 2.0 license for non-commercial use only
+ # Example usage: yolo train data=TT100K.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ # └── TT100K ← downloads here (~18 GB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: TT100K # dataset root dir
+ train: images/train # train images (relative to 'path') 6105 images
+ val: images/val # val images (relative to 'path') 7641 images (original 'other' split)
+ test: images/test # test images (relative to 'path') 3071 images
+
+ # Classes (221 traffic sign categories, 45 with sufficient training instances)
+ names:
+ 0: pl5
+ 1: pl10
+ 2: pl15
+ 3: pl20
+ 4: pl25
+ 5: pl30
+ 6: pl40
+ 7: pl50
+ 8: pl60
+ 9: pl70
+ 10: pl80
+ 11: pl90
+ 12: pl100
+ 13: pl110
+ 14: pl120
+ 15: pm5
+ 16: pm10
+ 17: pm13
+ 18: pm15
+ 19: pm20
+ 20: pm25
+ 21: pm30
+ 22: pm35
+ 23: pm40
+ 24: pm46
+ 25: pm50
+ 26: pm55
+ 27: pm8
+ 28: pn
+ 29: pne
+ 30: ph4
+ 31: ph4.5
+ 32: ph5
+ 33: ps
+ 34: pg
+ 35: ph1.5
+ 36: ph2
+ 37: ph2.1
+ 38: ph2.2
+ 39: ph2.4
+ 40: ph2.5
+ 41: ph2.8
+ 42: ph2.9
+ 43: ph3
+ 44: ph3.2
+ 45: ph3.5
+ 46: ph3.8
+ 47: ph4.2
+ 48: ph4.3
+ 49: ph4.8
+ 50: ph5.3
+ 51: ph5.5
+ 52: pb
+ 53: pr10
+ 54: pr100
+ 55: pr20
+ 56: pr30
+ 57: pr40
+ 58: pr45
+ 59: pr50
+ 60: pr60
+ 61: pr70
+ 62: pr80
+ 63: pr90
+ 64: p1
+ 65: p2
+ 66: p3
+ 67: p4
+ 68: p5
+ 69: p6
+ 70: p7
+ 71: p8
+ 72: p9
+ 73: p10
+ 74: p11
+ 75: p12
+ 76: p13
+ 77: p14
+ 78: p15
+ 79: p16
+ 80: p17
+ 81: p18
+ 82: p19
+ 83: p20
+ 84: p21
+ 85: p22
+ 86: p23
+ 87: p24
+ 88: p25
+ 89: p26
+ 90: p27
+ 91: p28
+ 92: pa8
+ 93: pa10
+ 94: pa12
+ 95: pa13
+ 96: pa14
+ 97: pb5
+ 98: pc
+ 99: pg
+ 100: ph1
+ 101: ph1.3
+ 102: ph1.5
+ 103: ph2
+ 104: ph3
+ 105: ph4
+ 106: ph5
+ 107: pi
+ 108: pl0
+ 109: pl4
+ 110: pl5
+ 111: pl8
+ 112: pl10
+ 113: pl15
+ 114: pl20
+ 115: pl25
+ 116: pl30
+ 117: pl35
+ 118: pl40
+ 119: pl50
+ 120: pl60
+ 121: pl65
+ 122: pl70
+ 123: pl80
+ 124: pl90
+ 125: pl100
+ 126: pl110
+ 127: pl120
+ 128: pm2
+ 129: pm8
+ 130: pm10
+ 131: pm13
+ 132: pm15
+ 133: pm20
+ 134: pm25
+ 135: pm30
+ 136: pm35
+ 137: pm40
+ 138: pm46
+ 139: pm50
+ 140: pm55
+ 141: pn
+ 142: pne
+ 143: po
+ 144: pr10
+ 145: pr100
+ 146: pr20
+ 147: pr30
+ 148: pr40
+ 149: pr45
+ 150: pr50
+ 151: pr60
+ 152: pr70
+ 153: pr80
+ 154: ps
+ 155: w1
+ 156: w2
+ 157: w3
+ 158: w5
+ 159: w8
+ 160: w10
+ 161: w12
+ 162: w13
+ 163: w16
+ 164: w18
+ 165: w20
+ 166: w21
+ 167: w22
+ 168: w24
+ 169: w28
+ 170: w30
+ 171: w31
+ 172: w32
+ 173: w34
+ 174: w35
+ 175: w37
+ 176: w38
+ 177: w41
+ 178: w42
+ 179: w43
+ 180: w44
+ 181: w45
+ 182: w46
+ 183: w47
+ 184: w48
+ 185: w49
+ 186: w50
+ 187: w51
+ 188: w52
+ 189: w53
+ 190: w54
+ 191: w55
+ 192: w56
+ 193: w57
+ 194: w58
+ 195: w59
+ 196: w60
+ 197: w62
+ 198: w63
+ 199: w66
+ 200: i1
+ 201: i2
+ 202: i3
+ 203: i4
+ 204: i5
+ 205: i6
+ 206: i7
+ 207: i8
+ 208: i9
+ 209: i10
+ 210: i11
+ 211: i12
+ 212: i13
+ 213: i14
+ 214: i15
+ 215: il60
+ 216: il80
+ 217: il100
+ 218: il110
+ 219: io
+ 220: ip
+
+ # Download script/URL (optional) ---------------------------------------------------------------------------------------
+ download: |
+ import json
+ import shutil
+ from pathlib import Path
+
+ from PIL import Image
+
+ from ultralytics.utils import TQDM
+ from ultralytics.utils.downloads import download
+
+
+ def tt100k2yolo(dir):
+ """Convert TT100K annotations to YOLO format with images/{split} and labels/{split} structure."""
+ data_dir = dir / "data"
+ anno_file = data_dir / "annotations.json"
+
+ print("Loading annotations...")
+ with open(anno_file, encoding="utf-8") as f:
+ data = json.load(f)
+
+ # Build class name to index mapping from yaml
+ names = yaml["names"]
+ class_to_idx = {v: k for k, v in names.items()}
+
+ # Create directories
+ for split in ["train", "val", "test"]:
+ (dir / "images" / split).mkdir(parents=True, exist_ok=True)
+ (dir / "labels" / split).mkdir(parents=True, exist_ok=True)
+
+ print("Converting annotations to YOLO format...")
+ skipped = 0
+ for img_id, img_data in TQDM(data["imgs"].items(), desc="Processing"):
+ img_path_str = img_data["path"]
+ if "train" in img_path_str:
+ split = "train"
+ elif "test" in img_path_str:
+ split = "test"
+ else:
+ split = "val"
+
+ # Source and destination paths
+ src_img = data_dir / img_path_str
+ if not src_img.exists():
+ continue
+
+ dst_img = dir / "images" / split / src_img.name
+
+ # Get image dimensions
+ try:
+ with Image.open(src_img) as img:
+ img_width, img_height = img.size
+ except Exception as e:
+ print(f"Error reading {src_img}: {e}")
+ continue
+
+ # Copy image to destination
+ shutil.copy2(src_img, dst_img)
+
+ # Convert annotations
+ label_file = dir / "labels" / split / f"{src_img.stem}.txt"
+ lines = []
+
+ for obj in img_data.get("objects", []):
+ category = obj["category"]
+ if category not in class_to_idx:
+ skipped += 1
+ continue
+
+ bbox = obj["bbox"]
+ xmin, ymin = bbox["xmin"], bbox["ymin"]
+ xmax, ymax = bbox["xmax"], bbox["ymax"]
+
+ # Convert to YOLO format (normalized center coordinates and dimensions)
+ x_center = ((xmin + xmax) / 2.0) / img_width
+ y_center = ((ymin + ymax) / 2.0) / img_height
+ width = (xmax - xmin) / img_width
+ height = (ymax - ymin) / img_height
+
+ # Clip to valid range
+ x_center = max(0, min(1, x_center))
+ y_center = max(0, min(1, y_center))
+ width = max(0, min(1, width))
+ height = max(0, min(1, height))
+
+ cls_idx = class_to_idx[category]
+ lines.append(f"{cls_idx} {x_center:.6f} {y_center:.6f} {width:.6f} {height:.6f}\n")
+
+ # Write label file
+ if lines:
+ label_file.write_text("".join(lines), encoding="utf-8")
+
+ if skipped:
+ print(f"Skipped {skipped} annotations with unknown categories")
+ print("Conversion complete!")
+
+
+ # Download
+ dir = Path(yaml["path"]) # dataset root dir
+ urls = ["https://cg.cs.tsinghua.edu.cn/traffic-sign/data_model_code/data.zip"]
+ download(urls, dir=dir, curl=True, threads=1)
+
+ # Convert
+ tt100k2yolo(dir)
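The dataset YAML above documents the CLI entry point (yolo train data=TT100K.yaml); an equivalent Python sketch, assuming the standard Ultralytics training API and a yolo11n.pt starting checkpoint:

    from ultralytics import YOLO

    # First use triggers the download/convert script embedded in TT100K.yaml (~18 GB).
    model = YOLO("yolo11n.pt")
    model.train(data="TT100K.yaml", epochs=100, imgsz=640)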
ultralytics/engine/exporter.py CHANGED
@@ -505,6 +505,7 @@ class Exporter:
  m.format = self.args.format
  m.max_det = self.args.max_det
  m.xyxy = self.args.nms and not coreml
+ m.shape = None # reset cached shape for new export input size
  if hasattr(model, "pe") and hasattr(m, "fuse"): # for YOLOE models
  m.fuse(model.pe.to(self.device))
  elif isinstance(m, C2f) and not is_tf_format:
ultralytics/engine/model.py CHANGED
@@ -275,7 +275,7 @@ class Model(torch.nn.Module):
  >>> model._load("yolo11n.pt")
  >>> model._load("path/to/weights.pth", task="detect")
  """
- if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):
+ if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://", "ul://")):
  weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"]) # download and return local file
  weights = checks.check_model_file_from_stem(weights) # add suffix, i.e. yolo11n -> yolo11n.pt

@@ -403,7 +403,7 @@ class Model(torch.nn.Module):
  }
  torch.save({**self.ckpt, **updates}, filename)

- def info(self, detailed: bool = False, verbose: bool = True):
+ def info(self, detailed: bool = False, verbose: bool = True, imgsz: int | list[int, int] = 640):
  """Display model information.

  This method provides an overview or detailed information about the model, depending on the arguments
@@ -412,6 +412,7 @@ class Model(torch.nn.Module):
  Args:
  detailed (bool): If True, shows detailed information about the model layers and parameters.
  verbose (bool): If True, prints the information. If False, returns the information as a list.
+ imgsz (int | list[int, int]): Input image size used for FLOPs calculation.

  Returns:
  (list[str]): A list of strings containing various types of information about the model, including model
@@ -423,7 +424,7 @@ class Model(torch.nn.Module):
  >>> info_list = model.info(detailed=True, verbose=False) # Returns detailed info as a list
  """
  self._check_is_pytorch_model()
- return self.model.info(detailed=detailed, verbose=verbose)
+ return self.model.info(detailed=detailed, verbose=verbose, imgsz=imgsz)

  def fuse(self) -> None:
  """Fuse Conv2d and BatchNorm2d layers in the model for optimized inference.
ultralytics/engine/trainer.py CHANGED
@@ -157,6 +157,27 @@ class BaseTrainer:
  if self.device.type in {"cpu", "mps"}:
  self.args.workers = 0 # faster CPU training as time dominated by inference, not dataloading

+ # Callbacks - initialize early so on_pretrain_routine_start can capture original args.data
+ self.callbacks = _callbacks or callbacks.get_default_callbacks()
+
+ if isinstance(self.args.device, str) and len(self.args.device): # i.e. device='0' or device='0,1,2,3'
+ world_size = len(self.args.device.split(","))
+ elif isinstance(self.args.device, (tuple, list)): # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
+ world_size = len(self.args.device)
+ elif self.args.device in {"cpu", "mps"}: # i.e. device='cpu' or 'mps'
+ world_size = 0
+ elif torch.cuda.is_available(): # i.e. device=None or device='' or device=number
+ world_size = 1 # default to device 0
+ else: # i.e. device=None or device=''
+ world_size = 0
+
+ self.ddp = world_size > 1 and "LOCAL_RANK" not in os.environ
+ self.world_size = world_size
+ # Run on_pretrain_routine_start before get_dataset() to capture original args.data (e.g., ul:// URIs)
+ if RANK in {-1, 0} and not self.ddp:
+ callbacks.add_integration_callbacks(self)
+ self.run_callbacks("on_pretrain_routine_start")
+
  # Model and Dataset
  self.model = check_model_file_from_stem(self.args.model) # add suffix, i.e. yolo11n -> yolo11n.pt
  with torch_distributed_zero_first(LOCAL_RANK): # avoid auto-downloading dataset multiple times

@@ -180,28 +201,6 @@ class BaseTrainer:
  self.plot_idx = [0, 1, 2]
  self.nan_recovery_attempts = 0

- # Callbacks
- self.callbacks = _callbacks or callbacks.get_default_callbacks()
-
- if isinstance(self.args.device, str) and len(self.args.device): # i.e. device='0' or device='0,1,2,3'
- world_size = len(self.args.device.split(","))
- elif isinstance(self.args.device, (tuple, list)): # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
- world_size = len(self.args.device)
- elif self.args.device in {"cpu", "mps"}: # i.e. device='cpu' or 'mps'
- world_size = 0
- elif torch.cuda.is_available(): # i.e. device=None or device='' or device=number
- world_size = 1 # default to device 0
- else: # i.e. device=None or device=''
- world_size = 0
-
- self.ddp = world_size > 1 and "LOCAL_RANK" not in os.environ
- self.world_size = world_size
- # Run subprocess if DDP training, else train normally
- if RANK in {-1, 0} and not self.ddp:
- callbacks.add_integration_callbacks(self)
- # Start console logging immediately at trainer initialization
- self.run_callbacks("on_pretrain_routine_start")
-
  def add_callback(self, event: str, callback):
  """Append the given callback to the event's callback list."""
  self.callbacks[event].append(callback)

@@ -631,13 +630,17 @@ class BaseTrainer:
  try:
  if self.args.task == "classify":
  data = check_cls_dataset(self.args.data)
- elif str(self.args.data).rsplit(".", 1)[-1] == "ndjson":
- # Convert NDJSON to YOLO format
+ elif str(self.args.data).rsplit(".", 1)[-1] == "ndjson" or (
+ str(self.args.data).startswith("ul://") and "/datasets/" in str(self.args.data)
+ ):
+ # Convert NDJSON to YOLO format (including ul:// platform dataset URIs)
  import asyncio

  from ultralytics.data.converter import convert_ndjson_to_yolo
+ from ultralytics.utils.checks import check_file

- yaml_path = asyncio.run(convert_ndjson_to_yolo(self.args.data))
+ ndjson_file = check_file(self.args.data) # Resolve ul:// or URL to local .ndjson file
+ yaml_path = asyncio.run(convert_ndjson_to_yolo(ndjson_file))
  self.args.data = str(yaml_path)
  data = check_det_dataset(self.args.data)
  elif str(self.args.data).rsplit(".", 1)[-1] in {"yaml", "yml"} or self.args.task in {
ultralytics/engine/tuner.py CHANGED
@@ -378,6 +378,7 @@ class Tuner:
  metrics = {}
  train_args = {**vars(self.args), **mutated_hyp}
  save_dir = get_save_dir(get_cfg(train_args))
+ train_args["save_dir"] = str(save_dir) # pass save_dir to subprocess to ensure same path is used
  weights_dir = save_dir / "weights"
  try:
  # Train YOLO model with mutated hyperparameters (run in subprocess to avoid dataloader hang)
ultralytics/utils/benchmarks.py CHANGED
@@ -460,7 +460,7 @@ class ProfileModels:
  if file.suffix in {".pt", ".yaml", ".yml"}:
  model = YOLO(str(file))
  model.fuse() # to report correct params and GFLOPs in model.info()
- model_info = model.info()
+ model_info = model.info(imgsz=self.imgsz)
  if self.trt and self.device.type != "cpu" and not engine_file.is_file():
  engine_file = model.export(
  format="engine",
ultralytics/utils/callbacks/platform.py CHANGED
@@ -2,6 +2,7 @@

  import os
  import platform
+ import re
  import socket
  import sys
  from concurrent.futures import ThreadPoolExecutor

@@ -11,9 +12,14 @@ from time import time
  from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING, colorstr

  PREFIX = colorstr("Platform: ")
- _last_upload = 0 # Rate limit model uploads
- _console_logger = None # Global console logger instance
- _system_logger = None # Cached system logger instance
+
+
+ def slugify(text):
+ """Convert text to URL-safe slug (e.g., 'My Project 1' -> 'my-project-1')."""
+ if not text:
+ return text
+ return re.sub(r"-+", "-", re.sub(r"[^a-z0-9\s-]", "", str(text).lower()).replace(" ", "-")).strip("-")[:128]
+

  try:
  assert not TESTS_RUNNING # do not log pytest

@@ -62,7 +68,7 @@ def resolve_platform_uri(uri, hard=True):
  if not api_key:
  raise ValueError(f"ULTRALYTICS_API_KEY required for '{uri}'. Get key at https://alpha.ultralytics.com/settings")

- base = "https://alpha.ultralytics.com/api/v1"
+ base = "https://alpha.ultralytics.com/api/webhooks"
  headers = {"Authorization": f"Bearer {api_key}"}

  # ul://username/datasets/slug

@@ -78,8 +84,6 @@ def resolve_platform_uri(uri, hard=True):
  else:
  raise ValueError(f"Invalid platform URI: {uri}. Use ul://user/datasets/name or ul://user/project/model")

- LOGGER.info(f"Resolving {uri} from Ultralytics Platform...")
-
  try:
  r = requests.head(url, headers=headers, allow_redirects=False, timeout=30)

@@ -139,22 +143,28 @@ def _interp_plot(plot, n=101):
  return result


- def _send(event, data, project, name):
- """Send event to Platform endpoint."""
+ def _send(event, data, project, name, model_id=None):
+ """Send event to Platform endpoint. Returns response JSON on success."""
  try:
- requests.post(
+ payload = {"event": event, "project": project, "name": name, "data": data}
+ if model_id:
+ payload["modelId"] = model_id
+ r = requests.post(
  "https://alpha.ultralytics.com/api/webhooks/training/metrics",
- json={"event": event, "project": project, "name": name, "data": data},
+ json=payload,
  headers={"Authorization": f"Bearer {_api_key}"},
  timeout=10,
- ).raise_for_status()
+ )
+ r.raise_for_status()
+ return r.json()
  except Exception as e:
  LOGGER.debug(f"Platform: Failed to send {event}: {e}")
+ return None


- def _send_async(event, data, project, name):
+ def _send_async(event, data, project, name, model_id=None):
  """Send event asynchronously using bounded thread pool."""
- _executor.submit(_send, event, data, project, name)
+ _executor.submit(_send, event, data, project, name, model_id)


  def _upload_model(model_path, project, name):

@@ -248,132 +258,146 @@ def _get_environment_info():
  return env


+ def _get_project_name(trainer):
+ """Get slugified project and name from trainer args."""
+ raw = str(trainer.args.project)
+ parts = raw.split("/", 1)
+ project = f"{parts[0]}/{slugify(parts[1])}" if len(parts) == 2 else slugify(raw)
+ return project, slugify(str(trainer.args.name or "train"))
+
+
  def on_pretrain_routine_start(trainer):
  """Initialize Platform logging at training start."""
- global _console_logger, _last_upload
-
  if RANK not in {-1, 0} or not trainer.args.project:
  return

- # Initialize upload timer to now so first checkpoint waits 15 min from training start
- _last_upload = time()
+ # Per-trainer state to isolate concurrent training runs
+ trainer._platform_model_id = None
+ trainer._platform_last_upload = time()

- project, name = str(trainer.args.project), str(trainer.args.name or "train")
+ project, name = _get_project_name(trainer)
  url = f"https://alpha.ultralytics.com/{project}/{name}"
  LOGGER.info(f"{PREFIX}Streaming to {url}")

  # Create callback to send console output to Platform
  def send_console_output(content, line_count, chunk_id):
  """Send batched console output to Platform webhook."""
- _send_async("console_output", {"chunkId": chunk_id, "content": content, "lineCount": line_count}, project, name)
+ _send_async(
+ "console_output",
+ {"chunkId": chunk_id, "content": content, "lineCount": line_count},
+ project,
+ name,
+ getattr(trainer, "_platform_model_id", None),
+ )

  # Start console capture with batching (5 lines or 5 seconds)
- _console_logger = ConsoleLogger(batch_size=5, flush_interval=5.0, on_flush=send_console_output)
- _console_logger.start_capture()
-
- # Gather model info for richer metadata
- model_info = {}
- try:
- info = model_info_for_loggers(trainer)
- model_info = {
- "parameters": info.get("model/parameters", 0),
- "gflops": info.get("model/GFLOPs", 0),
- "classes": getattr(trainer.model, "yaml", {}).get("nc", 0), # number of classes
- }
- except Exception:
- pass
+ trainer._platform_console_logger = ConsoleLogger(batch_size=5, flush_interval=5.0, on_flush=send_console_output)
+ trainer._platform_console_logger.start_capture()

  # Collect environment info (W&B-style metadata)
  environment = _get_environment_info()

- _send_async(
+ # Build trainArgs - callback runs before get_dataset() so args.data is still original (e.g., ul:// URIs)
+ # Note: model_info is sent later in on_fit_epoch_end (epoch 0) when the model is actually loaded
+ train_args = {k: str(v) for k, v in vars(trainer.args).items()}
+
+ # Send synchronously to get modelId for subsequent webhooks
+ response = _send(
  "training_started",
  {
- "trainArgs": {k: str(v) for k, v in vars(trainer.args).items()},
+ "trainArgs": train_args,
  "epochs": trainer.epochs,
  "device": str(trainer.device),
- "modelInfo": model_info,
  "environment": environment,
  },
  project,
  name,
  )
+ if response and response.get("modelId"):
+ trainer._platform_model_id = response["modelId"]


  def on_fit_epoch_end(trainer):
  """Log training and system metrics at epoch end."""
- global _system_logger
-
  if RANK not in {-1, 0} or not trainer.args.project:
  return

- project, name = str(trainer.args.project), str(trainer.args.name or "train")
+ project, name = _get_project_name(trainer)
  metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics}

  if trainer.optimizer and trainer.optimizer.param_groups:
  metrics["lr"] = trainer.optimizer.param_groups[0]["lr"]
+
+ # Extract model info at epoch 0 (sent as separate field, not in metrics)
+ model_info = None
  if trainer.epoch == 0:
  try:
- metrics.update(model_info_for_loggers(trainer))
+ info = model_info_for_loggers(trainer)
+ model_info = {
+ "parameters": info.get("model/parameters", 0),
+ "gflops": info.get("model/GFLOPs", 0),
+ "speedMs": info.get("model/speed_PyTorch(ms)", 0),
+ }
  except Exception:
  pass

- # Get system metrics (cache SystemLogger for efficiency)
+ # Get system metrics (cache SystemLogger on trainer for efficiency)
  system = {}
  try:
- if _system_logger is None:
- _system_logger = SystemLogger()
- system = _system_logger.get_metrics(rates=True)
+ if not hasattr(trainer, "_platform_system_logger"):
+ trainer._platform_system_logger = SystemLogger()
+ system = trainer._platform_system_logger.get_metrics(rates=True)
  except Exception:
  pass

+ payload = {
+ "epoch": trainer.epoch,
+ "metrics": metrics,
+ "system": system,
+ "fitness": trainer.fitness,
+ "best_fitness": trainer.best_fitness,
+ }
+ if model_info:
+ payload["modelInfo"] = model_info
+
  _send_async(
  "epoch_end",
- {
- "epoch": trainer.epoch,
- "metrics": metrics,
- "system": system,
- "fitness": trainer.fitness,
- "best_fitness": trainer.best_fitness,
- },
+ payload,
  project,
  name,
+ getattr(trainer, "_platform_model_id", None),
  )


  def on_model_save(trainer):
  """Upload model checkpoint (rate limited to every 15 min)."""
- global _last_upload
-
  if RANK not in {-1, 0} or not trainer.args.project:
  return

  # Rate limit to every 15 minutes (900 seconds)
- if time() - _last_upload < 900:
+ if time() - getattr(trainer, "_platform_last_upload", 0) < 900:
  return

  model_path = trainer.best if trainer.best and Path(trainer.best).exists() else trainer.last
  if not model_path:
  return

- project, name = str(trainer.args.project), str(trainer.args.name or "train")
+ project, name = _get_project_name(trainer)
  _upload_model_async(model_path, project, name)
- _last_upload = time()
+ trainer._platform_last_upload = time()


  def on_train_end(trainer):
  """Log final results, upload best model, and send validation plot data."""
- global _console_logger
-
  if RANK not in {-1, 0} or not trainer.args.project:
  return

- project, name = str(trainer.args.project), str(trainer.args.name or "train")
+ project, name = _get_project_name(trainer)

  # Stop console capture
- if _console_logger:
- _console_logger.stop_capture()
- _console_logger = None
+ if hasattr(trainer, "_platform_console_logger") and trainer._platform_console_logger:
+ trainer._platform_console_logger.stop_capture()
+ trainer._platform_console_logger = None

  # Upload best model (blocking to ensure it completes)
  model_path = None

@@ -411,6 +435,7 @@ def on_train_end(trainer):
  },
  project,
  name,
+ getattr(trainer, "_platform_model_id", None),
  )
  url = f"https://alpha.ultralytics.com/{project}/{name}"
  LOGGER.info(f"{PREFIX}View results at {url}")
ultralytics/utils/checks.py CHANGED
@@ -592,7 +592,7 @@ def check_file(file, suffix="", download=True, download_dir=".", hard=True):
  """Search/download file (if necessary), check suffix (if provided), and return path.

  Args:
- file (str): File name or path, or platform URI (ul://username/datasets/name).
+ file (str): File name or path, URL, platform URI (ul://), or GCS path (gs://).
  suffix (str | tuple): Acceptable suffix or tuple of suffixes to validate against the file.
  download (bool): Whether to download the file if it doesn't exist locally.
  download_dir (str): Directory to download the file to.

@@ -616,13 +616,20 @@ def check_file(file, suffix="", download=True, download_dir=".", hard=True):
  url = resolve_platform_uri(file, hard=hard) # Convert to signed HTTPS URL
  if url is None:
  return [] # Not found, soft fail (consistent with file search behavior)
- local_file = Path(download_dir) / url2file(url)
+ # Use URI path for unique directory structure: ul://user/project/model -> user/project/model/filename.pt
+ uri_path = file[5:] # Remove "ul://"
+ local_file = Path(download_dir) / uri_path / url2file(url)
  if local_file.exists():
  LOGGER.info(f"Found {clean_url(url)} locally at {local_file}")
  else:
+ local_file.parent.mkdir(parents=True, exist_ok=True)
  downloads.safe_download(url=url, file=local_file, unzip=False)
  return str(local_file)
- elif download and file.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")): # download
+ elif download and file.lower().startswith(
+ ("https://", "http://", "rtsp://", "rtmp://", "tcp://", "gs://")
+ ): # download
+ if file.startswith("gs://"):
+ file = "https://storage.googleapis.com/" + file[5:] # convert gs:// to public HTTPS URL
  url = file # warning: Pathlib turns :// -> :/
  file = Path(download_dir) / url2file(file) # '%2F' to '/', split https://url.com/file.txt?auth
  if file.exists():

@@ -957,7 +964,7 @@ def is_rockchip():
  with open("/proc/device-tree/compatible") as f:
  dev_str = f.read()
  *_, soc = dev_str.split(",")
- if soc.replace("\x00", "") in RKNN_CHIPS:
+ if soc.replace("\x00", "").split("-", 1)[0] in RKNN_CHIPS:
  return True
  except OSError:
  return False
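The new gs:// branch in check_file() above only rewrites the URI to the public Google Cloud Storage HTTPS endpoint before the normal download path runs; bucket and object names below are placeholders:

    uri = "gs://my-bucket/weights/model.pt"             # hypothetical GCS path
    url = "https://storage.googleapis.com/" + uri[5:]   # https://storage.googleapis.com/my-bucket/weights/model.pt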
ultralytics/utils/export/imx.py CHANGED
@@ -219,7 +219,7 @@ def torch2imx(
  Examples:
  >>> from ultralytics import YOLO
  >>> model = YOLO("yolo11n.pt")
- >>> path, _ = export_imx(model, "model.imx", conf=0.25, iou=0.45, max_det=300)
+ >>> path, _ = export_imx(model, "model.imx", conf=0.25, iou=0.7, max_det=300)

  Notes:
  - Requires model_compression_toolkit, onnx, edgemdt_tpc, and edge-mdt-cl packages
ultralytics/utils/metrics.py CHANGED
@@ -315,7 +315,7 @@ class ConfusionMatrix(DataExportMixin):
  matches (dict): Contains the indices of ground truths and predictions categorized into TP, FP and FN.
  """

- def __init__(self, names: dict[int, str] = [], task: str = "detect", save_matches: bool = False):
+ def __init__(self, names: dict[int, str] = {}, task: str = "detect", save_matches: bool = False):
  """Initialize a ConfusionMatrix instance.

  Args:
ultralytics/utils/plotting.py CHANGED
@@ -972,6 +972,9 @@ def plot_tune_results(csv_file: str = "tune_results.csv", exclude_zero_fitness_p
  if exclude_zero_fitness_points:
  mask = fitness > 0 # exclude zero-fitness points
  x, fitness = x[mask], fitness[mask]
+ if len(fitness) == 0:
+ LOGGER.warning("No valid fitness values to plot (all iterations may have failed)")
+ return
  # Iterative sigma rejection on lower bound only
  for _ in range(3): # max 3 iterations
  mean, std = fitness.mean(), fitness.std()
ultralytics/utils/tal.py CHANGED
@@ -354,7 +354,8 @@ def make_anchors(feats, strides, grid_cell_offset=0.5):
  anchor_points, stride_tensor = [], []
  assert feats is not None
  dtype, device = feats[0].dtype, feats[0].device
- for i, stride in enumerate(strides):
+ for i in range(len(feats)): # use len(feats) to avoid TracerWarning from iterating over strides tensor
+ stride = strides[i]
  h, w = feats[i].shape[2:] if isinstance(feats, list) else (int(feats[i][0]), int(feats[i][1]))
  sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset # shift x
  sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset # shift y
ultralytics/utils/tqdm.py CHANGED
@@ -317,7 +317,10 @@ class TQDM:
  # Final display
  if self.total and self.n >= self.total:
  self.n = self.total
- self._display(final=True)
+ if self.n != self.last_print_n: # Skip if 100% already shown
+ self._display(final=True)
+ else:
+ self._display(final=True)

  # Cleanup
  if self.leave:
ultralytics/utils/tuner.py CHANGED
@@ -35,9 +35,6 @@ def run_ray_tune(
  >>> result_grid = model.tune(data="coco8.yaml", use_ray=True)
  """
  LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")
- if train_args is None:
- train_args = {}
-
  try:
  checks.check_requirements("ray[tune]")