dgenerate-ultralytics-headless 8.3.192-py3-none-any.whl → 8.3.193-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.192
+ Version: 8.3.193
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,4 +1,4 @@
- dgenerate_ultralytics_headless-8.3.192.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.193.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
  tests/test_cli.py,sha256=EMf5gTAopOnIz8VvzaM-Qb044o7D0flnUHYQ-2ffOM4,5670
@@ -8,11 +8,11 @@ tests/test_exports.py,sha256=jBMAWADCqBsPaZuhZKU7JgQVA0gfYHHFwOI6kx84bqo,10885
  tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
  tests/test_python.py,sha256=IpjqS2wKSfZaukSdW7QtXeyijXxZ1uXiJLEdlkK_0jQ,27908
  tests/test_solutions.py,sha256=6wJ9-lhyWSAm7zaR4D9L_DrUA3iJU1NgqmbQO6PIuvo,13211
- ultralytics/__init__.py,sha256=fv9DpTt7g968y9c4iYJYq0uIRnM0qwT9YwnFXdpxv7o,730
+ ultralytics/__init__.py,sha256=xBsGMQjlH8FxjOGSLqnh06Xs9hcGM2u-QrR-OLhkHJk,730
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=B6lVzv3ISXgZFgKv8cB1YumCvhFUDVr7RNXbHGF3V9Q,39867
+ ultralytics/cfg/__init__.py,sha256=oi3unUYklhdf-uF_0v9dSBOj5EyPRKQbgVVKHrztKns,39955
  ultralytics/cfg/default.yaml,sha256=1SspGAK_K_DT7DBfEScJh4jsJUTOxahehZYj92xmj7o,8347
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=J4ItoUlE_EiYTmp1DFKYHfbqHkj8j4wUtRJQhaMIlBM,3275
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=VZ_KKFX0H2YvlFVJ8JHcLWYBZ2xiQ6Z-ROSTiKWpS7c,1211
@@ -47,7 +47,7 @@ ultralytics/cfg/datasets/open-images-v7.yaml,sha256=wK9v3OAGdHORkFdqoBi0hS0fa1b7
  ultralytics/cfg/datasets/package-seg.yaml,sha256=V4uyTDWWzgft24y9HJWuELKuZ5AndAHXbanxMI6T8GU,849
  ultralytics/cfg/datasets/signature.yaml,sha256=gBvU3715gVxVAafI_yaYczGX3kfEfA4BttbiMkgOXNk,774
  ultralytics/cfg/datasets/tiger-pose.yaml,sha256=Y_8htA4--6hmpqHTW-Ix4t9SdaWenSSyl_FUtI2A7n8,926
- ultralytics/cfg/datasets/xView.yaml,sha256=P347BJlmb7AG7YC29JyyOtNy52QqZ87Sn7gFP8Dx86s,5353
+ ultralytics/cfg/datasets/xView.yaml,sha256=eaQ7bYDRrOMRdaxN_wzlH_fN0wdIlT_GQDtPzrHS2-s,5353
  ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml,sha256=1Ycp9qMrwpb8rq7cqht3Q-1gMN0R87U35nm2j_isdro,524
  ultralytics/cfg/models/11/yolo11-cls.yaml,sha256=17l5GdN-Vst4LvafsK2-q6Li9VX9UlUcT5ClCtikweE,1412
  ultralytics/cfg/models/11/yolo11-obb.yaml,sha256=3M_c06B-y8da4tunHVxQQ-iFUNLKUfofqCZTpnH5FEU,2034
@@ -115,17 +115,17 @@ ultralytics/data/dataset.py,sha256=JC3sHsKva65sSptdAJHfh90yyag8WrqGXcXNpD9C-f0,3
  ultralytics/data/loaders.py,sha256=Mt6ogS2SUq8SE6oJajX7xSyzIxvwjKUhxFbIynhBlGk,31748
  ultralytics/data/split.py,sha256=5ubnL_wsEutFQOj4I4K01L9UpZrrO_vO3HrydSLJyIY,5107
  ultralytics/data/split_dota.py,sha256=gSGHRWZFQOofMkb8GcTtkUb5gV5jtOV7bzVMovTW0ak,12910
- ultralytics/data/utils.py,sha256=Y92D9qxVBZ0uOdhYbFctcBZhRTxOiYYwfFQsVYwtS-8,36702
+ ultralytics/data/utils.py,sha256=1YF_-v-laBG41WGmnLo4xsfOhCNNN1og8b2yo11FJos,36716
  ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
  ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J3jKrnPw,1768
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=llc7LdSYD1bkwFw3iUs_xHEZZQk6wE64R6a6LNduUzU,75400
- ultralytics/engine/model.py,sha256=oxlYx1nMEOcqLFr_clFhWv3Lni-Ykl3-k1NX9JoHiqY,53520
- ultralytics/engine/predictor.py,sha256=bPG_YOnrtXr8zM8QGEkgJJWT5zaDJsN9hEhH6sEkYUs,22478
+ ultralytics/engine/exporter.py,sha256=kPX10s0YhRlSLlCSP9MigAO8yjzZ1Azi2apOWktSTjA,75411
+ ultralytics/engine/model.py,sha256=q3rj1QmJoR4uYXb6dvn2dVRheEY8TzJhm9KznD_VDso,53488
+ ultralytics/engine/predictor.py,sha256=EzFcRzihNTYgdayviLwEk7pUOotNoEVbQtVFI18jNpM,22625
  ultralytics/engine/results.py,sha256=115lVbiqzyho1fXm-YpqQBtKiv-Wo2FPNhz6ExYHtCk,71499
- ultralytics/engine/trainer.py,sha256=VXgBtW7S7-6XoUQKzQIE6gV4kSo-TPDXHzrT4OAUasg,40505
+ ultralytics/engine/trainer.py,sha256=_mTG-z6xnOdFUmB6uOF8HQkFb_uMwP0MrJHlt7X3zVw,40457
  ultralytics/engine/tuner.py,sha256=XuqcjyGpD79pUVn-PXlJJGKXgH1yblPdYBH_R2kHWSU,20586
  ultralytics/engine/validator.py,sha256=8ky0lcMCYDY7RGYRUowDAKxEMfsPBLpT7LlgfHA-IsY,16681
  ultralytics/hub/__init__.py,sha256=RmrZw24uyFF109SLVba1s-ItjllfzRKlCgQ_U0RVRUo,6720
@@ -186,7 +186,7 @@ ultralytics/models/yolo/pose/val.py,sha256=A4Cvzmqyu0oW5UC9mqNL731ngAwqQBic6tdMc
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
  ultralytics/models/yolo/segment/predict.py,sha256=qlprQCZn4_bpjpI08U0MU9Q9_1gpHrw_7MXwtXE1l1Y,5377
  ultralytics/models/yolo/segment/train.py,sha256=bBEnEUHqN3QlbreD5Fy-h1X5Ps-dONH2r9EnoLc72x4,3762
- ultralytics/models/yolo/segment/val.py,sha256=8ASdhhSaJ4NjRFHEuL26Uwt4zTKyXmcGvokHVg74ZsE,11345
+ ultralytics/models/yolo/segment/val.py,sha256=Xy2T-Y4IjjuR5GqWGJjHmCLGL9FxEWfMXDoCoP_ZBBM,11147
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
  ultralytics/models/yolo/world/train.py,sha256=9tSDba3X9VA2y-2oJJUMdXUBi-IdgRmqLW18UqfukvI,7866
  ultralytics/models/yolo/world/train_world.py,sha256=lk9z_INGPSTP_W7Rjh3qrWSmjHaxOJtGngonh1cj2SM,9551
@@ -194,10 +194,10 @@ ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xt
  ultralytics/models/yolo/yoloe/predict.py,sha256=GmQxCQe7sLomAujde53jQzquzryNn6fEjS4Oalf3mPs,7124
  ultralytics/models/yolo/yoloe/train.py,sha256=lAb-5TgYA_Z6rwEYCMdPcfjJmCGYQqXE5I40n25CTRk,14067
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
- ultralytics/models/yolo/yoloe/val.py,sha256=6uLH3znvn4iF_4F9RCbXWtiy9lsSQBUC9KrhU_2rIQs,9787
- ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
- ultralytics/nn/autobackend.py,sha256=lvotDJeABLcF7Xb0muAoxM6V3MI0EWKq9vAClHbm7tI,41376
- ultralytics/nn/tasks.py,sha256=WSEA_odbXC0R_EGlcoOUhvE8ERIj62-7Tt9rWbo54nE,72480
+ ultralytics/models/yolo/yoloe/val.py,sha256=MnS2YwhRxdqXPRlExHt-9HRp8KKIHuFdmiNH1z6LedI,9795
+ ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
+ ultralytics/nn/autobackend.py,sha256=yv_-p5F3BhOrZCXUdRZY9K9D6-5hSUUuq23Jm9FKAhk,41360
+ ultralytics/nn/tasks.py,sha256=tvoVVqVoMZuylLmUJNDs9eX4bdFbDibQKd3lJed5fYY,70470
  ultralytics/nn/text_model.py,sha256=lserJWlXNzP31jB4xA-7gkbhB0VsMBGiE9G8wYpztvE,15275
  ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
@@ -236,11 +236,11 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
  ultralytics/trackers/utils/gmc.py,sha256=a4WuIh976_GYogvlQEPKTNE59JNNtSNlT_IPrz4wmrM,14028
  ultralytics/trackers/utils/kalman_filter.py,sha256=PPmM0lwBMdT_hGojvfLoUsBUFMBBMNRAxKbMcQa3wJ0,21619
  ultralytics/trackers/utils/matching.py,sha256=uSYtywqi1lE_uNN1FwuBFPyISfDQXHMu8K5KH69nrRI,7160
- ultralytics/utils/__init__.py,sha256=AKhyF3aM0jwcgAlA2w6ZwAb6K2AQJAxY4q3GnETOxFY,53065
+ ultralytics/utils/__init__.py,sha256=qn-3RTwLSQ25d23glCtX39ZH4gQyXi1PQkjk5VZa7Vw,53256
  ultralytics/utils/autobatch.py,sha256=i6KYLLSItKP1Q2IUlTPHrZhjcxl7UOjs0Seb8bF8pvM,5124
  ultralytics/utils/autodevice.py,sha256=Od9SGx6xAQoX-3L62PS7I6xOxbbqjYLR4Wipgn5WoDc,8843
  ultralytics/utils/benchmarks.py,sha256=zKI-DxEqaVmqlE2pg9p6j1kI7Efo1OyM1NnKubYpDU8,31458
- ultralytics/utils/checks.py,sha256=jLM9CuKI6aZGIGG4W2n_CORCApGnUb2a1Z5YH87ngew,34453
+ ultralytics/utils/checks.py,sha256=WBuJKFiXGqf-5n0S_QNwRet3ZxVIFMdG0VPZSoOJwYM,34468
  ultralytics/utils/dist.py,sha256=A9lDGtGefTjSVvVS38w86GOdbtLzNBDZuDGK0MT4PRI,4170
  ultralytics/utils/downloads.py,sha256=mLOLonKQsePC15sLVZJzGyLmD_TZPkL1T_qd0gUb4lA,23029
  ultralytics/utils/errors.py,sha256=XT9Ru7ivoBgofK6PlnyigGoa7Fmf5nEhyHtnD-8TRXI,1584
@@ -255,11 +255,11 @@ ultralytics/utils/nms.py,sha256=wCRQ7O7shv5ccEWHgtF9Ky_vUeyumxFLWBFEj1h0U54,1419
  ultralytics/utils/ops.py,sha256=z6feVEbs57eN18Nl4IEdhgUiChGAZrq4bQjDkp-0MxA,26630
  ultralytics/utils/patches.py,sha256=j0fXwX3YqKrON7yrmSVkXsn__tsvs4qVuysH-hzcxOE,6541
  ultralytics/utils/plotting.py,sha256=6iPtxQJ7JFTv2vWpSzP90plkksXhvql1EVQaKomkuiQ,47515
- ultralytics/utils/tal.py,sha256=K3lPxC3bbduP8ho-toJ9VHnklo5IaGkqogEaQorbrvs,20924
- ultralytics/utils/torch_utils.py,sha256=UrFD-R-3XhYSHgFg3rF2HGyk0YojM2jNCyxiIsOb-TY,39493
- ultralytics/utils/tqdm.py,sha256=UyCozFXNACO0C_zE2fcqO9IFsbNYVAbJ_zCObyNb8nY,16908
+ ultralytics/utils/tal.py,sha256=LrziY_ZHz4wln3oOnqAzgyPaXKoup17Sa103BpuaQFU,20935
+ ultralytics/utils/torch_utils.py,sha256=SKgOEaa2_7k7tGI4VfNWcQV4wvjLWVUusJreEc4bPmI,39488
+ ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
  ultralytics/utils/triton.py,sha256=-qG-ZP5uotcD8FZyaAcEGHX7Cv9_yBG8UHvmmXpYZkM,5422
- ultralytics/utils/tuner.py,sha256=bHr09Fz-0-t0ei55gX5wJh-obyiAQoicP7HUVM2I8qA,6826
+ ultralytics/utils/tuner.py,sha256=9D4dSIvwwxcNSJcH2QJ92qiIVi9zu-1L7_PBZ8okDyE,6816
  ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
  ultralytics/utils/callbacks/base.py,sha256=dGir0vkJY4jjprW63e23Qy4kHUT5dOINPii6HnwJuPg,6893
  ultralytics/utils/callbacks/clearml.py,sha256=xr5mZT_cY6AY_drbdCXFt-Dp2fOjRELxLDhDoRhNPg8,6067
@@ -272,8 +272,8 @@ ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMv
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
  ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
- dgenerate_ultralytics_headless-8.3.192.dist-info/METADATA,sha256=I7BGHAsTcMR9_eqnSCrL1FyCSzsfQn2RKDXW-XsYfAQ,38789
- dgenerate_ultralytics_headless-8.3.192.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.192.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.192.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.192.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.193.dist-info/METADATA,sha256=1wXryxIUWSf18KfYeyQmgtB1c56Ka8AThXX78xrxqIY,38789
+ dgenerate_ultralytics_headless-8.3.193.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.193.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.193.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.193.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.192"
+ __version__ = "8.3.193"

  import os

ultralytics/cfg/__init__.py CHANGED
@@ -15,6 +15,7 @@ from ultralytics.utils import (
  DEFAULT_CFG,
  DEFAULT_CFG_DICT,
  DEFAULT_CFG_PATH,
+ FLOAT_OR_INT,
  IS_VSCODE,
  LOGGER,
  RANK,
@@ -22,6 +23,7 @@ from ultralytics.utils import (
  RUNS_DIR,
  SETTINGS,
  SETTINGS_FILE,
+ STR_OR_PATH,
  TESTS_RUNNING,
  YAML,
  IterableSimpleNamespace,
@@ -267,7 +269,7 @@ def cfg2dict(cfg: str | Path | dict | SimpleNamespace) -> dict:
  - If cfg is a SimpleNamespace object, it's converted to a dictionary using vars().
  - If cfg is already a dictionary, it's returned unchanged.
  """
- if isinstance(cfg, (str, Path)):
+ if isinstance(cfg, STR_OR_PATH):
  cfg = YAML.load(cfg) # load dict
  elif isinstance(cfg, SimpleNamespace):
  cfg = vars(cfg) # convert to dict
@@ -309,7 +311,7 @@ def get_cfg(cfg: str | Path | dict | SimpleNamespace = DEFAULT_CFG_DICT, overrid

  # Special handling for numeric project/name
  for k in "project", "name":
- if k in cfg and isinstance(cfg[k], (int, float)):
+ if k in cfg and isinstance(cfg[k], FLOAT_OR_INT):
  cfg[k] = str(cfg[k])
  if cfg.get("name") == "model": # assign model to 'name' arg
  cfg["name"] = str(cfg.get("model", "")).partition(".")[0]
@@ -352,7 +354,7 @@ def check_cfg(cfg: dict, hard: bool = True) -> None:
  """
  for k, v in cfg.items():
  if v is not None: # None values may be from optional args
- if k in CFG_FLOAT_KEYS and not isinstance(v, (int, float)):
+ if k in CFG_FLOAT_KEYS and not isinstance(v, FLOAT_OR_INT):
  if hard:
  raise TypeError(
  f"'{k}={v}' is of invalid type {type(v).__name__}. "
@@ -360,7 +362,7 @@ def check_cfg(cfg: dict, hard: bool = True) -> None:
  )
  cfg[k] = float(v)
  elif k in CFG_FRACTION_KEYS:
- if not isinstance(v, (int, float)):
+ if not isinstance(v, FLOAT_OR_INT):
  if hard:
  raise TypeError(
  f"'{k}={v}' is of invalid type {type(v).__name__}. "
@@ -413,7 +415,7 @@ def get_save_dir(args: SimpleNamespace, name: str = None) -> Path:
  name = name or args.name or f"{args.mode}"
  save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in {-1, 0} else True)

- return Path(save_dir)
+ return Path(save_dir).resolve() # resolve to display full path in console


  def _handle_deprecation(custom: dict) -> dict:
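Note on the FLOAT_OR_INT / STR_OR_PATH changes above: isinstance() accepts a tuple of types, so hoisting the tuples to module level (they are defined in ultralytics/utils/__init__.py later in this diff) avoids rebuilding them at every call site. A minimal sketch of the pattern; normalize_name() is a hypothetical helper, not part of the package:

```python
from pathlib import Path

# Module-level type tuples, mirroring the constants added in 8.3.193
FLOAT_OR_INT = (float, int)
STR_OR_PATH = (str, Path)


def normalize_name(value):
    """Hypothetical helper: coerce numeric run names to strings, as get_cfg() does for project/name."""
    return str(value) if isinstance(value, FLOAT_OR_INT) else value


print(normalize_name(123))                        # "123"
print(isinstance(Path("cfg.yaml"), STR_OR_PATH))  # True
```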
ultralytics/cfg/datasets/xView.yaml CHANGED
@@ -87,7 +87,7 @@ download: |
  from PIL import Image

  from ultralytics.utils import TQDM
- from ultralytics.data.utils import autosplit
+ from ultralytics.data.split import autosplit
  from ultralytics.utils.ops import xyxy2xywhn

ultralytics/data/utils.py CHANGED
@@ -216,7 +216,7 @@ def verify_image_label(args: tuple) -> list:
  points = lb[:, 1:]
  # Coordinate points check with 1% tolerance
  assert points.max() <= 1.01, f"non-normalized or out of bounds coordinates {points[points > 1.01]}"
- assert lb.min() >= -0.01, f"negative class labels {lb[lb < -0.01]}"
+ assert lb.min() >= -0.01, f"negative class labels or coordinate {lb[lb < -0.01]}"

  # All labels
  max_cls = 0 if single_cls else lb[:, 0].max() # max label count
ultralytics/engine/exporter.py CHANGED
@@ -90,6 +90,7 @@ from ultralytics.utils import (
  RKNN_CHIPS,
  ROOT,
  SETTINGS,
+ TORCH_VERSION,
  WINDOWS,
  YAML,
  callbacks,
@@ -567,7 +568,7 @@ class Exporter:
  @try_export
  def export_torchscript(self, prefix=colorstr("TorchScript:")):
  """Export YOLO model to TorchScript format."""
- LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...")
+ LOGGER.info(f"\n{prefix} starting export with torch {TORCH_VERSION}...")
  f = self.file.with_suffix(".torchscript")

  ts = torch.jit.trace(NMSModel(self.model, self.args) if self.args.nms else self.model, self.im, strict=False)
@@ -586,7 +587,7 @@
  """Export YOLO model to ONNX format."""
  requirements = ["onnx>=1.12.0"]
  if self.args.simplify:
- requirements += ["onnxslim>=0.1.65", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
+ requirements += ["onnxslim==0.1.65", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
  check_requirements(requirements)
  import onnx # noqa

@@ -648,7 +649,7 @@
  import openvino as ov

  LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
- assert TORCH_1_13, f"OpenVINO export requires torch>=1.13.0 but torch=={torch.__version__} is installed"
+ assert TORCH_1_13, f"OpenVINO export requires torch>=1.13.0 but torch=={TORCH_VERSION} is installed"
  ov_model = ov.convert_model(
  NMSModel(self.model, self.args) if self.args.nms else self.model,
  input=None if self.args.dynamic else [self.im.shape],
@@ -964,7 +965,7 @@
  "ai-edge-litert>=1.2.0,<1.4.0", # required by 'onnx2tf' package
  "onnx>=1.12.0",
  "onnx2tf>=1.26.3",
- "onnxslim>=0.1.65",
+ "onnxslim==0.1.65",
  "onnxruntime-gpu" if cuda else "onnxruntime",
  "protobuf>=5",
  ),
ultralytics/engine/model.py CHANGED
@@ -12,7 +12,7 @@ from PIL import Image

  from ultralytics.cfg import TASK2DATA, get_cfg, get_save_dir
  from ultralytics.engine.results import Results
- from ultralytics.nn.tasks import attempt_load_one_weight, guess_model_task, yaml_model_load
+ from ultralytics.nn.tasks import guess_model_task, load_checkpoint, yaml_model_load
  from ultralytics.utils import (
  ARGV,
  ASSETS,
@@ -294,7 +294,7 @@ class Model(torch.nn.Module):
  weights = checks.check_model_file_from_stem(weights) # add suffix, i.e. yolo11n -> yolo11n.pt

  if str(weights).rpartition(".")[-1] == "pt":
- self.model, self.ckpt = attempt_load_one_weight(weights)
+ self.model, self.ckpt = load_checkpoint(weights)
  self.task = self.model.task
  self.overrides = self.model.args = self._reset_ckpt_args(self.model.args)
  self.ckpt_path = self.model.pt_path
@@ -385,7 +385,7 @@ class Model(torch.nn.Module):
  self._check_is_pytorch_model()
  if isinstance(weights, (str, Path)):
  self.overrides["pretrained"] = weights # remember the weights for DDP training
- weights, self.ckpt = attempt_load_one_weight(weights)
+ weights, self.ckpt = load_checkpoint(weights)
  self.model.load(weights)
  return self

@@ -802,7 +802,7 @@ class Model(torch.nn.Module):
  # Update model and cfg after training
  if RANK in {-1, 0}:
  ckpt = self.trainer.best if self.trainer.best.exists() else self.trainer.last
- self.model, self.ckpt = attempt_load_one_weight(ckpt)
+ self.model, self.ckpt = load_checkpoint(ckpt)
  self.overrides = self.model.args
  self.metrics = getattr(self.trainer.validator, "metrics", None) # TODO: no metrics returned by DDP
  return self.metrics
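For downstream code that imported the old helper directly, these hunks read as a one-for-one rename: `load_checkpoint(weight)` returns the same `(model, ckpt)` pair that `attempt_load_one_weight()` did. A minimal sketch under that assumption (it expects a local `yolo11n.pt` checkpoint to exist):

```python
from ultralytics.nn.tasks import load_checkpoint  # replaces attempt_load_one_weight in 8.3.193

# Same call shape as before: returns (model, ckpt), where ckpt is the raw checkpoint dict
model, ckpt = load_checkpoint("yolo11n.pt", device=None, fuse=False)
print(model.task, type(ckpt))
```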
ultralytics/engine/predictor.py CHANGED
@@ -266,13 +266,17 @@ class BasePredictor:
  channels=getattr(self.model, "ch", 3),
  )
  self.source_type = self.dataset.source_type
- if not getattr(self, "stream", True) and (
+ long_sequence = (
  self.source_type.stream
  or self.source_type.screenshot
  or len(self.dataset) > 1000 # many images
  or any(getattr(self.dataset, "video_flag", [False]))
- ): # videos
- LOGGER.warning(STREAM_WARNING)
+ )
+ if long_sequence:
+ import torchvision # noqa (import here triggers torchvision NMS use in nms.py)
+
+ if not getattr(self, "stream", True): # videos
+ LOGGER.warning(STREAM_WARNING)
  self.vid_writer = {}

  @smart_inference_mode()
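The predictor hunk folds the stream/screenshot/video heuristic into a `long_sequence` flag and imports torchvision up front when a long run is expected (per the in-line comment, so NMS can take the torchvision path). A generic sketch of that deferred-import idea, with a hypothetical helper unrelated to the Ultralytics internals:

```python
def prepare_backend(num_items: int, is_stream: bool) -> bool:
    """Hypothetical helper: pay for a heavy optional import only when the job is long enough to benefit."""
    long_sequence = is_stream or num_items > 1000
    if long_sequence:
        import torchvision  # noqa: F401  # heavy import, deferred until it is actually worth it
    return long_sequence


print(prepare_backend(5, False))  # False: short job, no extra import
```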
ultralytics/engine/trainer.py CHANGED
@@ -24,7 +24,7 @@ from torch import nn, optim
  from ultralytics import __version__
  from ultralytics.cfg import get_cfg, get_save_dir
  from ultralytics.data.utils import check_cls_dataset, check_det_dataset
- from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
+ from ultralytics.nn.tasks import load_checkpoint
  from ultralytics.utils import (
  DEFAULT_CFG,
  GIT,
@@ -644,10 +644,10 @@ class BaseTrainer:
  cfg, weights = self.model, None
  ckpt = None
  if str(self.model).endswith(".pt"):
- weights, ckpt = attempt_load_one_weight(self.model)
+ weights, ckpt = load_checkpoint(self.model)
  cfg = weights.yaml
  elif isinstance(self.args.pretrained, (str, Path)):
- weights, _ = attempt_load_one_weight(self.args.pretrained)
+ weights, _ = load_checkpoint(self.args.pretrained)
  self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1) # calls Model(cfg, weights)
  return ckpt

@@ -768,7 +768,7 @@ class BaseTrainer:
  last = Path(check_file(resume) if exists else get_latest_run())

  # Check that resume data YAML exists, otherwise strip to force re-download of dataset
- ckpt_args = attempt_load_weights(last).args
+ ckpt_args = load_checkpoint(last)[0].args
  if not isinstance(ckpt_args["data"], dict) and not Path(ckpt_args["data"]).exists():
  ckpt_args["data"] = self.args.data

ultralytics/models/yolo/segment/val.py CHANGED
@@ -133,8 +133,17 @@ class SegmentationValidator(DetectionValidator):
  (Dict[str, Any]): Prepared batch with processed annotations.
  """
  prepared_batch = super()._prepare_batch(si, batch)
- midx = [si] if self.args.overlap_mask else batch["batch_idx"] == si
- prepared_batch["masks"] = batch["masks"][midx]
+ nl = len(prepared_batch["cls"])
+ if self.args.overlap_mask:
+ masks = batch["masks"][si]
+ index = torch.arange(1, nl + 1, device=masks.device).view(nl, 1, 1)
+ masks = (masks == index).float()
+ else:
+ masks = batch["masks"][batch["batch_idx"] == si]
+ if nl and self.process is ops.process_mask_native:
+ masks = F.interpolate(masks[None], prepared_batch["imgsz"], mode="bilinear", align_corners=False)[0]
+ masks = masks.gt_(0.5)
+ prepared_batch["masks"] = masks
  return prepared_batch

  def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, Any]) -> dict[str, np.ndarray]:
@@ -158,20 +167,11 @@ class SegmentationValidator(DetectionValidator):
  >>> correct_preds = validator._process_batch(preds, batch)
  """
  tp = super()._process_batch(preds, batch)
- gt_cls, gt_masks = batch["cls"], batch["masks"]
+ gt_cls = batch["cls"]
  if len(gt_cls) == 0 or len(preds["cls"]) == 0:
  tp_m = np.zeros((len(preds["cls"]), self.niou), dtype=bool)
  else:
- pred_masks = preds["masks"]
- if self.args.overlap_mask:
- nl = len(gt_cls)
- index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
- gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640)
- gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
- if gt_masks.shape[1:] != pred_masks.shape[1:]:
- gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
- gt_masks = gt_masks.gt_(0.5)
- iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
+ iou = mask_iou(batch["masks"].flatten(1), preds["masks"].flatten(1))
  tp_m = self.match_predictions(preds["cls"], gt_cls, iou).cpu().numpy()
  tp.update({"tp_m": tp_m}) # update tp with mask IoU
  return tp
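To make the segmentation-validator change easier to follow: overlap-encoded ground-truth masks (a single HxW map storing instance IDs) are now expanded to per-instance binary masks inside `_prepare_batch()`, and `_process_batch()` then computes IoU on flattened masks. A self-contained sketch of those two steps; the IoU below is hand-rolled for illustration rather than the package's `mask_iou`:

```python
import torch

nl = 3  # number of ground-truth instances
overlap = torch.tensor([[0, 1, 1], [2, 2, 0], [3, 3, 3]], dtype=torch.float32)  # (H, W) instance-ID map

# Expand to per-instance binary masks via broadcasting, as in the new _prepare_batch()
index = torch.arange(1, nl + 1).view(nl, 1, 1)  # (nl, 1, 1)
masks = (overlap == index).float()              # (nl, H, W), one binary mask per instance

# IoU on flattened masks, mirroring mask_iou(gt.flatten(1), pred.flatten(1))
flat = masks.flatten(1)                         # (nl, H*W)
inter = flat @ flat.T
union = flat.sum(1, keepdim=True) + flat.sum(1) - inter
print(inter / union)                            # identity matrix: each instance overlaps only itself
```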
ultralytics/models/yolo/yoloe/val.py CHANGED
@@ -186,9 +186,9 @@ class YOLOEDetectValidator(DetectionValidator):
  self.device = select_device(self.args.device, verbose=False)

  if isinstance(model, (str, Path)):
- from ultralytics.nn.tasks import attempt_load_weights
+ from ultralytics.nn.tasks import load_checkpoint

- model = attempt_load_weights(model, device=self.device)
+ model, _ = load_checkpoint(model, device=self.device) # model, ckpt
  model.eval().to(self.device)
  data = check_det_dataset(refer_data or self.args.data)
  names = [name.split("/", 1)[0] for name in list(data["names"].values())]
ultralytics/nn/__init__.py CHANGED
@@ -5,18 +5,16 @@ from .tasks import (
  ClassificationModel,
  DetectionModel,
  SegmentationModel,
- attempt_load_one_weight,
- attempt_load_weights,
  guess_model_scale,
  guess_model_task,
+ load_checkpoint,
  parse_model,
  torch_safe_load,
  yaml_model_load,
  )

  __all__ = (
- "attempt_load_one_weight",
- "attempt_load_weights",
+ "load_checkpoint",
  "parse_model",
  "yaml_model_load",
  "guess_model_task",
ultralytics/nn/autobackend.py CHANGED
@@ -203,9 +203,9 @@ class AutoBackend(nn.Module):
  model = model.fuse(verbose=verbose)
  model = model.to(device)
  else: # pt file
- from ultralytics.nn.tasks import attempt_load_one_weight
+ from ultralytics.nn.tasks import load_checkpoint

- model, _ = attempt_load_one_weight(model, device=device, fuse=fuse) # load model, ckpt
+ model, _ = load_checkpoint(model, device=device, fuse=fuse) # load model, ckpt

  # Common PyTorch model processing
  if hasattr(model, "kpt_shape"):
ultralytics/nn/tasks.py CHANGED
@@ -1483,61 +1483,12 @@ def torch_safe_load(weight, safe_only=False):
  return ckpt, file


- def attempt_load_weights(weights, device=None, inplace=True, fuse=False):
- """
- Load an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a.
-
- Args:
- weights (str | List[str]): Model weights path(s).
- device (torch.device, optional): Device to load model to.
- inplace (bool): Whether to do inplace operations.
- fuse (bool): Whether to fuse model.
-
- Returns:
- (torch.nn.Module): Loaded model.
- """
- ensemble = Ensemble()
- for w in weights if isinstance(weights, list) else [weights]:
- ckpt, w = torch_safe_load(w) # load ckpt
- args = {**DEFAULT_CFG_DICT, **ckpt["train_args"]} if "train_args" in ckpt else None # combined args
- model = (ckpt.get("ema") or ckpt["model"]).float() # FP32 model
-
- # Model compatibility updates
- model.args = args # attach args to model
- model.pt_path = w # attach *.pt file path to model
- model.task = getattr(model, "task", guess_model_task(model))
- if not hasattr(model, "stride"):
- model.stride = torch.tensor([32.0])
-
- # Append
- ensemble.append((model.fuse().eval() if fuse and hasattr(model, "fuse") else model.eval()).to(device))
-
- # Module updates
- for m in ensemble.modules():
- if hasattr(m, "inplace"):
- m.inplace = inplace
- elif isinstance(m, torch.nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
- m.recompute_scale_factor = None # torch 1.11.0 compatibility
-
- # Return model
- if len(ensemble) == 1:
- return ensemble[-1]
-
- # Return ensemble
- LOGGER.info(f"Ensemble created with {weights}\n")
- for k in "names", "nc", "yaml":
- setattr(ensemble, k, getattr(ensemble[0], k))
- ensemble.stride = ensemble[int(torch.argmax(torch.tensor([m.stride.max() for m in ensemble])))].stride
- assert all(ensemble[0].nc == m.nc for m in ensemble), f"Models differ in class counts {[m.nc for m in ensemble]}"
- return ensemble
-
-
- def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False):
+ def load_checkpoint(weight, device=None, inplace=True, fuse=False):
  """
  Load a single model weights.

  Args:
- weight (str): Model weight path.
+ weight (str | Path): Model weight path.
  device (torch.device, optional): Device to load model to.
  inplace (bool): Whether to do inplace operations.
  fuse (bool): Whether to fuse model.
ultralytics/utils/__init__.py CHANGED
@@ -49,7 +49,7 @@ MACOS_VERSION = platform.mac_ver()[0] if MACOS else None
  NOT_MACOS14 = not (MACOS and MACOS_VERSION.startswith("14."))
  ARM64 = platform.machine() in {"arm64", "aarch64"} # ARM64 booleans
  PYTHON_VERSION = platform.python_version()
- TORCH_VERSION = torch.__version__
+ TORCH_VERSION = str(torch.__version__) # Normalize torch.__version__ (PyTorch>1.9 returns TorchVersion objects)
  TORCHVISION_VERSION = importlib.metadata.version("torchvision") # faster than importing torchvision
  IS_VSCODE = os.environ.get("TERM_PROGRAM", False) == "vscode"
  RKNN_CHIPS = frozenset(
@@ -132,6 +132,10 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # suppress verbose TF compiler warning
  os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR" # suppress "NNPACK.cpp could not initialize NNPACK" warnings
  os.environ["KINETO_LOG_LEVEL"] = "5" # suppress verbose PyTorch profiler output when computing FLOPs

+ # Precompiled type tuples for faster isinstance() checks
+ FLOAT_OR_INT = (float, int)
+ STR_OR_PATH = (str, Path)
+

  class DataExportMixin:
  """
ultralytics/utils/checks.py CHANGED
@@ -36,6 +36,7 @@ from ultralytics.utils import (
  PYTHON_VERSION,
  RKNN_CHIPS,
  ROOT,
+ TORCH_VERSION,
  TORCHVISION_VERSION,
  USER_CONFIG_DIR,
  WINDOWS,
@@ -464,7 +465,7 @@ def check_torchvision():
  }

  # Check major and minor versions
- v_torch = ".".join(torch.__version__.split("+", 1)[0].split(".")[:2])
+ v_torch = ".".join(TORCH_VERSION.split("+", 1)[0].split(".")[:2])
  if v_torch in compatibility_table:
  compatible_versions = compatibility_table[v_torch]
  v_torchvision = ".".join(TORCHVISION_VERSION.split("+", 1)[0].split(".")[:2])
ultralytics/utils/tal.py CHANGED
@@ -3,12 +3,12 @@
  import torch
  import torch.nn as nn

- from . import LOGGER
+ from . import LOGGER, TORCH_VERSION
  from .checks import check_version
  from .metrics import bbox_iou, probiou
  from .ops import xywhr2xyxyxyxy

- TORCH_1_10 = check_version(torch.__version__, "1.10.0")
+ TORCH_1_10 = check_version(TORCH_VERSION, "1.10.0")


  class TaskAlignedAssigner(nn.Module):
ultralytics/utils/torch_utils.py CHANGED
@@ -27,6 +27,7 @@ from ultralytics.utils import (
  LOGGER,
  NUM_THREADS,
  PYTHON_VERSION,
+ TORCH_VERSION,
  TORCHVISION_VERSION,
  WINDOWS,
  colorstr,
@@ -35,15 +36,15 @@ from ultralytics.utils.checks import check_version
  from ultralytics.utils.patches import torch_load

  # Version checks (all default to version>=min_version)
- TORCH_1_9 = check_version(torch.__version__, "1.9.0")
- TORCH_1_13 = check_version(torch.__version__, "1.13.0")
- TORCH_2_0 = check_version(torch.__version__, "2.0.0")
- TORCH_2_4 = check_version(torch.__version__, "2.4.0")
+ TORCH_1_9 = check_version(TORCH_VERSION, "1.9.0")
+ TORCH_1_13 = check_version(TORCH_VERSION, "1.13.0")
+ TORCH_2_0 = check_version(TORCH_VERSION, "2.0.0")
+ TORCH_2_4 = check_version(TORCH_VERSION, "2.4.0")
  TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0")
  TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0")
  TORCHVISION_0_13 = check_version(TORCHVISION_VERSION, "0.13.0")
  TORCHVISION_0_18 = check_version(TORCHVISION_VERSION, "0.18.0")
- if WINDOWS and check_version(torch.__version__, "==2.4.0"): # reject version 2.4.0 on Windows
+ if WINDOWS and check_version(TORCH_VERSION, "==2.4.0"): # reject version 2.4.0 on Windows
  LOGGER.warning(
  "Known issue with torch==2.4.0 on Windows with CPU, recommend upgrading to torch>=2.4.1 to resolve "
  "https://github.com/ultralytics/ultralytics/issues/15049"
@@ -165,7 +166,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
  if isinstance(device, torch.device) or str(device).startswith(("tpu", "intel")):
  return device

- s = f"Ultralytics {__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} "
+ s = f"Ultralytics {__version__} 🚀 Python-{PYTHON_VERSION} torch-{TORCH_VERSION} "
  device = str(device).lower()
  for remove in "cuda:", "none", "(", ")", "[", "]", "'", " ":
  device = device.replace(remove, "") # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1'
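All of the `torch.__version__` call sites above now read the module-level `TORCH_VERSION` string (normalized with `str()` because newer PyTorch returns a `TorchVersion` object). A small sketch of how the constant feeds the existing version gates and the major.minor parsing used in `check_torchvision()`:

```python
import torch

from ultralytics.utils import TORCH_VERSION            # str(torch.__version__), e.g. "2.4.1+cu121"
from ultralytics.utils.checks import check_version

TORCH_2_0 = check_version(TORCH_VERSION, "2.0.0")      # defaults to ">=", as in torch_utils.py
major_minor = ".".join(TORCH_VERSION.split("+", 1)[0].split(".")[:2])
print(type(torch.__version__).__name__, TORCH_VERSION, TORCH_2_0, major_minor)
```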
ultralytics/utils/tqdm.py CHANGED
@@ -88,11 +88,11 @@ class TQDM:
  mininterval: float = 0.1,
  disable: bool | None = None,
  unit: str = "it",
- unit_scale: bool = False,
+ unit_scale: bool = True,
  unit_divisor: int = 1000,
- bar_format: str | None = None,
+ bar_format: str | None = None, # kept for API compatibility; not used for formatting
  initial: int = 0,
- **kwargs, # Accept unused args for compatibility
+ **kwargs,
  ) -> None:
  """
  Initialize the TQDM progress bar with specified configuration options.
@@ -138,11 +138,8 @@ class TQDM:
  self.mininterval = max(mininterval, self.NONINTERACTIVE_MIN_INTERVAL) if self.noninteractive else mininterval
  self.initial = initial

- # Set bar format based on whether we have a total
- if self.total:
- self.bar_format = bar_format or "{desc}: {percent:.0f}% {bar} {n}/{total} {rate} {elapsed}<{remaining}"
- else:
- self.bar_format = bar_format or "{desc}: {bar} {n} {rate} {elapsed}"
+ # Kept for API compatibility (unused for f-string formatting)
+ self.bar_format = bar_format

  self.file = file or sys.stdout

@@ -151,48 +148,31 @@ class TQDM:
  self.last_print_n = self.initial
  self.last_print_t = time.time()
  self.start_t = time.time()
- self.last_rate = 0
+ self.last_rate = 0.0
  self.closed = False
+ self.is_bytes = unit_scale and unit in ("B", "bytes")
+ self.scales = (
+ [(1073741824, "GB/s"), (1048576, "MB/s"), (1024, "KB/s")]
+ if self.is_bytes
+ else [(1e9, f"G{self.unit}/s"), (1e6, f"M{self.unit}/s"), (1e3, f"K{self.unit}/s")]
+ )

- # Display initial bar if we have total and not disabled
  if not self.disable and self.total and not self.noninteractive:
  self._display()

  def _format_rate(self, rate: float) -> str:
- """Format rate with proper units and reasonable precision."""
+ """Format rate with units."""
  if rate <= 0:
  return ""
+ fallback = f"{rate:.1f}B/s" if self.is_bytes else f"{rate:.1f}{self.unit}/s"
+ return next((f"{rate / t:.1f}{u}" for t, u in self.scales if rate >= t), fallback)

- # For bytes with scaling, use binary units
- if self.unit in ("B", "bytes") and self.unit_scale:
- return next(
- (
- f"{rate / threshold:.1f}{unit}"
- for threshold, unit in [
- (1073741824, "GB/s"),
- (1048576, "MB/s"),
- (1024, "KB/s"),
- ]
- if rate >= threshold
- ),
- f"{rate:.1f}B/s",
- )
- # For other scalable units, use decimal units
- if self.unit_scale and self.unit in ("it", "items", ""):
- for threshold, prefix in [(1000000, "M"), (1000, "K")]:
- if rate >= threshold:
- return f"{rate / threshold:.1f}{prefix}{self.unit}/s"
-
- # Default formatting
- precision = ".1f" if rate >= 1 else ".2f"
- return f"{rate:{precision}}{self.unit}/s"
-
- def _format_num(self, num: int) -> str:
+ def _format_num(self, num: int | float) -> str:
  """Format number with optional unit scaling."""
- if not self.unit_scale or self.unit not in ("B", "bytes"):
+ if not self.unit_scale or not self.is_bytes:
  return str(num)

- for unit in ["", "K", "M", "G", "T"]:
+ for unit in ("", "K", "M", "G", "T"):
  if abs(num) < self.unit_divisor:
  return f"{num:3.1f}{unit}B" if unit else f"{num:.0f}B"
  num /= self.unit_divisor
@@ -224,8 +204,7 @@
  """Check if display should update."""
  if self.noninteractive:
  return False
-
- return True if self.total and self.n >= self.total else dt >= self.mininterval
+ return (self.total is not None and self.n >= self.total) or (dt >= self.mininterval)

  def _display(self, final: bool = False) -> None:
  """Display progress bar."""
@@ -240,8 +219,8 @@
  return

  # Calculate rate (avoid crazy numbers)
- if dt > self.MIN_RATE_CALC_INTERVAL: # Only calculate rate if enough time has passed
- rate = dn / dt
+ if dt > self.MIN_RATE_CALC_INTERVAL:
+ rate = dn / dt if dt else 0.0
  # Smooth rate for reasonable values, use raw rate for very high values
  if rate < self.MAX_SMOOTHED_RATE:
  self.last_rate = self.RATE_SMOOTHING_FACTOR * rate + (1 - self.RATE_SMOOTHING_FACTOR) * self.last_rate
@@ -249,8 +228,8 @@
  else:
  rate = self.last_rate

- # At completion, use the overall rate for more accurate display
- if self.n >= (self.total or float("inf")) and self.total and self.total > 0:
+ # At completion, use overall rate
+ if self.total and self.n >= self.total:
  overall_elapsed = current_time - self.start_t
  if overall_elapsed > 0:
  rate = self.n / overall_elapsed
@@ -260,45 +239,41 @@
  self.last_print_t = current_time
  elapsed = current_time - self.start_t

- # Calculate remaining time
+ # Remaining time
  remaining_str = ""
  if self.total and 0 < self.n < self.total and elapsed > 0:
- est_rate = rate or self.n / elapsed
- remaining_str = self._format_time((self.total - self.n) / est_rate)
+ est_rate = rate or (self.n / elapsed)
+ remaining_str = f"<{self._format_time((self.total - self.n) / est_rate)}"

- # Build progress components
+ # Numbers and percent
  if self.total:
  percent = (self.n / self.total) * 100
- # For bytes with unit scaling, avoid repeating units: show "5.4/5.4MB" not "5.4MB/5.4MB"
- n = self._format_num(self.n)
- total = self._format_num(self.total)
- if self.unit_scale and self.unit in ("B", "bytes"):
- n = n.rstrip("KMGTPB") # Remove unit suffix from current
+ n_str = self._format_num(self.n)
+ t_str = self._format_num(self.total)
+ if self.is_bytes:
+ # Collapse suffix only when identical (e.g. "5.4/5.4MB")
+ if n_str[-2] == t_str[-2]:
+ n_str = n_str.rstrip("KMGTPB") # Remove unit suffix from current if different than total
  else:
- percent = 0
- n = self._format_num(self.n)
- total = "?"
+ percent = 0.0
+ n_str, t_str = self._format_num(self.n), "?"

  elapsed_str = self._format_time(elapsed)
+ rate_str = self._format_rate(rate) or (self._format_rate(self.n / elapsed) if elapsed > 0 else "")

- # Use different format for completion
- if self.total and self.n >= self.total:
- format_str = self.bar_format.replace("<{remaining}", "")
+ bar = self._generate_bar()
+
+ # Compose progress line via f-strings (two shapes: with/without total)
+ if self.total:
+ if self.is_bytes and self.n >= self.total:
+ # Completed bytes: show only final size
+ progress_str = f"{self.desc}: {percent:.0f}% {bar} {t_str} {rate_str} {elapsed_str}"
+ else:
+ progress_str = (
+ f"{self.desc}: {percent:.0f}% {bar} {n_str}/{t_str} {rate_str} {elapsed_str}{remaining_str}"
+ )
  else:
- format_str = self.bar_format
-
- # Format progress string
- progress_str = format_str.format(
- desc=self.desc,
- percent=percent,
- bar=self._generate_bar(),
- n=n,
- total=total,
- rate=self._format_rate(rate) or (self._format_rate(self.n / elapsed) if elapsed > 0 else ""),
- remaining=remaining_str,
- elapsed=elapsed_str,
- unit=self.unit,
- )
+ progress_str = f"{self.desc}: {bar} {n_str} {rate_str} {elapsed_str}"

  # Write to output
  try:
@@ -336,7 +311,7 @@
  if self.closed:
  return

- self.closed = True # Set before final display
+ self.closed = True

  if not self.disable:
  # Final display
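With `unit_scale` now defaulting to True, byte-denominated bars pick binary rate units (KB/s, MB/s, GB/s) and collapse totals like "5.4MB" without extra configuration. A short usage sketch, assuming the tqdm-style `total`/`update()`/`close()` surface that this class mirrors:

```python
from ultralytics.utils.tqdm import TQDM

pbar = TQDM(total=10 * 1024 * 1024, unit="B", unit_divisor=1024)  # byte-scaled bar
for _ in range(10):
    pbar.update(1024 * 1024)  # rates render as KB/s or MB/s, totals as e.g. "10.0MB"
pbar.close()
```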
ultralytics/utils/tuner.py CHANGED
@@ -129,7 +129,7 @@ def run_ray_tune(
  {**train_args, **{"exist_ok": train_args.pop("resume", False)}}, # resume w/ same tune_dir
  ),
  name=train_args.pop("name", "tune"), # runs/{task}/{tune_dir}
- ).resolve() # must be absolute dir
+ ) # must be absolute dir
  tune_dir.mkdir(parents=True, exist_ok=True)
  if tune.Tuner.can_restore(tune_dir):
  LOGGER.info(f"{colorstr('Tuner: ')} Resuming tuning run {tune_dir}...")