dgenerate-ultralytics-headless 8.3.196__py3-none-any.whl → 8.3.248__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (243)
  1. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/METADATA +33 -34
  2. dgenerate_ultralytics_headless-8.3.248.dist-info/RECORD +298 -0
  3. tests/__init__.py +5 -7
  4. tests/conftest.py +8 -15
  5. tests/test_cli.py +8 -10
  6. tests/test_cuda.py +9 -10
  7. tests/test_engine.py +29 -2
  8. tests/test_exports.py +69 -21
  9. tests/test_integrations.py +8 -11
  10. tests/test_python.py +109 -71
  11. tests/test_solutions.py +170 -159
  12. ultralytics/__init__.py +27 -9
  13. ultralytics/cfg/__init__.py +57 -64
  14. ultralytics/cfg/datasets/Argoverse.yaml +7 -6
  15. ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
  16. ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
  17. ultralytics/cfg/datasets/ImageNet.yaml +1 -1
  18. ultralytics/cfg/datasets/Objects365.yaml +19 -15
  19. ultralytics/cfg/datasets/SKU-110K.yaml +1 -1
  20. ultralytics/cfg/datasets/VOC.yaml +19 -21
  21. ultralytics/cfg/datasets/VisDrone.yaml +5 -5
  22. ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
  23. ultralytics/cfg/datasets/coco-pose.yaml +24 -2
  24. ultralytics/cfg/datasets/coco.yaml +2 -2
  25. ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
  26. ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
  27. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  28. ultralytics/cfg/datasets/dog-pose.yaml +28 -0
  29. ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
  30. ultralytics/cfg/datasets/dota8.yaml +2 -2
  31. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
  32. ultralytics/cfg/datasets/kitti.yaml +27 -0
  33. ultralytics/cfg/datasets/lvis.yaml +7 -7
  34. ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
  35. ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
  36. ultralytics/cfg/datasets/xView.yaml +16 -16
  37. ultralytics/cfg/default.yaml +96 -94
  38. ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
  39. ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
  40. ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
  41. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
  42. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
  43. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
  44. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
  45. ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
  46. ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
  47. ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
  48. ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
  49. ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
  50. ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
  51. ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
  52. ultralytics/cfg/models/v6/yolov6.yaml +1 -1
  53. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
  54. ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
  55. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
  56. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
  57. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
  58. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
  59. ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
  60. ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
  61. ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
  62. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
  63. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
  64. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
  65. ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
  66. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
  67. ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
  68. ultralytics/cfg/trackers/botsort.yaml +16 -17
  69. ultralytics/cfg/trackers/bytetrack.yaml +9 -11
  70. ultralytics/data/__init__.py +4 -4
  71. ultralytics/data/annotator.py +3 -4
  72. ultralytics/data/augment.py +286 -476
  73. ultralytics/data/base.py +18 -26
  74. ultralytics/data/build.py +151 -26
  75. ultralytics/data/converter.py +38 -50
  76. ultralytics/data/dataset.py +47 -75
  77. ultralytics/data/loaders.py +42 -49
  78. ultralytics/data/split.py +5 -6
  79. ultralytics/data/split_dota.py +8 -15
  80. ultralytics/data/utils.py +41 -45
  81. ultralytics/engine/exporter.py +462 -462
  82. ultralytics/engine/model.py +150 -191
  83. ultralytics/engine/predictor.py +30 -40
  84. ultralytics/engine/results.py +177 -311
  85. ultralytics/engine/trainer.py +193 -120
  86. ultralytics/engine/tuner.py +77 -63
  87. ultralytics/engine/validator.py +39 -22
  88. ultralytics/hub/__init__.py +16 -19
  89. ultralytics/hub/auth.py +6 -12
  90. ultralytics/hub/google/__init__.py +7 -10
  91. ultralytics/hub/session.py +15 -25
  92. ultralytics/hub/utils.py +5 -8
  93. ultralytics/models/__init__.py +1 -1
  94. ultralytics/models/fastsam/__init__.py +1 -1
  95. ultralytics/models/fastsam/model.py +8 -10
  96. ultralytics/models/fastsam/predict.py +19 -30
  97. ultralytics/models/fastsam/utils.py +1 -2
  98. ultralytics/models/fastsam/val.py +5 -7
  99. ultralytics/models/nas/__init__.py +1 -1
  100. ultralytics/models/nas/model.py +5 -8
  101. ultralytics/models/nas/predict.py +7 -9
  102. ultralytics/models/nas/val.py +1 -2
  103. ultralytics/models/rtdetr/__init__.py +1 -1
  104. ultralytics/models/rtdetr/model.py +7 -8
  105. ultralytics/models/rtdetr/predict.py +15 -19
  106. ultralytics/models/rtdetr/train.py +10 -13
  107. ultralytics/models/rtdetr/val.py +21 -23
  108. ultralytics/models/sam/__init__.py +15 -2
  109. ultralytics/models/sam/amg.py +14 -20
  110. ultralytics/models/sam/build.py +26 -19
  111. ultralytics/models/sam/build_sam3.py +377 -0
  112. ultralytics/models/sam/model.py +29 -32
  113. ultralytics/models/sam/modules/blocks.py +83 -144
  114. ultralytics/models/sam/modules/decoders.py +22 -40
  115. ultralytics/models/sam/modules/encoders.py +44 -101
  116. ultralytics/models/sam/modules/memory_attention.py +16 -30
  117. ultralytics/models/sam/modules/sam.py +206 -79
  118. ultralytics/models/sam/modules/tiny_encoder.py +64 -83
  119. ultralytics/models/sam/modules/transformer.py +18 -28
  120. ultralytics/models/sam/modules/utils.py +174 -50
  121. ultralytics/models/sam/predict.py +2268 -366
  122. ultralytics/models/sam/sam3/__init__.py +3 -0
  123. ultralytics/models/sam/sam3/decoder.py +546 -0
  124. ultralytics/models/sam/sam3/encoder.py +529 -0
  125. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  126. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  127. ultralytics/models/sam/sam3/model_misc.py +199 -0
  128. ultralytics/models/sam/sam3/necks.py +129 -0
  129. ultralytics/models/sam/sam3/sam3_image.py +339 -0
  130. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  131. ultralytics/models/sam/sam3/vitdet.py +547 -0
  132. ultralytics/models/sam/sam3/vl_combiner.py +160 -0
  133. ultralytics/models/utils/loss.py +14 -26
  134. ultralytics/models/utils/ops.py +13 -17
  135. ultralytics/models/yolo/__init__.py +1 -1
  136. ultralytics/models/yolo/classify/predict.py +9 -12
  137. ultralytics/models/yolo/classify/train.py +15 -41
  138. ultralytics/models/yolo/classify/val.py +34 -32
  139. ultralytics/models/yolo/detect/predict.py +8 -11
  140. ultralytics/models/yolo/detect/train.py +13 -32
  141. ultralytics/models/yolo/detect/val.py +75 -63
  142. ultralytics/models/yolo/model.py +37 -53
  143. ultralytics/models/yolo/obb/predict.py +5 -14
  144. ultralytics/models/yolo/obb/train.py +11 -14
  145. ultralytics/models/yolo/obb/val.py +42 -39
  146. ultralytics/models/yolo/pose/__init__.py +1 -1
  147. ultralytics/models/yolo/pose/predict.py +7 -22
  148. ultralytics/models/yolo/pose/train.py +10 -22
  149. ultralytics/models/yolo/pose/val.py +40 -59
  150. ultralytics/models/yolo/segment/predict.py +16 -20
  151. ultralytics/models/yolo/segment/train.py +3 -12
  152. ultralytics/models/yolo/segment/val.py +106 -56
  153. ultralytics/models/yolo/world/train.py +12 -16
  154. ultralytics/models/yolo/world/train_world.py +11 -34
  155. ultralytics/models/yolo/yoloe/__init__.py +7 -7
  156. ultralytics/models/yolo/yoloe/predict.py +16 -23
  157. ultralytics/models/yolo/yoloe/train.py +31 -56
  158. ultralytics/models/yolo/yoloe/train_seg.py +5 -10
  159. ultralytics/models/yolo/yoloe/val.py +16 -21
  160. ultralytics/nn/__init__.py +7 -7
  161. ultralytics/nn/autobackend.py +152 -80
  162. ultralytics/nn/modules/__init__.py +60 -60
  163. ultralytics/nn/modules/activation.py +4 -6
  164. ultralytics/nn/modules/block.py +133 -217
  165. ultralytics/nn/modules/conv.py +52 -97
  166. ultralytics/nn/modules/head.py +64 -116
  167. ultralytics/nn/modules/transformer.py +79 -89
  168. ultralytics/nn/modules/utils.py +16 -21
  169. ultralytics/nn/tasks.py +111 -156
  170. ultralytics/nn/text_model.py +40 -67
  171. ultralytics/solutions/__init__.py +12 -12
  172. ultralytics/solutions/ai_gym.py +11 -17
  173. ultralytics/solutions/analytics.py +15 -16
  174. ultralytics/solutions/config.py +5 -6
  175. ultralytics/solutions/distance_calculation.py +10 -13
  176. ultralytics/solutions/heatmap.py +7 -13
  177. ultralytics/solutions/instance_segmentation.py +5 -8
  178. ultralytics/solutions/object_blurrer.py +7 -10
  179. ultralytics/solutions/object_counter.py +12 -19
  180. ultralytics/solutions/object_cropper.py +8 -14
  181. ultralytics/solutions/parking_management.py +33 -31
  182. ultralytics/solutions/queue_management.py +10 -12
  183. ultralytics/solutions/region_counter.py +9 -12
  184. ultralytics/solutions/security_alarm.py +15 -20
  185. ultralytics/solutions/similarity_search.py +13 -17
  186. ultralytics/solutions/solutions.py +75 -74
  187. ultralytics/solutions/speed_estimation.py +7 -10
  188. ultralytics/solutions/streamlit_inference.py +4 -7
  189. ultralytics/solutions/templates/similarity-search.html +7 -18
  190. ultralytics/solutions/trackzone.py +7 -10
  191. ultralytics/solutions/vision_eye.py +5 -8
  192. ultralytics/trackers/__init__.py +1 -1
  193. ultralytics/trackers/basetrack.py +3 -5
  194. ultralytics/trackers/bot_sort.py +10 -27
  195. ultralytics/trackers/byte_tracker.py +14 -30
  196. ultralytics/trackers/track.py +3 -6
  197. ultralytics/trackers/utils/gmc.py +11 -22
  198. ultralytics/trackers/utils/kalman_filter.py +37 -48
  199. ultralytics/trackers/utils/matching.py +12 -15
  200. ultralytics/utils/__init__.py +116 -116
  201. ultralytics/utils/autobatch.py +2 -4
  202. ultralytics/utils/autodevice.py +17 -18
  203. ultralytics/utils/benchmarks.py +70 -70
  204. ultralytics/utils/callbacks/base.py +8 -10
  205. ultralytics/utils/callbacks/clearml.py +5 -13
  206. ultralytics/utils/callbacks/comet.py +32 -46
  207. ultralytics/utils/callbacks/dvc.py +13 -18
  208. ultralytics/utils/callbacks/mlflow.py +4 -5
  209. ultralytics/utils/callbacks/neptune.py +7 -15
  210. ultralytics/utils/callbacks/platform.py +314 -38
  211. ultralytics/utils/callbacks/raytune.py +3 -4
  212. ultralytics/utils/callbacks/tensorboard.py +23 -31
  213. ultralytics/utils/callbacks/wb.py +10 -13
  214. ultralytics/utils/checks.py +151 -87
  215. ultralytics/utils/cpu.py +3 -8
  216. ultralytics/utils/dist.py +19 -15
  217. ultralytics/utils/downloads.py +29 -41
  218. ultralytics/utils/errors.py +6 -14
  219. ultralytics/utils/events.py +2 -4
  220. ultralytics/utils/export/__init__.py +7 -0
  221. ultralytics/utils/{export.py → export/engine.py} +16 -16
  222. ultralytics/utils/export/imx.py +325 -0
  223. ultralytics/utils/export/tensorflow.py +231 -0
  224. ultralytics/utils/files.py +24 -28
  225. ultralytics/utils/git.py +9 -11
  226. ultralytics/utils/instance.py +30 -51
  227. ultralytics/utils/logger.py +212 -114
  228. ultralytics/utils/loss.py +15 -24
  229. ultralytics/utils/metrics.py +131 -160
  230. ultralytics/utils/nms.py +21 -30
  231. ultralytics/utils/ops.py +107 -165
  232. ultralytics/utils/patches.py +33 -21
  233. ultralytics/utils/plotting.py +122 -119
  234. ultralytics/utils/tal.py +28 -44
  235. ultralytics/utils/torch_utils.py +70 -187
  236. ultralytics/utils/tqdm.py +20 -20
  237. ultralytics/utils/triton.py +13 -19
  238. ultralytics/utils/tuner.py +17 -5
  239. dgenerate_ultralytics_headless-8.3.196.dist-info/RECORD +0 -281
  240. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/WHEEL +0 -0
  241. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/entry_points.txt +0 -0
  242. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/licenses/LICENSE +0 -0
  243. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/top_level.txt +0 -0
ultralytics/utils/torch_utils.py CHANGED
@@ -38,9 +38,14 @@ from ultralytics.utils.patches import torch_load
 
  # Version checks (all default to version>=min_version)
  TORCH_1_9 = check_version(TORCH_VERSION, "1.9.0")
+ TORCH_1_10 = check_version(TORCH_VERSION, "1.10.0")
+ TORCH_1_11 = check_version(TORCH_VERSION, "1.11.0")
  TORCH_1_13 = check_version(TORCH_VERSION, "1.13.0")
  TORCH_2_0 = check_version(TORCH_VERSION, "2.0.0")
+ TORCH_2_1 = check_version(TORCH_VERSION, "2.1.0")
  TORCH_2_4 = check_version(TORCH_VERSION, "2.4.0")
+ TORCH_2_8 = check_version(TORCH_VERSION, "2.8.0")
+ TORCH_2_9 = check_version(TORCH_VERSION, "2.9.0")
  TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0")
  TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0")
  TORCHVISION_0_13 = check_version(TORCHVISION_VERSION, "0.13.0")
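The added TORCH_1_10/1_11/2_1/2_8/2_9 flags follow the existing pattern of module-level booleans used to gate version-dependent code paths. A purely illustrative sketch of that pattern (the gated choice here is made up, not taken from this release):

    from ultralytics.utils.torch_utils import TORCH_2_4

    # Hypothetical gate: pick a code path based on the installed torch version
    attention_impl = "sdpa" if TORCH_2_4 else "math"
    print(attention_impl)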
@@ -79,8 +84,7 @@ def smart_inference_mode():
 
 
  def autocast(enabled: bool, device: str = "cuda"):
- """
- Get the appropriate autocast context manager based on PyTorch version and AMP setting.
+ """Get the appropriate autocast context manager based on PyTorch version and AMP setting.
 
  This function returns a context manager for automatic mixed precision (AMP) training that is compatible with both
  older and newer versions of PyTorch. It handles the differences in the autocast API between PyTorch versions.
@@ -92,14 +96,14 @@ def autocast(enabled: bool, device: str = "cuda"):
  Returns:
  (torch.amp.autocast): The appropriate autocast context manager.
 
- Notes:
- - For PyTorch versions 1.13 and newer, it uses `torch.amp.autocast`.
- - For older versions, it uses `torch.cuda.autocast`.
-
  Examples:
  >>> with autocast(enabled=True):
  ... # Your mixed precision operations here
  ... pass
+
+ Notes:
+ - For PyTorch versions 1.13 and newer, it uses `torch.amp.autocast`.
+ - For older versions, it uses `torch.cuda.autocast`.
  """
  if TORCH_1_13:
  return torch.amp.autocast(device, enabled=enabled)
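For reference, a minimal CPU-safe usage sketch of the context manager documented above (placeholder tensor; AMP only takes effect where supported):

    import torch
    from ultralytics.utils.torch_utils import autocast

    x = torch.zeros(1, 3, 32, 32)
    enabled = torch.cuda.is_available()
    with autocast(enabled=enabled, device="cuda" if enabled else "cpu"):
        y = x * 2  # ops inside the block run under AMP when enabled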
@@ -127,9 +131,8 @@ def get_gpu_info(index):
  return f"{properties.name}, {properties.total_memory / (1 << 20):.0f}MiB"
 
 
- def select_device(device="", batch=0, newline=False, verbose=True):
- """
- Select the appropriate PyTorch device based on the provided arguments.
+ def select_device(device="", newline=False, verbose=True):
+ """Select the appropriate PyTorch device based on the provided arguments.
 
  The function takes a string specifying the device or a torch.device object and returns a torch.device object
  representing the selected device. The function also validates the number of available devices and raises an
@@ -138,17 +141,12 @@ def select_device(device="", batch=0, newline=False, verbose=True):
  Args:
  device (str | torch.device, optional): Device string or torch.device object. Options are 'None', 'cpu', or
  'cuda', or '0' or '0,1,2,3'. Auto-selects the first available GPU, or CPU if no GPU is available.
- batch (int, optional): Batch size being used in your model.
  newline (bool, optional): If True, adds a newline at the end of the log string.
  verbose (bool, optional): If True, logs the device information.
 
  Returns:
  (torch.device): Selected device.
 
- Raises:
- ValueError: If the specified device is not available or if the batch size is not a multiple of the number of
- devices when using multiple GPUs.
-
  Examples:
  >>> select_device("cuda:0")
  device(type='cuda', index=0)
@@ -182,7 +180,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
  cpu = device == "cpu"
  mps = device in {"mps", "mps:0"} # Apple Metal Performance Shaders (MPS)
  if cpu or mps:
- os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False
+ os.environ["CUDA_VISIBLE_DEVICES"] = "" # force torch.cuda.is_available() = False
  elif device: # non-cpu device requested
  if device == "cuda":
  device = "0"
@@ -210,19 +208,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
 
  if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available
  devices = device.split(",") if device else "0" # i.e. "0,1" -> ["0", "1"]
- n = len(devices) # device count
- if n > 1: # multi-GPU
- if batch < 1:
- raise ValueError(
- "AutoBatch with batch<1 not supported for Multi-GPU training, "
- f"please specify a valid batch size multiple of GPU count {n}, i.e. batch={n * 8}."
- )
- if batch >= 0 and batch % n != 0: # check batch_size is divisible by device_count
- raise ValueError(
- f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
- f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}."
- )
- space = " " * (len(s) + 1)
+ space = " " * len(s)
  for i, d in enumerate(devices):
  s += f"{'' if i == 0 else space}CUDA:{d} ({get_gpu_info(i)})\n" # bytes to MB
  arg = "cuda:0"
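As the removed lines show, select_device no longer takes a batch argument and no longer raises when the batch size is not divisible by the GPU count; callers passing batch= must drop it. A minimal call against the new signature:

    from ultralytics.utils.torch_utils import select_device

    device = select_device("cpu")  # or "0", "0,1", "mps"; batch validation is no longer performed here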
@@ -249,8 +235,7 @@ def time_sync():
 
 
  def fuse_conv_and_bn(conv, bn):
- """
- Fuse Conv2d and BatchNorm2d layers for inference optimization.
+ """Fuse Conv2d and BatchNorm2d layers for inference optimization.
 
  Args:
  conv (nn.Conv2d): Convolutional layer to fuse.
@@ -259,7 +244,7 @@ def fuse_conv_and_bn(conv, bn):
  Returns:
  (nn.Conv2d): The fused convolutional layer with gradients disabled.
 
- Example:
+ Examples:
  >>> conv = nn.Conv2d(3, 16, 3)
  >>> bn = nn.BatchNorm2d(16)
  >>> fused_conv = fuse_conv_and_bn(conv, bn)
@@ -283,8 +268,7 @@ def fuse_conv_and_bn(conv, bn):
 
 
  def fuse_deconv_and_bn(deconv, bn):
- """
- Fuse ConvTranspose2d and BatchNorm2d layers for inference optimization.
+ """Fuse ConvTranspose2d and BatchNorm2d layers for inference optimization.
 
  Args:
  deconv (nn.ConvTranspose2d): Transposed convolutional layer to fuse.
@@ -293,7 +277,7 @@ def fuse_deconv_and_bn(deconv, bn):
  Returns:
  (nn.ConvTranspose2d): The fused transposed convolutional layer with gradients disabled.
 
- Example:
+ Examples:
  >>> deconv = nn.ConvTranspose2d(16, 3, 3)
  >>> bn = nn.BatchNorm2d(3)
  >>> fused_deconv = fuse_deconv_and_bn(deconv, bn)
@@ -317,8 +301,7 @@ def fuse_deconv_and_bn(deconv, bn):
 
 
  def model_info(model, detailed=False, verbose=True, imgsz=640):
- """
- Print and return detailed model information layer by layer.
+ """Print and return detailed model information layer by layer.
 
  Args:
  model (nn.Module): Model to analyze.
@@ -347,10 +330,10 @@ def model_info(model, detailed=False, verbose=True, imgsz=640):
  if len(m._parameters):
  for pn, p in m.named_parameters():
  LOGGER.info(
- f"{i:>5g}{f'{mn}.{pn}':>40}{mt:>20}{p.requires_grad!r:>10}{p.numel():>12g}{str(list(p.shape)):>20}{p.mean():>10.3g}{p.std():>10.3g}{str(p.dtype).replace('torch.', ''):>15}"
+ f"{i:>5g}{f'{mn}.{pn}':>40}{mt:>20}{p.requires_grad!r:>10}{p.numel():>12g}{list(p.shape)!s:>20}{p.mean():>10.3g}{p.std():>10.3g}{str(p.dtype).replace('torch.', ''):>15}"
  )
  else: # layers with no learnable params
- LOGGER.info(f"{i:>5g}{mn:>40}{mt:>20}{False!r:>10}{0:>12g}{str([]):>20}{'-':>10}{'-':>10}{'-':>15}")
+ LOGGER.info(f"{i:>5g}{mn:>40}{mt:>20}{False!r:>10}{0:>12g}{[]!s:>20}{'-':>10}{'-':>10}{'-':>15}")
 
  flops = get_flops(model, imgsz) # imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320]
  fused = " (fused)" if getattr(model, "is_fused", lambda: False)() else ""
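The two LOGGER lines above only replace explicit str(...) calls with the equivalent !s conversion flag, so the logged output is unchanged. A quick check of that equivalence with a made-up shape:

    shape = [3, 16, 3, 3]
    assert f"{str(shape):>20}" == f"{shape!s:>20}"  # '!s' applies str() before the format spec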
@@ -372,8 +355,7 @@ def get_num_gradients(model):
 
 
  def model_info_for_loggers(trainer):
- """
- Return model info dict with useful model information.
+ """Return model info dict with useful model information.
 
  Args:
  trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing model and validation data.
@@ -406,12 +388,10 @@ def model_info_for_loggers(trainer):
 
 
  def get_flops(model, imgsz=640):
- """
- Calculate FLOPs (floating point operations) for a model in billions.
+ """Calculate FLOPs (floating point operations) for a model in billions.
 
- Attempts two calculation methods: first with a stride-based tensor for efficiency,
- then falls back to full image size if needed (e.g., for RTDETR models). Returns 0.0
- if thop library is unavailable or calculation fails.
+ Attempts two calculation methods: first with a stride-based tensor for efficiency, then falls back to full image
+ size if needed (e.g., for RTDETR models). Returns 0.0 if thop library is unavailable or calculation fails.
 
  Args:
  model (nn.Module): The model to calculate FLOPs for.
@@ -448,8 +428,7 @@ def get_flops(model, imgsz=640):
 
 
  def get_flops_with_torch_profiler(model, imgsz=640):
- """
- Compute model FLOPs using torch profiler (alternative to thop package, but 2-10x slower).
+ """Compute model FLOPs using torch profiler (alternative to thop package, but 2-10x slower).
 
  Args:
  model (nn.Module): The model to calculate FLOPs for.
@@ -495,8 +474,7 @@ def initialize_weights(model):
 
 
  def scale_img(img, ratio=1.0, same_shape=False, gs=32):
- """
- Scale and pad an image tensor, optionally maintaining aspect ratio and padding to gs multiple.
+ """Scale and pad an image tensor, optionally maintaining aspect ratio and padding to gs multiple.
 
  Args:
  img (torch.Tensor): Input image tensor.
@@ -518,8 +496,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32):
 
 
  def copy_attr(a, b, include=(), exclude=()):
- """
- Copy attributes from object 'b' to object 'a', with options to include/exclude certain attributes.
+ """Copy attributes from object 'b' to object 'a', with options to include/exclude certain attributes.
 
  Args:
  a (Any): Destination object to copy attributes to.
@@ -534,24 +511,8 @@ def copy_attr(a, b, include=(), exclude=()):
  setattr(a, k, v)
 
 
- def get_latest_opset():
- """
- Return the second-most recent ONNX opset version supported by this version of PyTorch, adjusted for maturity.
-
- Returns:
- (int): The ONNX opset version.
- """
- if TORCH_1_13:
- # If the PyTorch>=1.13, dynamically compute the latest opset minus one using 'symbolic_opset'
- return max(int(k[14:]) for k in vars(torch.onnx) if "symbolic_opset" in k) - 1
- # Otherwise for PyTorch<=1.12 return the corresponding predefined opset
- version = torch.onnx.producer_version.rsplit(".", 1)[0] # i.e. '2.3'
- return {"1.12": 15, "1.11": 14, "1.10": 13, "1.9": 12, "1.8": 12}.get(version, 12)
-
-
  def intersect_dicts(da, db, exclude=()):
- """
- Return a dictionary of intersecting keys with matching shapes, excluding 'exclude' keys, using da values.
+ """Return a dictionary of intersecting keys with matching shapes, excluding 'exclude' keys, using da values.
 
  Args:
  da (dict): First dictionary.
@@ -565,8 +526,7 @@ def intersect_dicts(da, db, exclude=()):
 
 
  def is_parallel(model):
- """
- Return True if model is of type DP or DDP.
+ """Return True if model is of type DP or DDP.
 
  Args:
  model (nn.Module): Model to check.
@@ -578,8 +538,7 @@ def is_parallel(model):
 
 
  def unwrap_model(m: nn.Module) -> nn.Module:
- """
- Unwrap compiled and parallel models to get the base model.
+ """Unwrap compiled and parallel models to get the base model.
 
  Args:
  m (nn.Module): A model that may be wrapped by torch.compile (._orig_mod) or parallel wrappers such as
@@ -598,8 +557,7 @@ def unwrap_model(m: nn.Module) -> nn.Module:
 
 
  def one_cycle(y1=0.0, y2=1.0, steps=100):
- """
- Return a lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf.
+ """Return a lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf.
 
  Args:
  y1 (float, optional): Initial value.
@@ -613,8 +571,7 @@ def one_cycle(y1=0.0, y2=1.0, steps=100):
 
 
  def init_seeds(seed=0, deterministic=False):
- """
- Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html.
+ """Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html.
 
  Args:
  seed (int, optional): Random seed.
@@ -647,11 +604,10 @@ def unset_deterministic():
 
 
  class ModelEMA:
- """
- Updated Exponential Moving Average (EMA) implementation.
+ """Updated Exponential Moving Average (EMA) implementation.
 
- Keeps a moving average of everything in the model state_dict (parameters and buffers).
- For EMA details see References.
+ Keeps a moving average of everything in the model state_dict (parameters and buffers). For EMA details see
+ References.
 
  To disable EMA set the `enabled` attribute to `False`.
 
@@ -667,8 +623,7 @@ class ModelEMA:
  """
 
  def __init__(self, model, decay=0.9999, tau=2000, updates=0):
- """
- Initialize EMA for 'model' with given arguments.
+ """Initialize EMA for 'model' with given arguments.
 
  Args:
  model (nn.Module): Model to create EMA for.
@@ -684,8 +639,7 @@ class ModelEMA:
  self.enabled = True
 
  def update(self, model):
- """
- Update EMA parameters.
+ """Update EMA parameters.
 
  Args:
  model (nn.Module): Model to update EMA from.
@@ -702,8 +656,7 @@ class ModelEMA:
  # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype}, model {msd[k].dtype}'
 
  def update_attr(self, model, include=(), exclude=("process_group", "reducer")):
- """
- Update attributes and save stripped model with optimizer removed.
+ """Update attributes and save stripped model with optimizer removed.
 
  Args:
  model (nn.Module): Model to update attributes from.
@@ -714,9 +667,8 @@ class ModelEMA:
  copy_attr(self.ema, model, include, exclude)
 
 
- def strip_optimizer(f: str | Path = "best.pt", s: str = "", updates: dict[str, Any] = None) -> dict[str, Any]:
- """
- Strip optimizer from 'f' to finalize training, optionally save as 's'.
+ def strip_optimizer(f: str | Path = "best.pt", s: str = "", updates: dict[str, Any] | None = None) -> dict[str, Any]:
+ """Strip optimizer from 'f' to finalize training, optionally save as 's'.
 
  Args:
  f (str | Path): File path to model to strip the optimizer from.
@@ -761,7 +713,7 @@ def strip_optimizer(f: str | Path = "best.pt", s: str = "", updates: dict[str, A
 
  # Update other keys
  args = {**DEFAULT_CFG_DICT, **x.get("train_args", {})} # combine args
- for k in "optimizer", "best_fitness", "ema", "updates": # keys
+ for k in "optimizer", "best_fitness", "ema", "updates", "scaler": # keys
  x[k] = None
  x["epoch"] = -1
  x["train_args"] = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # strip non-default keys
@@ -776,8 +728,7 @@ def strip_optimizer(f: str | Path = "best.pt", s: str = "", updates: dict[str, A
 
 
  def convert_optimizer_state_dict_to_fp16(state_dict):
- """
- Convert the state_dict of a given optimizer to FP16, focusing on the 'state' key for tensor conversions.
+ """Convert the state_dict of a given optimizer to FP16, focusing on the 'state' key for tensor conversions.
 
  Args:
  state_dict (dict): Optimizer state dictionary.
@@ -795,12 +746,11 @@ def convert_optimizer_state_dict_to_fp16(state_dict):
 
  @contextmanager
  def cuda_memory_usage(device=None):
- """
- Monitor and manage CUDA memory usage.
+ """Monitor and manage CUDA memory usage.
 
- This function checks if CUDA is available and, if so, empties the CUDA cache to free up unused memory.
- It then yields a dictionary containing memory usage information, which can be updated by the caller.
- Finally, it updates the dictionary with the amount of memory reserved by CUDA on the specified device.
+ This function checks if CUDA is available and, if so, empties the CUDA cache to free up unused memory. It then
+ yields a dictionary containing memory usage information, which can be updated by the caller. Finally, it updates the
+ dictionary with the amount of memory reserved by CUDA on the specified device.
 
  Args:
  device (torch.device, optional): The CUDA device to query memory usage for.
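A usage sketch of this context manager (illustrative; .get is used because the behaviour without CUDA is not shown in this diff):

    from ultralytics.utils.torch_utils import cuda_memory_usage

    with cuda_memory_usage() as info:
        pass  # run CUDA work here
    print(f"{info.get('memory', 0) / 1e9:.3f} GB reserved")  # 'memory' is the key read by profile_ops below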
@@ -820,8 +770,7 @@ def cuda_memory_usage(device=None):
 
 
  def profile_ops(input, ops, n=10, device=None, max_num_obj=0):
- """
- Ultralytics speed, memory and FLOPs profiler.
+ """Ultralytics speed, memory and FLOPs profiler.
 
  Args:
  input (torch.Tensor | list): Input tensor(s) to profile.
@@ -894,7 +843,7 @@ def profile_ops(input, ops, n=10, device=None, max_num_obj=0):
  mem += cuda_info["memory"] / 1e9 # (GB)
  s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else "list" for x in (x, y)) # shapes
  p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters
- LOGGER.info(f"{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}")
+ LOGGER.info(f"{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{s_in!s:>24s}{s_out!s:>24s}")
  results.append([p, flops, mem, tf, tb, s_in, s_out])
  except Exception as e:
  LOGGER.info(e)
@@ -906,8 +855,7 @@ def profile_ops(input, ops, n=10, device=None, max_num_obj=0):
 
 
  class EarlyStopping:
- """
- Early stopping class that stops training when a specified number of epochs have passed without improvement.
+ """Early stopping class that stops training when a specified number of epochs have passed without improvement.
 
  Attributes:
  best_fitness (float): Best fitness value observed.
@@ -917,8 +865,7 @@ class EarlyStopping:
  """
 
  def __init__(self, patience=50):
- """
- Initialize early stopping object.
+ """Initialize early stopping object.
 
  Args:
  patience (int, optional): Number of epochs to wait after fitness stops improving before stopping.
@@ -929,8 +876,7 @@ class EarlyStopping:
  self.possible_stop = False # possible stop may occur next epoch
 
  def __call__(self, epoch, fitness):
- """
- Check whether to stop training.
+ """Check whether to stop training.
 
  Args:
  epoch (int): Current epoch of training
@@ -959,85 +905,15 @@ class EarlyStopping:
  return stop
 
 
- class FXModel(nn.Module):
- """
- A custom model class for torch.fx compatibility.
-
- This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph
- manipulation. It copies attributes from an existing model and explicitly sets the model attribute to ensure proper
- copying.
-
- Attributes:
- model (nn.Module): The original model's layers.
- """
-
- def __init__(self, model):
- """
- Initialize the FXModel.
-
- Args:
- model (nn.Module): The original model to wrap for torch.fx compatibility.
- """
- super().__init__()
- copy_attr(self, model)
- # Explicitly set `model` since `copy_attr` somehow does not copy it.
- self.model = model.model
-
- def forward(self, x):
- """
- Forward pass through the model.
-
- This method performs the forward pass through the model, handling the dependencies between layers and saving
- intermediate outputs.
-
- Args:
- x (torch.Tensor): The input tensor to the model.
-
- Returns:
- (torch.Tensor): The output tensor from the model.
- """
- y = [] # outputs
- for m in self.model:
- if m.f != -1: # if not from previous layer
- # from earlier layers
- x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
- x = m(x) # run
- y.append(x) # save output
- return x
-
-
- def disable_dynamo(func: Any) -> Any:
- """
- Disable torch.compile/dynamo for a callable when available.
-
- Args:
- func (Any): Callable object to wrap. Could be a function, method, or class.
-
- Returns:
- func (Any): Same callable, wrapped by torch._dynamo.disable when available, otherwise unchanged.
-
- Examples:
- >>> @disable_dynamo
- ... def fn(x):
- ... return x + 1
- >>> # Works even if torch._dynamo is not available
- >>> _ = fn(1)
- """
- if hasattr(torch, "_dynamo"):
- return torch._dynamo.disable(func)
- return func
-
-
  def attempt_compile(
  model: torch.nn.Module,
  device: torch.device,
  imgsz: int = 640,
  use_autocast: bool = False,
  warmup: bool = False,
- prefix: str = colorstr("compile:"),
+ mode: bool | str = "default",
  ) -> torch.nn.Module:
- """
- Compile a model with torch.compile and optionally warm up the graph to reduce first-iteration latency.
+ """Compile a model with torch.compile and optionally warm up the graph to reduce first-iteration latency.
 
  This utility attempts to compile the provided model using the inductor backend with dynamic shapes enabled and an
  autotuning mode. If compilation is unavailable or fails, the original model is returned unchanged. An optional
@@ -1049,28 +925,35 @@ def attempt_compile(
  imgsz (int, optional): Square input size to create a dummy tensor with shape (1, 3, imgsz, imgsz) for warmup.
  use_autocast (bool, optional): Whether to run warmup under autocast on CUDA or MPS devices.
  warmup (bool, optional): Whether to execute a single dummy forward pass to warm up the compiled model.
- prefix (str, optional): Message prefix for logger output.
+ mode (bool | str, optional): torch.compile mode. True → "default", False → no compile, or a string like
+ "default", "reduce-overhead", "max-autotune-no-cudagraphs".
 
  Returns:
  model (torch.nn.Module): Compiled model if compilation succeeds, otherwise the original unmodified model.
 
- Notes:
- - If the current PyTorch build does not provide torch.compile, the function returns the input model immediately.
- - Warmup runs under torch.inference_mode and may use torch.autocast for CUDA/MPS to align compute precision.
- - CUDA devices are synchronized after warmup to account for asynchronous kernel execution.
-
  Examples:
  >>> device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
  >>> # Try to compile and warm up a model with a 640x640 input
  >>> model = attempt_compile(model, device=device, imgsz=640, use_autocast=True, warmup=True)
+
+ Notes:
+ - If the current PyTorch build does not provide torch.compile, the function returns the input model immediately.
+ - Warmup runs under torch.inference_mode and may use torch.autocast for CUDA/MPS to align compute precision.
+ - CUDA devices are synchronized after warmup to account for asynchronous kernel execution.
  """
- if not hasattr(torch, "compile"):
+ if not hasattr(torch, "compile") or not mode:
  return model
 
- LOGGER.info(f"{prefix} starting torch.compile...")
+ if mode is True:
+ mode = "default"
+ prefix = colorstr("compile:")
+ LOGGER.info(f"{prefix} starting torch.compile with '{mode}' mode...")
+ if mode == "max-autotune":
+ LOGGER.warning(f"{prefix} mode='{mode}' not recommended, using mode='max-autotune-no-cudagraphs' instead")
+ mode = "max-autotune-no-cudagraphs"
  t0 = time.perf_counter()
  try:
- model = torch.compile(model, mode="max-autotune", backend="inductor")
+ model = torch.compile(model, mode=mode, backend="inductor")
  except Exception as e:
  LOGGER.warning(f"{prefix} torch.compile failed, continuing uncompiled: {e}")
  return model
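A minimal call sketch for the reworked signature, with a stand-in module; the mode values are those listed in the docstring above, and mode=False skips compilation entirely:

    import torch
    from ultralytics.utils.torch_utils import attempt_compile

    net = torch.nn.Conv2d(3, 16, 3)  # placeholder model
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = attempt_compile(net, device=device, mode="reduce-overhead")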
ultralytics/utils/tqdm.py CHANGED
@@ -16,13 +16,12 @@ def is_noninteractive_console() -> bool:
 
 
  class TQDM:
- """
- Lightweight zero-dependency progress bar for Ultralytics.
+ """Lightweight zero-dependency progress bar for Ultralytics.
 
- Provides clean, rich-style progress bars suitable for various environments including Weights & Biases,
- console outputs, and other logging systems. Features zero external dependencies, clean single-line output,
- rich-style progress bars with Unicode block characters, context manager support, iterator protocol support,
- and dynamic description updates.
+ Provides clean, rich-style progress bars suitable for various environments including Weights & Biases, console
+ outputs, and other logging systems. Features zero external dependencies, clean single-line output, rich-style
+ progress bars with Unicode block characters, context manager support, iterator protocol support, and dynamic
+ description updates.
 
  Attributes:
  iterable (object): Iterable to wrap with progress bar.
@@ -94,8 +93,7 @@ class TQDM:
  initial: int = 0,
  **kwargs,
  ) -> None:
- """
- Initialize the TQDM progress bar with specified configuration options.
+ """Initialize the TQDM progress bar with specified configuration options.
 
  Args:
  iterable (object, optional): Iterable to wrap with progress bar.
@@ -111,11 +109,6 @@ class TQDM:
  bar_format (str, optional): Custom bar format string.
  initial (int, optional): Initial counter value.
  **kwargs (Any): Additional keyword arguments for compatibility (ignored).
-
- Examples:
- >>> pbar = TQDM(range(100), desc="Processing")
- >>> with TQDM(total=1000, unit="B", unit_scale=True) as pbar:
- ... pbar.update(1024) # Updates by 1KB
  """
  # Disable if not verbose
  if disable is None:
@@ -150,7 +143,7 @@ class TQDM:
  self.start_t = time.time()
  self.last_rate = 0.0
  self.closed = False
- self.is_bytes = unit_scale and unit in ("B", "bytes")
+ self.is_bytes = unit_scale and unit in {"B", "bytes"}
  self.scales = (
  [(1073741824, "GB/s"), (1048576, "MB/s"), (1024, "KB/s")]
  if self.is_bytes
@@ -161,9 +154,17 @@ class TQDM:
  self._display()
 
  def _format_rate(self, rate: float) -> str:
- """Format rate with units."""
+ """Format rate with units, switching between it/s and s/it for readability."""
  if rate <= 0:
  return ""
+
+ inv_rate = 1 / rate if rate else None
+
+ # Use s/it format when inv_rate > 1 (i.e., rate < 1 it/s) for better readability
+ if inv_rate and inv_rate > 1:
+ return f"{inv_rate:.1f}s/B" if self.is_bytes else f"{inv_rate:.1f}s/{self.unit}"
+
+ # Use it/s format for fast iterations
  fallback = f"{rate:.1f}B/s" if self.is_bytes else f"{rate:.1f}{self.unit}/s"
  return next((f"{rate / t:.1f}{u}" for t, u in self.scales if rate >= t), fallback)
 
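The added branch reports slow progress as seconds-per-item instead of a fractional items-per-second figure. The same arithmetic as a standalone sketch (not the class method itself):

    def format_rate(rate: float, unit: str = "it") -> str:
        if rate <= 0:
            return ""
        inv = 1 / rate
        return f"{inv:.1f}s/{unit}" if inv > 1 else f"{rate:.1f}{unit}/s"

    print(format_rate(0.25))  # -> '4.0s/it'
    print(format_rate(5.0))   # -> '5.0it/s'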
@@ -178,7 +179,8 @@ class TQDM:
  num /= self.unit_divisor
  return f"{num:.1f}PB"
 
- def _format_time(self, seconds: float) -> str:
+ @staticmethod
+ def _format_time(seconds: float) -> str:
  """Format time duration."""
  if seconds < 60:
  return f"{seconds:.1f}s"
@@ -250,10 +252,8 @@ class TQDM:
  percent = (self.n / self.total) * 100
  n_str = self._format_num(self.n)
  t_str = self._format_num(self.total)
- if self.is_bytes:
- # Collapse suffix only when identical (e.g. "5.4/5.4MB")
- if n_str[-2] == t_str[-2]:
- n_str = n_str.rstrip("KMGTPB") # Remove unit suffix from current if different than total
+ if self.is_bytes and n_str[-2] == t_str[-2]: # Collapse suffix only when identical (e.g. "5.4/5.4MB")
+ n_str = n_str.rstrip("KMGTPB")
  else:
  percent = 0.0
  n_str, t_str = self._format_num(self.n), "?"
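The flattened condition keeps the previous behaviour: the current count drops its own unit suffix only when it matches the total's, so "5.4MB" next to "6.0MB" renders as "5.4/6.0MB". A quick demonstration with made-up values:

    n_str, t_str = "5.4MB", "6.0MB"
    if n_str[-2] == t_str[-2]:          # both units are 'M'
        n_str = n_str.rstrip("KMGTPB")  # -> '5.4'
    print(f"{n_str}/{t_str}")           # -> 5.4/6.0MB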