ultralytics 8.3.88__py3-none-any.whl → 8.3.90__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (155)
  1. tests/conftest.py +2 -2
  2. tests/test_cli.py +13 -11
  3. tests/test_cuda.py +10 -1
  4. tests/test_integrations.py +1 -5
  5. tests/test_python.py +16 -16
  6. tests/test_solutions.py +9 -9
  7. ultralytics/__init__.py +1 -1
  8. ultralytics/cfg/__init__.py +3 -1
  9. ultralytics/cfg/models/11/yolo11-cls.yaml +5 -5
  10. ultralytics/cfg/models/11/yolo11-obb.yaml +5 -5
  11. ultralytics/cfg/models/11/yolo11-pose.yaml +5 -5
  12. ultralytics/cfg/models/11/yolo11-seg.yaml +5 -5
  13. ultralytics/cfg/models/11/yolo11.yaml +5 -5
  14. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +5 -5
  15. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +5 -5
  16. ultralytics/cfg/models/v8/yolov8-ghost.yaml +5 -5
  17. ultralytics/cfg/models/v8/yolov8-obb.yaml +5 -5
  18. ultralytics/cfg/models/v8/yolov8-p6.yaml +5 -5
  19. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +5 -5
  20. ultralytics/cfg/models/v8/yolov8-world.yaml +5 -5
  21. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +5 -5
  22. ultralytics/cfg/models/v8/yolov8.yaml +5 -5
  23. ultralytics/cfg/models/v9/yolov9c-seg.yaml +1 -1
  24. ultralytics/cfg/models/v9/yolov9c.yaml +1 -1
  25. ultralytics/cfg/models/v9/yolov9e-seg.yaml +1 -1
  26. ultralytics/cfg/models/v9/yolov9e.yaml +1 -1
  27. ultralytics/cfg/models/v9/yolov9m.yaml +1 -1
  28. ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
  29. ultralytics/cfg/models/v9/yolov9t.yaml +1 -1
  30. ultralytics/data/annotator.py +9 -14
  31. ultralytics/data/base.py +125 -39
  32. ultralytics/data/build.py +63 -24
  33. ultralytics/data/converter.py +34 -33
  34. ultralytics/data/dataset.py +207 -53
  35. ultralytics/data/loaders.py +1 -0
  36. ultralytics/data/split_dota.py +39 -12
  37. ultralytics/data/utils.py +33 -47
  38. ultralytics/engine/exporter.py +19 -17
  39. ultralytics/engine/model.py +69 -90
  40. ultralytics/engine/predictor.py +106 -21
  41. ultralytics/engine/trainer.py +32 -23
  42. ultralytics/engine/tuner.py +31 -38
  43. ultralytics/engine/validator.py +75 -41
  44. ultralytics/hub/__init__.py +21 -26
  45. ultralytics/hub/auth.py +9 -12
  46. ultralytics/hub/session.py +76 -21
  47. ultralytics/hub/utils.py +19 -17
  48. ultralytics/models/fastsam/model.py +23 -17
  49. ultralytics/models/fastsam/predict.py +36 -16
  50. ultralytics/models/fastsam/utils.py +5 -5
  51. ultralytics/models/fastsam/val.py +6 -6
  52. ultralytics/models/nas/model.py +29 -24
  53. ultralytics/models/nas/predict.py +14 -11
  54. ultralytics/models/nas/val.py +11 -13
  55. ultralytics/models/rtdetr/model.py +20 -11
  56. ultralytics/models/rtdetr/predict.py +21 -21
  57. ultralytics/models/rtdetr/train.py +25 -24
  58. ultralytics/models/rtdetr/val.py +47 -14
  59. ultralytics/models/sam/__init__.py +1 -1
  60. ultralytics/models/sam/amg.py +50 -4
  61. ultralytics/models/sam/model.py +8 -14
  62. ultralytics/models/sam/modules/decoders.py +18 -21
  63. ultralytics/models/sam/modules/encoders.py +25 -46
  64. ultralytics/models/sam/modules/memory_attention.py +19 -15
  65. ultralytics/models/sam/modules/sam.py +18 -25
  66. ultralytics/models/sam/modules/tiny_encoder.py +19 -29
  67. ultralytics/models/sam/modules/transformer.py +35 -57
  68. ultralytics/models/sam/modules/utils.py +15 -15
  69. ultralytics/models/sam/predict.py +0 -3
  70. ultralytics/models/utils/loss.py +87 -36
  71. ultralytics/models/utils/ops.py +26 -31
  72. ultralytics/models/yolo/classify/predict.py +30 -12
  73. ultralytics/models/yolo/classify/train.py +83 -19
  74. ultralytics/models/yolo/classify/val.py +45 -23
  75. ultralytics/models/yolo/detect/predict.py +29 -19
  76. ultralytics/models/yolo/detect/train.py +90 -23
  77. ultralytics/models/yolo/detect/val.py +150 -29
  78. ultralytics/models/yolo/model.py +1 -2
  79. ultralytics/models/yolo/obb/predict.py +18 -13
  80. ultralytics/models/yolo/obb/train.py +12 -8
  81. ultralytics/models/yolo/obb/val.py +35 -22
  82. ultralytics/models/yolo/pose/predict.py +28 -15
  83. ultralytics/models/yolo/pose/train.py +21 -8
  84. ultralytics/models/yolo/pose/val.py +51 -31
  85. ultralytics/models/yolo/segment/predict.py +27 -16
  86. ultralytics/models/yolo/segment/train.py +11 -8
  87. ultralytics/models/yolo/segment/val.py +110 -29
  88. ultralytics/models/yolo/world/train.py +43 -16
  89. ultralytics/models/yolo/world/train_world.py +61 -36
  90. ultralytics/nn/autobackend.py +28 -14
  91. ultralytics/nn/modules/__init__.py +12 -12
  92. ultralytics/nn/modules/activation.py +12 -3
  93. ultralytics/nn/modules/block.py +587 -84
  94. ultralytics/nn/modules/conv.py +418 -54
  95. ultralytics/nn/modules/head.py +3 -4
  96. ultralytics/nn/modules/transformer.py +320 -34
  97. ultralytics/nn/modules/utils.py +17 -3
  98. ultralytics/nn/tasks.py +226 -79
  99. ultralytics/solutions/ai_gym.py +2 -2
  100. ultralytics/solutions/analytics.py +4 -4
  101. ultralytics/solutions/heatmap.py +4 -4
  102. ultralytics/solutions/instance_segmentation.py +10 -4
  103. ultralytics/solutions/object_blurrer.py +2 -2
  104. ultralytics/solutions/object_counter.py +2 -2
  105. ultralytics/solutions/object_cropper.py +2 -2
  106. ultralytics/solutions/parking_management.py +9 -9
  107. ultralytics/solutions/queue_management.py +1 -1
  108. ultralytics/solutions/region_counter.py +2 -2
  109. ultralytics/solutions/security_alarm.py +7 -7
  110. ultralytics/solutions/solutions.py +7 -4
  111. ultralytics/solutions/speed_estimation.py +2 -2
  112. ultralytics/solutions/streamlit_inference.py +6 -6
  113. ultralytics/solutions/trackzone.py +9 -2
  114. ultralytics/solutions/vision_eye.py +4 -4
  115. ultralytics/trackers/basetrack.py +1 -1
  116. ultralytics/trackers/bot_sort.py +23 -22
  117. ultralytics/trackers/byte_tracker.py +4 -4
  118. ultralytics/trackers/track.py +2 -1
  119. ultralytics/trackers/utils/gmc.py +26 -27
  120. ultralytics/trackers/utils/kalman_filter.py +31 -29
  121. ultralytics/trackers/utils/matching.py +7 -7
  122. ultralytics/utils/__init__.py +37 -35
  123. ultralytics/utils/autobatch.py +5 -5
  124. ultralytics/utils/benchmarks.py +111 -18
  125. ultralytics/utils/callbacks/base.py +3 -3
  126. ultralytics/utils/callbacks/clearml.py +11 -11
  127. ultralytics/utils/callbacks/comet.py +35 -22
  128. ultralytics/utils/callbacks/dvc.py +11 -10
  129. ultralytics/utils/callbacks/hub.py +8 -8
  130. ultralytics/utils/callbacks/mlflow.py +1 -1
  131. ultralytics/utils/callbacks/neptune.py +12 -10
  132. ultralytics/utils/callbacks/raytune.py +1 -1
  133. ultralytics/utils/callbacks/tensorboard.py +6 -6
  134. ultralytics/utils/callbacks/wb.py +16 -16
  135. ultralytics/utils/checks.py +139 -68
  136. ultralytics/utils/dist.py +15 -2
  137. ultralytics/utils/downloads.py +37 -56
  138. ultralytics/utils/files.py +12 -13
  139. ultralytics/utils/instance.py +117 -52
  140. ultralytics/utils/loss.py +28 -33
  141. ultralytics/utils/metrics.py +246 -181
  142. ultralytics/utils/ops.py +65 -61
  143. ultralytics/utils/patches.py +8 -6
  144. ultralytics/utils/plotting.py +72 -59
  145. ultralytics/utils/tal.py +88 -57
  146. ultralytics/utils/torch_utils.py +202 -64
  147. ultralytics/utils/triton.py +13 -3
  148. ultralytics/utils/tuner.py +13 -25
  149. {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/METADATA +2 -2
  150. ultralytics-8.3.90.dist-info/RECORD +250 -0
  151. ultralytics-8.3.88.dist-info/RECORD +0 -250
  152. {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/LICENSE +0 -0
  153. {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/WHEEL +0 -0
  154. {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/entry_points.txt +0 -0
  155. {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/top_level.txt +0 -0
ultralytics/utils/torch_utils.py
@@ -90,16 +90,14 @@ def autocast(enabled: bool, device: str = "cuda"):
     Returns:
         (torch.amp.autocast): The appropriate autocast context manager.
 
-    Note:
+    Notes:
         - For PyTorch versions 1.13 and newer, it uses `torch.amp.autocast`.
         - For older versions, it uses `torch.cuda.autocast`.
 
-    Example:
-        ```python
-        with autocast(amp=True):
-            # Your mixed precision operations here
-            pass
-        ```
+    Examples:
+        >>> with autocast(enabled=True):
+        ...     # Your mixed precision operations here
+        ...     pass
     """
     if TORCH_1_13:
         return torch.amp.autocast(device, enabled=enabled)
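For orientation, a minimal sketch of the behaviour this docstring describes; the fallback call for PyTorch older than 1.13 is assumed from the Notes bullet (the CUDA-specific `torch.cuda.amp.autocast`), not taken from this diff:

    import torch

    def pick_autocast(enabled: bool, device: str = "cuda"):
        # Newer PyTorch exposes a device-agnostic context manager
        if hasattr(torch, "amp") and hasattr(torch.amp, "autocast"):
            return torch.amp.autocast(device, enabled=enabled)
        # Older releases only ship the CUDA-specific variant (assumed fallback)
        return torch.cuda.amp.autocast(enabled=enabled)

    with pick_autocast(enabled=True):
        pass  # mixed precision operations here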
@@ -132,7 +130,7 @@ def get_gpu_info(index):
 
 def select_device(device="", batch=0, newline=False, verbose=True):
     """
-    Selects the appropriate PyTorch device based on the provided arguments.
+    Select the appropriate PyTorch device based on the provided arguments.
 
     The function takes a string specifying the device or a torch.device object and returns a torch.device object
     representing the selected device. The function also validates the number of available devices and raises an
@@ -301,7 +299,18 @@ def fuse_deconv_and_bn(deconv, bn):
 
 
 def model_info(model, detailed=False, verbose=True, imgsz=640):
-    """Print and return detailed model information layer by layer."""
+    """
+    Print and return detailed model information layer by layer.
+
+    Args:
+        model (nn.Module): Model to analyze.
+        detailed (bool, optional): Whether to print detailed layer information. Defaults to False.
+        verbose (bool, optional): Whether to print model information. Defaults to True.
+        imgsz (int | List, optional): Input image size. Defaults to 640.
+
+    Returns:
+        (Tuple[int, int, int, float]): Number of layers, parameters, gradients, and GFLOPs.
+    """
     if not verbose:
         return
     n_p = get_num_params(model)  # number of parameters
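As a quick usage sketch of the new Returns annotation (the variable names and example weights file are illustrative, not part of this diff):

    from ultralytics import YOLO
    from ultralytics.utils.torch_utils import model_info

    net = YOLO("yolo11n.pt").model  # underlying nn.Module
    n_layers, n_params, n_gradients, gflops = model_info(net, detailed=False, verbose=True, imgsz=640)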
@@ -345,17 +354,21 @@ def model_info_for_loggers(trainer):
     """
     Return model info dict with useful model information.
 
-    Example:
+    Args:
+        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing model and validation data.
+
+    Returns:
+        (dict): Dictionary containing model parameters, GFLOPs, and inference speeds.
+
+    Examples:
         YOLOv8n info for loggers
-        ```python
-        results = {
-            "model/parameters": 3151904,
-            "model/GFLOPs": 8.746,
-            "model/speed_ONNX(ms)": 41.244,
-            "model/speed_TensorRT(ms)": 3.211,
-            "model/speed_PyTorch(ms)": 18.755,
-        }
-        ```
+        >>> results = {
+        ...     "model/parameters": 3151904,
+        ...     "model/GFLOPs": 8.746,
+        ...     "model/speed_ONNX(ms)": 41.244,
+        ...     "model/speed_TensorRT(ms)": 3.211,
+        ...     "model/speed_PyTorch(ms)": 18.755,
+        ...}
     """
     if trainer.args.profile:  # profile ONNX and TensorRT times
         from ultralytics.utils.benchmarks import ProfileModels
@@ -372,7 +385,16 @@ def model_info_for_loggers(trainer):
 
 
 def get_flops(model, imgsz=640):
-    """Return a YOLO model's FLOPs."""
+    """
+    Return a YOLO model's FLOPs.
+
+    Args:
+        model (nn.Module): The model to calculate FLOPs for.
+        imgsz (int | List[int], optional): Input image size. Defaults to 640.
+
+    Returns:
+        (float): The model's FLOPs in billions.
+    """
     if not thop:
         return 0.0  # if not installed return 0.0 GFLOPs
 
@@ -396,7 +418,16 @@ def get_flops(model, imgsz=640):
 
 
 def get_flops_with_torch_profiler(model, imgsz=640):
-    """Compute model FLOPs (thop package alternative, but 2-10x slower unfortunately)."""
+    """
+    Compute model FLOPs using torch profiler (alternative to thop package, but 2-10x slower).
+
+    Args:
+        model (nn.Module): The model to calculate FLOPs for.
+        imgsz (int | List[int], optional): Input image size. Defaults to 640.
+
+    Returns:
+        (float): The model's FLOPs in billions.
+    """
     if not TORCH_2_0:  # torch profiler implemented in torch>=2.0
         return 0.0
     model = de_parallel(model)
@@ -434,7 +465,18 @@ def initialize_weights(model):
 
 
 def scale_img(img, ratio=1.0, same_shape=False, gs=32):
-    """Scales and pads an image tensor, optionally maintaining aspect ratio and padding to gs multiple."""
+    """
+    Scales and pads an image tensor, optionally maintaining aspect ratio and padding to gs multiple.
+
+    Args:
+        img (torch.Tensor): Input image tensor.
+        ratio (float, optional): Scaling ratio. Defaults to 1.0.
+        same_shape (bool, optional): Whether to maintain the same shape. Defaults to False.
+        gs (int, optional): Grid size for padding. Defaults to 32.
+
+    Returns:
+        (torch.Tensor): Scaled and padded image tensor.
+    """
     if ratio == 1.0:
         return img
     h, w = img.shape[2:]
@@ -446,7 +488,15 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32):
 
 
 def copy_attr(a, b, include=(), exclude=()):
-    """Copies attributes from object 'b' to object 'a', with options to include/exclude certain attributes."""
+    """
+    Copies attributes from object 'b' to object 'a', with options to include/exclude certain attributes.
+
+    Args:
+        a (object): Destination object to copy attributes to.
+        b (object): Source object to copy attributes from.
+        include (tuple, optional): Attributes to include. If empty, all attributes are included. Defaults to ().
+        exclude (tuple, optional): Attributes to exclude. Defaults to ().
+    """
     for k, v in b.__dict__.items():
         if (len(include) and k not in include) or k.startswith("_") or k in exclude:
             continue
@@ -455,7 +505,12 @@ def copy_attr(a, b, include=(), exclude=()):
 
 
 def get_latest_opset():
-    """Return the second-most recent ONNX opset version supported by this version of PyTorch, adjusted for maturity."""
+    """
+    Return the second-most recent ONNX opset version supported by this version of PyTorch, adjusted for maturity.
+
+    Returns:
+        (int): The ONNX opset version.
+    """
     if TORCH_1_13:
         # If the PyTorch>=1.13, dynamically compute the latest opset minus one using 'symbolic_opset'
         return max(int(k[14:]) for k in vars(torch.onnx) if "symbolic_opset" in k) - 1
@@ -465,27 +520,69 @@ def get_latest_opset():
 
 
 def intersect_dicts(da, db, exclude=()):
-    """Returns a dictionary of intersecting keys with matching shapes, excluding 'exclude' keys, using da values."""
+    """
+    Returns a dictionary of intersecting keys with matching shapes, excluding 'exclude' keys, using da values.
+
+    Args:
+        da (dict): First dictionary.
+        db (dict): Second dictionary.
+        exclude (tuple, optional): Keys to exclude. Defaults to ().
+
+    Returns:
+        (dict): Dictionary of intersecting keys with matching shapes.
+    """
     return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}
 
 
 def is_parallel(model):
-    """Returns True if model is of type DP or DDP."""
+    """
+    Returns True if model is of type DP or DDP.
+
+    Args:
+        model (nn.Module): Model to check.
+
+    Returns:
+        (bool): True if model is DataParallel or DistributedDataParallel.
+    """
     return isinstance(model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel))
 
 
 def de_parallel(model):
-    """De-parallelize a model: returns single-GPU model if model is of type DP or DDP."""
+    """
+    De-parallelize a model: returns single-GPU model if model is of type DP or DDP.
+
+    Args:
+        model (nn.Module): Model to de-parallelize.
+
+    Returns:
+        (nn.Module): De-parallelized model.
+    """
     return model.module if is_parallel(model) else model
 
 
 def one_cycle(y1=0.0, y2=1.0, steps=100):
-    """Returns a lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf."""
+    """
+    Returns a lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf.
+
+    Args:
+        y1 (float, optional): Initial value. Defaults to 0.0.
+        y2 (float, optional): Final value. Defaults to 1.0.
+        steps (int, optional): Number of steps. Defaults to 100.
+
+    Returns:
+        (function): Lambda function for computing the sinusoidal ramp.
+    """
     return lambda x: max((1 - math.cos(x * math.pi / steps)) / 2, 0) * (y2 - y1) + y1
 
 
 def init_seeds(seed=0, deterministic=False):
-    """Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html."""
+    """
+    Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html.
+
+    Args:
+        seed (int, optional): Random seed. Defaults to 0.
+        deterministic (bool, optional): Whether to set deterministic algorithms. Defaults to False.
+    """
     random.seed(seed)
     np.random.seed(seed)
     torch.manual_seed(seed)
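To make the sinusoidal ramp in `one_cycle` concrete, a small worked sketch evaluating the same lambda at a few steps (values rounded; the learning-rate use case is only an assumption):

    import math

    def one_cycle(y1=0.0, y2=1.0, steps=100):
        return lambda x: max((1 - math.cos(x * math.pi / steps)) / 2, 0) * (y2 - y1) + y1

    lf = one_cycle(y1=1.0, y2=0.01, steps=100)  # e.g. a learning-rate multiplier schedule
    print(lf(0), lf(50), lf(100))  # 1.0 at the start, ~0.505 midway, 0.01 at the end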
@@ -514,16 +611,30 @@ def unset_deterministic():
 
 class ModelEMA:
     """
-    Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models. Keeps a moving
-    average of everything in the model state_dict (parameters and buffers).
+    Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models.
 
+    Keeps a moving average of everything in the model state_dict (parameters and buffers).
     For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
 
     To disable EMA set the `enabled` attribute to `False`.
+
+    Attributes:
+        ema (nn.Module): Copy of the model in evaluation mode.
+        updates (int): Number of EMA updates.
+        decay (function): Decay function that determines the EMA weight.
+        enabled (bool): Whether EMA is enabled.
     """
 
     def __init__(self, model, decay=0.9999, tau=2000, updates=0):
-        """Initialize EMA for 'model' with given arguments."""
+        """
+        Initialize EMA for 'model' with given arguments.
+
+        Args:
+            model (nn.Module): Model to create EMA for.
+            decay (float, optional): Maximum EMA decay rate. Defaults to 0.9999.
+            tau (int, optional): EMA decay time constant. Defaults to 2000.
+            updates (int, optional): Initial number of updates. Defaults to 0.
+        """
         self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA
         self.updates = updates  # number of EMA updates
         self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # decay exponential ramp (to help early epochs)
@@ -532,7 +643,12 @@ class ModelEMA:
         self.enabled = True
 
     def update(self, model):
-        """Update EMA parameters."""
+        """
+        Update EMA parameters.
+
+        Args:
+            model (nn.Module): Model to update EMA from.
+        """
         if self.enabled:
             self.updates += 1
             d = self.decay(self.updates)
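A brief numeric sketch of the decay ramp used by `update`; the per-parameter blend `v = d * v + (1 - d) * model_value` is the usual EMA rule implied by the class description, not shown in this hunk:

    import math

    decay, tau = 0.9999, 2000
    d = lambda x: decay * (1 - math.exp(-x / tau))  # same ramp as self.decay
    print(round(d(100), 4), round(d(2000), 4), round(d(10000), 4))  # 0.0488, 0.6321, 0.9932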
@@ -545,7 +661,14 @@ class ModelEMA:
                 # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype}, model {msd[k].dtype}'
 
     def update_attr(self, model, include=(), exclude=("process_group", "reducer")):
-        """Updates attributes and saves stripped model with optimizer removed."""
+        """
+        Updates attributes and saves stripped model with optimizer removed.
+
+        Args:
+            model (nn.Module): Model to update attributes from.
+            include (tuple, optional): Attributes to include. Defaults to ().
+            exclude (tuple, optional): Attributes to exclude. Defaults to ("process_group", "reducer").
+        """
         if self.enabled:
             copy_attr(self.ema, model, include, exclude)
 
@@ -555,24 +678,18 @@ def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "", updates: dict
     Strip optimizer from 'f' to finalize training, optionally save as 's'.
 
     Args:
-        f (str): file path to model to strip the optimizer from. Default is 'best.pt'.
-        s (str): file path to save the model with stripped optimizer to. If not provided, 'f' will be overwritten.
-        updates (dict): a dictionary of updates to overlay onto the checkpoint before saving.
+        f (str | Path): File path to model to strip the optimizer from. Defaults to 'best.pt'.
+        s (str, optional): File path to save the model with stripped optimizer to. If not provided, 'f' will be overwritten.
+        updates (dict, optional): A dictionary of updates to overlay onto the checkpoint before saving.
 
     Returns:
         (dict): The combined checkpoint dictionary.
 
-    Example:
-        ```python
-        from pathlib import Path
-        from ultralytics.utils.torch_utils import strip_optimizer
-
-        for f in Path("path/to/model/checkpoints").rglob("*.pt"):
-            strip_optimizer(f)
-        ```
-
-    Note:
-        Use `ultralytics.nn.torch_safe_load` for missing modules with `x = torch_safe_load(f)[0]`
+    Examples:
+        >>> from pathlib import Path
+        >>> from ultralytics.utils.torch_utils import strip_optimizer
+        >>> for f in Path("path/to/model/checkpoints").rglob("*.pt"):
+        >>>     strip_optimizer(f)
     """
     try:
         x = torch.load(f, map_location=torch.device("cpu"))
@@ -620,7 +737,11 @@ def convert_optimizer_state_dict_to_fp16(state_dict):
     """
     Converts the state_dict of a given optimizer to FP16, focusing on the 'state' key for tensor conversions.
 
-    This method aims to reduce storage size without altering 'param_groups' as they contain non-tensor data.
+    Args:
+        state_dict (dict): Optimizer state dictionary.
+
+    Returns:
+        (dict): Converted optimizer state dictionary with FP16 tensors.
     """
     for state in state_dict["state"].values():
         for k, v in state.items():
@@ -660,15 +781,22 @@ def profile(input, ops, n=10, device=None, max_num_obj=0):
     """
     Ultralytics speed, memory and FLOPs profiler.
 
-    Example:
-        ```python
-        from ultralytics.utils.torch_utils import profile
+    Args:
+        input (torch.Tensor | List[torch.Tensor]): Input tensor(s) to profile.
+        ops (nn.Module | List[nn.Module]): Model or list of operations to profile.
+        n (int, optional): Number of iterations to average. Defaults to 10.
+        device (str | torch.device, optional): Device to profile on. Defaults to None.
+        max_num_obj (int, optional): Maximum number of objects for simulation. Defaults to 0.
+
+    Returns:
+        (List): Profile results for each operation.
 
-        input = torch.randn(16, 3, 640, 640)
-        m1 = lambda x: x * torch.sigmoid(x)
-        m2 = nn.SiLU()
-        profile(input, [m1, m2], n=100)  # profile over 100 iterations
-        ```
+    Examples:
+        >>> from ultralytics.utils.torch_utils import profile
+        >>> input = torch.randn(16, 3, 640, 640)
+        >>> m1 = lambda x: x * torch.sigmoid(x)
+        >>> m2 = nn.SiLU()
+        >>> profile(input, [m1, m2], n=100)  # profile over 100 iterations
     """
     results = []
     if not isinstance(device, torch.device):
@@ -731,7 +859,15 @@ def profile(input, ops, n=10, device=None, max_num_obj=0):
 
 
 class EarlyStopping:
-    """Early stopping class that stops training when a specified number of epochs have passed without improvement."""
+    """
+    Early stopping class that stops training when a specified number of epochs have passed without improvement.
+
+    Attributes:
+        best_fitness (float): Best fitness value observed.
+        best_epoch (int): Epoch where best fitness was observed.
+        patience (int): Number of epochs to wait after fitness stops improving before stopping.
+        possible_stop (bool): Flag indicating if stopping may occur next epoch.
+    """
 
     def __init__(self, patience=50):
         """
@@ -780,11 +916,12 @@ class FXModel(nn.Module):
     """
     A custom model class for torch.fx compatibility.
 
-    This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph manipulation.
-    It copies attributes from an existing model and explicitly sets the model attribute to ensure proper copying.
+    This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph
+    manipulation. It copies attributes from an existing model and explicitly sets the model attribute to ensure proper
+    copying.
 
-    Args:
-        model (torch.nn.Module): The original model to wrap for torch.fx compatibility.
+    Attributes:
+        model (nn.Module): The original model's layers.
     """
 
     def __init__(self, model):
@@ -792,7 +929,7 @@ class FXModel(nn.Module):
         Initialize the FXModel.
 
         Args:
-            model (torch.nn.Module): The original model to wrap for torch.fx compatibility.
+            model (nn.Module): The original model to wrap for torch.fx compatibility.
         """
         super().__init__()
         copy_attr(self, model)
@@ -803,7 +940,8 @@ class FXModel(nn.Module):
         """
         Forward pass through the model.
 
-        This method performs the forward pass through the model, handling the dependencies between layers and saving intermediate outputs.
+        This method performs the forward pass through the model, handling the dependencies between layers and saving
+        intermediate outputs.
 
         Args:
             x (torch.Tensor): The input tensor to the model.
ultralytics/utils/triton.py
@@ -10,6 +10,9 @@ class TritonRemoteModel:
     """
     Client for interacting with a remote Triton Inference Server model.
 
+    This class provides a convenient interface for sending inference requests to a Triton Inference Server
+    and processing the responses.
+
     Attributes:
         endpoint (str): The name of the model on the Triton server.
         url (str): The URL of the Triton server.
@@ -20,6 +23,13 @@ class TritonRemoteModel:
         np_input_formats (List[type]): The numpy data types of the model inputs.
         input_names (List[str]): The names of the model inputs.
         output_names (List[str]): The names of the model outputs.
+        metadata: The metadata associated with the model.
+
+    Examples:
+        Initialize a Triton client with HTTP
+        >>> model = TritonRemoteModel(url="localhost:8000", endpoint="yolov8", scheme="http")
+        Make inference with numpy arrays
+        >>> outputs = model(np.random.rand(1, 3, 640, 640).astype(np.float32))
     """
 
     def __init__(self, url: str, endpoint: str = "", scheme: str = ""):
@@ -27,7 +37,7 @@ class TritonRemoteModel:
         Initialize the TritonRemoteModel.
 
         Arguments may be provided individually or parsed from a collective 'url' argument of the form
-            <scheme>://<netloc>/<endpoint>/<task_name>
+        <scheme>://<netloc>/<endpoint>/<task_name>
 
         Args:
             url (str): The URL of the Triton server.
@@ -73,10 +83,10 @@ class TritonRemoteModel:
         Call the model with the given inputs.
 
        Args:
-            *inputs (np.ndarray): Input data to the model.
+            *inputs (np.ndarray): Input data to the model.
 
         Returns:
-            (List[np.ndarray]): Model outputs.
+            (List[np.ndarray]): Model outputs with the same dtype as the input.
         """
         infer_inputs = []
         input_format = inputs[0].dtype
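Since the constructor docstring above mentions the collective `<scheme>://<netloc>/<endpoint>` form, here is one hedged way such a URL could be split with the standard library; the actual parsing inside `__init__` is not shown in this diff:

    from urllib.parse import urlsplit

    parts = urlsplit("http://localhost:8000/yolov8")
    scheme, netloc = parts.scheme, parts.netloc      # "http", "localhost:8000"
    endpoint = parts.path.strip("/").split("/")[0]   # "yolov8"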
ultralytics/utils/tuner.py
@@ -13,29 +13,25 @@ def run_ray_tune(
     **train_args,
 ):
     """
-    Runs hyperparameter tuning using Ray Tune.
+    Run hyperparameter tuning using Ray Tune.
 
     Args:
         model (YOLO): Model to run the tuner on.
-        space (dict, optional): The hyperparameter search space. Defaults to None.
-        grace_period (int, optional): The grace period in epochs of the ASHA scheduler. Defaults to 10.
-        gpu_per_trial (int, optional): The number of GPUs to allocate per trial. Defaults to None.
-        max_samples (int, optional): The maximum number of trials to run. Defaults to 10.
-        train_args (dict, optional): Additional arguments to pass to the `train()` method. Defaults to {}.
+        space (dict, optional): The hyperparameter search space.
+        grace_period (int, optional): The grace period in epochs of the ASHA scheduler.
+        gpu_per_trial (int, optional): The number of GPUs to allocate per trial.
+        max_samples (int, optional): The maximum number of trials to run.
+        **train_args (Any): Additional arguments to pass to the `train()` method.
 
     Returns:
         (dict): A dictionary containing the results of the hyperparameter search.
 
-    Example:
-        ```python
-        from ultralytics import YOLO
+    Examples:
+        >>> from ultralytics import YOLO
+        >>> model = YOLO("yolo11n.pt")  # Load a YOLO11n model
 
-        # Load a YOLO11n model
-        model = YOLO("yolo11n.pt")
-
-        # Start tuning hyperparameters for YOLO11n training on the COCO8 dataset
-        result_grid = model.tune(data="coco8.yaml", use_ray=True)
-        ```
+        Start tuning hyperparameters for YOLO11n training on the COCO8 dataset
+        >>> result_grid = model.tune(data="coco8.yaml", use_ray=True)
     """
     LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")
     if train_args is None:
@@ -65,7 +61,7 @@ def run_ray_tune(
         "lr0": tune.uniform(1e-5, 1e-1),
         "lrf": tune.uniform(0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
         "momentum": tune.uniform(0.6, 0.98),  # SGD momentum/Adam beta1
-        "weight_decay": tune.uniform(0.0, 0.001),  # optimizer weight decay 5e-4
+        "weight_decay": tune.uniform(0.0, 0.001),  # optimizer weight decay
         "warmup_epochs": tune.uniform(0.0, 5.0),  # warmup epochs (fractions ok)
         "warmup_momentum": tune.uniform(0.0, 0.95),  # warmup initial momentum
         "box": tune.uniform(0.02, 0.2),  # box loss gain
@@ -91,15 +87,7 @@ def run_ray_tune(
     model_in_store = ray.put(model)
 
     def _tune(config):
-        """
-        Trains the YOLO model with the specified hyperparameters and additional arguments.
-
-        Args:
-            config (dict): A dictionary of hyperparameters to use for training.
-
-        Returns:
-            None
-        """
+        """Train the YOLO model with the specified hyperparameters."""
         model_to_train = ray.get(model_in_store)  # get the model from ray store for tuning
         model_to_train.reset_callbacks()
         config.update(train_args)
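Tying the search-space hunk and the revised Args together, a hedged usage sketch that passes a custom `space` directly to `run_ray_tune`; the specific ranges and the extra `data`/`epochs` keyword arguments (forwarded via `**train_args`) are illustrative only:

    from ray import tune
    from ultralytics import YOLO
    from ultralytics.utils.tuner import run_ray_tune

    model = YOLO("yolo11n.pt")
    custom_space = {"lr0": tune.uniform(1e-4, 1e-2), "momentum": tune.uniform(0.85, 0.95)}
    results = run_ray_tune(model, space=custom_space, max_samples=5, data="coco8.yaml", epochs=10)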
{ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: ultralytics
-Version: 8.3.88
+Version: 8.3.90
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -65,7 +65,7 @@ Requires-Dist: coremltools>=7.0; (platform_system != "Windows" and python_versio
 Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.11") and extra == "export"
 Requires-Dist: openvino!=2025.0.0,>=2024.0.0; extra == "export"
 Requires-Dist: tensorflow>=2.0.0; extra == "export"
-Requires-Dist: tensorflowjs>=3.9.0; extra == "export"
+Requires-Dist: tensorflowjs>=4.0.0; extra == "export"
 Requires-Dist: tensorstore>=0.1.63; (platform_machine == "aarch64" and python_version >= "3.9") and extra == "export"
 Requires-Dist: keras; extra == "export"
 Requires-Dist: flatbuffers<100,>=23.5.26; platform_machine == "aarch64" and extra == "export"