ultralytics 8.2.81__py3-none-any.whl → 8.2.83__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics might be problematic.

Files changed (97)
  1. tests/test_solutions.py +0 -4
  2. ultralytics/__init__.py +1 -1
  3. ultralytics/cfg/__init__.py +21 -21
  4. ultralytics/data/annotator.py +1 -1
  5. ultralytics/data/augment.py +58 -58
  6. ultralytics/data/base.py +3 -3
  7. ultralytics/data/converter.py +7 -8
  8. ultralytics/data/explorer/explorer.py +7 -23
  9. ultralytics/data/loaders.py +2 -2
  10. ultralytics/data/split_dota.py +11 -3
  11. ultralytics/data/utils.py +6 -10
  12. ultralytics/engine/exporter.py +2 -4
  13. ultralytics/engine/model.py +47 -47
  14. ultralytics/engine/predictor.py +1 -1
  15. ultralytics/engine/results.py +28 -28
  16. ultralytics/engine/trainer.py +11 -8
  17. ultralytics/engine/tuner.py +7 -8
  18. ultralytics/engine/validator.py +3 -5
  19. ultralytics/hub/__init__.py +5 -5
  20. ultralytics/hub/auth.py +6 -2
  21. ultralytics/hub/session.py +3 -5
  22. ultralytics/models/fastsam/model.py +13 -10
  23. ultralytics/models/fastsam/predict.py +2 -2
  24. ultralytics/models/fastsam/utils.py +0 -1
  25. ultralytics/models/nas/model.py +4 -4
  26. ultralytics/models/nas/predict.py +1 -2
  27. ultralytics/models/nas/val.py +1 -1
  28. ultralytics/models/rtdetr/predict.py +1 -1
  29. ultralytics/models/rtdetr/train.py +1 -1
  30. ultralytics/models/rtdetr/val.py +1 -1
  31. ultralytics/models/sam/model.py +11 -11
  32. ultralytics/models/sam/modules/decoders.py +7 -4
  33. ultralytics/models/sam/modules/sam.py +9 -1
  34. ultralytics/models/sam/modules/tiny_encoder.py +1 -1
  35. ultralytics/models/sam/modules/transformer.py +0 -2
  36. ultralytics/models/sam/modules/utils.py +1 -1
  37. ultralytics/models/sam/predict.py +10 -10
  38. ultralytics/models/utils/loss.py +29 -17
  39. ultralytics/models/utils/ops.py +1 -5
  40. ultralytics/models/yolo/classify/predict.py +1 -1
  41. ultralytics/models/yolo/classify/train.py +1 -1
  42. ultralytics/models/yolo/classify/val.py +1 -1
  43. ultralytics/models/yolo/detect/predict.py +1 -1
  44. ultralytics/models/yolo/detect/train.py +1 -1
  45. ultralytics/models/yolo/detect/val.py +1 -1
  46. ultralytics/models/yolo/model.py +6 -2
  47. ultralytics/models/yolo/obb/predict.py +1 -1
  48. ultralytics/models/yolo/obb/train.py +1 -1
  49. ultralytics/models/yolo/obb/val.py +2 -2
  50. ultralytics/models/yolo/pose/predict.py +1 -1
  51. ultralytics/models/yolo/pose/train.py +1 -1
  52. ultralytics/models/yolo/pose/val.py +1 -1
  53. ultralytics/models/yolo/segment/predict.py +1 -1
  54. ultralytics/models/yolo/segment/train.py +1 -1
  55. ultralytics/models/yolo/segment/val.py +1 -1
  56. ultralytics/models/yolo/world/train.py +1 -1
  57. ultralytics/nn/autobackend.py +2 -2
  58. ultralytics/nn/modules/__init__.py +2 -2
  59. ultralytics/nn/modules/block.py +8 -20
  60. ultralytics/nn/modules/conv.py +1 -3
  61. ultralytics/nn/modules/head.py +16 -31
  62. ultralytics/nn/modules/transformer.py +0 -1
  63. ultralytics/nn/modules/utils.py +0 -1
  64. ultralytics/nn/tasks.py +11 -9
  65. ultralytics/solutions/__init__.py +1 -0
  66. ultralytics/solutions/ai_gym.py +0 -2
  67. ultralytics/solutions/analytics.py +1 -6
  68. ultralytics/solutions/heatmap.py +0 -1
  69. ultralytics/solutions/object_counter.py +0 -2
  70. ultralytics/solutions/queue_management.py +0 -2
  71. ultralytics/trackers/basetrack.py +1 -1
  72. ultralytics/trackers/byte_tracker.py +2 -2
  73. ultralytics/trackers/utils/gmc.py +5 -5
  74. ultralytics/trackers/utils/kalman_filter.py +1 -1
  75. ultralytics/trackers/utils/matching.py +1 -5
  76. ultralytics/utils/__init__.py +137 -24
  77. ultralytics/utils/autobatch.py +7 -4
  78. ultralytics/utils/benchmarks.py +6 -14
  79. ultralytics/utils/callbacks/base.py +0 -1
  80. ultralytics/utils/callbacks/comet.py +0 -1
  81. ultralytics/utils/callbacks/tensorboard.py +0 -1
  82. ultralytics/utils/checks.py +15 -18
  83. ultralytics/utils/downloads.py +6 -7
  84. ultralytics/utils/files.py +3 -4
  85. ultralytics/utils/instance.py +17 -7
  86. ultralytics/utils/metrics.py +16 -16
  87. ultralytics/utils/ops.py +8 -8
  88. ultralytics/utils/plotting.py +25 -35
  89. ultralytics/utils/tal.py +27 -18
  90. ultralytics/utils/torch_utils.py +12 -13
  91. ultralytics/utils/tuner.py +2 -3
  92. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/METADATA +4 -3
  93. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/RECORD +97 -97
  94. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/WHEEL +1 -1
  95. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/LICENSE +0 -0
  96. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/entry_points.txt +0 -0
  97. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/top_level.txt +0 -0
ultralytics/engine/results.py CHANGED
@@ -143,7 +143,7 @@ class BaseTensor(SimpleClass):
 
         Examples:
             >>> base_tensor = BaseTensor(torch.randn(3, 4), orig_shape=(480, 640))
-            >>> cuda_tensor = base_tensor.to('cuda')
+            >>> cuda_tensor = base_tensor.to("cuda")
             >>> float16_tensor = base_tensor.to(dtype=torch.float16)
         """
         return self.__class__(torch.as_tensor(self.data).to(*args, **kwargs), self.orig_shape)
@@ -223,7 +223,7 @@ class Results(SimpleClass):
         >>> for result in results:
         ...     print(result.boxes)  # Print detection boxes
         ...     result.show()  # Display the annotated image
-        ...     result.save(filename='result.jpg')  # Save annotated image
+        ...     result.save(filename="result.jpg")  # Save annotated image
     """
 
     def __init__(
@@ -280,7 +280,7 @@ class Results(SimpleClass):
             (Results): A new Results object containing the specified subset of inference results.
 
         Examples:
-            >>> results = model('path/to/image.jpg')  # Perform inference
+            >>> results = model("path/to/image.jpg")  # Perform inference
             >>> single_result = results[0]  # Get the first result
             >>> subset_results = results[1:4]  # Get a slice of results
         """
@@ -319,7 +319,7 @@ class Results(SimpleClass):
             obb (torch.Tensor | None): A tensor of shape (N, 5) containing oriented bounding box coordinates.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
            >>> new_boxes = torch.tensor([[100, 100, 200, 200, 0.9, 0]])
             >>> results[0].update(boxes=new_boxes)
         """
@@ -370,7 +370,7 @@ class Results(SimpleClass):
             (Results): A new Results object with all tensor attributes on CPU memory.
 
         Examples:
-            >>> results = model('path/to/image.jpg')  # Perform inference
+            >>> results = model("path/to/image.jpg")  # Perform inference
             >>> cpu_result = results[0].cpu()  # Move the first result to CPU
             >>> print(cpu_result.boxes.device)  # Output: cpu
         """
@@ -384,7 +384,7 @@ class Results(SimpleClass):
             (Results): A new Results object with all tensors converted to numpy arrays.
 
         Examples:
-            >>> results = model('path/to/image.jpg')
+            >>> results = model("path/to/image.jpg")
             >>> numpy_result = results[0].numpy()
             >>> type(numpy_result.boxes.data)
             <class 'numpy.ndarray'>
@@ -488,7 +488,7 @@ class Results(SimpleClass):
             (np.ndarray): Annotated image as a numpy array.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> for result in results:
             ...     im = result.plot()
             ...     im.show()
@@ -578,7 +578,7 @@ class Results(SimpleClass):
             **kwargs (Any): Arbitrary keyword arguments to be passed to the `plot()` method.
 
         Examples:
-            >>> results = model('path/to/image.jpg')
+            >>> results = model("path/to/image.jpg")
             >>> results[0].show()  # Display the first result
             >>> for result in results:
             ...     result.show()  # Display all results
@@ -599,12 +599,12 @@ class Results(SimpleClass):
             **kwargs (Any): Arbitrary keyword arguments to be passed to the `plot` method.
 
         Examples:
-            >>> results = model('path/to/image.jpg')
+            >>> results = model("path/to/image.jpg")
             >>> for result in results:
-            ...     result.save('annotated_image.jpg')
+            ...     result.save("annotated_image.jpg")
             >>> # Or with custom plot arguments
             >>> for result in results:
-            ...     result.save('annotated_image.jpg', conf=False, line_width=2)
+            ...     result.save("annotated_image.jpg", conf=False, line_width=2)
         """
         if not filename:
             filename = f"results_{Path(self.path).name}"
@@ -623,7 +623,7 @@ class Results(SimpleClass):
             number of detections per class. For classification tasks, it includes the top 5 class probabilities.
 
         Examples:
-            >>> results = model('path/to/image.jpg')
+            >>> results = model("path/to/image.jpg")
             >>> for result in results:
             ...     print(result.verbose())
             2 persons, 1 car, 3 traffic lights,
@@ -660,7 +660,7 @@ class Results(SimpleClass):
 
         Examples:
             >>> from ultralytics import YOLO
-            >>> model = YOLO('yolov8n.pt')
+            >>> model = YOLO("yolov8n.pt")
             >>> results = model("path/to/image.jpg")
             >>> for result in results:
             ...     result.save_txt("output.txt")
@@ -757,7 +757,7 @@ class Results(SimpleClass):
             task type (classification or detection) and available information (boxes, masks, keypoints).
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> summary = results[0].summary()
             >>> print(summary)
         """
@@ -919,7 +919,7 @@ class Boxes(BaseTensor):
                 coordinates in [x1, y1, x2, y2] format, where n is the number of boxes.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> boxes = results[0].boxes
             >>> xyxy = boxes.xyxy
             >>> print(xyxy)
@@ -953,7 +953,7 @@ class Boxes(BaseTensor):
                 The shape is (N,), where N is the number of boxes.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> boxes = results[0].boxes
             >>> class_ids = boxes.cls
             >>> print(class_ids)  # tensor([0., 2., 1.])
@@ -970,7 +970,7 @@ class Boxes(BaseTensor):
                 otherwise None. Shape is (N,) where N is the number of boxes.
 
         Examples:
-            >>> results = model.track('path/to/video.mp4')
+            >>> results = model.track("path/to/video.mp4")
             >>> for result in results:
             ...     boxes = result.boxes
             ...     if boxes.is_track:
@@ -1116,7 +1116,7 @@ class Masks(BaseTensor):
                 mask contour.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> masks = results[0].masks
             >>> normalized_coords = masks.xyn
             >>> print(normalized_coords[0])  # Normalized coordinates of the first mask
@@ -1141,7 +1141,7 @@ class Masks(BaseTensor):
                 number of points in the segment.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> masks = results[0].masks
             >>> xy_coords = masks.xy
             >>> print(len(xy_coords))  # Number of masks
@@ -1223,7 +1223,7 @@ class Keypoints(BaseTensor):
                 the number of detections and K is the number of keypoints per detection.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> keypoints = results[0].keypoints
             >>> xy = keypoints.xy
             >>> print(xy.shape)  # (N, K, 2)
@@ -1388,7 +1388,7 @@ class Probs(BaseTensor):
             (torch.Tensor | numpy.ndarray): A tensor containing the confidence score of the top 1 class.
 
         Examples:
-            >>> results = model('image.jpg')  # classify an image
+            >>> results = model("image.jpg")  # classify an image
             >>> probs = results[0].probs  # get classification probabilities
             >>> top1_confidence = probs.top1conf  # get confidence of top 1 class
             >>> print(f"Top 1 class confidence: {top1_confidence.item():.4f}")
@@ -1410,7 +1410,7 @@ class Probs(BaseTensor):
                 top 5 predicted classes, sorted in descending order of probability.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> probs = results[0].probs
             >>> top5_conf = probs.top5conf
             >>> print(top5_conf)  # Prints confidence scores for top 5 classes
@@ -1497,7 +1497,7 @@ class OBB(BaseTensor):
                 [x_center, y_center, width, height, rotation]. The shape is (N, 5) where N is the number of boxes.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> obb = results[0].obb
             >>> xywhr = obb.xywhr
             >>> print(xywhr.shape)
@@ -1518,7 +1518,7 @@ class OBB(BaseTensor):
                 for N detections, where each score is in the range [0, 1].
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> obb_result = results[0].obb
             >>> confidence_scores = obb_result.conf
             >>> print(confidence_scores)
@@ -1535,7 +1535,7 @@ class OBB(BaseTensor):
                 bounding box. The shape is (N,), where N is the number of boxes.
 
         Examples:
-            >>> results = model('image.jpg')
+            >>> results = model("image.jpg")
             >>> result = results[0]
             >>> obb = result.obb
             >>> class_values = obb.cls
@@ -1553,7 +1553,7 @@ class OBB(BaseTensor):
                 oriented bounding box. Returns None if tracking IDs are not available.
 
         Examples:
-            >>> results = model('image.jpg', tracker=True)  # Run inference with tracking
+            >>> results = model("image.jpg", tracker=True)  # Run inference with tracking
             >>> for result in results:
             ...     if result.obb is not None:
             ...         track_ids = result.obb.id
@@ -1620,8 +1620,8 @@ class OBB(BaseTensor):
         Examples:
             >>> import torch
             >>> from ultralytics import YOLO
-            >>> model = YOLO('yolov8n-obb.pt')
-            >>> results = model('path/to/image.jpg')
+            >>> model = YOLO("yolov8n-obb.pt")
+            >>> results = model("path/to/image.jpg")
             >>> for result in results:
             ...     obb = result.obb
             ...     if obb is not None:
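
The results.py hunks above are all docstring-style updates (single quotes to double quotes); no behavior changes. As a sanity check that the documented workflow reads the same either way, here is a minimal sketch of the API the docstrings describe, assuming a local yolov8n.pt checkpoint and an illustrative image path:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # assumed local checkpoint
results = model("path/to/image.jpg")  # hypothetical image path
for result in results:
    print(result.boxes)  # detection boxes
    result.save(filename="result.jpg")  # save annotated image
```
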
ultralytics/engine/trainer.py CHANGED
@@ -56,8 +56,6 @@ from ultralytics.utils.torch_utils import (
 
 class BaseTrainer:
     """
-    BaseTrainer.
-
     A base class for creating trainers.
 
     Attributes:
@@ -230,7 +228,6 @@ class BaseTrainer:
 
     def _setup_train(self, world_size):
         """Builds dataloaders and optimizer on correct rank process."""
-
         # Model
         self.run_callbacks("on_pretrain_routine_start")
         ckpt = self.setup_model()
@@ -478,12 +475,16 @@ class BaseTrainer:
         torch.cuda.empty_cache()
         self.run_callbacks("teardown")
 
+    def read_results_csv(self):
+        """Read results.csv into a dict using pandas."""
+        import pandas as pd  # scope for faster 'import ultralytics'
+
+        return {k.strip(): v for k, v in pd.read_csv(self.csv).to_dict(orient="list").items()}
+
     def save_model(self):
         """Save model training checkpoints with additional metadata."""
         import io
 
-        import pandas as pd  # scope for faster 'import ultralytics'
-
         # Serialize ckpt to a byte buffer once (faster than repeated torch.save() calls)
         buffer = io.BytesIO()
         torch.save(
@@ -496,7 +497,7 @@ class BaseTrainer:
                 "optimizer": convert_optimizer_state_dict_to_fp16(deepcopy(self.optimizer.state_dict())),
                 "train_args": vars(self.args),  # save as dict
                 "train_metrics": {**self.metrics, **{"fitness": self.fitness}},
-                "train_results": {k.strip(): v for k, v in pd.read_csv(self.csv).to_dict(orient="list").items()},
+                "train_results": self.read_results_csv(),
                 "date": datetime.now().isoformat(),
                 "version": __version__,
                 "license": "AGPL-3.0 (https://ultralytics.com/license)",
@@ -636,7 +637,7 @@ class BaseTrainer:
         pass
 
     def on_plot(self, name, data=None):
-        """Registers plots (e.g. to be consumed in callbacks)"""
+        """Registers plots (e.g. to be consumed in callbacks)."""
         path = Path(name)
         self.plots[path] = {"data": data, "timestamp": time.time()}
 
@@ -646,6 +647,9 @@ class BaseTrainer:
             if f.exists():
                 strip_optimizer(f)  # strip optimizers
                 if f is self.best:
+                    if self.last.is_file():  # update best.pt train_metrics from last.pt
+                        k = "train_results"
+                        torch.save({**torch.load(self.best), **{k: torch.load(self.last)[k]}}, self.best)
                     LOGGER.info(f"\nValidating {f}...")
                     self.validator.args.plots = self.args.plots
                     self.metrics = self.validator(model=f)
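
With this hunk, final_eval refreshes the train_results entry in best.pt from last.pt before validating, so best.pt carries the full training history rather than a stale snapshot. A minimal sketch of that merge, assuming both checkpoint files exist locally (paths are illustrative stand-ins for self.best / self.last):

```python
import torch

best, last = "best.pt", "last.pt"  # hypothetical checkpoint paths
k = "train_results"
merged = {**torch.load(best), **{k: torch.load(last)[k]}}  # overwrite the stale key only
torch.save(merged, best)
```
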
@@ -732,7 +736,6 @@ class BaseTrainer:
         Returns:
             (torch.optim.Optimizer): The constructed optimizer.
         """
-
         g = [], [], []  # optimizer parameter groups
         bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k)  # normalization layers, i.e. BatchNorm2d()
         if name == "auto":
ultralytics/engine/tuner.py CHANGED
@@ -1,7 +1,7 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """
-This module provides functionalities for hyperparameter tuning of the Ultralytics YOLO models for object detection,
-instance segmentation, image classification, pose estimation, and multi-object tracking.
+Module provides functionalities for hyperparameter tuning of the Ultralytics YOLO models for object detection, instance
+segmentation, image classification, pose estimation, and multi-object tracking.
 
 Hyperparameter tuning is the process of systematically searching for the optimal set of hyperparameters
 that yield the best model performance. This is particularly crucial in deep learning models like YOLO,
@@ -12,8 +12,8 @@ Example:
     ```python
     from ultralytics import YOLO
 
-    model = YOLO('yolov8n.pt')
-    model.tune(data='coco8.yaml', epochs=10, iterations=300, optimizer='AdamW', plots=False, save=False, val=False)
+    model = YOLO("yolov8n.pt")
+    model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
     ```
 """
 
@@ -54,15 +54,15 @@ class Tuner:
         ```python
         from ultralytics import YOLO
 
-        model = YOLO('yolov8n.pt')
-        model.tune(data='coco8.yaml', epochs=10, iterations=300, optimizer='AdamW', plots=False, save=False, val=False)
+        model = YOLO("yolov8n.pt")
+        model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
         ```
 
         Tune with custom search space.
         ```python
         from ultralytics import YOLO
 
-        model = YOLO('yolov8n.pt')
+        model = YOLO("yolov8n.pt")
         model.tune(space={key1: val1, key2: val2})  # custom search space dictionary
         ```
     """
@@ -176,7 +176,6 @@ class Tuner:
         The method utilizes the `self.tune_csv` Path object to read and log hyperparameters and fitness scores.
         Ensure this path is set correctly in the Tuner instance.
         """
-
         t0 = time.time()
         best_save_dir, best_metrics = None, None
         (self.tune_dir / "weights").mkdir(parents=True, exist_ok=True)
ultralytics/engine/validator.py CHANGED
@@ -104,9 +104,7 @@ class BaseValidator:
 
     @smart_inference_mode()
     def __call__(self, trainer=None, model=None):
-        """Supports validation of a pre-trained model if passed or a model being trained if trainer is passed (trainer
-        gets priority).
-        """
+        """Executes validation process, running inference on dataloader and computing performance metrics."""
         self.training = trainer is not None
         augment = self.args.augment and (not self.training)
         if self.training:
@@ -280,7 +278,7 @@ class BaseValidator:
         return batch
 
     def postprocess(self, preds):
-        """Describes and summarizes the purpose of 'postprocess()' but no details mentioned."""
+        """Preprocesses the predictions."""
         return preds
 
     def init_metrics(self, model):
@@ -317,7 +315,7 @@ class BaseValidator:
         return []
 
     def on_plot(self, name, data=None):
-        """Registers plots (e.g. to be consumed in callbacks)"""
+        """Registers plots (e.g. to be consumed in callbacks)."""
         self.plots[Path(name)] = {"data": data, "timestamp": time.time()}
 
     # TODO: may need to put these following functions into callback
ultralytics/hub/__init__.py CHANGED
@@ -136,11 +136,11 @@ def check_dataset(path: str, task: str) -> None:
         ```python
         from ultralytics.hub import check_dataset

-        check_dataset('path/to/coco8.zip', task='detect')  # detect dataset
-        check_dataset('path/to/coco8-seg.zip', task='segment')  # segment dataset
-        check_dataset('path/to/coco8-pose.zip', task='pose')  # pose dataset
-        check_dataset('path/to/dota8.zip', task='obb')  # OBB dataset
-        check_dataset('path/to/imagenet10.zip', task='classify')  # classification dataset
+        check_dataset("path/to/coco8.zip", task="detect")  # detect dataset
+        check_dataset("path/to/coco8-seg.zip", task="segment")  # segment dataset
+        check_dataset("path/to/coco8-pose.zip", task="pose")  # pose dataset
+        check_dataset("path/to/dota8.zip", task="obb")  # OBB dataset
+        check_dataset("path/to/imagenet10.zip", task="classify")  # classification dataset
         ```
     """
     HUBDatasetStats(path=path, task=task).get_json()
ultralytics/hub/auth.py CHANGED
@@ -27,10 +27,14 @@ class Auth:
 
     def __init__(self, api_key="", verbose=False):
         """
-        Initialize the Auth class with an optional API key.
+        Initialize Auth class and authenticate user.
+
+        Handles API key validation, Google Colab authentication, and new key requests. Updates SETTINGS upon successful
+        authentication.
 
         Args:
-            api_key (str, optional): May be an API key or a combination API key and model ID, i.e. key_id
+            api_key (str): API key or combined key_id format.
+            verbose (bool): Enable verbose logging.
         """
         # Split the input API key in case it contains a combined key_model and keep only the API key part
         api_key = api_key.split("_")[0]
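
The constructor's first step, visible in the hunk above, strips a combined key_model identifier down to the key part. A tiny illustration with a made-up identifier:

```python
# Hypothetical combined "key_model" identifier; only the key part is kept
api_key = "abc123_yolov8n-custom".split("_")[0]
print(api_key)  # abc123
```
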
ultralytics/hub/session.py CHANGED
@@ -159,7 +159,6 @@ class HUBTrainingSession:
         Raises:
             HUBModelError: If the identifier format is not recognized.
         """
-
         # Initialize variables
         api_key, model_id, filename = None, None, None
 
@@ -200,7 +199,6 @@ class HUBTrainingSession:
             ValueError: If the model is already trained, if required dataset information is missing, or if there are
                 issues with the provided training arguments.
         """
-
         if self.model.is_resumable():
             # Model has saved weights
             self.train_args = {"data": self.model.get_dataset_url(), "resume": True}
@@ -276,7 +274,7 @@ class HUBTrainingSession:
 
             # if request related to metrics upload and exceed retries
             if response is None and kwargs.get("metrics"):
-                self.metrics_upload_failed_queue.update(kwargs.get("metrics", None))
+                self.metrics_upload_failed_queue.update(kwargs.get("metrics"))
 
             return response
 
@@ -350,10 +348,10 @@ class HUBTrainingSession:
         last = weights.with_name("last" + weights.suffix)
         if final and last.is_file():
             LOGGER.warning(
-                f"{PREFIX} ARNING ⚠️ Model 'best.pt' not found, copying 'last.pt' to 'best.pt' and uploading. "
+                f"{PREFIX} WARNING ⚠️ Model 'best.pt' not found, copying 'last.pt' to 'best.pt' and uploading. "
                 "This often happens when resuming training in transient environments like Google Colab. "
                 "For more reliable training, consider using Ultralytics HUB Cloud. "
-                "Learn more at https://docs.ultralytics.com/hub/cloud-training/."
+                "Learn more at https://docs.ultralytics.com/hub/cloud-training."
            )
             shutil.copy(last, weights)  # copy last.pt to best.pt
         else:
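
The metrics hunk above is behavior-preserving: dict.get already defaults to None, so dropping the explicit second argument changes nothing. A one-line check:

```python
kwargs = {}
assert kwargs.get("metrics") is None  # the default is already None
assert kwargs.get("metrics") == kwargs.get("metrics", None)
```
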
ultralytics/models/fastsam/model.py CHANGED
@@ -16,8 +16,8 @@ class FastSAM(Model):
     ```python
     from ultralytics import FastSAM
 
-    model = FastSAM('last.pt')
-    results = model.predict('ultralytics/assets/bus.jpg')
+    model = FastSAM("last.pt")
+    results = model.predict("ultralytics/assets/bus.jpg")
     ```
     """
 
@@ -30,18 +30,21 @@ class FastSAM(Model):
 
     def predict(self, source, stream=False, bboxes=None, points=None, labels=None, texts=None, **kwargs):
         """
-        Performs segmentation prediction on the given image or video source.
+        Perform segmentation prediction on image or video source.
+
+        Supports prompted segmentation with bounding boxes, points, labels, and texts.
 
         Args:
-            source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object.
-            stream (bool, optional): If True, enables real-time streaming. Defaults to False.
-            bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None.
-            points (list, optional): List of points for prompted segmentation. Defaults to None.
-            labels (list, optional): List of labels for prompted segmentation. Defaults to None.
-            texts (list, optional): List of texts for prompted segmentation. Defaults to None.
+            source (str | PIL.Image | numpy.ndarray): Input source.
+            stream (bool): Enable real-time streaming.
+            bboxes (list): Bounding box coordinates for prompted segmentation.
+            points (list): Points for prompted segmentation.
+            labels (list): Labels for prompted segmentation.
+            texts (list): Texts for prompted segmentation.
+            **kwargs (Any): Additional keyword arguments.
 
         Returns:
-            (list): The model predictions.
+            (list): Model predictions.
         """
         prompts = dict(bboxes=bboxes, points=points, labels=labels, texts=texts)
         return super().predict(source, stream, prompts=prompts, **kwargs)
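
Per the updated signature, FastSAM.predict routes box, point, label, and text prompts through a prompts dict to the predictor. A short usage sketch; the checkpoint name and coordinates are illustrative:

```python
from ultralytics import FastSAM

model = FastSAM("FastSAM-s.pt")  # assumed local checkpoint
# Box prompt in xyxy pixel coordinates
results = model.predict("ultralytics/assets/bus.jpg", bboxes=[[200, 200, 500, 600]])
# Single point prompt (label 1 = foreground, 0 = background)
results = model.predict("ultralytics/assets/bus.jpg", points=[[320, 400]], labels=[1])
```
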
ultralytics/models/fastsam/predict.py CHANGED
@@ -92,8 +92,8 @@ class FastSAMPredictor(SegmentationPredictor):
                 if labels.sum() == 0  # all negative points
                 else torch.zeros(len(result), dtype=torch.bool, device=self.device)
             )
-            for p, l in zip(points, labels):
-                point_idx[torch.nonzero(masks[:, p[1], p[0]], as_tuple=True)[0]] = True if l else False
+            for point, label in zip(points, labels):
+                point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = True if label else False
             idx |= point_idx
         if texts is not None:
             if isinstance(texts, str):
ultralytics/models/fastsam/utils.py CHANGED
@@ -13,7 +13,6 @@ def adjust_bboxes_to_image_border(boxes, image_shape, threshold=20):
     Returns:
         adjusted_boxes (torch.Tensor): adjusted bounding boxes
     """
-
     # Image dimensions
     h, w = image_shape
 
ultralytics/models/nas/model.py CHANGED
@@ -6,8 +6,8 @@ Example:
     ```python
     from ultralytics import NAS
 
-    model = NAS('yolo_nas_s')
-    results = model.predict('ultralytics/assets/bus.jpg')
+    model = NAS("yolo_nas_s")
+    results = model.predict("ultralytics/assets/bus.jpg")
     ```
 """
 
@@ -34,8 +34,8 @@ class NAS(Model):
         ```python
         from ultralytics import NAS
 
-        model = NAS('yolo_nas_s')
-        results = model.predict('ultralytics/assets/bus.jpg')
+        model = NAS("yolo_nas_s")
+        results = model.predict("ultralytics/assets/bus.jpg")
         ```
 
     Attributes:
ultralytics/models/nas/predict.py CHANGED
@@ -22,7 +22,7 @@ class NASPredictor(BasePredictor):
         ```python
         from ultralytics import NAS
 
-        model = NAS('yolo_nas_s')
+        model = NAS("yolo_nas_s")
         predictor = model.predictor
         # Assumes that raw_preds, img, orig_imgs are available
         results = predictor.postprocess(raw_preds, img, orig_imgs)
@@ -34,7 +34,6 @@ class NASPredictor(BasePredictor):
 
     def postprocess(self, preds_in, img, orig_imgs):
         """Postprocess predictions and returns a list of Results objects."""
-
         # Cat boxes and class scores
         boxes = ops.xyxy2xywh(preds_in[0][0])
         preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1)
ultralytics/models/nas/val.py CHANGED
@@ -24,7 +24,7 @@ class NASValidator(DetectionValidator):
         ```python
         from ultralytics import NAS
 
-        model = NAS('yolo_nas_s')
+        model = NAS("yolo_nas_s")
         validator = model.validator
         # Assumes that raw_preds are available
         final_preds = validator.postprocess(raw_preds)
ultralytics/models/rtdetr/predict.py CHANGED
@@ -21,7 +21,7 @@ class RTDETRPredictor(BasePredictor):
         from ultralytics.utils import ASSETS
         from ultralytics.models.rtdetr import RTDETRPredictor
 
-        args = dict(model='rtdetr-l.pt', source=ASSETS)
+        args = dict(model="rtdetr-l.pt", source=ASSETS)
         predictor = RTDETRPredictor(overrides=args)
         predictor.predict_cli()
         ```
ultralytics/models/rtdetr/train.py CHANGED
@@ -25,7 +25,7 @@ class RTDETRTrainer(DetectionTrainer):
         ```python
         from ultralytics.models.rtdetr.train import RTDETRTrainer
 
-        args = dict(model='rtdetr-l.yaml', data='coco8.yaml', imgsz=640, epochs=3)
+        args = dict(model="rtdetr-l.yaml", data="coco8.yaml", imgsz=640, epochs=3)
         trainer = RTDETRTrainer(overrides=args)
         trainer.train()
         ```
ultralytics/models/rtdetr/val.py CHANGED
@@ -62,7 +62,7 @@ class RTDETRValidator(DetectionValidator):
         ```python
         from ultralytics.models.rtdetr import RTDETRValidator
 
-        args = dict(model='rtdetr-l.pt', data='coco8.yaml')
+        args = dict(model="rtdetr-l.pt", data="coco8.yaml")
         validator = RTDETRValidator(args=args)
         validator()
         ```