ultralytics 8.0.194__py3-none-any.whl → 8.0.196__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (84)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/cfg/__init__.py +5 -6
  3. ultralytics/data/augment.py +234 -29
  4. ultralytics/data/base.py +2 -1
  5. ultralytics/data/build.py +9 -3
  6. ultralytics/data/converter.py +5 -2
  7. ultralytics/data/dataset.py +16 -2
  8. ultralytics/data/loaders.py +111 -7
  9. ultralytics/data/utils.py +3 -3
  10. ultralytics/engine/exporter.py +1 -3
  11. ultralytics/engine/model.py +16 -9
  12. ultralytics/engine/predictor.py +10 -6
  13. ultralytics/engine/results.py +18 -8
  14. ultralytics/engine/trainer.py +19 -31
  15. ultralytics/engine/tuner.py +20 -20
  16. ultralytics/engine/validator.py +3 -4
  17. ultralytics/hub/__init__.py +2 -2
  18. ultralytics/hub/auth.py +18 -3
  19. ultralytics/hub/session.py +1 -0
  20. ultralytics/hub/utils.py +1 -3
  21. ultralytics/models/fastsam/model.py +2 -1
  22. ultralytics/models/fastsam/predict.py +10 -7
  23. ultralytics/models/fastsam/prompt.py +15 -1
  24. ultralytics/models/nas/model.py +3 -1
  25. ultralytics/models/rtdetr/model.py +4 -6
  26. ultralytics/models/rtdetr/predict.py +2 -1
  27. ultralytics/models/rtdetr/train.py +2 -1
  28. ultralytics/models/rtdetr/val.py +1 -0
  29. ultralytics/models/sam/amg.py +12 -6
  30. ultralytics/models/sam/model.py +5 -6
  31. ultralytics/models/sam/modules/decoders.py +5 -1
  32. ultralytics/models/sam/modules/encoders.py +15 -12
  33. ultralytics/models/sam/modules/tiny_encoder.py +38 -2
  34. ultralytics/models/sam/modules/transformer.py +2 -4
  35. ultralytics/models/sam/predict.py +8 -4
  36. ultralytics/models/utils/loss.py +35 -8
  37. ultralytics/models/utils/ops.py +14 -18
  38. ultralytics/models/yolo/classify/predict.py +1 -0
  39. ultralytics/models/yolo/classify/train.py +4 -2
  40. ultralytics/models/yolo/classify/val.py +1 -0
  41. ultralytics/models/yolo/detect/train.py +4 -3
  42. ultralytics/models/yolo/model.py +2 -4
  43. ultralytics/models/yolo/pose/predict.py +1 -0
  44. ultralytics/models/yolo/segment/predict.py +2 -0
  45. ultralytics/models/yolo/segment/val.py +1 -1
  46. ultralytics/nn/autobackend.py +54 -43
  47. ultralytics/nn/modules/__init__.py +13 -9
  48. ultralytics/nn/modules/block.py +11 -5
  49. ultralytics/nn/modules/conv.py +16 -7
  50. ultralytics/nn/modules/head.py +6 -3
  51. ultralytics/nn/modules/transformer.py +47 -15
  52. ultralytics/nn/modules/utils.py +6 -4
  53. ultralytics/nn/tasks.py +61 -21
  54. ultralytics/trackers/bot_sort.py +53 -6
  55. ultralytics/trackers/byte_tracker.py +71 -15
  56. ultralytics/trackers/track.py +0 -1
  57. ultralytics/trackers/utils/gmc.py +23 -0
  58. ultralytics/trackers/utils/kalman_filter.py +6 -6
  59. ultralytics/utils/__init__.py +32 -19
  60. ultralytics/utils/autobatch.py +1 -3
  61. ultralytics/utils/benchmarks.py +14 -1
  62. ultralytics/utils/callbacks/base.py +1 -3
  63. ultralytics/utils/callbacks/comet.py +11 -3
  64. ultralytics/utils/callbacks/dvc.py +9 -0
  65. ultralytics/utils/callbacks/neptune.py +5 -6
  66. ultralytics/utils/callbacks/wb.py +1 -0
  67. ultralytics/utils/checks.py +13 -9
  68. ultralytics/utils/dist.py +2 -1
  69. ultralytics/utils/downloads.py +7 -3
  70. ultralytics/utils/files.py +3 -3
  71. ultralytics/utils/instance.py +12 -3
  72. ultralytics/utils/loss.py +97 -22
  73. ultralytics/utils/metrics.py +35 -34
  74. ultralytics/utils/ops.py +10 -9
  75. ultralytics/utils/patches.py +9 -7
  76. ultralytics/utils/plotting.py +4 -3
  77. ultralytics/utils/torch_utils.py +8 -6
  78. ultralytics/utils/triton.py +87 -0
  79. {ultralytics-8.0.194.dist-info → ultralytics-8.0.196.dist-info}/METADATA +1 -1
  80. {ultralytics-8.0.194.dist-info → ultralytics-8.0.196.dist-info}/RECORD +84 -83
  81. {ultralytics-8.0.194.dist-info → ultralytics-8.0.196.dist-info}/LICENSE +0 -0
  82. {ultralytics-8.0.194.dist-info → ultralytics-8.0.196.dist-info}/WHEEL +0 -0
  83. {ultralytics-8.0.194.dist-info → ultralytics-8.0.196.dist-info}/entry_points.txt +0 -0
  84. {ultralytics-8.0.194.dist-info → ultralytics-8.0.196.dist-info}/top_level.txt +0 -0
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license

- __version__ = '8.0.194'
+ __version__ = '8.0.196'

  from ultralytics.models import RTDETR, SAM, YOLO
  from ultralytics.models.fastsam import FastSAM
ultralytics/cfg/__init__.py CHANGED
@@ -180,8 +180,8 @@ def _handle_deprecation(custom):

  def check_dict_alignment(base: Dict, custom: Dict, e=None):
  """
- This function checks for any mismatched keys between a custom configuration list and a base configuration list.
- If any mismatched keys are found, the function prints out similar keys from the base list and exits the program.
+ This function checks for any mismatched keys between a custom configuration list and a base configuration list. If
+ any mismatched keys are found, the function prints out similar keys from the base list and exits the program.

  Args:
  custom (dict): a dictionary of custom configuration options
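For context, the check documented above amounts to comparing key sets and suggesting near-miss keys. A minimal standalone sketch of that idea using difflib (an illustration, not the shipped implementation):

from difflib import get_close_matches

def suggest_mismatched_keys(base: dict, custom: dict) -> list:
    # Keys present in the user's config but absent from the base config
    mismatched = [k for k in custom if k not in base]
    for k in mismatched:
        # Offer the closest-matching valid keys as suggestions
        print(f"'{k}' is not a valid key. Similar keys: {get_close_matches(k, list(base))}")
    return mismatched

suggest_mismatched_keys({'imgsz': 640, 'epochs': 100}, {'imgz': 320})
# 'imgz' is not a valid key. Similar keys: ['imgsz']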
@@ -205,9 +205,8 @@ def check_dict_alignment(base: Dict, custom: Dict, e=None):

  def merge_equals_args(args: List[str]) -> List[str]:
  """
- Merges arguments around isolated '=' args in a list of strings.
- The function considers cases where the first argument ends with '=' or the second starts with '=',
- as well as when the middle one is an equals sign.
+ Merges arguments around isolated '=' args in a list of strings. The function considers cases where the first
+ argument ends with '=' or the second starts with '=', as well as when the middle one is an equals sign.

  Args:
  args (List[str]): A list of strings where each element is an argument.
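The merge behavior described in the reflowed docstring turns fragments such as ['imgsz', '=', '640'] into ['imgsz=640']. A rough sketch of that merging logic (hypothetical helper, not the ultralytics source):

from typing import List

def merge_equals(args: List[str]) -> List[str]:
    """Re-join CLI fragments that were split around an '=' sign."""
    out = []
    for arg in args:
        if arg == '=' and out:  # a bare '=' glues onto the previous token
            out[-1] += '='
        elif arg.startswith('=') and out:  # '=640' attaches to the previous token
            out[-1] += arg
        elif out and out[-1].endswith('='):  # 'imgsz=' absorbs the next token
            out[-1] += arg
        else:
            out.append(arg)
    return out

print(merge_equals(['imgsz', '=', '640', 'epochs=', '100']))  # ['imgsz=640', 'epochs=100']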
@@ -343,7 +342,7 @@ def entrypoint(debug=''):
  'copy-cfg': copy_default_cfg}
  full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}

- # Define common mis-uses of special commands, i.e. -h, -help, --help
+ # Define common misuses of special commands, i.e. -h, -help, --help
  special.update({k[0]: v for k, v in special.items()}) # singular
  special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith('s')}) # singular
  special = {**special, **{f'-{k}': v for k, v in special.items()}, **{f'--{k}': v for k, v in special.items()}}
ultralytics/data/augment.py CHANGED
@@ -20,16 +20,30 @@ from .utils import polygons2masks, polygons2masks_overlap

  # TODO: we might need a BaseTransform to make all these augments be compatible with both classification and semantic
  class BaseTransform:
+ """
+ Base class for image transformations.
+
+ This is a generic transformation class that can be extended for specific image processing needs.
+ The class is designed to be compatible with both classification and semantic segmentation tasks.
+
+ Methods:
+ __init__: Initializes the BaseTransform object.
+ apply_image: Applies image transformation to labels.
+ apply_instances: Applies transformations to object instances in labels.
+ apply_semantic: Applies semantic segmentation to an image.
+ __call__: Applies all label transformations to an image, instances, and semantic masks.
+ """

  def __init__(self) -> None:
+ """Initializes the BaseTransform object."""
  pass

  def apply_image(self, labels):
- """Applies image transformation to labels."""
+ """Applies image transformations to labels."""
  pass

  def apply_instances(self, labels):
- """Applies transformations to input 'labels' and returns object instances."""
+ """Applies transformations to object instances in labels."""
  pass

  def apply_semantic(self, labels):
@@ -37,13 +51,14 @@ class BaseTransform:
  pass

  def __call__(self, labels):
- """Applies label transformations to an image, instances and semantic masks."""
+ """Applies all label transformations to an image, instances, and semantic masks."""
  self.apply_image(labels)
  self.apply_instances(labels)
  self.apply_semantic(labels)


  class Compose:
+ """Class for composing multiple image transformations."""

  def __init__(self, transforms):
  """Initializes the Compose object with a list of transforms."""
@@ -60,18 +75,23 @@ class Compose:
  self.transforms.append(transform)

  def tolist(self):
- """Converts list of transforms to a standard Python list."""
+ """Converts the list of transforms to a standard Python list."""
  return self.transforms

  def __repr__(self):
- """Return string representation of object."""
+ """Returns a string representation of the object."""
  return f"{self.__class__.__name__}({', '.join([f'{t}' for t in self.transforms])})"


  class BaseMixTransform:
- """This implementation is from mmyolo."""
+ """
+ Class for base mix (MixUp/Mosaic) transformations.
+
+ This implementation is from mmyolo.
+ """

  def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
+ """Initializes the BaseMixTransform object with dataset, pre_transform, and probability."""
  self.dataset = dataset
  self.pre_transform = pre_transform
  self.p = p
@@ -262,8 +282,10 @@ class Mosaic(BaseMixTransform):


  class MixUp(BaseMixTransform):
+ """Class for applying MixUp augmentation to the dataset."""

  def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
+ """Initializes MixUp object with dataset, pre_transform, and probability of applying MixUp."""
  super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)

  def get_indexes(self):
@@ -271,7 +293,7 @@ class MixUp(BaseMixTransform):
  return random.randint(0, len(self.dataset) - 1)

  def _mix_transform(self, labels):
- """Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf."""
+ """Applies MixUp augmentation as per https://arxiv.org/pdf/1710.09412.pdf."""
  r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
  labels2 = labels['mix_labels'][0]
  labels['img'] = (labels['img'] * r + labels2['img'] * (1 - r)).astype(np.uint8)
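The hunk above contains the heart of MixUp: one Beta(32, 32)-distributed ratio blends two images pixel-wise, with labels from both images kept. A self-contained sketch of that blend on synthetic arrays (illustrative only; the dataset plumbing around it is omitted):

import numpy as np

rng = np.random.default_rng(0)
img1 = rng.integers(0, 256, (4, 4, 3), dtype=np.uint8)  # stand-ins for two dataset images
img2 = rng.integers(0, 256, (4, 4, 3), dtype=np.uint8)

r = np.random.beta(32.0, 32.0)  # alpha=beta=32.0 concentrates r near 0.5
mixed = (img1 * r + img2 * (1 - r)).astype(np.uint8)
print(round(r, 3), mixed.shape)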
@@ -281,6 +303,28 @@ class MixUp(BaseMixTransform):


  class RandomPerspective:
+ """
+ Implements random perspective and affine transformations on images and corresponding bounding boxes, segments, and
+ keypoints. These transformations include rotation, translation, scaling, and shearing. The class also offers the
+ option to apply these transformations conditionally with a specified probability.
+
+ Attributes:
+ degrees (float): Degree range for random rotations.
+ translate (float): Fraction of total width and height for random translation.
+ scale (float): Scaling factor interval, e.g., a scale factor of 0.1 allows a resize between 90%-110%.
+ shear (float): Shear intensity (angle in degrees).
+ perspective (float): Perspective distortion factor.
+ border (tuple): Tuple specifying mosaic border.
+ pre_transform (callable): A function/transform to apply to the image before starting the random transformation.
+
+ Methods:
+ affine_transform(img, border): Applies a series of affine transformations to the image.
+ apply_bboxes(bboxes, M): Transforms bounding boxes using the calculated affine matrix.
+ apply_segments(segments, M): Transforms segments and generates new bounding boxes.
+ apply_keypoints(keypoints, M): Transforms keypoints.
+ __call__(labels): Main method to apply transformations to both images and their corresponding annotations.
+ box_candidates(box1, box2): Filters out bounding boxes that don't meet certain criteria post-transformation.
+ """

  def __init__(self,
  degrees=0.0,
@@ -290,17 +334,31 @@ class RandomPerspective:
  perspective=0.0,
  border=(0, 0),
  pre_transform=None):
+ """Initializes RandomPerspective object with transformation parameters."""
+
  self.degrees = degrees
  self.translate = translate
  self.scale = scale
  self.shear = shear
  self.perspective = perspective
- # Mosaic border
- self.border = border
+ self.border = border # mosaic border
  self.pre_transform = pre_transform

  def affine_transform(self, img, border):
- """Center."""
+ """
+ Applies a sequence of affine transformations centered around the image center.
+
+ Args:
+ img (ndarray): Input image.
+ border (tuple): Border dimensions.
+
+ Returns:
+ img (ndarray): Transformed image.
+ M (ndarray): Transformation matrix.
+ s (float): Scale factor.
+ """
+
+ # Center
  C = np.eye(3, dtype=np.float32)

  C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
@@ -462,8 +520,22 @@ class RandomPerspective:
  labels['resized_shape'] = img.shape[:2]
  return labels

- def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
- # Compute box candidates: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
+ def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
+ """
+ Compute box candidates based on a set of thresholds. This method compares the characteristics of the boxes
+ before and after augmentation to decide whether a box is a candidate for further processing.
+
+ Args:
+ box1 (numpy.ndarray): The 4,n bounding box before augmentation, represented as [x1, y1, x2, y2].
+ box2 (numpy.ndarray): The 4,n bounding box after augmentation, represented as [x1, y1, x2, y2].
+ wh_thr (float, optional): The width and height threshold in pixels. Default is 2.
+ ar_thr (float, optional): The aspect ratio threshold. Default is 100.
+ area_thr (float, optional): The area ratio threshold. Default is 0.1.
+ eps (float, optional): A small epsilon value to prevent division by zero. Default is 1e-16.
+
+ Returns:
+ (numpy.ndarray): A boolean array indicating which boxes are candidates based on the given thresholds.
+ """
  w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
  w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
  ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
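The docstring above names the three filters; the standard YOLOv5-style combination (which the trailing context lines begin to compute) keeps a box only if it stays wider and taller than wh_thr, retains more than area_thr of its pre-augmentation area, and avoids extreme aspect ratios. A standalone numpy sketch of that combination, as an illustration under those assumptions rather than a verbatim copy of the shipped line:

import numpy as np

def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
    # box1/box2: (4, n) arrays of [x1, y1, x2, y2] before/after augmentation
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)

before = np.array([[0, 0], [0, 0], [100, 50], [100, 50]], dtype=float)  # two boxes, one per column
after = np.array([[0, 0], [0, 0], [40, 5], [40, 1]], dtype=float)       # same boxes after augmentation
print(box_candidates(before, after))  # [ True False]: the second box collapsed below wh_thr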
@@ -471,14 +543,32 @@ class RandomPerspective:


  class RandomHSV:
+ """
+ This class is responsible for performing random adjustments to the Hue, Saturation, and Value (HSV) channels of an
+ image.
+
+ The adjustments are random but within limits set by hgain, sgain, and vgain.
+ """

  def __init__(self, hgain=0.5, sgain=0.5, vgain=0.5) -> None:
+ """
+ Initialize RandomHSV class with gains for each HSV channel.
+
+ Args:
+ hgain (float, optional): Maximum variation for hue. Default is 0.5.
+ sgain (float, optional): Maximum variation for saturation. Default is 0.5.
+ vgain (float, optional): Maximum variation for value. Default is 0.5.
+ """
  self.hgain = hgain
  self.sgain = sgain
  self.vgain = vgain

  def __call__(self, labels):
- """Applies image HSV augmentation"""
+ """
+ Applies random HSV augmentation to an image within the predefined limits.
+
+ The modified image replaces the original image in the input 'labels' dict.
+ """
  img = labels['img']
  if self.hgain or self.sgain or self.vgain:
  r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1 # random gains
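The trailing context line shows how the gains are drawn: one multiplier per channel in [1 - gain, 1 + gain]. A condensed sketch of how such gains are typically applied with OpenCV lookup tables, in the YOLOv5 style this class follows (simplified; treat the details as assumptions rather than the exact shipped code):

import cv2
import numpy as np

def random_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gain per channel
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    x = np.arange(0, 256, dtype=r.dtype)
    lut_hue = ((x * r[0]) % 180).astype(img.dtype)  # OpenCV hue range is [0, 180)
    lut_sat = np.clip(x * r[1], 0, 255).astype(img.dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(img.dtype)
    im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
    return cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR)

augmented = random_hsv(np.full((32, 32, 3), 128, dtype=np.uint8))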
@@ -496,9 +586,22 @@ class RandomHSV:


  class RandomFlip:
- """Applies random horizontal or vertical flip to an image with a given probability."""
+ """
+ Applies a random horizontal or vertical flip to an image with a given probability.
+
+ Also updates any instances (bounding boxes, keypoints, etc.) accordingly.
+ """

  def __init__(self, p=0.5, direction='horizontal', flip_idx=None) -> None:
+ """
+ Initializes the RandomFlip class with probability and direction.
+
+ Args:
+ p (float, optional): The probability of applying the flip. Must be between 0 and 1. Default is 0.5.
+ direction (str, optional): The direction to apply the flip. Must be 'horizontal' or 'vertical'.
+ Default is 'horizontal'.
+ flip_idx (array-like, optional): Index mapping for flipping keypoints, if any.
+ """
  assert direction in ['horizontal', 'vertical'], f'Support direction `horizontal` or `vertical`, got {direction}'
  assert 0 <= p <= 1.0

@@ -507,7 +610,16 @@ class RandomFlip:
  self.flip_idx = flip_idx

  def __call__(self, labels):
- """Resize image and padding for detection, instance segmentation, pose."""
+ """
+ Applies random flip to an image and updates any instances like bounding boxes or keypoints accordingly.
+
+ Args:
+ labels (dict): A dictionary containing the keys 'img' and 'instances'. 'img' is the image to be flipped.
+ 'instances' is an object containing bounding boxes and optionally keypoints.
+
+ Returns:
+ (dict): The same dict with the flipped image and updated instances under the 'img' and 'instances' keys.
+ """
  img = labels['img']
  instances = labels.pop('instances')
  instances.convert_bbox(format='xywh')
@@ -599,12 +711,38 @@ class LetterBox:


  class CopyPaste:
+ """
+ Implements the Copy-Paste augmentation as described in the paper https://arxiv.org/abs/2012.07177. This class is
+ responsible for applying the Copy-Paste augmentation on images and their corresponding instances.
+ """

  def __init__(self, p=0.5) -> None:
+ """
+ Initializes the CopyPaste class with a given probability.
+
+ Args:
+ p (float, optional): The probability of applying the Copy-Paste augmentation. Must be between 0 and 1.
+ Default is 0.5.
+ """
  self.p = p

  def __call__(self, labels):
- """Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)."""
+ """
+ Applies the Copy-Paste augmentation to the given image and instances.
+
+ Args:
+ labels (dict): A dictionary containing:
+ - 'img': The image to augment.
+ - 'cls': Class labels associated with the instances.
+ - 'instances': Object containing bounding boxes, and optionally, keypoints and segments.
+
+ Returns:
+ (dict): Dict with augmented image and updated instances under the 'img', 'cls', and 'instances' keys.
+
+ Notes:
+ 1. Instances are expected to have 'segments' as one of their attributes for this augmentation to work.
+ 2. This method modifies the input dictionary 'labels' in place.
+ """
  im = labels['img']
  cls = labels['cls']
  h, w = im.shape[:2]
@@ -639,9 +777,13 @@ class CopyPaste:


  class Albumentations:
- """Albumentations transformations. Optional, uninstall package to disable.
- Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive Histogram Equalization,
- random change of brightness and contrast, RandomGamma and lowering of image quality by compression."""
+ """
+ Albumentations transformations.
+
+ Optional, uninstall package to disable. Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive
+ Histogram Equalization, random change of brightness and contrast, RandomGamma and lowering of image quality by
+ compression.
+ """

  def __init__(self, p=1.0):
  """Initialize the transform object for YOLO bbox formatted params."""
@@ -690,6 +832,19 @@ class Albumentations:

  # TODO: technically this is not an augmentation, maybe we should put this to another files
  class Format:
+ """
+ Formats image annotations for object detection, instance segmentation, and pose estimation tasks. The class
+ standardizes the image and instance annotations to be used by the `collate_fn` in PyTorch DataLoader.
+
+ Attributes:
+ bbox_format (str): Format for bounding boxes. Default is 'xywh'.
+ normalize (bool): Whether to normalize bounding boxes. Default is True.
+ return_mask (bool): Return instance masks for segmentation. Default is False.
+ return_keypoint (bool): Return keypoints for pose estimation. Default is False.
+ mask_ratio (int): Downsample ratio for masks. Default is 4.
+ mask_overlap (bool): Whether to overlap masks. Default is True.
+ batch_idx (bool): Keep batch indexes. Default is True.
+ """

  def __init__(self,
  bbox_format='xywh',
@@ -699,6 +854,7 @@ class Format:
  mask_ratio=4,
  mask_overlap=True,
  batch_idx=True):
+ """Initializes the Format class with given parameters."""
  self.bbox_format = bbox_format
  self.normalize = normalize
  self.return_mask = return_mask # set False when training detection only
@@ -746,7 +902,7 @@ class Format:
  return img

  def _format_segments(self, instances, cls, w, h):
- """convert polygon points to bitmap."""
+ """Convert polygon points to bitmap."""
  segments = instances.segments
  if self.mask_overlap:
  masks, sorted_idx = polygons2masks_overlap((h, w), segments, downsample_ratio=self.mask_ratio)
@@ -851,35 +1007,75 @@ def classify_albumentations(


  class ClassifyLetterBox:
- """YOLOv8 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])"""
+ """
+ YOLOv8 LetterBox class for image preprocessing, designed to be part of a transformation pipeline, e.g.,
+ T.Compose([LetterBox(size), ToTensor()]).
+
+ Attributes:
+ h (int): Target height of the image.
+ w (int): Target width of the image.
+ auto (bool): If True, automatically solves for short side using stride.
+ stride (int): The stride value, used when 'auto' is True.
+ """

  def __init__(self, size=(640, 640), auto=False, stride=32):
- """Resizes image and crops it to center with max dimensions 'h' and 'w'."""
+ """
+ Initializes the ClassifyLetterBox class with a target size, auto-flag, and stride.
+
+ Args:
+ size (Union[int, Tuple[int, int]]): The target dimensions (height, width) for the letterbox.
+ auto (bool): If True, automatically calculates the short side based on stride.
+ stride (int): The stride value, used when 'auto' is True.
+ """
  super().__init__()
  self.h, self.w = (size, size) if isinstance(size, int) else size
  self.auto = auto # pass max size integer, automatically solve for short side using stride
  self.stride = stride # used with auto

- def __call__(self, im): # im = np.array HWC
+ def __call__(self, im):
+ """
+ Resizes the image and pads it with a letterbox method.
+
+ Args:
+ im (numpy.ndarray): The input image as a numpy array of shape HWC.
+
+ Returns:
+ (numpy.ndarray): The letterboxed and resized image as a numpy array.
+ """
  imh, imw = im.shape[:2]
- r = min(self.h / imh, self.w / imw) # ratio of new/old
- h, w = round(imh * r), round(imw * r) # resized image
+ r = min(self.h / imh, self.w / imw) # ratio of new/old dimensions
+ h, w = round(imh * r), round(imw * r) # resized image dimensions
+
+ # Calculate padding dimensions
  hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
  top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
+
+ # Create padded image
  im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
  im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
  return im_out


  class CenterCrop:
- """YOLOv8 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])"""
+ """YOLOv8 CenterCrop class for image preprocessing, designed to be part of a transformation pipeline, e.g.,
+ T.Compose([CenterCrop(size), ToTensor()]).
+ """

  def __init__(self, size=640):
  """Converts an image from numpy array to PyTorch tensor."""
  super().__init__()
  self.h, self.w = (size, size) if isinstance(size, int) else size

- def __call__(self, im): # im = np.array HWC
+ def __call__(self, im):
+ """
+ Resizes and crops the center of the image using a letterbox method.
+
+ Args:
+ im (numpy.ndarray): The input image as a numpy array of shape HWC.
+
+ Returns:
+ (numpy.ndarray): The center-cropped and resized image as a numpy array.
+ """
  imh, imw = im.shape[:2]
  m = min(imh, imw) # min dimension
  top, left = (imh - m) // 2, (imw - m) // 2
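The letterbox arithmetic documented above is compact: scale by the limiting ratio, then center the resized content inside a gray (value 114) canvas. A small worked example of those two lines with a hypothetical 640x640 target:

target_h, target_w = 640, 640
imh, imw = 480, 320  # input image shape

r = min(target_h / imh, target_w / imw)  # limiting resize ratio: 640/480 ≈ 1.333
h, w = round(imh * r), round(imw * r)    # resized content: 640 x 427
top, left = round((target_h - h) / 2 - 0.1), round((target_w - w) / 2 - 0.1)
print((h, w), (top, left))  # (640, 427) (0, 106): content centered, padding fills the rest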
@@ -887,14 +1083,23 @@ class CenterCrop:


  class ToTensor:
- """YOLOv8 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])."""
+ """YOLOv8 ToTensor class for image preprocessing, i.e., T.Compose([LetterBox(size), ToTensor()])."""

  def __init__(self, half=False):
  """Initialize YOLOv8 ToTensor object with optional half-precision support."""
  super().__init__()
  self.half = half

- def __call__(self, im): # im = np.array HWC in BGR order
+ def __call__(self, im):
+ """
+ Transforms an image from a numpy array to a PyTorch tensor, applying optional half-precision and normalization.
+
+ Args:
+ im (numpy.ndarray): Input image as a numpy array with shape (H, W, C) in BGR order.
+
+ Returns:
+ (torch.Tensor): The transformed image as a PyTorch tensor in float32 or float16, normalized to [0, 1].
+ """
  im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous
  im = torch.from_numpy(im) # to torch
  im = im.half() if self.half else im.float() # uint8 to fp16/32
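As a quick sanity check of the conversion documented above (synthetic BGR input; the new docstring promises values in [0, 1], so a division by 255 presumably follows in the lines the hunk elides):

import numpy as np
import torch

im = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # HWC, BGR order

x = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC -> CHW, BGR -> RGB, contiguous
x = torch.from_numpy(x).float() / 255.0                  # uint8 -> float32 in [0, 1]
print(x.shape, x.dtype)  # torch.Size([3, 480, 640]) torch.float32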
ultralytics/data/base.py CHANGED
@@ -62,6 +62,7 @@ class BaseDataset(Dataset):
  classes=None,
  fraction=1.0):
  super().__init__()
+ """Initialize BaseDataset with given configuration and options."""
  self.img_path = img_path
  self.imgsz = imgsz
  self.augment = augment
@@ -256,7 +257,7 @@ class BaseDataset(Dataset):
  return len(self.labels)

  def update_labels_info(self, label):
- """custom your label format here."""
+ """Custom your label format here."""
  return label

  def build_transforms(self, hyp=None):
ultralytics/data/build.py CHANGED
@@ -20,7 +20,11 @@ from .utils import PIN_MEMORY


  class InfiniteDataLoader(dataloader.DataLoader):
- """Dataloader that reuses workers. Uses same syntax as vanilla DataLoader."""
+ """
+ Dataloader that reuses workers.
+
+ Uses same syntax as vanilla DataLoader.
+ """

  def __init__(self, *args, **kwargs):
  """Dataloader that infinitely recycles workers, inherits from DataLoader."""
@@ -38,7 +42,9 @@ class InfiniteDataLoader(dataloader.DataLoader):
  yield next(self.iterator)

  def reset(self):
- """Reset iterator.
+ """
+ Reset iterator.
+
  This is useful when we want to modify settings of dataset while training.
  """
  self.iterator = self._get_iterator()
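For background on the 'reuses workers' behavior this class documents: the usual trick (inherited from YOLOv5) is to wrap the batch sampler in one that repeats forever, so DataLoader worker processes are never torn down between epochs. A minimal sketch of that pattern, under the assumption that this is the mechanism used here:

from torch.utils.data import DataLoader

class _RepeatSampler:
    """Sampler that repeats a wrapped sampler forever."""

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)

class InfiniteLoader(DataLoader):
    """DataLoader whose worker processes persist across epochs."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # DataLoader forbids reassigning batch_sampler after init, hence object.__setattr__
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        for _ in range(len(self)):
            yield next(self.iterator)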
@@ -70,7 +76,7 @@ def seed_worker(worker_id): # noqa


  def build_yolo_dataset(cfg, img_path, batch, data, mode='train', rect=False, stride=32):
- """Build YOLO Dataset"""
+ """Build YOLO Dataset."""
  return YOLODataset(
  img_path=img_path,
  imgsz=cfg.imgsz,
ultralytics/data/converter.py CHANGED
@@ -12,7 +12,8 @@ from ultralytics.utils import TQDM


  def coco91_to_coco80_class():
- """Converts 91-index COCO class IDs to 80-index COCO class IDs.
+ """
+ Converts 91-index COCO class IDs to 80-index COCO class IDs.

  Returns:
  (list): A list of 91 class IDs where the index represents the 80-index class ID and the value is the
@@ -51,7 +52,8 @@ def convert_coco(labels_dir='../coco/annotations/',
  use_segments=False,
  use_keypoints=False,
  cls91to80=True):
- """Converts COCO dataset annotations to a format suitable for training YOLOv5 models.
+ """
+ Converts COCO dataset annotations to a format suitable for training YOLOv5 models.

  Args:
  labels_dir (str, optional): Path to directory containing COCO dataset annotation files.
@@ -203,6 +205,7 @@ def convert_dota_to_yolo_obb(dota_root_path: str):
  'helipad': 17}

  def convert_label(image_name, image_width, image_height, orig_label_dir, save_dir):
+ """Converts a single image's DOTA annotation to YOLO OBB format and saves it to a specified directory."""
  orig_label_path = orig_label_dir / f'{image_name}.txt'
  save_path = save_dir / f'{image_name}.txt'

ultralytics/data/dataset.py CHANGED
@@ -33,6 +33,7 @@ class YOLODataset(BaseDataset):
  """

  def __init__(self, *args, data=None, use_segments=False, use_keypoints=False, **kwargs):
+ """Initializes the YOLODataset with optional configurations for segments and keypoints."""
  self.use_segments = use_segments
  self.use_keypoints = use_keypoints
  self.data = data
@@ -40,7 +41,9 @@ class YOLODataset(BaseDataset):
  super().__init__(*args, **kwargs)

  def cache_labels(self, path=Path('./labels.cache')):
- """Cache dataset labels, check images and read shapes.
+ """
+ Cache dataset labels, check images and read shapes.
+
  Args:
  path (Path): path where to save the cache file (default: Path('./labels.cache')).
  Returns:
@@ -157,7 +160,7 @@ class YOLODataset(BaseDataset):
  self.transforms = self.build_transforms(hyp)

  def update_labels_info(self, label):
- """custom your label format here."""
+ """Custom your label format here."""
  # NOTE: cls is not with bboxes now, classification and semantic segmentation need an independent cls label
  # we can make it also support classification and semantic segmentation by add or remove some dict keys there.
  bboxes = label.pop('bboxes')
@@ -254,6 +257,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
  return {'img': sample, 'cls': j}

  def __len__(self) -> int:
+ """Return the total number of samples in the dataset."""
  return len(self.samples)

  def verify_images(self):
@@ -320,6 +324,16 @@ def save_dataset_cache_file(prefix, path, x):

  # TODO: support semantic segmentation
  class SemanticDataset(BaseDataset):
+ """
+ Semantic Segmentation Dataset.
+
+ This class is responsible for handling datasets used for semantic segmentation tasks. It inherits functionalities
+ from the BaseDataset class.
+
+ Note:
+ This class is currently a placeholder and needs to be populated with methods and attributes for supporting
+ semantic segmentation tasks.
+ """

  def __init__(self):
  """Initialize a SemanticDataset object."""