ultralytics 8.2.60__py3-none-any.whl → 8.2.62__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -23,74 +23,252 @@ DEFAULT_STD = (1.0, 1.0, 1.0)
23
23
  DEFAULT_CROP_FRACTION = 1.0
24
24
 
25
25
 
26
- # TODO: we might need a BaseTransform to make all these augments be compatible with both classification and semantic
27
26
  class BaseTransform:
28
27
  """
29
- Base class for image transformations.
28
+ Base class for image transformations in the Ultralytics library.
30
29
 
31
- This is a generic transformation class that can be extended for specific image processing needs.
32
- The class is designed to be compatible with both classification and semantic segmentation tasks.
30
+ This class serves as a foundation for implementing various image processing operations, designed to be
31
+ compatible with both classification and semantic segmentation tasks.
33
32
 
34
33
  Methods:
35
- __init__: Initializes the BaseTransform object.
36
- apply_image: Applies image transformation to labels.
34
+ apply_image: Applies image transformations to labels.
37
35
  apply_instances: Applies transformations to object instances in labels.
38
36
  apply_semantic: Applies semantic segmentation to an image.
39
37
  __call__: Applies all label transformations to an image, instances, and semantic masks.
38
+
39
+ Examples:
40
+ >>> transform = BaseTransform()
41
+ >>> labels = {'img': np.array(...), 'instances': [...], 'semantic': np.array(...)}
42
+ >>> transform(labels)  # labels is modified in place
40
43
  """
41
44
 
42
45
  def __init__(self) -> None:
43
- """Initializes the BaseTransform object."""
46
+ """
47
+ Initializes the BaseTransform object.
48
+
49
+ This constructor sets up the base transformation object, which can be extended for specific image
50
+ processing tasks. It is designed to be compatible with both classification and semantic segmentation.
51
+
52
+ Examples:
53
+ >>> transform = BaseTransform()
54
+ """
44
55
  pass
45
56
 
46
57
  def apply_image(self, labels):
47
- """Applies image transformations to labels."""
58
+ """
59
+ Applies image transformations to labels.
60
+
61
+ This method is intended to be overridden by subclasses to implement specific image transformation
62
+ logic. In its base form, it performs no operation and returns None.
63
+
64
+ Args:
65
+ labels (Any): The input labels to be transformed. The exact type and structure of labels may
66
+ vary depending on the specific implementation.
67
+
68
+ Returns:
69
+ (Any): The transformed labels. The base implementation is a no-op and returns None.
70
+
71
+ Examples:
72
+ >>> transform = BaseTransform()
73
+ >>> original_labels = [1, 2, 3]
74
+ >>> transformed_labels = transform.apply_image(original_labels)
75
+ >>> print(transformed_labels)
+ None
77
+ """
48
78
  pass
49
79
 
50
80
  def apply_instances(self, labels):
51
- """Applies transformations to object instances in labels."""
81
+ """
82
+ Applies transformations to object instances in labels.
83
+
84
+ This method is responsible for applying various transformations to object instances within the given
85
+ labels. It is designed to be overridden by subclasses to implement specific instance transformation
86
+ logic.
87
+
88
+ Args:
89
+ labels (Dict): A dictionary containing label information, including object instances.
90
+
91
+ Returns:
92
+ (Dict): The modified labels dictionary with transformed object instances.
93
+
94
+ Examples:
95
+ >>> transform = BaseTransform()
96
+ >>> labels = {'instances': Instances(xyxy=torch.rand(5, 4), cls=torch.randint(0, 80, (5,)))}
97
+ >>> transformed_labels = transform.apply_instances(labels)
98
+ """
52
99
  pass
53
100
 
54
101
  def apply_semantic(self, labels):
55
- """Applies semantic segmentation to an image."""
102
+ """
103
+ Applies semantic segmentation transformations to an image.
104
+
105
+ This method is intended to be overridden by subclasses to implement specific semantic segmentation
106
+ transformations. In its base form, it does not perform any operations.
107
+
108
+ Args:
109
+ labels (Any): The input labels or semantic segmentation mask to be transformed.
110
+
111
+ Returns:
112
+ (Any): The transformed semantic segmentation mask or labels. The base implementation returns None.
113
+
114
+ Examples:
115
+ >>> transform = BaseTransform()
116
+ >>> semantic_mask = np.zeros((100, 100), dtype=np.uint8)
117
+ >>> transformed_mask = transform.apply_semantic(semantic_mask)
118
+ """
56
119
  pass
57
120
 
58
121
  def __call__(self, labels):
59
- """Applies all label transformations to an image, instances, and semantic masks."""
122
+ """
123
+ Applies all label transformations to an image, instances, and semantic masks.
124
+
125
+ This method orchestrates the application of various transformations defined in the BaseTransform class
126
+ to the input labels. It sequentially calls the apply_image, apply_instances, and apply_semantic
+ methods to process the image, object instances, and semantic masks, respectively.
128
+
129
+ Args:
130
+ labels (Dict): A dictionary containing image data and annotations. Expected keys include 'img' for
131
+ the image data, and 'instances' for object instances.
132
+
133
+ Returns:
134
+ (None): The input labels dictionary is modified in place by the applied transformations.
135
+
136
+ Examples:
137
+ >>> transform = BaseTransform()
138
+ >>> labels = {'img': np.random.rand(640, 640, 3), 'instances': []}
139
+ >>> transform(labels)  # labels is modified in place
140
+ """
60
141
  self.apply_image(labels)
61
142
  self.apply_instances(labels)
62
143
  self.apply_semantic(labels)
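
The three hooks above give subclasses a single dispatch point. A minimal sketch of a custom transform, assuming a hypothetical subclass name and an in-place image edit:

import numpy as np

class GrayscaleTransform(BaseTransform):  # hypothetical subclass for illustration
    def apply_image(self, labels):
        # Replace the image with its grayscale version, broadcast back to 3 channels.
        img = labels["img"]
        gray = img.mean(axis=2, keepdims=True).astype(img.dtype)
        labels["img"] = np.repeat(gray, 3, axis=2)

labels = {"img": np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)}
GrayscaleTransform()(labels)  # __call__ runs apply_image, apply_instances, apply_semantic
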
63
144
 
64
145
 
65
146
  class Compose:
66
- """Class for composing multiple image transformations."""
147
+ """
148
+ A class for composing multiple image transformations.
149
+
150
+ Attributes:
151
+ transforms (List[Callable]): A list of transformation functions to be applied sequentially.
152
+
153
+ Methods:
154
+ __call__: Applies a series of transformations to input data.
155
+ append: Appends a new transform to the existing list of transforms.
156
+ insert: Inserts a new transform at a specified index in the list of transforms.
157
+ __getitem__: Retrieves a specific transform or a set of transforms using indexing.
158
+ __setitem__: Sets a specific transform or a set of transforms using indexing.
159
+ tolist: Converts the list of transforms to a standard Python list.
160
+
161
+ Examples:
162
+ >>> transforms = [RandomFlip(), RandomPerspective(30)]
163
+ >>> compose = Compose(transforms)
164
+ >>> transformed_data = compose(data)
165
+ >>> compose.append(CenterCrop((224, 224)))
166
+ >>> compose.insert(0, RandomFlip())
167
+ """
67
168
 
68
169
  def __init__(self, transforms):
69
- """Initializes the Compose object with a list of transforms."""
170
+ """
171
+ Initializes the Compose object with a list of transforms.
172
+
173
+ Args:
174
+ transforms (List[Callable]): A list of callable transform objects to be applied sequentially.
175
+
176
+ Examples:
177
+ >>> from ultralytics.data.augment import Compose, RandomHSV, RandomFlip
178
+ >>> transforms = [RandomHSV(), RandomFlip()]
179
+ >>> compose = Compose(transforms)
180
+ """
70
181
  self.transforms = transforms if isinstance(transforms, list) else [transforms]
71
182
 
72
183
  def __call__(self, data):
73
- """Applies a series of transformations to input data."""
184
+ """
185
+ Applies a series of transformations to input data.
+
+ Each transformation in the Compose object's list is applied sequentially, with the output of one
+ transform passed as input to the next.
187
+
188
+ Args:
189
+ data (Any): The input data to be transformed. This can be of any type, depending on the
190
+ transformations in the list.
191
+
192
+ Returns:
193
+ (Any): The transformed data after applying all transformations in sequence.
194
+
195
+ Examples:
196
+ >>> transforms = [Transform1(), Transform2(), Transform3()]
197
+ >>> compose = Compose(transforms)
198
+ >>> transformed_data = compose(input_data)
199
+ """
74
200
  for t in self.transforms:
75
201
  data = t(data)
76
202
  return data
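
Since __call__ only requires each entry to be callable, any function of the data works as a stage. A small sketch with plain callables standing in for the library's transform classes:

import numpy as np

def brighten(data):
    # Add a constant brightness offset, clipped to the valid uint8 range.
    data["img"] = np.clip(data["img"].astype(np.int16) + 25, 0, 255).astype(np.uint8)
    return data

pipeline = Compose([brighten, lambda d: d])  # identity as a second stage
out = pipeline({"img": np.zeros((4, 4, 3), dtype=np.uint8)})
print(out["img"].max())  # 25
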
77
203
 
78
204
  def append(self, transform):
79
- """Appends a new transform to the existing list of transforms."""
205
+ """
206
+ Appends a new transform to the existing list of transforms.
207
+
208
+ Args:
209
+ transform (BaseTransform): The transformation to be added to the composition.
210
+
211
+ Examples:
212
+ >>> compose = Compose([RandomFlip(), RandomPerspective()])
213
+ >>> compose.append(RandomHSV())
214
+ """
80
215
  self.transforms.append(transform)
81
216
 
82
217
  def insert(self, index, transform):
83
- """Inserts a new transform to the existing list of transforms."""
218
+ """
219
+ Inserts a new transform at a specified index in the existing list of transforms.
220
+
221
+ Args:
222
+ index (int): The index at which to insert the new transform.
223
+ transform (BaseTransform): The transform object to be inserted.
224
+
225
+ Examples:
226
+ >>> compose = Compose([Transform1(), Transform2()])
227
+ >>> compose.insert(1, Transform3())
228
+ >>> len(compose.transforms)
229
+ 3
230
+ """
84
231
  self.transforms.insert(index, transform)
85
232
 
86
233
  def __getitem__(self, index: Union[list, int]) -> "Compose":
87
- """Retrieve a specific transform or a set of transforms using indexing."""
234
+ """
235
+ Retrieves a specific transform or a set of transforms using indexing.
236
+
237
+ Args:
238
+ index (int | List[int]): Index or list of indices of the transforms to retrieve.
239
+
240
+ Returns:
241
+ (Compose): A new Compose object containing the selected transform(s).
242
+
243
+ Raises:
244
+ AssertionError: If the index is not of type int or list.
245
+
246
+ Examples:
247
+ >>> transforms = [RandomFlip(), RandomPerspective(10), RandomHSV(0.5, 0.5, 0.5)]
248
+ >>> compose = Compose(transforms)
249
+ >>> single_transform = compose[1] # Returns a Compose object with only RandomPerspective
250
+ >>> multiple_transforms = compose[[0, 1]] # Returns a Compose object with RandomFlip and RandomPerspective
251
+ """
88
252
  assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}"
89
253
  index = [index] if isinstance(index, int) else index
90
254
  return Compose([self.transforms[i] for i in index])
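
Note the assert above: int and list indices are accepted but slices are not, so compose[0:2] raises. A quick sketch of the contract with throwaway callables:

compose = Compose([abs, str, len])       # throwaway callables as stand-in transforms
sub = compose[[0, 2]]                    # Compose wrapping [abs, len]
single = compose[1]                      # Compose wrapping [str]
try:
    compose[0:2]                         # slice -> AssertionError
except AssertionError as err:
    print(err)
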
91
255
 
92
256
  def __setitem__(self, index: Union[list, int], value: Union[list, int]) -> None:
93
- """Retrieve a specific transform or a set of transforms using indexing."""
257
+ """
258
+ Sets one or more transforms in the composition using indexing.
259
+
260
+ Args:
261
+ index (int | List[int]): Index or list of indices to set transforms at.
262
+ value (Any | List[Any]): Transform or list of transforms to set at the specified index(es).
263
+
264
+ Raises:
265
+ AssertionError: If index type is invalid, value type doesn't match index type, or index is out of range.
266
+
267
+ Examples:
268
+ >>> compose = Compose([Transform1(), Transform2(), Transform3()])
269
+ >>> compose[1] = NewTransform() # Replace second transform
270
+ >>> compose[[0, 1]] = [NewTransform1(), NewTransform2()] # Replace first two transforms
271
+ """
94
272
  assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}"
95
273
  if isinstance(index, list):
96
274
  assert isinstance(
@@ -103,29 +281,107 @@ class Compose:
103
281
  self.transforms[i] = v
104
282
 
105
283
  def tolist(self):
106
- """Converts the list of transforms to a standard Python list."""
284
+ """
285
+ Converts the list of transforms to a standard Python list.
286
+
287
+ Returns:
288
+ (List): A list containing all the transform objects in the Compose instance.
289
+
290
+ Examples:
291
+ >>> transforms = [RandomFlip(), RandomPerspective(10), CenterCrop()]
292
+ >>> compose = Compose(transforms)
293
+ >>> transform_list = compose.tolist()
294
+ >>> print(len(transform_list))
295
+ 3
296
+ """
107
297
  return self.transforms
108
298
 
109
299
  def __repr__(self):
110
- """Returns a string representation of the object."""
300
+ """
301
+ Returns a string representation of the Compose object.
302
+
303
+ Returns:
304
+ (str): A string representation of the Compose object, including the list of transforms.
305
+
306
+ Examples:
307
+ >>> transforms = [RandomFlip(), RandomPerspective(degrees=10, translate=0.1, scale=0.1)]
308
+ >>> compose = Compose(transforms)
309
+ >>> print(compose)
310
+ Compose(RandomFlip(), RandomPerspective(degrees=10, translate=0.1, scale=0.1))
314
+ """
111
315
  return f"{self.__class__.__name__}({', '.join([f'{t}' for t in self.transforms])})"
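
Given the f-string above, the printed form is a single line built from each transform's own repr. A sketch:

class Named:
    def __repr__(self):
        return "Named()"

print(Compose([Named(), Named()]))  # Compose(Named(), Named())
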
112
316
 
113
317
 
114
318
  class BaseMixTransform:
115
319
  """
116
- Class for base mix (MixUp/Mosaic) transformations.
320
+ Base class for mix transformations like MixUp and Mosaic.
321
+
322
+ This class provides a foundation for implementing mix transformations on datasets. It handles the
323
+ probability-based application of transforms and manages the mixing of multiple images and labels.
324
+
325
+ Attributes:
326
+ dataset (Any): The dataset object containing images and labels.
327
+ pre_transform (Callable | None): Optional transform to apply before mixing.
328
+ p (float): Probability of applying the mix transformation.
117
329
 
118
- This implementation is from mmyolo.
330
+ Methods:
331
+ __call__: Applies the mix transformation to the input labels.
332
+ _mix_transform: Abstract method to be implemented by subclasses for specific mix operations.
333
+ get_indexes: Abstract method to get indexes of images to be mixed.
334
+ _update_label_text: Updates label text for mixed images.
335
+
336
+ Examples:
337
+ >>> class CustomMixTransform(BaseMixTransform):
338
+ ... def _mix_transform(self, labels):
339
+ ... # Implement custom mix logic here
340
+ ... return labels
341
+ ... def get_indexes(self):
342
+ ... return [random.randint(0, len(self.dataset) - 1) for _ in range(3)]
343
+ >>> dataset = YourDataset()
344
+ >>> transform = CustomMixTransform(dataset, p=0.5)
345
+ >>> mixed_labels = transform(original_labels)
119
346
  """
120
347
 
121
348
  def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
122
- """Initializes the BaseMixTransform object with dataset, pre_transform, and probability."""
349
+ """
350
+ Initializes the BaseMixTransform object for mix transformations like MixUp and Mosaic.
351
+
352
+ This class serves as a base for implementing mix transformations in image processing pipelines.
353
+
354
+ Args:
355
+ dataset (Any): The dataset object containing images and labels for mixing.
356
+ pre_transform (Callable | None): Optional transform to apply before mixing.
357
+ p (float): Probability of applying the mix transformation. Should be in the range [0.0, 1.0].
358
+
359
+ Examples:
360
+ >>> dataset = YOLODataset("path/to/data")
361
+ >>> pre_transform = Compose([RandomFlip(), RandomPerspective()])
362
+ >>> mix_transform = BaseMixTransform(dataset, pre_transform, p=0.5)
363
+ """
123
364
  self.dataset = dataset
124
365
  self.pre_transform = pre_transform
125
366
  self.p = p
126
367
 
127
368
  def __call__(self, labels):
128
- """Applies pre-processing transforms and mixup/mosaic transforms to labels data."""
369
+ """
370
+ Applies pre-processing transforms and mixup/mosaic transforms to labels data.
371
+
372
+ This method determines whether to apply the mix transform based on a probability factor. If applied, it
373
+ selects additional images, applies pre-transforms if specified, and then performs the mix transform.
374
+
375
+ Args:
376
+ labels (Dict): A dictionary containing label data for an image.
377
+
378
+ Returns:
379
+ (Dict): The transformed labels dictionary, which may include mixed data from other images.
380
+
381
+ Examples:
382
+ >>> transform = BaseMixTransform(dataset, pre_transform=None, p=0.5)
383
+ >>> result = transform({"img": img, "bboxes": boxes, "cls": classes})
384
+ """
129
385
  if random.uniform(0, 1) > self.p:
130
386
  return labels
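
The uniform draw above means the mix is skipped on roughly a (1 - p) fraction of calls. A sketch of the gate in isolation, with a hypothetical p:

import random

p = 0.25  # hypothetical mix probability
trials = 100_000
mixed = sum(random.uniform(0, 1) <= p for _ in range(trials))
print(mixed / trials)  # ~0.25
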
131
387
 
@@ -150,15 +406,73 @@ class BaseMixTransform:
150
406
  return labels
151
407
 
152
408
  def _mix_transform(self, labels):
153
- """Applies MixUp or Mosaic augmentation to the label dictionary."""
409
+ """
410
+ Applies MixUp or Mosaic augmentation to the label dictionary.
411
+
412
+ This method should be implemented by subclasses to perform specific mix transformations like MixUp or
413
+ Mosaic. It modifies the input label dictionary in-place with the augmented data.
414
+
415
+ Args:
416
+ labels (Dict): A dictionary containing image and label data. Expected to have a 'mix_labels' key
417
+ with a list of additional image and label data for mixing.
418
+
419
+ Returns:
420
+ (Dict): The modified labels dictionary with augmented data after applying the mix transform.
421
+
422
+ Examples:
423
+ >>> transform = BaseMixTransform(dataset)
424
+ >>> labels = {'image': img, 'bboxes': boxes, 'mix_labels': [{'image': img2, 'bboxes': boxes2}]}
425
+ >>> augmented_labels = transform._mix_transform(labels)
426
+ """
154
427
  raise NotImplementedError
155
428
 
156
429
  def get_indexes(self):
157
- """Gets a list of shuffled indexes for mosaic augmentation."""
430
+ """
431
+ Gets a list of shuffled indexes for mosaic augmentation.
432
+
433
+ Returns:
434
+ (List[int]): A list of shuffled indexes from the dataset.
435
+
436
+ Examples:
437
+ >>> transform = BaseMixTransform(dataset)
438
+ >>> indexes = transform.get_indexes()
439
+ >>> print(indexes) # [3, 18, 7, 2]
440
+ """
158
441
  raise NotImplementedError
159
442
 
160
443
  def _update_label_text(self, labels):
161
- """Update label text."""
444
+ """
445
+ Updates label text and class IDs for mixed labels in image augmentation.
446
+
447
+ This method processes the 'texts' and 'cls' fields of the input labels dictionary and any mixed labels,
448
+ creating a unified set of text labels and updating class IDs accordingly.
449
+
450
+ Args:
451
+ labels (Dict): A dictionary containing label information, including 'texts' and 'cls' fields,
452
+ and optionally a 'mix_labels' field with additional label dictionaries.
453
+
454
+ Returns:
455
+ (Dict): The updated labels dictionary with unified text labels and updated class IDs.
456
+
457
+ Examples:
458
+ >>> labels = {
459
+ ... 'texts': [['cat'], ['dog']],
460
+ ... 'cls': torch.tensor([[0], [1]]),
461
+ ... 'mix_labels': [{
462
+ ... 'texts': [['bird'], ['fish']],
463
+ ... 'cls': torch.tensor([[0], [1]])
464
+ ... }]
465
+ ... }
466
+ >>> updated_labels = self._update_label_text(labels)
467
+ >>> print(updated_labels['texts'])
468
+ [['cat'], ['dog'], ['bird'], ['fish']]
469
+ >>> print(updated_labels['cls'])
470
+ tensor([[0],
471
+ [1]])
472
+ >>> print(updated_labels['mix_labels'][0]['cls'])
473
+ tensor([[2],
474
+ [3]])
475
+ """
162
476
  if "texts" not in labels:
163
477
  return labels
164
478
 
@@ -176,20 +490,52 @@ class BaseMixTransform:
176
490
 
177
491
  class Mosaic(BaseMixTransform):
178
492
  """
179
- Mosaic augmentation.
493
+ Mosaic augmentation for image datasets.
180
494
 
181
495
  This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image.
182
496
  The augmentation is applied to a dataset with a given probability.
183
497
 
184
498
  Attributes:
185
499
  dataset: The dataset on which the mosaic augmentation is applied.
186
- imgsz (int, optional): Image size (height and width) after mosaic pipeline of a single image. Default to 640.
187
- p (float, optional): Probability of applying the mosaic augmentation. Must be in the range 0-1. Default to 1.0.
188
- n (int, optional): The grid size, either 4 (for 2x2) or 9 (for 3x3).
500
+ imgsz (int): Image size (height and width) after mosaic pipeline of a single image.
501
+ p (float): Probability of applying the mosaic augmentation. Must be in the range 0-1.
502
+ n (int): The grid size, either 4 (for 2x2) or 9 (for 3x3).
503
+ border (Tuple[int, int]): Border size for width and height.
504
+
505
+ Methods:
506
+ get_indexes: Returns a list of random indexes from the dataset.
507
+ _mix_transform: Applies mosaic transformation to the input image and labels.
508
+ _mosaic3: Creates a 1x3 image mosaic.
509
+ _mosaic4: Creates a 2x2 image mosaic.
510
+ _mosaic9: Creates a 3x3 image mosaic.
511
+ _update_labels: Updates labels with padding.
512
+ _cat_labels: Concatenates labels and clips mosaic border instances.
513
+
514
+ Examples:
515
+ >>> from ultralytics.data.augment import Mosaic
516
+ >>> dataset = YourDataset(...) # Your image dataset
517
+ >>> mosaic_aug = Mosaic(dataset, imgsz=640, p=0.5, n=4)
518
+ >>> augmented_labels = mosaic_aug(original_labels)
189
519
  """
190
520
 
191
521
  def __init__(self, dataset, imgsz=640, p=1.0, n=4):
192
- """Initializes the object with a dataset, image size, probability, and border."""
522
+ """
523
+ Initializes the Mosaic augmentation object.
524
+
525
+ This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image.
526
+ The augmentation is applied to a dataset with a given probability.
527
+
528
+ Args:
529
+ dataset (Any): The dataset on which the mosaic augmentation is applied.
530
+ imgsz (int): Image size (height and width) after mosaic pipeline of a single image.
531
+ p (float): Probability of applying the mosaic augmentation. Must be in the range 0-1.
532
+ n (int): The grid size, either 4 (for 2x2) or 9 (for 3x3).
533
+
534
+ Examples:
535
+ >>> from ultralytics.data.augment import Mosaic
536
+ >>> dataset = YourDataset(...)
537
+ >>> mosaic_aug = Mosaic(dataset, imgsz=640, p=0.5, n=4)
538
+ """
193
539
  assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."
194
540
  assert n in {4, 9}, "grid must be equal to 4 or 9."
195
541
  super().__init__(dataset=dataset, p=p)
@@ -199,14 +545,53 @@ class Mosaic(BaseMixTransform):
199
545
  self.n = n
200
546
 
201
547
  def get_indexes(self, buffer=True):
202
- """Return a list of random indexes from the dataset."""
548
+ """
549
+ Returns a list of random indexes from the dataset for mosaic augmentation.
550
+
551
+ This method selects random image indexes either from a buffer or from the entire dataset, depending on
552
+ the 'buffer' parameter. It is used to choose images for creating mosaic augmentations.
553
+
554
+ Args:
555
+ buffer (bool): If True, selects images from the dataset buffer. If False, selects from the entire
556
+ dataset.
557
+
558
+ Returns:
559
+ (List[int]): A list of random image indexes. The length of the list is n-1, where n is the number
560
+ of images used in the mosaic (either 3 or 8, depending on whether n is 4 or 9).
561
+
562
+ Examples:
563
+ >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4)
564
+ >>> indexes = mosaic.get_indexes()
565
+ >>> print(len(indexes)) # Output: 3
566
+ """
203
567
  if buffer: # select images from buffer
204
568
  return random.choices(list(self.dataset.buffer), k=self.n - 1)
205
569
  else: # select any images
206
570
  return [random.randint(0, len(self.dataset) - 1) for _ in range(self.n - 1)]
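
The two branches differ only in the candidate pool. A sketch with a plain list standing in for the dataset buffer (all names illustrative):

import random

n = 4                                 # 2x2 mosaic needs n - 1 = 3 extra images
buffer = [2, 5, 7, 11]                # hypothetical indices currently buffered
dataset_len = 1000

from_buffer = random.choices(buffer, k=n - 1)                        # may repeat
from_dataset = [random.randint(0, dataset_len - 1) for _ in range(n - 1)]
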
207
571
 
208
572
  def _mix_transform(self, labels):
209
- """Apply mixup transformation to the input image and labels."""
573
+ """
574
+ Applies mosaic augmentation to the input image and labels.
575
+
576
+ This method combines multiple images (3, 4, or 9) into a single mosaic image based on the 'n' attribute.
577
+ It ensures that rectangular annotations are not present and that there are other images available for
578
+ mosaic augmentation.
579
+
580
+ Args:
581
+ labels (Dict): A dictionary containing image data and annotations. Expected keys include:
582
+ - 'rect_shape': Should be None as rect and mosaic are mutually exclusive.
583
+ - 'mix_labels': A list of dictionaries containing data for other images to be used in the mosaic.
584
+
585
+ Returns:
586
+ (Dict): A dictionary containing the mosaic-augmented image and updated annotations.
587
+
588
+ Raises:
589
+ AssertionError: If 'rect_shape' is not None or if 'mix_labels' is empty.
590
+
591
+ Examples:
592
+ >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4)
593
+ >>> augmented_data = mosaic._mix_transform(labels)
594
+ """
210
595
  assert labels.get("rect_shape", None) is None, "rect and mosaic are mutually exclusive."
211
596
  assert len(labels.get("mix_labels", [])), "There are no other images for mosaic augment."
212
597
  return (
@@ -214,7 +599,29 @@ class Mosaic(BaseMixTransform):
214
599
  ) # This code is modified for mosaic3 method.
215
600
 
216
601
  def _mosaic3(self, labels):
217
- """Create a 1x3 image mosaic."""
602
+ """
603
+ Creates a 1x3 image mosaic by combining three images.
604
+
605
+ This method arranges three images in a horizontal layout, with the main image in the center and two
606
+ additional images on either side. It's part of the Mosaic augmentation technique used in object detection.
607
+
608
+ Args:
609
+ labels (Dict): A dictionary containing image and label information for the main (center) image.
610
+ Must include 'img' key with the image array, and 'mix_labels' key with a list of two
611
+ dictionaries containing information for the side images.
612
+
613
+ Returns:
614
+ (Dict): A dictionary with the mosaic image and updated labels. Keys include:
615
+ - 'img' (np.ndarray): The mosaic image array with shape (H, W, C).
616
+ - Other keys from the input labels, updated to reflect the new image dimensions.
617
+
618
+ Examples:
619
+ >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=3)
620
+ >>> labels = {'img': np.random.rand(480, 640, 3), 'mix_labels': [{'img': np.random.rand(480, 640, 3)} for _ in range(2)]}
621
+ >>> result = mosaic._mosaic3(labels)
622
+ >>> print(result['img'].shape)
623
+ (640, 640, 3)
624
+ """
218
625
  mosaic_labels = []
219
626
  s = self.imgsz
220
627
  for i in range(3):
@@ -248,7 +655,28 @@ class Mosaic(BaseMixTransform):
248
655
  return final_labels
249
656
 
250
657
  def _mosaic4(self, labels):
251
- """Create a 2x2 image mosaic."""
658
+ """
659
+ Creates a 2x2 image mosaic from four input images.
660
+
661
+ This method combines four images into a single mosaic image by placing them in a 2x2 grid. It also
662
+ updates the corresponding labels for each image in the mosaic.
663
+
664
+ Args:
665
+ labels (Dict): A dictionary containing image data and labels for the base image (index 0) and three
666
+ additional images (indices 1-3) in the 'mix_labels' key.
667
+
668
+ Returns:
669
+ (Dict): A dictionary containing the mosaic image and updated labels. The 'img' key contains the mosaic
670
+ image as a numpy array, and other keys contain the combined and adjusted labels for all four images.
671
+
672
+ Examples:
673
+ >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4)
674
+ >>> labels = {"img": np.random.rand(480, 640, 3), "mix_labels": [
675
+ ... {"img": np.random.rand(480, 640, 3)} for _ in range(3)
676
+ ... ]}
677
+ >>> result = mosaic._mosaic4(labels)
678
+ >>> assert result["img"].shape == (1280, 1280, 3)
679
+ """
252
680
  mosaic_labels = []
253
681
  s = self.imgsz
254
682
  yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y
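
Assuming the default border of -imgsz // 2 (set in the elided part of __init__, as in prior releases), the expression above confines the mosaic center to the middle of the 2s x 2s canvas. A standalone sketch:

import random

s = 640
border = (-s // 2, -s // 2)  # assumed default mosaic border
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in border)
assert 320 <= yc <= 960 and 320 <= xc <= 960  # center stays in the middle region
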
@@ -284,7 +712,31 @@ class Mosaic(BaseMixTransform):
284
712
  return final_labels
285
713
 
286
714
  def _mosaic9(self, labels):
287
- """Create a 3x3 image mosaic."""
715
+ """
716
+ Creates a 3x3 image mosaic from the input image and eight additional images.
717
+
718
+ This method combines nine images into a single mosaic image. The input image is placed at the center,
719
+ and eight additional images from the dataset are placed around it in a 3x3 grid pattern.
720
+
721
+ Args:
722
+ labels (Dict): A dictionary containing the input image and its associated labels. It should have
723
+ the following keys:
724
+ - 'img' (numpy.ndarray): The input image.
725
+ - 'resized_shape' (Tuple[int, int]): The shape of the resized image (height, width).
726
+ - 'mix_labels' (List[Dict]): A list of dictionaries containing information for the additional
727
+ eight images, each with the same structure as the input labels.
728
+
729
+ Returns:
730
+ (Dict): A dictionary containing the mosaic image and updated labels. It includes the following keys:
731
+ - 'img' (numpy.ndarray): The final mosaic image.
732
+ - Other keys from the input labels, updated to reflect the new mosaic arrangement.
733
+
734
+ Examples:
735
+ >>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=9)
736
+ >>> input_labels = dataset[0]
737
+ >>> mosaic_result = mosaic._mosaic9(input_labels)
738
+ >>> mosaic_image = mosaic_result['img']
739
+ """
288
740
  mosaic_labels = []
289
741
  s = self.imgsz
290
742
  hp, wp = -1, -1 # height, width previous
@@ -333,7 +785,25 @@ class Mosaic(BaseMixTransform):
333
785
 
334
786
  @staticmethod
335
787
  def _update_labels(labels, padw, padh):
336
- """Update labels."""
788
+ """
789
+ Updates label coordinates with padding values.
790
+
791
+ This method adjusts the bounding box coordinates of object instances in the labels by adding padding
792
+ values. It also denormalizes the coordinates if they were previously normalized.
793
+
794
+ Args:
795
+ labels (Dict): A dictionary containing image and instance information.
796
+ padw (int): Padding width to be added to the x-coordinates.
797
+ padh (int): Padding height to be added to the y-coordinates.
798
+
799
+ Returns:
800
+ (Dict): Updated labels dictionary with adjusted instance coordinates.
801
+
802
+ Examples:
803
+ >>> labels = {"img": np.zeros((100, 100, 3)), "instances": Instances(...)}
804
+ >>> padw, padh = 50, 50
805
+ >>> updated_labels = Mosaic._update_labels(labels, padw, padh)
806
+ """
337
807
  nh, nw = labels["img"].shape[:2]
338
808
  labels["instances"].convert_bbox(format="xyxy")
339
809
  labels["instances"].denormalize(nw, nh)
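
After denormalizing, the elided step (Instances.add_padding in prior releases) translates instance coordinates by the tile's top-left offset. The arithmetic amounts to the following sketch with made-up numbers:

x1, y1, x2, y2 = 100, 50, 200, 150   # a box in tile-local pixel coordinates
padw, padh = 320, 320                # hypothetical tile offset inside the mosaic
shifted = (x1 + padw, y1 + padh, x2 + padw, y2 + padh)  # (420, 370, 520, 470)
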
@@ -341,7 +811,32 @@ class Mosaic(BaseMixTransform):
341
811
  return labels
342
812
 
343
813
  def _cat_labels(self, mosaic_labels):
344
- """Return labels with mosaic border instances clipped."""
814
+ """
815
+ Concatenates and processes labels for mosaic augmentation.
816
+
817
+ This method combines labels from multiple images used in mosaic augmentation, clips instances to the
818
+ mosaic border, and removes zero-area boxes.
819
+
820
+ Args:
821
+ mosaic_labels (List[Dict]): A list of label dictionaries for each image in the mosaic.
822
+
823
+ Returns:
824
+ (Dict): A dictionary containing concatenated and processed labels for the mosaic image, including:
825
+ - im_file (str): File path of the first image in the mosaic.
826
+ - ori_shape (Tuple[int, int]): Original shape of the first image.
827
+ - resized_shape (Tuple[int, int]): Shape of the mosaic image (imgsz * 2, imgsz * 2).
828
+ - cls (np.ndarray): Concatenated class labels.
829
+ - instances (Instances): Concatenated instance annotations.
830
+ - mosaic_border (Tuple[int, int]): Mosaic border size.
831
+ - texts (List[str], optional): Text labels if present in the original labels.
832
+
833
+ Examples:
834
+ >>> mosaic = Mosaic(dataset, imgsz=640)
835
+ >>> mosaic_labels = [{"cls": np.array([0, 1]), "instances": Instances(...)} for _ in range(4)]
836
+ >>> result = mosaic._cat_labels(mosaic_labels)
837
+ >>> print(result.keys())
838
+ dict_keys(['im_file', 'ori_shape', 'resized_shape', 'cls', 'instances', 'mosaic_border'])
839
+ """
345
840
  if len(mosaic_labels) == 0:
346
841
  return {}
347
842
  cls = []
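
The remainder of the method is truncated in this hunk; it gathers each tile's classes and instances and stacks them. The cls part reduces to roughly this numpy sketch:

import numpy as np

tiles = [{"cls": np.array([0, 1])}, {"cls": np.array([2])}]  # hypothetical tiles
cls = np.concatenate([t["cls"] for t in tiles], 0)
print(cls)  # [0 1 2]
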
@@ -368,18 +863,82 @@ class Mosaic(BaseMixTransform):
368
863
 
369
864
 
370
865
  class MixUp(BaseMixTransform):
371
- """Class for applying MixUp augmentation to the dataset."""
866
+ """
867
+ Applies MixUp augmentation to image datasets.
868
+
869
+ This class implements the MixUp augmentation technique as described in the paper "mixup: Beyond Empirical Risk
870
+ Minimization" (https://arxiv.org/abs/1710.09412). MixUp combines two images and their labels using a random weight.
871
+
872
+ Attributes:
873
+ dataset (Any): The dataset to which MixUp augmentation will be applied.
874
+ pre_transform (Callable | None): Optional transform to apply before MixUp.
875
+ p (float): Probability of applying MixUp augmentation.
876
+
877
+ Methods:
878
+ get_indexes: Returns a random index from the dataset.
879
+ _mix_transform: Applies MixUp augmentation to the input labels.
880
+
881
+ Examples:
882
+ >>> from ultralytics.data.augment import MixUp
883
+ >>> dataset = YourDataset(...) # Your image dataset
884
+ >>> mixup = MixUp(dataset, p=0.5)
885
+ >>> augmented_labels = mixup(original_labels)
886
+ """
372
887
 
373
888
  def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
374
- """Initializes MixUp object with dataset, pre_transform, and probability of applying MixUp."""
889
+ """
890
+ Initializes the MixUp augmentation object.
891
+
892
+ MixUp is an image augmentation technique that combines two images by taking a weighted sum of their pixel
893
+ values and labels. This implementation is designed for use with the Ultralytics YOLO framework.
894
+
895
+ Args:
896
+ dataset (Any): The dataset to which MixUp augmentation will be applied.
897
+ pre_transform (Callable | None): Optional transform to apply to images before MixUp.
898
+ p (float): Probability of applying MixUp augmentation to an image. Must be in the range [0, 1].
899
+
900
+ Examples:
901
+ >>> from ultralytics.data.dataset import YOLODataset
902
+ >>> dataset = YOLODataset('path/to/data.yaml')
903
+ >>> mixup = MixUp(dataset, pre_transform=None, p=0.5)
904
+ """
375
905
  super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
376
906
 
377
907
  def get_indexes(self):
378
- """Get a random index from the dataset."""
908
+ """
909
+ Get a random index from the dataset.
910
+
911
+ This method returns a single random index from the dataset, which is used to select an image for MixUp
912
+ augmentation.
913
+
914
+ Returns:
915
+ (int): A random integer index within the range of the dataset length.
916
+
917
+ Examples:
918
+ >>> mixup = MixUp(dataset)
919
+ >>> index = mixup.get_indexes()
920
+ >>> print(index)
921
+ 42
922
+ """
379
923
  return random.randint(0, len(self.dataset) - 1)
380
924
 
381
925
  def _mix_transform(self, labels):
382
- """Applies MixUp augmentation as per https://arxiv.org/pdf/1710.09412.pdf."""
926
+ """
927
+ Applies MixUp augmentation to the input labels.
928
+
929
+ This method implements the MixUp augmentation technique as described in the paper
930
+ "mixup: Beyond Empirical Risk Minimization" (https://arxiv.org/abs/1710.09412).
931
+
932
+ Args:
933
+ labels (Dict): A dictionary containing the original image and label information.
934
+
935
+ Returns:
936
+ (Dict): A dictionary containing the mixed-up image and combined label information.
937
+
938
+ Examples:
939
+ >>> mixer = MixUp(dataset)
940
+ >>> mixed_labels = mixer._mix_transform(labels)
941
+ """
383
942
  r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
384
943
  labels2 = labels["mix_labels"][0]
385
944
  labels["img"] = (labels["img"] * r + labels2["img"] * (1 - r)).astype(np.uint8)
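
The Beta(32, 32) draw concentrates r near 0.5, so the blend stays close to an even average. The same arithmetic stand-alone:

import numpy as np

img1 = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
img2 = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
r = np.random.beta(32.0, 32.0)  # typically within ~0.35-0.65
mixed = (img1 * r + img2 * (1 - r)).astype(np.uint8)
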
@@ -390,32 +949,61 @@ class MixUp(BaseMixTransform):
390
949
 
391
950
  class RandomPerspective:
392
951
  """
393
- Implements random perspective and affine transformations on images and corresponding bounding boxes, segments, and
394
- keypoints. These transformations include rotation, translation, scaling, and shearing. The class also offers the
395
- option to apply these transformations conditionally with a specified probability.
952
+ Implements random perspective and affine transformations on images and corresponding annotations.
953
+
954
+ This class applies random rotations, translations, scaling, shearing, and perspective transformations
955
+ to images and their associated bounding boxes, segments, and keypoints. It can be used as part of an
956
+ augmentation pipeline for object detection and instance segmentation tasks.
396
957
 
397
958
  Attributes:
398
- degrees (float): Degree range for random rotations.
399
- translate (float): Fraction of total width and height for random translation.
400
- scale (float): Scaling factor interval, e.g., a scale factor of 0.1 allows a resize between 90%-110%.
401
- shear (float): Shear intensity (angle in degrees).
959
+ degrees (float): Maximum absolute degree range for random rotations.
960
+ translate (float): Maximum translation as a fraction of the image size.
961
+ scale (float): Scaling factor range, e.g., scale=0.1 means 0.9-1.1.
962
+ shear (float): Maximum shear angle in degrees.
402
963
  perspective (float): Perspective distortion factor.
403
- border (tuple): Tuple specifying mosaic border.
404
- pre_transform (callable): A function/transform to apply to the image before starting the random transformation.
964
+ border (Tuple[int, int]): Mosaic border size as (x, y).
965
+ pre_transform (Callable | None): Optional transform to apply before the random perspective.
405
966
 
406
967
  Methods:
407
- affine_transform(img, border): Applies a series of affine transformations to the image.
408
- apply_bboxes(bboxes, M): Transforms bounding boxes using the calculated affine matrix.
409
- apply_segments(segments, M): Transforms segments and generates new bounding boxes.
410
- apply_keypoints(keypoints, M): Transforms keypoints.
411
- __call__(labels): Main method to apply transformations to both images and their corresponding annotations.
412
- box_candidates(box1, box2): Filters out bounding boxes that don't meet certain criteria post-transformation.
968
+ affine_transform: Applies affine transformations to the input image.
969
+ apply_bboxes: Transforms bounding boxes using the affine matrix.
970
+ apply_segments: Transforms segments and generates new bounding boxes.
971
+ apply_keypoints: Transforms keypoints using the affine matrix.
972
+ __call__: Applies the random perspective transformation to images and annotations.
973
+ box_candidates: Filters transformed bounding boxes based on size and aspect ratio.
974
+
975
+ Examples:
976
+ >>> transform = RandomPerspective(degrees=10, translate=0.1, scale=0.1, shear=10)
977
+ >>> image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
978
+ >>> labels = {'img': image, 'cls': np.array([0, 1]), 'instances': Instances(...)}
979
+ >>> result = transform(labels)
980
+ >>> transformed_image = result['img']
981
+ >>> transformed_instances = result['instances']
413
982
  """
414
983
 
415
984
  def __init__(
416
985
  self, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, border=(0, 0), pre_transform=None
417
986
  ):
418
- """Initializes RandomPerspective object with transformation parameters."""
987
+ """
988
+ Initializes RandomPerspective object with transformation parameters.
989
+
990
+ This class implements random perspective and affine transformations on images and corresponding bounding boxes,
991
+ segments, and keypoints. Transformations include rotation, translation, scaling, and shearing.
992
+
993
+ Args:
994
+ degrees (float): Degree range for random rotations.
995
+ translate (float): Fraction of total width and height for random translation.
996
+ scale (float): Scaling factor interval, e.g., a scale factor of 0.5 allows a resize between 50%-150%.
997
+ shear (float): Shear intensity (angle in degrees).
998
+ perspective (float): Perspective distortion factor.
999
+ border (Tuple[int, int]): Tuple specifying mosaic border (top/bottom, left/right).
1000
+ pre_transform (Callable | None): Function/transform to apply to the image before starting the random
1001
+ transformation.
1002
+
1003
+ Examples:
1004
+ >>> transform = RandomPerspective(degrees=10.0, translate=0.1, scale=0.5, shear=5.0)
1005
+ >>> result = transform(labels) # Apply random perspective to labels
1006
+ """
419
1007
 
420
1008
  self.degrees = degrees
421
1009
  self.translate = translate
@@ -429,14 +1017,25 @@ class RandomPerspective:
429
1017
  """
430
1018
  Applies a sequence of affine transformations centered around the image center.
431
1019
 
1020
+ This function performs a series of geometric transformations on the input image, including
1021
+ translation, perspective change, rotation, scaling, and shearing. The transformations are
1022
+ applied in a specific order to maintain consistency.
1023
+
432
1024
  Args:
433
- img (ndarray): Input image.
434
- border (tuple): Border dimensions.
1025
+ img (np.ndarray): Input image to be transformed.
1026
+ border (Tuple[int, int]): Border dimensions for the transformed image.
435
1027
 
436
1028
  Returns:
437
- img (ndarray): Transformed image.
438
- M (ndarray): Transformation matrix.
439
- s (float): Scale factor.
1029
+ (Tuple[np.ndarray, np.ndarray, float]): A tuple containing:
1030
+ - np.ndarray: Transformed image.
1031
+ - np.ndarray: 3x3 transformation matrix.
1032
+ - float: Scale factor applied during the transformation.
1033
+
1034
+ Examples:
1035
+ >>> import numpy as np
1036
+ >>> img = np.random.rand(100, 100, 3)
1037
+ >>> border = (10, 10)
1038
+ >>> transformed_img, matrix, scale = affine_transform(img, border)
440
1039
  """
441
1040
 
442
1041
  # Center
@@ -480,14 +1079,23 @@ class RandomPerspective:
480
1079
 
481
1080
  def apply_bboxes(self, bboxes, M):
482
1081
  """
483
- Apply affine to bboxes only.
1082
+ Apply affine transformation to bounding boxes.
1083
+
1084
+ This function applies an affine transformation to a set of bounding boxes using the provided
1085
+ transformation matrix.
484
1086
 
485
1087
  Args:
486
- bboxes (ndarray): list of bboxes, xyxy format, with shape (num_bboxes, 4).
487
- M (ndarray): affine matrix.
1088
+ bboxes (np.ndarray): Bounding boxes in xyxy format with shape (N, 4), where N is the number
+ of bounding boxes.
+ M (np.ndarray): Affine transformation matrix with shape (3, 3).
488
1091
 
489
1092
  Returns:
490
- new_bboxes (ndarray): bboxes after affine, [num_bboxes, 4].
1093
+ (np.ndarray): Transformed bounding boxes in xyxy format with shape (N, 4).
1094
+
1095
+ Examples:
1096
+ >>> bboxes = np.array([[10, 10, 20, 20], [30, 30, 40, 40]])
+ >>> M = np.eye(3)
1098
+ >>> transformed_bboxes = apply_bboxes(bboxes, M)
491
1099
  """
492
1100
  n = len(bboxes)
493
1101
  if n == 0:
@@ -505,15 +1113,25 @@ class RandomPerspective:
505
1113
 
506
1114
  def apply_segments(self, segments, M):
507
1115
  """
508
- Apply affine to segments and generate new bboxes from segments.
1116
+ Apply affine transformations to segments and generate new bounding boxes.
1117
+
1118
+ This function applies affine transformations to input segments and generates new bounding boxes based on
1119
+ the transformed segments. It clips the transformed segments to fit within the new bounding boxes.
509
1120
 
510
1121
  Args:
511
- segments (ndarray): list of segments, [num_samples, 500, 2].
512
- M (ndarray): affine matrix.
1122
+ segments (np.ndarray): Input segments with shape (N, M, 2), where N is the number of segments and M is the
1123
+ number of points in each segment.
1124
+ M (np.ndarray): Affine transformation matrix with shape (3, 3).
513
1125
 
514
1126
  Returns:
515
- new_segments (ndarray): list of segments after affine, [num_samples, 500, 2].
516
- new_bboxes (ndarray): bboxes after affine, [N, 4].
1127
+ (Tuple[np.ndarray, np.ndarray]): A tuple containing:
1128
+ - New bounding boxes with shape (N, 4) in xyxy format.
1129
+ - Transformed and clipped segments with shape (N, M, 2).
1130
+
1131
+ Examples:
1132
+ >>> segments = np.random.rand(10, 500, 2) # 10 segments with 500 points each
1133
+ >>> M = np.eye(3) # Identity transformation matrix
1134
+ >>> new_bboxes, new_segments = apply_segments(segments, M)
517
1135
  """
518
1136
  n, num = segments.shape[:2]
519
1137
  if n == 0:
@@ -532,14 +1150,25 @@ class RandomPerspective:
532
1150
 
533
1151
  def apply_keypoints(self, keypoints, M):
534
1152
  """
535
- Apply affine to keypoints.
1153
+ Applies affine transformation to keypoints.
1154
+
1155
+ This method transforms the input keypoints using the provided affine transformation matrix. It handles
1156
+ perspective rescaling if necessary and updates the visibility of keypoints that fall outside the image
1157
+ boundaries after transformation.
536
1158
 
537
1159
  Args:
538
- keypoints (ndarray): keypoints, [N, 17, 3].
539
- M (ndarray): affine matrix.
1160
+ keypoints (np.ndarray): Array of keypoints with shape (N, 17, 3), where N is the number of instances,
1161
+ 17 is the number of keypoints per instance, and 3 represents (x, y, visibility).
1162
+ M (np.ndarray): 3x3 affine transformation matrix.
540
1163
 
541
1164
  Returns:
542
- new_keypoints (ndarray): keypoints after affine, [N, 17, 3].
1165
+ (np.ndarray): Transformed keypoints array with the same shape as input (N, 17, 3).
1166
+
1167
+ Examples:
1168
+ >>> random_perspective = RandomPerspective()
1169
+ >>> keypoints = np.random.rand(5, 17, 3) # 5 instances, 17 keypoints each
1170
+ >>> M = np.eye(3) # Identity transformation
1171
+ >>> transformed_keypoints = random_perspective.apply_keypoints(keypoints, M)
543
1172
  """
544
1173
  n, nkpt = keypoints.shape[:2]
545
1174
  if n == 0:
@@ -555,10 +1184,38 @@ class RandomPerspective:
555
1184
 
556
1185
  def __call__(self, labels):
557
1186
  """
558
- Affine images and targets.
1187
+ Applies random perspective and affine transformations to an image and its associated labels.
1188
+
1189
+ This method performs a series of transformations including rotation, translation, scaling, shearing,
1190
+ and perspective distortion on the input image and adjusts the corresponding bounding boxes, segments,
1191
+ and keypoints accordingly.
559
1192
 
560
1193
  Args:
561
- labels (dict): a dict of `bboxes`, `segments`, `keypoints`.
1194
+ labels (Dict): A dictionary containing image data and annotations.
1195
+ Must include:
1196
+ 'img' (ndarray): The input image.
1197
+ 'cls' (ndarray): Class labels.
1198
+ 'instances' (Instances): Object instances with bounding boxes, segments, and keypoints.
1199
+ May include:
1200
+ 'mosaic_border' (Tuple[int, int]): Border size for mosaic augmentation.
1201
+
1202
+ Returns:
1203
+ (Dict): Transformed labels dictionary containing:
1204
+ - 'img' (np.ndarray): The transformed image.
1205
+ - 'cls' (np.ndarray): Updated class labels.
1206
+ - 'instances' (Instances): Updated object instances.
1207
+ - 'resized_shape' (Tuple[int, int]): New image shape after transformation.
1208
+
1209
+ Examples:
1210
+ >>> transform = RandomPerspective()
1211
+ >>> image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
1212
+ >>> labels = {
1213
+ ... 'img': image,
1214
+ ... 'cls': np.array([0, 1, 2]),
1215
+ ... 'instances': Instances(bboxes=np.array([[10, 10, 50, 50], [100, 100, 150, 150]]))
1216
+ ... }
1217
+ >>> result = transform(labels)
1218
+ >>> assert result['img'].shape[:2] == result['resized_shape']
562
1219
  """
563
1220
  if self.pre_transform and "mosaic_border" not in labels:
564
1221
  labels = self.pre_transform(labels)
@@ -605,19 +1262,36 @@ class RandomPerspective:
605
1262
 
606
1263
  def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
607
1264
  """
608
- Compute box candidates based on a set of thresholds. This method compares the characteristics of the boxes
609
- before and after augmentation to decide whether a box is a candidate for further processing.
1265
+ Compute candidate boxes for further processing based on size and aspect ratio criteria.
1266
+
1267
+ This method compares boxes before and after augmentation to determine if they meet specified
1268
+ thresholds for width, height, aspect ratio, and area. It's used to filter out boxes that have
1269
+ been overly distorted or reduced by the augmentation process.
610
1270
 
611
1271
  Args:
612
- box1 (numpy.ndarray): The 4,n bounding box before augmentation, represented as [x1, y1, x2, y2].
613
- box2 (numpy.ndarray): The 4,n bounding box after augmentation, represented as [x1, y1, x2, y2].
614
- wh_thr (float, optional): The width and height threshold in pixels. Default is 2.
615
- ar_thr (float, optional): The aspect ratio threshold. Default is 100.
616
- area_thr (float, optional): The area ratio threshold. Default is 0.1.
617
- eps (float, optional): A small epsilon value to prevent division by zero. Default is 1e-16.
1272
+ box1 (numpy.ndarray): Original boxes before augmentation, shape (4, N) where N is the
1273
+ number of boxes. Format is [x1, y1, x2, y2] in absolute coordinates.
1274
+ box2 (numpy.ndarray): Augmented boxes after transformation, shape (4, N). Format is
1275
+ [x1, y1, x2, y2] in absolute coordinates.
1276
+ wh_thr (float): Width and height threshold in pixels. Boxes smaller than this in either
1277
+ dimension are rejected.
1278
+ ar_thr (float): Aspect ratio threshold. Boxes with an aspect ratio greater than this
1279
+ value are rejected.
1280
+ area_thr (float): Area ratio threshold. Boxes with an area ratio (new/old) less than
1281
+ this value are rejected.
1282
+ eps (float): Small epsilon value to prevent division by zero.
618
1283
 
619
1284
  Returns:
620
- (numpy.ndarray): A boolean array indicating which boxes are candidates based on the given thresholds.
1285
+ (numpy.ndarray): Boolean array of shape (N,) indicating which boxes are candidates.
1286
+ True values correspond to boxes that meet all criteria.
1287
+
1288
+ Examples:
1289
+ >>> random_perspective = RandomPerspective()
1290
+ >>> box1 = np.array([[0, 0, 100, 100], [0, 0, 50, 50]]).T
1291
+ >>> box2 = np.array([[10, 10, 90, 90], [5, 5, 45, 45]]).T
1292
+ >>> candidates = random_perspective.box_candidates(box1, box2)
1293
+ >>> print(candidates)
1294
+ [True True]
621
1295
  """
622
1296
  w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
623
1297
  w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
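
The hunk is truncated here; given the documented thresholds, the remaining filter amounts to a mask like the following (a sketch, not the verbatim source):

import numpy as np

def box_candidates_sketch(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))            # max aspect ratio
    keep_area = w2 * h2 / (w1 * h1 + eps) > area_thr             # area ratio check
    return (w2 > wh_thr) & (h2 > wh_thr) & keep_area & (ar < ar_thr)
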
@@ -627,20 +1301,42 @@ class RandomPerspective:
627
1301
 
628
1302
  class RandomHSV:
629
1303
  """
630
- This class is responsible for performing random adjustments to the Hue, Saturation, and Value (HSV) channels of an
631
- image.
1304
+ Randomly adjusts the Hue, Saturation, and Value (HSV) channels of an image.
1305
+
1306
+ This class applies random HSV augmentation to images within predefined limits set by hgain, sgain, and vgain.
1307
+
1308
+ Attributes:
1309
+ hgain (float): Maximum variation for hue. Range is typically [0, 1].
1310
+ sgain (float): Maximum variation for saturation. Range is typically [0, 1].
1311
+ vgain (float): Maximum variation for value. Range is typically [0, 1].
632
1312
 
633
- The adjustments are random but within limits set by hgain, sgain, and vgain.
1313
+ Methods:
1314
+ __call__: Applies random HSV augmentation to an image.
1315
+
1316
+ Examples:
1317
+ >>> import numpy as np
1318
+ >>> from ultralytics.data.augment import RandomHSV
1319
+ >>> augmenter = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5)
1320
+ >>> image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
1321
+ >>> labels = {"img": image}
1322
+ >>> augmented_labels = augmenter(labels)
1323
+ >>> augmented_image = augmented_labels["img"]
634
1324
  """
635
1325
 
636
1326
  def __init__(self, hgain=0.5, sgain=0.5, vgain=0.5) -> None:
637
1327
  """
638
- Initialize RandomHSV class with gains for each HSV channel.
1328
+ Initializes the RandomHSV object for random HSV (Hue, Saturation, Value) augmentation.
1329
+
1330
+ This class applies random adjustments to the HSV channels of an image within specified limits.
639
1331
 
640
1332
  Args:
641
- hgain (float, optional): Maximum variation for hue. Default is 0.5.
642
- sgain (float, optional): Maximum variation for saturation. Default is 0.5.
643
- vgain (float, optional): Maximum variation for value. Default is 0.5.
1333
+ hgain (float): Maximum variation for hue. Should be in the range [0, 1].
1334
+ sgain (float): Maximum variation for saturation. Should be in the range [0, 1].
1335
+ vgain (float): Maximum variation for value. Should be in the range [0, 1].
1336
+
1337
+ Examples:
1338
+ >>> hsv_aug = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5)
1339
+ >>> labels = {'img': image}
+ >>> hsv_aug(labels) # labels['img'] is augmented in place
644
1340
  """
645
1341
  self.hgain = hgain
646
1342
  self.sgain = sgain
@@ -648,9 +1344,24 @@ class RandomHSV:
648
1344
 
649
1345
  def __call__(self, labels):
650
1346
  """
651
- Applies random HSV augmentation to an image within the predefined limits.
1347
+ Applies random HSV augmentation to an image within predefined limits.
652
1348
 
653
- The modified image replaces the original image in the input 'labels' dict.
1349
+ This method modifies the input image by randomly adjusting its Hue, Saturation, and Value (HSV) channels.
1350
+ The adjustments are made within the limits set by hgain, sgain, and vgain during initialization.
1351
+
1352
+ Args:
1353
+ labels (Dict): A dictionary containing image data and metadata. Must include an 'img' key with
1354
+ the image as a numpy array.
1355
+
1356
+ Returns:
1357
+ (None): The function modifies the input 'labels' dictionary in-place, updating the 'img' key
1358
+ with the HSV-augmented image.
1359
+
1360
+ Examples:
1361
+ >>> hsv_augmenter = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5)
1362
+ >>> labels = {'img': np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)}
1363
+ >>> hsv_augmenter(labels)
1364
+ >>> augmented_img = labels['img']
654
1365
  """
655
1366
  img = labels["img"]
656
1367
  if self.hgain or self.sgain or self.vgain:
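
The body is truncated in this hunk; in recent releases the gains are applied through per-channel lookup tables, roughly as sketched below (treat the exact constants as assumptions):

import cv2
import numpy as np

def random_hsv_sketch(img, hgain=0.5, sgain=0.5, vgain=0.5):
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1   # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    x = np.arange(0, 256, dtype=r.dtype)
    lut_hue = ((x * r[0]) % 180).astype(img.dtype)                # hue wraps at 180
    lut_sat = np.clip(x * r[1], 0, 255).astype(img.dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(img.dtype)
    im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
    return cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR)
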
@@ -672,18 +1383,42 @@ class RandomFlip:
672
1383
  """
673
1384
  Applies a random horizontal or vertical flip to an image with a given probability.
674
1385
 
675
- Also updates any instances (bounding boxes, keypoints, etc.) accordingly.
1386
+ This class performs random image flipping and updates corresponding instance annotations such as
1387
+ bounding boxes and keypoints.
1388
+
1389
+ Attributes:
1390
+ p (float): Probability of applying the flip. Must be between 0 and 1.
1391
+ direction (str): Direction of flip, either 'horizontal' or 'vertical'.
1392
+ flip_idx (array-like): Index mapping for flipping keypoints, if applicable.
1393
+
1394
+ Methods:
1395
+ __call__: Applies the random flip transformation to an image and its annotations.
1396
+
1397
+ Examples:
1398
+ >>> transform = RandomFlip(p=0.5, direction='horizontal')
1399
+ >>> result = transform({"img": image, "instances": instances})
1400
+ >>> flipped_image = result["img"]
1401
+ >>> flipped_instances = result["instances"]
676
1402
  """
677
1403
 
678
1404
  def __init__(self, p=0.5, direction="horizontal", flip_idx=None) -> None:
679
1405
  """
680
1406
  Initializes the RandomFlip class with probability and direction.
681
1407
 
1408
+ This class applies a random horizontal or vertical flip to an image with a given probability.
1409
+ It also updates any instances (bounding boxes, keypoints, etc.) accordingly.
1410
+
682
1411
  Args:
683
- p (float, optional): The probability of applying the flip. Must be between 0 and 1. Default is 0.5.
684
- direction (str, optional): The direction to apply the flip. Must be 'horizontal' or 'vertical'.
685
- Default is 'horizontal'.
686
- flip_idx (array-like, optional): Index mapping for flipping keypoints, if any.
1412
+ p (float): The probability of applying the flip. Must be between 0 and 1.
1413
+ direction (str): The direction to apply the flip. Must be 'horizontal' or 'vertical'.
1414
+ flip_idx (List[int] | None): Index mapping for flipping keypoints, if any.
1415
+
1416
+ Raises:
1417
+ AssertionError: If direction is not 'horizontal' or 'vertical', or if p is not between 0 and 1.
1418
+
1419
+ Examples:
1420
+ >>> flip = RandomFlip(p=0.5, direction='horizontal')
1421
+ >>> flip = RandomFlip(p=0.7, direction='vertical', flip_idx=[1, 0, 3, 2, 5, 4])
687
1422
  """
688
1423
  assert direction in {"horizontal", "vertical"}, f"Support direction `horizontal` or `vertical`, got {direction}"
689
1424
  assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."
@@ -696,12 +1431,25 @@ class RandomFlip:
         """
         Applies random flip to an image and updates any instances like bounding boxes or keypoints accordingly.
 
+        This method randomly flips the input image either horizontally or vertically based on the initialized
+        probability and direction. It also updates the corresponding instances (bounding boxes, keypoints) to
+        match the flipped image.
+
         Args:
-            labels (dict): A dictionary containing the keys 'img' and 'instances'. 'img' is the image to be flipped.
-                'instances' is an object containing bounding boxes and optionally keypoints.
+            labels (Dict): A dictionary containing the following keys:
+                'img' (numpy.ndarray): The image to be flipped.
+                'instances' (ultralytics.utils.instance.Instances): An object containing bounding boxes and
+                    optionally keypoints.
 
         Returns:
-            (dict): The same dict with the flipped image and updated instances under the 'img' and 'instances' keys.
+            (Dict): The same dictionary with the flipped image and updated instances:
+                'img' (numpy.ndarray): The flipped image.
+                'instances' (ultralytics.utils.instance.Instances): Updated instances matching the flipped image.
+
+        Examples:
+            >>> labels = {'img': np.random.rand(640, 640, 3), 'instances': Instances(...)}
+            >>> random_flip = RandomFlip(p=0.5, direction='horizontal')
+            >>> flipped_labels = random_flip(labels)
         """
         img = labels["img"]
         instances = labels.pop("instances")
@@ -726,10 +1474,56 @@ class RandomFlip:
 
 
 class LetterBox:
-    """Resize image and padding for detection, instance segmentation, pose."""
+    """
+    Resize image and padding for detection, instance segmentation, pose.
+
+    This class resizes and pads images to a specified shape while preserving aspect ratio. It also updates
+    corresponding labels and bounding boxes.
+
+    Attributes:
+        new_shape (tuple): Target shape (height, width) for resizing.
+        auto (bool): Whether to use minimum rectangle.
+        scaleFill (bool): Whether to stretch the image to new_shape.
+        scaleup (bool): Whether to allow scaling up. If False, only scale down.
+        stride (int): Stride for rounding padding.
+        center (bool): Whether to center the image or align to top-left.
+
+    Methods:
+        __call__: Resize and pad image, update labels and bounding boxes.
+
+    Examples:
+        >>> transform = LetterBox(new_shape=(640, 640))
+        >>> result = transform(labels)
+        >>> resized_img = result['img']
+        >>> updated_instances = result['instances']
+    """
 
     def __init__(self, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, center=True, stride=32):
-        """Initialize LetterBox object with specific parameters."""
+        """
+        Initialize LetterBox object for resizing and padding images.
+
+        This class is designed to resize and pad images for object detection, instance segmentation, and pose
+        estimation tasks. It supports various resizing modes including auto-sizing, scale-fill, and letterboxing.
+
+        Args:
+            new_shape (Tuple[int, int]): Target size (height, width) for the resized image.
+            auto (bool): If True, use minimum rectangle to resize. If False, use new_shape directly.
+            scaleFill (bool): If True, stretch the image to new_shape without padding.
+            scaleup (bool): If True, allow scaling up. If False, only scale down.
+            center (bool): If True, center the placed image. If False, place image in top-left corner.
+            stride (int): Stride of the model (e.g., 32 for YOLOv5).
+
+        Attributes:
+            new_shape (Tuple[int, int]): Target size for the resized image.
+            auto (bool): Flag for using minimum rectangle resizing.
+            scaleFill (bool): Flag for stretching image without padding.
+            scaleup (bool): Flag for allowing upscaling.
+            stride (int): Stride value for ensuring image size is divisible by stride.
+
+        Examples:
+            >>> letterbox = LetterBox(new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, stride=32)
+            >>> resized_img = letterbox(original_img)
+        """
         self.new_shape = new_shape
         self.auto = auto
         self.scaleFill = scaleFill
@@ -738,7 +1532,27 @@ class LetterBox:
         self.center = center  # Put the image in the middle or top-left
 
     def __call__(self, labels=None, image=None):
-        """Return updated labels and image with added border."""
+        """
+        Resizes and pads an image for object detection, instance segmentation, or pose estimation tasks.
+
+        This method applies letterboxing to the input image, which involves resizing the image while maintaining its
+        aspect ratio and adding padding to fit the new shape. It also updates any associated labels accordingly.
+
+        Args:
+            labels (Dict | None): A dictionary containing image data and associated labels, or empty dict if None.
+            image (np.ndarray | None): The input image as a numpy array. If None, the image is taken from 'labels'.
+
+        Returns:
+            (Dict | Tuple): If 'labels' is provided, returns an updated dictionary with the resized and padded image,
+                updated labels, and additional metadata. If 'labels' is empty, returns a tuple containing the resized
+                and padded image, and a tuple of (ratio, (left_pad, top_pad)).
+
+        Examples:
+            >>> letterbox = LetterBox(new_shape=(640, 640))
+            >>> result = letterbox(labels={'img': np.zeros((480, 640, 3)), 'instances': Instances(...)})
+            >>> resized_img = result['img']
+            >>> updated_instances = result['instances']
+        """
         if labels is None:
             labels = {}
         img = labels.get("img") if image is None else image
@@ -786,7 +1600,28 @@ class LetterBox:
             return img
 
     def _update_labels(self, labels, ratio, padw, padh):
-        """Update labels."""
+        """
+        Updates labels after applying letterboxing to an image.
+
+        This method modifies the bounding box coordinates of instances in the labels
+        to account for resizing and padding applied during letterboxing.
+
+        Args:
+            labels (Dict): A dictionary containing image labels and instances.
+            ratio (Tuple[float, float]): Scaling ratios (width, height) applied to the image.
+            padw (float): Padding width added to the image.
+            padh (float): Padding height added to the image.
+
+        Returns:
+            (Dict): Updated labels dictionary with modified instance coordinates.
+
+        Examples:
+            >>> letterbox = LetterBox(new_shape=(640, 640))
+            >>> labels = {'instances': Instances(...)}
+            >>> ratio = (0.5, 0.5)
+            >>> padw, padh = 10, 20
+            >>> updated_labels = letterbox._update_labels(labels, ratio, padw, padh)
+        """
         labels["instances"].convert_bbox(format="xyxy")
         labels["instances"].denormalize(*labels["img"].shape[:2][::-1])
         labels["instances"].scale(*ratio)
@@ -796,36 +1631,59 @@ class LetterBox:
 
 
 class CopyPaste:
     """
-    Implements the Copy-Paste augmentation as described in the paper https://arxiv.org/abs/2012.07177. This class is
-    responsible for applying the Copy-Paste augmentation on images and their corresponding instances.
+    Implements Copy-Paste augmentation as described in https://arxiv.org/abs/2012.07177.
+
+    This class applies Copy-Paste augmentation on images and their corresponding instances.
+
+    Attributes:
+        p (float): Probability of applying the Copy-Paste augmentation. Must be between 0 and 1.
+
+    Methods:
+        __call__: Applies Copy-Paste augmentation to given image and instances.
+
+    Examples:
+        >>> copypaste = CopyPaste(p=0.5)
+        >>> augmented_labels = copypaste(labels)
+        >>> augmented_image = augmented_labels['img']
     """
 
     def __init__(self, p=0.5) -> None:
         """
-        Initializes the CopyPaste class with a given probability.
+        Initializes the CopyPaste augmentation object.
+
+        This class implements the Copy-Paste augmentation as described in the paper "Simple Copy-Paste is a Strong Data
+        Augmentation Method for Instance Segmentation" (https://arxiv.org/abs/2012.07177). It applies the Copy-Paste
+        augmentation on images and their corresponding instances with a given probability.
 
         Args:
-            p (float, optional): The probability of applying the Copy-Paste augmentation. Must be between 0 and 1.
-                Default is 0.5.
+            p (float): The probability of applying the Copy-Paste augmentation. Must be between 0 and 1.
+
+        Attributes:
+            p (float): Stores the probability of applying the augmentation.
+
+        Examples:
+            >>> augment = CopyPaste(p=0.7)
+            >>> augmented_data = augment(original_data)
         """
         self.p = p
 
     def __call__(self, labels):
         """
-        Applies the Copy-Paste augmentation to the given image and instances.
+        Applies Copy-Paste augmentation to an image and its instances.
 
         Args:
-            labels (dict): A dictionary containing:
-                - 'img': The image to augment.
-                - 'cls': Class labels associated with the instances.
-                - 'instances': Object containing bounding boxes, and optionally, keypoints and segments.
+            labels (Dict): A dictionary containing:
+                - 'img' (np.ndarray): The image to augment.
+                - 'cls' (np.ndarray): Class labels for the instances.
+                - 'instances' (ultralytics.engine.results.Instances): Object containing bounding boxes, segments, etc.
 
         Returns:
-            (dict): Dict with augmented image and updated instances under the 'img', 'cls', and 'instances' keys.
+            (Dict): Dictionary with augmented image and updated instances under 'img', 'cls', and 'instances' keys.
 
-        Notes:
-            1. Instances are expected to have 'segments' as one of their attributes for this augmentation to work.
-            2. This method modifies the input dictionary 'labels' in place.
+        Examples:
+            >>> labels = {'img': np.random.rand(640, 640, 3), 'cls': np.array([0, 1, 2]), 'instances': Instances(...)}
+            >>> augmenter = CopyPaste(p=0.5)
+            >>> augmented_labels = augmenter(labels)
         """
         im = labels["img"]
         cls = labels["cls"]
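For context, the Copy-Paste idea documented here pastes mirrored object masks back onto the image. A rough sketch of one paste step, assuming a single (N, 2) pixel-coordinate polygon (hypothetical helper, not the shipped implementation):

    import cv2
    import numpy as np

    def copy_paste_once(im: np.ndarray, polygon: np.ndarray) -> np.ndarray:
        # Paste a horizontally mirrored copy of one polygon-outlined object onto the image.
        h, w = im.shape[:2]
        mirrored = polygon.copy()
        mirrored[:, 0] = w - mirrored[:, 0]                 # mirror the outline left-right
        mask = np.zeros((h, w), dtype=np.uint8)
        cv2.fillPoly(mask, [mirrored.astype(np.int32)], 1)  # rasterize the mirrored outline
        out = im.copy()
        out[mask == 1] = cv2.flip(im, 1)[mask == 1]         # copy pixels from the flipped image
        return out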
@@ -862,15 +1720,61 @@ class CopyPaste:
 
 
 class Albumentations:
     """
-    Albumentations transformations.
+    Albumentations transformations for image augmentation.
+
+    This class applies various image transformations using the Albumentations library. It includes operations such as
+    Blur, Median Blur, conversion to grayscale, Contrast Limited Adaptive Histogram Equalization (CLAHE), random changes
+    in brightness and contrast, RandomGamma, and image quality reduction through compression.
+
+    Attributes:
+        p (float): Probability of applying the transformations.
+        transform (albumentations.Compose): Composed Albumentations transforms.
+        contains_spatial (bool): Indicates if the transforms include spatial operations.
+
+    Methods:
+        __call__: Applies the Albumentations transformations to the input labels.
 
-    Optional, uninstall package to disable. Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive
-    Histogram Equalization, random change of brightness and contrast, RandomGamma and lowering of image quality by
-    compression.
+    Examples:
+        >>> transform = Albumentations(p=0.5)
+        >>> augmented_labels = transform(labels)
+
+    Notes:
+        - The Albumentations package must be installed to use this class.
+        - If the package is not installed or an error occurs during initialization, the transform will be set to None.
+        - Spatial transforms are handled differently and require special processing for bounding boxes.
     """
 
     def __init__(self, p=1.0):
-        """Initialize the transform object for YOLO bbox formatted params."""
+        """
+        Initialize the Albumentations transform object for YOLO bbox formatted parameters.
+
+        This class applies various image augmentations using the Albumentations library, including Blur, Median Blur,
+        conversion to grayscale, Contrast Limited Adaptive Histogram Equalization, random changes of brightness and
+        contrast, RandomGamma, and image quality reduction through compression.
+
+        Args:
+            p (float): Probability of applying the augmentations. Must be between 0 and 1.
+
+        Attributes:
+            p (float): Probability of applying the augmentations.
+            transform (albumentations.Compose): Composed Albumentations transforms.
+            contains_spatial (bool): Indicates if the transforms include spatial transformations.
+
+        Raises:
+            ImportError: If the Albumentations package is not installed.
+            Exception: For any other errors during initialization.
+
+        Examples:
+            >>> transform = Albumentations(p=0.5)
+            >>> augmented = transform(image=image, bboxes=bboxes, class_labels=classes)
+            >>> augmented_image = augmented['image']
+            >>> augmented_bboxes = augmented['bboxes']
+
+        Notes:
+            - Requires Albumentations version 1.0.3 or higher.
+            - Spatial transforms are handled differently to ensure bbox compatibility.
+            - Some transforms are applied with very low probability (0.01) by default.
+        """
         self.p = p
         self.transform = None
         prefix = colorstr("albumentations: ")
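As background for this hunk, a pipeline like the one described is typically composed with the Albumentations API roughly as follows. The transform list, the probabilities, and the `quality_lower` argument are assumptions for illustration and may differ by Albumentations version:

    import albumentations as A  # optional dependency; pip install albumentations
    import numpy as np

    image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
    bboxes = [(0.5, 0.5, 0.2, 0.2)]  # YOLO-format (x_center, y_center, w, h), normalized
    classes = [0]

    transform = A.Compose(
        [
            A.Blur(p=0.01),
            A.MedianBlur(p=0.01),
            A.ToGray(p=0.01),
            A.CLAHE(p=0.01),
            A.RandomBrightnessContrast(p=0.0),
            A.RandomGamma(p=0.0),
            A.ImageCompression(quality_lower=75, p=0.0),
        ],
        bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"]),
    )
    out = transform(image=image, bboxes=bboxes, class_labels=classes)
    augmented_image, augmented_bboxes = out["image"], out["bboxes"]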
@@ -949,7 +1853,36 @@ class Albumentations:
             LOGGER.info(f"{prefix}{e}")
 
     def __call__(self, labels):
-        """Generates object detections and returns a dictionary with detection results."""
+        """
+        Applies Albumentations transformations to input labels.
+
+        This method applies a series of image augmentations using the Albumentations library. It can perform both
+        spatial and non-spatial transformations on the input image and its corresponding labels.
+
+        Args:
+            labels (Dict): A dictionary containing image data and annotations. Expected keys are:
+                - 'img': numpy.ndarray representing the image
+                - 'cls': numpy.ndarray of class labels
+                - 'instances': object containing bounding boxes and other instance information
+
+        Returns:
+            (Dict): The input dictionary with augmented image and updated annotations.
+
+        Examples:
+            >>> transform = Albumentations(p=0.5)
+            >>> labels = {
+            ...     "img": np.random.rand(640, 640, 3),
+            ...     "cls": np.array([0, 1]),
+            ...     "instances": Instances(bboxes=np.array([[0, 0, 1, 1], [0.5, 0.5, 0.8, 0.8]]))
+            ... }
+            >>> augmented = transform(labels)
+            >>> assert augmented["img"].shape == (640, 640, 3)
+
+        Notes:
+            - The method applies transformations with probability self.p.
+            - Spatial transforms update bounding boxes, while non-spatial transforms only modify the image.
+            - Requires the Albumentations library to be installed.
+        """
         if self.transform is None or random.random() > self.p:
             return labels
 
@@ -975,18 +1908,32 @@ class Albumentations:
 
 
 class Format:
     """
-    Formats image annotations for object detection, instance segmentation, and pose estimation tasks. The class
-    standardizes the image and instance annotations to be used by the `collate_fn` in PyTorch DataLoader.
+    A class for formatting image annotations for object detection, instance segmentation, and pose estimation tasks.
+
+    This class standardizes image and instance annotations to be used by the `collate_fn` in PyTorch DataLoader.
 
     Attributes:
-        bbox_format (str): Format for bounding boxes. Default is 'xywh'.
-        normalize (bool): Whether to normalize bounding boxes. Default is True.
-        return_mask (bool): Return instance masks for segmentation. Default is False.
-        return_keypoint (bool): Return keypoints for pose estimation. Default is False.
-        mask_ratio (int): Downsample ratio for masks. Default is 4.
-        mask_overlap (bool): Whether to overlap masks. Default is True.
-        batch_idx (bool): Keep batch indexes. Default is True.
-        bgr (float): The probability to return BGR images. Default is 0.0.
+        bbox_format (str): Format for bounding boxes. Options are 'xywh' or 'xyxy'.
+        normalize (bool): Whether to normalize bounding boxes.
+        return_mask (bool): Whether to return instance masks for segmentation.
+        return_keypoint (bool): Whether to return keypoints for pose estimation.
+        return_obb (bool): Whether to return oriented bounding boxes.
+        mask_ratio (int): Downsample ratio for masks.
+        mask_overlap (bool): Whether to overlap masks.
+        batch_idx (bool): Whether to keep batch indexes.
+        bgr (float): The probability to return BGR images.
+
+    Methods:
+        __call__: Formats labels dictionary with image, classes, bounding boxes, and optionally masks and keypoints.
+        _format_img: Converts image from Numpy array to PyTorch tensor.
+        _format_segments: Converts polygon points to bitmap masks.
+
+    Examples:
+        >>> formatter = Format(bbox_format='xywh', normalize=True, return_mask=True)
+        >>> formatted_labels = formatter(labels)
+        >>> img = formatted_labels['img']
+        >>> bboxes = formatted_labels['bboxes']
+        >>> masks = formatted_labels['masks']
     """
 
     def __init__(
@@ -1001,7 +1948,39 @@ class Format:
         batch_idx=True,
         bgr=0.0,
     ):
-        """Initializes the Format class with given parameters."""
+        """
+        Initializes the Format class with given parameters for image and instance annotation formatting.
+
+        This class standardizes image and instance annotations for object detection, instance segmentation, and pose
+        estimation tasks, preparing them for use in PyTorch DataLoader's `collate_fn`.
+
+        Args:
+            bbox_format (str): Format for bounding boxes. Options are 'xywh', 'xyxy', etc.
+            normalize (bool): Whether to normalize bounding boxes to [0,1].
+            return_mask (bool): If True, returns instance masks for segmentation tasks.
+            return_keypoint (bool): If True, returns keypoints for pose estimation tasks.
+            return_obb (bool): If True, returns oriented bounding boxes.
+            mask_ratio (int): Downsample ratio for masks.
+            mask_overlap (bool): If True, allows mask overlap.
+            batch_idx (bool): If True, keeps batch indexes.
+            bgr (float): Probability of returning BGR images instead of RGB.
+
+        Attributes:
+            bbox_format (str): Format for bounding boxes.
+            normalize (bool): Whether bounding boxes are normalized.
+            return_mask (bool): Whether to return instance masks.
+            return_keypoint (bool): Whether to return keypoints.
+            return_obb (bool): Whether to return oriented bounding boxes.
+            mask_ratio (int): Downsample ratio for masks.
+            mask_overlap (bool): Whether masks can overlap.
+            batch_idx (bool): Whether to keep batch indexes.
+            bgr (float): The probability to return BGR images.
+
+        Examples:
+            >>> format = Format(bbox_format='xyxy', return_mask=True, return_keypoint=False)
+            >>> print(format.bbox_format)
+            xyxy
+        """
         self.bbox_format = bbox_format
         self.normalize = normalize
         self.return_mask = return_mask  # set False when training detection only
@@ -1013,7 +1992,34 @@ class Format:
         self.bgr = bgr
 
     def __call__(self, labels):
-        """Return formatted image, classes, bounding boxes & keypoints to be used by 'collate_fn'."""
+        """
+        Formats image annotations for object detection, instance segmentation, and pose estimation tasks.
+
+        This method standardizes the image and instance annotations to be used by the `collate_fn` in PyTorch
+        DataLoader. It processes the input labels dictionary, converting annotations to the specified format and
+        applying normalization if required.
+
+        Args:
+            labels (Dict): A dictionary containing image and annotation data with the following keys:
+                - 'img': The input image as a numpy array.
+                - 'cls': Class labels for instances.
+                - 'instances': An Instances object containing bounding boxes, segments, and keypoints.
+
+        Returns:
+            (Dict): A dictionary with formatted data, including:
+                - 'img': Formatted image tensor.
+                - 'cls': Class labels tensor.
+                - 'bboxes': Bounding boxes tensor in the specified format.
+                - 'masks': Instance masks tensor (if return_mask is True).
+                - 'keypoints': Keypoints tensor (if return_keypoint is True).
+                - 'batch_idx': Batch index tensor (if batch_idx is True).
+
+        Examples:
+            >>> formatter = Format(bbox_format='xywh', normalize=True, return_mask=True)
+            >>> labels = {'img': np.random.rand(640, 640, 3), 'cls': np.array([0, 1]), 'instances': Instances(...)}
+            >>> formatted_labels = formatter(labels)
+            >>> print(formatted_labels.keys())
+        """
         img = labels.pop("img")
         h, w = img.shape[:2]
         cls = labels.pop("cls")
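The `batch_idx` output documented above exists so a DataLoader `collate_fn` can concatenate variable-length box lists. A minimal sketch of such a collate function (hypothetical helper, not the library's own):

    import torch

    def collate_boxes(batch):
        # Stack images; concatenate per-image box tensors, tagging each row with its sample index.
        images = torch.stack([sample["img"] for sample in batch])
        boxes = torch.cat([sample["bboxes"] for sample in batch])
        batch_idx = torch.cat(
            [torch.full((len(sample["bboxes"]),), i) for i, sample in enumerate(batch)]
        )
        return images, boxes, batch_idx  # batch_idx maps every box back to its image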
@@ -1053,7 +2059,29 @@ class Format:
         return labels
 
     def _format_img(self, img):
-        """Format the image for YOLO from Numpy array to PyTorch tensor."""
+        """
+        Formats an image for YOLO from a Numpy array to a PyTorch tensor.
+
+        This function performs the following operations:
+        1. Ensures the image has 3 dimensions (adds a channel dimension if needed).
+        2. Transposes the image from HWC to CHW format.
+        3. Optionally flips the color channels from RGB to BGR.
+        4. Converts the image to a contiguous array.
+        5. Converts the Numpy array to a PyTorch tensor.
+
+        Args:
+            img (np.ndarray): Input image as a Numpy array with shape (H, W, C) or (H, W).
+
+        Returns:
+            (torch.Tensor): Formatted image as a PyTorch tensor with shape (C, H, W).
+
+        Examples:
+            >>> import numpy as np
+            >>> img = np.random.rand(100, 100, 3)
+            >>> formatted_img = self._format_img(img)
+            >>> print(formatted_img.shape)
+            torch.Size([3, 100, 100])
+        """
         if len(img.shape) < 3:
             img = np.expand_dims(img, -1)
         img = img.transpose(2, 0, 1)
@@ -1062,7 +2090,26 @@ class Format:
         return img
 
     def _format_segments(self, instances, cls, w, h):
-        """Convert polygon points to bitmap."""
+        """
+        Converts polygon segments to bitmap masks.
+
+        Args:
+            instances (Instances): Object containing segment information.
+            cls (numpy.ndarray): Class labels for each instance.
+            w (int): Width of the image.
+            h (int): Height of the image.
+
+        Returns:
+            (tuple): Tuple containing:
+                masks (numpy.ndarray): Bitmap masks with shape (N, H, W) or (1, H, W) if mask_overlap is True.
+                instances (Instances): Updated instances object with sorted segments if mask_overlap is True.
+                cls (numpy.ndarray): Updated class labels, sorted if mask_overlap is True.
+
+        Notes:
+            - If self.mask_overlap is True, masks are overlapped and sorted by area.
+            - If self.mask_overlap is False, each mask is represented separately.
+            - Masks are downsampled according to self.mask_ratio.
+        """
         segments = instances.segments
         if self.mask_overlap:
             masks, sorted_idx = polygons2masks_overlap((h, w), segments, downsample_ratio=self.mask_ratio)
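For intuition, converting one polygon to a downsampled bitmap mask, as `_format_segments` is documented to do via `polygons2masks_overlap`, looks roughly like this sketch (an assumption-level illustration using OpenCV, not the shipped helpers):

    import cv2
    import numpy as np

    def polygon_to_mask(polygon: np.ndarray, h: int, w: int, downsample: int = 4) -> np.ndarray:
        # Rasterize one (N, 2) pixel-coordinate polygon, then downsample like mask_ratio does.
        mask = np.zeros((h, w), dtype=np.uint8)
        cv2.fillPoly(mask, [polygon.astype(np.int32)], 1)
        return cv2.resize(mask, (w // downsample, h // downsample), interpolation=cv2.INTER_NEAREST)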
@@ -1077,14 +2124,28 @@ class Format:
 
 
 class RandomLoadText:
     """
-    Randomly sample positive texts and negative texts and update the class indices accordingly to the number of samples.
+    Randomly samples positive and negative texts and updates class indices accordingly.
+
+    This class is responsible for sampling texts from a given set of class texts, including both positive
+    (present in the image) and negative (not present in the image) samples. It updates the class indices
+    to reflect the sampled texts and can optionally pad the text list to a fixed length.
 
     Attributes:
-        prompt_format (str): Format for prompt. Default is '{}'.
-        neg_samples (tuple[int]): A ranger to randomly sample negative texts, Default is (80, 80).
-        max_samples (int): The max number of different text samples in one image, Default is 80.
-        padding (bool): Whether to pad texts to max_samples. Default is False.
-        padding_value (str): The padding text. Default is "".
+        prompt_format (str): Format string for text prompts.
+        neg_samples (Tuple[int, int]): Range for randomly sampling negative texts.
+        max_samples (int): Maximum number of different text samples in one image.
+        padding (bool): Whether to pad texts to max_samples.
+        padding_value (str): The text used for padding when padding is True.
+
+    Methods:
+        __call__: Processes the input labels and returns updated classes and texts.
+
+    Examples:
+        >>> loader = RandomLoadText(prompt_format="Object: {}", neg_samples=(5, 10), max_samples=20)
+        >>> labels = {"cls": [0, 1, 2], "texts": [["cat"], ["dog"], ["bird"]], "instances": [...]}
+        >>> updated_labels = loader(labels)
+        >>> print(updated_labels["texts"])
+        ['Object: cat', 'Object: dog', 'Object: bird', 'Object: elephant', 'Object: car']
     """
 
     def __init__(
@@ -1095,7 +2156,39 @@ class RandomLoadText:
         padding: bool = False,
         padding_value: str = "",
     ) -> None:
-        """Initializes the RandomLoadText class with given parameters."""
+        """
+        Initializes the RandomLoadText class for randomly sampling positive and negative texts.
+
+        This class is designed to randomly sample positive texts and negative texts, and update the class
+        indices according to the number of samples. It can be used for text-based object detection tasks.
+
+        Args:
+            prompt_format (str): Format string for the prompt. Default is '{}'. The format string should
+                contain a single pair of curly braces {} where the text will be inserted.
+            neg_samples (Tuple[int, int]): A range to randomly sample negative texts. The first integer
+                specifies the minimum number of negative samples, and the second integer specifies the
+                maximum. Default is (80, 80).
+            max_samples (int): The maximum number of different text samples in one image. Default is 80.
+            padding (bool): Whether to pad texts to max_samples. If True, the number of texts will always
+                be equal to max_samples. Default is False.
+            padding_value (str): The padding text to use when padding is True. Default is an empty string.
+
+        Attributes:
+            prompt_format (str): The format string for the prompt.
+            neg_samples (Tuple[int, int]): The range for sampling negative texts.
+            max_samples (int): The maximum number of text samples.
+            padding (bool): Whether padding is enabled.
+            padding_value (str): The value used for padding.
+
+        Examples:
+            >>> random_load_text = RandomLoadText(prompt_format="Object: {}", neg_samples=(50, 100), max_samples=120)
+            >>> random_load_text.prompt_format
+            'Object: {}'
+            >>> random_load_text.neg_samples
+            (50, 100)
+            >>> random_load_text.max_samples
+            120
+        """
         self.prompt_format = prompt_format
         self.neg_samples = neg_samples
         self.max_samples = max_samples
@@ -1103,7 +2196,24 @@ class RandomLoadText:
         self.padding_value = padding_value
 
     def __call__(self, labels: dict) -> dict:
-        """Return updated classes and texts."""
+        """
+        Randomly samples positive and negative texts and updates class indices accordingly.
+
+        This method samples positive texts based on the existing class labels in the image, and randomly
+        selects negative texts from the remaining classes. It then updates the class indices to match the
+        new sampled text order.
+
+        Args:
+            labels (Dict): A dictionary containing image labels and metadata. Must include 'texts' and 'cls' keys.
+
+        Returns:
+            (Dict): Updated labels dictionary with new 'cls' and 'texts' entries.
+
+        Examples:
+            >>> loader = RandomLoadText(prompt_format="A photo of {}", neg_samples=(5, 10), max_samples=20)
+            >>> labels = {"cls": np.array([[0], [1], [2]]), "texts": [["dog"], ["cat"], ["bird"]]}
+            >>> updated_labels = loader(labels)
+        """
         assert "texts" in labels, "No texts found in labels."
         class_texts = labels["texts"]
         num_classes = len(class_texts)
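The sampling logic documented in this hunk can be sketched as follows. The helper and its return shape are illustrative assumptions, not the shipped method:

    import random

    def sample_texts(pos_ids, num_classes, neg_range=(80, 80), max_samples=80):
        # Keep the positive class ids, add a random number of negatives, then remap ids.
        pos = sorted(set(pos_ids))[:max_samples]
        negatives = [i for i in range(num_classes) if i not in pos]
        k = min(random.randint(*neg_range), max_samples - len(pos), len(negatives))
        sampled = pos + random.sample(negatives, k)
        id_map = {old: new for new, old in enumerate(sampled)}  # old class id -> new index
        return sampled, id_map

    sampled, id_map = sample_texts([0, 2], num_classes=10, neg_range=(2, 4), max_samples=6)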
@@ -1150,7 +2260,28 @@ class RandomLoadText:
 
 
 def v8_transforms(dataset, imgsz, hyp, stretch=False):
-    """Convert images to a size suitable for YOLOv8 training."""
+    """
+    Applies a series of image transformations for YOLOv8 training.
+
+    This function creates a composition of image augmentation techniques to prepare images for YOLOv8 training.
+    It includes operations such as mosaic, copy-paste, random perspective, mixup, and various color adjustments.
+
+    Args:
+        dataset (Dataset): The dataset object containing image data and annotations.
+        imgsz (int): The target image size for resizing.
+        hyp (Dict): A dictionary of hyperparameters controlling various aspects of the transformations.
+        stretch (bool): If True, applies stretching to the image. If False, uses LetterBox resizing.
+
+    Returns:
+        (Compose): A composition of image transformations to be applied to the dataset.
+
+    Examples:
+        >>> from ultralytics.data.dataset import YOLODataset
+        >>> dataset = YOLODataset(img_path='path/to/images', imgsz=640)
+        >>> hyp = {'mosaic': 1.0, 'copy_paste': 0.5, 'degrees': 10.0, 'translate': 0.2, 'scale': 0.9}
+        >>> transforms = v8_transforms(dataset, imgsz=640, hyp=hyp)
+        >>> augmented_data = transforms(dataset[0])
+    """
     pre_transform = Compose(
         [
             Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic),
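For orientation, an illustrative ordering of such a training pipeline using the class names from this module, assuming `dataset` is a YOLO-style dataset object; the hyperparameter values are placeholders, not the function's actual defaults:

    # illustrative only; values are placeholders, not the shipped defaults
    train_pipeline = Compose(
        [
            Mosaic(dataset, imgsz=640, p=1.0),            # combine several images into one
            CopyPaste(p=0.5),                             # paste mirrored segments
            RandomPerspective(degrees=10.0, translate=0.2, scale=0.9),
            MixUp(dataset, p=0.1),                        # blend two augmented samples
            Albumentations(p=1.0),                        # optional photometric extras
            RandomHSV(hgain=0.015, sgain=0.7, vgain=0.4),
            RandomFlip(direction="vertical", p=0.0),
            RandomFlip(direction="horizontal", p=0.5),
        ]
    )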
@@ -1195,17 +2326,27 @@ def classify_transforms(
     crop_fraction: float = DEFAULT_CROP_FRACTION,
 ):
     """
-    Classification transforms for evaluation/inference. Inspired by timm/data/transforms_factory.py.
+    Creates a composition of image transforms for classification tasks.
+
+    This function generates a sequence of torchvision transforms suitable for preprocessing images
+    for classification models during evaluation or inference. The transforms include resizing,
+    center cropping, conversion to tensor, and normalization.
 
     Args:
-        size (int): image size
-        mean (tuple): mean values of RGB channels
-        std (tuple): std values of RGB channels
-        interpolation (T.InterpolationMode): interpolation mode. default is T.InterpolationMode.BILINEAR.
-        crop_fraction (float): fraction of image to crop. default is 1.0.
+        size (int | tuple): The target size for the transformed image. If an int, it defines the shortest edge. If a
+            tuple, it defines (height, width).
+        mean (tuple): Mean values for each RGB channel used in normalization.
+        std (tuple): Standard deviation values for each RGB channel used in normalization.
+        interpolation (int): Interpolation method for resizing.
+        crop_fraction (float): Fraction of the image to be cropped.
 
     Returns:
-        (T.Compose): torchvision transforms
+        (torchvision.transforms.Compose): A composition of torchvision transforms.
+
+    Examples:
+        >>> transforms = classify_transforms(size=224)
+        >>> img = Image.open('path/to/image.jpg')
+        >>> transformed_img = transforms(img)
     """
     import torchvision.transforms as T  # scope for faster 'import ultralytics'
 
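As a reference point, the evaluation pipeline described here maps onto plain torchvision roughly as below; the identity mean/std values mirror the module's DEFAULT_STD-style defaults and are assumptions for illustration:

    import torchvision.transforms as T

    size, crop_fraction = 224, 1.0
    eval_transforms = T.Compose(
        [
            T.Resize(int(size / crop_fraction)),  # scale the shortest edge before cropping
            T.CenterCrop(size),                   # keep the central size x size region
            T.ToTensor(),                         # HWC uint8 -> CHW float in [0, 1]
            T.Normalize(mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)),  # identity defaults
        ]
    )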
@@ -1251,26 +2392,33 @@ def classify_augmentations(
     interpolation=Image.BILINEAR,
 ):
     """
-    Classification transforms with augmentation for training. Inspired by timm/data/transforms_factory.py.
+    Creates a composition of image augmentation transforms for classification tasks.
+
+    This function generates a set of image transformations suitable for training classification models. It includes
+    options for resizing, flipping, color jittering, auto augmentation, and random erasing.
 
     Args:
-        size (int): image size
-        scale (tuple): scale range of the image. default is (0.08, 1.0)
-        ratio (tuple): aspect ratio range of the image. default is (3./4., 4./3.)
-        mean (tuple): mean values of RGB channels
-        std (tuple): std values of RGB channels
-        hflip (float): probability of horizontal flip
-        vflip (float): probability of vertical flip
-        auto_augment (str): auto augmentation policy. can be 'randaugment', 'augmix', 'autoaugment' or None.
-        hsv_h (float): image HSV-Hue augmentation (fraction)
-        hsv_s (float): image HSV-Saturation augmentation (fraction)
-        hsv_v (float): image HSV-Value augmentation (fraction)
-        force_color_jitter (bool): force to apply color jitter even if auto augment is enabled
-        erasing (float): probability of random erasing
-        interpolation (T.InterpolationMode): interpolation mode. default is T.InterpolationMode.BILINEAR.
+        size (int): Target size for the image after transformations.
+        mean (tuple): Mean values for normalization, one per channel.
+        std (tuple): Standard deviation values for normalization, one per channel.
+        scale (tuple | None): Range of size of the origin size cropped.
+        ratio (tuple | None): Range of aspect ratio of the origin aspect ratio cropped.
+        hflip (float): Probability of horizontal flip.
+        vflip (float): Probability of vertical flip.
+        auto_augment (str | None): Auto augmentation policy. Can be 'randaugment', 'augmix', 'autoaugment' or None.
+        hsv_h (float): Image HSV-Hue augmentation factor.
+        hsv_s (float): Image HSV-Saturation augmentation factor.
+        hsv_v (float): Image HSV-Value augmentation factor.
+        force_color_jitter (bool): Whether to apply color jitter even if auto augment is enabled.
+        erasing (float): Probability of random erasing.
+        interpolation (int): Interpolation method.
 
     Returns:
-        (T.Compose): torchvision transforms
+        (torchvision.transforms.Compose): A composition of image augmentation transforms.
+
+    Examples:
+        >>> transforms = classify_augmentations(size=224, auto_augment='randaugment')
+        >>> augmented_image = transforms(original_image)
     """
     # Transforms to apply if Albumentations not installed
     import torchvision.transforms as T  # scope for faster 'import ultralytics'
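A comparable training pipeline can be sketched in plain torchvision as follows. The transform choices and probabilities are illustrative assumptions; note that RandomErasing must follow ToTensor because it operates on tensors:

    import torchvision.transforms as T

    train_transforms = T.Compose(
        [
            T.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3)),
            T.RandomHorizontalFlip(p=0.5),
            T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
            T.ToTensor(),
            T.Normalize(mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)),
            T.RandomErasing(p=0.4),  # tensor-space transform, hence after ToTensor
        ]
    )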
@@ -1332,24 +2480,53 @@ def classify_augmentations(
 # NOTE: keep this class for backward compatibility
 class ClassifyLetterBox:
     """
-    YOLOv8 LetterBox class for image preprocessing, designed to be part of a transformation pipeline, e.g.,
-    T.Compose([LetterBox(size), ToTensor()]).
+    A class for resizing and padding images for classification tasks.
+
+    This class is designed to be part of a transformation pipeline, e.g., T.Compose([LetterBox(size), ToTensor()]).
+    It resizes and pads images to a specified size while maintaining the original aspect ratio.
 
     Attributes:
         h (int): Target height of the image.
         w (int): Target width of the image.
-        auto (bool): If True, automatically solves for short side using stride.
+        auto (bool): If True, automatically calculates the short side using stride.
         stride (int): The stride value, used when 'auto' is True.
+
+    Methods:
+        __call__: Applies the letterbox transformation to an input image.
+
+    Examples:
+        >>> transform = ClassifyLetterBox(size=(640, 640), auto=False, stride=32)
+        >>> img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+        >>> result = transform(img)
+        >>> print(result.shape)
+        (640, 640, 3)
     """
 
     def __init__(self, size=(640, 640), auto=False, stride=32):
         """
-        Initializes the ClassifyLetterBox class with a target size, auto-flag, and stride.
+        Initializes the ClassifyLetterBox object for image preprocessing.
+
+        This class is designed to be part of a transformation pipeline for image classification tasks. It resizes and
+        pads images to a specified size while maintaining the original aspect ratio.
 
         Args:
-            size (Union[int, Tuple[int, int]]): The target dimensions (height, width) for the letterbox.
-            auto (bool): If True, automatically calculates the short side based on stride.
-            stride (int): The stride value, used when 'auto' is True.
+            size (int | Tuple[int, int]): Target size for the letterboxed image. If an int, a square image of
+                (size, size) is created. If a tuple, it should be (height, width).
+            auto (bool): If True, automatically calculates the short side based on stride. Default is False.
+            stride (int): The stride value, used when 'auto' is True. Default is 32.
+
+        Attributes:
+            h (int): Target height of the letterboxed image.
+            w (int): Target width of the letterboxed image.
+            auto (bool): Flag indicating whether to automatically calculate short side.
+            stride (int): Stride value for automatic short side calculation.
+
+        Examples:
+            >>> transform = ClassifyLetterBox(size=224)
+            >>> img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+            >>> result = transform(img)
+            >>> print(result.shape)
+            (224, 224, 3)
         """
         super().__init__()
         self.h, self.w = (size, size) if isinstance(size, int) else size
1358
2535
 
1359
2536
  def __call__(self, im):
1360
2537
  """
1361
- Resizes the image and pads it with a letterbox method.
2538
+ Resizes and pads an image using the letterbox method.
2539
+
2540
+ This method resizes the input image to fit within the specified dimensions while maintaining its aspect ratio,
2541
+ then pads the resized image to match the target size.
1362
2542
 
1363
2543
  Args:
1364
- im (numpy.ndarray): The input image as a numpy array of shape HWC.
2544
+ im (numpy.ndarray): Input image as a numpy array with shape (H, W, C).
1365
2545
 
1366
2546
  Returns:
1367
- (numpy.ndarray): The letterboxed and resized image as a numpy array.
2547
+ (numpy.ndarray): Resized and padded image as a numpy array with shape (hs, ws, 3), where hs and ws are
2548
+ the target height and width respectively.
2549
+
2550
+ Examples:
2551
+ >>> letterbox = ClassifyLetterBox(size=(640, 640))
2552
+ >>> image = np.random.randint(0, 255, (720, 1280, 3), dtype=np.uint8)
2553
+ >>> resized_image = letterbox(image)
2554
+ >>> print(resized_image.shape)
2555
+ (640, 640, 3)
1368
2556
  """
1369
2557
  imh, imw = im.shape[:2]
1370
2558
  r = min(self.h / imh, self.w / imw) # ratio of new/old dimensions
@@ -1382,24 +2570,70 @@ class ClassifyLetterBox:
1382
2570
 
1383
2571
  # NOTE: keep this class for backward compatibility
1384
2572
  class CenterCrop:
1385
- """YOLOv8 CenterCrop class for image preprocessing, designed to be part of a transformation pipeline, e.g.,
1386
- T.Compose([CenterCrop(size), ToTensor()]).
2573
+ """
2574
+ Applies center cropping to images for classification tasks.
2575
+
2576
+ This class performs center cropping on input images, resizing them to a specified size while maintaining the aspect
2577
+ ratio. It is designed to be part of a transformation pipeline, e.g., T.Compose([CenterCrop(size), ToTensor()]).
2578
+
2579
+ Attributes:
2580
+ h (int): Target height of the cropped image.
2581
+ w (int): Target width of the cropped image.
2582
+
2583
+ Methods:
2584
+ __call__: Applies the center crop transformation to an input image.
2585
+
2586
+ Examples:
2587
+ >>> transform = CenterCrop(640)
2588
+ >>> image = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)
2589
+ >>> cropped_image = transform(image)
2590
+ >>> print(cropped_image.shape)
2591
+ (640, 640, 3)
1387
2592
  """
1388
2593
 
1389
2594
  def __init__(self, size=640):
1390
- """Converts an image from numpy array to PyTorch tensor."""
2595
+ """
2596
+ Initializes the CenterCrop object for image preprocessing.
2597
+
2598
+ This class is designed to be part of a transformation pipeline, e.g., T.Compose([CenterCrop(size), ToTensor()]).
2599
+ It performs a center crop on input images to a specified size.
2600
+
2601
+ Args:
2602
+ size (int | Tuple[int, int]): The desired output size of the crop. If size is an int, a square crop
2603
+ (size, size) is made. If size is a sequence like (h, w), it is used as the output size.
2604
+
2605
+ Returns:
2606
+ (None): This method initializes the object and does not return anything.
2607
+
2608
+ Examples:
2609
+ >>> transform = CenterCrop(224)
2610
+ >>> img = np.random.rand(300, 300, 3)
2611
+ >>> cropped_img = transform(img)
2612
+ >>> print(cropped_img.shape)
2613
+ (224, 224, 3)
2614
+ """
1391
2615
  super().__init__()
1392
2616
  self.h, self.w = (size, size) if isinstance(size, int) else size
1393
2617
 
1394
2618
  def __call__(self, im):
1395
2619
  """
1396
- Resizes and crops the center of the image using a letterbox method.
2620
+ Applies center cropping to an input image.
2621
+
2622
+ This method resizes and crops the center of the image using a letterbox method. It maintains the aspect
2623
+ ratio of the original image while fitting it into the specified dimensions.
1397
2624
 
1398
2625
  Args:
1399
- im (numpy.ndarray): The input image as a numpy array of shape HWC.
2626
+ im (numpy.ndarray | PIL.Image.Image): The input image as a numpy array of shape (H, W, C) or a
2627
+ PIL Image object.
1400
2628
 
1401
2629
  Returns:
1402
- (numpy.ndarray): The center-cropped and resized image as a numpy array.
2630
+ (numpy.ndarray): The center-cropped and resized image as a numpy array of shape (self.h, self.w, C).
2631
+
2632
+ Examples:
2633
+ >>> transform = CenterCrop(size=224)
2634
+ >>> image = np.random.randint(0, 255, (640, 480, 3), dtype=np.uint8)
2635
+ >>> cropped_image = transform(image)
2636
+ >>> assert cropped_image.shape == (224, 224, 3)
1403
2637
  """
1404
2638
  if isinstance(im, Image.Image): # convert from PIL to numpy array if required
1405
2639
  im = np.asarray(im)
@@ -1411,22 +2645,71 @@ class CenterCrop:
1411
2645
 
1412
2646
  # NOTE: keep this class for backward compatibility
1413
2647
  class ToTensor:
1414
- """YOLOv8 ToTensor class for image preprocessing, i.e., T.Compose([LetterBox(size), ToTensor()])."""
2648
+ """
2649
+ Converts an image from a numpy array to a PyTorch tensor.
2650
+
2651
+ This class is designed to be part of a transformation pipeline, e.g., T.Compose([LetterBox(size), ToTensor()]).
2652
+
2653
+ Attributes:
2654
+ half (bool): If True, converts the image to half precision (float16).
2655
+
2656
+ Methods:
2657
+ __call__: Applies the tensor conversion to an input image.
2658
+
2659
+ Examples:
2660
+ >>> transform = ToTensor(half=True)
2661
+ >>> img = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
2662
+ >>> tensor_img = transform(img)
2663
+ >>> print(tensor_img.shape, tensor_img.dtype)
2664
+ torch.Size([3, 640, 640]) torch.float16
2665
+
2666
+ Notes:
2667
+ The input image is expected to be in BGR format with shape (H, W, C).
2668
+ The output tensor will be in RGB format with shape (C, H, W), normalized to [0, 1].
2669
+ """
1415
2670
 
1416
2671
  def __init__(self, half=False):
1417
- """Initialize YOLOv8 ToTensor object with optional half-precision support."""
2672
+ """
2673
+ Initializes the ToTensor object for converting images to PyTorch tensors.
2674
+
2675
+ This class is designed to be used as part of a transformation pipeline for image preprocessing in the
2676
+ Ultralytics YOLO framework. It converts numpy arrays or PIL Images to PyTorch tensors, with an option
2677
+ for half-precision (float16) conversion.
2678
+
2679
+ Args:
2680
+ half (bool): If True, converts the tensor to half precision (float16). Default is False.
2681
+
2682
+ Examples:
2683
+ >>> transform = ToTensor(half=True)
2684
+ >>> img = np.random.rand(640, 640, 3)
2685
+ >>> tensor_img = transform(img)
2686
+ >>> print(tensor_img.dtype)
2687
+ torch.float16
2688
+ """
1418
2689
  super().__init__()
1419
2690
  self.half = half
1420
2691
 
1421
2692
  def __call__(self, im):
1422
2693
  """
1423
- Transforms an image from a numpy array to a PyTorch tensor, applying optional half-precision and normalization.
2694
+ Transforms an image from a numpy array to a PyTorch tensor.
2695
+
2696
+ This method converts the input image from a numpy array to a PyTorch tensor, applying optional
2697
+ half-precision conversion and normalization. The image is transposed from HWC to CHW format and
2698
+ the color channels are reversed from BGR to RGB.
1424
2699
 
1425
2700
  Args:
1426
2701
  im (numpy.ndarray): Input image as a numpy array with shape (H, W, C) in BGR order.
1427
2702
 
1428
2703
  Returns:
1429
- (torch.Tensor): The transformed image as a PyTorch tensor in float32 or float16, normalized to [0, 1].
2704
+ (torch.Tensor): The transformed image as a PyTorch tensor in float32 or float16, normalized
2705
+ to [0, 1] with shape (C, H, W) in RGB order.
2706
+
2707
+ Examples:
2708
+ >>> transform = ToTensor(half=True)
2709
+ >>> img = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
2710
+ >>> tensor_img = transform(img)
2711
+ >>> print(tensor_img.shape, tensor_img.dtype)
2712
+ torch.Size([3, 640, 640]) torch.float16
1430
2713
  """
1431
2714
  im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous
1432
2715
  im = torch.from_numpy(im) # to torch