ultralytics 8.2.81__py3-none-any.whl → 8.2.83__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics might be problematic.

Files changed (97)
  1. tests/test_solutions.py +0 -4
  2. ultralytics/__init__.py +1 -1
  3. ultralytics/cfg/__init__.py +21 -21
  4. ultralytics/data/annotator.py +1 -1
  5. ultralytics/data/augment.py +58 -58
  6. ultralytics/data/base.py +3 -3
  7. ultralytics/data/converter.py +7 -8
  8. ultralytics/data/explorer/explorer.py +7 -23
  9. ultralytics/data/loaders.py +2 -2
  10. ultralytics/data/split_dota.py +11 -3
  11. ultralytics/data/utils.py +6 -10
  12. ultralytics/engine/exporter.py +2 -4
  13. ultralytics/engine/model.py +47 -47
  14. ultralytics/engine/predictor.py +1 -1
  15. ultralytics/engine/results.py +28 -28
  16. ultralytics/engine/trainer.py +11 -8
  17. ultralytics/engine/tuner.py +7 -8
  18. ultralytics/engine/validator.py +3 -5
  19. ultralytics/hub/__init__.py +5 -5
  20. ultralytics/hub/auth.py +6 -2
  21. ultralytics/hub/session.py +3 -5
  22. ultralytics/models/fastsam/model.py +13 -10
  23. ultralytics/models/fastsam/predict.py +2 -2
  24. ultralytics/models/fastsam/utils.py +0 -1
  25. ultralytics/models/nas/model.py +4 -4
  26. ultralytics/models/nas/predict.py +1 -2
  27. ultralytics/models/nas/val.py +1 -1
  28. ultralytics/models/rtdetr/predict.py +1 -1
  29. ultralytics/models/rtdetr/train.py +1 -1
  30. ultralytics/models/rtdetr/val.py +1 -1
  31. ultralytics/models/sam/model.py +11 -11
  32. ultralytics/models/sam/modules/decoders.py +7 -4
  33. ultralytics/models/sam/modules/sam.py +9 -1
  34. ultralytics/models/sam/modules/tiny_encoder.py +1 -1
  35. ultralytics/models/sam/modules/transformer.py +0 -2
  36. ultralytics/models/sam/modules/utils.py +1 -1
  37. ultralytics/models/sam/predict.py +10 -10
  38. ultralytics/models/utils/loss.py +29 -17
  39. ultralytics/models/utils/ops.py +1 -5
  40. ultralytics/models/yolo/classify/predict.py +1 -1
  41. ultralytics/models/yolo/classify/train.py +1 -1
  42. ultralytics/models/yolo/classify/val.py +1 -1
  43. ultralytics/models/yolo/detect/predict.py +1 -1
  44. ultralytics/models/yolo/detect/train.py +1 -1
  45. ultralytics/models/yolo/detect/val.py +1 -1
  46. ultralytics/models/yolo/model.py +6 -2
  47. ultralytics/models/yolo/obb/predict.py +1 -1
  48. ultralytics/models/yolo/obb/train.py +1 -1
  49. ultralytics/models/yolo/obb/val.py +2 -2
  50. ultralytics/models/yolo/pose/predict.py +1 -1
  51. ultralytics/models/yolo/pose/train.py +1 -1
  52. ultralytics/models/yolo/pose/val.py +1 -1
  53. ultralytics/models/yolo/segment/predict.py +1 -1
  54. ultralytics/models/yolo/segment/train.py +1 -1
  55. ultralytics/models/yolo/segment/val.py +1 -1
  56. ultralytics/models/yolo/world/train.py +1 -1
  57. ultralytics/nn/autobackend.py +2 -2
  58. ultralytics/nn/modules/__init__.py +2 -2
  59. ultralytics/nn/modules/block.py +8 -20
  60. ultralytics/nn/modules/conv.py +1 -3
  61. ultralytics/nn/modules/head.py +16 -31
  62. ultralytics/nn/modules/transformer.py +0 -1
  63. ultralytics/nn/modules/utils.py +0 -1
  64. ultralytics/nn/tasks.py +11 -9
  65. ultralytics/solutions/__init__.py +1 -0
  66. ultralytics/solutions/ai_gym.py +0 -2
  67. ultralytics/solutions/analytics.py +1 -6
  68. ultralytics/solutions/heatmap.py +0 -1
  69. ultralytics/solutions/object_counter.py +0 -2
  70. ultralytics/solutions/queue_management.py +0 -2
  71. ultralytics/trackers/basetrack.py +1 -1
  72. ultralytics/trackers/byte_tracker.py +2 -2
  73. ultralytics/trackers/utils/gmc.py +5 -5
  74. ultralytics/trackers/utils/kalman_filter.py +1 -1
  75. ultralytics/trackers/utils/matching.py +1 -5
  76. ultralytics/utils/__init__.py +137 -24
  77. ultralytics/utils/autobatch.py +7 -4
  78. ultralytics/utils/benchmarks.py +6 -14
  79. ultralytics/utils/callbacks/base.py +0 -1
  80. ultralytics/utils/callbacks/comet.py +0 -1
  81. ultralytics/utils/callbacks/tensorboard.py +0 -1
  82. ultralytics/utils/checks.py +15 -18
  83. ultralytics/utils/downloads.py +6 -7
  84. ultralytics/utils/files.py +3 -4
  85. ultralytics/utils/instance.py +17 -7
  86. ultralytics/utils/metrics.py +16 -16
  87. ultralytics/utils/ops.py +8 -8
  88. ultralytics/utils/plotting.py +25 -35
  89. ultralytics/utils/tal.py +27 -18
  90. ultralytics/utils/torch_utils.py +12 -13
  91. ultralytics/utils/tuner.py +2 -3
  92. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/METADATA +4 -3
  93. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/RECORD +97 -97
  94. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/WHEEL +1 -1
  95. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/LICENSE +0 -0
  96. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/entry_points.txt +0 -0
  97. {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/top_level.txt +0 -0
ultralytics/utils/plotting.py CHANGED
@@ -204,7 +204,6 @@ class Annotator:
  txt_color (tuple, optional): The color of the text (R, G, B).
  margin (int, optional): The margin between the text and the rectangle border.
  """
-
  # If label have more than 3 characters, skip other characters, due to circle size
  if len(label) > 3:
  print(
@@ -246,7 +245,6 @@ class Annotator:
  txt_color (tuple, optional): The color of the text (R, G, B).
  margin (int, optional): The margin between the text and the rectangle border.
  """
-
  # Calculate the center of the bounding box
  x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
  # Get the size of the text
@@ -284,7 +282,6 @@ class Annotator:
  txt_color (tuple, optional): The color of the text (R, G, B).
  rotated (bool, optional): Variable used to check if task is OBB
  """
-
  txt_color = self.get_txt_color(color, txt_color)
  if isinstance(box, torch.Tensor):
  box = box.tolist()
@@ -343,7 +340,6 @@ class Annotator:
  alpha (float): Mask transparency: 0.0 fully transparent, 1.0 opaque
  retina_masks (bool): Whether to use high resolution masks or not. Defaults to False.
  """
-
  if self.pil:
  # Convert to numpy first
  self.im = np.asarray(self.im).copy()
@@ -374,17 +370,18 @@ class Annotator:
  Plot keypoints on the image.

  Args:
- kpts (tensor): Predicted keypoints with shape [17, 3]. Each keypoint has (x, y, confidence).
- shape (tuple): Image shape as a tuple (h, w), where h is the height and w is the width.
- radius (int, optional): Radius of the drawn keypoints. Default is 5.
- kpt_line (bool, optional): If True, the function will draw lines connecting keypoints
- for human pose. Default is True.
- kpt_color (tuple, optional): The color of the keypoints (B, G, R).
+ kpts (torch.Tensor): Keypoints, shape [17, 3] (x, y, confidence).
+ shape (tuple, optional): Image shape (h, w). Defaults to (640, 640).
+ radius (int, optional): Keypoint radius. Defaults to 5.
+ kpt_line (bool, optional): Draw lines between keypoints. Defaults to True.
+ conf_thres (float, optional): Confidence threshold. Defaults to 0.25.
+ kpt_color (tuple, optional): Keypoint color (B, G, R). Defaults to None.

  Note:
- `kpt_line=True` currently only supports human pose plotting.
+ - `kpt_line=True` currently only supports human pose plotting.
+ - Modifies self.im in-place.
+ - If self.pil is True, converts image to numpy array and back to PIL.
  """
-
  if self.pil:
  # Convert to numpy first
  self.im = np.asarray(self.im).copy()
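The updated docstring adds a `conf_thres` filter for low-confidence keypoints. A minimal usage sketch of the new signature (model weights and image paths here are illustrative):

```python
import cv2
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator

im = cv2.imread("image.jpg")  # illustrative input path
result = YOLO("yolov8n-pose.pt")(im)[0]  # one Results object per image
annotator = Annotator(im)
for kpts in result.keypoints.data:  # one (17, 3) tensor per detected person
    annotator.kpts(kpts, shape=im.shape[:2], radius=5, kpt_line=True, conf_thres=0.25)
cv2.imwrite("annotated.jpg", annotator.result())
```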
@@ -488,7 +485,6 @@ class Annotator:
  Returns:
  angle (degree): Degree value of angle between three points
  """
-
  x_min, y_min, x_max, y_max = bbox
  width = x_max - x_min
  height = y_max - y_min
@@ -503,7 +499,6 @@ class Annotator:
  color (tuple): Region Color value
  thickness (int): Region area thickness value
  """
-
  cv2.polylines(self.im, [np.array(reg_pts, dtype=np.int32)], isClosed=True, color=color, thickness=thickness)

  def draw_centroid_and_tracks(self, track, color=(255, 0, 255), track_thickness=2):
@@ -515,7 +510,6 @@ class Annotator:
  color (tuple): tracks line color
  track_thickness (int): track line thickness value
  """
-
  points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
  cv2.polylines(self.im, [points], isClosed=False, color=color, thickness=track_thickness)
  cv2.circle(self.im, (int(track[-1][0]), int(track[-1][1])), track_thickness * 2, color, -1)
@@ -530,7 +524,6 @@ class Annotator:
  region_color (RGB): queue region color
  txt_color (RGB): text display color
  """
-
  x_values = [point[0] for point in points]
  y_values = [point[1] for point in points]
  center_x = sum(x_values) // len(points)
@@ -574,7 +567,6 @@ class Annotator:
  y_center (float): y position center point for bounding box
  margin (int): gap between text and rectangle for better display
  """
-
  text_size = cv2.getTextSize(text, 0, fontScale=self.sf, thickness=self.tf)[0]
  text_x = x_center - text_size[0] // 2
  text_y = y_center + text_size[1] // 2
@@ -597,7 +589,6 @@ class Annotator:
  bg_color (bgr color): display color for text background
  margin (int): gap between text and rectangle for better display
  """
-
  horizontal_gap = int(im0.shape[1] * 0.02)
  vertical_gap = int(im0.shape[0] * 0.01)
  text_y_offset = 0
@@ -629,7 +620,6 @@ class Annotator:
  Returns:
  angle (degree): Degree value of angle between three points
  """
-
  a, b, c = np.array(a), np.array(b), np.array(c)
  radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
  angle = np.abs(radians * 180.0 / np.pi)
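The angle helper above is two-argument arctangent geometry. A standalone check of the formula on perpendicular rays (toy points; the final reflex-angle fold is assumed from the function's continuation, which this hunk truncates):

```python
import numpy as np

# Angle at b between rays b->a and b->c, as computed above.
a, b, c = np.array([1.0, 0.0]), np.array([0.0, 0.0]), np.array([0.0, 1.0])
radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
angle = np.abs(radians * 180.0 / np.pi)
if angle > 180.0:  # assumed fold into [0, 180], not shown in the hunk
    angle = 360.0 - angle
print(angle)  # 90.0 for perpendicular rays
```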
@@ -642,12 +632,19 @@ class Annotator:
  Draw specific keypoints for gym steps counting.

  Args:
- keypoints (list): list of keypoints data to be plotted
- indices (list): keypoints ids list to be plotted
- shape (tuple): imgsz for model inference
- radius (int): Keypoint radius value
- """
+ keypoints (list): Keypoints data to be plotted.
+ indices (list, optional): Keypoint indices to be plotted. Defaults to [2, 5, 7].
+ shape (tuple, optional): Image size for model inference. Defaults to (640, 640).
+ radius (int, optional): Keypoint radius. Defaults to 2.
+ conf_thres (float, optional): Confidence threshold for keypoints. Defaults to 0.25.
+
+ Returns:
+ (numpy.ndarray): Image with drawn keypoints.

+ Note:
+ Keypoint format: [x, y] or [x, y, confidence].
+ Modifies self.im in-place.
+ """
  if indices is None:
  indices = [2, 5, 7]
  for i, k in enumerate(keypoints):
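A hedged usage sketch of the reworked `draw_specific_points` with its documented defaults (pose weights and file names are illustrative):

```python
import cv2
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator

im = cv2.imread("workout.jpg")  # illustrative input path
result = YOLO("yolov8n-pose.pt")(im)[0]
annotator = Annotator(im)
for person in result.keypoints.data:  # (17, 3) keypoints per person
    # indices [2, 5, 7] are the docstring default (e.g. one arm for rep counting)
    annotator.draw_specific_points(person, indices=[2, 5, 7], shape=im.shape[:2], radius=2)
cv2.imwrite("gym.jpg", annotator.result())
```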
@@ -675,7 +672,6 @@ class Annotator:
  color (tuple): text background color for workout monitoring
  txt_color (tuple): text foreground color for workout monitoring
  """
-
  angle_text, count_text, stage_text = (f" {angle_text:.2f}", f"Steps : {count_text}", f" {stage_text}")

  # Draw angle
@@ -744,7 +740,6 @@ class Annotator:
  label (str): Detection label text
  txt_color (RGB): text color
  """
-
  cv2.polylines(self.im, [np.int32([mask])], isClosed=True, color=mask_color, thickness=2)
  text_size, _ = cv2.getTextSize(label, 0, self.sf, self.tf)

@@ -772,7 +767,6 @@ class Annotator:
  line_color (RGB): Distance line color.
  centroid_color (RGB): Bounding box centroid color.
  """
-
  (text_width_m, text_height_m), _ = cv2.getTextSize(f"Distance M: {distance_m:.2f}m", 0, self.sf, self.tf)
  cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 10, 25 + text_height_m + 20), line_color, -1)
  cv2.putText(
@@ -813,7 +807,6 @@ class Annotator:
  color (tuple): object centroid and line color value
  pin_color (tuple): visioneye point color value
  """
-
  center_bbox = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
  cv2.circle(self.im, center_point, self.tf * 2, pin_color, -1)
  cv2.circle(self.im, center_bbox, self.tf * 2, color, -1)
@@ -902,11 +895,10 @@ def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False,
  from ultralytics.utils.plotting import save_one_box

  xyxy = [50, 50, 150, 150]
- im = cv2.imread('image.jpg')
- cropped_im = save_one_box(xyxy, im, file='cropped.jpg', square=True)
+ im = cv2.imread("image.jpg")
+ cropped_im = save_one_box(xyxy, im, file="cropped.jpg", square=True)
  ```
  """
-
  if not isinstance(xyxy, torch.Tensor): # may be list
  xyxy = torch.stack(xyxy)
  b = ops.xyxy2xywh(xyxy.view(-1, 4)) # boxes
@@ -1109,7 +1101,7 @@ def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False,
  ```python
  from ultralytics.utils.plotting import plot_results

- plot_results('path/to/results.csv', segment=True)
+ plot_results("path/to/results.csv", segment=True)
  ```
  """
  import pandas as pd # scope for faster 'import ultralytics'
@@ -1171,7 +1163,6 @@ def plt_color_scatter(v, f, bins=20, cmap="viridis", alpha=0.8, edgecolors="none
  >>> f = np.random.rand(100)
  >>> plt_color_scatter(v, f)
  """
-
  # Calculate 2D histogram and corresponding colors
  hist, xedges, yedges = np.histogram2d(v, f, bins=bins)
  colors = [
@@ -1195,9 +1186,8 @@ def plot_tune_results(csv_file="tune_results.csv"):
  csv_file (str, optional): Path to the CSV file containing the tuning results. Defaults to 'tune_results.csv'.

  Examples:
- >>> plot_tune_results('path/to/tune_results.csv')
+ >>> plot_tune_results("path/to/tune_results.csv")
  """
-
  import pandas as pd # scope for faster 'import ultralytics'
  from scipy.ndimage import gaussian_filter1d

ultralytics/utils/tal.py CHANGED
@@ -140,7 +140,6 @@ class TaskAlignedAssigner(nn.Module):
  Returns:
  (Tensor): A tensor of shape (b, max_num_obj, h*w) containing the selected top-k candidates.
  """
-
  # (b, max_num_obj, topk)
  topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest)
  if topk_mask is None:
@@ -184,7 +183,6 @@ class TaskAlignedAssigner(nn.Module):
  for positive anchor points, where num_classes is the number
  of object classes.
  """
-
  # Assigned target labels, (b, 1)
  batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]
  target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes # (b, h*w)
@@ -212,14 +210,19 @@ class TaskAlignedAssigner(nn.Module):
  @staticmethod
  def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
  """
- Select the positive anchor center in gt.
+ Select positive anchor centers within ground truth bounding boxes.

  Args:
- xy_centers (Tensor): shape(h*w, 2)
- gt_bboxes (Tensor): shape(b, n_boxes, 4)
+ xy_centers (torch.Tensor): Anchor center coordinates, shape (h*w, 2).
+ gt_bboxes (torch.Tensor): Ground truth bounding boxes, shape (b, n_boxes, 4).
+ eps (float, optional): Small value for numerical stability. Defaults to 1e-9.

  Returns:
- (Tensor): shape(b, n_boxes, h*w)
+ (torch.Tensor): Boolean mask of positive anchors, shape (b, n_boxes, h*w).
+
+ Note:
+ b: batch size, n_boxes: number of ground truth boxes, h: height, w: width.
+ Bounding box format: [x_min, y_min, x_max, y_max].
  """
  n_anchors = xy_centers.shape[0]
  bs, n_boxes, _ = gt_bboxes.shape
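The trailing context shows the start of the implementation. A self-contained sketch consistent with the documented shapes, with the helper renamed to flag that it paraphrases rather than copies the method body:

```python
import torch

def candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
    # xy_centers: (h*w, 2); gt_bboxes: (b, n_boxes, 4) as [x_min, y_min, x_max, y_max]
    n_anchors = xy_centers.shape[0]
    bs, n_boxes, _ = gt_bboxes.shape
    lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2)  # left-top and right-bottom corners
    # Signed distances from every anchor center to all four box edges;
    # an anchor is positive only if all four distances exceed eps (strictly inside).
    deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2)
    return deltas.view(bs, n_boxes, n_anchors, -1).amin(3) > eps

centers = torch.tensor([[1.0, 1.0], [5.0, 5.0]])  # two anchor centers
boxes = torch.tensor([[[0.0, 0.0, 2.0, 2.0]]])    # b=1, n_boxes=1
print(candidates_in_gts(centers, boxes))          # tensor([[[ True, False]]])
```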
@@ -231,18 +234,22 @@ class TaskAlignedAssigner(nn.Module):
  @staticmethod
  def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
  """
- If an anchor box is assigned to multiple gts, the one with the highest IoU will be selected.
+ Select anchor boxes with highest IoU when assigned to multiple ground truths.

  Args:
- mask_pos (Tensor): shape(b, n_max_boxes, h*w)
- overlaps (Tensor): shape(b, n_max_boxes, h*w)
+ mask_pos (torch.Tensor): Positive mask, shape (b, n_max_boxes, h*w).
+ overlaps (torch.Tensor): IoU overlaps, shape (b, n_max_boxes, h*w).
+ n_max_boxes (int): Maximum number of ground truth boxes.

  Returns:
- target_gt_idx (Tensor): shape(b, h*w)
- fg_mask (Tensor): shape(b, h*w)
- mask_pos (Tensor): shape(b, n_max_boxes, h*w)
+ target_gt_idx (torch.Tensor): Indices of assigned ground truths, shape (b, h*w).
+ fg_mask (torch.Tensor): Foreground mask, shape (b, h*w).
+ mask_pos (torch.Tensor): Updated positive mask, shape (b, n_max_boxes, h*w).
+
+ Note:
+ b: batch size, h: height, w: width.
  """
- # (b, n_max_boxes, h*w) -> (b, h*w)
+ # Convert (b, n_max_boxes, h*w) -> (b, h*w)
  fg_mask = mask_pos.sum(-2)
  if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes
  mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand(-1, n_max_boxes, -1) # (b, n_max_boxes, h*w)
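A toy walkthrough of the resolution rule documented above: an anchor claimed by two ground truths keeps only the higher-IoU claim. Tensor values are invented for illustration:

```python
import torch
import torch.nn.functional as F

# Toy case: b=1, n_max_boxes=2, h*w=3; anchor 1 is claimed by both boxes.
mask_pos = torch.tensor([[[1.0, 1.0, 0.0],
                          [0.0, 1.0, 1.0]]])
overlaps = torch.tensor([[[0.9, 0.4, 0.0],
                          [0.0, 0.7, 0.8]]])
n_max_boxes = mask_pos.shape[1]

fg_mask = mask_pos.sum(-2)                        # (b, h*w) claim counts per anchor
if fg_mask.max() > 1:                             # resolve multi-GT anchors
    mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand(-1, n_max_boxes, -1)
    max_overlaps_idx = overlaps.argmax(1)         # best GT per anchor, (b, h*w)
    is_max = F.one_hot(max_overlaps_idx, n_max_boxes).permute(0, 2, 1).float()
    mask_pos = torch.where(mask_multi_gts, is_max, mask_pos)
    fg_mask = mask_pos.sum(-2)
target_gt_idx = mask_pos.argmax(-2)               # (b, h*w)
print(target_gt_idx)  # tensor([[0, 1, 1]]): anchor 1 goes to box 1 (IoU 0.7 > 0.4)
```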
@@ -328,14 +335,16 @@ def bbox2dist(anchor_points, bbox, reg_max):

  def dist2rbox(pred_dist, pred_angle, anchor_points, dim=-1):
  """
- Decode predicted object bounding box coordinates from anchor points and distribution.
+ Decode predicted rotated bounding box coordinates from anchor points and distribution.

  Args:
- pred_dist (torch.Tensor): Predicted rotated distance, (bs, h*w, 4).
- pred_angle (torch.Tensor): Predicted angle, (bs, h*w, 1).
- anchor_points (torch.Tensor): Anchor points, (h*w, 2).
+ pred_dist (torch.Tensor): Predicted rotated distance, shape (bs, h*w, 4).
+ pred_angle (torch.Tensor): Predicted angle, shape (bs, h*w, 1).
+ anchor_points (torch.Tensor): Anchor points, shape (h*w, 2).
+ dim (int, optional): Dimension along which to split. Defaults to -1.
+
  Returns:
- (torch.Tensor): Predicted rotated bounding boxes, (bs, h*w, 4).
+ (torch.Tensor): Predicted rotated bounding boxes, shape (bs, h*w, 4).
  """
  lt, rb = pred_dist.split(2, dim=dim)
  cos, sin = torch.cos(pred_angle), torch.sin(pred_angle)
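A quick shape check against the documented contract (random inputs; `dist2rbox` is importable from `ultralytics.utils.tal`):

```python
import torch
from ultralytics.utils.tal import dist2rbox

bs, hw = 2, 8400
pred_dist = torch.rand(bs, hw, 4)    # predicted left/top/right/bottom distances
pred_angle = torch.rand(bs, hw, 1)   # predicted rotation angles
anchor_points = torch.rand(hw, 2)    # anchor centers
rboxes = dist2rbox(pred_dist, pred_angle, anchor_points)
print(rboxes.shape)                  # torch.Size([2, 8400, 4])
```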
ultralytics/utils/torch_utils.py CHANGED
@@ -137,16 +137,15 @@ def select_device(device="", batch=0, newline=False, verbose=True):
  devices when using multiple GPUs.

  Examples:
- >>> select_device('cuda:0')
+ >>> select_device("cuda:0")
  device(type='cuda', index=0)

- >>> select_device('cpu')
+ >>> select_device("cpu")
  device(type='cpu')

  Note:
  Sets the 'CUDA_VISIBLE_DEVICES' environment variable for specifying which GPUs to use.
  """
-
  if isinstance(device, torch.device):
  return device

@@ -331,11 +330,13 @@ def model_info_for_loggers(trainer):
  Example:
  YOLOv8n info for loggers
  ```python
- results = {'model/parameters': 3151904,
- 'model/GFLOPs': 8.746,
- 'model/speed_ONNX(ms)': 41.244,
- 'model/speed_TensorRT(ms)': 3.211,
- 'model/speed_PyTorch(ms)': 18.755}
+ results = {
+ "model/parameters": 3151904,
+ "model/GFLOPs": 8.746,
+ "model/speed_ONNX(ms)": 41.244,
+ "model/speed_TensorRT(ms)": 3.211,
+ "model/speed_PyTorch(ms)": 18.755,
+ }
  ```
  """
  if trainer.args.profile: # profile ONNX and TensorRT times
@@ -415,9 +416,7 @@ def initialize_weights(model):


  def scale_img(img, ratio=1.0, same_shape=False, gs=32):
- """Scales and pads an image tensor of shape img(bs,3,y,x) based on given ratio and grid size gs, optionally
- retaining the original shape.
- """
+ """Scales and pads an image tensor, optionally maintaining aspect ratio and padding to gs multiple."""
  if ratio == 1.0:
  return img
  h, w = img.shape[2:]
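Per the surviving implementation, the tensor is resized by `ratio` and, unless `same_shape=True`, padded up to the next multiple of `gs`. A sanity check under that reading (the expected shape is a calculation, not taken from this diff):

```python
import torch
from ultralytics.utils.torch_utils import scale_img

x = torch.zeros(1, 3, 640, 480)
y = scale_img(x, ratio=0.5)  # resized to (320, 240), then padded to gs=32 multiples
print(y.shape)               # expected torch.Size([1, 3, 320, 256]), since 240 -> 256
```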
@@ -491,7 +490,7 @@ def init_seeds(seed=0, deterministic=False):
  class ModelEMA:
  """
  Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models. Keeps a moving
- average of everything in the model state_dict (parameters and buffers)
+ average of everything in the model state_dict (parameters and buffers).

  For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage

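For context, ModelEMA's per-entry update is a standard exponential moving average with a warm-up ramp on the decay. A sketch, assuming the class defaults `decay=0.9999, tau=2000`:

```python
import math

decay, tau, updates = 0.9999, 2000, 100        # assumed ModelEMA defaults
d = decay * (1 - math.exp(-updates / tau))     # effective decay ramps up with update count
ema_param, model_param = 0.50, 0.60
ema_param = d * ema_param + (1 - d) * model_param  # moving-average step per state_dict entry
```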
@@ -542,7 +541,7 @@ def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "") -> None:
  from pathlib import Path
  from ultralytics.utils.torch_utils import strip_optimizer

- for f in Path('path/to/model/checkpoints').rglob('*.pt'):
+ for f in Path("path/to/model/checkpoints").rglob("*.pt"):
  strip_optimizer(f)
  ```

ultralytics/utils/tuner.py CHANGED
@@ -28,13 +28,12 @@ def run_ray_tune(
  from ultralytics import YOLO

  # Load a YOLOv8n model
- model = YOLO('yolov8n.pt')
+ model = YOLO("yolov8n.pt")

  # Start tuning hyperparameters for YOLOv8n training on the COCO8 dataset
- result_grid = model.tune(data='coco8.yaml', use_ray=True)
+ result_grid = model.tune(data="coco8.yaml", use_ray=True)
  ```
  """
-
  LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")
  if train_args is None:
  train_args = {}
{ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.2.81
+ Version: 8.2.83
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -96,6 +96,7 @@ Requires-Dist: dvclive>=2.12.0; extra == "logging"
  <a href="https://hub.docker.com/r/ultralytics/ultralytics"><img src="https://img.shields.io/docker/pulls/ultralytics/ultralytics?logo=docker" alt="Ultralytics Docker Pulls"></a>
  <a href="https://ultralytics.com/discord"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
  <a href="https://community.ultralytics.com"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
+ <a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
  <br>
  <a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run Ultralytics on Gradient"></a>
  <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Ultralytics In Colab"></a>
@@ -105,7 +106,7 @@ Requires-Dist: dvclive>=2.12.0; extra == "logging"

  [Ultralytics](https://ultralytics.com) [YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.

- We hope that the resources here will help you get the most out of YOLOv8. Please browse the YOLOv8 <a href="https://docs.ultralytics.com/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a> for support, and join our <a href="https://ultralytics.com/discord">Discord</a> community for questions and discussions!
+ We hope that the resources here will help you get the most out of YOLOv8. Please browse the YOLOv8 <a href="https://docs.ultralytics.com/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a> for support, questions, or discussions, become a member of the Ultralytics <a href="https://ultralytics.com/discord">Discord</a>, <a href="https://reddit.com/r/ultralytics">Reddit</a> and <a href="https://community.ultralytics.com">Forums</a>!

  To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).

@@ -361,7 +362,7 @@ Ultralytics offers two licensing options to accommodate diverse use cases:

  ## <div align="center">Contact</div>

- For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions!
+ For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://ultralytics.com/discord), [Reddit](https://reddit.com/r/ultralytics), or [Forums](https://community.ultralytics.com) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!

  <br>
  <div align="center">