ultralytics 8.3.87__py3-none-any.whl → 8.3.89__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. tests/test_solutions.py +34 -45
  2. ultralytics/__init__.py +1 -1
  3. ultralytics/cfg/__init__.py +46 -39
  4. ultralytics/data/augment.py +2 -2
  5. ultralytics/data/base.py +7 -9
  6. ultralytics/data/converter.py +30 -29
  7. ultralytics/data/utils.py +20 -28
  8. ultralytics/engine/model.py +2 -2
  9. ultralytics/engine/tuner.py +11 -21
  10. ultralytics/hub/__init__.py +13 -17
  11. ultralytics/models/fastsam/model.py +4 -7
  12. ultralytics/models/nas/model.py +8 -14
  13. ultralytics/models/nas/predict.py +7 -9
  14. ultralytics/models/nas/val.py +7 -9
  15. ultralytics/models/rtdetr/predict.py +6 -9
  16. ultralytics/models/rtdetr/train.py +5 -8
  17. ultralytics/models/rtdetr/val.py +5 -8
  18. ultralytics/models/yolo/classify/predict.py +6 -9
  19. ultralytics/models/yolo/classify/train.py +5 -8
  20. ultralytics/models/yolo/classify/val.py +5 -8
  21. ultralytics/models/yolo/detect/predict.py +6 -9
  22. ultralytics/models/yolo/detect/train.py +5 -8
  23. ultralytics/models/yolo/detect/val.py +5 -8
  24. ultralytics/models/yolo/obb/predict.py +6 -9
  25. ultralytics/models/yolo/obb/train.py +5 -8
  26. ultralytics/models/yolo/obb/val.py +10 -15
  27. ultralytics/models/yolo/pose/predict.py +6 -9
  28. ultralytics/models/yolo/pose/train.py +5 -8
  29. ultralytics/models/yolo/pose/val.py +12 -17
  30. ultralytics/models/yolo/segment/predict.py +6 -9
  31. ultralytics/models/yolo/segment/train.py +5 -8
  32. ultralytics/models/yolo/segment/val.py +10 -15
  33. ultralytics/models/yolo/world/train.py +5 -8
  34. ultralytics/models/yolo/world/train_world.py +21 -25
  35. ultralytics/nn/modules/__init__.py +9 -12
  36. ultralytics/nn/tasks.py +7 -12
  37. ultralytics/solutions/__init__.py +14 -6
  38. ultralytics/solutions/ai_gym.py +39 -28
  39. ultralytics/solutions/analytics.py +22 -18
  40. ultralytics/solutions/distance_calculation.py +25 -25
  41. ultralytics/solutions/heatmap.py +40 -38
  42. ultralytics/solutions/instance_segmentation.py +69 -0
  43. ultralytics/solutions/object_blurrer.py +89 -0
  44. ultralytics/solutions/object_counter.py +35 -33
  45. ultralytics/solutions/object_cropper.py +84 -0
  46. ultralytics/solutions/parking_management.py +21 -9
  47. ultralytics/solutions/queue_management.py +20 -39
  48. ultralytics/solutions/region_counter.py +54 -51
  49. ultralytics/solutions/security_alarm.py +40 -30
  50. ultralytics/solutions/solutions.py +594 -16
  51. ultralytics/solutions/speed_estimation.py +34 -31
  52. ultralytics/solutions/streamlit_inference.py +34 -28
  53. ultralytics/solutions/trackzone.py +29 -18
  54. ultralytics/solutions/vision_eye.py +69 -0
  55. ultralytics/trackers/utils/kalman_filter.py +23 -23
  56. ultralytics/utils/__init__.py +5 -8
  57. ultralytics/utils/checks.py +25 -35
  58. ultralytics/utils/downloads.py +25 -48
  59. ultralytics/utils/instance.py +9 -11
  60. ultralytics/utils/ops.py +5 -9
  61. ultralytics/utils/plotting.py +8 -428
  62. ultralytics/utils/torch_utils.py +23 -33
  63. ultralytics/utils/tuner.py +5 -9
  64. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/METADATA +2 -2
  65. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/RECORD +69 -65
  66. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/LICENSE +0 -0
  67. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/WHEEL +0 -0
  68. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/entry_points.txt +0 -0
  69. {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/top_level.txt +0 -0
@@ -271,84 +271,6 @@ class Annotator:
271
271
  else:
272
272
  return txt_color
273
273
 
274
- def circle_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255), margin=2):
275
- """
276
- Draws a label with a background circle centered within a given bounding box.
277
-
278
- Args:
279
- box (tuple): The bounding box coordinates (x1, y1, x2, y2).
280
- label (str): The text label to be displayed.
281
- color (tuple, optional): The background color of the rectangle (B, G, R).
282
- txt_color (tuple, optional): The color of the text (R, G, B).
283
- margin (int, optional): The margin between the text and the rectangle border.
284
- """
285
- # If label have more than 3 characters, skip other characters, due to circle size
286
- if len(label) > 3:
287
- print(
288
- f"Length of label is {len(label)}, initial 3 label characters will be considered for circle annotation!"
289
- )
290
- label = label[:3]
291
-
292
- # Calculate the center of the box
293
- x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
294
- # Get the text size
295
- text_size = cv2.getTextSize(str(label), cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.15, self.tf)[0]
296
- # Calculate the required radius to fit the text with the margin
297
- required_radius = int(((text_size[0] ** 2 + text_size[1] ** 2) ** 0.5) / 2) + margin
298
- # Draw the circle with the required radius
299
- cv2.circle(self.im, (x_center, y_center), required_radius, color, -1)
300
- # Calculate the position for the text
301
- text_x = x_center - text_size[0] // 2
302
- text_y = y_center + text_size[1] // 2
303
- # Draw the text
304
- cv2.putText(
305
- self.im,
306
- str(label),
307
- (text_x, text_y),
308
- cv2.FONT_HERSHEY_SIMPLEX,
309
- self.sf - 0.15,
310
- self.get_txt_color(color, txt_color),
311
- self.tf,
312
- lineType=cv2.LINE_AA,
313
- )
314
-
315
- def text_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255), margin=5):
316
- """
317
- Draws a label with a background rectangle centered within a given bounding box.
318
-
319
- Args:
320
- box (tuple): The bounding box coordinates (x1, y1, x2, y2).
321
- label (str): The text label to be displayed.
322
- color (tuple, optional): The background color of the rectangle (B, G, R).
323
- txt_color (tuple, optional): The color of the text (R, G, B).
324
- margin (int, optional): The margin between the text and the rectangle border.
325
- """
326
- # Calculate the center of the bounding box
327
- x_center, y_center = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
328
- # Get the size of the text
329
- text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf - 0.1, self.tf)[0]
330
- # Calculate the top-left corner of the text (to center it)
331
- text_x = x_center - text_size[0] // 2
332
- text_y = y_center + text_size[1] // 2
333
- # Calculate the coordinates of the background rectangle
334
- rect_x1 = text_x - margin
335
- rect_y1 = text_y - text_size[1] - margin
336
- rect_x2 = text_x + text_size[0] + margin
337
- rect_y2 = text_y + margin
338
- # Draw the background rectangle
339
- cv2.rectangle(self.im, (rect_x1, rect_y1), (rect_x2, rect_y2), color, -1)
340
- # Draw the text on top of the rectangle
341
- cv2.putText(
342
- self.im,
343
- label,
344
- (text_x, text_y),
345
- cv2.FONT_HERSHEY_SIMPLEX,
346
- self.sf - 0.1,
347
- self.get_txt_color(color, txt_color),
348
- self.tf,
349
- lineType=cv2.LINE_AA,
350
- )
351
-
352
274
  def box_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255), rotated=False):
353
275
  """
354
276
  Draws a bounding box to image with label.
@@ -591,342 +513,6 @@ class Annotator:
591
513
  height = y_max - y_min
592
514
  return width, height, width * height
593
515
 
594
- def draw_region(self, reg_pts=None, color=(0, 255, 0), thickness=5):
595
- """
596
- Draw region line.
597
-
598
- Args:
599
- reg_pts (list): Region Points (for line 2 points, for region 4 points)
600
- color (tuple): Region Color value
601
- thickness (int): Region area thickness value
602
- """
603
- cv2.polylines(self.im, [np.array(reg_pts, dtype=np.int32)], isClosed=True, color=color, thickness=thickness)
604
-
605
- # Draw small circles at the corner points
606
- for point in reg_pts:
607
- cv2.circle(self.im, (point[0], point[1]), thickness * 2, color, -1) # -1 fills the circle
608
-
609
- def draw_centroid_and_tracks(self, track, color=(255, 0, 255), track_thickness=2):
610
- """
611
- Draw centroid point and track trails.
612
-
613
- Args:
614
- track (list): object tracking points for trails display
615
- color (tuple): tracks line color
616
- track_thickness (int): track line thickness value
617
- """
618
- points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
619
- cv2.polylines(self.im, [points], isClosed=False, color=color, thickness=track_thickness)
620
- cv2.circle(self.im, (int(track[-1][0]), int(track[-1][1])), track_thickness * 2, color, -1)
621
-
622
- def queue_counts_display(self, label, points=None, region_color=(255, 255, 255), txt_color=(0, 0, 0)):
623
- """
624
- Displays queue counts on an image centered at the points with customizable font size and colors.
625
-
626
- Args:
627
- label (str): Queue counts label.
628
- points (tuple): Region points for center point calculation to display text.
629
- region_color (tuple): RGB queue region color.
630
- txt_color (tuple): RGB text display color.
631
- """
632
- x_values = [point[0] for point in points]
633
- y_values = [point[1] for point in points]
634
- center_x = sum(x_values) // len(points)
635
- center_y = sum(y_values) // len(points)
636
-
637
- text_size = cv2.getTextSize(label, 0, fontScale=self.sf, thickness=self.tf)[0]
638
- text_width = text_size[0]
639
- text_height = text_size[1]
640
-
641
- rect_width = text_width + 20
642
- rect_height = text_height + 20
643
- rect_top_left = (center_x - rect_width // 2, center_y - rect_height // 2)
644
- rect_bottom_right = (center_x + rect_width // 2, center_y + rect_height // 2)
645
- cv2.rectangle(self.im, rect_top_left, rect_bottom_right, region_color, -1)
646
-
647
- text_x = center_x - text_width // 2
648
- text_y = center_y + text_height // 2
649
-
650
- # Draw text
651
- cv2.putText(
652
- self.im,
653
- label,
654
- (text_x, text_y),
655
- 0,
656
- fontScale=self.sf,
657
- color=txt_color,
658
- thickness=self.tf,
659
- lineType=cv2.LINE_AA,
660
- )
661
-
662
- def display_objects_labels(self, im0, text, txt_color, bg_color, x_center, y_center, margin):
663
- """
664
- Display the bounding boxes labels in parking management app.
665
-
666
- Args:
667
- im0 (ndarray): Inference image.
668
- text (str): Object/class name.
669
- txt_color (tuple): Display color for text foreground.
670
- bg_color (tuple): Display color for text background.
671
- x_center (float): The x position center point for bounding box.
672
- y_center (float): The y position center point for bounding box.
673
- margin (int): The gap between text and rectangle for better display.
674
- """
675
- text_size = cv2.getTextSize(text, 0, fontScale=self.sf, thickness=self.tf)[0]
676
- text_x = x_center - text_size[0] // 2
677
- text_y = y_center + text_size[1] // 2
678
-
679
- rect_x1 = text_x - margin
680
- rect_y1 = text_y - text_size[1] - margin
681
- rect_x2 = text_x + text_size[0] + margin
682
- rect_y2 = text_y + margin
683
- cv2.rectangle(im0, (rect_x1, rect_y1), (rect_x2, rect_y2), bg_color, -1)
684
- cv2.putText(im0, text, (text_x, text_y), 0, self.sf, txt_color, self.tf, lineType=cv2.LINE_AA)
685
-
686
- def display_analytics(self, im0, text, txt_color, bg_color, margin):
687
- """
688
- Display the overall statistics for parking lots.
689
-
690
- Args:
691
- im0 (ndarray): Inference image.
692
- text (dict): Labels dictionary.
693
- txt_color (tuple): Display color for text foreground.
694
- bg_color (tuple): Display color for text background.
695
- margin (int): Gap between text and rectangle for better display.
696
- """
697
- horizontal_gap = int(im0.shape[1] * 0.02)
698
- vertical_gap = int(im0.shape[0] * 0.01)
699
- text_y_offset = 0
700
- for label, value in text.items():
701
- txt = f"{label}: {value}"
702
- text_size = cv2.getTextSize(txt, 0, self.sf, self.tf)[0]
703
- if text_size[0] < 5 or text_size[1] < 5:
704
- text_size = (5, 5)
705
- text_x = im0.shape[1] - text_size[0] - margin * 2 - horizontal_gap
706
- text_y = text_y_offset + text_size[1] + margin * 2 + vertical_gap
707
- rect_x1 = text_x - margin * 2
708
- rect_y1 = text_y - text_size[1] - margin * 2
709
- rect_x2 = text_x + text_size[0] + margin * 2
710
- rect_y2 = text_y + margin * 2
711
- cv2.rectangle(im0, (rect_x1, rect_y1), (rect_x2, rect_y2), bg_color, -1)
712
- cv2.putText(im0, txt, (text_x, text_y), 0, self.sf, txt_color, self.tf, lineType=cv2.LINE_AA)
713
- text_y_offset = rect_y2
714
-
715
- @staticmethod
716
- def estimate_pose_angle(a, b, c):
717
- """
718
- Calculate the pose angle for object.
719
-
720
- Args:
721
- a (float) : The value of pose point a
722
- b (float): The value of pose point b
723
- c (float): The value o pose point c
724
-
725
- Returns:
726
- angle (degree): Degree value of angle between three points
727
- """
728
- a, b, c = np.array(a), np.array(b), np.array(c)
729
- radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
730
- angle = np.abs(radians * 180.0 / np.pi)
731
- if angle > 180.0:
732
- angle = 360 - angle
733
- return angle
734
-
735
- def draw_specific_points(self, keypoints, indices=None, radius=2, conf_thres=0.25):
736
- """
737
- Draw specific keypoints for gym steps counting.
738
-
739
- Args:
740
- keypoints (list): Keypoints data to be plotted.
741
- indices (list, optional): Keypoint indices to be plotted. Defaults to [2, 5, 7].
742
- radius (int, optional): Keypoint radius. Defaults to 2.
743
- conf_thres (float, optional): Confidence threshold for keypoints. Defaults to 0.25.
744
-
745
- Returns:
746
- (numpy.ndarray): Image with drawn keypoints.
747
-
748
- Note:
749
- Keypoint format: [x, y] or [x, y, confidence].
750
- Modifies self.im in-place.
751
- """
752
- indices = indices or [2, 5, 7]
753
- points = [(int(k[0]), int(k[1])) for i, k in enumerate(keypoints) if i in indices and k[2] >= conf_thres]
754
-
755
- # Draw lines between consecutive points
756
- for start, end in zip(points[:-1], points[1:]):
757
- cv2.line(self.im, start, end, (0, 255, 0), 2, lineType=cv2.LINE_AA)
758
-
759
- # Draw circles for keypoints
760
- for pt in points:
761
- cv2.circle(self.im, pt, radius, (0, 0, 255), -1, lineType=cv2.LINE_AA)
762
-
763
- return self.im
764
-
765
- def plot_workout_information(self, display_text, position, color=(104, 31, 17), txt_color=(255, 255, 255)):
766
- """
767
- Draw text with a background on the image.
768
-
769
- Args:
770
- display_text (str): The text to be displayed.
771
- position (tuple): Coordinates (x, y) on the image where the text will be placed.
772
- color (tuple, optional): Text background color
773
- txt_color (tuple, optional): Text foreground color
774
- """
775
- (text_width, text_height), _ = cv2.getTextSize(display_text, 0, self.sf, self.tf)
776
-
777
- # Draw background rectangle
778
- cv2.rectangle(
779
- self.im,
780
- (position[0], position[1] - text_height - 5),
781
- (position[0] + text_width + 10, position[1] - text_height - 5 + text_height + 10 + self.tf),
782
- color,
783
- -1,
784
- )
785
- # Draw text
786
- cv2.putText(self.im, display_text, position, 0, self.sf, txt_color, self.tf)
787
-
788
- return text_height
789
-
790
- def plot_angle_and_count_and_stage(
791
- self, angle_text, count_text, stage_text, center_kpt, color=(104, 31, 17), txt_color=(255, 255, 255)
792
- ):
793
- """
794
- Plot the pose angle, count value, and step stage.
795
-
796
- Args:
797
- angle_text (str): Angle value for workout monitoring
798
- count_text (str): Counts value for workout monitoring
799
- stage_text (str): Stage decision for workout monitoring
800
- center_kpt (list): Centroid pose index for workout monitoring
801
- color (tuple, optional): Text background color
802
- txt_color (tuple, optional): Text foreground color
803
- """
804
- # Format text
805
- angle_text, count_text, stage_text = f" {angle_text:.2f}", f"Steps : {count_text}", f" {stage_text}"
806
-
807
- # Draw angle, count and stage text
808
- angle_height = self.plot_workout_information(
809
- angle_text, (int(center_kpt[0]), int(center_kpt[1])), color, txt_color
810
- )
811
- count_height = self.plot_workout_information(
812
- count_text, (int(center_kpt[0]), int(center_kpt[1]) + angle_height + 20), color, txt_color
813
- )
814
- self.plot_workout_information(
815
- stage_text, (int(center_kpt[0]), int(center_kpt[1]) + angle_height + count_height + 40), color, txt_color
816
- )
817
-
818
- def seg_bbox(self, mask, mask_color=(255, 0, 255), label=None, txt_color=(255, 255, 255)):
819
- """
820
- Function for drawing segmented object in bounding box shape.
821
-
822
- Args:
823
- mask (np.ndarray): A 2D array of shape (N, 2) containing the contour points of the segmented object.
824
- mask_color (tuple): RGB color for the contour and label background.
825
- label (str, optional): Text label for the object. If None, no label is drawn.
826
- txt_color (tuple): RGB color for the label text.
827
- """
828
- if mask.size == 0: # no masks to plot
829
- return
830
-
831
- cv2.polylines(self.im, [np.int32([mask])], isClosed=True, color=mask_color, thickness=2)
832
- if label:
833
- text_size, _ = cv2.getTextSize(label, 0, self.sf, self.tf)
834
- cv2.rectangle(
835
- self.im,
836
- (int(mask[0][0]) - text_size[0] // 2 - 10, int(mask[0][1]) - text_size[1] - 10),
837
- (int(mask[0][0]) + text_size[0] // 2 + 10, int(mask[0][1] + 10)),
838
- mask_color,
839
- -1,
840
- )
841
- cv2.putText(
842
- self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
843
- )
844
-
845
- def sweep_annotator(self, line_x=0, line_y=0, label=None, color=(221, 0, 186), txt_color=(255, 255, 255)):
846
- """
847
- Function for drawing a sweep annotation line and an optional label.
848
-
849
- Args:
850
- line_x (int): The x-coordinate of the sweep line.
851
- line_y (int): The y-coordinate limit of the sweep line.
852
- label (str, optional): Text label to be drawn in center of sweep line. If None, no label is drawn.
853
- color (tuple): RGB color for the line and label background.
854
- txt_color (tuple): RGB color for the label text.
855
- """
856
- # Draw the sweep line
857
- cv2.line(self.im, (line_x, 0), (line_x, line_y), color, self.tf * 2)
858
-
859
- # Draw label, if provided
860
- if label:
861
- (text_width, text_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf, self.tf)
862
- cv2.rectangle(
863
- self.im,
864
- (line_x - text_width // 2 - 10, line_y // 2 - text_height // 2 - 10),
865
- (line_x + text_width // 2 + 10, line_y // 2 + text_height // 2 + 10),
866
- color,
867
- -1,
868
- )
869
- cv2.putText(
870
- self.im,
871
- label,
872
- (line_x - text_width // 2, line_y // 2 + text_height // 2),
873
- cv2.FONT_HERSHEY_SIMPLEX,
874
- self.sf,
875
- txt_color,
876
- self.tf,
877
- )
878
-
879
- def plot_distance_and_line(
880
- self, pixels_distance, centroids, line_color=(104, 31, 17), centroid_color=(255, 0, 255)
881
- ):
882
- """
883
- Plot the distance and line on frame.
884
-
885
- Args:
886
- pixels_distance (float): Pixels distance between two bbox centroids.
887
- centroids (list): Bounding box centroids data.
888
- line_color (tuple, optional): Distance line color.
889
- centroid_color (tuple, optional): Bounding box centroid color.
890
- """
891
- # Get the text size
892
- text = f"Pixels Distance: {pixels_distance:.2f}"
893
- (text_width_m, text_height_m), _ = cv2.getTextSize(text, 0, self.sf, self.tf)
894
-
895
- # Define corners with 10-pixel margin and draw rectangle
896
- cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 20, 25 + text_height_m + 20), line_color, -1)
897
-
898
- # Calculate the position for the text with a 10-pixel margin and draw text
899
- text_position = (25, 25 + text_height_m + 10)
900
- cv2.putText(
901
- self.im,
902
- text,
903
- text_position,
904
- 0,
905
- self.sf,
906
- (255, 255, 255),
907
- self.tf,
908
- cv2.LINE_AA,
909
- )
910
-
911
- cv2.line(self.im, centroids[0], centroids[1], line_color, 3)
912
- cv2.circle(self.im, centroids[0], 6, centroid_color, -1)
913
- cv2.circle(self.im, centroids[1], 6, centroid_color, -1)
914
-
915
- def visioneye(self, box, center_point, color=(235, 219, 11), pin_color=(255, 0, 255)):
916
- """
917
- Function for pinpoint human-vision eye mapping and plotting.
918
-
919
- Args:
920
- box (list): Bounding box coordinates
921
- center_point (tuple): center point for vision eye view
922
- color (tuple): object centroid and line color value
923
- pin_color (tuple): visioneye point color value
924
- """
925
- center_bbox = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
926
- cv2.circle(self.im, center_point, self.tf * 2, pin_color, -1)
927
- cv2.circle(self.im, center_bbox, self.tf * 2, color, -1)
928
- cv2.line(self.im, center_point, center_bbox, color, self.tf)
929
-
930
516
 
931
517
  @TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395
932
518
  @plt_settings()
@@ -1005,14 +591,11 @@ def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False,
1005
591
  Returns:
1006
592
  (numpy.ndarray): The cropped image.
1007
593
 
1008
- Example:
1009
- ```python
1010
- from ultralytics.utils.plotting import save_one_box
1011
-
1012
- xyxy = [50, 50, 150, 150]
1013
- im = cv2.imread("image.jpg")
1014
- cropped_im = save_one_box(xyxy, im, file="cropped.jpg", square=True)
1015
- ```
594
+ Examples:
595
+ >>> from ultralytics.utils.plotting import save_one_box
596
+ >>> xyxy = [50, 50, 150, 150]
597
+ >>> im = cv2.imread("image.jpg")
598
+ >>> cropped_im = save_one_box(xyxy, im, file="cropped.jpg", square=True)
1016
599
  """
1017
600
  if not isinstance(xyxy, torch.Tensor): # may be list
1018
601
  xyxy = torch.stack(xyxy)
@@ -1214,12 +797,9 @@ def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False,
1214
797
  on_plot (callable, optional): Callback function to be executed after plotting. Takes filename as an argument.
1215
798
  Defaults to None.
1216
799
 
1217
- Example:
1218
- ```python
1219
- from ultralytics.utils.plotting import plot_results
1220
-
1221
- plot_results("path/to/results.csv", segment=True)
1222
- ```
800
+ Examples:
801
+ >>> from ultralytics.utils.plotting import plot_results
802
+ >>> plot_results("path/to/results.csv", segment=True)
1223
803
  """
1224
804
  import pandas as pd # scope for faster 'import ultralytics'
1225
805
  from scipy.ndimage import gaussian_filter1d
@@ -94,12 +94,10 @@ def autocast(enabled: bool, device: str = "cuda"):
94
94
  - For PyTorch versions 1.13 and newer, it uses `torch.amp.autocast`.
95
95
  - For older versions, it uses `torch.cuda.autocast`.
96
96
 
97
- Example:
98
- ```python
99
- with autocast(amp=True):
100
- # Your mixed precision operations here
101
- pass
102
- ```
97
+ Examples:
98
+ >>> with autocast(amp=True):
99
+ ... # Your mixed precision operations here
100
+ ... pass
103
101
  """
104
102
  if TORCH_1_13:
105
103
  return torch.amp.autocast(device, enabled=enabled)
@@ -345,17 +343,15 @@ def model_info_for_loggers(trainer):
345
343
  """
346
344
  Return model info dict with useful model information.
347
345
 
348
- Example:
346
+ Examples:
349
347
  YOLOv8n info for loggers
350
- ```python
351
- results = {
352
- "model/parameters": 3151904,
353
- "model/GFLOPs": 8.746,
354
- "model/speed_ONNX(ms)": 41.244,
355
- "model/speed_TensorRT(ms)": 3.211,
356
- "model/speed_PyTorch(ms)": 18.755,
357
- }
358
- ```
348
+ >>> results = {
349
+ ... "model/parameters": 3151904,
350
+ ... "model/GFLOPs": 8.746,
351
+ ... "model/speed_ONNX(ms)": 41.244,
352
+ ... "model/speed_TensorRT(ms)": 3.211,
353
+ ... "model/speed_PyTorch(ms)": 18.755,
354
+ ...}
359
355
  """
360
356
  if trainer.args.profile: # profile ONNX and TensorRT times
361
357
  from ultralytics.utils.benchmarks import ProfileModels
@@ -562,14 +558,11 @@ def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "", updates: dict
562
558
  Returns:
563
559
  (dict): The combined checkpoint dictionary.
564
560
 
565
- Example:
566
- ```python
567
- from pathlib import Path
568
- from ultralytics.utils.torch_utils import strip_optimizer
569
-
570
- for f in Path("path/to/model/checkpoints").rglob("*.pt"):
571
- strip_optimizer(f)
572
- ```
561
+ Examples:
562
+ >>> from pathlib import Path
563
+ >>> from ultralytics.utils.torch_utils import strip_optimizer
564
+ >>> for f in Path("path/to/model/checkpoints").rglob("*.pt"):
565
+ >>> strip_optimizer(f)
573
566
 
574
567
  Note:
575
568
  Use `ultralytics.nn.torch_safe_load` for missing modules with `x = torch_safe_load(f)[0]`
@@ -660,15 +653,12 @@ def profile(input, ops, n=10, device=None, max_num_obj=0):
660
653
  """
661
654
  Ultralytics speed, memory and FLOPs profiler.
662
655
 
663
- Example:
664
- ```python
665
- from ultralytics.utils.torch_utils import profile
666
-
667
- input = torch.randn(16, 3, 640, 640)
668
- m1 = lambda x: x * torch.sigmoid(x)
669
- m2 = nn.SiLU()
670
- profile(input, [m1, m2], n=100) # profile over 100 iterations
671
- ```
656
+ Examples:
657
+ >>> from ultralytics.utils.torch_utils import profile
658
+ >>> input = torch.randn(16, 3, 640, 640)
659
+ >>> m1 = lambda x: x * torch.sigmoid(x)
660
+ >>> m2 = nn.SiLU()
661
+ >>> profile(input, [m1, m2], n=100) # profile over 100 iterations
672
662
  """
673
663
  results = []
674
664
  if not isinstance(device, torch.device):
@@ -26,16 +26,12 @@ def run_ray_tune(
26
26
  Returns:
27
27
  (dict): A dictionary containing the results of the hyperparameter search.
28
28
 
29
- Example:
30
- ```python
31
- from ultralytics import YOLO
29
+ Examples:
30
+ >>> from ultralytics import YOLO
31
+ >>> model = YOLO("yolo11n.pt") # Load a YOLO11n model
32
32
 
33
- # Load a YOLO11n model
34
- model = YOLO("yolo11n.pt")
35
-
36
- # Start tuning hyperparameters for YOLO11n training on the COCO8 dataset
37
- result_grid = model.tune(data="coco8.yaml", use_ray=True)
38
- ```
33
+ Start tuning hyperparameters for YOLO11n training on the COCO8 dataset
34
+ >>> result_grid = model.tune(data="coco8.yaml", use_ray=True)
39
35
  """
40
36
  LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")
41
37
  if train_args is None:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: ultralytics
3
- Version: 8.3.87
3
+ Version: 8.3.89
4
4
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
5
5
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
6
6
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -65,7 +65,7 @@ Requires-Dist: coremltools>=7.0; (platform_system != "Windows" and python_versio
65
65
  Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.11") and extra == "export"
66
66
  Requires-Dist: openvino!=2025.0.0,>=2024.0.0; extra == "export"
67
67
  Requires-Dist: tensorflow>=2.0.0; extra == "export"
68
- Requires-Dist: tensorflowjs>=3.9.0; extra == "export"
68
+ Requires-Dist: tensorflowjs>=4.0.0; extra == "export"
69
69
  Requires-Dist: tensorstore>=0.1.63; (platform_machine == "aarch64" and python_version >= "3.9") and extra == "export"
70
70
  Requires-Dist: keras; extra == "export"
71
71
  Requires-Dist: flatbuffers<100,>=23.5.26; platform_machine == "aarch64" and extra == "export"