ultralytics 8.3.37__py3-none-any.whl → 8.3.39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +5 -5
- ultralytics/cfg/default.yaml +0 -1
- ultralytics/data/augment.py +3 -4
- ultralytics/data/loaders.py +1 -1
- ultralytics/engine/exporter.py +1 -0
- ultralytics/engine/model.py +29 -0
- ultralytics/models/sam/__init__.py +2 -2
- ultralytics/models/sam/model.py +1 -1
- ultralytics/models/sam/modules/sam.py +16 -39
- ultralytics/models/sam/predict.py +817 -28
- ultralytics/models/yolo/classify/predict.py +1 -1
- ultralytics/models/yolo/detect/train.py +1 -1
- ultralytics/nn/modules/block.py +2 -2
- ultralytics/nn/modules/conv.py +1 -1
- ultralytics/nn/modules/head.py +1 -1
- ultralytics/solutions/parking_management.py +1 -1
- ultralytics/trackers/basetrack.py +1 -1
- ultralytics/trackers/utils/matching.py +3 -4
- ultralytics/utils/__init__.py +8 -6
- ultralytics/utils/loss.py +2 -3
- ultralytics/utils/metrics.py +12 -13
- ultralytics/utils/ops.py +22 -17
- ultralytics/utils/plotting.py +68 -24
- ultralytics/utils/torch_utils.py +5 -11
- {ultralytics-8.3.37.dist-info → ultralytics-8.3.39.dist-info}/METADATA +12 -8
- {ultralytics-8.3.37.dist-info → ultralytics-8.3.39.dist-info}/RECORD +31 -31
- {ultralytics-8.3.37.dist-info → ultralytics-8.3.39.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.37.dist-info → ultralytics-8.3.39.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.37.dist-info → ultralytics-8.3.39.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.37.dist-info → ultralytics-8.3.39.dist-info}/top_level.txt +0 -0
ultralytics/models/yolo/classify/predict.py CHANGED

@@ -54,6 +54,6 @@ class ClassificationPredictor(BasePredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
 
         return [
-            Results(orig_img, path=img_path, names=self.model.names, probs=pred)
+            Results(orig_img, path=img_path, names=self.model.names, probs=pred.softmax(0))
             for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0])
         ]
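The `softmax` move means `Results.probs` is normalized during postprocessing rather than inside the model head. A minimal sketch of the effect on hypothetical 3-class logits:

```python
import torch

# Hypothetical raw logits for a 3-class model; softmax(0) rescales them into
# probabilities that sum to 1, which is what Results.probs now receives.
pred = torch.tensor([2.0, 0.5, -1.0])
probs = pred.softmax(0)
print(probs)        # tensor([0.7856, 0.1753, 0.0391]) (approx.)
print(probs.sum())  # tensor(1.)
```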
ultralytics/models/yolo/detect/train.py CHANGED

@@ -146,5 +146,5 @@ class DetectionTrainer(BaseTrainer):
         """Get batch size by calculating memory occupation of model."""
         train_dataset = self.build_dataset(self.trainset, mode="train", batch=16)
         # 4 for mosaic augmentation
-        max_num_obj = max(len(
+        max_num_obj = max(len(label["cls"]) for label in train_dataset.labels) * 4
         return super().auto_batch(max_num_obj)
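`auto_batch` now sizes batches from the densest training image, scaled by 4 for mosaic. A sketch of the computation with made-up label dicts (each `"cls"` array holds one entry per object):

```python
# Hypothetical dataset labels: one dict per image, "cls" holds per-object class ids
labels = [{"cls": [0, 1]}, {"cls": [2]}, {"cls": [0, 0, 1, 3]}]

max_num_obj = max(len(label["cls"]) for label in labels) * 4  # 4 for mosaic augmentation
print(max_num_obj)  # 16: densest image has 4 objects, mosaic stitches 4 images
```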
ultralytics/nn/modules/block.py CHANGED

@@ -280,8 +280,8 @@ class RepC3(nn.Module):
         """Initialize CSP Bottleneck with a single convolution using input channels, output channels, and number."""
         super().__init__()
         c_ = int(c2 * e)  # hidden channels
-        self.cv1 = Conv(c1,
-        self.cv2 = Conv(c1,
+        self.cv1 = Conv(c1, c_, 1, 1)
+        self.cv2 = Conv(c1, c_, 1, 1)
         self.m = nn.Sequential(*[RepConv(c_, c_) for _ in range(n)])
         self.cv3 = Conv(c_, c2, 1, 1) if c_ != c2 else nn.Identity()
 
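After the fix, the expansion ratio `e` genuinely controls the hidden width `c_ = int(c2 * e)` of `cv1`/`cv2`. A quick shape check, assuming an `ultralytics` install that includes this release:

```python
import torch
from ultralytics.nn.modules.block import RepC3

m = RepC3(c1=64, c2=128, n=2, e=0.5)  # hidden channels c_ = int(128 * 0.5) = 64
x = torch.randn(1, 64, 32, 32)
print(m(x).shape)  # torch.Size([1, 128, 32, 32]); cv3 projects c_ -> c2
```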
ultralytics/nn/modules/conv.py CHANGED

ultralytics/nn/modules/head.py CHANGED
ultralytics/solutions/parking_management.py CHANGED

@@ -89,7 +89,7 @@ class ParkingPtsSelection:
         """Uploads and displays an image on the canvas, resizing it to fit within specified dimensions."""
         from PIL import Image, ImageTk  # scope because ImageTk requires tkinter package
 
-        self.image = Image.open(self.filedialog.askopenfilename(filetypes=[("Image Files", "*.png
+        self.image = Image.open(self.filedialog.askopenfilename(filetypes=[("Image Files", "*.png *.jpg *.jpeg")]))
         if not self.image:
             return
 
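The widened filter pattern lets the tkinter file dialog list JPEGs alongside PNGs. A minimal standalone sketch of the same pattern (hypothetical script, needs a display):

```python
from tkinter import Tk, filedialog

root = Tk()
root.withdraw()  # hide the empty root window, keep only the dialog
# A single filter group may hold several space-separated glob patterns
path = filedialog.askopenfilename(filetypes=[("Image Files", "*.png *.jpg *.jpeg")])
print(path)
```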
ultralytics/trackers/basetrack.py CHANGED

@@ -44,7 +44,7 @@ class BaseTrack:
         start_frame (int): The frame number where tracking started.
         frame_id (int): The most recent frame ID processed by the track.
         time_since_update (int): Frames passed since the last update.
-        location (
+        location (tuple): The location of the object in the context of multi-camera tracking.
 
     Methods:
         end_frame: Returns the ID of the last frame where the object was tracked.
ultralytics/trackers/utils/matching.py CHANGED

@@ -27,10 +27,9 @@ def linear_assignment(cost_matrix: np.ndarray, thresh: float, use_lap: bool = Tr
         use_lap (bool): Use lap.lapjv for the assignment. If False, scipy.optimize.linear_sum_assignment is used.
 
     Returns:
-        (
-
-
-            - unmatched_b (np.ndarray): Array of unmatched indices from the second set, with shape (M,).
+        matched_indices (np.ndarray): Array of matched indices of shape (K, 2), where K is the number of matches.
+        unmatched_a (np.ndarray): Array of unmatched indices from the first set, with shape (L,).
+        unmatched_b (np.ndarray): Array of unmatched indices from the second set, with shape (M,).
 
     Examples:
         >>> cost_matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
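For reference, a hedged usage sketch of `linear_assignment` showing the three documented return values (assumes the `lap` package, the default backend here):

```python
import numpy as np
from ultralytics.trackers.utils.matching import linear_assignment

cost_matrix = np.array([[1.0, 2.0, 3.0], [4.0, 1.0, 6.0]])
matches, unmatched_a, unmatched_b = linear_assignment(cost_matrix, thresh=2.5)
print(matches)      # e.g. [[0, 0], [1, 1]]: rows paired with columns under the threshold
print(unmatched_a)  # []
print(unmatched_b)  # [2]: column 2 found no affordable match
```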
ultralytics/utils/__init__.py CHANGED

@@ -607,13 +607,12 @@ def is_raspberrypi() -> bool:
 
 
 def is_jetson() -> bool:
     """
-    Determines if the Python environment is running on
-    information.
+    Determines if the Python environment is running on an NVIDIA Jetson device by checking the device model information.
 
     Returns:
-        (bool): True if running on
+        (bool): True if running on an NVIDIA Jetson device, False otherwise.
     """
-    return
+    return any(keyword in PROC_DEVICE_MODEL.lower() for keyword in ("nvidia", "jetson"))
 
 
 def is_online() -> bool:
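The new one-liner simply substring-matches the device model string. A self-contained sketch of the logic with a hypothetical `PROC_DEVICE_MODEL` value:

```python
# Hypothetical device-model string, as it might be read from the Linux device tree
PROC_DEVICE_MODEL = "NVIDIA Jetson Orin Nano Developer Kit"

print(any(keyword in PROC_DEVICE_MODEL.lower() for keyword in ("nvidia", "jetson")))  # True
```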
@@ -1255,9 +1254,12 @@ class SettingsManager(JSONDict):
         self.update(self.defaults)
 
 
-def deprecation_warn(arg, new_arg):
+def deprecation_warn(arg, new_arg=None):
     """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument."""
-
+    msg = f"WARNING ⚠️ '{arg}' is deprecated and will be removed in in the future."
+    if new_arg is not None:
+        msg += f" Use '{new_arg}' instead."
+    LOGGER.warning(msg)
 
 
 def clean_url(url):
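With `new_arg` now optional, callers can flag a removed argument without suggesting a replacement. A usage sketch (the argument names are illustrative):

```python
from ultralytics.utils import deprecation_warn

deprecation_warn("hide_labels", "show_labels")
# WARNING ⚠️ 'hide_labels' is deprecated and will be removed in in the future. Use 'show_labels' instead.

deprecation_warn("label_smoothing")  # no replacement suggested
# WARNING ⚠️ 'label_smoothing' is deprecated and will be removed in in the future.
```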
ultralytics/utils/loss.py CHANGED

@@ -552,9 +552,8 @@ class v8PoseLoss(v8DetectionLoss):
             pred_kpts (torch.Tensor): Predicted keypoints, shape (BS, N_anchors, N_kpts_per_object, kpts_dim).
 
         Returns:
-            (
-
-                - kpts_obj_loss (torch.Tensor): The keypoints object loss.
+            kpts_loss (torch.Tensor): The keypoints loss.
+            kpts_obj_loss (torch.Tensor): The keypoints object loss.
         """
         batch_idx = batch_idx.flatten()
         batch_size = len(masks)
ultralytics/utils/metrics.py CHANGED

@@ -549,19 +549,18 @@ def ap_per_class(
         prefix (str, optional): A prefix string for saving the plot files. Defaults to an empty string.
 
     Returns:
-        (
-
-
-
-
-
-
-
-
-
-
-        prec_values: Precision values at mAP@0.5 for each class. Shape: (nc, 1000).
+        tp (np.ndarray): True positive counts at threshold given by max F1 metric for each class.Shape: (nc,).
+        fp (np.ndarray): False positive counts at threshold given by max F1 metric for each class. Shape: (nc,).
+        p (np.ndarray): Precision values at threshold given by max F1 metric for each class. Shape: (nc,).
+        r (np.ndarray): Recall values at threshold given by max F1 metric for each class. Shape: (nc,).
+        f1 (np.ndarray): F1-score values at threshold given by max F1 metric for each class. Shape: (nc,).
+        ap (np.ndarray): Average precision for each class at different IoU thresholds. Shape: (nc, 10).
+        unique_classes (np.ndarray): An array of unique classes that have data. Shape: (nc,).
+        p_curve (np.ndarray): Precision curves for each class. Shape: (nc, 1000).
+        r_curve (np.ndarray): Recall curves for each class. Shape: (nc, 1000).
+        f1_curve (np.ndarray): F1-score curves for each class. Shape: (nc, 1000).
+        x (np.ndarray): X-axis values for the curves. Shape: (1000,).
+        prec_values (np.ndarray): Precision values at mAP@0.5 for each class. Shape: (nc, 1000).
     """
     # Sort by objectness
     i = np.argsort(-conf)
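A hedged sketch that unpacks the twelve documented return values on tiny synthetic inputs (values are illustrative, shapes follow the docstring):

```python
import numpy as np
from ultralytics.utils.metrics import ap_per_class

# Synthetic single-class data: 4 detections scored against 10 IoU thresholds
tp = np.array([[1] + [0] * 9, [1] + [0] * 9, [0] * 10, [1] + [0] * 9], dtype=bool)
conf = np.array([0.9, 0.8, 0.7, 0.6])
pred_cls = np.zeros(4)
target_cls = np.zeros(4)

tp_c, fp_c, p, r, f1, ap, unique_classes, p_curve, r_curve, f1_curve, x, prec_values = ap_per_class(
    tp, conf, pred_cls, target_cls
)
print(ap.shape, p_curve.shape, x.shape)  # (1, 10) (1, 1000) (1000,)
```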
ultralytics/utils/ops.py CHANGED

@@ -75,9 +75,8 @@ def segment2box(segment, width=640, height=640):
         (np.ndarray): the minimum and maximum x and y values of the segment.
     """
     x, y = segment.T  # segment xy
-
-
-    y = y[inside]
+    x = x.clip(0, width)
+    y = y.clip(0, height)
     return (
         np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype)
         if any(x)
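The rewrite clips out-of-bounds vertices to the image instead of discarding them, so a polygon that pokes outside still yields a valid box. A quick sketch:

```python
import numpy as np
from ultralytics.utils.ops import segment2box

# One vertex left of the image, one below it: both are clipped, not dropped
segment = np.array([[-10.0, 5.0], [50.0, 700.0], [120.0, 30.0]])
print(segment2box(segment, width=640, height=640))  # [  0.   5. 120. 640.]
```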
@@ -317,11 +316,11 @@ def clip_boxes(boxes, shape):
     Takes a list of bounding boxes and a shape (height, width) and clips the bounding boxes to the shape.
 
     Args:
-        boxes (torch.Tensor):
-        shape (tuple):
+        boxes (torch.Tensor): The bounding boxes to clip.
+        shape (tuple): The shape of the image.
 
     Returns:
-        (torch.Tensor | numpy.ndarray):
+        (torch.Tensor | numpy.ndarray): The clipped boxes.
     """
     if isinstance(boxes, torch.Tensor):  # faster individually (WARNING: inplace .clamp_() Apple MPS bug)
         boxes[..., 0] = boxes[..., 0].clamp(0, shape[1])  # x1
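For reference, a small usage sketch matching the completed docstring (`shape` is `(height, width)`):

```python
import torch
from ultralytics.utils.ops import clip_boxes

boxes = torch.tensor([[-5.0, 10.0, 700.0, 300.0]])  # xyxy, partly outside a 640x480 image
print(clip_boxes(boxes, (480, 640)))  # tensor([[  0.,  10., 640., 300.]])
```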
@@ -359,9 +358,9 @@ def scale_image(masks, im0_shape, ratio_pad=None):
     Takes a mask, and resizes it to the original image size.
 
     Args:
-        masks (np.ndarray):
-        im0_shape (tuple):
-        ratio_pad (tuple):
+        masks (np.ndarray): Resized and padded masks/images, [h, w, num]/[h, w, 3].
+        im0_shape (tuple): The original image shape.
+        ratio_pad (tuple): The ratio of the padding to the original image.
 
     Returns:
         masks (np.ndarray): The masks that are being returned with shape [h, w, num].
@@ -692,12 +691,12 @@ def process_mask_native(protos, masks_in, bboxes, shape):
 
     Args:
         protos (torch.Tensor): [mask_dim, mask_h, mask_w]
-        masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms
-        bboxes (torch.Tensor): [n, 4], n is number of masks after nms
-        shape (tuple):
+        masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms.
+        bboxes (torch.Tensor): [n, 4], n is number of masks after nms.
+        shape (tuple): The size of the input image (h,w).
 
     Returns:
-        masks (torch.Tensor): The returned masks with dimensions [h, w, n]
+        masks (torch.Tensor): The returned masks with dimensions [h, w, n].
     """
     c, mh, mw = protos.shape  # CHW
     masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)
@@ -783,23 +782,29 @@ def regularize_rboxes(rboxes):
     return torch.stack([x, y, w_, h_, t], dim=-1)  # regularized boxes
 
 
-def masks2segments(masks, strategy="
+def masks2segments(masks, strategy="all"):
     """
     It takes a list of masks(n,h,w) and returns a list of segments(n,xy).
 
     Args:
         masks (torch.Tensor): the output of the model, which is a tensor of shape (batch_size, 160, 160)
-        strategy (str): '
+        strategy (str): 'all' or 'largest'. Defaults to all
 
     Returns:
         segments (List): list of segment masks
     """
+    from ultralytics.data.converter import merge_multi_segment
+
     segments = []
     for x in masks.int().cpu().numpy().astype("uint8"):
         c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
         if c:
-            if strategy == "
-                c =
+            if strategy == "all":  # merge and concatenate all segments
+                c = (
+                    np.concatenate(merge_multi_segment([x.reshape(-1, 2) for x in c]))
+                    if len(c) > 1
+                    else c[0].reshape(-1, 2)
+                )
             elif strategy == "largest":  # select largest segment
                 c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
             else:
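The default `strategy="all"` merges every contour of a mask via `merge_multi_segment`. A hedged sketch on a synthetic two-blob mask:

```python
import torch
from ultralytics.utils.ops import masks2segments

# A single mask containing two disjoint blobs
mask = torch.zeros(1, 160, 160)
mask[0, 10:40, 10:40] = 1
mask[0, 100:150, 100:150] = 1

segments = masks2segments(mask)  # strategy="all" merges both contours
print(len(segments), segments[0].shape)  # 1 segment, e.g. (N, 2) merged polygon
```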
ultralytics/utils/plotting.py CHANGED

@@ -238,7 +238,16 @@ class Annotator:
         }
 
     def get_txt_color(self, color=(128, 128, 128), txt_color=(255, 255, 255)):
-        """
+        """
+        Assign text color based on background color.
+
+        Args:
+            color (tuple, optional): The background color of the rectangle for text (B, G, R).
+            txt_color (tuple, optional): The color of the text (R, G, B).
+
+        Returns:
+            txt_color (tuple): Text color for label
+        """
         if color in self.dark_colors:
             return 104, 31, 17
         elif color in self.light_colors:
@@ -544,7 +553,9 @@ class Annotator:
             bbox (tuple): Bounding box coordinates in the format (x_min, y_min, x_max, y_max).
 
         Returns:
-
+            width (float): Width of the bounding box.
+            height (float): Height of the bounding box.
+            area (float): Area enclosed by the bounding box.
         """
         x_min, y_min, x_max, y_max = bbox
         width = x_max - x_min
@@ -584,8 +595,8 @@ class Annotator:
         Displays queue counts on an image centered at the points with customizable font size and colors.
 
         Args:
-            label (str):
-            points (tuple):
+            label (str): Queue counts label.
+            points (tuple): Region points for center point calculation to display text.
             region_color (tuple): RGB queue region color.
             txt_color (tuple): RGB text display color.
         """
@@ -624,13 +635,13 @@ class Annotator:
         Display the bounding boxes labels in parking management app.
 
         Args:
-            im0 (ndarray):
-            text (str):
-            txt_color (tuple):
-            bg_color (tuple):
-            x_center (float): x position center point for bounding box
-            y_center (float): y position center point for bounding box
-            margin (int): gap between text and rectangle for better display
+            im0 (ndarray): Inference image.
+            text (str): Object/class name.
+            txt_color (tuple): Display color for text foreground.
+            bg_color (tuple): Display color for text background.
+            x_center (float): The x position center point for bounding box.
+            y_center (float): The y position center point for bounding box.
+            margin (int): The gap between text and rectangle for better display.
         """
         text_size = cv2.getTextSize(text, 0, fontScale=self.sf, thickness=self.tf)[0]
         text_x = x_center - text_size[0] // 2
@@ -648,11 +659,11 @@ class Annotator:
         Display the overall statistics for parking lots.
 
         Args:
-            im0 (ndarray):
-            text (dict):
-            txt_color (tuple):
-            bg_color (tuple):
-            margin (int):
+            im0 (ndarray): Inference image.
+            text (dict): Labels dictionary.
+            txt_color (tuple): Display color for text foreground.
+            bg_color (tuple): Display color for text background.
+            margin (int): Gap between text and rectangle for better display.
         """
         horizontal_gap = int(im0.shape[1] * 0.02)
         vertical_gap = int(im0.shape[0] * 0.01)
@@ -791,19 +802,52 @@ class Annotator:
         cv2.polylines(self.im, [np.int32([mask])], isClosed=True, color=mask_color, thickness=2)
         text_size, _ = cv2.getTextSize(label, 0, self.sf, self.tf)
 
-        cv2.rectangle(
-            self.im,
-            (int(mask[0][0]) - text_size[0] // 2 - 10, int(mask[0][1]) - text_size[1] - 10),
-            (int(mask[0][0]) + text_size[0] // 2 + 10, int(mask[0][1] + 10)),
-            mask_color,
-            -1,
-        )
-
         if label:
+            cv2.rectangle(
+                self.im,
+                (int(mask[0][0]) - text_size[0] // 2 - 10, int(mask[0][1]) - text_size[1] - 10),
+                (int(mask[0][0]) + text_size[0] // 2 + 10, int(mask[0][1] + 10)),
+                mask_color,
+                -1,
+            )
             cv2.putText(
                 self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
             )
 
+    def sweep_annotator(self, line_x=0, line_y=0, label=None, color=(221, 0, 186), txt_color=(255, 255, 255)):
+        """
+        Function for drawing a sweep annotation line and an optional label.
+
+        Args:
+            line_x (int): The x-coordinate of the sweep line.
+            line_y (int): The y-coordinate limit of the sweep line.
+            label (str, optional): Text label to be drawn in center of sweep line. If None, no label is drawn.
+            color (tuple): RGB color for the line and label background.
+            txt_color (tuple): RGB color for the label text.
+        """
+        # Draw the sweep line
+        cv2.line(self.im, (line_x, 0), (line_x, line_y), color, self.tf * 2)
+
+        # Draw label, if provided
+        if label:
+            (text_width, text_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, self.sf, self.tf)
+            cv2.rectangle(
+                self.im,
+                (line_x - text_width // 2 - 10, line_y // 2 - text_height // 2 - 10),
+                (line_x + text_width // 2 + 10, line_y // 2 + text_height // 2 + 10),
+                color,
+                -1,
+            )
+            cv2.putText(
+                self.im,
+                label,
+                (line_x - text_width // 2, line_y // 2 + text_height // 2),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                self.sf,
+                txt_color,
+                self.tf,
+            )
+
     def plot_distance_and_line(
         self, pixels_distance, centroids, line_color=(104, 31, 17), centroid_color=(255, 0, 255)
     ):
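A hedged usage sketch of the new `sweep_annotator` on a blank frame, assuming an `ultralytics` install that includes this release:

```python
import cv2
import numpy as np
from ultralytics.utils.plotting import Annotator

frame = np.zeros((480, 640, 3), dtype=np.uint8)
annotator = Annotator(frame, line_width=2)
annotator.sweep_annotator(line_x=320, line_y=480, label="frame 42")  # vertical sweep line
cv2.imwrite("sweep.png", annotator.result())
```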
ultralytics/utils/torch_utils.py CHANGED

@@ -301,28 +301,22 @@ def fuse_deconv_and_bn(deconv, bn):
 
 
 def model_info(model, detailed=False, verbose=True, imgsz=640):
-    """
-    Model information.
-
-    imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320].
-    """
+    """Print and return detailed model information layer by layer."""
     if not verbose:
         return
     n_p = get_num_params(model)  # number of parameters
     n_g = get_num_gradients(model)  # number of gradients
     n_l = len(list(model.modules()))  # number of layers
     if detailed:
-        LOGGER.info(
-            f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}"
-        )
+        LOGGER.info(f"{'layer':>5}{'name':>40}{'gradient':>10}{'parameters':>12}{'shape':>20}{'mu':>10}{'sigma':>10}")
         for i, (name, p) in enumerate(model.named_parameters()):
             name = name.replace("module_list.", "")
             LOGGER.info(
-                "
-
+                f"{i:>5g}{name:>40s}{p.requires_grad!r:>10}{p.numel():>12g}{str(list(p.shape)):>20s}"
+                f"{p.mean():>10.3g}{p.std():>10.3g}{str(p.dtype):>15s}"
             )
 
-    flops = get_flops(model, imgsz)
+    flops = get_flops(model, imgsz)  # imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320]
     fused = " (fused)" if getattr(model, "is_fused", lambda: False)() else ""
     fs = f", {flops:.1f} GFLOPs" if flops else ""
     yaml_file = getattr(model, "yaml_file", "") or getattr(model, "yaml", {}).get("yaml_file", "")
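A hedged usage sketch of the revised detailed printout, whose per-parameter rows now end with a dtype column (downloads `yolo11n.pt` on first use):

```python
from ultralytics import YOLO
from ultralytics.utils.torch_utils import model_info

model = YOLO("yolo11n.pt")
model_info(model.model, detailed=True, imgsz=[640, 320])  # imgsz may be an int or a list
```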
{ultralytics-8.3.37.dist-info → ultralytics-8.3.39.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.37
+Version: 8.3.39
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>

@@ -115,7 +115,9 @@ We hope that the resources here will help you get the most out of YOLO. Please b
 
 To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
 
-<
+<a href="https://docs.ultralytics.com/models/yolo11/" target="_blank">
+  <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/yolo/performance-comparison.png" alt="YOLO11 performance plots">
+</a>
 
 <div align="center">
   <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>

@@ -203,11 +205,13 @@ See YOLO [Python Docs](https://docs.ultralytics.com/usage/python/) for more exam
 
 ## <div align="center">Models</div>
 
-YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models.
+YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models. All [Models](https://docs.ultralytics.com/models/) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
 
-<
-
-
+<a href="https://docs.ultralytics.com/tasks/" target="_blank">
+  <img width="100%" src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-tasks-banner.avif" alt="Ultralytics YOLO supported tasks">
+</a>
+<br>
+<br>
 
 <details open><summary>Detection (COCO)</summary>
 

@@ -298,9 +302,9 @@ See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with
 
 Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/), [Comet](https://bit.ly/yolov8-readme-comet), [Roboflow](https://roboflow.com/?ref=ultralytics) and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.
 
-<br>
 <a href="https://www.ultralytics.com/hub" target="_blank">
-  <img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics active learning integrations"
+  <img width="100%" src="https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png" alt="Ultralytics active learning integrations">
+</a>
 <br>
 <br>