ultralytics 8.3.88__py3-none-any.whl → 8.3.90__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/conftest.py +2 -2
- tests/test_cli.py +13 -11
- tests/test_cuda.py +10 -1
- tests/test_integrations.py +1 -5
- tests/test_python.py +16 -16
- tests/test_solutions.py +9 -9
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +3 -1
- ultralytics/cfg/models/11/yolo11-cls.yaml +5 -5
- ultralytics/cfg/models/11/yolo11-obb.yaml +5 -5
- ultralytics/cfg/models/11/yolo11-pose.yaml +5 -5
- ultralytics/cfg/models/11/yolo11-seg.yaml +5 -5
- ultralytics/cfg/models/11/yolo11.yaml +5 -5
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +5 -5
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +5 -5
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +5 -5
- ultralytics/cfg/models/v8/yolov8-obb.yaml +5 -5
- ultralytics/cfg/models/v8/yolov8-p6.yaml +5 -5
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +5 -5
- ultralytics/cfg/models/v8/yolov8-world.yaml +5 -5
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +5 -5
- ultralytics/cfg/models/v8/yolov8.yaml +5 -5
- ultralytics/cfg/models/v9/yolov9c-seg.yaml +1 -1
- ultralytics/cfg/models/v9/yolov9c.yaml +1 -1
- ultralytics/cfg/models/v9/yolov9e-seg.yaml +1 -1
- ultralytics/cfg/models/v9/yolov9e.yaml +1 -1
- ultralytics/cfg/models/v9/yolov9m.yaml +1 -1
- ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
- ultralytics/cfg/models/v9/yolov9t.yaml +1 -1
- ultralytics/data/annotator.py +9 -14
- ultralytics/data/base.py +125 -39
- ultralytics/data/build.py +63 -24
- ultralytics/data/converter.py +34 -33
- ultralytics/data/dataset.py +207 -53
- ultralytics/data/loaders.py +1 -0
- ultralytics/data/split_dota.py +39 -12
- ultralytics/data/utils.py +33 -47
- ultralytics/engine/exporter.py +19 -17
- ultralytics/engine/model.py +69 -90
- ultralytics/engine/predictor.py +106 -21
- ultralytics/engine/trainer.py +32 -23
- ultralytics/engine/tuner.py +31 -38
- ultralytics/engine/validator.py +75 -41
- ultralytics/hub/__init__.py +21 -26
- ultralytics/hub/auth.py +9 -12
- ultralytics/hub/session.py +76 -21
- ultralytics/hub/utils.py +19 -17
- ultralytics/models/fastsam/model.py +23 -17
- ultralytics/models/fastsam/predict.py +36 -16
- ultralytics/models/fastsam/utils.py +5 -5
- ultralytics/models/fastsam/val.py +6 -6
- ultralytics/models/nas/model.py +29 -24
- ultralytics/models/nas/predict.py +14 -11
- ultralytics/models/nas/val.py +11 -13
- ultralytics/models/rtdetr/model.py +20 -11
- ultralytics/models/rtdetr/predict.py +21 -21
- ultralytics/models/rtdetr/train.py +25 -24
- ultralytics/models/rtdetr/val.py +47 -14
- ultralytics/models/sam/__init__.py +1 -1
- ultralytics/models/sam/amg.py +50 -4
- ultralytics/models/sam/model.py +8 -14
- ultralytics/models/sam/modules/decoders.py +18 -21
- ultralytics/models/sam/modules/encoders.py +25 -46
- ultralytics/models/sam/modules/memory_attention.py +19 -15
- ultralytics/models/sam/modules/sam.py +18 -25
- ultralytics/models/sam/modules/tiny_encoder.py +19 -29
- ultralytics/models/sam/modules/transformer.py +35 -57
- ultralytics/models/sam/modules/utils.py +15 -15
- ultralytics/models/sam/predict.py +0 -3
- ultralytics/models/utils/loss.py +87 -36
- ultralytics/models/utils/ops.py +26 -31
- ultralytics/models/yolo/classify/predict.py +30 -12
- ultralytics/models/yolo/classify/train.py +83 -19
- ultralytics/models/yolo/classify/val.py +45 -23
- ultralytics/models/yolo/detect/predict.py +29 -19
- ultralytics/models/yolo/detect/train.py +90 -23
- ultralytics/models/yolo/detect/val.py +150 -29
- ultralytics/models/yolo/model.py +1 -2
- ultralytics/models/yolo/obb/predict.py +18 -13
- ultralytics/models/yolo/obb/train.py +12 -8
- ultralytics/models/yolo/obb/val.py +35 -22
- ultralytics/models/yolo/pose/predict.py +28 -15
- ultralytics/models/yolo/pose/train.py +21 -8
- ultralytics/models/yolo/pose/val.py +51 -31
- ultralytics/models/yolo/segment/predict.py +27 -16
- ultralytics/models/yolo/segment/train.py +11 -8
- ultralytics/models/yolo/segment/val.py +110 -29
- ultralytics/models/yolo/world/train.py +43 -16
- ultralytics/models/yolo/world/train_world.py +61 -36
- ultralytics/nn/autobackend.py +28 -14
- ultralytics/nn/modules/__init__.py +12 -12
- ultralytics/nn/modules/activation.py +12 -3
- ultralytics/nn/modules/block.py +587 -84
- ultralytics/nn/modules/conv.py +418 -54
- ultralytics/nn/modules/head.py +3 -4
- ultralytics/nn/modules/transformer.py +320 -34
- ultralytics/nn/modules/utils.py +17 -3
- ultralytics/nn/tasks.py +226 -79
- ultralytics/solutions/ai_gym.py +2 -2
- ultralytics/solutions/analytics.py +4 -4
- ultralytics/solutions/heatmap.py +4 -4
- ultralytics/solutions/instance_segmentation.py +10 -4
- ultralytics/solutions/object_blurrer.py +2 -2
- ultralytics/solutions/object_counter.py +2 -2
- ultralytics/solutions/object_cropper.py +2 -2
- ultralytics/solutions/parking_management.py +9 -9
- ultralytics/solutions/queue_management.py +1 -1
- ultralytics/solutions/region_counter.py +2 -2
- ultralytics/solutions/security_alarm.py +7 -7
- ultralytics/solutions/solutions.py +7 -4
- ultralytics/solutions/speed_estimation.py +2 -2
- ultralytics/solutions/streamlit_inference.py +6 -6
- ultralytics/solutions/trackzone.py +9 -2
- ultralytics/solutions/vision_eye.py +4 -4
- ultralytics/trackers/basetrack.py +1 -1
- ultralytics/trackers/bot_sort.py +23 -22
- ultralytics/trackers/byte_tracker.py +4 -4
- ultralytics/trackers/track.py +2 -1
- ultralytics/trackers/utils/gmc.py +26 -27
- ultralytics/trackers/utils/kalman_filter.py +31 -29
- ultralytics/trackers/utils/matching.py +7 -7
- ultralytics/utils/__init__.py +37 -35
- ultralytics/utils/autobatch.py +5 -5
- ultralytics/utils/benchmarks.py +111 -18
- ultralytics/utils/callbacks/base.py +3 -3
- ultralytics/utils/callbacks/clearml.py +11 -11
- ultralytics/utils/callbacks/comet.py +35 -22
- ultralytics/utils/callbacks/dvc.py +11 -10
- ultralytics/utils/callbacks/hub.py +8 -8
- ultralytics/utils/callbacks/mlflow.py +1 -1
- ultralytics/utils/callbacks/neptune.py +12 -10
- ultralytics/utils/callbacks/raytune.py +1 -1
- ultralytics/utils/callbacks/tensorboard.py +6 -6
- ultralytics/utils/callbacks/wb.py +16 -16
- ultralytics/utils/checks.py +139 -68
- ultralytics/utils/dist.py +15 -2
- ultralytics/utils/downloads.py +37 -56
- ultralytics/utils/files.py +12 -13
- ultralytics/utils/instance.py +117 -52
- ultralytics/utils/loss.py +28 -33
- ultralytics/utils/metrics.py +246 -181
- ultralytics/utils/ops.py +65 -61
- ultralytics/utils/patches.py +8 -6
- ultralytics/utils/plotting.py +72 -59
- ultralytics/utils/tal.py +88 -57
- ultralytics/utils/torch_utils.py +202 -64
- ultralytics/utils/triton.py +13 -3
- ultralytics/utils/tuner.py +13 -25
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/METADATA +2 -2
- ultralytics-8.3.90.dist-info/RECORD +250 -0
- ultralytics-8.3.88.dist-info/RECORD +0 -250
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.90.dist-info}/top_level.txt +0 -0
ultralytics/models/yolo/world/train_world.py
CHANGED
@@ -9,49 +9,59 @@ from ultralytics.utils.torch_utils import de_parallel
 
 
 class WorldTrainerFromScratch(WorldTrainer):
     """
-    A class extending the WorldTrainer
-    …
+    A class extending the WorldTrainer for training a world model from scratch on open-set datasets.
+
+    This trainer specializes in handling mixed datasets including both object detection and grounding datasets,
+    supporting training YOLO-World models with combined vision-language capabilities.
+
+    Attributes:
+        cfg (Dict): Configuration dictionary with default parameters for model training.
+        overrides (Dict): Dictionary of parameter overrides to customize the configuration.
+        _callbacks (List): List of callback functions to be executed during different stages of training.
+
+    Examples:
+        >>> from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
+        >>> from ultralytics import YOLOWorld
+        >>> data = dict(
+        ...     train=dict(
+        ...         yolo_data=["Objects365.yaml"],
+        ...         grounding_data=[
+        ...             dict(
+        ...                 img_path="../datasets/flickr30k/images",
+        ...                 json_file="../datasets/flickr30k/final_flickr_separateGT_train.json",
+        ...             ),
+        ...             dict(
+        ...                 img_path="../datasets/GQA/images",
+        ...                 json_file="../datasets/GQA/final_mixed_train_no_coco.json",
+        ...             ),
+        ...         ],
+        ...     ),
+        ...     val=dict(yolo_data=["lvis.yaml"]),
+        ... )
+        >>> model = YOLOWorld("yolov8s-worldv2.yaml")
+        >>> model.train(data=data, trainer=WorldTrainerFromScratch)
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
-        """Initialize a
+        """Initialize a WorldTrainerFromScratch object with given configuration and callbacks."""
         if overrides is None:
             overrides = {}
         super().__init__(cfg, overrides, _callbacks)
 
     def build_dataset(self, img_path, mode="train", batch=None):
         """
-        Build YOLO Dataset.
+        Build YOLO Dataset for training or validation.
+
+        This method constructs appropriate datasets based on the mode and input paths, handling both
+        standard YOLO datasets and grounding datasets with different formats.
 
         Args:
-            img_path (List[str] | str): Path to the folder containing images.
-            mode (str):
-            batch (int, optional): Size of batches,
+            img_path (List[str] | str): Path to the folder containing images or list of paths.
+            mode (str): 'train' mode or 'val' mode, allowing customized augmentations for each mode.
+            batch (int, optional): Size of batches, used for rectangular training/validation.
+
+        Returns:
+            (YOLOConcatDataset | Dataset): The constructed dataset for training or validation.
         """
         gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
         if mode != "train":
@@ -66,9 +76,17 @@ class WorldTrainerFromScratch(WorldTrainer):
 
     def get_dataset(self):
         """
-        Get train
+        Get train and validation paths from data dictionary.
+
+        Processes the data configuration to extract paths for training and validation datasets,
+        handling both YOLO detection datasets and grounding datasets.
 
-        Returns
+        Returns:
+            (str): Train dataset path.
+            (str): Validation dataset path.
+
+        Raises:
+            AssertionError: If train or validation datasets are not found, or if validation has multiple datasets.
         """
         final_data = {}
         data_yaml = self.args.data
@@ -98,11 +116,18 @@ class WorldTrainerFromScratch(WorldTrainer):
         return final_data["train"], final_data["val"][0]
 
     def plot_training_labels(self):
-        """
+        """Do not plot labels for YOLO-World training."""
         pass
 
     def final_eval(self):
-        """
+        """
+        Perform final evaluation and validation for the YOLO-World model.
+
+        Configures the validator with appropriate dataset and split information before running evaluation.
+
+        Returns:
+            (Dict): Dictionary containing evaluation metrics and results.
+        """
         val = self.args.data["val"]["yolo_data"][0]
         self.validator.args.data = val
         self.validator.args.split = "minival" if isinstance(val, str) and "lvis" in val else "val"
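The expanded class docstring above spells out the mixed detection-plus-grounding data layout this trainer expects. The same example as a standalone script, as a sketch only: the dataset YAMLs and annotation paths are the placeholders from the docstring and must exist locally.

```python
from ultralytics import YOLOWorld
from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch

# Mixed open-set training data: standard YOLO detection YAMLs plus grounding datasets
# described by an image folder and a COCO-style JSON annotation file.
data = dict(
    train=dict(
        yolo_data=["Objects365.yaml"],
        grounding_data=[
            dict(
                img_path="../datasets/flickr30k/images",
                json_file="../datasets/flickr30k/final_flickr_separateGT_train.json",
            ),
        ],
    ),
    val=dict(yolo_data=["lvis.yaml"]),  # get_dataset() asserts a single validation dataset
)

model = YOLOWorld("yolov8s-worldv2.yaml")
model.train(data=data, trainer=WorldTrainerFromScratch)
```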
ultralytics/nn/autobackend.py
CHANGED
@@ -19,11 +19,7 @@ from ultralytics.utils.downloads import attempt_download_asset, is_url
 
 
 def check_class_names(names):
-    """
-    Check class names.
-
-    Map imagenet class codes to human-readable names if required. Convert lists to dicts.
-    """
+    """Check class names and convert to dict format if needed."""
     if isinstance(names, list):  # names is a list
         names = dict(enumerate(names))  # convert to dict
     if isinstance(names, dict):
@@ -78,8 +74,23 @@ class AutoBackend(nn.Module):
            | IMX                   | *_imx_model/      |
            | RKNN                  | *_rknn_model/     |
 
-    …
+    Attributes:
+        model (torch.nn.Module): The loaded YOLO model.
+        device (torch.device): The device (CPU or GPU) on which the model is loaded.
+        task (str): The type of task the model performs (detect, segment, classify, pose).
+        names (Dict): A dictionary of class names that the model can detect.
+        stride (int): The model stride, typically 32 for YOLO models.
+        fp16 (bool): Whether the model uses half-precision (FP16) inference.
+
+    Methods:
+        forward: Run inference on an input image.
+        from_numpy: Convert numpy array to tensor.
+        warmup: Warm up the model with a dummy input.
+        _model_type: Determine the model type from file path.
+
+    Examples:
+        >>> model = AutoBackend(weights="yolov8n.pt", device="cuda")
+        >>> results = model(img)
     """
 
     @torch.no_grad()
@@ -101,7 +112,7 @@ class AutoBackend(nn.Module):
             weights (str | torch.nn.Module): Path to the model weights file or a module instance. Defaults to 'yolo11n.pt'.
             device (torch.device): Device to run the model on. Defaults to CPU.
             dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False.
-            data (str | Path | optional): Path to the additional data.yaml file containing class names.
+            data (str | Path | optional): Path to the additional data.yaml file containing class names.
             fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False.
             batch (int): Batch-size to assume for inference.
             fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True.
@@ -539,12 +550,12 @@ class AutoBackend(nn.Module):
 
         Args:
             im (torch.Tensor): The image tensor to perform inference on.
-            augment (bool):
-            visualize (bool):
-            embed (
+            augment (bool): Whether to perform data augmentation during inference. Defaults to False.
+            visualize (bool): Whether to visualize the output predictions. Defaults to False.
+            embed (List, optional): A list of feature vectors/embeddings to return.
 
         Returns:
-            (
+            (torch.Tensor | List[torch.Tensor]): The raw output tensor(s) from the model.
         """
         b, ch, h, w = im.shape  # batch, channel, height, width
         if self.fp16 and im.dtype != torch.float16:
@@ -776,10 +787,13 @@ class AutoBackend(nn.Module):
     def _model_type(p="path/to/model.pt"):
         """
         Takes a path to a model file and returns the model type. Possibles types are pt, jit, onnx, xml, engine, coreml,
-        saved_model, pb, tflite, edgetpu, tfjs, ncnn or paddle.
+        saved_model, pb, tflite, edgetpu, tfjs, ncnn, mnn, imx or paddle.
 
         Args:
-            p (str):
+            p (str): Path to the model file. Defaults to path/to/model.pt
+
+        Returns:
+            (List[bool]): List of booleans indicating the model type.
 
         Examples:
             >>> model = AutoBackend(weights="path/to/model.onnx")
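The new AutoBackend class docstring enumerates the attributes (names, stride, fp16, ...) and methods that downstream predictors rely on. A minimal inference sketch following its Examples block, assuming a local yolov8n.pt and falling back to CPU when CUDA is unavailable:

```python
import torch
from ultralytics.nn.autobackend import AutoBackend

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = AutoBackend(weights="yolov8n.pt", device=device, fp16=False)

model.warmup(imgsz=(1, 3, 640, 640))              # dummy forward pass, per the Methods list
im = torch.zeros(1, 3, 640, 640, device=device)   # BCHW float input in [0, 1]
y = model(im)                                     # raw output tensor(s), per the Returns note

print(model.stride, model.names[0])
```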
ultralytics/nn/modules/__init__.py
CHANGED
@@ -2,19 +2,19 @@
 """
 Ultralytics modules.
 
-…
-    ```python
-    from ultralytics.nn.modules import *
-    import torch
-    import os
+This module provides access to various neural network components used in Ultralytics models, including convolution blocks,
+attention mechanisms, transformer components, and detection/segmentation heads.
 
-…
+Examples:
+    Visualize a module with Netron.
+    >>> from ultralytics.nn.modules import *
+    >>> import torch
+    >>> import os
+    >>> x = torch.ones(1, 128, 40, 40)
+    >>> m = Conv(128, 128)
+    >>> f = f"{m._get_name()}.onnx"
+    >>> torch.onnx.export(m, x, f)
+    >>> os.system(f"onnxslim {f} {f} && open {f}")  # pip install onnxslim
 """
 
 from .block import (
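The rewritten module docstring turns the old fenced snippet into doctest form. The same Netron workflow as a plain script, skipping the optional onnxslim/open step at the end (a sketch; requires the onnx package for export):

```python
import torch
from ultralytics.nn.modules import Conv

m = Conv(128, 128).eval()               # standard Conv block (Conv2d + BatchNorm2d + SiLU)
x = torch.ones(1, 128, 40, 40)          # dummy NCHW input from the docstring example
torch.onnx.export(m, x, f"{m._get_name()}.onnx")  # open the exported file in Netron
```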
ultralytics/nn/modules/activation.py
CHANGED
@@ -6,10 +6,19 @@ import torch.nn as nn
 
 
 class AGLU(nn.Module):
-    """
+    """
+    Unified activation function module from https://github.com/kostas1515/AGLU.
+
+    This class implements a parameterized activation function with learnable parameters lambda and kappa.
+
+    Attributes:
+        act (nn.Softplus): Softplus activation function with negative beta.
+        lambd (nn.Parameter): Learnable lambda parameter initialized with uniform distribution.
+        kappa (nn.Parameter): Learnable kappa parameter initialized with uniform distribution.
+    """
 
     def __init__(self, device=None, dtype=None) -> None:
-        """Initialize the Unified activation function."""
+        """Initialize the Unified activation function with learnable parameters."""
         super().__init__()
         self.act = nn.Softplus(beta=-1.0)
         self.lambd = nn.Parameter(nn.init.uniform_(torch.empty(1, device=device, dtype=dtype)))  # lambda parameter
@@ -17,5 +26,5 @@ class AGLU(nn.Module):
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         """Compute the forward pass of the Unified activation function."""
-        lam = torch.clamp(self.lambd, min=0.0001)
+        lam = torch.clamp(self.lambd, min=0.0001)  # Clamp lambda to avoid division by zero
         return torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
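The forward pass above computes exp((1 / lam) * softplus(kappa * x - log(lam))) with softplus beta = -1 and lam clamped away from zero. Since softplus with beta = -1 equals -log(1 + exp(-z)), the expression can be written out by hand and checked against the module; a quick numerical sketch (AGLU initializes lambd and kappa randomly, so they are read back from the instance):

```python
import torch
from ultralytics.nn.modules.activation import AGLU

torch.manual_seed(0)
m = AGLU()
x = torch.randn(8)

lam = torch.clamp(m.lambd, min=0.0001)                         # same clamp as in the diff
z = m.kappa * x - torch.log(lam)
manual = torch.exp((1 / lam) * (-torch.log1p(torch.exp(-z))))  # Softplus(beta=-1) written out

print(torch.allclose(m(x), manual))  # expected: True
```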