ultralytics-opencv-headless 8.4.1-py3-none-any.whl → 8.4.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_exports.py +0 -2
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +19 -21
- ultralytics/data/annotator.py +2 -2
- ultralytics/data/converter.py +57 -38
- ultralytics/engine/exporter.py +22 -22
- ultralytics/engine/model.py +33 -33
- ultralytics/engine/predictor.py +17 -17
- ultralytics/engine/results.py +14 -12
- ultralytics/engine/trainer.py +27 -22
- ultralytics/engine/tuner.py +4 -4
- ultralytics/engine/validator.py +16 -16
- ultralytics/models/yolo/classify/predict.py +1 -1
- ultralytics/models/yolo/classify/train.py +1 -1
- ultralytics/models/yolo/classify/val.py +1 -1
- ultralytics/models/yolo/detect/predict.py +2 -2
- ultralytics/models/yolo/detect/train.py +1 -1
- ultralytics/models/yolo/detect/val.py +1 -1
- ultralytics/models/yolo/model.py +7 -7
- ultralytics/models/yolo/obb/predict.py +1 -1
- ultralytics/models/yolo/obb/train.py +2 -2
- ultralytics/models/yolo/obb/val.py +1 -1
- ultralytics/models/yolo/pose/predict.py +1 -1
- ultralytics/models/yolo/pose/train.py +4 -2
- ultralytics/models/yolo/pose/val.py +1 -1
- ultralytics/models/yolo/segment/predict.py +2 -2
- ultralytics/models/yolo/segment/train.py +3 -3
- ultralytics/models/yolo/segment/val.py +1 -1
- ultralytics/nn/autobackend.py +2 -2
- ultralytics/nn/modules/head.py +1 -1
- ultralytics/nn/tasks.py +12 -12
- ultralytics/solutions/ai_gym.py +3 -3
- ultralytics/solutions/config.py +1 -1
- ultralytics/solutions/heatmap.py +1 -1
- ultralytics/solutions/instance_segmentation.py +2 -2
- ultralytics/solutions/parking_management.py +1 -1
- ultralytics/solutions/solutions.py +2 -2
- ultralytics/trackers/track.py +1 -1
- ultralytics/utils/__init__.py +8 -8
- ultralytics/utils/benchmarks.py +23 -23
- ultralytics/utils/callbacks/platform.py +11 -9
- ultralytics/utils/checks.py +6 -6
- ultralytics/utils/downloads.py +2 -2
- ultralytics/utils/export/imx.py +3 -8
- ultralytics/utils/files.py +2 -2
- ultralytics/utils/loss.py +3 -3
- ultralytics/utils/tuner.py +2 -2
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/METADATA +36 -36
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/RECORD +53 -53
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/WHEEL +0 -0
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/entry_points.txt +0 -0
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/licenses/LICENSE +0 -0
- {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/top_level.txt +0 -0
ultralytics/models/yolo/model.py
CHANGED
@@ -40,24 +40,24 @@ class YOLO(Model):
        task_map: Map tasks to their corresponding model, trainer, validator, and predictor classes.

    Examples:
-        Load a pretrained
-        >>> model = YOLO("
+        Load a pretrained YOLO26n detection model
+        >>> model = YOLO("yolo26n.pt")

-        Load a pretrained
-        >>> model = YOLO("
+        Load a pretrained YOLO26n segmentation model
+        >>> model = YOLO("yolo26n-seg.pt")

        Initialize from a YAML configuration
-        >>> model = YOLO("
+        >>> model = YOLO("yolo26n.yaml")
    """

-    def __init__(self, model: str | Path = "
+    def __init__(self, model: str | Path = "yolo26n.pt", task: str | None = None, verbose: bool = False):
        """Initialize a YOLO model.

        This constructor initializes a YOLO model, automatically switching to specialized model types (YOLOWorld or
        YOLOE) based on the model filename.

        Args:
-            model (str | Path): Model name or path to model file, i.e. '
+            model (str | Path): Model name or path to model file, i.e. 'yolo26n.pt', 'yolo26n.yaml'.
            task (str, optional): YOLO task specification, i.e. 'detect', 'segment', 'classify', 'pose', 'obb'. Defaults
                to auto-detection based on model.
            verbose (bool): Display model info on load.
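Note: the hunk above updates the documented default weights for the YOLO constructor. A minimal usage sketch based only on the new docstring examples (the weight file is assumed to be available locally or auto-downloadable, and "image.jpg" is a placeholder path):

    from ultralytics import YOLO

    model = YOLO("yolo26n.pt")   # new documented default; task is auto-detected from the weights
    print(model.task)            # e.g. "detect"
    results = model("image.jpg")  # run inference on a placeholder image path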
ultralytics/models/yolo/obb/predict.py
CHANGED
@@ -20,7 +20,7 @@ class OBBPredictor(DetectionPredictor):
    Examples:
        >>> from ultralytics.utils import ASSETS
        >>> from ultralytics.models.yolo.obb import OBBPredictor
-        >>> args = dict(model="
+        >>> args = dict(model="yolo26n-obb.pt", source=ASSETS)
        >>> predictor = OBBPredictor(overrides=args)
        >>> predictor.predict_cli()
    """
ultralytics/models/yolo/obb/train.py
CHANGED
@@ -27,7 +27,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):

    Examples:
        >>> from ultralytics.models.yolo.obb import OBBTrainer
-        >>> args = dict(model="
+        >>> args = dict(model="yolo26n-obb.pt", data="dota8.yaml", epochs=3)
        >>> trainer = OBBTrainer(overrides=args)
        >>> trainer.train()
    """
@@ -63,7 +63,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):

        Examples:
            >>> trainer = OBBTrainer()
-            >>> model = trainer.get_model(cfg="
+            >>> model = trainer.get_model(cfg="yolo26n-obb.yaml", weights="yolo26n-obb.pt")
        """
        model = OBBModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
        if weights:
ultralytics/models/yolo/obb/val.py
CHANGED
@@ -38,7 +38,7 @@ class OBBValidator(DetectionValidator):

    Examples:
        >>> from ultralytics.models.yolo.obb import OBBValidator
-        >>> args = dict(model="
+        >>> args = dict(model="yolo26n-obb.pt", data="dota8.yaml")
        >>> validator = OBBValidator(args=args)
        >>> validator(model=args["model"])
    """
ultralytics/models/yolo/pose/predict.py
CHANGED
@@ -20,7 +20,7 @@ class PosePredictor(DetectionPredictor):
    Examples:
        >>> from ultralytics.utils import ASSETS
        >>> from ultralytics.models.yolo.pose import PosePredictor
-        >>> args = dict(model="
+        >>> args = dict(model="yolo26n-pose.pt", source=ASSETS)
        >>> predictor = PosePredictor(overrides=args)
        >>> predictor.predict_cli()
    """
ultralytics/models/yolo/pose/train.py
CHANGED
@@ -32,7 +32,7 @@ class PoseTrainer(yolo.detect.DetectionTrainer):

    Examples:
        >>> from ultralytics.models.yolo.pose import PoseTrainer
-        >>> args = dict(model="
+        >>> args = dict(model="yolo26n-pose.pt", data="coco8-pose.yaml", epochs=3)
        >>> trainer = PoseTrainer(overrides=args)
        >>> trainer.train()
    """
@@ -90,7 +90,9 @@ class PoseTrainer(yolo.detect.DetectionTrainer):

    def get_validator(self):
        """Return an instance of the PoseValidator class for validation."""
-        self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss"
+        self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss"
+        if getattr(self.model.model[-1], "flow_model", None) is not None:
+            self.loss_names += ("rle_loss",)
        return yolo.pose.PoseValidator(
            self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
        )
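Note: the second hunk above only extends the tuple of reported loss names when the pose head exposes a flow_model attribute. A minimal, self-contained sketch of that conditional (the Head class here is a hypothetical stand-in for self.model.model[-1]):

    class Head:
        flow_model = object()  # hypothetical: attribute present -> an extra "rle_loss" column is reported

    loss_names = ("box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss")
    if getattr(Head, "flow_model", None) is not None:
        loss_names += ("rle_loss",)
    print(loss_names)  # ends with 'rle_loss' only when flow_model exists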
ultralytics/models/yolo/pose/val.py
CHANGED
@@ -42,7 +42,7 @@ class PoseValidator(DetectionValidator):

    Examples:
        >>> from ultralytics.models.yolo.pose import PoseValidator
-        >>> args = dict(model="
+        >>> args = dict(model="yolo26n-pose.pt", data="coco8-pose.yaml")
        >>> validator = PoseValidator(args=args)
        >>> validator()
ultralytics/models/yolo/segment/predict.py
CHANGED
@@ -24,7 +24,7 @@ class SegmentationPredictor(DetectionPredictor):
    Examples:
        >>> from ultralytics.utils import ASSETS
        >>> from ultralytics.models.yolo.segment import SegmentationPredictor
-        >>> args = dict(model="
+        >>> args = dict(model="yolo26n-seg.pt", source=ASSETS)
        >>> predictor = SegmentationPredictor(overrides=args)
        >>> predictor.predict_cli()
    """
@@ -56,7 +56,7 @@ class SegmentationPredictor(DetectionPredictor):
            Results object includes both bounding boxes and segmentation masks.

        Examples:
-            >>> predictor = SegmentationPredictor(overrides=dict(model="
+            >>> predictor = SegmentationPredictor(overrides=dict(model="yolo26n-seg.pt"))
            >>> results = predictor.postprocess(preds, img, orig_img)
        """
        # Extract protos - tuple if PyTorch model or array if exported
ultralytics/models/yolo/segment/train.py
CHANGED
@@ -21,7 +21,7 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):

    Examples:
        >>> from ultralytics.models.yolo.segment import SegmentationTrainer
-        >>> args = dict(model="
+        >>> args = dict(model="yolo26n-seg.pt", data="coco8-seg.yaml", epochs=3)
        >>> trainer = SegmentationTrainer(overrides=args)
        >>> trainer.train()
    """
@@ -52,8 +52,8 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):

        Examples:
            >>> trainer = SegmentationTrainer()
-            >>> model = trainer.get_model(cfg="
-            >>> model = trainer.get_model(weights="
+            >>> model = trainer.get_model(cfg="yolo26n-seg.yaml")
+            >>> model = trainer.get_model(weights="yolo26n-seg.pt", verbose=False)
        """
        model = SegmentationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
        if weights:
ultralytics/models/yolo/segment/val.py
CHANGED
@@ -30,7 +30,7 @@ class SegmentationValidator(DetectionValidator):

    Examples:
        >>> from ultralytics.models.yolo.segment import SegmentationValidator
-        >>> args = dict(model="
+        >>> args = dict(model="yolo26n-seg.pt", data="coco8-seg.yaml")
        >>> validator = SegmentationValidator(args=args)
        >>> validator()
    """
ultralytics/nn/autobackend.py
CHANGED
@@ -132,14 +132,14 @@ class AutoBackend(nn.Module):
        _model_type: Determine the model type from file path.

    Examples:
-        >>> model = AutoBackend(model="
+        >>> model = AutoBackend(model="yolo26n.pt", device="cuda")
        >>> results = model(img)
    """

    @torch.no_grad()
    def __init__(
        self,
-        model: str | torch.nn.Module = "
+        model: str | torch.nn.Module = "yolo26n.pt",
        device: torch.device = torch.device("cpu"),
        dnn: bool = False,
        data: str | Path | None = None,
ultralytics/nn/modules/head.py
CHANGED
@@ -170,7 +170,7 @@ class Detect(nn.Module):
    def _get_decode_boxes(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
        """Get decoded boxes based on anchors and strides."""
        shape = x["feats"][0].shape  # BCHW
-        if self.
+        if self.dynamic or self.shape != shape:
            self.anchors, self.strides = (a.transpose(0, 1) for a in make_anchors(x["feats"], self.stride, 0.5))
            self.shape = shape
ultralytics/nn/tasks.py
CHANGED
@@ -361,11 +361,11 @@ class DetectionModel(BaseModel):

    Examples:
        Initialize a detection model
-        >>> model = DetectionModel("
+        >>> model = DetectionModel("yolo26n.yaml", ch=3, nc=80)
        >>> results = model.predict(image_tensor)
    """

-    def __init__(self, cfg="
+    def __init__(self, cfg="yolo26n.yaml", ch=3, nc=None, verbose=True):
        """Initialize the YOLO detection model with the given config and parameters.

        Args:
@@ -506,11 +506,11 @@ class OBBModel(DetectionModel):

    Examples:
        Initialize an OBB model
-        >>> model = OBBModel("
+        >>> model = OBBModel("yolo26n-obb.yaml", ch=3, nc=80)
        >>> results = model.predict(image_tensor)
    """

-    def __init__(self, cfg="
+    def __init__(self, cfg="yolo26n-obb.yaml", ch=3, nc=None, verbose=True):
        """Initialize YOLO OBB model with given config and parameters.

        Args:
@@ -538,11 +538,11 @@ class SegmentationModel(DetectionModel):

    Examples:
        Initialize a segmentation model
-        >>> model = SegmentationModel("
+        >>> model = SegmentationModel("yolo26n-seg.yaml", ch=3, nc=80)
        >>> results = model.predict(image_tensor)
    """

-    def __init__(self, cfg="
+    def __init__(self, cfg="yolo26n-seg.yaml", ch=3, nc=None, verbose=True):
        """Initialize Ultralytics YOLO segmentation model with given config and parameters.

        Args:
@@ -573,11 +573,11 @@ class PoseModel(DetectionModel):

    Examples:
        Initialize a pose model
-        >>> model = PoseModel("
+        >>> model = PoseModel("yolo26n-pose.yaml", ch=3, nc=1, data_kpt_shape=(17, 3))
        >>> results = model.predict(image_tensor)
    """

-    def __init__(self, cfg="
+    def __init__(self, cfg="yolo26n-pose.yaml", ch=3, nc=None, data_kpt_shape=(None, None), verbose=True):
        """Initialize Ultralytics YOLO Pose model.

        Args:
@@ -619,11 +619,11 @@ class ClassificationModel(BaseModel):

    Examples:
        Initialize a classification model
-        >>> model = ClassificationModel("
+        >>> model = ClassificationModel("yolo26n-cls.yaml", ch=3, nc=1000)
        >>> results = model.predict(image_tensor)
    """

-    def __init__(self, cfg="
+    def __init__(self, cfg="yolo26n-cls.yaml", ch=3, nc=None, verbose=True):
        """Initialize ClassificationModel with YAML, channels, number of classes, verbose flag.

        Args:
@@ -1444,7 +1444,7 @@ def torch_safe_load(weight, safe_only=False):
                    f"with https://github.com/ultralytics/yolov5.\nThis model is NOT forwards compatible with "
                    f"YOLOv8 at https://github.com/ultralytics/ultralytics."
                    f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
-                    f"run a command with an official Ultralytics model, i.e. 'yolo predict model=
+                    f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo26n.pt'"
                )
            ) from e
        elif e.name == "numpy._core":
@@ -1457,7 +1457,7 @@ def torch_safe_load(weight, safe_only=False):
                f"{weight} appears to require '{e.name}', which is not in Ultralytics requirements."
                f"\nAutoInstall will run now for '{e.name}' but this feature will be removed in the future."
                f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
-                f"run a command with an official Ultralytics model, i.e. 'yolo predict model=
+                f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo26n.pt'"
            )
            check_requirements(e.name)  # install missing module
        ckpt = torch_load(file, map_location="cpu")
ultralytics/solutions/ai_gym.py
CHANGED
@@ -22,7 +22,7 @@ class AIGym(BaseSolution):
        process: Process a frame to detect poses, calculate angles, and count repetitions.

    Examples:
-        >>> gym = AIGym(model="
+        >>> gym = AIGym(model="yolo26n-pose.pt")
        >>> image = cv2.imread("gym_scene.jpg")
        >>> results = gym.process(image)
        >>> processed_image = results.plot_im
@@ -35,9 +35,9 @@ class AIGym(BaseSolution):

        Args:
            **kwargs (Any): Keyword arguments passed to the parent class constructor including:
-                - model (str): Model name or path, defaults to "
+                - model (str): Model name or path, defaults to "yolo26n-pose.pt".
        """
-        kwargs["model"] = kwargs.get("model", "
+        kwargs["model"] = kwargs.get("model", "yolo26n-pose.pt")
        super().__init__(**kwargs)
        self.states = defaultdict(lambda: {"angle": 0, "count": 0, "stage": "-"})  # Dict for count, angle and stage
ultralytics/solutions/config.py
CHANGED
@@ -56,7 +56,7 @@ class SolutionConfig:

    Examples:
        >>> from ultralytics.solutions.config import SolutionConfig
-        >>> cfg = SolutionConfig(model="
+        >>> cfg = SolutionConfig(model="yolo26n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
        >>> cfg.update(show=False, conf=0.3)
        >>> print(cfg.model)
    """
ultralytics/solutions/heatmap.py
CHANGED
@@ -29,7 +29,7 @@ class Heatmap(ObjectCounter):

    Examples:
        >>> from ultralytics.solutions import Heatmap
-        >>> heatmap = Heatmap(model="
+        >>> heatmap = Heatmap(model="yolo26n.pt", colormap=cv2.COLORMAP_JET)
        >>> frame = cv2.imread("frame.jpg")
        >>> processed_frame = heatmap.process(frame)
    """
ultralytics/solutions/instance_segmentation.py
CHANGED
@@ -39,9 +39,9 @@ class InstanceSegmentation(BaseSolution):

        Args:
            **kwargs (Any): Keyword arguments passed to the BaseSolution parent class including:
-                - model (str): Model name or path, defaults to "
+                - model (str): Model name or path, defaults to "yolo26n-seg.pt".
        """
-        kwargs["model"] = kwargs.get("model", "
+        kwargs["model"] = kwargs.get("model", "yolo26n-seg.pt")
        super().__init__(**kwargs)

        self.show_conf = self.CFG.get("show_conf", True)
ultralytics/solutions/parking_management.py
CHANGED
@@ -195,7 +195,7 @@ class ParkingManagement(BaseSolution):

    Examples:
        >>> from ultralytics.solutions import ParkingManagement
-        >>> parking_manager = ParkingManagement(model="
+        >>> parking_manager = ParkingManagement(model="yolo26n.pt", json_file="parking_regions.json")
        >>> print(f"Occupied spaces: {parking_manager.pr_info['Occupancy']}")
        >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
    """
ultralytics/solutions/solutions.py
CHANGED
@@ -64,7 +64,7 @@ class BaseSolution:
        process: Process method to be implemented by each Solution subclass.

    Examples:
-        >>> solution = BaseSolution(model="
+        >>> solution = BaseSolution(model="yolo26n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
        >>> solution.initialize_region()
        >>> image = cv2.imread("image.jpg")
        >>> solution.extract_tracks(image)
@@ -106,7 +106,7 @@ class BaseSolution:

        # Load Model and store additional information (classes, show_conf, show_label)
        if self.CFG["model"] is None:
-            self.CFG["model"] = "
+            self.CFG["model"] = "yolo26n.pt"
        self.model = YOLO(self.CFG["model"])
        self.names = self.model.names
        self.classes = self.CFG["classes"]
ultralytics/trackers/track.py
CHANGED
@@ -50,7 +50,7 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
        and isinstance(predictor.model.model.model[-1], Detect)
        and not predictor.model.model.model[-1].end2end
    ):
-        cfg.model = "
+        cfg.model = "yolo26n-cls.pt"
    else:
        # Register hook to extract input of Detect layer
        def pre_hook(module, input):
ultralytics/utils/__init__.py
CHANGED
@@ -80,8 +80,8 @@ HELP_MSG = """
        from ultralytics import YOLO

        # Load a model
-        model = YOLO("
-        model = YOLO("
+        model = YOLO("yolo26n.yaml")  # build a new model from scratch
+        model = YOLO("yolo26n.pt")  # load a pretrained model (recommended for training)

        # Use the model
        results = model.train(data="coco8.yaml", epochs=3)  # train the model
@@ -101,16 +101,16 @@ HELP_MSG = """
        See all ARGS at https://docs.ultralytics.com/usage/cfg or with "yolo cfg"

        - Train a detection model for 10 epochs with an initial learning_rate of 0.01
-            yolo detect train data=coco8.yaml model=
+            yolo detect train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01

        - Predict a YouTube video using a pretrained segmentation model at image size 320:
-            yolo segment predict model=
+            yolo segment predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320

        - Val a pretrained detection model at batch-size 1 and image size 640:
-            yolo detect val model=
+            yolo detect val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640

-        - Export a
-            yolo export model=
+        - Export a YOLO26n classification model to ONNX format at image size 224 by 128 (no TASK required)
+            yolo export model=yolo26n-cls.pt format=onnx imgsz=224,128

        - Run special commands:
            yolo help
@@ -161,7 +161,7 @@ class DataExportMixin:
        tojson: Deprecated alias for `to_json()`.

    Examples:
-        >>> model = YOLO("
+        >>> model = YOLO("yolo26n.pt")
        >>> results = model("image.jpg")
        >>> df = results.to_df()
        >>> print(df)
ultralytics/utils/benchmarks.py
CHANGED
@@ -4,28 +4,28 @@ Benchmark YOLO model formats for speed and accuracy.

Usage:
    from ultralytics.utils.benchmarks import ProfileModels, benchmark
-    ProfileModels(['
-    benchmark(model='
+    ProfileModels(['yolo26n.yaml', 'yolov8s.yaml']).run()
+    benchmark(model='yolo26n.pt', imgsz=160)

Format | `format=argument` | Model
--- | --- | ---
-PyTorch | - |
-TorchScript | `torchscript` |
-ONNX | `onnx` |
-OpenVINO | `openvino` |
-TensorRT | `engine` |
-CoreML | `coreml` |
-TensorFlow SavedModel | `saved_model` |
-TensorFlow GraphDef | `pb` |
-TensorFlow Lite | `tflite` |
-TensorFlow Edge TPU | `edgetpu` |
-TensorFlow.js | `tfjs` |
-PaddlePaddle | `paddle` |
-MNN | `mnn` |
-NCNN | `ncnn` |
-IMX | `imx` |
-RKNN | `rknn` |
-ExecuTorch | `executorch` |
+PyTorch | - | yolo26n.pt
+TorchScript | `torchscript` | yolo26n.torchscript
+ONNX | `onnx` | yolo26n.onnx
+OpenVINO | `openvino` | yolo26n_openvino_model/
+TensorRT | `engine` | yolo26n.engine
+CoreML | `coreml` | yolo26n.mlpackage
+TensorFlow SavedModel | `saved_model` | yolo26n_saved_model/
+TensorFlow GraphDef | `pb` | yolo26n.pb
+TensorFlow Lite | `tflite` | yolo26n.tflite
+TensorFlow Edge TPU | `edgetpu` | yolo26n_edgetpu.tflite
+TensorFlow.js | `tfjs` | yolo26n_web_model/
+PaddlePaddle | `paddle` | yolo26n_paddle_model/
+MNN | `mnn` | yolo26n.mnn
+NCNN | `ncnn` | yolo26n_ncnn_model/
+IMX | `imx` | yolo26n_imx_model/
+RKNN | `rknn` | yolo26n_rknn_model/
+ExecuTorch | `executorch` | yolo26n_executorch_model/
"""
@@ -52,7 +52,7 @@ from ultralytics.utils.torch_utils import get_cpu_info, select_device


def benchmark(
-    model=WEIGHTS_DIR / "
+    model=WEIGHTS_DIR / "yolo26n.pt",
    data=None,
    imgsz=160,
    half=False,
@@ -84,7 +84,7 @@ def benchmark(
    Examples:
        Benchmark a YOLO model with default settings:
        >>> from ultralytics.utils.benchmarks import benchmark
-        >>> benchmark(model="
+        >>> benchmark(model="yolo26n.pt", imgsz=640)
    """
    imgsz = check_imgsz(imgsz)
    assert imgsz[0] == imgsz[1] if isinstance(imgsz, list) else True, "benchmark() only supports square imgsz."
@@ -396,7 +396,7 @@ class ProfileModels:
    Examples:
        Profile models and print results
        >>> from ultralytics.utils.benchmarks import ProfileModels
-        >>> profiler = ProfileModels(["
+        >>> profiler = ProfileModels(["yolo26n.yaml", "yolov8s.yaml"], imgsz=640)
        >>> profiler.run()
    """
@@ -444,7 +444,7 @@ class ProfileModels:
    Examples:
        Profile models and print results
        >>> from ultralytics.utils.benchmarks import ProfileModels
-        >>> profiler = ProfileModels(["
+        >>> profiler = ProfileModels(["yolo26n.yaml", "yolo11s.yaml"])
        >>> results = profiler.run()
    """
    files = self.get_files()
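Note: a minimal usage sketch consolidating the Usage block above; the model files are the ones named in the updated docstring and are assumed to be available locally or auto-downloadable:

    from ultralytics.utils.benchmarks import ProfileModels, benchmark

    # Speed/accuracy benchmark of one model across the export formats listed in the table above.
    benchmark(model="yolo26n.pt", imgsz=160)

    # Profile a list of models (exported and timed per the ProfileModels docstring examples).
    ProfileModels(["yolo26n.yaml", "yolov8s.yaml"]).run()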
ultralytics/utils/callbacks/platform.py
CHANGED
@@ -13,6 +13,10 @@ from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SE

PREFIX = colorstr("Platform: ")

+# Configurable platform URL for debugging (e.g. ULTRALYTICS_PLATFORM_URL=http://localhost:3000)
+PLATFORM_URL = os.getenv("ULTRALYTICS_PLATFORM_URL", "https://platform.ultralytics.com").rstrip("/")
+PLATFORM_API_URL = f"{PLATFORM_URL}/api/webhooks"
+

def slugify(text):
    """Convert text to URL-safe slug (e.g., 'My Project 1' -> 'my-project-1')."""
@@ -66,11 +70,9 @@ def resolve_platform_uri(uri, hard=True):

    api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
    if not api_key:
-        raise ValueError(
-            f"ULTRALYTICS_API_KEY required for '{uri}'. Get key at https://platform.ultralytics.com/settings"
-        )
+        raise ValueError(f"ULTRALYTICS_API_KEY required for '{uri}'. Get key at {PLATFORM_URL}/settings")

-    base =
+    base = PLATFORM_API_URL
    headers = {"Authorization": f"Bearer {api_key}"}

    # ul://username/datasets/slug
@@ -152,7 +154,7 @@ def _send(event, data, project, name, model_id=None):
    if model_id:
        payload["modelId"] = model_id
    r = requests.post(
-        "
+        f"{PLATFORM_API_URL}/training/metrics",
        json=payload,
        headers={"Authorization": f"Bearer {_api_key}"},
        timeout=10,
@@ -178,7 +180,7 @@ def _upload_model(model_path, project, name):

    # Get signed upload URL
    response = requests.post(
-        "
+        f"{PLATFORM_API_URL}/models/upload",
        json={"project": project, "name": name, "filename": model_path.name},
        headers={"Authorization": f"Bearer {_api_key}"},
        timeout=10,
@@ -195,7 +197,7 @@ def _upload_model(model_path, project, name):
        timeout=600,  # 10 min timeout for large models
    ).raise_for_status()

-    # url = f"
+    # url = f"{PLATFORM_URL}/{project}/{name}"
    # LOGGER.info(f"{PREFIX}Model uploaded to {url}")
    return data.get("gcsPath")

@@ -278,7 +280,7 @@ def on_pretrain_routine_start(trainer):
    trainer._platform_last_upload = time()

    project, name = _get_project_name(trainer)
-    url = f"
+    url = f"{PLATFORM_URL}/{project}/{name}"
    LOGGER.info(f"{PREFIX}Streaming to {url}")

    # Create callback to send console output to Platform
@@ -439,7 +441,7 @@ def on_train_end(trainer):
        name,
        getattr(trainer, "_platform_model_id", None),
    )
-    url = f"
+    url = f"{PLATFORM_URL}/{project}/{name}"
    LOGGER.info(f"{PREFIX}View results at {url}")
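Note: a minimal sketch of the override pattern introduced above; the constant names match the new module-level values in platform.py, and the localhost URL is only the illustrative value from the diff's own comment:

    import os

    # The platform base URL can be redirected for local debugging, e.g.
    #   ULTRALYTICS_PLATFORM_URL=http://localhost:3000 python train.py
    PLATFORM_URL = os.getenv("ULTRALYTICS_PLATFORM_URL", "https://platform.ultralytics.com").rstrip("/")
    PLATFORM_API_URL = f"{PLATFORM_URL}/api/webhooks"

    print(PLATFORM_URL)      # https://platform.ultralytics.com unless overridden
    print(PLATFORM_API_URL)  # webhook API root derived from the base URL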
ultralytics/utils/checks.py
CHANGED
@@ -530,7 +530,7 @@ def check_torchvision():
        )


-def check_suffix(file="
+def check_suffix(file="yolo26n.pt", suffix=".pt", msg=""):
    """Check file(s) for acceptable suffix.

    Args:
@@ -584,7 +584,7 @@ def check_model_file_from_stem(model="yolo11n"):
    """
    path = Path(model)
    if not path.suffix and path.stem in downloads.GITHUB_ASSETS_STEMS:
-        return path.with_suffix(".pt")  # add suffix, i.e.
+        return path.with_suffix(".pt")  # add suffix, i.e. yolo26n -> yolo26n.pt
    return model


@@ -812,7 +812,7 @@ def check_amp(model):
    Examples:
        >>> from ultralytics import YOLO
        >>> from ultralytics.utils.checks import check_amp
-        >>> model = YOLO("
+        >>> model = YOLO("yolo26n.pt").model.cuda()
        >>> check_amp(model)
    """
    from ultralytics.utils.torch_utils import autocast
@@ -851,14 +851,14 @@ def check_amp(model):
    try:
        from ultralytics import YOLO

-        assert amp_allclose(YOLO("
+        assert amp_allclose(YOLO("yolo26n.pt"), im)
        LOGGER.info(f"{prefix}checks passed ✅")
    except ConnectionError:
-        LOGGER.warning(f"{prefix}checks skipped. Offline and unable to download
+        LOGGER.warning(f"{prefix}checks skipped. Offline and unable to download YOLO26n for AMP checks. {warning_msg}")
    except (AttributeError, ModuleNotFoundError):
        LOGGER.warning(
            f"{prefix}checks skipped. "
-            f"Unable to load
+            f"Unable to load YOLO26n for AMP checks due to possible Ultralytics package modifications. {warning_msg}"
        )
    except AssertionError:
        LOGGER.error(
ultralytics/utils/downloads.py
CHANGED
@@ -420,7 +420,7 @@ def get_github_assets(
        LOGGER.warning(f"GitHub assets check failure for {url}: {r.status_code} {r.reason}")
        return "", []
    data = r.json()
-    return data["tag_name"], [x["name"] for x in data["assets"]]  # tag, assets i.e. ['
+    return data["tag_name"], [x["name"] for x in data["assets"]]  # tag, assets i.e. ['yolo26n.pt', 'yolo11s.pt', ...]


def attempt_download_asset(
@@ -441,7 +441,7 @@ def attempt_download_asset(
        (str): The path to the downloaded file.

    Examples:
-        >>> file_path = attempt_download_asset("
+        >>> file_path = attempt_download_asset("yolo26n.pt", repo="ultralytics/assets", release="latest")
    """
    from ultralytics.utils import SETTINGS  # scoped for circular import