dgenerate-ultralytics-headless 8.3.248__py3-none-any.whl → 8.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/METADATA +52 -61
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/RECORD +97 -84
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/WHEEL +1 -1
- tests/__init__.py +2 -2
- tests/conftest.py +1 -1
- tests/test_cuda.py +8 -2
- tests/test_engine.py +8 -8
- tests/test_exports.py +11 -4
- tests/test_integrations.py +9 -9
- tests/test_python.py +41 -16
- tests/test_solutions.py +3 -3
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +31 -31
- ultralytics/cfg/datasets/TT100K.yaml +346 -0
- ultralytics/cfg/datasets/coco12-formats.yaml +101 -0
- ultralytics/cfg/default.yaml +3 -1
- ultralytics/cfg/models/26/yolo26-cls.yaml +33 -0
- ultralytics/cfg/models/26/yolo26-obb.yaml +52 -0
- ultralytics/cfg/models/26/yolo26-p2.yaml +60 -0
- ultralytics/cfg/models/26/yolo26-p6.yaml +62 -0
- ultralytics/cfg/models/26/yolo26-pose.yaml +53 -0
- ultralytics/cfg/models/26/yolo26-seg.yaml +52 -0
- ultralytics/cfg/models/26/yolo26.yaml +52 -0
- ultralytics/cfg/models/26/yoloe-26-seg.yaml +53 -0
- ultralytics/cfg/models/26/yoloe-26.yaml +53 -0
- ultralytics/data/annotator.py +2 -2
- ultralytics/data/augment.py +15 -0
- ultralytics/data/converter.py +76 -45
- ultralytics/data/dataset.py +1 -1
- ultralytics/data/utils.py +2 -2
- ultralytics/engine/exporter.py +34 -28
- ultralytics/engine/model.py +38 -37
- ultralytics/engine/predictor.py +17 -17
- ultralytics/engine/results.py +22 -15
- ultralytics/engine/trainer.py +83 -48
- ultralytics/engine/tuner.py +20 -11
- ultralytics/engine/validator.py +16 -16
- ultralytics/models/fastsam/predict.py +1 -1
- ultralytics/models/yolo/classify/predict.py +1 -1
- ultralytics/models/yolo/classify/train.py +1 -1
- ultralytics/models/yolo/classify/val.py +1 -1
- ultralytics/models/yolo/detect/predict.py +2 -2
- ultralytics/models/yolo/detect/train.py +6 -3
- ultralytics/models/yolo/detect/val.py +7 -1
- ultralytics/models/yolo/model.py +8 -8
- ultralytics/models/yolo/obb/predict.py +2 -2
- ultralytics/models/yolo/obb/train.py +3 -3
- ultralytics/models/yolo/obb/val.py +1 -1
- ultralytics/models/yolo/pose/predict.py +1 -1
- ultralytics/models/yolo/pose/train.py +3 -1
- ultralytics/models/yolo/pose/val.py +1 -1
- ultralytics/models/yolo/segment/predict.py +3 -3
- ultralytics/models/yolo/segment/train.py +4 -4
- ultralytics/models/yolo/segment/val.py +2 -2
- ultralytics/models/yolo/yoloe/train.py +6 -1
- ultralytics/models/yolo/yoloe/train_seg.py +6 -1
- ultralytics/nn/autobackend.py +14 -8
- ultralytics/nn/modules/__init__.py +8 -0
- ultralytics/nn/modules/block.py +128 -8
- ultralytics/nn/modules/head.py +788 -203
- ultralytics/nn/tasks.py +86 -41
- ultralytics/nn/text_model.py +5 -2
- ultralytics/optim/__init__.py +5 -0
- ultralytics/optim/muon.py +338 -0
- ultralytics/solutions/ai_gym.py +3 -3
- ultralytics/solutions/config.py +1 -1
- ultralytics/solutions/heatmap.py +1 -1
- ultralytics/solutions/instance_segmentation.py +2 -2
- ultralytics/solutions/object_counter.py +1 -1
- ultralytics/solutions/parking_management.py +1 -1
- ultralytics/solutions/solutions.py +2 -2
- ultralytics/trackers/byte_tracker.py +7 -7
- ultralytics/trackers/track.py +1 -1
- ultralytics/utils/__init__.py +8 -8
- ultralytics/utils/benchmarks.py +26 -26
- ultralytics/utils/callbacks/platform.py +173 -64
- ultralytics/utils/callbacks/tensorboard.py +2 -0
- ultralytics/utils/callbacks/wb.py +6 -1
- ultralytics/utils/checks.py +28 -9
- ultralytics/utils/dist.py +1 -0
- ultralytics/utils/downloads.py +5 -3
- ultralytics/utils/export/engine.py +19 -10
- ultralytics/utils/export/imx.py +38 -20
- ultralytics/utils/export/tensorflow.py +21 -21
- ultralytics/utils/files.py +2 -2
- ultralytics/utils/loss.py +597 -203
- ultralytics/utils/metrics.py +2 -1
- ultralytics/utils/ops.py +11 -2
- ultralytics/utils/patches.py +42 -0
- ultralytics/utils/plotting.py +3 -0
- ultralytics/utils/tal.py +100 -20
- ultralytics/utils/torch_utils.py +1 -1
- ultralytics/utils/tqdm.py +4 -1
- ultralytics/utils/tuner.py +2 -5
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/top_level.txt +0 -0
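The list above introduces a new YOLO26 model family (the ultralytics/cfg/models/26/*.yaml configs), new dataset configs such as TT100K.yaml, and a new ultralytics/optim package. A minimal, hypothetical sketch of how these additions might be exercised, assuming the yolo26 configs follow the existing n/s/m/l/x scale convention seen elsewhere in this diff:

from ultralytics import YOLO

# Build a model from the new YOLO26 config family (scale suffix assumed)
# and train briefly on the newly added TT100K dataset config.
model = YOLO("yolo26n.yaml")
model.train(data="TT100K.yaml", epochs=1, imgsz=640)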
ultralytics/engine/model.py
CHANGED
@@ -71,7 +71,7 @@ class Model(torch.nn.Module):

 Examples:
 >>> from ultralytics import YOLO
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> results = model.predict("image.jpg")
 >>> model.train(data="coco8.yaml", epochs=3)
 >>> metrics = model.val()
@@ -80,7 +80,7 @@ class Model(torch.nn.Module):

 def __init__(
 self,
-model: str | Path | Model = "
+model: str | Path | Model = "yolo26n.pt",
 task: str | None = None,
 verbose: bool = False,
 ) -> None:
@@ -169,7 +169,7 @@ class Model(torch.nn.Module):
 object.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> results = model("https://ultralytics.com/images/bus.jpg")
 >>> for r in results:
 ... print(f"Detected {len(r)} objects in image")
@@ -192,7 +192,7 @@ class Model(torch.nn.Module):
 Examples:
 >>> Model.is_triton_model("http://localhost:8000/v2/models/yolo11n")
 True
->>> Model.is_triton_model("
+>>> Model.is_triton_model("yolo26n.pt")
 False
 """
 from urllib.parse import urlsplit
@@ -216,7 +216,7 @@ class Model(torch.nn.Module):
 Examples:
 >>> Model.is_hub_model("https://hub.ultralytics.com/models/MODEL")
 True
->>> Model.is_hub_model("
+>>> Model.is_hub_model("yolo26n.pt")
 False
 """
 from ultralytics.hub import HUB_WEB_ROOT
@@ -242,7 +242,7 @@ class Model(torch.nn.Module):

 Examples:
 >>> model = Model()
->>> model._new("
+>>> model._new("yolo26n.yaml", task="detect", verbose=True)
 """
 cfg_dict = yaml_model_load(cfg)
 self.cfg = cfg
@@ -272,12 +272,12 @@ class Model(torch.nn.Module):

 Examples:
 >>> model = Model()
->>> model._load("
+>>> model._load("yolo26n.pt")
 >>> model._load("path/to/weights.pth", task="detect")
 """
-if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):
+if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://", "ul://")):
 weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"]) # download and return local file
-weights = checks.check_model_file_from_stem(weights) # add suffix, i.e.
+weights = checks.check_model_file_from_stem(weights) # add suffix, i.e. yolo26 -> yolo26n.pt

 if str(weights).rpartition(".")[-1] == "pt":
 self.model, self.ckpt = load_checkpoint(weights)
@@ -304,7 +304,7 @@ class Model(torch.nn.Module):
 information about supported model formats and operations.

 Examples:
->>> model = Model("
+>>> model = Model("yolo26n.pt")
 >>> model._check_is_pytorch_model() # No error raised
 >>> model = Model("yolo11n.onnx")
 >>> model._check_is_pytorch_model() # Raises TypeError
@@ -334,7 +334,7 @@ class Model(torch.nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = Model("
+>>> model = Model("yolo26n.pt")
 >>> model.reset_weights()
 """
 self._check_is_pytorch_model()
@@ -345,7 +345,7 @@ class Model(torch.nn.Module):
 p.requires_grad = True
 return self

-def load(self, weights: str | Path = "
+def load(self, weights: str | Path = "yolo26n.pt") -> Model:
 """Load parameters from the specified weights file into the model.

 This method supports loading weights from a file or directly from a weights object. It matches parameters by
@@ -362,7 +362,7 @@ class Model(torch.nn.Module):

 Examples:
 >>> model = Model()
->>> model.load("
+>>> model.load("yolo26n.pt")
 >>> model.load(Path("path/to/weights.pt"))
 """
 self._check_is_pytorch_model()
@@ -385,7 +385,7 @@ class Model(torch.nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = Model("
+>>> model = Model("yolo26n.pt")
 >>> model.save("my_model.pt")
 """
 self._check_is_pytorch_model()
@@ -403,7 +403,7 @@ class Model(torch.nn.Module):
 }
 torch.save({**self.ckpt, **updates}, filename)

-def info(self, detailed: bool = False, verbose: bool = True):
+def info(self, detailed: bool = False, verbose: bool = True, imgsz: int | list[int, int] = 640):
 """Display model information.

 This method provides an overview or detailed information about the model, depending on the arguments
@@ -412,18 +412,19 @@ class Model(torch.nn.Module):
 Args:
 detailed (bool): If True, shows detailed information about the model layers and parameters.
 verbose (bool): If True, prints the information. If False, returns the information as a list.
+imgsz (int | list[int, int]): Input image size used for FLOPs calculation.

 Returns:
 (list[str]): A list of strings containing various types of information about the model, including model
 summary, layer details, and parameter counts. Empty if verbose is True.

 Examples:
->>> model = Model("
+>>> model = Model("yolo26n.pt")
 >>> model.info() # Prints model summary
 >>> info_list = model.info(detailed=True, verbose=False) # Returns detailed info as a list
 """
 self._check_is_pytorch_model()
-return self.model.info(detailed=detailed, verbose=verbose)
+return self.model.info(detailed=detailed, verbose=verbose, imgsz=imgsz)

 def fuse(self) -> None:
 """Fuse Conv2d and BatchNorm2d layers in the model for optimized inference.
@@ -437,7 +438,7 @@ class Model(torch.nn.Module):
 performs both convolution and normalization in one step.

 Examples:
->>> model = Model("
+>>> model = Model("yolo26n.pt")
 >>> model.fuse()
 >>> # Model is now fused and ready for optimized inference
 """
@@ -465,7 +466,7 @@ class Model(torch.nn.Module):
 (list[torch.Tensor]): A list containing the image embeddings.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> image = "https://ultralytics.com/images/bus.jpg"
 >>> embeddings = model.embed(image)
 >>> print(embeddings[0].shape)
@@ -501,7 +502,7 @@ class Model(torch.nn.Module):
 object.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> results = model.predict(source="path/to/image.jpg", conf=0.25)
 >>> for r in results:
 ... print(r.boxes.data) # print detection bounding boxes
@@ -558,7 +559,7 @@ class Model(torch.nn.Module):
 (list[ultralytics.engine.results.Results]): A list of tracking results, each a Results object.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> results = model.track(source="path/to/video.mp4", show=True)
 >>> for r in results:
 ... print(r.boxes.id) # print tracking IDs
@@ -600,7 +601,7 @@ class Model(torch.nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> results = model.val(data="coco8.yaml", imgsz=640)
 >>> print(results.box.map) # Print mAP50-95
 """
@@ -638,7 +639,7 @@ class Model(torch.nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> results = model.benchmark(data="coco8.yaml", imgsz=640, half=True)
 >>> print(results)
 """
@@ -691,7 +692,7 @@ class Model(torch.nn.Module):
 RuntimeError: If the export process fails due to errors.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> model.export(format="onnx", dynamic=True, simplify=True)
 'path/to/exported/model.onnx'
 """
@@ -741,7 +742,7 @@ class Model(torch.nn.Module):
 (dict | None): Training metrics if available and training is successful; otherwise, None.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> results = model.train(data="coco8.yaml", epochs=3)
 """
 self._check_is_pytorch_model()
@@ -807,7 +808,7 @@ class Model(torch.nn.Module):
 TypeError: If the model is not a PyTorch model.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> results = model.tune(data="coco8.yaml", iterations=5)
 >>> print(results)

@@ -824,7 +825,7 @@ class Model(torch.nn.Module):

 custom = {} # method defaults
 args = {**self.overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right
-return Tuner(args=args, _callbacks=self.callbacks)(
+return Tuner(args=args, _callbacks=self.callbacks)(iterations=iterations)

 def _apply(self, fn) -> Model:
 """Apply a function to model tensors that are not parameters or registered buffers.
@@ -844,7 +845,7 @@ class Model(torch.nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = Model("
+>>> model = Model("yolo26n.pt")
 >>> model = model._apply(lambda t: t.cuda()) # Move model to GPU
 """
 self._check_is_pytorch_model()
@@ -869,7 +870,7 @@ class Model(torch.nn.Module):
 AttributeError: If the model or predictor does not have a 'names' attribute.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> print(model.names)
 {0: 'person', 1: 'bicycle', 2: 'car', ...}
 """
@@ -897,7 +898,7 @@ class Model(torch.nn.Module):
 AttributeError: If the model is not a torch.nn.Module instance.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> print(model.device)
 device(type='cuda', index=0) # if CUDA is available
 >>> model = model.to("cpu")
@@ -918,7 +919,7 @@ class Model(torch.nn.Module):
 (object | None): The transform object of the model if available, otherwise None.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> transforms = model.transforms
 >>> if transforms:
 ... print(f"Model transforms: {transforms}")
@@ -946,7 +947,7 @@ class Model(torch.nn.Module):
 Examples:
 >>> def on_train_start(trainer):
 ... print("Training is starting!")
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> model.add_callback("on_train_start", on_train_start)
 >>> model.train(data="coco8.yaml", epochs=1)
 """
@@ -964,7 +965,7 @@ class Model(torch.nn.Module):
 recognized by the Ultralytics callback system.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> model.add_callback("on_train_start", lambda: print("Training started"))
 >>> model.clear_callback("on_train_start")
 >>> # All callbacks for 'on_train_start' are now removed
@@ -993,7 +994,7 @@ class Model(torch.nn.Module):
 modifications, ensuring consistent behavior across different runs or experiments.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> model.add_callback("on_train_start", custom_function)
 >>> model.reset_callbacks()
 # All callbacks are now reset to their default functions
@@ -1075,7 +1076,7 @@ class Model(torch.nn.Module):
 implementations for that task.

 Examples:
->>> model = Model("
+>>> model = Model("yolo26n.pt")
 >>> task_map = model.task_map
 >>> detect_predictor = task_map["detect"]["predictor"]
 >>> segment_trainer = task_map["segment"]["trainer"]
@@ -1093,7 +1094,7 @@ class Model(torch.nn.Module):
 (Model): The model instance with evaluation mode set.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> model.eval()
 >>> # Model is now in evaluation mode for inference
 """
@@ -1117,7 +1118,7 @@ class Model(torch.nn.Module):
 AttributeError: If the requested attribute does not exist in the model.

 Examples:
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> print(model.stride) # Access model.stride attribute
 >>> print(model.names) # Access model.names attribute
 """
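Taken together, the model.py hunks switch the documented default weights to yolo26n.pt, allow ul:// URLs in _load(), and add an imgsz argument to Model.info() for the FLOPs estimate. A small sketch of the updated calls, assuming the yolo26n.pt weights are downloadable for this release:

from ultralytics import YOLO

model = YOLO("yolo26n.pt")  # new default weights name used throughout the docstrings

# info() now forwards imgsz to the underlying model summary / FLOPs calculation
model.info(detailed=False, verbose=True, imgsz=640)

# tune() now passes iterations through explicitly, e.g.:
# model.tune(data="coco8.yaml", iterations=5)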
ultralytics/engine/predictor.py
CHANGED
@@ -3,7 +3,7 @@
 Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc.

 Usage - sources:
-$ yolo mode=predict model=
+$ yolo mode=predict model=yolo26n.pt source=0 # webcam
 img.jpg # image
 vid.mp4 # video
 screen # screenshot
@@ -15,22 +15,22 @@ Usage - sources:
 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP, TCP stream

 Usage - formats:
-$ yolo mode=predict model=
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+$ yolo mode=predict model=yolo26n.pt # PyTorch
+yolo26n.torchscript # TorchScript
+yolo26n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
+yolo26n_openvino_model # OpenVINO
+yolo26n.engine # TensorRT
+yolo26n.mlpackage # CoreML (macOS-only)
+yolo26n_saved_model # TensorFlow SavedModel
+yolo26n.pb # TensorFlow GraphDef
+yolo26n.tflite # TensorFlow Lite
+yolo26n_edgetpu.tflite # TensorFlow Edge TPU
+yolo26n_paddle_model # PaddlePaddle
+yolo26n.mnn # MNN
+yolo26n_ncnn_model # NCNN
+yolo26n_imx_model # Sony IMX
+yolo26n_rknn_model # Rockchip RKNN
+yolo26n.pte # PyTorch Executorch
 """

 from __future__ import annotations
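The predictor docstring now lists the supported inference formats using yolo26n filenames. A hedged example of the usual round trip, export then re-load, using only calls that already exist in the Ultralytics API; format availability depends on which optional exporters are installed:

from ultralytics import YOLO

model = YOLO("yolo26n.pt")
onnx_path = model.export(format="onnx")  # returns the exported file path, e.g. "yolo26n.onnx"
results = YOLO(onnx_path)("https://ultralytics.com/images/bus.jpg")
print(len(results[0].boxes))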
ultralytics/engine/results.py
CHANGED
@@ -667,7 +667,7 @@ class Results(SimpleClass, DataExportMixin):

 Examples:
 >>> from ultralytics import YOLO
->>> model = YOLO("
+>>> model = YOLO("yolo26n.pt")
 >>> results = model("path/to/image.jpg")
 >>> for result in results:
 >>> result.save_txt("output.txt")
@@ -750,8 +750,8 @@ class Results(SimpleClass, DataExportMixin):
 """Convert inference results to a summarized dictionary with optional normalization for box coordinates.

 This method creates a list of detection dictionaries, each containing information about a single detection or
-classification result. For classification tasks, it returns the top
-
+classification result. For classification tasks, it returns the top 5 classes and their
+confidences. For detection tasks, it includes class information, bounding box coordinates, and
 optionally mask segments and keypoints.

 Args:
@@ -772,14 +772,16 @@ class Results(SimpleClass, DataExportMixin):
 # Create list of detection dictionaries
 results = []
 if self.probs is not None:
-
-
-
-
-
-
-
-
+# Return top 5 classification results
+for class_id, conf in zip(self.probs.top5, self.probs.top5conf.tolist()):
+class_id = int(class_id)
+results.append(
+{
+"name": self.names[class_id],
+"class": class_id,
+"confidence": round(conf, decimals),
+}
+)
 return results

 is_obb = self.obb is not None
@@ -801,12 +803,17 @@ class Results(SimpleClass, DataExportMixin):
 "y": (self.masks.xy[i][:, 1] / h).round(decimals).tolist(),
 }
 if self.keypoints is not None:
-
+kpt = self.keypoints[i]
+if kpt.has_visible:
+x, y, visible = kpt.data[0].cpu().unbind(dim=1)
+else:
+x, y = kpt.data[0].cpu().unbind(dim=1)
 result["keypoints"] = {
-"x": (x / w).numpy().round(decimals).tolist(),
+"x": (x / w).numpy().round(decimals).tolist(),
 "y": (y / h).numpy().round(decimals).tolist(),
-"visible": visible.numpy().round(decimals).tolist(),
 }
+if kpt.has_visible:
+result["keypoints"]["visible"] = visible.numpy().round(decimals).tolist()
 results.append(result)

 return results
@@ -1500,7 +1507,7 @@ class OBB(BaseTensor):
 Examples:
 >>> import torch
 >>> from ultralytics import YOLO
->>> model = YOLO("
+>>> model = YOLO("yolo26n-obb.pt")
 >>> results = model("path/to/image.jpg")
 >>> for result in results:
 ... obb = result.obb