ultralytics 8.2.80__py3-none-any.whl → 8.2.82__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ultralytics has been flagged as potentially problematic.
- tests/test_solutions.py +0 -4
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +14 -16
- ultralytics/data/annotator.py +1 -1
- ultralytics/data/augment.py +58 -58
- ultralytics/data/base.py +3 -3
- ultralytics/data/converter.py +7 -8
- ultralytics/data/explorer/explorer.py +7 -23
- ultralytics/data/loaders.py +1 -1
- ultralytics/data/split_dota.py +11 -3
- ultralytics/data/utils.py +6 -10
- ultralytics/engine/exporter.py +2 -4
- ultralytics/engine/model.py +47 -47
- ultralytics/engine/predictor.py +1 -1
- ultralytics/engine/results.py +30 -30
- ultralytics/engine/trainer.py +11 -8
- ultralytics/engine/tuner.py +7 -8
- ultralytics/engine/validator.py +3 -5
- ultralytics/hub/__init__.py +5 -5
- ultralytics/hub/auth.py +6 -2
- ultralytics/hub/session.py +30 -20
- ultralytics/models/fastsam/model.py +13 -10
- ultralytics/models/fastsam/predict.py +2 -2
- ultralytics/models/fastsam/utils.py +0 -1
- ultralytics/models/nas/model.py +4 -4
- ultralytics/models/nas/predict.py +1 -2
- ultralytics/models/nas/val.py +1 -1
- ultralytics/models/rtdetr/predict.py +1 -1
- ultralytics/models/rtdetr/train.py +1 -1
- ultralytics/models/rtdetr/val.py +1 -1
- ultralytics/models/sam/model.py +11 -11
- ultralytics/models/sam/modules/decoders.py +7 -4
- ultralytics/models/sam/modules/sam.py +9 -1
- ultralytics/models/sam/modules/tiny_encoder.py +1 -1
- ultralytics/models/sam/modules/transformer.py +0 -2
- ultralytics/models/sam/modules/utils.py +1 -1
- ultralytics/models/sam/predict.py +10 -10
- ultralytics/models/utils/loss.py +29 -17
- ultralytics/models/utils/ops.py +1 -5
- ultralytics/models/yolo/classify/predict.py +1 -1
- ultralytics/models/yolo/classify/train.py +1 -1
- ultralytics/models/yolo/classify/val.py +1 -1
- ultralytics/models/yolo/detect/predict.py +1 -1
- ultralytics/models/yolo/detect/train.py +1 -1
- ultralytics/models/yolo/detect/val.py +1 -1
- ultralytics/models/yolo/model.py +6 -2
- ultralytics/models/yolo/obb/predict.py +1 -1
- ultralytics/models/yolo/obb/train.py +1 -1
- ultralytics/models/yolo/obb/val.py +2 -2
- ultralytics/models/yolo/pose/predict.py +1 -1
- ultralytics/models/yolo/pose/train.py +1 -1
- ultralytics/models/yolo/pose/val.py +1 -1
- ultralytics/models/yolo/segment/predict.py +1 -1
- ultralytics/models/yolo/segment/train.py +1 -1
- ultralytics/models/yolo/segment/val.py +1 -1
- ultralytics/models/yolo/world/train.py +1 -1
- ultralytics/nn/autobackend.py +2 -2
- ultralytics/nn/modules/__init__.py +2 -2
- ultralytics/nn/modules/block.py +8 -20
- ultralytics/nn/modules/conv.py +1 -3
- ultralytics/nn/modules/head.py +16 -31
- ultralytics/nn/modules/transformer.py +0 -1
- ultralytics/nn/modules/utils.py +0 -1
- ultralytics/nn/tasks.py +11 -9
- ultralytics/solutions/__init__.py +1 -0
- ultralytics/solutions/ai_gym.py +0 -2
- ultralytics/solutions/analytics.py +1 -6
- ultralytics/solutions/heatmap.py +0 -1
- ultralytics/solutions/object_counter.py +0 -2
- ultralytics/solutions/queue_management.py +0 -2
- ultralytics/trackers/basetrack.py +1 -1
- ultralytics/trackers/byte_tracker.py +2 -2
- ultralytics/trackers/utils/gmc.py +5 -5
- ultralytics/trackers/utils/kalman_filter.py +1 -1
- ultralytics/trackers/utils/matching.py +1 -5
- ultralytics/utils/__init__.py +132 -30
- ultralytics/utils/autobatch.py +7 -4
- ultralytics/utils/benchmarks.py +6 -14
- ultralytics/utils/callbacks/base.py +0 -1
- ultralytics/utils/callbacks/comet.py +0 -1
- ultralytics/utils/callbacks/tensorboard.py +0 -1
- ultralytics/utils/checks.py +15 -18
- ultralytics/utils/downloads.py +6 -7
- ultralytics/utils/files.py +3 -4
- ultralytics/utils/instance.py +17 -7
- ultralytics/utils/metrics.py +15 -15
- ultralytics/utils/ops.py +8 -8
- ultralytics/utils/plotting.py +25 -35
- ultralytics/utils/tal.py +27 -18
- ultralytics/utils/torch_utils.py +12 -13
- ultralytics/utils/tuner.py +2 -3
- {ultralytics-8.2.80.dist-info → ultralytics-8.2.82.dist-info}/METADATA +1 -1
- {ultralytics-8.2.80.dist-info → ultralytics-8.2.82.dist-info}/RECORD +97 -97
- {ultralytics-8.2.80.dist-info → ultralytics-8.2.82.dist-info}/LICENSE +0 -0
- {ultralytics-8.2.80.dist-info → ultralytics-8.2.82.dist-info}/WHEEL +0 -0
- {ultralytics-8.2.80.dist-info → ultralytics-8.2.82.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.2.80.dist-info → ultralytics-8.2.82.dist-info}/top_level.txt +0 -0
ultralytics/data/split_dota.py
CHANGED
@@ -19,11 +19,19 @@ from shapely.geometry import Polygon

 def bbox_iof(polygon1, bbox2, eps=1e-6):
     """
-    Calculate
+    Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.

     Args:
-        polygon1 (np.ndarray): Polygon coordinates, (n, 8).
-        bbox2 (np.ndarray): Bounding boxes, (n
+        polygon1 (np.ndarray): Polygon coordinates, shape (n, 8).
+        bbox2 (np.ndarray): Bounding boxes, shape (n, 4).
+        eps (float, optional): Small value to prevent division by zero. Defaults to 1e-6.
+
+    Returns:
+        (np.ndarray): IoF scores, shape (n, 1) or (n, m) if bbox2 is (m, 4).
+
+    Note:
+        Polygon format: [x1, y1, x2, y2, x3, y3, x4, y4].
+        Bounding box format: [x_min, y_min, x_max, y_max].
     """
     polygon1 = polygon1.reshape(-1, 4, 2)
     lt_point = np.min(polygon1, axis=-2)  # left-top
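For orientation, here is a minimal sketch of calling `bbox_iof` with the shapes the new docstring documents. The sample coordinates are hypothetical, and the result comment assumes IoF is normalized by the polygon (foreground) area, as the name and docstring suggest:

```python
import numpy as np

from ultralytics.data.split_dota import bbox_iof

# One square polygon in [x1, y1, x2, y2, x3, y3, x4, y4] format, shape (n, 8) with n=1
polygon1 = np.array([[0, 0, 10, 0, 10, 10, 0, 10]], dtype=np.float32)

# Two boxes in [x_min, y_min, x_max, y_max] format, shape (m, 4) with m=2
bbox2 = np.array([[5, 5, 15, 15], [20, 20, 30, 30]], dtype=np.float32)

iof = bbox_iof(polygon1, bbox2)  # shape (n, m) per the Returns note above
print(iof)  # first box covers 25/100 of the polygon (IoF ≈ 0.25); second box overlaps none (≈ 0.0)
```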
ultralytics/data/utils.py
CHANGED
@@ -265,7 +265,6 @@ def check_det_dataset(dataset, autodownload=True):
     Returns:
         (dict): Parsed dataset information and paths.
     """
-
     file = check_file(dataset)

     # Download (optional)
@@ -363,7 +362,6 @@ def check_cls_dataset(dataset, split=""):
         - 'nc' (int): The number of classes in the dataset.
         - 'names' (dict): A dictionary of class names in the dataset.
     """
-
     # Download (optional if dataset=https://file.zip is passed directly)
     if str(dataset).startswith(("http:/", "https:/")):
         dataset = safe_download(dataset, dir=DATASETS_DIR, unzip=True, delete=False)
@@ -438,11 +436,11 @@ class HUBDatasetStats:
        ```python
        from ultralytics.data.utils import HUBDatasetStats

-       stats = HUBDatasetStats(
-       stats = HUBDatasetStats(
-       stats = HUBDatasetStats(
-       stats = HUBDatasetStats(
-       stats = HUBDatasetStats(
+       stats = HUBDatasetStats("path/to/coco8.zip", task="detect")  # detect dataset
+       stats = HUBDatasetStats("path/to/coco8-seg.zip", task="segment")  # segment dataset
+       stats = HUBDatasetStats("path/to/coco8-pose.zip", task="pose")  # pose dataset
+       stats = HUBDatasetStats("path/to/dota8.zip", task="obb")  # OBB dataset
+       stats = HUBDatasetStats("path/to/imagenet10.zip", task="classify")  # classification dataset

        stats.get_json(save=True)
        stats.process_images()
@@ -598,11 +596,10 @@ def compress_one_image(f, f_new=None, max_dim=1920, quality=50):
        from pathlib import Path
        from ultralytics.data.utils import compress_one_image

-       for f in Path(
+       for f in Path("path/to/dataset").rglob("*.jpg"):
            compress_one_image(f)
        ```
    """
-
    try:  # use PIL
        im = Image.open(f)
        r = max_dim / max(im.height, im.width)  # ratio
@@ -635,7 +632,6 @@ def autosplit(path=DATASETS_DIR / "coco8/images", weights=(0.9, 0.1, 0.0), annot
        autosplit()
        ```
    """
-
    path = Path(path)  # images dir
    files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS)  # image files only
    n = len(files)  # number of files
ultralytics/engine/exporter.py
CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """
-Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
+Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.

 Format | `format=argument` | Model
 --- | --- | ---
@@ -533,9 +533,7 @@ class Exporter:

     @try_export
     def export_ncnn(self, prefix=colorstr("NCNN:")):
-        """
-        YOLOv8 NCNN export using PNNX https://github.com/pnnx/pnnx.
-        """
+        """YOLOv8 NCNN export using PNNX https://github.com/pnnx/pnnx."""
         check_requirements("ncnn")
         import ncnn  # noqa

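The `export_ncnn` docstring above is the only NCNN-specific context in this hunk; for completeness, a hedged usage sketch of the NCNN path through the public `export()` API (the model file name is just an example):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # load a pretrained PyTorch model
# Converts via PNNX under the hood, per the export_ncnn docstring above;
# check_requirements("ncnn") installs the ncnn Python package on first use.
ncnn_path = model.export(format="ncnn")
print(ncnn_path)  # path to the exported NCNN model
```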
ultralytics/engine/model.py
CHANGED
@@ -72,11 +72,11 @@ class Model(nn.Module):

     Examples:
         >>> from ultralytics import YOLO
-        >>> model = YOLO(
-        >>> results = model.predict(
-        >>> model.train(data=
+        >>> model = YOLO("yolov8n.pt")
+        >>> results = model.predict("image.jpg")
+        >>> model.train(data="coco128.yaml", epochs=3)
         >>> metrics = model.val()
-        >>> model.export(format=
+        >>> model.export(format="onnx")
     """

     def __init__(
@@ -166,8 +166,8 @@ class Model(nn.Module):
                 Results object.

         Examples:
-            >>> model = YOLO(
-            >>> results = model(
+            >>> model = YOLO("yolov8n.pt")
+            >>> results = model("https://ultralytics.com/images/bus.jpg")
             >>> for r in results:
             ...     print(f"Detected {len(r)} objects in image")
         """
@@ -188,9 +188,9 @@ class Model(nn.Module):
             (bool): True if the model string is a valid Triton Server URL, False otherwise.

         Examples:
-            >>> Model.is_triton_model(
+            >>> Model.is_triton_model("http://localhost:8000/v2/models/yolov8n")
             True
-            >>> Model.is_triton_model(
+            >>> Model.is_triton_model("yolov8n.pt")
             False
         """
         from urllib.parse import urlsplit
@@ -253,7 +253,7 @@ class Model(nn.Module):

         Examples:
             >>> model = Model()
-            >>> model._new(
+            >>> model._new("yolov8n.yaml", task="detect", verbose=True)
         """
         cfg_dict = yaml_model_load(cfg)
         self.cfg = cfg
@@ -284,8 +284,8 @@ class Model(nn.Module):

         Examples:
             >>> model = Model()
-            >>> model._load(
-            >>> model._load(
+            >>> model._load("yolov8n.pt")
+            >>> model._load("path/to/weights.pth", task="detect")
         """
         if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):
             weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"])  # download and return local file
@@ -348,7 +348,7 @@ class Model(nn.Module):
             AssertionError: If the model is not a PyTorch model.

         Examples:
-            >>> model = Model(
+            >>> model = Model("yolov8n.pt")
             >>> model.reset_weights()
         """
         self._check_is_pytorch_model()
@@ -377,8 +377,8 @@ class Model(nn.Module):

         Examples:
             >>> model = Model()
-            >>> model.load(
-            >>> model.load(Path(
+            >>> model.load("yolov8n.pt")
+            >>> model.load(Path("path/to/weights.pt"))
         """
         self._check_is_pytorch_model()
         if isinstance(weights, (str, Path)):
@@ -402,8 +402,8 @@ class Model(nn.Module):
             AssertionError: If the model is not a PyTorch model.

         Examples:
-            >>> model = Model(
-            >>> model.save(
+            >>> model = Model("yolov8n.pt")
+            >>> model.save("my_model.pt")
         """
         self._check_is_pytorch_model()
         from copy import deepcopy
@@ -439,7 +439,7 @@ class Model(nn.Module):
             TypeError: If the model is not a PyTorch model.

         Examples:
-            >>> model = Model(
+            >>> model = Model("yolov8n.pt")
             >>> model.info()  # Prints model summary
             >>> info_list = model.info(detailed=True, verbose=False)  # Returns detailed info as a list
         """
@@ -494,8 +494,8 @@ class Model(nn.Module):
             AssertionError: If the model is not a PyTorch model.

         Examples:
-            >>> model = YOLO(
-            >>> image =
+            >>> model = YOLO("yolov8n.pt")
+            >>> image = "https://ultralytics.com/images/bus.jpg"
             >>> embeddings = model.embed(image)
             >>> print(embeddings[0].shape)
         """
@@ -531,8 +531,8 @@ class Model(nn.Module):
                 Results object.

         Examples:
-            >>> model = YOLO(
-            >>> results = model.predict(source=
+            >>> model = YOLO("yolov8n.pt")
+            >>> results = model.predict(source="path/to/image.jpg", conf=0.25)
             >>> for r in results:
             ...     print(r.boxes.data)  # print detection bounding boxes

@@ -592,8 +592,8 @@ class Model(nn.Module):
             AttributeError: If the predictor does not have registered trackers.

         Examples:
-            >>> model = YOLO(
-            >>> results = model.track(source=
+            >>> model = YOLO("yolov8n.pt")
+            >>> results = model.track(source="path/to/video.mp4", show=True)
             >>> for r in results:
             ...     print(r.boxes.id)  # print tracking IDs

@@ -635,8 +635,8 @@ class Model(nn.Module):
             AssertionError: If the model is not a PyTorch model.

         Examples:
-            >>> model = YOLO(
-            >>> results = model.val(data=
+            >>> model = YOLO("yolov8n.pt")
+            >>> results = model.val(data="coco128.yaml", imgsz=640)
             >>> print(results.box.map)  # Print mAP50-95
         """
         custom = {"rect": True}  # method defaults
@@ -677,8 +677,8 @@ class Model(nn.Module):
             AssertionError: If the model is not a PyTorch model.

         Examples:
-            >>> model = YOLO(
-            >>> results = model.benchmark(data=
+            >>> model = YOLO("yolov8n.pt")
+            >>> results = model.benchmark(data="coco8.yaml", imgsz=640, half=True)
             >>> print(results)
         """
         self._check_is_pytorch_model()
@@ -727,8 +727,8 @@ class Model(nn.Module):
             RuntimeError: If the export process fails due to errors.

         Examples:
-            >>> model = YOLO(
-            >>> model.export(format=
+            >>> model = YOLO("yolov8n.pt")
+            >>> model.export(format="onnx", dynamic=True, simplify=True)
             'path/to/exported/model.onnx'
         """
         self._check_is_pytorch_model()
@@ -782,8 +782,8 @@ class Model(nn.Module):
             ModuleNotFoundError: If the HUB SDK is not installed.

         Examples:
-            >>> model = YOLO(
-            >>> results = model.train(data=
+            >>> model = YOLO("yolov8n.pt")
+            >>> results = model.train(data="coco128.yaml", epochs=3)
         """
         self._check_is_pytorch_model()
         if hasattr(self.session, "model") and self.session.model.id:  # Ultralytics HUB session with loaded model
@@ -847,7 +847,7 @@ class Model(nn.Module):
             AssertionError: If the model is not a PyTorch model.

         Examples:
-            >>> model = YOLO(
+            >>> model = YOLO("yolov8n.pt")
             >>> results = model.tune(use_ray=True, iterations=20)
             >>> print(results)
         """
@@ -907,7 +907,7 @@ class Model(nn.Module):
             AttributeError: If the model or predictor does not have a 'names' attribute.

         Examples:
-            >>> model = YOLO(
+            >>> model = YOLO("yolov8n.pt")
             >>> print(model.names)
             {0: 'person', 1: 'bicycle', 2: 'car', ...}
         """
@@ -957,7 +957,7 @@ class Model(nn.Module):
             (object | None): The transform object of the model if available, otherwise None.

         Examples:
-            >>> model = YOLO(
+            >>> model = YOLO("yolov8n.pt")
             >>> transforms = model.transforms
             >>> if transforms:
             ...     print(f"Model transforms: {transforms}")
@@ -986,9 +986,9 @@ class Model(nn.Module):
         Examples:
             >>> def on_train_start(trainer):
             ...     print("Training is starting!")
-            >>> model = YOLO(
+            >>> model = YOLO("yolov8n.pt")
             >>> model.add_callback("on_train_start", on_train_start)
-            >>> model.train(data=
+            >>> model.train(data="coco128.yaml", epochs=1)
         """
         self.callbacks[event].append(func)

@@ -1005,9 +1005,9 @@ class Model(nn.Module):
             recognized by the Ultralytics callback system.

         Examples:
-            >>> model = YOLO(
-            >>> model.add_callback(
-            >>> model.clear_callback(
+            >>> model = YOLO("yolov8n.pt")
+            >>> model.add_callback("on_train_start", lambda: print("Training started"))
+            >>> model.clear_callback("on_train_start")
             >>> # All callbacks for 'on_train_start' are now removed

         Notes:
@@ -1035,8 +1035,8 @@ class Model(nn.Module):
             modifications, ensuring consistent behavior across different runs or experiments.

         Examples:
-            >>> model = YOLO(
-            >>> model.add_callback(
+            >>> model = YOLO("yolov8n.pt")
+            >>> model.add_callback("on_train_start", custom_function)
             >>> model.reset_callbacks()
             # All callbacks are now reset to their default functions
         """
@@ -1059,7 +1059,7 @@ class Model(nn.Module):
             (dict): A new dictionary containing only the specified include keys from the input arguments.

         Examples:
-            >>> original_args = {
+            >>> original_args = {"imgsz": 640, "data": "coco.yaml", "task": "detect", "batch": 16, "epochs": 100}
             >>> reset_args = Model._reset_ckpt_args(original_args)
             >>> print(reset_args)
             {'imgsz': 640, 'data': 'coco.yaml', 'task': 'detect'}
@@ -1090,9 +1090,9 @@ class Model(nn.Module):
             NotImplementedError: If the specified key is not supported for the current task.

         Examples:
-            >>> model = Model(task=
-            >>> predictor = model._smart_load(
-            >>> trainer = model._smart_load(
+            >>> model = Model(task="detect")
+            >>> predictor = model._smart_load("predictor")
+            >>> trainer = model._smart_load("trainer")

         Notes:
             - This method is typically used internally by other methods of the Model class.
@@ -1128,8 +1128,8 @@ class Model(nn.Module):
         Examples:
             >>> model = Model()
             >>> task_map = model.task_map
-            >>> detect_class_map = task_map[
-            >>> segment_class_map = task_map[
+            >>> detect_class_map = task_map["detect"]
+            >>> segment_class_map = task_map["segment"]

         Note:
             The actual implementation of this method may vary depending on the specific tasks and
ultralytics/engine/predictor.py
CHANGED
@@ -384,7 +384,7 @@ class BasePredictor:
             cv2.imwrite(save_path, im)

     def show(self, p=""):
-        """Display an image in a window using OpenCV imshow
+        """Display an image in a window using the OpenCV imshow function."""
         im = self.plotted_img
         if platform.system() == "Linux" and p not in self.windows:
             self.windows.append(p)
ultralytics/engine/results.py
CHANGED
@@ -143,7 +143,7 @@ class BaseTensor(SimpleClass):

         Examples:
             >>> base_tensor = BaseTensor(torch.randn(3, 4), orig_shape=(480, 640))
-            >>> cuda_tensor = base_tensor.to(
+            >>> cuda_tensor = base_tensor.to("cuda")
             >>> float16_tensor = base_tensor.to(dtype=torch.float16)
         """
         return self.__class__(torch.as_tensor(self.data).to(*args, **kwargs), self.orig_shape)
@@ -223,7 +223,7 @@ class Results(SimpleClass):
         >>> for result in results:
         ...     print(result.boxes)  # Print detection boxes
         ...     result.show()  # Display the annotated image
-        ...     result.save(filename=
+        ...     result.save(filename="result.jpg")  # Save annotated image
     """

     def __init__(
@@ -280,7 +280,7 @@ class Results(SimpleClass):
             (Results): A new Results object containing the specified subset of inference results.

         Examples:
-            >>> results = model(
+            >>> results = model("path/to/image.jpg")  # Perform inference
             >>> single_result = results[0]  # Get the first result
             >>> subset_results = results[1:4]  # Get a slice of results
         """
@@ -319,7 +319,7 @@ class Results(SimpleClass):
             obb (torch.Tensor | None): A tensor of shape (N, 5) containing oriented bounding box coordinates.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> new_boxes = torch.tensor([[100, 100, 200, 200, 0.9, 0]])
             >>> results[0].update(boxes=new_boxes)
         """
@@ -370,7 +370,7 @@ class Results(SimpleClass):
             (Results): A new Results object with all tensor attributes on CPU memory.

         Examples:
-            >>> results = model(
+            >>> results = model("path/to/image.jpg")  # Perform inference
             >>> cpu_result = results[0].cpu()  # Move the first result to CPU
             >>> print(cpu_result.boxes.device)  # Output: cpu
         """
@@ -384,7 +384,7 @@ class Results(SimpleClass):
             (Results): A new Results object with all tensors converted to numpy arrays.

         Examples:
-            >>> results = model(
+            >>> results = model("path/to/image.jpg")
             >>> numpy_result = results[0].numpy()
             >>> type(numpy_result.boxes.data)
             <class 'numpy.ndarray'>
@@ -488,7 +488,7 @@ class Results(SimpleClass):
             (np.ndarray): Annotated image as a numpy array.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> for result in results:
             ...     im = result.plot()
             ...     im.show()
@@ -578,7 +578,7 @@ class Results(SimpleClass):
             **kwargs (Any): Arbitrary keyword arguments to be passed to the `plot()` method.

         Examples:
-            >>> results = model(
+            >>> results = model("path/to/image.jpg")
             >>> results[0].show()  # Display the first result
             >>> for result in results:
             ...     result.show()  # Display all results
@@ -599,12 +599,12 @@ class Results(SimpleClass):
             **kwargs (Any): Arbitrary keyword arguments to be passed to the `plot` method.

         Examples:
-            >>> results = model(
+            >>> results = model("path/to/image.jpg")
             >>> for result in results:
-            ...     result.save(
+            ...     result.save("annotated_image.jpg")
             >>> # Or with custom plot arguments
             >>> for result in results:
-            ...     result.save(
+            ...     result.save("annotated_image.jpg", conf=False, line_width=2)
         """
         if not filename:
             filename = f"results_{Path(self.path).name}"
@@ -623,7 +623,7 @@ class Results(SimpleClass):
             number of detections per class. For classification tasks, it includes the top 5 class probabilities.

         Examples:
-            >>> results = model(
+            >>> results = model("path/to/image.jpg")
             >>> for result in results:
             ...     print(result.verbose())
             2 persons, 1 car, 3 traffic lights,
@@ -660,7 +660,7 @@ class Results(SimpleClass):

         Examples:
             >>> from ultralytics import YOLO
-            >>> model = YOLO(
+            >>> model = YOLO("yolov8n.pt")
             >>> results = model("path/to/image.jpg")
             >>> for result in results:
             ...     result.save_txt("output.txt")
@@ -757,7 +757,7 @@ class Results(SimpleClass):
             task type (classification or detection) and available information (boxes, masks, keypoints).

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> summary = results[0].summary()
             >>> print(summary)
         """
@@ -919,7 +919,7 @@ class Boxes(BaseTensor):
             coordinates in [x1, y1, x2, y2] format, where n is the number of boxes.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> boxes = results[0].boxes
             >>> xyxy = boxes.xyxy
             >>> print(xyxy)
@@ -953,7 +953,7 @@ class Boxes(BaseTensor):
             The shape is (N,), where N is the number of boxes.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> boxes = results[0].boxes
             >>> class_ids = boxes.cls
             >>> print(class_ids)  # tensor([0., 2., 1.])
@@ -970,7 +970,7 @@ class Boxes(BaseTensor):
             otherwise None. Shape is (N,) where N is the number of boxes.

         Examples:
-            >>> results = model.track(
+            >>> results = model.track("path/to/video.mp4")
             >>> for result in results:
             ...     boxes = result.boxes
             ...     if boxes.is_track:
@@ -992,8 +992,8 @@ class Boxes(BaseTensor):
         Convert bounding boxes from [x1, y1, x2, y2] format to [x, y, width, height] format.

         Returns:
-            (torch.Tensor | numpy.ndarray): Boxes in [
-            the
+            (torch.Tensor | numpy.ndarray): Boxes in [x_center, y_center, width, height] format, where x_center, y_center are the coordinates of
+                the center point of the bounding box, width, height are the dimensions of the bounding box and the
                 shape of the returned tensor is (N, 4), where N is the number of boxes.

         Examples:
@@ -1116,7 +1116,7 @@ class Masks(BaseTensor):
             mask contour.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> masks = results[0].masks
             >>> normalized_coords = masks.xyn
             >>> print(normalized_coords[0])  # Normalized coordinates of the first mask
@@ -1141,7 +1141,7 @@ class Masks(BaseTensor):
             number of points in the segment.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> masks = results[0].masks
             >>> xy_coords = masks.xy
             >>> print(len(xy_coords))  # Number of masks
@@ -1223,7 +1223,7 @@ class Keypoints(BaseTensor):
             the number of detections and K is the number of keypoints per detection.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> keypoints = results[0].keypoints
             >>> xy = keypoints.xy
             >>> print(xy.shape)  # (N, K, 2)
@@ -1388,7 +1388,7 @@ class Probs(BaseTensor):
             (torch.Tensor | numpy.ndarray): A tensor containing the confidence score of the top 1 class.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")  # classify an image
             >>> probs = results[0].probs  # get classification probabilities
             >>> top1_confidence = probs.top1conf  # get confidence of top 1 class
             >>> print(f"Top 1 class confidence: {top1_confidence.item():.4f}")
@@ -1410,7 +1410,7 @@ class Probs(BaseTensor):
             top 5 predicted classes, sorted in descending order of probability.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> probs = results[0].probs
             >>> top5_conf = probs.top5conf
             >>> print(top5_conf)  # Prints confidence scores for top 5 classes
@@ -1497,7 +1497,7 @@ class OBB(BaseTensor):
             [x_center, y_center, width, height, rotation]. The shape is (N, 5) where N is the number of boxes.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> obb = results[0].obb
             >>> xywhr = obb.xywhr
             >>> print(xywhr.shape)
@@ -1518,7 +1518,7 @@ class OBB(BaseTensor):
             for N detections, where each score is in the range [0, 1].

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> obb_result = results[0].obb
             >>> confidence_scores = obb_result.conf
             >>> print(confidence_scores)
@@ -1535,7 +1535,7 @@ class OBB(BaseTensor):
             bounding box. The shape is (N,), where N is the number of boxes.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg")
             >>> result = results[0]
             >>> obb = result.obb
             >>> class_values = obb.cls
@@ -1553,7 +1553,7 @@ class OBB(BaseTensor):
             oriented bounding box. Returns None if tracking IDs are not available.

         Examples:
-            >>> results = model(
+            >>> results = model("image.jpg", tracker=True)  # Run inference with tracking
             >>> for result in results:
             ...     if result.obb is not None:
             ...         track_ids = result.obb.id
@@ -1620,8 +1620,8 @@ class OBB(BaseTensor):
         Examples:
             >>> import torch
             >>> from ultralytics import YOLO
-            >>> model = YOLO(
-            >>> results = model(
+            >>> model = YOLO("yolov8n-obb.pt")
+            >>> results = model("path/to/image.jpg")
             >>> for result in results:
             ...     obb = result.obb
             ...     if obb is not None: