ultralytics 8.3.88__py3-none-any.whl → 8.3.89__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ultralytics/__init__.py +1 -1
- ultralytics/data/base.py +7 -9
- ultralytics/data/converter.py +30 -29
- ultralytics/data/utils.py +20 -28
- ultralytics/engine/model.py +2 -2
- ultralytics/engine/tuner.py +11 -21
- ultralytics/hub/__init__.py +13 -17
- ultralytics/models/fastsam/model.py +4 -7
- ultralytics/models/nas/model.py +8 -14
- ultralytics/models/nas/predict.py +7 -9
- ultralytics/models/nas/val.py +7 -9
- ultralytics/models/rtdetr/predict.py +6 -9
- ultralytics/models/rtdetr/train.py +5 -8
- ultralytics/models/rtdetr/val.py +5 -8
- ultralytics/models/yolo/classify/predict.py +6 -9
- ultralytics/models/yolo/classify/train.py +5 -8
- ultralytics/models/yolo/classify/val.py +5 -8
- ultralytics/models/yolo/detect/predict.py +6 -9
- ultralytics/models/yolo/detect/train.py +5 -8
- ultralytics/models/yolo/detect/val.py +5 -8
- ultralytics/models/yolo/obb/predict.py +6 -9
- ultralytics/models/yolo/obb/train.py +5 -8
- ultralytics/models/yolo/obb/val.py +10 -15
- ultralytics/models/yolo/pose/predict.py +6 -9
- ultralytics/models/yolo/pose/train.py +5 -8
- ultralytics/models/yolo/pose/val.py +12 -17
- ultralytics/models/yolo/segment/predict.py +6 -9
- ultralytics/models/yolo/segment/train.py +5 -8
- ultralytics/models/yolo/segment/val.py +10 -15
- ultralytics/models/yolo/world/train.py +5 -8
- ultralytics/models/yolo/world/train_world.py +21 -25
- ultralytics/nn/modules/__init__.py +9 -12
- ultralytics/nn/tasks.py +7 -12
- ultralytics/utils/__init__.py +5 -8
- ultralytics/utils/checks.py +25 -35
- ultralytics/utils/downloads.py +25 -48
- ultralytics/utils/instance.py +6 -8
- ultralytics/utils/ops.py +5 -9
- ultralytics/utils/plotting.py +8 -14
- ultralytics/utils/torch_utils.py +23 -33
- ultralytics/utils/tuner.py +5 -9
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.89.dist-info}/METADATA +2 -2
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.89.dist-info}/RECORD +47 -47
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.89.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.89.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.89.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.88.dist-info → ultralytics-8.3.89.dist-info}/top_level.txt +0 -0
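Nearly every hunk below makes the same docstring change: an old fenced example block (the surviving removed lines show bare statements inside ``` fences) is replaced by a Google-style "Examples:" section written as doctest lines. A minimal sketch of the new convention, using a placeholder class rather than any file from this diff:

    # Minimal sketch of the docstring style adopted in 8.3.89; the class and method are placeholders.
    class ExamplePredictor:
        """
        A placeholder predictor used only to illustrate the docstring layout.

        Examples:
            >>> predictor = ExamplePredictor()
            >>> predictor.predict_cli()
        """

        def predict_cli(self):
            """Placeholder method so the doctest names resolve."""
            pass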
ultralytics/__init__.py
CHANGED
ultralytics/data/base.py
CHANGED
@@ -312,15 +312,13 @@ class BaseDataset(Dataset):
         """
         Users can customize augmentations here.
 
-        [old example lines not recovered]
-            return Compose([])
-            ```
+        Examples:
+            >>> if self.augment:
+            ...     # Training transforms
+            ...     return Compose([])
+            >>> else:
+            ...     # Val transforms
+            ...     return Compose([])
         """
         raise NotImplementedError
ultralytics/data/converter.py
CHANGED
@@ -124,15 +124,16 @@ def coco80_to_coco91_class():
     Converts 80-index (val2014) to 91-index (paper).
     For details see https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/.
 
-    [old example lines not recovered]
-        x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]
+    Examples:
+        >>> import numpy as np
+        >>> a = np.loadtxt("data/coco.names", dtype="str", delimiter="\n")
+        >>> b = np.loadtxt("data/coco_paper.names", dtype="str", delimiter="\n")
+
+        Convert the darknet to COCO format
+        >>> x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]
+
+        Convert the COCO to darknet format
+        >>> x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]
     """
     return [
         1,
@@ -237,15 +238,20 @@ def convert_coco(
     cls91to80 (bool, optional): Whether to map 91 COCO class IDs to the corresponding 80 COCO class IDs.
     lvis (bool, optional): Whether to convert data in lvis dataset way.
 
-    [old example lines not recovered]
+    Examples:
+        >>> from ultralytics.data.converter import convert_coco
+
+        Convert COCO annotations to YOLO format
+        >>> convert_coco("../datasets/coco/annotations/", use_segments=True, use_keypoints=False, cls91to80=False)
 
-    [old example lines not recovered]
-        convert_coco(
+        Convert LVIS annotations to YOLO format
+        >>> convert_coco(
+        >>> "../datasets/lvis/annotations/",
+        ...     use_segments=True,
+        ...     use_keypoints=False,
+        ...     cls91to80=False,
+        ...     lvis=True
+        ... )
 
     Output:
         Generates output files in the specified output directory.
@@ -353,13 +359,11 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
     output_dir (str): The path to the directory where the converted YOLO segmentation masks will be stored.
     classes (int): Total classes in the dataset i.e. for COCO classes=80
 
-    [old example lines not recovered]
-        from ultralytics.data.converter import convert_segment_masks_to_yolo_seg
+    Examples:
+        >>> from ultralytics.data.converter import convert_segment_masks_to_yolo_seg
 
-    [old example lines not recovered]
-        convert_segment_masks_to_yolo_seg("path/to/masks_directory", "path/to/output/directory", classes=80)
-        ```
+        The classes here is the total classes in the dataset, for COCO dataset we have 80 classes
+        >>> convert_segment_masks_to_yolo_seg("path/to/masks_directory", "path/to/output/directory", classes=80)
 
     Notes:
         The expected directory structure for the masks is:
@@ -429,12 +433,9 @@ def convert_dota_to_yolo_obb(dota_root_path: str):
     Args:
         dota_root_path (str): The root directory path of the DOTA dataset.
 
-    [old example lines not recovered]
-        convert_dota_to_yolo_obb("path/to/DOTA")
-        ```
+    Examples:
+        >>> from ultralytics.data.converter import convert_dota_to_yolo_obb
+        >>> convert_dota_to_yolo_obb("path/to/DOTA")
 
     Notes:
         The directory structure assumed for the DOTA dataset:
ultralytics/data/utils.py
CHANGED
@@ -478,21 +478,19 @@ class HUBDatasetStats:
         task (str): Dataset task. Options are 'detect', 'segment', 'pose', 'classify'. Default is 'detect'.
         autodownload (bool): Attempt to download dataset if not found locally. Default is False.
 
-    [old line not recovered]
+    Note:
         Download *.zip files from https://github.com/ultralytics/hub/tree/main/example_datasets
-    [old example lines not recovered]
-        stats = HUBDatasetStats("path/to/coco8.zip", task="detect")  # detect dataset
-        stats = HUBDatasetStats("path/to/coco8-seg.zip", task="segment")  # segment dataset
-        stats = HUBDatasetStats("path/to/coco8-pose.zip", task="pose")  # pose dataset
-        stats = HUBDatasetStats("path/to/dota8.zip", task="obb")  # OBB dataset
-        stats = HUBDatasetStats("path/to/imagenet10.zip", task="classify")  # classification dataset
-        stats.process_images()
-        ```
+        i.e. https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip for coco8.zip.
+
+    Examples:
+        >>> from ultralytics.data.utils import HUBDatasetStats
+        >>> stats = HUBDatasetStats("path/to/coco8.zip", task="detect")  # detect dataset
+        >>> stats = HUBDatasetStats("path/to/coco8-seg.zip", task="segment")  # segment dataset
+        >>> stats = HUBDatasetStats("path/to/coco8-pose.zip", task="pose")  # pose dataset
+        >>> stats = HUBDatasetStats("path/to/dota8.zip", task="obb")  # OBB dataset
+        >>> stats = HUBDatasetStats("path/to/imagenet10.zip", task="classify")  # classification dataset
+        >>> stats.get_json(save=True)
+        >>> stats.process_images()
     """
 
     def __init__(self, path="coco8.yaml", task="detect", autodownload=False):
@@ -639,14 +637,11 @@ def compress_one_image(f, f_new=None, max_dim=1920, quality=50):
        max_dim (int, optional): The maximum dimension (width or height) of the output image. Default is 1920 pixels.
        quality (int, optional): The image compression quality as a percentage. Default is 50%.
 
-    [old example lines not recovered]
-        for f in Path("path/to/dataset").rglob("*.jpg"):
-            compress_one_image(f)
-        ```
+    Examples:
+        >>> from pathlib import Path
+        >>> from ultralytics.data.utils import compress_one_image
+        >>> for f in Path("path/to/dataset").rglob("*.jpg"):
+        >>>    compress_one_image(f)
     """
     try:  # use PIL
         im = Image.open(f)
@@ -673,12 +668,9 @@ def autosplit(path=DATASETS_DIR / "coco8/images", weights=(0.9, 0.1, 0.0), annot
        weights (list | tuple, optional): Train, validation, and test split fractions. Defaults to (0.9, 0.1, 0.0).
        annotated_only (bool, optional): If True, only images with an associated txt file are used. Defaults to False.
 
-    [old example lines not recovered]
-        autosplit()
-        ```
+    Examples:
+        >>> from ultralytics.data.utils import autosplit
+        >>> autosplit()
     """
     path = Path(path)  # images dir
     files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS)  # image files only
ultralytics/engine/model.py
CHANGED
@@ -1146,8 +1146,8 @@ class Model(torch.nn.Module):
            (Model): The model instance with evaluation mode set.
 
        Examples:
-            [old example lines not recovered]
+            >>> model = YOLO("yolo11n.pt")
+            >>> model.eval()
        """
        self.model.eval()
        return self
ultralytics/engine/tuner.py
CHANGED
@@ -7,14 +7,11 @@ Hyperparameter tuning is the process of systematically searching for the optimal
 that yield the best model performance. This is particularly crucial in deep learning models like YOLO,
 where small changes in hyperparameters can lead to significant differences in model accuracy and efficiency.
 
-[old line not recovered]
+Examples:
     Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
-    [old example lines not recovered]
-    model = YOLO("yolo11n.pt")
-    model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
-    ```
+    >>> from ultralytics import YOLO
+    >>> model = YOLO("yolo11n.pt")
+    >>> model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
 """
 
 import random
@@ -49,22 +46,15 @@ class Tuner:
        __call__():
            Executes the hyperparameter evolution across multiple iterations.
 
-    [old line not recovered]
+    Examples:
        Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
-        [old example lines not recovered]
-        ```
+        >>> from ultralytics import YOLO
+        >>> model = YOLO("yolo11n.pt")
+        >>> model.tune(
+        ...     data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False
+        ... )
        Tune with custom search space.
-        [old example lines not recovered]
-        from ultralytics import YOLO
-        model = YOLO("yolo11n.pt")
-        model.tune(space={key1: val1, key2: val2})  # custom search space dictionary
-        ```
+        >>> model.tune(space={key1: val1, key2: val2})  # custom search space dictionary
     """
 
     def __init__(self, args=DEFAULT_CFG, _callbacks=None):
ultralytics/hub/__init__.py
CHANGED
@@ -71,12 +71,9 @@ def logout():
     """
     Log out of Ultralytics HUB by removing the API key from the settings file. To log in again, use 'yolo login'.
 
-    [old example lines not recovered]
-        hub.logout()
-        ```
+    Examples:
+        >>> from ultralytics import hub
+        >>> hub.logout()
     """
     SETTINGS["api_key"] = ""
     LOGGER.info(f"{PREFIX}logged out ✅. To log in again, use 'yolo login'.")
@@ -129,18 +126,17 @@ def check_dataset(path: str, task: str) -> None:
        path (str): Path to data.zip (with data.yaml inside data.zip).
        task (str): Dataset task. Options are 'detect', 'segment', 'pose', 'classify', 'obb'.
 
-    [old line not recovered]
+    Note:
        Download *.zip files from https://github.com/ultralytics/hub/tree/main/example_datasets
-    [old example lines not recovered]
-        check_dataset("path/to/coco8.zip", task="detect")  # detect dataset
-        check_dataset("path/to/coco8-seg.zip", task="segment")  # segment dataset
-        check_dataset("path/to/coco8-pose.zip", task="pose")  # pose dataset
-        check_dataset("path/to/dota8.zip", task="obb")  # OBB dataset
-        check_dataset("path/to/imagenet10.zip", task="classify")  # classification dataset
-        ```
+        i.e. https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip for coco8.zip.
+
+    Examples:
+        >>> from ultralytics.hub import check_dataset
+        >>> check_dataset("path/to/coco8.zip", task="detect")  # detect dataset
+        >>> check_dataset("path/to/coco8-seg.zip", task="segment")  # segment dataset
+        >>> check_dataset("path/to/coco8-pose.zip", task="pose")  # pose dataset
+        >>> check_dataset("path/to/dota8.zip", task="obb")  # OBB dataset
+        >>> check_dataset("path/to/imagenet10.zip", task="classify")  # classification dataset
     """
     HUBDatasetStats(path=path, task=task).get_json()
     LOGGER.info(f"Checks completed correctly ✅. Upload this dataset to {HUB_WEB_ROOT}/datasets/.")
ultralytics/models/fastsam/model.py
CHANGED
@@ -12,13 +12,10 @@ class FastSAM(Model):
     """
     FastSAM model interface.
 
-    [old example lines not recovered]
-        model = FastSAM("last.pt")
-        results = model.predict("ultralytics/assets/bus.jpg")
-        ```
+    Examples:
+        >>> from ultralytics import FastSAM
+        >>> model = FastSAM("last.pt")
+        >>> results = model.predict("ultralytics/assets/bus.jpg")
     """
 
     def __init__(self, model="FastSAM-x.pt"):
ultralytics/models/nas/model.py
CHANGED
@@ -2,13 +2,10 @@
 """
 YOLO-NAS model interface.
 
-[old example lines not recovered]
-    model = NAS("yolo_nas_s")
-    results = model.predict("ultralytics/assets/bus.jpg")
-    ```
+Examples:
+    >>> from ultralytics import NAS
+    >>> model = NAS("yolo_nas_s")
+    >>> results = model.predict("ultralytics/assets/bus.jpg")
 """
 
 from pathlib import Path
@@ -31,13 +28,10 @@ class NAS(Model):
     This class provides an interface for the YOLO-NAS models and extends the `Model` class from Ultralytics engine.
     It is designed to facilitate the task of object detection using pre-trained or custom-trained YOLO-NAS models.
 
-    [old example lines not recovered]
-        model = NAS("yolo_nas_s")
-        results = model.predict("ultralytics/assets/bus.jpg")
-        ```
+    Examples:
+        >>> from ultralytics import NAS
+        >>> model = NAS("yolo_nas_s")
+        >>> results = model.predict("ultralytics/assets/bus.jpg")
 
     Attributes:
         model (str): Path to the pre-trained model or model name. Defaults to 'yolo_nas_s.pt'.
ultralytics/models/nas/predict.py
CHANGED
@@ -18,15 +18,13 @@ class NASPredictor(BasePredictor):
     Attributes:
         args (Namespace): Namespace containing various configurations for post-processing.
 
-    [old example lines not recovered]
-        results = predictor.postprocess(raw_preds, img, orig_imgs)
-        ```
+    Examples:
+        >>> from ultralytics import NAS
+        >>> model = NAS("yolo_nas_s")
+        >>> predictor = model.predictor
+
+        Assumes that raw_preds, img, orig_imgs are available
+        >>> results = predictor.postprocess(raw_preds, img, orig_imgs)
 
     Note:
         Typically, this class is not instantiated directly. It is used internally within the `NAS` class.
ultralytics/models/nas/val.py
CHANGED
@@ -20,15 +20,13 @@ class NASValidator(DetectionValidator):
        args (Namespace): Namespace containing various configurations for post-processing, such as confidence and IoU.
        lb (torch.Tensor): Optional tensor for multilabel NMS.
 
-    [old example lines not recovered]
-        final_preds = validator.postprocess(raw_preds)
-        ```
+    Examples:
+        >>> from ultralytics import NAS
+        >>> model = NAS("yolo_nas_s")
+        >>> validator = model.validator
+
+        Assumes that raw_preds are available
+        >>> final_preds = validator.postprocess(raw_preds)
 
     Note:
         This class is generally not instantiated directly but is used internally within the `NAS` class.
ultralytics/models/rtdetr/predict.py
CHANGED
@@ -16,15 +16,12 @@ class RTDETRPredictor(BasePredictor):
     This class leverages the power of Vision Transformers to provide real-time object detection while maintaining
     high accuracy. It supports key features like efficient hybrid encoding and IoU-aware query selection.
 
-    [old example lines not recovered]
-        predictor = RTDETRPredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.rtdetr import RTDETRPredictor
+        >>> args = dict(model="rtdetr-l.pt", source=ASSETS)
+        >>> predictor = RTDETRPredictor(overrides=args)
+        >>> predictor.predict_cli()
 
     Attributes:
         imgsz (int): Image size for inference (must be square and scale-filled).
ultralytics/models/rtdetr/train.py
CHANGED
@@ -21,14 +21,11 @@ class RTDETRTrainer(DetectionTrainer):
        - F.grid_sample used in RT-DETR does not support the `deterministic=True` argument.
        - AMP training can lead to NaN outputs and may produce errors during bipartite graph matching.
 
-    [old example lines not recovered]
-        trainer = RTDETRTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.rtdetr.train import RTDETRTrainer
+        >>> args = dict(model="rtdetr-l.yaml", data="coco8.yaml", imgsz=640, epochs=3)
+        >>> trainer = RTDETRTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def get_model(self, cfg=None, weights=None, verbose=True):
ultralytics/models/rtdetr/val.py
CHANGED
@@ -58,14 +58,11 @@ class RTDETRValidator(DetectionValidator):
     The class allows building of an RTDETR-specific dataset for validation, applies Non-maximum suppression for
     post-processing, and updates evaluation metrics accordingly.
 
-    [old example lines not recovered]
-        validator = RTDETRValidator(args=args)
-        validator()
-        ```
+    Examples:
+        >>> from ultralytics.models.rtdetr import RTDETRValidator
+        >>> args = dict(model="rtdetr-l.pt", data="coco8.yaml")
+        >>> validator = RTDETRValidator(args=args)
+        >>> validator()
 
     Note:
         For further details on the attributes and methods, refer to the parent DetectionValidator class.
ultralytics/models/yolo/classify/predict.py
CHANGED
@@ -16,15 +16,12 @@ class ClassificationPredictor(BasePredictor):
     Notes:
         - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
 
-    [old example lines not recovered]
-        predictor = ClassificationPredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.yolo.classify import ClassificationPredictor
+        >>> args = dict(model="yolo11n-cls.pt", source=ASSETS)
+        >>> predictor = ClassificationPredictor(overrides=args)
+        >>> predictor.predict_cli()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/classify/train.py
CHANGED
@@ -20,14 +20,11 @@ class ClassificationTrainer(BaseTrainer):
     Notes:
         - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
 
-    [old example lines not recovered]
-        trainer = ClassificationTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.classify import ClassificationTrainer
+        >>> args = dict(model="yolo11n-cls.pt", data="imagenet10", epochs=3)
+        >>> trainer = ClassificationTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/classify/val.py
CHANGED
@@ -16,14 +16,11 @@ class ClassificationValidator(BaseValidator):
     Notes:
         - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
 
-    [old example lines not recovered]
-        validator = ClassificationValidator(args=args)
-        validator()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.classify import ClassificationValidator
+        >>> args = dict(model="yolo11n-cls.pt", data="imagenet10")
+        >>> validator = ClassificationValidator(args=args)
+        >>> validator()
     """
 
     def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
ultralytics/models/yolo/detect/predict.py
CHANGED
@@ -9,15 +9,12 @@ class DetectionPredictor(BasePredictor):
     """
     A class extending the BasePredictor class for prediction based on a detection model.
 
-    [old example lines not recovered]
-        predictor = DetectionPredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.yolo.detect import DetectionPredictor
+        >>> args = dict(model="yolo11n.pt", source=ASSETS)
+        >>> predictor = DetectionPredictor(overrides=args)
+        >>> predictor.predict_cli()
     """
 
     def postprocess(self, preds, img, orig_imgs, **kwargs):
ultralytics/models/yolo/detect/train.py
CHANGED
@@ -20,14 +20,11 @@ class DetectionTrainer(BaseTrainer):
     """
     A class extending the BaseTrainer class for training based on a detection model.
 
-    [old example lines not recovered]
-        trainer = DetectionTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.detect import DetectionTrainer
+        >>> args = dict(model="yolo11n.pt", data="coco8.yaml", epochs=3)
+        >>> trainer = DetectionTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def build_dataset(self, img_path, mode="train", batch=None):
ultralytics/models/yolo/detect/val.py
CHANGED
@@ -18,14 +18,11 @@ class DetectionValidator(BaseValidator):
     """
     A class extending the BaseValidator class for validation based on a detection model.
 
-    [old example lines not recovered]
-        validator = DetectionValidator(args=args)
-        validator()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.detect import DetectionValidator
+        >>> args = dict(model="yolo11n.pt", data="coco8.yaml")
+        >>> validator = DetectionValidator(args=args)
+        >>> validator()
     """
 
     def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
ultralytics/models/yolo/obb/predict.py
CHANGED
@@ -11,15 +11,12 @@ class OBBPredictor(DetectionPredictor):
     """
     A class extending the DetectionPredictor class for prediction based on an Oriented Bounding Box (OBB) model.
 
-    [old example lines not recovered]
-        predictor = OBBPredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.yolo.obb import OBBPredictor
+        >>> args = dict(model="yolo11n-obb.pt", source=ASSETS)
+        >>> predictor = OBBPredictor(overrides=args)
+        >>> predictor.predict_cli()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):