ultralytics 8.3.87__py3-none-any.whl → 8.3.89__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_solutions.py +34 -45
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +46 -39
- ultralytics/data/augment.py +2 -2
- ultralytics/data/base.py +7 -9
- ultralytics/data/converter.py +30 -29
- ultralytics/data/utils.py +20 -28
- ultralytics/engine/model.py +2 -2
- ultralytics/engine/tuner.py +11 -21
- ultralytics/hub/__init__.py +13 -17
- ultralytics/models/fastsam/model.py +4 -7
- ultralytics/models/nas/model.py +8 -14
- ultralytics/models/nas/predict.py +7 -9
- ultralytics/models/nas/val.py +7 -9
- ultralytics/models/rtdetr/predict.py +6 -9
- ultralytics/models/rtdetr/train.py +5 -8
- ultralytics/models/rtdetr/val.py +5 -8
- ultralytics/models/yolo/classify/predict.py +6 -9
- ultralytics/models/yolo/classify/train.py +5 -8
- ultralytics/models/yolo/classify/val.py +5 -8
- ultralytics/models/yolo/detect/predict.py +6 -9
- ultralytics/models/yolo/detect/train.py +5 -8
- ultralytics/models/yolo/detect/val.py +5 -8
- ultralytics/models/yolo/obb/predict.py +6 -9
- ultralytics/models/yolo/obb/train.py +5 -8
- ultralytics/models/yolo/obb/val.py +10 -15
- ultralytics/models/yolo/pose/predict.py +6 -9
- ultralytics/models/yolo/pose/train.py +5 -8
- ultralytics/models/yolo/pose/val.py +12 -17
- ultralytics/models/yolo/segment/predict.py +6 -9
- ultralytics/models/yolo/segment/train.py +5 -8
- ultralytics/models/yolo/segment/val.py +10 -15
- ultralytics/models/yolo/world/train.py +5 -8
- ultralytics/models/yolo/world/train_world.py +21 -25
- ultralytics/nn/modules/__init__.py +9 -12
- ultralytics/nn/tasks.py +7 -12
- ultralytics/solutions/__init__.py +14 -6
- ultralytics/solutions/ai_gym.py +39 -28
- ultralytics/solutions/analytics.py +22 -18
- ultralytics/solutions/distance_calculation.py +25 -25
- ultralytics/solutions/heatmap.py +40 -38
- ultralytics/solutions/instance_segmentation.py +69 -0
- ultralytics/solutions/object_blurrer.py +89 -0
- ultralytics/solutions/object_counter.py +35 -33
- ultralytics/solutions/object_cropper.py +84 -0
- ultralytics/solutions/parking_management.py +21 -9
- ultralytics/solutions/queue_management.py +20 -39
- ultralytics/solutions/region_counter.py +54 -51
- ultralytics/solutions/security_alarm.py +40 -30
- ultralytics/solutions/solutions.py +594 -16
- ultralytics/solutions/speed_estimation.py +34 -31
- ultralytics/solutions/streamlit_inference.py +34 -28
- ultralytics/solutions/trackzone.py +29 -18
- ultralytics/solutions/vision_eye.py +69 -0
- ultralytics/trackers/utils/kalman_filter.py +23 -23
- ultralytics/utils/__init__.py +5 -8
- ultralytics/utils/checks.py +25 -35
- ultralytics/utils/downloads.py +25 -48
- ultralytics/utils/instance.py +9 -11
- ultralytics/utils/ops.py +5 -9
- ultralytics/utils/plotting.py +8 -428
- ultralytics/utils/torch_utils.py +23 -33
- ultralytics/utils/tuner.py +5 -9
- {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/METADATA +2 -2
- {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/RECORD +69 -65
- {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.87.dist-info → ultralytics-8.3.89.dist-info}/top_level.txt +0 -0
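Beyond the docstring cleanup shown in the hunks below, this release adds four new solution modules (instance_segmentation.py, object_blurrer.py, object_cropper.py, vision_eye.py) and a much larger solutions/solutions.py base class. The following is a minimal sketch of how one of the new modules would typically be driven, assuming InstanceSegmentation is exported from `ultralytics.solutions` and is callable once per frame like the existing solution classes; the video path and weights file are illustrative placeholders, not taken from this diff.

# Hedged sketch only: assumes the new InstanceSegmentation solution follows the
# per-frame call pattern of the existing ultralytics.solutions classes.
import cv2
from ultralytics import solutions

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder video source
isegment = solutions.InstanceSegmentation(model="yolo11n-seg.pt", show=False)

while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    results = isegment(frame)  # segment the current frame

cap.release()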
ultralytics/models/fastsam/model.py CHANGED
@@ -12,13 +12,10 @@ class FastSAM(Model):
     """
     FastSAM model interface.
 
-    Example:
-        ```python
-        from ultralytics import FastSAM
-
-        model = FastSAM("last.pt")
-        results = model.predict("ultralytics/assets/bus.jpg")
-        ```
+    Examples:
+        >>> from ultralytics import FastSAM
+        >>> model = FastSAM("last.pt")
+        >>> results = model.predict("ultralytics/assets/bus.jpg")
     """
 
     def __init__(self, model="FastSAM-x.pt"):
ultralytics/models/nas/model.py CHANGED
@@ -2,13 +2,10 @@
 """
 YOLO-NAS model interface.
 
-Example:
-    ```python
-    from ultralytics import NAS
-
-    model = NAS("yolo_nas_s")
-    results = model.predict("ultralytics/assets/bus.jpg")
-    ```
+Examples:
+    >>> from ultralytics import NAS
+    >>> model = NAS("yolo_nas_s")
+    >>> results = model.predict("ultralytics/assets/bus.jpg")
 """
 
 from pathlib import Path
@@ -31,13 +28,10 @@ class NAS(Model):
     This class provides an interface for the YOLO-NAS models and extends the `Model` class from Ultralytics engine.
     It is designed to facilitate the task of object detection using pre-trained or custom-trained YOLO-NAS models.
 
-    Example:
-        ```python
-        from ultralytics import NAS
-
-        model = NAS("yolo_nas_s")
-        results = model.predict("ultralytics/assets/bus.jpg")
-        ```
+    Examples:
+        >>> from ultralytics import NAS
+        >>> model = NAS("yolo_nas_s")
+        >>> results = model.predict("ultralytics/assets/bus.jpg")
 
     Attributes:
         model (str): Path to the pre-trained model or model name. Defaults to 'yolo_nas_s.pt'.
ultralytics/models/nas/predict.py CHANGED
@@ -18,15 +18,13 @@ class NASPredictor(BasePredictor):
     Attributes:
         args (Namespace): Namespace containing various configurations for post-processing.
 
-    Example:
-        ```python
-        from ultralytics import NAS
-
-        model = NAS("yolo_nas_s")
-        predictor = model.predictor
-        # Assumes that raw_preds, img, orig_imgs are available
-        results = predictor.postprocess(raw_preds, img, orig_imgs)
-        ```
+    Examples:
+        >>> from ultralytics import NAS
+        >>> model = NAS("yolo_nas_s")
+        >>> predictor = model.predictor
+
+        Assumes that raw_preds, img, orig_imgs are available
+        >>> results = predictor.postprocess(raw_preds, img, orig_imgs)
 
     Note:
         Typically, this class is not instantiated directly. It is used internally within the `NAS` class.
ultralytics/models/nas/val.py CHANGED
@@ -20,15 +20,13 @@ class NASValidator(DetectionValidator):
         args (Namespace): Namespace containing various configurations for post-processing, such as confidence and IoU.
         lb (torch.Tensor): Optional tensor for multilabel NMS.
 
-    Example:
-        ```python
-        from ultralytics import NAS
-
-        model = NAS("yolo_nas_s")
-        validator = model.validator
-        # Assumes that raw_preds are available
-        final_preds = validator.postprocess(raw_preds)
-        ```
+    Examples:
+        >>> from ultralytics import NAS
+        >>> model = NAS("yolo_nas_s")
+        >>> validator = model.validator
+
+        Assumes that raw_preds are available
+        >>> final_preds = validator.postprocess(raw_preds)
 
     Note:
         This class is generally not instantiated directly but is used internally within the `NAS` class.
ultralytics/models/rtdetr/predict.py CHANGED
@@ -16,15 +16,12 @@ class RTDETRPredictor(BasePredictor):
     This class leverages the power of Vision Transformers to provide real-time object detection while maintaining
     high accuracy. It supports key features like efficient hybrid encoding and IoU-aware query selection.
 
-    Example:
-        ```python
-        from ultralytics.utils import ASSETS
-        from ultralytics.models.rtdetr import RTDETRPredictor
-
-        args = dict(model="rtdetr-l.pt", source=ASSETS)
-        predictor = RTDETRPredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.rtdetr import RTDETRPredictor
+        >>> args = dict(model="rtdetr-l.pt", source=ASSETS)
+        >>> predictor = RTDETRPredictor(overrides=args)
+        >>> predictor.predict_cli()
 
     Attributes:
         imgsz (int): Image size for inference (must be square and scale-filled).
ultralytics/models/rtdetr/train.py CHANGED
@@ -21,14 +21,11 @@ class RTDETRTrainer(DetectionTrainer):
     - F.grid_sample used in RT-DETR does not support the `deterministic=True` argument.
     - AMP training can lead to NaN outputs and may produce errors during bipartite graph matching.
 
-    Example:
-        ```python
-        from ultralytics.models.rtdetr.train import RTDETRTrainer
-
-        args = dict(model="rtdetr-l.yaml", data="coco8.yaml", imgsz=640, epochs=3)
-        trainer = RTDETRTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.rtdetr.train import RTDETRTrainer
+        >>> args = dict(model="rtdetr-l.yaml", data="coco8.yaml", imgsz=640, epochs=3)
+        >>> trainer = RTDETRTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def get_model(self, cfg=None, weights=None, verbose=True):
ultralytics/models/rtdetr/val.py CHANGED
@@ -58,14 +58,11 @@ class RTDETRValidator(DetectionValidator):
     The class allows building of an RTDETR-specific dataset for validation, applies Non-maximum suppression for
     post-processing, and updates evaluation metrics accordingly.
 
-    Example:
-        ```python
-        from ultralytics.models.rtdetr import RTDETRValidator
-
-        args = dict(model="rtdetr-l.pt", data="coco8.yaml")
-        validator = RTDETRValidator(args=args)
-        validator()
-        ```
+    Examples:
+        >>> from ultralytics.models.rtdetr import RTDETRValidator
+        >>> args = dict(model="rtdetr-l.pt", data="coco8.yaml")
+        >>> validator = RTDETRValidator(args=args)
+        >>> validator()
 
     Note:
         For further details on the attributes and methods, refer to the parent DetectionValidator class.
ultralytics/models/yolo/classify/predict.py CHANGED
@@ -16,15 +16,12 @@ class ClassificationPredictor(BasePredictor):
     Notes:
         - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
 
-    Example:
-        ```python
-        from ultralytics.utils import ASSETS
-        from ultralytics.models.yolo.classify import ClassificationPredictor
-
-        args = dict(model="yolo11n-cls.pt", source=ASSETS)
-        predictor = ClassificationPredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.yolo.classify import ClassificationPredictor
+        >>> args = dict(model="yolo11n-cls.pt", source=ASSETS)
+        >>> predictor = ClassificationPredictor(overrides=args)
+        >>> predictor.predict_cli()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/classify/train.py CHANGED
@@ -20,14 +20,11 @@ class ClassificationTrainer(BaseTrainer):
     Notes:
         - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.classify import ClassificationTrainer
-
-        args = dict(model="yolo11n-cls.pt", data="imagenet10", epochs=3)
-        trainer = ClassificationTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.classify import ClassificationTrainer
+        >>> args = dict(model="yolo11n-cls.pt", data="imagenet10", epochs=3)
+        >>> trainer = ClassificationTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/classify/val.py CHANGED
@@ -16,14 +16,11 @@ class ClassificationValidator(BaseValidator):
     Notes:
         - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.classify import ClassificationValidator
-
-        args = dict(model="yolo11n-cls.pt", data="imagenet10")
-        validator = ClassificationValidator(args=args)
-        validator()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.classify import ClassificationValidator
+        >>> args = dict(model="yolo11n-cls.pt", data="imagenet10")
+        >>> validator = ClassificationValidator(args=args)
+        >>> validator()
     """
 
     def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
ultralytics/models/yolo/detect/predict.py CHANGED
@@ -9,15 +9,12 @@ class DetectionPredictor(BasePredictor):
     """
     A class extending the BasePredictor class for prediction based on a detection model.
 
-    Example:
-        ```python
-        from ultralytics.utils import ASSETS
-        from ultralytics.models.yolo.detect import DetectionPredictor
-
-        args = dict(model="yolo11n.pt", source=ASSETS)
-        predictor = DetectionPredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.yolo.detect import DetectionPredictor
+        >>> args = dict(model="yolo11n.pt", source=ASSETS)
+        >>> predictor = DetectionPredictor(overrides=args)
+        >>> predictor.predict_cli()
     """
 
     def postprocess(self, preds, img, orig_imgs, **kwargs):
ultralytics/models/yolo/detect/train.py CHANGED
@@ -20,14 +20,11 @@ class DetectionTrainer(BaseTrainer):
     """
     A class extending the BaseTrainer class for training based on a detection model.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.detect import DetectionTrainer
-
-        args = dict(model="yolo11n.pt", data="coco8.yaml", epochs=3)
-        trainer = DetectionTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.detect import DetectionTrainer
+        >>> args = dict(model="yolo11n.pt", data="coco8.yaml", epochs=3)
+        >>> trainer = DetectionTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def build_dataset(self, img_path, mode="train", batch=None):
ultralytics/models/yolo/detect/val.py CHANGED
@@ -18,14 +18,11 @@ class DetectionValidator(BaseValidator):
     """
     A class extending the BaseValidator class for validation based on a detection model.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.detect import DetectionValidator
-
-        args = dict(model="yolo11n.pt", data="coco8.yaml")
-        validator = DetectionValidator(args=args)
-        validator()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.detect import DetectionValidator
+        >>> args = dict(model="yolo11n.pt", data="coco8.yaml")
+        >>> validator = DetectionValidator(args=args)
+        >>> validator()
     """
 
     def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
ultralytics/models/yolo/obb/predict.py CHANGED
@@ -11,15 +11,12 @@ class OBBPredictor(DetectionPredictor):
     """
     A class extending the DetectionPredictor class for prediction based on an Oriented Bounding Box (OBB) model.
 
-    Example:
-        ```python
-        from ultralytics.utils import ASSETS
-        from ultralytics.models.yolo.obb import OBBPredictor
-
-        args = dict(model="yolo11n-obb.pt", source=ASSETS)
-        predictor = OBBPredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.yolo.obb import OBBPredictor
+        >>> args = dict(model="yolo11n-obb.pt", source=ASSETS)
+        >>> predictor = OBBPredictor(overrides=args)
+        >>> predictor.predict_cli()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/obb/train.py CHANGED
@@ -11,14 +11,11 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
     """
     A class extending the DetectionTrainer class for training based on an Oriented Bounding Box (OBB) model.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.obb import OBBTrainer
-
-        args = dict(model="yolo11n-obb.pt", data="dota8.yaml", epochs=3)
-        trainer = OBBTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.obb import OBBTrainer
+        >>> args = dict(model="yolo11n-obb.pt", data="dota8.yaml", epochs=3)
+        >>> trainer = OBBTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/obb/val.py CHANGED
@@ -14,14 +14,11 @@ class OBBValidator(DetectionValidator):
     """
     A class extending the DetectionValidator class for validation based on an Oriented Bounding Box (OBB) model.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.obb import OBBValidator
-
-        args = dict(model="yolo11n-obb.pt", data="dota8.yaml")
-        validator = OBBValidator(args=args)
-        validator(model=args["model"])
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.obb import OBBValidator
+        >>> args = dict(model="yolo11n-obb.pt", data="dota8.yaml")
+        >>> validator = OBBValidator(args=args)
+        >>> validator(model=args["model"])
     """
 
     def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
@@ -51,13 +48,11 @@ class OBBValidator(DetectionValidator):
             (torch.Tensor): The correct prediction matrix with shape (N, 10), which includes 10 IoU (Intersection over
                 Union) levels for each detection, indicating the accuracy of predictions compared to the ground truth.
 
-        Example:
-            ```python
-            detections = torch.rand(100, 7)  # 100 sample detections
-            gt_bboxes = torch.rand(50, 5)  # 50 sample ground truth boxes
-            gt_cls = torch.randint(0, 5, (50,))  # 50 ground truth class labels
-            correct_matrix = OBBValidator._process_batch(detections, gt_bboxes, gt_cls)
-            ```
+        Examples:
+            >>> detections = torch.rand(100, 7)  # 100 sample detections
+            >>> gt_bboxes = torch.rand(50, 5)  # 50 sample ground truth boxes
+            >>> gt_cls = torch.randint(0, 5, (50,))  # 50 ground truth class labels
+            >>> correct_matrix = OBBValidator._process_batch(detections, gt_bboxes, gt_cls)
 
         Note:
            This method relies on `batch_probiou` to calculate IoU between detections and ground truth bounding boxes.
ultralytics/models/yolo/pose/predict.py CHANGED
@@ -8,15 +8,12 @@ class PosePredictor(DetectionPredictor):
     """
     A class extending the DetectionPredictor class for prediction based on a pose model.
 
-    Example:
-        ```python
-        from ultralytics.utils import ASSETS
-        from ultralytics.models.yolo.pose import PosePredictor
-
-        args = dict(model="yolo11n-pose.pt", source=ASSETS)
-        predictor = PosePredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.yolo.pose import PosePredictor
+        >>> args = dict(model="yolo11n-pose.pt", source=ASSETS)
+        >>> predictor = PosePredictor(overrides=args)
+        >>> predictor.predict_cli()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/pose/train.py CHANGED
@@ -12,14 +12,11 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
     """
     A class extending the DetectionTrainer class for training based on a pose model.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.pose import PoseTrainer
-
-        args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml", epochs=3)
-        trainer = PoseTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.pose import PoseTrainer
+        >>> args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml", epochs=3)
+        >>> trainer = PoseTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/pose/val.py CHANGED
@@ -16,14 +16,11 @@ class PoseValidator(DetectionValidator):
     """
    A class extending the DetectionValidator class for validation based on a pose model.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.pose import PoseValidator
-
-        args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml")
-        validator = PoseValidator(args=args)
-        validator()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.pose import PoseValidator
+        >>> args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml")
+        >>> validator = PoseValidator(args=args)
+        >>> validator()
     """
 
     def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
@@ -161,15 +158,13 @@ class PoseValidator(DetectionValidator):
             (torch.Tensor): A tensor with shape (N, 10) representing the correct prediction matrix for 10 IoU levels,
                 where N is the number of detections.
 
-        Example:
-            ```python
-            detections = torch.rand(100, 6)  # 100 predictions: (x1, y1, x2, y2, conf, class)
-            gt_bboxes = torch.rand(50, 4)  # 50 ground truth boxes: (x1, y1, x2, y2)
-            gt_cls = torch.randint(0, 2, (50,))  # 50 ground truth class indices
-            pred_kpts = torch.rand(100, 51)  # 100 predicted keypoints
-            gt_kpts = torch.rand(50, 51)  # 50 ground truth keypoints
-            correct_preds = _process_batch(detections, gt_bboxes, gt_cls, pred_kpts, gt_kpts)
-            ```
+        Examples:
+            >>> detections = torch.rand(100, 6)  # 100 predictions: (x1, y1, x2, y2, conf, class)
+            >>> gt_bboxes = torch.rand(50, 4)  # 50 ground truth boxes: (x1, y1, x2, y2)
+            >>> gt_cls = torch.randint(0, 2, (50,))  # 50 ground truth class indices
+            >>> pred_kpts = torch.rand(100, 51)  # 100 predicted keypoints
+            >>> gt_kpts = torch.rand(50, 51)  # 50 ground truth keypoints
+            >>> correct_preds = _process_batch(detections, gt_bboxes, gt_cls, pred_kpts, gt_kpts)
 
         Note:
            `0.53` scale factor used in area computation is referenced from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384.
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -9,15 +9,12 @@ class SegmentationPredictor(DetectionPredictor):
     """
     A class extending the DetectionPredictor class for prediction based on a segmentation model.
 
-    Example:
-        ```python
-        from ultralytics.utils import ASSETS
-        from ultralytics.models.yolo.segment import SegmentationPredictor
-
-        args = dict(model="yolo11n-seg.pt", source=ASSETS)
-        predictor = SegmentationPredictor(overrides=args)
-        predictor.predict_cli()
-        ```
+    Examples:
+        >>> from ultralytics.utils import ASSETS
+        >>> from ultralytics.models.yolo.segment import SegmentationPredictor
+        >>> args = dict(model="yolo11n-seg.pt", source=ASSETS)
+        >>> predictor = SegmentationPredictor(overrides=args)
+        >>> predictor.predict_cli()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/segment/train.py CHANGED
@@ -12,14 +12,11 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
     """
     A class extending the DetectionTrainer class for training based on a segmentation model.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.segment import SegmentationTrainer
-
-        args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml", epochs=3)
-        trainer = SegmentationTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.segment import SegmentationTrainer
+        >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml", epochs=3)
+        >>> trainer = SegmentationTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/segment/val.py CHANGED
@@ -18,14 +18,11 @@ class SegmentationValidator(DetectionValidator):
     """
     A class extending the DetectionValidator class for validation based on a segmentation model.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.segment import SegmentationValidator
-
-        args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
-        validator = SegmentationValidator(args=args)
-        validator()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.segment import SegmentationValidator
+        >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
+        >>> validator = SegmentationValidator(args=args)
+        >>> validator()
     """
 
     def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
@@ -184,13 +181,11 @@ class SegmentationValidator(DetectionValidator):
            - If `masks` is True, the function computes IoU between predicted and ground truth masks.
            - If `overlap` is True and `masks` is True, overlapping masks are taken into account when computing IoU.
 
-        Example:
-            ```python
-            detections = torch.tensor([[25, 30, 200, 300, 0.8, 1], [50, 60, 180, 290, 0.75, 0]])
-            gt_bboxes = torch.tensor([[24, 29, 199, 299], [55, 65, 185, 295]])
-            gt_cls = torch.tensor([1, 0])
-            correct_preds = validator._process_batch(detections, gt_bboxes, gt_cls)
-            ```
+        Examples:
+            >>> detections = torch.tensor([[25, 30, 200, 300, 0.8, 1], [50, 60, 180, 290, 0.75, 0]])
+            >>> gt_bboxes = torch.tensor([[24, 29, 199, 299], [55, 65, 185, 295]])
+            >>> gt_cls = torch.tensor([1, 0])
+            >>> correct_preds = validator._process_batch(detections, gt_bboxes, gt_cls)
         """
         if masks:
             if overlap:
ultralytics/models/yolo/world/train.py CHANGED
@@ -25,14 +25,11 @@ class WorldTrainer(yolo.detect.DetectionTrainer):
     """
     A class to fine-tune a world model on a close-set dataset.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.world import WorldModel
-
-        args = dict(model="yolov8s-world.pt", data="coco8.yaml", epochs=3)
-        trainer = WorldTrainer(overrides=args)
-        trainer.train()
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.world import WorldModel
+        >>> args = dict(model="yolov8s-world.pt", data="coco8.yaml", epochs=3)
+        >>> trainer = WorldTrainer(overrides=args)
+        >>> trainer.train()
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
ultralytics/models/yolo/world/train_world.py CHANGED
@@ -11,31 +11,27 @@ class WorldTrainerFromScratch(WorldTrainer):
     """
     A class extending the WorldTrainer class for training a world model from scratch on open-set dataset.
 
-    Example:
-        ```python
-        from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
-        from ultralytics import YOLOWorld
-
-        data = dict(
-            train=dict(
-                yolo_data=["Objects365.yaml"],
-                grounding_data=[
-                    dict(
-                        img_path="../datasets/flickr30k/images",
-                        json_file="../datasets/flickr30k/final_flickr_separateGT_train.json",
-                    ),
-                    dict(
-                        img_path="../datasets/GQA/images",
-                        json_file="../datasets/GQA/final_mixed_train_no_coco.json",
-                    ),
-                ],
-            ),
-            val=dict(yolo_data=["lvis.yaml"]),
-        )
-
-        model = YOLOWorld("yolov8s-worldv2.yaml")
-        model.train(data=data, trainer=WorldTrainerFromScratch)
-        ```
+    Examples:
+        >>> from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
+        >>> from ultralytics import YOLOWorld
+        >>> data = dict(
+        ...     train=dict(
+        ...         yolo_data=["Objects365.yaml"],
+        ...         grounding_data=[
+        ...             dict(
+        ...                 img_path="../datasets/flickr30k/images",
+        ...                 json_file="../datasets/flickr30k/final_flickr_separateGT_train.json",
+        ...             ),
+        ...             dict(
+        ...                 img_path="../datasets/GQA/images",
+        ...                 json_file="../datasets/GQA/final_mixed_train_no_coco.json",
+        ...             ),
+        ...         ],
+        ...     ),
+        ...     val=dict(yolo_data=["lvis.yaml"]),
+        ... )
+        >>> model = YOLOWorld("yolov8s-worldv2.yaml")
+        >>> model.train(data=data, trainer=WorldTrainerFromScratch)
     """
 
     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
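The common thread in the hunks above is a docstring convention change: fenced ```python example blocks become doctest-style Examples: sections with >>> prompts. One side effect is that the standard-library doctest module can now discover these examples. A small sketch of that discovery step follows, purely illustrative: the examples carry no expected output and executing them would load model weights, so this snippet only counts them rather than running them.

# Illustrative only: list the ">>> " examples that doctest can now discover
# in one of the modules touched by this diff. Nothing is executed here.
import doctest

from ultralytics.models.yolo.detect import predict  # module with converted Examples

finder = doctest.DocTestFinder()
for test in finder.find(predict):
    if test.examples:
        print(f"{test.name}: {len(test.examples)} doctest example(s)")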