ultralytics 8.3.64__py3-none-any.whl → 8.3.65__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_exports.py +1 -1
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +96 -88
- ultralytics/data/build.py +5 -1
- ultralytics/engine/exporter.py +50 -17
- ultralytics/engine/model.py +5 -5
- ultralytics/engine/predictor.py +16 -14
- ultralytics/engine/results.py +1 -1
- ultralytics/engine/trainer.py +2 -2
- ultralytics/engine/tuner.py +2 -2
- ultralytics/engine/validator.py +16 -14
- ultralytics/models/yolo/classify/predict.py +1 -1
- ultralytics/models/yolo/classify/train.py +1 -1
- ultralytics/models/yolo/classify/val.py +1 -1
- ultralytics/models/yolo/obb/predict.py +1 -1
- ultralytics/models/yolo/obb/train.py +1 -1
- ultralytics/models/yolo/obb/val.py +1 -1
- ultralytics/models/yolo/pose/predict.py +1 -1
- ultralytics/models/yolo/pose/train.py +1 -1
- ultralytics/models/yolo/pose/val.py +1 -1
- ultralytics/models/yolo/segment/predict.py +1 -1
- ultralytics/models/yolo/segment/train.py +1 -1
- ultralytics/models/yolo/segment/val.py +1 -1
- ultralytics/nn/autobackend.py +34 -4
- ultralytics/nn/tasks.py +57 -53
- ultralytics/solutions/ai_gym.py +1 -1
- ultralytics/solutions/heatmap.py +1 -1
- ultralytics/solutions/parking_management.py +1 -1
- ultralytics/solutions/solutions.py +1 -1
- ultralytics/trackers/utils/matching.py +2 -2
- ultralytics/utils/__init__.py +15 -1
- ultralytics/utils/benchmarks.py +25 -19
- ultralytics/utils/checks.py +21 -2
- ultralytics/utils/downloads.py +1 -1
- ultralytics/utils/instance.py +1 -1
- ultralytics/utils/loss.py +2 -2
- ultralytics/utils/tuner.py +2 -2
- {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/METADATA +1 -2
- {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/RECORD +43 -43
- {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/top_level.txt +0 -0
ultralytics/models/yolo/segment/predict.py
CHANGED
@@ -14,7 +14,7 @@ class SegmentationPredictor(DetectionPredictor):
         from ultralytics.utils import ASSETS
         from ultralytics.models.yolo.segment import SegmentationPredictor
 
-        args = dict(model="
+        args = dict(model="yolo11n-seg.pt", source=ASSETS)
         predictor = SegmentationPredictor(overrides=args)
         predictor.predict_cli()
         ```
ultralytics/models/yolo/segment/train.py
CHANGED
@@ -16,7 +16,7 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
         ```python
         from ultralytics.models.yolo.segment import SegmentationTrainer
 
-        args = dict(model="
+        args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml", epochs=3)
         trainer = SegmentationTrainer(overrides=args)
         trainer.train()
         ```
ultralytics/models/yolo/segment/val.py
CHANGED
@@ -22,7 +22,7 @@ class SegmentationValidator(DetectionValidator):
         ```python
         from ultralytics.models.yolo.segment import SegmentationValidator
 
-        args = dict(model="
+        args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
         validator = SegmentationValidator(args=args)
         validator()
         ```
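Taken together, these three docstring updates describe the same workflow against the new YOLO11 default weights. A minimal sketch chaining them, using only the calls shown in the docstrings above:

```python
from ultralytics.models.yolo.segment import (
    SegmentationPredictor,
    SegmentationTrainer,
    SegmentationValidator,
)
from ultralytics.utils import ASSETS

# Predict on the bundled sample images
predictor = SegmentationPredictor(overrides=dict(model="yolo11n-seg.pt", source=ASSETS))
predictor.predict_cli()

# Train briefly on the small COCO8-seg dataset
trainer = SegmentationTrainer(overrides=dict(model="yolo11n-seg.pt", data="coco8-seg.yaml", epochs=3))
trainer.train()

# Validate against the same dataset
validator = SegmentationValidator(args=dict(model="yolo11n-seg.pt", data="coco8-seg.yaml"))
validator()
```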
ultralytics/nn/autobackend.py
CHANGED
@@ -13,8 +13,8 @@ import torch
 import torch.nn as nn
 from PIL import Image
 
-from ultralytics.utils import ARM64, IS_JETSON, IS_RASPBERRYPI, LINUX, LOGGER, ROOT, yaml_load
-from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml
+from ultralytics.utils import ARM64, IS_JETSON, IS_RASPBERRYPI, LINUX, LOGGER, PYTHON_VERSION, ROOT, yaml_load
+from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml, is_rockchip
 from ultralytics.utils.downloads import attempt_download_asset, is_url
 
 
@@ -60,7 +60,7 @@ class AutoBackend(nn.Module):
 
         Supported Formats and Naming Conventions:
             | Format                | File Suffix       |
-            |
+            | --------------------- | ----------------- |
             | PyTorch               | *.pt              |
             | TorchScript           | *.torchscript     |
             | ONNX Runtime          | *.onnx            |
@@ -75,6 +75,8 @@ class AutoBackend(nn.Module):
             | PaddlePaddle          | *_paddle_model/   |
             | MNN                   | *.mnn             |
             | NCNN                  | *_ncnn_model/     |
+            | IMX                   | *_imx_model/      |
+            | RKNN                  | *_rknn_model/     |
 
     This class offers dynamic backend switching capabilities based on the input model format, making it easier to deploy
     models across various platforms.
@@ -124,10 +126,11 @@ class AutoBackend(nn.Module):
             mnn,
             ncnn,
             imx,
+            rknn,
             triton,
         ) = self._model_type(w)
         fp16 &= pt or jit or onnx or xml or engine or nn_module or triton  # FP16
-        nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)
+        nhwc = coreml or saved_model or pb or tflite or edgetpu or rknn  # BHWC formats (vs torch BCWH)
         stride = 32  # default stride
         model, metadata, task = None, None, None
 
@@ -262,6 +265,11 @@ class AutoBackend(nn.Module):
         # TensorRT
         elif engine:
             LOGGER.info(f"Loading {w} for TensorRT inference...")
+
+            if IS_JETSON and PYTHON_VERSION <= "3.8.0":
+                # fix error: `np.bool` was a deprecated alias for the builtin `bool` for JetPack 4 with Python <= 3.8.0
+                check_requirements("numpy==1.23.5")
+
             try:
                 import tensorrt as trt  # noqa https://developer.nvidia.com/nvidia-tensorrt-download
             except ImportError:
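One reading note on the new guard: PYTHON_VERSION is the plain string from platform.python_version() (defined in ultralytics/utils/__init__.py), so `<=` here is a lexicographic string comparison rather than a semantic-version one. It is sufficient for the Python 3.6-3.8 interpreters that JetPack 4 ships, which is the case the comment targets:

```python
# Lexicographic comparison, as the guard above performs it
assert "3.6.9" <= "3.8.0"    # True: JetPack 4 interpreters are caught as intended
assert "3.10.12" <= "3.8.0"  # also True, since "1" < "8" character by character
```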
@@ -461,6 +469,22 @@ class AutoBackend(nn.Module):
             model = TritonRemoteModel(w)
             metadata = model.metadata
 
+        # RKNN
+        elif rknn:
+            if not is_rockchip():
+                raise OSError("RKNN inference is only supported on Rockchip devices.")
+            LOGGER.info(f"Loading {w} for RKNN inference...")
+            check_requirements("rknn-toolkit-lite2")
+            from rknnlite.api import RKNNLite
+
+            w = Path(w)
+            if not w.is_file():  # if not *.rknn
+                w = next(w.rglob("*.rknn"))  # get *.rknn file from *_rknn_model dir
+            rknn_model = RKNNLite()
+            rknn_model.load_rknn(w)
+            ret = rknn_model.init_runtime()
+            metadata = Path(w).parent / "metadata.yaml"
+
         # Any other format (unsupported)
         else:
             from ultralytics.engine.exporter import export_formats
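For orientation, a hedged sketch of the path that reaches this branch; the `rknn` format name and the `*_rknn_model/` directory come from the tables elsewhere in this diff, and the inference half assumes an actual Rockchip board where is_rockchip() returns True:

```python
from ultralytics import YOLO

# On a development machine: export using the format added in this release
YOLO("yolo11n.pt").export(format="rknn")  # writes yolo11n_rknn_model/, per the formats table

# On a Rockchip board (e.g. rk3588): loading the directory routes through the branch above
model = YOLO("yolo11n_rknn_model")
results = model("https://ultralytics.com/images/bus.jpg")
```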
@@ -647,6 +671,12 @@ class AutoBackend(nn.Module):
             im = im.cpu().numpy()  # torch to numpy
             y = self.model(im)
 
+        # RKNN
+        elif self.rknn:
+            im = (im.cpu().numpy() * 255).astype("uint8")
+            im = im if isinstance(im, (list, tuple)) else [im]
+            y = self.rknn_model.inference(inputs=im)
+
         # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
         else:
             im = im.cpu().numpy()
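A note on the input handling here: rknn was added to the NHWC set earlier in this diff, and this branch rescales the normalized float tensor back to 8-bit before calling RKNNLite, which suggests the exported models take uint8 input. A standalone sketch of just that conversion (shapes illustrative):

```python
import numpy as np

im = np.random.rand(1, 640, 640, 3).astype(np.float32)  # NHWC float in [0, 1], as after Ultralytics preprocessing
im_uint8 = (im * 255).astype("uint8")  # rescale to 8-bit, mirroring the new branch
inputs = im_uint8 if isinstance(im_uint8, (list, tuple)) else [im_uint8]  # inference() takes a list of arrays
```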
ultralytics/nn/tasks.py
CHANGED
@@ -296,10 +296,10 @@ class BaseModel(nn.Module):
 
 
 class DetectionModel(BaseModel):
-    """
+    """YOLO detection model."""
 
-    def __init__(self, cfg="
-        """Initialize the
+    def __init__(self, cfg="yolo11n.yaml", ch=3, nc=None, verbose=True):  # model, input channels, number of classes
+        """Initialize the YOLO detection model with the given config and parameters."""
         super().__init__()
         self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg)  # cfg dict
         if self.yaml["backbone"][0][2] == "Silence":
@@ -388,10 +388,10 @@ class DetectionModel(BaseModel):
 
 
 class OBBModel(DetectionModel):
-    """
+    """YOLO Oriented Bounding Box (OBB) model."""
 
-    def __init__(self, cfg="
-        """Initialize
+    def __init__(self, cfg="yolo11n-obb.yaml", ch=3, nc=None, verbose=True):
+        """Initialize YOLO OBB model with given config and parameters."""
         super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
 
     def init_criterion(self):
@@ -400,9 +400,9 @@ class OBBModel(DetectionModel):
 
 
 class SegmentationModel(DetectionModel):
-    """
+    """YOLO segmentation model."""
 
-    def __init__(self, cfg="
+    def __init__(self, cfg="yolo11n-seg.yaml", ch=3, nc=None, verbose=True):
         """Initialize YOLOv8 segmentation model with given config and parameters."""
         super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)
 
@@ -412,9 +412,9 @@ class SegmentationModel(DetectionModel):
 
 
 class PoseModel(DetectionModel):
-    """
+    """YOLO pose model."""
 
-    def __init__(self, cfg="
+    def __init__(self, cfg="yolo11n-pose.yaml", ch=3, nc=None, data_kpt_shape=(None, None), verbose=True):
         """Initialize YOLOv8 Pose model."""
         if not isinstance(cfg, dict):
             cfg = yaml_model_load(cfg)  # load model YAML
@@ -429,9 +429,9 @@ class PoseModel(DetectionModel):
 
 
 class ClassificationModel(BaseModel):
-    """
+    """YOLO classification model."""
 
-    def __init__(self, cfg="
+    def __init__(self, cfg="yolo11n-cls.yaml", ch=3, nc=None, verbose=True):
         """Init ClassificationModel with YAML, channels, number of classes, verbose flag."""
         super().__init__()
         self._from_yaml(cfg, ch, nc, verbose)
@@ -842,14 +842,14 @@ def torch_safe_load(weight, safe_only=False):
                 f"with https://github.com/ultralytics/yolov5.\nThis model is NOT forwards compatible with "
                 f"YOLOv8 at https://github.com/ultralytics/ultralytics."
                 f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
-                f"run a command with an official Ultralytics model, i.e. 'yolo predict model=
+                f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo11n.pt'"
             )
         ) from e
         LOGGER.warning(
             f"WARNING ⚠️ {weight} appears to require '{e.name}', which is not in Ultralytics requirements."
             f"\nAutoInstall will run now for '{e.name}' but this feature will be removed in the future."
             f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
-            f"run a command with an official Ultralytics model, i.e. 'yolo predict model=
+            f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo11n.pt'"
         )
         check_requirements(e.name)  # install missing module
         ckpt = torch.load(file, map_location="cpu")
@@ -954,20 +954,8 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
     LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10}  {'module':<45}{'arguments':<30}")
     ch = [ch]
     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
-
-
-            getattr(torch.nn, m[3:])
-            if "nn." in m
-            else getattr(__import__("torchvision").ops, m[16:])
-            if "torchvision.ops." in m
-            else globals()[m]
-        )  # get module
-        for j, a in enumerate(args):
-            if isinstance(a, str):
-                with contextlib.suppress(ValueError):
-                    args[j] = locals()[a] if a in locals() else ast.literal_eval(a)
-        n = n_ = max(round(n * depth), 1) if n > 1 else n  # depth gain
-        if m in {
+    base_modules = frozenset(
+        {
             Classify,
             Conv,
             ConvTranspose,
@@ -1001,33 +989,49 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
             PSA,
             SCDown,
             C2fCIB,
-        }
+        }
+    )
+    repeat_modules = frozenset(  # modules with 'repeat' arguments
+        {
+            BottleneckCSP,
+            C1,
+            C2,
+            C2f,
+            C3k2,
+            C2fAttn,
+            C3,
+            C3TR,
+            C3Ghost,
+            C3x,
+            RepC3,
+            C2fPSA,
+            C2fCIB,
+            C2PSA,
+        }
+    )
+    for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]):  # from, number, module, args
+        m = (
+            getattr(torch.nn, m[3:])
+            if "nn." in m
+            else getattr(__import__("torchvision").ops, m[16:])
+            if "torchvision.ops." in m
+            else globals()[m]
+        )  # get module
+        for j, a in enumerate(args):
+            if isinstance(a, str):
+                with contextlib.suppress(ValueError):
+                    args[j] = locals()[a] if a in locals() else ast.literal_eval(a)
+        n = n_ = max(round(n * depth), 1) if n > 1 else n  # depth gain
+        if m in base_modules:
             c1, c2 = ch[f], args[0]
             if c2 != nc:  # if c2 not equal to number of classes (i.e. for Classify() output)
                 c2 = make_divisible(min(c2, max_channels) * width, 8)
-            if m is C2fAttn:
-                args[1] = make_divisible(min(args[1], max_channels // 2) * width, 8)
-                args[2] = int(
-                    max(round(min(args[2], max_channels // 2 // 32)) * width, 1) if args[2] > 1 else args[2]
-                )  # num heads
+            if m is C2fAttn:  # set 1) embed channels and 2) num heads
+                args[1] = make_divisible(min(args[1], max_channels // 2) * width, 8)
+                args[2] = int(max(round(min(args[2], max_channels // 2 // 32)) * width, 1) if args[2] > 1 else args[2])
 
             args = [c1, c2, *args[1:]]
-            if m in
-                BottleneckCSP,
-                C1,
-                C2,
-                C2f,
-                C3k2,
-                C2fAttn,
-                C3,
-                C3TR,
-                C3Ghost,
-                C3x,
-                RepC3,
-                C2fPSA,
-                C2fCIB,
-                C2PSA,
-            }:
+            if m in repeat_modules:
                 args.insert(2, n)  # number of repeats
                 n = 1
             if m is C3k2:  # for M/L/X sizes
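The restructuring here is behavior-preserving: the two lookup sets are hoisted out of the per-layer loop and built once as frozensets instead of being re-created as set literals on every iteration. A minimal illustration of the pattern:

```python
import torch.nn as nn

BASE = frozenset({nn.Conv2d, nn.BatchNorm2d})  # constructed once, immutable thereafter

for m in (nn.Conv2d, nn.ReLU, nn.BatchNorm2d):
    print(m in BASE)  # True, False, True -- same O(1) hash lookup as a set literal
```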
@@ -1036,7 +1040,7 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
                 args[3] = True
         elif m is AIFI:
             args = [ch[f], *args]
-        elif m in {HGStem, HGBlock}:
+        elif m in frozenset({HGStem, HGBlock}):
             c1, cm, c2 = ch[f], args[0], args[1]
             args = [c1, cm, c2, *args[2:]]
             if m is HGBlock:
@@ -1048,7 +1052,7 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
             args = [ch[f]]
         elif m is Concat:
             c2 = sum(ch[x] for x in f)
-        elif m in {Detect, WorldDetect, Segment, Pose, OBB, ImagePoolingAttn, v10Detect}:
+        elif m in frozenset({Detect, WorldDetect, Segment, Pose, OBB, ImagePoolingAttn, v10Detect}):
             args.append([ch[x] for x in f])
             if m is Segment:
                 args[2] = make_divisible(min(args[2], max_channels) * width, 8)
@@ -1056,7 +1060,7 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
             m.legacy = legacy
         elif m is RTDETRDecoder:  # special case, channels arg must be passed in index 1
             args.insert(1, [ch[x] for x in f])
-        elif m in {CBLinear, TorchVision, Index}:
+        elif m in frozenset({CBLinear, TorchVision, Index}):
             c2 = args[0]
             c1 = ch[f]
             args = [c1, c2, *args[1:]]
ultralytics/solutions/ai_gym.py
CHANGED
@@ -25,7 +25,7 @@ class AIGym(BaseSolution):
         monitor: Processes a frame to detect poses, calculate angles, and count repetitions.
 
     Examples:
-        >>> gym = AIGym(model="
+        >>> gym = AIGym(model="yolo11n-pose.pt")
         >>> image = cv2.imread("gym_scene.jpg")
         >>> processed_image = gym.monitor(image)
         >>> cv2.imshow("Processed Image", processed_image)
ultralytics/solutions/heatmap.py
CHANGED
@@ -26,7 +26,7 @@ class Heatmap(ObjectCounter):
 
     Examples:
         >>> from ultralytics.solutions import Heatmap
-        >>> heatmap = Heatmap(model="
+        >>> heatmap = Heatmap(model="yolo11n.pt", colormap=cv2.COLORMAP_JET)
        >>> frame = cv2.imread("frame.jpg")
        >>> processed_frame = heatmap.generate_heatmap(frame)
    """
ultralytics/solutions/parking_management.py
CHANGED
@@ -178,7 +178,7 @@ class ParkingManagement(BaseSolution):
 
     Examples:
         >>> from ultralytics.solutions import ParkingManagement
-        >>> parking_manager = ParkingManagement(model="
+        >>> parking_manager = ParkingManagement(model="yolo11n.pt", json_file="parking_regions.json")
        >>> print(f"Occupied spaces: {parking_manager.pr_info['Occupancy']}")
        >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
    """
ultralytics/solutions/solutions.py
CHANGED
@@ -35,7 +35,7 @@ class BaseSolution:
         display_output: Display the results of processing, including showing frames or saving results.
 
     Examples:
-        >>> solution = BaseSolution(model="
+        >>> solution = BaseSolution(model="yolo11n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
        >>> solution.initialize_region()
        >>> image = cv2.imread("image.jpg")
        >>> solution.extract_tracks(image)
ultralytics/trackers/utils/matching.py
CHANGED
@@ -55,8 +55,8 @@ def linear_assignment(cost_matrix: np.ndarray, thresh: float, use_lap: bool = True):
         unmatched_a = list(np.arange(cost_matrix.shape[0]))
         unmatched_b = list(np.arange(cost_matrix.shape[1]))
     else:
-        unmatched_a = list(
-        unmatched_b = list(
+        unmatched_a = list(frozenset(np.arange(cost_matrix.shape[0])) - frozenset(matches[:, 0]))
+        unmatched_b = list(frozenset(np.arange(cost_matrix.shape[1])) - frozenset(matches[:, 1]))
 
     return matches, unmatched_a, unmatched_b
 
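A toy run of the scipy branch this hunk rewrites (use_lap=False), assuming scipy is installed; the set differences above are what produce the unmatched index lists:

```python
import numpy as np

from ultralytics.trackers.utils.matching import linear_assignment

cost = np.array([[0.1, 0.9], [0.8, 0.2], [0.5, 0.5]])
matches, unmatched_a, unmatched_b = linear_assignment(cost, thresh=0.6, use_lap=False)
print(matches)      # [[0 0], [1 1]]: rows 0 and 1 pair off under the 0.6 cost gate
print(unmatched_a)  # [2]: the leftover row, i.e. the frozenset difference above
print(unmatched_b)  # []
```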
ultralytics/utils/__init__.py
CHANGED
@@ -51,6 +51,20 @@ PYTHON_VERSION = platform.python_version()
 TORCH_VERSION = torch.__version__
 TORCHVISION_VERSION = importlib.metadata.version("torchvision")  # faster than importing torchvision
 IS_VSCODE = os.environ.get("TERM_PROGRAM", False) == "vscode"
+RKNN_CHIPS = frozenset(
+    {
+        "rk3588",
+        "rk3576",
+        "rk3566",
+        "rk3568",
+        "rk3562",
+        "rv1103",
+        "rv1106",
+        "rv1103b",
+        "rv1106b",
+        "rk2118",
+    }
+)  # Rockchip processors available for export
 HELP_MSG = """
     Examples for running Ultralytics:
 
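This new constant is the allow-list that is_rockchip() in ultralytics/utils/checks.py (further down in this diff) tests the device-tree SoC name against:

```python
from ultralytics.utils import RKNN_CHIPS

print("rk3588" in RKNN_CHIPS)   # True: a supported Rockchip target
print("bcm2712" in RKNN_CHIPS)  # False: the Raspberry Pi 5 SoC is not a Rockchip chip
```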
@@ -1227,7 +1241,7 @@ class SettingsManager(JSONDict):
 
     def _validate_settings(self):
         """Validate the current settings and reset if necessary."""
-        correct_keys =
+        correct_keys = frozenset(self.keys()) == frozenset(self.defaults.keys())
         correct_types = all(isinstance(self.get(k), type(v)) for k, v in self.defaults.items())
         correct_version = self.get("settings_version", "") == self.version
 
ultralytics/utils/benchmarks.py
CHANGED
@@ -4,25 +4,26 @@ Benchmark a YOLO model formats for speed and accuracy.
 
 Usage:
     from ultralytics.utils.benchmarks import ProfileModels, benchmark
-    ProfileModels(['
-    benchmark(model='
+    ProfileModels(['yolo11n.yaml', 'yolov8s.yaml']).profile()
+    benchmark(model='yolo11n.pt', imgsz=160)
 
 Format                  | `format=argument`         | Model
 ---                     | ---                       | ---
-PyTorch                 | -                         |
-TorchScript             | `torchscript`             |
-ONNX                    | `onnx`                    |
-OpenVINO                | `openvino`                |
-TensorRT                | `engine`                  |
-CoreML                  | `coreml`                  |
-TensorFlow SavedModel   | `saved_model`             |
-TensorFlow GraphDef     | `pb`                      |
-TensorFlow Lite         | `tflite`                  |
-TensorFlow Edge TPU     | `edgetpu`                 |
-TensorFlow.js           | `tfjs`                    |
-PaddlePaddle            | `paddle`                  |
-MNN                     | `mnn`                     |
-NCNN                    | `ncnn`                    |
+PyTorch                 | -                         | yolo11n.pt
+TorchScript             | `torchscript`             | yolo11n.torchscript
+ONNX                    | `onnx`                    | yolo11n.onnx
+OpenVINO                | `openvino`                | yolo11n_openvino_model/
+TensorRT                | `engine`                  | yolo11n.engine
+CoreML                  | `coreml`                  | yolo11n.mlpackage
+TensorFlow SavedModel   | `saved_model`             | yolo11n_saved_model/
+TensorFlow GraphDef     | `pb`                      | yolo11n.pb
+TensorFlow Lite         | `tflite`                  | yolo11n.tflite
+TensorFlow Edge TPU     | `edgetpu`                 | yolo11n_edgetpu.tflite
+TensorFlow.js           | `tfjs`                    | yolo11n_web_model/
+PaddlePaddle            | `paddle`                  | yolo11n_paddle_model/
+MNN                     | `mnn`                     | yolo11n.mnn
+NCNN                    | `ncnn`                    | yolo11n_ncnn_model/
+RKNN                    | `rknn`                    | yolo11n_rknn_model/
 """
 
 import glob
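The usage lines above are runnable as written; the new RKNN row follows the same directory naming convention as the other folder-based formats:

```python
from ultralytics.utils.benchmarks import benchmark

# Straight from the updated usage block; imgsz=160 keeps the run small.
# Note the RKNN row only completes on a Linux Rockchip host, per the
# assertions added to benchmark() further down in this diff.
benchmark(model="yolo11n.pt", imgsz=160)
```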
@@ -41,7 +42,7 @@ from ultralytics import YOLO, YOLOWorld
 from ultralytics.cfg import TASK2DATA, TASK2METRIC
 from ultralytics.engine.exporter import export_formats
 from ultralytics.utils import ARM64, ASSETS, IS_JETSON, IS_RASPBERRYPI, LINUX, LOGGER, MACOS, TQDM, WEIGHTS_DIR
-from ultralytics.utils.checks import IS_PYTHON_3_12, check_requirements, check_yolo
+from ultralytics.utils.checks import IS_PYTHON_3_12, check_requirements, check_yolo, is_rockchip
 from ultralytics.utils.downloads import safe_download
 from ultralytics.utils.files import file_size
 from ultralytics.utils.torch_utils import get_cpu_info, select_device
@@ -121,6 +122,11 @@ def benchmark(
             assert not isinstance(model, YOLOWorld), "YOLOWorldv2 IMX exports not supported"
             assert model.task == "detect", "IMX only supported for detection task"
             assert "C2f" in model.__str__(), "IMX only supported for YOLOv8"
+        if i == 15:  # RKNN
+            assert not isinstance(model, YOLOWorld), "YOLOWorldv2 RKNN exports not supported yet"
+            assert not is_end2end, "End-to-end models not supported by RKNN yet"
+            assert LINUX, "RKNN only supported on Linux"
+            assert not is_rockchip(), "RKNN Inference only supported on Rockchip devices"
         if "cpu" in device.type:
             assert cpu, "inference not supported on CPU"
         if "cuda" in device.type:
@@ -334,7 +340,7 @@ class ProfileModels:
     Examples:
         Profile models and print results
         >>> from ultralytics.utils.benchmarks import ProfileModels
-        >>> profiler = ProfileModels(["
+        >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"], imgsz=640)
         >>> profiler.profile()
     """
 
@@ -368,7 +374,7 @@
         Examples:
             Initialize and profile models
             >>> from ultralytics.utils.benchmarks import ProfileModels
-            >>> profiler = ProfileModels(["
+            >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"], imgsz=640)
             >>> profiler.profile()
         """
         self.paths = paths
ultralytics/utils/checks.py
CHANGED
@@ -19,6 +19,7 @@ import requests
 import torch
 
 from ultralytics.utils import (
+    ARM64,
     ASSETS,
     AUTOINSTALL,
     IS_COLAB,
@@ -30,6 +31,7 @@ from ultralytics.utils import (
     MACOS,
     ONLINE,
     PYTHON_VERSION,
+    RKNN_CHIPS,
     ROOT,
     TORCHVISION_VERSION,
     USER_CONFIG_DIR,
@@ -487,10 +489,10 @@ def check_yolov5u_filename(file: str, verbose: bool = True):
     return file
 
 
-def check_model_file_from_stem(model="
+def check_model_file_from_stem(model="yolo11n"):
     """Return a model filename from a valid model stem."""
     if model and not Path(model).suffix and Path(model).stem in downloads.GITHUB_ASSETS_STEMS:
-        return Path(model).with_suffix(".pt")  # add suffix, i.e.
+        return Path(model).with_suffix(".pt")  # add suffix, i.e. yolo11n -> yolo11n.pt
     else:
         return model
 
@@ -782,6 +784,21 @@ def cuda_is_available() -> bool:
     return cuda_device_count() > 0
 
 
+def is_rockchip():
+    """Check if the current environment is running on a Rockchip SoC."""
+    if LINUX and ARM64:
+        try:
+            with open("/proc/device-tree/compatible") as f:
+                dev_str = f.read()
+                *_, soc = dev_str.split(",")
+                if soc.replace("\x00", "") in RKNN_CHIPS:
+                    return True
+        except OSError:
+            return False
+    else:
+        return False
+
+
 def is_sudo_available() -> bool:
     """
     Check if the sudo command is available in the environment.
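A sketch of the parsing above with a hypothetical device-tree value: the compatible property is a sequence of NUL-terminated "vendor,model" strings, and the code keeps whatever follows the last comma, stripping NUL bytes, before checking it against RKNN_CHIPS:

```python
dev_str = "vendor,some-board\x00rockchip,rk3588\x00"  # hypothetical /proc/device-tree/compatible contents

*_, soc = dev_str.split(",")    # -> "rk3588\x00"
print(soc.replace("\x00", ""))  # -> "rk3588", which is in RKNN_CHIPS, so is_rockchip() returns True
```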
@@ -798,5 +815,7 @@ def is_sudo_available() -> bool:
 # Run checks and define constants
 check_python("3.8", hard=False, verbose=True)  # check python version
 check_torchvision()  # check torch-torchvision compatibility
+
+# Define constants
 IS_PYTHON_MINIMUM_3_10 = check_python("3.10", hard=False)
 IS_PYTHON_3_12 = PYTHON_VERSION.startswith("3.12")
ultralytics/utils/downloads.py
CHANGED
@@ -405,7 +405,7 @@ def get_github_assets(repo="ultralytics/assets", version="latest", retry=False):
         LOGGER.warning(f"⚠️ GitHub assets check failure for {url}: {r.status_code} {r.reason}")
         return "", []
     data = r.json()
-    return data["tag_name"], [x["name"] for x in data["assets"]]  # tag, assets i.e. ['
+    return data["tag_name"], [x["name"] for x in data["assets"]]  # tag, assets i.e. ['yolo11n.pt', 'yolov8s.pt', ...]
 
 
 def attempt_download_asset(file, repo="ultralytics/assets", release="v8.3.0", **kwargs):
ultralytics/utils/instance.py
CHANGED
@@ -407,7 +407,7 @@ class Instances:
 
         cat_boxes = np.concatenate([ins.bboxes for ins in instances_list], axis=axis)
         seg_len = [b.segments.shape[1] for b in instances_list]
-        if len(
+        if len(frozenset(seg_len)) > 1:  # resample segments if there's different length
             max_len = max(seg_len)
             cat_segments = np.concatenate(
                 [
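The completed condition makes the intent explicit: concatenation only resamples when per-instance segment lengths disagree. A tiny illustration with hypothetical counts:

```python
seg_len = [500, 500, 1000]  # hypothetical segment point counts per instance

print(len(frozenset(seg_len)) > 1)  # True -> segments are interpolated up to max(seg_len) points
```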
ultralytics/utils/loss.py
CHANGED
@@ -297,7 +297,7 @@ class v8SegmentationLoss(v8DetectionLoss):
             raise TypeError(
                 "ERROR ❌ segment dataset incorrectly formatted or not a segment dataset.\n"
                 "This error can occur when incorrectly training a 'segment' model on a 'detect' dataset, "
-                "i.e. 'yolo train model=
+                "i.e. 'yolo train model=yolo11n-seg.pt data=coco8.yaml'.\nVerify your dataset is a "
                 "correctly formatted 'segment' dataset using 'data=coco8-seg.yaml' "
                 "as an example.\nSee https://docs.ultralytics.com/datasets/segment/ for help."
             ) from e
@@ -666,7 +666,7 @@ class v8OBBLoss(v8DetectionLoss):
             raise TypeError(
                 "ERROR ❌ OBB dataset incorrectly formatted or not a OBB dataset.\n"
                 "This error can occur when incorrectly training a 'OBB' model on a 'detect' dataset, "
-                "i.e. 'yolo train model=
+                "i.e. 'yolo train model=yolo11n-obb.pt data=dota8.yaml'.\nVerify your dataset is a "
                 "correctly formatted 'OBB' dataset using 'data=dota8.yaml' "
                 "as an example.\nSee https://docs.ultralytics.com/datasets/obb/ for help."
             ) from e
ultralytics/utils/tuner.py
CHANGED
@@ -30,10 +30,10 @@ def run_ray_tune(
         ```python
         from ultralytics import YOLO
 
-        # Load a
+        # Load a YOLO11n model
         model = YOLO("yolo11n.pt")
 
-        # Start tuning hyperparameters for
+        # Start tuning hyperparameters for YOLO11n training on the COCO8 dataset
         result_grid = model.tune(data="coco8.yaml", use_ray=True)
         ```
     """
{ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: ultralytics
-Version: 8.3.64
+Version: 8.3.65
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -70,7 +70,6 @@ Requires-Dist: tensorflowjs>=3.9.0; extra == "export"
 Requires-Dist: tensorstore>=0.1.63; (platform_machine == "aarch64" and python_version >= "3.9") and extra == "export"
 Requires-Dist: keras; extra == "export"
 Requires-Dist: flatbuffers<100,>=23.5.26; platform_machine == "aarch64" and extra == "export"
-Requires-Dist: numpy==1.23.5; platform_machine == "aarch64" and extra == "export"
 Requires-Dist: h5py!=3.11.0; platform_machine == "aarch64" and extra == "export"
 Provides-Extra: solutions
 Requires-Dist: shapely>=2.0.0; extra == "solutions"