ultralytics 8.3.116__py3-none-any.whl → 8.3.118__py3-none-any.whl

This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (41)
  1. tests/__init__.py +22 -0
  2. tests/conftest.py +83 -0
  3. tests/test_cli.py +128 -0
  4. tests/test_cuda.py +164 -0
  5. tests/test_engine.py +131 -0
  6. tests/test_exports.py +231 -0
  7. tests/test_integrations.py +154 -0
  8. tests/test_python.py +695 -0
  9. tests/test_solutions.py +176 -0
  10. ultralytics/__init__.py +1 -1
  11. ultralytics/cfg/__init__.py +1 -1
  12. ultralytics/data/augment.py +3 -0
  13. ultralytics/data/base.py +11 -3
  14. ultralytics/data/dataset.py +3 -4
  15. ultralytics/data/loaders.py +2 -1
  16. ultralytics/engine/exporter.py +18 -11
  17. ultralytics/engine/trainer.py +2 -2
  18. ultralytics/hub/session.py +3 -2
  19. ultralytics/hub/utils.py +1 -1
  20. ultralytics/models/yolo/detect/predict.py +2 -2
  21. ultralytics/models/yolo/detect/val.py +1 -1
  22. ultralytics/models/yolo/model.py +2 -3
  23. ultralytics/models/yolo/obb/train.py +1 -1
  24. ultralytics/models/yolo/pose/predict.py +1 -1
  25. ultralytics/models/yolo/pose/train.py +1 -1
  26. ultralytics/models/yolo/pose/val.py +1 -1
  27. ultralytics/models/yolo/segment/train.py +3 -3
  28. ultralytics/models/yolo/yoloe/val.py +1 -1
  29. ultralytics/nn/autobackend.py +10 -9
  30. ultralytics/nn/text_model.py +97 -15
  31. ultralytics/utils/__init__.py +1 -1
  32. ultralytics/utils/benchmarks.py +4 -5
  33. ultralytics/utils/checks.py +4 -2
  34. ultralytics/utils/downloads.py +1 -0
  35. ultralytics/utils/torch_utils.py +4 -3
  36. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/METADATA +3 -4
  37. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/RECORD +41 -32
  38. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/WHEEL +0 -0
  39. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/entry_points.txt +0 -0
  40. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/licenses/LICENSE +0 -0
  41. {ultralytics-8.3.116.dist-info → ultralytics-8.3.118.dist-info}/top_level.txt +0 -0
tests/test_solutions.py ADDED
@@ -0,0 +1,176 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Tests Ultralytics Solutions: https://docs.ultralytics.com/solutions/,
+ # including every solution except DistanceCalculation and the Security Alarm System.
+
+ import cv2
+ import pytest
+
+ from tests import MODEL, TMP
+ from ultralytics import solutions
+ from ultralytics.utils import ASSETS_URL, IS_RASPBERRYPI, LINUX, checks
+ from ultralytics.utils.downloads import safe_download
+
+ # Pre-defined argument values
+ SHOW = False
+ DEMO_VIDEO = "solutions_ci_demo.mp4"  # for all solutions except workouts, object cropping and parking management
+ CROP_VIDEO = "decelera_landscape_min.mov"  # only for the object cropping solution
+ POSE_VIDEO = "solution_ci_pose_demo.mp4"  # only for the workouts monitoring solution
+ PARKING_VIDEO = "solution_ci_parking_demo.mp4"  # only for the parking management solution
+ PARKING_AREAS_JSON = "solution_ci_parking_areas.json"  # only for the parking management solution
+ PARKING_MODEL = "solutions_ci_parking_model.pt"  # only for the parking management solution
+ REGION = [(10, 200), (540, 200), (540, 180), (10, 180)]  # for object counting, speed estimation and queue management
+
+ # Test configs for each solution: (name, class, needs_frame_count, video, kwargs)
+ SOLUTIONS = [
+     (
+         "ObjectCounter",
+         solutions.ObjectCounter,
+         False,
+         DEMO_VIDEO,
+         {"region": REGION, "model": MODEL, "show": SHOW},
+     ),
+     (
+         "Heatmap",
+         solutions.Heatmap,
+         False,
+         DEMO_VIDEO,
+         {"colormap": cv2.COLORMAP_PARULA, "model": MODEL, "show": SHOW, "region": None},
+     ),
+     (
+         "HeatmapWithRegion",
+         solutions.Heatmap,
+         False,
+         DEMO_VIDEO,
+         {"colormap": cv2.COLORMAP_PARULA, "region": REGION, "model": MODEL, "show": SHOW},
+     ),
+     (
+         "SpeedEstimator",
+         solutions.SpeedEstimator,
+         False,
+         DEMO_VIDEO,
+         {"region": REGION, "model": MODEL, "show": SHOW},
+     ),
+     (
+         "QueueManager",
+         solutions.QueueManager,
+         False,
+         DEMO_VIDEO,
+         {"region": REGION, "model": MODEL, "show": SHOW},
+     ),
+     (
+         "LineAnalytics",
+         solutions.Analytics,
+         True,
+         DEMO_VIDEO,
+         {"analytics_type": "line", "model": MODEL, "show": SHOW},
+     ),
+     (
+         "PieAnalytics",
+         solutions.Analytics,
+         True,
+         DEMO_VIDEO,
+         {"analytics_type": "pie", "model": MODEL, "show": SHOW},
+     ),
+     (
+         "BarAnalytics",
+         solutions.Analytics,
+         True,
+         DEMO_VIDEO,
+         {"analytics_type": "bar", "model": MODEL, "show": SHOW},
+     ),
+     (
+         "AreaAnalytics",
+         solutions.Analytics,
+         True,
+         DEMO_VIDEO,
+         {"analytics_type": "area", "model": MODEL, "show": SHOW},
+     ),
+     ("TrackZone", solutions.TrackZone, False, DEMO_VIDEO, {"region": REGION, "model": MODEL, "show": SHOW}),
+     (
+         "ObjectCropper",
+         solutions.ObjectCropper,
+         False,
+         CROP_VIDEO,
+         {"crop_dir": str(TMP / "cropped-detections"), "model": MODEL, "show": SHOW},
+     ),
+     (
+         "ObjectBlurrer",
+         solutions.ObjectBlurrer,
+         False,
+         DEMO_VIDEO,
+         {"blur_ratio": 0.5, "model": MODEL, "show": SHOW},
+     ),
+     (
+         "InstanceSegmentation",
+         solutions.InstanceSegmentation,
+         False,
+         DEMO_VIDEO,
+         {"model": "yolo11n-seg.pt", "show": SHOW},
+     ),
+     ("VisionEye", solutions.VisionEye, False, DEMO_VIDEO, {"model": MODEL, "show": SHOW}),
+     (
+         "RegionCounter",
+         solutions.RegionCounter,
+         False,
+         DEMO_VIDEO,
+         {"region": REGION, "model": MODEL, "show": SHOW},
+     ),
+     ("AIGym", solutions.AIGym, False, POSE_VIDEO, {"kpts": [6, 8, 10], "show": SHOW}),
+     (
+         "ParkingManager",
+         solutions.ParkingManagement,
+         False,
+         PARKING_VIDEO,
+         {"model": str(TMP / PARKING_MODEL), "show": SHOW, "json_file": str(TMP / PARKING_AREAS_JSON)},
+     ),
+     (
+         "StreamlitInference",
+         solutions.Inference,
+         False,
+         None,  # the Streamlit application doesn't require a video file
+         {},  # the Streamlit application doesn't accept arguments
+     ),
+ ]
+
+
+ def process_video(solution, video_path, needs_frame_count=False):
+     """Process video with solution, feeding frames and optional frame count."""
+     cap = cv2.VideoCapture(video_path)
+     assert cap.isOpened(), f"Error reading video file {video_path}"
+
+     frame_count = 0
+     while cap.isOpened():
+         success, im0 = cap.read()
+         if not success:
+             break
+         frame_count += 1
+         im_copy = im0.copy()
+         args = [im_copy, frame_count] if needs_frame_count else [im_copy]
+         _ = solution(*args)
+
+     cap.release()
+
+
+ @pytest.mark.skipif(
+     (LINUX and checks.IS_PYTHON_3_11) or IS_RASPBERRYPI,
+     reason="Disabled for testing due to --slow test errors after YOLOE PR.",
+ )
+ @pytest.mark.parametrize("name, solution_class, needs_frame_count, video, kwargs", SOLUTIONS)
+ def test_solution(name, solution_class, needs_frame_count, video, kwargs):
+     """Test an individual Ultralytics solution."""
+     if video:
+         safe_download(url=f"{ASSETS_URL}/{video}", dir=TMP)
+     if name == "ParkingManager":
+         safe_download(url=f"{ASSETS_URL}/{PARKING_AREAS_JSON}", dir=TMP)
+         safe_download(url=f"{ASSETS_URL}/{PARKING_MODEL}", dir=TMP)
+     elif name == "StreamlitInference":
+         if checks.check_imshow():  # do not merge with elif above
+             solution_class(**kwargs).inference()  # requires interactive GUI environment
+         return
+
+     process_video(
+         solution=solution_class(**kwargs),
+         video_path=str(TMP / video),
+         needs_frame_count=needs_frame_count,
+     )
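The new test drives each solution the same way user code does: construct it once, then call it per frame. A minimal sketch of that pattern outside pytest, assuming a local `demo.mp4` and the `yolo11n.pt` weights (both stand-ins; the CI assets above are downloaded from `ASSETS_URL`):

```python
import cv2

from ultralytics import solutions

# Construct once; solutions are callable per frame, as in process_video() above
counter = solutions.ObjectCounter(
    region=[(10, 200), (540, 200), (540, 180), (10, 180)],  # same REGION as the test
    model="yolo11n.pt",
    show=False,
)

cap = cv2.VideoCapture("demo.mp4")  # hypothetical local video
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    results = counter(frame)
cap.release()
```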
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.116"
+ __version__ = "8.3.118"
 
  import os
 
ultralytics/cfg/__init__.py CHANGED
@@ -9,6 +9,7 @@ from typing import Any, Dict, List, Union
 
  import cv2
 
+ from ultralytics import __version__
  from ultralytics.utils import (
      ASSETS,
      DEFAULT_CFG,
@@ -24,7 +25,6 @@ from ultralytics.utils import (
      SETTINGS_FILE,
      TESTS_RUNNING,
      IterableSimpleNamespace,
-     __version__,
      checks,
      colorstr,
      deprecation_warn,
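Several files in this release stop importing `__version__` from `ultralytics.utils` and pull it from the package root instead (the same swap appears in exporter.py, trainer.py, hub/session.py, and hub/utils.py below). The new pattern:

```python
# New canonical location for the package version string
from ultralytics import __version__

print(__version__)  # "8.3.118" for this release
```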
ultralytics/data/augment.py CHANGED
@@ -1586,6 +1586,9 @@ class LetterBox:
 
          if shape[::-1] != new_unpad:  # resize
              img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
+         if img.ndim == 2:
+             img = img[..., None]
+
          top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1))
          left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1))
          h, w, c = img.shape
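The guard exists because `cv2.resize` returns a 2-D array for single-channel input, which would break the later `h, w, c = img.shape` unpacking. A standalone sketch of the behavior, using only numpy and opencv-python:

```python
import cv2
import numpy as np

gray = np.zeros((480, 640, 1), dtype=np.uint8)  # grayscale image with explicit channel axis
resized = cv2.resize(gray, (320, 240), interpolation=cv2.INTER_LINEAR)
print(resized.shape)  # (240, 320) -- cv2.resize silently drops the trailing axis

if resized.ndim == 2:
    resized = resized[..., None]  # the LetterBox fix: restore (H, W, 1)
print(resized.shape)  # (240, 320, 1)
```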
ultralytics/data/base.py CHANGED
@@ -15,7 +15,8 @@ import psutil
  from torch.utils.data import Dataset
 
  from ultralytics.data.utils import FORMATS_HELP_MSG, HELP_URL, IMG_FORMATS, check_file_speeds
- from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM, imread
+ from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM
+ from ultralytics.utils.patches import imread
 
 
  class BaseDataset(Dataset):
@@ -32,6 +33,7 @@ class BaseDataset(Dataset):
          single_cls (bool): Whether to treat all objects as a single class.
          prefix (str): Prefix to print in log messages.
          fraction (float): Fraction of dataset to utilize.
+         cv2_flag (int): OpenCV flag for reading images.
          im_files (List[str]): List of image file paths.
          labels (List[Dict]): List of label data dictionaries.
          ni (int): Number of images in the dataset.
@@ -78,6 +80,7 @@ class BaseDataset(Dataset):
          single_cls=False,
          classes=None,
          fraction=1.0,
+         channels=3,
      ):
          """
          Initialize BaseDataset with given configuration and options.
@@ -96,6 +99,7 @@
              single_cls (bool, optional): If True, single class training is used.
              classes (list, optional): List of included classes.
              fraction (float, optional): Fraction of dataset to utilize.
+             channels (int, optional): Number of channels in the images (1 for grayscale, 3 for RGB).
          """
          super().__init__()
          self.img_path = img_path
@@ -104,6 +108,8 @@
          self.single_cls = single_cls
          self.prefix = prefix
          self.fraction = fraction
+         self.channels = channels
+         self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR
          self.im_files = self.get_img_files(self.img_path)
          self.labels = self.get_labels()
          self.update_labels(include_class=classes)  # single_cls and include_class
@@ -223,9 +229,9 @@
                  except Exception as e:
                      LOGGER.warning(f"{self.prefix}Removing corrupt *.npy image file {fn} due to: {e}")
                      Path(fn).unlink(missing_ok=True)
-                     im = imread(f)  # BGR
+                     im = imread(f, flags=self.cv2_flag)  # BGR
              else:  # read image
-                 im = imread(f)  # BGR
+                 im = imread(f, flags=self.cv2_flag)  # BGR
              if im is None:
                  raise FileNotFoundError(f"Image Not Found {f}")
 
@@ -237,6 +243,8 @@
                  im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
              elif not (h0 == w0 == self.imgsz):  # resize by stretching image to square imgsz
                  im = cv2.resize(im, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)
+             if im.ndim == 2:
+                 im = im[..., None]
 
              # Add to buffer if training with augmentations
              if self.augment:
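These hunks are the grayscale-training plumbing: `channels` selects an OpenCV decode flag, and every `imread` call routes through it. A sketch of the mapping in isolation, assuming a local `image.jpg`:

```python
import cv2

channels = 1  # 1 for grayscale, 3 for RGB, mirroring the new BaseDataset argument
cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR

im = cv2.imread("image.jpg", flags=cv2_flag)  # the dataset uses utils.patches.imread with the same flag
if im is not None and im.ndim == 2:
    im = im[..., None]  # grayscale decodes as (H, W); keep a trailing channel axis like load_image() does
```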
ultralytics/data/dataset.py CHANGED
@@ -12,14 +12,14 @@ import torch
  from PIL import Image
  from torch.utils.data import ConcatDataset
 
- from ultralytics.utils import LOCAL_RANK, NUM_THREADS, TQDM, colorstr
+ from ultralytics.utils import LOCAL_RANK, LOGGER, NUM_THREADS, TQDM, colorstr
+ from ultralytics.utils.instance import Instances
  from ultralytics.utils.ops import resample_segments, segments2boxes
  from ultralytics.utils.torch_utils import TORCHVISION_0_18
 
  from .augment import (
      Compose,
      Format,
-     Instances,
      LetterBox,
      RandomLoadText,
      classify_augmentations,
@@ -30,7 +30,6 @@ from .base import BaseDataset
  from .converter import merge_multi_segment
  from .utils import (
      HELP_URL,
-     LOGGER,
      check_file_speeds,
      get_hash,
      img2label_paths,
@@ -85,7 +84,7 @@ class YOLODataset(BaseDataset):
          self.use_obb = task == "obb"
          self.data = data
          assert not (self.use_segments and self.use_keypoints), "Can not use both segments and keypoints."
-         super().__init__(*args, **kwargs)
+         super().__init__(*args, channels=self.data["channels"], **kwargs)
 
      def cache_labels(self, path=Path("./labels.cache")):
          """
ultralytics/data/loaders.py CHANGED
@@ -16,8 +16,9 @@ import torch
  from PIL import Image
 
  from ultralytics.data.utils import FORMATS_HELP_MSG, IMG_FORMATS, VID_FORMATS
- from ultralytics.utils import IS_COLAB, IS_KAGGLE, LOGGER, imread, ops
+ from ultralytics.utils import IS_COLAB, IS_KAGGLE, LOGGER, ops
  from ultralytics.utils.checks import check_requirements
+ from ultralytics.utils.patches import imread
 
 
  @dataclass
ultralytics/engine/exporter.py CHANGED
@@ -70,6 +70,7 @@ from pathlib import Path
  import numpy as np
  import torch
 
+ from ultralytics import __version__
  from ultralytics.cfg import TASK2DATA, get_cfg
  from ultralytics.data import build_dataloader
  from ultralytics.data.dataset import YOLODataset
@@ -81,7 +82,6 @@ from ultralytics.utils import (
      ARM64,
      DEFAULT_CFG,
      IS_COLAB,
-     IS_JETSON,
      LINUX,
      LOGGER,
      MACOS,
@@ -89,13 +89,13 @@ from ultralytics.utils import (
      RKNN_CHIPS,
      ROOT,
      WINDOWS,
-     __version__,
      callbacks,
      colorstr,
      get_default_args,
      yaml_save,
  )
  from ultralytics.utils.checks import (
+     IS_PYTHON_MINIMUM_3_12,
      check_imgsz,
      check_is_path_safe,
      check_requirements,
@@ -238,9 +238,6 @@
              _callbacks (dict, optional): Dictionary of callback functions.
          """
          self.args = get_cfg(cfg, overrides)
-         if self.args.format.lower() in {"coreml", "mlmodel"}:  # fix attempt for protobuf<3.20.x errors
-             os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"  # must run before TensorBoard callback
-
          self.callbacks = _callbacks or callbacks.get_default_callbacks()
          callbacks.add_integration_callbacks(self)
 
@@ -384,7 +381,7 @@
              m.export = True
              m.format = self.args.format
              m.max_det = self.args.max_det
-             m.xyxy = self.args.nms
+             m.xyxy = self.args.nms and not coreml
          elif isinstance(m, C2f) and not is_tf_format:
              # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
              m.forward = m.forward_split
@@ -703,7 +700,7 @@
 
      @try_export
      def export_mnn(self, prefix=colorstr("MNN:")):
-         """YOLOv8 MNN export using MNN https://github.com/alibaba/MNN."""
+         """YOLO MNN export using MNN https://github.com/alibaba/MNN."""
          f_onnx, _ = self.export_onnx()  # get onnx model first
 
          check_requirements("MNN>=2.9.6")
@@ -925,10 +922,8 @@
              "onnx>=1.12.0",
              "onnx2tf>=1.26.3",
              "onnxslim>=0.1.31",
-             "tflite_support<=0.4.3" if IS_JETSON else "tflite_support",  # fix ImportError 'GLIBCXX_3.4.29'
-             "flatbuffers>=23.5.26,<100",  # update old 'flatbuffers' included inside tensorflow package
              "onnxruntime-gpu" if cuda else "onnxruntime",
-             "protobuf>=5",  # tflite_support pins <=4 but >=5 works
+             "protobuf>=5",
          ),
          cmds="--extra-index-url https://pypi.ngc.nvidia.com",  # onnx_graphsurgeon only on NVIDIA
      )
@@ -1279,8 +1274,20 @@
 
          return f, None
 
-     def _add_tflite_metadata(self, file):
+     def _add_tflite_metadata(self, file, use_flatbuffers=False):
          """Add metadata to *.tflite models per https://ai.google.dev/edge/litert/models/metadata."""
+         if not use_flatbuffers:
+             import zipfile
+
+             with zipfile.ZipFile(file, "a", zipfile.ZIP_DEFLATED) as zf:
+                 zf.writestr("metadata.json", json.dumps(self.metadata, indent=2))
+             return
+
+         if IS_PYTHON_MINIMUM_3_12:
+             LOGGER.warning(f"TFLite Support package may not be compatible with Python>=3.12 environments for {file}")
+
+         # Update old 'flatbuffers' included inside tensorflow package
+         check_requirements(("tflite_support", "flatbuffers>=23.5.26,<100; platform_machine == 'aarch64'"))
          import flatbuffers
 
          try:
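The rewritten `_add_tflite_metadata` default path drops the `tflite_support` dependency entirely: a `.tflite` flatbuffer tolerates zip content appended to it, so the exporter just appends a plain `metadata.json`. A standalone sketch of the write side, assuming a hypothetical `model.tflite` and metadata dict:

```python
import json
import zipfile

metadata = {"description": "Ultralytics YOLO model", "stride": 32}  # illustrative values

# Append metadata as a zip member; this is the same zipfile approach the diff adopts
with zipfile.ZipFile("model.tflite", "a", zipfile.ZIP_DEFLATED) as zf:
    zf.writestr("metadata.json", json.dumps(metadata, indent=2))
```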
ultralytics/engine/trainer.py CHANGED
@@ -21,6 +21,7 @@ import torch
  from torch import distributed as dist
  from torch import nn, optim
 
+ from ultralytics import __version__
  from ultralytics.cfg import get_cfg, get_save_dir
  from ultralytics.data.utils import check_cls_dataset, check_det_dataset
  from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
@@ -30,7 +31,6 @@ from ultralytics.utils import (
      LOGGER,
      RANK,
      TQDM,
-     __version__,
      callbacks,
      clean_url,
      colorstr,
@@ -268,7 +268,7 @@
          self.amp = torch.tensor(check_amp(self.model), device=self.device)
          callbacks.default_callbacks = callbacks_backup  # restore callbacks
          if RANK > -1 and world_size > 1:  # DDP
-             dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
+             dist.broadcast(self.amp.int(), src=0)  # broadcast from rank 0 to all other ranks; gloo errors with boolean
          self.amp = bool(self.amp)  # as boolean
          self.scaler = (
              torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
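The trainer change works around the gloo backend rejecting boolean tensors in collectives: the AMP flag is cast to int before `dist.broadcast`. A hedged sketch of the pattern as a helper (requires an initialized process group to actually run):

```python
import torch
from torch import distributed as dist


def broadcast_flag(flag: torch.Tensor) -> bool:
    """Broadcast a 0-d boolean tensor from rank 0; gloo errors on bool dtype, so cast to int first."""
    flag_int = flag.int()  # bool -> int32 copy that gloo accepts
    dist.broadcast(flag_int, src=0)  # fills flag_int in place on every rank
    return bool(flag_int.item())
```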
ultralytics/hub/session.py CHANGED
@@ -9,8 +9,9 @@ from urllib.parse import parse_qs, urlparse
 
  import requests
 
- from ultralytics.hub.utils import HELP_MSG, HUB_WEB_ROOT, PREFIX, TQDM
- from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, __version__, checks, emojis
+ from ultralytics import __version__
+ from ultralytics.hub.utils import HELP_MSG, HUB_WEB_ROOT, PREFIX
+ from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, TQDM, checks, emojis
  from ultralytics.utils.errors import HUBModelError
 
  AGENT_NAME = f"python-{__version__}-colab" if IS_COLAB else f"python-{__version__}-local"
ultralytics/hub/utils.py CHANGED
@@ -9,6 +9,7 @@ from pathlib import Path
 
  import requests
 
+ from ultralytics import __version__
  from ultralytics.utils import (
      ARGV,
      ENVIRONMENT,
@@ -22,7 +23,6 @@ from ultralytics.utils import (
      TESTS_RUNNING,
      TQDM,
      TryExcept,
-     __version__,
      colorstr,
      get_git_origin_url,
  )
ultralytics/models/yolo/detect/predict.py CHANGED
@@ -47,7 +47,7 @@ class DetectionPredictor(BasePredictor):
              (list): List of Results objects containing the post-processed predictions.
 
          Examples:
-             >>> predictor = DetectionPredictor(overrides=dict(model="yolov8n.pt"))
+             >>> predictor = DetectionPredictor(overrides=dict(model="yolo11n.pt"))
              >>> results = predictor.predict("path/to/image.jpg")
              >>> processed_results = predictor.postprocess(preds, img, orig_imgs)
          """
@@ -59,7 +59,7 @@
              self.args.classes,
              self.args.agnostic_nms,
              max_det=self.args.max_det,
-             nc=len(self.model.names),
+             nc=0 if self.args.task == "detect" else len(self.model.names),
              end2end=getattr(self.model, "end2end", False),
              rotated=self.args.task == "obb",
              return_idxs=save_feats,
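Passing `nc=0` defers the class count to the NMS routine, which for a plain detect head can recover it from the tensor shape; tasks whose outputs carry extra mask or keypoint channels still pass an explicit count. The shape arithmetic, based on how `ultralytics.utils.ops.non_max_suppression` infers `nc` (an assumption worth verifying against your installed version):

```python
import torch

pred = torch.zeros(1, 4 + 80, 8400)  # illustrative detect output: 4 box channels + 80 classes
nc = 0
nc = nc or (pred.shape[1] - 4)  # nc=0 lets NMS infer the class count from the shape
print(nc)  # 80
```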
ultralytics/models/yolo/detect/val.py CHANGED
@@ -124,7 +124,7 @@ class DetectionValidator(BaseValidator):
              preds,
              self.args.conf,
              self.args.iou,
-             nc=self.nc,
+             nc=0 if self.args.task == "detect" else self.nc,
              multi_label=True,
              agnostic=self.args.single_cls or self.args.agnostic_nms,
              max_det=self.args.max_det,
ultralytics/models/yolo/model.py CHANGED
@@ -29,16 +29,15 @@ class YOLO(Model):
          (YOLOWorld or YOLOE) based on the model filename.
 
          Args:
-             model (str | Path): Model name or path to model file, i.e. 'yolo11n.pt', 'yolov8n.yaml'.
+             model (str | Path): Model name or path to model file, i.e. 'yolo11n.pt', 'yolo11n.yaml'.
              task (str | None): YOLO task specification, i.e. 'detect', 'segment', 'classify', 'pose', 'obb'.
                  Defaults to auto-detection based on model.
              verbose (bool): Display model info on load.
 
          Examples:
              >>> from ultralytics import YOLO
-             >>> model = YOLO("yolov8n.pt")  # load a pretrained YOLOv8n detection model
-             >>> model = YOLO("yolov8n-seg.pt")  # load a pretrained YOLOv8n segmentation model
              >>> model = YOLO("yolo11n.pt")  # load a pretrained YOLOv11n detection model
+             >>> model = YOLO("yolo11n-seg.pt")  # load a pretrained YOLO11n segmentation model
          """
          path = Path(model)
          if "-world" in path.stem and path.suffix in {".pt", ".yaml", ".yml"}:  # if YOLOWorld PyTorch model
ultralytics/models/yolo/obb/train.py CHANGED
@@ -65,7 +65,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
 
          Examples:
              >>> trainer = OBBTrainer()
-             >>> model = trainer.get_model(cfg="yolov8n-obb.yaml", weights="yolov8n-obb.pt")
+             >>> model = trainer.get_model(cfg="yolo11n-obb.yaml", weights="yolo11n-obb.pt")
          """
          model = OBBModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
          if weights:
ultralytics/models/yolo/pose/predict.py CHANGED
@@ -41,7 +41,7 @@ class PosePredictor(DetectionPredictor):
      Examples:
          >>> from ultralytics.utils import ASSETS
          >>> from ultralytics.models.yolo.pose import PosePredictor
-         >>> args = dict(model="yolov8n-pose.pt", source=ASSETS)
+         >>> args = dict(model="yolo11n-pose.pt", source=ASSETS)
          >>> predictor = PosePredictor(overrides=args)
          >>> predictor.predict_cli()
      """
ultralytics/models/yolo/pose/train.py CHANGED
@@ -53,7 +53,7 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
 
      Examples:
          >>> from ultralytics.models.yolo.pose import PoseTrainer
-         >>> args = dict(model="yolov8n-pose.pt", data="coco8-pose.yaml", epochs=3)
+         >>> args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml", epochs=3)
          >>> trainer = PoseTrainer(overrides=args)
          >>> trainer.train()
      """
ultralytics/models/yolo/pose/val.py CHANGED
@@ -62,7 +62,7 @@ class PoseValidator(DetectionValidator):
 
      Examples:
          >>> from ultralytics.models.yolo.pose import PoseValidator
-         >>> args = dict(model="yolov8n-pose.pt", data="coco8-pose.yaml")
+         >>> args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml")
          >>> validator = PoseValidator(args=args)
          >>> validator()
 
ultralytics/models/yolo/segment/train.py CHANGED
@@ -39,7 +39,7 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
 
      Examples:
          >>> from ultralytics.models.yolo.segment import SegmentationTrainer
-         >>> args = dict(model="yolov8n-seg.pt", data="coco8-seg.yaml", epochs=3)
+         >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml", epochs=3)
          >>> trainer = SegmentationTrainer(overrides=args)
          >>> trainer.train()
      """
@@ -62,8 +62,8 @@
 
          Examples:
              >>> trainer = SegmentationTrainer()
-             >>> model = trainer.get_model(cfg="yolov8n-seg.yaml")
-             >>> model = trainer.get_model(weights="yolov8n-seg.pt", verbose=False)
+             >>> model = trainer.get_model(cfg="yolo11n-seg.yaml")
+             >>> model = trainer.get_model(weights="yolo11n-seg.pt", verbose=False)
          """
          model = SegmentationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
          if weights:
ultralytics/models/yolo/yoloe/val.py CHANGED
@@ -9,9 +9,9 @@ from ultralytics.data import YOLOConcatDataset, build_dataloader, build_yolo_dat
  from ultralytics.data.augment import LoadVisualPrompt
  from ultralytics.data.utils import check_det_dataset
  from ultralytics.models.yolo.detect import DetectionValidator
- from ultralytics.models.yolo.model import YOLOEModel
  from ultralytics.models.yolo.segment import SegmentationValidator
  from ultralytics.nn.modules.head import YOLOEDetect
+ from ultralytics.nn.tasks import YOLOEModel
  from ultralytics.utils import LOGGER, TQDM
  from ultralytics.utils.torch_utils import select_device, smart_inference_mode
 
ultralytics/nn/autobackend.py CHANGED
@@ -14,7 +14,7 @@ import torch
  import torch.nn as nn
  from PIL import Image
 
- from ultralytics.utils import ARM64, IS_JETSON, IS_RASPBERRYPI, LINUX, LOGGER, PYTHON_VERSION, ROOT, yaml_load
+ from ultralytics.utils import ARM64, IS_JETSON, LINUX, LOGGER, PYTHON_VERSION, ROOT, yaml_load
  from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml, is_rockchip
  from ultralytics.utils.downloads import attempt_download_asset, is_url
 
@@ -90,7 +90,7 @@ class AutoBackend(nn.Module):
          _model_type: Determine the model type from file path.
 
      Examples:
-         >>> model = AutoBackend(weights="yolov8n.pt", device="cuda")
+         >>> model = AutoBackend(weights="yolo11n.pt", device="cuda")
          >>> results = model(img)
      """
@@ -207,9 +207,6 @@
          elif onnx or imx:
              LOGGER.info(f"Loading {w} for ONNX Runtime inference...")
              check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime"))
-             if IS_RASPBERRYPI or IS_JETSON:
-                 # Fix 'numpy.linalg._umath_linalg' has no attribute '_ilp64' for TF SavedModel on RPi and Jetson
-                 check_requirements("numpy==1.23.5")
              import onnxruntime
 
              providers = ["CPUExecutionProvider"]
@@ -432,10 +429,14 @@
              output_details = interpreter.get_output_details()  # outputs
              # Load metadata
              try:
-                 with zipfile.ZipFile(w, "r") as model:
-                     meta_file = model.namelist()[0]
-                     metadata = ast.literal_eval(model.read(meta_file).decode("utf-8"))
-             except zipfile.BadZipFile:
+                 with zipfile.ZipFile(w, "r") as zf:
+                     name = zf.namelist()[0]
+                     contents = zf.read(name).decode("utf-8")
+                     if name == "metadata.json":  # Custom Ultralytics metadata dict for Python>=3.12
+                         metadata = json.loads(contents)
+                     else:
+                         metadata = ast.literal_eval(contents)  # Default tflite-support metadata for Python<=3.11
+             except (zipfile.BadZipFile, SyntaxError, ValueError, json.JSONDecodeError):
                  pass
 
              # TF.js
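The loader mirrors the export change: the first zip member is parsed as JSON when it is the new `metadata.json`, with `ast.literal_eval` kept as the fallback for the dict-literal format older `tflite_support` exports embed. The same logic in isolation, assuming a `model.tflite` with embedded metadata:

```python
import ast
import json
import zipfile

metadata = None
try:
    with zipfile.ZipFile("model.tflite", "r") as zf:
        name = zf.namelist()[0]
        contents = zf.read(name).decode("utf-8")
        # New Ultralytics exports embed JSON; older tflite_support exports embed a Python dict literal
        metadata = json.loads(contents) if name == "metadata.json" else ast.literal_eval(contents)
except (zipfile.BadZipFile, SyntaxError, ValueError, json.JSONDecodeError):
    pass  # no readable embedded metadata
```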