ultralytics 8.2.81__py3-none-any.whl → 8.2.83__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ultralytics might be problematic.
- tests/test_solutions.py +0 -4
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +21 -21
- ultralytics/data/annotator.py +1 -1
- ultralytics/data/augment.py +58 -58
- ultralytics/data/base.py +3 -3
- ultralytics/data/converter.py +7 -8
- ultralytics/data/explorer/explorer.py +7 -23
- ultralytics/data/loaders.py +2 -2
- ultralytics/data/split_dota.py +11 -3
- ultralytics/data/utils.py +6 -10
- ultralytics/engine/exporter.py +2 -4
- ultralytics/engine/model.py +47 -47
- ultralytics/engine/predictor.py +1 -1
- ultralytics/engine/results.py +28 -28
- ultralytics/engine/trainer.py +11 -8
- ultralytics/engine/tuner.py +7 -8
- ultralytics/engine/validator.py +3 -5
- ultralytics/hub/__init__.py +5 -5
- ultralytics/hub/auth.py +6 -2
- ultralytics/hub/session.py +3 -5
- ultralytics/models/fastsam/model.py +13 -10
- ultralytics/models/fastsam/predict.py +2 -2
- ultralytics/models/fastsam/utils.py +0 -1
- ultralytics/models/nas/model.py +4 -4
- ultralytics/models/nas/predict.py +1 -2
- ultralytics/models/nas/val.py +1 -1
- ultralytics/models/rtdetr/predict.py +1 -1
- ultralytics/models/rtdetr/train.py +1 -1
- ultralytics/models/rtdetr/val.py +1 -1
- ultralytics/models/sam/model.py +11 -11
- ultralytics/models/sam/modules/decoders.py +7 -4
- ultralytics/models/sam/modules/sam.py +9 -1
- ultralytics/models/sam/modules/tiny_encoder.py +1 -1
- ultralytics/models/sam/modules/transformer.py +0 -2
- ultralytics/models/sam/modules/utils.py +1 -1
- ultralytics/models/sam/predict.py +10 -10
- ultralytics/models/utils/loss.py +29 -17
- ultralytics/models/utils/ops.py +1 -5
- ultralytics/models/yolo/classify/predict.py +1 -1
- ultralytics/models/yolo/classify/train.py +1 -1
- ultralytics/models/yolo/classify/val.py +1 -1
- ultralytics/models/yolo/detect/predict.py +1 -1
- ultralytics/models/yolo/detect/train.py +1 -1
- ultralytics/models/yolo/detect/val.py +1 -1
- ultralytics/models/yolo/model.py +6 -2
- ultralytics/models/yolo/obb/predict.py +1 -1
- ultralytics/models/yolo/obb/train.py +1 -1
- ultralytics/models/yolo/obb/val.py +2 -2
- ultralytics/models/yolo/pose/predict.py +1 -1
- ultralytics/models/yolo/pose/train.py +1 -1
- ultralytics/models/yolo/pose/val.py +1 -1
- ultralytics/models/yolo/segment/predict.py +1 -1
- ultralytics/models/yolo/segment/train.py +1 -1
- ultralytics/models/yolo/segment/val.py +1 -1
- ultralytics/models/yolo/world/train.py +1 -1
- ultralytics/nn/autobackend.py +2 -2
- ultralytics/nn/modules/__init__.py +2 -2
- ultralytics/nn/modules/block.py +8 -20
- ultralytics/nn/modules/conv.py +1 -3
- ultralytics/nn/modules/head.py +16 -31
- ultralytics/nn/modules/transformer.py +0 -1
- ultralytics/nn/modules/utils.py +0 -1
- ultralytics/nn/tasks.py +11 -9
- ultralytics/solutions/__init__.py +1 -0
- ultralytics/solutions/ai_gym.py +0 -2
- ultralytics/solutions/analytics.py +1 -6
- ultralytics/solutions/heatmap.py +0 -1
- ultralytics/solutions/object_counter.py +0 -2
- ultralytics/solutions/queue_management.py +0 -2
- ultralytics/trackers/basetrack.py +1 -1
- ultralytics/trackers/byte_tracker.py +2 -2
- ultralytics/trackers/utils/gmc.py +5 -5
- ultralytics/trackers/utils/kalman_filter.py +1 -1
- ultralytics/trackers/utils/matching.py +1 -5
- ultralytics/utils/__init__.py +137 -24
- ultralytics/utils/autobatch.py +7 -4
- ultralytics/utils/benchmarks.py +6 -14
- ultralytics/utils/callbacks/base.py +0 -1
- ultralytics/utils/callbacks/comet.py +0 -1
- ultralytics/utils/callbacks/tensorboard.py +0 -1
- ultralytics/utils/checks.py +15 -18
- ultralytics/utils/downloads.py +6 -7
- ultralytics/utils/files.py +3 -4
- ultralytics/utils/instance.py +17 -7
- ultralytics/utils/metrics.py +16 -16
- ultralytics/utils/ops.py +8 -8
- ultralytics/utils/plotting.py +25 -35
- ultralytics/utils/tal.py +27 -18
- ultralytics/utils/torch_utils.py +12 -13
- ultralytics/utils/tuner.py +2 -3
- {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/METADATA +4 -3
- {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/RECORD +97 -97
- {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/WHEEL +1 -1
- {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/LICENSE +0 -0
- {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.2.81.dist-info → ultralytics-8.2.83.dist-info}/top_level.txt +0 -0
ultralytics/data/explorer/explorer.py
CHANGED

@@ -163,7 +163,7 @@ class Explorer:
 ```python
 exp = Explorer()
 exp.create_embeddings_table()
-similar = exp.query(img=
+similar = exp.query(img="https://ultralytics.com/images/zidane.jpg")
 ```
 """
 if self.table is None:
@@ -226,6 +226,7 @@ class Explorer:
 def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
 """
 Plot the results of a SQL-Like query on the table.
+
 Args:
 query (str): SQL query to run.
 labels (bool): Whether to plot the labels or not.
@@ -271,7 +272,7 @@ class Explorer:
 ```python
 exp = Explorer()
 exp.create_embeddings_table()
-similar = exp.get_similar(img=
+similar = exp.get_similar(img="https://ultralytics.com/images/zidane.jpg")
 ```
 """
 assert return_type in {"pandas", "arrow"}, f"Return type should be `pandas` or `arrow`, but got {return_type}"
@@ -306,7 +307,7 @@ class Explorer:
 ```python
 exp = Explorer()
 exp.create_embeddings_table()
-similar = exp.plot_similar(img=
+similar = exp.plot_similar(img="https://ultralytics.com/images/zidane.jpg")
 ```
 """
 similar = self.get_similar(img, idx, limit, return_type="arrow")
@@ -395,8 +396,8 @@ class Explorer:
 exp.create_embeddings_table()

 similarity_idx_plot = exp.plot_similarity_index()
-similarity_idx_plot.show()
-similarity_idx_plot.save(
+similarity_idx_plot.show()  # view image preview
+similarity_idx_plot.save("path/to/save/similarity_index_plot.png")  # save contents to file
 ```
 """
 sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
@@ -447,7 +448,7 @@ class Explorer:
 ```python
 exp = Explorer()
 exp.create_embeddings_table()
-answer = exp.ask_ai(
+answer = exp.ask_ai("Show images with 1 person and 2 dogs")
 ```
 """
 result = prompt_sql_query(query)
@@ -457,20 +458,3 @@ class Explorer:
 LOGGER.error("AI generated query is not valid. Please try again with a different prompt")
 LOGGER.error(e)
 return None
-
-def visualize(self, result):
-"""
-Visualize the results of a query. TODO.
-
-Args:
-result (pyarrow.Table): Table containing the results of a query.
-"""
-pass
-
-def generate_report(self, result):
-"""
-Generate a report of the dataset.
-
-TODO
-"""
-pass
ultralytics/data/loaders.py
CHANGED
@@ -84,7 +84,7 @@ class LoadStreams:
 # Start thread to read frames from video stream
 st = f"{i + 1}/{n}: {s}... "
 if urlparse(s).hostname in {"www.youtube.com", "youtube.com", "youtu.be"}:  # if source is YouTube video
-# YouTube format i.e. 'https://www.youtube.com/watch?v=
+# YouTube format i.e. 'https://www.youtube.com/watch?v=Jsn8D3aC840' or 'https://youtu.be/Jsn8D3aC840'
 s = get_best_youtube_url(s)
 s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
 if s == 0 and (IS_COLAB or IS_KAGGLE):
@@ -240,7 +240,7 @@ class LoadScreenshots:
 return self

 def __next__(self):
-"""mss
+"""Screen capture with 'mss' to get raw pixels from the screen as np array."""
 im0 = np.asarray(self.sct.grab(self.monitor))[:, :, :3]  # BGRA to BGR
 s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "

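For readers unfamiliar with the pattern the clarified `LoadScreenshots.__next__` docstring describes, here is a minimal, hedged sketch of grabbing raw screen pixels with `mss` and keeping them as a NumPy BGR array. The monitor index and the printed shape are illustrative assumptions, not values taken from the ultralytics code.

```python
# Minimal sketch of the mss screen-capture pattern (assumed monitor index for illustration).
import mss
import numpy as np

with mss.mss() as sct:
    monitor = sct.monitors[1]  # first physical monitor; index 0 is the combined virtual screen
    frame = np.asarray(sct.grab(monitor))[:, :, :3]  # drop the alpha channel: BGRA -> BGR
    print(frame.shape, frame.dtype)  # e.g. (1080, 1920, 3) uint8
```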
ultralytics/data/split_dota.py
CHANGED
@@ -19,11 +19,19 @@ from shapely.geometry import Polygon

 def bbox_iof(polygon1, bbox2, eps=1e-6):
 """
-Calculate
+Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.

 Args:
-polygon1 (np.ndarray): Polygon coordinates, (n, 8).
-bbox2 (np.ndarray): Bounding boxes, (n
+polygon1 (np.ndarray): Polygon coordinates, shape (n, 8).
+bbox2 (np.ndarray): Bounding boxes, shape (n, 4).
+eps (float, optional): Small value to prevent division by zero. Defaults to 1e-6.
+
+Returns:
+(np.ndarray): IoF scores, shape (n, 1) or (n, m) if bbox2 is (m, 4).
+
+Note:
+Polygon format: [x1, y1, x2, y2, x3, y3, x4, y4].
+Bounding box format: [x_min, y_min, x_max, y_max].
 """
 polygon1 = polygon1.reshape(-1, 4, 2)
 lt_point = np.min(polygon1, axis=-2)  # left-top
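The expanded `bbox_iof` docstring defines IoF as the intersection area divided by the foreground (polygon) area. The sketch below illustrates that definition for a single polygon/box pair using shapely (which split_dota.py already imports); it is for intuition only and is not the vectorized NumPy implementation the module actually uses. The helper name and sample coordinates are assumptions for illustration.

```python
# Illustrative single-pair IoF: area(polygon ∩ box) / area(polygon).
from shapely.geometry import Polygon, box


def iof_single(polygon_xy, bbox_xyxy, eps=1e-6):
    """IoF between one quadrilateral [x1, y1, ..., x4, y4] and one box [xmin, ymin, xmax, ymax]."""
    poly = Polygon(list(zip(polygon_xy[0::2], polygon_xy[1::2])))
    rect = box(*bbox_xyxy)
    return poly.intersection(rect).area / (poly.area + eps)


print(iof_single([0, 0, 4, 0, 4, 4, 0, 4], [2, 2, 6, 6]))  # 0.25: a quarter of the polygon overlaps the box
```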
ultralytics/data/utils.py
CHANGED
@@ -265,7 +265,6 @@ def check_det_dataset(dataset, autodownload=True):
 Returns:
 (dict): Parsed dataset information and paths.
 """
-
 file = check_file(dataset)

 # Download (optional)
@@ -363,7 +362,6 @@ def check_cls_dataset(dataset, split=""):
 - 'nc' (int): The number of classes in the dataset.
 - 'names' (dict): A dictionary of class names in the dataset.
 """
-
 # Download (optional if dataset=https://file.zip is passed directly)
 if str(dataset).startswith(("http:/", "https:/")):
 dataset = safe_download(dataset, dir=DATASETS_DIR, unzip=True, delete=False)
@@ -438,11 +436,11 @@ class HUBDatasetStats:
 ```python
 from ultralytics.data.utils import HUBDatasetStats

-stats = HUBDatasetStats(
-stats = HUBDatasetStats(
-stats = HUBDatasetStats(
-stats = HUBDatasetStats(
-stats = HUBDatasetStats(
+stats = HUBDatasetStats("path/to/coco8.zip", task="detect")  # detect dataset
+stats = HUBDatasetStats("path/to/coco8-seg.zip", task="segment")  # segment dataset
+stats = HUBDatasetStats("path/to/coco8-pose.zip", task="pose")  # pose dataset
+stats = HUBDatasetStats("path/to/dota8.zip", task="obb")  # OBB dataset
+stats = HUBDatasetStats("path/to/imagenet10.zip", task="classify")  # classification dataset

 stats.get_json(save=True)
 stats.process_images()
@@ -598,11 +596,10 @@ def compress_one_image(f, f_new=None, max_dim=1920, quality=50):
 from pathlib import Path
 from ultralytics.data.utils import compress_one_image

-for f in Path(
+for f in Path("path/to/dataset").rglob("*.jpg"):
 compress_one_image(f)
 ```
 """
-
 try:  # use PIL
 im = Image.open(f)
 r = max_dim / max(im.height, im.width)  # ratio
@@ -635,7 +632,6 @@ def autosplit(path=DATASETS_DIR / "coco8/images", weights=(0.9, 0.1, 0.0), annot
 autosplit()
 ```
 """
-
 path = Path(path)  # images dir
 files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS)  # image files only
 n = len(files)  # number of files
ultralytics/engine/exporter.py
CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """
-Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
+Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.

 Format | `format=argument` | Model
 --- | --- | ---
@@ -533,9 +533,7 @@ class Exporter:

 @try_export
 def export_ncnn(self, prefix=colorstr("NCNN:")):
-"""
-YOLOv8 NCNN export using PNNX https://github.com/pnnx/pnnx.
-"""
+"""YOLOv8 NCNN export using PNNX https://github.com/pnnx/pnnx."""
 check_requirements("ncnn")
 import ncnn  # noqa

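The `export_ncnn` docstring fix above describes NCNN export via PNNX, and the hunk shows `check_requirements("ncnn")` installing the dependency on demand. A hedged sketch of how that path is reached through the public API follows; the checkpoint name and printed path are illustrative assumptions.

```python
# Usage sketch for the NCNN export path: Model.export(format="ncnn") drives export_ncnn(),
# which checks the ncnn requirement and converts the model via PNNX.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")               # any YOLOv8 PyTorch checkpoint
ncnn_path = model.export(format="ncnn")  # produces a *_ncnn_model directory
print(ncnn_path)
```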
ultralytics/engine/model.py
CHANGED
@@ -72,11 +72,11 @@ class Model(nn.Module):

 Examples:
 >>> from ultralytics import YOLO
->>> model = YOLO(
->>> results = model.predict(
->>> model.train(data=
+>>> model = YOLO("yolov8n.pt")
+>>> results = model.predict("image.jpg")
+>>> model.train(data="coco128.yaml", epochs=3)
 >>> metrics = model.val()
->>> model.export(format=
+>>> model.export(format="onnx")
 """

 def __init__(
@@ -166,8 +166,8 @@ class Model(nn.Module):
 Results object.

 Examples:
->>> model = YOLO(
->>> results = model(
+>>> model = YOLO("yolov8n.pt")
+>>> results = model("https://ultralytics.com/images/bus.jpg")
 >>> for r in results:
 ... print(f"Detected {len(r)} objects in image")
 """
@@ -188,9 +188,9 @@ class Model(nn.Module):
 (bool): True if the model string is a valid Triton Server URL, False otherwise.

 Examples:
->>> Model.is_triton_model(
+>>> Model.is_triton_model("http://localhost:8000/v2/models/yolov8n")
 True
->>> Model.is_triton_model(
+>>> Model.is_triton_model("yolov8n.pt")
 False
 """
 from urllib.parse import urlsplit
@@ -253,7 +253,7 @@ class Model(nn.Module):

 Examples:
 >>> model = Model()
->>> model._new(
+>>> model._new("yolov8n.yaml", task="detect", verbose=True)
 """
 cfg_dict = yaml_model_load(cfg)
 self.cfg = cfg
@@ -284,8 +284,8 @@ class Model(nn.Module):

 Examples:
 >>> model = Model()
->>> model._load(
->>> model._load(
+>>> model._load("yolov8n.pt")
+>>> model._load("path/to/weights.pth", task="detect")
 """
 if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):
 weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"]) # download and return local file
@@ -348,7 +348,7 @@ class Model(nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = Model(
+>>> model = Model("yolov8n.pt")
 >>> model.reset_weights()
 """
 self._check_is_pytorch_model()
@@ -377,8 +377,8 @@ class Model(nn.Module):

 Examples:
 >>> model = Model()
->>> model.load(
->>> model.load(Path(
+>>> model.load("yolov8n.pt")
+>>> model.load(Path("path/to/weights.pt"))
 """
 self._check_is_pytorch_model()
 if isinstance(weights, (str, Path)):
@@ -402,8 +402,8 @@ class Model(nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = Model(
->>> model.save(
+>>> model = Model("yolov8n.pt")
+>>> model.save("my_model.pt")
 """
 self._check_is_pytorch_model()
 from copy import deepcopy
@@ -439,7 +439,7 @@ class Model(nn.Module):
 TypeError: If the model is not a PyTorch model.

 Examples:
->>> model = Model(
+>>> model = Model("yolov8n.pt")
 >>> model.info() # Prints model summary
 >>> info_list = model.info(detailed=True, verbose=False) # Returns detailed info as a list
 """
@@ -494,8 +494,8 @@ class Model(nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = YOLO(
->>> image =
+>>> model = YOLO("yolov8n.pt")
+>>> image = "https://ultralytics.com/images/bus.jpg"
 >>> embeddings = model.embed(image)
 >>> print(embeddings[0].shape)
 """
@@ -531,8 +531,8 @@ class Model(nn.Module):
 Results object.

 Examples:
->>> model = YOLO(
->>> results = model.predict(source=
+>>> model = YOLO("yolov8n.pt")
+>>> results = model.predict(source="path/to/image.jpg", conf=0.25)
 >>> for r in results:
 ... print(r.boxes.data) # print detection bounding boxes

@@ -592,8 +592,8 @@ class Model(nn.Module):
 AttributeError: If the predictor does not have registered trackers.

 Examples:
->>> model = YOLO(
->>> results = model.track(source=
+>>> model = YOLO("yolov8n.pt")
+>>> results = model.track(source="path/to/video.mp4", show=True)
 >>> for r in results:
 ... print(r.boxes.id) # print tracking IDs

@@ -635,8 +635,8 @@ class Model(nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = YOLO(
->>> results = model.val(data=
+>>> model = YOLO("yolov8n.pt")
+>>> results = model.val(data="coco128.yaml", imgsz=640)
 >>> print(results.box.map) # Print mAP50-95
 """
 custom = {"rect": True} # method defaults
@@ -677,8 +677,8 @@ class Model(nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = YOLO(
->>> results = model.benchmark(data=
+>>> model = YOLO("yolov8n.pt")
+>>> results = model.benchmark(data="coco8.yaml", imgsz=640, half=True)
 >>> print(results)
 """
 self._check_is_pytorch_model()
@@ -727,8 +727,8 @@ class Model(nn.Module):
 RuntimeError: If the export process fails due to errors.

 Examples:
->>> model = YOLO(
->>> model.export(format=
+>>> model = YOLO("yolov8n.pt")
+>>> model.export(format="onnx", dynamic=True, simplify=True)
 'path/to/exported/model.onnx'
 """
 self._check_is_pytorch_model()
@@ -782,8 +782,8 @@ class Model(nn.Module):
 ModuleNotFoundError: If the HUB SDK is not installed.

 Examples:
->>> model = YOLO(
->>> results = model.train(data=
+>>> model = YOLO("yolov8n.pt")
+>>> results = model.train(data="coco128.yaml", epochs=3)
 """
 self._check_is_pytorch_model()
 if hasattr(self.session, "model") and self.session.model.id: # Ultralytics HUB session with loaded model
@@ -847,7 +847,7 @@ class Model(nn.Module):
 AssertionError: If the model is not a PyTorch model.

 Examples:
->>> model = YOLO(
+>>> model = YOLO("yolov8n.pt")
 >>> results = model.tune(use_ray=True, iterations=20)
 >>> print(results)
 """
@@ -907,7 +907,7 @@ class Model(nn.Module):
 AttributeError: If the model or predictor does not have a 'names' attribute.

 Examples:
->>> model = YOLO(
+>>> model = YOLO("yolov8n.pt")
 >>> print(model.names)
 {0: 'person', 1: 'bicycle', 2: 'car', ...}
 """
@@ -957,7 +957,7 @@ class Model(nn.Module):
 (object | None): The transform object of the model if available, otherwise None.

 Examples:
->>> model = YOLO(
+>>> model = YOLO("yolov8n.pt")
 >>> transforms = model.transforms
 >>> if transforms:
 ... print(f"Model transforms: {transforms}")
@@ -986,9 +986,9 @@ class Model(nn.Module):
 Examples:
 >>> def on_train_start(trainer):
 ... print("Training is starting!")
->>> model = YOLO(
+>>> model = YOLO("yolov8n.pt")
 >>> model.add_callback("on_train_start", on_train_start)
->>> model.train(data=
+>>> model.train(data="coco128.yaml", epochs=1)
 """
 self.callbacks[event].append(func)

@@ -1005,9 +1005,9 @@ class Model(nn.Module):
 recognized by the Ultralytics callback system.

 Examples:
->>> model = YOLO(
->>> model.add_callback(
->>> model.clear_callback(
+>>> model = YOLO("yolov8n.pt")
+>>> model.add_callback("on_train_start", lambda: print("Training started"))
+>>> model.clear_callback("on_train_start")
 >>> # All callbacks for 'on_train_start' are now removed

 Notes:
@@ -1035,8 +1035,8 @@ class Model(nn.Module):
 modifications, ensuring consistent behavior across different runs or experiments.

 Examples:
->>> model = YOLO(
->>> model.add_callback(
+>>> model = YOLO("yolov8n.pt")
+>>> model.add_callback("on_train_start", custom_function)
 >>> model.reset_callbacks()
 # All callbacks are now reset to their default functions
 """
@@ -1059,7 +1059,7 @@ class Model(nn.Module):
 (dict): A new dictionary containing only the specified include keys from the input arguments.

 Examples:
->>> original_args = {
+>>> original_args = {"imgsz": 640, "data": "coco.yaml", "task": "detect", "batch": 16, "epochs": 100}
 >>> reset_args = Model._reset_ckpt_args(original_args)
 >>> print(reset_args)
 {'imgsz': 640, 'data': 'coco.yaml', 'task': 'detect'}
@@ -1090,9 +1090,9 @@ class Model(nn.Module):
 NotImplementedError: If the specified key is not supported for the current task.

 Examples:
->>> model = Model(task=
->>> predictor = model._smart_load(
->>> trainer = model._smart_load(
+>>> model = Model(task="detect")
+>>> predictor = model._smart_load("predictor")
+>>> trainer = model._smart_load("trainer")

 Notes:
 - This method is typically used internally by other methods of the Model class.
@@ -1128,8 +1128,8 @@ class Model(nn.Module):
 Examples:
 >>> model = Model()
 >>> task_map = model.task_map
->>> detect_class_map = task_map[
->>> segment_class_map = task_map[
+>>> detect_class_map = task_map["detect"]
+>>> segment_class_map = task_map["segment"]

 Note:
 The actual implementation of this method may vary depending on the specific tasks and
ultralytics/engine/predictor.py
CHANGED
@@ -384,7 +384,7 @@ class BasePredictor:
 cv2.imwrite(save_path, im)

 def show(self, p=""):
-"""Display an image in a window using OpenCV imshow
+"""Display an image in a window using the OpenCV imshow function."""
 im = self.plotted_img
 if platform.system() == "Linux" and p not in self.windows:
 self.windows.append(p)
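The clarified `show()` docstring states that the plotted image is displayed with OpenCV's imshow. A minimal, hedged sketch of that pattern follows; the window name, placeholder image, and wait times are illustrative assumptions rather than values from `BasePredictor`.

```python
# Minimal cv2.imshow pattern: show a BGR NumPy image in a named window and poll for a keypress.
import cv2
import numpy as np

im = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder image; show() uses self.plotted_img
cv2.imshow("preview", im)
cv2.waitKey(1)  # 1 ms for streaming frames; use 0 to block until a key is pressed
cv2.destroyAllWindows()
```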