ultralytics 8.3.124__py3-none-any.whl → 8.3.126__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_cuda.py +71 -66
- tests/test_python.py +5 -8
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +4 -5
- ultralytics/cfg/default.yaml +3 -3
- ultralytics/data/utils.py +7 -7
- ultralytics/engine/exporter.py +7 -7
- ultralytics/engine/model.py +3 -3
- ultralytics/engine/trainer.py +3 -2
- ultralytics/engine/tuner.py +3 -3
- ultralytics/hub/session.py +1 -1
- ultralytics/models/yolo/model.py +3 -3
- ultralytics/nn/autobackend.py +4 -4
- ultralytics/nn/tasks.py +2 -2
- ultralytics/solutions/analytics.py +7 -6
- ultralytics/trackers/track.py +2 -2
- ultralytics/utils/__init__.py +118 -56
- ultralytics/utils/autodevice.py +175 -0
- ultralytics/utils/benchmarks.py +6 -10
- ultralytics/utils/checks.py +4 -4
- ultralytics/utils/metrics.py +6 -2
- ultralytics/utils/plotting.py +11 -5
- ultralytics/utils/torch_utils.py +18 -5
- {ultralytics-8.3.124.dist-info → ultralytics-8.3.126.dist-info}/METADATA +1 -1
- {ultralytics-8.3.124.dist-info → ultralytics-8.3.126.dist-info}/RECORD +29 -28
- {ultralytics-8.3.124.dist-info → ultralytics-8.3.126.dist-info}/WHEEL +1 -1
- {ultralytics-8.3.124.dist-info → ultralytics-8.3.126.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.124.dist-info → ultralytics-8.3.126.dist-info}/licenses/LICENSE +0 -0
- {ultralytics-8.3.124.dist-info → ultralytics-8.3.126.dist-info}/top_level.txt +0 -0
tests/test_cuda.py
CHANGED
@@ -10,8 +10,18 @@ from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
 from ultralytics import YOLO
 from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
 from ultralytics.utils import ASSETS, WEIGHTS_DIR
+from ultralytics.utils.autodevice import GPUInfo
 from ultralytics.utils.checks import check_amp
 
+# Try to find idle devices if CUDA is available
+DEVICES = []
+if CUDA_IS_AVAILABLE:
+    gpu_info = GPUInfo()
+    gpu_info.print_status()
+    idle_gpus = gpu_info.select_idle_gpu(count=2, min_memory_mb=2048)
+    if idle_gpus:
+        DEVICES = idle_gpus
+
 
 def test_checks():
     """Validate CUDA settings against torch CUDA functions."""
@@ -19,16 +29,16 @@ def test_checks():
     assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_amp():
     """Test AMP training checks."""
-    model = YOLO("yolo11n.pt").model.cuda
+    model = YOLO("yolo11n.pt").model.to(f"cuda:{DEVICES[0]}")
     assert check_amp(model)
 
 
 @pytest.mark.slow
 @pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 @pytest.mark.parametrize(
     "task, dynamic, int8, half, batch",
     [  # generate all combinations but exclude those where both int8 and half are True
@@ -40,16 +50,7 @@ def test_amp():
     ],
 )
 def test_export_engine_matrix(task, dynamic, int8, half, batch):
-    """
-    Test YOLO model export to TensorRT format for various configurations and run inference.
-
-    Args:
-        task (str): Task type like 'detect', 'segment', etc.
-        dynamic (bool): Whether to use dynamic input size.
-        int8 (bool): Whether to use INT8 precision.
-        half (bool): Whether to use FP16 precision.
-        batch (int): Batch size for export.
-    """
+    """Test YOLO model export to TensorRT format for various configurations and run inference."""
     file = YOLO(TASK2MODEL[task]).export(
         format="engine",
         imgsz=32,
@@ -60,105 +61,109 @@ def test_export_engine_matrix(task, dynamic, int8, half, batch):
         data=TASK2DATA[task],
         workspace=1,  # reduce workspace GB for less resource utilization during testing
         simplify=True,  # use 'onnxslim'
+        device=DEVICES[0],
     )
-    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, device=DEVICES[0])  # exported model inference
     Path(file).unlink()  # cleanup
     Path(file).with_suffix(".cache").unlink() if int8 else None  # cleanup INT8 cache
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_train():
     """Test model training on a minimal dataset using available CUDA devices."""
-    device =
+    device = DEVICES if len(DEVICES) > 1 else DEVICES[0]
     YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)  # requires imgsz>=64
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_predict_multiple_devices():
     """Validate model prediction consistency across CPU and CUDA devices."""
     model = YOLO("yolo11n.pt")
+
+    # Test CPU
     model = model.cpu()
     assert str(model.device) == "cpu"
-    _ = model(SOURCE)
+    _ = model(SOURCE)
     assert str(model.device) == "cpu"
 
-
-
-
-    assert str(model.device) ==
+    # Test CUDA
+    cuda_device = f"cuda:{DEVICES[0]}"
+    model = model.to(cuda_device)
+    assert str(model.device) == cuda_device
+    _ = model(SOURCE)
+    assert str(model.device) == cuda_device
 
+    # Test CPU again
     model = model.cpu()
     assert str(model.device) == "cpu"
-    _ = model(SOURCE)
+    _ = model(SOURCE)
     assert str(model.device) == "cpu"
 
-
-
-
-
+    # Test CUDA again
+    model = model.to(cuda_device)
+    assert str(model.device) == cuda_device
+    _ = model(SOURCE)
+    assert str(model.device) == cuda_device
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_autobatch():
     """Check optimal batch size for YOLO model training using autobatch utility."""
     from ultralytics.utils.autobatch import check_train_batch_size
 
-    check_train_batch_size(YOLO(MODEL).model.cuda
+    check_train_batch_size(YOLO(MODEL).model.to(f"cuda:{DEVICES[0]}"), imgsz=128, amp=True)
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_utils_benchmarks():
     """Profile YOLO models for performance benchmarks."""
     from ultralytics.utils.benchmarks import ProfileModels
 
     # Pre-export a dynamic engine model to use dynamic inference
-    YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
-    ProfileModels(
+    YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1, device=DEVICES[0])
+    ProfileModels(
+        [MODEL],
+        imgsz=32,
+        half=False,
+        min_time=1,
+        num_timed_runs=3,
+        num_warmup_runs=1,
+        device=DEVICES[0],
+    ).run()
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_predict_sam():
-    """Test SAM model predictions using different prompts
+    """Test SAM model predictions using different prompts."""
     from ultralytics import SAM
     from ultralytics.models.sam import Predictor as SAMPredictor
 
-    # Load a model
     model = SAM(WEIGHTS_DIR / "sam2.1_b.pt")
-
-    # Display model information (optional)
     model.info()
 
-    # Run inference
-    model(SOURCE, device=0)
-
-
-    model(
-
-
-    model(ASSETS / "zidane.jpg", points=[900, 370], device=0)
-
-    #
-
-
-
-
-
-
-
-
-
-    # Create SAMPredictor
-    overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
-    predictor = SAMPredictor(overrides=overrides)
-
-    # Set image
-    predictor.set_image(ASSETS / "zidane.jpg")  # set with image file
+    # Run inference with various prompts
+    model(SOURCE, device=DEVICES[0])
+    model(SOURCE, bboxes=[439, 437, 524, 709], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[900, 370], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[[900, 370]], labels=[1], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[[400, 370], [900, 370]], labels=[1, 1], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[[[900, 370], [1000, 100]]], labels=[[1, 1]], device=DEVICES[0])
+
+    # Test predictor
+    predictor = SAMPredictor(
+        overrides=dict(
+            conf=0.25,
+            task="segment",
+            mode="predict",
+            imgsz=1024,
+            model=WEIGHTS_DIR / "mobile_sam.pt",
+            device=DEVICES[0],
+        )
+    )
+    predictor.set_image(ASSETS / "zidane.jpg")
     # predictor(bboxes=[439, 437, 524, 709])
     # predictor(points=[900, 370], labels=[1])
-
-    # Reset image
     predictor.reset_image()
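The DEVICES selection above is backed by the new ultralytics/utils/autodevice.py module (175 added lines, not shown in this diff). A minimal sketch of the call pattern, using only the GPUInfo methods that appear in the test above (print_status, select_idle_gpu); the fallback logic is illustrative, not part of the package:

    from ultralytics.utils.autodevice import GPUInfo

    gpu_info = GPUInfo()
    gpu_info.print_status()  # log current utilization/memory per GPU
    idle = gpu_info.select_idle_gpu(count=2, min_memory_mb=2048)  # GPU indices, empty if nothing qualifies
    device = idle if idle else "cpu"  # hypothetical fallback when no idle GPU is found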
tests/test_python.py
CHANGED
@@ -10,7 +10,6 @@ import cv2
 import numpy as np
 import pytest
 import torch
-import yaml
 from PIL import Image
 
 from tests import CFG, MODEL, SOURCE, SOURCES_LIST, TMP
@@ -28,6 +27,7 @@ from ultralytics.utils import (
     ROOT,
     WEIGHTS_DIR,
     WINDOWS,
+    YAML,
     checks,
     is_dir_writeable,
     is_github_action_running,
@@ -190,13 +190,10 @@ def test_track_stream():
 
     # Test Global Motion Compensation (GMC) methods
     for gmc in "orb", "sift", "ecc":
-
-
-
-
-        with open(tracker, "w", encoding="utf-8") as f:
-            yaml.safe_dump(data, f)
-        model.track(video_url, imgsz=160, tracker=tracker)
+        default_args = YAML.load(ROOT / "cfg/trackers/botsort.yaml")
+        custom_yaml = TMP / f"botsort-{gmc}.yaml"
+        YAML.save(custom_yaml, {**default_args, "gmc_method": gmc})
+        model.track(video_url, imgsz=160, tracker=custom_yaml)
 
 
 def test_val():
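The yaml-handling change above recurs throughout this release: the module-level yaml_load, yaml_save and yaml_print helpers are replaced by a YAML class exported from ultralytics.utils (defined in the +118/-56 change to ultralytics/utils/__init__.py, which is not shown here). A minimal sketch of the call pattern as it appears across these diffs; the signatures are inferred from the call sites, so treat them as assumptions:

    from ultralytics.utils import YAML

    data = YAML.load("coco8.yaml")  # read a YAML file into a dict
    data = YAML.load("coco8.yaml", append_filename=True)  # also record the source path, as in data/utils.py
    YAML.save("args.yaml", data)  # write a dict back to disk
    YAML.print("args.yaml")  # log the contents of a YAML file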
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
@@ -23,13 +23,12 @@ from ultralytics.utils import (
     SETTINGS,
     SETTINGS_FILE,
     TESTS_RUNNING,
+    YAML,
     IterableSimpleNamespace,
     checks,
     colorstr,
     deprecation_warn,
     vscode_msg,
-    yaml_load,
-    yaml_print,
 )
 
 # Define valid solutions
@@ -270,7 +269,7 @@ def cfg2dict(cfg: Union[str, Path, Dict, SimpleNamespace]) -> Dict:
         - If cfg is already a dictionary, it's returned unchanged.
     """
     if isinstance(cfg, (str, Path)):
-        cfg =
+        cfg = YAML.load(cfg)  # load dict
     elif isinstance(cfg, SimpleNamespace):
         cfg = vars(cfg)  # convert to dict
     return cfg
@@ -853,7 +852,7 @@ def entrypoint(debug: str = "") -> None:
         "checks": checks.collect_system_info,
         "version": lambda: LOGGER.info(__version__),
         "settings": lambda: handle_yolo_settings(args[1:]),
-        "cfg": lambda:
+        "cfg": lambda: YAML.print(DEFAULT_CFG_PATH),
         "hub": lambda: handle_yolo_hub(args[1:]),
         "login": lambda: handle_yolo_hub(args),
         "logout": lambda: handle_yolo_hub(args),
@@ -880,7 +879,7 @@ def entrypoint(debug: str = "") -> None:
                 k, v = parse_key_value_pair(a)
                 if k == "cfg" and v is not None:  # custom.yaml passed
                     LOGGER.info(f"Overriding {DEFAULT_CFG_PATH} with {v}")
-                    overrides = {k: val for k, val in
+                    overrides = {k: val for k, val in YAML.load(checks.check_yaml(v)).items() if k != "cfg"}
                 else:
                     overrides[k] = v
             except (NameError, SyntaxError, ValueError, AssertionError) as e:
ultralytics/cfg/default.yaml
CHANGED
@@ -17,7 +17,7 @@ imgsz: 640 # (int | list) input images size as int for train and val modes, or l
 save: True # (bool) save train checkpoints and predict results
 save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1)
 cache: False # (bool) True/ram, disk or False. Use cache for data loading
-device: # (int | str | list
+device: # (int | str | list) device: CUDA device=0 or [0,1,2,3] or "cpu/mps" or -1 or [-1,-1] to auto-select idle GPUs
 workers: 8 # (int) number of worker threads for data loading (per RANK if DDP)
 project: # (str, optional) project name
 name: # (str, optional) experiment name, results saved to 'project/name' directory
@@ -35,7 +35,7 @@ resume: False # (bool) resume training from last checkpoint
 amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check
 fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set)
 profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers
-freeze:
+freeze: # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
 multi_scale: False # (bool) Whether to use multiscale during training
 # Segmentation
 overlap_mask: True # (bool) merge object masks into a single image mask during training (segment train only)
@@ -84,7 +84,7 @@ int8: False # (bool) CoreML/TF INT8 quantization
 dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes
 simplify: True # (bool) ONNX: simplify model using `onnxslim`
 opset: # (int, optional) ONNX: opset version
-workspace:
+workspace: # (float, optional) TensorRT: workspace size (GiB), `None` will let TensorRT auto-allocate memory
 nms: False # (bool) CoreML: add NMS
 
 # Hyperparameters ------------------------------------------------------------------------------------------------------
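The rewritten device comment documents a new -1 option (or a list such as [-1, -1]) for auto-selecting idle GPUs, which ties in with the autodevice changes above; the trainer diff further down also shows self.args.device being rewritten to the concretely selected device. A hedged example of passing it through the Python API — the exact selection behaviour is not shown in this diff:

    from ultralytics import YOLO

    # Ask the library to pick an idle CUDA device instead of hard-coding one (assumption based on the comment above)
    YOLO("yolo11n.pt").train(data="coco8.yaml", epochs=1, imgsz=64, device=-1)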
ultralytics/data/utils.py
CHANGED
@@ -18,16 +18,16 @@ from ultralytics.nn.autobackend import check_class_names
 from ultralytics.utils import (
     DATASETS_DIR,
     LOGGER,
+    MACOS,
     NUM_THREADS,
     ROOT,
     SETTINGS_FILE,
     TQDM,
+    YAML,
     clean_url,
     colorstr,
     emojis,
     is_dir_writeable,
-    yaml_load,
-    yaml_save,
 )
 from ultralytics.utils.checks import check_file, check_font, is_ascii
 from ultralytics.utils.downloads import download, safe_download, unzip_file
@@ -36,7 +36,7 @@ from ultralytics.utils.ops import segments2boxes
 HELP_URL = "See https://docs.ultralytics.com/datasets for dataset formatting guidance."
 IMG_FORMATS = {"bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm", "heic"}  # image suffixes
 VID_FORMATS = {"asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv", "webm"}  # video suffixes
-PIN_MEMORY = str(os.getenv("PIN_MEMORY",
+PIN_MEMORY = str(os.getenv("PIN_MEMORY", not MACOS)).lower() == "true"  # global pin_memory for dataloaders
 FORMATS_HELP_MSG = f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}"
 
 
@@ -399,7 +399,7 @@ def check_det_dataset(dataset, autodownload=True):
         extract_dir, autodownload = file.parent, False
 
     # Read YAML
-    data =
+    data = YAML.load(file, append_filename=True)  # dictionary
 
     # Checks
     for k in "train", "val":
@@ -492,7 +492,7 @@ def check_cls_dataset(dataset, split=""):
     # Download (optional if dataset=https://file.zip is passed directly)
     if str(dataset).startswith(("http:/", "https:/")):
         dataset = safe_download(dataset, dir=DATASETS_DIR, unzip=True, delete=False)
-    elif
+    elif str(dataset).endswith((".zip", ".tar", ".gz")):
         file = check_file(dataset)
         dataset = safe_download(file, dir=DATASETS_DIR, unzip=True, delete=False)
 
@@ -599,9 +599,9 @@ class HUBDatasetStats:
         _, data_dir, yaml_path = self._unzip(Path(path))
         try:
             # Load YAML with checks
-            data =
+            data = YAML.load(yaml_path)
             data["path"] = ""  # strip path since YAML should be in dataset root for all HUB datasets
-
+            YAML.save(yaml_path, data)
             data = check_det_dataset(yaml_path, autodownload)  # dict
             data["path"] = data_dir  # YAML path should be set to '' (relative) or parent (absolute)
         except Exception as e:
ultralytics/engine/exporter.py
CHANGED
@@ -89,10 +89,10 @@ from ultralytics.utils import (
     RKNN_CHIPS,
     ROOT,
     WINDOWS,
+    YAML,
     callbacks,
     colorstr,
     get_default_args,
-    yaml_save,
 )
 from ultralytics.utils.checks import (
     check_imgsz,
@@ -631,7 +631,7 @@ class Exporter:
         ov_model.set_rt_info("fit_to_window_letterbox", ["model_info", "resize_type"])
 
         ov.save_model(ov_model, file, compress_to_fp16=self.args.half)
-
+        YAML.save(Path(file).parent / "metadata.yaml", self.metadata)  # add metadata.yaml
 
         if self.args.int8:
             fq = str(self.file).replace(self.file.suffix, f"_int8_openvino_model{os.sep}")
@@ -690,7 +690,7 @@ class Exporter:
         f = str(self.file).replace(self.file.suffix, f"_paddle_model{os.sep}")
 
         pytorch2paddle(module=self.model, save_dir=f, jit_type="trace", input_examples=[self.im])  # export
-
+        YAML.save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
         return f, None
 
     @try_export
@@ -783,7 +783,7 @@ class Exporter:
         for f_debug in ("debug.bin", "debug.param", "debug2.bin", "debug2.param", *pnnx_files):
             Path(f_debug).unlink(missing_ok=True)
 
-
+        YAML.save(f / "metadata.yaml", self.metadata)  # add metadata.yaml
         return str(f), None
 
     @try_export
@@ -974,7 +974,7 @@ class Exporter:
             output_signaturedefs=True,  # fix error with Attention block group convolution
             optimization_for_gpu_delegate=True,
         )
-
+        YAML.save(f / "metadata.yaml", self.metadata)  # add metadata.yaml
 
         # Remove/rename TFLite models
         if self.args.int8:
@@ -1087,7 +1087,7 @@ class Exporter:
         LOGGER.warning(f"{prefix} your model may not work correctly with spaces in path '{f}'.")
 
         # Add metadata
-
+        YAML.save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
         return f, None
 
     @try_export
@@ -1114,7 +1114,7 @@ class Exporter:
         rknn.build(do_quantization=self.args.int8)
         f = f.replace(".onnx", f"-{self.args.name}-int8.rknn" if self.args.int8 else f"-{self.args.name}-fp16.rknn")
         rknn.export_rknn(f"{export_path / f}")
-
+        YAML.save(export_path / "metadata.yaml", self.metadata)
         return export_path, None
 
     @try_export
ultralytics/engine/model.py
CHANGED
@@ -18,9 +18,9 @@ from ultralytics.utils import (
     LOGGER,
     RANK,
     SETTINGS,
+    YAML,
     callbacks,
     checks,
-    yaml_load,
 )
 
 
@@ -142,7 +142,7 @@ class Model(torch.nn.Module):
 
         # Load or create new YOLO model
         __import__("os").environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"  # to avoid deterministic warnings
-        if
+        if str(model).endswith((".yaml", ".yml")):
            self._new(model, task=task, verbose=verbose)
         else:
             self._load(model, task=task)
@@ -773,7 +773,7 @@ class Model(torch.nn.Module):
 
         checks.check_pip_update_available()
 
-        overrides =
+        overrides = YAML.load(checks.check_yaml(kwargs["cfg"])) if kwargs.get("cfg") else self.overrides
         custom = {
             # NOTE: handle the case when 'cfg' includes 'data'.
             "data": overrides.get("data") or DEFAULT_CFG_DICT["data"] or TASK2DATA[self.task],
ultralytics/engine/trainer.py
CHANGED
@@ -31,11 +31,11 @@ from ultralytics.utils import (
     LOGGER,
     RANK,
     TQDM,
+    YAML,
     callbacks,
     clean_url,
     colorstr,
     emojis,
-    yaml_save,
 )
 from ultralytics.utils.autobatch import check_train_batch_size
 from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_model_file_from_stem, print_args
@@ -105,6 +105,7 @@ class BaseTrainer:
         self.args = get_cfg(cfg, overrides)
         self.check_resume(overrides)
         self.device = select_device(self.args.device, self.args.batch)
+        self.args.device = str(self.device)  # ensure -1 is updated to selected CUDA device
         self.validator = None
         self.metrics = None
         self.plots = {}
@@ -117,7 +118,7 @@ class BaseTrainer:
         if RANK in {-1, 0}:
             self.wdir.mkdir(parents=True, exist_ok=True)  # make dir
             self.args.save_dir = str(self.save_dir)
-
+            YAML.save(self.save_dir / "args.yaml", vars(self.args))  # save run args
         self.last, self.best = self.wdir / "last.pt", self.wdir / "best.pt"  # checkpoint paths
         self.save_period = self.args.save_period
 
ultralytics/engine/tuner.py
CHANGED
@@ -23,7 +23,7 @@ import numpy as np
 import torch
 
 from ultralytics.cfg import get_cfg, get_save_dir
-from ultralytics.utils import DEFAULT_CFG, LOGGER, callbacks, colorstr, remove_colorstr
+from ultralytics.utils import DEFAULT_CFG, LOGGER, YAML, callbacks, colorstr, remove_colorstr
 from ultralytics.utils.plotting import plot_tune_results
 
 
@@ -235,9 +235,9 @@ class Tuner:
         )
         LOGGER.info("\n" + header)
         data = {k: float(x[best_idx, i + 1]) for i, k in enumerate(self.space.keys())}
-
+        YAML.save(
             self.tune_dir / "best_hyperparameters.yaml",
             data=data,
             header=remove_colorstr(header.replace(self.prefix, "# ")) + "\n",
         )
-
+        YAML.print(self.tune_dir / "best_hyperparameters.yaml")
ultralytics/hub/session.py
CHANGED
@@ -201,7 +201,7 @@ class HUBTrainingSession:
             HUBModelError: If the identifier format is not recognized.
         """
         api_key, model_id, filename = None, None, None
-        if
+        if str(identifier).endswith((".pt", ".yaml")):
             filename = identifier
         elif identifier.startswith(f"{HUB_WEB_ROOT}/models/"):
             parsed_url = urlparse(identifier)
ultralytics/models/yolo/model.py
CHANGED
@@ -15,7 +15,7 @@ from ultralytics.nn.tasks import (
     YOLOEModel,
     YOLOESegModel,
 )
-from ultralytics.utils import ROOT,
+from ultralytics.utils import ROOT, YAML
 
 
 class YOLO(Model):
@@ -107,7 +107,7 @@ class YOLOWorld(Model):
 
         # Assign default COCO class names when there are no custom names
         if not hasattr(self.model, "names"):
-            self.model.names =
+            self.model.names = YAML.load(ROOT / "cfg/datasets/coco8.yaml").get("names")
 
     @property
     def task_map(self):
@@ -156,7 +156,7 @@ class YOLOE(Model):
 
         # Assign default COCO class names when there are no custom names
         if not hasattr(self.model, "names"):
-            self.model.names =
+            self.model.names = YAML.load(ROOT / "cfg/datasets/coco8.yaml").get("names")
 
     @property
     def task_map(self):
ultralytics/nn/autobackend.py
CHANGED
@@ -14,7 +14,7 @@ import torch
 import torch.nn as nn
 from PIL import Image
 
-from ultralytics.utils import ARM64, IS_JETSON, LINUX, LOGGER, PYTHON_VERSION, ROOT,
+from ultralytics.utils import ARM64, IS_JETSON, LINUX, LOGGER, PYTHON_VERSION, ROOT, YAML
 from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml, is_rockchip
 from ultralytics.utils.downloads import attempt_download_asset, is_url
 
@@ -33,7 +33,7 @@ def check_class_names(names):
                 f"{min(names.keys())}-{max(names.keys())} defined in your dataset YAML."
             )
         if isinstance(names[0], str) and names[0].startswith("n0"):  # imagenet class codes, i.e. 'n01440764'
-            names_map =
+            names_map = YAML.load(ROOT / "cfg/datasets/ImageNet.yaml")["map"]  # human-readable names
            names = {k: names_map[v] for k, v in names.items()}
     return names
 
@@ -42,7 +42,7 @@ def default_class_names(data=None):
     """Applies default class names to an input YAML file or returns numerical class names."""
     if data:
         try:
-            return
+            return YAML.load(check_yaml(data))["names"]
         except Exception:
             pass
     return {i: f"class{i}" for i in range(999)}  # return default if above errors
@@ -536,7 +536,7 @@ class AutoBackend(nn.Module):
 
         # Load external metadata YAML
        if isinstance(metadata, (str, Path)) and Path(metadata).exists():
-            metadata =
+            metadata = YAML.load(metadata)
         if metadata and isinstance(metadata, dict):
             for k, v in metadata.items():
                 if k in {"stride", "batch", "channels"}:
ultralytics/nn/tasks.py
CHANGED
@@ -69,7 +69,7 @@ from ultralytics.nn.modules import (
     YOLOESegment,
     v10Detect,
 )
-from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, colorstr, emojis
+from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, YAML, colorstr, emojis
 from ultralytics.utils.checks import check_requirements, check_suffix, check_yaml
 from ultralytics.utils.loss import (
     E2EDetectLoss,
@@ -1523,7 +1523,7 @@ def yaml_model_load(path):
 
     unified_path = re.sub(r"(\d+)([nslmx])(.+)?$", r"\1\3", str(path))  # i.e. yolov8x.yaml -> yolov8.yaml
     yaml_file = check_yaml(unified_path, hard=False) or check_yaml(path)
-    d =
+    d = YAML.load(yaml_file)  # model dict
     d["scale"] = guess_model_scale(path)
     d["yaml_file"] = str(path)
     return d