dgenerate-ultralytics-headless 8.3.248 → 8.4.7 (py3-none-any.whl)
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/METADATA +52 -61
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/RECORD +97 -84
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/WHEEL +1 -1
- tests/__init__.py +2 -2
- tests/conftest.py +1 -1
- tests/test_cuda.py +8 -2
- tests/test_engine.py +8 -8
- tests/test_exports.py +11 -4
- tests/test_integrations.py +9 -9
- tests/test_python.py +41 -16
- tests/test_solutions.py +3 -3
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +31 -31
- ultralytics/cfg/datasets/TT100K.yaml +346 -0
- ultralytics/cfg/datasets/coco12-formats.yaml +101 -0
- ultralytics/cfg/default.yaml +3 -1
- ultralytics/cfg/models/26/yolo26-cls.yaml +33 -0
- ultralytics/cfg/models/26/yolo26-obb.yaml +52 -0
- ultralytics/cfg/models/26/yolo26-p2.yaml +60 -0
- ultralytics/cfg/models/26/yolo26-p6.yaml +62 -0
- ultralytics/cfg/models/26/yolo26-pose.yaml +53 -0
- ultralytics/cfg/models/26/yolo26-seg.yaml +52 -0
- ultralytics/cfg/models/26/yolo26.yaml +52 -0
- ultralytics/cfg/models/26/yoloe-26-seg.yaml +53 -0
- ultralytics/cfg/models/26/yoloe-26.yaml +53 -0
- ultralytics/data/annotator.py +2 -2
- ultralytics/data/augment.py +15 -0
- ultralytics/data/converter.py +76 -45
- ultralytics/data/dataset.py +1 -1
- ultralytics/data/utils.py +2 -2
- ultralytics/engine/exporter.py +34 -28
- ultralytics/engine/model.py +38 -37
- ultralytics/engine/predictor.py +17 -17
- ultralytics/engine/results.py +22 -15
- ultralytics/engine/trainer.py +83 -48
- ultralytics/engine/tuner.py +20 -11
- ultralytics/engine/validator.py +16 -16
- ultralytics/models/fastsam/predict.py +1 -1
- ultralytics/models/yolo/classify/predict.py +1 -1
- ultralytics/models/yolo/classify/train.py +1 -1
- ultralytics/models/yolo/classify/val.py +1 -1
- ultralytics/models/yolo/detect/predict.py +2 -2
- ultralytics/models/yolo/detect/train.py +6 -3
- ultralytics/models/yolo/detect/val.py +7 -1
- ultralytics/models/yolo/model.py +8 -8
- ultralytics/models/yolo/obb/predict.py +2 -2
- ultralytics/models/yolo/obb/train.py +3 -3
- ultralytics/models/yolo/obb/val.py +1 -1
- ultralytics/models/yolo/pose/predict.py +1 -1
- ultralytics/models/yolo/pose/train.py +3 -1
- ultralytics/models/yolo/pose/val.py +1 -1
- ultralytics/models/yolo/segment/predict.py +3 -3
- ultralytics/models/yolo/segment/train.py +4 -4
- ultralytics/models/yolo/segment/val.py +2 -2
- ultralytics/models/yolo/yoloe/train.py +6 -1
- ultralytics/models/yolo/yoloe/train_seg.py +6 -1
- ultralytics/nn/autobackend.py +14 -8
- ultralytics/nn/modules/__init__.py +8 -0
- ultralytics/nn/modules/block.py +128 -8
- ultralytics/nn/modules/head.py +788 -203
- ultralytics/nn/tasks.py +86 -41
- ultralytics/nn/text_model.py +5 -2
- ultralytics/optim/__init__.py +5 -0
- ultralytics/optim/muon.py +338 -0
- ultralytics/solutions/ai_gym.py +3 -3
- ultralytics/solutions/config.py +1 -1
- ultralytics/solutions/heatmap.py +1 -1
- ultralytics/solutions/instance_segmentation.py +2 -2
- ultralytics/solutions/object_counter.py +1 -1
- ultralytics/solutions/parking_management.py +1 -1
- ultralytics/solutions/solutions.py +2 -2
- ultralytics/trackers/byte_tracker.py +7 -7
- ultralytics/trackers/track.py +1 -1
- ultralytics/utils/__init__.py +8 -8
- ultralytics/utils/benchmarks.py +26 -26
- ultralytics/utils/callbacks/platform.py +173 -64
- ultralytics/utils/callbacks/tensorboard.py +2 -0
- ultralytics/utils/callbacks/wb.py +6 -1
- ultralytics/utils/checks.py +28 -9
- ultralytics/utils/dist.py +1 -0
- ultralytics/utils/downloads.py +5 -3
- ultralytics/utils/export/engine.py +19 -10
- ultralytics/utils/export/imx.py +38 -20
- ultralytics/utils/export/tensorflow.py +21 -21
- ultralytics/utils/files.py +2 -2
- ultralytics/utils/loss.py +597 -203
- ultralytics/utils/metrics.py +2 -1
- ultralytics/utils/ops.py +11 -2
- ultralytics/utils/patches.py +42 -0
- ultralytics/utils/plotting.py +3 -0
- ultralytics/utils/tal.py +100 -20
- ultralytics/utils/torch_utils.py +1 -1
- ultralytics/utils/tqdm.py +4 -1
- ultralytics/utils/tuner.py +2 -5
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/top_level.txt +0 -0
tests/test_python.py
CHANGED
@@ -159,6 +159,30 @@ def test_predict_gray_and_4ch(tmp_path):
     f.unlink()  # cleanup


+@pytest.mark.slow
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_predict_all_image_formats():
+    """Test YOLO prediction all 12 image formats (AVIF, BMP, DNG, HEIC, JP2, JPEG, JPG, MPO, PNG, TIF, TIFF, WebP)."""
+    # Download dataset if needed
+    data = check_det_dataset("coco12-formats.yaml")
+    dataset_path = Path(data["path"])
+
+    # Collect all images from train and val
+    images = list((dataset_path / "images" / "train").glob("*.*"))
+    images += list((dataset_path / "images" / "val").glob("*.*"))
+    assert len(images) == 12, f"Expected 12 images, found {len(images)}"
+
+    # Verify all format extensions are represented
+    extensions = {img.suffix.lower().lstrip(".") for img in images}
+    expected = {"avif", "bmp", "dng", "heic", "jp2", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp"}
+    assert extensions == expected, f"Missing formats: {expected - extensions}"
+
+    # Run inference on all images
+    model = YOLO(MODEL)
+    results = model(images, imgsz=32)
+    assert len(results) == 12, f"Expected 12 results, got {len(results)}"
+
+
 @pytest.mark.slow
 @pytest.mark.skipif(not ONLINE, reason="environment is offline")
 @pytest.mark.skipif(is_github_action_running(), reason="No auth https://github.com/JuanBindez/pytubefix/issues/166")
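For reference, the new test above is also a compact usage recipe for multi-format prediction. A standalone sketch of the same check, assuming the coco12-formats.yaml descriptor ships with 8.4.x (it appears in the file list above) and using an illustrative model name:

```python
from pathlib import Path

from ultralytics import YOLO
from ultralytics.data.utils import check_det_dataset

data = check_det_dataset("coco12-formats.yaml")  # downloads the dataset if it is missing
images = sorted(Path(data["path"]).glob("images/*/*.*"))  # one image per format across train/val

model = YOLO("yolo26n.pt")
results = model(images, imgsz=32)  # tiny imgsz keeps this a fast smoke check
print(f"predicted {len(results)} images:", sorted({p.suffix.lstrip('.') for p in images}))
```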
@@ -179,7 +203,7 @@ def test_track_stream(model, tmp_path):
 
     Note imgsz=160 required for tracking for higher confidence and better matches.
     """
-    if model == "
+    if model == "yolo26n-cls.pt":  # classification model not supported for tracking
         return
     video_url = f"{ASSETS_URL}/decelera_portrait_min.mov"
     model = YOLO(model)
@@ -187,7 +211,7 @@ def test_track_stream(model, tmp_path):
     model.track(video_url, imgsz=160, tracker="botsort.yaml", save_frames=True)  # test frame saving also
 
     # Test Global Motion Compensation (GMC) methods and ReID
-    for gmc, reidm in zip(["orb", "sift", "ecc"], ["auto", "auto", "
+    for gmc, reidm in zip(["orb", "sift", "ecc"], ["auto", "auto", "yolo26n-cls.pt"]):
         default_args = YAML.load(ROOT / "cfg/trackers/botsort.yaml")
         custom_yaml = tmp_path / f"botsort-{gmc}.yaml"
         YAML.save(custom_yaml, {**default_args, "gmc_method": gmc, "with_reid": True, "model": reidm})
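The ReID loop above doubles as a recipe for building custom BoT-SORT configs on the fly. A minimal sketch using the same keys the hunk shows (gmc_method, with_reid, model); the video path is a placeholder:

```python
from ultralytics import YOLO
from ultralytics.utils import ROOT, YAML

# Start from the bundled defaults and override the GMC method and ReID model
args = YAML.load(ROOT / "cfg/trackers/botsort.yaml")
YAML.save("botsort-sift.yaml", {**args, "gmc_method": "sift", "with_reid": True, "model": "auto"})

model = YOLO("yolo26n.pt")
model.track("path/to/video.mp4", tracker="botsort-sift.yaml", imgsz=160)
```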
@@ -209,25 +233,26 @@ def test_val(task: str, weight: str, data: str) -> None:
     metrics.confusion_matrix.to_json()
 
 
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
 @pytest.mark.skipif(IS_JETSON or IS_RASPBERRYPI, reason="Edge devices not intended for training")
 def test_train_scratch():
-    """Test training the YOLO model from scratch
+    """Test training the YOLO model from scratch on 12 different image types in the COCO12-Formats dataset."""
     model = YOLO(CFG)
-    model.train(data="
+    model.train(data="coco12-formats.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
     model(SOURCE)
 
 
 @pytest.mark.skipif(not ONLINE, reason="environment is offline")
 def test_train_ndjson():
     """Test training the YOLO model using NDJSON format dataset."""
-    model = YOLO(WEIGHTS_DIR / "
+    model = YOLO(WEIGHTS_DIR / "yolo26n.pt")
     model.train(data=f"{ASSETS_URL}/coco8-ndjson.ndjson", epochs=1, imgsz=32)
 
 
 @pytest.mark.parametrize("scls", [False, True])
 def test_train_pretrained(scls):
     """Test training of the YOLO model starting from a pre-trained checkpoint."""
-    model = YOLO(WEIGHTS_DIR / "
+    model = YOLO(WEIGHTS_DIR / "yolo26n-seg.pt")
     model.train(
         data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0, single_cls=scls
     )
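Two arguments in the rewritten scratch-training test deserve a note: batch=-1 enables AutoBatch, which picks the largest batch size that fits in CUDA memory, and cache="disk" stores decoded images next to the dataset to speed up later epochs. A hedged sketch of the same call pattern, assuming a yolo26n.yaml model config is available in this release:

```python
from ultralytics import YOLO

model = YOLO("yolo26n.yaml")  # build from a config, i.e. train from scratch
model.train(
    data="coco8.yaml",   # any small detection dataset works for a smoke run
    epochs=2,
    imgsz=32,
    batch=-1,        # AutoBatch: auto-select batch size to fit GPU memory
    cache="disk",    # cache decoded images on disk for faster later epochs
    close_mosaic=1,  # turn mosaic augmentation off for the final epoch
)
```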
@@ -280,7 +305,7 @@ def test_predict_callback_and_setup():
 @pytest.mark.parametrize("model", MODELS)
 def test_results(model: str, tmp_path):
     """Test YOLO model results processing and output in various formats."""
-    im = f"{ASSETS_URL}/boats.jpg" if model == "
+    im = f"{ASSETS_URL}/boats.jpg" if model == "yolo26n-obb.pt" else SOURCE
     results = YOLO(WEIGHTS_DIR / model)([im, im], imgsz=160)
     for r in results:
         assert len(r), f"'{model}' results should not be empty!"
@@ -300,13 +325,13 @@ def test_results(model: str, tmp_path):
 def test_labels_and_crops():
     """Test output from prediction args for saving YOLO detection labels and crops."""
     imgs = [SOURCE, ASSETS / "zidane.jpg"]
-    results = YOLO(WEIGHTS_DIR / "
+    results = YOLO(WEIGHTS_DIR / "yolo26n.pt")(imgs, imgsz=320, save_txt=True, save_crop=True)
     save_path = Path(results[0].save_dir)
     for r in results:
         im_name = Path(r.path).stem
         cls_idxs = r.boxes.cls.int().tolist()
-        # Check
-        assert cls_idxs
+        # Check that detections are made (at least 2 detections per image expected)
+        assert len(cls_idxs) >= 2, f"Expected at least 2 detections, got {len(cls_idxs)}"
         # Check label path
         labels = save_path / f"labels/{im_name}.txt"
         assert labels.exists()
@@ -360,7 +385,7 @@ def test_data_annotator(tmp_path):
 
     auto_annotate(
         ASSETS,
-        det_model=WEIGHTS_DIR / "
+        det_model=WEIGHTS_DIR / "yolo26n.pt",
         sam_model=WEIGHTS_DIR / "mobile_sam.pt",
         output_dir=tmp_path / "auto_annotate_labels",
     )
@@ -450,7 +475,7 @@ def test_utils_benchmarks():
     """Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
     from ultralytics.utils.benchmarks import ProfileModels
 
-    ProfileModels(["
+    ProfileModels(["yolo26n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).run()
 
 
 def test_utils_torchutils():
@@ -616,14 +641,14 @@ def test_classify_transforms_train(image, auto_augment, erasing, force_color_jit
 @pytest.mark.skipif(not ONLINE, reason="environment is offline")
 def test_model_tune():
     """Tune YOLO model for performance improvement."""
-    YOLO("
-    YOLO("
+    YOLO("yolo26n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+    YOLO("yolo26n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
 
 
 def test_model_embeddings():
     """Test YOLO model embeddings extraction functionality."""
     model_detect = YOLO(MODEL)
-    model_segment = YOLO(WEIGHTS_DIR / "
+    model_segment = YOLO(WEIGHTS_DIR / "yolo26n-seg.pt")
 
     for batch in [SOURCE], [SOURCE, SOURCE]:  # test batch size 1 and 2
         assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
@@ -744,7 +769,7 @@ def test_yolov10():
 
 def test_multichannel():
     """Test YOLO model multi-channel training, validation, and prediction functionality."""
-    model = YOLO("
+    model = YOLO("yolo26n.pt")
     model.train(data="coco8-multispectral.yaml", epochs=1, imgsz=32, close_mosaic=1, cache="disk")
     model.val(data="coco8-multispectral.yaml")
     im = np.zeros((32, 32, 10), dtype=np.uint8)
tests/test_solutions.py
CHANGED
@@ -78,7 +78,7 @@ def process_video(solution, video_path: str, needs_frame_count: bool = False):
         solutions.ObjectCounter,
         False,
         DEMO_VIDEO,
-        {"region": REGION, "model": "
+        {"region": REGION, "model": "yolo26n-obb.pt", "show": SHOW},
     ),
     (
         "Heatmap",
@@ -156,7 +156,7 @@ def process_video(solution, video_path: str, needs_frame_count: bool = False):
         solutions.InstanceSegmentation,
         False,
         DEMO_VIDEO,
-        {"model": "
+        {"model": "yolo26n-seg.pt", "show": SHOW},
     ),
     ("VisionEye", solutions.VisionEye, False, DEMO_VIDEO, {"model": MODEL, "show": SHOW}),
     (
@@ -273,7 +273,7 @@ def test_config_update_method_with_invalid_argument():
 def test_plot_with_no_masks():
     """Test that instance segmentation handles cases with no masks."""
     im0 = np.zeros((640, 480, 3), dtype=np.uint8)
-    isegment = solutions.InstanceSegmentation(model="
+    isegment = solutions.InstanceSegmentation(model="yolo26n-seg.pt")
     results = isegment(im0)
     assert results.plot_im is not None
 
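The no-mask test also serves as minimal usage documentation for the solution class. A short sketch along the same lines (the model name follows this release's YOLO26 convention):

```python
import numpy as np

from ultralytics import solutions

frame = np.zeros((640, 480, 3), dtype=np.uint8)  # blank BGR frame; a real video frame works the same way
isegment = solutions.InstanceSegmentation(model="yolo26n-seg.pt")
results = isegment(frame)  # returns SolutionResults even with no detections
assert results.plot_im is not None  # an annotated frame is always produced
```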
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
@@ -62,11 +62,11 @@ TASK2DATA = {
     "obb": "dota8.yaml",
 }
 TASK2MODEL = {
-    "detect": "
-    "segment": "
-    "classify": "
-    "pose": "
-    "obb": "
+    "detect": "yolo26n.pt",
+    "segment": "yolo26n-seg.pt",
+    "classify": "yolo26n-cls.pt",
+    "pose": "yolo26n-pose.pt",
+    "obb": "yolo26n-obb.pt",
 }
 TASK2METRIC = {
     "detect": "metrics/mAP50-95(B)",
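TASK2MODEL (together with TASK2DATA and TASK2METRIC) is what the CLI consults when a task is given without an explicit model, so this hunk is effectively the default-weights bump to YOLO26. A quick way to inspect the mapping, assuming the constants remain importable from ultralytics.cfg as in 8.3.x:

```python
from ultralytics.cfg import TASK2DATA, TASK2METRIC, TASK2MODEL

for task in ("detect", "segment", "classify", "pose", "obb"):
    # e.g. detect -> yolo26n.pt / coco8.yaml / metrics/mAP50-95(B)
    print(task, TASK2MODEL[task], TASK2DATA[task], TASK2METRIC[task])
```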
@@ -90,13 +90,13 @@ SOLUTIONS_HELP_MSG = f"""
         yolo solutions count source="path/to/video.mp4" region="[(20, 400), (1080, 400), (1080, 360), (20, 360)]"
 
     2. Call heatmap solution
-        yolo solutions heatmap colormap=cv2.COLORMAP_PARULA model=
+        yolo solutions heatmap colormap=cv2.COLORMAP_PARULA model=yolo26n.pt
 
     3. Call queue management solution
-        yolo solutions queue region="[(20, 400), (1080, 400), (1080, 360), (20, 360)]" model=
+        yolo solutions queue region="[(20, 400), (1080, 400), (1080, 360), (20, 360)]" model=yolo26n.pt
 
     4. Call workout monitoring solution for push-ups
-        yolo solutions workout model=
+        yolo solutions workout model=yolo26n-pose.pt kpts=[6, 8, 10]
 
     5. Generate analytical graphs
         yolo solutions analytics analytics_type="pie"
@@ -118,16 +118,16 @@ CLI_HELP_MSG = f"""
     See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
 
     1. Train a detection model for 10 epochs with an initial learning_rate of 0.01
-        yolo train data=coco8.yaml model=
+        yolo train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01
 
     2. Predict a YouTube video using a pretrained segmentation model at image size 320:
-        yolo predict model=
+        yolo predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
 
     3. Validate a pretrained detection model at batch-size 1 and image size 640:
-        yolo val model=
+        yolo val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640
 
-    4. Export a
-        yolo export model=
+    4. Export a YOLO26n classification model to ONNX format at image size 224 by 128 (no TASK required)
+        yolo export model=yolo26n-cls.pt format=onnx imgsz=224,128
 
     5. Ultralytics solutions usage
         yolo solutions count or any of {list(SOLUTION_MAP.keys())[1:-1]} source="path/to/video.mp4"
@@ -186,6 +186,7 @@ CFG_FRACTION_KEYS = frozenset(
         "conf",
         "iou",
         "fraction",
+        "multi_scale",
     }
 )
 CFG_INT_KEYS = frozenset(
@@ -237,7 +238,6 @@ CFG_BOOL_KEYS = frozenset(
         "simplify",
         "nms",
         "profile",
-        "multi_scale",
     }
 )
 
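Moving "multi_scale" from CFG_BOOL_KEYS to CFG_FRACTION_KEYS means the argument is now validated as a 0.0-1.0 float rather than a bool. A hedged sketch; the interpretation of the fraction as an image-size jitter range is an assumption, since these hunks only show the key changing type:

```python
from ultralytics import YOLO

model = YOLO("yolo26n.pt")
# Assumed semantics: multi_scale=0.5 requests +/-50% imgsz jitter while training;
# under 8.3.x this key only accepted True/False
model.train(data="coco8.yaml", epochs=1, imgsz=64, multi_scale=0.5)
```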
@@ -305,8 +305,6 @@ def get_cfg(
     # Merge overrides
     if overrides:
         overrides = cfg2dict(overrides)
-        if "save_dir" not in cfg:
-            overrides.pop("save_dir", None)  # special override keys to ignore
         check_dict_alignment(cfg, overrides)
         cfg = {**cfg, **overrides}  # merge cfg and overrides dicts (prefer overrides)
 
@@ -403,14 +401,16 @@ def get_save_dir(args: SimpleNamespace, name: str | None = None) -> Path:
         >>> args = SimpleNamespace(project="my_project", task="detect", mode="train", exist_ok=True)
         >>> save_dir = get_save_dir(args)
         >>> print(save_dir)
-
+        runs/detect/my_project/train
     """
     if getattr(args, "save_dir", None):
         save_dir = args.save_dir
     else:
         from ultralytics.utils.files import increment_path
 
-        project = args.project or
+        project = args.project or ""
+        if not Path(project).is_absolute():
+            project = (ROOT.parent / "tests/tmp/runs" if TESTS_RUNNING else RUNS_DIR) / args.task / project
         name = name or args.name or f"{args.mode}"
         save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in {-1, 0} else True)
 
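The get_save_dir() rewrite changes where runs land: a relative (or empty) project is still nested under the runs root as runs/{task}/{project}/{name}, while an absolute project path is now used verbatim. A sketch of both cases, assuming get_save_dir stays importable from ultralytics.cfg:

```python
from types import SimpleNamespace

from ultralytics.cfg import get_save_dir

ns = dict(task="detect", mode="train", exist_ok=True, name=None)
print(get_save_dir(SimpleNamespace(project="my_project", **ns)))  # runs/detect/my_project/train
print(get_save_dir(SimpleNamespace(project="/data/runs", **ns)))  # /data/runs/train, absolute path kept as-is
```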
@@ -492,7 +492,7 @@ def check_dict_alignment(
     base_keys, custom_keys = (frozenset(x.keys()) for x in (base, custom))
     # Allow 'augmentations' as a valid custom parameter for custom Albumentations transforms
     if allowed_custom_keys is None:
-        allowed_custom_keys = {"augmentations"}
+        allowed_custom_keys = {"augmentations", "save_dir"}
     if mismatched := [k for k in custom_keys if k not in base_keys and k not in allowed_custom_keys]:
         from difflib import get_close_matches
 
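Together with the get_cfg() hunk above that stops popping the key, allowing "save_dir" as a custom key means a user-supplied save_dir override now passes validation instead of being discarded. A hedged sketch, assuming train() forwards the override in 8.4.x:

```python
from ultralytics import YOLO

# Under 8.3.x a save_dir override was stripped before alignment checking;
# per these hunks it is now accepted as a custom argument
model = YOLO("yolo26n.pt")
model.train(data="coco8.yaml", epochs=1, imgsz=32, save_dir="runs/custom_exp")
```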
@@ -604,7 +604,7 @@ def handle_yolo_settings(args: list[str]) -> None:
 
     Examples:
         >>> handle_yolo_settings(["reset"])  # Reset YOLO settings
-        >>> handle_yolo_settings(["default_cfg_path=
+        >>> handle_yolo_settings(["default_cfg_path=yolo26n.yaml"])  # Update a specific setting
 
     Notes:
         - If no arguments are provided, the function will display the current settings.
@@ -649,7 +649,7 @@ def handle_yolo_solutions(args: list[str]) -> None:
         >>> handle_yolo_solutions(["analytics", "conf=0.25", "source=path/to/video.mp4"])
 
         Run inference with custom configuration, requires Streamlit version 1.29.0 or higher.
-        >>> handle_yolo_solutions(["inference", "model=
+        >>> handle_yolo_solutions(["inference", "model=yolo26n.pt"])
 
     Notes:
         - Arguments can be provided in the format 'key=value' or as boolean flags
@@ -707,7 +707,7 @@ def handle_yolo_solutions(args: list[str]) -> None:
                 str(ROOT / "solutions/streamlit_inference.py"),
                 "--server.headless",
                 "true",
-                overrides.pop("model", "
+                overrides.pop("model", "yolo26n.pt"),
             ]
         )
     else:
@@ -725,8 +725,8 @@ def handle_yolo_solutions(args: list[str]) -> None:
             )
         if solution_name == "analytics":  # analytical graphs follow fixed shape for output i.e w=1920, h=1080
             w, h = 1280, 720
-        save_dir = get_save_dir(SimpleNamespace(
-        save_dir.mkdir(parents=True)  # create the output directory i.e. runs/solutions/exp
+        save_dir = get_save_dir(SimpleNamespace(task="solutions", name="exp", exist_ok=False, project=None))
+        save_dir.mkdir(parents=True, exist_ok=True)  # create the output directory i.e. runs/solutions/exp
         vw = cv2.VideoWriter(str(save_dir / f"{solution_name}.avi"), cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
 
         try:  # Process video frames
@@ -758,9 +758,9 @@ def parse_key_value_pair(pair: str = "key=value") -> tuple:
         AssertionError: If the value is missing or empty.
 
     Examples:
-        >>> key, value = parse_key_value_pair("model=
+        >>> key, value = parse_key_value_pair("model=yolo26n.pt")
         >>> print(f"Key: {key}, Value: {value}")
-        Key: model, Value:
+        Key: model, Value: yolo26n.pt
 
         >>> key, value = parse_key_value_pair("epochs=100")
         >>> print(f"Key: {key}, Value: {value}")
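The doctest now shows the value passing through as a plain string; parse_key_value_pair also coerces obvious literals. A small check, assuming the function remains importable from ultralytics.cfg:

```python
from ultralytics.cfg import parse_key_value_pair

print(parse_key_value_pair("model=yolo26n.pt"))  # ('model', 'yolo26n.pt')
print(parse_key_value_pair("epochs=100"))        # ('epochs', 100), numeric strings are converted
print(parse_key_value_pair("plots=False"))       # ('plots', False), booleans too
```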
@@ -832,13 +832,13 @@ def entrypoint(debug: str = "") -> None:
 
     Examples:
         Train a detection model for 10 epochs with an initial learning_rate of 0.01:
-        >>> entrypoint("train data=coco8.yaml model=
+        >>> entrypoint("train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01")
 
         Predict a YouTube video using a pretrained segmentation model at image size 320:
-        >>> entrypoint("predict model=
+        >>> entrypoint("predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320")
 
         Validate a pretrained detection model at batch-size 1 and image size 640:
-        >>> entrypoint("val model=
+        >>> entrypoint("val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640")
 
     Notes:
         - If no arguments are passed, the function will display the usage help message.
@@ -933,7 +933,7 @@ def entrypoint(debug: str = "") -> None:
     # Model
     model = overrides.pop("model", DEFAULT_CFG.model)
     if model is None:
-        model = "
+        model = "yolo26n.pt"
         LOGGER.warning(f"'model' argument is missing. Using default 'model={model}'.")
     overrides["model"] = model
     stem = Path(model).stem.lower()
@@ -1022,5 +1022,5 @@ def copy_default_cfg() -> None:
 
 
 if __name__ == "__main__":
-    # Example: entrypoint(debug='yolo predict model=
+    # Example: entrypoint(debug='yolo predict model=yolo26n.pt')
     entrypoint(debug="")