ultralytics 8.3.105__py3-none-any.whl → 8.3.107__py3-none-any.whl

This diff shows the changes between two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
tests/test_python.py DELETED
@@ -1,674 +0,0 @@
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
-
- import contextlib
- import csv
- import urllib
- from copy import copy
- from pathlib import Path
-
- import cv2
- import numpy as np
- import pytest
- import torch
- import yaml
- from PIL import Image
-
- from tests import CFG, MODEL, SOURCE, SOURCES_LIST, TMP
- from ultralytics import RTDETR, YOLO
- from ultralytics.cfg import MODELS, TASK2DATA, TASKS
- from ultralytics.data.build import load_inference_source
- from ultralytics.utils import (
-     ASSETS,
-     DEFAULT_CFG,
-     DEFAULT_CFG_PATH,
-     LOGGER,
-     ONLINE,
-     ROOT,
-     WEIGHTS_DIR,
-     WINDOWS,
-     checks,
-     is_dir_writeable,
-     is_github_action_running,
- )
- from ultralytics.utils.downloads import download
- from ultralytics.utils.torch_utils import TORCH_1_9
-
- IS_TMP_WRITEABLE = is_dir_writeable(TMP)  # WARNING: must be run once tests start as TMP does not exist on tests/init
-
-
- def test_model_forward():
-     """Test the forward pass of the YOLO model."""
-     model = YOLO(CFG)
-     model(source=None, imgsz=32, augment=True)  # also test no source and augment
-
-
- def test_model_methods():
-     """Test various methods and properties of the YOLO model to ensure correct functionality."""
-     model = YOLO(MODEL)
-
-     # Model methods
-     model.info(verbose=True, detailed=True)
-     model = model.reset_weights()
-     model = model.load(MODEL)
-     model.to("cpu")
-     model.fuse()
-     model.clear_callback("on_train_start")
-     model.reset_callbacks()
-
-     # Model properties
-     _ = model.names
-     _ = model.device
-     _ = model.transforms
-     _ = model.task_map
-
-
- def test_model_profile():
-     """Test profiling of the YOLO model with `profile=True` to assess performance and resource usage."""
-     from ultralytics.nn.tasks import DetectionModel
-
-     model = DetectionModel()  # build model
-     im = torch.randn(1, 3, 64, 64)  # requires min imgsz=64
-     _ = model.predict(im, profile=True)
-
-
- @pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
- def test_predict_txt():
-     """Test YOLO predictions with file, directory, and pattern sources listed in a text file."""
-     file = TMP / "sources_multi_row.txt"
-     with open(file, "w") as f:
-         for src in SOURCES_LIST:
-             f.write(f"{src}\n")
-     results = YOLO(MODEL)(source=file, imgsz=32)
-     assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
-
-
- @pytest.mark.skipif(True, reason="disabled for testing")
- @pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
- def test_predict_csv_multi_row():
-     """Test YOLO predictions with sources listed in multiple rows of a CSV file."""
-     file = TMP / "sources_multi_row.csv"
-     with open(file, "w", newline="") as f:
-         writer = csv.writer(f)
-         writer.writerow(["source"])
-         writer.writerows([[src] for src in SOURCES_LIST])
-     results = YOLO(MODEL)(source=file, imgsz=32)
-     assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
-
-
- @pytest.mark.skipif(True, reason="disabled for testing")
- @pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
- def test_predict_csv_single_row():
-     """Test YOLO predictions with sources listed in a single row of a CSV file."""
-     file = TMP / "sources_single_row.csv"
-     with open(file, "w", newline="") as f:
-         writer = csv.writer(f)
-         writer.writerow(SOURCES_LIST)
-     results = YOLO(MODEL)(source=file, imgsz=32)
-     assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
-
-
- @pytest.mark.parametrize("model_name", MODELS)
- def test_predict_img(model_name):
-     """Test YOLO model predictions on various image input types and sources, including online images."""
-     model = YOLO(WEIGHTS_DIR / model_name)
-     im = cv2.imread(str(SOURCE))  # uint8 numpy array
-     assert len(model(source=Image.open(SOURCE), save=True, verbose=True, imgsz=32)) == 1  # PIL
-     assert len(model(source=im, save=True, save_txt=True, imgsz=32)) == 1  # ndarray
-     assert len(model(torch.rand((2, 3, 32, 32)), imgsz=32)) == 2  # batch-size 2 Tensor, FP32 0.0-1.0 RGB order
-     assert len(model(source=[im, im], save=True, save_txt=True, imgsz=32)) == 2  # batch
-     assert len(list(model(source=[im, im], save=True, stream=True, imgsz=32))) == 2  # stream
-     assert len(model(torch.zeros(320, 640, 3).numpy().astype(np.uint8), imgsz=32)) == 1  # tensor to numpy
-     batch = [
-         str(SOURCE),  # filename
-         Path(SOURCE),  # Path
-         "https://github.com/ultralytics/assets/releases/download/v0.0.0/zidane.jpg" if ONLINE else SOURCE,  # URI
-         cv2.imread(str(SOURCE)),  # OpenCV
-         Image.open(SOURCE),  # PIL
-         np.zeros((320, 640, 3), dtype=np.uint8),  # numpy
-     ]
-     assert len(model(batch, imgsz=32)) == len(batch)  # multiple sources in a batch
-
-
- @pytest.mark.parametrize("model", MODELS)
- def test_predict_visualize(model):
-     """Test model prediction methods with 'visualize=True' to generate and display prediction visualizations."""
-     YOLO(WEIGHTS_DIR / model)(SOURCE, imgsz=32, visualize=True)
-
-
- def test_predict_grey_and_4ch():
-     """Test YOLO prediction on SOURCE converted to greyscale and 4-channel images with various filenames."""
-     im = Image.open(SOURCE)
-     directory = TMP / "im4"
-     directory.mkdir(parents=True, exist_ok=True)
-
-     source_greyscale = directory / "greyscale.jpg"
-     source_rgba = directory / "4ch.png"
-     source_non_utf = directory / "non_UTF_测试文件_tést_image.jpg"
-     source_spaces = directory / "image with spaces.jpg"
-
-     im.convert("L").save(source_greyscale)  # greyscale
-     im.convert("RGBA").save(source_rgba)  # 4-ch PNG with alpha
-     im.save(source_non_utf)  # non-UTF characters in filename
-     im.save(source_spaces)  # spaces in filename
-
-     # Inference
-     model = YOLO(MODEL)
-     for f in source_rgba, source_greyscale, source_non_utf, source_spaces:
-         for source in Image.open(f), cv2.imread(str(f)), f:
-             results = model(source, save=True, verbose=True, imgsz=32)
-             assert len(results) == 1  # verify that an image was run
-         f.unlink()  # cleanup
-
-
- @pytest.mark.slow
- @pytest.mark.skipif(not ONLINE, reason="environment is offline")
- @pytest.mark.skipif(is_github_action_running(), reason="No auth https://github.com/JuanBindez/pytubefix/issues/166")
- def test_youtube():
-     """Test YOLO model on a YouTube video stream, handling potential network-related errors."""
-     model = YOLO(MODEL)
-     try:
-         model.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
-     # Handle internet connection errors and 'urllib.error.HTTPError: HTTP Error 429: Too Many Requests'
-     except (urllib.error.HTTPError, ConnectionError) as e:
-         LOGGER.warning(f"WARNING: YouTube Test Error: {e}")
-
-
- @pytest.mark.skipif(not ONLINE, reason="environment is offline")
- @pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
- def test_track_stream():
-     """
-     Test streaming tracking on a short 10 frame video using ByteTrack tracker and different GMC methods.
-
-     Note imgsz=160 required for tracking for higher confidence and better matches.
-     """
-     video_url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/decelera_portrait_min.mov"
-     model = YOLO(MODEL)
-     model.track(video_url, imgsz=160, tracker="bytetrack.yaml")
-     model.track(video_url, imgsz=160, tracker="botsort.yaml", save_frames=True)  # test frame saving also
-
-     # Test Global Motion Compensation (GMC) methods
-     for gmc in "orb", "sift", "ecc":
-         with open(ROOT / "cfg/trackers/botsort.yaml", encoding="utf-8") as f:
-             data = yaml.safe_load(f)
-         tracker = TMP / f"botsort-{gmc}.yaml"
-         data["gmc_method"] = gmc
-         with open(tracker, "w", encoding="utf-8") as f:
-             yaml.safe_dump(data, f)
-         model.track(video_url, imgsz=160, tracker=tracker)
-
-
- def test_val():
-     """Test the validation mode of the YOLO model."""
-     YOLO(MODEL).val(data="coco8.yaml", imgsz=32)
-
-
- def test_train_scratch():
-     """Test training the YOLO model from scratch using the provided configuration."""
-     model = YOLO(CFG)
-     model.train(data="coco8.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
-     model(SOURCE)
-
-
- @pytest.mark.parametrize("scls", [False, True])
- def test_train_pretrained(scls):
-     """Test training of the YOLO model starting from a pre-trained checkpoint."""
-     model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
-     model.train(
-         data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0, single_cls=scls
-     )
-     model(SOURCE)
-
-
- def test_all_model_yamls():
-     """Test YOLO model creation for all available YAML configurations in the `cfg/models` directory."""
-     for m in (ROOT / "cfg" / "models").rglob("*.yaml"):
-         if "rtdetr" in m.name:
-             if TORCH_1_9:  # torch<=1.8 issue - TypeError: __init__() got an unexpected keyword argument 'batch_first'
-                 _ = RTDETR(m.name)(SOURCE, imgsz=640)  # must be 640
-         else:
-             YOLO(m.name)
-
-
- @pytest.mark.skipif(WINDOWS, reason="Windows slow CI export bug https://github.com/ultralytics/ultralytics/pull/16003")
- def test_workflow():
-     """Test the complete workflow including training, validation, prediction, and exporting."""
-     model = YOLO(MODEL)
-     model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
-     model.val(imgsz=32)
-     model.predict(SOURCE, imgsz=32)
-     model.export(format="torchscript")  # WARNING: Windows slow CI export bug
-
-
- def test_predict_callback_and_setup():
-     """Test callback functionality during YOLO prediction setup and execution."""
-
-     def on_predict_batch_end(predictor):
-         """Callback function that handles operations at the end of a prediction batch."""
-         path, im0s, _ = predictor.batch
-         im0s = im0s if isinstance(im0s, list) else [im0s]
-         bs = [predictor.dataset.bs for _ in range(len(path))]
-         predictor.results = zip(predictor.results, im0s, bs)  # results is List[batch_size]
-
-     model = YOLO(MODEL)
-     model.add_callback("on_predict_batch_end", on_predict_batch_end)
-
-     dataset = load_inference_source(source=SOURCE)
-     bs = dataset.bs  # noqa access predictor properties
-     results = model.predict(dataset, stream=True, imgsz=160)  # source already setup
-     for r, im0, bs in results:
-         print("test_callback", im0.shape)
-         print("test_callback", bs)
-         boxes = r.boxes  # Boxes object for bbox outputs
-         print(boxes)
-
-
- @pytest.mark.parametrize("model", MODELS)
- def test_results(model):
-     """Test YOLO model results processing and output in various formats."""
-     results = YOLO(WEIGHTS_DIR / model)([SOURCE, SOURCE], imgsz=160)
-     for r in results:
-         r = r.cpu().numpy()
-         print(r, len(r), r.path)  # print numpy attributes
-         r = r.to(device="cpu", dtype=torch.float32)
-         r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
-         r.save_crop(save_dir=TMP / "runs/tests/crops/")
-         r.to_json(normalize=True)
-         r.to_df(decimals=3)
-         r.to_csv()
-         r.to_xml()
-         r.plot(pil=True)
-         r.plot(conf=True, boxes=True)
-         print(r, len(r), r.path)  # print after methods
-
-
- def test_labels_and_crops():
-     """Test output from prediction args for saving YOLO detection labels and crops."""
-     imgs = [SOURCE, ASSETS / "zidane.jpg"]
-     results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
-     save_path = Path(results[0].save_dir)
-     for r in results:
-         im_name = Path(r.path).stem
-         cls_idxs = r.boxes.cls.int().tolist()
-         # Check correct detections
-         assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0])  # bus.jpg and zidane.jpg classes
-         # Check label path
-         labels = save_path / f"labels/{im_name}.txt"
-         assert labels.exists()
-         # Check detections match label count
-         assert len(r.boxes.data) == len([line for line in labels.read_text().splitlines() if line])
-         # Check crops path and files
-         crop_dirs = list((save_path / "crops").iterdir())
-         crop_files = [f for p in crop_dirs for f in p.glob("*")]
-         # Crop directories match detections
-         assert all(r.names.get(c) in {d.name for d in crop_dirs} for c in cls_idxs)
-         # Same number of crops as detections
-         assert len([f for f in crop_files if im_name in f.name]) == len(r.boxes.data)
-
-
- @pytest.mark.skipif(not ONLINE, reason="environment is offline")
- def test_data_utils():
-     """Test utility functions in ultralytics/data/utils.py, including dataset stats and auto-splitting."""
-     from ultralytics.data.utils import HUBDatasetStats, autosplit
-     from ultralytics.utils.downloads import zip_directory
-
-     # from ultralytics.utils.files import WorkingDirectory
-     # with WorkingDirectory(ROOT.parent / 'tests'):
-
-     for task in TASKS:
-         file = Path(TASK2DATA[task]).with_suffix(".zip")  # i.e. coco8.zip
-         download(f"https://github.com/ultralytics/hub/raw/main/example_datasets/{file}", unzip=False, dir=TMP)
-         stats = HUBDatasetStats(TMP / file, task=task)
-         stats.get_json(save=True)
-         stats.process_images()
-
-     autosplit(TMP / "coco8")
-     zip_directory(TMP / "coco8/images/val")  # zip
-
-
- @pytest.mark.skipif(not ONLINE, reason="environment is offline")
- def test_data_converter():
-     """Test dataset conversion functions from COCO to YOLO format and class mappings."""
-     from ultralytics.data.converter import coco80_to_coco91_class, convert_coco
-
-     file = "instances_val2017.json"
-     download(f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{file}", dir=TMP)
-     convert_coco(labels_dir=TMP, save_dir=TMP / "yolo_labels", use_segments=True, use_keypoints=False, cls91to80=True)
-     coco80_to_coco91_class()
-
-
- def test_data_annotator():
-     """Test automatic annotation of data using detection and segmentation models."""
-     from ultralytics.data.annotator import auto_annotate
-
-     auto_annotate(
-         ASSETS,
-         det_model=WEIGHTS_DIR / "yolo11n.pt",
-         sam_model=WEIGHTS_DIR / "mobile_sam.pt",
-         output_dir=TMP / "auto_annotate_labels",
-     )
-
-
- def test_events():
-     """Test event sending functionality."""
-     from ultralytics.hub.utils import Events
-
-     events = Events()
-     events.enabled = True
-     cfg = copy(DEFAULT_CFG)  # does not require deepcopy
-     cfg.mode = "test"
-     events(cfg)
-
-
- def test_cfg_init():
-     """Test configuration initialization utilities from the 'ultralytics.cfg' module."""
-     from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value
-
-     with contextlib.suppress(SyntaxError):
-         check_dict_alignment({"a": 1}, {"b": 2})
-     copy_default_cfg()
-     (Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")).unlink(missing_ok=False)
-     [smart_value(x) for x in ["none", "true", "false"]]
-
-
- def test_utils_init():
-     """Test initialization utilities in the Ultralytics library."""
-     from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_action_running
-
-     get_ubuntu_version()
-     is_github_action_running()
-     get_git_origin_url()
-     get_git_branch()
-
-
- def test_utils_checks():
-     """Test various utility checks for filenames, git status, requirements, image sizes, and versions."""
-     checks.check_yolov5u_filename("yolov5n.pt")
-     checks.git_describe(ROOT)
-     checks.check_requirements()  # check requirements.txt
-     checks.check_imgsz([600, 600], max_dim=1)
-     checks.check_imshow(warn=True)
-     checks.check_version("ultralytics", "8.0.0")
-     checks.print_args()
-
-
- @pytest.mark.skipif(WINDOWS, reason="Windows profiling is extremely slow (cause unknown)")
- def test_utils_benchmarks():
-     """Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
-     from ultralytics.utils.benchmarks import ProfileModels
-
-     ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
-
-
- def test_utils_torchutils():
-     """Test Torch utility functions including profiling and FLOP calculations."""
-     from ultralytics.nn.modules.conv import Conv
-     from ultralytics.utils.torch_utils import get_flops_with_torch_profiler, profile, time_sync
-
-     x = torch.randn(1, 64, 20, 20)
-     m = Conv(64, 64, k=1, s=2)
-
-     profile(x, [m], n=3)
-     get_flops_with_torch_profiler(m)
-     time_sync()
-
-
- def test_utils_ops():
-     """Test utility operations for coordinate transformations and normalizations."""
-     from ultralytics.utils.ops import (
-         ltwh2xywh,
-         ltwh2xyxy,
-         make_divisible,
-         xywh2ltwh,
-         xywh2xyxy,
-         xywhn2xyxy,
-         xywhr2xyxyxyxy,
-         xyxy2ltwh,
-         xyxy2xywh,
-         xyxy2xywhn,
-         xyxyxyxy2xywhr,
-     )
-
-     make_divisible(17, torch.tensor([8]))
-
-     boxes = torch.rand(10, 4)  # xywh
-     torch.allclose(boxes, xyxy2xywh(xywh2xyxy(boxes)))
-     torch.allclose(boxes, xyxy2xywhn(xywhn2xyxy(boxes)))
-     torch.allclose(boxes, ltwh2xywh(xywh2ltwh(boxes)))
-     torch.allclose(boxes, xyxy2ltwh(ltwh2xyxy(boxes)))
-
-     boxes = torch.rand(10, 5)  # xywhr for OBB
-     boxes[:, 4] = torch.randn(10) * 30
-     torch.allclose(boxes, xyxyxyxy2xywhr(xywhr2xyxyxyxy(boxes)), rtol=1e-3)
-
-
- def test_utils_files():
-     """Test file handling utilities including file age, date, and paths with spaces."""
-     from ultralytics.utils.files import file_age, file_date, get_latest_run, spaces_in_path
-
-     file_age(SOURCE)
-     file_date(SOURCE)
-     get_latest_run(ROOT / "runs")
-
-     path = TMP / "path/with spaces"
-     path.mkdir(parents=True, exist_ok=True)
-     with spaces_in_path(path) as new_path:
-         print(new_path)
-
-
- @pytest.mark.slow
- def test_utils_patches_torch_save():
-     """Test torch_save backoff when _torch_save raises RuntimeError."""
-     from unittest.mock import MagicMock, patch
-
-     from ultralytics.utils.patches import torch_save
-
-     mock = MagicMock(side_effect=RuntimeError)
-
-     with patch("ultralytics.utils.patches._torch_save", new=mock):
-         with pytest.raises(RuntimeError):
-             torch_save(torch.zeros(1), TMP / "test.pt")
-
-     assert mock.call_count == 4, "torch_save was not attempted the expected number of times"
-
-
- def test_nn_modules_conv():
-     """Test Convolutional Neural Network modules including CBAM, Conv2, and ConvTranspose."""
-     from ultralytics.nn.modules.conv import CBAM, Conv2, ConvTranspose, DWConvTranspose2d, Focus
-
-     c1, c2 = 8, 16  # input and output channels
-     x = torch.zeros(4, c1, 10, 10)  # BCHW
-
-     # Run all modules not otherwise covered in tests
-     DWConvTranspose2d(c1, c2)(x)
-     ConvTranspose(c1, c2)(x)
-     Focus(c1, c2)(x)
-     CBAM(c1)(x)
-
-     # Fuse ops
-     m = Conv2(c1, c2)
-     m.fuse_convs()
-     m(x)
-
-
- def test_nn_modules_block():
-     """Test various neural network block modules."""
-     from ultralytics.nn.modules.block import C1, C3TR, BottleneckCSP, C3Ghost, C3x
-
-     c1, c2 = 8, 16  # input and output channels
-     x = torch.zeros(4, c1, 10, 10)  # BCHW
-
-     # Run all modules not otherwise covered in tests
-     C1(c1, c2)(x)
-     C3x(c1, c2)(x)
-     C3TR(c1, c2)(x)
-     C3Ghost(c1, c2)(x)
-     BottleneckCSP(c1, c2)(x)
-
-
- @pytest.mark.skipif(not ONLINE, reason="environment is offline")
- def test_hub():
-     """Test Ultralytics HUB functionalities."""
-     from ultralytics.hub import export_fmts_hub, logout
-     from ultralytics.hub.utils import smart_request
-
-     export_fmts_hub()
-     logout()
-     smart_request("GET", "https://github.com", progress=True)
-
-
- @pytest.fixture
- def image():
-     """Load and return an image from a predefined source."""
-     return cv2.imread(str(SOURCE))
-
-
- @pytest.mark.parametrize(
-     "auto_augment, erasing, force_color_jitter",
-     [
-         (None, 0.0, False),
-         ("randaugment", 0.5, True),
-         ("augmix", 0.2, False),
-         ("autoaugment", 0.0, True),
-     ],
- )
- def test_classify_transforms_train(image, auto_augment, erasing, force_color_jitter):
-     """Test classification transforms during training with various augmentations."""
-     from ultralytics.data.augment import classify_augmentations
-
-     transform = classify_augmentations(
-         size=224,
-         mean=(0.5, 0.5, 0.5),
-         std=(0.5, 0.5, 0.5),
-         scale=(0.08, 1.0),
-         ratio=(3.0 / 4.0, 4.0 / 3.0),
-         hflip=0.5,
-         vflip=0.5,
-         auto_augment=auto_augment,
-         hsv_h=0.015,
-         hsv_s=0.4,
-         hsv_v=0.4,
-         force_color_jitter=force_color_jitter,
-         erasing=erasing,
-     )
-
-     transformed_image = transform(Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)))
-
-     assert transformed_image.shape == (3, 224, 224)
-     assert torch.is_tensor(transformed_image)
-     assert transformed_image.dtype == torch.float32
-
-
- @pytest.mark.slow
- @pytest.mark.skipif(not ONLINE, reason="environment is offline")
- def test_model_tune():
-     """Tune YOLO model for performance improvement."""
-     YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
-     YOLO("yolo11n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
-
-
- def test_model_embeddings():
-     """Test YOLO model embeddings extraction functionality."""
-     model_detect = YOLO(MODEL)
-     model_segment = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
-
-     for batch in [SOURCE], [SOURCE, SOURCE]:  # test batch size 1 and 2
-         assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
-         assert len(model_segment.embed(source=batch, imgsz=32)) == len(batch)
-
-
- @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
- def test_yolo_world():
-     """Test YOLO world models with CLIP support."""
-     model = YOLO(WEIGHTS_DIR / "yolov8s-world.pt")  # no YOLO11n-world model yet
-     model.set_classes(["tree", "window"])
-     model(SOURCE, conf=0.01)
-
-     model = YOLO(WEIGHTS_DIR / "yolov8s-worldv2.pt")  # no YOLO11n-world model yet
-     # Training from a pretrained model. Eval is included at the final stage of training.
-     # Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
-     model.train(
-         data="dota8.yaml",
-         epochs=1,
-         imgsz=32,
-         cache="disk",
-         close_mosaic=1,
-     )
-
-     # test WorldTrainerFromScratch
-     from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
-
-     model = YOLO("yolov8s-worldv2.yaml")  # no YOLO11n-world model yet
-     model.train(
-         data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
-         epochs=1,
-         imgsz=32,
-         cache="disk",
-         close_mosaic=1,
-         trainer=WorldTrainerFromScratch,
-     )
-
-
- @pytest.mark.skipif(checks.IS_PYTHON_3_12 or not TORCH_1_9, reason="YOLOE with CLIP is not supported in Python 3.12")
- def test_yoloe():
-     """Test YOLOE models with MobileClip support."""
-     # Predict
-     # text-prompts
-     model = YOLO(WEIGHTS_DIR / "yoloe-11s-seg.pt")
-     names = ["person", "bus"]
-     model.set_classes(names, model.get_text_pe(names))
-     model(SOURCE, conf=0.01)
-
-     import numpy as np
-
-     from ultralytics import YOLOE
-     from ultralytics.models.yolo.yoloe import YOLOEVPSegPredictor
-
-     # visual-prompts
-     visuals = dict(
-         bboxes=np.array(
-             [[221.52, 405.8, 344.98, 857.54], [120, 425, 160, 445]],
-         ),
-         cls=np.array([0, 1]),
-     )
-     model.predict(
-         SOURCE,
-         visual_prompts=visuals,
-         predictor=YOLOEVPSegPredictor,
-     )
-
-     # Val
-     model = YOLOE(WEIGHTS_DIR / "yoloe-11s-seg.pt")
-     # text prompts
-     model.val(data="coco128-seg.yaml", imgsz=32)
-     # visual prompts
-     model.val(data="coco128-seg.yaml", load_vp=True, imgsz=32)
-
-     # Train, fine-tune
-     from ultralytics.models.yolo.yoloe import YOLOEPESegTrainer
-
-     model = YOLOE("yoloe-11s-seg.pt")
-     model.train(
-         data="coco128-seg.yaml",
-         epochs=1,
-         close_mosaic=1,
-         trainer=YOLOEPESegTrainer,
-         imgsz=32,
-     )
-
-     # prompt-free
-     # predict
-     model = YOLOE(WEIGHTS_DIR / "yoloe-11s-seg-pf.pt")
-     model.predict(SOURCE)
-     # val
-     model = YOLOE("yoloe-11s-seg.pt")  # or select yoloe-m/l-seg.pt for different sizes
-     model.val(data="coco128-seg.yaml", imgsz=32)
-
-
- def test_yolov10():
-     """Test YOLOv10 model training, validation, and prediction functionality."""
-     model = YOLO("yolov10n.yaml")
-     # train/val/predict
-     model.train(data="coco8.yaml", epochs=1, imgsz=32, close_mosaic=1, cache="disk")
-     model.val(data="coco8.yaml", imgsz=32)
-     model.predict(imgsz=32, save_txt=True, save_crop=True, augment=True)
-     model(SOURCE)