ultralytics 8.2.19__py3-none-any.whl → 8.2.21__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.


This version of ultralytics might be problematic.

tests/test_python.py ADDED
@@ -0,0 +1,576 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ import contextlib
+ from copy import copy
+ from pathlib import Path
+
+ import cv2
+ import numpy as np
+ import pytest
+ import torch
+ import yaml
+ from PIL import Image
+
+ from ultralytics import RTDETR, YOLO
+ from ultralytics.cfg import MODELS, TASKS, TASK2DATA
+ from ultralytics.data.build import load_inference_source
+ from ultralytics.utils import (
+     ASSETS,
+     DEFAULT_CFG,
+     DEFAULT_CFG_PATH,
+     ONLINE,
+     ROOT,
+     WEIGHTS_DIR,
+     WINDOWS,
+     Retry,
+     checks,
+ )
+ from ultralytics.utils.downloads import download, is_url
+ from ultralytics.utils.torch_utils import TORCH_1_9
+ from tests import CFG, IS_TMP_WRITEABLE, MODEL, SOURCE, TMP
+
+
+ def test_model_forward():
+     """Test the forward pass of the YOLO model."""
+     model = YOLO(CFG)
+     model(source=None, imgsz=32, augment=True)  # also test no source and augment
+
+
+ def test_model_methods():
+     """Test various methods and properties of the YOLO model."""
+     model = YOLO(MODEL)
+
+     # Model methods
+     model.info(verbose=True, detailed=True)
+     model = model.reset_weights()
+     model = model.load(MODEL)
+     model.to("cpu")
+     model.fuse()
+     model.clear_callback("on_train_start")
+     model.reset_callbacks()
+
+     # Model properties
+     _ = model.names
+     _ = model.device
+     _ = model.transforms
+     _ = model.task_map
+
+
+ def test_model_profile():
+     """Test profiling of the YOLO model with 'profile=True' argument."""
+     from ultralytics.nn.tasks import DetectionModel
+
+     model = DetectionModel()  # build model
+     im = torch.randn(1, 3, 64, 64)  # requires min imgsz=64
+     _ = model.predict(im, profile=True)
+
+
+ @pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+ def test_predict_txt():
+     """Test YOLO predictions with sources (file, dir, glob, recursive glob) specified in a text file."""
+     txt_file = TMP / "sources.txt"
+     with open(txt_file, "w") as f:
+         for x in [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]:
+             f.write(f"{x}\n")
+     _ = YOLO(MODEL)(source=txt_file, imgsz=32)
+
+
+ @pytest.mark.parametrize("model_name", MODELS)
+ def test_predict_img(model_name):
+     """Test YOLO prediction on various types of image sources."""
+     model = YOLO(WEIGHTS_DIR / model_name)
+     im = cv2.imread(str(SOURCE))  # uint8 numpy array
+     assert len(model(source=Image.open(SOURCE), save=True, verbose=True, imgsz=32)) == 1  # PIL
+     assert len(model(source=im, save=True, save_txt=True, imgsz=32)) == 1  # ndarray
+     assert len(model(torch.rand((2, 3, 32, 32)), imgsz=32)) == 2  # batch-size 2 Tensor, FP32 0.0-1.0 RGB order
+     assert len(model(source=[im, im], save=True, save_txt=True, imgsz=32)) == 2  # batch
+     assert len(list(model(source=[im, im], save=True, stream=True, imgsz=32))) == 2  # stream
+     assert len(model(torch.zeros(320, 640, 3).numpy().astype(np.uint8), imgsz=32)) == 1  # tensor to numpy
+     batch = [
+         str(SOURCE),  # filename
+         Path(SOURCE),  # Path
+         "https://ultralytics.com/images/zidane.jpg" if ONLINE else SOURCE,  # URI
+         cv2.imread(str(SOURCE)),  # OpenCV
+         Image.open(SOURCE),  # PIL
+         np.zeros((320, 640, 3), dtype=np.uint8),  # numpy
+     ]
+     assert len(model(batch, imgsz=32)) == len(batch)  # multiple sources in a batch
+
+
+ @pytest.mark.parametrize("model", MODELS)
+ def test_predict_visualize(model):
+     """Test model predict methods with 'visualize=True' arguments."""
+     YOLO(WEIGHTS_DIR / model)(SOURCE, imgsz=32, visualize=True)
+
+
+ def test_predict_grey_and_4ch():
+     """Test YOLO prediction on SOURCE converted to greyscale and 4-channel images."""
+     im = Image.open(SOURCE)
+     directory = TMP / "im4"
+     directory.mkdir(parents=True, exist_ok=True)
+
+     source_greyscale = directory / "greyscale.jpg"
+     source_rgba = directory / "4ch.png"
+     source_non_utf = directory / "non_UTF_测试文件_tést_image.jpg"
+     source_spaces = directory / "image with spaces.jpg"
+
+     im.convert("L").save(source_greyscale)  # greyscale
+     im.convert("RGBA").save(source_rgba)  # 4-ch PNG with alpha
+     im.save(source_non_utf)  # non-UTF characters in filename
+     im.save(source_spaces)  # spaces in filename
+
+     # Inference
+     model = YOLO(MODEL)
+     for f in source_rgba, source_greyscale, source_non_utf, source_spaces:
+         for source in Image.open(f), cv2.imread(str(f)), f:
+             results = model(source, save=True, verbose=True, imgsz=32)
+             assert len(results) == 1  # verify that an image was run
+         f.unlink()  # cleanup
+
+
+ @pytest.mark.slow
+ @pytest.mark.skipif(not ONLINE, reason="environment is offline")
+ @pytest.mark.skipif(not is_url("https://youtu.be/G17sBkb38XQ"), reason="YouTube URL issue")
+ @Retry(times=3, delay=10)
+ def test_youtube():
+     """
+     Test YouTube inference.
+
+     Marked --slow to reduce YouTube API rate limits risk.
+     """
+     model = YOLO(MODEL)
+     model.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
+
+
+ @pytest.mark.skipif(not ONLINE, reason="environment is offline")
+ @pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+ def test_track_stream():
+     """
+     Test streaming tracking (short 10 frame video) with non-default ByteTrack tracker.
+
+     Note imgsz=160 required for tracking for higher confidence and better matches
+     """
+     video_url = "https://ultralytics.com/assets/decelera_portrait_min.mov"
+     model = YOLO(MODEL)
+     model.track(video_url, imgsz=160, tracker="bytetrack.yaml")
+     model.track(video_url, imgsz=160, tracker="botsort.yaml", save_frames=True)  # test frame saving also
+
+     # Test Global Motion Compensation (GMC) methods
+     for gmc in "orb", "sift", "ecc":
+         with open(ROOT / "cfg/trackers/botsort.yaml", encoding="utf-8") as f:
+             data = yaml.safe_load(f)
+         tracker = TMP / f"botsort-{gmc}.yaml"
+         data["gmc_method"] = gmc
+         with open(tracker, "w", encoding="utf-8") as f:
+             yaml.safe_dump(data, f)
+         model.track(video_url, imgsz=160, tracker=tracker)
+
+
+ def test_val():
+     """Test the validation mode of the YOLO model."""
+     YOLO(MODEL).val(data="coco8.yaml", imgsz=32, save_hybrid=True)
+
+
+ def test_train_scratch():
+     """Test training the YOLO model from scratch."""
+     model = YOLO(CFG)
+     model.train(data="coco8.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
+     model(SOURCE)
+
+
+ def test_train_pretrained():
+     """Test training the YOLO model from a pre-trained state."""
+     model = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
+     model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0)
+     model(SOURCE)
+
+
+ def test_all_model_yamls():
+     """Test YOLO model creation for all available YAML configurations."""
+     for m in (ROOT / "cfg" / "models").rglob("*.yaml"):
+         if "rtdetr" in m.name:
+             if TORCH_1_9:  # torch<=1.8 issue - TypeError: __init__() got an unexpected keyword argument 'batch_first'
+                 _ = RTDETR(m.name)(SOURCE, imgsz=640)  # must be 640
+         else:
+             YOLO(m.name)
+
+
+ def test_workflow():
+     """Test the complete workflow including training, validation, prediction, and exporting."""
+     model = YOLO(MODEL)
+     model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
+     model.val(imgsz=32)
+     model.predict(SOURCE, imgsz=32)
+     model.export(format="torchscript")
+
+
+ def test_predict_callback_and_setup():
+     """Test callback functionality during YOLO prediction."""
+
+     def on_predict_batch_end(predictor):
+         """Callback function that handles operations at the end of a prediction batch."""
+         path, im0s, _ = predictor.batch
+         im0s = im0s if isinstance(im0s, list) else [im0s]
+         bs = [predictor.dataset.bs for _ in range(len(path))]
+         predictor.results = zip(predictor.results, im0s, bs)  # results is List[batch_size]
+
+     model = YOLO(MODEL)
+     model.add_callback("on_predict_batch_end", on_predict_batch_end)
+
+     dataset = load_inference_source(source=SOURCE)
+     bs = dataset.bs  # noqa access predictor properties
+     results = model.predict(dataset, stream=True, imgsz=160)  # source already setup
+     for r, im0, bs in results:
+         print("test_callback", im0.shape)
+         print("test_callback", bs)
+         boxes = r.boxes  # Boxes object for bbox outputs
+         print(boxes)
+
+
+ @pytest.mark.parametrize("model", MODELS)
+ def test_results(model):
+     """Test various result formats for the YOLO model."""
+     results = YOLO(WEIGHTS_DIR / model)([SOURCE, SOURCE], imgsz=160)
+     for r in results:
+         r = r.cpu().numpy()
+         r = r.to(device="cpu", dtype=torch.float32)
+         r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
+         r.save_crop(save_dir=TMP / "runs/tests/crops/")
+         r.tojson(normalize=True)
+         r.plot(pil=True)
+         r.plot(conf=True, boxes=True)
+         print(r, len(r), r.path)
+
+
+ def test_labels_and_crops():
+     """Test output from prediction args for saving detection labels and crops."""
+     imgs = [SOURCE, ASSETS / "zidane.jpg"]
+     results = YOLO(WEIGHTS_DIR / "yolov8n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
+     save_path = Path(results[0].save_dir)
+     for r in results:
+         im_name = Path(r.path).stem
+         cls_idxs = r.boxes.cls.int().tolist()
+         # Check label path
+         labels = save_path / f"labels/{im_name}.txt"
+         assert labels.exists()
+         # Check detections match label count
+         assert len(r.boxes.data) == len([line for line in labels.read_text().splitlines() if line])
+         # Check crops path and files
+         crop_dirs = list((save_path / "crops").iterdir())
+         crop_files = [f for p in crop_dirs for f in p.glob("*")]
+         # Crop directories match detections
+         assert all(r.names.get(c) in {d.name for d in crop_dirs} for c in cls_idxs)
+         # Same number of crops as detections
+         assert len([f for f in crop_files if im_name in f.name]) == len(r.boxes.data)
+
+
+ @pytest.mark.skipif(not ONLINE, reason="environment is offline")
+ def test_data_utils():
+     """Test utility functions in ultralytics/data/utils.py."""
+     from ultralytics.data.utils import HUBDatasetStats, autosplit
+     from ultralytics.utils.downloads import zip_directory
+
+     # from ultralytics.utils.files import WorkingDirectory
+     # with WorkingDirectory(ROOT.parent / 'tests'):
+
+     for task in TASKS:
+         file = Path(TASK2DATA[task]).with_suffix(".zip")  # i.e. coco8.zip
+         download(f"https://github.com/ultralytics/hub/raw/main/example_datasets/{file}", unzip=False, dir=TMP)
+         stats = HUBDatasetStats(TMP / file, task=task)
+         stats.get_json(save=True)
+         stats.process_images()
+
+     autosplit(TMP / "coco8")
+     zip_directory(TMP / "coco8/images/val")  # zip
+
+
+ @pytest.mark.skipif(not ONLINE, reason="environment is offline")
+ def test_data_converter():
+     """Test dataset converters."""
+     from ultralytics.data.converter import coco80_to_coco91_class, convert_coco
+
+     file = "instances_val2017.json"
+     download(f"https://github.com/ultralytics/yolov5/releases/download/v1.0/{file}", dir=TMP)
+     convert_coco(labels_dir=TMP, save_dir=TMP / "yolo_labels", use_segments=True, use_keypoints=False, cls91to80=True)
+     coco80_to_coco91_class()
+
+
+ def test_data_annotator():
+     """Test automatic data annotation."""
+     from ultralytics.data.annotator import auto_annotate
+
+     auto_annotate(
+         ASSETS,
+         det_model=WEIGHTS_DIR / "yolov8n.pt",
+         sam_model=WEIGHTS_DIR / "mobile_sam.pt",
+         output_dir=TMP / "auto_annotate_labels",
+     )
+
+
+ def test_events():
+     """Test event sending functionality."""
+     from ultralytics.hub.utils import Events
+
+     events = Events()
+     events.enabled = True
+     cfg = copy(DEFAULT_CFG)  # does not require deepcopy
+     cfg.mode = "test"
+     events(cfg)
+
+
+ def test_cfg_init():
+     """Test configuration initialization utilities."""
+     from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value
+
+     with contextlib.suppress(SyntaxError):
+         check_dict_alignment({"a": 1}, {"b": 2})
+     copy_default_cfg()
+     (Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")).unlink(missing_ok=False)
+     [smart_value(x) for x in ["none", "true", "false"]]
+
+
+ def test_utils_init():
+     """Test initialization utilities."""
+     from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_action_running
+
+     get_ubuntu_version()
+     is_github_action_running()
+     get_git_origin_url()
+     get_git_branch()
+
+
+ def test_utils_checks():
+     """Test various utility checks."""
+     checks.check_yolov5u_filename("yolov5n.pt")
+     checks.git_describe(ROOT)
+     checks.check_requirements()  # check requirements.txt
+     checks.check_imgsz([600, 600], max_dim=1)
+     checks.check_imshow(warn=True)
+     checks.check_version("ultralytics", "8.0.0")
+     checks.print_args()
+
+
+ @pytest.mark.skipif(WINDOWS, reason="Windows profiling is extremely slow (cause unknown)")
+ def test_utils_benchmarks():
+     """Test model benchmarking."""
+     from ultralytics.utils.benchmarks import ProfileModels
+
+     ProfileModels(["yolov8n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
+
+
+ def test_utils_torchutils():
+     """Test Torch utility functions."""
+     from ultralytics.nn.modules.conv import Conv
+     from ultralytics.utils.torch_utils import get_flops_with_torch_profiler, profile, time_sync
+
+     x = torch.randn(1, 64, 20, 20)
+     m = Conv(64, 64, k=1, s=2)
+
+     profile(x, [m], n=3)
+     get_flops_with_torch_profiler(m)
+     time_sync()
+
+
+ @pytest.mark.slow
+ @pytest.mark.skipif(not ONLINE, reason="environment is offline")
+ def test_utils_downloads():
+     """Test file download utilities."""
+     from ultralytics.utils.downloads import get_google_drive_file_info
+
+     get_google_drive_file_info("https://drive.google.com/file/d/1cqT-cJgANNrhIHCrEufUYhQ4RqiWG_lJ/view?usp=drive_link")
+
+
+ def test_utils_ops():
+     """Test various operations utilities."""
+     from ultralytics.utils.ops import (
+         ltwh2xywh,
+         ltwh2xyxy,
+         make_divisible,
+         xywh2ltwh,
+         xywh2xyxy,
+         xywhn2xyxy,
+         xywhr2xyxyxyxy,
+         xyxy2ltwh,
+         xyxy2xywh,
+         xyxy2xywhn,
+         xyxyxyxy2xywhr,
+     )
+
+     make_divisible(17, torch.tensor([8]))
+
+     boxes = torch.rand(10, 4)  # xywh
+     torch.allclose(boxes, xyxy2xywh(xywh2xyxy(boxes)))
+     torch.allclose(boxes, xyxy2xywhn(xywhn2xyxy(boxes)))
+     torch.allclose(boxes, ltwh2xywh(xywh2ltwh(boxes)))
+     torch.allclose(boxes, xyxy2ltwh(ltwh2xyxy(boxes)))
+
+     boxes = torch.rand(10, 5)  # xywhr for OBB
+     boxes[:, 4] = torch.randn(10) * 30
+     torch.allclose(boxes, xyxyxyxy2xywhr(xywhr2xyxyxyxy(boxes)), rtol=1e-3)
+
+
+ def test_utils_files():
+     """Test file handling utilities."""
+     from ultralytics.utils.files import file_age, file_date, get_latest_run, spaces_in_path
+
+     file_age(SOURCE)
+     file_date(SOURCE)
+     get_latest_run(ROOT / "runs")
+
+     path = TMP / "path/with spaces"
+     path.mkdir(parents=True, exist_ok=True)
+     with spaces_in_path(path) as new_path:
+         print(new_path)
+
+
+ @pytest.mark.slow
+ def test_utils_patches_torch_save():
+     """Test torch_save backoff when _torch_save throws RuntimeError."""
+     from unittest.mock import MagicMock, patch
+
+     from ultralytics.utils.patches import torch_save
+
+     mock = MagicMock(side_effect=RuntimeError)
+
+     with patch("ultralytics.utils.patches._torch_save", new=mock):
+         with pytest.raises(RuntimeError):
+             torch_save(torch.zeros(1), TMP / "test.pt")
+
+     assert mock.call_count == 4, "torch_save was not attempted the expected number of times"
+
+
+ def test_nn_modules_conv():
+     """Test Convolutional Neural Network modules."""
+     from ultralytics.nn.modules.conv import CBAM, Conv2, ConvTranspose, DWConvTranspose2d, Focus
+
+     c1, c2 = 8, 16  # input and output channels
+     x = torch.zeros(4, c1, 10, 10)  # BCHW
+
+     # Run all modules not otherwise covered in tests
+     DWConvTranspose2d(c1, c2)(x)
+     ConvTranspose(c1, c2)(x)
+     Focus(c1, c2)(x)
+     CBAM(c1)(x)
+
+     # Fuse ops
+     m = Conv2(c1, c2)
+     m.fuse_convs()
+     m(x)
+
+
+ def test_nn_modules_block():
+     """Test Neural Network block modules."""
+     from ultralytics.nn.modules.block import C1, C3TR, BottleneckCSP, C3Ghost, C3x
+
+     c1, c2 = 8, 16  # input and output channels
+     x = torch.zeros(4, c1, 10, 10)  # BCHW
+
+     # Run all modules not otherwise covered in tests
+     C1(c1, c2)(x)
+     C3x(c1, c2)(x)
+     C3TR(c1, c2)(x)
+     C3Ghost(c1, c2)(x)
+     BottleneckCSP(c1, c2)(x)
+
+
+ @pytest.mark.skipif(not ONLINE, reason="environment is offline")
+ def test_hub():
+     """Test Ultralytics HUB functionalities."""
+     from ultralytics.hub import export_fmts_hub, logout
+     from ultralytics.hub.utils import smart_request
+
+     export_fmts_hub()
+     logout()
+     smart_request("GET", "https://github.com", progress=True)
+
+
+ @pytest.fixture
+ def image():
+     """Loads an image from a predefined source using OpenCV."""
+     return cv2.imread(str(SOURCE))
+
+
+ @pytest.mark.parametrize(
+     "auto_augment, erasing, force_color_jitter",
+     [
+         (None, 0.0, False),
+         ("randaugment", 0.5, True),
+         ("augmix", 0.2, False),
+         ("autoaugment", 0.0, True),
+     ],
+ )
+ def test_classify_transforms_train(image, auto_augment, erasing, force_color_jitter):
+     """Tests classification transforms during training with various augmentation settings."""
+     from ultralytics.data.augment import classify_augmentations
+
+     transform = classify_augmentations(
+         size=224,
+         mean=(0.5, 0.5, 0.5),
+         std=(0.5, 0.5, 0.5),
+         scale=(0.08, 1.0),
+         ratio=(3.0 / 4.0, 4.0 / 3.0),
+         hflip=0.5,
+         vflip=0.5,
+         auto_augment=auto_augment,
+         hsv_h=0.015,
+         hsv_s=0.4,
+         hsv_v=0.4,
+         force_color_jitter=force_color_jitter,
+         erasing=erasing,
+     )
+
+     transformed_image = transform(Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)))
+
+     assert transformed_image.shape == (3, 224, 224)
+     assert torch.is_tensor(transformed_image)
+     assert transformed_image.dtype == torch.float32
+
+
+ @pytest.mark.slow
+ @pytest.mark.skipif(not ONLINE, reason="environment is offline")
+ def test_model_tune():
+     """Tune YOLO model for performance."""
+     YOLO("yolov8n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+     YOLO("yolov8n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+
+
+ def test_model_embeddings():
+     """Test YOLO model embeddings."""
+     model_detect = YOLO(MODEL)
+     model_segment = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
+
+     for batch in [SOURCE], [SOURCE, SOURCE]:  # test batch size 1 and 2
+         assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
+         assert len(model_segment.embed(source=batch, imgsz=32)) == len(batch)
+
+
+ @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
+ def test_yolo_world():
+     """Tests YOLO world models with different configurations, including classes, detection, and training scenarios."""
+     model = YOLO("yolov8s-world.pt")  # no YOLOv8n-world model yet
+     model.set_classes(["tree", "window"])
+     model(SOURCE, conf=0.01)
+
+     model = YOLO("yolov8s-worldv2.pt")  # no YOLOv8n-world model yet
+     # Training from a pretrained model. Eval is included at the final stage of training.
+     # Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
+     model.train(
+         data="dota8.yaml",
+         epochs=1,
+         imgsz=32,
+         cache="disk",
+         close_mosaic=1,
+     )
+
+     # test WorldTrainerFromScratch
+     from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
+
+     model = YOLO("yolov8s-worldv2.yaml")  # no YOLOv8n-world model yet
+     model.train(
+         data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
+         epochs=1,
+         imgsz=32,
+         cache="disk",
+         close_mosaic=1,
+         trainer=WorldTrainerFromScratch,
+     )
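
For reviewers who want to exercise the newly added suite locally, the sketch below shows one way to run a single test from this file through pytest's Python API. It is illustrative only and not part of the diff: the file path and test name come from the listing above, while any extra flags for slow or online tests defined in the repository's conftest are assumptions.

# Minimal sketch (not part of the package): run one test from the added file.
# Assumes ultralytics and pytest are installed and the repository's tests/ package is importable.
import pytest

if __name__ == "__main__":
    # -k selects a single test by name; drop it to run the whole module.
    raise SystemExit(pytest.main(["tests/test_python.py", "-k", "test_model_forward", "-v"]))
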
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license

- __version__ = "8.2.19"
+ __version__ = "8.2.21"

  from ultralytics.data.explorer.explorer import Explorer
  from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld
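
The only change to ultralytics/__init__.py is the version bump shown above. A minimal sketch for confirming which side of this diff is installed in a given environment, using nothing beyond the attribute visible in the diff:

# Print the installed ultralytics version.
import ultralytics

print(ultralytics.__version__)  # "8.2.21" after upgrading from "8.2.19"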