ultralytics 8.2.20__py3-none-any.whl → 8.2.22__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.

Note: this version of ultralytics has been flagged as a potentially problematic release.

tests/__init__.py ADDED
@@ -0,0 +1,22 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks, is_dir_writeable
+
+ # Constants used in tests
+ MODEL = WEIGHTS_DIR / "path with spaces" / "yolov8n.pt"  # test spaces in path
+ CFG = "yolov8n.yaml"
+ SOURCE = ASSETS / "bus.jpg"
+ TMP = (ROOT / "../tests/tmp").resolve()  # temp directory for test files
+ IS_TMP_WRITEABLE = is_dir_writeable(TMP)
+ CUDA_IS_AVAILABLE = checks.cuda_is_available()
+ CUDA_DEVICE_COUNT = checks.cuda_device_count()
+
+ __all__ = (
+     "MODEL",
+     "CFG",
+     "SOURCE",
+     "TMP",
+     "IS_TMP_WRITEABLE",
+     "CUDA_IS_AVAILABLE",
+     "CUDA_DEVICE_COUNT",
+ )
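
Note: a minimal sketch of how downstream test modules consume these shared constants. The test function name below is hypothetical and not part of the package:

    # Hypothetical consumer of the shared test constants (illustration only)
    import shutil

    import pytest

    from tests import IS_TMP_WRITEABLE, SOURCE, TMP


    @pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="tests/tmp is not writeable")
    def test_uses_shared_constants():
        """Copy the shared sample image into the shared temp directory."""
        TMP.mkdir(parents=True, exist_ok=True)
        shutil.copy(SOURCE, TMP / SOURCE.name)
        assert (TMP / SOURCE.name).exists()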
tests/conftest.py ADDED
@@ -0,0 +1,71 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ import shutil
+ from pathlib import Path
+
+ from tests import TMP
+
+
+ def pytest_addoption(parser):
+     """
+     Add custom command-line options to pytest.
+
+     Args:
+         parser (pytest.config.Parser): The pytest parser object.
+     """
+     parser.addoption("--slow", action="store_true", default=False, help="Run slow tests")
+
+
+ def pytest_collection_modifyitems(config, items):
+     """
+     Modify the list of test items to remove tests marked as slow if the --slow option is not provided.
+
+     Args:
+         config (pytest.config.Config): The pytest config object.
+         items (list): List of test items to be executed.
+     """
+     if not config.getoption("--slow"):
+         # Remove the item entirely from the list of test items if it's marked as 'slow'
+         items[:] = [item for item in items if "slow" not in item.keywords]
+
+
+ def pytest_sessionstart(session):
+     """
+     Initialize session configurations for pytest.
+
+     This function is automatically called by pytest after the 'Session' object has been created but before performing
+     test collection. It sets the initial seeds and prepares the temporary directory for the test session.
+
+     Args:
+         session (pytest.Session): The pytest session object.
+     """
+     from ultralytics.utils.torch_utils import init_seeds
+
+     init_seeds()
+     shutil.rmtree(TMP, ignore_errors=True)  # delete any existing tests/tmp directory
+     TMP.mkdir(parents=True, exist_ok=True)  # create a new empty directory
+
+
+ def pytest_terminal_summary(terminalreporter, exitstatus, config):
+     """
+     Cleanup operations after pytest session.
+
+     This function is automatically called by pytest at the end of the entire test session. It removes certain files
+     and directories used during testing.
+
+     Args:
+         terminalreporter (pytest.terminal.TerminalReporter): The terminal reporter object.
+         exitstatus (int): The exit status of the test run.
+         config (pytest.config.Config): The pytest config object.
+     """
+     from ultralytics.utils import WEIGHTS_DIR
+
+     # Remove files
+     models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
+     for file in ["bus.jpg", "yolov8n.onnx", "yolov8n.torchscript"] + models:
+         Path(file).unlink(missing_ok=True)
+
+     # Remove directories
+     models = [path for x in ["*.mlpackage", "*_openvino_model"] for path in WEIGHTS_DIR.rglob(x)]
+     for directory in [TMP.parents[1] / ".pytest_cache", TMP] + models:
+         shutil.rmtree(directory, ignore_errors=True)
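
Note: with these hooks, tests marked @pytest.mark.slow are dropped at collection time unless the custom flag is passed. A minimal sketch of invoking the suite with that flag through pytest's Python entry point, equivalent to running "pytest tests --slow" from a shell:

    # Run the suite including slow tests; without "--slow",
    # pytest_collection_modifyitems above removes them from the collected items.
    import pytest

    raise SystemExit(pytest.main(["tests", "--slow", "-q"]))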
tests/test_cli.py ADDED
@@ -0,0 +1,128 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ import subprocess
+
+ import pytest
+
+ from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+ from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
+
+ from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE
+
+ # Constants
+ TASK_MODEL_DATA = [(task, WEIGHTS_DIR / TASK2MODEL[task], TASK2DATA[task]) for task in TASKS]
+ MODELS = [WEIGHTS_DIR / TASK2MODEL[task] for task in TASKS]
+
+
+ def run(cmd):
+     """Execute a shell command using subprocess."""
+     subprocess.run(cmd.split(), check=True)
+
+
+ def test_special_modes():
+     """Test various special command modes of YOLO."""
+     run("yolo help")
+     run("yolo checks")
+     run("yolo version")
+     run("yolo settings reset")
+     run("yolo cfg")
+
+
+ @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+ def test_train(task, model, data):
+     """Test YOLO training for a given task, model, and data."""
+     run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 cache=disk")
+
+
+ @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+ def test_val(task, model, data):
+     """Test YOLO validation for a given task, model, and data."""
+     run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json")
+
+
+ @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+ def test_predict(task, model, data):
+     """Test YOLO prediction on sample assets for a given task and model."""
+     run(f"yolo predict model={model} source={ASSETS} imgsz=32 save save_crop save_txt")
+
+
+ @pytest.mark.parametrize("model", MODELS)
+ def test_export(model):
+     """Test exporting a YOLO model to different formats."""
+     run(f"yolo export model={model} format=torchscript imgsz=32")
+
+
+ def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
+     """Test the RTDETR functionality with the Ultralytics framework."""
+     # Warning: must use imgsz=640 (note: the comma, stray spaces, and fraction=0.25 args below are intentional, to test CLI argument parsing and single-image training)
+     run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")
+     run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
+
+
+ @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
+ def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8-seg.yaml"):
+     """Test FastSAM segmentation functionality within Ultralytics."""
+     source = ASSETS / "bus.jpg"
+
+     run(f"yolo segment val {task} model={model} data={data} imgsz=32")
+     run(f"yolo segment predict model={model} source={source} imgsz=32 save save_crop save_txt")
+
+     from ultralytics import FastSAM
+     from ultralytics.models.fastsam import FastSAMPrompt
+     from ultralytics.models.sam import Predictor
+
+     # Create a FastSAM model
+     sam_model = FastSAM(model)  # or FastSAM-x.pt
+
+     # Run inference on an image
+     everything_results = sam_model(source, device="cpu", retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)
+
+     # Remove small regions
+     new_masks, _ = Predictor.remove_small_regions(everything_results[0].masks.data, min_area=20)
+
+     # Everything prompt
+     prompt_process = FastSAMPrompt(source, everything_results, device="cpu")
+     ann = prompt_process.everything_prompt()
+
+     # Bbox default shape [0,0,0,0] -> [x1,y1,x2,y2]
+     ann = prompt_process.box_prompt(bbox=[200, 200, 300, 300])
+
+     # Text prompt
+     ann = prompt_process.text_prompt(text="a photo of a dog")
+
+     # Point prompt
+     # Points default [[0,0]] [[x1,y1],[x2,y2]]
+     # Point_label default [0] [1,0] 0:background, 1:foreground
+     ann = prompt_process.point_prompt(points=[[200, 200]], pointlabel=[1])
+     prompt_process.plot(annotations=ann, output="./")
+
+
+ def test_mobilesam():
+     """Test MobileSAM segmentation functionality using Ultralytics."""
+     from ultralytics import SAM
+
+     # Load the model
+     model = SAM(WEIGHTS_DIR / "mobile_sam.pt")
+
+     # Source
+     source = ASSETS / "zidane.jpg"
+
+     # Predict a segment based on a point prompt
+     model.predict(source, points=[900, 370], labels=[1])
+
+     # Predict a segment based on a box prompt
+     model.predict(source, bboxes=[439, 437, 524, 709])
+
+     # Predict all
+     # model(source)
+
+
+ # Slow Tests -----------------------------------------------------------------------------------------------------------
+ @pytest.mark.slow
+ @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+ @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+ @pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason="DDP is not available")
+ def test_train_gpu(task, model, data):
+     """Test YOLO training on GPU(s) for various tasks and models."""
+     run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0")  # single GPU
+     run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0,1")  # multi GPU
tests/test_cuda.py ADDED
@@ -0,0 +1,134 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ from itertools import product
+ from pathlib import Path
+
+ import pytest
+ import torch
+
+ from ultralytics import YOLO
+ from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+ from ultralytics.utils import ASSETS, WEIGHTS_DIR
+
+ from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
+
+
+ def test_checks():
+     """Validate CUDA settings against torch CUDA functions."""
+     assert torch.cuda.is_available() == CUDA_IS_AVAILABLE
+     assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
+
+
+ @pytest.mark.slow
+ @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+ @pytest.mark.parametrize(
+     "task, dynamic, int8, half, batch",
+     [  # generate all combinations but exclude those where both int8 and half are True
+         (task, dynamic, int8, half, batch)
+         # Note: the test matrix is reduced below while GPU CI runner utilization is high, pending compute expansion
+         # for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
+         for task, dynamic, int8, half, batch in product(TASKS, [True], [True], [False], [2])
+         if not (int8 and half)  # exclude cases where both int8 and half are True
+     ],
+ )
+ def test_export_engine_matrix(task, dynamic, int8, half, batch):
+     """Test YOLO exports to TensorRT format."""
+     file = YOLO(TASK2MODEL[task]).export(
+         format="engine",
+         imgsz=32,
+         dynamic=dynamic,
+         int8=int8,
+         half=half,
+         batch=batch,
+         data=TASK2DATA[task],
+         workspace=1,  # reduce workspace GB for less resource utilization during testing
+     )
+     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+     Path(file).unlink()  # cleanup
+     Path(file).with_suffix(".cache").unlink() if int8 else None  # cleanup INT8 cache
+
+
+ @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+ def test_train():
+     """Test model training on a minimal dataset."""
+     device = 0 if CUDA_DEVICE_COUNT == 1 else [0, 1]
+     YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)  # requires imgsz>=64
+
+
+ @pytest.mark.slow
+ @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+ def test_predict_multiple_devices():
+     """Validate model prediction on multiple devices."""
+     model = YOLO("yolov8n.pt")
+     model = model.cpu()
+     assert str(model.device) == "cpu"
+     _ = model(SOURCE)  # CPU inference
+     assert str(model.device) == "cpu"
+
+     model = model.to("cuda:0")
+     assert str(model.device) == "cuda:0"
+     _ = model(SOURCE)  # CUDA inference
+     assert str(model.device) == "cuda:0"
+
+     model = model.cpu()
+     assert str(model.device) == "cpu"
+     _ = model(SOURCE)  # CPU inference
+     assert str(model.device) == "cpu"
+
+     model = model.cuda()
+     assert str(model.device) == "cuda:0"
+     _ = model(SOURCE)  # CUDA inference
+     assert str(model.device) == "cuda:0"
+
+
+ @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+ def test_autobatch():
+     """Check batch size for YOLO model using autobatch."""
+     from ultralytics.utils.autobatch import check_train_batch_size
+
+     check_train_batch_size(YOLO(MODEL).model.cuda(), imgsz=128, amp=True)
+
+
+ @pytest.mark.slow
+ @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+ def test_utils_benchmarks():
+     """Profile YOLO models for performance benchmarks."""
+     from ultralytics.utils.benchmarks import ProfileModels
+
+     # Pre-export a dynamic engine model to use dynamic inference
+     YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
+     ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
+
+
+ @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+ def test_predict_sam():
+     """Test SAM model prediction with various prompts."""
+     from ultralytics import SAM
+     from ultralytics.models.sam import Predictor as SAMPredictor
+
+     # Load a model
+     model = SAM(WEIGHTS_DIR / "sam_b.pt")
+
+     # Display model information (optional)
+     model.info()
+
+     # Run inference
+     model(SOURCE, device=0)
+
+     # Run inference with bboxes prompt
+     model(SOURCE, bboxes=[439, 437, 524, 709], device=0)
+
+     # Run inference with points prompt
+     model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=0)
+
+     # Create SAMPredictor
+     overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
+     predictor = SAMPredictor(overrides=overrides)
+
+     # Set image
+     predictor.set_image(ASSETS / "zidane.jpg")  # set with image file
+     # predictor(bboxes=[439, 437, 524, 709])
+     # predictor(points=[900, 370], labels=[1])
+
+     # Reset image
+     predictor.reset_image()
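
Note: as the commented-out product(...) line in test_export_engine_matrix indicates, the TensorRT export test currently runs over a reduced parameter set. A sketch that enumerates the full matrix the reduced set stands in for, shown only to make the combinatorics concrete; it assumes the same TASKS table:

    from itertools import product

    from ultralytics.cfg import TASKS

    full_matrix = [
        (task, dynamic, int8, half, batch)
        for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
        if not (int8 and half)  # mirror the suite's exclusion of combined INT8 and FP16
    ]
    print(len(full_matrix))  # parameter combinations remaining after the exclusion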
tests/test_engine.py ADDED
@@ -0,0 +1,132 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ import sys
+ from unittest import mock
+
+ from ultralytics import YOLO
+ from ultralytics.cfg import get_cfg
+ from ultralytics.engine.exporter import Exporter
+ from ultralytics.models.yolo import classify, detect, segment
+ from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
+
+ from tests import MODEL
+
+
+ def test_func(*args):  # noqa
+     """Test function callback."""
+     print("callback test passed")
+
+
+ def test_export():
+     """Test model exporting functionality."""
+     exporter = Exporter()
+     exporter.add_callback("on_export_start", test_func)
+     assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
+     f = exporter(model=YOLO("yolov8n.yaml").model)
+     YOLO(f)(ASSETS)  # exported model inference
+
+
+ def test_detect():
+     """Test object detection functionality."""
+     overrides = {"data": "coco8.yaml", "model": "yolov8n.yaml", "imgsz": 32, "epochs": 1, "save": False}
+     cfg = get_cfg(DEFAULT_CFG)
+     cfg.data = "coco8.yaml"
+     cfg.imgsz = 32
+
+     # Trainer
+     trainer = detect.DetectionTrainer(overrides=overrides)
+     trainer.add_callback("on_train_start", test_func)
+     assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
+     trainer.train()
+
+     # Validator
+     val = detect.DetectionValidator(args=cfg)
+     val.add_callback("on_val_start", test_func)
+     assert test_func in val.callbacks["on_val_start"], "callback test failed"
+     val(model=trainer.best)  # validate best.pt
+
+     # Predictor
+     pred = detect.DetectionPredictor(overrides={"imgsz": [64, 64]})
+     pred.add_callback("on_predict_start", test_func)
+     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
+     # Confirm there is no issue with sys.argv being empty.
+     with mock.patch.object(sys, "argv", []):
+         result = pred(source=ASSETS, model=MODEL)
+         assert len(result), "predictor test failed"
+
+     overrides["resume"] = trainer.last
+     trainer = detect.DetectionTrainer(overrides=overrides)
+     try:
+         trainer.train()
+     except Exception as e:
+         print(f"Expected exception caught: {e}")
+         return
+
+     raise Exception("Resume test failed!")
+
+
+ def test_segment():
+     """Test image segmentation functionality."""
+     overrides = {"data": "coco8-seg.yaml", "model": "yolov8n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
+     cfg = get_cfg(DEFAULT_CFG)
+     cfg.data = "coco8-seg.yaml"
+     cfg.imgsz = 32
+     # YOLO(CFG_SEG).train(**overrides)  # works
+
+     # Trainer
+     trainer = segment.SegmentationTrainer(overrides=overrides)
+     trainer.add_callback("on_train_start", test_func)
+     assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
+     trainer.train()
+
+     # Validator
+     val = segment.SegmentationValidator(args=cfg)
+     val.add_callback("on_val_start", test_func)
+     assert test_func in val.callbacks["on_val_start"], "callback test failed"
+     val(model=trainer.best)  # validate best.pt
+
+     # Predictor
+     pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
+     pred.add_callback("on_predict_start", test_func)
+     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
+     result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolov8n-seg.pt")
+     assert len(result), "predictor test failed"
+
+     # Test resume
+     overrides["resume"] = trainer.last
+     trainer = segment.SegmentationTrainer(overrides=overrides)
+     try:
+         trainer.train()
+     except Exception as e:
+         print(f"Expected exception caught: {e}")
+         return
+
+     raise Exception("Resume test failed!")
+
+
+ def test_classify():
+     """Test image classification functionality."""
+     overrides = {"data": "imagenet10", "model": "yolov8n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
+     cfg = get_cfg(DEFAULT_CFG)
+     cfg.data = "imagenet10"
+     cfg.imgsz = 32
+     # YOLO(CFG_SEG).train(**overrides)  # works
+
+     # Trainer
+     trainer = classify.ClassificationTrainer(overrides=overrides)
+     trainer.add_callback("on_train_start", test_func)
+     assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
+     trainer.train()
+
+     # Validator
+     val = classify.ClassificationValidator(args=cfg)
+     val.add_callback("on_val_start", test_func)
+     assert test_func in val.callbacks["on_val_start"], "callback test failed"
+     val(model=trainer.best)
+
+     # Predictor
+     pred = classify.ClassificationPredictor(overrides={"imgsz": [64, 64]})
+     pred.add_callback("on_predict_start", test_func)
+     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
+     result = pred(source=ASSETS, model=trainer.best)
+     assert len(result), "predictor test failed"
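
Note: every add_callback/callbacks assertion in this file relies on the same event-hook pattern. A self-contained sketch of that pattern, assuming only what the assertions show (callbacks maps event names to lists of callables); CallbackHost is a hypothetical stand-in, not an Ultralytics class:

    from collections import defaultdict


    class CallbackHost:
        """Hypothetical stand-in for the hook handling in Exporter/Trainer/Validator/Predictor."""

        def __init__(self):
            self.callbacks = defaultdict(list)  # event name -> list of callables

        def add_callback(self, event, func):
            """Register func to run when the named event fires."""
            self.callbacks[event].append(func)

        def run_callbacks(self, event):
            """Invoke all callbacks registered for the named event."""
            for func in self.callbacks[event]:
                func(self)


    host = CallbackHost()
    host.add_callback("on_train_start", lambda trainer: print("callback test passed"))
    host.run_callbacks("on_train_start")  # prints "callback test passed"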
tests/test_explorer.py ADDED
@@ -0,0 +1,61 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ import PIL.Image
+
+ import pytest
+
+ from ultralytics import Explorer
+ from ultralytics.utils import ASSETS
+
+
+ @pytest.mark.slow
+ def test_similarity():
+     """Test similarity calculations and SQL queries for correctness and response length."""
+     exp = Explorer(data="coco8.yaml")
+     exp.create_embeddings_table()
+     similar = exp.get_similar(idx=1)
+     assert len(similar) == 4
+     similar = exp.get_similar(img=ASSETS / "bus.jpg")
+     assert len(similar) == 4
+     similar = exp.get_similar(idx=[1, 2], limit=2)
+     assert len(similar) == 2
+     sim_idx = exp.similarity_index()
+     assert len(sim_idx) == 4
+     sql = exp.sql_query("WHERE labels LIKE '%zebra%'")
+     assert len(sql) == 1
+
+
+ @pytest.mark.slow
+ def test_det():
+     """Test detection functionalities and ensure the embedding table has bounding boxes."""
+     exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
+     exp.create_embeddings_table(force=True)
+     assert len(exp.table.head()["bboxes"]) > 0
+     similar = exp.get_similar(idx=[1, 2], limit=10)
+     assert len(similar) > 0
+     # This is a loose test that only checks for errors, not correctness
+     similar = exp.plot_similar(idx=[1, 2], limit=10)
+     assert isinstance(similar, PIL.Image.Image)
+
+
+ @pytest.mark.slow
+ def test_seg():
+     """Test segmentation functionalities and verify the embedding table includes masks."""
+     exp = Explorer(data="coco8-seg.yaml", model="yolov8n-seg.pt")
+     exp.create_embeddings_table(force=True)
+     assert len(exp.table.head()["masks"]) > 0
+     similar = exp.get_similar(idx=[1, 2], limit=10)
+     assert len(similar) > 0
+     similar = exp.plot_similar(idx=[1, 2], limit=10)
+     assert isinstance(similar, PIL.Image.Image)
+
+
+ @pytest.mark.slow
+ def test_pose():
+     """Test pose estimation functionalities and check the embedding table for keypoints."""
+     exp = Explorer(data="coco8-pose.yaml", model="yolov8n-pose.pt")
+     exp.create_embeddings_table(force=True)
+     assert len(exp.table.head()["keypoints"]) > 0
+     similar = exp.get_similar(idx=[1, 2], limit=10)
+     assert len(similar) > 0
+     similar = exp.plot_similar(idx=[1, 2], limit=10)
+     assert isinstance(similar, PIL.Image.Image)
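
Note: the same Explorer API can be exercised interactively. A hedged usage sketch mirroring test_similarity, assuming the optional Explorer dependencies (e.g. lancedb) are installed:

    from ultralytics import Explorer

    exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
    exp.create_embeddings_table()
    print(exp.get_similar(idx=0, limit=3))  # rows most similar to image index 0
    print(exp.sql_query("WHERE labels LIKE '%person%'"))  # SQL-style filter over the embeddings table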