ultralytics 8.3.105__py3-none-any.whl → 8.3.106__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- __version__ = "8.3.105"
3
+ __version__ = "8.3.106"
4
4
 
5
5
  import os
6
6
 
@@ -86,7 +86,6 @@ from ultralytics.utils import (
86
86
  LINUX,
87
87
  LOGGER,
88
88
  MACOS,
89
- PYTHON_VERSION,
90
89
  RKNN_CHIPS,
91
90
  ROOT,
92
91
  WINDOWS,
@@ -795,7 +794,7 @@ class Exporter:
795
794
  def export_coreml(self, prefix=colorstr("CoreML:")):
796
795
  """YOLO CoreML export."""
797
796
  mlmodel = self.args.format.lower() == "mlmodel" # legacy *.mlmodel export format requested
798
- check_requirements("coremltools>=6.0,<=6.2" if mlmodel else "coremltools>=8.0")
797
+ check_requirements("coremltools>=8.0")
799
798
  import coremltools as ct # noqa
800
799
 
801
800
  LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
@@ -819,11 +818,15 @@ class Exporter:
819
818
  # TODO CoreML Segment and Pose model pipelining
820
819
  model = self.model
821
820
  ts = torch.jit.trace(model.eval(), self.im, strict=False) # TorchScript model
821
+
822
+ # Based on Apple's documentation it is better to leave out the minimum_deployment_target and let that get set
823
+ # internally based on the model conversion and output type.
824
+ # Setting minimum_deployment_target >= iOS16 will require setting compute_precision=ct.precision.FLOAT32.
825
+ # iOS16 adds in better support for FP16, but none of the CoreML NMS specifications handle FP16 as input.
822
826
  ct_model = ct.convert(
823
827
  ts,
824
828
  inputs=[ct.ImageType("image", shape=self.im.shape, scale=scale, bias=bias)], # expects ct.TensorType
825
829
  classifier_config=classifier_config,
826
- minimum_deployment_target=ct.target.iOS15, # warning: >=16 causes pipeline errors
827
830
  convert_to="neuralnetwork" if mlmodel else "mlprogram",
828
831
  )
829
832
  bits, mode = (8, "kmeans") if self.args.int8 else (16, "linear") if self.args.half else (32, None)
@@ -840,8 +843,6 @@ class Exporter:
840
843
  ct_model = cto.palettize_weights(ct_model, config=config)
841
844
  if self.args.nms and self.model.task == "detect":
842
845
  if mlmodel:
843
- # coremltools<=6.2 NMS export requires Python<3.11
844
- check_version(PYTHON_VERSION, "<3.11", name="Python ", hard=True)
845
846
  weights_dir = None
846
847
  else:
847
848
  ct_model.save(str(f)) # save otherwise weights_dir does not exist
@@ -1469,7 +1470,7 @@ class Exporter:
1469
1470
 
1470
1471
  # 3. Create NMS protobuf
1471
1472
  nms_spec = ct.proto.Model_pb2.Model()
1472
- nms_spec.specificationVersion = 9
1473
+ nms_spec.specificationVersion = spec.specificationVersion
1473
1474
  for i in range(2):
1474
1475
  decoder_output = model._spec.description.output[i].SerializeToString()
1475
1476
  nms_spec.description.input.add()
@@ -1522,7 +1523,7 @@ class Exporter:
1522
1523
  pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
1523
1524
 
1524
1525
  # Update metadata
1525
- pipeline.spec.specificationVersion = 9
1526
+ pipeline.spec.specificationVersion = spec.specificationVersion
1526
1527
  pipeline.spec.description.metadata.userDefined.update(
1527
1528
  {"IoU threshold": str(nms.iouThreshold), "Confidence threshold": str(nms.confidenceThreshold)}
1528
1529
  )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ultralytics
3
- Version: 8.3.105
3
+ Version: 8.3.106
4
4
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
5
5
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
6
6
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,13 +1,4 @@
1
- tests/__init__.py,sha256=xnMhv3O_DF1YrW4zk__ZywQzAaoTDjPKPoiI1Ktss1w,670
2
- tests/conftest.py,sha256=rsIAipRKfrVNoTaJ1LdpYue8AbcJ_fr3d3WIlM_6uXY,2982
3
- tests/test_cli.py,sha256=DPxUjcGAex_cmGMNaRIK7mT7wrILWaPBtlfXuHQpveI,5284
4
- tests/test_cuda.py,sha256=0uvTF4bY_Grsd_Xgtp7TdIEgMpUqKv8_kWA82NYDl_g,6260
5
- tests/test_engine.py,sha256=aGqZ8P7QO5C_nOa1b4FOyk92Ysdk5WiP-ST310Vyxys,4962
6
- tests/test_exports.py,sha256=dhZn86LdbapW15RthQF870LGxDjC1MUZhlGdBgPmgIQ,9716
7
- tests/test_integrations.py,sha256=ZgpddWHEVqiP4bGhVw8fLc2wdz0rCxuxr0FQ2dTgnIE,6067
8
- tests/test_python.py,sha256=ij0MV87WtbY2WVs0uP41GdVxt_p_M5Rrkldna3M5nXY,24620
9
- tests/test_solutions.py,sha256=428CUFC-ns0GDRZWt_er1Ma8Kb1jtDgSj3cw3T2HjWE,5530
10
- ultralytics/__init__.py,sha256=70uClMXEl0zWqRYtxBGmqsXSqZT6XKCKR-m47cxzkGA,730
1
+ ultralytics/__init__.py,sha256=ey81HB6cgSBcFyxUYLBPSmZvuTyw-WPic4IrVBhQboc,730
11
2
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
12
3
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
13
4
  ultralytics/cfg/__init__.py,sha256=UCUFiZg-bqJwpuLLaGgy7RvAMxD-nbcVsPLxSo8x3ZA,39821
@@ -111,7 +102,7 @@ ultralytics/data/loaders.py,sha256=_Gyp_BfGTZwsFdn4UnolXxdU_sAYZLIrv0L2TRI9R5g,2
111
102
  ultralytics/data/split_dota.py,sha256=p8eVGht9tABSVbf9vwvxA_AQYEva3IGHePKlMeNrn64,11872
112
103
  ultralytics/data/utils.py,sha256=aRPwIoLrCML_Kcd0dI9B6c5Ct4dvhdF36rDHtuf7Ww4,33217
113
104
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
114
- ultralytics/engine/exporter.py,sha256=XJYeJYloVPkXyl7-kCzbYt6KqqZPpLz4-BgkUrQFlOc,78346
105
+ ultralytics/engine/exporter.py,sha256=rIQpCkgC_f_3liWpkBUAhZTQmivN8ptDfkhpi39fyzY,78504
115
106
  ultralytics/engine/model.py,sha256=YgQKYZrPENSTvLENspg-bXI9FinzzWARfb0U-C9vH-M,52916
116
107
  ultralytics/engine/predictor.py,sha256=fRUh82EJlu_6ZlIy8NFovlCcgX53UbRYSXcLljOs7Sc,21669
117
108
  ultralytics/engine/results.py,sha256=H3pFJhUjYKvVyOUqqZjfIn8vnCpl81aYNOnregMrBoQ,79716
@@ -252,9 +243,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=XXnnKQ-MoLIexl8y2Vb0i-cCLyePE0n5BU
252
243
  ultralytics/utils/callbacks/raytune.py,sha256=omVZNNuzYxsZZXrF9xpbFv7R1Wjdx1j-gv0xXuZrQas,1122
253
244
  ultralytics/utils/callbacks/tensorboard.py,sha256=7eUX21_Ym7i6iN4euZzrqglphyl5xak1yl_-wfFshbg,5502
254
245
  ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
255
- ultralytics-8.3.105.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
256
- ultralytics-8.3.105.dist-info/METADATA,sha256=HYewTGSUpFpPREiAy_L5PYIA00RbBfj5jbm7miunRc0,37355
257
- ultralytics-8.3.105.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
258
- ultralytics-8.3.105.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
259
- ultralytics-8.3.105.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
260
- ultralytics-8.3.105.dist-info/RECORD,,
246
+ ultralytics-8.3.106.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
247
+ ultralytics-8.3.106.dist-info/METADATA,sha256=ljT7_fUugMTOUfhqXUXTuqgKPnVa-YBnNYzPJVZfizc,37355
248
+ ultralytics-8.3.106.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
249
+ ultralytics-8.3.106.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
250
+ ultralytics-8.3.106.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
251
+ ultralytics-8.3.106.dist-info/RECORD,,
tests/__init__.py DELETED
@@ -1,22 +0,0 @@
1
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
-
3
- from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks
4
-
5
- # Constants used in tests
6
- MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt" # test spaces in path
7
- CFG = "yolo11n.yaml"
8
- SOURCE = ASSETS / "bus.jpg"
9
- SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]
10
- TMP = (ROOT / "../tests/tmp").resolve() # temp directory for test files
11
- CUDA_IS_AVAILABLE = checks.cuda_is_available()
12
- CUDA_DEVICE_COUNT = checks.cuda_device_count()
13
-
14
- __all__ = (
15
- "MODEL",
16
- "CFG",
17
- "SOURCE",
18
- "SOURCES_LIST",
19
- "TMP",
20
- "CUDA_IS_AVAILABLE",
21
- "CUDA_DEVICE_COUNT",
22
- )
tests/conftest.py DELETED
@@ -1,83 +0,0 @@
1
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
-
3
- import shutil
4
- from pathlib import Path
5
-
6
- from tests import TMP
7
-
8
-
9
- def pytest_addoption(parser):
10
- """
11
- Add custom command-line options to pytest.
12
-
13
- Args:
14
- parser (pytest.config.Parser): The pytest parser object for adding custom command-line options.
15
-
16
- Returns:
17
- (None)
18
- """
19
- parser.addoption("--slow", action="store_true", default=False, help="Run slow tests")
20
-
21
-
22
- def pytest_collection_modifyitems(config, items):
23
- """
24
- Modify the list of test items to exclude tests marked as slow if the --slow option is not specified.
25
-
26
- Args:
27
- config (pytest.config.Config): The pytest configuration object that provides access to command-line options.
28
- items (list): The list of collected pytest item objects to be modified based on the presence of --slow option.
29
-
30
- Returns:
31
- (None): The function modifies the 'items' list in place.
32
- """
33
- if not config.getoption("--slow"):
34
- # Remove the item entirely from the list of test items if it's marked as 'slow'
35
- items[:] = [item for item in items if "slow" not in item.keywords]
36
-
37
-
38
- def pytest_sessionstart(session):
39
- """
40
- Initialize session configurations for pytest.
41
-
42
- This function is automatically called by pytest after the 'Session' object has been created but before performing
43
- test collection. It sets the initial seeds and prepares the temporary directory for the test session.
44
-
45
- Args:
46
- session (pytest.Session): The pytest session object.
47
-
48
- Returns:
49
- (None)
50
- """
51
- from ultralytics.utils.torch_utils import init_seeds
52
-
53
- init_seeds()
54
- shutil.rmtree(TMP, ignore_errors=True) # delete any existing tests/tmp directory
55
- TMP.mkdir(parents=True, exist_ok=True) # create a new empty directory
56
-
57
-
58
- def pytest_terminal_summary(terminalreporter, exitstatus, config):
59
- """
60
- Cleanup operations after pytest session.
61
-
62
- This function is automatically called by pytest at the end of the entire test session. It removes certain files
63
- and directories used during testing.
64
-
65
- Args:
66
- terminalreporter (pytest.terminal.TerminalReporter): The terminal reporter object used for terminal output.
67
- exitstatus (int): The exit status of the test run.
68
- config (pytest.config.Config): The pytest config object.
69
-
70
- Returns:
71
- (None)
72
- """
73
- from ultralytics.utils import WEIGHTS_DIR
74
-
75
- # Remove files
76
- models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
77
- for file in ["decelera_portrait_min.mov", "bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
78
- Path(file).unlink(missing_ok=True)
79
-
80
- # Remove directories
81
- models = [path for x in ["*.mlpackage", "*_openvino_model"] for path in WEIGHTS_DIR.rglob(x)]
82
- for directory in [WEIGHTS_DIR / "path with spaces", TMP.parents[1] / ".pytest_cache", TMP] + models:
83
- shutil.rmtree(directory, ignore_errors=True)
tests/test_cli.py DELETED
@@ -1,124 +0,0 @@
1
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
-
3
- import subprocess
4
-
5
- import pytest
6
- from PIL import Image
7
-
8
- from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE
9
- from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
10
- from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
11
- from ultralytics.utils.torch_utils import TORCH_1_9
12
-
13
- # Constants
14
- TASK_MODEL_DATA = [(task, WEIGHTS_DIR / TASK2MODEL[task], TASK2DATA[task]) for task in TASKS]
15
- MODELS = [WEIGHTS_DIR / TASK2MODEL[task] for task in TASKS]
16
-
17
-
18
- def run(cmd: str) -> None:
19
- """Execute a shell command using subprocess."""
20
- subprocess.run(cmd.split(), check=True)
21
-
22
-
23
- def test_special_modes() -> None:
24
- """Test various special command-line modes for YOLO functionality."""
25
- run("yolo help")
26
- run("yolo checks")
27
- run("yolo version")
28
- run("yolo settings reset")
29
- run("yolo cfg")
30
-
31
-
32
- @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
33
- def test_train(task: str, model: str, data: str) -> None:
34
- """Test YOLO training for different tasks, models, and datasets."""
35
- run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 cache=disk")
36
-
37
-
38
- @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
39
- def test_val(task: str, model: str, data: str) -> None:
40
- """Test YOLO validation process for specified task, model, and data using a shell command."""
41
- run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json")
42
-
43
-
44
- @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
45
- def test_predict(task: str, model: str, data: str) -> None:
46
- """Test YOLO prediction on provided sample assets for specified task and model."""
47
- run(f"yolo predict model={model} source={ASSETS} imgsz=32 save save_crop save_txt")
48
-
49
-
50
- @pytest.mark.parametrize("model", MODELS)
51
- def test_export(model: str) -> None:
52
- """Test exporting a YOLO model to TorchScript format."""
53
- run(f"yolo export model={model} format=torchscript imgsz=32")
54
-
55
-
56
- def test_rtdetr(task: str = "detect", model: str = "yolov8n-rtdetr.yaml", data: str = "coco8.yaml") -> None:
57
- """Test the RTDETR functionality within Ultralytics for detection tasks using specified model and data."""
58
- # Warning: must use imgsz=640 (note also add coma, spaces, fraction=0.25 args to test single-image training)
59
- run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25") # spaces
60
- run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
61
- if TORCH_1_9:
62
- weights = WEIGHTS_DIR / "rtdetr-l.pt"
63
- run(f"yolo predict {task} model={weights} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
64
-
65
-
66
- @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
67
- def test_fastsam(
68
- task: str = "segment", model: str = WEIGHTS_DIR / "FastSAM-s.pt", data: str = "coco8-seg.yaml"
69
- ) -> None:
70
- """Test FastSAM model for segmenting objects in images using various prompts within Ultralytics."""
71
- source = ASSETS / "bus.jpg"
72
-
73
- run(f"yolo segment val {task} model={model} data={data} imgsz=32")
74
- run(f"yolo segment predict model={model} source={source} imgsz=32 save save_crop save_txt")
75
-
76
- from ultralytics import FastSAM
77
- from ultralytics.models.sam import Predictor
78
-
79
- # Create a FastSAM model
80
- sam_model = FastSAM(model) # or FastSAM-x.pt
81
-
82
- # Run inference on an image
83
- for s in (source, Image.open(source)):
84
- everything_results = sam_model(s, device="cpu", retina_masks=True, imgsz=320, conf=0.4, iou=0.9)
85
-
86
- # Remove small regions
87
- new_masks, _ = Predictor.remove_small_regions(everything_results[0].masks.data, min_area=20)
88
-
89
- # Run inference with bboxes and points and texts prompt at the same time
90
- sam_model(source, bboxes=[439, 437, 524, 709], points=[[200, 200]], labels=[1], texts="a photo of a dog")
91
-
92
-
93
- def test_mobilesam() -> None:
94
- """Test MobileSAM segmentation with point prompts using Ultralytics."""
95
- from ultralytics import SAM
96
-
97
- # Load the model
98
- model = SAM(WEIGHTS_DIR / "mobile_sam.pt")
99
-
100
- # Source
101
- source = ASSETS / "zidane.jpg"
102
-
103
- # Predict a segment based on a 1D point prompt and 1D labels.
104
- model.predict(source, points=[900, 370], labels=[1])
105
-
106
- # Predict a segment based on 3D points and 2D labels (multiple points per object).
107
- model.predict(source, points=[[[900, 370], [1000, 100]]], labels=[[1, 1]])
108
-
109
- # Predict a segment based on a box prompt
110
- model.predict(source, bboxes=[439, 437, 524, 709], save=True)
111
-
112
- # Predict all
113
- # model(source)
114
-
115
-
116
- # Slow Tests -----------------------------------------------------------------------------------------------------------
117
- @pytest.mark.slow
118
- @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
119
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
120
- @pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason="DDP is not available")
121
- def test_train_gpu(task: str, model: str, data: str) -> None:
122
- """Test YOLO training on GPU(s) for various tasks and models."""
123
- run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0") # single GPU
124
- run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0,1") # multi GPU
tests/test_cuda.py DELETED
@@ -1,164 +0,0 @@
1
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
-
3
- from itertools import product
4
- from pathlib import Path
5
-
6
- import pytest
7
- import torch
8
-
9
- from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
10
- from ultralytics import YOLO
11
- from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
12
- from ultralytics.utils import ASSETS, WEIGHTS_DIR
13
- from ultralytics.utils.checks import check_amp
14
-
15
-
16
- def test_checks():
17
- """Validate CUDA settings against torch CUDA functions."""
18
- assert torch.cuda.is_available() == CUDA_IS_AVAILABLE
19
- assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
20
-
21
-
22
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
23
- def test_amp():
24
- """Test AMP training checks."""
25
- model = YOLO("yolo11n.pt").model.cuda()
26
- assert check_amp(model)
27
-
28
-
29
- @pytest.mark.slow
30
- @pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
31
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
32
- @pytest.mark.parametrize(
33
- "task, dynamic, int8, half, batch",
34
- [ # generate all combinations but exclude those where both int8 and half are True
35
- (task, dynamic, int8, half, batch)
36
- # Note: tests reduced below pending compute availability expansion as GPU CI runner utilization is high
37
- # for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
38
- for task, dynamic, int8, half, batch in product(TASKS, [True], [True], [False], [2])
39
- if not (int8 and half) # exclude cases where both int8 and half are True
40
- ],
41
- )
42
- def test_export_engine_matrix(task, dynamic, int8, half, batch):
43
- """
44
- Test YOLO model export to TensorRT format for various configurations and run inference.
45
-
46
- Args:
47
- task (str): Task type like 'detect', 'segment', etc.
48
- dynamic (bool): Whether to use dynamic input size.
49
- int8 (bool): Whether to use INT8 precision.
50
- half (bool): Whether to use FP16 precision.
51
- batch (int): Batch size for export.
52
- """
53
- file = YOLO(TASK2MODEL[task]).export(
54
- format="engine",
55
- imgsz=32,
56
- dynamic=dynamic,
57
- int8=int8,
58
- half=half,
59
- batch=batch,
60
- data=TASK2DATA[task],
61
- workspace=1, # reduce workspace GB for less resource utilization during testing
62
- simplify=True, # use 'onnxslim'
63
- )
64
- YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32) # exported model inference
65
- Path(file).unlink() # cleanup
66
- Path(file).with_suffix(".cache").unlink() if int8 else None # cleanup INT8 cache
67
-
68
-
69
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
70
- def test_train():
71
- """Test model training on a minimal dataset using available CUDA devices."""
72
- device = 0 if CUDA_DEVICE_COUNT == 1 else [0, 1]
73
- YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device) # requires imgsz>=64
74
-
75
-
76
- @pytest.mark.slow
77
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
78
- def test_predict_multiple_devices():
79
- """Validate model prediction consistency across CPU and CUDA devices."""
80
- model = YOLO("yolo11n.pt")
81
- model = model.cpu()
82
- assert str(model.device) == "cpu"
83
- _ = model(SOURCE) # CPU inference
84
- assert str(model.device) == "cpu"
85
-
86
- model = model.to("cuda:0")
87
- assert str(model.device) == "cuda:0"
88
- _ = model(SOURCE) # CUDA inference
89
- assert str(model.device) == "cuda:0"
90
-
91
- model = model.cpu()
92
- assert str(model.device) == "cpu"
93
- _ = model(SOURCE) # CPU inference
94
- assert str(model.device) == "cpu"
95
-
96
- model = model.cuda()
97
- assert str(model.device) == "cuda:0"
98
- _ = model(SOURCE) # CUDA inference
99
- assert str(model.device) == "cuda:0"
100
-
101
-
102
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
103
- def test_autobatch():
104
- """Check optimal batch size for YOLO model training using autobatch utility."""
105
- from ultralytics.utils.autobatch import check_train_batch_size
106
-
107
- check_train_batch_size(YOLO(MODEL).model.cuda(), imgsz=128, amp=True)
108
-
109
-
110
- @pytest.mark.slow
111
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
112
- def test_utils_benchmarks():
113
- """Profile YOLO models for performance benchmarks."""
114
- from ultralytics.utils.benchmarks import ProfileModels
115
-
116
- # Pre-export a dynamic engine model to use dynamic inference
117
- YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
118
- ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
119
-
120
-
121
- @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
122
- def test_predict_sam():
123
- """Test SAM model predictions using different prompts, including bounding boxes and point annotations."""
124
- from ultralytics import SAM
125
- from ultralytics.models.sam import Predictor as SAMPredictor
126
-
127
- # Load a model
128
- model = SAM(WEIGHTS_DIR / "sam2.1_b.pt")
129
-
130
- # Display model information (optional)
131
- model.info()
132
-
133
- # Run inference
134
- model(SOURCE, device=0)
135
-
136
- # Run inference with bboxes prompt
137
- model(SOURCE, bboxes=[439, 437, 524, 709], device=0)
138
-
139
- # Run inference with no labels
140
- model(ASSETS / "zidane.jpg", points=[900, 370], device=0)
141
-
142
- # Run inference with 1D points and 1D labels
143
- model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=0)
144
-
145
- # Run inference with 2D points and 1D labels
146
- model(ASSETS / "zidane.jpg", points=[[900, 370]], labels=[1], device=0)
147
-
148
- # Run inference with multiple 2D points and 1D labels
149
- model(ASSETS / "zidane.jpg", points=[[400, 370], [900, 370]], labels=[1, 1], device=0)
150
-
151
- # Run inference with 3D points and 2D labels (multiple points per object)
152
- model(ASSETS / "zidane.jpg", points=[[[900, 370], [1000, 100]]], labels=[[1, 1]], device=0)
153
-
154
- # Create SAMPredictor
155
- overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
156
- predictor = SAMPredictor(overrides=overrides)
157
-
158
- # Set image
159
- predictor.set_image(ASSETS / "zidane.jpg") # set with image file
160
- # predictor(bboxes=[439, 437, 524, 709])
161
- # predictor(points=[900, 370], labels=[1])
162
-
163
- # Reset image
164
- predictor.reset_image()
tests/test_engine.py DELETED
@@ -1,131 +0,0 @@
1
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
-
3
- import sys
4
- from unittest import mock
5
-
6
- from tests import MODEL
7
- from ultralytics import YOLO
8
- from ultralytics.cfg import get_cfg
9
- from ultralytics.engine.exporter import Exporter
10
- from ultralytics.models.yolo import classify, detect, segment
11
- from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
12
-
13
-
14
- def test_func(*args): # noqa
15
- """Test function callback for evaluating YOLO model performance metrics."""
16
- print("callback test passed")
17
-
18
-
19
- def test_export():
20
- """Tests the model exporting function by adding a callback and asserting its execution."""
21
- exporter = Exporter()
22
- exporter.add_callback("on_export_start", test_func)
23
- assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
24
- f = exporter(model=YOLO("yolo11n.yaml").model)
25
- YOLO(f)(ASSETS) # exported model inference
26
-
27
-
28
- def test_detect():
29
- """Test YOLO object detection training, validation, and prediction functionality."""
30
- overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False}
31
- cfg = get_cfg(DEFAULT_CFG)
32
- cfg.data = "coco8.yaml"
33
- cfg.imgsz = 32
34
-
35
- # Trainer
36
- trainer = detect.DetectionTrainer(overrides=overrides)
37
- trainer.add_callback("on_train_start", test_func)
38
- assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
39
- trainer.train()
40
-
41
- # Validator
42
- val = detect.DetectionValidator(args=cfg)
43
- val.add_callback("on_val_start", test_func)
44
- assert test_func in val.callbacks["on_val_start"], "callback test failed"
45
- val(model=trainer.best) # validate best.pt
46
-
47
- # Predictor
48
- pred = detect.DetectionPredictor(overrides={"imgsz": [64, 64]})
49
- pred.add_callback("on_predict_start", test_func)
50
- assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
51
- # Confirm there is no issue with sys.argv being empty.
52
- with mock.patch.object(sys, "argv", []):
53
- result = pred(source=ASSETS, model=MODEL)
54
- assert len(result), "predictor test failed"
55
-
56
- overrides["resume"] = trainer.last
57
- trainer = detect.DetectionTrainer(overrides=overrides)
58
- try:
59
- trainer.train()
60
- except Exception as e:
61
- print(f"Expected exception caught: {e}")
62
- return
63
-
64
- Exception("Resume test failed!")
65
-
66
-
67
- def test_segment():
68
- """Tests image segmentation training, validation, and prediction pipelines using YOLO models."""
69
- overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
70
- cfg = get_cfg(DEFAULT_CFG)
71
- cfg.data = "coco8-seg.yaml"
72
- cfg.imgsz = 32
73
- # YOLO(CFG_SEG).train(**overrides) # works
74
-
75
- # Trainer
76
- trainer = segment.SegmentationTrainer(overrides=overrides)
77
- trainer.add_callback("on_train_start", test_func)
78
- assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
79
- trainer.train()
80
-
81
- # Validator
82
- val = segment.SegmentationValidator(args=cfg)
83
- val.add_callback("on_val_start", test_func)
84
- assert test_func in val.callbacks["on_val_start"], "callback test failed"
85
- val(model=trainer.best) # validate best.pt
86
-
87
- # Predictor
88
- pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
89
- pred.add_callback("on_predict_start", test_func)
90
- assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
91
- result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt")
92
- assert len(result), "predictor test failed"
93
-
94
- # Test resume
95
- overrides["resume"] = trainer.last
96
- trainer = segment.SegmentationTrainer(overrides=overrides)
97
- try:
98
- trainer.train()
99
- except Exception as e:
100
- print(f"Expected exception caught: {e}")
101
- return
102
-
103
- Exception("Resume test failed!")
104
-
105
-
106
- def test_classify():
107
- """Test image classification including training, validation, and prediction phases."""
108
- overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
109
- cfg = get_cfg(DEFAULT_CFG)
110
- cfg.data = "imagenet10"
111
- cfg.imgsz = 32
112
- # YOLO(CFG_SEG).train(**overrides) # works
113
-
114
- # Trainer
115
- trainer = classify.ClassificationTrainer(overrides=overrides)
116
- trainer.add_callback("on_train_start", test_func)
117
- assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
118
- trainer.train()
119
-
120
- # Validator
121
- val = classify.ClassificationValidator(args=cfg)
122
- val.add_callback("on_val_start", test_func)
123
- assert test_func in val.callbacks["on_val_start"], "callback test failed"
124
- val(model=trainer.best)
125
-
126
- # Predictor
127
- pred = classify.ClassificationPredictor(overrides={"imgsz": [64, 64]})
128
- pred.add_callback("on_predict_start", test_func)
129
- assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
130
- result = pred(source=ASSETS, model=trainer.best)
131
- assert len(result), "predictor test failed"