ultralytics 8.2.19__py3-none-any.whl → 8.2.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

tests/test_exports.py ADDED
@@ -0,0 +1,186 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+import shutil
+import uuid
+from itertools import product
+from pathlib import Path
+
+import pytest
+
+from ultralytics import YOLO
+from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+from ultralytics.utils import (
+    IS_RASPBERRYPI,
+    LINUX,
+    MACOS,
+    WINDOWS,
+    Retry,
+    checks,
+)
+from ultralytics.utils.torch_utils import TORCH_1_9, TORCH_1_13
+from tests import MODEL, SOURCE
+
+
+def test_export_torchscript():
+    """Test YOLO exports to TorchScript format."""
+    file = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+def test_export_onnx():
+    """Test YOLO exports to ONNX format."""
+    file = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="OpenVINO not supported in Python 3.12")
+@pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
+def test_export_openvino():
+    """Test YOLO exports to OpenVINO format."""
+    file = YOLO(MODEL).export(format="openvino", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="OpenVINO not supported in Python 3.12")
+@pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_openvino_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO exports to OpenVINO format."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="openvino",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+        data=TASK2DATA[task],
+    )
+    if WINDOWS:
+        # Use unique filenames due to Windows file permissions bug possibly due to latent threaded use
+        # See https://github.com/ultralytics/ultralytics/actions/runs/8957949304/job/24601616830?pr=10423
+        file = Path(file)
+        file = file.rename(file.with_stem(f"{file.stem}-{uuid.uuid4()}"))
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+    with Retry(times=3, delay=1):  # retry in case of potential lingering multi-threaded file usage errors
+        shutil.rmtree(file)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("task, dynamic, int8, half, batch", product(TASKS, [True, False], [False], [False], [1, 2]))
+def test_export_onnx_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO exports to ONNX format."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="onnx",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+    Path(file).unlink()  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("task, dynamic, int8, half, batch", product(TASKS, [False], [False], [False], [1, 2]))
+def test_export_torchscript_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO exports to TorchScript format."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="torchscript",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+    )
+    YOLO(file)([SOURCE] * 3, imgsz=64 if dynamic else 32)  # exported model inference at batch=3
+    Path(file).unlink()  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not MACOS, reason="CoreML inference only supported on macOS")
+@pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="CoreML not supported in Python 3.12")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        for task, dynamic, int8, half, batch in product(TASKS, [False], [True, False], [True, False], [1])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_coreml_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO exports to CoreML format."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="coreml",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
+    shutil.rmtree(file)  # cleanup
+
+
+@pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
+@pytest.mark.skipif(WINDOWS, reason="CoreML not supported on Windows")  # RuntimeError: BlobWriter not loaded
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="CoreML not supported on Raspberry Pi")
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="CoreML not supported in Python 3.12")
+def test_export_coreml():
+    """Test YOLO exports to CoreML format."""
+    if MACOS:
+        file = YOLO(MODEL).export(format="coreml", imgsz=32)
+        YOLO(file)(SOURCE, imgsz=32)  # model prediction only supported on macOS for nms=False models
+    else:
+        YOLO(MODEL).export(format="coreml", nms=True, imgsz=32)
+
+
+@pytest.mark.skipif(not LINUX, reason="Test disabled as TF suffers from install conflicts on Windows and macOS")
+def test_export_tflite():
+    """
+    Test YOLO exports to TFLite format.
+
+    Note TF suffers from install conflicts on Windows and macOS.
+    """
+    model = YOLO(MODEL)
+    file = model.export(format="tflite", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)
+
+
+@pytest.mark.skipif(True, reason="Test disabled")
+@pytest.mark.skipif(not LINUX, reason="TF suffers from install conflicts on Windows and macOS")
+def test_export_pb():
+    """
+    Test YOLO exports to *.pb format.
+
+    Note TF suffers from install conflicts on Windows and macOS.
+    """
+    model = YOLO(MODEL)
+    file = model.export(format="pb", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)
+
+
+@pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirements conflict.")
+def test_export_paddle():
+    """
+    Test YOLO exports to Paddle format.
+
+    Note Paddle protobuf requirements conflicting with onnx protobuf requirements.
+    """
+    YOLO(MODEL).export(format="paddle", imgsz=32)
+
+
+@pytest.mark.slow
+def test_export_ncnn():
+    """Test YOLO exports to NCNN format."""
+    file = YOLO(MODEL).export(format="ncnn", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference

tests/test_integrations.py ADDED
@@ -0,0 +1,145 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+import contextlib
+import os
+import subprocess
+import time
+from pathlib import Path
+
+import pytest
+
+from ultralytics import YOLO, download
+from ultralytics.utils import DATASETS_DIR, SETTINGS
+from ultralytics.utils.checks import check_requirements
+
+from tests import MODEL, SOURCE, TMP
+
+
+@pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
+def test_model_ray_tune():
+    """Tune YOLO model with Ray optimization library."""
+    YOLO("yolov8n-cls.yaml").tune(
+        use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
+    )
+
+
+@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
+def test_mlflow():
+    """Test training with MLflow tracking enabled."""
+    SETTINGS["mlflow"] = True
+    YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
+
+
+@pytest.mark.skipif(True, reason="Test failing in scheduled CI https://github.com/ultralytics/ultralytics/pull/8868")
+@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
+def test_mlflow_keep_run_active():
+    import mlflow
+
+    """Test training with MLflow tracking enabled."""
+    SETTINGS["mlflow"] = True
+    run_name = "Test Run"
+    os.environ["MLFLOW_RUN"] = run_name
+
+    # Test with MLFLOW_KEEP_RUN_ACTIVE=True
+    os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
+    YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    status = mlflow.active_run().info.status
+    assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"
+
+    run_id = mlflow.active_run().info.run_id
+
+    # Test with MLFLOW_KEEP_RUN_ACTIVE=False
+    os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
+    YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    status = mlflow.get_run(run_id=run_id).info.status
+    assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"
+
+    # Test with MLFLOW_KEEP_RUN_ACTIVE not set
+    os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
+    YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    status = mlflow.get_run(run_id=run_id).info.status
+    assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
+
+
+@pytest.mark.skipif(not check_requirements("tritonclient", install=False), reason="tritonclient[all] not installed")
+def test_triton():
+    """Test NVIDIA Triton Server functionalities."""
+    check_requirements("tritonclient[all]")
+    from tritonclient.http import InferenceServerClient  # noqa
+
+    # Create variables
+    model_name = "yolo"
+    triton_repo = TMP / "triton_repo"  # Triton repo path
+    triton_model = triton_repo / model_name  # Triton model path
+
+    # Export model to ONNX
+    f = YOLO(MODEL).export(format="onnx", dynamic=True)
+
+    # Prepare Triton repo
+    (triton_model / "1").mkdir(parents=True, exist_ok=True)
+    Path(f).rename(triton_model / "1" / "model.onnx")
+    (triton_model / "config.pbtxt").touch()
+
+    # Define image https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
+    tag = "nvcr.io/nvidia/tritonserver:23.09-py3"  # 6.4 GB
+
+    # Pull the image
+    subprocess.call(f"docker pull {tag}", shell=True)
+
+    # Run the Triton server and capture the container ID
+    container_id = (
+        subprocess.check_output(
+            f"docker run -d --rm -v {triton_repo}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models",
+            shell=True,
+        )
+        .decode("utf-8")
+        .strip()
+    )
+
+    # Wait for the Triton server to start
+    triton_client = InferenceServerClient(url="localhost:8000", verbose=False, ssl=False)
+
+    # Wait until model is ready
+    for _ in range(10):
+        with contextlib.suppress(Exception):
+            assert triton_client.is_model_ready(model_name)
+            break
+        time.sleep(1)
+
+    # Check Triton inference
+    YOLO(f"http://localhost:8000/{model_name}", "detect")(SOURCE)  # exported model inference
+
+    # Kill and remove the container at the end of the test
+    subprocess.call(f"docker kill {container_id}", shell=True)
+
+
+@pytest.mark.skipif(not check_requirements("pycocotools", install=False), reason="pycocotools not installed")
+def test_pycocotools():
+    """Validate model predictions using pycocotools."""
+    from ultralytics.models.yolo.detect import DetectionValidator
+    from ultralytics.models.yolo.pose import PoseValidator
+    from ultralytics.models.yolo.segment import SegmentationValidator
+
+    # Download annotations after each dataset downloads first
+    url = "https://github.com/ultralytics/assets/releases/download/v8.2.0/"
+
+    args = {"model": "yolov8n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
+    validator = DetectionValidator(args=args)
+    validator()
+    validator.is_coco = True
+    download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
+    _ = validator.eval_json(validator.stats)
+
+    args = {"model": "yolov8n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
+    validator = SegmentationValidator(args=args)
+    validator()
+    validator.is_coco = True
+    download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
+    _ = validator.eval_json(validator.stats)
+
+    args = {"model": "yolov8n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
+    validator = PoseValidator(args=args)
+    validator()
+    validator.is_coco = True
+    download(f"{url}person_keypoints_val2017.json", dir=DATASETS_DIR / "coco8-pose/annotations")
+    _ = validator.eval_json(validator.stats)
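
For context, a minimal sketch of the export-and-inference round trip that the new export tests exercise, assuming ultralytics is installed locally; "yolov8n.pt" and "bus.jpg" are placeholder paths standing in for the tests' MODEL and SOURCE fixtures:

from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # placeholder weights; the tests use tests.MODEL
onnx_file = model.export(format="onnx", dynamic=True, imgsz=32)  # same export call as test_export_onnx
YOLO(onnx_file)("bus.jpg", imgsz=32)  # run the exported model on a placeholder image; the tests use tests.SOURCE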