bplusplus-1.1.0-py3-none-any.whl → bplusplus-1.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of bplusplus might be problematic.

Files changed (97)
  1. bplusplus/__init__.py +4 -2
  2. bplusplus/collect.py +69 -5
  3. bplusplus/hierarchical/test.py +670 -0
  4. bplusplus/hierarchical/train.py +676 -0
  5. bplusplus/prepare.py +228 -64
  6. bplusplus/resnet/test.py +473 -0
  7. bplusplus/resnet/train.py +329 -0
  8. bplusplus-1.2.0.dist-info/METADATA +249 -0
  9. bplusplus-1.2.0.dist-info/RECORD +12 -0
  10. bplusplus/yolov5detect/__init__.py +0 -1
  11. bplusplus/yolov5detect/detect.py +0 -444
  12. bplusplus/yolov5detect/export.py +0 -1530
  13. bplusplus/yolov5detect/insect.yaml +0 -8
  14. bplusplus/yolov5detect/models/__init__.py +0 -0
  15. bplusplus/yolov5detect/models/common.py +0 -1109
  16. bplusplus/yolov5detect/models/experimental.py +0 -130
  17. bplusplus/yolov5detect/models/hub/anchors.yaml +0 -56
  18. bplusplus/yolov5detect/models/hub/yolov3-spp.yaml +0 -52
  19. bplusplus/yolov5detect/models/hub/yolov3-tiny.yaml +0 -42
  20. bplusplus/yolov5detect/models/hub/yolov3.yaml +0 -52
  21. bplusplus/yolov5detect/models/hub/yolov5-bifpn.yaml +0 -49
  22. bplusplus/yolov5detect/models/hub/yolov5-fpn.yaml +0 -43
  23. bplusplus/yolov5detect/models/hub/yolov5-p2.yaml +0 -55
  24. bplusplus/yolov5detect/models/hub/yolov5-p34.yaml +0 -42
  25. bplusplus/yolov5detect/models/hub/yolov5-p6.yaml +0 -57
  26. bplusplus/yolov5detect/models/hub/yolov5-p7.yaml +0 -68
  27. bplusplus/yolov5detect/models/hub/yolov5-panet.yaml +0 -49
  28. bplusplus/yolov5detect/models/hub/yolov5l6.yaml +0 -61
  29. bplusplus/yolov5detect/models/hub/yolov5m6.yaml +0 -61
  30. bplusplus/yolov5detect/models/hub/yolov5n6.yaml +0 -61
  31. bplusplus/yolov5detect/models/hub/yolov5s-LeakyReLU.yaml +0 -50
  32. bplusplus/yolov5detect/models/hub/yolov5s-ghost.yaml +0 -49
  33. bplusplus/yolov5detect/models/hub/yolov5s-transformer.yaml +0 -49
  34. bplusplus/yolov5detect/models/hub/yolov5s6.yaml +0 -61
  35. bplusplus/yolov5detect/models/hub/yolov5x6.yaml +0 -61
  36. bplusplus/yolov5detect/models/segment/yolov5l-seg.yaml +0 -49
  37. bplusplus/yolov5detect/models/segment/yolov5m-seg.yaml +0 -49
  38. bplusplus/yolov5detect/models/segment/yolov5n-seg.yaml +0 -49
  39. bplusplus/yolov5detect/models/segment/yolov5s-seg.yaml +0 -49
  40. bplusplus/yolov5detect/models/segment/yolov5x-seg.yaml +0 -49
  41. bplusplus/yolov5detect/models/tf.py +0 -797
  42. bplusplus/yolov5detect/models/yolo.py +0 -495
  43. bplusplus/yolov5detect/models/yolov5l.yaml +0 -49
  44. bplusplus/yolov5detect/models/yolov5m.yaml +0 -49
  45. bplusplus/yolov5detect/models/yolov5n.yaml +0 -49
  46. bplusplus/yolov5detect/models/yolov5s.yaml +0 -49
  47. bplusplus/yolov5detect/models/yolov5x.yaml +0 -49
  48. bplusplus/yolov5detect/utils/__init__.py +0 -97
  49. bplusplus/yolov5detect/utils/activations.py +0 -134
  50. bplusplus/yolov5detect/utils/augmentations.py +0 -448
  51. bplusplus/yolov5detect/utils/autoanchor.py +0 -175
  52. bplusplus/yolov5detect/utils/autobatch.py +0 -70
  53. bplusplus/yolov5detect/utils/aws/__init__.py +0 -0
  54. bplusplus/yolov5detect/utils/aws/mime.sh +0 -26
  55. bplusplus/yolov5detect/utils/aws/resume.py +0 -41
  56. bplusplus/yolov5detect/utils/aws/userdata.sh +0 -27
  57. bplusplus/yolov5detect/utils/callbacks.py +0 -72
  58. bplusplus/yolov5detect/utils/dataloaders.py +0 -1385
  59. bplusplus/yolov5detect/utils/docker/Dockerfile +0 -73
  60. bplusplus/yolov5detect/utils/docker/Dockerfile-arm64 +0 -40
  61. bplusplus/yolov5detect/utils/docker/Dockerfile-cpu +0 -42
  62. bplusplus/yolov5detect/utils/downloads.py +0 -136
  63. bplusplus/yolov5detect/utils/flask_rest_api/README.md +0 -70
  64. bplusplus/yolov5detect/utils/flask_rest_api/example_request.py +0 -17
  65. bplusplus/yolov5detect/utils/flask_rest_api/restapi.py +0 -49
  66. bplusplus/yolov5detect/utils/general.py +0 -1294
  67. bplusplus/yolov5detect/utils/google_app_engine/Dockerfile +0 -25
  68. bplusplus/yolov5detect/utils/google_app_engine/additional_requirements.txt +0 -6
  69. bplusplus/yolov5detect/utils/google_app_engine/app.yaml +0 -16
  70. bplusplus/yolov5detect/utils/loggers/__init__.py +0 -476
  71. bplusplus/yolov5detect/utils/loggers/clearml/README.md +0 -222
  72. bplusplus/yolov5detect/utils/loggers/clearml/__init__.py +0 -0
  73. bplusplus/yolov5detect/utils/loggers/clearml/clearml_utils.py +0 -230
  74. bplusplus/yolov5detect/utils/loggers/clearml/hpo.py +0 -90
  75. bplusplus/yolov5detect/utils/loggers/comet/README.md +0 -250
  76. bplusplus/yolov5detect/utils/loggers/comet/__init__.py +0 -551
  77. bplusplus/yolov5detect/utils/loggers/comet/comet_utils.py +0 -151
  78. bplusplus/yolov5detect/utils/loggers/comet/hpo.py +0 -126
  79. bplusplus/yolov5detect/utils/loggers/comet/optimizer_config.json +0 -135
  80. bplusplus/yolov5detect/utils/loggers/wandb/__init__.py +0 -0
  81. bplusplus/yolov5detect/utils/loggers/wandb/wandb_utils.py +0 -210
  82. bplusplus/yolov5detect/utils/loss.py +0 -259
  83. bplusplus/yolov5detect/utils/metrics.py +0 -381
  84. bplusplus/yolov5detect/utils/plots.py +0 -517
  85. bplusplus/yolov5detect/utils/segment/__init__.py +0 -0
  86. bplusplus/yolov5detect/utils/segment/augmentations.py +0 -100
  87. bplusplus/yolov5detect/utils/segment/dataloaders.py +0 -366
  88. bplusplus/yolov5detect/utils/segment/general.py +0 -160
  89. bplusplus/yolov5detect/utils/segment/loss.py +0 -198
  90. bplusplus/yolov5detect/utils/segment/metrics.py +0 -225
  91. bplusplus/yolov5detect/utils/segment/plots.py +0 -152
  92. bplusplus/yolov5detect/utils/torch_utils.py +0 -482
  93. bplusplus/yolov5detect/utils/triton.py +0 -90
  94. bplusplus-1.1.0.dist-info/METADATA +0 -179
  95. bplusplus-1.1.0.dist-info/RECORD +0 -92
  96. {bplusplus-1.1.0.dist-info → bplusplus-1.2.0.dist-info}/LICENSE +0 -0
  97. {bplusplus-1.1.0.dist-info → bplusplus-1.2.0.dist-info}/WHEEL +0 -0
bplusplus/yolov5detect/export.py
@@ -1,1530 +0,0 @@
1
- # Ultralytics YOLOv5 🚀, AGPL-3.0 license
2
- """
3
- Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
4
-
5
- Format | `export.py --include` | Model
6
- --- | --- | ---
7
- PyTorch | - | yolov5s.pt
8
- TorchScript | `torchscript` | yolov5s.torchscript
9
- ONNX | `onnx` | yolov5s.onnx
10
- OpenVINO | `openvino` | yolov5s_openvino_model/
11
- TensorRT | `engine` | yolov5s.engine
12
- CoreML | `coreml` | yolov5s.mlmodel
13
- TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
14
- TensorFlow GraphDef | `pb` | yolov5s.pb
15
- TensorFlow Lite | `tflite` | yolov5s.tflite
16
- TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
17
- TensorFlow.js | `tfjs` | yolov5s_web_model/
18
- PaddlePaddle | `paddle` | yolov5s_paddle_model/
19
-
20
- Requirements:
21
- $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
22
- $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
23
-
24
- Usage:
25
- $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
26
-
27
- Inference:
28
- $ python detect.py --weights yolov5s.pt # PyTorch
29
- yolov5s.torchscript # TorchScript
30
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
31
- yolov5s_openvino_model # OpenVINO
32
- yolov5s.engine # TensorRT
33
- yolov5s.mlmodel # CoreML (macOS-only)
34
- yolov5s_saved_model # TensorFlow SavedModel
35
- yolov5s.pb # TensorFlow GraphDef
36
- yolov5s.tflite # TensorFlow Lite
37
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
38
- yolov5s_paddle_model # PaddlePaddle
39
-
40
- TensorFlow.js:
41
- $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
42
- $ npm install
43
- $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
44
- $ npm start
45
- """
46
-
47
- import argparse
48
- import contextlib
49
- import json
50
- import os
51
- import platform
52
- import re
53
- import subprocess
54
- import sys
55
- import time
56
- import warnings
57
- from pathlib import Path
58
-
59
- import pandas as pd
60
- import torch
61
- from torch.utils.mobile_optimizer import optimize_for_mobile
62
-
63
- FILE = Path(__file__).resolve()
64
- ROOT = FILE.parents[0] # YOLOv5 root directory
65
- if str(ROOT) not in sys.path:
66
- sys.path.append(str(ROOT)) # add ROOT to PATH
67
- if platform.system() != "Windows":
68
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
69
-
70
- from models.experimental import attempt_load
71
- from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
72
- from utils.dataloaders import LoadImages
73
- from utils.general import (
74
- LOGGER,
75
- Profile,
76
- check_dataset,
77
- check_img_size,
78
- check_requirements,
79
- check_version,
80
- check_yaml,
81
- colorstr,
82
- file_size,
83
- get_default_args,
84
- print_args,
85
- url2file,
86
- yaml_save,
87
- )
88
- from utils.torch_utils import select_device, smart_inference_mode
89
-
90
- MACOS = platform.system() == "Darwin" # macOS environment
91
-
92
-
93
- class iOSModel(torch.nn.Module):
94
- """An iOS-compatible wrapper for YOLOv5 models that normalizes input images based on their dimensions."""
95
-
96
- def __init__(self, model, im):
97
- """
98
- Initializes an iOS compatible model with normalization based on image dimensions.
99
-
100
- Args:
101
- model (torch.nn.Module): The PyTorch model to be adapted for iOS compatibility.
102
- im (torch.Tensor): An input tensor representing a batch of images with shape (B, C, H, W).
103
-
104
- Returns:
105
- None: This method does not return any value.
106
-
107
- Notes:
108
- This initializer configures normalization based on the input image dimensions, which is critical for
109
- ensuring the model's compatibility and proper functionality on iOS devices. The normalization step
110
- involves dividing by the image width if the image is square; otherwise, additional conditions might apply.
111
- """
112
- super().__init__()
113
- b, c, h, w = im.shape # batch, channel, height, width
114
- self.model = model
115
- self.nc = model.nc # number of classes
116
- if w == h:
117
- self.normalize = 1.0 / w
118
- else:
119
- self.normalize = torch.tensor([1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h]) # broadcast (slower, smaller)
120
- # np = model(im)[0].shape[1] # number of points
121
- # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger)
122
-
123
- def forward(self, x):
124
- """
125
- Run a forward pass on the input tensor, returning class confidences and normalized coordinates.
126
-
127
- Args:
128
- x (torch.Tensor): Input tensor containing the image data with shape (batch, channels, height, width).
129
-
130
- Returns:
131
- torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf),
132
- and class probabilities (cls), having shape (N, 4 + 1 + C), where N is the number of predictions,
133
- and C is the number of classes.
134
-
135
- Examples:
136
- ```python
137
- model = iOSModel(pretrained_model, input_image)
138
- output = model.forward(torch_input_tensor)
139
- ```
140
- """
141
- xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
142
- return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4)
143
-
144
-
145
- def export_formats():
146
- r"""
147
- Returns a DataFrame of supported YOLOv5 model export formats and their properties.
148
-
149
- Returns:
150
- pandas.DataFrame: A DataFrame containing supported export formats and their properties. The DataFrame
151
- includes columns for format name, CLI argument suffix, file extension or directory name, and boolean flags
152
- indicating if the export format supports training and detection.
153
-
154
- Examples:
155
- ```python
156
- formats = export_formats()
157
- print(f"Supported export formats:\n{formats}")
158
- ```
159
-
160
- Notes:
161
- The DataFrame contains the following columns:
162
- - Format: The name of the model format (e.g., PyTorch, TorchScript, ONNX, etc.).
163
- - Include Argument: The argument to use with the export script to include this format.
164
- - File Suffix: File extension or directory name associated with the format.
165
- - Supports Training: Whether the format supports training.
166
- - Supports Detection: Whether the format supports detection.
167
- """
168
- x = [
169
- ["PyTorch", "-", ".pt", True, True],
170
- ["TorchScript", "torchscript", ".torchscript", True, True],
171
- ["ONNX", "onnx", ".onnx", True, True],
172
- ["OpenVINO", "openvino", "_openvino_model", True, False],
173
- ["TensorRT", "engine", ".engine", False, True],
174
- ["CoreML", "coreml", ".mlpackage", True, False],
175
- ["TensorFlow SavedModel", "saved_model", "_saved_model", True, True],
176
- ["TensorFlow GraphDef", "pb", ".pb", True, True],
177
- ["TensorFlow Lite", "tflite", ".tflite", True, False],
178
- ["TensorFlow Edge TPU", "edgetpu", "_edgetpu.tflite", False, False],
179
- ["TensorFlow.js", "tfjs", "_web_model", False, False],
180
- ["PaddlePaddle", "paddle", "_paddle_model", True, True],
181
- ]
182
- return pd.DataFrame(x, columns=["Format", "Argument", "Suffix", "CPU", "GPU"])
183
-
184
-
185
- def try_export(inner_func):
186
- """
187
- Log success or failure, execution time, and file size for YOLOv5 model export functions wrapped with @try_export.
188
-
189
- Args:
190
- inner_func (Callable): The model export function to be wrapped by the decorator.
191
-
192
- Returns:
193
- Callable: The wrapped function that logs execution details. When executed, this wrapper function returns either:
194
- - Tuple (str | torch.nn.Module): On success — the file path of the exported model and the model instance.
195
- - Tuple (None, None): On failure — None values indicating export failure.
196
-
197
- Examples:
198
- ```python
199
- @try_export
200
- def export_onnx(model, filepath):
201
- # implementation here
202
- pass
203
-
204
- exported_file, exported_model = export_onnx(yolo_model, 'path/to/save/model.onnx')
205
- ```
206
-
207
- Notes:
208
- For additional requirements and model export formats, refer to the
209
- [Ultralytics YOLOv5 GitHub repository](https://github.com/ultralytics/ultralytics).
210
- """
211
- inner_args = get_default_args(inner_func)
212
-
213
- def outer_func(*args, **kwargs):
214
- """Logs success/failure and execution details of model export functions wrapped with @try_export decorator."""
215
- prefix = inner_args["prefix"]
216
- try:
217
- with Profile() as dt:
218
- f, model = inner_func(*args, **kwargs)
219
- LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)")
220
- return f, model
221
- except Exception as e:
222
- LOGGER.info(f"{prefix} export failure ❌ {dt.t:.1f}s: {e}")
223
- return None, None
224
-
225
- return outer_func
226
-
227
-
228
- @try_export
229
- def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:")):
230
- """
231
- Export a YOLOv5 model to the TorchScript format.
232
-
233
- Args:
234
- model (torch.nn.Module): The YOLOv5 model to be exported.
235
- im (torch.Tensor): Example input tensor to be used for tracing the TorchScript model.
236
- file (Path): File path where the exported TorchScript model will be saved.
237
- optimize (bool): If True, applies optimizations for mobile deployment.
238
- prefix (str): Optional prefix for log messages. Default is 'TorchScript:'.
239
-
240
- Returns:
241
- (str | None, torch.jit.ScriptModule | None): A tuple containing the file path of the exported model
242
- (as a string) and the TorchScript model (as a torch.jit.ScriptModule). If the export fails, both elements
243
- of the tuple will be None.
244
-
245
- Notes:
246
- - This function uses tracing to create the TorchScript model.
247
- - Metadata, including the input image shape, model stride, and class names, is saved in an extra file (`config.txt`)
248
- within the TorchScript model package.
249
- - For mobile optimization, refer to the PyTorch tutorial: https://pytorch.org/tutorials/recipes/mobile_interpreter.html
250
-
251
- Example:
252
- ```python
253
- from pathlib import Path
254
- import torch
255
- from models.experimental import attempt_load
256
- from utils.torch_utils import select_device
257
-
258
- # Load model
259
- weights = 'yolov5s.pt'
260
- device = select_device('')
261
- model = attempt_load(weights, device=device)
262
-
263
- # Example input tensor
264
- im = torch.zeros(1, 3, 640, 640).to(device)
265
-
266
- # Export model
267
- file = Path('yolov5s.torchscript')
268
- export_torchscript(model, im, file, optimize=False)
269
- ```
270
- """
271
- LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...")
272
- f = file.with_suffix(".torchscript")
273
-
274
- ts = torch.jit.trace(model, im, strict=False)
275
- d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
276
- extra_files = {"config.txt": json.dumps(d)} # torch._C.ExtraFilesMap()
277
- if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
278
- optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
279
- else:
280
- ts.save(str(f), _extra_files=extra_files)
281
- return f, None
282
-
283
-
284
- @try_export
285
- def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX:")):
286
- """
287
- Export a YOLOv5 model to ONNX format with dynamic axes support and optional model simplification.
288
-
289
- Args:
290
- model (torch.nn.Module): The YOLOv5 model to be exported.
291
- im (torch.Tensor): A sample input tensor for model tracing, usually the shape is (1, 3, height, width).
292
- file (pathlib.Path | str): The output file path where the ONNX model will be saved.
293
- opset (int): The ONNX opset version to use for export.
294
- dynamic (bool): If True, enables dynamic axes for batch, height, and width dimensions.
295
- simplify (bool): If True, applies ONNX model simplification for optimization.
296
- prefix (str): A prefix string for logging messages, defaults to 'ONNX:'.
297
-
298
- Returns:
299
- tuple[pathlib.Path | str, None]: The path to the saved ONNX model file and None (consistent with decorator).
300
-
301
- Raises:
302
- ImportError: If required libraries for export (e.g., 'onnx', 'onnx-simplifier') are not installed.
303
- AssertionError: If the simplification check fails.
304
-
305
- Notes:
306
- The required packages for this function can be installed via:
307
- ```
308
- pip install onnx onnx-simplifier onnxruntime onnxruntime-gpu
309
- ```
310
-
311
- Example:
312
- ```python
313
- from pathlib import Path
314
- import torch
315
- from models.experimental import attempt_load
316
- from utils.torch_utils import select_device
317
-
318
- # Load model
319
- weights = 'yolov5s.pt'
320
- device = select_device('')
321
- model = attempt_load(weights, map_location=device)
322
-
323
- # Example input tensor
324
- im = torch.zeros(1, 3, 640, 640).to(device)
325
-
326
- # Export model
327
- file_path = Path('yolov5s.onnx')
328
- export_onnx(model, im, file_path, opset=12, dynamic=True, simplify=True)
329
- ```
330
- """
331
- check_requirements("onnx>=1.12.0")
332
- import onnx
333
-
334
- LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__}...")
335
- f = str(file.with_suffix(".onnx"))
336
-
337
- output_names = ["output0", "output1"] if isinstance(model, SegmentationModel) else ["output0"]
338
- if dynamic:
339
- dynamic = {"images": {0: "batch", 2: "height", 3: "width"}} # shape(1,3,640,640)
340
- if isinstance(model, SegmentationModel):
341
- dynamic["output0"] = {0: "batch", 1: "anchors"} # shape(1,25200,85)
342
- dynamic["output1"] = {0: "batch", 2: "mask_height", 3: "mask_width"} # shape(1,32,160,160)
343
- elif isinstance(model, DetectionModel):
344
- dynamic["output0"] = {0: "batch", 1: "anchors"} # shape(1,25200,85)
345
-
346
- torch.onnx.export(
347
- model.cpu() if dynamic else model, # --dynamic only compatible with cpu
348
- im.cpu() if dynamic else im,
349
- f,
350
- verbose=False,
351
- opset_version=opset,
352
- do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
353
- input_names=["images"],
354
- output_names=output_names,
355
- dynamic_axes=dynamic or None,
356
- )
357
-
358
- # Checks
359
- model_onnx = onnx.load(f) # load onnx model
360
- onnx.checker.check_model(model_onnx) # check onnx model
361
-
362
- # Metadata
363
- d = {"stride": int(max(model.stride)), "names": model.names}
364
- for k, v in d.items():
365
- meta = model_onnx.metadata_props.add()
366
- meta.key, meta.value = k, str(v)
367
- onnx.save(model_onnx, f)
368
-
369
- # Simplify
370
- if simplify:
371
- try:
372
- cuda = torch.cuda.is_available()
373
- check_requirements(("onnxruntime-gpu" if cuda else "onnxruntime", "onnxslim"))
374
- import onnxslim
375
-
376
- LOGGER.info(f"{prefix} slimming with onnxslim {onnxslim.__version__}...")
377
- model_onnx = onnxslim.slim(model_onnx)
378
- onnx.save(model_onnx, f)
379
- except Exception as e:
380
- LOGGER.info(f"{prefix} simplifier failure: {e}")
381
- return f, model_onnx
382
-
383
-
384
- @try_export
385
- def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")):
386
- """
387
- Export a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization.
388
-
389
- Args:
390
- file (Path): Path to the output file where the OpenVINO model will be saved.
391
- metadata (dict): Dictionary including model metadata such as names and strides.
392
- half (bool): If True, export the model with FP16 precision.
393
- int8 (bool): If True, export the model with INT8 quantization.
394
- data (str): Path to the dataset YAML file required for INT8 quantization.
395
- prefix (str): Prefix string for logging purposes (default is "OpenVINO:").
396
-
397
- Returns:
398
- (str, openvino.runtime.Model | None): The OpenVINO model file path and openvino.runtime.Model object if export is
399
- successful; otherwise, None.
400
-
401
- Notes:
402
- - Requires `openvino-dev` package version 2023.0 or higher. Install with:
403
- `$ pip install openvino-dev>=2023.0`
404
- - For INT8 quantization, also requires `nncf` library version 2.5.0 or higher. Install with:
405
- `$ pip install nncf>=2.5.0`
406
-
407
- Examples:
408
- ```python
409
- from pathlib import Path
410
- from ultralytics import YOLOv5
411
-
412
- model = YOLOv5('yolov5s.pt')
413
- export_openvino(Path('yolov5s.onnx'), metadata={'names': model.names, 'stride': model.stride}, half=True,
414
- int8=False, data='data.yaml')
415
- ```
416
-
417
- This will export the YOLOv5 model to OpenVINO with FP16 precision but without INT8 quantization, saving it to
418
- the specified file path.
419
- """
420
- check_requirements("openvino-dev>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/
421
- import openvino.runtime as ov # noqa
422
- from openvino.tools import mo # noqa
423
-
424
- LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
425
- f = str(file).replace(file.suffix, f"_{'int8_' if int8 else ''}openvino_model{os.sep}")
426
- f_onnx = file.with_suffix(".onnx")
427
- f_ov = str(Path(f) / file.with_suffix(".xml").name)
428
-
429
- ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework="onnx", compress_to_fp16=half) # export
430
-
431
- if int8:
432
- check_requirements("nncf>=2.5.0") # requires at least version 2.5.0 to use the post-training quantization
433
- import nncf
434
- import numpy as np
435
-
436
- from utils.dataloaders import create_dataloader
437
-
438
- def gen_dataloader(yaml_path, task="train", imgsz=640, workers=4):
439
- """Generates a DataLoader for model training or validation based on the given YAML dataset configuration."""
440
- data_yaml = check_yaml(yaml_path)
441
- data = check_dataset(data_yaml)
442
- dataloader = create_dataloader(
443
- data[task], imgsz=imgsz, batch_size=1, stride=32, pad=0.5, single_cls=False, rect=False, workers=workers
444
- )[0]
445
- return dataloader
446
-
447
- # noqa: F811
448
-
449
- def transform_fn(data_item):
450
- """
451
- Quantization transform function.
452
-
453
- Extracts and preprocess input data from dataloader item for quantization.
454
-
455
- Args:
456
- data_item: Tuple with data item produced by DataLoader during iteration
457
-
458
- Returns:
459
- input_tensor: Input data for quantization
460
- """
461
- assert data_item[0].dtype == torch.uint8, "input image must be uint8 for the quantization preprocessing"
462
-
463
- img = data_item[0].numpy().astype(np.float32) # uint8 to fp16/32
464
- img /= 255.0 # 0 - 255 to 0.0 - 1.0
465
- return np.expand_dims(img, 0) if img.ndim == 3 else img
466
-
467
- ds = gen_dataloader(data)
468
- quantization_dataset = nncf.Dataset(ds, transform_fn)
469
- ov_model = nncf.quantize(ov_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED)
470
-
471
- ov.serialize(ov_model, f_ov) # save
472
- yaml_save(Path(f) / file.with_suffix(".yaml").name, metadata) # add metadata.yaml
473
- return f, None
474
-
475
-
476
- @try_export
477
- def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")):
478
- """
479
- Export a YOLOv5 PyTorch model to PaddlePaddle format using X2Paddle, saving the converted model and metadata.
480
-
481
- Args:
482
- model (torch.nn.Module): The YOLOv5 model to be exported.
483
- im (torch.Tensor): Input tensor used for model tracing during export.
484
- file (pathlib.Path): Path to the source file to be converted.
485
- metadata (dict): Additional metadata to be saved alongside the model.
486
- prefix (str): Prefix for logging information.
487
-
488
- Returns:
489
- tuple (str, None): A tuple where the first element is the path to the saved PaddlePaddle model, and the
490
- second element is None.
491
-
492
- Examples:
493
- ```python
494
- from pathlib import Path
495
- import torch
496
-
497
- # Assume 'model' is a pre-trained YOLOv5 model and 'im' is an example input tensor
498
- model = ... # Load your model here
499
- im = torch.randn((1, 3, 640, 640)) # Dummy input tensor for tracing
500
- file = Path("yolov5s.pt")
501
- metadata = {"stride": 32, "names": ["person", "bicycle", "car", "motorbike"]}
502
-
503
- export_paddle(model=model, im=im, file=file, metadata=metadata)
504
- ```
505
-
506
- Notes:
507
- Ensure that `paddlepaddle` and `x2paddle` are installed, as these are required for the export function. You can
508
- install them via pip:
509
- ```
510
- $ pip install paddlepaddle x2paddle
511
- ```
512
- """
513
- check_requirements(("paddlepaddle", "x2paddle"))
514
- import x2paddle
515
- from x2paddle.convert import pytorch2paddle
516
-
517
- LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...")
518
- f = str(file).replace(".pt", f"_paddle_model{os.sep}")
519
-
520
- pytorch2paddle(module=model, save_dir=f, jit_type="trace", input_examples=[im]) # export
521
- yaml_save(Path(f) / file.with_suffix(".yaml").name, metadata) # add metadata.yaml
522
- return f, None
523
-
524
-
525
- @try_export
526
- def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("CoreML:")):
527
- """
528
- Export a YOLOv5 model to CoreML format with optional NMS, INT8, and FP16 support.
529
-
530
- Args:
531
- model (torch.nn.Module): The YOLOv5 model to be exported.
532
- im (torch.Tensor): Example input tensor to trace the model.
533
- file (pathlib.Path): Path object where the CoreML model will be saved.
534
- int8 (bool): Flag indicating whether to use INT8 quantization (default is False).
535
- half (bool): Flag indicating whether to use FP16 quantization (default is False).
536
- nms (bool): Flag indicating whether to include Non-Maximum Suppression (default is False).
537
- mlmodel (bool): Flag indicating whether to export as older *.mlmodel format (default is False).
538
- prefix (str): Prefix string for logging purposes (default is 'CoreML:').
539
-
540
- Returns:
541
- tuple[pathlib.Path | None, None]: The path to the saved CoreML model file, or (None, None) if there is an error.
542
-
543
- Notes:
544
- The exported CoreML model will be saved with a .mlmodel extension.
545
- Quantization is supported only on macOS.
546
-
547
- Example:
548
- ```python
549
- from pathlib import Path
550
- import torch
551
- from models.yolo import Model
552
- model = Model(cfg, ch=3, nc=80)
553
- im = torch.randn(1, 3, 640, 640)
554
- file = Path("yolov5s_coreml")
555
- export_coreml(model, im, file, int8=False, half=False, nms=True, mlmodel=False)
556
- ```
557
- """
558
- check_requirements("coremltools")
559
- import coremltools as ct
560
-
561
- LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
562
- if mlmodel:
563
- f = file.with_suffix(".mlmodel")
564
- convert_to = "neuralnetwork"
565
- precision = None
566
- else:
567
- f = file.with_suffix(".mlpackage")
568
- convert_to = "mlprogram"
569
- precision = ct.precision.FLOAT16 if half else ct.precision.FLOAT32
570
- if nms:
571
- model = iOSModel(model, im)
572
- ts = torch.jit.trace(model, im, strict=False) # TorchScript model
573
- ct_model = ct.convert(
574
- ts,
575
- inputs=[ct.ImageType("image", shape=im.shape, scale=1 / 255, bias=[0, 0, 0])],
576
- convert_to=convert_to,
577
- compute_precision=precision,
578
- )
579
- bits, mode = (8, "kmeans") if int8 else (16, "linear") if half else (32, None)
580
- if bits < 32:
581
- if mlmodel:
582
- with warnings.catch_warnings():
583
- warnings.filterwarnings(
584
- "ignore", category=DeprecationWarning
585
- ) # suppress numpy==1.20 float warning, fixed in coremltools==7.0
586
- ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
587
- elif bits == 8:
588
- op_config = ct.optimize.coreml.OpPalettizerConfig(mode=mode, nbits=bits, weight_threshold=512)
589
- config = ct.optimize.coreml.OptimizationConfig(global_config=op_config)
590
- ct_model = ct.optimize.coreml.palettize_weights(ct_model, config)
591
- ct_model.save(f)
592
- return f, ct_model
593
-
594
-
595
- @try_export
596
- def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr("TensorRT:")):
597
- """
598
- Export a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0.
599
-
600
- Args:
601
- model (torch.nn.Module): YOLOv5 model to be exported.
602
- im (torch.Tensor): Input tensor of shape (B, C, H, W).
603
- file (pathlib.Path): Path to save the exported model.
604
- half (bool): Set to True to export with FP16 precision.
605
- dynamic (bool): Set to True to enable dynamic input shapes.
606
- simplify (bool): Set to True to simplify the model during export.
607
- workspace (int): Workspace size in GB (default is 4).
608
- verbose (bool): Set to True for verbose logging output.
609
- prefix (str): Log message prefix.
610
-
611
- Returns:
612
- (pathlib.Path, None): Tuple containing the path to the exported model and None.
613
-
614
- Raises:
615
- AssertionError: If executed on CPU instead of GPU.
616
- RuntimeError: If there is a failure in parsing the ONNX file.
617
-
618
- Example:
619
- ```python
620
- from ultralytics import YOLOv5
621
- import torch
622
- from pathlib import Path
623
-
624
- model = YOLOv5('yolov5s.pt') # Load a pre-trained YOLOv5 model
625
- input_tensor = torch.randn(1, 3, 640, 640).cuda() # example input tensor on GPU
626
- export_path = Path('yolov5s.engine') # export destination
627
-
628
- export_engine(model.model, input_tensor, export_path, half=True, dynamic=True, simplify=True, workspace=8, verbose=True)
629
- ```
630
- """
631
- assert im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. `python export.py --device 0`"
632
- try:
633
- import tensorrt as trt
634
- except Exception:
635
- if platform.system() == "Linux":
636
- check_requirements("nvidia-tensorrt", cmds="-U --index-url https://pypi.ngc.nvidia.com")
637
- import tensorrt as trt
638
-
639
- if trt.__version__[0] == "7": # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
640
- grid = model.model[-1].anchor_grid
641
- model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
642
- export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
643
- model.model[-1].anchor_grid = grid
644
- else: # TensorRT >= 8
645
- check_version(trt.__version__, "8.0.0", hard=True) # require tensorrt>=8.0.0
646
- export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
647
- onnx = file.with_suffix(".onnx")
648
-
649
- LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
650
- is_trt10 = int(trt.__version__.split(".")[0]) >= 10 # is TensorRT >= 10
651
- assert onnx.exists(), f"failed to export ONNX file: {onnx}"
652
- f = file.with_suffix(".engine") # TensorRT engine file
653
- logger = trt.Logger(trt.Logger.INFO)
654
- if verbose:
655
- logger.min_severity = trt.Logger.Severity.VERBOSE
656
-
657
- builder = trt.Builder(logger)
658
- config = builder.create_builder_config()
659
- if is_trt10:
660
- config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)
661
- else: # TensorRT versions 7, 8
662
- config.max_workspace_size = workspace * 1 << 30
663
- flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
664
- network = builder.create_network(flag)
665
- parser = trt.OnnxParser(network, logger)
666
- if not parser.parse_from_file(str(onnx)):
667
- raise RuntimeError(f"failed to load ONNX file: {onnx}")
668
-
669
- inputs = [network.get_input(i) for i in range(network.num_inputs)]
670
- outputs = [network.get_output(i) for i in range(network.num_outputs)]
671
- for inp in inputs:
672
- LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
673
- for out in outputs:
674
- LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
675
-
676
- if dynamic:
677
- if im.shape[0] <= 1:
678
- LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument")
679
- profile = builder.create_optimization_profile()
680
- for inp in inputs:
681
- profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
682
- config.add_optimization_profile(profile)
683
-
684
- LOGGER.info(f"{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}")
685
- if builder.platform_has_fast_fp16 and half:
686
- config.set_flag(trt.BuilderFlag.FP16)
687
-
688
- build = builder.build_serialized_network if is_trt10 else builder.build_engine
689
- with build(network, config) as engine, open(f, "wb") as t:
690
- t.write(engine if is_trt10 else engine.serialize())
691
- return f, None
692
-
693
-
694
- @try_export
695
- def export_saved_model(
696
- model,
697
- im,
698
- file,
699
- dynamic,
700
- tf_nms=False,
701
- agnostic_nms=False,
702
- topk_per_class=100,
703
- topk_all=100,
704
- iou_thres=0.45,
705
- conf_thres=0.25,
706
- keras=False,
707
- prefix=colorstr("TensorFlow SavedModel:"),
708
- ):
709
- """
710
- Export a YOLOv5 model to the TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression
711
- (NMS).
712
-
713
- Args:
714
- model (torch.nn.Module): The PyTorch model to convert.
715
- im (torch.Tensor): Sample input tensor with shape (B, C, H, W) for tracing.
716
- file (pathlib.Path): File path to save the exported model.
717
- dynamic (bool): Flag to indicate whether dynamic axes should be used.
718
- tf_nms (bool, optional): Enable TensorFlow non-maximum suppression (NMS). Default is False.
719
- agnostic_nms (bool, optional): Enable class-agnostic NMS. Default is False.
720
- topk_per_class (int, optional): Top K detections per class to keep before applying NMS. Default is 100.
721
- topk_all (int, optional): Top K detections across all classes to keep before applying NMS. Default is 100.
722
- iou_thres (float, optional): IoU threshold for NMS. Default is 0.45.
723
- conf_thres (float, optional): Confidence threshold for detections. Default is 0.25.
724
- keras (bool, optional): Save the model in Keras format if True. Default is False.
725
- prefix (str, optional): Prefix for logging messages. Default is "TensorFlow SavedModel:".
726
-
727
- Returns:
728
- tuple[str, tf.keras.Model | None]: A tuple containing the path to the saved model folder and the Keras model instance,
729
- or None if TensorFlow export fails.
730
-
731
- Notes:
732
- - The method supports TensorFlow versions up to 2.15.1.
733
- - TensorFlow NMS may not be supported in older TensorFlow versions.
734
- - If the TensorFlow version exceeds 2.13.1, it might cause issues when exporting to TFLite.
735
- Refer to: https://github.com/ultralytics/yolov5/issues/12489
736
-
737
- Example:
738
- ```python
739
- model, im = ... # Initialize your PyTorch model and input tensor
740
- export_saved_model(model, im, Path("yolov5_saved_model"), dynamic=True)
741
- ```
742
- """
743
- # YOLOv5 TensorFlow SavedModel export
744
- try:
745
- import tensorflow as tf
746
- except Exception:
747
- check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}<=2.15.1")
748
-
749
- import tensorflow as tf
750
- from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
751
-
752
- from models.tf import TFModel
753
-
754
- LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
755
- if tf.__version__ > "2.13.1":
756
- helper_url = "https://github.com/ultralytics/yolov5/issues/12489"
757
- LOGGER.info(
758
- f"WARNING ⚠️ using Tensorflow {tf.__version__} > 2.13.1 might cause issue when exporting the model to tflite {helper_url}"
759
- ) # handling issue https://github.com/ultralytics/yolov5/issues/12489
760
- f = str(file).replace(".pt", "_saved_model")
761
- batch_size, ch, *imgsz = list(im.shape) # BCHW
762
-
763
- tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
764
- im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow
765
- _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
766
- inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
767
- outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
768
- keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
769
- keras_model.trainable = False
770
- keras_model.summary()
771
- if keras:
772
- keras_model.save(f, save_format="tf")
773
- else:
774
- spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
775
- m = tf.function(lambda x: keras_model(x)) # full model
776
- m = m.get_concrete_function(spec)
777
- frozen_func = convert_variables_to_constants_v2(m)
778
- tfm = tf.Module()
779
- tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
780
- tfm.__call__(im)
781
- tf.saved_model.save(
782
- tfm,
783
- f,
784
- options=tf.saved_model.SaveOptions(experimental_custom_gradients=False)
785
- if check_version(tf.__version__, "2.6")
786
- else tf.saved_model.SaveOptions(),
787
- )
788
- return f, keras_model
789
-
790
-
791
- @try_export
792
- def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")):
793
- """
794
- Export YOLOv5 model to TensorFlow GraphDef (*.pb) format.
795
-
796
- Args:
797
- keras_model (tf.keras.Model): The Keras model to be converted.
798
- file (Path): The output file path where the GraphDef will be saved.
799
- prefix (str): Optional prefix string; defaults to a colored string indicating TensorFlow GraphDef export status.
800
-
801
- Returns:
802
- Tuple[Path, None]: The file path where the GraphDef model was saved and a None placeholder.
803
-
804
- Notes:
805
- For more details, refer to the guide on frozen graphs: https://github.com/leimao/Frozen_Graph_TensorFlow
806
-
807
- Example:
808
- ```python
809
- from pathlib import Path
810
- keras_model = ... # assume an existing Keras model
811
- file = Path("model.pb")
812
- export_pb(keras_model, file)
813
- ```
814
- """
815
- import tensorflow as tf
816
- from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
817
-
818
- LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
819
- f = file.with_suffix(".pb")
820
-
821
- m = tf.function(lambda x: keras_model(x)) # full model
822
- m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
823
- frozen_func = convert_variables_to_constants_v2(m)
824
- frozen_func.graph.as_graph_def()
825
- tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
826
- return f, None
827
-
828
-
829
- @try_export
830
- def export_tflite(
831
- keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:")
832
- ):
833
- # YOLOv5 TensorFlow Lite export
834
- """
835
- Export a YOLOv5 model to TensorFlow Lite format with optional INT8 quantization and NMS support.
836
-
837
- Args:
838
- keras_model (tf.keras.Model): The Keras model to be exported.
839
- im (torch.Tensor): An input image tensor for normalization and model tracing.
840
- file (Path): The file path to save the TensorFlow Lite model.
841
- int8 (bool): Enables INT8 quantization if True.
842
- per_tensor (bool): If True, disables per-channel quantization.
843
- data (str): Path to the dataset for representative dataset generation in INT8 quantization.
844
- nms (bool): Enables Non-Maximum Suppression (NMS) if True.
845
- agnostic_nms (bool): Enables class-agnostic NMS if True.
846
- prefix (str): Prefix for log messages.
847
-
848
- Returns:
849
- (str | None, tflite.Model | None): The file path of the exported TFLite model and the TFLite model instance, or None
850
- if the export failed.
851
-
852
- Example:
853
- ```python
854
- from pathlib import Path
855
- import torch
856
- import tensorflow as tf
857
-
858
- # Load a Keras model wrapping a YOLOv5 model
859
- keras_model = tf.keras.models.load_model('path/to/keras_model.h5')
860
-
861
- # Example input tensor
862
- im = torch.zeros(1, 3, 640, 640)
863
-
864
- # Export the model
865
- export_tflite(keras_model, im, Path('model.tflite'), int8=True, per_tensor=False, data='data/coco.yaml',
866
- nms=True, agnostic_nms=False)
867
- ```
868
-
869
- Notes:
870
- - Ensure TensorFlow and TensorFlow Lite dependencies are installed.
871
- - INT8 quantization requires a representative dataset to achieve optimal accuracy.
872
- - TensorFlow Lite models are suitable for efficient inference on mobile and edge devices.
873
- """
874
- import tensorflow as tf
875
-
876
- LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
877
- batch_size, ch, *imgsz = list(im.shape) # BCHW
878
- f = str(file).replace(".pt", "-fp16.tflite")
879
-
880
- converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
881
- converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
882
- converter.target_spec.supported_types = [tf.float16]
883
- converter.optimizations = [tf.lite.Optimize.DEFAULT]
884
- if int8:
885
- from models.tf import representative_dataset_gen
886
-
887
- dataset = LoadImages(check_dataset(check_yaml(data))["train"], img_size=imgsz, auto=False)
888
- converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
889
- converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
890
- converter.target_spec.supported_types = []
891
- converter.inference_input_type = tf.uint8 # or tf.int8
892
- converter.inference_output_type = tf.uint8 # or tf.int8
893
- converter.experimental_new_quantizer = True
894
- if per_tensor:
895
- converter._experimental_disable_per_channel = True
896
- f = str(file).replace(".pt", "-int8.tflite")
897
- if nms or agnostic_nms:
898
- converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
899
-
900
- tflite_model = converter.convert()
901
- open(f, "wb").write(tflite_model)
902
- return f, None
903
-
904
-
905
- @try_export
906
- def export_edgetpu(file, prefix=colorstr("Edge TPU:")):
907
- """
908
- Exports a YOLOv5 model to Edge TPU compatible TFLite format; requires Linux and Edge TPU compiler.
909
-
910
- Args:
911
- file (Path): Path to the YOLOv5 model file to be exported (.pt format).
912
- prefix (str, optional): Prefix for logging messages. Defaults to colorstr("Edge TPU:").
913
-
914
- Returns:
915
- tuple[Path, None]: Path to the exported Edge TPU compatible TFLite model, None.
916
-
917
- Raises:
918
- AssertionError: If the system is not Linux.
919
- subprocess.CalledProcessError: If any subprocess call to install or run the Edge TPU compiler fails.
920
-
921
- Notes:
922
- To use this function, ensure you have the Edge TPU compiler installed on your Linux system. You can find
923
- installation instructions here: https://coral.ai/docs/edgetpu/compiler/.
924
-
925
- Example:
926
- ```python
927
- from pathlib import Path
928
- file = Path('yolov5s.pt')
929
- export_edgetpu(file)
930
- ```
931
- """
932
- cmd = "edgetpu_compiler --version"
933
- help_url = "https://coral.ai/docs/edgetpu/compiler/"
934
- assert platform.system() == "Linux", f"export only supported on Linux. See {help_url}"
935
- if subprocess.run(f"{cmd} > /dev/null 2>&1", shell=True).returncode != 0:
936
- LOGGER.info(f"\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}")
937
- sudo = subprocess.run("sudo --version >/dev/null", shell=True).returncode == 0 # sudo installed on system
938
- for c in (
939
- "curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -",
940
- 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
941
- "sudo apt-get update",
942
- "sudo apt-get install edgetpu-compiler",
943
- ):
944
- subprocess.run(c if sudo else c.replace("sudo ", ""), shell=True, check=True)
945
- ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
946
-
947
- LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
948
- f = str(file).replace(".pt", "-int8_edgetpu.tflite") # Edge TPU model
949
- f_tfl = str(file).replace(".pt", "-int8.tflite") # TFLite model
950
-
951
- subprocess.run(
952
- [
953
- "edgetpu_compiler",
954
- "-s",
955
- "-d",
956
- "-k",
957
- "10",
958
- "--out_dir",
959
- str(file.parent),
960
- f_tfl,
961
- ],
962
- check=True,
963
- )
964
- return f, None
965
-
966
-
967
- @try_export
968
- def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
969
- """
970
- Convert a YOLOv5 model to TensorFlow.js format with optional uint8 quantization.
971
-
972
- Args:
973
- file (Path): Path to the YOLOv5 model file to be converted, typically having a ".pt" or ".onnx" extension.
974
- int8 (bool): If True, applies uint8 quantization during the conversion process.
975
- prefix (str): Optional prefix for logging messages, default is 'TensorFlow.js:' with color formatting.
976
-
977
- Returns:
978
- (str, None): Tuple containing the output directory path as a string and None.
979
-
980
- Notes:
981
- - This function requires the `tensorflowjs` package. Install it using:
982
- ```shell
983
- pip install tensorflowjs
984
- ```
985
- - The converted TensorFlow.js model will be saved in a directory with the "_web_model" suffix appended to the original file name.
986
- - The conversion involves running shell commands that invoke the TensorFlow.js converter tool.
987
-
988
- Example:
989
- ```python
990
- from pathlib import Path
991
- file = Path('yolov5.onnx')
992
- export_tfjs(file, int8=False)
993
- ```
994
- """
995
- check_requirements("tensorflowjs")
996
- import tensorflowjs as tfjs
997
-
998
- LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...")
999
- f = str(file).replace(".pt", "_web_model") # js dir
1000
- f_pb = file.with_suffix(".pb") # *.pb path
1001
- f_json = f"{f}/model.json" # *.json path
1002
-
1003
- args = [
1004
- "tensorflowjs_converter",
1005
- "--input_format=tf_frozen_model",
1006
- "--quantize_uint8" if int8 else "",
1007
- "--output_node_names=Identity,Identity_1,Identity_2,Identity_3",
1008
- str(f_pb),
1009
- f,
1010
- ]
1011
- subprocess.run([arg for arg in args if arg], check=True)
1012
-
1013
- json = Path(f_json).read_text()
1014
- with open(f_json, "w") as j: # sort JSON Identity_* in ascending order
1015
- subst = re.sub(
1016
- r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
1017
- r'"Identity.?.?": {"name": "Identity.?.?"}, '
1018
- r'"Identity.?.?": {"name": "Identity.?.?"}, '
1019
- r'"Identity.?.?": {"name": "Identity.?.?"}}}',
1020
- r'{"outputs": {"Identity": {"name": "Identity"}, '
1021
- r'"Identity_1": {"name": "Identity_1"}, '
1022
- r'"Identity_2": {"name": "Identity_2"}, '
1023
- r'"Identity_3": {"name": "Identity_3"}}}',
1024
- json,
1025
- )
1026
- j.write(subst)
1027
- return f, None
1028
-
1029
-
1030
- def add_tflite_metadata(file, metadata, num_outputs):
1031
- """
1032
- Adds metadata to a TensorFlow Lite (TFLite) model file, supporting multiple outputs according to TensorFlow
1033
- guidelines.
1034
-
1035
- Args:
1036
- file (str): Path to the TFLite model file to which metadata will be added.
1037
- metadata (dict): Metadata information to be added to the model, structured as required by the TFLite metadata schema.
1038
- Common keys include "name", "description", "version", "author", and "license".
1039
- num_outputs (int): Number of output tensors the model has, used to configure the metadata properly.
1040
-
1041
- Returns:
1042
- None
1043
-
1044
- Example:
1045
- ```python
1046
- metadata = {
1047
- "name": "yolov5",
1048
- "description": "YOLOv5 object detection model",
1049
- "version": "1.0",
1050
- "author": "Ultralytics",
1051
- "license": "Apache License 2.0"
1052
- }
1053
- add_tflite_metadata("model.tflite", metadata, num_outputs=4)
1054
- ```
1055
-
1056
- Note:
1057
- TFLite metadata can include information such as model name, version, author, and other relevant details.
1058
- For more details on the structure of the metadata, refer to TensorFlow Lite
1059
- [metadata guidelines](https://www.tensorflow.org/lite/models/convert/metadata).
1060
- """
1061
- with contextlib.suppress(ImportError):
1062
- # check_requirements('tflite_support')
1063
- from tflite_support import flatbuffers
1064
- from tflite_support import metadata as _metadata
1065
- from tflite_support import metadata_schema_py_generated as _metadata_fb
1066
-
1067
- tmp_file = Path("/tmp/meta.txt")
1068
- with open(tmp_file, "w") as meta_f:
1069
- meta_f.write(str(metadata))
1070
-
1071
- model_meta = _metadata_fb.ModelMetadataT()
1072
- label_file = _metadata_fb.AssociatedFileT()
1073
- label_file.name = tmp_file.name
1074
- model_meta.associatedFiles = [label_file]
1075
-
1076
- subgraph = _metadata_fb.SubGraphMetadataT()
1077
- subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
1078
- subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs
1079
- model_meta.subgraphMetadata = [subgraph]
1080
-
1081
- b = flatbuffers.Builder(0)
1082
- b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
1083
- metadata_buf = b.Output()
1084
-
1085
- populator = _metadata.MetadataPopulator.with_model_file(file)
1086
- populator.load_metadata_buffer(metadata_buf)
1087
- populator.load_associated_files([str(tmp_file)])
1088
- populator.populate()
1089
- tmp_file.unlink()
1090
-
1091
-
1092
- def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML Pipeline:")):
1093
- """
1094
- Convert a PyTorch YOLOv5 model to CoreML format with Non-Maximum Suppression (NMS), handling different input/output
1095
- shapes, and saving the model.
1096
-
1097
- Args:
1098
- model (torch.nn.Module): The YOLOv5 PyTorch model to be converted.
1099
- im (torch.Tensor): Example input tensor with shape (N, C, H, W), where N is the batch size, C is the number of channels,
1100
- H is the height, and W is the width.
1101
- file (Path): Path to save the converted CoreML model.
1102
- names (dict[int, str]): Dictionary mapping class indices to class names.
1103
- y (torch.Tensor): Output tensor from the PyTorch model's forward pass.
1104
- mlmodel (bool): Flag indicating whether to export as older *.mlmodel format (default is False).
1105
- prefix (str): Custom prefix for logging messages.
1106
-
1107
- Returns:
1108
- (Path): Path to the saved CoreML model (.mlmodel).
1109
-
1110
- Raises:
1111
- AssertionError: If the number of class names does not match the number of classes in the model.
1112
-
1113
- Notes:
1114
- - This function requires `coremltools` to be installed.
1115
- - Running this function on a non-macOS environment might not support some features.
1116
- - Flexible input shapes and additional NMS options can be customized within the function.
1117
-
1118
- Examples:
1119
- ```python
1120
- from pathlib import Path
1121
- import torch
1122
-
1123
- model = torch.load('yolov5s.pt') # Load YOLOv5 model
1124
- im = torch.zeros((1, 3, 640, 640)) # Example input tensor
1125
-
1126
- names = {0: "person", 1: "bicycle", 2: "car", ...} # Define class names
1127
-
1128
- y = model(im) # Perform forward pass to get model output
1129
-
1130
- output_file = Path('yolov5s.mlmodel') # Convert to CoreML
1131
- pipeline_coreml(model, im, output_file, names, y)
1132
- ```
1133
- """
1134
-     import coremltools as ct
-     from PIL import Image
-
-     f = file.with_suffix(".mlmodel") if mlmodel else file.with_suffix(".mlpackage")
-     print(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
-     batch_size, ch, h, w = list(im.shape)  # BCHW
-     t = time.time()
-
-     # YOLOv5 Output shapes
-     spec = model.get_spec()
-     out0, out1 = iter(spec.description.output)
-     if platform.system() == "Darwin":
-         img = Image.new("RGB", (w, h))  # img(192 width, 320 height)
-         # img = torch.zeros((*opt.img_size, 3)).numpy()  # img size(320,192,3) iDetection
-         out = model.predict({"image": img})
-         out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape
-     else:  # Linux and Windows cannot run model.predict(); get sizes from the PyTorch output y
-         s = tuple(y[0].shape)
-         out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4)  # (3780, 80), (3780, 4)
-
-     # Checks
-     nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
-     na, nc = out0_shape
-     # na, nc = out0.type.multiArrayType.shape  # number anchors, classes
-     assert len(names) == nc, f"{len(names)} names found for nc={nc}"  # check
-
-     # Define output shapes (missing)
-     out0.type.multiArrayType.shape[:] = out0_shape  # (3780, 80)
-     out1.type.multiArrayType.shape[:] = out1_shape  # (3780, 4)
-     # spec.neuralNetwork.preprocessing[0].featureName = '0'
-
-     # Flexible input shapes
-     # from coremltools.models.neural_network import flexible_shape_utils
-     # s = []  # shapes
-     # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))
-     # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384))  # (height, width)
-     # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)
-     # r = flexible_shape_utils.NeuralNetworkImageSizeRange()  # shape ranges
-     # r.add_height_range((192, 640))
-     # r.add_width_range((192, 640))
-     # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)
-
-     # Print
-     print(spec.description)
-
-     # Model from spec
-     weights_dir = None if mlmodel else str(f / "Data/com.apple.CoreML/weights")
-     model = ct.models.MLModel(spec, weights_dir=weights_dir)
-
-     # 3. Create NMS protobuf
-     nms_spec = ct.proto.Model_pb2.Model()
-     nms_spec.specificationVersion = 5
-     for i in range(2):
-         decoder_output = model._spec.description.output[i].SerializeToString()
-         nms_spec.description.input.add()
-         nms_spec.description.input[i].ParseFromString(decoder_output)
-         nms_spec.description.output.add()
-         nms_spec.description.output[i].ParseFromString(decoder_output)
-
-     nms_spec.description.output[0].name = "confidence"
-     nms_spec.description.output[1].name = "coordinates"
-
-     output_sizes = [nc, 4]
-     for i in range(2):
-         ma_type = nms_spec.description.output[i].type.multiArrayType
-         ma_type.shapeRange.sizeRanges.add()
-         ma_type.shapeRange.sizeRanges[0].lowerBound = 0
-         ma_type.shapeRange.sizeRanges[0].upperBound = -1
-         ma_type.shapeRange.sizeRanges.add()
-         ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
-         ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
-         del ma_type.shape[:]
-
-     nms = nms_spec.nonMaximumSuppression
-     nms.confidenceInputFeatureName = out0.name  # 1x507x80
-     nms.coordinatesInputFeatureName = out1.name  # 1x507x4
-     nms.confidenceOutputFeatureName = "confidence"
-     nms.coordinatesOutputFeatureName = "coordinates"
-     nms.iouThresholdInputFeatureName = "iouThreshold"
-     nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
-     nms.iouThreshold = 0.45
-     nms.confidenceThreshold = 0.25
-     nms.pickTop.perClass = True
-     nms.stringClassLabels.vector.extend(names.values())
-     nms_model = ct.models.MLModel(nms_spec)
-
-     # 4. Pipeline models together
-     pipeline = ct.models.pipeline.Pipeline(
-         input_features=[
-             ("image", ct.models.datatypes.Array(3, ny, nx)),
-             ("iouThreshold", ct.models.datatypes.Double()),
-             ("confidenceThreshold", ct.models.datatypes.Double()),
-         ],
-         output_features=["confidence", "coordinates"],
-     )
-     pipeline.add_model(model)
-     pipeline.add_model(nms_model)
-
-     # Correct datatypes
-     pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
-     pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
-     pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
-
-     # Update metadata
-     pipeline.spec.specificationVersion = 5
-     pipeline.spec.description.metadata.versionString = "https://github.com/ultralytics/yolov5"
-     pipeline.spec.description.metadata.shortDescription = "https://github.com/ultralytics/yolov5"
-     pipeline.spec.description.metadata.author = "glenn.jocher@ultralytics.com"
-     pipeline.spec.description.metadata.license = "https://github.com/ultralytics/yolov5/blob/master/LICENSE"
-     pipeline.spec.description.metadata.userDefined.update(
-         {
-             "classes": ",".join(names.values()),
-             "iou_threshold": str(nms.iouThreshold),
-             "confidence_threshold": str(nms.confidenceThreshold),
-         }
-     )
-
-     # Save the model
-     model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir)
-     model.input_description["image"] = "Input image"
-     model.input_description["iouThreshold"] = f"(optional) IOU Threshold override (default: {nms.iouThreshold})"
-     model.input_description["confidenceThreshold"] = (
-         f"(optional) Confidence Threshold override (default: {nms.confidenceThreshold})"
-     )
-     model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")'
-     model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)"
-     model.save(f)  # pipelined
-     print(f"{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)")
-
-
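The saved pipeline exposes the `image`, `iouThreshold`, and `confidenceThreshold` inputs and the `confidence`/`coordinates` outputs wired up above. A minimal consumption sketch (macOS only, since `predict()` needs the CoreML runtime; the file and image names are illustrative):

```python
import coremltools as ct
from PIL import Image

# Load the pipelined model saved by pipeline_coreml() (path is an assumption).
model = ct.models.MLModel("yolov5s.mlpackage")

# The image input must match the spec's imageType size (e.g. 640x640 here).
img = Image.open("bus.jpg").resize((640, 640))

# iouThreshold / confidenceThreshold override the defaults baked into the NMS spec.
out = model.predict({"image": img, "iouThreshold": 0.45, "confidenceThreshold": 0.25})
print(out["confidence"].shape)   # (num_boxes, num_classes) per-class scores
print(out["coordinates"].shape)  # (num_boxes, 4) [x, y, width, height], relative to image size
```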
- @smart_inference_mode()
- def run(
-     data=ROOT / "data/coco128.yaml",  # dataset.yaml path
-     weights=ROOT / "yolov5s.pt",  # weights path
-     imgsz=(640, 640),  # image (height, width)
-     batch_size=1,  # batch size
-     device="cpu",  # cuda device, i.e. 0 or 0,1,2,3 or cpu
-     include=("torchscript", "onnx"),  # include formats
-     half=False,  # FP16 half-precision export
-     inplace=False,  # set YOLOv5 Detect() inplace=True
-     keras=False,  # use Keras
-     optimize=False,  # TorchScript: optimize for mobile
-     int8=False,  # CoreML/TF INT8 quantization
-     per_tensor=False,  # TF per-tensor quantization
-     dynamic=False,  # ONNX/TF/TensorRT: dynamic axes
-     simplify=False,  # ONNX: simplify model
-     mlmodel=False,  # CoreML: export in *.mlmodel format
-     opset=12,  # ONNX: opset version
-     verbose=False,  # TensorRT: verbose log
-     workspace=4,  # TensorRT: workspace size (GB)
-     nms=False,  # TF: add NMS to model
-     agnostic_nms=False,  # TF: add agnostic NMS to model
-     topk_per_class=100,  # TF.js NMS: topk per class to keep
-     topk_all=100,  # TF.js NMS: topk for all classes to keep
-     iou_thres=0.45,  # TF.js NMS: IoU threshold
-     conf_thres=0.25,  # TF.js NMS: confidence threshold
- ):
- """
1293
- Exports a YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow.
1294
-
1295
- Args:
1296
- data (str | Path): Path to the dataset YAML configuration file. Default is 'data/coco128.yaml'.
1297
- weights (str | Path): Path to the pretrained model weights file. Default is 'yolov5s.pt'.
1298
- imgsz (tuple): Image size as (height, width). Default is (640, 640).
1299
- batch_size (int): Batch size for exporting the model. Default is 1.
1300
- device (str): Device to run the export on, e.g., '0' for GPU, 'cpu' for CPU. Default is 'cpu'.
1301
- include (tuple): Formats to include in the export. Default is ('torchscript', 'onnx').
1302
- half (bool): Flag to export model with FP16 half-precision. Default is False.
1303
- inplace (bool): Set the YOLOv5 Detect() module inplace=True. Default is False.
1304
- keras (bool): Flag to use Keras for TensorFlow SavedModel export. Default is False.
1305
- optimize (bool): Optimize TorchScript model for mobile deployment. Default is False.
1306
- int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. Default is False.
1307
- per_tensor (bool): Apply per tensor quantization for TensorFlow models. Default is False.
1308
- dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. Default is False.
1309
- simplify (bool): Simplify the ONNX model during export. Default is False.
1310
- opset (int): ONNX opset version. Default is 12.
1311
- verbose (bool): Enable verbose logging for TensorRT export. Default is False.
1312
- workspace (int): TensorRT workspace size in GB. Default is 4.
1313
- nms (bool): Add non-maximum suppression (NMS) to the TensorFlow model. Default is False.
1314
- agnostic_nms (bool): Add class-agnostic NMS to the TensorFlow model. Default is False.
1315
- topk_per_class (int): Top-K boxes per class to keep for TensorFlow.js NMS. Default is 100.
1316
- topk_all (int): Top-K boxes for all classes to keep for TensorFlow.js NMS. Default is 100.
1317
- iou_thres (float): IoU threshold for NMS. Default is 0.45.
1318
- conf_thres (float): Confidence threshold for NMS. Default is 0.25.
1319
- mlmodel (bool): Flag to use *.mlmodel for CoreML export. Default is False.
1320
-
1321
- Returns:
1322
- None
1323
-
1324
- Notes:
1325
- - Model export is based on the specified formats in the 'include' argument.
1326
- - Be cautious of combinations where certain flags are mutually exclusive, such as `--half` and `--dynamic`.
1327
-
1328
- Example:
1329
- ```python
1330
- run(
1331
- data="data/coco128.yaml",
1332
- weights="yolov5s.pt",
1333
- imgsz=(640, 640),
1334
- batch_size=1,
1335
- device="cpu",
1336
- include=("torchscript", "onnx"),
1337
- half=False,
1338
- inplace=False,
1339
- keras=False,
1340
- optimize=False,
1341
- int8=False,
1342
- per_tensor=False,
1343
- dynamic=False,
1344
- simplify=False,
1345
- opset=12,
1346
- verbose=False,
1347
- mlmodel=False,
1348
- workspace=4,
1349
- nms=False,
1350
- agnostic_nms=False,
1351
- topk_per_class=100,
1352
- topk_all=100,
1353
- iou_thres=0.45,
1354
- conf_thres=0.25,
1355
- )
1356
- ```
1357
- """
1358
-     t = time.time()
-     include = [x.lower() for x in include]  # to lowercase
-     fmts = tuple(export_formats()["Argument"][1:])  # --include arguments
-     flags = [x in include for x in fmts]
-     assert sum(flags) == len(include), f"ERROR: Invalid --include {include}, valid --include arguments are {fmts}"
-     jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags  # export booleans
-     file = Path(url2file(weights) if str(weights).startswith(("http:/", "https:/")) else weights)  # PyTorch weights
-
-     # Load PyTorch model
-     device = select_device(device)
-     if half:
-         assert device.type != "cpu" or coreml, "--half only compatible with GPU export, i.e. use --device 0"
-         assert not dynamic, "--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both"
-     model = attempt_load(weights, device=device, inplace=True, fuse=True)  # load FP32 model
-
-     # Checks
-     imgsz *= 2 if len(imgsz) == 1 else 1  # expand
-     if optimize:
-         assert device.type == "cpu", "--optimize not compatible with cuda devices, i.e. use --device cpu"
-
-     # Input
-     gs = int(max(model.stride))  # grid size (max stride)
-     imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples
-     im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection
-
-     # Update model
-     model.eval()
-     for k, m in model.named_modules():
-         if isinstance(m, Detect):
-             m.inplace = inplace
-             m.dynamic = dynamic
-             m.export = True
-
-     for _ in range(2):
-         y = model(im)  # dry runs
-     if half and not coreml:
-         im, model = im.half(), model.half()  # to FP16
-     shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape
-     metadata = {"stride": int(max(model.stride)), "names": model.names}  # model metadata
-     LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
-
-     # Exports
-     f = [""] * len(fmts)  # exported filenames
-     warnings.filterwarnings(action="ignore", category=torch.jit.TracerWarning)  # suppress TracerWarning
-     if jit:  # TorchScript
-         f[0], _ = export_torchscript(model, im, file, optimize)
-     if engine:  # TensorRT required before ONNX
-         f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
-     if onnx or xml:  # OpenVINO requires ONNX
-         f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
-     if xml:  # OpenVINO
-         f[3], _ = export_openvino(file, metadata, half, int8, data)
-     if coreml:  # CoreML
-         f[4], ct_model = export_coreml(model, im, file, int8, half, nms, mlmodel)
-         if nms:
-             pipeline_coreml(ct_model, im, file, model.names, y, mlmodel)
-     if any((saved_model, pb, tflite, edgetpu, tfjs)):  # TensorFlow formats
-         assert not tflite or not tfjs, "TFLite and TF.js models must be exported separately, please pass only one type."
-         assert not isinstance(model, ClassificationModel), "ClassificationModel export to TF formats not yet supported."
-         f[5], s_model = export_saved_model(
-             model.cpu(),
-             im,
-             file,
-             dynamic,
-             tf_nms=nms or agnostic_nms or tfjs,
-             agnostic_nms=agnostic_nms or tfjs,
-             topk_per_class=topk_per_class,
-             topk_all=topk_all,
-             iou_thres=iou_thres,
-             conf_thres=conf_thres,
-             keras=keras,
-         )
-         if pb or tfjs:  # pb prerequisite to tfjs
-             f[6], _ = export_pb(s_model, file)
-         if tflite or edgetpu:
-             f[7], _ = export_tflite(
-                 s_model, im, file, int8 or edgetpu, per_tensor, data=data, nms=nms, agnostic_nms=agnostic_nms
-             )
-             if edgetpu:
-                 f[8], _ = export_edgetpu(file)
-             add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))
-         if tfjs:
-             f[9], _ = export_tfjs(file, int8)
-     if paddle:  # PaddlePaddle
-         f[10], _ = export_paddle(model, im, file, metadata)
-
-     # Finish
-     f = [str(x) for x in f if x]  # filter out '' and None
-     if any(f):
-         cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel))  # type
-         det &= not seg  # segmentation models inherit from SegmentationModel(DetectionModel)
-         dir = Path("segment" if seg else "classify" if cls else "")
-         h = "--half" if half else ""  # --half FP16 inference arg
-         s = (
-             "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference"
-             if cls
-             else "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference"
-             if seg
-             else ""
-         )
-         LOGGER.info(
-             f'\nExport complete ({time.time() - t:.1f}s)'
-             f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
-             f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
-             f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
-             f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}"
-             f'\nVisualize: https://netron.app'
-         )
-     return f  # return list of exported files/dirs
-
-
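Programmatically, a CoreML export with the NMS pipeline above reduces to a single `run()` call. A minimal sketch, assuming this module's dependencies (torch, coremltools) are installed and `yolov5s.pt` is a local YOLOv5 checkpoint:

```python
files = run(
    weights="yolov5s.pt",
    imgsz=(640, 640),
    include=("coreml",),  # triggers export_coreml(); nms=True additionally runs pipeline_coreml()
    nms=True,
    mlmodel=True,  # legacy *.mlmodel container instead of *.mlpackage
)
print(files)  # e.g. ['yolov5s.mlmodel'] — paths of the exported artifacts
```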
- def parse_opt(known=False):
-     """
-     Parse command-line options for YOLOv5 model export configurations.
-
-     Args:
-         known (bool): If True, uses `argparse.ArgumentParser.parse_known_args`; otherwise, uses
-             `argparse.ArgumentParser.parse_args`. Default is False.
-
-     Returns:
-         argparse.Namespace: Object containing parsed command-line arguments.
-
-     Example:
-         ```python
-         opts = parse_opt()
-         print(opts.data)
-         print(opts.weights)
-         ```
-     """
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
-     parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model.pt path(s)")
-     parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640, 640], help="image (h, w)")
-     parser.add_argument("--batch-size", type=int, default=1, help="batch size")
-     parser.add_argument("--device", default="cpu", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
-     parser.add_argument("--half", action="store_true", help="FP16 half-precision export")
-     parser.add_argument("--inplace", action="store_true", help="set YOLOv5 Detect() inplace=True")
-     parser.add_argument("--keras", action="store_true", help="TF: use Keras")
-     parser.add_argument("--optimize", action="store_true", help="TorchScript: optimize for mobile")
-     parser.add_argument("--int8", action="store_true", help="CoreML/TF/OpenVINO INT8 quantization")
-     parser.add_argument("--per-tensor", action="store_true", help="TF per-tensor quantization")
-     parser.add_argument("--dynamic", action="store_true", help="ONNX/TF/TensorRT: dynamic axes")
-     parser.add_argument("--simplify", action="store_true", help="ONNX: simplify model")
-     parser.add_argument("--mlmodel", action="store_true", help="CoreML: export in *.mlmodel format")
-     parser.add_argument("--opset", type=int, default=17, help="ONNX: opset version")
-     parser.add_argument("--verbose", action="store_true", help="TensorRT: verbose log")
-     parser.add_argument("--workspace", type=int, default=4, help="TensorRT: workspace size (GB)")
-     parser.add_argument("--nms", action="store_true", help="TF: add NMS to model")
-     parser.add_argument("--agnostic-nms", action="store_true", help="TF: add agnostic NMS to model")
-     parser.add_argument("--topk-per-class", type=int, default=100, help="TF.js NMS: topk per class to keep")
-     parser.add_argument("--topk-all", type=int, default=100, help="TF.js NMS: topk for all classes to keep")
-     parser.add_argument("--iou-thres", type=float, default=0.45, help="TF.js NMS: IoU threshold")
-     parser.add_argument("--conf-thres", type=float, default=0.25, help="TF.js NMS: confidence threshold")
-     parser.add_argument(
-         "--include",
-         nargs="+",
-         default=["torchscript"],
-         help="torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle",
-     )
-     opt = parser.parse_known_args()[0] if known else parser.parse_args()
-     print_args(vars(opt))
-     return opt
-
-
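`parse_opt()` mirrors `run()`'s keyword arguments as CLI flags. A sketch of exercising it without a real command line (the argv values are illustrative):

```python
import sys

# Simulate: python export.py --weights yolov5s.pt --include onnx coreml --nms
sys.argv = ["export.py", "--weights", "yolov5s.pt", "--include", "onnx", "coreml", "--nms"]
opt = parse_opt()       # parses the flags above and echoes them via print_args()
print(opt.include)      # ['onnx', 'coreml']
print(opt.nms)          # True
```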
- def main(opt):
-     """Run the export for each weights file parsed from the command line."""
-     for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
-         run(**vars(opt))
-
-
- if __name__ == "__main__":
-     opt = parse_opt()
-     main(opt)
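The `for opt.weights in ...` idiom in `main()` rebinds `opt.weights` to each path in turn, so `run(**vars(opt))` always receives a single weights file. A sketch with hypothetical checkpoints:

```python
opt = parse_opt(known=True)                  # tolerate unknown args, e.g. inside a notebook
opt.weights = ["yolov5n.pt", "yolov5s.pt"]   # hypothetical list of checkpoints
main(opt)                                    # runs the full export once per checkpoint
```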