dgenerate-ultralytics-headless 8.3.137__py3-none-any.whl → 8.3.224__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/METADATA +41 -34
- dgenerate_ultralytics_headless-8.3.224.dist-info/RECORD +285 -0
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/WHEEL +1 -1
- tests/__init__.py +7 -6
- tests/conftest.py +15 -39
- tests/test_cli.py +17 -17
- tests/test_cuda.py +17 -8
- tests/test_engine.py +36 -10
- tests/test_exports.py +98 -37
- tests/test_integrations.py +12 -15
- tests/test_python.py +126 -82
- tests/test_solutions.py +319 -135
- ultralytics/__init__.py +27 -9
- ultralytics/cfg/__init__.py +83 -87
- ultralytics/cfg/datasets/Argoverse.yaml +4 -4
- ultralytics/cfg/datasets/DOTAv1.5.yaml +2 -2
- ultralytics/cfg/datasets/DOTAv1.yaml +2 -2
- ultralytics/cfg/datasets/GlobalWheat2020.yaml +2 -2
- ultralytics/cfg/datasets/HomeObjects-3K.yaml +4 -5
- ultralytics/cfg/datasets/ImageNet.yaml +3 -3
- ultralytics/cfg/datasets/Objects365.yaml +24 -20
- ultralytics/cfg/datasets/SKU-110K.yaml +9 -9
- ultralytics/cfg/datasets/VOC.yaml +10 -13
- ultralytics/cfg/datasets/VisDrone.yaml +43 -33
- ultralytics/cfg/datasets/african-wildlife.yaml +5 -5
- ultralytics/cfg/datasets/brain-tumor.yaml +4 -5
- ultralytics/cfg/datasets/carparts-seg.yaml +5 -5
- ultralytics/cfg/datasets/coco-pose.yaml +26 -4
- ultralytics/cfg/datasets/coco.yaml +4 -4
- ultralytics/cfg/datasets/coco128-seg.yaml +2 -2
- ultralytics/cfg/datasets/coco128.yaml +2 -2
- ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
- ultralytics/cfg/datasets/coco8-multispectral.yaml +2 -2
- ultralytics/cfg/datasets/coco8-pose.yaml +23 -2
- ultralytics/cfg/datasets/coco8-seg.yaml +2 -2
- ultralytics/cfg/datasets/coco8.yaml +2 -2
- ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
- ultralytics/cfg/datasets/crack-seg.yaml +5 -5
- ultralytics/cfg/datasets/dog-pose.yaml +32 -4
- ultralytics/cfg/datasets/dota8-multispectral.yaml +2 -2
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +29 -4
- ultralytics/cfg/datasets/lvis.yaml +9 -9
- ultralytics/cfg/datasets/medical-pills.yaml +4 -5
- ultralytics/cfg/datasets/open-images-v7.yaml +7 -10
- ultralytics/cfg/datasets/package-seg.yaml +5 -5
- ultralytics/cfg/datasets/signature.yaml +4 -4
- ultralytics/cfg/datasets/tiger-pose.yaml +20 -4
- ultralytics/cfg/datasets/xView.yaml +5 -5
- ultralytics/cfg/default.yaml +96 -93
- ultralytics/cfg/trackers/botsort.yaml +16 -17
- ultralytics/cfg/trackers/bytetrack.yaml +9 -11
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +12 -12
- ultralytics/data/augment.py +531 -564
- ultralytics/data/base.py +76 -81
- ultralytics/data/build.py +206 -42
- ultralytics/data/converter.py +179 -78
- ultralytics/data/dataset.py +121 -121
- ultralytics/data/loaders.py +114 -91
- ultralytics/data/split.py +28 -15
- ultralytics/data/split_dota.py +67 -48
- ultralytics/data/utils.py +110 -89
- ultralytics/engine/exporter.py +422 -460
- ultralytics/engine/model.py +224 -252
- ultralytics/engine/predictor.py +94 -89
- ultralytics/engine/results.py +345 -595
- ultralytics/engine/trainer.py +231 -134
- ultralytics/engine/tuner.py +279 -73
- ultralytics/engine/validator.py +53 -46
- ultralytics/hub/__init__.py +26 -28
- ultralytics/hub/auth.py +30 -16
- ultralytics/hub/google/__init__.py +34 -36
- ultralytics/hub/session.py +53 -77
- ultralytics/hub/utils.py +23 -109
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +36 -18
- ultralytics/models/fastsam/predict.py +33 -44
- ultralytics/models/fastsam/utils.py +4 -5
- ultralytics/models/fastsam/val.py +12 -14
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +16 -20
- ultralytics/models/nas/predict.py +12 -14
- ultralytics/models/nas/val.py +4 -5
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +9 -9
- ultralytics/models/rtdetr/predict.py +22 -17
- ultralytics/models/rtdetr/train.py +20 -16
- ultralytics/models/rtdetr/val.py +79 -59
- ultralytics/models/sam/__init__.py +8 -2
- ultralytics/models/sam/amg.py +53 -38
- ultralytics/models/sam/build.py +29 -31
- ultralytics/models/sam/model.py +33 -38
- ultralytics/models/sam/modules/blocks.py +159 -182
- ultralytics/models/sam/modules/decoders.py +38 -47
- ultralytics/models/sam/modules/encoders.py +114 -133
- ultralytics/models/sam/modules/memory_attention.py +38 -31
- ultralytics/models/sam/modules/sam.py +114 -93
- ultralytics/models/sam/modules/tiny_encoder.py +268 -291
- ultralytics/models/sam/modules/transformer.py +59 -66
- ultralytics/models/sam/modules/utils.py +55 -72
- ultralytics/models/sam/predict.py +745 -341
- ultralytics/models/utils/loss.py +118 -107
- ultralytics/models/utils/ops.py +118 -71
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +28 -26
- ultralytics/models/yolo/classify/train.py +50 -81
- ultralytics/models/yolo/classify/val.py +68 -61
- ultralytics/models/yolo/detect/predict.py +12 -15
- ultralytics/models/yolo/detect/train.py +56 -46
- ultralytics/models/yolo/detect/val.py +279 -223
- ultralytics/models/yolo/model.py +167 -86
- ultralytics/models/yolo/obb/predict.py +7 -11
- ultralytics/models/yolo/obb/train.py +23 -25
- ultralytics/models/yolo/obb/val.py +107 -99
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +12 -14
- ultralytics/models/yolo/pose/train.py +31 -69
- ultralytics/models/yolo/pose/val.py +119 -254
- ultralytics/models/yolo/segment/predict.py +21 -25
- ultralytics/models/yolo/segment/train.py +12 -66
- ultralytics/models/yolo/segment/val.py +126 -305
- ultralytics/models/yolo/world/train.py +53 -45
- ultralytics/models/yolo/world/train_world.py +51 -32
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +30 -37
- ultralytics/models/yolo/yoloe/train.py +89 -71
- ultralytics/models/yolo/yoloe/train_seg.py +15 -17
- ultralytics/models/yolo/yoloe/val.py +56 -41
- ultralytics/nn/__init__.py +9 -11
- ultralytics/nn/autobackend.py +179 -107
- ultralytics/nn/modules/__init__.py +67 -67
- ultralytics/nn/modules/activation.py +8 -7
- ultralytics/nn/modules/block.py +302 -323
- ultralytics/nn/modules/conv.py +61 -104
- ultralytics/nn/modules/head.py +488 -186
- ultralytics/nn/modules/transformer.py +183 -123
- ultralytics/nn/modules/utils.py +15 -20
- ultralytics/nn/tasks.py +327 -203
- ultralytics/nn/text_model.py +81 -65
- ultralytics/py.typed +1 -0
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +19 -27
- ultralytics/solutions/analytics.py +36 -26
- ultralytics/solutions/config.py +29 -28
- ultralytics/solutions/distance_calculation.py +23 -24
- ultralytics/solutions/heatmap.py +17 -19
- ultralytics/solutions/instance_segmentation.py +21 -19
- ultralytics/solutions/object_blurrer.py +16 -17
- ultralytics/solutions/object_counter.py +48 -53
- ultralytics/solutions/object_cropper.py +22 -16
- ultralytics/solutions/parking_management.py +61 -58
- ultralytics/solutions/queue_management.py +19 -19
- ultralytics/solutions/region_counter.py +63 -50
- ultralytics/solutions/security_alarm.py +22 -25
- ultralytics/solutions/similarity_search.py +107 -60
- ultralytics/solutions/solutions.py +343 -262
- ultralytics/solutions/speed_estimation.py +35 -31
- ultralytics/solutions/streamlit_inference.py +104 -40
- ultralytics/solutions/templates/similarity-search.html +31 -24
- ultralytics/solutions/trackzone.py +24 -24
- ultralytics/solutions/vision_eye.py +11 -12
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +18 -27
- ultralytics/trackers/bot_sort.py +48 -39
- ultralytics/trackers/byte_tracker.py +94 -94
- ultralytics/trackers/track.py +7 -16
- ultralytics/trackers/utils/gmc.py +37 -69
- ultralytics/trackers/utils/kalman_filter.py +68 -76
- ultralytics/trackers/utils/matching.py +13 -17
- ultralytics/utils/__init__.py +251 -275
- ultralytics/utils/autobatch.py +19 -7
- ultralytics/utils/autodevice.py +68 -38
- ultralytics/utils/benchmarks.py +169 -130
- ultralytics/utils/callbacks/base.py +12 -13
- ultralytics/utils/callbacks/clearml.py +14 -15
- ultralytics/utils/callbacks/comet.py +139 -66
- ultralytics/utils/callbacks/dvc.py +19 -27
- ultralytics/utils/callbacks/hub.py +8 -6
- ultralytics/utils/callbacks/mlflow.py +6 -10
- ultralytics/utils/callbacks/neptune.py +11 -19
- ultralytics/utils/callbacks/platform.py +73 -0
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +9 -12
- ultralytics/utils/callbacks/wb.py +33 -30
- ultralytics/utils/checks.py +163 -114
- ultralytics/utils/cpu.py +89 -0
- ultralytics/utils/dist.py +24 -20
- ultralytics/utils/downloads.py +176 -146
- ultralytics/utils/errors.py +11 -13
- ultralytics/utils/events.py +113 -0
- ultralytics/utils/export/__init__.py +7 -0
- ultralytics/utils/{export.py → export/engine.py} +81 -63
- ultralytics/utils/export/imx.py +294 -0
- ultralytics/utils/export/tensorflow.py +217 -0
- ultralytics/utils/files.py +33 -36
- ultralytics/utils/git.py +137 -0
- ultralytics/utils/instance.py +105 -120
- ultralytics/utils/logger.py +404 -0
- ultralytics/utils/loss.py +99 -61
- ultralytics/utils/metrics.py +649 -478
- ultralytics/utils/nms.py +337 -0
- ultralytics/utils/ops.py +263 -451
- ultralytics/utils/patches.py +70 -31
- ultralytics/utils/plotting.py +253 -223
- ultralytics/utils/tal.py +48 -61
- ultralytics/utils/torch_utils.py +244 -251
- ultralytics/utils/tqdm.py +438 -0
- ultralytics/utils/triton.py +22 -23
- ultralytics/utils/tuner.py +11 -10
- dgenerate_ultralytics_headless-8.3.137.dist-info/RECORD +0 -272
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/top_level.txt +0 -0

--- /dev/null
+++ b/ultralytics/utils/events.py
@@ -0,0 +1,113 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import json
+import random
+import time
+from pathlib import Path
+from threading import Thread
+from urllib.request import Request, urlopen
+
+from ultralytics import SETTINGS, __version__
+from ultralytics.utils import ARGV, ENVIRONMENT, GIT, IS_PIP_PACKAGE, ONLINE, PYTHON_VERSION, RANK, TESTS_RUNNING
+from ultralytics.utils.downloads import GITHUB_ASSETS_NAMES
+from ultralytics.utils.torch_utils import get_cpu_info
+
+
+def _post(url: str, data: dict, timeout: float = 5.0) -> None:
+    """Send a one-shot JSON POST request."""
+    try:
+        body = json.dumps(data, separators=(",", ":")).encode()  # compact JSON
+        req = Request(url, data=body, headers={"Content-Type": "application/json"})
+        urlopen(req, timeout=timeout).close()
+    except Exception:
+        pass
+
+
+class Events:
+    """Collect and send anonymous usage analytics with rate-limiting.
+
+    Event collection and transmission are enabled when sync is enabled in settings, the current process is rank -1 or 0,
+    tests are not running, the environment is online, and the installation source is either pip or the official
+    Ultralytics GitHub repository.
+
+    Attributes:
+        url (str): Measurement Protocol endpoint for receiving anonymous events.
+        events (list[dict]): In-memory queue of event payloads awaiting transmission.
+        rate_limit (float): Minimum time in seconds between POST requests.
+        t (float): Timestamp of the last transmission in seconds since the epoch.
+        metadata (dict): Static metadata describing runtime, installation source, and environment.
+        enabled (bool): Flag indicating whether analytics collection is active.
+
+    Methods:
+        __init__: Initialize the event queue, rate limiter, and runtime metadata.
+        __call__: Queue an event and trigger a non-blocking send when the rate limit elapses.
+    """
+
+    url = "https://www.google-analytics.com/mp/collect?measurement_id=G-X8NCJYTQXM&api_secret=QLQrATrNSwGRFRLE-cbHJw"
+
+    def __init__(self) -> None:
+        """Initialize the Events instance with queue, rate limiter, and environment metadata."""
+        self.events = []  # pending events
+        self.rate_limit = 30.0  # rate limit (seconds)
+        self.t = 0.0  # last send timestamp (seconds)
+        self.metadata = {
+            "cli": Path(ARGV[0]).name == "yolo",
+            "install": "git" if GIT.is_repo else "pip" if IS_PIP_PACKAGE else "other",
+            "python": PYTHON_VERSION.rsplit(".", 1)[0],  # i.e. 3.13
+            "CPU": get_cpu_info(),
+            # "GPU": get_gpu_info(index=0) if cuda else None,
+            "version": __version__,
+            "env": ENVIRONMENT,
+            "session_id": round(random.random() * 1e15),
+            "engagement_time_msec": 1000,
+        }
+        self.enabled = (
+            SETTINGS["sync"]
+            and RANK in {-1, 0}
+            and not TESTS_RUNNING
+            and ONLINE
+            and (IS_PIP_PACKAGE or GIT.origin == "https://github.com/ultralytics/ultralytics.git")
+        )
+
+    def __call__(self, cfg, device=None) -> None:
+        """Queue an event and flush the queue asynchronously when the rate limit elapses.
+
+        Args:
+            cfg (IterableSimpleNamespace): The configuration object containing mode and task information.
+            device (torch.device | str, optional): The device type (e.g., 'cpu', 'cuda').
+        """
+        if not self.enabled:
+            # Events disabled, do nothing
+            return
+
+        # Attempt to enqueue a new event
+        if len(self.events) < 25:  # Queue limited to 25 events to bound memory and traffic
+            params = {
+                **self.metadata,
+                "task": cfg.task,
+                "model": cfg.model if cfg.model in GITHUB_ASSETS_NAMES else "custom",
+                "device": str(device),
+            }
+            if cfg.mode == "export":
+                params["format"] = cfg.format
+            self.events.append({"name": cfg.mode, "params": params})
+
+        # Check rate limit and return early if under limit
+        t = time.time()
+        if (t - self.t) < self.rate_limit:
+            return
+
+        # Over rate limit: send a snapshot of queued events in a background thread
+        payload_events = list(self.events)  # snapshot to avoid race with queue reset
+        Thread(
+            target=_post,
+            args=(self.url, {"client_id": SETTINGS["uuid"], "events": payload_events}),  # SHA-256 anonymized
+            daemon=True,
+        ).start()
+
+        # Reset queue and rate limit timer
+        self.events = []
+        self.t = t
+
+
+events = Events()
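
The module ends by creating a single `events = Events()` instance, so a caller only invokes the singleton with a run configuration and a device; payloads accumulate in the queue and a background POST fires once the 30-second rate limit elapses. Below is a minimal usage sketch (not part of the diff), assuming a config object with the attributes `__call__` reads; the real call sites, such as trainer or predictor callbacks, are not shown in this diff:

from types import SimpleNamespace

from ultralytics.utils.events import events

# Any object exposing .mode, .task, .model (and .format for export mode) works here
cfg = SimpleNamespace(mode="train", task="detect", model="yolo11n.pt", format=None)
events(cfg, device="cuda:0")  # queued; sent asynchronously once the 30 s rate limit has elapsed

Nothing is queued or sent when `SETTINGS["sync"]` is disabled, when tests are running, when offline, or on non-zero ranks.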

--- /dev/null
+++ b/ultralytics/utils/export/__init__.py
@@ -0,0 +1,7 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from .engine import onnx2engine, torch2onnx
+from .imx import torch2imx
+from .tensorflow import keras2pb, onnx2saved_model, pb2tfjs, tflite2edgetpu
+
+__all__ = ["keras2pb", "onnx2engine", "onnx2saved_model", "pb2tfjs", "tflite2edgetpu", "torch2imx", "torch2onnx"]
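
The former single-module `ultralytics/utils/export.py` is now a package, and this `__init__.py` defines its public surface. Per the `__all__` above, the converters can be imported directly from the package, for example:

from ultralytics.utils.export import onnx2engine, torch2imx, torch2onnx
from ultralytics.utils.export import keras2pb, onnx2saved_model, pb2tfjs, tflite2edgetpu  # TensorFlow-family converters

Code that previously imported `export_onnx`/`export_engine` from `ultralytics.utils.export` has to move to the renamed `torch2onnx`/`onnx2engine`, as the next diff shows.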

--- a/ultralytics/utils/export.py
+++ b/ultralytics/utils/export/engine.py
@@ -1,37 +1,40 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from __future__ import annotations
+
 import json
 from pathlib import Path
 
 import torch
 
 from ultralytics.utils import IS_JETSON, LOGGER
+from ultralytics.utils.torch_utils import TORCH_2_4
 
 
-def export_onnx(
-    torch_model,
-    im,
-    onnx_file,
-    opset=14,
-    input_names=["images"],
-    output_names=["output0"],
-    dynamic=False,
-):
-    """
-    Exports a PyTorch model to ONNX format.
+def torch2onnx(
+    torch_model: torch.nn.Module,
+    im: torch.Tensor,
+    onnx_file: str,
+    opset: int = 14,
+    input_names: list[str] = ["images"],
+    output_names: list[str] = ["output0"],
+    dynamic: bool | dict = False,
+) -> None:
+    """Export a PyTorch model to ONNX format.
 
     Args:
         torch_model (torch.nn.Module): The PyTorch model to export.
         im (torch.Tensor): Example input tensor for the model.
         onnx_file (str): Path to save the exported ONNX file.
         opset (int): ONNX opset version to use for export.
-        input_names (list): List of input tensor names.
-        output_names (list): List of output tensor names.
-        dynamic (bool | dict, optional): Whether to enable dynamic axes.
+        input_names (list[str]): List of input tensor names.
+        output_names (list[str]): List of output tensor names.
+        dynamic (bool | dict, optional): Whether to enable dynamic axes.
 
     Notes:
-
+        Setting `do_constant_folding=True` may cause issues with DNN inference for torch>=1.12.
     """
+    kwargs = {"dynamo": False} if TORCH_2_4 else {}
     torch.onnx.export(
         torch_model,
         im,
@@ -42,50 +45,50 @@ def export_onnx(
         input_names=input_names,
         output_names=output_names,
         dynamic_axes=dynamic or None,
+        **kwargs,
     )
 
 
-def export_engine(
-    onnx_file,
-    engine_file=None,
-    workspace=None,
-    half=False,
-    int8=False,
-    dynamic=False,
-    shape=(1, 3, 640, 640),
-    dla=None,
+def onnx2engine(
+    onnx_file: str,
+    engine_file: str | None = None,
+    workspace: int | None = None,
+    half: bool = False,
+    int8: bool = False,
+    dynamic: bool = False,
+    shape: tuple[int, int, int, int] = (1, 3, 640, 640),
+    dla: int | None = None,
     dataset=None,
-    metadata=None,
-    verbose=False,
-    prefix="",
-):
-    """
-    Exports a YOLO model to TensorRT engine format.
+    metadata: dict | None = None,
+    verbose: bool = False,
+    prefix: str = "",
+) -> None:
+    """Export a YOLO model to TensorRT engine format.
 
     Args:
         onnx_file (str): Path to the ONNX file to be converted.
         engine_file (str, optional): Path to save the generated TensorRT engine file.
-        workspace (int, optional): Workspace size in GB for TensorRT.
-        half (bool, optional): Enable FP16 precision.
-        int8 (bool, optional): Enable INT8 precision.
-        dynamic (bool, optional): Enable dynamic input shapes.
-        shape (tuple, optional): Input shape (batch, channels, height, width).
-        dla (int, optional): DLA core to use (Jetson devices only).
-        dataset (ultralytics.data.build.InfiniteDataLoader, optional): Dataset for INT8 calibration.
-        metadata (dict, optional): Metadata to include in the engine file.
-        verbose (bool, optional): Enable verbose logging.
-        prefix (str, optional): Prefix for log messages.
+        workspace (int, optional): Workspace size in GB for TensorRT.
+        half (bool, optional): Enable FP16 precision.
+        int8 (bool, optional): Enable INT8 precision.
+        dynamic (bool, optional): Enable dynamic input shapes.
+        shape (tuple[int, int, int, int], optional): Input shape (batch, channels, height, width).
+        dla (int, optional): DLA core to use (Jetson devices only).
+        dataset (ultralytics.data.build.InfiniteDataLoader, optional): Dataset for INT8 calibration.
+        metadata (dict, optional): Metadata to include in the engine file.
+        verbose (bool, optional): Enable verbose logging.
+        prefix (str, optional): Prefix for log messages.
 
     Raises:
         ValueError: If DLA is enabled on non-Jetson devices or required precision is not set.
         RuntimeError: If the ONNX file cannot be parsed.
 
     Notes:
-
-
-
+        TensorRT version compatibility is handled for workspace size and engine building.
+        INT8 calibration requires a dataset and generates a calibration cache.
+        Metadata is serialized and written to the engine file if provided.
     """
-    import tensorrt as trt
+    import tensorrt as trt
 
     engine_file = engine_file or Path(onnx_file).with_suffix(".engine")
 
@@ -96,12 +99,12 @@ def export_engine(
     # Engine builder
     builder = trt.Builder(logger)
     config = builder.create_builder_config()
-
-    is_trt10 = int(trt.__version__.split(".")[0]) >= 10  # is TensorRT >= 10
-    if is_trt10 and
-        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE,
-    elif
-        config.max_workspace_size =
+    workspace_bytes = int((workspace or 0) * (1 << 30))
+    is_trt10 = int(trt.__version__.split(".", 1)[0]) >= 10  # is TensorRT >= 10
+    if is_trt10 and workspace_bytes > 0:
+        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace_bytes)
+    elif workspace_bytes > 0:  # TensorRT versions 7, 8
+        config.max_workspace_size = workspace_bytes
     flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
     network = builder.create_network(flag)
     half = builder.platform_has_fast_fp16 and half
@@ -134,29 +137,39 @@ def export_engine(
         LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
 
     if dynamic:
-        if shape[0] <= 1:
-            LOGGER.warning(f"{prefix} 'dynamic=True' model requires max batch size, i.e. 'batch=16'")
         profile = builder.create_optimization_profile()
         min_shape = (1, shape[1], 32, 32)  # minimum input shape
         max_shape = (*shape[:2], *(int(max(2, workspace or 2) * d) for d in shape[2:]))  # max input shape
         for inp in inputs:
             profile.set_shape(inp.name, min=min_shape, opt=shape, max=max_shape)
         config.add_optimization_profile(profile)
+        if int8:
+            config.set_calibration_profile(profile)
 
     LOGGER.info(f"{prefix} building {'INT8' if int8 else 'FP' + ('16' if half else '32')} engine as {engine_file}")
    if int8:
         config.set_flag(trt.BuilderFlag.INT8)
-        config.set_calibration_profile(profile)
         config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED
 
         class EngineCalibrator(trt.IInt8Calibrator):
-            """
-
+            """Custom INT8 calibrator for TensorRT engine optimization.
+
+            This calibrator provides the necessary interface for TensorRT to perform INT8 quantization calibration using
+            a dataset. It handles batch generation, caching, and calibration algorithm selection.
 
-
-            dataset
+            Attributes:
+                dataset: Dataset for calibration.
+                data_iter: Iterator over the calibration dataset.
+                algo (trt.CalibrationAlgoType): Calibration algorithm type.
                 batch (int): Batch size for calibration.
-                cache (
+                cache (Path): Path to save the calibration cache.
+
+            Methods:
+                get_algorithm: Get the calibration algorithm to use.
+                get_batch_size: Get the batch size to use for calibration.
+                get_batch: Get the next batch to use for calibration.
+                read_calibration_cache: Use existing cache instead of calibrating again.
+                write_calibration_cache: Write calibration cache to disk.
             """
 
             def __init__(
@@ -164,10 +177,15 @@ def export_engine(
                 dataset,  # ultralytics.data.build.InfiniteDataLoader
                 cache: str = "",
             ) -> None:
+                """Initialize the INT8 calibrator with dataset and cache path."""
                 trt.IInt8Calibrator.__init__(self)
                 self.dataset = dataset
                 self.data_iter = iter(dataset)
-                self.algo =
+                self.algo = (
+                    trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2  # DLA quantization needs ENTROPY_CALIBRATION_2
+                    if dla is not None
+                    else trt.CalibrationAlgoType.MINMAX_CALIBRATION
+                )
                 self.batch = dataset.batch_size
                 self.cache = Path(cache)
 
@@ -179,22 +197,22 @@ def export_engine(
                 """Get the batch size to use for calibration."""
                 return self.batch or 1
 
-            def get_batch(self, names) -> list:
+            def get_batch(self, names) -> list[int] | None:
                 """Get the next batch to use for calibration, as a list of device memory pointers."""
                 try:
                     im0s = next(self.data_iter)["img"] / 255.0
                     im0s = im0s.to("cuda") if im0s.device.type == "cpu" else im0s
                     return [int(im0s.data_ptr())]
                 except StopIteration:
-                    # Return
+                    # Return None to signal to TensorRT there is no calibration data remaining
                     return None
 
-            def read_calibration_cache(self) -> bytes:
+            def read_calibration_cache(self) -> bytes | None:
                 """Use existing cache instead of calibrating again, otherwise, implicitly return None."""
                 if self.cache.exists() and self.cache.suffix == ".cache":
                     return self.cache.read_bytes()
 
-            def write_calibration_cache(self, cache) -> None:
+            def write_calibration_cache(self, cache: bytes) -> None:
                 """Write calibration cache to disk."""
                 _ = self.cache.write_bytes(cache)
 
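
A hedged usage sketch of the two renamed helpers, using only the signatures shown above; the toy module and file names are illustrative, and the commented `onnx2engine` step additionally requires TensorRT to be installed:

import torch

from ultralytics.utils.export import onnx2engine, torch2onnx

model = torch.nn.Sequential(torch.nn.Conv2d(3, 16, 3, padding=1), torch.nn.SiLU()).eval()
im = torch.zeros(1, 3, 640, 640)  # example input used to trace the graph
torch2onnx(model, im, "model.onnx", opset=14, dynamic=False)
# onnx2engine("model.onnx", engine_file="model.engine", workspace=4, half=True)  # TensorRT-only step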

--- /dev/null
+++ b/ultralytics/utils/export/imx.py
@@ -0,0 +1,294 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from __future__ import annotations
+
+import subprocess
+import types
+from pathlib import Path
+
+import numpy as np
+import torch
+
+from ultralytics.nn.modules import Detect, Pose
+from ultralytics.utils import LOGGER
+from ultralytics.utils.tal import make_anchors
+from ultralytics.utils.torch_utils import copy_attr
+
+# Configuration for Model Compression Toolkit (MCT) quantization
+MCT_CONFIG = {
+    "YOLO11": {
+        "detect": {
+            "layer_names": ["sub", "mul_2", "add_14", "cat_21"],
+            "weights_memory": 2585350.2439,
+            "n_layers": 238,
+        },
+        "pose": {
+            "layer_names": ["sub", "mul_2", "add_14", "cat_22", "cat_23", "mul_4", "add_15"],
+            "weights_memory": 2437771.67,
+            "n_layers": 257,
+        },
+        "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 112},
+    },
+    "YOLOv8": {
+        "detect": {"layer_names": ["sub", "mul", "add_6", "cat_17"], "weights_memory": 2550540.8, "n_layers": 168},
+        "pose": {
+            "layer_names": ["add_7", "mul_2", "cat_19", "mul", "sub", "add_6", "cat_18"],
+            "weights_memory": 2482451.85,
+            "n_layers": 187,
+        },
+        "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 73},
+    },
+}
+
+
+class FXModel(torch.nn.Module):
+    """A custom model class for torch.fx compatibility.
+
+    This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph
+    manipulation. It copies attributes from an existing model and explicitly sets the model attribute to ensure proper
+    copying.
+
+    Attributes:
+        model (nn.Module): The original model's layers.
+    """
+
+    def __init__(self, model, imgsz=(640, 640)):
+        """Initialize the FXModel.
+
+        Args:
+            model (nn.Module): The original model to wrap for torch.fx compatibility.
+            imgsz (tuple[int, int]): The input image size (height, width). Default is (640, 640).
+        """
+        super().__init__()
+        copy_attr(self, model)
+        # Explicitly set `model` since `copy_attr` somehow does not copy it.
+        self.model = model.model
+        self.imgsz = imgsz
+
+    def forward(self, x):
+        """Forward pass through the model.
+
+        This method performs the forward pass through the model, handling the dependencies between layers and saving
+        intermediate outputs.
+
+        Args:
+            x (torch.Tensor): The input tensor to the model.
+
+        Returns:
+            (torch.Tensor): The output tensor from the model.
+        """
+        y = []  # outputs
+        for m in self.model:
+            if m.f != -1:  # if not from previous layer
+                # from earlier layers
+                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
+            if isinstance(m, Detect):
+                m._inference = types.MethodType(_inference, m)  # bind method to Detect
+                m.anchors, m.strides = (
+                    x.transpose(0, 1)
+                    for x in make_anchors(
+                        torch.cat([s / m.stride.unsqueeze(-1) for s in self.imgsz], dim=1), m.stride, 0.5
+                    )
+                )
+            if type(m) is Pose:
+                m.forward = types.MethodType(pose_forward, m)  # bind method to Detect
+            x = m(x)  # run
+            y.append(x)  # save output
+        return x
+
+
+def _inference(self, x: list[torch.Tensor]) -> tuple[torch.Tensor]:
+    """Decode boxes and cls scores for imx object detection."""
+    x_cat = torch.cat([xi.view(x[0].shape[0], self.no, -1) for xi in x], 2)
+    box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
+    dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
+    return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
+
+
+def pose_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+    """Forward pass for imx pose estimation, including keypoint decoding."""
+    bs = x[0].shape[0]  # batch size
+    kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1)  # (bs, 17*3, h*w)
+    x = Detect.forward(self, x)
+    pred_kpt = self.kpts_decode(bs, kpt)
+    return (*x, pred_kpt.permute(0, 2, 1))
+
+
+class NMSWrapper(torch.nn.Module):
+    """Wrap PyTorch Module with multiclass_nms layer from sony_custom_layers."""
+
+    def __init__(
+        self,
+        model: torch.nn.Module,
+        score_threshold: float = 0.001,
+        iou_threshold: float = 0.7,
+        max_detections: int = 300,
+        task: str = "detect",
+    ):
+        """Initialize NMSWrapper with PyTorch Module and NMS parameters.
+
+        Args:
+            model (torch.nn.Module): Model instance.
+            score_threshold (float): Score threshold for non-maximum suppression.
+            iou_threshold (float): Intersection over union threshold for non-maximum suppression.
+            max_detections (int): The number of detections to return.
+            task (str): Task type, either 'detect' or 'pose'.
+        """
+        super().__init__()
+        self.model = model
+        self.score_threshold = score_threshold
+        self.iou_threshold = iou_threshold
+        self.max_detections = max_detections
+        self.task = task
+
+    def forward(self, images):
+        """Forward pass with model inference and NMS post-processing."""
+        from sony_custom_layers.pytorch import multiclass_nms_with_indices
+
+        # model inference
+        outputs = self.model(images)
+        boxes, scores = outputs[0], outputs[1]
+        nms_outputs = multiclass_nms_with_indices(
+            boxes=boxes,
+            scores=scores,
+            score_threshold=self.score_threshold,
+            iou_threshold=self.iou_threshold,
+            max_detections=self.max_detections,
+        )
+        if self.task == "pose":
+            kpts = outputs[2]  # (bs, max_detections, kpts 17*3)
+            out_kpts = torch.gather(kpts, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, kpts.size(-1)))
+            return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_kpts
+        return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, nms_outputs.n_valid
+
+
+def torch2imx(
+    model: torch.nn.Module,
+    file: Path | str,
+    conf: float,
+    iou: float,
+    max_det: int,
+    metadata: dict | None = None,
+    gptq: bool = False,
+    dataset=None,
+    prefix: str = "",
+):
+    """Export YOLO model to IMX format for deployment on Sony IMX500 devices.
+
+    This function quantizes a YOLO model using Model Compression Toolkit (MCT) and exports it to IMX format compatible
+    with Sony IMX500 edge devices. It supports both YOLOv8n and YOLO11n models for detection and pose estimation tasks.
+
+    Args:
+        model (torch.nn.Module): The YOLO model to export. Must be YOLOv8n or YOLO11n.
+        file (Path | str): Output file path for the exported model.
+        conf (float): Confidence threshold for NMS post-processing.
+        iou (float): IoU threshold for NMS post-processing.
+        max_det (int): Maximum number of detections to return.
+        metadata (dict | None, optional): Metadata to embed in the ONNX model. Defaults to None.
+        gptq (bool, optional): Whether to use Gradient-Based Post Training Quantization. If False, uses standard Post
+            Training Quantization. Defaults to False.
+        dataset (optional): Representative dataset for quantization calibration. Defaults to None.
+        prefix (str, optional): Logging prefix string. Defaults to "".
+
+    Returns:
+        f (Path): Path to the exported IMX model directory
+
+    Raises:
+        ValueError: If the model is not a supported YOLOv8n or YOLO11n variant.
+
+    Examples:
+        >>> from ultralytics import YOLO
+        >>> model = YOLO("yolo11n.pt")
+        >>> path, _ = export_imx(model, "model.imx", conf=0.25, iou=0.45, max_det=300)
+
+    Notes:
+        - Requires model_compression_toolkit, onnx, edgemdt_tpc, and sony_custom_layers packages
+        - Only supports YOLOv8n and YOLO11n models (detection and pose tasks)
+        - Output includes quantized ONNX model, IMX binary, and labels.txt file
+    """
+    import model_compression_toolkit as mct
+    import onnx
+    from edgemdt_tpc import get_target_platform_capabilities
+
+    LOGGER.info(f"\n{prefix} starting export with model_compression_toolkit {mct.__version__}...")
+
+    def representative_dataset_gen(dataloader=dataset):
+        for batch in dataloader:
+            img = batch["img"]
+            img = img / 255.0
+            yield [img]
+
+    tpc = get_target_platform_capabilities(tpc_version="4.0", device_type="imx500")
+
+    bit_cfg = mct.core.BitWidthConfig()
+    mct_config = MCT_CONFIG["YOLO11" if "C2PSA" in model.__str__() else "YOLOv8"][model.task]
+
+    # Check if the model has the expected number of layers
+    if len(list(model.modules())) != mct_config["n_layers"]:
+        raise ValueError("IMX export only supported for YOLOv8n and YOLO11n models.")
+
+    for layer_name in mct_config["layer_names"]:
+        bit_cfg.set_manual_activation_bit_width([mct.core.common.network_editors.NodeNameFilter(layer_name)], 16)
+
+    config = mct.core.CoreConfig(
+        mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
+        quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
+        bit_width_config=bit_cfg,
+    )
+
+    resource_utilization = mct.core.ResourceUtilization(weights_memory=mct_config["weights_memory"])
+
+    quant_model = (
+        mct.gptq.pytorch_gradient_post_training_quantization(  # Perform Gradient-Based Post Training Quantization
+            model=model,
+            representative_data_gen=representative_dataset_gen,
+            target_resource_utilization=resource_utilization,
+            gptq_config=mct.gptq.get_pytorch_gptq_config(
+                n_epochs=1000, use_hessian_based_weights=False, use_hessian_sample_attention=False
+            ),
+            core_config=config,
+            target_platform_capabilities=tpc,
+        )[0]
+        if gptq
+        else mct.ptq.pytorch_post_training_quantization(  # Perform post training quantization
+            in_module=model,
+            representative_data_gen=representative_dataset_gen,
+            target_resource_utilization=resource_utilization,
+            core_config=config,
+            target_platform_capabilities=tpc,
+        )[0]
+    )
+
+    if model.task != "classify":
+        quant_model = NMSWrapper(
+            model=quant_model,
+            score_threshold=conf or 0.001,
+            iou_threshold=iou,
+            max_detections=max_det,
+            task=model.task,
+        )
+
+    f = Path(str(file).replace(file.suffix, "_imx_model"))
+    f.mkdir(exist_ok=True)
+    onnx_model = f / Path(str(file.name).replace(file.suffix, "_imx.onnx"))  # js dir
+    mct.exporter.pytorch_export_model(
+        model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
+    )
+
+    model_onnx = onnx.load(onnx_model)  # load onnx model
+    for k, v in metadata.items():
+        meta = model_onnx.metadata_props.add()
+        meta.key, meta.value = k, str(v)
+
+    onnx.save(model_onnx, onnx_model)
+
+    subprocess.run(
+        ["imxconv-pt", "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
+        check=True,
+    )
+
+    # Needed for imx models.
+    with open(f / "labels.txt", "w", encoding="utf-8") as file:
+        file.writelines([f"{name}\n" for _, name in model.names.items()])
+
+    return f
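
In practice this converter is reached through the exporter rather than by calling `torch2imx` directly. A hedged sketch of that entry point, assuming the standard Ultralytics export workflow (the `format="imx"` name and the calibration `data` argument are not shown in this diff, and the Sony MCT toolchain plus the `imxconv-pt` CLI must be installed):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # only YOLOv8n/YOLO11n variants pass the n_layers check above
model.export(format="imx", data="coco8.yaml")  # runs MCT quantization, imxconv-pt conversion, writes labels.txt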