ultralytics 8.3.198__py3-none-any.whl → 8.3.200__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_cuda.py +2 -3
- ultralytics/__init__.py +22 -9
- ultralytics/cfg/default.yaml +1 -1
- ultralytics/engine/exporter.py +19 -167
- ultralytics/engine/trainer.py +31 -28
- ultralytics/engine/tuner.py +2 -1
- ultralytics/nn/modules/head.py +1 -5
- ultralytics/nn/tasks.py +1 -1
- ultralytics/utils/benchmarks.py +1 -1
- ultralytics/utils/dist.py +11 -3
- ultralytics/utils/{export.py → export/__init__.py} +9 -7
- ultralytics/utils/export/imx.py +289 -0
- ultralytics/utils/plotting.py +5 -1
- ultralytics/utils/torch_utils.py +5 -49
- {ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/METADATA +1 -1
- {ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/RECORD +20 -19
- {ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/licenses/LICENSE +0 -0
- {ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/top_level.txt +0 -0
tests/test_cuda.py
CHANGED
@@ -68,7 +68,7 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
             half=half,
             batch=batch,
             simplify=simplify,
-            nms=nms
+            nms=nms,
             device=DEVICES[0],
         )
         YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, device=DEVICES[0])  # exported model inference
@@ -76,7 +76,6 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
 @pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 @pytest.mark.parametrize(
     "task, dynamic, int8, half, batch",
@@ -164,7 +163,7 @@ def test_autobatch():
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_utils_benchmarks():
     """Profile YOLO models for performance benchmarks."""
     from ultralytics.utils.benchmarks import ProfileModels
ultralytics/__init__.py
CHANGED
@@ -1,30 +1,43 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.198"
+__version__ = "8.3.200"
 
+import importlib
 import os
 
 # Set ENV variables (place before imports)
 if not os.environ.get("OMP_NUM_THREADS"):
     os.environ["OMP_NUM_THREADS"] = "1"  # default for reduced CPU utilization during training
 
-from ultralytics.models import NAS, RTDETR, SAM, YOLO, YOLOE, FastSAM, YOLOWorld
 from ultralytics.utils import ASSETS, SETTINGS
 from ultralytics.utils.checks import check_yolo as checks
 from ultralytics.utils.downloads import download
 
 settings = SETTINGS
+
+MODELS = ("YOLO", "YOLOWorld", "YOLOE", "NAS", "SAM", "FastSAM", "RTDETR")
+
 __all__ = (
     "__version__",
     "ASSETS",
-    "YOLO",
-    "YOLOWorld",
-    "YOLOE",
-    "NAS",
-    "SAM",
-    "FastSAM",
-    "RTDETR",
+    *MODELS,
     "checks",
     "download",
     "settings",
 )
+
+
+def __getattr__(name: str):
+    """Lazy-import model classes on first access."""
+    if name in MODELS:
+        return getattr(importlib.import_module("ultralytics.models"), name)
+    raise AttributeError(f"module {__name__} has no attribute {name}")
+
+
+def __dir__():
+    """Extend dir() to include lazily available model names for IDE autocompletion."""
+    return sorted(set(globals()) | set(MODELS))
+
+
+if __name__ == "__main__":
+    print(__version__)
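The new `__getattr__`/`__dir__` pair is the standard PEP 562 lazy-import pattern: `ultralytics.models` (and the torch-heavy stack behind it) is only imported the first time a model class is actually accessed, so `import ultralytics` itself stays cheap. A minimal standalone sketch of the same pattern, using a hypothetical `mypkg.models` submodule:

# mypkg/__init__.py — sketch of the PEP 562 lazy-import pattern used above
import importlib

MODELS = ("YOLO", "RTDETR")  # names re-exported lazily from mypkg.models

def __getattr__(name: str):
    """Import the heavy submodule only when one of its names is first accessed."""
    if name in MODELS:
        return getattr(importlib.import_module("mypkg.models"), name)
    raise AttributeError(f"module {__name__} has no attribute {name}")

def __dir__():
    """Keep dir() and IDE autocompletion aware of the lazily provided names."""
    return sorted(set(globals()) | set(MODELS))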
ultralytics/cfg/default.yaml
CHANGED
@@ -37,7 +37,7 @@ fraction: 1.0 # (float) fraction of training dataset to use (1.0 = all)
 profile: False # (bool) profile ONNX/TensorRT speeds during training for loggers
 freeze: # (int | list, optional) freeze first N layers (int) or specific layer indices (list)
 multi_scale: False # (bool) multiscale training by varying image size
-compile: False # (bool | str) enable torch.compile() backend='inductor'; True="default", False=off, or "default|reduce-overhead|max-autotune"
+compile: False # (bool | str) enable torch.compile() backend='inductor'; True="default", False=off, or "default|reduce-overhead|max-autotune-no-cudagraphs"
 
 # Segmentation
 overlap_mask: True # (bool) merge instance masks into one mask during training (segment only)
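The `compile` value is passed through to `torch.compile`, so the comment now lists the modes the trainer actually accepts. A hedged usage sketch (the dataset and epoch values are illustrative):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# compile=True maps to mode="default"; a string selects the torch.compile mode directly
model.train(data="coco8.yaml", epochs=1, compile="max-autotune-no-cudagraphs")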
ultralytics/engine/exporter.py
CHANGED
@@ -106,7 +106,7 @@ from ultralytics.utils.checks import (
     is_sudo_available,
 )
 from ultralytics.utils.downloads import attempt_download_asset, get_github_assets, safe_download
-from ultralytics.utils.export import export_engine, export_onnx
+from ultralytics.utils.export import onnx2engine, torch2imx, torch2onnx
 from ultralytics.utils.files import file_size, spaces_in_path
 from ultralytics.utils.metrics import batch_probiou
 from ultralytics.utils.nms import TorchNMS
@@ -284,7 +285,8 @@ class Exporter:
         # Get the closest match if format is invalid
         matches = difflib.get_close_matches(fmt, fmts, n=1, cutoff=0.6)  # 60% similarity required to match
         if not matches:
-            raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
+            msg = "Model is already in PyTorch format." if fmt == "pt" else f"Invalid export format='{fmt}'."
+            raise ValueError(f"{msg} Valid formats are {fmts}")
         LOGGER.warning(f"Invalid export format='{fmt}', updating to format='{matches[0]}'")
         fmt = matches[0]
         flags = [x == fmt for x in fmts]
@@ -408,9 +409,9 @@ class Exporter:
             model = model.fuse()
 
         if imx:
-            from ultralytics.utils.torch_utils import FXModel
+            from ultralytics.utils.export.imx import FXModel
 
-            model = FXModel(model)
+            model = FXModel(model, self.imgsz)
         for m in model.modules():
             if isinstance(m, Classify):
                 m.export = True
@@ -425,15 +426,6 @@ class Exporter:
             elif isinstance(m, C2f) and not is_tf_format:
                 # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
                 m.forward = m.forward_split
-            if isinstance(m, Detect) and imx:
-                from ultralytics.utils.tal import make_anchors
-
-                m.anchors, m.strides = (
-                    x.transpose(0, 1)
-                    for x in make_anchors(
-                        torch.cat([s / m.stride.unsqueeze(-1) for s in self.imgsz], dim=1), m.stride, 0.5
-                    )
-                )
 
         y = None
         for _ in range(2):  # dry runs
@@ -609,7 +601,7 @@ class Exporter:
         self.args.opset = opset_version  # for NMSModel
 
         with arange_patch(self.args):
-            export_onnx(
+            torch2onnx(
                 NMSModel(self.model, self.args) if self.args.nms else self.model,
                 self.im,
                 f,
@@ -922,7 +914,8 @@ class Exporter:
             import tensorrt as trt  # noqa
         except ImportError:
             if LINUX:
-                check_requirements("tensorrt-cu12>7.0.0,!=10.1.0")
+                cuda_version = torch.version.cuda.split(".")[0]
+                check_requirements(f"tensorrt-cu{cuda_version}>7.0.0,!=10.1.0")
             import tensorrt as trt  # noqa
         check_version(trt.__version__, ">=7.0.0", hard=True)
         check_version(trt.__version__, "!=10.1.0", msg="https://github.com/ultralytics/ultralytics/pull/14239")
@@ -931,7 +924,7 @@ class Exporter:
         LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
         assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
         f = self.file.with_suffix(".engine")  # TensorRT engine file
-        export_engine(
+        onnx2engine(
             f_onnx,
             f,
             self.args.workspace,
@@ -1167,7 +1160,6 @@ class Exporter:
     @try_export
     def export_imx(self, prefix=colorstr("IMX:")):
         """Export YOLO model to IMX format."""
-        gptq = False
         assert LINUX, (
             "export only supported on Linux. "
             "See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
@@ -1180,13 +1172,6 @@ class Exporter:
         check_requirements("imx500-converter[pt]>=3.16.1")  # Separate requirements for imx500-converter
         check_requirements("mct-quantizers>=1.6.0")  # Separate for compatibility with model-compression-toolkit
 
-        import model_compression_toolkit as mct
-        import onnx
-        from edgemdt_tpc import get_target_platform_capabilities
-        from sony_custom_layers.pytorch import multiclass_nms_with_indices
-
-        LOGGER.info(f"\n{prefix} starting export with model_compression_toolkit {mct.__version__}...")
-
         # Install Java>=17
         try:
             java_output = subprocess.run(["java", "--version"], check=True, capture_output=True).stdout.decode()
@@ -1197,150 +1182,17 @@ class Exporter:
             cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "openjdk-21-jre"]
             subprocess.run(cmd, check=True)
 
-        def representative_dataset_gen(dataloader=self.get_int8_calibration_dataloader(prefix)):
-            for batch in dataloader:
-                img = batch["img"]
-                img = img / 255.0
-                yield [img]
-
-        tpc = get_target_platform_capabilities(tpc_version="4.0", device_type="imx500")
-
-        bit_cfg = mct.core.BitWidthConfig()
-        if "C2PSA" in self.model.__str__():  # YOLO11
-            if self.model.task == "detect":
-                layer_names = ["sub", "mul_2", "add_14", "cat_21"]
-                weights_memory = 2585350.2439
-                n_layers = 238  # 238 layers for fused YOLO11n
-            elif self.model.task == "pose":
-                layer_names = ["sub", "mul_2", "add_14", "cat_22", "cat_23", "mul_4", "add_15"]
-                weights_memory = 2437771.67
-                n_layers = 257  # 257 layers for fused YOLO11n-pose
-        else:  # YOLOv8
-            if self.model.task == "detect":
-                layer_names = ["sub", "mul", "add_6", "cat_17"]
-                weights_memory = 2550540.8
-                n_layers = 168  # 168 layers for fused YOLOv8n
-            elif self.model.task == "pose":
-                layer_names = ["add_7", "mul_2", "cat_19", "mul", "sub", "add_6", "cat_18"]
-                weights_memory = 2482451.85
-                n_layers = 187  # 187 layers for fused YOLO11n-pose
-
-        # Check if the model has the expected number of layers
-        if len(list(self.model.modules())) != n_layers:
-            raise ValueError("IMX export only supported for YOLOv8n and YOLO11n models.")
-
-        for layer_name in layer_names:
-            bit_cfg.set_manual_activation_bit_width([mct.core.common.network_editors.NodeNameFilter(layer_name)], 16)
-
-        config = mct.core.CoreConfig(
-            mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
-            quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
-            bit_width_config=bit_cfg,
-        )
-
-        resource_utilization = mct.core.ResourceUtilization(weights_memory=weights_memory)
-
-        quant_model = (
-            mct.gptq.pytorch_gradient_post_training_quantization(  # Perform Gradient-Based Post Training Quantization
-                model=self.model,
-                representative_data_gen=representative_dataset_gen,
-                target_resource_utilization=resource_utilization,
-                gptq_config=mct.gptq.get_pytorch_gptq_config(
-                    n_epochs=1000, use_hessian_based_weights=False, use_hessian_sample_attention=False
-                ),
-                core_config=config,
-                target_platform_capabilities=tpc,
-            )[0]
-            if gptq
-            else mct.ptq.pytorch_post_training_quantization(  # Perform post training quantization
-                in_module=self.model,
-                representative_data_gen=representative_dataset_gen,
-                target_resource_utilization=resource_utilization,
-                core_config=config,
-                target_platform_capabilities=tpc,
-            )[0]
-        )
-
-        class NMSWrapper(torch.nn.Module):
-            """Wrap PyTorch Module with multiclass_nms layer from sony_custom_layers."""
-
-            def __init__(
-                self,
-                model: torch.nn.Module,
-                score_threshold: float = 0.001,
-                iou_threshold: float = 0.7,
-                max_detections: int = 300,
-                task: str = "detect",
-            ):
-                """
-                Initialize NMSWrapper with PyTorch Module and NMS parameters.
-
-                Args:
-                    model (torch.nn.Module): Model instance.
-                    score_threshold (float): Score threshold for non-maximum suppression.
-                    iou_threshold (float): Intersection over union threshold for non-maximum suppression.
-                    max_detections (int): The number of detections to return.
-                    task (str): Task type, either 'detect' or 'pose'.
-                """
-                super().__init__()
-                self.model = model
-                self.score_threshold = score_threshold
-                self.iou_threshold = iou_threshold
-                self.max_detections = max_detections
-                self.task = task
-
-            def forward(self, images):
-                """Forward pass with model inference and NMS post-processing."""
-                # model inference
-                outputs = self.model(images)
-
-                boxes, scores = outputs[0], outputs[1]
-                nms_outputs = multiclass_nms_with_indices(
-                    boxes=boxes,
-                    scores=scores,
-                    score_threshold=self.score_threshold,
-                    iou_threshold=self.iou_threshold,
-                    max_detections=self.max_detections,
-                )
-                if self.task == "pose":
-                    kpts = outputs[2]  # (bs, max_detections, kpts 17*3)
-                    out_kpts = torch.gather(kpts, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, kpts.size(-1)))
-                    return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_kpts
-                return nms_outputs
-
-        quant_model = NMSWrapper(
-            model=quant_model,
-            score_threshold=self.args.conf or 0.001,
-            iou_threshold=self.args.iou,
-            max_detections=self.args.max_det,
-            task=self.model.task,
-        ).to(self.device)
-
-        f = Path(str(self.file).replace(self.file.suffix, "_imx_model"))
-        f.mkdir(exist_ok=True)
-        onnx_model = f / Path(str(self.file.name).replace(self.file.suffix, "_imx.onnx"))  # js dir
-        mct.exporter.pytorch_export_model(
-            model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
-        )
-
-        model_onnx = onnx.load(onnx_model)  # load onnx model
-        for k, v in self.metadata.items():
-            meta = model_onnx.metadata_props.add()
-            meta.key, meta.value = k, str(v)
-
-        onnx.save(model_onnx, onnx_model)
-
-        subprocess.run(
-            ["imxconv-pt", "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
-            check=True,
+        return torch2imx(
+            self.model,
+            self.file,
+            self.args.conf,
+            self.args.iou,
+            self.args.max_det,
+            metadata=self.metadata,
+            dataset=self.get_int8_calibration_dataloader(prefix),
+            prefix=prefix,
         )
 
-        # Needed for imx models.
-        with open(f / "labels.txt", "w", encoding="utf-8") as file:
-            file.writelines([f"{name}\n" for _, name in self.model.names.items()])
-
-        return f
-
     def _add_tflite_metadata(self, file):
         """Add metadata to *.tflite models per https://ai.google.dev/edge/litert/models/metadata."""
         import zipfile
@@ -1530,7 +1382,7 @@ class NMSModel(torch.nn.Module):
         for i in range(bs):
             box, cls, score, extra = boxes[i], classes[i], scores[i], extras[i]
             mask = score > self.args.conf
-            if self.is_tf:
+            if self.is_tf or (self.args.format == "onnx" and self.obb):
                 # TFLite GatherND error if mask is empty
                 score *= mask
                 # Explicit length otherwise reshape error, hardcoded to `self.args.max_det * 5`
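The public export API is unchanged by these renames; only the internal helpers moved (`export_onnx` → `torch2onnx`, `export_engine` → `onnx2engine`, the IMX pipeline → `torch2imx`). A usage sketch; the TensorRT line assumes a CUDA device with TensorRT installed:

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
model.export(format="onnx", nms=True)  # internally routed through torch2onnx()
model.export(format="engine", half=True)  # ONNX then TensorRT via onnx2engine()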
ultralytics/engine/trainer.py
CHANGED
@@ -174,7 +174,22 @@ class BaseTrainer:
 
         # Callbacks
         self.callbacks = _callbacks or callbacks.get_default_callbacks()
-        if RANK in {-1, 0}:
+
+        if isinstance(self.args.device, str) and len(self.args.device):  # i.e. device='0' or device='0,1,2,3'
+            world_size = len(self.args.device.split(","))
+        elif isinstance(self.args.device, (tuple, list)):  # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
+            world_size = len(self.args.device)
+        elif self.args.device in {"cpu", "mps"}:  # i.e. device='cpu' or 'mps'
+            world_size = 0
+        elif torch.cuda.is_available():  # i.e. device=None or device='' or device=number
+            world_size = 1  # default to device 0
+        else:  # i.e. device=None or device=''
+            world_size = 0
+
+        self.ddp = world_size > 1 and "LOCAL_RANK" not in os.environ
+        self.world_size = world_size
+        # Run subprocess if DDP training, else train normally
+        if RANK in {-1, 0} and not self.ddp:
             callbacks.add_integration_callbacks(self)
             # Start console logging immediately at trainer initialization
             self.run_callbacks("on_pretrain_routine_start")
@@ -194,19 +209,8 @@ class BaseTrainer:
 
     def train(self):
         """Allow device='', device=None on Multi-GPU systems to default to device=0."""
-        if isinstance(self.args.device, str) and len(self.args.device):  # i.e. device='0' or device='0,1,2,3'
-            world_size = len(self.args.device.split(","))
-        elif isinstance(self.args.device, (tuple, list)):  # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
-            world_size = len(self.args.device)
-        elif self.args.device in {"cpu", "mps"}:  # i.e. device='cpu' or 'mps'
-            world_size = 0
-        elif torch.cuda.is_available():  # i.e. device=None or device='' or device=number
-            world_size = 1  # default to device 0
-        else:  # i.e. device=None or device=''
-            world_size = 0
-
         # Run subprocess if DDP training, else train normally
-        if world_size > 1 and "LOCAL_RANK" not in os.environ:
+        if self.ddp:
             # Argument checks
             if self.args.rect:
                 LOGGER.warning("'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
@@ -218,7 +222,7 @@ class BaseTrainer:
                 self.args.batch = 16
 
             # Command
-            cmd, file = generate_ddp_command(world_size, self)
+            cmd, file = generate_ddp_command(self)
             try:
                 LOGGER.info(f"{colorstr('DDP:')} debug command {' '.join(cmd)}")
                 subprocess.run(cmd, check=True)
@@ -228,7 +232,7 @@ class BaseTrainer:
                 ddp_cleanup(self, str(file))
 
         else:
-            self._do_train(world_size)
+            self._do_train()
 
     def _setup_scheduler(self):
         """Initialize training learning rate scheduler."""
@@ -238,20 +242,19 @@ class BaseTrainer:
         self.lf = lambda x: max(1 - x / self.epochs, 0) * (1.0 - self.args.lrf) + self.args.lrf  # linear
         self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)
 
-    def _setup_ddp(self, world_size):
+    def _setup_ddp(self):
         """Initialize and set the DistributedDataParallel parameters for training."""
         torch.cuda.set_device(RANK)
         self.device = torch.device("cuda", RANK)
-        # LOGGER.info(f'DDP info: RANK {RANK}, WORLD_SIZE {world_size}, DEVICE {self.device}')
         os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"  # set to enforce timeout
         dist.init_process_group(
             backend="nccl" if dist.is_nccl_available() else "gloo",
             timeout=timedelta(seconds=10800),  # 3 hours
             rank=RANK,
-            world_size=world_size,
+            world_size=self.world_size,
         )
 
-    def _setup_train(self, world_size):
+    def _setup_train(self):
         """Build dataloaders and optimizer on correct rank process."""
         ckpt = self.setup_model()
         self.model = self.model.to(self.device)
@@ -293,13 +296,13 @@ class BaseTrainer:
         callbacks_backup = callbacks.default_callbacks.copy()  # backup callbacks as check_amp() resets them
         self.amp = torch.tensor(check_amp(self.model), device=self.device)
         callbacks.default_callbacks = callbacks_backup  # restore callbacks
-        if RANK > -1 and world_size > 1:  # DDP
+        if RANK > -1 and self.world_size > 1:  # DDP
             dist.broadcast(self.amp.int(), src=0)  # broadcast from rank 0 to all other ranks; gloo errors with boolean
         self.amp = bool(self.amp)  # as boolean
         self.scaler = (
             torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
         )
-        if world_size > 1:
+        if self.world_size > 1:
             self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
 
         # Check imgsz
@@ -312,7 +315,7 @@ class BaseTrainer:
             self.args.batch = self.batch_size = self.auto_batch()
 
         # Dataloaders
-        batch_size = self.batch_size // max(world_size, 1)
+        batch_size = self.batch_size // max(self.world_size, 1)
         self.train_loader = self.get_dataloader(
             self.data["train"], batch_size=batch_size, rank=LOCAL_RANK, mode="train"
         )
@@ -350,11 +353,11 @@ class BaseTrainer:
         self.scheduler.last_epoch = self.start_epoch - 1  # do not move
         self.run_callbacks("on_pretrain_routine_end")
 
-    def _do_train(self, world_size=1):
+    def _do_train(self):
         """Train the model with the specified world size."""
-        if world_size > 1:
-            self._setup_ddp(world_size)
-        self._setup_train(world_size)
+        if self.world_size > 1:
+            self._setup_ddp()
+        self._setup_train()
 
         nb = len(self.train_loader)  # number of batches
         nw = max(round(self.args.warmup_epochs * nb), 100) if self.args.warmup_epochs > 0 else -1  # warmup iterations
@@ -365,7 +368,7 @@ class BaseTrainer:
         self.run_callbacks("on_train_start")
         LOGGER.info(
             f"Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n"
-            f"Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n"
+            f"Using {self.train_loader.num_workers * (self.world_size or 1)} dataloader workers\n"
             f"Logging results to {colorstr('bold', self.save_dir)}\n"
             f"Starting training for " + (f"{self.args.time} hours..." if self.args.time else f"{self.epochs} epochs...")
         )
@@ -417,7 +420,7 @@ class BaseTrainer:
                     loss, self.loss_items = unwrap_model(self.model).loss(batch, preds)
                     self.loss = loss.sum()
                     if RANK != -1:
-                        self.loss *= world_size
+                        self.loss *= self.world_size
                     self.tloss = (
                         (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None else self.loss_items
                     )
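Moving the `world_size` computation into `__init__` lets `self.ddp` and `self.world_size` be reused by `train()`, `_setup_ddp()`, `_setup_train()`, and `_do_train()` without threading an argument through every call. A standalone sketch of the device-to-world_size logic (`derive_world_size` is a hypothetical name; the branch order mirrors the diff):

def derive_world_size(device, cuda_available: bool) -> int:
    """Sketch of BaseTrainer's device-to-world_size resolution."""
    if isinstance(device, str) and len(device):  # '0' or '0,1,2,3'
        return len(device.split(","))
    if isinstance(device, (tuple, list)):  # [0, 1, 2, 3]
        return len(device)
    if device in {"cpu", "mps"}:
        return 0
    return 1 if cuda_available else 0  # None or '' defaults to GPU 0 when available

assert derive_world_size("0,1", True) == 2
assert derive_world_size("cpu", True) == 0
assert derive_world_size(None, False) == 0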
ultralytics/engine/tuner.py
CHANGED
@@ -341,7 +341,8 @@ class Tuner:
             hyp[k] = round(min(max(hyp[k], bounds[0]), bounds[1]), 5)
 
         # Update types
-        hyp["close_mosaic"] = int(round(hyp["close_mosaic"]))
+        if "close_mosaic" in hyp:
+            hyp["close_mosaic"] = int(round(hyp["close_mosaic"]))
 
         return hyp
 
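Guarding with `if "close_mosaic" in hyp` lets tuning runs mutate a hyperparameter subset that omits `close_mosaic` entirely. A small sketch of the coercion (values are illustrative):

hyp = {"lr0": 0.00269, "close_mosaic": 9.6}
if "close_mosaic" in hyp:  # only coerce when the key is part of the search space
    hyp["close_mosaic"] = int(round(hyp["close_mosaic"]))
assert hyp["close_mosaic"] == 10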
ultralytics/nn/modules/head.py
CHANGED
@@ -162,7 +162,7 @@ class Detect(nn.Module):
         # Inference path
         shape = x[0].shape  # BCHW
         x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
-        if self.format != "imx" and (self.dynamic or self.shape != shape):
+        if self.dynamic or self.shape != shape:
             self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
             self.shape = shape
 
@@ -182,8 +182,6 @@ class Detect(nn.Module):
             dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
         else:
             dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
-        if self.export and self.format == "imx":
-            return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
         return torch.cat((dbox, cls.sigmoid()), 1)
 
     def bias_init(self):
@@ -387,8 +385,6 @@ class Pose(Detect):
         if self.training:
             return x, kpt
         pred_kpt = self.kpts_decode(bs, kpt)
-        if self.export and self.format == "imx":
-            return (*x, pred_kpt.permute(0, 2, 1))
         return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt))
 
     def kpts_decode(self, bs: int, kpts: torch.Tensor) -> torch.Tensor:
ultralytics/nn/tasks.py
CHANGED
@@ -1543,8 +1543,8 @@ def parse_model(d, ch, verbose=True):
     max_channels = float("inf")
     nc, act, scales = (d.get(x) for x in ("nc", "activation", "scales"))
     depth, width, kpt_shape = (d.get(x, 1.0) for x in ("depth_multiple", "width_multiple", "kpt_shape"))
+    scale = d.get("scale")
     if scales:
-        scale = d.get("scale")
         if not scale:
             scale = tuple(scales.keys())[0]
             LOGGER.warning(f"no model scale passed. Assuming scale='{scale}'.")
ultralytics/utils/benchmarks.py
CHANGED
@@ -145,7 +145,7 @@ def benchmark(
                 assert not is_end2end
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 IMX exports not supported"
                 assert model.task == "detect", "IMX only supported for detection task"
-                assert "C2f" in model.__str__(), "IMX only supported for YOLOv8"
+                assert "C2f" in model.__str__(), "IMX only supported for YOLOv8n and YOLO11n"
             if format == "rknn":
                 assert not isinstance(model, YOLOWorld), "YOLOWorldv2 RKNN exports not supported yet"
                 assert not is_end2end, "End-to-end models not supported by RKNN yet"
ultralytics/utils/dist.py
CHANGED
@@ -76,12 +76,11 @@ if __name__ == "__main__":
     return file.name
 
 
-def generate_ddp_command(world_size: int, trainer):
+def generate_ddp_command(trainer):
    """
    Generate command for distributed training.
 
    Args:
-        world_size (int): Number of processes to spawn for distributed training.
        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer containing configuration for distributed training.
 
    Returns:
@@ -95,7 +94,16 @@ def generate_ddp_command(world_size: int, trainer):
     file = generate_ddp_file(trainer)
     dist_cmd = "torch.distributed.run" if TORCH_1_9 else "torch.distributed.launch"
     port = find_free_network_port()
-    cmd = [sys.executable, "-m", dist_cmd, "--nproc_per_node", f"{world_size}", "--master_port", f"{port}", file]
+    cmd = [
+        sys.executable,
+        "-m",
+        dist_cmd,
+        "--nproc_per_node",
+        f"{trainer.world_size}",
+        "--master_port",
+        f"{port}",
+        file,
+    ]
     return cmd, file
 
 
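`generate_ddp_command` now reads `trainer.world_size` instead of taking it as a parameter. A sketch of the command it assembles, assuming `trainer.world_size == 2`, a free port of 29500, and a hypothetical temp-file path from `generate_ddp_file()`:

import sys

cmd = [
    sys.executable,
    "-m",
    "torch.distributed.run",  # torch.distributed.launch on torch < 1.9
    "--nproc_per_node",
    "2",
    "--master_port",
    "29500",
    "/tmp/_temp_trainer.py",  # hypothetical path
]
print(" ".join(cmd))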
ultralytics/utils/{export.py → export/__init__.py}
RENAMED
@@ -9,8 +9,10 @@ import torch
 
 from ultralytics.utils import IS_JETSON, LOGGER
 
+from .imx import torch2imx  # noqa
 
-def export_onnx(
+
+def torch2onnx(
     torch_model: torch.nn.Module,
     im: torch.Tensor,
     onnx_file: str,
@@ -47,7 +49,7 @@ def export_onnx(
     )
 
 
-def export_engine(
+def onnx2engine(
     onnx_file: str,
     engine_file: str | None = None,
     workspace: int | None = None,
@@ -98,12 +100,12 @@ def export_engine(
     # Engine builder
     builder = trt.Builder(logger)
     config = builder.create_builder_config()
-    workspace = int((workspace or 0) * (1 << 30))
+    workspace_bytes = int((workspace or 0) * (1 << 30))
     is_trt10 = int(trt.__version__.split(".", 1)[0]) >= 10  # is TensorRT >= 10
-    if is_trt10 and workspace > 0:
-        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)
-    elif workspace > 0:  # TensorRT versions 7, 8
-        config.max_workspace_size = workspace
+    if is_trt10 and workspace_bytes > 0:
+        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace_bytes)
+    elif workspace_bytes > 0:  # TensorRT versions 7, 8
+        config.max_workspace_size = workspace_bytes
     flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
     network = builder.create_network(flag)
     half = builder.platform_has_fast_fp16 and half
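The rename maps the old helpers onto clearer source-to-target names, and the GiB workspace argument is now converted to bytes in a dedicated variable. A short sketch of both points:

# Old name → new name after the export.py → export/ package split:
#   export_onnx   → torch2onnx   (PyTorch module → ONNX file)
#   export_engine → onnx2engine  (ONNX file → TensorRT engine)
#   IMX pipeline  → torch2imx    (ultralytics/utils/export/imx.py)
from ultralytics.utils.export import onnx2engine, torch2imx, torch2onnx  # noqa: F401

workspace_gib = 4  # workspace is given in GiB and converted internally
workspace_bytes = int((workspace_gib or 0) * (1 << 30))
assert workspace_bytes == 4 * 1024**3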
ultralytics/utils/export/imx.py
ADDED
@@ -0,0 +1,289 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from __future__ import annotations
+
+import subprocess
+import types
+from pathlib import Path
+
+import torch
+
+from ultralytics.nn.modules import Detect, Pose
+from ultralytics.utils import LOGGER
+from ultralytics.utils.tal import make_anchors
+from ultralytics.utils.torch_utils import copy_attr
+
+
+class FXModel(torch.nn.Module):
+    """
+    A custom model class for torch.fx compatibility.
+
+    This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph
+    manipulation. It copies attributes from an existing model and explicitly sets the model attribute to ensure proper
+    copying.
+
+    Attributes:
+        model (nn.Module): The original model's layers.
+    """
+
+    def __init__(self, model, imgsz=(640, 640)):
+        """
+        Initialize the FXModel.
+
+        Args:
+            model (nn.Module): The original model to wrap for torch.fx compatibility.
+            imgsz (tuple[int, int]): The input image size (height, width). Default is (640, 640).
+        """
+        super().__init__()
+        copy_attr(self, model)
+        # Explicitly set `model` since `copy_attr` somehow does not copy it.
+        self.model = model.model
+        self.imgsz = imgsz
+
+    def forward(self, x):
+        """
+        Forward pass through the model.
+
+        This method performs the forward pass through the model, handling the dependencies between layers and saving
+        intermediate outputs.
+
+        Args:
+            x (torch.Tensor): The input tensor to the model.
+
+        Returns:
+            (torch.Tensor): The output tensor from the model.
+        """
+        y = []  # outputs
+        for m in self.model:
+            if m.f != -1:  # if not from previous layer
+                # from earlier layers
+                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
+            if isinstance(m, Detect):
+                m._inference = types.MethodType(_inference, m)  # bind method to Detect
+                m.anchors, m.strides = (
+                    x.transpose(0, 1)
+                    for x in make_anchors(
+                        torch.cat([s / m.stride.unsqueeze(-1) for s in self.imgsz], dim=1), m.stride, 0.5
+                    )
+                )
+                if type(m) is Pose:
+                    m.forward = types.MethodType(pose_forward, m)  # bind method to Detect
+            x = m(x)  # run
+            y.append(x)  # save output
+        return x
+
+
+def _inference(self, x: list[torch.Tensor]) -> tuple[torch.Tensor]:
+    """Decode boxes and cls scores for imx object detection."""
+    x_cat = torch.cat([xi.view(x[0].shape[0], self.no, -1) for xi in x], 2)
+    box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
+    dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
+    return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
+
+
+def pose_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+    """Forward pass for imx pose estimation, including keypoint decoding."""
+    bs = x[0].shape[0]  # batch size
+    kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1)  # (bs, 17*3, h*w)
+    x = Detect.forward(self, x)
+    pred_kpt = self.kpts_decode(bs, kpt)
+    return (*x, pred_kpt.permute(0, 2, 1))
+
+
+class NMSWrapper(torch.nn.Module):
+    """Wrap PyTorch Module with multiclass_nms layer from sony_custom_layers."""
+
+    def __init__(
+        self,
+        model: torch.nn.Module,
+        score_threshold: float = 0.001,
+        iou_threshold: float = 0.7,
+        max_detections: int = 300,
+        task: str = "detect",
+    ):
+        """
+        Initialize NMSWrapper with PyTorch Module and NMS parameters.
+
+        Args:
+            model (torch.nn.Module): Model instance.
+            score_threshold (float): Score threshold for non-maximum suppression.
+            iou_threshold (float): Intersection over union threshold for non-maximum suppression.
+            max_detections (int): The number of detections to return.
+            task (str): Task type, either 'detect' or 'pose'.
+        """
+        super().__init__()
+        self.model = model
+        self.score_threshold = score_threshold
+        self.iou_threshold = iou_threshold
+        self.max_detections = max_detections
+        self.task = task
+
+    def forward(self, images):
+        """Forward pass with model inference and NMS post-processing."""
+        from sony_custom_layers.pytorch import multiclass_nms_with_indices
+
+        # model inference
+        outputs = self.model(images)
+        boxes, scores = outputs[0], outputs[1]
+        nms_outputs = multiclass_nms_with_indices(
+            boxes=boxes,
+            scores=scores,
+            score_threshold=self.score_threshold,
+            iou_threshold=self.iou_threshold,
+            max_detections=self.max_detections,
+        )
+        if self.task == "pose":
+            kpts = outputs[2]  # (bs, max_detections, kpts 17*3)
+            out_kpts = torch.gather(kpts, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, kpts.size(-1)))
+            return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_kpts
+        return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, nms_outputs.n_valid
+
+
+def torch2imx(
+    model: torch.nn.Module,
+    file: Path | str,
+    conf: float,
+    iou: float,
+    max_det: int,
+    metadata: dict | None = None,
+    gptq: bool = False,
+    dataset=None,
+    prefix: str = "",
+):
+    """
+    Export YOLO model to IMX format for deployment on Sony IMX500 devices.
+
+    This function quantizes a YOLO model using Model Compression Toolkit (MCT) and exports it
+    to IMX format compatible with Sony IMX500 edge devices. It supports both YOLOv8n and YOLO11n
+    models for detection and pose estimation tasks.
+
+    Args:
+        model (torch.nn.Module): The YOLO model to export. Must be YOLOv8n or YOLO11n.
+        file (Path | str): Output file path for the exported model.
+        conf (float): Confidence threshold for NMS post-processing.
+        iou (float): IoU threshold for NMS post-processing.
+        max_det (int): Maximum number of detections to return.
+        metadata (dict | None, optional): Metadata to embed in the ONNX model. Defaults to None.
+        gptq (bool, optional): Whether to use Gradient-Based Post Training Quantization.
+            If False, uses standard Post Training Quantization. Defaults to False.
+        dataset (optional): Representative dataset for quantization calibration. Defaults to None.
+        prefix (str, optional): Logging prefix string. Defaults to "".
+
+    Returns:
+        f (Path): Path to the exported IMX model directory
+
+    Raises:
+        ValueError: If the model is not a supported YOLOv8n or YOLO11n variant.
+
+    Example:
+        >>> from ultralytics import YOLO
+        >>> model = YOLO("yolo11n.pt")
+        >>> path, _ = export_imx(model, "model.imx", conf=0.25, iou=0.45, max_det=300)
+
+    Note:
+        - Requires model_compression_toolkit, onnx, edgemdt_tpc, and sony_custom_layers packages
+        - Only supports YOLOv8n and YOLO11n models (detection and pose tasks)
+        - Output includes quantized ONNX model, IMX binary, and labels.txt file
+    """
+    import model_compression_toolkit as mct
+    import onnx
+    from edgemdt_tpc import get_target_platform_capabilities
+
+    LOGGER.info(f"\n{prefix} starting export with model_compression_toolkit {mct.__version__}...")
+
+    def representative_dataset_gen(dataloader=dataset):
+        for batch in dataloader:
+            img = batch["img"]
+            img = img / 255.0
+            yield [img]
+
+    tpc = get_target_platform_capabilities(tpc_version="4.0", device_type="imx500")
+
+    bit_cfg = mct.core.BitWidthConfig()
+    if "C2PSA" in model.__str__():  # YOLO11
+        if model.task == "detect":
+            layer_names = ["sub", "mul_2", "add_14", "cat_21"]
+            weights_memory = 2585350.2439
+            n_layers = 238  # 238 layers for fused YOLO11n
+        elif model.task == "pose":
+            layer_names = ["sub", "mul_2", "add_14", "cat_22", "cat_23", "mul_4", "add_15"]
+            weights_memory = 2437771.67
+            n_layers = 257  # 257 layers for fused YOLO11n-pose
+    else:  # YOLOv8
+        if model.task == "detect":
+            layer_names = ["sub", "mul", "add_6", "cat_17"]
+            weights_memory = 2550540.8
+            n_layers = 168  # 168 layers for fused YOLOv8n
+        elif model.task == "pose":
+            layer_names = ["add_7", "mul_2", "cat_19", "mul", "sub", "add_6", "cat_18"]
+            weights_memory = 2482451.85
+            n_layers = 187  # 187 layers for fused YOLO11n-pose
+
+    # Check if the model has the expected number of layers
+    if len(list(model.modules())) != n_layers:
+        raise ValueError("IMX export only supported for YOLOv8n and YOLO11n models.")
+
+    for layer_name in layer_names:
+        bit_cfg.set_manual_activation_bit_width([mct.core.common.network_editors.NodeNameFilter(layer_name)], 16)
+
+    config = mct.core.CoreConfig(
+        mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
+        quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
+        bit_width_config=bit_cfg,
+    )
+
+    resource_utilization = mct.core.ResourceUtilization(weights_memory=weights_memory)
+
+    quant_model = (
+        mct.gptq.pytorch_gradient_post_training_quantization(  # Perform Gradient-Based Post Training Quantization
+            model=model,
+            representative_data_gen=representative_dataset_gen,
+            target_resource_utilization=resource_utilization,
+            gptq_config=mct.gptq.get_pytorch_gptq_config(
+                n_epochs=1000, use_hessian_based_weights=False, use_hessian_sample_attention=False
+            ),
+            core_config=config,
+            target_platform_capabilities=tpc,
+        )[0]
+        if gptq
+        else mct.ptq.pytorch_post_training_quantization(  # Perform post training quantization
+            in_module=model,
+            representative_data_gen=representative_dataset_gen,
+            target_resource_utilization=resource_utilization,
+            core_config=config,
+            target_platform_capabilities=tpc,
+        )[0]
+    )
+
+    quant_model = NMSWrapper(
+        model=quant_model,
+        score_threshold=conf or 0.001,
+        iou_threshold=iou,
+        max_detections=max_det,
+        task=model.task,
+    )
+
+    f = Path(str(file).replace(file.suffix, "_imx_model"))
+    f.mkdir(exist_ok=True)
+    onnx_model = f / Path(str(file.name).replace(file.suffix, "_imx.onnx"))  # js dir
+    mct.exporter.pytorch_export_model(
+        model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
+    )
+
+    model_onnx = onnx.load(onnx_model)  # load onnx model
+    for k, v in metadata.items():
+        meta = model_onnx.metadata_props.add()
+        meta.key, meta.value = k, str(v)
+
+    onnx.save(model_onnx, onnx_model)
+
+    subprocess.run(
+        ["imxconv-pt", "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
+        check=True,
+    )
+
+    # Needed for imx models.
+    with open(f / "labels.txt", "w", encoding="utf-8") as file:
+        file.writelines([f"{name}\n" for _, name in model.names.items()])
+
+    return f
ultralytics/utils/plotting.py
CHANGED
@@ -934,13 +934,14 @@ def plt_color_scatter(v, f, bins: int = 20, cmap: str = "viridis", alpha: float
 
 
 @plt_settings()
-def plot_tune_results(csv_file: str = "tune_results.csv"):
+def plot_tune_results(csv_file: str = "tune_results.csv", exclude_zero_fitness_points: bool = True):
     """
     Plot the evolution results stored in a 'tune_results.csv' file. The function generates a scatter plot for each key
     in the CSV, color-coded based on fitness scores. The best-performing configurations are highlighted on the plots.
 
     Args:
         csv_file (str, optional): Path to the CSV file containing the tuning results.
+        exclude_zero_fitness_points (bool, optional): Don't include points with zero fitness in tuning plots.
 
     Examples:
         >>> plot_tune_results("path/to/tune_results.csv")
@@ -962,6 +963,9 @@ def plot_tune_results(csv_file: str = "tune_results.csv"):
     keys = [x.strip() for x in data.columns][num_metrics_columns:]
     x = data.to_numpy()
     fitness = x[:, 0]  # fitness
+    if exclude_zero_fitness_points:
+        mask = fitness > 0  # exclude zero-fitness points
+        x, fitness = x[mask], fitness[mask]
     j = np.argmax(fitness)  # max fitness index
     n = math.ceil(len(keys) ** 0.5)  # columns and rows in plot
     plt.figure(figsize=(10, 10), tight_layout=True)
ultralytics/utils/torch_utils.py
CHANGED
@@ -222,7 +222,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
             f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
             f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}."
         )
-    space = " " * (len(s) + 1)
+    space = " " * len(s)
     for i, d in enumerate(devices):
         s += f"{'' if i == 0 else space}CUDA:{d} ({get_gpu_info(i)})\n"  # bytes to MB
     arg = "cuda:0"
@@ -959,53 +959,6 @@ class EarlyStopping:
         return stop
 
 
-class FXModel(nn.Module):
-    """
-    A custom model class for torch.fx compatibility.
-
-    This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph
-    manipulation. It copies attributes from an existing model and explicitly sets the model attribute to ensure proper
-    copying.
-
-    Attributes:
-        model (nn.Module): The original model's layers.
-    """
-
-    def __init__(self, model):
-        """
-        Initialize the FXModel.
-
-        Args:
-            model (nn.Module): The original model to wrap for torch.fx compatibility.
-        """
-        super().__init__()
-        copy_attr(self, model)
-        # Explicitly set `model` since `copy_attr` somehow does not copy it.
-        self.model = model.model
-
-    def forward(self, x):
-        """
-        Forward pass through the model.
-
-        This method performs the forward pass through the model, handling the dependencies between layers and saving
-        intermediate outputs.
-
-        Args:
-            x (torch.Tensor): The input tensor to the model.
-
-        Returns:
-            (torch.Tensor): The output tensor from the model.
-        """
-        y = []  # outputs
-        for m in self.model:
-            if m.f != -1:  # if not from previous layer
-                # from earlier layers
-                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
-            x = m(x)  # run
-            y.append(x)  # save output
-        return x
-
-
 def attempt_compile(
     model: torch.nn.Module,
     device: torch.device,
@@ -1028,7 +981,7 @@ def attempt_compile(
         use_autocast (bool, optional): Whether to run warmup under autocast on CUDA or MPS devices.
         warmup (bool, optional): Whether to execute a single dummy forward pass to warm up the compiled model.
         mode (bool | str, optional): torch.compile mode. True → "default", False → no compile, or a string like
-            "default", "reduce-overhead", "max-autotune".
+            "default", "reduce-overhead", "max-autotune-no-cudagraphs".
 
     Returns:
         model (torch.nn.Module): Compiled model if compilation succeeds, otherwise the original unmodified model.
@@ -1050,6 +1003,9 @@ def attempt_compile(
         mode = "default"
     prefix = colorstr("compile:")
     LOGGER.info(f"{prefix} starting torch.compile with '{mode}' mode...")
+    if mode == "max-autotune":
+        LOGGER.warning(f"{prefix} mode='{mode}' not recommended, using mode='max-autotune-no-cudagraphs' instead")
+        mode = "max-autotune-no-cudagraphs"
    t0 = time.perf_counter()
    try:
        model = torch.compile(model, mode=mode, backend="inductor")
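A sketch of the new mode guard in `attempt_compile()`: requests for "max-autotune" are remapped to the cudagraphs-free variant, per the warning added above (the Linear module here is only a stand-in for a real model):

import torch

mode = "max-autotune"
if mode == "max-autotune":
    mode = "max-autotune-no-cudagraphs"  # remapped, per the warning in attempt_compile()
model = torch.compile(torch.nn.Linear(8, 8), mode=mode, backend="inductor")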
{ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.198
+Version: 8.3.200
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/RECORD
CHANGED
@@ -1,18 +1,18 @@
 tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
 tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
 tests/test_cli.py,sha256=EMf5gTAopOnIz8VvzaM-Qb044o7D0flnUHYQ-2ffOM4,5670
-tests/test_cuda.py,sha256=
+tests/test_cuda.py,sha256=3eiigQIWEkqLsIznlqAMrAi3Dhd_N54Ojtm5LCQELyo,8022
 tests/test_engine.py,sha256=8W4_D48ZBUp-DsUlRYxHTXzougycY8yggvpbVwQDLPg,5025
 tests/test_exports.py,sha256=dWuroSyqXnrc0lE-RNTf7pZoXXXEkOs31u7nhOiEHS0,10994
 tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
 tests/test_python.py,sha256=2V23f2-JQsO-K4p1kj0IkCRxHykGwgd0edKJzRsBgdI,27911
 tests/test_solutions.py,sha256=6wJ9-lhyWSAm7zaR4D9L_DrUA3iJU1NgqmbQO6PIuvo,13211
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=HbegtDUzV_IT73kI3R9-_rqLLLNIXg_AKtl6bVTxJaQ,1120
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=xX7qUxdcDgcjCKoQFEVQgzrwZodeKTF88CTKZe05d0Y,39955
-ultralytics/cfg/default.yaml,sha256=
+ultralytics/cfg/default.yaml,sha256=lfiQ1PVxNhOzEiaRxThPedmMAhShdR4Ti8uYktJn5CI,8901
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=J4ItoUlE_EiYTmp1DFKYHfbqHkj8j4wUtRJQhaMIlBM,3275
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=VZ_KKFX0H2YvlFVJ8JHcLWYBZ2xiQ6Z-ROSTiKWpS7c,1211
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=JrDuYcQ0JU9lJlCA-dCkMNko_jaj6MAVGHjsfjeZ_u0,1181
@@ -121,12 +121,12 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=
+ultralytics/engine/exporter.py,sha256=Cq0vr_KNTOipe8MrapSrujobs0hDqG16dvvRqVxh6M0,68149
 ultralytics/engine/model.py,sha256=iwwaL2NR5NSwQ7R3juHzS3ds9W-CfhC_CjUcwMvcgsk,53426
 ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
 ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
-ultralytics/engine/trainer.py,sha256=
-ultralytics/engine/tuner.py,sha256=
+ultralytics/engine/trainer.py,sha256=25SIKM5Wi1XbpNz4SckmsfzbF60V-T4wKKa29FhXX1U,41035
+ultralytics/engine/tuner.py,sha256=aUfZ6ogaER57XhN4yjs0eksYwMe7jRAj_PmuZ4pEIrI,21447
 ultralytics/engine/validator.py,sha256=7tADPOXRZz0Yi7F-Z5SxcUnwytaa2MfbtuSdO8pp_l4,16966
 ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
 ultralytics/hub/auth.py,sha256=RIwZDWfW6vS2yGpZKR0xVl0-38itJYEFtmqY_M70bl8,6304
@@ -197,13 +197,13 @@ ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYA
 ultralytics/models/yolo/yoloe/val.py,sha256=5Gd9EoFH0FmKKvWXBl4J7gBe9DVxIczN-s3ceHwdUDo,9458
 ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
 ultralytics/nn/autobackend.py,sha256=WWHIFvCI47Wpe3NCDkoUg3esjOTJ0XGEzG3luA_uG-8,41063
-ultralytics/nn/tasks.py,sha256=
+ultralytics/nn/tasks.py,sha256=WfZLAypHpNo0S99FSpQDHWXBe64nMxYktuTuHCidT-Q,70412
 ultralytics/nn/text_model.py,sha256=pHqnKe8UueR1MuwJcIE_IvrnYIlt68QL796xjcRJs2A,15275
 ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
 ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
 ultralytics/nn/modules/block.py,sha256=-5RfsA_ljekL8_bQPGupSn9dVcZ8V_lVsOGlhzIW1kg,70622
 ultralytics/nn/modules/conv.py,sha256=U6P1ZuzQmIf09noKwp7syuWn-M98Tly2wMWOsDT3kOI,21457
-ultralytics/nn/modules/head.py,sha256=
+ultralytics/nn/modules/head.py,sha256=RpeAR7U8S5sqegmOk76Ch2a_jH4lnsHTZWft3CHbICA,53308
 ultralytics/nn/modules/transformer.py,sha256=l6NuuFF7j_bogcNULHBBdj5l6sf7MwiVEGz8XcRyTUM,31366
 ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
 ultralytics/solutions/__init__.py,sha256=ZoeAQavTLp8aClnhZ9tbl6lxy86GxofyGvZWTx2aWkI,1209
@@ -239,14 +239,13 @@ ultralytics/trackers/utils/matching.py,sha256=I8SX0sBaBgr4GBJ9uDGOy5LnotgNZHpB2p
 ultralytics/utils/__init__.py,sha256=whSIuj-0lV0SAp4YjOeBJZ2emP1Qa8pqLnrhRiwl2Qs,53503
 ultralytics/utils/autobatch.py,sha256=i6KYLLSItKP1Q2IUlTPHrZhjcxl7UOjs0Seb8bF8pvM,5124
 ultralytics/utils/autodevice.py,sha256=d9yq6eEn05fdfzfpxeSECd0YEO61er5f7T-0kjLdofg,8843
-ultralytics/utils/benchmarks.py,sha256=
+ultralytics/utils/benchmarks.py,sha256=wBsDrwtc6NRM9rIDmqeGQ_9yxOTetnchXXHwZSUhp18,31444
 ultralytics/utils/checks.py,sha256=Uigc10tev2z9pLjjdYwCYkQ4BrjKmurOX2nYd6liqvU,34510
 ultralytics/utils/cpu.py,sha256=OPlVxROWhQp-kEa9EkeNRKRQ-jz0KwySu5a-h91JZjk,3634
-ultralytics/utils/dist.py,sha256=
+ultralytics/utils/dist.py,sha256=5xQhWK0OLORvseAL08UmG1LYdkiDVLquxmaGSnqiSqo,4151
 ultralytics/utils/downloads.py,sha256=JIlHfUg-qna5aOHRJupH7d5zob2qGZtRrs86Cp3zOJs,23029
 ultralytics/utils/errors.py,sha256=XT9Ru7ivoBgofK6PlnyigGoa7Fmf5nEhyHtnD-8TRXI,1584
 ultralytics/utils/events.py,sha256=v2RmLlx78_K6xQfOAuUTJMOexAgNdiuiOvvnsH65oDA,4679
-ultralytics/utils/export.py,sha256=lDeEKzDecJ_F3X_AHOPIRdmDDqymRAFT0-K2hNRTWw4,9838
 ultralytics/utils/files.py,sha256=kxE2rkBuZL288nSN7jxLljmDnBgc16rekEXeRjhbUoo,8213
 ultralytics/utils/git.py,sha256=DcaxKNQfCiG3cxdzuw7M6l_VXgaSVqkERQt_vl8UyXM,5512
 ultralytics/utils/instance.py,sha256=_b_jMTECWJGzncCiTg7FtTDSSeXGnbiAhaJhIsqbn9k,19043
@@ -256,9 +255,9 @@ ultralytics/utils/metrics.py,sha256=42zu-qeSvtL4JtvFDQy-7_5OJLwU4M8b5V8uRHBPFUQ,
 ultralytics/utils/nms.py,sha256=AVOmPuUTEJqmq2J6rvjq-nHNxYIyabgzHdc41siyA0w,14161
 ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,26905
 ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
-ultralytics/utils/plotting.py,sha256=
+ultralytics/utils/plotting.py,sha256=XWXZi02smBeFji3BSkMZNNNssXzO-dIxFaD15_N1f-4,47221
 ultralytics/utils/tal.py,sha256=LrziY_ZHz4wln3oOnqAzgyPaXKoup17Sa103BpuaQFU,20935
-ultralytics/utils/torch_utils.py,sha256=
+ultralytics/utils/torch_utils.py,sha256=n-CMgLfQsg-SNF281nNHJm_kBdxPIrVr7xrI6gneL20,41771
 ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
 ultralytics/utils/triton.py,sha256=fbMfTAUyoGiyslWtySzLZw53XmZJa7rF31CYFot0Wjs,5422
 ultralytics/utils/tuner.py,sha256=9D4dSIvwwxcNSJcH2QJ92qiIVi9zu-1L7_PBZ8okDyE,6816
@@ -274,9 +273,11 @@ ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMv
 ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
 ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3jjY2CAWB7SNF0,5283
 ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
-ultralytics
-ultralytics
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
+ultralytics/utils/export/__init__.py,sha256=jQtf716PP0jt7bMoY9FkqmjG26KbvDzuR84jGhaBi2U,9901
+ultralytics/utils/export/imx.py,sha256=Jl5nuNxqaP_bY5yrV2NypmoJSrexHE71TxR72SDdjcg,11394
+ultralytics-8.3.200.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.200.dist-info/METADATA,sha256=2DrAMb7U96KD04C281jW4QGGqXPnrMCT91SDcbHCWGI,37667
+ultralytics-8.3.200.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics-8.3.200.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.200.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.200.dist-info/RECORD,,
{ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/WHEEL
File without changes

{ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/entry_points.txt
File without changes

{ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/licenses/LICENSE
File without changes

{ultralytics-8.3.198.dist-info → ultralytics-8.3.200.dist-info}/top_level.txt
File without changes