ultralytics 8.3.199__py3-none-any.whl → 8.3.201__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.199"
+ __version__ = "8.3.201"

  import importlib
  import os
ultralytics/cfg/__init__.py CHANGED
@@ -856,7 +856,6 @@ def entrypoint(debug: str = "") -> None:
  return

  special = {
- "help": lambda: LOGGER.info(CLI_HELP_MSG),
  "checks": checks.collect_system_info,
  "version": lambda: LOGGER.info(__version__),
  "settings": lambda: handle_yolo_settings(args[1:]),
@@ -866,6 +865,7 @@ def entrypoint(debug: str = "") -> None:
  "logout": lambda: handle_yolo_hub(args),
  "copy-cfg": copy_default_cfg,
  "solutions": lambda: handle_yolo_solutions(args[1:]),
+ "help": lambda: LOGGER.info(CLI_HELP_MSG), # help below hub for -h flag precedence
  }
  full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}
ultralytics/cfg/datasets/SKU-110K.yaml CHANGED
@@ -45,7 +45,7 @@ download: |
  # Convert labels
  names = "image", "x1", "y1", "x2", "y2", "class", "image_width", "image_height" # column names
  for d in "annotations_train.csv", "annotations_val.csv", "annotations_test.csv":
- x = pl.read_csv(dir / "annotations" / d, names=names, infer_schema_length=None).to_numpy() # annotations
+ x = pl.read_csv(dir / "annotations" / d, has_header=False, new_columns=names, infer_schema_length=None).to_numpy() # annotations
  images, unique_images = x[:, 0], np.unique(x[:, 0])
  with open((dir / d).with_suffix(".txt").__str__().replace("annotations_", ""), "w", encoding="utf-8") as f:
  f.writelines(f"./images/{s}\n" for s in unique_images)
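The fix above matters because `pl.read_csv` has no pandas-style `names=` parameter; for a header-less CSV the column names go in `new_columns=` together with `has_header=False`. A minimal sketch of the corrected call, with an inline CSV standing in for the real annotations files:

```python
# Minimal sketch of the polars call used above; the inline CSV stands in for the
# header-less annotations files shipped with SKU-110K.
import io

import numpy as np
import polars as pl

names = "image", "x1", "y1", "x2", "y2", "class", "image_width", "image_height"
csv = io.StringIO("img_1.jpg,10,20,110,220,0,2448,2048\nimg_2.jpg,5,5,50,60,0,2448,2048\n")

# has_header=False stops the first data row being eaten as a header; new_columns
# assigns the names; infer_schema_length=None scans everything before picking dtypes.
x = pl.read_csv(csv, has_header=False, new_columns=list(names), infer_schema_length=None).to_numpy()
images, unique_images = x[:, 0], np.unique(x[:, 0])
print(unique_images)  # ['img_1.jpg' 'img_2.jpg']
```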
ultralytics/engine/exporter.py CHANGED
@@ -106,7 +106,7 @@ from ultralytics.utils.checks import (
  is_sudo_available,
  )
  from ultralytics.utils.downloads import attempt_download_asset, get_github_assets, safe_download
- from ultralytics.utils.export import export_engine, export_onnx
+ from ultralytics.utils.export import onnx2engine, torch2imx, torch2onnx
  from ultralytics.utils.files import file_size, spaces_in_path
  from ultralytics.utils.metrics import batch_probiou
  from ultralytics.utils.nms import TorchNMS
@@ -194,8 +194,11 @@ def try_export(inner_func):
  dt = 0.0
  try:
  with Profile() as dt:
- f = inner_func(*args, **kwargs)
- LOGGER.info(f"{prefix} export success {dt.t:.1f}s, saved as '{f}' ({file_size(f):.1f} MB)")
+ f = inner_func(*args, **kwargs) # exported file/dir or tuple of (file/dir, *)
+ path = f if isinstance(f, (str, Path)) else f[0]
+ mb = file_size(path)
+ assert mb > 0.0, "0.0 MB output model size"
+ LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as '{path}' ({mb:.1f} MB)")
  return f
  except Exception as e:
  LOGGER.error(f"{prefix} export failure {dt.t:.1f}s: {e}")
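The decorator now tolerates exporters that return a tuple (path plus extras) and fails fast when the artifact is empty. A standalone re-sketch of that logic; `file_size` here is a local stand-in for `ultralytics.utils.files.file_size`, and the `Profile` context manager is replaced by a plain timer:

```python
# Hedged re-sketch of the try_export behaviour shown above (not the real decorator).
import time
from pathlib import Path


def file_size(path) -> float:
    """Return file or directory size in MB (stand-in helper)."""
    p = Path(path)
    files = [p] if p.is_file() else list(p.rglob("*"))
    return sum(f.stat().st_size for f in files if f.is_file()) / (1 << 20)


def try_export(inner_func):
    """Time an export step, resolve its output path, and verify a non-empty artifact."""
    def wrapper(*args, **kwargs):
        t0 = time.perf_counter()
        f = inner_func(*args, **kwargs)                   # path or (path, extras...)
        path = f if isinstance(f, (str, Path)) else f[0]  # first element is the artifact
        mb = file_size(path)
        assert mb > 0.0, "0.0 MB output model size"
        print(f"export success {time.perf_counter() - t0:.1f}s, '{path}' ({mb:.1f} MB)")
        return f
    return wrapper
```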
@@ -284,7 +287,8 @@ class Exporter:
  # Get the closest match if format is invalid
  matches = difflib.get_close_matches(fmt, fmts, n=1, cutoff=0.6) # 60% similarity required to match
  if not matches:
- raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
+ msg = "Model is already in PyTorch format." if fmt == "pt" else f"Invalid export format='{fmt}'."
+ raise ValueError(f"{msg} Valid formats are {fmts}")
  LOGGER.warning(f"Invalid export format='{fmt}', updating to format='{matches[0]}'")
  fmt = matches[0]
  flags = [x == fmt for x in fmts]
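For context, the 0.6 cutoff means close misspellings are silently corrected while strings with no near match (such as "pt") fall through to the new error message. A quick illustration against a subset of the export formats:

```python
# Illustration of the fuzzy format matching behind the branch above.
import difflib

fmts = ["torchscript", "onnx", "openvino", "engine", "coreml", "tflite"]  # subset only
print(difflib.get_close_matches("onxx", fmts, n=1, cutoff=0.6))  # ['onnx'] -> auto-corrected
print(difflib.get_close_matches("pt", fmts, n=1, cutoff=0.6))    # [] -> raises the new ValueError
```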
@@ -408,9 +412,9 @@ class Exporter:
  model = model.fuse()

  if imx:
- from ultralytics.utils.torch_utils import FXModel
+ from ultralytics.utils.export.imx import FXModel

- model = FXModel(model)
+ model = FXModel(model, self.imgsz)
  for m in model.modules():
  if isinstance(m, Classify):
  m.export = True
@@ -425,15 +429,6 @@ class Exporter:
  elif isinstance(m, C2f) and not is_tf_format:
  # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
  m.forward = m.forward_split
- if isinstance(m, Detect) and imx:
- from ultralytics.utils.tal import make_anchors
-
- m.anchors, m.strides = (
- x.transpose(0, 1)
- for x in make_anchors(
- torch.cat([s / m.stride.unsqueeze(-1) for s in self.imgsz], dim=1), m.stride, 0.5
- )
- )

  y = None
  for _ in range(2): # dry runs
@@ -609,7 +604,7 @@ class Exporter:
  self.args.opset = opset_version # for NMSModel

  with arange_patch(self.args):
- export_onnx(
+ torch2onnx(
  NMSModel(self.model, self.args) if self.args.nms else self.model,
  self.im,
  f,
@@ -932,7 +927,7 @@ class Exporter:
  LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
  assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
  f = self.file.with_suffix(".engine") # TensorRT engine file
- export_engine(
+ onnx2engine(
  f_onnx,
  f,
  self.args.workspace,
@@ -963,7 +958,7 @@ class Exporter:
  "tf_keras<=2.19.0", # required by 'onnx2tf' package
  "sng4onnx>=1.0.1", # required by 'onnx2tf' package
  "onnx_graphsurgeon>=0.3.26", # required by 'onnx2tf' package
- "ai-edge-litert>=1.2.0,<1.4.0", # required by 'onnx2tf' package
+ "ai-edge-litert>=1.2.0" + (",<1.4.0" if MACOS else ""), # required by 'onnx2tf' package
  "onnx>=1.12.0",
  "onnx2tf>=1.26.3",
  "onnxslim>=0.1.67",
@@ -1168,7 +1163,6 @@ class Exporter:
  @try_export
  def export_imx(self, prefix=colorstr("IMX:")):
  """Export YOLO model to IMX format."""
- gptq = False
  assert LINUX, (
  "export only supported on Linux. "
  "See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
@@ -1181,13 +1175,6 @@ class Exporter:
  check_requirements("imx500-converter[pt]>=3.16.1") # Separate requirements for imx500-converter
  check_requirements("mct-quantizers>=1.6.0") # Separate for compatibility with model-compression-toolkit

- import model_compression_toolkit as mct
- import onnx
- from edgemdt_tpc import get_target_platform_capabilities
- from sony_custom_layers.pytorch import multiclass_nms_with_indices
-
- LOGGER.info(f"\n{prefix} starting export with model_compression_toolkit {mct.__version__}...")
-
  # Install Java>=17
  try:
  java_output = subprocess.run(["java", "--version"], check=True, capture_output=True).stdout.decode()
@@ -1198,150 +1185,17 @@ class Exporter:
  cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "openjdk-21-jre"]
  subprocess.run(cmd, check=True)

- def representative_dataset_gen(dataloader=self.get_int8_calibration_dataloader(prefix)):
- for batch in dataloader:
- img = batch["img"]
- img = img / 255.0
- yield [img]
-
- tpc = get_target_platform_capabilities(tpc_version="4.0", device_type="imx500")
-
- bit_cfg = mct.core.BitWidthConfig()
- if "C2PSA" in self.model.__str__(): # YOLO11
- if self.model.task == "detect":
- layer_names = ["sub", "mul_2", "add_14", "cat_21"]
- weights_memory = 2585350.2439
- n_layers = 238 # 238 layers for fused YOLO11n
- elif self.model.task == "pose":
- layer_names = ["sub", "mul_2", "add_14", "cat_22", "cat_23", "mul_4", "add_15"]
- weights_memory = 2437771.67
- n_layers = 257 # 257 layers for fused YOLO11n-pose
- else: # YOLOv8
- if self.model.task == "detect":
- layer_names = ["sub", "mul", "add_6", "cat_17"]
- weights_memory = 2550540.8
- n_layers = 168 # 168 layers for fused YOLOv8n
- elif self.model.task == "pose":
- layer_names = ["add_7", "mul_2", "cat_19", "mul", "sub", "add_6", "cat_18"]
- weights_memory = 2482451.85
- n_layers = 187 # 187 layers for fused YOLO11n-pose
-
- # Check if the model has the expected number of layers
- if len(list(self.model.modules())) != n_layers:
- raise ValueError("IMX export only supported for YOLOv8n and YOLO11n models.")
-
- for layer_name in layer_names:
- bit_cfg.set_manual_activation_bit_width([mct.core.common.network_editors.NodeNameFilter(layer_name)], 16)
-
- config = mct.core.CoreConfig(
- mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
- quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
- bit_width_config=bit_cfg,
- )
-
- resource_utilization = mct.core.ResourceUtilization(weights_memory=weights_memory)
-
- quant_model = (
- mct.gptq.pytorch_gradient_post_training_quantization( # Perform Gradient-Based Post Training Quantization
- model=self.model,
- representative_data_gen=representative_dataset_gen,
- target_resource_utilization=resource_utilization,
- gptq_config=mct.gptq.get_pytorch_gptq_config(
- n_epochs=1000, use_hessian_based_weights=False, use_hessian_sample_attention=False
- ),
- core_config=config,
- target_platform_capabilities=tpc,
- )[0]
- if gptq
- else mct.ptq.pytorch_post_training_quantization( # Perform post training quantization
- in_module=self.model,
- representative_data_gen=representative_dataset_gen,
- target_resource_utilization=resource_utilization,
- core_config=config,
- target_platform_capabilities=tpc,
- )[0]
- )
-
- class NMSWrapper(torch.nn.Module):
- """Wrap PyTorch Module with multiclass_nms layer from sony_custom_layers."""
-
- def __init__(
- self,
- model: torch.nn.Module,
- score_threshold: float = 0.001,
- iou_threshold: float = 0.7,
- max_detections: int = 300,
- task: str = "detect",
- ):
- """
- Initialize NMSWrapper with PyTorch Module and NMS parameters.
-
- Args:
- model (torch.nn.Module): Model instance.
- score_threshold (float): Score threshold for non-maximum suppression.
- iou_threshold (float): Intersection over union threshold for non-maximum suppression.
- max_detections (int): The number of detections to return.
- task (str): Task type, either 'detect' or 'pose'.
- """
- super().__init__()
- self.model = model
- self.score_threshold = score_threshold
- self.iou_threshold = iou_threshold
- self.max_detections = max_detections
- self.task = task
-
- def forward(self, images):
- """Forward pass with model inference and NMS post-processing."""
- # model inference
- outputs = self.model(images)
-
- boxes, scores = outputs[0], outputs[1]
- nms_outputs = multiclass_nms_with_indices(
- boxes=boxes,
- scores=scores,
- score_threshold=self.score_threshold,
- iou_threshold=self.iou_threshold,
- max_detections=self.max_detections,
- )
- if self.task == "pose":
- kpts = outputs[2] # (bs, max_detections, kpts 17*3)
- out_kpts = torch.gather(kpts, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, kpts.size(-1)))
- return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_kpts
- return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, nms_outputs.n_valid
-
- quant_model = NMSWrapper(
- model=quant_model,
- score_threshold=self.args.conf or 0.001,
- iou_threshold=self.args.iou,
- max_detections=self.args.max_det,
- task=self.model.task,
- ).to(self.device)
-
- f = Path(str(self.file).replace(self.file.suffix, "_imx_model"))
- f.mkdir(exist_ok=True)
- onnx_model = f / Path(str(self.file.name).replace(self.file.suffix, "_imx.onnx")) # js dir
- mct.exporter.pytorch_export_model(
- model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
- )
-
- model_onnx = onnx.load(onnx_model) # load onnx model
- for k, v in self.metadata.items():
- meta = model_onnx.metadata_props.add()
- meta.key, meta.value = k, str(v)
-
- onnx.save(model_onnx, onnx_model)
-
- subprocess.run(
- ["imxconv-pt", "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
- check=True,
+ return torch2imx(
+ self.model,
+ self.file,
+ self.args.conf,
+ self.args.iou,
+ self.args.max_det,
+ metadata=self.metadata,
+ dataset=self.get_int8_calibration_dataloader(prefix),
+ prefix=prefix,
  )

- # Needed for imx models.
- with open(f / "labels.txt", "w", encoding="utf-8") as file:
- file.writelines([f"{name}\n" for _, name in self.model.names.items()])
-
- return f
-
  def _add_tflite_metadata(self, file):
  """Add metadata to *.tflite models per https://ai.google.dev/edge/litert/models/metadata."""
  import zipfile
@@ -1531,7 +1385,7 @@ class NMSModel(torch.nn.Module):
  for i in range(bs):
  box, cls, score, extra = boxes[i], classes[i], scores[i], extras[i]
  mask = score > self.args.conf
- if self.is_tf:
+ if self.is_tf or (self.args.format == "onnx" and self.obb):
  # TFLite GatherND error if mask is empty
  score *= mask
  # Explicit length otherwise reshape error, hardcoded to `self.args.max_det * 5`
ultralytics/engine/trainer.py CHANGED
@@ -174,7 +174,22 @@ class BaseTrainer:

  # Callbacks
  self.callbacks = _callbacks or callbacks.get_default_callbacks()
- if RANK in {-1, 0}:
+
+ if isinstance(self.args.device, str) and len(self.args.device): # i.e. device='0' or device='0,1,2,3'
+ world_size = len(self.args.device.split(","))
+ elif isinstance(self.args.device, (tuple, list)): # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
+ world_size = len(self.args.device)
+ elif self.args.device in {"cpu", "mps"}: # i.e. device='cpu' or 'mps'
+ world_size = 0
+ elif torch.cuda.is_available(): # i.e. device=None or device='' or device=number
+ world_size = 1 # default to device 0
+ else: # i.e. device=None or device=''
+ world_size = 0
+
+ self.ddp = world_size > 1 and "LOCAL_RANK" not in os.environ
+ self.world_size = world_size
+ # Run subprocess if DDP training, else train normally
+ if RANK in {-1, 0} and not self.ddp:
  callbacks.add_integration_callbacks(self)
  # Start console logging immediately at trainer initialization
  self.run_callbacks("on_pretrain_routine_start")
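Moving this block into `__init__` means the trainer knows `self.world_size` and `self.ddp` before any callback runs, so `train()` below can simply branch on `self.ddp`. A standalone sketch of the same device-to-world-size rules (simplified; `cuda_available` stands in for `torch.cuda.is_available()`):

```python
# Standalone sketch of the world_size decision table shown above (simplified).
def resolve_world_size(device, cuda_available: bool) -> int:
    if isinstance(device, str) and len(device):    # '0' or '0,1,2,3'
        return len(device.split(","))
    if isinstance(device, (tuple, list)):          # [0, 1, 2, 3]
        return len(device)
    if device in {"cpu", "mps"}:                   # CPU or Apple silicon
        return 0
    return 1 if cuda_available else 0              # default to a single GPU if any


assert resolve_world_size("0,1", True) == 2   # two-GPU string -> DDP candidate
assert resolve_world_size("cpu", True) == 0   # explicit CPU -> no GPU processes
assert resolve_world_size(None, False) == 0   # nothing requested, no CUDA
```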
@@ -194,19 +209,8 @@ class BaseTrainer:

  def train(self):
  """Allow device='', device=None on Multi-GPU systems to default to device=0."""
- if isinstance(self.args.device, str) and len(self.args.device): # i.e. device='0' or device='0,1,2,3'
- world_size = len(self.args.device.split(","))
- elif isinstance(self.args.device, (tuple, list)): # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
- world_size = len(self.args.device)
- elif self.args.device in {"cpu", "mps"}: # i.e. device='cpu' or 'mps'
- world_size = 0
- elif torch.cuda.is_available(): # i.e. device=None or device='' or device=number
- world_size = 1 # default to device 0
- else: # i.e. device=None or device=''
- world_size = 0
-
  # Run subprocess if DDP training, else train normally
- if world_size > 1 and "LOCAL_RANK" not in os.environ:
+ if self.ddp:
  # Argument checks
  if self.args.rect:
  LOGGER.warning("'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
@@ -218,7 +222,7 @@ class BaseTrainer:
  self.args.batch = 16

  # Command
- cmd, file = generate_ddp_command(world_size, self)
+ cmd, file = generate_ddp_command(self)
  try:
  LOGGER.info(f"{colorstr('DDP:')} debug command {' '.join(cmd)}")
  subprocess.run(cmd, check=True)
@@ -228,7 +232,7 @@ class BaseTrainer:
  ddp_cleanup(self, str(file))

  else:
- self._do_train(world_size)
+ self._do_train()

  def _setup_scheduler(self):
  """Initialize training learning rate scheduler."""
@@ -238,20 +242,19 @@ class BaseTrainer:
  self.lf = lambda x: max(1 - x / self.epochs, 0) * (1.0 - self.args.lrf) + self.args.lrf # linear
  self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)

- def _setup_ddp(self, world_size):
+ def _setup_ddp(self):
  """Initialize and set the DistributedDataParallel parameters for training."""
  torch.cuda.set_device(RANK)
  self.device = torch.device("cuda", RANK)
- # LOGGER.info(f'DDP info: RANK {RANK}, WORLD_SIZE {world_size}, DEVICE {self.device}')
  os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" # set to enforce timeout
  dist.init_process_group(
  backend="nccl" if dist.is_nccl_available() else "gloo",
  timeout=timedelta(seconds=10800), # 3 hours
  rank=RANK,
- world_size=world_size,
+ world_size=self.world_size,
  )

- def _setup_train(self, world_size):
+ def _setup_train(self):
  """Build dataloaders and optimizer on correct rank process."""
  ckpt = self.setup_model()
  self.model = self.model.to(self.device)
@@ -293,13 +296,13 @@ class BaseTrainer:
  callbacks_backup = callbacks.default_callbacks.copy() # backup callbacks as check_amp() resets them
  self.amp = torch.tensor(check_amp(self.model), device=self.device)
  callbacks.default_callbacks = callbacks_backup # restore callbacks
- if RANK > -1 and world_size > 1: # DDP
+ if RANK > -1 and self.world_size > 1: # DDP
  dist.broadcast(self.amp.int(), src=0) # broadcast from rank 0 to all other ranks; gloo errors with boolean
  self.amp = bool(self.amp) # as boolean
  self.scaler = (
  torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
  )
- if world_size > 1:
+ if self.world_size > 1:
  self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)

  # Check imgsz
@@ -312,7 +315,7 @@ class BaseTrainer:
  self.args.batch = self.batch_size = self.auto_batch()

  # Dataloaders
- batch_size = self.batch_size // max(world_size, 1)
+ batch_size = self.batch_size // max(self.world_size, 1)
  self.train_loader = self.get_dataloader(
  self.data["train"], batch_size=batch_size, rank=LOCAL_RANK, mode="train"
  )
@@ -350,11 +353,11 @@ class BaseTrainer:
  self.scheduler.last_epoch = self.start_epoch - 1 # do not move
  self.run_callbacks("on_pretrain_routine_end")

- def _do_train(self, world_size=1):
+ def _do_train(self):
  """Train the model with the specified world size."""
- if world_size > 1:
- self._setup_ddp(world_size)
- self._setup_train(world_size)
+ if self.world_size > 1:
+ self._setup_ddp()
+ self._setup_train()

  nb = len(self.train_loader) # number of batches
  nw = max(round(self.args.warmup_epochs * nb), 100) if self.args.warmup_epochs > 0 else -1 # warmup iterations
@@ -365,7 +368,7 @@ class BaseTrainer:
  self.run_callbacks("on_train_start")
  LOGGER.info(
  f"Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n"
- f"Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n"
+ f"Using {self.train_loader.num_workers * (self.world_size or 1)} dataloader workers\n"
  f"Logging results to {colorstr('bold', self.save_dir)}\n"
  f"Starting training for " + (f"{self.args.time} hours..." if self.args.time else f"{self.epochs} epochs...")
  )
@@ -417,7 +420,7 @@ class BaseTrainer:
  loss, self.loss_items = unwrap_model(self.model).loss(batch, preds)
  self.loss = loss.sum()
  if RANK != -1:
- self.loss *= world_size
+ self.loss *= self.world_size
  self.tloss = (
  (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None else self.loss_items
  )
ultralytics/engine/tuner.py CHANGED
@@ -341,7 +341,8 @@ class Tuner:
  hyp[k] = round(min(max(hyp[k], bounds[0]), bounds[1]), 5)

  # Update types
- hyp["close_mosaic"] = int(round(hyp["close_mosaic"]))
+ if "close_mosaic" in hyp:
+ hyp["close_mosaic"] = int(round(hyp["close_mosaic"]))

  return hyp
ultralytics/nn/modules/head.py CHANGED
@@ -162,7 +162,7 @@ class Detect(nn.Module):
  # Inference path
  shape = x[0].shape # BCHW
  x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
- if self.format != "imx" and (self.dynamic or self.shape != shape):
+ if self.dynamic or self.shape != shape:
  self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
  self.shape = shape
@@ -182,8 +182,6 @@ class Detect(nn.Module):
  dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
  else:
  dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
- if self.export and self.format == "imx":
- return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
  return torch.cat((dbox, cls.sigmoid()), 1)

  def bias_init(self):
@@ -387,8 +385,6 @@ class Pose(Detect):
  if self.training:
  return x, kpt
  pred_kpt = self.kpts_decode(bs, kpt)
- if self.export and self.format == "imx":
- return (*x, pred_kpt.permute(0, 2, 1))
  return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt))

  def kpts_decode(self, bs: int, kpts: torch.Tensor) -> torch.Tensor:
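The IMX-specific branches deleted from `Detect` and `Pose` above are not gone: they reappear in `ultralytics/utils/export/imx.py` further down, where `FXModel.forward` binds replacement methods onto the head instances at export time with `types.MethodType`. A minimal sketch of that per-instance binding pattern, using hypothetical classes:

```python
# Minimal sketch of the per-instance method binding FXModel uses (hypothetical classes).
import types


class Head:
    def forward(self, x):
        return x + 1            # regular in-framework behaviour


def imx_forward(self, x):
    return x * 2                # export-time replacement


h = Head()
h.forward = types.MethodType(imx_forward, h)   # patch only this instance
print(h.forward(3))        # 6 -> patched instance uses the replacement forward
print(Head().forward(3))   # 4 -> every other instance keeps the original method
```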
ultralytics/nn/tasks.py CHANGED
@@ -1037,7 +1037,6 @@ class YOLOEModel(DetectionModel):
  if without_reprta:
  return txt_feats

- assert not self.training
  head = self.model[-1]
  assert isinstance(head, YOLOEDetect)
  return head.get_tpe(txt_feats) # run auxiliary text head
ultralytics/utils/benchmarks.py CHANGED
@@ -145,7 +145,7 @@ def benchmark(
  assert not is_end2end
  assert not isinstance(model, YOLOWorld), "YOLOWorldv2 IMX exports not supported"
  assert model.task == "detect", "IMX only supported for detection task"
- assert "C2f" in model.__str__(), "IMX only supported for YOLOv8" # TODO: enable for YOLO11
+ assert "C2f" in model.__str__(), "IMX only supported for YOLOv8n and YOLO11n"
  if format == "rknn":
  assert not isinstance(model, YOLOWorld), "YOLOWorldv2 RKNN exports not supported yet"
  assert not is_end2end, "End-to-end models not supported by RKNN yet"
ultralytics/utils/checks.py CHANGED
@@ -363,8 +363,8 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
  Check if installed dependencies meet Ultralytics YOLO models requirements and attempt to auto-update if needed.

  Args:
- requirements (Path | str | list[str]): Path to a requirements.txt file, a single package requirement as a
- string, or a list of package requirements as strings.
+ requirements (Path | str | list[str] | tuple[str]): Path to a requirements.txt file, a single package
+ requirement as a string, or a list of package requirements as strings.
  exclude (tuple): Tuple of package names to exclude from checking.
  install (bool): If True, attempt to auto-update packages that don't meet requirements.
  cmds (str): Additional commands to pass to the pip install command when auto-updating.
ultralytics/utils/dist.py CHANGED
@@ -76,12 +76,11 @@ if __name__ == "__main__":
  return file.name


- def generate_ddp_command(world_size: int, trainer):
+ def generate_ddp_command(trainer):
  """
  Generate command for distributed training.

  Args:
- world_size (int): Number of processes to spawn for distributed training.
  trainer (ultralytics.engine.trainer.BaseTrainer): The trainer containing configuration for distributed training.

  Returns:
@@ -95,7 +94,16 @@ def generate_ddp_command(world_size: int, trainer):
  file = generate_ddp_file(trainer)
  dist_cmd = "torch.distributed.run" if TORCH_1_9 else "torch.distributed.launch"
  port = find_free_network_port()
- cmd = [sys.executable, "-m", dist_cmd, "--nproc_per_node", f"{world_size}", "--master_port", f"{port}", file]
+ cmd = [
+ sys.executable,
+ "-m",
+ dist_cmd,
+ "--nproc_per_node",
+ f"{trainer.world_size}",
+ "--master_port",
+ f"{port}",
+ file,
+ ]
  return cmd, file
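With `world_size` now carried on the trainer, the launcher only needs the trainer object. For context, a sketch of the command the helper assembles (process count, port, and temp file name are illustrative stand-ins):

```python
# Illustration of the torch.distributed.run invocation built above (values are examples).
import sys

world_size, port, file = 2, 29500, "_temp_ddp_trainer.py"  # illustrative stand-ins
cmd = [
    sys.executable, "-m", "torch.distributed.run",
    "--nproc_per_node", f"{world_size}",
    "--master_port", f"{port}",
    file,
]
print(" ".join(cmd))
# e.g. /usr/bin/python3 -m torch.distributed.run --nproc_per_node 2 --master_port 29500 _temp_ddp_trainer.py
```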
ultralytics/utils/export.py → ultralytics/utils/export/__init__.py RENAMED
@@ -9,8 +9,10 @@ import torch

  from ultralytics.utils import IS_JETSON, LOGGER

+ from .imx import torch2imx # noqa

- def export_onnx(
+
+ def torch2onnx(
  torch_model: torch.nn.Module,
  im: torch.Tensor,
  onnx_file: str,
@@ -47,7 +49,7 @@ def export_onnx(
  )


- def export_engine(
+ def onnx2engine(
  onnx_file: str,
  engine_file: str | None = None,
  workspace: int | None = None,
@@ -98,12 +100,12 @@ def export_engine(
  # Engine builder
  builder = trt.Builder(logger)
  config = builder.create_builder_config()
- workspace = int((workspace or 0) * (1 << 30))
+ workspace_bytes = int((workspace or 0) * (1 << 30))
  is_trt10 = int(trt.__version__.split(".", 1)[0]) >= 10 # is TensorRT >= 10
- if is_trt10 and workspace > 0:
- config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)
- elif workspace > 0: # TensorRT versions 7, 8
- config.max_workspace_size = workspace
+ if is_trt10 and workspace_bytes > 0:
+ config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace_bytes)
+ elif workspace_bytes > 0: # TensorRT versions 7, 8
+ config.max_workspace_size = workspace_bytes
  flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
  network = builder.create_network(flag)
  half = builder.platform_has_fast_fp16 and half
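Renaming the local variable to `workspace_bytes` keeps the caller-facing `workspace` argument in GiB while the TensorRT limit is set in bytes. The conversion itself is just a shift:

```python
# The GiB -> bytes conversion used for the TensorRT workspace limit above.
workspace = 4                                        # user-facing value in GiB (example)
workspace_bytes = int((workspace or 0) * (1 << 30))  # GiB -> bytes
assert workspace_bytes == 4 * 1024**3                # 4294967296 bytes
print(workspace_bytes)
```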
ultralytics/utils/export/imx.py ADDED
@@ -0,0 +1,289 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ from __future__ import annotations
+
+ import subprocess
+ import types
+ from pathlib import Path
+
+ import torch
+
+ from ultralytics.nn.modules import Detect, Pose
+ from ultralytics.utils import LOGGER
+ from ultralytics.utils.tal import make_anchors
+ from ultralytics.utils.torch_utils import copy_attr
+
+
+ class FXModel(torch.nn.Module):
+ """
+ A custom model class for torch.fx compatibility.
+
+ This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph
+ manipulation. It copies attributes from an existing model and explicitly sets the model attribute to ensure proper
+ copying.
+
+ Attributes:
+ model (nn.Module): The original model's layers.
+ """
+
+ def __init__(self, model, imgsz=(640, 640)):
+ """
+ Initialize the FXModel.
+
+ Args:
+ model (nn.Module): The original model to wrap for torch.fx compatibility.
+ imgsz (tuple[int, int]): The input image size (height, width). Default is (640, 640).
+ """
+ super().__init__()
+ copy_attr(self, model)
+ # Explicitly set `model` since `copy_attr` somehow does not copy it.
+ self.model = model.model
+ self.imgsz = imgsz
+
+ def forward(self, x):
+ """
+ Forward pass through the model.
+
+ This method performs the forward pass through the model, handling the dependencies between layers and saving
+ intermediate outputs.
+
+ Args:
+ x (torch.Tensor): The input tensor to the model.
+
+ Returns:
+ (torch.Tensor): The output tensor from the model.
+ """
+ y = [] # outputs
+ for m in self.model:
+ if m.f != -1: # if not from previous layer
+ # from earlier layers
+ x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
+ if isinstance(m, Detect):
+ m._inference = types.MethodType(_inference, m) # bind method to Detect
+ m.anchors, m.strides = (
+ x.transpose(0, 1)
+ for x in make_anchors(
+ torch.cat([s / m.stride.unsqueeze(-1) for s in self.imgsz], dim=1), m.stride, 0.5
+ )
+ )
+ if type(m) is Pose:
+ m.forward = types.MethodType(pose_forward, m) # bind method to Detect
+ x = m(x) # run
+ y.append(x) # save output
+ return x
+
+
+ def _inference(self, x: list[torch.Tensor]) -> tuple[torch.Tensor]:
+ """Decode boxes and cls scores for imx object detection."""
+ x_cat = torch.cat([xi.view(x[0].shape[0], self.no, -1) for xi in x], 2)
+ box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
+ dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
+ return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
+
+
+ def pose_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Forward pass for imx pose estimation, including keypoint decoding."""
+ bs = x[0].shape[0] # batch size
+ kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1) # (bs, 17*3, h*w)
+ x = Detect.forward(self, x)
+ pred_kpt = self.kpts_decode(bs, kpt)
+ return (*x, pred_kpt.permute(0, 2, 1))
+
+
+ class NMSWrapper(torch.nn.Module):
+ """Wrap PyTorch Module with multiclass_nms layer from sony_custom_layers."""
+
+ def __init__(
+ self,
+ model: torch.nn.Module,
+ score_threshold: float = 0.001,
+ iou_threshold: float = 0.7,
+ max_detections: int = 300,
+ task: str = "detect",
+ ):
+ """
+ Initialize NMSWrapper with PyTorch Module and NMS parameters.
+
+ Args:
+ model (torch.nn.Module): Model instance.
+ score_threshold (float): Score threshold for non-maximum suppression.
+ iou_threshold (float): Intersection over union threshold for non-maximum suppression.
+ max_detections (int): The number of detections to return.
+ task (str): Task type, either 'detect' or 'pose'.
+ """
+ super().__init__()
+ self.model = model
+ self.score_threshold = score_threshold
+ self.iou_threshold = iou_threshold
+ self.max_detections = max_detections
+ self.task = task
+
+ def forward(self, images):
+ """Forward pass with model inference and NMS post-processing."""
+ from sony_custom_layers.pytorch import multiclass_nms_with_indices
+
+ # model inference
+ outputs = self.model(images)
+ boxes, scores = outputs[0], outputs[1]
+ nms_outputs = multiclass_nms_with_indices(
+ boxes=boxes,
+ scores=scores,
+ score_threshold=self.score_threshold,
+ iou_threshold=self.iou_threshold,
+ max_detections=self.max_detections,
+ )
+ if self.task == "pose":
+ kpts = outputs[2] # (bs, max_detections, kpts 17*3)
+ out_kpts = torch.gather(kpts, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, kpts.size(-1)))
+ return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_kpts
+ return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, nms_outputs.n_valid
+
+
+ def torch2imx(
+ model: torch.nn.Module,
+ file: Path | str,
+ conf: float,
+ iou: float,
+ max_det: int,
+ metadata: dict | None = None,
+ gptq: bool = False,
+ dataset=None,
+ prefix: str = "",
+ ):
+ """
+ Export YOLO model to IMX format for deployment on Sony IMX500 devices.
+
+ This function quantizes a YOLO model using Model Compression Toolkit (MCT) and exports it
+ to IMX format compatible with Sony IMX500 edge devices. It supports both YOLOv8n and YOLO11n
+ models for detection and pose estimation tasks.
+
+ Args:
+ model (torch.nn.Module): The YOLO model to export. Must be YOLOv8n or YOLO11n.
+ file (Path | str): Output file path for the exported model.
+ conf (float): Confidence threshold for NMS post-processing.
+ iou (float): IoU threshold for NMS post-processing.
+ max_det (int): Maximum number of detections to return.
+ metadata (dict | None, optional): Metadata to embed in the ONNX model. Defaults to None.
+ gptq (bool, optional): Whether to use Gradient-Based Post Training Quantization.
+ If False, uses standard Post Training Quantization. Defaults to False.
+ dataset (optional): Representative dataset for quantization calibration. Defaults to None.
+ prefix (str, optional): Logging prefix string. Defaults to "".
+
+ Returns:
+ f (Path): Path to the exported IMX model directory
+
+ Raises:
+ ValueError: If the model is not a supported YOLOv8n or YOLO11n variant.
+
+ Example:
+ >>> from ultralytics import YOLO
+ >>> model = YOLO("yolo11n.pt")
+ >>> path, _ = export_imx(model, "model.imx", conf=0.25, iou=0.45, max_det=300)
+
+ Note:
+ - Requires model_compression_toolkit, onnx, edgemdt_tpc, and sony_custom_layers packages
+ - Only supports YOLOv8n and YOLO11n models (detection and pose tasks)
+ - Output includes quantized ONNX model, IMX binary, and labels.txt file
+ """
+ import model_compression_toolkit as mct
+ import onnx
+ from edgemdt_tpc import get_target_platform_capabilities
+
+ LOGGER.info(f"\n{prefix} starting export with model_compression_toolkit {mct.__version__}...")
+
+ def representative_dataset_gen(dataloader=dataset):
+ for batch in dataloader:
+ img = batch["img"]
+ img = img / 255.0
+ yield [img]
+
+ tpc = get_target_platform_capabilities(tpc_version="4.0", device_type="imx500")
+
+ bit_cfg = mct.core.BitWidthConfig()
+ if "C2PSA" in model.__str__(): # YOLO11
+ if model.task == "detect":
+ layer_names = ["sub", "mul_2", "add_14", "cat_21"]
+ weights_memory = 2585350.2439
+ n_layers = 238 # 238 layers for fused YOLO11n
+ elif model.task == "pose":
+ layer_names = ["sub", "mul_2", "add_14", "cat_22", "cat_23", "mul_4", "add_15"]
+ weights_memory = 2437771.67
+ n_layers = 257 # 257 layers for fused YOLO11n-pose
+ else: # YOLOv8
+ if model.task == "detect":
+ layer_names = ["sub", "mul", "add_6", "cat_17"]
+ weights_memory = 2550540.8
+ n_layers = 168 # 168 layers for fused YOLOv8n
+ elif model.task == "pose":
+ layer_names = ["add_7", "mul_2", "cat_19", "mul", "sub", "add_6", "cat_18"]
+ weights_memory = 2482451.85
+ n_layers = 187 # 187 layers for fused YOLO11n-pose
+
+ # Check if the model has the expected number of layers
+ if len(list(model.modules())) != n_layers:
+ raise ValueError("IMX export only supported for YOLOv8n and YOLO11n models.")
+
+ for layer_name in layer_names:
+ bit_cfg.set_manual_activation_bit_width([mct.core.common.network_editors.NodeNameFilter(layer_name)], 16)
+
+ config = mct.core.CoreConfig(
+ mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
+ quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
+ bit_width_config=bit_cfg,
+ )
+
+ resource_utilization = mct.core.ResourceUtilization(weights_memory=weights_memory)
+
+ quant_model = (
+ mct.gptq.pytorch_gradient_post_training_quantization( # Perform Gradient-Based Post Training Quantization
+ model=model,
+ representative_data_gen=representative_dataset_gen,
+ target_resource_utilization=resource_utilization,
+ gptq_config=mct.gptq.get_pytorch_gptq_config(
+ n_epochs=1000, use_hessian_based_weights=False, use_hessian_sample_attention=False
+ ),
+ core_config=config,
+ target_platform_capabilities=tpc,
+ )[0]
+ if gptq
+ else mct.ptq.pytorch_post_training_quantization( # Perform post training quantization
+ in_module=model,
+ representative_data_gen=representative_dataset_gen,
+ target_resource_utilization=resource_utilization,
+ core_config=config,
+ target_platform_capabilities=tpc,
+ )[0]
+ )
+
+ quant_model = NMSWrapper(
+ model=quant_model,
+ score_threshold=conf or 0.001,
+ iou_threshold=iou,
+ max_detections=max_det,
+ task=model.task,
+ )
+
+ f = Path(str(file).replace(file.suffix, "_imx_model"))
+ f.mkdir(exist_ok=True)
+ onnx_model = f / Path(str(file.name).replace(file.suffix, "_imx.onnx")) # js dir
+ mct.exporter.pytorch_export_model(
+ model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
+ )
+
+ model_onnx = onnx.load(onnx_model) # load onnx model
+ for k, v in metadata.items():
+ meta = model_onnx.metadata_props.add()
+ meta.key, meta.value = k, str(v)
+
+ onnx.save(model_onnx, onnx_model)
+
+ subprocess.run(
+ ["imxconv-pt", "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
+ check=True,
+ )
+
+ # Needed for imx models.
+ with open(f / "labels.txt", "w", encoding="utf-8") as file:
+ file.writelines([f"{name}\n" for _, name in model.names.items()])
+
+ return f
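Everything in this new module (the MCT quantization, the `NMSWrapper`, the `imxconv-pt` call) is reached through the normal export entry point rather than being called directly. A hedged usage sketch, assuming Linux plus the IMX500 tooling that `export_imx` installs:

```python
# Hedged usage sketch of the public route into torch2imx (Linux + Sony IMX500 tooling required).
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # IMX export supports YOLOv8n / YOLO11n detect and pose models
model.export(format="imx", conf=0.25, iou=0.7, max_det=300)
# -> writes a '<name>_imx_model/' directory holding the quantized ONNX model,
#    the binary produced by imxconv-pt, and labels.txt
```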
ultralytics/utils/torch_utils.py CHANGED
@@ -222,7 +222,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
  f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
  f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}."
  )
- space = " " * (len(s) + 1)
+ space = " " * len(s)
  for i, d in enumerate(devices):
  s += f"{'' if i == 0 else space}CUDA:{d} ({get_gpu_info(i)})\n" # bytes to MB
  arg = "cuda:0"
@@ -959,53 +959,6 @@ class EarlyStopping:
  return stop


- class FXModel(nn.Module):
- """
- A custom model class for torch.fx compatibility.
-
- This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph
- manipulation. It copies attributes from an existing model and explicitly sets the model attribute to ensure proper
- copying.
-
- Attributes:
- model (nn.Module): The original model's layers.
- """
-
- def __init__(self, model):
- """
- Initialize the FXModel.
-
- Args:
- model (nn.Module): The original model to wrap for torch.fx compatibility.
- """
- super().__init__()
- copy_attr(self, model)
- # Explicitly set `model` since `copy_attr` somehow does not copy it.
- self.model = model.model
-
- def forward(self, x):
- """
- Forward pass through the model.
-
- This method performs the forward pass through the model, handling the dependencies between layers and saving
- intermediate outputs.
-
- Args:
- x (torch.Tensor): The input tensor to the model.
-
- Returns:
- (torch.Tensor): The output tensor from the model.
- """
- y = [] # outputs
- for m in self.model:
- if m.f != -1: # if not from previous layer
- # from earlier layers
- x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
- x = m(x) # run
- y.append(x) # save output
- return x
-
-
  def attempt_compile(
  model: torch.nn.Module,
  device: torch.device,
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics
- Version: 8.3.199
+ Version: 8.3.201
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -7,11 +7,11 @@ tests/test_exports.py,sha256=dWuroSyqXnrc0lE-RNTf7pZoXXXEkOs31u7nhOiEHS0,10994
  tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
  tests/test_python.py,sha256=2V23f2-JQsO-K4p1kj0IkCRxHykGwgd0edKJzRsBgdI,27911
  tests/test_solutions.py,sha256=6wJ9-lhyWSAm7zaR4D9L_DrUA3iJU1NgqmbQO6PIuvo,13211
- ultralytics/__init__.py,sha256=k_NhCYzBQ8jBVBIKdHD5Xwdmz9P0fjOmh9LE4pfpYZs,1120
+ ultralytics/__init__.py,sha256=sx0PRVUibg-eRGiTt0hTs3oj4681P_LK0E-vr8YuMaQ,1120
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=xX7qUxdcDgcjCKoQFEVQgzrwZodeKTF88CTKZe05d0Y,39955
+ ultralytics/cfg/__init__.py,sha256=WY1NG2sliRbhjkKkrp7Ps94My8kFe3CGDHMGGbSJtWM,39996
  ultralytics/cfg/default.yaml,sha256=lfiQ1PVxNhOzEiaRxThPedmMAhShdR4Ti8uYktJn5CI,8901
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=J4ItoUlE_EiYTmp1DFKYHfbqHkj8j4wUtRJQhaMIlBM,3275
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=VZ_KKFX0H2YvlFVJ8JHcLWYBZ2xiQ6Z-ROSTiKWpS7c,1211
@@ -20,7 +20,7 @@ ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=dnr_loeYSE6Eo_f7V1yubILsMRB
  ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256=xEtSqEad-rtfGuIrERjjhdISggmPlvaX-315ZzKz50I,934
  ultralytics/cfg/datasets/ImageNet.yaml,sha256=GvDWypLVG_H3H67Ai8IC1pvK6fwcTtF5FRhzO1OXXDU,42530
  ultralytics/cfg/datasets/Objects365.yaml,sha256=eMQuA8B4ZGp_GsmMNKFP4CziMSVduyuAK1IANkAZaJw,9367
- ultralytics/cfg/datasets/SKU-110K.yaml,sha256=NwjmBQMdN4YLt3i9jcVe_4QinivCdGlk6edwgNsun2M,2585
+ ultralytics/cfg/datasets/SKU-110K.yaml,sha256=xvRkq3SdDOwBA91U85bln7HTXkod5MvFX6pt1PxTjJE,2609
  ultralytics/cfg/datasets/VOC.yaml,sha256=NhVLvsmLOwMIteW4DPKxetURP5bTaJvYc7w08-HYAUs,3785
  ultralytics/cfg/datasets/VisDrone.yaml,sha256=vIEBrCJLrKg8zYu5imnA5XQKrXwOpVKyaLvoz5oKAG8,3581
  ultralytics/cfg/datasets/african-wildlife.yaml,sha256=SuloMp9WAZBigGC8az-VLACsFhTM76_O29yhTvUqdnU,915
@@ -121,12 +121,12 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=4nEy6_ZRTxVOox0egroPgQjeRvwNAfTKp53VVfe8vb8,74985
+ ultralytics/engine/exporter.py,sha256=IsZQ1g-7oQvmQRaVYWZiBA1M9lZOHJOmtVPZr-TVPiE,68363
  ultralytics/engine/model.py,sha256=iwwaL2NR5NSwQ7R3juHzS3ds9W-CfhC_CjUcwMvcgsk,53426
  ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
  ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
- ultralytics/engine/trainer.py,sha256=aFGnBYH9xgS2qgZc-QdgRaiMxGOeeu27dWc31hsOAvo,41030
- ultralytics/engine/tuner.py,sha256=__OaI1oS3J37iqwruojxcnCYi6L7bgXmZ3bzNvinZk4,21409
+ ultralytics/engine/trainer.py,sha256=25SIKM5Wi1XbpNz4SckmsfzbF60V-T4wKKa29FhXX1U,41035
+ ultralytics/engine/tuner.py,sha256=aUfZ6ogaER57XhN4yjs0eksYwMe7jRAj_PmuZ4pEIrI,21447
  ultralytics/engine/validator.py,sha256=7tADPOXRZz0Yi7F-Z5SxcUnwytaa2MfbtuSdO8pp_l4,16966
  ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
  ultralytics/hub/auth.py,sha256=RIwZDWfW6vS2yGpZKR0xVl0-38itJYEFtmqY_M70bl8,6304
@@ -197,13 +197,13 @@ ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYA
  ultralytics/models/yolo/yoloe/val.py,sha256=5Gd9EoFH0FmKKvWXBl4J7gBe9DVxIczN-s3ceHwdUDo,9458
  ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
  ultralytics/nn/autobackend.py,sha256=WWHIFvCI47Wpe3NCDkoUg3esjOTJ0XGEzG3luA_uG-8,41063
- ultralytics/nn/tasks.py,sha256=WfZLAypHpNo0S99FSpQDHWXBe64nMxYktuTuHCidT-Q,70412
+ ultralytics/nn/tasks.py,sha256=1hz7w60SNYk7T5TRWBOPup-mbAqCJDgZ91rv9cheqdc,70379
  ultralytics/nn/text_model.py,sha256=pHqnKe8UueR1MuwJcIE_IvrnYIlt68QL796xjcRJs2A,15275
  ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
  ultralytics/nn/modules/block.py,sha256=-5RfsA_ljekL8_bQPGupSn9dVcZ8V_lVsOGlhzIW1kg,70622
  ultralytics/nn/modules/conv.py,sha256=U6P1ZuzQmIf09noKwp7syuWn-M98Tly2wMWOsDT3kOI,21457
- ultralytics/nn/modules/head.py,sha256=7-WuatR32jpuqR5IhwHuheAwAn_izX7e7cPOHEg7MmI,53556
+ ultralytics/nn/modules/head.py,sha256=RpeAR7U8S5sqegmOk76Ch2a_jH4lnsHTZWft3CHbICA,53308
  ultralytics/nn/modules/transformer.py,sha256=l6NuuFF7j_bogcNULHBBdj5l6sf7MwiVEGz8XcRyTUM,31366
  ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
  ultralytics/solutions/__init__.py,sha256=ZoeAQavTLp8aClnhZ9tbl6lxy86GxofyGvZWTx2aWkI,1209
@@ -239,14 +239,13 @@ ultralytics/trackers/utils/matching.py,sha256=I8SX0sBaBgr4GBJ9uDGOy5LnotgNZHpB2p
  ultralytics/utils/__init__.py,sha256=whSIuj-0lV0SAp4YjOeBJZ2emP1Qa8pqLnrhRiwl2Qs,53503
  ultralytics/utils/autobatch.py,sha256=i6KYLLSItKP1Q2IUlTPHrZhjcxl7UOjs0Seb8bF8pvM,5124
  ultralytics/utils/autodevice.py,sha256=d9yq6eEn05fdfzfpxeSECd0YEO61er5f7T-0kjLdofg,8843
- ultralytics/utils/benchmarks.py,sha256=lcIr--oKK0TCjUVbvrm-NtYrnszrEMuHJC9__ziM7y8,31458
- ultralytics/utils/checks.py,sha256=Uigc10tev2z9pLjjdYwCYkQ4BrjKmurOX2nYd6liqvU,34510
+ ultralytics/utils/benchmarks.py,sha256=wBsDrwtc6NRM9rIDmqeGQ_9yxOTetnchXXHwZSUhp18,31444
+ ultralytics/utils/checks.py,sha256=H4WvEOjaxrsG0pVIpJASGXs0m3yPFUcNZRwZjnSgowQ,34523
  ultralytics/utils/cpu.py,sha256=OPlVxROWhQp-kEa9EkeNRKRQ-jz0KwySu5a-h91JZjk,3634
- ultralytics/utils/dist.py,sha256=g7OKPrSgjIB2wgcncSFYtFuR-uW6J0-Y1z76k4gDSz0,4170
+ ultralytics/utils/dist.py,sha256=5xQhWK0OLORvseAL08UmG1LYdkiDVLquxmaGSnqiSqo,4151
  ultralytics/utils/downloads.py,sha256=JIlHfUg-qna5aOHRJupH7d5zob2qGZtRrs86Cp3zOJs,23029
  ultralytics/utils/errors.py,sha256=XT9Ru7ivoBgofK6PlnyigGoa7Fmf5nEhyHtnD-8TRXI,1584
  ultralytics/utils/events.py,sha256=v2RmLlx78_K6xQfOAuUTJMOexAgNdiuiOvvnsH65oDA,4679
- ultralytics/utils/export.py,sha256=lDeEKzDecJ_F3X_AHOPIRdmDDqymRAFT0-K2hNRTWw4,9838
  ultralytics/utils/files.py,sha256=kxE2rkBuZL288nSN7jxLljmDnBgc16rekEXeRjhbUoo,8213
  ultralytics/utils/git.py,sha256=DcaxKNQfCiG3cxdzuw7M6l_VXgaSVqkERQt_vl8UyXM,5512
  ultralytics/utils/instance.py,sha256=_b_jMTECWJGzncCiTg7FtTDSSeXGnbiAhaJhIsqbn9k,19043
@@ -258,7 +257,7 @@ ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,2690
  ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
  ultralytics/utils/plotting.py,sha256=XWXZi02smBeFji3BSkMZNNNssXzO-dIxFaD15_N1f-4,47221
  ultralytics/utils/tal.py,sha256=LrziY_ZHz4wln3oOnqAzgyPaXKoup17Sa103BpuaQFU,20935
- ultralytics/utils/torch_utils.py,sha256=PBScEx9l8svOvrVD-qpNdw12F4NCdzjkVtCJ9OMNXRI,43276
+ ultralytics/utils/torch_utils.py,sha256=n-CMgLfQsg-SNF281nNHJm_kBdxPIrVr7xrI6gneL20,41771
  ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
  ultralytics/utils/triton.py,sha256=fbMfTAUyoGiyslWtySzLZw53XmZJa7rF31CYFot0Wjs,5422
  ultralytics/utils/tuner.py,sha256=9D4dSIvwwxcNSJcH2QJ92qiIVi9zu-1L7_PBZ8okDyE,6816
@@ -274,9 +273,11 @@ ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMv
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3jjY2CAWB7SNF0,5283
  ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
- ultralytics-8.3.199.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.3.199.dist-info/METADATA,sha256=otTH4SnJ1YgGgkjfI2OQimGCmpdi9ox0C9iHIF0KRmo,37667
- ultralytics-8.3.199.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ultralytics-8.3.199.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.3.199.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.3.199.dist-info/RECORD,,
+ ultralytics/utils/export/__init__.py,sha256=jQtf716PP0jt7bMoY9FkqmjG26KbvDzuR84jGhaBi2U,9901
+ ultralytics/utils/export/imx.py,sha256=Jl5nuNxqaP_bY5yrV2NypmoJSrexHE71TxR72SDdjcg,11394
+ ultralytics-8.3.201.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.3.201.dist-info/METADATA,sha256=ZnE5i7C-TsXEHSgxecxc36_EbEkDU0oCZwJEEKikIPc,37667
+ ultralytics-8.3.201.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ultralytics-8.3.201.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.3.201.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.3.201.dist-info/RECORD,,