ultralytics-opencv-headless 8.3.246__py3-none-any.whl → 8.3.251__py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registry.
Files changed (28)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/cfg/__init__.py +6 -4
  3. ultralytics/cfg/datasets/TT100K.yaml +346 -0
  4. ultralytics/data/converter.py +1 -1
  5. ultralytics/engine/model.py +4 -3
  6. ultralytics/engine/results.py +2 -2
  7. ultralytics/engine/trainer.py +28 -25
  8. ultralytics/engine/tuner.py +1 -0
  9. ultralytics/engine/validator.py +4 -1
  10. ultralytics/models/sam/modules/utils.py +1 -1
  11. ultralytics/models/yolo/detect/val.py +3 -3
  12. ultralytics/nn/modules/transformer.py +4 -4
  13. ultralytics/nn/tasks.py +2 -2
  14. ultralytics/solutions/object_counter.py +1 -1
  15. ultralytics/utils/benchmarks.py +1 -1
  16. ultralytics/utils/callbacks/platform.py +187 -64
  17. ultralytics/utils/checks.py +31 -17
  18. ultralytics/utils/export/imx.py +12 -2
  19. ultralytics/utils/logger.py +7 -2
  20. ultralytics/utils/metrics.py +3 -3
  21. ultralytics/utils/plotting.py +3 -1
  22. ultralytics/utils/tuner.py +11 -3
  23. {ultralytics_opencv_headless-8.3.246.dist-info → ultralytics_opencv_headless-8.3.251.dist-info}/METADATA +1 -1
  24. {ultralytics_opencv_headless-8.3.246.dist-info → ultralytics_opencv_headless-8.3.251.dist-info}/RECORD +28 -27
  25. {ultralytics_opencv_headless-8.3.246.dist-info → ultralytics_opencv_headless-8.3.251.dist-info}/WHEEL +0 -0
  26. {ultralytics_opencv_headless-8.3.246.dist-info → ultralytics_opencv_headless-8.3.251.dist-info}/entry_points.txt +0 -0
  27. {ultralytics_opencv_headless-8.3.246.dist-info → ultralytics_opencv_headless-8.3.251.dist-info}/licenses/LICENSE +0 -0
  28. {ultralytics_opencv_headless-8.3.246.dist-info → ultralytics_opencv_headless-8.3.251.dist-info}/top_level.txt +0 -0
ultralytics/utils/callbacks/platform.py
@@ -8,11 +8,9 @@ from concurrent.futures import ThreadPoolExecutor
 from pathlib import Path
 from time import time
 
-from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING
+from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING, colorstr
 
-_last_upload = 0  # Rate limit model uploads
-_console_logger = None  # Global console logger instance
-_system_logger = None  # Cached system logger instance
+PREFIX = colorstr("Platform: ")
 
 try:
     assert not TESTS_RUNNING  # do not log pytest
@@ -31,22 +29,133 @@ except (AssertionError, ImportError):
     _api_key = None
 
 
-def _send(event, data, project, name):
-    """Send event to Platform endpoint."""
+def resolve_platform_uri(uri, hard=True):
+    """Resolve ul:// URIs to signed URLs by authenticating with Ultralytics Platform.
+
+    Formats:
+        ul://username/datasets/slug -> Returns signed URL to NDJSON file
+        ul://username/project/model -> Returns signed URL to .pt file
+
+    Args:
+        uri (str): Platform URI starting with "ul://".
+        hard (bool): Whether to raise an error if resolution fails (FileNotFoundError only).
+
+    Returns:
+        (str | None): Signed URL on success, None if not found and hard=False.
+
+    Raises:
+        ValueError: If API key is missing/invalid or URI format is wrong.
+        PermissionError: If access is denied.
+        RuntimeError: If resource is not ready (e.g., dataset still processing).
+        FileNotFoundError: If resource not found and hard=True.
+        ConnectionError: If network request fails and hard=True.
+    """
+    import requests
+
+    path = uri[5:]  # Remove "ul://"
+    parts = path.split("/")
+
+    api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
+    if not api_key:
+        raise ValueError(f"ULTRALYTICS_API_KEY required for '{uri}'. Get key at https://alpha.ultralytics.com/settings")
+
+    base = "https://alpha.ultralytics.com/api/webhooks"
+    headers = {"Authorization": f"Bearer {api_key}"}
+
+    # ul://username/datasets/slug
+    if len(parts) == 3 and parts[1] == "datasets":
+        username, _, slug = parts
+        url = f"{base}/datasets/{username}/{slug}/export"
+
+    # ul://username/project/model
+    elif len(parts) == 3:
+        username, project, model = parts
+        url = f"{base}/models/{username}/{project}/{model}/download"
+
+    else:
+        raise ValueError(f"Invalid platform URI: {uri}. Use ul://user/datasets/name or ul://user/project/model")
+
     try:
-        requests.post(
+        r = requests.head(url, headers=headers, allow_redirects=False, timeout=30)
+
+        # Handle redirect responses (301, 302, 303, 307, 308)
+        if 300 <= r.status_code < 400 and "location" in r.headers:
+            return r.headers["location"]  # Return signed URL
+
+        # Handle error responses
+        if r.status_code == 401:
+            raise ValueError(f"Invalid ULTRALYTICS_API_KEY for '{uri}'")
+        if r.status_code == 403:
+            raise PermissionError(f"Access denied for '{uri}'. Check dataset/model visibility settings.")
+        if r.status_code == 404:
+            if hard:
+                raise FileNotFoundError(f"Not found on platform: {uri}")
+            LOGGER.warning(f"Not found on platform: {uri}")
+            return None
+        if r.status_code == 409:
+            raise RuntimeError(f"Resource not ready: {uri}. Dataset may still be processing.")
+
+        # Unexpected response
+        r.raise_for_status()
+        raise RuntimeError(f"Unexpected response from platform for '{uri}': {r.status_code}")
+
+    except requests.exceptions.RequestException as e:
+        if hard:
+            raise ConnectionError(f"Failed to resolve {uri}: {e}") from e
+        LOGGER.warning(f"Failed to resolve {uri}: {e}")
+        return None
+
+
+def _interp_plot(plot, n=101):
+    """Interpolate plot curve data from 1000 to n points to reduce storage size."""
+    import numpy as np
+
+    if not plot.get("x") or not plot.get("y"):
+        return plot  # No interpolation needed (e.g., confusion_matrix)
+
+    x, y = np.array(plot["x"]), np.array(plot["y"])
+    if len(x) <= n:
+        return plot  # Already small enough
+
+    # New x values (101 points gives clean 0.01 increments: 0, 0.01, 0.02, ..., 1.0)
+    x_new = np.linspace(x[0], x[-1], n)
+
+    # Interpolate y values (handle both 1D and 2D arrays)
+    if y.ndim == 1:
+        y_new = np.interp(x_new, x, y)
+    else:
+        y_new = np.array([np.interp(x_new, x, yi) for yi in y])
+
+    # Also interpolate ap if present (for PR curves)
+    result = {**plot, "x": x_new.tolist(), "y": y_new.tolist()}
+    if "ap" in plot:
+        result["ap"] = plot["ap"]  # Keep AP values as-is (per-class scalars)
+
+    return result
+
+
+def _send(event, data, project, name, model_id=None):
+    """Send event to Platform endpoint. Returns response JSON on success."""
+    try:
+        payload = {"event": event, "project": project, "name": name, "data": data}
+        if model_id:
+            payload["modelId"] = model_id
+        r = requests.post(
             "https://alpha.ultralytics.com/api/webhooks/training/metrics",
-            json={"event": event, "project": project, "name": name, "data": data},
+            json=payload,
             headers={"Authorization": f"Bearer {_api_key}"},
             timeout=10,
-        ).raise_for_status()
+        )
+        r.raise_for_status()
+        return r.json()
     except Exception as e:
         LOGGER.debug(f"Platform: Failed to send {event}: {e}")
+        return None
 
 
-def _send_async(event, data, project, name):
+def _send_async(event, data, project, name, model_id=None):
     """Send event asynchronously using bounded thread pool."""
-    _executor.submit(_send, event, data, project, name)
+    _executor.submit(_send, event, data, project, name, model_id)
 
 
 def _upload_model(model_path, project, name):
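Note on the resolution flow: resolve_platform_uri never transfers file bytes itself; it issues a single HEAD request and hands back the location header of the redirect, leaving the download to the caller. A minimal usage sketch (the ul:// URI is a placeholder, and a valid ULTRALYTICS_API_KEY must be set):

    from ultralytics.utils.downloads import safe_download
    from ultralytics.utils.callbacks.platform import resolve_platform_uri

    # Placeholder URI; resolution authenticates against the Platform
    url = resolve_platform_uri("ul://username/project/model", hard=False)
    if url:  # None means the resource was not found (hard=False soft-fails)
        safe_download(url=url, file="model.pt", unzip=False)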
@@ -75,7 +184,8 @@ def _upload_model(model_path, project, name):
             timeout=600,  # 10 min timeout for large models
         ).raise_for_status()
 
-        LOGGER.info(f"Platform: Model uploaded to '{project}'")
+        # url = f"https://alpha.ultralytics.com/{project}/{name}"
+        # LOGGER.info(f"{PREFIX}Model uploaded to {url}")
         return data.get("gcsPath")
 
     except Exception as e:
@@ -141,59 +251,57 @@ def _get_environment_info():
 
 def on_pretrain_routine_start(trainer):
     """Initialize Platform logging at training start."""
-    global _console_logger, _last_upload
-
     if RANK not in {-1, 0} or not trainer.args.project:
         return
 
-    # Initialize upload timer to now so first checkpoint waits 15 min from training start
-    _last_upload = time()
+    # Per-trainer state to isolate concurrent training runs
+    trainer._platform_model_id = None
+    trainer._platform_last_upload = time()
 
     project, name = str(trainer.args.project), str(trainer.args.name or "train")
-    LOGGER.info(f"Platform: Streaming to project '{project}' as '{name}'")
+    url = f"https://alpha.ultralytics.com/{project}/{name}"
+    LOGGER.info(f"{PREFIX}Streaming to {url}")
 
     # Create callback to send console output to Platform
     def send_console_output(content, line_count, chunk_id):
         """Send batched console output to Platform webhook."""
-        _send_async("console_output", {"chunkId": chunk_id, "content": content, "lineCount": line_count}, project, name)
+        _send_async(
+            "console_output",
+            {"chunkId": chunk_id, "content": content, "lineCount": line_count},
+            project,
+            name,
+            getattr(trainer, "_platform_model_id", None),
+        )
 
     # Start console capture with batching (5 lines or 5 seconds)
-    _console_logger = ConsoleLogger(batch_size=5, flush_interval=5.0, on_flush=send_console_output)
-    _console_logger.start_capture()
-
-    # Gather model info for richer metadata
-    model_info = {}
-    try:
-        info = model_info_for_loggers(trainer)
-        model_info = {
-            "parameters": info.get("model/parameters", 0),
-            "gflops": info.get("model/GFLOPs", 0),
-            "classes": getattr(trainer.model, "yaml", {}).get("nc", 0),  # number of classes
-        }
-    except Exception:
-        pass
+    trainer._platform_console_logger = ConsoleLogger(batch_size=5, flush_interval=5.0, on_flush=send_console_output)
+    trainer._platform_console_logger.start_capture()
 
     # Collect environment info (W&B-style metadata)
     environment = _get_environment_info()
 
-    _send_async(
+    # Build trainArgs - callback runs before get_dataset() so args.data is still original (e.g., ul:// URIs)
+    # Note: model_info is sent later in on_fit_epoch_end (epoch 0) when the model is actually loaded
+    train_args = {k: str(v) for k, v in vars(trainer.args).items()}
+
+    # Send synchronously to get modelId for subsequent webhooks
+    response = _send(
         "training_started",
         {
-            "trainArgs": {k: str(v) for k, v in vars(trainer.args).items()},
+            "trainArgs": train_args,
             "epochs": trainer.epochs,
             "device": str(trainer.device),
-            "modelInfo": model_info,
             "environment": environment,
         },
        project,
        name,
    )
+    if response and response.get("modelId"):
+        trainer._platform_model_id = response["modelId"]
 
 
 def on_fit_epoch_end(trainer):
     """Log training and system metrics at epoch end."""
-    global _system_logger
-
     if RANK not in {-1, 0} or not trainer.args.project:
         return
 
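Moving state off module globals and onto the trainer object is what lets two trainers in the same process stream to the Platform without clobbering each other's upload timers or model IDs. A toy illustration of the pattern (Run is a stand-in class, not library code):

    from time import time

    class Run:  # stand-in for a trainer instance
        pass

    a, b = Run(), Run()
    a._platform_last_upload = time()  # run A just uploaded
    # Run B has no attribute yet, so getattr falls back to 0 ("never uploaded")
    assert getattr(b, "_platform_last_upload", 0) == 0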
@@ -202,44 +310,55 @@ def on_fit_epoch_end(trainer):
 
     if trainer.optimizer and trainer.optimizer.param_groups:
         metrics["lr"] = trainer.optimizer.param_groups[0]["lr"]
+
+    # Extract model info at epoch 0 (sent as separate field, not in metrics)
+    model_info = None
     if trainer.epoch == 0:
         try:
-            metrics.update(model_info_for_loggers(trainer))
+            info = model_info_for_loggers(trainer)
+            model_info = {
+                "parameters": info.get("model/parameters", 0),
+                "gflops": info.get("model/GFLOPs", 0),
+                "speedMs": info.get("model/speed_PyTorch(ms)", 0),
+            }
        except Exception:
            pass
 
-    # Get system metrics (cache SystemLogger for efficiency)
+    # Get system metrics (cache SystemLogger on trainer for efficiency)
     system = {}
     try:
-        if _system_logger is None:
-            _system_logger = SystemLogger()
-        system = _system_logger.get_metrics(rates=True)
+        if not hasattr(trainer, "_platform_system_logger"):
+            trainer._platform_system_logger = SystemLogger()
+        system = trainer._platform_system_logger.get_metrics(rates=True)
     except Exception:
         pass
 
+    payload = {
+        "epoch": trainer.epoch,
+        "metrics": metrics,
+        "system": system,
+        "fitness": trainer.fitness,
+        "best_fitness": trainer.best_fitness,
+    }
+    if model_info:
+        payload["modelInfo"] = model_info
+
     _send_async(
         "epoch_end",
-        {
-            "epoch": trainer.epoch,
-            "metrics": metrics,
-            "system": system,
-            "fitness": trainer.fitness,
-            "best_fitness": trainer.best_fitness,
-        },
+        payload,
         project,
         name,
+        getattr(trainer, "_platform_model_id", None),
     )
 
 
 def on_model_save(trainer):
     """Upload model checkpoint (rate limited to every 15 min)."""
-    global _last_upload
-
     if RANK not in {-1, 0} or not trainer.args.project:
         return
 
     # Rate limit to every 15 minutes (900 seconds)
-    if time() - _last_upload < 900:
+    if time() - getattr(trainer, "_platform_last_upload", 0) < 900:
         return
 
     model_path = trainer.best if trainer.best and Path(trainer.best).exists() else trainer.last
@@ -248,22 +367,20 @@ def on_model_save(trainer):
 
     project, name = str(trainer.args.project), str(trainer.args.name or "train")
     _upload_model_async(model_path, project, name)
-    _last_upload = time()
+    trainer._platform_last_upload = time()
 
 
 def on_train_end(trainer):
     """Log final results, upload best model, and send validation plot data."""
-    global _console_logger
-
     if RANK not in {-1, 0} or not trainer.args.project:
         return
 
     project, name = str(trainer.args.project), str(trainer.args.name or "train")
 
     # Stop console capture
-    if _console_logger:
-        _console_logger.stop_capture()
-        _console_logger = None
+    if hasattr(trainer, "_platform_console_logger") and trainer._platform_console_logger:
+        trainer._platform_console_logger.stop_capture()
+        trainer._platform_console_logger = None
 
     # Upload best model (blocking to ensure it completes)
     model_path = None
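Taken together, the two hunks above implement a simple timestamp rate limiter: the check in on_model_save reads the per-trainer timestamp and the upload path refreshes it. Distilled into a standalone sketch (should_upload is an illustrative helper name; the 900 s interval mirrors the callback):

    from time import time

    def should_upload(trainer, interval=900):
        """Return True at most once per interval per trainer."""
        if time() - getattr(trainer, "_platform_last_upload", 0) < interval:
            return False
        trainer._platform_last_upload = time()
        return True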
@@ -272,11 +389,15 @@ def on_train_end(trainer):
         model_size = Path(trainer.best).stat().st_size
         model_path = _upload_model(trainer.best, project, name)
 
-    # Collect plots from trainer and validator
-    plots = [info["data"] for info in getattr(trainer, "plots", {}).values() if info.get("data")]
-    plots += [
-        info["data"] for info in getattr(getattr(trainer, "validator", None), "plots", {}).values() if info.get("data")
-    ]
+    # Collect plots from trainer and validator, deduplicating by type
+    plots_by_type = {}
+    for info in getattr(trainer, "plots", {}).values():
+        if info.get("data") and info["data"].get("type"):
+            plots_by_type[info["data"]["type"]] = info["data"]
+    for info in getattr(getattr(trainer, "validator", None), "plots", {}).values():
+        if info.get("data") and info["data"].get("type"):
+            plots_by_type.setdefault(info["data"]["type"], info["data"])  # Don't overwrite trainer plots
+    plots = [_interp_plot(p) for p in plots_by_type.values()]  # Interpolate curves to reduce size
 
     # Get class names
     names = getattr(getattr(trainer, "validator", None), "names", None) or (trainer.data or {}).get("names")
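For scale, _interp_plot turns the 1000-point curves produced during validation into 101-point curves before upload. The core of that is a plain np.interp resampling, shown here on a synthetic curve:

    import numpy as np

    x = np.linspace(0, 1, 1000)  # original 1000-point axis
    y = np.sqrt(x)  # synthetic curve; np.interp requires increasing x
    x_new = np.linspace(x[0], x[-1], 101)  # 0.00, 0.01, ..., 1.00
    y_new = np.interp(x_new, x, y)
    print(x_new.shape, y_new.shape)  # (101,) (101,)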
@@ -297,8 +418,10 @@ def on_train_end(trainer):
         },
         project,
         name,
+        getattr(trainer, "_platform_model_id", None),
     )
-    LOGGER.info(f"Platform: Training complete, results uploaded to '{project}' ({len(plots)} plots)")
+    url = f"https://alpha.ultralytics.com/{project}/{name}"
+    LOGGER.info(f"{PREFIX}View results at {url}")
 
 
 callbacks = (
ultralytics/utils/checks.py
@@ -12,6 +12,7 @@ import platform
 import re
 import shutil
 import subprocess
+import sys
 import time
 from importlib import metadata
 from pathlib import Path
@@ -453,21 +454,15 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
     def attempt_install(packages, commands, use_uv):
         """Attempt package installation with uv if available, falling back to pip."""
         if use_uv:
-            base = (
-                f"uv pip install --no-cache-dir {packages} {commands} "
-                f"--index-strategy=unsafe-best-match --break-system-packages"
+            # Use --python to explicitly target current interpreter (venv or system)
+            # This ensures correct installation when VIRTUAL_ENV env var isn't set
+            return subprocess.check_output(
+                f'uv pip install --no-cache-dir --python "{sys.executable}" {packages} {commands} '
+                f"--index-strategy=unsafe-best-match --break-system-packages",
+                shell=True,
+                stderr=subprocess.STDOUT,
+                text=True,
             )
-            try:
-                return subprocess.check_output(base, shell=True, stderr=subprocess.STDOUT, text=True)
-            except subprocess.CalledProcessError as e:
-                if e.output and "No virtual environment found" in e.output:
-                    return subprocess.check_output(
-                        base.replace("uv pip install", "uv pip install --system"),
-                        shell=True,
-                        stderr=subprocess.STDOUT,
-                        text=True,
-                    )
-                raise
         return subprocess.check_output(
             f"pip install --no-cache-dir {packages} {commands}", shell=True, stderr=subprocess.STDOUT, text=True
         )
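The net effect of the uv change is one unconditional command in place of a try/except retry with --system: passing --python pins the target environment to the running interpreter, so uv no longer needs an activated virtual environment. A sketch of the command string being built (package spec illustrative):

    import sys

    packages = '"numpy>=1.23"'
    cmd = (
        f'uv pip install --no-cache-dir --python "{sys.executable}" {packages} '
        f"--index-strategy=unsafe-best-match --break-system-packages"
    )
    print(cmd)  # installs into sys.executable's environment, venv or not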
@@ -597,7 +592,7 @@ def check_file(file, suffix="", download=True, download_dir=".", hard=True):
     """Search/download file (if necessary), check suffix (if provided), and return path.
 
     Args:
-        file (str): File name or path.
+        file (str): File name or path, URL, platform URI (ul://), or GCS path (gs://).
         suffix (str | tuple): Acceptable suffix or tuple of suffixes to validate against the file.
         download (bool): Whether to download the file if it doesn't exist locally.
         download_dir (str): Directory to download the file to.
@@ -615,7 +610,26 @@ def check_file(file, suffix="", download=True, download_dir=".", hard=True):
         or file.lower().startswith("grpc://")
     ):  # file exists or gRPC Triton images
         return file
-    elif download and file.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):  # download
+    elif download and file.lower().startswith("ul://"):  # Ultralytics Platform URI
+        from ultralytics.utils.callbacks.platform import resolve_platform_uri
+
+        url = resolve_platform_uri(file, hard=hard)  # Convert to signed HTTPS URL
+        if url is None:
+            return []  # Not found, soft fail (consistent with file search behavior)
+        # Use URI path for unique directory structure: ul://user/project/model -> user/project/model/filename.pt
+        uri_path = file[5:]  # Remove "ul://"
+        local_file = Path(download_dir) / uri_path / url2file(url)
+        if local_file.exists():
+            LOGGER.info(f"Found {clean_url(url)} locally at {local_file}")
+        else:
+            local_file.parent.mkdir(parents=True, exist_ok=True)
+            downloads.safe_download(url=url, file=local_file, unzip=False)
+        return str(local_file)
+    elif download and file.lower().startswith(
+        ("https://", "http://", "rtsp://", "rtmp://", "tcp://", "gs://")
+    ):  # download
+        if file.startswith("gs://"):
+            file = "https://storage.googleapis.com/" + file[5:]  # convert gs:// to public HTTPS URL
         url = file  # warning: Pathlib turns :// -> :/
         file = Path(download_dir) / url2file(file)  # '%2F' to '/', split https://url.com/file.txt?auth
         if file.exists():
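The gs:// branch is a pure URL rewrite before the normal HTTPS download path takes over; per the code comment it produces a public URL, so it only works for publicly readable objects. The transformation on its own (bucket and object names are placeholders):

    file = "gs://bucket/weights/model.pt"
    if file.startswith("gs://"):
        file = "https://storage.googleapis.com/" + file[5:]
    print(file)  # https://storage.googleapis.com/bucket/weights/model.pt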
@@ -950,7 +964,7 @@ def is_rockchip():
         with open("/proc/device-tree/compatible") as f:
             dev_str = f.read()
             *_, soc = dev_str.split(",")
-            if soc.replace("\x00", "") in RKNN_CHIPS:
+            if soc.replace("\x00", "").split("-", 1)[0] in RKNN_CHIPS:
                 return True
     except OSError:
         return False
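The added split handles compatible strings that append a board suffix to the SoC name, which previously failed the exact membership test against RKNN_CHIPS. Illustration with a made-up device-tree value:

    dev_str = "rockchip,rk3588-evb1-v10\x00"  # hypothetical /proc/device-tree/compatible content
    *_, soc = dev_str.split(",")
    print(soc.replace("\x00", "").split("-", 1)[0])  # rk3588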
ultralytics/utils/export/imx.py
@@ -3,14 +3,16 @@
 from __future__ import annotations
 
 import subprocess
+import sys
 import types
 from pathlib import Path
+from shutil import which
 
 import numpy as np
 import torch
 
 from ultralytics.nn.modules import Detect, Pose, Segment
-from ultralytics.utils import LOGGER
+from ultralytics.utils import LOGGER, WINDOWS
 from ultralytics.utils.patches import onnx_export_patch
 from ultralytics.utils.tal import make_anchors
 from ultralytics.utils.torch_utils import copy_attr
@@ -303,8 +305,16 @@ def torch2imx(
 
     onnx.save(model_onnx, onnx_model)
 
+    # Find imxconv-pt binary - check venv bin directory first, then PATH
+    bin_dir = Path(sys.executable).parent
+    imxconv = bin_dir / ("imxconv-pt.exe" if WINDOWS else "imxconv-pt")
+    if not imxconv.exists():
+        imxconv = which("imxconv-pt")  # fallback to PATH
+        if not imxconv:
+            raise FileNotFoundError("imxconv-pt not found. Install with: pip install imx500-converter[pt]")
+
     subprocess.run(
-        ["imxconv-pt", "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
+        [str(imxconv), "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
         check=True,
     )
 
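The lookup order here, interpreter-adjacent bin directory first and PATH second, generalizes to any console script installed alongside the current Python. A sketch of that pattern under the same assumptions (find_console_script is an illustrative helper, not part of the package):

    import sys
    from pathlib import Path
    from shutil import which

    def find_console_script(name, windows=False):
        """Resolve a console script next to sys.executable, falling back to PATH."""
        candidate = Path(sys.executable).parent / (f"{name}.exe" if windows else name)
        return candidate if candidate.exists() else which(name)

    # e.g., find_console_script("imxconv-pt") mirrors the lookup above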
ultralytics/utils/logger.py
@@ -330,14 +330,19 @@ class SystemLogger:
 
     def _init_nvidia(self):
         """Initialize NVIDIA GPU monitoring with pynvml."""
+        if MACOS:
+            return False
+
         try:
-            assert not MACOS
             check_requirements("nvidia-ml-py>=12.0.0")
             self.pynvml = __import__("pynvml")
             self.pynvml.nvmlInit()
             return True
         except Exception as e:
-            LOGGER.warning(f"SystemLogger NVML init failed: {e}")
+            import torch
+
+            if torch.cuda.is_available():
+                LOGGER.warning(f"SystemLogger NVML init failed: {e}")
             return False
 
     def get_metrics(self, rates=False):
ultralytics/utils/metrics.py
@@ -315,7 +315,7 @@ class ConfusionMatrix(DataExportMixin):
         matches (dict): Contains the indices of ground truths and predictions categorized into TP, FP and FN.
     """
 
-    def __init__(self, names: dict[int, str] = [], task: str = "detect", save_matches: bool = False):
+    def __init__(self, names: dict[int, str] = {}, task: str = "detect", save_matches: bool = False):
         """Initialize a ConfusionMatrix instance.
 
         Args:
@@ -568,7 +568,6 @@
         fig.savefig(plot_fname, dpi=250)
         plt.close(fig)
         if on_plot:
-            # Pass confusion matrix data for interactive plotting (raw counts only, normalization done on frontend)
             on_plot(plot_fname, {"type": "confusion_matrix", "matrix": self.matrix.tolist()})
 
     def print(self):
@@ -663,7 +662,8 @@ def plot_pr_curve(
     plt.close(fig)
     if on_plot:
         # Pass PR curve data for interactive plotting (class names stored at model level)
-        on_plot(save_dir, {"type": "pr_curve", "x": px.tolist(), "y": py.tolist(), "ap": ap.tolist()})
+        # Transpose py to match other curves: y[class][point] format
+        on_plot(save_dir, {"type": "pr_curve", "x": px.tolist(), "y": py.T.tolist(), "ap": ap.tolist()})
 
 
 @plt_settings()
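The transpose is a shape-contract fix: assuming py is stacked here with one precision curve per class along the columns, py.tolist() emitted point-major data while the other exported curves are class-major. With illustrative sizes:

    import numpy as np

    py = np.zeros((1000, 3))  # (recall points, classes), assumed upstream stacking
    print(np.array(py.T.tolist()).shape)  # (3, 1000) -> y[class][point]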
ultralytics/utils/plotting.py
@@ -786,7 +786,6 @@ def plot_images(
             boxes[..., 0] += x
             boxes[..., 1] += y
             is_obb = boxes.shape[-1] == 5  # xywhr
-            # TODO: this transformation might be unnecessary
             boxes = ops.xywhr2xyxyxyxy(boxes) if is_obb else ops.xywh2xyxy(boxes)
             for j, box in enumerate(boxes.astype(np.int64).tolist()):
                 c = classes[j]
@@ -973,6 +972,9 @@ def plot_tune_results(csv_file: str = "tune_results.csv", exclude_zero_fitness_p
     if exclude_zero_fitness_points:
         mask = fitness > 0  # exclude zero-fitness points
         x, fitness = x[mask], fitness[mask]
+    if len(fitness) == 0:
+        LOGGER.warning("No valid fitness values to plot (all iterations may have failed)")
+        return
     # Iterative sigma rejection on lower bound only
     for _ in range(3):  # max 3 iterations
         mean, std = fitness.mean(), fitness.std()
ultralytics/utils/tuner.py
@@ -35,9 +35,6 @@ def run_ray_tune(
         >>> result_grid = model.tune(data="coco8.yaml", use_ray=True)
     """
     LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")
-    if train_args is None:
-        train_args = {}
-
     try:
         checks.check_requirements("ray[tune]")
 
@@ -87,12 +84,23 @@ def run_ray_tune(
     # Put the model in ray store
     task = model.task
     model_in_store = ray.put(model)
+    base_name = train_args.get("name", "tune")
 
     def _tune(config):
         """Train the YOLO model with the specified hyperparameters and return results."""
         model_to_train = ray.get(model_in_store)  # get the model from ray store for tuning
         model_to_train.reset_callbacks()
         config.update(train_args)
+
+        # Set trial-specific name for W&B logging
+        try:
+            trial_id = tune.get_trial_id()  # Get current trial ID (e.g., "2c2fc_00000")
+            trial_suffix = trial_id.split("_")[-1] if "_" in trial_id else trial_id
+            config["name"] = f"{base_name}_{trial_suffix}"
+        except Exception:
+            # Not in Ray Tune context or error getting trial ID, use base name
+            config["name"] = base_name
+
         results = model_to_train.train(**config)
         return results.results_dict
 
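Under this scheme each Ray Tune trial gets a distinct run name derived from its trial ID, so per-trial W&B runs no longer collide. Tracing the string handling with the ID format cited in the code comment above:

    base_name = "tune"
    trial_id = "2c2fc_00000"  # example format from the comment above
    trial_suffix = trial_id.split("_")[-1] if "_" in trial_id else trial_id
    print(f"{base_name}_{trial_suffix}")  # tune_00000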
ultralytics_opencv_headless-8.3.251.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics-opencv-headless
-Version: 8.3.246
+Version: 8.3.251
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>