dgenerate-ultralytics-headless 8.3.186-py3-none-any.whl → 8.3.189-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {dgenerate_ultralytics_headless-8.3.186.dist-info → dgenerate_ultralytics_headless-8.3.189.dist-info}/METADATA +6 -6
  2. {dgenerate_ultralytics_headless-8.3.186.dist-info → dgenerate_ultralytics_headless-8.3.189.dist-info}/RECORD +28 -28
  3. tests/test_python.py +2 -10
  4. ultralytics/__init__.py +1 -1
  5. ultralytics/cfg/datasets/SKU-110K.yaml +2 -2
  6. ultralytics/engine/exporter.py +4 -4
  7. ultralytics/engine/results.py +1 -4
  8. ultralytics/engine/trainer.py +3 -3
  9. ultralytics/models/rtdetr/val.py +3 -1
  10. ultralytics/models/sam/__init__.py +8 -2
  11. ultralytics/models/sam/modules/sam.py +6 -6
  12. ultralytics/models/sam/predict.py +363 -6
  13. ultralytics/models/yolo/detect/val.py +13 -2
  14. ultralytics/models/yolo/obb/val.py +3 -1
  15. ultralytics/models/yolo/segment/val.py +0 -3
  16. ultralytics/nn/autobackend.py +6 -3
  17. ultralytics/nn/tasks.py +2 -2
  18. ultralytics/utils/__init__.py +39 -94
  19. ultralytics/utils/benchmarks.py +16 -9
  20. ultralytics/utils/callbacks/wb.py +9 -3
  21. ultralytics/utils/downloads.py +44 -38
  22. ultralytics/utils/plotting.py +13 -20
  23. ultralytics/utils/torch_utils.py +50 -47
  24. ultralytics/utils/tqdm.py +25 -37
  25. {dgenerate_ultralytics_headless-8.3.186.dist-info → dgenerate_ultralytics_headless-8.3.189.dist-info}/WHEEL +0 -0
  26. {dgenerate_ultralytics_headless-8.3.186.dist-info → dgenerate_ultralytics_headless-8.3.189.dist-info}/entry_points.txt +0 -0
  27. {dgenerate_ultralytics_headless-8.3.186.dist-info → dgenerate_ultralytics_headless-8.3.189.dist-info}/licenses/LICENSE +0 -0
  28. {dgenerate_ultralytics_headless-8.3.186.dist-info → dgenerate_ultralytics_headless-8.3.189.dist-info}/top_level.txt +0 -0
ultralytics/utils/__init__.py
@@ -12,6 +12,7 @@ import subprocess
 import sys
 import threading
 import time
+from functools import lru_cache
 from pathlib import Path
 from threading import Lock
 from types import SimpleNamespace
@@ -134,17 +135,14 @@ class DataExportMixin:
     Mixin class for exporting validation metrics or prediction results in various formats.
 
     This class provides utilities to export performance metrics (e.g., mAP, precision, recall) or prediction results
-    from classification, object detection, segmentation, or pose estimation tasks into various formats: Pandas
-    DataFrame, CSV, XML, HTML, JSON and SQLite (SQL).
+    from classification, object detection, segmentation, or pose estimation tasks into various formats: Polars
+    DataFrame, CSV and JSON.
 
     Methods:
-        to_df: Convert summary to a Pandas DataFrame.
+        to_df: Convert summary to a Polars DataFrame.
         to_csv: Export results as a CSV string.
-        to_xml: Export results as an XML string (requires `lxml`).
-        to_html: Export results as an HTML table.
         to_json: Export results as a JSON string.
         tojson: Deprecated alias for `to_json()`.
-        to_sql: Export results to an SQLite database.
 
     Examples:
         >>> model = YOLO("yolo11n.pt")
@@ -152,12 +150,11 @@ class DataExportMixin:
         >>> df = results.to_df()
         >>> print(df)
         >>> csv_data = results.to_csv()
-        >>> results.to_sql(table_name="yolo_results")
    """

    def to_df(self, normalize=False, decimals=5):
        """
-        Create a pandas DataFrame from the prediction results summary or validation metrics.
+        Create a polars DataFrame from the prediction results summary or validation metrics.

        Args:
            normalize (bool, optional): Normalize numerical values for easier comparison.
@@ -166,13 +163,13 @@ class DataExportMixin:
        Returns:
            (DataFrame): DataFrame containing the summary data.
        """
-        import pandas as pd  # scope for faster 'import ultralytics'
+        import polars as pl  # scope for faster 'import ultralytics'

-        return pd.DataFrame(self.summary(normalize=normalize, decimals=decimals))
+        return pl.DataFrame(self.summary(normalize=normalize, decimals=decimals))

    def to_csv(self, normalize=False, decimals=5):
        """
-        Export results to CSV string format.
+        Export results or metrics to CSV string format.

        Args:
            normalize (bool, optional): Normalize numeric values.
@@ -181,44 +178,25 @@ class DataExportMixin:
        Returns:
            (str): CSV content as string.
        """
-        return self.to_df(normalize=normalize, decimals=decimals).to_csv()
+        import polars as pl

-    def to_xml(self, normalize=False, decimals=5):
-        """
-        Export results to XML format.
-
-        Args:
-            normalize (bool, optional): Normalize numeric values.
-            decimals (int, optional): Decimal precision.
-
-        Returns:
-            (str): XML string.
-
-        Notes:
-            Requires `lxml` package to be installed.
-        """
-        df = self.to_df(normalize=normalize, decimals=decimals)
-        return '<?xml version="1.0" encoding="utf-8"?>\n<root></root>' if df.empty else df.to_xml(parser="etree")
-
-    def to_html(self, normalize=False, decimals=5, index=False):
-        """
-        Export results to HTML table format.
-
-        Args:
-            normalize (bool, optional): Normalize numeric values.
-            decimals (int, optional): Decimal precision.
-            index (bool, optional): Whether to include index column in the HTML table.
-
-        Returns:
-            (str): HTML representation of the results.
-        """
        df = self.to_df(normalize=normalize, decimals=decimals)
-        return "<table></table>" if df.empty else df.to_html(index=index)

-    def tojson(self, normalize=False, decimals=5):
-        """Deprecated version of to_json()."""
-        LOGGER.warning("'result.tojson()' is deprecated, replace with 'result.to_json()'.")
-        return self.to_json(normalize, decimals)
+        try:
+            return df.write_csv()
+        except Exception:
+            # Minimal string conversion for any remaining complex types
+            def _to_str_simple(v):
+                if v is None:
+                    return ""
+                if isinstance(v, (dict, list, tuple, set)):
+                    return repr(v)
+                return str(v)
+
+            df_str = df.select(
+                [pl.col(c).map_elements(_to_str_simple, return_dtype=pl.String).alias(c) for c in df.columns]
+            )
+            return df_str.write_csv()

    def to_json(self, normalize=False, decimals=5):
        """
@@ -231,52 +209,7 @@ class DataExportMixin:
        Returns:
            (str): JSON-formatted string of the results.
        """
-        return self.to_df(normalize=normalize, decimals=decimals).to_json(orient="records", indent=2)
-
-    def to_sql(self, normalize=False, decimals=5, table_name="results", db_path="results.db"):
-        """
-        Save results to an SQLite database.
-
-        Args:
-            normalize (bool, optional): Normalize numeric values.
-            decimals (int, optional): Decimal precision.
-            table_name (str, optional): Name of the SQL table.
-            db_path (str, optional): SQLite database file path.
-        """
-        df = self.to_df(normalize, decimals)
-        if df.empty or df.columns.empty:  # Exit if df is None or has no columns (i.e., no schema)
-            return
-
-        import sqlite3
-
-        conn = sqlite3.connect(db_path)
-        cursor = conn.cursor()
-
-        # Dynamically create table schema based on summary to support prediction and validation results export
-        columns = []
-        for col in df.columns:
-            sample_val = df[col].dropna().iloc[0] if not df[col].dropna().empty else ""
-            if isinstance(sample_val, dict):
-                col_type = "TEXT"
-            elif isinstance(sample_val, (float, int)):
-                col_type = "REAL"
-            else:
-                col_type = "TEXT"
-            columns.append(f'"{col}" {col_type}')  # Quote column names to handle special characters like hyphens
-
-        # Create table (Drop table from db if it's already exist)
-        cursor.execute(f'DROP TABLE IF EXISTS "{table_name}"')
-        cursor.execute(f'CREATE TABLE "{table_name}" (id INTEGER PRIMARY KEY AUTOINCREMENT, {", ".join(columns)})')
-
-        for _, row in df.iterrows():
-            values = [json.dumps(v) if isinstance(v, dict) else v for v in row]
-            column_names = ", ".join(f'"{col}"' for col in df.columns)
-            placeholders = ", ".join("?" for _ in df.columns)
-            cursor.execute(f'INSERT INTO "{table_name}" ({column_names}) VALUES ({placeholders})', values)
-
-        conn.commit()
-        conn.close()
-        LOGGER.info(f"Results saved to SQL table '{table_name}' in '{db_path}'.")
+        return self.to_df(normalize=normalize, decimals=decimals).write_json()


class SimpleClass:
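
The surviving export surface after these hunks is to_df/to_csv/to_json, all backed by Polars. A minimal usage sketch, assuming a detection checkpoint and a local test image (file names illustrative):

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    results = model("bus.jpg")[0]  # a single Results object

    df = results.to_df()           # polars.DataFrame (was pandas.DataFrame before)
    csv_text = results.to_csv()    # CSV string via DataFrame.write_csv()
    json_text = results.to_json()  # JSON string via DataFrame.write_json()
    # to_xml(), to_html() and to_sql() are removed by this release.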
@@ -795,14 +728,26 @@ def is_raspberrypi() -> bool:
     return "rpi" in DEVICE_MODEL
 
 
-def is_jetson() -> bool:
+@lru_cache(maxsize=3)
+def is_jetson(jetpack=None) -> bool:
    """
    Determine if the Python environment is running on an NVIDIA Jetson device.

+    Args:
+        jetpack (int | None): If specified, check for specific JetPack version (4, 5, 6).
+
    Returns:
        (bool): True if running on an NVIDIA Jetson device, False otherwise.
    """
-    return "tegra" in DEVICE_MODEL
+    if jetson := ("tegra" in DEVICE_MODEL):
+        if jetpack:
+            try:
+                content = open("/etc/nv_tegra_release").read()
+                version_map = {4: "R32", 5: "R35", 6: "R36"}  # JetPack to L4T major version mapping
+                return jetpack in version_map and version_map[jetpack] in content
+            except Exception:
+                return False
+    return jetson


def is_online() -> bool:
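
A sketch of how the cached, JetPack-aware check can be called (returns False on non-Jetson hosts):

    from ultralytics.utils import is_jetson

    is_jetson()           # True on any Jetson ("tegra" in DEVICE_MODEL)
    is_jetson(jetpack=6)  # True only if /etc/nv_tegra_release contains "R36"
    # lru_cache(maxsize=3) memoizes one result per distinct jetpack argument,
    # so /etc/nv_tegra_release is read at most once per version queried.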
ultralytics/utils/benchmarks.py
@@ -77,7 +77,7 @@ def benchmark(
        **kwargs (Any): Additional keyword arguments for exporter.

    Returns:
-        (pandas.DataFrame): A pandas DataFrame with benchmark results for each format, including file size, metric,
+        (polars.DataFrame): A polars DataFrame with benchmark results for each format, including file size, metric,
        and inference time.

    Examples:
@@ -88,10 +88,15 @@ def benchmark(
    imgsz = check_imgsz(imgsz)
    assert imgsz[0] == imgsz[1] if isinstance(imgsz, list) else True, "benchmark() only supports square imgsz."

-    import pandas as pd  # scope for faster 'import ultralytics'
+    import polars as pl  # scope for faster 'import ultralytics'
+
+    pl.Config.set_tbl_cols(-1)  # Show all columns
+    pl.Config.set_tbl_rows(-1)  # Show all rows
+    pl.Config.set_tbl_width_chars(-1)  # No width limit
+    pl.Config.set_tbl_hide_column_data_types(True)  # Hide data types
+    pl.Config.set_tbl_hide_dataframe_shape(True)  # Hide shape info
+    pl.Config.set_tbl_formatting("ASCII_BORDERS_ONLY_CONDENSED")

-    pd.options.display.max_columns = 10
-    pd.options.display.width = 120
    device = select_device(device, verbose=False)
    if isinstance(model, (str, Path)):
        model = YOLO(model)
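
The pl.Config calls are process-global display settings that replace the old pandas display options. A standalone sketch of their effect:

    import polars as pl

    pl.Config.set_tbl_rows(-1)  # never elide rows
    pl.Config.set_tbl_hide_dataframe_shape(True)  # drop the "shape: (r, c)" banner

    df = pl.DataFrame({"Format": ["PyTorch", "ONNX"], "FPS": [61.8, 90.2]})
    print(df)  # full ASCII table, no shape header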
@@ -193,22 +198,24 @@ def benchmark(

    # Print results
    check_yolo(device=device)  # print system info
-    df = pd.DataFrame(y, columns=["Format", "Status❔", "Size (MB)", key, "Inference time (ms/im)", "FPS"])
+    df = pl.DataFrame(y, schema=["Format", "Status❔", "Size (MB)", key, "Inference time (ms/im)", "FPS"], orient="row")
+    df = df.with_row_index(" ", offset=1)  # add index info
+    df_display = df.with_columns(pl.all().cast(pl.String).fill_null("-"))

    name = model.model_name
    dt = time.time() - t0
    legend = "Benchmarks legend:  - ✅ Success  - ❎ Export passed but validation failed  - ❌️ Export failed"
-    s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({dt:.2f}s)\n{legend}\n{df.fillna('-')}\n"
+    s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({dt:.2f}s)\n{legend}\n{df_display}\n"
    LOGGER.info(s)
    with open("benchmarks.log", "a", errors="ignore", encoding="utf-8") as f:
        f.write(s)

    if verbose and isinstance(verbose, float):
-        metrics = df[key].array  # values to compare to floor
+        metrics = df[key].to_numpy()  # values to compare to floor
        floor = verbose  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
-        assert all(x > floor for x in metrics if pd.notna(x)), f"Benchmark failure: metric(s) < floor {floor}"
+        assert all(x > floor for x in metrics if not np.isnan(x)), f"Benchmark failure: metric(s) < floor {floor}"

-    return df
+    return df_display


class RF100Benchmark:
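
Note the return-type change: benchmark() now returns the stringified display frame rather than the raw numeric one. A small sketch of the two-frame pattern used above (values illustrative):

    import polars as pl

    df = pl.DataFrame([("PyTorch", 0.61, None)], schema=["Format", "mAP", "FPS"], orient="row")
    df = df.with_row_index(" ", offset=1)  # 1-based row numbers in a column named " "
    df_display = df.with_columns(pl.all().cast(pl.String).fill_null("-"))  # nulls printed as "-"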
ultralytics/utils/callbacks/wb.py
@@ -34,13 +34,19 @@ def _custom_table(
    Returns:
        (wandb.Object): A wandb object suitable for logging, showcasing the crafted metric visualization.
    """
-    import pandas  # scope for faster 'import ultralytics'
+    import polars as pl  # scope for faster 'import ultralytics'
+    import polars.selectors as cs
+
+    df = pl.DataFrame({"class": classes, "y": y, "x": x}).with_columns(cs.numeric().round(3))
+    data = df.select(["class", "y", "x"]).rows()

-    df = pandas.DataFrame({"class": classes, "y": y, "x": x}).round(3)
    fields = {"x": "x", "y": "y", "class": "class"}
    string_fields = {"title": title, "x-axis-title": x_title, "y-axis-title": y_title}
    return wb.plot_table(
-        "wandb/area-under-curve/v0", wb.Table(dataframe=df), fields=fields, string_fields=string_fields
+        "wandb/area-under-curve/v0",
+        wb.Table(data=data, columns=["class", "y", "x"]),
+        fields=fields,
+        string_fields=string_fields,
    )

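The wb.Table is now built from plain Python rows instead of a pandas DataFrame. A sketch of the polars side with toy values:

    import polars as pl
    import polars.selectors as cs

    classes, y, x = ["person", "car"], [0.91234, 0.85678], [0.5, 0.4]
    df = pl.DataFrame({"class": classes, "y": y, "x": x}).with_columns(cs.numeric().round(3))
    data = df.select(["class", "y", "x"]).rows()  # [("person", 0.912, 0.5), ("car", 0.857, 0.4)]
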
ultralytics/utils/downloads.py
@@ -1,12 +1,13 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from __future__ import annotations
+
 import re
 import shutil
 import subprocess
 from itertools import repeat
 from multiprocessing.pool import ThreadPool
 from pathlib import Path
-from typing import List, Tuple
 from urllib import parse, request
 
 from ultralytics.utils import LOGGER, TQDM, checks, clean_url, emojis, is_online, url2file
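
The future import is what lets the PEP 604 unions added below (e.g. str | Path) parse on Python versions older than 3.10, since annotations stay unevaluated strings (PEP 563). A minimal sketch:

    from __future__ import annotations  # must precede all other imports

    from pathlib import Path

    def is_url(url: str | Path, check: bool = False) -> bool:  # legal even on Python 3.8/3.9
        ...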
@@ -41,7 +42,7 @@ GITHUB_ASSETS_NAMES = frozenset(
 GITHUB_ASSETS_STEMS = frozenset(k.rpartition(".")[0] for k in GITHUB_ASSETS_NAMES)
 
 
-def is_url(url, check: bool = False) -> bool:
+def is_url(url: str | Path, check: bool = False) -> bool:
    """
    Validate if the given string is a URL and optionally check if the URL exists online.

@@ -68,7 +69,7 @@ def is_url(url: str | Path, check: bool = False) -> bool:
     return False
 
 
-def delete_dsstore(path, files_to_delete=(".DS_Store", "__MACOSX")):
+def delete_dsstore(path: str | Path, files_to_delete: tuple[str, ...] = (".DS_Store", "__MACOSX")) -> None:
    """
    Delete all specified system files in a directory.

@@ -91,7 +92,12 @@ def delete_dsstore(path, files_to_delete=(".DS_Store", "__MACOSX")):
             f.unlink()
 
 
-def zip_directory(directory, compress: bool = True, exclude=(".DS_Store", "__MACOSX"), progress: bool = True) -> Path:
+def zip_directory(
+    directory: str | Path,
+    compress: bool = True,
+    exclude: tuple[str, ...] = (".DS_Store", "__MACOSX"),
+    progress: bool = True,
+) -> Path:
    """
    Zip the contents of a directory, excluding specified files.

@@ -129,9 +135,9 @@ def zip_directory(directory, compress: bool = True, exclude=(".DS_Store", "__MACOSX"), progress: bool = True) -> Path:
 
 
 def unzip_file(
-    file,
-    path=None,
-    exclude=(".DS_Store", "__MACOSX"),
+    file: str | Path,
+    path: str | Path | None = None,
+    exclude: tuple[str, ...] = (".DS_Store", "__MACOSX"),
     exist_ok: bool = False,
     progress: bool = True,
 ) -> Path:
@@ -198,8 +204,8 @@ def unzip_file(
 
 
 def check_disk_space(
-    url: str = "https://ultralytics.com/assets/coco8.zip",
-    path=Path.cwd(),
+    file_bytes: int,
+    path: str | Path = Path.cwd(),
     sf: float = 1.5,
     hard: bool = True,
 ) -> bool:
@@ -207,7 +213,7 @@ def check_disk_space(
    Check if there is sufficient disk space to download and store a file.

    Args:
-        url (str, optional): The URL to the file.
+        file_bytes (int): The file size in bytes.
        path (str | Path, optional): The path or drive to check the available free space on.
        sf (float, optional): Safety factor, the multiplier for the required free space.
        hard (bool, optional): Whether to throw an error or not on insufficient disk space.
@@ -215,26 +221,14 @@ def check_disk_space(
    Returns:
        (bool): True if there is sufficient disk space, False otherwise.
    """
-    import requests  # slow import
-
-    try:
-        r = requests.head(url)  # response
-        assert r.status_code < 400, f"URL error for {url}: {r.status_code} {r.reason}"  # check response
-    except Exception:
-        return True  # requests issue, default to True
-
-    # Check file size
-    gib = 1 << 30  # bytes per GiB
-    data = int(r.headers.get("Content-Length", 0)) / gib  # file size (GB)
-    total, used, free = (x / gib for x in shutil.disk_usage(path))  # bytes
-
-    if data * sf < free:
+    total, used, free = shutil.disk_usage(path)  # bytes
+    if file_bytes * sf < free:
        return True  # sufficient space

    # Insufficient space
    text = (
-        f"Insufficient free disk space {free:.1f} GB < {data * sf:.3f} GB required, "
-        f"Please free {data * sf - free:.1f} GB additional disk space and try again."
+        f"Insufficient free disk space {free >> 30:.3f} GB < {int(file_bytes * sf) >> 30:.3f} GB required, "
+        f"Please free {int(file_bytes * sf - free) >> 30:.3f} GB additional disk space and try again."
    )
    if hard:
        raise MemoryError(text)
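
Callers now supply a byte count (typically from a Content-Length header) instead of a URL, which drops the requests dependency and the extra HEAD request. A usage sketch:

    from ultralytics.utils.downloads import check_disk_space

    size = 2 << 30  # a 2 GiB file we intend to download
    check_disk_space(size, path=".", hard=True)        # raises MemoryError if < sf * size free
    ok = check_disk_space(size, path=".", hard=False)  # or just return False quietly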
@@ -242,7 +236,7 @@ def check_disk_space(
     return False
 
 
-def get_google_drive_file_info(link: str) -> Tuple[str, str]:
+def get_google_drive_file_info(link: str) -> tuple[str, str | None]:
    """
    Retrieve the direct download link and filename for a shareable Google Drive file link.

@@ -283,9 +277,9 @@ def get_google_drive_file_info(link: str) -> Tuple[str, str]:
 
 
 def safe_download(
-    url,
-    file=None,
-    dir=None,
+    url: str | Path,
+    file: str | Path | None = None,
+    dir: str | Path | None = None,
     unzip: bool = True,
     delete: bool = False,
     curl: bool = False,
@@ -293,7 +287,7 @@ def safe_download(
     min_bytes: float = 1e0,
     exist_ok: bool = False,
     progress: bool = True,
-):
+) -> Path | str:
    """
    Download files from a URL with options for retrying, unzipping, and deleting the downloaded file. Enhanced with
    robust partial download detection using Content-Length validation.
@@ -335,7 +329,6 @@ def safe_download(
    )
    desc = f"Downloading {uri} to '{f}'"
    f.parent.mkdir(parents=True, exist_ok=True)  # make directory if missing
-    check_disk_space(url, path=f.parent)
    curl_installed = shutil.which("curl")
    for i in range(retry + 1):
        try:
@@ -347,6 +340,9 @@ def safe_download(
            else:  # urllib download
                with request.urlopen(url) as response:
                    expected_size = int(response.getheader("Content-Length", 0))
+                    if i == 0 and expected_size > 1048576:
+                        check_disk_space(expected_size, path=f.parent)
+                    buffer_size = max(8192, min(1048576, expected_size // 1000)) if expected_size else 8192
                    with TQDM(
                        total=expected_size,
                        desc=desc,
@@ -356,7 +352,10 @@ def safe_download(
                        unit_divisor=1024,
                    ) as pbar:
                        with open(f, "wb") as f_opened:
-                            for data in response:
+                            while True:
+                                data = response.read(buffer_size)
+                                if not data:
+                                    break
                                f_opened.write(data)
                                pbar.update(len(data))

@@ -371,6 +370,8 @@ def safe_download(
            else:
                break  # success
            f.unlink()  # remove partial downloads
+        except MemoryError:
+            raise  # Re-raise immediately - no point retrying if insufficient disk space
        except Exception as e:
            if i == 0 and not is_online():
                raise ConnectionError(emojis(f"❌  Download failure for {uri}. Environment is not online.")) from e
@@ -397,7 +398,7 @@ def get_github_assets(
     repo: str = "ultralytics/assets",
     version: str = "latest",
     retry: bool = False,
-) -> Tuple[str, List[str]]:
+) -> tuple[str, list[str]]:
    """
    Retrieve the specified version's tag and assets from a GitHub repository.

@@ -430,7 +431,12 @@ def get_github_assets(
     return data["tag_name"], [x["name"] for x in data["assets"]]  # tag, assets i.e. ['yolo11n.pt', 'yolov8s.pt', ...]
 
 
-def attempt_download_asset(file, repo: str = "ultralytics/assets", release: str = "v8.3.0", **kwargs) -> str:
+def attempt_download_asset(
+    file: str | Path,
+    repo: str = "ultralytics/assets",
+    release: str = "v8.3.0",
+    **kwargs,
+) -> str:
    """
    Attempt to download a file from GitHub release assets if it is not found locally.

@@ -482,15 +488,15 @@ def attempt_download_asset(file, repo: str = "ultralytics/assets", release: str = "v8.3.0", **kwargs) -> str:
 
 
 def download(
-    url,
-    dir=Path.cwd(),
+    url: str | list[str] | Path,
+    dir: Path = Path.cwd(),
     unzip: bool = True,
     delete: bool = False,
     curl: bool = False,
     threads: int = 1,
     retry: int = 3,
     exist_ok: bool = False,
-):
+) -> None:
    """
    Download files from specified URLs to a given directory.

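Behavior is unchanged by the annotations; a typical call downloading several assets concurrently (URLs illustrative of the ultralytics/assets release pattern):

    from pathlib import Path

    from ultralytics.utils.downloads import download

    urls = [
        "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt",
        "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt",
    ]
    download(urls, dir=Path("weights"), threads=2)  # a thread pool of size `threads`
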
ultralytics/utils/plotting.py
@@ -557,7 +557,7 @@ class Annotator:
        return width, height, width * height


-@TryExcept()  # known issue https://github.com/ultralytics/yolov5/issues/5395
+@TryExcept()
@plt_settings()
def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
    """
@@ -571,7 +571,7 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
        on_plot (Callable, optional): Function to call after plot is saved.
    """
    import matplotlib.pyplot as plt  # scope for faster 'import ultralytics'
-    import pandas
+    import polars
    from matplotlib.colors import LinearSegmentedColormap

    # Filter matplotlib>=3.7.2 warning
@@ -582,16 +582,7 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
    LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
    nc = int(cls.max() + 1)  # number of classes
    boxes = boxes[:1000000]  # limit to 1M boxes
-    x = pandas.DataFrame(boxes, columns=["x", "y", "width", "height"])
-
-    try:  # Seaborn correlogram
-        import seaborn
-
-        seaborn.pairplot(x, corner=True, diag_kind="auto", kind="hist", diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
-        plt.savefig(save_dir / "labels_correlogram.jpg", dpi=200)
-        plt.close()
-    except ImportError:
-        pass  # Skip if seaborn is not installed
+    x = polars.DataFrame(boxes, schema=["x", "y", "width", "height"])

    # Matplotlib labels
    subplot_3_4_color = LinearSegmentedColormap.from_list("white_blue", ["white", "blue"])
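
The pandas columns= keyword becomes schema= in polars when constructing from a NumPy array; orientation is inferred from the schema length. A sketch:

    import numpy as np
    import polars

    boxes = np.random.rand(5, 4)  # rows of (x, y, width, height)
    x = polars.DataFrame(boxes, schema=["x", "y", "width", "height"])
    assert x.columns == ["x", "y", "width", "height"] and x.height == 5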
@@ -603,12 +594,13 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
    if 0 < len(names) < 30:
        ax[0].set_xticks(range(len(names)))
        ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10)
+        ax[0].bar_label(y[2])
    else:
        ax[0].set_xlabel("classes")
    boxes = np.column_stack([0.5 - boxes[:, 2:4] / 2, 0.5 + boxes[:, 2:4] / 2]) * 1000
    img = Image.fromarray(np.ones((1000, 1000, 3), dtype=np.uint8) * 255)
    for cls, box in zip(cls[:500], boxes[:500]):
-        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
+        ImageDraw.Draw(img).rectangle(box.tolist(), width=1, outline=colors(cls))  # plot
    ax[1].imshow(img)
    ax[1].axis("off")

@@ -878,7 +870,7 @@ def plot_results(
        >>> plot_results("path/to/results.csv", segment=True)
    """
    import matplotlib.pyplot as plt  # scope for faster 'import ultralytics'
-    import pandas as pd
+    import polars as pl
    from scipy.ndimage import gaussian_filter1d

    save_dir = Path(file).parent if file else Path(dir)
@@ -899,11 +891,11 @@ def plot_results(
    assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
    for f in files:
        try:
-            data = pd.read_csv(f)
+            data = pl.read_csv(f)
            s = [x.strip() for x in data.columns]
-            x = data.values[:, 0]
+            x = data.select(data.columns[0]).to_numpy().flatten()
            for i, j in enumerate(index):
-                y = data.values[:, j].astype("float")
+                y = data.select(data.columns[j]).to_numpy().flatten().astype("float")
                # y[y == 0] = np.nan  # don't show zero values
                ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8)  # actual results
                ax[i].plot(x, gaussian_filter1d(y, sigma=3), ":", label="smooth", linewidth=2)  # smoothing line
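
polars has no positional .values[:, j] indexing, hence the select/to_numpy/flatten chain. An equivalent standalone sketch (file path and column index illustrative):

    import polars as pl

    data = pl.read_csv("results.csv")
    x = data.select(data.columns[0]).to_numpy().flatten()  # first column, as a 1-D array
    y = data.select(data.columns[1]).to_numpy().flatten().astype("float")  # any metric column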
@@ -953,6 +945,7 @@ def plt_color_scatter(
    plt.scatter(v, f, c=colors, cmap=cmap, alpha=alpha, edgecolors=edgecolors)


+@plt_settings()
def plot_tune_results(csv_file: str = "tune_results.csv"):
    """
    Plot the evolution results stored in a 'tune_results.csv' file. The function generates a scatter plot for each key
@@ -965,7 +958,7 @@ def plot_tune_results(csv_file: str = "tune_results.csv"):
        >>> plot_tune_results("path/to/tune_results.csv")
    """
    import matplotlib.pyplot as plt  # scope for faster 'import ultralytics'
-    import pandas as pd
+    import polars as pl
    from scipy.ndimage import gaussian_filter1d

    def _save_one_file(file):
@@ -976,10 +969,10 @@ def plot_tune_results(csv_file: str = "tune_results.csv"):

    # Scatter plots for each hyperparameter
    csv_file = Path(csv_file)
-    data = pd.read_csv(csv_file)
+    data = pl.read_csv(csv_file)
    num_metrics_columns = 1
    keys = [x.strip() for x in data.columns][num_metrics_columns:]
-    x = data.values
+    x = data.to_numpy()
    fitness = x[:, 0]  # fitness
    j = np.argmax(fitness)  # max fitness index
    n = math.ceil(len(keys) ** 0.5)  # columns and rows in plot