dgenerate-ultralytics-headless 8.3.185__py3-none-any.whl → 8.3.187__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {dgenerate_ultralytics_headless-8.3.185.dist-info → dgenerate_ultralytics_headless-8.3.187.dist-info}/METADATA +6 -8
  2. {dgenerate_ultralytics_headless-8.3.185.dist-info → dgenerate_ultralytics_headless-8.3.187.dist-info}/RECORD +31 -30
  3. tests/test_python.py +2 -10
  4. ultralytics/__init__.py +1 -1
  5. ultralytics/cfg/datasets/Argoverse.yaml +2 -2
  6. ultralytics/cfg/datasets/Objects365.yaml +3 -3
  7. ultralytics/cfg/datasets/SKU-110K.yaml +4 -4
  8. ultralytics/cfg/datasets/VOC.yaml +2 -4
  9. ultralytics/cfg/datasets/VisDrone.yaml +2 -2
  10. ultralytics/cfg/datasets/xView.yaml +2 -2
  11. ultralytics/data/build.py +2 -2
  12. ultralytics/data/utils.py +0 -2
  13. ultralytics/engine/exporter.py +4 -1
  14. ultralytics/engine/results.py +1 -4
  15. ultralytics/engine/trainer.py +3 -3
  16. ultralytics/models/sam/__init__.py +8 -2
  17. ultralytics/models/sam/modules/sam.py +6 -6
  18. ultralytics/models/sam/predict.py +363 -6
  19. ultralytics/solutions/region_counter.py +3 -2
  20. ultralytics/utils/__init__.py +25 -162
  21. ultralytics/utils/autodevice.py +1 -1
  22. ultralytics/utils/benchmarks.py +9 -8
  23. ultralytics/utils/callbacks/wb.py +9 -3
  24. ultralytics/utils/downloads.py +29 -19
  25. ultralytics/utils/logger.py +10 -11
  26. ultralytics/utils/plotting.py +13 -20
  27. ultralytics/utils/tqdm.py +462 -0
  28. {dgenerate_ultralytics_headless-8.3.185.dist-info → dgenerate_ultralytics_headless-8.3.187.dist-info}/WHEEL +0 -0
  29. {dgenerate_ultralytics_headless-8.3.185.dist-info → dgenerate_ultralytics_headless-8.3.187.dist-info}/entry_points.txt +0 -0
  30. {dgenerate_ultralytics_headless-8.3.185.dist-info → dgenerate_ultralytics_headless-8.3.187.dist-info}/licenses/LICENSE +0 -0
  31. {dgenerate_ultralytics_headless-8.3.185.dist-info → dgenerate_ultralytics_headless-8.3.187.dist-info}/top_level.txt +0 -0
ultralytics/utils/__init__.py
@@ -12,7 +12,6 @@ import subprocess
  import sys
  import threading
  import time
- import warnings
  from pathlib import Path
  from threading import Lock
  from types import SimpleNamespace
@@ -22,10 +21,10 @@ from urllib.parse import unquote
  import cv2
  import numpy as np
  import torch
- import tqdm
 
  from ultralytics import __version__
  from ultralytics.utils.patches import imread, imshow, imwrite, torch_save  # for patches
+ from ultralytics.utils.tqdm import TQDM  # noqa
 
  # PyTorch Multi-GPU DDP Constants
  RANK = int(os.getenv("RANK", -1))
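The third-party tqdm dependency is dropped here in favor of the new in-house ultralytics/utils/tqdm.py (the +462-line file in the list above), re-exported from ultralytics.utils so existing imports keep working. A minimal sketch of the two equivalent import paths, assuming the re-export shown in this hunk:

    from ultralytics.utils import TQDM  # re-exported, hence the noqa
    # or: from ultralytics.utils.tqdm import TQDM  # new canonical location

    for _ in TQDM(range(3), desc="demo"):
        pass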
@@ -41,7 +40,6 @@ DEFAULT_CFG_PATH = ROOT / "cfg/default.yaml"
  NUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLO multiprocessing threads
  AUTOINSTALL = str(os.getenv("YOLO_AUTOINSTALL", True)).lower() == "true"  # global auto-install mode
  VERBOSE = str(os.getenv("YOLO_VERBOSE", True)).lower() == "true"  # global verbose mode
- TQDM_BAR_FORMAT = "{l_bar}{bar:10}{r_bar}" if VERBOSE else None  # tqdm bar format
  LOGGING_NAME = "ultralytics"
  MACOS, LINUX, WINDOWS = (platform.system() == x for x in ["Darwin", "Linux", "Windows"])  # environment booleans
  MACOS_VERSION = platform.mac_ver()[0] if MACOS else None
@@ -130,90 +128,20 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # suppress verbose TF compiler warnings
  os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR"  # suppress "NNPACK.cpp could not initialize NNPACK" warnings
  os.environ["KINETO_LOG_LEVEL"] = "5"  # suppress verbose PyTorch profiler output when computing FLOPs
 
- if TQDM_RICH := str(os.getenv("YOLO_TQDM_RICH", False)).lower() == "true":
-     from rich.console import Console
-     from rich.progress import BarColumn
-     from tqdm import rich
-
-     # Patch Rich Console width=200 and BarColumn bar_width=10 to solve width=80 missing bars bug
-     _console_init = Console.__init__
-     _bar_init = BarColumn.__init__
-     Console.__init__ = lambda self, *a, **k: _console_init(self, *a, **{**k, "width": 200})
-     BarColumn.__init__ = lambda self, bar_width=None, *a, **k: _bar_init(self, 10, *a, **k)
-
-
- class TQDM(rich.tqdm if TQDM_RICH else tqdm.tqdm):
-     """
-     A custom TQDM progress bar class that extends the original tqdm functionality.
-
-     This class modifies the behavior of the original tqdm progress bar based on global settings and provides
-     additional customization options for Ultralytics projects. The progress bar is automatically disabled when
-     VERBOSE is False or when explicitly disabled.
-
-     Attributes:
-         disable (bool): Whether to disable the progress bar. Determined by the global VERBOSE setting and
-             any passed 'disable' argument.
-         bar_format (str): The format string for the progress bar. Uses the global TQDM_BAR_FORMAT if not
-             explicitly set.
-
-     Methods:
-         __init__: Initialize the TQDM object with custom settings.
-         __iter__: Return self as iterator to satisfy Iterable interface.
-
-     Examples:
-         >>> from ultralytics.utils import TQDM
-         >>> for i in TQDM(range(100)):
-         ...     # Your processing code here
-         ...     pass
-     """
-
-     def __init__(self, *args, **kwargs):
-         """
-         Initialize a custom TQDM progress bar with Ultralytics-specific settings.
-
-         Args:
-             *args (Any): Variable length argument list to be passed to the original tqdm constructor.
-             **kwargs (Any): Arbitrary keyword arguments to be passed to the original tqdm constructor.
-
-         Notes:
-             - The progress bar is disabled if VERBOSE is False or if 'disable' is explicitly set to True in kwargs.
-             - The default bar format is set to TQDM_BAR_FORMAT unless overridden in kwargs.
-             - In GitHub Actions, progress bars only update at completion to keep CI logs clean.
-
-         Examples:
-             >>> from ultralytics.utils import TQDM
-             >>> for i in TQDM(range(100)):
-             ...     # Your code here
-             ...     pass
-         """
-         warnings.filterwarnings("ignore", category=tqdm.TqdmExperimentalWarning)  # suppress tqdm.rich warning
-         if is_github_action_running():
-             kwargs["mininterval"] = 60  # only update every 60 seconds
-         kwargs["disable"] = not VERBOSE or kwargs.get("disable", False) or LOGGER.getEffectiveLevel() > 20
-         kwargs.setdefault("bar_format", TQDM_BAR_FORMAT)  # override default value if passed
-         super().__init__(*args, **kwargs)
-
-     def __iter__(self):
-         """Return self as iterator to satisfy Iterable interface."""
-         return super().__iter__()
-
 
  class DataExportMixin:
      """
      Mixin class for exporting validation metrics or prediction results in various formats.
 
      This class provides utilities to export performance metrics (e.g., mAP, precision, recall) or prediction results
-     from classification, object detection, segmentation, or pose estimation tasks into various formats: Pandas
-     DataFrame, CSV, XML, HTML, JSON and SQLite (SQL).
+     from classification, object detection, segmentation, or pose estimation tasks into various formats: Polars
+     DataFrame, CSV and JSON.
 
      Methods:
-         to_df: Convert summary to a Pandas DataFrame.
+         to_df: Convert summary to a Polars DataFrame.
          to_csv: Export results as a CSV string.
-         to_xml: Export results as an XML string (requires `lxml`).
-         to_html: Export results as an HTML table.
          to_json: Export results as a JSON string.
          tojson: Deprecated alias for `to_json()`.
-         to_sql: Export results to an SQLite database.
 
      Examples:
          >>> model = YOLO("yolo11n.pt")
@@ -221,12 +149,11 @@ class DataExportMixin:
          >>> df = results.to_df()
          >>> print(df)
          >>> csv_data = results.to_csv()
-         >>> results.to_sql(table_name="yolo_results")
      """
 
      def to_df(self, normalize=False, decimals=5):
          """
-         Create a pandas DataFrame from the prediction results summary or validation metrics.
+         Create a polars DataFrame from the prediction results summary or validation metrics.
 
          Args:
              normalize (bool, optional): Normalize numerical values for easier comparison.
@@ -235,13 +162,13 @@
          Returns:
              (DataFrame): DataFrame containing the summary data.
          """
-         import pandas as pd  # scope for faster 'import ultralytics'
+         import polars as pl  # scope for faster 'import ultralytics'
 
-         return pd.DataFrame(self.summary(normalize=normalize, decimals=decimals))
+         return pl.DataFrame(self.summary(normalize=normalize, decimals=decimals))
 
      def to_csv(self, normalize=False, decimals=5):
          """
-         Export results to CSV string format.
+         Export results or metrics to CSV string format.
 
          Args:
              normalize (bool, optional): Normalize numeric values.
@@ -250,44 +177,25 @@
          Returns:
              (str): CSV content as string.
          """
-         return self.to_df(normalize=normalize, decimals=decimals).to_csv()
-
-     def to_xml(self, normalize=False, decimals=5):
-         """
-         Export results to XML format.
+         import polars as pl
 
-         Args:
-             normalize (bool, optional): Normalize numeric values.
-             decimals (int, optional): Decimal precision.
-
-         Returns:
-             (str): XML string.
-
-         Notes:
-             Requires `lxml` package to be installed.
-         """
-         df = self.to_df(normalize=normalize, decimals=decimals)
-         return '<?xml version="1.0" encoding="utf-8"?>\n<root></root>' if df.empty else df.to_xml(parser="etree")
-
-     def to_html(self, normalize=False, decimals=5, index=False):
-         """
-         Export results to HTML table format.
-
-         Args:
-             normalize (bool, optional): Normalize numeric values.
-             decimals (int, optional): Decimal precision.
-             index (bool, optional): Whether to include index column in the HTML table.
-
-         Returns:
-             (str): HTML representation of the results.
-         """
          df = self.to_df(normalize=normalize, decimals=decimals)
-         return "<table></table>" if df.empty else df.to_html(index=index)
 
-     def tojson(self, normalize=False, decimals=5):
-         """Deprecated version of to_json()."""
-         LOGGER.warning("'result.tojson()' is deprecated, replace with 'result.to_json()'.")
-         return self.to_json(normalize, decimals)
+         try:
+             return df.write_csv()
+         except Exception:
+             # Minimal string conversion for any remaining complex types
+             def _to_str_simple(v):
+                 if v is None:
+                     return ""
+                 if isinstance(v, (dict, list, tuple, set)):
+                     return repr(v)
+                 return str(v)
+
+             df_str = df.select(
+                 [pl.col(c).map_elements(_to_str_simple, return_dtype=pl.String).alias(c) for c in df.columns]
+             )
+             return df_str.write_csv()
 
      def to_json(self, normalize=False, decimals=5):
          """
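Context for the new to_csv() body: polars' write_csv() raises on nested dtypes such as structs or lists (pandas' to_csv() stringified them implicitly), which is what the except branch handles by converting every column to plain strings. A standalone sketch of that fallback path, using a hypothetical struct column:

    import polars as pl

    df = pl.DataFrame({"name": ["person"], "box": [{"x1": 10.0, "y1": 20.0}]})  # nested struct column
    # df.write_csv() would raise here, so stringify all values first
    safe = df.select(
        pl.col(c).map_elements(lambda v: "" if v is None else str(v), return_dtype=pl.String).alias(c)
        for c in df.columns
    )
    print(safe.write_csv())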
@@ -300,52 +208,7 @@
          Returns:
              (str): JSON-formatted string of the results.
          """
-         return self.to_df(normalize=normalize, decimals=decimals).to_json(orient="records", indent=2)
-
-     def to_sql(self, normalize=False, decimals=5, table_name="results", db_path="results.db"):
-         """
-         Save results to an SQLite database.
-
-         Args:
-             normalize (bool, optional): Normalize numeric values.
-             decimals (int, optional): Decimal precision.
-             table_name (str, optional): Name of the SQL table.
-             db_path (str, optional): SQLite database file path.
-         """
-         df = self.to_df(normalize, decimals)
-         if df.empty or df.columns.empty:  # Exit if df is None or has no columns (i.e., no schema)
-             return
-
-         import sqlite3
-
-         conn = sqlite3.connect(db_path)
-         cursor = conn.cursor()
-
-         # Dynamically create table schema based on summary to support prediction and validation results export
-         columns = []
-         for col in df.columns:
-             sample_val = df[col].dropna().iloc[0] if not df[col].dropna().empty else ""
-             if isinstance(sample_val, dict):
-                 col_type = "TEXT"
-             elif isinstance(sample_val, (float, int)):
-                 col_type = "REAL"
-             else:
-                 col_type = "TEXT"
-             columns.append(f'"{col}" {col_type}')  # Quote column names to handle special characters like hyphens
-
-         # Create table (Drop table from db if it's already exist)
-         cursor.execute(f'DROP TABLE IF EXISTS "{table_name}"')
-         cursor.execute(f'CREATE TABLE "{table_name}" (id INTEGER PRIMARY KEY AUTOINCREMENT, {", ".join(columns)})')
-
-         for _, row in df.iterrows():
-             values = [json.dumps(v) if isinstance(v, dict) else v for v in row]
-             column_names = ", ".join(f'"{col}"' for col in df.columns)
-             placeholders = ", ".join("?" for _ in df.columns)
-             cursor.execute(f'INSERT INTO "{table_name}" ({column_names}) VALUES ({placeholders})', values)
-
-         conn.commit()
-         conn.close()
-         LOGGER.info(f"Results saved to SQL table '{table_name}' in '{db_path}'.")
+         return self.to_df(normalize=normalize, decimals=decimals).write_json()
 
 
  class SimpleClass:
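With to_xml(), to_html(), to_sql() and the deprecated tojson() alias removed, the export surface of DataExportMixin narrows to to_df()/to_csv()/to_json(), all polars-backed. A hedged usage sketch (model and image names are illustrative):

    from ultralytics import YOLO

    results = YOLO("yolo11n.pt")("bus.jpg")
    df = results[0].to_df()           # polars.DataFrame, no longer pandas
    csv_text = results[0].to_csv()    # DataFrame.write_csv() under the hood
    json_text = results[0].to_json()  # DataFrame.write_json() under the hood

Downstream code that used pandas-specific attributes on the returned frame (df.empty, df.iterrows(), ...) needs the polars equivalents (df.is_empty(), df.iter_rows(), ...).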
ultralytics/utils/autodevice.py
@@ -49,7 +49,7 @@ class GPUInfo:
          self.gpu_stats: List[Dict[str, Any]] = []
 
          try:
-             check_requirements("pynvml>=12.0.0")
+             check_requirements("nvidia-ml-py>=12.0.0")
              self.pynvml = __import__("pynvml")
              self.pynvml.nvmlInit()
              self.nvml_available = True
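The pynvml → nvidia-ml-py swap affects only the pip requirement: nvidia-ml-py is NVIDIA's officially maintained NVML binding, and it installs the same pynvml import name, which is why the __import__("pynvml") call is untouched (the same change appears again in logger.py's SystemLogger below). A quick sanity-check sketch, assuming an NVIDIA driver is present:

    import pynvml  # provided by the nvidia-ml-py distribution

    pynvml.nvmlInit()
    handle = pynvml.nvmlDeviceGetHandleByIndex(0)
    print(pynvml.nvmlDeviceGetName(handle))
    pynvml.nvmlShutdown()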
ultralytics/utils/benchmarks.py
@@ -77,7 +77,7 @@ def benchmark(
          **kwargs (Any): Additional keyword arguments for exporter.
 
      Returns:
-         (pandas.DataFrame): A pandas DataFrame with benchmark results for each format, including file size, metric,
+         (polars.DataFrame): A polars DataFrame with benchmark results for each format, including file size, metric,
              and inference time.
 
      Examples:
@@ -88,10 +88,11 @@
      imgsz = check_imgsz(imgsz)
      assert imgsz[0] == imgsz[1] if isinstance(imgsz, list) else True, "benchmark() only supports square imgsz."
 
-     import pandas as pd  # scope for faster 'import ultralytics'
+     import polars as pl  # scope for faster 'import ultralytics'
 
-     pd.options.display.max_columns = 10
-     pd.options.display.width = 120
+     pl.Config.set_tbl_cols(10)
+     pl.Config.set_tbl_width_chars(120)
+     pl.Config.set_tbl_hide_dataframe_shape(True)
      device = select_device(device, verbose=False)
      if isinstance(model, (str, Path)):
          model = YOLO(model)
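The three pl.Config calls are process-wide polars display settings that roughly mirror the old pandas options, plus hiding polars' default "shape: (rows, cols)" banner. The same options can also be applied temporarily via the context-manager form, e.g.:

    import polars as pl

    with pl.Config(tbl_cols=10, tbl_width_chars=120, tbl_hide_dataframe_shape=True):
        print(pl.DataFrame({"Format": ["PyTorch"], "FPS": [125.0]}))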
@@ -193,20 +194,20 @@
 
      # Print results
      check_yolo(device=device)  # print system info
-     df = pd.DataFrame(y, columns=["Format", "Status❔", "Size (MB)", key, "Inference time (ms/im)", "FPS"])
+     df = pl.DataFrame(y, schema=["Format", "Status❔", "Size (MB)", key, "Inference time (ms/im)", "FPS"])
 
      name = model.model_name
      dt = time.time() - t0
      legend = "Benchmarks legend: - ✅ Success - ❎ Export passed but validation failed - ❌️ Export failed"
-     s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({dt:.2f}s)\n{legend}\n{df.fillna('-')}\n"
+     s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({dt:.2f}s)\n{legend}\n{df.fill_null('-')}\n"
      LOGGER.info(s)
      with open("benchmarks.log", "a", errors="ignore", encoding="utf-8") as f:
          f.write(s)
 
      if verbose and isinstance(verbose, float):
-         metrics = df[key].array  # values to compare to floor
+         metrics = df[key].to_numpy()  # values to compare to floor
          floor = verbose  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
-         assert all(x > floor for x in metrics if pd.notna(x)), f"Benchmark failure: metric(s) < floor {floor}"
+         assert all(x > floor for x in metrics if not np.isnan(x)), f"Benchmark failure: metric(s) < floor {floor}"
 
      return df
 
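The pd.notna() → np.isnan() swap works because a polars Series.to_numpy() on a float column maps nulls to np.nan. A minimal sketch of the floor check:

    import numpy as np
    import polars as pl

    metrics = pl.DataFrame({"mAP": [0.31, None, 0.44]})["mAP"].to_numpy()  # [0.31, nan, 0.44]
    floor = 0.29
    assert all(x > floor for x in metrics if not np.isnan(x))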
ultralytics/utils/callbacks/wb.py
@@ -34,13 +34,19 @@ def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall
      Returns:
          (wandb.Object): A wandb object suitable for logging, showcasing the crafted metric visualization.
      """
-     import pandas  # scope for faster 'import ultralytics'
+     import polars as pl  # scope for faster 'import ultralytics'
+     import polars.selectors as cs
+
+     df = pl.DataFrame({"class": classes, "y": y, "x": x}).with_columns(cs.numeric().round(3))
+     data = df.select(["class", "y", "x"]).rows()
 
-     df = pandas.DataFrame({"class": classes, "y": y, "x": x}).round(3)
      fields = {"x": "x", "y": "y", "class": "class"}
      string_fields = {"title": title, "x-axis-title": x_title, "y-axis-title": y_title}
      return wb.plot_table(
-         "wandb/area-under-curve/v0", wb.Table(dataframe=df), fields=fields, string_fields=string_fields
+         "wandb/area-under-curve/v0",
+         wb.Table(data=data, columns=["class", "y", "x"]),
+         fields=fields,
+         string_fields=string_fields,
      )
 
 
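wb.Table(dataframe=...) expects a pandas DataFrame, so the polars port hands wandb plain Python rows instead: .rows() yields a list of tuples, and cs.numeric().round(3) rounds every numeric column in one expression. A standalone sketch of the row extraction:

    import polars as pl
    import polars.selectors as cs

    df = pl.DataFrame({"class": ["car"], "y": [0.91234], "x": [0.5]}).with_columns(cs.numeric().round(3))
    rows = df.select(["class", "y", "x"]).rows()  # [("car", 0.912, 0.5)], ready for wb.Table(data=rows, ...)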
ultralytics/utils/downloads.py
@@ -118,11 +118,11 @@ def zip_directory(directory, compress: bool = True, exclude=(".DS_Store", "__MAC
          raise FileNotFoundError(f"Directory '{directory}' does not exist.")
 
      # Zip with progress bar
-     files_to_zip = [f for f in directory.rglob("*") if f.is_file() and all(x not in f.name for x in exclude)]
+     files = [f for f in directory.rglob("*") if f.is_file() and all(x not in f.name for x in exclude)]  # files to zip
      zip_file = directory.with_suffix(".zip")
      compression = ZIP_DEFLATED if compress else ZIP_STORED
      with ZipFile(zip_file, "w", compression) as f:
-         for file in TQDM(files_to_zip, desc=f"Zipping {directory} to {zip_file}...", unit="file", disable=not progress):
+         for file in TQDM(files, desc=f"Zipping {directory} to {zip_file}...", unit="files", disable=not progress):
              f.write(file, file.relative_to(directory))
 
      return zip_file  # return path to zip file
@@ -187,7 +187,7 @@
          LOGGER.warning(f"Skipping {file} unzip as destination directory {path} is not empty.")
          return path
 
-     for f in TQDM(files, desc=f"Unzipping {file} to {Path(path).resolve()}...", unit="file", disable=not progress):
+     for f in TQDM(files, desc=f"Unzipping {file} to {Path(path).resolve()}...", unit="files", disable=not progress):
          # Ensure the file is within the extract_path to avoid path traversal security vulnerability
          if ".." in Path(f).parts:
              LOGGER.warning(f"Potentially insecure file path: {f}, skipping extraction.")
@@ -295,7 +295,8 @@ def safe_download(
      progress: bool = True,
  ):
      """
-     Download files from a URL with options for retrying, unzipping, and deleting the downloaded file.
+     Download files from a URL with options for retrying, unzipping, and deleting the downloaded file. Enhanced with
+     robust partial download detection using Content-Length validation.
 
      Args:
          url (str): The URL of the file to be downloaded.
@@ -342,24 +343,33 @@
                  s = "sS" * (not progress)  # silent
                  r = subprocess.run(["curl", "-#", f"-{s}L", url, "-o", f, "--retry", "3", "-C", "-"]).returncode
                  assert r == 0, f"Curl return value {r}"
+                 expected_size = None  # Can't get size with curl
              else:  # urllib download
-                 # torch.hub.download_url_to_file(url, f, progress=progress)  # do not use as progress tqdm differs
-                 with request.urlopen(url) as response, TQDM(
-                     total=int(response.getheader("Content-Length", 0)),
-                     desc=desc,
-                     disable=not progress,
-                     unit="B",
-                     unit_scale=True,
-                     unit_divisor=1024,
-                 ) as pbar:
-                     with open(f, "wb") as f_opened:
-                         for data in response:
-                             f_opened.write(data)
-                             pbar.update(len(data))
+                 with request.urlopen(url) as response:
+                     expected_size = int(response.getheader("Content-Length", 0))
+                     with TQDM(
+                         total=expected_size,
+                         desc=desc,
+                         disable=not progress,
+                         unit="B",
+                         unit_scale=True,
+                         unit_divisor=1024,
+                     ) as pbar:
+                         with open(f, "wb") as f_opened:
+                             for data in response:
+                                 f_opened.write(data)
+                                 pbar.update(len(data))
 
              if f.exists():
-                 if f.stat().st_size > min_bytes:
-                     break  # success
+                 file_size = f.stat().st_size
+                 if file_size > min_bytes:
+                     # Check if download is complete (only if we have expected_size)
+                     if expected_size and file_size != expected_size:
+                         LOGGER.warning(
+                             f"Partial download: {file_size}/{expected_size} bytes ({file_size / expected_size * 100:.1f}%)"
+                         )
+                     else:
+                         break  # success
                  f.unlink()  # remove partial downloads
          except Exception as e:
              if i == 0 and not is_online():
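The retry loop can now tell truncated transfers apart from successful ones: expected_size comes from the Content-Length header (None for curl, which exposes no size here), and a file that clears min_bytes but mismatches expected_size is deleted and retried rather than accepted. A minimal standalone sketch of the same check, with hypothetical url/dest values:

    from pathlib import Path
    from urllib import request

    url, dest = "https://example.com/weights.pt", Path("weights.pt")
    with request.urlopen(url) as response:
        expected = int(response.getheader("Content-Length", 0))
        dest.write_bytes(response.read())
    if expected and dest.stat().st_size != expected:
        dest.unlink()  # partial download - discard and retry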
ultralytics/utils/logger.py
@@ -77,7 +77,7 @@ class ConsoleLogger:
          # State tracking
          self.last_line = ""
          self.last_time = 0.0
-         self.last_progress_line = ""  # Track 100% progress lines separately
+         self.last_progress_line = ""  # Track last progress line for deduplication
          self.last_was_progress = False  # Track if last line was a progress bar
 
      def start_capture(self):
@@ -127,15 +127,14 @@
          for line in lines:
              line = line.rstrip()
 
-             # Handle progress bars - only show 100% completions
-             if ("it/s" in line and ("%|" in line or "━" in line)) or (
-                 "100%" in line and ("it/s" in line or "[" in line)
-             ):
-                 if "100%" not in line:
-                     continue
-                 # Dedupe 100% lines by core content (strip timing)
-                 progress_core = line.split("[")[0].split("]")[0].strip()
-                 if progress_core == self.last_progress_line:
+             # Skip lines with only thin progress bars (partial progress)
+             if "─" in line:  # Has thin lines but no thick lines
+                 continue
+
+             # Deduplicate completed progress bars only if they match the previous progress line
+             if " ━━" in line:
+                 progress_core = line.split(" ━━")[0].strip()
+                 if progress_core == self.last_progress_line and self.last_was_progress:
                      continue
                  self.last_progress_line = progress_core
                  self.last_was_progress = True
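The rewritten filter keys off the bar glyphs drawn by the new in-house TQDM rather than tqdm's percent/rate text: a line still containing the thin unfilled-bar character is in flight and gets skipped, while completed bars are deduplicated on the text preceding the thick bar. Roughly:

    line = "Scanning images ━━━━━━━━━━ 120/120 2.1it/s"  # hypothetical completed bar
    progress_core = line.split(" ━━")[0].strip()         # "Scanning images" - the dedup key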
@@ -271,7 +270,7 @@
          """Initialize NVIDIA GPU monitoring with pynvml."""
          try:
              assert not MACOS
-             check_requirements("pynvml>=12.0.0")
+             check_requirements("nvidia-ml-py>=12.0.0")
              self.pynvml = __import__("pynvml")
              self.pynvml.nvmlInit()
              return True
ultralytics/utils/plotting.py
@@ -557,7 +557,7 @@ class Annotator:
          return width, height, width * height
 
 
- @TryExcept()  # known issue https://github.com/ultralytics/yolov5/issues/5395
+ @TryExcept()
  @plt_settings()
  def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
      """
@@ -571,7 +571,7 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
          on_plot (Callable, optional): Function to call after plot is saved.
      """
      import matplotlib.pyplot as plt  # scope for faster 'import ultralytics'
-     import pandas
+     import polars
      from matplotlib.colors import LinearSegmentedColormap
 
      # Filter matplotlib>=3.7.2 warning
@@ -582,16 +582,7 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
      LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
      nc = int(cls.max() + 1)  # number of classes
      boxes = boxes[:1000000]  # limit to 1M boxes
-     x = pandas.DataFrame(boxes, columns=["x", "y", "width", "height"])
-
-     try:  # Seaborn correlogram
-         import seaborn
-
-         seaborn.pairplot(x, corner=True, diag_kind="auto", kind="hist", diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
-         plt.savefig(save_dir / "labels_correlogram.jpg", dpi=200)
-         plt.close()
-     except ImportError:
-         pass  # Skip if seaborn is not installed
+     x = polars.DataFrame(boxes, schema=["x", "y", "width", "height"])
 
      # Matplotlib labels
      subplot_3_4_color = LinearSegmentedColormap.from_list("white_blue", ["white", "blue"])
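polars builds the frame straight from the 2D NumPy array via schema=, replacing the pandas columns= call; the optional seaborn correlogram is dropped outright rather than ported. A sketch of the construction:

    import numpy as np
    import polars as pl

    boxes = np.random.rand(5, 4).astype(np.float32)
    x = pl.DataFrame(boxes, schema=["x", "y", "width", "height"])  # one named column per array column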
@@ -603,12 +594,13 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
      if 0 < len(names) < 30:
          ax[0].set_xticks(range(len(names)))
          ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10)
+         ax[0].bar_label(y[2])
      else:
          ax[0].set_xlabel("classes")
      boxes = np.column_stack([0.5 - boxes[:, 2:4] / 2, 0.5 + boxes[:, 2:4] / 2]) * 1000
      img = Image.fromarray(np.ones((1000, 1000, 3), dtype=np.uint8) * 255)
      for cls, box in zip(cls[:500], boxes[:500]):
-         ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
+         ImageDraw.Draw(img).rectangle(box.tolist(), width=1, outline=colors(cls))  # plot
      ax[1].imshow(img)
      ax[1].axis("off")
 
@@ -878,7 +870,7 @@ def plot_results(
          >>> plot_results("path/to/results.csv", segment=True)
      """
      import matplotlib.pyplot as plt  # scope for faster 'import ultralytics'
-     import pandas as pd
+     import polars as pl
      from scipy.ndimage import gaussian_filter1d
 
      save_dir = Path(file).parent if file else Path(dir)
@@ -899,11 +891,11 @@
      assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
      for f in files:
          try:
-             data = pd.read_csv(f)
+             data = pl.read_csv(f)
              s = [x.strip() for x in data.columns]
-             x = data.values[:, 0]
+             x = data.select(data.columns[0]).to_numpy().flatten()
              for i, j in enumerate(index):
-                 y = data.values[:, j].astype("float")
+                 y = data.select(data.columns[j]).to_numpy().flatten().astype("float")
                  # y[y == 0] = np.nan  # don't show zero values
                  ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8)  # actual results
                  ax[i].plot(x, gaussian_filter1d(y, sigma=3), ":", label="smooth", linewidth=2)  # smoothing line
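One subtlety of this port: selecting a single column with polars' select() returns a one-column DataFrame whose to_numpy() has shape (n, 1), hence the added .flatten(); pandas' data.values[:, 0] was already 1-D. An equivalent sketch (file name illustrative):

    import polars as pl

    data = pl.read_csv("results.csv")
    x = data.select(data.columns[0]).to_numpy().flatten()  # (n, 1) -> (n,)
    # or: data[data.columns[0]].to_numpy(), a Series route that is already 1-D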
@@ -953,6 +945,7 @@ def plt_color_scatter(v, f, bins: int = 20, cmap: str = "viridis", alpha: float
      plt.scatter(v, f, c=colors, cmap=cmap, alpha=alpha, edgecolors=edgecolors)
 
 
+ @plt_settings()
  def plot_tune_results(csv_file: str = "tune_results.csv"):
      """
      Plot the evolution results stored in a 'tune_results.csv' file. The function generates a scatter plot for each key
@@ -965,7 +958,7 @@
          >>> plot_tune_results("path/to/tune_results.csv")
      """
      import matplotlib.pyplot as plt  # scope for faster 'import ultralytics'
-     import pandas as pd
+     import polars as pl
      from scipy.ndimage import gaussian_filter1d
 
      def _save_one_file(file):
@@ -976,10 +969,10 @@
 
      # Scatter plots for each hyperparameter
      csv_file = Path(csv_file)
-     data = pd.read_csv(csv_file)
+     data = pl.read_csv(csv_file)
      num_metrics_columns = 1
      keys = [x.strip() for x in data.columns][num_metrics_columns:]
-     x = data.values
+     x = data.to_numpy()
      fitness = x[:, 0]  # fitness
      j = np.argmax(fitness)  # max fitness index
      n = math.ceil(len(keys) ** 0.5)  # columns and rows in plot