dgenerate_ultralytics_headless-8.3.242-py3-none-any.whl → dgenerate_ultralytics_headless-8.3.243-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their respective public registries.
dgenerate_ultralytics_headless-8.3.243.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.242
+ Version: 8.3.243
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -50,7 +50,7 @@ Requires-Dist: ipython; extra == "dev"
  Requires-Dist: pytest; extra == "dev"
  Requires-Dist: pytest-cov; extra == "dev"
  Requires-Dist: coverage[toml]; extra == "dev"
- Requires-Dist: zensical>=0.0.9; python_version >= "3.10" and extra == "dev"
+ Requires-Dist: zensical>=0.0.15; python_version >= "3.10" and extra == "dev"
  Requires-Dist: mkdocs-ultralytics-plugin>=0.2.4; extra == "dev"
  Requires-Dist: minijinja>=2.0.0; extra == "dev"
  Provides-Extra: export
dgenerate_ultralytics_headless-8.3.243.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- dgenerate_ultralytics_headless-8.3.242.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.243.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=bCox_hLdGRFYGLb2kd722VdNP2zEXNYNuLLYtqZSrbw,804
  tests/conftest.py,sha256=mOy9lGpNp7lk1hHl6_pVE0f9cU-72gnkoSm4TO-CNZU,2318
  tests/test_cli.py,sha256=GhIFHi-_WIJpDgoGNRi0DnjbfwP1wHbklBMnkCM-P_4,5464
@@ -8,7 +8,7 @@ tests/test_exports.py,sha256=UCLbjUnK8ZNldnJodrAxftUrwzO6ZNQxr7j64nDl9io,14137
  tests/test_integrations.py,sha256=6QgSh9n0J04RdUYz08VeVOnKmf4S5MDEQ0chzS7jo_c,6220
  tests/test_python.py,sha256=viMvRajIbDZdm64hRRg9i8qZ1sU9frwB69e56mxwEXk,29266
  tests/test_solutions.py,sha256=CIaphpmOXgz9AE9xcm1RWODKrwGfZLCc84IggGXArNM,14122
- ultralytics/__init__.py,sha256=oh5fizpewk5bLr7va83-trIOFOSneeHfFNRuV8zsHBw,1302
+ ultralytics/__init__.py,sha256=UOIrYdbdS134a26TB2-qzRTQUcvvSJ2MpF96UESJcH4,1302
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -254,7 +254,7 @@ ultralytics/utils/__init__.py,sha256=JfvODTB4mG_JOhTeCiPtq0iCEgiCh14hJf195rnOhLQ
  ultralytics/utils/autobatch.py,sha256=jiE4m_--H9UkXFDm_FqzcZk_hSTCGpS72XdVEKgZwAo,5114
  ultralytics/utils/autodevice.py,sha256=rXlPuo-iX-vZ4BabmMGEGh9Uxpau4R7Zlt1KCo9Xfyc,8892
  ultralytics/utils/benchmarks.py,sha256=S_W4S4pe2ktSRdSuWb6m09UEFQmZhmjl943bbo67hOI,32277
- ultralytics/utils/checks.py,sha256=hbM2pS7ffIbbTmeTe2AbQ-tCMM2H5WvkDMCpaeU8kbU,38203
+ ultralytics/utils/checks.py,sha256=9RGHIs4_heSFSL2YHRw0M3gLob6G9wQV3a24A0pTWrc,38411
  ultralytics/utils/cpu.py,sha256=OksKOlX93AsbSsFuoYvLXRXgpkOibrZSwQyW6lipt4Q,3493
  ultralytics/utils/dist.py,sha256=hOuY1-unhQAY-uWiZw3LWw36d1mqJuYK75NdlwB4oKE,4131
  ultralytics/utils/downloads.py,sha256=IyiGjjXqOyf1B0qLMk7vE6sSQ8s232OhKS8aj9XbTgs,22883
@@ -263,8 +263,8 @@ ultralytics/utils/events.py,sha256=6vqs_iSxoXIhQ804sOjApNZmXwNW9FUFtjaHPY8ta10,4
  ultralytics/utils/files.py,sha256=BdaRwEKqzle4glSj8n_jq6bDjTCAs_H1SN06ZOQ9qFU,8190
  ultralytics/utils/git.py,sha256=UdqeIiiEzg1qkerAZrg5YtTYPuJYwrpxW9N_6Pq6s8U,5501
  ultralytics/utils/instance.py,sha256=11mhefvTI9ftMqSirXuiViAi0Fxlo6v84qvNxfRNUoE,18862
- ultralytics/utils/logger.py,sha256=2G7_wAteN26SWaqm3VJgDc2XYcotz5uWolQllvMcLoI,16821
- ultralytics/utils/loss.py,sha256=JWkxM6IsJQwmlCsTc0bUQWPBH80UIDoItyUlSgF9Ukw,39579
+ ultralytics/utils/logger.py,sha256=US4pLBmRQNI31KEeqqKdBEXDLS1eE5J5hWR0xPDaGJI,18966
+ ultralytics/utils/loss.py,sha256=t-z7qkvqF8OtuRHrj2wmvClZV2CCumIRi9jnqkc9i_A,39573
  ultralytics/utils/metrics.py,sha256=apVQLSML4TKwreFwRtWPQ1R5_fpp7vPDuI1q3cTY24w,68674
  ultralytics/utils/nms.py,sha256=zv1rOzMF6WU8Kdk41VzNf1H1EMt_vZHcbDFbg3mnN2o,14248
  ultralytics/utils/ops.py,sha256=mbrqv36ovUp9FMIqClTHikOOViYEJ058CH-qDLkWbSw,25797
@@ -283,7 +283,7 @@ ultralytics/utils/callbacks/dvc.py,sha256=YT0Sa5P8Huj8Fn9jM2P6MYzUY3PIVxsa5BInVi
  ultralytics/utils/callbacks/hub.py,sha256=fVLqqr3ZM6hoYFlVMEeejfq1MWDrkWCskPFOG3HGILQ,4159
  ultralytics/utils/callbacks/mlflow.py,sha256=wCXjQgdufp9LYujqMzLZOmIOur6kvrApHNeo9dA7t_g,5323
  ultralytics/utils/callbacks/neptune.py,sha256=_vt3cMwDHCR-LyT3KtRikGpj6AG11oQ-skUUUUdZ74o,4391
- ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMvhPYKR6wUTU,2008
+ ultralytics/utils/callbacks/platform.py,sha256=oWz8OvdgO3rCKe6VvqNOhwStS07ddJkvPy1O72SqYEc,9271
  ultralytics/utils/callbacks/raytune.py,sha256=Y0dFyNZVRuFovSh7nkgUIHTQL3xIXOACElgHuYbg_5I,1278
  ultralytics/utils/callbacks/tensorboard.py,sha256=PTJYvD2gqRUN8xw5VoTjvKnu2adukLfvhMlDgTnTiFU,4952
  ultralytics/utils/callbacks/wb.py,sha256=ghmL3gigOa-z_F54-TzMraKw9MAaYX-Wk4H8dLoRvX8,7705
@@ -291,8 +291,8 @@ ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqd
  ultralytics/utils/export/engine.py,sha256=23-lC6dNsmz5vprSJzaN7UGNXrFlVedNcqhlOH_IXes,9956
  ultralytics/utils/export/imx.py,sha256=F3b334IZdwjF8PdP1s6QI3Ndd82_2e77clj8aGLzIDo,12856
  ultralytics/utils/export/tensorflow.py,sha256=igYzwbdblb9YgfV4Jgl5lMvynuVRcF51dAzI7j-BBI0,9966
- dgenerate_ultralytics_headless-8.3.242.dist-info/METADATA,sha256=wnKuuyVhvPwKkTBQIjfDNWO5aCY8kAFUK1ijt1MtEfY,38798
- dgenerate_ultralytics_headless-8.3.242.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.242.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.242.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.242.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.243.dist-info/METADATA,sha256=HxU9hZqOz-XaKXszDolkAQLJ7LUbbtjns1jw2HVWAoM,38799
+ dgenerate_ultralytics_headless-8.3.243.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.243.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.243.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.243.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.242"
+ __version__ = "8.3.243"
 
  import importlib
  import os
ultralytics/utils/callbacks/platform.py CHANGED
@@ -1,73 +1,290 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- from ultralytics.utils import RANK, SETTINGS
+ import os
+ import platform
+ import socket
+ import sys
+ from concurrent.futures import ThreadPoolExecutor
+ from pathlib import Path
+ from time import time
+
+ from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING
+
+ _last_upload = 0  # Rate limit model uploads
+ _console_logger = None  # Global console logger instance
+ _system_logger = None  # Cached system logger instance
+
+ try:
+     assert not TESTS_RUNNING  # do not log pytest
+     assert SETTINGS.get("platform", False) is True or os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
+     _api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
+     assert _api_key  # verify API key is present
+
+     import requests
+
+     from ultralytics.utils.logger import ConsoleLogger, SystemLogger
+     from ultralytics.utils.torch_utils import model_info_for_loggers
+
+     _executor = ThreadPoolExecutor(max_workers=10)  # Bounded thread pool for async operations
+
+ except (AssertionError, ImportError):
+     _api_key = None
+
+
+ def _send(event, data, project, name):
+     """Send event to Platform endpoint."""
+     try:
+         requests.post(
+             "https://alpha.ultralytics.com/api/webhooks/training/metrics",
+             json={"event": event, "project": project, "name": name, "data": data},
+             headers={"Authorization": f"Bearer {_api_key}"},
+             timeout=10,
+         ).raise_for_status()
+     except Exception as e:
+         LOGGER.debug(f"Platform: Failed to send {event}: {e}")
+
+
+ def _send_async(event, data, project, name):
+     """Send event asynchronously using bounded thread pool."""
+     _executor.submit(_send, event, data, project, name)
+
+
+ def _upload_model(model_path, project, name):
+     """Upload model checkpoint to Platform via signed URL."""
+     try:
+         model_path = Path(model_path)
+         if not model_path.exists():
+             return None
+
+         # Get signed upload URL
+         response = requests.post(
+             "https://alpha.ultralytics.com/api/webhooks/models/upload",
+             json={"project": project, "name": name, "filename": model_path.name},
+             headers={"Authorization": f"Bearer {_api_key}"},
+             timeout=10,
+         )
+         response.raise_for_status()
+         data = response.json()
+
+         # Upload to GCS
+         with open(model_path, "rb") as f:
+             requests.put(
+                 data["uploadUrl"],
+                 data=f,
+                 headers={"Content-Type": "application/octet-stream"},
+                 timeout=600,  # 10 min timeout for large models
+             ).raise_for_status()
+
+         LOGGER.info(f"Platform: Model uploaded to '{project}'")
+         return data.get("gcsPath")
+
+     except Exception as e:
+         LOGGER.debug(f"Platform: Failed to upload model: {e}")
+         return None
+
+
+ def _upload_model_async(model_path, project, name):
+     """Upload model asynchronously using bounded thread pool."""
+     _executor.submit(_upload_model, model_path, project, name)
+
+
+ def _get_environment_info():
+     """Collect comprehensive environment info using existing ultralytics utilities."""
+     import torch
+
+     from ultralytics import __version__
+     from ultralytics.utils.torch_utils import get_cpu_info, get_gpu_info
+
+     env = {
+         "ultralyticsVersion": __version__,
+         "hostname": socket.gethostname(),
+         "os": platform.platform(),
+         "environment": ENVIRONMENT,
+         "pythonVersion": PYTHON_VERSION,
+         "pythonExecutable": sys.executable,
+         "cpuCount": os.cpu_count() or 0,
+         "cpu": get_cpu_info(),
+         "command": " ".join(sys.argv),
+     }
+
+     # Git info using cached GIT singleton (no subprocess calls)
+     try:
+         if GIT.is_repo:
+             if GIT.origin:
+                 env["gitRepository"] = GIT.origin
+             if GIT.branch:
+                 env["gitBranch"] = GIT.branch
+             if GIT.commit:
+                 env["gitCommit"] = GIT.commit[:12]  # Short hash
+     except Exception:
+         pass
+
+     # GPU info
+     try:
+         if torch.cuda.is_available():
+             env["gpuCount"] = torch.cuda.device_count()
+             env["gpuType"] = get_gpu_info(0) if torch.cuda.device_count() > 0 else None
+     except Exception:
+         pass
+
+     return env
 
 
  def on_pretrain_routine_start(trainer):
-     """Initialize and start console logging immediately at the very beginning."""
-     if RANK in {-1, 0}:
-         from ultralytics.utils.logger import DEFAULT_LOG_PATH, ConsoleLogger, SystemLogger
+     """Initialize Platform logging at training start."""
+     global _console_logger, _last_upload
+
+     if RANK not in {-1, 0} or not trainer.args.project:
+         return
+
+     # Initialize upload timer to now so first checkpoint waits 15 min from training start
+     _last_upload = time()
+
+     project, name = str(trainer.args.project), str(trainer.args.name or "train")
+     LOGGER.info(f"Platform: Streaming to project '{project}' as '{name}'")
 
-         trainer.system_logger = SystemLogger()
-         trainer.console_logger = ConsoleLogger(DEFAULT_LOG_PATH)
-         trainer.console_logger.start_capture()
+     # Create callback to send console output to Platform
+     def send_console_output(content, line_count, chunk_id):
+         """Send batched console output to Platform webhook."""
+         _send_async("console_output", {"chunkId": chunk_id, "content": content, "lineCount": line_count}, project, name)
 
+     # Start console capture with batching (5 lines or 5 seconds)
+     _console_logger = ConsoleLogger(batch_size=5, flush_interval=5.0, on_flush=send_console_output)
+     _console_logger.start_capture()
 
- def on_pretrain_routine_end(trainer):
-     """Handle pre-training routine completion event."""
-     pass
+     # Gather model info for richer metadata
+     model_info = {}
+     try:
+         info = model_info_for_loggers(trainer)
+         model_info = {
+             "parameters": info.get("model/parameters", 0),
+             "gflops": info.get("model/GFLOPs", 0),
+             "classes": getattr(trainer.model, "yaml", {}).get("nc", 0),  # number of classes
+         }
+     except Exception:
+         pass
+
+     # Collect environment info (W&B-style metadata)
+     environment = _get_environment_info()
+
+     _send_async(
+         "training_started",
+         {
+             "trainArgs": {k: str(v) for k, v in vars(trainer.args).items()},
+             "epochs": trainer.epochs,
+             "device": str(trainer.device),
+             "modelInfo": model_info,
+             "environment": environment,
+         },
+         project,
+         name,
+     )
 
 
  def on_fit_epoch_end(trainer):
-     """Handle end of training epoch event and collect system metrics."""
-     if RANK in {-1, 0} and hasattr(trainer, "system_logger"):
-         system_metrics = trainer.system_logger.get_metrics()
-         print(system_metrics)  # for debug
+     """Log training and system metrics at epoch end."""
+     global _system_logger
+
+     if RANK not in {-1, 0} or not trainer.args.project:
+         return
+
+     project, name = str(trainer.args.project), str(trainer.args.name or "train")
+     metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics}
+
+     if trainer.optimizer and trainer.optimizer.param_groups:
+         metrics["lr"] = trainer.optimizer.param_groups[0]["lr"]
+     if trainer.epoch == 0:
+         try:
+             metrics.update(model_info_for_loggers(trainer))
+         except Exception:
+             pass
+
+     # Get system metrics (cache SystemLogger for efficiency)
+     system = {}
+     try:
+         if _system_logger is None:
+             _system_logger = SystemLogger()
+         system = _system_logger.get_metrics(rates=True)
+     except Exception:
+         pass
+
+     _send_async(
+         "epoch_end",
+         {
+             "epoch": trainer.epoch,
+             "metrics": metrics,
+             "system": system,
+             "fitness": trainer.fitness,
+             "best_fitness": trainer.best_fitness,
+         },
+         project,
+         name,
+     )
 
 
  def on_model_save(trainer):
-     """Handle model checkpoint save event."""
-     pass
+     """Upload model checkpoint (rate limited to every 15 min)."""
+     global _last_upload
 
+     if RANK not in {-1, 0} or not trainer.args.project:
+         return
 
- def on_train_end(trainer):
-     """Stop console capture and finalize logs."""
-     if logger := getattr(trainer, "console_logger", None):
-         logger.stop_capture()
+     # Rate limit to every 15 minutes (900 seconds)
+     if time() - _last_upload < 900:
+         return
 
+     model_path = trainer.best if trainer.best and Path(trainer.best).exists() else trainer.last
+     if not model_path:
+         return
 
- def on_train_start(trainer):
-     """Handle training start event."""
-     pass
+     project, name = str(trainer.args.project), str(trainer.args.name or "train")
+     _upload_model_async(model_path, project, name)
+     _last_upload = time()
 
 
- def on_val_start(validator):
-     """Handle validation start event."""
-     pass
+ def on_train_end(trainer):
+     """Log final results and upload best model."""
+     global _console_logger
+
+     if RANK not in {-1, 0} or not trainer.args.project:
+         return
 
+     project, name = str(trainer.args.project), str(trainer.args.name or "train")
 
- def on_predict_start(predictor):
-     """Handle prediction start event."""
-     pass
+     # Stop console capture and flush remaining output
+     if _console_logger:
+         _console_logger.stop_capture()
+         _console_logger = None
 
+     # Upload best model (blocking to ensure it completes)
+     model_path = None
+     if trainer.best and Path(trainer.best).exists():
+         model_path = _upload_model(trainer.best, project, name)
 
- def on_export_start(exporter):
-     """Handle model export start event."""
-     pass
+     # Send training complete
+     _send(
+         "training_complete",
+         {
+             "results": {
+                 "metrics": {**trainer.metrics, "fitness": trainer.fitness},
+                 "bestEpoch": getattr(trainer, "best_epoch", trainer.epoch),
+                 "bestFitness": trainer.best_fitness,
+                 "modelPath": model_path or str(trainer.best) if trainer.best else None,
+             }
+         },
+         project,
+         name,
+     )
+     LOGGER.info(f"Platform: Training complete, results uploaded to '{project}'")
 
 
  callbacks = (
      {
          "on_pretrain_routine_start": on_pretrain_routine_start,
-         "on_pretrain_routine_end": on_pretrain_routine_end,
          "on_fit_epoch_end": on_fit_epoch_end,
          "on_model_save": on_model_save,
          "on_train_end": on_train_end,
-         "on_train_start": on_train_start,
-         "on_val_start": on_val_start,
-         "on_predict_start": on_predict_start,
-         "on_export_start": on_export_start,
      }
-     if SETTINGS.get("platform", False) is True  # disabled for debugging
+     if _api_key
      else {}
  )
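
Note: the rewritten platform.py gates everything on a resolvable API key at import time and on trainer.args.project inside each callback. A minimal sketch of how a user would opt in, assuming a valid key; the key value, project, and run name below are placeholders, not part of this diff:

import os

os.environ["ULTRALYTICS_API_KEY"] = "<your-api-key>"  # placeholder; SETTINGS["api_key"] or the "platform" settings flag also satisfies the gate

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# Without `project`, every Platform callback returns early and nothing is streamed
model.train(data="coco8.yaml", epochs=3, project="my-project", name="run1")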
ultralytics/utils/checks.py CHANGED
@@ -418,6 +418,11 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
          >>> check_requirements([("onnxruntime", "onnxruntime-gpu"), "numpy"])
      """
      prefix = colorstr("red", "bold", "requirements:")
+
+     if os.environ.get("ULTRALYTICS_SKIP_REQUIREMENTS_CHECKS", "0") == "1":
+         LOGGER.info(f"{prefix} ULTRALYTICS_SKIP_REQUIREMENTS_CHECKS=1 detected, skipping requirements check.")
+         return True
+
      if isinstance(requirements, Path):  # requirements.txt file
          file = requirements.resolve()
          assert file.exists(), f"{prefix} {file} not found, check failed."
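
Note: the new escape hatch in check_requirements() is read from the environment at call time, so it can be toggled without code changes. A short sketch; the onnx requirement string is illustrative only:

import os

os.environ["ULTRALYTICS_SKIP_REQUIREMENTS_CHECKS"] = "1"  # must be exactly "1"; any other value keeps the normal check path

from ultralytics.utils.checks import check_requirements

check_requirements("onnx>=1.12.0")  # logs a skip notice and returns True immediately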
ultralytics/utils/logger.py CHANGED
@@ -1,7 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
  import logging
- import queue
  import shutil
  import sys
  import threading
@@ -12,72 +11,81 @@ from pathlib import Path
  from ultralytics.utils import MACOS, RANK
  from ultralytics.utils.checks import check_requirements
 
- # Initialize default log file
- DEFAULT_LOG_PATH = Path("train.log")
- if RANK in {-1, 0} and DEFAULT_LOG_PATH.exists():
-     DEFAULT_LOG_PATH.unlink(missing_ok=True)
-
 
  class ConsoleLogger:
-     """Console output capture with API/file streaming and deduplication.
+     """Console output capture with batched streaming to file, API, or custom callback.
 
-     Captures stdout/stderr output and streams it to either an API endpoint or local file, with intelligent deduplication
-     to reduce noise from repetitive console output.
+     Captures stdout/stderr output and streams it with intelligent deduplication and configurable batching.
 
      Attributes:
-         destination (str | Path): Target destination for streaming (URL or Path object).
-         is_api (bool): Whether destination is an API endpoint (True) or local file (False).
-         original_stdout: Reference to original sys.stdout for restoration.
-         original_stderr: Reference to original sys.stderr for restoration.
-         log_queue (queue.Queue): Thread-safe queue for buffering log messages.
+         destination (str | Path | None): Target destination for streaming (URL, Path, or None for callback-only).
+         batch_size (int): Number of lines to batch before flushing (default: 1 for immediate).
+         flush_interval (float): Seconds between automatic flushes (default: 5.0).
+         on_flush (callable | None): Optional callback function called with batched content on flush.
          active (bool): Whether console capture is currently active.
-         worker_thread (threading.Thread): Background thread for processing log queue.
-         last_line (str): Last processed line for deduplication.
-         last_time (float): Timestamp of last processed line.
-         last_progress_line (str): Last progress bar line for progress deduplication.
-         last_was_progress (bool): Whether the last line was a progress bar.
 
      Examples:
-         Basic file logging:
+         File logging (immediate):
          >>> logger = ConsoleLogger("training.log")
          >>> logger.start_capture()
          >>> print("This will be logged")
         >>> logger.stop_capture()
 
-         API streaming:
-         >>> logger = ConsoleLogger("https://api.example.com/logs")
+         API streaming with batching:
+         >>> logger = ConsoleLogger("https://api.example.com/logs", batch_size=10)
+         >>> logger.start_capture()
+
+         Custom callback with batching:
+         >>> def my_handler(content, line_count, chunk_id):
+         ...     print(f"Received {line_count} lines")
+         >>> logger = ConsoleLogger(on_flush=my_handler, batch_size=5)
          >>> logger.start_capture()
-         >>> # All output streams to API
-         >>> logger.stop_capture()
      """
 
-     def __init__(self, destination):
-         """Initialize with API endpoint or local file path.
+     def __init__(self, destination=None, batch_size=1, flush_interval=5.0, on_flush=None):
+         """Initialize console logger with optional batching.
 
          Args:
-             destination (str | Path): API endpoint URL (http/https) or local file path for streaming output.
+             destination (str | Path | None): API endpoint URL (http/https), local file path, or None.
+             batch_size (int): Lines to accumulate before flush (1 = immediate, higher = batched).
+             flush_interval (float): Max seconds between flushes when batching.
+             on_flush (callable | None): Callback(content: str, line_count: int, chunk_id: int) for custom handling.
          """
          self.destination = destination
          self.is_api = isinstance(destination, str) and destination.startswith(("http://", "https://"))
-         if not self.is_api:
+         if destination is not None and not self.is_api:
              self.destination = Path(destination)
 
-         # Console capture
+         # Batching configuration
+         self.batch_size = max(1, batch_size)
+         self.flush_interval = flush_interval
+         self.on_flush = on_flush
+
+         # Console capture state
          self.original_stdout = sys.stdout
          self.original_stderr = sys.stderr
-         self.log_queue = queue.Queue(maxsize=1000)
          self.active = False
-         self.worker_thread = None
+         self._log_handler = None  # Track handler for cleanup
+
+         # Buffer for batching
+         self.buffer = []
+         self.buffer_lock = threading.Lock()
+         self.flush_thread = None
+         self.chunk_id = 0
 
-         # State tracking
+         # Deduplication state
          self.last_line = ""
         self.last_time = 0.0
-         self.last_progress_line = ""  # Track last progress line for deduplication
+         self.last_progress_line = ""  # Track progress sequence key for deduplication
          self.last_was_progress = False  # Track if last line was a progress bar
 
      def start_capture(self):
-         """Start capturing console output and redirect stdout/stderr to custom capture objects."""
-         if self.active:
+         """Start capturing console output and redirect stdout/stderr.
+
+         Notes:
+             In DDP training, only activates on rank 0/-1 to prevent duplicate logging.
+         """
+         if self.active or RANK not in {-1, 0}:
              return
 
          self.active = True
@@ -86,23 +94,35 @@ class ConsoleLogger:
 
          # Hook Ultralytics logger
          try:
-             handler = self._LogHandler(self._queue_log)
-             logging.getLogger("ultralytics").addHandler(handler)
+             self._log_handler = self._LogHandler(self._queue_log)
+             logging.getLogger("ultralytics").addHandler(self._log_handler)
          except Exception:
              pass
 
-         self.worker_thread = threading.Thread(target=self._stream_worker, daemon=True)
-         self.worker_thread.start()
+         # Start background flush thread for batched mode
+         if self.batch_size > 1:
+             self.flush_thread = threading.Thread(target=self._flush_worker, daemon=True)
+             self.flush_thread.start()
 
      def stop_capture(self):
-         """Stop capturing console output and restore original stdout/stderr."""
+         """Stop capturing console output and flush remaining buffer."""
          if not self.active:
              return
 
          self.active = False
          sys.stdout = self.original_stdout
          sys.stderr = self.original_stderr
-         self.log_queue.put(None)
+
+         # Remove logging handler to prevent memory leak
+         if self._log_handler:
+             try:
+                 logging.getLogger("ultralytics").removeHandler(self._log_handler)
+             except Exception:
+                 pass
+             self._log_handler = None
+
+         # Final flush
+         self._flush_buffer()
 
      def _queue_log(self, text):
          """Queue console text with deduplication and timestamp processing."""
@@ -126,12 +146,34 @@
              if "─" in line:  # Has thin lines but no thick lines
                  continue
 
-             # Deduplicate completed progress bars only if they match the previous progress line
+             # Only show 100% completion lines for progress bars
              if " ━━" in line:
-                 progress_core = line.split(" ━━")[0].strip()
-                 if progress_core == self.last_progress_line and self.last_was_progress:
+                 is_complete = "100%" in line
+
+                 # Skip ALL non-complete progress lines
+                 if not is_complete:
+                     continue
+
+                 # Extract sequence key to deduplicate multiple 100% lines for same sequence
+                 parts = line.split()
+                 seq_key = ""
+                 if parts:
+                     # Check for epoch pattern (X/Y at start)
+                     if "/" in parts[0] and parts[0].replace("/", "").isdigit():
+                         seq_key = parts[0]  # e.g., "1/3"
+                     elif parts[0] == "Class" and len(parts) > 1:
+                         seq_key = f"{parts[0]}_{parts[1]}"  # e.g., "Class_train:" or "Class_val:"
+                     elif parts[0] in ("train:", "val:"):
+                         seq_key = parts[0]  # Phase identifier
+
+                 # Skip if we already showed 100% for this sequence
+                 if seq_key and self.last_progress_line == f"{seq_key}:done":
                      continue
-                 self.last_progress_line = progress_core
+
+                 # Mark this sequence as done
+                 if seq_key:
+                     self.last_progress_line = f"{seq_key}:done"
+
                  self.last_was_progress = True
              else:
                  # Skip empty line after progress bar
@@ -152,48 +194,62 @@
              timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
              line = f"[{timestamp}] {line}"
 
-             # Queue with overflow protection
-             if not self._safe_put(f"{line}\n"):
-                 continue  # Skip if queue handling fails
+             # Add to buffer and check if flush needed
+             should_flush = False
+             with self.buffer_lock:
+                 self.buffer.append(line)
+                 if len(self.buffer) >= self.batch_size:
+                     should_flush = True
 
-     def _safe_put(self, item):
-         """Safely put item in queue with overflow handling."""
-         try:
-             self.log_queue.put_nowait(item)
-             return True
-         except queue.Full:
-             try:
-                 self.log_queue.get_nowait()  # Drop oldest
-                 self.log_queue.put_nowait(item)
-                 return True
-             except queue.Empty:
-                 return False
-
-     def _stream_worker(self):
-         """Background worker for streaming logs to destination."""
+             # Flush outside lock to avoid deadlock
+             if should_flush:
+                 self._flush_buffer()
+
+     def _flush_worker(self):
+         """Background worker that flushes buffer periodically."""
          while self.active:
+             time.sleep(self.flush_interval)
+             if self.active:
+                 self._flush_buffer()
+
+     def _flush_buffer(self):
+         """Flush buffered lines to destination and/or callback."""
+         with self.buffer_lock:
+             if not self.buffer:
+                 return
+             lines = self.buffer.copy()
+             self.buffer.clear()
+             self.chunk_id += 1
+             chunk_id = self.chunk_id  # Capture under lock to avoid race
+
+         content = "\n".join(lines)
+         line_count = len(lines)
+
+         # Call custom callback if provided
+         if self.on_flush:
              try:
-                 log_text = self.log_queue.get(timeout=1)
-                 if log_text is None:
-                     break
-                 self._write_log(log_text)
-             except queue.Empty:
-                 continue
+                 self.on_flush(content, line_count, chunk_id)
+             except Exception:
+                 pass  # Silently ignore callback errors to avoid flooding stderr
+
+         # Write to destination (file or API)
+         if self.destination is not None:
+             self._write_destination(content)
 
-     def _write_log(self, text):
-         """Write log to API endpoint or local file destination."""
+     def _write_destination(self, content):
+         """Write content to file or API destination."""
          try:
              if self.is_api:
-                 import requests  # scoped as slow import
+                 import requests
 
-                 payload = {"timestamp": datetime.now().isoformat(), "message": text.strip()}
+                 payload = {"timestamp": datetime.now().isoformat(), "message": content}
                  requests.post(str(self.destination), json=payload, timeout=5)
              else:
                  self.destination.parent.mkdir(parents=True, exist_ok=True)
                  with self.destination.open("a", encoding="utf-8") as f:
-                     f.write(text)
+                     f.write(content + "\n")
          except Exception as e:
-             print(f"Platform logging error: {e}", file=self.original_stderr)
+             print(f"Console logger write error: {e}", file=self.original_stderr)
 
      class _ConsoleCapture:
          """Lightweight stdout/stderr capture."""
ultralytics/utils/loss.py CHANGED
@@ -498,7 +498,7 @@ class v8PoseLoss(v8DetectionLoss):
 
      def __call__(self, preds: Any, batch: dict[str, torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor]:
          """Calculate the total loss and detach it for pose estimation."""
-         loss = torch.zeros(5, device=self.device)  # box, cls, dfl, kpt_location, kpt_visibility
+         loss = torch.zeros(5, device=self.device)  # box, pose, kobj, cls, dfl
          feats, pred_kpts = preds if isinstance(preds[0], list) else preds[1]
          pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
              (self.reg_max * 4, self.nc), 1
@@ -560,7 +560,7 @@ class v8PoseLoss(v8DetectionLoss):
          loss[3] *= self.hyp.cls  # cls gain
          loss[4] *= self.hyp.dfl  # dfl gain
 
-         return loss * batch_size, loss.detach()  # loss(box, pose, kobj, cls, dfl)
+         return loss * batch_size, loss.detach()  # loss(box, pose, kobj, cls, dfl)
 
      @staticmethod
      def kpts_decode(anchor_points: torch.Tensor, pred_kpts: torch.Tensor) -> torch.Tensor:
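
Note: the corrected comments now agree with the gain application in the second hunk (loss[3] scales with cls, loss[4] with dfl). For readers consuming the detached items, the component order unpacks as below; a one-line illustration, with loss_items standing in for the second return value:

# Illustrative unpacking of v8PoseLoss's detached loss items after this fix
box, pose, kobj, cls, dfl = loss_items  # loss_items = loss.detach() from v8PoseLoss.__call__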