pygpt-net 2.6.31__py3-none-any.whl → 2.6.33__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +3 -1
  4. pygpt_net/app_core.py +3 -1
  5. pygpt_net/config.py +3 -1
  6. pygpt_net/controller/__init__.py +5 -1
  7. pygpt_net/controller/audio/audio.py +13 -0
  8. pygpt_net/controller/chat/attachment.py +2 -0
  9. pygpt_net/controller/chat/common.py +18 -83
  10. pygpt_net/controller/lang/custom.py +2 -2
  11. pygpt_net/controller/media/__init__.py +12 -0
  12. pygpt_net/controller/media/media.py +115 -0
  13. pygpt_net/controller/painter/common.py +10 -11
  14. pygpt_net/controller/painter/painter.py +4 -12
  15. pygpt_net/controller/realtime/realtime.py +27 -2
  16. pygpt_net/controller/ui/mode.py +16 -2
  17. pygpt_net/core/audio/backend/pyaudio/realtime.py +51 -14
  18. pygpt_net/core/audio/output.py +3 -2
  19. pygpt_net/core/camera/camera.py +369 -53
  20. pygpt_net/core/image/image.py +6 -5
  21. pygpt_net/core/realtime/worker.py +1 -5
  22. pygpt_net/core/render/web/body.py +24 -3
  23. pygpt_net/core/text/utils.py +54 -2
  24. pygpt_net/core/types/image.py +7 -1
  25. pygpt_net/core/video/__init__.py +12 -0
  26. pygpt_net/core/video/video.py +290 -0
  27. pygpt_net/data/config/config.json +240 -212
  28. pygpt_net/data/config/models.json +243 -172
  29. pygpt_net/data/config/settings.json +194 -6
  30. pygpt_net/data/css/web-blocks.css +6 -0
  31. pygpt_net/data/css/web-chatgpt.css +6 -0
  32. pygpt_net/data/css/web-chatgpt_wide.css +6 -0
  33. pygpt_net/data/locale/locale.de.ini +31 -2
  34. pygpt_net/data/locale/locale.en.ini +41 -7
  35. pygpt_net/data/locale/locale.es.ini +31 -2
  36. pygpt_net/data/locale/locale.fr.ini +31 -2
  37. pygpt_net/data/locale/locale.it.ini +31 -2
  38. pygpt_net/data/locale/locale.pl.ini +34 -2
  39. pygpt_net/data/locale/locale.uk.ini +31 -2
  40. pygpt_net/data/locale/locale.zh.ini +31 -2
  41. pygpt_net/data/locale/plugin.cmd_web.en.ini +8 -0
  42. pygpt_net/item/model.py +22 -1
  43. pygpt_net/provider/api/google/__init__.py +38 -2
  44. pygpt_net/provider/api/google/video.py +364 -0
  45. pygpt_net/provider/api/openai/realtime/realtime.py +1 -2
  46. pygpt_net/provider/core/config/patch.py +226 -178
  47. pygpt_net/provider/core/model/patch.py +17 -2
  48. pygpt_net/provider/web/duckduck_search.py +212 -0
  49. pygpt_net/ui/layout/toolbox/audio.py +55 -0
  50. pygpt_net/ui/layout/toolbox/footer.py +14 -58
  51. pygpt_net/ui/layout/toolbox/image.py +3 -14
  52. pygpt_net/ui/layout/toolbox/raw.py +52 -0
  53. pygpt_net/ui/layout/toolbox/split.py +48 -0
  54. pygpt_net/ui/layout/toolbox/toolbox.py +8 -8
  55. pygpt_net/ui/layout/toolbox/video.py +49 -0
  56. pygpt_net/ui/widget/draw/painter.py +452 -84
  57. {pygpt_net-2.6.31.dist-info → pygpt_net-2.6.33.dist-info}/METADATA +28 -11
  58. {pygpt_net-2.6.31.dist-info → pygpt_net-2.6.33.dist-info}/RECORD +61 -51
  59. {pygpt_net-2.6.31.dist-info → pygpt_net-2.6.33.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.31.dist-info → pygpt_net-2.6.33.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.31.dist-info → pygpt_net-2.6.33.dist-info}/entry_points.txt +0 -0
@@ -1,14 +1,3 @@
1
- #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
- # ================================================== #
4
- # This file is a part of PYGPT package #
5
- # Website: https://pygpt.net #
6
- # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
- # MIT License #
8
- # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.31 23:00:00 #
10
- # ================================================== #
11
-
12
1
  import threading
13
2
  from typing import Optional
14
3
 
@@ -53,6 +42,9 @@ class RealtimeSessionPyAudio(QObject):
53
42
  self._final = False
54
43
  self._tail_ms = 60 # add a small silence tail to avoid clicks
55
44
 
45
+ # one-shot guard to avoid double stop and duplicate callbacks
46
+ self._stopping = False
47
+
56
48
  # volume metering
57
49
  self._volume_emitter = volume_emitter
58
50
  self._vol_buffer = bytearray()
@@ -78,6 +70,13 @@ class RealtimeSessionPyAudio(QObject):
78
70
  except Exception:
79
71
  pass
80
72
 
73
+ # finished-state watchdog: guarantees stop()+on_stopped once playback is truly done
74
+ self._finish_timer = QTimer(self)
75
+ self._finish_timer.setTimerType(Qt.PreciseTimer)
76
+ self._finish_timer.setInterval(15) # fast but lightweight watchdog
77
+ self._finish_timer.timeout.connect(self._check_finished)
78
+ self._finish_timer.start()
79
+
81
80
  # stop callback (set by backend)
82
81
  self.on_stopped = None
83
82
 
@@ -124,15 +123,28 @@ class RealtimeSessionPyAudio(QObject):
124
123
  self._final = True
125
124
 
126
125
  def stop(self) -> None:
127
- """Stop playback and free resources."""
126
+ """Stop playback and free resources. Idempotent."""
127
+ # ensure this executes only once even if called from multiple paths
128
+ if self._stopping:
129
+ return
130
+ self._stopping = True
131
+
132
+ # stop timers first to prevent re-entry
133
+ try:
134
+ if self._finish_timer:
135
+ self._finish_timer.stop()
136
+ except Exception:
137
+ pass
128
138
  try:
129
139
  if self._vol_timer:
130
140
  self._vol_timer.stop()
131
141
  except Exception:
132
142
  pass
143
+
144
+ # gracefully stop PortAudio stream and close/terminate
133
145
  try:
134
146
  if self._stream and self._stream.is_active():
135
- self._stream.stop_stream()
147
+ self._stream.stop_stream() # drains queued audio per PortAudio docs
136
148
  except Exception:
137
149
  pass
138
150
  try:
@@ -197,11 +209,36 @@ class RealtimeSessionPyAudio(QObject):
197
209
 
198
210
  # auto-finish: when final and nothing more to play, complete and stop()
199
211
  if self._final and self._buffer_empty():
200
- QTimer.singleShot(0, self.stop) # stop on the GUI thread
212
+ # Return paComplete and request stop on the GUI thread.
213
+ # PaComplete deactivates the stream after the last callback buffer is played.
214
+ QTimer.singleShot(0, self.stop)
201
215
  return out, pyaudio.paComplete
202
216
 
203
217
  return out, pyaudio.paContinue
204
218
 
219
+ def _check_finished(self) -> None:
220
+ """
221
+ Watchdog that runs on the Qt thread to guarantee a single, reliable stop().
222
+ Triggers when PortAudio deactivates the stream, or when the buffer is fully
223
+ drained after mark_final().
224
+ """
225
+ if self._stopping:
226
+ return
227
+
228
+ # If underlying PA stream is no longer active, we are done.
229
+ try:
230
+ if self._stream is not None and not self._stream.is_active():
231
+ self.stop()
232
+ return
233
+ except Exception:
234
+ # If querying state fails, assume the stream is done and stop.
235
+ self.stop()
236
+ return
237
+
238
+ # If we've been marked final and our buffer is empty, finalize proactively.
239
+ if self._final and self._buffer_empty():
240
+ self.stop()
241
+
205
242
  def _buffer_empty(self) -> bool:
206
243
  """
207
244
  Check if internal buffer is empty.
@@ -43,8 +43,9 @@ class Output:
43
43
  return self.backends[backend]
44
44
 
45
45
  def setup(self):
46
- """Setup audio output backend"""
47
- pass
46
 + """Setup audio output backend"""
47
+ for b in self.backends.values():
48
+ b.set_rt_signals(self.window.controller.realtime.signals)
48
49
 
49
50
  def play(
50
51
  self,
@@ -6,13 +6,14 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.11 14:00:00 #
9
+ # Updated Date: 2025.09.02 09:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
13
13
  import time
14
14
 
15
- from PySide6.QtCore import QObject, Signal, QRunnable, Slot
15
+ from PySide6.QtCore import QObject, Signal, QRunnable, Slot, QEventLoop, QTimer, Qt
16
+ from PySide6.QtGui import QImage
16
17
 
17
18
 
18
19
  class Camera:
@@ -50,85 +51,400 @@ class CaptureWorker(QRunnable):
50
51
  self.args = args
51
52
  self.kwargs = kwargs
52
53
  self.window = None
54
+
55
+ # Common
53
56
  self.initialized = False
54
- self.capture = None
55
- self.frame = None
56
57
  self.allow_finish = False
58
+ self._fps_interval = 1.0 / 30.0 # default 30 FPS throttle
59
+
60
+ # Qt Multimedia objects (created in worker thread)
61
+ self.session = None
62
+ self.camera = None
63
+ self.sink = None
64
+ self.loop = None
65
+ self.poll_timer = None
66
+ self._qt_got_first_frame = False
67
+ self._probe_loop = None
68
+
69
+ # OpenCV fallback
70
+ self.cv_cap = None
71
+
72
+ # Timing (shared)
73
+ self._last_emit = 0.0
74
+
75
+ # =========================
76
+ # Qt Multimedia path
77
+ # =========================
78
+ def _select_camera_format(self, device, target_w: int, target_h: int):
79
+ """
80
+ Select best matching camera format by resolution.
57
81
 
58
- def setup_camera(self):
59
- """Initialize camera"""
82
+ :param device: QCameraDevice
83
+ :param target_w: target width
84
+ :param target_h: target height
85
+ """
60
86
  try:
61
- import cv2
62
- # get params from global config
63
- self.capture = cv2.VideoCapture(self.window.core.config.get('vision.capture.idx'))
64
- if not self.capture or not self.capture.isOpened():
65
- self.allow_finish = False
66
- self.signals.unfinished.emit()
67
- return
68
- self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.window.core.config.get('vision.capture.width'))
69
- self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.window.core.config.get('vision.capture.height'))
87
+ formats = list(device.videoFormats())
88
+ except Exception:
89
+ formats = []
90
+ if not formats:
91
+ return None
92
+
93
+ best = None
94
+ best_score = float('inf')
95
+ for f in formats:
96
+ res = f.resolution()
97
+ w, h = res.width(), res.height()
98
+ score = abs(w - target_w) + abs(h - target_h)
99
+ if score < best_score:
100
+ best_score = score
101
+ best = f
102
+ return best
103
+
104
+ def _init_qt(self) -> bool:
105
+ """
106
+ Try to initialize Qt camera pipeline.
107
+
108
+ :return: True if initialized
109
+ """
110
+ try:
111
+ from PySide6.QtMultimedia import (
112
+ QCamera,
113
+ QMediaDevices,
114
+ QMediaCaptureSession,
115
+ QVideoSink,
116
+ )
117
+
118
+ idx = int(self.window.core.config.get('vision.capture.idx'))
119
+ target_w = int(self.window.core.config.get('vision.capture.width'))
120
+ target_h = int(self.window.core.config.get('vision.capture.height'))
121
+ target_fps = 30
122
+ self._fps_interval = 1.0 / float(target_fps)
123
+
124
+ devices = list(QMediaDevices.videoInputs())
125
+ if not devices:
126
+ return False
127
+
128
+ if idx < 0 or idx >= len(devices):
129
+ idx = 0
130
+ dev = devices[idx]
131
+
132
+ self.camera = QCamera(dev)
133
+ fmt = self._select_camera_format(dev, target_w, target_h)
134
+ if fmt is not None:
135
+ self.camera.setCameraFormat(fmt)
136
+
137
+ self.session = QMediaCaptureSession()
138
+ self.session.setCamera(self.camera)
139
+
140
+ self.sink = QVideoSink()
141
+ self.sink.videoFrameChanged.connect(self.on_qt_frame_changed, Qt.DirectConnection)
142
+ self.session.setVideoOutput(self.sink)
143
+
144
+ self.camera.errorOccurred.connect(self._on_qt_camera_error, Qt.QueuedConnection)
145
+ return True
146
+
70
147
  except Exception as e:
148
+ # Qt Multimedia not available or failed to init
71
149
  self.window.core.debug.log(e)
150
+ return False
151
+
152
+ def _teardown_qt(self):
153
+ """Release Qt camera pipeline."""
154
+ try:
155
+ if self.sink is not None:
156
+ try:
157
+ self.sink.videoFrameChanged.disconnect(self.on_qt_frame_changed)
158
+ except Exception:
159
+ pass
160
+ if self.camera is not None and self.camera.isActive():
161
+ self.camera.stop()
162
+ except Exception:
163
+ pass
164
+ finally:
165
+ self.sink = None
166
+ self.session = None
167
+ self.camera = None
168
+
169
+ def _probe_qt_start(self, timeout_ms: int = 1500) -> bool:
170
+ """
171
+ Wait briefly for the first frame to confirm Qt pipeline is working.
172
+
173
+ :param timeout_ms: timeout in milliseconds
174
+ :return: True if first frame received
175
+ """
176
+ try:
177
+ if self.camera is None:
178
+ return False
179
+
180
+ self._qt_got_first_frame = False
181
+ self._probe_loop = QEventLoop()
182
+
183
+ # Timeout quits the probe loop
184
+ QTimer.singleShot(timeout_ms, self._probe_loop.quit)
185
+
186
+ # Start camera and wait for first frame or timeout
187
+ self.camera.start()
188
+ self._probe_loop.exec()
189
+
190
+ got = self._qt_got_first_frame
191
+ self._probe_loop = None
192
+ return got
193
+ except Exception as e:
194
+ self.window.core.debug.log(e)
195
+ return False
196
+
197
+ @Slot(object)
198
+ def _on_qt_camera_error(self, err):
199
+ """
200
+ Handle Qt camera errors.
201
+
202
+ :param err: error object
203
+ """
204
+ try:
205
+ # Stop loop if running
206
+ if self.loop is not None and self.loop.isRunning():
207
+ self.loop.quit()
208
+ if self._probe_loop is not None and self._probe_loop.isRunning():
209
+ self._probe_loop.quit()
210
+ except Exception:
211
+ pass
212
+ finally:
213
+ self.allow_finish = False
72
214
  if self.signals is not None:
73
- self.signals.error.emit(e)
74
- self.signals.finished.emit(e)
215
+ self.signals.error.emit(err)
216
+
217
+ @Slot(object)
218
+ def on_qt_frame_changed(self, video_frame):
219
+ """
220
+ Convert QVideoFrame to RGB numpy array and emit.
221
+
222
+ :param video_frame: QVideoFrame
223
+ """
224
+ try:
225
+ # Mark that we have a first frame for probe
226
+ if not self._qt_got_first_frame:
227
+ self._qt_got_first_frame = True
228
+ # If we are probing, quit the probe loop immediately
229
+ if self._probe_loop is not None and self._probe_loop.isRunning():
230
+ self._probe_loop.quit()
231
+
232
+ # Throttle FPS for normal operation path
233
+ now = time.monotonic()
234
+ if self.loop is not None and self.loop.isRunning():
235
+ if (now - self._last_emit) < self._fps_interval:
236
+ return
237
+
238
+ img = video_frame.toImage()
239
+ if img.isNull():
240
+ return
241
+
242
+ img = img.convertToFormat(QImage.Format.Format_RGB888)
243
+
244
+ w = img.width()
245
+ h = img.height()
246
+ bpl = img.bytesPerLine()
75
247
 
248
+ ptr = img.bits()
249
+ size = bpl * h
250
+ try:
251
+ ptr.setsize(size)
252
+ except Exception:
253
+ # Some bindings may not require setsize; ignore if unsupported
254
+ pass
255
+
256
+ import numpy as np
257
+ arr = np.frombuffer(ptr, dtype=np.uint8)
258
+
259
+ if bpl != w * 3:
260
+ arr = arr.reshape(h, bpl)[:, : w * 3]
261
+ arr = arr.reshape(h, w, 3).copy()
262
+ else:
263
+ arr = arr.reshape(h, w, 3).copy()
264
+
265
+ if self.signals is not None:
266
+ self.signals.capture.emit(arr)
267
+ self._last_emit = now
268
+
269
+ except Exception as e:
270
+ self.window.core.debug.log(e)
271
+
272
+ # =========================
273
+ # OpenCV fallback path
274
+ # =========================
275
+ def _init_cv2(self) -> bool:
276
+ """
277
+ Try to initialize OpenCV VideoCapture fallback.
278
+
279
+ :return: True if initialized
280
+ """
281
+ try:
282
+ import cv2
283
+ idx = int(self.window.core.config.get('vision.capture.idx'))
284
+ target_w = int(self.window.core.config.get('vision.capture.width'))
285
+ target_h = int(self.window.core.config.get('vision.capture.height'))
286
+ target_fps = 30
287
+ self._fps_interval = 1.0 / float(target_fps)
288
+
289
+ cap = cv2.VideoCapture(idx)
290
+ if not cap or not cap.isOpened():
291
+ return False
292
+
293
+ cap.set(cv2.CAP_PROP_FRAME_WIDTH, target_w)
294
+ cap.set(cv2.CAP_PROP_FRAME_HEIGHT, target_h)
295
+ self.cv_cap = cap
296
+ return True
297
+ except Exception as e:
298
+ self.window.core.debug.log(e)
299
+ return False
300
+
301
+ def _teardown_cv2(self):
302
+ """Release OpenCV capture."""
303
+ try:
304
+ if self.cv_cap is not None and self.cv_cap.isOpened():
305
+ self.cv_cap.release()
306
+ except Exception:
307
+ pass
308
+ finally:
309
+ self.cv_cap = None
310
+
311
+ # =========================
312
+ # Runner
313
+ # =========================
76
314
  @Slot()
77
315
  def run(self):
78
- """Frame capture loop"""
79
- target_fps = 30
80
- fps_interval = 1.0 / target_fps
316
+ """Run capture using Qt first; fall back to OpenCV if needed."""
81
317
  self.allow_finish = True
318
+ self._last_emit = 0.0
319
+
320
+ used_backend = None
82
321
  try:
83
- import cv2
84
- if not self.initialized:
85
- self.setup_camera()
86
- self.signals.started.emit()
87
- self.initialized = True
88
- last_frame_time = time.time()
89
- while True:
90
- if self.window.is_closing \
91
- or self.capture is None \
92
- or not self.capture.isOpened() \
93
- or self.window.controller.camera.stop:
94
- self.release() # release camera
95
- self.signals.stopped.emit()
96
- break
97
- _, frame = self.capture.read()
98
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
99
- now = time.time()
100
- if now - last_frame_time >= fps_interval:
101
- self.signals.capture.emit(frame)
102
- last_frame_time = now
322
+ # Try Qt Multimedia
323
+ if self._init_qt():
324
+ if self._probe_qt_start(timeout_ms=1500):
325
+ # Qt confirmed working; start main event-driven loop
326
+ used_backend = 'qt'
327
+ self.initialized = True
328
+ if self.signals is not None:
329
+ self.signals.started.emit()
330
+
331
+ self.loop = QEventLoop()
332
+
333
+ self.poll_timer = QTimer()
334
+ self.poll_timer.setTimerType(Qt.PreciseTimer)
335
+ self.poll_timer.setInterval(30)
336
+ self.poll_timer.timeout.connect(self._poll_stop_qt, Qt.DirectConnection)
337
+ self.poll_timer.start()
338
+
339
+ self.loop.exec()
340
+
341
+ if self.signals is not None:
342
+ self.signals.stopped.emit()
343
+ else:
344
+ # Fallback to OpenCV if no frames arrive quickly
345
+ print("QT camera init failed, trying CV2 fallback...")
346
+ self._teardown_qt()
347
+ else:
348
+ # Qt init failed outright, fallback to CV2
349
+ print("QT camera init failed, trying CV2 fallback...")
350
+
351
+ # Try OpenCV fallback if Qt was not used
352
+ if used_backend is None:
353
+ if self._init_cv2():
354
+ used_backend = 'cv2'
355
+ self.initialized = True
356
+ if self.signals is not None:
357
+ self.signals.started.emit()
358
+
359
+ import cv2
360
+ target_fps = 30
361
+ fps_interval = 1.0 / float(target_fps)
362
+ last_frame_time = time.time()
363
+
364
+ while True:
365
+ if self._should_stop():
366
+ break
367
+
368
+ ok, frame = self.cv_cap.read()
369
+ if not ok or frame is None:
370
+ continue
371
+
372
+ now = time.time()
373
+ if now - last_frame_time >= fps_interval:
374
+ # Convert BGR -> RGB for the controller/UI pipeline
375
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
376
+ if self.signals is not None:
377
+ self.signals.capture.emit(frame)
378
+ last_frame_time = now
379
+
380
+ if self.signals is not None:
381
+ self.signals.stopped.emit()
382
+ else:
383
+ # Both providers failed
384
+ self.allow_finish = False
103
385
 
104
386
  except Exception as e:
105
387
  self.window.core.debug.log(e)
106
388
  if self.signals is not None:
107
389
  self.signals.error.emit(e)
108
-
109
390
  finally:
110
- self.release() # release camera
391
+ # Cleanup resources
392
+ try:
393
+ if self.poll_timer is not None:
394
+ self.poll_timer.stop()
395
+ except Exception:
396
+ pass
397
+ self.poll_timer = None
398
+
399
+ if used_backend == 'qt':
400
+ self._teardown_qt()
401
+ else:
402
+ self._teardown_qt() # no-op if not initialized
403
+ self._teardown_cv2()
404
+
405
+ # Emit final state
111
406
  if self.signals is not None:
112
407
  if self.allow_finish:
113
408
  self.signals.finished.emit()
114
409
  else:
115
410
  self.signals.unfinished.emit()
411
+
116
412
  self.cleanup()
117
413
 
118
- def release(self):
119
- """Release camera"""
120
- if self.capture is not None and self.capture.isOpened():
121
- self.capture.release()
122
- self.capture = None
123
- self.frame = None
124
- self.initialized = False
414
+ def _poll_stop_qt(self):
415
+ """Check stop flags while running Qt pipeline."""
416
+ try:
417
+ if self._should_stop():
418
+ if self.camera is not None and self.camera.isActive():
419
+ self.camera.stop()
420
+ if self.loop is not None and self.loop.isRunning():
421
+ self.loop.quit()
422
+ except Exception as e:
423
+ self.window.core.debug.log(e)
424
+ if self.loop is not None and self.loop.isRunning():
425
+ self.loop.quit()
426
+
427
+ def _should_stop(self) -> bool:
428
+ """
429
+ Check external stop flags.
430
+
431
+ :return: True if should stop
432
+ """
433
+ try:
434
+ if getattr(self.window, 'is_closing', False):
435
+ return True
436
+ if self.window is not None and self.window.controller.camera.stop:
437
+ return True
438
+ except Exception:
439
+ return True
440
+ return False
125
441
 
126
442
  def cleanup(self):
127
443
  """Cleanup resources after worker execution."""
128
444
  sig = self.signals
129
445
  self.signals = None
130
- if sig is not None:
131
- try:
446
+ try:
447
+ if sig is not None:
132
448
  sig.deleteLater()
133
- except RuntimeError:
134
- pass
449
+ except RuntimeError:
450
+ pass
@@ -6,13 +6,13 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2024.12.14 08:00:00 #
9
+ # Updated Date: 2025.09.01 23:00:00 #
10
10
  # ================================================== #
11
11
 
12
- import os
13
12
  import uuid
14
- from time import strftime
13
+ import os
15
14
  from typing import List, Dict
15
+ from time import strftime
16
16
 
17
17
  from PySide6.QtCore import Slot, QObject
18
18
 
@@ -73,7 +73,7 @@ class Image(QObject):
73
73
  prompt,
74
74
  )
75
75
 
76
- @Slot()
76
+ @Slot(object)
77
77
  def handle_status(self, msg: str):
78
78
  """
79
79
  Handle thread status message
@@ -90,7 +90,7 @@ class Image(QObject):
90
90
  if is_log:
91
91
  print(msg)
92
92
 
93
- @Slot()
93
+ @Slot(object)
94
94
  def handle_error(self, msg: any):
95
95
  """
96
96
  Handle thread error message
@@ -99,6 +99,7 @@ class Image(QObject):
99
99
  """
100
100
  self.window.update_status(msg)
101
101
  self.window.core.debug.log(msg)
102
+ self.window.ui.dialogs.alert(msg)
102
103
 
103
104
  def save_image(self, path: str, image: bytes) -> bool:
104
105
  """
@@ -134,11 +134,7 @@ class RealtimeWorker(QRunnable):
134
134
  event = RealtimeEvent(RealtimeEvent.RT_OUTPUT_AUDIO_ERROR, {"error": e})
135
135
  self.opts.rt_signals.response.emit(event) if self.opts.rt_signals else None
136
136
  finally:
137
- try:
138
- event = RealtimeEvent(RealtimeEvent.RT_OUTPUT_AUDIO_END, {"ctx": self.ctx})
139
- self.opts.rt_signals.response.emit(event) if self.opts.rt_signals else None
140
- except Exception:
141
- pass
137
+ pass
142
138
  finally:
143
139
  # Robust asyncio teardown to avoid hangs on subsequent runs
144
140
  if loop is not None: