pygpt-net 2.6.33__py3-none-any.whl → 2.6.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. pygpt_net/CHANGELOG.txt +7 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/assistant/batch.py +14 -4
  4. pygpt_net/controller/assistant/files.py +1 -0
  5. pygpt_net/controller/assistant/store.py +195 -1
  6. pygpt_net/controller/camera/camera.py +1 -1
  7. pygpt_net/controller/chat/common.py +50 -46
  8. pygpt_net/controller/config/placeholder.py +95 -75
  9. pygpt_net/controller/dialogs/confirm.py +3 -1
  10. pygpt_net/controller/media/media.py +11 -3
  11. pygpt_net/controller/painter/common.py +231 -13
  12. pygpt_net/core/assistants/files.py +18 -0
  13. pygpt_net/core/camera/camera.py +31 -402
  14. pygpt_net/core/camera/worker.py +430 -0
  15. pygpt_net/core/filesystem/url.py +3 -0
  16. pygpt_net/core/render/web/body.py +65 -9
  17. pygpt_net/core/text/utils.py +3 -0
  18. pygpt_net/data/config/config.json +3 -3
  19. pygpt_net/data/config/models.json +3 -3
  20. pygpt_net/data/config/settings.json +10 -5
  21. pygpt_net/data/locale/locale.de.ini +8 -7
  22. pygpt_net/data/locale/locale.en.ini +9 -6
  23. pygpt_net/data/locale/locale.es.ini +8 -7
  24. pygpt_net/data/locale/locale.fr.ini +8 -7
  25. pygpt_net/data/locale/locale.it.ini +8 -7
  26. pygpt_net/data/locale/locale.pl.ini +8 -7
  27. pygpt_net/data/locale/locale.uk.ini +8 -7
  28. pygpt_net/data/locale/locale.zh.ini +8 -7
  29. pygpt_net/item/assistant.py +13 -1
  30. pygpt_net/provider/api/google/__init__.py +32 -23
  31. pygpt_net/provider/api/openai/store.py +45 -1
  32. pygpt_net/provider/llms/google.py +4 -0
  33. pygpt_net/ui/dialog/assistant_store.py +213 -203
  34. pygpt_net/ui/layout/chat/input.py +3 -3
  35. pygpt_net/ui/widget/draw/painter.py +16 -1
  36. pygpt_net/ui/widget/option/combo.py +5 -1
  37. pygpt_net/ui/widget/textarea/input.py +273 -3
  38. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/METADATA +9 -2
  39. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/RECORD +42 -41
  40. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/LICENSE +0 -0
  41. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/WHEEL +0 -0
  42. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/entry_points.txt +0 -0
@@ -6,14 +6,11 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.02 09:00:00 #
+ # Updated Date: 2025.09.02 16:00:00 #
  # ================================================== #

  import os
- import time
-
- from PySide6.QtCore import QObject, Signal, QRunnable, Slot, QEventLoop, QTimer, Qt
- from PySide6.QtGui import QImage
+ from typing import List


  class Camera:
@@ -33,418 +30,50 @@ class Camera:
          if not os.path.exists(img_dir):
              os.makedirs(img_dir, exist_ok=True)

-
- class CaptureSignals(QObject):
-     finished = Signal()
-     unfinished = Signal()
-     destroyed = Signal()
-     started = Signal()
-     stopped = Signal()
-     capture = Signal(object)
-     error = Signal(object)
-
-
- class CaptureWorker(QRunnable):
-     def __init__(self, *args, **kwargs):
-         super().__init__()
-         self.signals = CaptureSignals()
-         self.args = args
-         self.kwargs = kwargs
-         self.window = None
-
-         # Common
-         self.initialized = False
-         self.allow_finish = False
-         self._fps_interval = 1.0 / 30.0 # default 30 FPS throttle
-
-         # Qt Multimedia objects (created in worker thread)
-         self.session = None
-         self.camera = None
-         self.sink = None
-         self.loop = None
-         self.poll_timer = None
-         self._qt_got_first_frame = False
-         self._probe_loop = None
-
-         # OpenCV fallback
-         self.cv_cap = None
-
-         # Timing (shared)
-         self._last_emit = 0.0
-
-     # =========================
-     # Qt Multimedia path
-     # =========================
-     def _select_camera_format(self, device, target_w: int, target_h: int):
-         """
-         Select best matching camera format by resolution.
-
-         :param device: QCameraDevice
-         :param target_w: target width
-         :param target_h: target height
-         """
-         try:
-             formats = list(device.videoFormats())
-         except Exception:
-             formats = []
-         if not formats:
-             return None
-
-         best = None
-         best_score = float('inf')
-         for f in formats:
-             res = f.resolution()
-             w, h = res.width(), res.height()
-             score = abs(w - target_w) + abs(h - target_h)
-             if score < best_score:
-                 best_score = score
-                 best = f
-         return best
-
-     def _init_qt(self) -> bool:
-         """
-         Try to initialize Qt camera pipeline.
-
-         :return: True if initialized
-         """
-         try:
-             from PySide6.QtMultimedia import (
-                 QCamera,
-                 QMediaDevices,
-                 QMediaCaptureSession,
-                 QVideoSink,
-             )
-
-             idx = int(self.window.core.config.get('vision.capture.idx'))
-             target_w = int(self.window.core.config.get('vision.capture.width'))
-             target_h = int(self.window.core.config.get('vision.capture.height'))
-             target_fps = 30
-             self._fps_interval = 1.0 / float(target_fps)
-
-             devices = list(QMediaDevices.videoInputs())
-             if not devices:
-                 return False
-
-             if idx < 0 or idx >= len(devices):
-                 idx = 0
-             dev = devices[idx]
-
-             self.camera = QCamera(dev)
-             fmt = self._select_camera_format(dev, target_w, target_h)
-             if fmt is not None:
-                 self.camera.setCameraFormat(fmt)
-
-             self.session = QMediaCaptureSession()
-             self.session.setCamera(self.camera)
-
-             self.sink = QVideoSink()
-             self.sink.videoFrameChanged.connect(self.on_qt_frame_changed, Qt.DirectConnection)
-             self.session.setVideoOutput(self.sink)
-
-             self.camera.errorOccurred.connect(self._on_qt_camera_error, Qt.QueuedConnection)
-             return True
-
-         except Exception as e:
-             # Qt Multimedia not available or failed to init
-             self.window.core.debug.log(e)
-             return False
-
-     def _teardown_qt(self):
-         """Release Qt camera pipeline."""
-         try:
-             if self.sink is not None:
-                 try:
-                     self.sink.videoFrameChanged.disconnect(self.on_qt_frame_changed)
-                 except Exception:
-                     pass
-             if self.camera is not None and self.camera.isActive():
-                 self.camera.stop()
-         except Exception:
-             pass
-         finally:
-             self.sink = None
-             self.session = None
-             self.camera = None
-
-     def _probe_qt_start(self, timeout_ms: int = 1500) -> bool:
-         """
-         Wait briefly for the first frame to confirm Qt pipeline is working.
-
-         :param timeout_ms: timeout in milliseconds
-         :return: True if first frame received
-         """
-         try:
-             if self.camera is None:
-                 return False
-
-             self._qt_got_first_frame = False
-             self._probe_loop = QEventLoop()
-
-             # Timeout quits the probe loop
-             QTimer.singleShot(timeout_ms, self._probe_loop.quit)
-
-             # Start camera and wait for first frame or timeout
-             self.camera.start()
-             self._probe_loop.exec()
-
-             got = self._qt_got_first_frame
-             self._probe_loop = None
-             return got
-         except Exception as e:
-             self.window.core.debug.log(e)
-             return False
-
-     @Slot(object)
-     def _on_qt_camera_error(self, err):
-         """
-         Handle Qt camera errors.
-
-         :param err: error object
+     def get_devices_data(self) -> List[dict]:
          """
-         try:
-             # Stop loop if running
-             if self.loop is not None and self.loop.isRunning():
-                 self.loop.quit()
-             if self._probe_loop is not None and self._probe_loop.isRunning():
-                 self._probe_loop.quit()
-         except Exception:
-             pass
-         finally:
-             self.allow_finish = False
-             if self.signals is not None:
-                 self.signals.error.emit(err)
+         Return a list of camera devices for UI selection.

-     @Slot(object)
-     def on_qt_frame_changed(self, video_frame):
-         """
-         Convert QVideoFrame to RGB numpy array and emit.
+         Format:
+         [
+             {'id': <int index>, 'name': <str description>},
+             ...
+         ]

-         :param video_frame: QVideoFrame
+         'id' is the ordinal index used by vision.capture.idx.
          """
          try:
-             # Mark that we have a first frame for probe
-             if not self._qt_got_first_frame:
-                 self._qt_got_first_frame = True
-                 # If we are probing, quit the probe loop immediately
-                 if self._probe_loop is not None and self._probe_loop.isRunning():
-                     self._probe_loop.quit()
-
-             # Throttle FPS for normal operation path
-             now = time.monotonic()
-             if self.loop is not None and self.loop.isRunning():
-                 if (now - self._last_emit) < self._fps_interval:
-                     return
-
-             img = video_frame.toImage()
-             if img.isNull():
-                 return
-
-             img = img.convertToFormat(QImage.Format.Format_RGB888)
-
-             w = img.width()
-             h = img.height()
-             bpl = img.bytesPerLine()
-
-             ptr = img.bits()
-             size = bpl * h
-             try:
-                 ptr.setsize(size)
-             except Exception:
-                 # Some bindings may not require setsize; ignore if unsupported
-                 pass
-
-             import numpy as np
-             arr = np.frombuffer(ptr, dtype=np.uint8)
-
-             if bpl != w * 3:
-                 arr = arr.reshape(h, bpl)[:, : w * 3]
-                 arr = arr.reshape(h, w, 3).copy()
-             else:
-                 arr = arr.reshape(h, w, 3).copy()
-
-             if self.signals is not None:
-                 self.signals.capture.emit(arr)
-             self._last_emit = now
-
+             from PySide6.QtMultimedia import QMediaDevices
          except Exception as e:
+             # Qt Multimedia not available
              self.window.core.debug.log(e)
+             return []

-     # =========================
-     # OpenCV fallback path
-     # =========================
-     def _init_cv2(self) -> bool:
-         """
-         Try to initialize OpenCV VideoCapture fallback.
-
-         :return: True if initialized
-         """
          try:
-             import cv2
-             idx = int(self.window.core.config.get('vision.capture.idx'))
-             target_w = int(self.window.core.config.get('vision.capture.width'))
-             target_h = int(self.window.core.config.get('vision.capture.height'))
-             target_fps = 30
-             self._fps_interval = 1.0 / float(target_fps)
-
-             cap = cv2.VideoCapture(idx)
-             if not cap or not cap.isOpened():
-                 return False
-
-             cap.set(cv2.CAP_PROP_FRAME_WIDTH, target_w)
-             cap.set(cv2.CAP_PROP_FRAME_HEIGHT, target_h)
-             self.cv_cap = cap
-             return True
+             devices = list(QMediaDevices.videoInputs())
          except Exception as e:
              self.window.core.debug.log(e)
-             return False
-
-     def _teardown_cv2(self):
-         """Release OpenCV capture."""
-         try:
-             if self.cv_cap is not None and self.cv_cap.isOpened():
-                 self.cv_cap.release()
-         except Exception:
-             pass
-         finally:
-             self.cv_cap = None
-
-     # =========================
-     # Runner
-     # =========================
-     @Slot()
-     def run(self):
-         """Run capture using Qt first; fall back to OpenCV if needed."""
-         self.allow_finish = True
-         self._last_emit = 0.0
-
-         used_backend = None
-         try:
-             # Try Qt Multimedia
-             if self._init_qt():
-                 if self._probe_qt_start(timeout_ms=1500):
-                     # Qt confirmed working; start main event-driven loop
-                     used_backend = 'qt'
-                     self.initialized = True
-                     if self.signals is not None:
-                         self.signals.started.emit()
-
-                     self.loop = QEventLoop()
-
-                     self.poll_timer = QTimer()
-                     self.poll_timer.setTimerType(Qt.PreciseTimer)
-                     self.poll_timer.setInterval(30)
-                     self.poll_timer.timeout.connect(self._poll_stop_qt, Qt.DirectConnection)
-                     self.poll_timer.start()
-
-                     self.loop.exec()
-
-                     if self.signals is not None:
-                         self.signals.stopped.emit()
-                 else:
-                     # Fallback to OpenCV if no frames arrive quickly
-                     print("QT camera init failed, trying CV2 fallback...")
-                     self._teardown_qt()
-             else:
-                 # Qt init failed outright, fallback to CV2
-                 print("QT camera init failed, trying CV2 fallback...")
-
-             # Try OpenCV fallback if Qt was not used
-             if used_backend is None:
-                 if self._init_cv2():
-                     used_backend = 'cv2'
-                     self.initialized = True
-                     if self.signals is not None:
-                         self.signals.started.emit()
+             return []

-                     import cv2
-                     target_fps = 30
-                     fps_interval = 1.0 / float(target_fps)
-                     last_frame_time = time.time()
-
-                     while True:
-                         if self._should_stop():
-                             break
-
-                         ok, frame = self.cv_cap.read()
-                         if not ok or frame is None:
-                             continue
-
-                         now = time.time()
-                         if now - last_frame_time >= fps_interval:
-                             # Convert BGR -> RGB for the controller/UI pipeline
-                             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-                             if self.signals is not None:
-                                 self.signals.capture.emit(frame)
-                             last_frame_time = now
-
-                     if self.signals is not None:
-                         self.signals.stopped.emit()
-                 else:
-                     # Both providers failed
-                     self.allow_finish = False
-
-         except Exception as e:
-             self.window.core.debug.log(e)
-             if self.signals is not None:
-                 self.signals.error.emit(e)
-         finally:
-             # Cleanup resources
+         result = []
+         for idx, dev in enumerate(devices):
              try:
-                 if self.poll_timer is not None:
-                     self.poll_timer.stop()
+                 name = dev.description()
              except Exception:
-                 pass
-             self.poll_timer = None
-
-             if used_backend == 'qt':
-                 self._teardown_qt()
-             else:
-                 self._teardown_qt() # no-op if not initialized
-                 self._teardown_cv2()
-
-             # Emit final state
-             if self.signals is not None:
-                 if self.allow_finish:
-                     self.signals.finished.emit()
-                 else:
-                     self.signals.unfinished.emit()
+                 name = f"Camera {idx}"
+             result.append({'id': idx, 'name': name})
+         return result

-             self.cleanup()
-
-     def _poll_stop_qt(self):
-         """Check stop flags while running Qt pipeline."""
-         try:
-             if self._should_stop():
-                 if self.camera is not None and self.camera.isActive():
-                     self.camera.stop()
-                 if self.loop is not None and self.loop.isRunning():
-                     self.loop.quit()
-         except Exception as e:
-             self.window.core.debug.log(e)
-             if self.loop is not None and self.loop.isRunning():
-                 self.loop.quit()
-
-     def _should_stop(self) -> bool:
+     def get_devices(self) -> List[dict]:
          """
-         Check external stop flags.
+         Get choices list of single-pair dicts {id: name}.

-         :return: True if should stop
+         Example:
+         [
+             {'0': 'Integrated Camera'},
+             {'1': 'USB Camera'},
+             ...
+         ]
          """
-         try:
-             if getattr(self.window, 'is_closing', False):
-                 return True
-             if self.window is not None and self.window.controller.camera.stop:
-                 return True
-         except Exception:
-             return True
-         return False
-
-     def cleanup(self):
-         """Cleanup resources after worker execution."""
-         sig = self.signals
-         self.signals = None
-         try:
-             if sig is not None:
-                 sig.deleteLater()
-         except RuntimeError:
-             pass
+         items = self.get_devices_data()
+         return [{str(item['id']): item['name']} for item in items]
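
Note on the added methods above: get_devices() simply re-keys the get_devices_data() output into single-pair dicts, and the string key maps back to the ordinal index stored in vision.capture.idx. A minimal standalone sketch of that round trip follows; the helper names and the sample device list here are illustrative only, not part of the package.

from typing import List

def to_choices(items: List[dict]) -> List[dict]:
    # [{'id': 0, 'name': 'Integrated Camera'}] -> [{'0': 'Integrated Camera'}]
    return [{str(item['id']): item['name']} for item in items]

def selected_to_idx(selected_key: str, fallback: int = 0) -> int:
    # The key of the chosen single-pair dict is the ordinal camera index
    try:
        return int(selected_key)
    except (TypeError, ValueError):
        return fallback

devices = [{'id': 0, 'name': 'Integrated Camera'}, {'id': 1, 'name': 'USB Camera'}]
choices = to_choices(devices)
print(choices)               # [{'0': 'Integrated Camera'}, {'1': 'USB Camera'}]
print(selected_to_idx('1'))  # 1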