pygpt-net 2.6.33__py3-none-any.whl → 2.6.34__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- pygpt_net/CHANGELOG.txt +7 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/assistant/batch.py +14 -4
- pygpt_net/controller/assistant/files.py +1 -0
- pygpt_net/controller/assistant/store.py +195 -1
- pygpt_net/controller/camera/camera.py +1 -1
- pygpt_net/controller/chat/common.py +50 -46
- pygpt_net/controller/config/placeholder.py +95 -75
- pygpt_net/controller/dialogs/confirm.py +3 -1
- pygpt_net/controller/media/media.py +11 -3
- pygpt_net/controller/painter/common.py +231 -13
- pygpt_net/core/assistants/files.py +18 -0
- pygpt_net/core/camera/camera.py +31 -402
- pygpt_net/core/camera/worker.py +430 -0
- pygpt_net/core/filesystem/url.py +3 -0
- pygpt_net/core/render/web/body.py +65 -9
- pygpt_net/core/text/utils.py +3 -0
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/settings.json +10 -5
- pygpt_net/data/locale/locale.de.ini +8 -7
- pygpt_net/data/locale/locale.en.ini +9 -6
- pygpt_net/data/locale/locale.es.ini +8 -7
- pygpt_net/data/locale/locale.fr.ini +8 -7
- pygpt_net/data/locale/locale.it.ini +8 -7
- pygpt_net/data/locale/locale.pl.ini +8 -7
- pygpt_net/data/locale/locale.uk.ini +8 -7
- pygpt_net/data/locale/locale.zh.ini +8 -7
- pygpt_net/item/assistant.py +13 -1
- pygpt_net/provider/api/google/__init__.py +32 -23
- pygpt_net/provider/api/openai/store.py +45 -1
- pygpt_net/provider/llms/google.py +4 -0
- pygpt_net/ui/dialog/assistant_store.py +213 -203
- pygpt_net/ui/layout/chat/input.py +3 -3
- pygpt_net/ui/widget/draw/painter.py +16 -1
- pygpt_net/ui/widget/option/combo.py +5 -1
- pygpt_net/ui/widget/textarea/input.py +273 -3
- {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/METADATA +9 -2
- {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/RECORD +42 -41
- {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.34.dist-info}/entry_points.txt +0 -0
pygpt_net/core/camera/worker.py ADDED
@@ -0,0 +1,430 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================== #
# This file is a part of PYGPT package               #
# Website: https://pygpt.net                         #
# GitHub: https://github.com/szczyglis-dev/py-gpt    #
# MIT License                                        #
# Created By  : Marcin Szczygliński                  #
# Updated Date: 2025.09.02 16:00:00                  #
# ================================================== #

import time

from PySide6.QtCore import QObject, Signal, QRunnable, Slot, QEventLoop, QTimer, Qt
from PySide6.QtGui import QImage


class CaptureSignals(QObject):
    finished = Signal()
    unfinished = Signal()
    destroyed = Signal()
    started = Signal()
    stopped = Signal()
    capture = Signal(object)
    error = Signal(object)


class CaptureWorker(QRunnable):
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.signals = CaptureSignals()
        self.args = args
        self.kwargs = kwargs
        self.window = None

        # Common
        self.initialized = False
        self.allow_finish = False
        self._fps_interval = 1.0 / 30.0  # default 30 FPS throttle

        # Qt Multimedia objects (created in worker thread)
        self.session = None
        self.camera = None
        self.sink = None
        self.loop = None
        self.poll_timer = None
        self._qt_got_first_frame = False
        self._probe_loop = None

        # OpenCV fallback
        self.cv_cap = None

        # Timing (shared)
        self._last_emit = 0.0

    # =========================
    # Qt Multimedia path
    # =========================
    def _select_camera_format(self, device, target_w: int, target_h: int):
        """
        Select best matching camera format by resolution.

        :param device: QCameraDevice
        :param target_w: target width
        :param target_h: target height
        """
        try:
            formats = list(device.videoFormats())
        except Exception:
            formats = []
        if not formats:
            return None

        best = None
        best_score = float('inf')
        for f in formats:
            res = f.resolution()
            w, h = res.width(), res.height()
            score = abs(w - target_w) + abs(h - target_h)
            if score < best_score:
                best_score = score
                best = f
        return best

    def _init_qt(self) -> bool:
        """
        Try to initialize Qt camera pipeline.

        :return: True if initialized
        """
        try:
            from PySide6.QtMultimedia import (
                QCamera,
                QMediaDevices,
                QMediaCaptureSession,
                QVideoSink,
            )

            idx = int(self.window.core.config.get('vision.capture.idx') or 0)
            target_w = int(self.window.core.config.get('vision.capture.width'))
            target_h = int(self.window.core.config.get('vision.capture.height'))
            target_fps = 30
            self._fps_interval = 1.0 / float(target_fps)

            devices = list(QMediaDevices.videoInputs())
            if not devices:
                return False

            if idx < 0 or idx >= len(devices):
                idx = 0
            dev = devices[idx]

            self.camera = QCamera(dev)
            fmt = self._select_camera_format(dev, target_w, target_h)
            if fmt is not None:
                self.camera.setCameraFormat(fmt)

            self.session = QMediaCaptureSession()
            self.session.setCamera(self.camera)

            self.sink = QVideoSink()
            self.sink.videoFrameChanged.connect(self.on_qt_frame_changed, Qt.DirectConnection)
            self.session.setVideoOutput(self.sink)

            self.camera.errorOccurred.connect(self._on_qt_camera_error, Qt.QueuedConnection)
            return True

        except Exception as e:
            # Qt Multimedia not available or failed to init
            self.window.core.debug.log(e)
            return False

    def _teardown_qt(self):
        """Release Qt camera pipeline."""
        try:
            if self.sink is not None:
                try:
                    self.sink.videoFrameChanged.disconnect(self.on_qt_frame_changed)
                except Exception:
                    pass
            if self.camera is not None and self.camera.isActive():
                self.camera.stop()
        except Exception:
            pass
        finally:
            self.sink = None
            self.session = None
            self.camera = None

    def _probe_qt_start(self, timeout_ms: int = 1500) -> bool:
        """
        Wait briefly for the first frame to confirm Qt pipeline is working.

        :param timeout_ms: timeout in milliseconds
        :return: True if first frame received
        """
        try:
            if self.camera is None:
                return False

            self._qt_got_first_frame = False
            self._probe_loop = QEventLoop()

            # Timeout quits the probe loop
            QTimer.singleShot(timeout_ms, self._probe_loop.quit)

            # Start camera and wait for first frame or timeout
            self.camera.start()
            self._probe_loop.exec()

            got = self._qt_got_first_frame
            self._probe_loop = None
            return got
        except Exception as e:
            self.window.core.debug.log(e)
            return False

    @Slot(object)
    def _on_qt_camera_error(self, err):
        """
        Handle Qt camera errors.

        :param err: error object
        """
        try:
            # Stop loop if running
            if self.loop is not None and self.loop.isRunning():
                self.loop.quit()
            if self._probe_loop is not None and self._probe_loop.isRunning():
                self._probe_loop.quit()
        except Exception:
            pass
        finally:
            self.allow_finish = False
            if self.signals is not None:
                self.signals.error.emit(err)

    @Slot(object)
    def on_qt_frame_changed(self, video_frame):
        """
        Convert QVideoFrame to RGB numpy array and emit.

        :param video_frame: QVideoFrame
        """
        try:
            # Mark that we have a first frame for probe
            if not self._qt_got_first_frame:
                self._qt_got_first_frame = True
                # If we are probing, quit the probe loop immediately
                if self._probe_loop is not None and self._probe_loop.isRunning():
                    self._probe_loop.quit()

            # Throttle FPS for normal operation path
            now = time.monotonic()
            if self.loop is not None and self.loop.isRunning():
                if (now - self._last_emit) < self._fps_interval:
                    return

            img = video_frame.toImage()
            if img.isNull():
                return

            img = img.convertToFormat(QImage.Format.Format_RGB888)

            w = img.width()
            h = img.height()
            bpl = img.bytesPerLine()

            ptr = img.bits()
            size = bpl * h
            try:
                ptr.setsize(size)
            except Exception:
                # Some bindings may not require setsize; ignore if unsupported
                pass

            import numpy as np
            arr = np.frombuffer(ptr, dtype=np.uint8)

            if bpl != w * 3:
                arr = arr.reshape(h, bpl)[:, : w * 3]
                arr = arr.reshape(h, w, 3).copy()
            else:
                arr = arr.reshape(h, w, 3).copy()

            if self.signals is not None:
                self.signals.capture.emit(arr)
            self._last_emit = now

        except Exception as e:
            self.window.core.debug.log(e)

    # =========================
    # OpenCV fallback path
    # =========================
    def _init_cv2(self) -> bool:
        """
        Try to initialize OpenCV VideoCapture fallback.

        :return: True if initialized
        """
        try:
            import cv2
            idx = int(self.window.core.config.get('vision.capture.idx'))
            target_w = int(self.window.core.config.get('vision.capture.width'))
            target_h = int(self.window.core.config.get('vision.capture.height'))
            target_fps = 30
            self._fps_interval = 1.0 / float(target_fps)

            cap = cv2.VideoCapture(idx)
            if not cap or not cap.isOpened():
                return False

            cap.set(cv2.CAP_PROP_FRAME_WIDTH, target_w)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, target_h)
            self.cv_cap = cap
            return True
        except Exception as e:
            self.window.core.debug.log(e)
            return False

    def _teardown_cv2(self):
        """Release OpenCV capture."""
        try:
            if self.cv_cap is not None and self.cv_cap.isOpened():
                self.cv_cap.release()
        except Exception:
            pass
        finally:
            self.cv_cap = None

    # =========================
    # Runner
    # =========================
    @Slot()
    def run(self):
        """Run capture using Qt first; fall back to OpenCV if needed."""
        self.allow_finish = True
        self._last_emit = 0.0

        used_backend = None
        try:
            # Try Qt Multimedia
            if self._init_qt():
                if self._probe_qt_start(timeout_ms=1500):
                    # Qt confirmed working; start main event-driven loop
                    used_backend = 'qt'
                    self.initialized = True
                    if self.signals is not None:
                        self.signals.started.emit()

                    self.loop = QEventLoop()

                    self.poll_timer = QTimer()
                    self.poll_timer.setTimerType(Qt.PreciseTimer)
                    self.poll_timer.setInterval(30)
                    self.poll_timer.timeout.connect(self._poll_stop_qt, Qt.DirectConnection)
                    self.poll_timer.start()

                    self.loop.exec()

                    if self.signals is not None:
                        self.signals.stopped.emit()
                else:
                    # Fallback to OpenCV if no frames arrive quickly
                    print("QT camera init failed, trying CV2 fallback...")
                    self._teardown_qt()
            else:
                # Qt init failed outright, fallback to CV2
                print("QT camera init failed, trying CV2 fallback...")

            # Try OpenCV fallback if Qt was not used
            if used_backend is None:
                if self._init_cv2():
                    used_backend = 'cv2'
                    self.initialized = True
                    if self.signals is not None:
                        self.signals.started.emit()

                    import cv2
                    target_fps = 30
                    fps_interval = 1.0 / float(target_fps)
                    last_frame_time = time.time()

                    while True:
                        if self._should_stop():
                            break

                        ok, frame = self.cv_cap.read()
                        if not ok or frame is None:
                            continue

                        now = time.time()
                        if now - last_frame_time >= fps_interval:
                            # Convert BGR -> RGB for the controller/UI pipeline
                            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                            if self.signals is not None:
                                self.signals.capture.emit(frame)
                            last_frame_time = now

                    if self.signals is not None:
                        self.signals.stopped.emit()
                else:
                    # Both providers failed
                    self.allow_finish = False

        except Exception as e:
            self.window.core.debug.log(e)
            if self.signals is not None:
                self.signals.error.emit(e)
        finally:
            # Cleanup resources
            try:
                if self.poll_timer is not None:
                    self.poll_timer.stop()
            except Exception:
                pass
            self.poll_timer = None

            if used_backend == 'qt':
                self._teardown_qt()
            else:
                self._teardown_qt()  # no-op if not initialized
                self._teardown_cv2()

            # Emit final state
            if self.signals is not None:
                if self.allow_finish:
                    self.signals.finished.emit()
                else:
                    self.signals.unfinished.emit()

            self.cleanup()

    def _poll_stop_qt(self):
        """Check stop flags while running Qt pipeline."""
        try:
            if self._should_stop():
                if self.camera is not None and self.camera.isActive():
                    self.camera.stop()
                if self.loop is not None and self.loop.isRunning():
                    self.loop.quit()
        except Exception as e:
            self.window.core.debug.log(e)
            if self.loop is not None and self.loop.isRunning():
                self.loop.quit()

    def _should_stop(self) -> bool:
        """
        Check external stop flags.

        :return: True if should stop
        """
        try:
            if getattr(self.window, 'is_closing', False):
                return True
            if self.window is not None and self.window.controller.camera.stop:
                return True
        except Exception:
            return True
        return False

    def cleanup(self):
        """Cleanup resources after worker execution."""
        sig = self.signals
        self.signals = None
        try:
            if sig is not None:
                sig.deleteLater()
        except RuntimeError:
            pass
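For context, a minimal usage sketch (not part of the diff): CaptureWorker is a QRunnable, so a controller would typically inject the window object and start it on a thread pool. The start_capture helper and on_frame handler below are hypothetical; the worker itself only assumes window.core.config, window.core.debug, and window.controller.camera.stop.

from PySide6.QtCore import QThreadPool

def start_capture(window, on_frame):
    """Hypothetical helper: run CaptureWorker off the GUI thread."""
    worker = CaptureWorker()
    worker.window = window                    # injected dependency, as above
    worker.signals.capture.connect(on_frame)  # receives RGB numpy arrays
    worker.signals.error.connect(window.core.debug.log)
    QThreadPool.globalInstance().start(worker)  # run() executes off the GUI thread
    return worker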
pygpt_net/core/filesystem/url.py CHANGED
@@ -44,6 +44,9 @@ class Url:
         if pid in self.window.ui.nodes['output']:
             self.window.ui.nodes['output'][pid].on_focus_js()
             return
+        elif url.toString().startswith('bridge://play_video/'):
+            self.window.controller.media.play_video(url.toString().replace("bridge://play_video/", ""))
+            return
 
         # -------------
         extra_schemes = (
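The handler above strips a fixed prefix and forwards the remainder to the media controller. A small sketch of that link contract (helper name hypothetical; the real code inlines the replace call):

PREFIX = "bridge://play_video/"

def extract_video_src(href: str):
    """Return the media source for a play_video bridge link, else None."""
    if href.startswith(PREFIX):
        return href.replace(PREFIX, "", 1)
    return None

# extract_video_src("bridge://play_video/file:///tmp/clip.mp4")
# -> "file:///tmp/clip.mp4"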
pygpt_net/core/render/web/body.py CHANGED
@@ -68,6 +68,13 @@ class Body:
         let pendingHighlightRoot = null;
         let pendingHighlightMath = false;
         let scrollScheduled = false;
+
+        // Auto-follow state: when false, live stream auto-scroll is suppressed
+        let autoFollow = true;
+        let lastScrollTop = 0;
+        // Tracks whether user has performed any scroll-related interaction
+        let userInteracted = false;
+        const AUTO_FOLLOW_REENABLE_PX = 8;  // px from bottom to re-enable auto-follow
 
         // timers
         let tipsTimers = [];
@@ -214,6 +221,8 @@ class Body:
             return distanceToBottom <= marginPx;
         }
         function scheduleScroll(live = false) {
+            // Skip scheduling live auto-scroll when user disabled follow
+            if (live === true && autoFollow !== true) return;
             if (scrollScheduled) return;
             scrollScheduled = true;
             requestAnimationFrame(function() {
@@ -221,21 +230,32 @@ class Body:
                 scrollToBottom(live);
             });
         }
+        // Force immediate scroll to bottom (pre-interaction bootstrap)
+        function forceScrollToBottomImmediate() {
+            const el = document.scrollingElement || document.documentElement;
+            el.scrollTop = el.scrollHeight;  // no behavior, no RAF, deterministic
+            prevScroll = el.scrollHeight;
+        }
         function scrollToBottom(live = false) {
             const el = document.scrollingElement || document.documentElement;
             const marginPx = 450;
-
-
-
-
-
+            const behavior = (live === true) ? 'instant' : 'smooth';
+
+            // Respect user-follow state during live updates
+            if (live === true && autoFollow !== true) {
+                // Keep prevScroll consistent for potential consumers
+                prevScroll = el.scrollHeight;
+                return;
             }
-
+
+            // Allow initial auto-follow before any user interaction
+            if ((live === true && userInteracted === false) || isNearBottom(marginPx) || live == false) {
                 el.scrollTo({ top: el.scrollHeight, behavior });
             }
             prevScroll = el.scrollHeight;
         }
         function appendToInput(content) {
+            userInteracted = false;
             const element = els.appendInput || document.getElementById('_append_input_');
             if (element) {
                 element.insertAdjacentHTML('beforeend', content);
@@ -286,6 +306,7 @@ class Body:
             if (DEBUG_MODE) {
                 log("-- CLEAN DOM --");
             }
+            userInteracted = false;
             const el = els.nodes || document.getElementById('_nodes_');
             if (el) {
                 el.replaceChildren();
@@ -381,8 +402,11 @@ class Body:
             if (DEBUG_MODE) {
                 log("STREAM BEGIN");
             }
+            userInteracted = false;
             clearOutput();
-
+            // Ensure initial auto-follow baseline before any chunks overflow
+            forceScrollToBottomImmediate();
+            scheduleScroll();  // keep existing logic
         }
         function endStream() {
             if (DEBUG_MODE) {
@@ -481,7 +505,12 @@ class Body:
                 }
             }
         }
-
+            // Initial auto-follow until first user interaction
+            if (userInteracted === false) {
+                forceScrollToBottomImmediate();
+            } else {
+                scheduleScroll(true);
+            }
         }
         function nextStream() {
             hideTips();
@@ -808,6 +837,30 @@ class Body:
                 removeClassFromMsg(id, 'msg-highlight');
             }
         });
+        // Wheel up disables auto-follow immediately (works even at absolute bottom)
+        document.addEventListener('wheel', function(ev) {
+            userInteracted = true;
+            if (ev.deltaY < 0) {
+                autoFollow = false;
+            }
+        }, { passive: true });
+
+        // Track scroll direction and restore auto-follow when user returns to bottom
+        window.addEventListener('scroll', function() {
+            const el = document.scrollingElement || document.documentElement;
+            const top = el.scrollTop;
+
+            // User scrolled up (ignore tiny jitter)
+            if (top + 1 < lastScrollTop) {
+                autoFollow = false;
+            } else if (!autoFollow) {
+                const distanceToBottom = el.scrollHeight - el.clientHeight - top;
+                if (distanceToBottom <= AUTO_FOLLOW_REENABLE_PX) {
+                    autoFollow = true;
+                }
+            }
+            lastScrollTop = top;
+        }, { passive: true });
         container.addEventListener('click', function(event) {
             const copyButton = event.target.closest('.code-header-copy');
             if (copyButton) {
@@ -1093,7 +1146,7 @@ class Body:
                     <video class="video-player" controls>
                         <source src="{path}" type="video/{ext[1:]}">
                     </video>
-                    <p><a href="{url}" class="title">{elide_filename(basename)}</a></p>
+                    <p><a href="bridge://play_video/{url}" class="title">{elide_filename(basename)}</a></p>
                 </div>
             '''
         return f'<div class="extra-src-img-box" title="{url}"><div class="img-outer"><div class="img-wrapper"><a href="{url}"><img src="{path}" class="image"></a></div><a href="{url}" class="title">{elide_filename(basename)}</a></div></div><br/>'
@@ -1239,6 +1292,9 @@ class Body:
     def get_html(self, pid: int) -> str:
         """
         Build webview HTML code (fast path, minimal allocations)
+
+        :param pid: process ID
+        :return: HTML code
         """
         cfg_get = self.window.core.config.get
         style = cfg_get("theme.style", "blocks")
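The auto-follow rules above are spread across three JS handlers (the declarations, the wheel listener, and the scroll listener). Restated as a compact Python model, for clarity only; class and method names are hypothetical, thresholds match the JS (1 px jitter guard, 8 px re-enable margin):

class AutoFollow:
    """Model of the webview auto-follow state machine."""
    REENABLE_PX = 8

    def __init__(self):
        self.enabled = True
        self.last_top = 0.0

    def on_wheel(self, delta_y: float):
        # wheel up disables follow, even when already at the absolute bottom
        if delta_y < 0:
            self.enabled = False

    def on_scroll(self, top: float, scroll_height: float, client_height: float):
        if top + 1 < self.last_top:  # user scrolled up (ignore tiny jitter)
            self.enabled = False
        elif not self.enabled:
            # re-enable once the user returns to (near) the bottom
            if scroll_height - client_height - top <= self.REENABLE_PX:
                self.enabled = True
        self.last_top = top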
pygpt_net/data/config/config.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.33",
-        "app.version": "2.6.33",
-        "updated_at": "2025-09-
+        "version": "2.6.34",
+        "app.version": "2.6.34",
+        "updated_at": "2025-09-03T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
pygpt_net/data/config/models.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.33",
-        "app.version": "2.6.33",
-        "updated_at": "2025-09-
+        "version": "2.6.34",
+        "app.version": "2.6.34",
+        "updated_at": "2025-09-03T08:03:34"
     },
     "items": {
         "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
pygpt_net/data/config/settings.json CHANGED
@@ -1520,7 +1520,8 @@
     },
     "vision.capture.idx": {
         "section": "vision",
-        "type": "
+        "type": "combo",
+        "use": "camera_devices",
         "slider": true,
         "label": "settings.vision.capture.idx",
         "description": "settings.vision.capture.idx.desc",
@@ -1529,7 +1530,8 @@
         "max": 3,
         "multiplier": 1,
         "step": 1,
-        "advanced": false
+        "advanced": false,
+        "tab": "camera"
     },
     "vision.capture.width": {
         "section": "vision",
@@ -1541,7 +1543,8 @@
         "max": 4096,
         "multiplier": 1,
         "step": 1,
-        "advanced": false
+        "advanced": false,
+        "tab": "camera"
     },
     "vision.capture.height": {
         "section": "vision",
@@ -1553,7 +1556,8 @@
         "max": 4096,
         "multiplier": 1,
         "step": 1,
-        "advanced": false
+        "advanced": false,
+        "tab": "camera"
     },
     "vision.capture.quality": {
         "section": "vision",
@@ -1565,7 +1569,8 @@
         "max": 100,
         "multiplier": 1,
         "step": 1,
-        "advanced": false
+        "advanced": false,
+        "tab": "camera"
     },
     "audio.input.backend": {
         "section": "audio",