pygpt-net 2.6.11__py3-none-any.whl → 2.6.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,14 +6,16 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.18 01:00:00 #
9
+ # Updated Date: 2025.08.19 07:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
13
13
  import os
14
14
  import re
15
+
15
16
  from datetime import datetime
16
17
  from typing import Optional, List, Any
18
+ from time import monotonic
17
19
 
18
20
  from pygpt_net.core.render.base import BaseRenderer
19
21
  from pygpt_net.core.text.utils import has_unclosed_code_tag
@@ -57,7 +59,7 @@ class Renderer(BaseRenderer):
57
59
  self.body = Body(window)
58
60
  self.helpers = Helpers(window)
59
61
  self.parser = Parser(window)
60
- self.pids = {} # per node data
62
+ self.pids = {}
61
63
  self.prev_chunk_replace = False
62
64
  self.prev_chunk_newline = False
63
65
 
@@ -66,6 +68,9 @@ class Renderer(BaseRenderer):
66
68
  self._icon_sync = os.path.join(app_path, "data", "icons", "sync.svg")
67
69
  self._file_prefix = 'file:///' if self.window and self.window.core.platforms.is_windows() else 'file://'
68
70
 
71
+ self._thr = {}
72
+ self._throttle_interval = 0.01 # 10 ms delay
73
+
69
74
  def prepare(self):
70
75
  """
71
76
  Prepare renderer
@@ -104,8 +109,6 @@ class Renderer(BaseRenderer):
104
109
  if pid is None or pid not in self.pids:
105
110
  return
106
111
  self.pids[pid].loaded = True
107
- node = self.get_output_node(meta)
108
-
109
112
  if self.pids[pid].html != "" and not self.pids[pid].use_buffer:
110
113
  self.clear_chunks_input(pid)
111
114
  self.clear_chunks_output(pid)
@@ -113,8 +116,6 @@ class Renderer(BaseRenderer):
113
116
  self.append(pid, self.pids[pid].html, flush=True)
114
117
  self.pids[pid].html = ""
115
118
 
116
- node.setUpdatesEnabled(True)
117
-
118
119
  def get_pid(self, meta: CtxMeta):
119
120
  """
120
121
  Get PID for context meta
@@ -299,6 +300,9 @@ class Renderer(BaseRenderer):
299
300
  pid = self.get_or_create_pid(meta)
300
301
  if pid is None:
301
302
  return
303
+
304
+ self._throttle_emit(pid, force=True)
305
+ self._throttle_reset(pid)
302
306
  if self.window.controller.agent.legacy.enabled():
303
307
  if self.pids[pid].item is not None:
304
308
  self.append_context_item(meta, self.pids[pid].item)
@@ -335,6 +339,7 @@ class Renderer(BaseRenderer):
335
339
  self.pids[pid].use_buffer = True
336
340
  self.pids[pid].html = ""
337
341
  prev_ctx = None
342
+ next_item = None
338
343
  total = len(items)
339
344
  for i, item in enumerate(items):
340
345
  self.update_names(meta, item)
@@ -349,14 +354,17 @@ class Renderer(BaseRenderer):
349
354
  next_ctx=next_item
350
355
  )
351
356
  prev_ctx = item
352
- self.pids[pid].use_buffer = False
353
357
 
358
+ prev_ctx = None
359
+ next_item = None
360
+ self.pids[pid].use_buffer = False
354
361
  if self.pids[pid].html != "":
355
362
  self.append(
356
363
  pid,
357
364
  self.pids[pid].html,
358
365
  flush=True
359
366
  )
367
+ self.parser.reset()
360
368
 
361
369
  def append_input(
362
370
  self, meta: CtxMeta,
@@ -467,6 +475,8 @@ class Renderer(BaseRenderer):
467
475
  if not text_chunk:
468
476
  if begin:
469
477
  pctx.clear()
478
+ self._throttle_emit(pid, force=True)
479
+ self._throttle_reset(pid)
470
480
  return
471
481
 
472
482
  name_header_str = self.get_name_header(ctx)
@@ -479,8 +489,10 @@ class Renderer(BaseRenderer):
479
489
  debug = self.append_debug(ctx, pid, "stream")
480
490
  if debug:
481
491
  text_chunk = debug + text_chunk
482
- pctx.clear() # reset buffer
483
- pctx.is_cmd = False # reset command flag
492
+ self._throttle_emit(pid, force=True)
493
+ self._throttle_reset(pid)
494
+ pctx.clear()
495
+ pctx.is_cmd = False
484
496
  self.clear_chunks_output(pid)
485
497
  self.prev_chunk_replace = False
486
498
 
@@ -496,11 +508,12 @@ class Renderer(BaseRenderer):
496
508
  del buffer_to_parse
497
509
  is_code_block = html.endswith(self.ENDINGS_CODE)
498
510
  is_list = html.endswith(self.ENDINGS_LIST)
499
- is_newline = ("\n" in text_chunk) or buffer.endswith("\n") or is_code_block
511
+ is_n = "\n" in text_chunk
512
+ is_newline = is_n or buffer.endswith("\n") or is_code_block
500
513
  force_replace = False
501
514
  if self.prev_chunk_newline:
502
515
  force_replace = True
503
- if "\n" in text_chunk:
516
+ if is_n:
504
517
  self.prev_chunk_newline = True
505
518
  else:
506
519
  self.prev_chunk_newline = False
@@ -509,38 +522,26 @@ class Renderer(BaseRenderer):
509
522
  if is_newline or force_replace or is_list:
510
523
  replace = True
511
524
  if is_code_block:
512
- # don't replace if it is a code block
513
- if "\n" not in text_chunk:
514
- # if there is no newline in raw_chunk, then don't replace
525
+ if not is_n:
515
526
  replace = False
516
527
 
517
528
  if not is_code_block:
518
- text_chunk = text_chunk.replace("\n", "<br/>")
529
+ if is_n:
530
+ text_chunk = text_chunk.replace("\n", "<br/>")
519
531
  else:
520
- if self.prev_chunk_replace and not has_unclosed_code_tag(text_chunk):
521
- # if previous chunk was replaced and current is code block, then add \n to chunk
522
- text_chunk = "".join(("\n", text_chunk)) # add newline to chunk
532
+ if self.prev_chunk_replace and (is_code_block and not has_unclosed_code_tag(text_chunk)):
533
+ text_chunk = "\n" + text_chunk
523
534
 
524
535
  self.prev_chunk_replace = replace
525
536
 
526
- # hide loading spinner if it is the beginning of the text
527
537
  if begin:
528
538
  try:
529
539
  self.get_output_node(meta).page().runJavaScript("hideLoading();")
530
540
  except Exception:
531
541
  pass
532
542
 
533
- # emit chunk to output node
534
- try:
535
- self.get_output_node(meta).page().bridge.chunk.emit(
536
- name_header_str or "",
537
- self.sanitize_html(html) if replace else "",
538
- self.sanitize_html(text_chunk) if not replace else "",
539
- bool(replace),
540
- bool(is_code_block),
541
- )
542
- except Exception:
543
- pass
543
+ self._throttle_queue(pid, name_header_str or "", html, text_chunk, replace, bool(is_code_block))
544
+ self._throttle_emit(pid, force=False)
544
545
 
545
546
  def next_chunk(
546
547
  self,
@@ -554,6 +555,8 @@ class Renderer(BaseRenderer):
554
555
  :param ctx: context item
555
556
  """
556
557
  pid = self.get_or_create_pid(meta)
558
+ self._throttle_emit(pid, force=True)
559
+ self._throttle_reset(pid)
557
560
  self.pids[pid].item = ctx
558
561
  self.pids[pid].buffer = ""
559
562
  self.update_names(meta, ctx)
@@ -561,7 +564,8 @@ class Renderer(BaseRenderer):
561
564
  self.prev_chunk_newline = False
562
565
  try:
563
566
  self.get_output_node(meta).page().runJavaScript(
564
- "nextStream();")
567
+ "nextStream();"
568
+ )
565
569
  except Exception:
566
570
  pass
567
571
 
@@ -635,7 +639,6 @@ class Renderer(BaseRenderer):
635
639
  to_append = self.pids[pid].live_buffer
636
640
  if has_unclosed_code_tag(self.pids[pid].live_buffer):
637
641
  to_append += "\n```"
638
- print(to_append)
639
642
  try:
640
643
  self.get_output_node(meta).page().runJavaScript(
641
644
  f"""replaceLive({self.to_json(
@@ -717,8 +720,9 @@ class Renderer(BaseRenderer):
717
720
  """
718
721
  if self.pids[pid].loaded and not self.pids[pid].use_buffer:
719
722
  self.clear_chunks(pid)
720
- self.flush_output(pid, html)
721
- self.pids[pid].html = ""
723
+ if html:
724
+ self.flush_output(pid, html)
725
+ self.pids[pid].clear()
722
726
  else:
723
727
  if not flush:
724
728
  self.pids[pid].append_html(html)
@@ -911,6 +915,7 @@ class Renderer(BaseRenderer):
911
915
  node.reset_current_content()
912
916
  self.reset_names_by_pid(pid)
913
917
  self.prev_chunk_replace = False
918
+ self._throttle_reset(pid)
914
919
 
915
920
  def clear_input(self):
916
921
  """Clear input"""
@@ -977,6 +982,7 @@ class Renderer(BaseRenderer):
977
982
  self.get_output_node_by_pid(pid).page().runJavaScript(js)
978
983
  except Exception:
979
984
  pass
985
+ self._throttle_reset(pid)
980
986
 
981
987
  def clear_nodes(
982
988
  self,
@@ -1191,12 +1197,11 @@ class Renderer(BaseRenderer):
1191
1197
  """
1192
1198
  try:
1193
1199
  self.get_output_node_by_pid(pid).page().runJavaScript(
1194
- f"""if (typeof window.appendNode !== 'undefined') appendNode({self.to_json(
1195
- self.sanitize_html(html)
1196
- )});"""
1200
+ f"""if (typeof window.appendNode !== 'undefined') appendNode({self.to_json(self.sanitize_html(html))});"""
1197
1201
  )
1198
1202
  except Exception:
1199
1203
  pass
1204
+ html = None
1200
1205
 
1201
1206
  def reload(self):
1202
1207
  """Reload output, called externally only on theme change to redraw content"""
@@ -1232,17 +1237,10 @@ class Renderer(BaseRenderer):
1232
1237
  pid = self.get_or_create_pid(meta)
1233
1238
  if pid is None:
1234
1239
  return
1235
- html = self.body.get_html(pid)
1236
- self.pids[pid].loaded = False
1237
1240
  node = self.get_output_node_by_pid(pid)
1238
1241
  if node is not None:
1239
- # hard reset
1240
- # old_view = node
1241
- # new_view = old_view.hard_reset()
1242
- # self.window.ui.nodes['output'][pid] = new_view
1243
1242
  node.resetPage()
1244
- node.setHtml(html, baseUrl="file://")
1245
- self.pids[pid].html = ""
1243
+ self._throttle_reset(pid)
1246
1244
 
1247
1245
  def get_output_node(
1248
1246
  self,
@@ -1421,6 +1419,7 @@ class Renderer(BaseRenderer):
1421
1419
  self.clear_chunks(pid)
1422
1420
  self.clear_nodes(pid)
1423
1421
  self.pids[pid].html = ""
1422
+ self._throttle_reset(pid)
1424
1423
 
1425
1424
  def scroll_to_bottom(self):
1426
1425
  """Scroll to bottom"""
@@ -1573,6 +1572,7 @@ class Renderer(BaseRenderer):
1573
1572
  :param ctx: context item
1574
1573
  :param pid: context PID
1575
1574
  :param title: debug title
1575
+ :return: HTML debug info
1576
1576
  """
1577
1577
  if title is None:
1578
1578
  title = "debug"
@@ -1589,6 +1589,115 @@ class Renderer(BaseRenderer):
1589
1589
  def remove_pid(self, pid: int):
1590
1590
  """
1591
1591
  Remove PID from renderer
1592
+
1593
+ :param pid: context PID
1592
1594
  """
1593
1595
  if pid in self.pids:
1594
- del self.pids[pid]
1596
+ del self.pids[pid]
1597
+ self._thr.pop(pid, None)
1598
+
1599
+ def _throttle_get(self, pid: int) -> dict:
1600
+ """
1601
+ Return per-pid throttle state
1602
+
1603
+ :param pid: context PID
1604
+ :return: throttle state dictionary
1605
+ """
1606
+ thr = self._thr.get(pid)
1607
+ if thr is None:
1608
+ thr = {"last": 0.0, "op": 0, "name": "", "replace_html": "", "append": [], "code": False}
1609
+ self._thr[pid] = thr
1610
+ return thr
1611
+
1612
+ def _throttle_reset(self, pid: Optional[int]):
1613
+ """
1614
+ Reset throttle state
1615
+
1616
+ :param pid: context PID
1617
+ """
1618
+ if pid is None:
1619
+ return
1620
+ thr = self._thr.get(pid)
1621
+ if thr is None:
1622
+ return
1623
+ thr["op"] = 0
1624
+ thr["name"] = ""
1625
+ thr["replace_html"] = ""
1626
+ thr["append"].clear()
1627
+ thr["code"] = False
1628
+
1629
+ def _throttle_queue(
1630
+ self,
1631
+ pid: int,
1632
+ name: str,
1633
+ html: str,
1634
+ text_chunk: str,
1635
+ replace: bool,
1636
+ is_code_block: bool
1637
+ ):
1638
+ """
1639
+ Queue chunk for throttled emit
1640
+
1641
+ :param pid: context PID
1642
+ :param name: name of the chunk
1643
+ :param html: HTML content of the chunk
1644
+ :param text_chunk: raw text chunk
1645
+ :param replace: whether to replace the current content
1646
+ :param is_code_block: whether the chunk is a code block
1647
+ """
1648
+ thr = self._throttle_get(pid)
1649
+ if name:
1650
+ thr["name"] = name
1651
+ if replace:
1652
+ thr["op"] = 1
1653
+ thr["replace_html"] = html
1654
+ thr["append"].clear()
1655
+ thr["code"] = bool(is_code_block)
1656
+ else:
1657
+ if thr["op"] != 1:
1658
+ thr["op"] = 2
1659
+ thr["append"].append(text_chunk)
1660
+ thr["code"] = bool(is_code_block)
1661
+
1662
+ def _throttle_emit(self, pid: int, force: bool = False):
1663
+ """
1664
+ Emit queued chunks if due
1665
+
1666
+ :param pid: context PID
1667
+ :param force: force emit regardless of throttle interval
1668
+ """
1669
+ thr = self._throttle_get(pid)
1670
+ now = monotonic()
1671
+ if not force and (now - thr["last"] < self._throttle_interval):
1672
+ return
1673
+ if thr["op"] == 1:
1674
+ try:
1675
+ node = self.get_output_node_by_pid(pid)
1676
+ if node is not None:
1677
+ node.page().bridge.chunk.emit(
1678
+ thr["name"],
1679
+ self.sanitize_html(thr["replace_html"]),
1680
+ "",
1681
+ True,
1682
+ bool(thr["code"]),
1683
+ )
1684
+ except Exception:
1685
+ pass
1686
+ thr["last"] = now
1687
+ self._throttle_reset(pid)
1688
+ elif thr["op"] == 2 and thr["append"]:
1689
+ append_str = "".join(thr["append"])
1690
+ try:
1691
+ node = self.get_output_node_by_pid(pid)
1692
+ if node is not None:
1693
+ node.page().bridge.chunk.emit(
1694
+ thr["name"],
1695
+ "",
1696
+ self.sanitize_html(append_str),
1697
+ False,
1698
+ bool(thr["code"]),
1699
+ )
1700
+ except Exception:
1701
+ pass
1702
+ thr["last"] = now
1703
+ self._throttle_reset(pid)
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.11",
4
- "app.version": "2.6.11",
5
- "updated_at": "2025-08-18T00:00:00"
3
+ "version": "2.6.12",
4
+ "app.version": "2.6.12",
5
+ "updated_at": "2025-08-19T00:00:00"
6
6
  },
7
7
  "access.audio.event.speech": false,
8
8
  "access.audio.event.speech.disabled": [],
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.11",
4
- "app.version": "2.6.11",
5
- "updated_at": "2025-08-18T23:07:35"
3
+ "version": "2.6.12",
4
+ "app.version": "2.6.12",
5
+ "updated_at": "2025-08-19T23:07:35"
6
6
  },
7
7
  "items": {
8
8
  "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
pygpt_net/item/ctx.py CHANGED
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.15 23:00:00 #
9
+ # Updated Date: 2025.08.19 07:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -103,7 +103,7 @@ class CtxItem:
103
103
  if self.input is None:
104
104
  return None
105
105
  if self.hidden_input:
106
- return self.input + "\n\n" + self.hidden_input
106
+ return "\n\n".join((self.input, self.hidden_input))
107
107
  return self.input
108
108
 
109
109
  @property
@@ -116,7 +116,7 @@ class CtxItem:
116
116
  if self.output is None:
117
117
  return None
118
118
  if self.hidden_output:
119
- return self.output + "\n\n" + self.hidden_output
119
+ return "\n\n".join((self.output, self.hidden_output))
120
120
  return self.output
121
121
 
122
122
  def clear_reply(self):
pygpt_net/launcher.py CHANGED
@@ -6,12 +6,9 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.11 00:00:00 #
9
+ # Updated Date: 2025.08.19 07:00:00 #
10
10
  # ================================================== #
11
11
 
12
- import asyncio
13
- from qasync import QEventLoop
14
-
15
12
  import os
16
13
  import sys
17
14
  import argparse
@@ -123,8 +120,6 @@ class Launcher:
123
120
  QCoreApplication.setAttribute(Qt.AA_ShareOpenGLContexts)
124
121
  self.app = QApplication(sys.argv)
125
122
  self.app.setAttribute(QtCore.Qt.AA_DontUseNativeMenuBar)
126
- self.loop = QEventLoop(self.app)
127
- asyncio.set_event_loop(self.loop)
128
123
  self.window = MainWindow(self.app, args=args)
129
124
  self.shortcut_filter = GlobalShortcutFilter(self.window)
130
125
 
@@ -295,6 +290,4 @@ class Launcher:
295
290
  # self.window.core.debug.mem("INIT") # debug memory usage
296
291
  signal.signal(signal.SIGTERM, self.handle_signal)
297
292
  signal.signal(signal.SIGINT, self.handle_signal)
298
- with self.loop:
299
- self.loop.run_forever()
300
- # sys.exit(self.app.exec())
293
+ sys.exit(self.app.exec())
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.12 19:00:00 #
9
+ # Updated Date: 2025.08.19 07:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from openai import OpenAI
@@ -63,6 +63,7 @@ class Gpt:
63
63
  self.vision = Vision(window)
64
64
  self.client = None
65
65
  self.locked = False
66
+ self.last_client_args = None # last client args used, for debug purposes
66
67
 
67
68
  def get_client(
68
69
  self,
@@ -78,7 +79,15 @@ class Gpt:
78
79
  """
79
80
  # update client args by mode and model
80
81
  args = self.window.core.models.prepare_client_args(mode, model)
81
- self.client = OpenAI(**args)
82
+ if self.client is None or self.last_client_args != args:
83
+ if self.client is not None:
84
+ try:
85
+ self.client.close() # close previous client if exists
86
+ except Exception as e:
87
+ self.window.core.debug.log(e)
88
+ print("Error closing previous GPT client:", e)
89
+ self.client = OpenAI(**args)
90
+ self.last_client_args = args
82
91
  return self.client
83
92
 
84
93
  def call(self, context: BridgeContext, extra: dict = None) -> bool:
@@ -308,8 +317,8 @@ class Gpt:
308
317
  return
309
318
  if self.client is not None:
310
319
  try:
311
- self.client.close()
312
- self.client = None
320
+ pass
321
+ # self.client.close()
313
322
  except Exception as e:
314
323
  self.window.core.debug.log(e)
315
324
  print("Error closing GPT client:", e)
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.07.16 15:00:00 #
9
+ # Updated Date: 2025.08.19 07:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -185,8 +185,7 @@ class Body:
185
185
  function highlightCode() {
186
186
  document.querySelectorAll('pre code').forEach(el => {
187
187
  if (!el.classList.contains('hljs')) hljs.highlightElement(el);
188
- });
189
- restoreCollapsedCode();
188
+ });
190
189
  }
191
190
  function scrollToBottom() {
192
191
  getScrollPosition(); // store using bridge
pygpt_net/ui/main.py CHANGED
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.18 01:00:00 #
9
+ # Updated Date: 2025.08.19 07:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -115,7 +115,10 @@ class MainWindow(QMainWindow, QtStyleTools):
115
115
  if not render_debug:
116
116
  QLoggingCategory.setFilterRules("*.info=false")
117
117
  else:
118
- os.environ["QTWEBENGINE_CHROMIUM_FLAGS"] = "--enable-logging --log-level=0"
118
+ if "QTWEBENGINE_CHROMIUM_FLAGS" in os.environ:
119
+ os.environ["QTWEBENGINE_CHROMIUM_FLAGS"] += " --enable-logging --log-level=0"
120
+ else:
121
+ os.environ["QTWEBENGINE_CHROMIUM_FLAGS"] = "--enable-logging --log-level=0"
119
122
 
120
123
  # OpenGL disable
121
124
  if self.core.config.get("render.open_gl") is False:
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.08 21:00:00 #
9
+ # Updated Date: 2025.08.19 07:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import re
@@ -207,12 +207,7 @@ class HtmlOutput(QWebEngineView):
207
207
 
208
208
  :param success: True if loaded successfully
209
209
  """
210
- if success:
211
- event = RenderEvent(RenderEvent.ON_PAGE_LOAD, {
212
- "meta": self.meta,
213
- "tab": self.tab,
214
- })
215
- self.window.dispatch(event)
210
+ pass
216
211
 
217
212
  def get_selected_text(self) -> str:
218
213
  """