pygpt-net 2.6.36__py3-none-any.whl → 2.6.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96):
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/handler/anthropic_stream.py +164 -0
  4. pygpt_net/controller/chat/handler/google_stream.py +181 -0
  5. pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
  6. pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
  7. pygpt_net/controller/chat/handler/openai_stream.py +260 -0
  8. pygpt_net/controller/chat/handler/utils.py +210 -0
  9. pygpt_net/controller/chat/handler/worker.py +570 -0
  10. pygpt_net/controller/chat/handler/xai_stream.py +135 -0
  11. pygpt_net/controller/chat/stream.py +1 -1
  12. pygpt_net/controller/ctx/ctx.py +1 -1
  13. pygpt_net/controller/debug/debug.py +6 -6
  14. pygpt_net/controller/model/editor.py +3 -0
  15. pygpt_net/controller/model/importer.py +9 -2
  16. pygpt_net/controller/plugins/plugins.py +11 -3
  17. pygpt_net/controller/presets/presets.py +2 -2
  18. pygpt_net/core/bridge/context.py +35 -35
  19. pygpt_net/core/bridge/worker.py +40 -16
  20. pygpt_net/core/ctx/bag.py +7 -2
  21. pygpt_net/core/ctx/reply.py +17 -2
  22. pygpt_net/core/db/viewer.py +19 -34
  23. pygpt_net/core/render/plain/pid.py +12 -1
  24. pygpt_net/core/render/web/body.py +30 -39
  25. pygpt_net/core/tabs/tab.py +24 -1
  26. pygpt_net/data/config/config.json +10 -3
  27. pygpt_net/data/config/models.json +3 -3
  28. pygpt_net/data/config/settings.json +105 -0
  29. pygpt_net/data/css/style.dark.css +2 -3
  30. pygpt_net/data/css/style.light.css +2 -3
  31. pygpt_net/data/locale/locale.de.ini +3 -1
  32. pygpt_net/data/locale/locale.en.ini +19 -1
  33. pygpt_net/data/locale/locale.es.ini +3 -1
  34. pygpt_net/data/locale/locale.fr.ini +3 -1
  35. pygpt_net/data/locale/locale.it.ini +3 -1
  36. pygpt_net/data/locale/locale.pl.ini +4 -2
  37. pygpt_net/data/locale/locale.uk.ini +3 -1
  38. pygpt_net/data/locale/locale.zh.ini +3 -1
  39. pygpt_net/item/assistant.py +51 -2
  40. pygpt_net/item/attachment.py +21 -20
  41. pygpt_net/item/calendar_note.py +19 -2
  42. pygpt_net/item/ctx.py +115 -2
  43. pygpt_net/item/index.py +9 -2
  44. pygpt_net/item/mode.py +9 -6
  45. pygpt_net/item/model.py +20 -3
  46. pygpt_net/item/notepad.py +14 -2
  47. pygpt_net/item/preset.py +42 -2
  48. pygpt_net/item/prompt.py +8 -2
  49. pygpt_net/plugin/cmd_files/plugin.py +2 -2
  50. pygpt_net/provider/api/__init__.py +5 -3
  51. pygpt_net/provider/api/anthropic/__init__.py +190 -29
  52. pygpt_net/provider/api/anthropic/audio.py +30 -0
  53. pygpt_net/provider/api/anthropic/chat.py +341 -0
  54. pygpt_net/provider/api/anthropic/image.py +25 -0
  55. pygpt_net/provider/api/anthropic/tools.py +266 -0
  56. pygpt_net/provider/api/anthropic/vision.py +142 -0
  57. pygpt_net/provider/api/google/chat.py +2 -2
  58. pygpt_net/provider/api/google/realtime/client.py +2 -2
  59. pygpt_net/provider/api/google/tools.py +58 -48
  60. pygpt_net/provider/api/google/vision.py +7 -1
  61. pygpt_net/provider/api/openai/chat.py +1 -0
  62. pygpt_net/provider/api/openai/vision.py +6 -0
  63. pygpt_net/provider/api/x_ai/__init__.py +247 -0
  64. pygpt_net/provider/api/x_ai/audio.py +32 -0
  65. pygpt_net/provider/api/x_ai/chat.py +968 -0
  66. pygpt_net/provider/api/x_ai/image.py +208 -0
  67. pygpt_net/provider/api/x_ai/remote.py +262 -0
  68. pygpt_net/provider/api/x_ai/tools.py +120 -0
  69. pygpt_net/provider/api/x_ai/vision.py +119 -0
  70. pygpt_net/provider/core/attachment/json_file.py +2 -2
  71. pygpt_net/provider/core/config/patch.py +28 -0
  72. pygpt_net/provider/llms/anthropic.py +4 -2
  73. pygpt_net/tools/text_editor/tool.py +4 -1
  74. pygpt_net/tools/text_editor/ui/dialogs.py +1 -1
  75. pygpt_net/ui/base/config_dialog.py +5 -11
  76. pygpt_net/ui/dialog/db.py +177 -59
  77. pygpt_net/ui/dialog/dictionary.py +57 -59
  78. pygpt_net/ui/dialog/editor.py +3 -2
  79. pygpt_net/ui/dialog/image.py +1 -1
  80. pygpt_net/ui/dialog/logger.py +3 -2
  81. pygpt_net/ui/dialog/models.py +16 -16
  82. pygpt_net/ui/dialog/plugins.py +63 -60
  83. pygpt_net/ui/layout/ctx/ctx_list.py +3 -4
  84. pygpt_net/ui/layout/toolbox/__init__.py +2 -2
  85. pygpt_net/ui/layout/toolbox/assistants.py +8 -9
  86. pygpt_net/ui/layout/toolbox/presets.py +2 -2
  87. pygpt_net/ui/main.py +9 -4
  88. pygpt_net/ui/widget/element/labels.py +20 -4
  89. pygpt_net/ui/widget/textarea/editor.py +0 -4
  90. pygpt_net/ui/widget/textarea/web.py +1 -1
  91. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/METADATA +18 -6
  92. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/RECORD +95 -76
  93. pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
  94. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/LICENSE +0 -0
  95. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/WHEEL +0 -0
  96. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/entry_points.txt +0 -0
@@ -18,7 +18,7 @@ from pygpt_net.core.events import RenderEvent
18
18
  from pygpt_net.core.types import MODE_ASSISTANT
19
19
  from pygpt_net.item.ctx import CtxItem
20
20
 
21
- from .handler.stream_worker import StreamWorker
21
+ from .handler.worker import StreamWorker
22
22
 
23
23
  class Stream:
24
24
  def __init__(self, window=None):
@@ -295,7 +295,7 @@ class Ctx:
295
295
 
296
296
  def clean_memory(self):
297
297
  """Clean memory"""
298
- self.window.core.api.openai.close() # clear gpt client
298
+ pass
299
299
 
300
300
  def new_in_group(
301
301
  self,
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.18 01:00:00 #
9
+ # Updated Date: 2025.09.05 18:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from datetime import datetime
@@ -31,6 +31,7 @@ class Debug(QObject):
31
31
  self.is_logger = False # logger window opened
32
32
  self.is_app_log = False # app log window opened
33
33
  self.allow_level_change = False # allow changing log level
34
+ self._ids = None
34
35
 
35
36
  def update(self):
36
37
  """Update debug"""
@@ -100,12 +101,11 @@ class Debug(QObject):
100
101
 
101
102
  :param all: update all debug windows
102
103
  """
103
- # not_realtime = ['context']
104
- not_realtime = []
105
- for id in self.window.controller.dialogs.debug.get_ids():
104
+ if self._ids is None:
105
+ self._ids = self.window.controller.dialogs.debug.get_ids()
106
+ for id in self._ids:
106
107
  if self.window.controller.dialogs.debug.is_active(id):
107
- if all or id not in not_realtime:
108
- self.window.controller.dialogs.debug.update_worker(id)
108
+ self.window.controller.dialogs.debug.update_worker(id)
109
109
 
110
110
  def post_setup(self):
111
111
  """Post setup debug"""
@@ -37,15 +37,18 @@ class Editor:
37
37
  "id": {
38
38
  "type": "text",
39
39
  "label": "model.id",
40
+ "description": "model.id.desc",
40
41
  },
41
42
  "name": {
42
43
  "type": "text",
43
44
  "label": "model.name",
45
+ "description": "model.name.desc",
44
46
  },
45
47
  "provider": {
46
48
  "type": "combo",
47
49
  "use": "llm_providers",
48
50
  "label": "model.provider",
51
+ "description": "model.provider.desc",
49
52
  },
50
53
  "ctx": {
51
54
  "type": "int",
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.26 23:00:00 #
9
+ # Updated Date: 2025.09.05 18:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -503,7 +503,7 @@ class Importer:
503
503
  return models
504
504
  else:
505
505
  for model in ollama_models:
506
- name = model.get('name').replace(":latest", "")
506
+ name = model.get('name')
507
507
  m = self.window.core.models.create_empty(append=False)
508
508
  m.id = name
509
509
  m.name = name
@@ -541,6 +541,13 @@ class Importer:
541
541
  base_models[key] = copy.deepcopy(self.pending[key])
542
542
  base_models[key].imported = True
543
543
  added = True
544
+ else:
545
+ # add provider suffix - to key
546
+ new_key = f"{key}-{self.provider}"
547
+ if new_key not in base_models:
548
+ base_models[new_key] = copy.deepcopy(self.pending[key])
549
+ base_models[new_key].imported = True
550
+ added = True
544
551
  for key in list(self.removed.keys()):
545
552
  if key in base_models:
546
553
  del base_models[key]
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.15 23:00:00 #
9
+ # Updated Date: 2025.09.05 18:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import List, Dict, Any, Optional
@@ -34,6 +34,8 @@ class Plugins:
34
34
  self.settings = Settings(window)
35
35
  self.presets = Presets(window)
36
36
  self.enabled = {}
37
+ self._ids = None
38
+ self._ids_with_update = None
37
39
  self._suspend_updates = 0
38
40
 
39
41
  def _begin_batch(self):
@@ -316,9 +318,15 @@ class Plugins:
316
318
  def on_post_update(self):
317
319
  """Called on post update"""
318
320
  pm = self.window.core.plugins
319
- for pid in pm.get_ids():
321
+ if self._ids is None:
322
+ self._ids = pm.get_ids()
323
+ if self._ids_with_update is None:
324
+ self._ids_with_update = [pid for pid in self._ids if hasattr(self.window.core.plugins.get(pid), "on_post_update")]
325
+ if len(self._ids_with_update) == 0:
326
+ return
327
+ for pid in self._ids_with_update:
320
328
  if self.is_enabled(pid):
321
- fn = getattr(pm.get(pid), "on_post_update", None)
329
+ fn = pm.get(pid).on_post_update
322
330
  if callable(fn):
323
331
  fn()
324
332
 
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.15 03:00:00 #
9
+ # Updated Date: 2025.09.05 18:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import re
@@ -440,7 +440,7 @@ class Presets:
440
440
  if preset_id and preset_id in w.core.presets.items:
441
441
  preset = w.core.presets.items[preset_id]
442
442
  preset.prompt = w.core.config.get('prompt')
443
- w.core.presets.save(preset)
443
+ w.core.presets.save(preset_id)
444
444
 
445
445
  def select_model(self):
446
446
  """Select model by current preset"""
@@ -22,10 +22,10 @@ class MultimodalContext:
22
22
  """
23
23
  Multimodal context
24
24
  """
25
- is_audio_input: bool = False
26
- is_audio_output: bool = False
27
- audio_data: Optional[Any] = None
28
- audio_format: str = "wav"
25
+ is_audio_input: bool = False # is audio input
26
+ is_audio_output: bool = False # is audio output
27
+ audio_data: Optional[Any] = None # audio data (bytes or file-like object)
28
+ audio_format: str = "wav" # audio format (wav, mp3, etc.)
29
29
 
30
30
  def __init__(self, **kwargs):
31
31
  """
@@ -59,32 +59,32 @@ class BridgeContext:
59
59
  """
60
60
  Bridge context
61
61
  """
62
- assistant_id: str = ""
63
- attachments: dict = field(default_factory=dict)
64
- ctx: Optional[CtxItem] = None
65
- external_functions: list = field(default_factory=list)
66
- file_ids: list = field(default_factory=list)
67
- force: bool = False # Force mode flag
68
- force_sync: bool = False # Force sync flag
69
- history: list = field(default_factory=list)
70
- idx: Optional[Any] = None
71
- idx_mode: str = "chat"
72
- is_expert_call: bool = False # Expert call flag
73
- max_tokens: int = 0
74
- mode: Optional[Any] = None
62
+ assistant_id: str = "" # OpenAI Assistant ID
63
+ attachments: dict = field(default_factory=dict) # id -> AttachmentItem
64
+ ctx: Optional[CtxItem] = None # CtxItem instance
65
+ external_functions: list = field(default_factory=list) # list of tools definitions
66
+ file_ids: list = field(default_factory=list) # list of uploaded file IDs
67
+ force: bool = False # force send
68
+ force_sync: bool = False # force synchronous plugin call
69
+ history: list = field(default_factory=list) # list of messages
70
+ idx: Optional[Any] = None # index name for LlamaIndex
71
+ idx_mode: str = "chat" # sub-mode for LlamaIndex
72
+ is_expert_call: bool = False # is expert call
73
+ max_tokens: int = 0 # max tokens
74
+ mode: Optional[Any] = None # mode to use
75
75
  model: Optional[ModelItem] = None # model instance, not model name
76
76
  multimodal_ctx: MultimodalContext = field(default_factory=lambda: MultimodalContext()) # AudioContext
77
77
  parent_mode: Optional[Any] = None # real mode (global)
78
78
  preset: Optional[Any] = None # PresetItem
79
- prompt: str = ""
79
+ prompt: str = "" # user input prompt
80
80
  reply_context: Optional[Any] = None # ReplyContext
81
- request: bool = False # Use normal request instead of quick call
82
- stream: bool = False
83
- system_prompt: str = ""
84
- system_prompt_raw: str = "" # without plugins addons
85
- temperature: float = 1.0
86
- thread_id: str = ""
87
- tools_outputs: list = field(default_factory=list)
81
+ request: bool = False # use normal request instead of quick call
82
+ stream: bool = False # stream enabled
83
+ system_prompt: str = "" # system prompt
84
+ system_prompt_raw: str = "" # system prompt without plugins addons
85
+ temperature: float = 1.0 # temperature
86
+ thread_id: str = "" # OpenAI Assistants thread ID for chat mode
87
+ tools_outputs: list = field(default_factory=list) # list of tools outputs
88
88
 
89
89
  def __init__(self, **kwargs):
90
90
  """
@@ -98,24 +98,24 @@ class BridgeContext:
98
98
  self.ctx = kwargs.get("ctx", None)
99
99
  self.external_functions = list(kwargs.get("external_functions", []))
100
100
  self.file_ids = list(kwargs.get("file_ids", []))
101
- self.force = kwargs.get("force", False) # Force mode flag
102
- self.force_sync = kwargs.get("force_sync", False) # Force sync flag
101
+ self.force = kwargs.get("force", False)
102
+ self.force_sync = kwargs.get("force_sync", False)
103
103
  self.history = list(kwargs.get("history", []))
104
104
  self.idx = kwargs.get("idx", None)
105
105
  self.idx_mode = kwargs.get("idx_mode", "chat")
106
- self.is_expert_call = kwargs.get("is_expert_call", False) # Expert call flag
106
+ self.is_expert_call = kwargs.get("is_expert_call", False)
107
107
  self.max_tokens = kwargs.get("max_tokens", 0)
108
108
  self.mode = kwargs.get("mode", None)
109
- self.model = kwargs.get("model", None) # model instance, not model name
110
- self.multimodal_ctx = kwargs.get("multimodal_ctx", MultimodalContext()) # AudioContext
111
- self.parent_mode = kwargs.get("parent_mode", None) # real mode (global)
112
- self.preset = kwargs.get("preset", None) # PresetItem
109
+ self.model = kwargs.get("model", None)
110
+ self.multimodal_ctx = kwargs.get("multimodal_ctx", MultimodalContext())
111
+ self.parent_mode = kwargs.get("parent_mode", None)
112
+ self.preset = kwargs.get("preset", None)
113
113
  self.prompt = kwargs.get("prompt", "")
114
- self.reply_context = kwargs.get("reply_ctx", kwargs.get("reply_context", None)) # ReplyContext
115
- self.request = kwargs.get("request", False) # Use normal request instead of quick call
114
+ self.reply_context = kwargs.get("reply_ctx", kwargs.get("reply_context", None))
115
+ self.request = kwargs.get("request", False)
116
116
  self.stream = kwargs.get("stream", False)
117
117
  self.system_prompt = kwargs.get("system_prompt", "")
118
- self.system_prompt_raw = kwargs.get("system_prompt_raw", "") # without plugins addons
118
+ self.system_prompt_raw = kwargs.get("system_prompt_raw", "")
119
119
  self.temperature = kwargs.get("temperature", 1.0)
120
120
  self.thread_id = kwargs.get("thread_id", "")
121
121
  self.tools_outputs = list(kwargs.get("tools_outputs", []))
@@ -46,7 +46,8 @@ class BridgeWorker(QRunnable):
46
46
  @Slot()
47
47
  def run(self):
48
48
  """Run bridge worker"""
49
- self.window.core.debug.info("[bridge] Worker started.")
49
+ core = self.window.core
50
+ core.debug.info("[bridge] Worker started.")
50
51
  result = False
51
52
 
52
53
  try:
@@ -64,7 +65,7 @@ class BridgeWorker(QRunnable):
64
65
  if self.mode == MODE_LANGCHAIN:
65
66
  raise Exception("Langchain mode is deprecated from v2.5.20 and no longer supported. ")
66
67
  """
67
- result = self.window.core.chain.call(
68
+ result = core.chain.call(
68
69
  context=self.context,
69
70
  extra=self.extra,
70
71
  )
@@ -74,15 +75,18 @@ class BridgeWorker(QRunnable):
74
75
 
75
76
  # LlamaIndex: chat with files
76
77
  if self.mode == MODE_LLAMA_INDEX:
77
- result = self.window.core.idx.chat.call(
78
+ result = core.idx.chat.call(
78
79
  context=self.context,
79
80
  extra=self.extra,
80
81
  signals=self.signals,
81
82
  )
82
83
 
83
- # LlamaIndex: agents
84
- elif self.mode in [MODE_AGENT_LLAMA, MODE_AGENT_OPENAI]:
85
- result = self.window.core.agents.runner.call(
84
+ # Agents (OpenAI, Llama)
85
+ elif self.mode in (
86
+ MODE_AGENT_LLAMA,
87
+ MODE_AGENT_OPENAI
88
+ ):
89
+ result = core.agents.runner.call(
86
90
  context=self.context,
87
91
  extra=self.extra,
88
92
  signals=self.signals,
@@ -91,11 +95,11 @@ class BridgeWorker(QRunnable):
91
95
  self.cleanup()
92
96
  return # don't emit any signals (handled in agent runner, step by step)
93
97
  else:
94
- self.extra["error"] = str(self.window.core.agents.runner.get_error())
98
+ self.extra["error"] = str(core.agents.runner.get_error())
95
99
 
96
- # Loop: next step
100
+ # Agents loop: next step
97
101
  elif self.mode == MODE_LOOP_NEXT: # virtual mode
98
- result = self.window.core.agents.runner.loop.run_next(
102
+ result = core.agents.runner.loop.run_next(
99
103
  context=self.context,
100
104
  extra=self.extra,
101
105
  signals=self.signals,
@@ -103,27 +107,47 @@ class BridgeWorker(QRunnable):
103
107
  if result:
104
108
  return # don't emit any signals (handled in agent runner, step by step)
105
109
  else:
106
- self.extra["error"] = str(self.window.core.agents.runner.get_error())
110
+ self.extra["error"] = str(core.agents.runner.get_error())
107
111
 
108
112
  # API SDK: chat, completion, vision, image, assistants
109
113
  else:
110
- sdk = "openai"
114
+ sdk = "openai" # default to OpenAI SDK
111
115
  model = self.context.model
112
116
  if model.provider == "google":
113
- if self.window.core.config.get("api_native_google", False):
117
+ if core.config.get("api_native_google", False):
114
118
  sdk = "google"
119
+ elif model.provider == "anthropic":
120
+ if core.config.get("api_native_anthropic", False):
121
+ sdk = "anthropic"
122
+ elif model.provider == "x_ai":
123
+ if core.config.get("api_native_xai", False):
124
+ sdk = "x_ai"
115
125
 
116
126
  # call appropriate SDK
117
127
  if sdk == "google":
118
- # print("Using Google SDK")
119
- result = self.window.core.api.google.call(
128
+ core.debug.info("[bridge] Using Google SDK.")
129
+ result = core.api.google.call(
130
+ context=self.context,
131
+ extra=self.extra,
132
+ rt_signals=self.rt_signals,
133
+ )
134
+ elif sdk == "anthropic":
135
+ core.debug.info("[bridge] Using Anthropic SDK.")
136
+ result = core.api.anthropic.call(
137
+ context=self.context,
138
+ extra=self.extra,
139
+ rt_signals=self.rt_signals,
140
+ )
141
+ elif sdk == "x_ai":
142
+ core.debug.info("[bridge] Using xAI SDK.")
143
+ result = core.api.xai.call(
120
144
  context=self.context,
121
145
  extra=self.extra,
122
146
  rt_signals=self.rt_signals,
123
147
  )
124
148
  elif sdk == "openai":
125
- # print("Using OpenAI SDK")
126
- result = self.window.core.api.openai.call(
149
+ core.debug.info("[bridge] Using OpenAI SDK.")
150
+ result = core.api.openai.call(
127
151
  context=self.context,
128
152
  extra=self.extra,
129
153
  rt_signals=self.rt_signals,
pygpt_net/core/ctx/bag.py CHANGED
@@ -6,16 +6,21 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.19 07:00:00 #
9
+ # Updated Date: 2025.09.05 18:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import List
13
+ from dataclasses import dataclass, field
13
14
 
14
15
  from pygpt_net.item.ctx import CtxItem
15
16
 
16
17
 
18
+ @dataclass(slots=True)
17
19
  class Bag:
18
- __slots__ = ('window', 'meta', 'tab_id', 'items')
20
+ window: object = None
21
+ meta: object = None
22
+ tab_id: int = 0
23
+ items: List[CtxItem] = field(default_factory=list)
19
24
 
20
25
  def __init__(self, window=None):
21
26
  """
@@ -6,12 +6,14 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.07.26 18:00:00 #
9
+ # Updated Date: 2025.09.05 18:00:00 #
10
10
  # ================================================== #
11
11
 
12
- from typing import Dict, Any
12
+ from typing import Dict, Any, Optional
13
+ from dataclasses import dataclass, field
13
14
 
14
15
 
16
+ @dataclass(slots=True)
15
17
  class ReplyContext:
16
18
 
17
19
  AGENT_CONTINUE = "agent.continue"
@@ -21,6 +23,15 @@ class ReplyContext:
21
23
  EXPERT_CALL = "expert.call"
22
24
  EXPERT_RESPONSE = "expert.response"
23
25
 
26
+ type: Optional[object] = None
27
+ bridge_context: Optional[object] = None
28
+ ctx: Optional[object] = None
29
+ prev_ctx: Optional[object] = None
30
+ parent_id: Optional[object] = None
31
+ input: str = ""
32
+ internal: bool = False
33
+ cmds: list = field(default_factory=list)
34
+
24
35
  def __init__(self):
25
36
  """Reply context"""
26
37
  self.type = None
@@ -55,7 +66,11 @@ class ReplyContext:
55
66
  data["prev_ctx"] = self.prev_ctx.to_dict()
56
67
  return data
57
68
 
69
+
70
+ @dataclass(slots=True)
58
71
  class Reply:
72
+ window: Optional[object] = None
73
+
59
74
  def __init__(self, window=None):
60
75
  """
61
76
  Reply core
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2024.12.14 08:00:00 #
9
+ # Updated Date: 2025.09.05 18:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
@@ -65,16 +65,13 @@ class Viewer:
65
65
  limit_clause = f" LIMIT {limit} OFFSET {offset}"
66
66
 
67
67
  params = {}
68
- if search_query:
69
- search_clauses = [f"{column} LIKE :search_query" for column in search_fields]
70
- where_clause = f" WHERE ({' OR '.join(search_clauses)})"
71
- params['search_query'] = f"%{search_query}%"
72
-
73
- # apply filters
74
- # filters = {
75
- # "column1": "value1",
76
- # "column2": "value2" # AND condition
77
- # }
68
+ if search_query is not None:
69
+ sq = search_query.strip()
70
+ if sq:
71
+ search_clauses = [f"{column} LIKE :search_query" for column in search_fields]
72
+ where_clause = f" WHERE ({' OR '.join(search_clauses)})"
73
+ params['search_query'] = f"%{sq}%"
74
+
78
75
  if filters:
79
76
  filter_clauses = [f"{column} = :filter_{column}" for column in filters.keys()]
80
77
  if where_clause == "":
@@ -82,13 +79,13 @@ class Viewer:
82
79
  else:
83
80
  where_clause += f" AND ({' AND '.join(filter_clauses)})"
84
81
  for column, value in filters.items():
85
- params[f"filter_{column}"] = value # filter placeholder prefixed with 'filter_'
82
+ params[f"filter_{column}"] = value
86
83
 
87
84
  query = f"{base_query}{where_clause}{order_clause}{limit_clause}"
88
85
  stmt = text(query).bindparams(**params)
89
86
  with self.database.get_db().connect() as conn:
90
87
  result = conn.execute(stmt).fetchall()
91
- return result
88
+ return [tuple(r) for r in result]
92
89
 
93
90
  def count_rows(
94
91
  self,
@@ -116,15 +113,12 @@ class Viewer:
116
113
  else:
117
114
  search_fields = tables[table]['search_fields']
118
115
 
119
- if search_query:
120
- where_clause = f" WHERE {' OR '.join([f'{column} LIKE :search_query' for column in search_fields])}"
121
- params['search_query'] = f"%{search_query}%"
116
+ if search_query is not None:
117
+ sq = search_query.strip()
118
+ if sq:
119
+ where_clause = f" WHERE {' OR '.join([f'{column} LIKE :search_query' for column in search_fields])}"
120
+ params['search_query'] = f"%{sq}%"
122
121
 
123
- # apply filters
124
- # filters = {
125
- # "column1": "value1",
126
- # "column2": "value2" # AND condition
127
- # }
128
122
  if filters:
129
123
  filter_clauses = [f"{column} = :filter_{column}" for column in filters.keys()]
130
124
  if where_clause == "":
@@ -132,13 +126,13 @@ class Viewer:
132
126
  else:
133
127
  where_clause += f" AND ({' AND '.join(filter_clauses)})"
134
128
  for column, value in filters.items():
135
- params[f"filter_{column}"] = value # filter placeholder prefixed with 'filter_'
129
+ params[f"filter_{column}"] = value
136
130
 
137
131
  query = f"{base_query}{where_clause}"
138
132
  stmt = text(query).bindparams(**params)
139
133
  with self.database.get_db().connect() as conn:
140
134
  count = conn.execute(stmt).scalar()
141
- return count
135
+ return int(count) if count is not None else 0
142
136
 
143
137
  def is_auto_backup(self) -> bool:
144
138
  """
@@ -154,14 +148,12 @@ class Viewer:
154
148
 
155
149
  :param data: Dictionary with table and row_id keys
156
150
  """
157
- # create backup
158
151
  if self.is_auto_backup():
159
152
  backup_path = self.database.make_backup()
160
153
  if backup_path:
161
154
  msg = f"[DB] Created DB backup: {backup_path}"
162
155
  self.log(msg)
163
156
 
164
- # delete row
165
157
  with self.database.get_db().begin() as conn:
166
158
  conn.execute(
167
159
  text(f"DELETE FROM {data['table']} WHERE id = :row_id")
@@ -184,35 +176,30 @@ class Viewer:
184
176
  timestamp_columns = tables[data['table']]['timestamp_columns']
185
177
  primary_key = tables[data['table']]['primary_key']
186
178
 
187
- # check JSON
188
179
  if field in json_columns or field.endswith("_json"):
189
180
  try:
190
- value = json.dumps(json.loads(value)) # validate and pack JSON
181
+ value = json.dumps(json.loads(value))
191
182
  except:
192
183
  raise ValueError(f"Invalid JSON value for column {field}")
193
184
 
194
- # check timestamp
195
185
  if field in timestamp_columns or field.endswith("_ts"):
196
186
  try:
197
187
  value = int(value)
198
188
  except:
199
189
  raise ValueError(f"Invalid timestamp value for column {field}")
200
190
 
201
- # check foreign id field
202
191
  if field.endswith("_id"):
203
192
  try:
204
193
  value = int(value)
205
194
  except:
206
195
  raise ValueError(f"Invalid _id value for column {field}")
207
196
 
208
- # create backup
209
197
  if self.is_auto_backup():
210
198
  backup_path = self.database.make_backup()
211
199
  if backup_path:
212
200
  msg = f"[DB] Created DB backup: {backup_path}"
213
201
  self.log(msg)
214
202
 
215
- # update row
216
203
  with self.database.get_db().begin() as conn:
217
204
  conn.execute(
218
205
  text(f"UPDATE {data['table']} SET {data['field']} = :value WHERE {primary_key} = :id")
@@ -229,17 +216,15 @@ class Viewer:
229
216
  :param data: Dictionary with table key
230
217
  :param reset: Reset table sequence
231
218
  """
232
- # create backup
233
219
  if self.is_auto_backup():
234
220
  backup_path = self.database.make_backup()
235
221
  if backup_path:
236
222
  msg = f"[DB] Created DB backup: {backup_path}"
237
223
  self.log(msg)
238
224
 
239
- # truncate table
240
225
  with self.database.get_db().begin() as conn:
241
226
  conn.execute(text(f"DELETE FROM {data['table']}"))
242
- if reset: # reset table sequence (autoincrement)
227
+ if reset:
243
228
  conn.execute(text(f"DELETE FROM sqlite_sequence WHERE name='{data['table']}'"))
244
229
  msg = f"[DB] Truncated table {data['table']}"
245
230
  else:
@@ -6,13 +6,24 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.12 19:00:00 #
9
+ # Updated Date: 2025.09.05 18:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import io
13
+ from dataclasses import dataclass, field
13
14
 
15
+
16
+ @dataclass(slots=True)
14
17
  class PidData:
15
18
 
19
+ pid: object = None
20
+ meta: object = None
21
+ images_appended: list = field(default_factory=list)
22
+ urls_appended: list = field(default_factory=list)
23
+ files_appended: list = field(default_factory=list)
24
+ _buffer: io.StringIO = field(default_factory=io.StringIO)
25
+ is_cmd: bool = False
26
+
16
27
  def __init__(self, pid, meta=None):
17
28
  """Pid Data"""
18
29
  self.pid = pid