pygpt-net 2.6.20__py3-none-any.whl → 2.6.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. pygpt_net/CHANGELOG.txt +9 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/agent/agent.py +130 -2
  4. pygpt_net/controller/agent/experts.py +93 -96
  5. pygpt_net/controller/agent/llama.py +2 -1
  6. pygpt_net/controller/assistant/assistant.py +18 -1
  7. pygpt_net/controller/attachment/attachment.py +17 -1
  8. pygpt_net/controller/camera/camera.py +15 -7
  9. pygpt_net/controller/chat/chat.py +2 -2
  10. pygpt_net/controller/chat/common.py +50 -33
  11. pygpt_net/controller/chat/image.py +67 -77
  12. pygpt_net/controller/chat/input.py +94 -166
  13. pygpt_net/controller/chat/output.py +83 -140
  14. pygpt_net/controller/chat/response.py +83 -102
  15. pygpt_net/controller/chat/text.py +116 -149
  16. pygpt_net/controller/ctx/common.py +2 -1
  17. pygpt_net/controller/ctx/ctx.py +86 -6
  18. pygpt_net/controller/files/files.py +13 -1
  19. pygpt_net/controller/idx/idx.py +26 -2
  20. pygpt_net/controller/kernel/reply.py +53 -66
  21. pygpt_net/controller/kernel/stack.py +16 -16
  22. pygpt_net/controller/model/importer.py +2 -1
  23. pygpt_net/controller/model/model.py +62 -3
  24. pygpt_net/controller/settings/editor.py +4 -4
  25. pygpt_net/controller/ui/ui.py +16 -2
  26. pygpt_net/core/agents/observer/evaluation.py +3 -3
  27. pygpt_net/core/agents/provider.py +25 -3
  28. pygpt_net/core/agents/runner.py +4 -1
  29. pygpt_net/core/agents/runners/llama_workflow.py +19 -7
  30. pygpt_net/core/agents/runners/loop.py +3 -1
  31. pygpt_net/core/agents/runners/openai_workflow.py +17 -3
  32. pygpt_net/core/agents/tools.py +4 -1
  33. pygpt_net/core/bridge/context.py +34 -37
  34. pygpt_net/core/ctx/ctx.py +1 -1
  35. pygpt_net/core/db/database.py +2 -2
  36. pygpt_net/core/debug/debug.py +12 -1
  37. pygpt_net/core/dispatcher/dispatcher.py +24 -1
  38. pygpt_net/core/events/app.py +7 -7
  39. pygpt_net/core/events/control.py +26 -26
  40. pygpt_net/core/events/event.py +6 -3
  41. pygpt_net/core/events/kernel.py +2 -2
  42. pygpt_net/core/events/render.py +13 -13
  43. pygpt_net/core/experts/experts.py +76 -82
  44. pygpt_net/core/experts/worker.py +12 -12
  45. pygpt_net/core/models/models.py +5 -1
  46. pygpt_net/core/models/ollama.py +14 -5
  47. pygpt_net/core/render/web/helpers.py +2 -2
  48. pygpt_net/core/render/web/renderer.py +4 -4
  49. pygpt_net/core/types/__init__.py +2 -1
  50. pygpt_net/core/types/agent.py +4 -4
  51. pygpt_net/core/types/base.py +19 -0
  52. pygpt_net/core/types/console.py +6 -6
  53. pygpt_net/core/types/mode.py +8 -8
  54. pygpt_net/core/types/multimodal.py +3 -3
  55. pygpt_net/core/types/openai.py +2 -1
  56. pygpt_net/data/config/config.json +4 -4
  57. pygpt_net/data/config/models.json +19 -3
  58. pygpt_net/data/config/settings.json +14 -14
  59. pygpt_net/data/locale/locale.en.ini +2 -2
  60. pygpt_net/item/ctx.py +256 -240
  61. pygpt_net/item/model.py +59 -116
  62. pygpt_net/item/preset.py +122 -105
  63. pygpt_net/provider/agents/llama_index/workflow/planner.py +3 -3
  64. pygpt_net/provider/agents/openai/agent.py +4 -12
  65. pygpt_net/provider/agents/openai/agent_b2b.py +10 -15
  66. pygpt_net/provider/agents/openai/agent_planner.py +4 -4
  67. pygpt_net/provider/agents/openai/agent_with_experts.py +3 -7
  68. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -8
  69. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -8
  70. pygpt_net/provider/agents/openai/bot_researcher.py +2 -18
  71. pygpt_net/provider/agents/openai/bots/__init__.py +0 -0
  72. pygpt_net/provider/agents/openai/bots/research_bot/__init__.py +0 -0
  73. pygpt_net/provider/agents/openai/bots/research_bot/agents/__init__.py +0 -0
  74. pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
  75. pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -0
  76. pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
  77. pygpt_net/provider/agents/openai/bots/research_bot/manager.py +1 -10
  78. pygpt_net/provider/agents/openai/evolve.py +5 -9
  79. pygpt_net/provider/agents/openai/supervisor.py +4 -8
  80. pygpt_net/provider/core/config/patch.py +10 -3
  81. pygpt_net/provider/core/ctx/db_sqlite/utils.py +43 -43
  82. pygpt_net/provider/core/model/patch.py +11 -1
  83. pygpt_net/provider/core/preset/json_file.py +47 -49
  84. pygpt_net/provider/gpt/agents/experts.py +2 -2
  85. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/METADATA +13 -6
  86. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/RECORD +86 -85
  87. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/LICENSE +0 -0
  88. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/WHEEL +0 -0
  89. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/entry_points.txt +0 -0
@@ -6,10 +6,9 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.18 01:00:00 #
9
+ # Updated Date: 2025.08.23 15:00:00 #
10
10
  # ================================================== #
11
11
 
12
- import os
13
12
  from typing import Optional, Any, Dict
14
13
 
15
14
  from pygpt_net.core.bridge import BridgeContext
@@ -17,12 +16,8 @@ from pygpt_net.core.bridge.context import MultimodalContext
17
16
  from pygpt_net.core.events import Event, AppEvent, KernelEvent, RenderEvent
18
17
  from pygpt_net.core.types import (
19
18
  MODE_AGENT,
20
- MODE_AGENT_LLAMA,
21
- MODE_AGENT_OPENAI,
22
- MODE_LLAMA_INDEX,
23
19
  MODE_ASSISTANT,
24
20
  MODE_IMAGE,
25
- MODE_CHAT,
26
21
  )
27
22
  from pygpt_net.item.ctx import CtxItem
28
23
  from pygpt_net.utils import trans
@@ -56,110 +51,51 @@ class Input:
56
51
 
57
52
  :param force: force send
58
53
  """
59
- # self.window.core.debug.mem("BEGIN") # debug memory usage
60
-
61
- self.window.controller.agent.experts.unlock() # unlock experts
62
- self.window.controller.agent.llama.reset_eval_step() # reset evaluation steps
63
- self.window.controller.ui.tabs.switch_to_first_chat() # switch to first active chat tab
54
+ dispatch = self.window.dispatch
55
+ mode = self.window.core.config.get('mode')
56
+ event = Event(Event.INPUT_BEGIN, {
57
+ 'mode': mode,
58
+ 'force': force,
59
+ 'stop': False,
60
+ })
61
+ dispatch(event)
62
+ stop = event.data.get('stop', False)
64
63
 
65
64
  # get text from input
66
65
  text = self.window.ui.nodes['input'].toPlainText().strip()
67
- mode = self.window.core.config.get('mode')
68
66
 
69
67
  if not force:
70
- self.window.dispatch(AppEvent(AppEvent.INPUT_SENT)) # app event
71
-
72
- # check if not in edit mode
73
- if self.window.controller.ctx.extra.is_editing():
74
- self.window.controller.ctx.extra.edit_submit()
68
+ dispatch(AppEvent(AppEvent.INPUT_SENT)) # app event
69
+ if stop:
75
70
  return
76
71
 
77
- # if agent mode: iterations check, show alert confirm if infinity loop
78
- if self.window.controller.agent.common.is_infinity_loop(mode):
79
- self.window.controller.agent.common.display_infinity_loop_confirm()
80
- return
81
-
82
- # check for agent is selected
83
- if mode in [MODE_AGENT_OPENAI, MODE_AGENT_LLAMA]:
84
- preset = self.window.controller.presets.get_current()
85
- if not preset or preset.name == "*":
86
- self.window.ui.dialogs.alert(
87
- trans("dialog.agent.not_selected"))
88
- return
89
-
90
- # check ollama model
91
- model = self.window.core.config.get('model')
92
- if model:
93
- model_data = self.window.core.models.get(model)
94
- if model_data is not None and model_data.is_ollama():
95
- if (mode == MODE_LLAMA_INDEX or
96
- (mode == MODE_CHAT and not model_data.is_openai_supported() and model_data.is_ollama())):
97
- model_id = model_data.get_ollama_model()
98
- # load ENV vars first
99
- if ('env' in model_data.llama_index
100
- and model_data.llama_index['env'] is not None):
101
- for item in model_data.llama_index['env']:
102
- key = item.get('name', '').strip()
103
- value = item.get('value', '').strip()
104
- os.environ[key] = value
105
- status = self.window.core.models.ollama.check_model(model_id)
106
- is_installed = status.get('is_installed', False)
107
- is_model = status.get('is_model', False)
108
- if not is_installed:
109
- self.window.ui.dialogs.alert(trans("dialog.ollama.not_installed"))
110
- return
111
- if not is_model:
112
- self.window.ui.dialogs.alert(
113
- trans("dialog.ollama.model_not_found").replace("{model}", model_id))
114
- return
115
-
116
72
  # listen for stop command
117
73
  if self.generating \
118
74
  and text is not None \
119
75
  and text.lower().strip() in self.stop_commands:
120
76
  self.window.controller.kernel.stop() # TODO: to chat main
121
- self.window.dispatch(RenderEvent(RenderEvent.CLEAR_INPUT))
77
+ dispatch(RenderEvent(RenderEvent.CLEAR_INPUT))
122
78
  return
123
79
 
124
- # agent modes
125
- if mode == MODE_AGENT:
126
- self.window.controller.agent.legacy.on_user_send(text) # begin Legacy (autonomous) agent flow
127
- elif mode in [MODE_AGENT_LLAMA, MODE_AGENT_OPENAI]:
128
- self.window.controller.agent.llama.on_user_send(text) # begin LlamaIndex adn OpenAI agent flow
129
-
130
80
  # event: user input send (manually)
131
81
  event = Event(Event.USER_SEND, {
82
+ 'mode': mode,
132
83
  'value': text,
133
84
  })
134
- self.window.dispatch(Event(Event.USER_SEND, {
135
- 'value': text,
136
- }))
85
+ dispatch(event)
137
86
  text = event.data['value']
138
87
 
139
- # handle attachments with additional context (not images here)
140
- if mode != MODE_ASSISTANT and self.window.controller.chat.attachment.has(mode):
141
- self.window.dispatch(KernelEvent(KernelEvent.STATE_BUSY, {
142
- "id": "chat",
143
- "msg": "Reading attachments..."
144
- }))
145
- try:
146
- self.window.controller.chat.attachment.handle(mode, text)
147
- return # return here, will be handled in signal
148
- except Exception as e:
149
- self.window.dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
150
- "id": "chat",
151
- "msg": f"Error reading attachments: {str(e)}"
152
- }))
153
- return
88
+ # if attachments, return here - send will be handled via signal after upload
89
+ if self.handle_attachment(mode, text):
90
+ return
154
91
 
155
- # event: handle input
92
+ # kernel event: handle input
156
93
  context = BridgeContext()
157
94
  context.prompt = text
158
- event = KernelEvent(KernelEvent.INPUT_USER, {
95
+ dispatch(KernelEvent(KernelEvent.INPUT_USER, {
159
96
  'context': context,
160
97
  'extra': {},
161
- })
162
- self.window.dispatch(event)
98
+ }))
163
99
 
164
100
  def send(
165
101
  self,
@@ -172,21 +108,13 @@ class Input:
172
108
  :param context: bridge context
173
109
  :param extra: extra data
174
110
  """
175
- text = str(context.prompt)
176
- prev_ctx = context.ctx
177
- force = extra.get("force", False)
178
- reply = extra.get("reply", False)
179
- internal = extra.get("internal", False)
180
- parent_id = extra.get("parent_id", None)
181
- multimodal_ctx = context.multimodal_ctx
182
111
  self.execute(
183
- text=text,
184
- force=force,
185
- reply=reply,
186
- internal=internal,
187
- prev_ctx=prev_ctx,
188
- parent_id=parent_id,
189
- multimodal_ctx=multimodal_ctx,
112
+ text=str(context.prompt),
113
+ force=extra.get("force", False),
114
+ reply=extra.get("reply", False),
115
+ internal=extra.get("internal", False),
116
+ prev_ctx=context.ctx,
117
+ multimodal_ctx=context.multimodal_ctx,
190
118
  )
191
119
 
192
120
  def execute(
@@ -196,7 +124,6 @@ class Input:
196
124
  reply: bool = False,
197
125
  internal: bool = False,
198
126
  prev_ctx: Optional[CtxItem] = None,
199
- parent_id: Optional[int] = None,
200
127
  multimodal_ctx: Optional[MultimodalContext] = None,
201
128
  ):
202
129
  """
@@ -207,10 +134,14 @@ class Input:
207
134
  :param reply: reply mode (from plugins)
208
135
  :param internal: internal call
209
136
  :param prev_ctx: previous context (if reply)
210
- :param parent_id: parent id (if expert)
211
137
  :param multimodal_ctx: multimodal context
212
138
  """
213
- self.window.dispatch(KernelEvent(KernelEvent.STATE_IDLE, {
139
+ core = self.window.core
140
+ controller = self.window.controller
141
+ dispatch = self.window.dispatch
142
+ log = controller.chat.log
143
+
144
+ dispatch(KernelEvent(KernelEvent.STATE_IDLE, {
214
145
  "id": "chat",
215
146
  }))
216
147
 
@@ -218,106 +149,103 @@ class Input:
218
149
  if self.locked and not force and not internal:
219
150
  return
220
151
 
221
- self.log("Begin.")
152
+ log("Begin.")
222
153
  self.generating = True # set generating flag
223
154
 
224
- mode = self.window.core.config.get('mode')
155
+ # check if assistant is selected
156
+ mode = core.config.get('mode')
225
157
  if mode == MODE_ASSISTANT:
226
- # check if assistant is selected
227
- if self.window.core.config.get('assistant') is None \
228
- or self.window.core.config.get('assistant') == "":
229
- self.window.ui.dialogs.alert(trans('error.assistant_not_selected'))
158
+ if not controller.assistant.check():
230
159
  self.generating = False # unlock
231
160
  return
232
- elif self.window.controller.ui.vision.has_vision():
233
- # handle auto capture
234
- self.window.controller.camera.handle_auto_capture()
235
161
 
236
- # unlock Assistants run thread if locked
237
- self.window.controller.assistant.threads.stop = False
238
- self.window.controller.kernel.resume()
162
+ # handle camera capture
163
+ controller.camera.handle_auto_capture(mode)
239
164
 
240
- self.log(f"Input prompt: {text}") # log
165
+ # unlock if locked
166
+ controller.assistant.resume()
167
+ controller.kernel.resume()
241
168
 
242
- # agent mode
243
- if mode == MODE_AGENT:
244
- self.log(f"Agent: input before: {text}")
245
- text = self.window.controller.agent.legacy.on_input_before(text)
169
+ log(f"Input prompt: {text}") # log
246
170
 
247
- # event: before input
171
+ # event: before input handle
248
172
  event = Event(Event.INPUT_BEFORE, {
249
- 'value': text,
250
173
  'mode': mode,
174
+ 'value': text,
175
+ 'multimodal_ctx': multimodal_ctx,
176
+ 'stop': False,
177
+ 'silent': False, # silent mode (without error messages)
251
178
  })
252
- self.window.dispatch(event)
179
+ dispatch(event)
253
180
  text = event.data['value']
181
+ stop = event.data.get('stop', False)
182
+ silent = event.data.get('silent', False)
254
183
 
255
- # check if image captured from camera, # check if attachment exists
256
- camera_captured = (self.window.controller.ui.vision.has_vision()
257
- and self.window.controller.attachment.has(mode))
258
-
259
- # allow empty text input only if multimodal data, otherwise abort
260
- is_audio = multimodal_ctx is not None and multimodal_ctx.is_audio_input
261
- if len(text.strip()) == 0 and (not camera_captured and not is_audio):
262
- self.generating = False # unlock as not generating
263
- return
264
-
265
- # check API key, show monit if no API key for current provider
266
- model = self.window.core.config.get('model')
267
- if model:
268
- model_data = self.window.core.models.get(model)
269
- if not self.window.controller.chat.common.check_api_key(mode=mode, model=model_data, monit=False):
270
- self.window.controller.chat.common.check_api_key(mode=mode, model=model_data, monit=True)
271
- self.generating = False
272
- self.window.dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
184
+ if stop: # abort via event
185
+ self.generating = False
186
+ if not silent:
187
+ dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
273
188
  "id": "chat",
274
189
  }))
275
- return
190
+ return
276
191
 
277
192
  # set state to: busy
278
- self.window.dispatch(KernelEvent(KernelEvent.STATE_BUSY, {
193
+ dispatch(KernelEvent(KernelEvent.STATE_BUSY, {
279
194
  "id": "chat",
280
195
  "msg": trans('status.sending'),
281
196
  }))
282
197
 
283
198
  # clear input field if clear-on-send is enabled
284
- if self.window.core.config.get('send_clear') and not force and not internal:
285
- self.window.dispatch(RenderEvent(RenderEvent.CLEAR_INPUT))
286
-
287
- # prepare ctx, create new ctx meta if there is no ctx, or no ctx selected
288
- if self.window.core.ctx.count_meta() == 0 or self.window.core.ctx.get_current() is None:
289
- self.window.core.ctx.new()
290
- self.window.controller.ctx.update()
291
- self.log("New context created...") # log
292
- else:
293
- # check if current ctx is allowed for this mode - if not, then auto-create new ctx
294
- self.window.controller.ctx.handle_allowed(mode)
199
+ if core.config.get('send_clear') and not force and not internal:
200
+ dispatch(RenderEvent(RenderEvent.CLEAR_INPUT))
295
201
 
296
- # update mode in ctx
297
- self.window.controller.ctx.update_mode_in_current()
202
+ # create ctx, handle allowed, etc.
203
+ dispatch(Event(Event.INPUT_ACCEPT, {
204
+ 'value': text,
205
+ 'multimodal_ctx': multimodal_ctx,
206
+ 'mode': mode,
207
+ }))
298
208
 
299
209
  # send input to API
300
210
  if mode == MODE_IMAGE:
301
- self.window.controller.chat.image.send(
211
+ controller.chat.image.send(
302
212
  text=text,
303
213
  prev_ctx=prev_ctx,
304
- parent_id=parent_id,
305
- ) # image mode
214
+ ) # image generation
306
215
  else:
307
- # async
308
- self.window.controller.chat.text.send(
216
+ controller.chat.text.send(
309
217
  text=text,
310
218
  reply=reply,
311
219
  internal=internal,
312
220
  prev_ctx=prev_ctx,
313
- parent_id=parent_id,
314
221
  multimodal_ctx=multimodal_ctx,
315
- ) # text mode: OpenAI, Langchain, Llama, etc.
222
+ ) # text mode: OpenAI, LlamaIndex, etc.
316
223
 
317
- def log(self, data: Any):
224
+ def handle_attachment(self, mode: str, text: str) -> bool:
318
225
  """
319
- Log data to debug
226
+ Handle attachments with additional context (not images here)
320
227
 
321
- :param data: Data to log
228
+ :param mode: Mode (e.g., MODE_ASSISTANT, MODE_CHAT)
229
+ :param text: Input text
230
+ :return: bool: True if attachments exists, False otherwise
322
231
  """
323
- self.window.controller.chat.log(data)
232
+ controller = self.window.controller
233
+ dispatch = self.window.dispatch
234
+ exists = False
235
+
236
+ # handle attachments with additional context (not images here)
237
+ if mode != MODE_ASSISTANT and controller.chat.attachment.has(mode):
238
+ exists = True
239
+ dispatch(KernelEvent(KernelEvent.STATE_BUSY, {
240
+ "id": "chat",
241
+ "msg": "Reading attachments..."
242
+ }))
243
+ try:
244
+ controller.chat.attachment.handle(mode, text)
245
+ except Exception as e:
246
+ dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
247
+ "id": "chat",
248
+ "msg": f"Error reading attachments: {e}"
249
+ }))
250
+
251
+ return exists