pygpt-net 2.6.0.post1__py3-none-any.whl → 2.6.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +4 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +13 -9
  4. pygpt_net/controller/chat/response.py +5 -1
  5. pygpt_net/controller/model/editor.py +45 -4
  6. pygpt_net/controller/presets/editor.py +71 -16
  7. pygpt_net/controller/presets/presets.py +4 -3
  8. pygpt_net/core/agents/provider.py +2 -1
  9. pygpt_net/core/agents/runner.py +114 -8
  10. pygpt_net/core/agents/runners/helpers.py +3 -2
  11. pygpt_net/core/agents/runners/llama_workflow.py +176 -22
  12. pygpt_net/core/agents/runners/loop.py +22 -13
  13. pygpt_net/core/experts/experts.py +17 -23
  14. pygpt_net/core/idx/chat.py +24 -34
  15. pygpt_net/core/idx/response.py +5 -2
  16. pygpt_net/core/locale/locale.py +73 -45
  17. pygpt_net/core/render/web/body.py +152 -207
  18. pygpt_net/core/render/web/renderer.py +4 -2
  19. pygpt_net/data/config/config.json +2 -2
  20. pygpt_net/data/config/models.json +2 -2
  21. pygpt_net/data/locale/locale.de.ini +10 -8
  22. pygpt_net/data/locale/locale.en.ini +10 -8
  23. pygpt_net/data/locale/locale.es.ini +10 -8
  24. pygpt_net/data/locale/locale.fr.ini +10 -8
  25. pygpt_net/data/locale/locale.it.ini +10 -8
  26. pygpt_net/data/locale/locale.pl.ini +10 -8
  27. pygpt_net/data/locale/locale.uk.ini +10 -8
  28. pygpt_net/data/locale/locale.zh.ini +10 -8
  29. pygpt_net/item/ctx.py +2 -1
  30. pygpt_net/plugin/cmd_files/worker.py +19 -16
  31. pygpt_net/provider/agents/base.py +4 -1
  32. pygpt_net/provider/agents/llama_index/codeact_workflow.py +95 -0
  33. pygpt_net/provider/agents/llama_index/legacy/__init__.py +0 -0
  34. pygpt_net/provider/agents/llama_index/{openai.py → legacy/openai.py} +2 -2
  35. pygpt_net/provider/agents/llama_index/{openai_assistant.py → legacy/openai_assistant.py} +2 -2
  36. pygpt_net/provider/agents/llama_index/{planner.py → legacy/planner.py} +3 -3
  37. pygpt_net/provider/agents/llama_index/{react.py → legacy/react.py} +3 -3
  38. pygpt_net/provider/agents/llama_index/openai_workflow.py +52 -0
  39. pygpt_net/provider/agents/llama_index/planner_workflow.py +115 -0
  40. pygpt_net/provider/agents/llama_index/react_workflow.py +6 -4
  41. pygpt_net/provider/agents/llama_index/workflow/__init__.py +0 -0
  42. pygpt_net/provider/agents/llama_index/{codeact_agent_custom.py → workflow/codeact.py} +124 -8
  43. pygpt_net/provider/agents/llama_index/workflow/events.py +24 -0
  44. pygpt_net/provider/agents/llama_index/workflow/openai.py +634 -0
  45. pygpt_net/provider/agents/llama_index/workflow/planner.py +601 -0
  46. pygpt_net/provider/agents/openai/agent.py +1 -0
  47. pygpt_net/provider/agents/openai/agent_b2b.py +2 -0
  48. pygpt_net/provider/agents/openai/agent_planner.py +1 -0
  49. pygpt_net/provider/agents/openai/agent_with_experts.py +1 -0
  50. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +1 -0
  51. pygpt_net/provider/agents/openai/agent_with_feedback.py +1 -0
  52. pygpt_net/provider/agents/openai/evolve.py +1 -0
  53. pygpt_net/provider/core/preset/patch.py +11 -17
  54. pygpt_net/ui/widget/lists/experts.py +3 -2
  55. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/METADATA +14 -6
  56. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/RECORD +59 -53
  57. pygpt_net/data/config/presets/agent_react_workflow.json +0 -34
  58. pygpt_net/provider/agents/llama_index/code_act.py +0 -58
  59. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/entry_points.txt +0 -0

pygpt_net/core/agents/runners/llama_workflow.py
@@ -6,11 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.03 14:00:00 #
+# Updated Date: 2025.08.14 01:00:00 #
 # ================================================== #

 import re
-from typing import Optional, Any, List
+from typing import Optional, Any, List, Union

 from llama_index.core.workflow import Context
 from llama_index.core.agent.workflow import (
@@ -19,9 +19,12 @@ from llama_index.core.agent.workflow import (
     AgentStream,
     AgentOutput,
 )
+from workflows.errors import WorkflowCancelledByUser
+
 from pygpt_net.core.bridge.worker import BridgeSignals
 from pygpt_net.core.events import KernelEvent
 from pygpt_net.item.ctx import CtxItem
+from pygpt_net.provider.agents.llama_index.workflow.events import StepEvent

 from .base import BaseRunner

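The `StepEvent` imported here is defined in the new `workflow/events.py` (file 43 above, +24 lines), which this diff does not expand. Based on the fields the runner reads further down (`event.name`, `event.index`, `event.total`, `event.meta`), a minimal sketch might look like the following; the field names and defaults are inferred from usage, and the base class is assumed to be llama_index's workflow `Event`:

```python
# Hypothetical sketch of StepEvent (workflow/events.py is not shown in this
# diff); fields are inferred from the runner code below, which reads
# event.name, event.index, event.total and event.meta.
from typing import Any, Dict, Optional

from llama_index.core.workflow import Event


class StepEvent(Event):
    """Signals a step boundary so the runner can split streamed output."""
    name: str = ""                          # step name reported by the workflow
    index: int = 0                          # position of this step in the cycle
    total: int = 0                          # total number of steps, if known
    meta: Optional[Dict[str, Any]] = None   # extra step-specific payload
```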
@@ -64,22 +67,103 @@ class LlamaWorkflow(BaseRunner):
         agent_ctx = Context(agent)
         memory = self.window.core.idx.chat.get_memory_buffer(history, llm)
         self.set_busy(signals)
-        await self.run_agent(
-            agent=agent,
-            ctx=agent_ctx,
-            query=prompt,
-            memory=memory,
-            verbose=verbose,
-            item_ctx=ctx,
-            signals=signals,
-        )
+
+        try:
+            ctx = await self.run_agent(
+                agent=agent,
+                ctx=agent_ctx,
+                query=prompt,
+                memory=memory,
+                verbose=verbose,
+                item_ctx=ctx,
+                signals=signals,
+                use_partials=self.window.core.config.get("agent.openai.response.split", True)
+            )
+        except WorkflowCancelledByUser:
+            print("\n\n[STOP] Workflow stopped by user.")
+        except Exception as e:
+            self.window.core.debug.log(f"Error running agent workflow: {e}")
+            ctx.extra["error"] = str(e)
+            self.set_idle(signals)
+            return False
+
+        if ctx.partial:
+            ctx.partial = False  # reset partial flag
+
+        response_ctx = self.make_response(ctx)
+        self.end_stream(response_ctx, signals)
+        self.send_response(response_ctx, signals, KernelEvent.APPEND_DATA)  # send response
+
+        self.set_idle(signals)
+        return True
+
+    async def run_once(
+            self,
+            agent: Any,
+            ctx: CtxItem,
+            prompt: str,
+            signals: BridgeSignals,
+            verbose: bool = False,
+            history: List[CtxItem] = None,
+            llm: Any = None,
+    ) -> Union[CtxItem, None]:
+        """
+        Run agent workflow
+
+        :param agent: Agent instance
+        :param ctx: Input context
+        :param prompt: input text
+        :param signals: BridgeSignals
+        :param verbose: verbose mode
+        :param history: chat history
+        :param llm: LLM instance
+        :return: CtxItem with the agent output, or None if stopped or on error
+        """
+        if self.is_stopped():
+            return None  # abort if stopped
+
+        memory = self.window.core.idx.chat.get_memory_buffer(history, llm)
+        agent_ctx = Context(agent)
+        try:
+            ctx = await self.run_agent(
+                agent=agent,
+                ctx=agent_ctx,
+                query=prompt,
+                memory=memory,
+                verbose=verbose,
+                item_ctx=ctx,
+                signals=signals,
+                use_partials=False,  # single pass, do not split into partial items
+            )
+        except WorkflowCancelledByUser:
+            print("\n\n[STOP] Workflow stopped by user.")
+        except Exception as e:
+            self.window.core.debug.log(f"Error running agent workflow: {e}")
+            ctx.extra["error"] = str(e)
+            return None
+
+        if ctx.agent_final_response:
+            ctx.output = ctx.agent_final_response  # set output to current context
+        else:
+            ctx.output = ctx.live_output
+
+        return ctx
+
+    def make_response(
+            self,
+            ctx: CtxItem
+    ) -> CtxItem:
+        """
+        Create a response context item with the given input and output.
+
+        :param ctx: CtxItem - the context item to use as a base
+        :return: CtxItem - the prepared response context item
+        """
         response_ctx = self.add_ctx(ctx, with_tool_outputs=True)
-        response_ctx.set_input("inp")
+        response_ctx.set_input("")

         prev_output = ctx.live_output
-        # remove all <execute>...</execute>
         if prev_output:
-            prev_output = re.sub(r'<execute>.*?</execute>', '', prev_output, flags=re.DOTALL)
+            prev_output = self.filter_output(prev_output)  # remove all <execute>...</execute>

         response_ctx.set_agent_final_response(ctx.agent_final_response)  # always set to further use
         response_ctx.set_output(prev_output)  # append from stream
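The new `run_once()` returns the finished `CtxItem` directly instead of emitting UI signals, which is what lets the synchronous call sites later in this diff delegate to `runner.call_once()`. A minimal usage sketch, assuming an event-loop wrapper along these lines (the wrapper itself is illustrative; pygpt-net's actual `call_once()` lives in `core/agents/runner.py` and is not expanded here):

```python
# Illustrative wrapper only, not the shipped call_once() implementation.
import asyncio

def call_once_sketch(runner, agent, ctx, prompt, history, llm):
    """Drive one non-partial agent pass from synchronous code."""
    return asyncio.run(
        runner.run_once(
            agent=agent,
            ctx=ctx,
            prompt=prompt,
            signals=None,   # no UI bridge needed for a one-shot call
            history=history,
            llm=llm,
        )
    )
```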
@@ -94,12 +178,19 @@ class LlamaWorkflow(BaseRunner):
             self.window.core.agents.tools.append_tool_outputs(response_ctx)
         else:
             self.window.core.agents.tools.extract_tool_outputs(response_ctx)
-        self.end_stream(response_ctx, signals)

-        # send response
-        self.send_response(response_ctx, signals, KernelEvent.APPEND_DATA)
-        self.set_idle(signals)
-        return True
+        return response_ctx
+
+    def filter_output(self, output: str) -> str:
+        """
+        Filter output to remove unwanted tags
+
+        :param output: Output string
+        :return: Filtered output string
+        """
+        # Remove <execute>...</execute> tags
+        filtered_output = re.sub(r'<execute>.*?</execute>', '', output, flags=re.DOTALL)
+        return filtered_output

     async def run_agent(
             self,
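Extracting the regex into `filter_output()` lets both `make_response()` and `on_next_ctx()` strip `<execute>…</execute>` blocks the same way. The pattern is non-greedy and uses `re.DOTALL`, so each block is removed independently even when it spans multiple lines; a standalone check of the same expression:

```python
# Standalone check of the regex used by filter_output(): non-greedy matching
# with re.DOTALL strips each <execute>...</execute> block, including
# multi-line ones, while keeping the surrounding text.
import re

text = "before <execute>x = 1\nprint(x)</execute> middle <execute>y</execute> after"
print(re.sub(r'<execute>.*?</execute>', '', text, flags=re.DOTALL))
# -> "before  middle  after"
```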
@@ -109,7 +200,9 @@
             memory,
             verbose=False,
             item_ctx: Optional[CtxItem] = None,
-            signals: Optional[BridgeSignals] = None):
+            signals: Optional[BridgeSignals] = None,
+            use_partials: bool = True,
+    ):
         """
         Run agent workflow
         This method runs the agent's workflow, processes tool calls, and streams events.
@@ -121,9 +214,16 @@ class LlamaWorkflow(BaseRunner):
         :param verbose: Verbose mode (default: False)
         :param item_ctx: Optional CtxItem for additional context
         :param signals: Optional BridgeSignals for communication
+        :param use_partials: If True, use partial context items for streaming
         :return: handler for the agent workflow
         """
-        handler = agent.run(query, ctx=ctx, memory=memory, verbose=verbose)
+        handler = agent.run(
+            query,
+            ctx=ctx,
+            memory=memory,
+            verbose=verbose,
+            on_stop=self.is_stopped,
+        )
         if verbose:
             print(f"User: {query}")

@@ -134,7 +234,11 @@

         async for event in handler.stream_events():
             if self.is_stopped():
+                # persist current output on stop
+                item_ctx.output = item_ctx.live_output
+                self.window.core.ctx.update_item(item_ctx)
                 self.end_stream(item_ctx, signals)
+                await handler.cancel_run()  # cancel, will raise WorkflowCancelledByUser
                 break
             if isinstance(event, ToolCallResult):
                 output = f"\n-----------\nExecution result:\n{event.tool_output}"
@@ -155,6 +259,19 @@
                 item_ctx.stream = formatted
                 if item_ctx.stream_agent_output:
                     self.send_stream(item_ctx, signals, begin)
+            elif isinstance(event, StepEvent):
+                self.set_busy(signals)
+                if not use_partials:
+                    continue
+                if verbose:
+                    print("\n\n-----STEP-----\n\n")
+                    print(f"[{event.name}] {event.index}/{event.total} meta={event.meta}")
+                item_ctx = self.on_next_ctx(
+                    item_ctx,
+                    signals=signals,
+                    begin=begin,
+                    stream=True,
+                )
             elif isinstance(event, AgentStream):
                 if verbose:
                     print(f"{event.delta}", end="", flush=True)
@@ -171,4 +288,41 @@
         if verbose:
             print(f"\nFinal response: {answer}")

-        return await handler
+        return item_ctx
+
+    def on_next_ctx(
+            self,
+            ctx: CtxItem,
+            signals: BridgeSignals,
+            begin: bool = False,
+            stream: bool = True,
+    ) -> CtxItem:
+        """
+        Callback for next context in cycle
+
+        :param ctx: CtxItem
+        :param signals: BridgeSignals
+        :param begin: if True, flush current output to before buffer and clear current buffer
+        :param stream: is streaming enabled
+        :return: CtxItem - the next context item in the cycle
+        """
+        # finish current stream
+        ctx.stream = "\n"
+        ctx.extra["agent_output"] = True  # allow usage in history
+        ctx.output = ctx.live_output  # set output to current context
+        ctx.output = self.filter_output(ctx.output)
+        self.window.core.ctx.update_item(ctx)
+
+        if stream:
+            self.send_stream(ctx, signals, begin)
+        self.end_stream(ctx, signals)
+
+        # create and return next context item
+        next_ctx = self.add_next_ctx(ctx)
+        next_ctx.set_input("")
+        next_ctx.set_output("")
+        next_ctx.partial = True
+        next_ctx.extra["agent_output"] = True  # allow usage in history
+
+        self.send_response(next_ctx, signals, KernelEvent.APPEND_DATA)
+        return next_ctx
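Together, the `StepEvent` branch and `on_next_ctx()` implement the output split: each step boundary finalizes the current context item and opens a fresh one flagged `partial`, so the UI appends one item per step, and `run()` clears the flag on the last item. A toy model of that cycle (`ToyCtx` is a stand-in, not pygpt-net's `CtxItem`):

```python
# Toy model of the partial-context cycle; the real cycle is driven by
# StepEvent inside run_agent() above.
from dataclasses import dataclass, field

@dataclass
class ToyCtx:
    output: str = ""
    partial: bool = False
    extra: dict = field(default_factory=dict)

def next_partial(current: ToyCtx) -> ToyCtx:
    """Close the current item and open a fresh partial one, as on_next_ctx does."""
    current.extra["agent_output"] = True   # finished step stays usable in history
    nxt = ToyCtx(partial=True)             # new UI bubble for the next step
    nxt.extra["agent_output"] = True
    return nxt

items = []
ctx = ToyCtx()
for step in ("plan", "execute", "answer"):
    ctx.output += f"[{step}] ..."          # streamed output for this step
    items.append(ctx)
    ctx = next_partial(ctx)                # step boundary: open next item
items[-1].partial = False                  # run() resets the flag at the end
```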

pygpt_net/core/agents/runners/loop.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.03 14:00:00 #
+# Updated Date: 2025.08.14 03:00:00 #
 # ================================================== #

 from typing import Optional, List
@@ -51,20 +51,28 @@ class Loop(BaseRunner):
         if self.is_stopped():
             return ""  # abort if stopped

-        verbose = self.window.core.config.get("agent.llama.verbose", False)
         model = self.window.core.models.get(model_name)
-        llm = self.window.core.idx.llm.get(model, stream=False)
-        kwargs = {
-            "context": BridgeContext(),
-            "tools": tools,
-            "llm": llm,
-            "chat_history": [],
-            "max_iterations": 10,
-            "verbose": verbose,
+        ctx = CtxItem()
+        bridge_context = BridgeContext(
+            ctx=ctx,
+            history=[],
+            model=model,
+            prompt=self.prepare_input(input),
+            stream=False,
+        )
+        extra = {
+            "agent_provider": "react",  # use React workflow provider
+            "agent_tools": tools,
         }
-        provider = self.window.core.agents.provider.get("react")
-        agent = provider.get_agent(self.window, kwargs)
-        return agent.chat(self.prepare_input(input))
+        response_ctx = self.window.core.agents.runner.call_once(
+            context=bridge_context,
+            extra=extra,
+            signals=None,
+        )
+        if response_ctx:
+            return str(response_ctx.output)
+        else:
+            return "No response from evaluator."

     def run_next(
             self,
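The same refactor repeats in `experts.py` and `idx/chat.py` below: callers no longer build an agent from a provider `kwargs` dict; they describe the call with a `BridgeContext` plus an `extra` dict and delegate to the shared `runner.call_once()`. A hedged sketch of the convention, with field names taken from the diff and the surrounding objects (`window`, imports of `CtxItem`/`BridgeContext`) assumed to be in scope, as they are in `loop.py`:

```python
# Sketch of the call_once() convention introduced in this release; the
# helper function itself is illustrative, not part of the package.
def ask_react_once(window, tools, model, prompt: str) -> str:
    bridge_context = BridgeContext(
        ctx=CtxItem(),              # fresh context item for this call
        history=[],
        model=model,
        prompt=prompt,
        stream=False,
    )
    extra = {
        "agent_provider": "react",  # ReAct workflow provider
        "agent_tools": tools,
    }
    response_ctx = window.core.agents.runner.call_once(
        context=bridge_context,
        extra=extra,
        signals=None,               # no UI bridge for one-shot calls
    )
    return str(response_ctx.output) if response_ctx else ""
```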
@@ -166,6 +174,7 @@
         context.history = self.window.core.ctx.all(meta_id=ctx.meta.id)
         context.prompt = instruction  # use instruction as prompt
         preset = self.window.controller.presets.get_current()
+        context.preset = preset
         extra = {
             "agent_idx": preset.idx,
             "agent_provider": preset.agent_provider,

pygpt_net/core/experts/experts.py
@@ -6,14 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.11 14:00:00 #
+# Updated Date: 2025.08.14 01:00:00 #
 # ================================================== #

 import json
 from typing import Dict, List, Optional

 from PySide6.QtCore import QRunnable, QObject, Signal, Slot
-from llama_index.core.base.llms.types import ChatMessage, MessageRole
 from llama_index.core.tools import QueryEngineTool

 from pygpt_net.core.types import (
  from pygpt_net.core.types import (
@@ -858,29 +857,24 @@ class ExpertWorker(QRunnable):
858
857
  :return: True if success, False otherwise
859
858
  """
860
859
  history = self.window.core.agents.memory.prepare(context)
861
- if system_prompt:
862
- msg = ChatMessage(
863
- role=MessageRole.SYSTEM,
864
- content=system_prompt
865
- )
866
- history.insert(0, msg)
867
- kwargs = {
868
- "context": context,
869
- "tools": tools,
870
- "llm": llm,
871
- "chat_history": history,
872
- "max_iterations": 30,
873
- "verbose": verbose,
874
- "system_prompt": system_prompt,
875
- "are_commands": self.window.core.config.get("cmd"),
876
- }
877
- provider = self.window.core.agents.provider.get("planner")
878
- agent = provider.get_agent(self.window, kwargs)
879
- response_ctx = self.window.core.agents.runner.llama_plan.run_once(
880
- agent=agent,
860
+ bridge_context = BridgeContext(
881
861
  ctx=ctx,
862
+ system_prompt=system_prompt,
863
+ model=context.model,
882
864
  prompt=query,
883
- verbose=verbose,
865
+ stream=False,
866
+ is_expert_call=True, # mark as expert call
867
+ )
868
+ extra = {
869
+ "agent_provider": "react", # use react workflow provider
870
+ "agent_idx": context.idx, # index to use
871
+ "agent_tools": tools, # tools to use
872
+ "agent_history": history, # already prepared history
873
+ }
874
+ response_ctx = self.window.core.agents.runner.call_once(
875
+ context=bridge_context,
876
+ extra=extra,
877
+ signals=None,
884
878
  )
885
879
  if response_ctx:
886
880
  return str(response_ctx.output)

pygpt_net/core/idx/chat.py
@@ -6,9 +6,9 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.03 14:00:00 #
+# Updated Date: 2025.08.14 01:00:00 #
 # ================================================== #
-import asyncio
+
 import json
 from typing import Optional, Dict, Any, List

@@ -338,7 +338,7 @@
                 tools=tools,
                 ctx=ctx,
                 query=query,
-                history=history,
+                history=context.history,
                 llm=llm,
                 index=index,
                 system_prompt=system_prompt,
@@ -377,7 +377,7 @@
                 tools=tools,
                 ctx=ctx,
                 query=query,
-                history=history,
+                history=context.history,
                 llm=llm,
                 index=index,
                 system_prompt=system_prompt,
@@ -490,7 +490,7 @@
             chat_mode: str = MODE_CHAT,
             verbose: bool = False,

-    ) -> bool:
+    ) -> str:
         """
         Call agent with tools and index

@@ -507,7 +507,6 @@
         :param verbose: Verbose mode, default is False
         :return: True if success, False otherwise
         """
-        index_tool = None
         if index:
             query_engine = index.as_query_engine(
                 llm=llm,
@@ -522,35 +521,26 @@
             )
             tools.append(index_tool)

-        workdir = self.window.core.config.get_user_dir('data')
-        if self.window.core.plugins.get_option("cmd_code_interpreter", "sandbox_ipython"):
-            workdir = "/data"
-
-        kwargs = {
-            "context": context,
-            "tools": tools,
-            "retriever_tool": index_tool,
-            "llm": llm,
-            "chat_history": history,
-            "max_iterations": 0,
-            "verbose": verbose,
-            "system_prompt": system_prompt,
-            "are_commands": True,
-            "workdir": workdir,
-        }
-        provider = self.window.core.agents.provider.get("react_workflow")
-        agent = provider.get_agent(self.window, kwargs)
-
-        kwargs = {
-            "agent": agent,
-            "ctx": ctx,
-            "prompt": query,
-            "signals": signals,
-            "verbose": verbose,
-            "history": history,
-            "llm": llm,
+        bridge_context = BridgeContext(
+            ctx=ctx,
+            model=context.model,
+            history=history,
+            prompt=query,
+            stream=False,
+        )
+        extra = {
+            "agent_provider": "react",  # use React workflow provider
+            "agent_tools": tools,
         }
-        return asyncio.run(self.window.core.agents.runner.llama_workflow.run(**kwargs))
+        response_ctx = self.window.core.agents.runner.call_once(
+            context=bridge_context,
+            extra=extra,
+            signals=None,
+        )
+        if response_ctx:
+            return str(response_ctx.output)
+        else:
+            return "No response from agent."

     def is_stream_allowed(self) -> bool:
         """

pygpt_net/core/idx/response.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.07.22 22:00:00 #
+# Updated Date: 2025.08.14 01:00:00 #
 # ================================================== #

 from typing import Any
@@ -28,7 +28,10 @@
             model: ModelItem,
             response: Any
     ) -> None:
-        pass
+        output = str(response)
+        if output is None:
+            output = ""
+        ctx.set_output(output, "")

     def from_index(
             self,
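One nit in the new body above: `str(response)` never returns `None`, so the `if output is None` branch is unreachable as shipped. If the intent is to guard against a missing response, the check would need to happen before conversion, roughly:

```python
# Presumed intent (an assumption, not the shipped code): treat a missing
# response as empty output before stringifying it.
output = "" if response is None else str(response)
ctx.set_output(output, "")
```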