pygpt-net 2.6.0.post1__py3-none-any.whl → 2.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +4 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +13 -9
  4. pygpt_net/controller/chat/response.py +5 -1
  5. pygpt_net/controller/model/editor.py +45 -4
  6. pygpt_net/controller/presets/editor.py +71 -16
  7. pygpt_net/controller/presets/presets.py +4 -3
  8. pygpt_net/core/agents/provider.py +2 -1
  9. pygpt_net/core/agents/runner.py +114 -8
  10. pygpt_net/core/agents/runners/helpers.py +3 -2
  11. pygpt_net/core/agents/runners/llama_workflow.py +176 -22
  12. pygpt_net/core/agents/runners/loop.py +22 -13
  13. pygpt_net/core/experts/experts.py +17 -23
  14. pygpt_net/core/idx/chat.py +24 -34
  15. pygpt_net/core/idx/response.py +5 -2
  16. pygpt_net/core/locale/locale.py +73 -45
  17. pygpt_net/core/render/web/body.py +152 -207
  18. pygpt_net/core/render/web/renderer.py +4 -2
  19. pygpt_net/data/config/config.json +2 -2
  20. pygpt_net/data/config/models.json +2 -2
  21. pygpt_net/data/locale/locale.de.ini +10 -8
  22. pygpt_net/data/locale/locale.en.ini +10 -8
  23. pygpt_net/data/locale/locale.es.ini +10 -8
  24. pygpt_net/data/locale/locale.fr.ini +10 -8
  25. pygpt_net/data/locale/locale.it.ini +10 -8
  26. pygpt_net/data/locale/locale.pl.ini +10 -8
  27. pygpt_net/data/locale/locale.uk.ini +10 -8
  28. pygpt_net/data/locale/locale.zh.ini +10 -8
  29. pygpt_net/item/ctx.py +2 -1
  30. pygpt_net/plugin/cmd_files/worker.py +19 -16
  31. pygpt_net/provider/agents/base.py +4 -1
  32. pygpt_net/provider/agents/llama_index/codeact_workflow.py +95 -0
  33. pygpt_net/provider/agents/llama_index/legacy/__init__.py +0 -0
  34. pygpt_net/provider/agents/llama_index/{openai.py → legacy/openai.py} +2 -2
  35. pygpt_net/provider/agents/llama_index/{openai_assistant.py → legacy/openai_assistant.py} +2 -2
  36. pygpt_net/provider/agents/llama_index/{planner.py → legacy/planner.py} +3 -3
  37. pygpt_net/provider/agents/llama_index/{react.py → legacy/react.py} +3 -3
  38. pygpt_net/provider/agents/llama_index/openai_workflow.py +52 -0
  39. pygpt_net/provider/agents/llama_index/planner_workflow.py +115 -0
  40. pygpt_net/provider/agents/llama_index/react_workflow.py +6 -4
  41. pygpt_net/provider/agents/llama_index/workflow/__init__.py +0 -0
  42. pygpt_net/provider/agents/llama_index/{codeact_agent_custom.py → workflow/codeact.py} +124 -8
  43. pygpt_net/provider/agents/llama_index/workflow/events.py +24 -0
  44. pygpt_net/provider/agents/llama_index/workflow/openai.py +634 -0
  45. pygpt_net/provider/agents/llama_index/workflow/planner.py +601 -0
  46. pygpt_net/provider/agents/openai/agent.py +1 -0
  47. pygpt_net/provider/agents/openai/agent_b2b.py +2 -0
  48. pygpt_net/provider/agents/openai/agent_planner.py +1 -0
  49. pygpt_net/provider/agents/openai/agent_with_experts.py +1 -0
  50. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +1 -0
  51. pygpt_net/provider/agents/openai/agent_with_feedback.py +1 -0
  52. pygpt_net/provider/agents/openai/evolve.py +1 -0
  53. pygpt_net/provider/core/preset/patch.py +11 -17
  54. pygpt_net/ui/widget/lists/experts.py +3 -2
  55. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/METADATA +14 -6
  56. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/RECORD +59 -53
  57. pygpt_net/data/config/presets/agent_react_workflow.json +0 -34
  58. pygpt_net/provider/agents/llama_index/code_act.py +0 -58
  59. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.0.post1.dist-info → pygpt_net-2.6.1.dist-info}/entry_points.txt +0 -0

pygpt_net/provider/agents/llama_index/planner_workflow.py
@@ -0,0 +1,115 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.08.14 03:00:00 #
+ # ================================================== #
+
+ from typing import Dict, Any, List
+
+ from pygpt_net.core.bridge import BridgeContext
+ from pygpt_net.core.types import (
+     AGENT_TYPE_LLAMA,
+     AGENT_MODE_WORKFLOW,
+ )
+ from llama_index.core.llms.llm import LLM
+ from llama_index.core.tools.types import BaseTool
+
+ from .workflow.planner import (
+     DEFAULT_INITIAL_PLAN_PROMPT,
+     DEFAULT_PLAN_REFINE_PROMPT,
+     DEFAULT_EXECUTE_PROMPT
+ )
+ from ..base import BaseAgent
+
+ class PlannerAgent(BaseAgent):
+     def __init__(self, *args, **kwargs):
+         super(PlannerAgent, self).__init__(*args, **kwargs)
+         self.id = "planner"
+         self.type = AGENT_TYPE_LLAMA
+         self.mode = AGENT_MODE_WORKFLOW
+         self.name = "Planner"
+
+     def get_agent(self, window, kwargs: Dict[str, Any]):
+         """
+         Get agent instance
+
+         :param window: Window instance
+         :param kwargs: Agent parameters
+         :return: PlannerWorkflow instance
+         """
+         from .workflow.planner import PlannerWorkflow
+
+         context = kwargs.get("context", BridgeContext())
+         preset = context.preset
+         tools: List[BaseTool] = kwargs.get("tools", []) or []
+         llm: LLM = kwargs.get("llm", None)
+         verbose: bool = kwargs.get("verbose", False)
+         max_steps: int = kwargs.get("max_steps", 12)
+
+         # get prompts from options or use defaults
+         prompt_step = self.get_option(preset, "step", "prompt")
+         prompt_plan_initial = self.get_option(preset, "plan", "prompt")
+         prompt_plan_refine = self.get_option(preset, "plan_refine", "prompt")
+         if not prompt_step:
+             prompt_step = DEFAULT_EXECUTE_PROMPT
+         if not prompt_plan_initial:
+             prompt_plan_initial = DEFAULT_INITIAL_PLAN_PROMPT
+         if not prompt_plan_refine:
+             prompt_plan_refine = DEFAULT_PLAN_REFINE_PROMPT
+
+         return PlannerWorkflow(
+             tools=tools,
+             llm=llm,
+             verbose=verbose,
+             max_steps=max_steps,
+             system_prompt=prompt_step,
+             initial_plan_prompt=prompt_plan_initial,
+             plan_refine_prompt=prompt_plan_refine,
+         )
+
+     def get_options(self) -> Dict[str, Any]:
+         """
+         Return Agent options
+
+         :return: dict of options
+         """
+         return {
+             "step": {
+                 "label": "Execute prompt",
+                 "options": {
+                     "prompt": {
+                         "type": "textarea",
+                         "label": "Prompt",
+                         "description": "Steps execute prompt",
+                         "default": DEFAULT_EXECUTE_PROMPT,
+                     },
+                 }
+             },
+             "plan": {
+                 "label": "Planner (initial)",
+                 "options": {
+                     "prompt": {
+                         "type": "textarea",
+                         "label": "Prompt",
+                         "description": "Initial plan prompt",
+                         "default": DEFAULT_INITIAL_PLAN_PROMPT,
+                     },
+                 }
+             },
+             "plan_refine": {
+                 "label": "Planner (refine)",
+                 "options": {
+                     "prompt": {
+                         "type": "textarea",
+                         "label": "Prompt",
+                         "description": "Plan refine prompt",
+                         "default": DEFAULT_PLAN_REFINE_PROMPT,
+                     },
+                 }
+             },
+         }
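
Note on the options schema above: get_options() groups each editable prompt under a section key ("step", "plan", "plan_refine") with a nested "prompt" field, and get_agent() reads them back via self.get_option(preset, section, "prompt"), falling back to the workflow defaults when a preset leaves a prompt empty. The following is a minimal sketch of that fallback pattern only; read_prompt and the flat dict layout are hypothetical stand-ins for the preset lookup, not the package's actual API.

# Minimal sketch of the option-fallback pattern used in get_agent() above.
# `read_prompt` is a hypothetical stand-in for BaseAgent.get_option(); the real
# lookup resolves values stored on the preset, which is not shown here.
from typing import Any, Dict, Optional

DEFAULTS = {
    ("step", "prompt"): "DEFAULT_EXECUTE_PROMPT",
    ("plan", "prompt"): "DEFAULT_INITIAL_PLAN_PROMPT",
    ("plan_refine", "prompt"): "DEFAULT_PLAN_REFINE_PROMPT",
}

def read_prompt(preset_options: Dict[str, Dict[str, Any]], section: str, key: str) -> Optional[str]:
    # assumed layout: {"plan": {"prompt": "..."}, ...}
    return (preset_options.get(section) or {}).get(key)

def resolve_prompt(preset_options: Dict[str, Dict[str, Any]], section: str, key: str = "prompt") -> str:
    value = read_prompt(preset_options, section, key)
    # empty or missing values fall back to the defaults, like the `if not prompt_*` checks above
    return value if value else DEFAULTS[(section, key)]

assert resolve_prompt({"plan": {"prompt": ""}}, "plan") == "DEFAULT_INITIAL_PLAN_PROMPT"
assert resolve_prompt({"plan": {"prompt": "My plan prompt"}}, "plan") == "My plan prompt"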

pygpt_net/provider/agents/llama_index/react_workflow.py
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.12 19:00:00 #
+ # Updated Date: 2025.08.14 03:00:00 #
  # ================================================== #

  from typing import Dict, Any
@@ -23,10 +23,10 @@ from ..base import BaseAgent
  class ReactWorkflowAgent(BaseAgent):
      def __init__(self, *args, **kwargs):
          super(ReactWorkflowAgent, self).__init__(*args, **kwargs)
-         self.id = "react_workflow"
+         self.id = "react"
          self.type = AGENT_TYPE_LLAMA
          self.mode = AGENT_MODE_WORKFLOW
-         self.name = "ReAct (Workflow)"
+         self.name = "ReAct"

      def get_agent(self, window, kwargs: Dict[str, Any]):
          """
@@ -36,7 +36,6 @@ class ReactWorkflowAgent(BaseAgent):
          :param kwargs: keyword arguments
          :return: Agent provider instance
          """
-
          from llama_index.core.agent.workflow import ReActAgent as Agent

          tools = kwargs.get("tools", [])
@@ -44,6 +43,7 @@
          llm = kwargs.get("llm", None)
          chat_history = kwargs.get("chat_history", [])
          max_iterations = kwargs.get("max_iterations", 10)
+         system_prompt = kwargs.get("system_prompt", None)

          """
          # TODO: multimodal support
@@ -62,10 +62,12 @@
          )
          return step_engine.as_agent()
          """
+         # system prompt for ReAct agent is added to messages
          return Agent(
              tools=tools,
              llm=llm,
              chat_history=chat_history,
              max_iterations=max_iterations,
+             system_prompt=system_prompt,
              verbose=verbose,
          )
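
The change above routes the preset's system prompt into LlamaIndex's workflow-based ReActAgent instead of the legacy step-engine path kept in the commented-out block. Below is a hedged usage sketch, assuming the llama_index.core.agent.workflow API used in the diff and a separately installed OpenAI LLM integration; the add tool, model name, and prompt text are illustrative, not from the package.

# Hedged sketch: running a workflow-based ReActAgent with a system prompt.
# Assumes llama-index with the agent.workflow API and llama-index-llms-openai installed.
import asyncio

from llama_index.core.agent.workflow import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI

def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

async def main() -> None:
    agent = ReActAgent(
        tools=[FunctionTool.from_defaults(add)],
        llm=OpenAI(model="gpt-4o-mini"),
        system_prompt="You are a careful calculator. Always explain your reasoning.",
    )
    # run() returns a workflow handler; awaiting it yields the final agent output
    response = await agent.run(user_msg="What is 21 + 21?")
    print(str(response))

if __name__ == "__main__":
    asyncio.run(main())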

pygpt_net/provider/agents/llama_index/{codeact_agent_custom.py → workflow/codeact.py}
@@ -1,3 +1,15 @@
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.08.14 01:00:00 #
+ # ================================================== #
+
+ # >>> Based on LlamaIndex CodeActAgent implementation, with custom plugin tool support <<<
+
  import asyncio
  import inspect
  import json
@@ -23,6 +35,9 @@ from llama_index.core.objects import ObjectRetriever
  from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
  from llama_index.core.tools import BaseTool, FunctionTool
  from llama_index.core.workflow import Context
+ from workflows.errors import WorkflowCancelledByUser
+
+ from pygpt_net.provider.agents.llama_index.workflow.events import StepEvent

  DEFAULT_CODE_ACT_PROMPT = """You are a helpful AI assistant that can write and execute Python code to solve problems.
  In addition to executing code using the <execute> tags, you have access to a unified plugin tool via the function tool(cmd, **params).
@@ -107,6 +122,7 @@ class CodeActAgent(BaseWorkflowAgent):
      _plugin_tools: Dict[str, Callable] = PrivateAttr(default_factory=dict)
      _plugin_specs: Optional[List] = PrivateAttr(default_factory=list)
      _plugin_tool_fn: Union[Callable, Awaitable] = PrivateAttr(default=None)
+     _on_stop: Optional[Callable] = PrivateAttr(default=None)

      def __init__(
          self,
@@ -122,6 +138,7 @@
          can_handoff_to: Optional[List[str]] = None,
          llm: Optional[LLM] = None,
          code_act_system_prompt: Union[str, BasePromptTemplate] = DEFAULT_CODE_ACT_PROMPT,
+         on_stop: Optional[Callable] = None,
      ):
          tools = tools or []
          tools.append(FunctionTool.from_defaults(plugin_tool_fn, name=PLUGIN_TOOL_NAME))
@@ -130,6 +147,7 @@
          object.__setattr__(self, "_plugin_tools", plugin_tools or {})
          object.__setattr__(self, "_plugin_tool_fn", plugin_tool_fn)
          object.__setattr__(self, "_plugin_specs", plugin_specs or [])
+         object.__setattr__(self, "_on_stop", on_stop)

          if self._plugin_tools and self._plugin_specs:
              available_commands = "\n".join(self._plugin_specs)
@@ -175,9 +193,26 @@
              code_execute_fn=code_execute_fn,
          )

+     def _stopped(self) -> bool:
+         """
+         Check if the workflow has been stopped.
+
+         :return: True if the workflow is stopped, False otherwise.
+         """
+         if self._on_stop:
+             try:
+                 return self._on_stop()
+             except Exception:
+                 return False
+         return False
+
      def _get_tool_fns(self, tools: Sequence[BaseTool]) -> List[Callable]:
          """
          Get the tool functions while validating that they are valid for CodeActAgent.
+
+         :param tools: A sequence of BaseTool instances.
+         :return: A list of callable functions from the tools.
+         :raises ValueError: If a tool requires context or is not a FunctionTool.
          """
          callables = []
          for tool in tools:
@@ -200,6 +235,14 @@
      def _extract_code_from_response(self, response_text: str) -> Optional[str]:
          """
          Extract code from the LLM response using XML-style <execute> tags.
+
+         Expected format:
+         <execute>
+         print('Hello, World!')
+         </execute>
+
+         :param response_text: The text response from the LLM.
+         :return: The extracted code as a string, or None if no code is found.
          """
          execute_pattern = r"<execute>(.*?)</execute>"
          execute_matches = re.findall(execute_pattern, response_text, re.DOTALL)
@@ -210,10 +253,14 @@
      def _extract_plugin_tool_calls(self, response_text: str) -> List[Dict]:
          """
          Extract plugin tool calls from the LLM response.
+
          Expected format (JSON inside XML-style <tool> tags):
          <tool>
          { "cmd": "tool_name", "params": {"arg": "value"} }
          </tool>
+
+         :param response_text: The text response from the LLM.
+         :return: A list of dictionaries representing the plugin tool calls.
          """
          pattern = r"<tool>(.*?)</tool>"
          matches = re.findall(pattern, response_text, re.DOTALL)
@@ -227,9 +274,48 @@
                  continue
          return plugin_calls

+     def _emit_step_event(
+         self,
+         ctx: Context,
+         name: str,
+         index: Optional[int] = None,
+         total: Optional[int] = None,
+         meta: Optional[dict] = None,
+     ) -> None:
+         """
+         Emits a step event to the context stream.
+
+         :param ctx: The context to write the event to.
+         :param name: The name of the step (e.g., "make_plan", "execute_plan", "subtask").
+         :param index: The index of the step (optional).
+         :param total: The total number of steps (optional).
+         :param meta: Additional metadata for the step (optional).
+         """
+         try:
+             ctx.write_event_to_stream(
+                 StepEvent(name=name, index=index, total=total, meta=meta or {})
+             )
+         except Exception:
+             # Fallback for older versions of AgentStream
+             try:
+                 ctx.write_event_to_stream(
+                     AgentStream(
+                         delta="",
+                         response="",
+                         current_agent_name="PlannerWorkflow",
+                         tool_calls=[],
+                         raw={"StepEvent": {"name": name, "index": index, "total": total, "meta": meta or {}}}
+                     )
+                 )
+             except Exception:
+                 pass
+
      def _get_tool_descriptions(self, tools: Sequence[BaseTool]) -> str:
          """
          Generate tool descriptions for the system prompt using tool metadata.
+
+         :param tools: A sequence of BaseTool instances.
+         :return: A string containing the formatted tool descriptions.
          """
          tool_descriptions = []
          tool_fns = self._get_tool_fns(tools)
@@ -253,11 +339,20 @@
      ) -> AgentOutput:
          """
          Takes a step in the agent's workflow, executing code and calling tools as needed.
+
+         :param ctx: The context for the agent's execution.
+         :param llm_input: The input messages for the LLM.
+         :param tools: The tools available for the agent to use.
+         :param memory: The memory object to store and retrieve messages.
+         :return: An AgentOutput object containing the response and any tool calls made.
+         :raises ValueError: If code_execute_fn is not provided or if an unknown tool name is encountered.
          """
          if not self.code_execute_fn:
              raise ValueError("code_execute_fn must be provided for CodeActAgent")

-         scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
+         # self._emit_step_event(ctx, name="step", meta={"query": str(llm_input)})
+
+         scratchpad: List[ChatMessage] = await ctx.store.get(self.scratchpad_key, default=[])
          current_llm_input = [*llm_input, *scratchpad]
          tool_descriptions = self._get_tool_descriptions(tools)
          system_prompt = self.code_act_system_prompt.format(tool_descriptions=tool_descriptions)
@@ -287,6 +382,11 @@
          full_response_text = ""

          async for last_chat_response in response:
+
+             # stop callback
+             if self._stopped():
+                 raise WorkflowCancelledByUser("Workflow was stopped by user.")
+
              delta = last_chat_response.delta or ""
              full_response_text += delta
              raw = (
@@ -331,7 +431,7 @@

          message = ChatMessage(role="assistant", content=full_response_text)
          scratchpad.append(message)
-         await ctx.set(self.scratchpad_key, scratchpad)
+         await ctx.store.set(self.scratchpad_key, scratchpad)

          raw = (
              last_chat_response.raw.model_dump()
@@ -347,12 +447,20 @@
          )

      async def handle_tool_call_results(
-         self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
+         self,
+         ctx: Context,
+         results: List[ToolCallResult],
+         memory: BaseMemory
      ) -> None:
          """
          Handles the results of tool calls made by the agent.
+
+         :param ctx: The context for the agent's execution.
+         :param results: The results of the tool calls made.
+         :param memory: The memory object to store and retrieve messages.
+         :raises ValueError: If an unknown tool name is encountered.
          """
-         scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
+         scratchpad: List[ChatMessage] = await ctx.store.get(self.scratchpad_key, default=[])

          for tool_call_result in results:
              if tool_call_result.tool_name == EXECUTE_TOOL_NAME:
@@ -376,15 +484,23 @@
              else:
                  raise ValueError(f"Unknown tool name: {tool_call_result.tool_name}")

-         await ctx.set(self.scratchpad_key, scratchpad)
+         await ctx.store.set(self.scratchpad_key, scratchpad)

      async def finalize(
-         self, ctx: Context, output: AgentOutput, memory: BaseMemory
+         self,
+         ctx: Context,
+         output: AgentOutput,
+         memory: BaseMemory
      ) -> AgentOutput:
          """
          Finalizes the agent's workflow, clearing the scratchpad and returning the output.
+
+         :param ctx: The context for the agent's execution.
+         :param output: The output from the agent's workflow.
+         :param memory: The memory object to store and retrieve messages.
+         :return: The final output of the agent's workflow.
          """
-         scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
+         scratchpad: List[ChatMessage] = await ctx.store.get(self.scratchpad_key, default=[])
          await memory.aput_messages(scratchpad)
-         await ctx.set(self.scratchpad_key, [])
+         await ctx.store.set(self.scratchpad_key, [])
          return output
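
The docstrings added above spell out the two inline formats this agent parses out of model output: Python wrapped in <execute> tags and plugin calls as JSON inside <tool> tags. The following self-contained sketch shows that parsing step using only the standard library; the regexes mirror the ones in the diff, while the sample response text is invented for illustration.

# Standalone sketch of the <execute>/<tool> extraction described above.
# The patterns mirror those in the diff; the sample LLM response is made up.
import json
import re
from typing import Dict, List, Optional

def extract_code(response_text: str) -> Optional[str]:
    # first <execute>...</execute> block, if any
    matches = re.findall(r"<execute>(.*?)</execute>", response_text, re.DOTALL)
    return matches[0].strip() if matches else None

def extract_plugin_calls(response_text: str) -> List[Dict]:
    calls = []
    for block in re.findall(r"<tool>(.*?)</tool>", response_text, re.DOTALL):
        try:
            calls.append(json.loads(block))
        except json.JSONDecodeError:
            continue  # skip malformed JSON, as the agent does
    return calls

sample = """
<execute>
print('Hello, World!')
</execute>
<tool>
{ "cmd": "read_file", "params": {"path": "notes.txt"} }
</tool>
"""
print(extract_code(sample))          # -> print('Hello, World!')
print(extract_plugin_calls(sample))  # -> [{'cmd': 'read_file', 'params': {'path': 'notes.txt'}}]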

pygpt_net/provider/agents/llama_index/workflow/events.py
@@ -0,0 +1,24 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.08.14 01:00:00 #
+ # ================================================== #
+
+ from typing import Optional, Dict, Any
+ from pydantic import Field
+
+ from llama_index.core.workflow import (
+     Event,
+ )
+
+ class StepEvent(Event):
+     """Represents an event that occurs during a step in the workflow."""
+     name: str  # e.g. "make_plan", "execute_plan", "subtask"
+     index: Optional[int] = None
+     total: Optional[int] = None
+     meta: Dict[str, Any] = Field(default_factory=dict)
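
StepEvent is a plain workflow event that the reworked planner and CodeAct workflows write to the context stream so the UI can render progress (for example "make_plan" or "subtask 2/3"). Below is a hedged sketch of emitting and consuming such an event, assuming the llama_index.core.workflow streaming API; TinyWorkflow and its single step are illustrative, and the StepEvent defined here simply mirrors the class above.

# Hedged sketch: streaming a StepEvent-like progress event from a workflow,
# assuming the llama_index.core.workflow streaming API; TinyWorkflow is illustrative.
import asyncio
from typing import Any, Dict, Optional

from pydantic import Field
from llama_index.core.workflow import Context, Event, StartEvent, StopEvent, Workflow, step

class StepEvent(Event):
    # mirrors pygpt_net/provider/agents/llama_index/workflow/events.py above
    name: str
    index: Optional[int] = None
    total: Optional[int] = None
    meta: Dict[str, Any] = Field(default_factory=dict)

class TinyWorkflow(Workflow):
    @step
    async def run_steps(self, ctx: Context, ev: StartEvent) -> StopEvent:
        for i in range(1, 4):
            # progress events reach the caller through handler.stream_events()
            ctx.write_event_to_stream(StepEvent(name="subtask", index=i, total=3))
        return StopEvent(result="done")

async def main() -> None:
    handler = TinyWorkflow().run()
    async for ev in handler.stream_events():
        if isinstance(ev, StepEvent):
            print(f"{ev.name} {ev.index}/{ev.total}")
    print(await handler)

if __name__ == "__main__":
    asyncio.run(main())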