pygpt-net 2.6.0.post2__py3-none-any.whl → 2.6.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
- pygpt_net/CHANGELOG.txt +4 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +13 -9
- pygpt_net/controller/chat/response.py +5 -1
- pygpt_net/controller/model/editor.py +45 -4
- pygpt_net/controller/presets/editor.py +71 -16
- pygpt_net/controller/presets/presets.py +4 -3
- pygpt_net/core/agents/provider.py +2 -1
- pygpt_net/core/agents/runner.py +114 -8
- pygpt_net/core/agents/runners/helpers.py +3 -2
- pygpt_net/core/agents/runners/llama_workflow.py +176 -22
- pygpt_net/core/agents/runners/loop.py +22 -13
- pygpt_net/core/experts/experts.py +17 -23
- pygpt_net/core/idx/chat.py +24 -34
- pygpt_net/core/idx/response.py +5 -2
- pygpt_net/core/locale/locale.py +73 -45
- pygpt_net/core/render/web/body.py +152 -207
- pygpt_net/core/render/web/renderer.py +4 -2
- pygpt_net/data/config/config.json +2 -2
- pygpt_net/data/config/models.json +2 -2
- pygpt_net/data/locale/locale.de.ini +10 -8
- pygpt_net/data/locale/locale.en.ini +10 -8
- pygpt_net/data/locale/locale.es.ini +10 -8
- pygpt_net/data/locale/locale.fr.ini +10 -8
- pygpt_net/data/locale/locale.it.ini +10 -8
- pygpt_net/data/locale/locale.pl.ini +10 -8
- pygpt_net/data/locale/locale.uk.ini +10 -8
- pygpt_net/data/locale/locale.zh.ini +10 -8
- pygpt_net/item/ctx.py +2 -1
- pygpt_net/plugin/cmd_files/worker.py +19 -16
- pygpt_net/provider/agents/base.py +4 -1
- pygpt_net/provider/agents/llama_index/codeact_workflow.py +95 -0
- pygpt_net/provider/agents/llama_index/legacy/__init__.py +0 -0
- pygpt_net/provider/agents/llama_index/{openai.py → legacy/openai.py} +2 -2
- pygpt_net/provider/agents/llama_index/{openai_assistant.py → legacy/openai_assistant.py} +2 -2
- pygpt_net/provider/agents/llama_index/{planner.py → legacy/planner.py} +3 -3
- pygpt_net/provider/agents/llama_index/{react.py → legacy/react.py} +3 -3
- pygpt_net/provider/agents/llama_index/openai_workflow.py +52 -0
- pygpt_net/provider/agents/llama_index/planner_workflow.py +115 -0
- pygpt_net/provider/agents/llama_index/react_workflow.py +6 -4
- pygpt_net/provider/agents/llama_index/workflow/__init__.py +0 -0
- pygpt_net/provider/agents/llama_index/{codeact_agent_custom.py → workflow/codeact.py} +124 -8
- pygpt_net/provider/agents/llama_index/workflow/events.py +24 -0
- pygpt_net/provider/agents/llama_index/workflow/openai.py +634 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +601 -0
- pygpt_net/provider/agents/openai/agent.py +1 -0
- pygpt_net/provider/agents/openai/agent_b2b.py +2 -0
- pygpt_net/provider/agents/openai/agent_planner.py +1 -0
- pygpt_net/provider/agents/openai/agent_with_experts.py +1 -0
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +1 -0
- pygpt_net/provider/agents/openai/agent_with_feedback.py +1 -0
- pygpt_net/provider/agents/openai/evolve.py +1 -0
- pygpt_net/provider/core/preset/patch.py +11 -17
- pygpt_net/ui/widget/lists/experts.py +3 -2
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.1.dist-info}/METADATA +12 -4
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.1.dist-info}/RECORD +59 -53
- pygpt_net/data/config/presets/agent_react_workflow.json +0 -34
- pygpt_net/provider/agents/llama_index/code_act.py +0 -58
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.1.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.1.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.1.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/agents/llama_index/workflow/planner.py (new file, @@ -0,0 +1,601 @@):

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================== #
# This file is a part of PYGPT package               #
# Website: https://pygpt.net                         #
# GitHub: https://github.com/szczyglis-dev/py-gpt    #
# MIT License                                        #
# Created By : Marcin Szczygliński                   #
# Updated Date: 2025.08.14 03:00:00                  #
# ================================================== #

from typing import List, Optional, Callable
import inspect
from pydantic import BaseModel, Field, ValidationError

from llama_index.core.workflow import (
    Workflow,
    Context,
    StartEvent,
    StopEvent,
    Event,
    step,
)
from llama_index.core.llms.llm import LLM
from llama_index.core.prompts import PromptTemplate
from llama_index.core.tools.types import BaseTool

from llama_index.core.agent.workflow import (
    FunctionAgent,
    ToolCall,
    ToolCallResult,
    AgentStream,
    AgentOutput,
)

from .events import StepEvent

try:
    from llama_index.core.memory import ChatMemoryBuffer
except Exception:
    try:
        from llama_index.memory import ChatMemoryBuffer  # old import
    except Exception:
        ChatMemoryBuffer = None

class SubTask(BaseModel):
    name: str = Field(..., description="The name of the sub-task.")
    input: str = Field(..., description="The input prompt for the sub-task.")
    expected_output: str = Field(..., description="The expected output of the sub-task.")
    dependencies: List[str] = Field(
        ..., description="Names of sub-tasks that must be completed before this sub-task."
    )


class Plan(BaseModel):
    sub_tasks: List[SubTask] = Field(..., description="The sub-tasks in the plan.")


DEFAULT_INITIAL_PLAN_PROMPT = """\
You have the following prior context/memory (may be empty):
{memory_context}

Think step-by-step. Given a task and a set of tools, create a comprehensive, end-to-end plan to accomplish the task.
Keep in mind not every task needs to be decomposed into multiple sub-tasks if it is simple enough.
The plan should end with a sub-task that can achieve the overall task.

The tools available are:
{tools_str}

Overall Task: {task}
"""

DEFAULT_PLAN_REFINE_PROMPT = """\
You have the following prior context/memory (may be empty):
{memory_context}

Think step-by-step. Given an overall task, a set of tools, and completed sub-tasks, update (if needed) the remaining sub-tasks so that the overall task can still be completed.
The plan should end with a sub-task that can achieve and satisfy the overall task.
If you do update the plan, only create new sub-tasks that will replace the remaining sub-tasks, do NOT repeat tasks that are already completed.
If the remaining sub-tasks are enough to achieve the overall task, it is ok to skip this step, and instead explain why the plan is complete.

The tools available are:
{tools_str}

Completed Sub-Tasks + Outputs:
{completed_outputs}

Remaining Sub-Tasks:
{remaining_sub_tasks}

Overall Task: {task}
"""

DEFAULT_EXECUTE_PROMPT = """\
You execute the given sub-task using the tools. Return concise outputs.
"""

class QueryEvent(StartEvent):
    query: str


class PlanReady(Event):
    plan: Plan
    query: str


class FinalEvent(StopEvent):
    pass


class PlannerWorkflow(Workflow):
    def __init__(
        self,
        tools: List[BaseTool],
        llm: LLM,
        system_prompt: Optional[str] = None,
        initial_plan_prompt: str = DEFAULT_INITIAL_PLAN_PROMPT,
        plan_refine_prompt: str = DEFAULT_PLAN_REFINE_PROMPT,
        verbose: bool = False,
        max_steps: int = 12,
        memory_char_limit: int = 8000,
        clear_executor_memory_between_subtasks: bool = False,
        executor_memory_factory: Optional[Callable[[], object]] = None,
        on_stop: Optional[Callable] = None,
    ):
        super().__init__(timeout=None, verbose=verbose)
        self._planner_llm = llm
        self._initial_plan_prompt = PromptTemplate(initial_plan_prompt)
        self._plan_refine_prompt = PromptTemplate(plan_refine_prompt)
        self._tools = tools
        self._max_steps = max_steps
        self._memory = None
        self.verbose = verbose
        self._memory_char_limit = memory_char_limit
        self._on_stop = on_stop

        self._executor = FunctionAgent(
            name="PlannerExecutor",
            description="Executes planner sub-tasks using available tools.",
            tools=tools,
            llm=llm,
            system_prompt=system_prompt or DEFAULT_EXECUTE_PROMPT,
            max_steps=max_steps,
        )

        if executor_memory_factory is not None:
            self._executor_mem_factory = executor_memory_factory
        else:
            def _default_factory():
                if ChatMemoryBuffer is not None:
                    return ChatMemoryBuffer.from_defaults()
                return None
            self._executor_mem_factory = _default_factory

        self._clear_exec_mem_between_subtasks = clear_executor_memory_between_subtasks

    def _stopped(self) -> bool:
        """
        Check if the workflow has been stopped.

        :return: True if the workflow is stopped, False otherwise.
        """
        if self._on_stop:
            try:
                return self._on_stop()
            except Exception:
                return False
        return False

    def _emit_step_event(
        self,
        ctx: Context,
        name: str,
        index: Optional[int] = None,
        total: Optional[int] = None,
        meta: Optional[dict] = None,
    ) -> None:
        """
        Emits a step event to the context stream.

        :param ctx: The context to write the event to.
        :param name: The name of the step (e.g., "make_plan", "execute_plan", "subtask").
        :param index: The index of the step (optional).
        :param total: The total number of steps (optional).
        :param meta: Additional metadata for the step (optional).
        """
        try:
            ctx.write_event_to_stream(
                StepEvent(name=name, index=index, total=total, meta=meta or {})
            )
        except Exception:
            # fallback for older versions of AgentStream
            try:
                ctx.write_event_to_stream(
                    AgentStream(
                        delta="",
                        response="",
                        current_agent_name="PlannerWorkflow",
                        tool_calls=[],
                        raw={"StepEvent": {"name": name, "index": index, "total": total, "meta": meta or {}}}
                    )
                )
            except Exception:
                pass

    def _reset_executor_memory(self):
        """Reset the memory of the executor agent to a new instance or clear it."""
        try:
            new_mem = self._executor_mem_factory()
            if hasattr(self._executor, "memory"):
                self._executor.memory = new_mem
        except Exception:
            mem = getattr(self._executor, "memory", None)
            for attr in ("reset", "clear", "flush"):
                fn = getattr(mem, attr, None)
                if callable(fn):
                    try:
                        fn()
                        break
                    except Exception:
                        pass

    def run(
        self,
        query: str,
        ctx: Optional[Context] = None,
        memory=None,
        verbose: bool = False,
        **kwargs
    ):
        """
        Run the planner workflow with the given query and context.

        :param query: The input query string to process.
        :param ctx: The context in which the workflow is executed (optional).
        :param memory: custom memory buffer to use for the agent (optional).
        :param verbose: Whether to enable verbose output (default: False).
        :param kwargs: Additional keyword arguments (not used).
        :return: The result of the workflow execution.
        """
        if verbose:
            self.verbose = True

        self._memory = memory
        self._reset_executor_memory()

        return super().run(ctx=ctx, query=query)

    def _memory_to_text(self, memory) -> str:
        """
        Convert the memory object to a text representation.

        :param memory: The memory object to convert, which can be a string, list, or other types.
        :return: A string representation of the memory content, truncated if it exceeds the character limit.
        """
        if not memory:
            return ""
        try:
            if isinstance(memory, str):
                text = memory
            elif isinstance(memory, list):
                parts = []
                for m in memory:
                    if isinstance(m, str):
                        parts.append(m)
                    elif isinstance(m, dict) and ("content" in m or "text" in m):
                        role = m.get("role", "user")
                        content = m.get("content", m.get("text", ""))
                        parts.append(f"{role}: {content}")
                    else:
                        # ChatMessage-like object
                        role = getattr(m, "role", None) or getattr(m, "sender", "user")
                        content = getattr(m, "content", None) or getattr(m, "text", "")
                        parts.append(f"{role}: {content}")
                text = "\n".join(parts)
            else:
                for attr in ("to_string", "to_str"):
                    fn = getattr(memory, attr, None)
                    if callable(fn):
                        text = fn()
                        break
                else:
                    for attr in ("get", "messages", "get_all", "dump"):
                        fn = getattr(memory, attr, None)
                        if callable(fn):
                            data = fn()
                            text = self._memory_to_text(data)
                            break
                    else:
                        text = str(memory)
        except Exception:
            text = str(memory)

        if self._memory_char_limit and len(text) > self._memory_char_limit:
            text = "...[truncated]...\n" + text[-self._memory_char_limit:]
        return text

    async def _emit_text(
        self,
        ctx: Context,
        text: str,
        agent_name: str = "PlannerWorkflow"
    ):
        """
        Emit a text message to the context stream.

        :param ctx: The context to write the event to
        :param text: The text message to emit.
        :param agent_name: The name of the agent emitting the text (default: "PlannerWorkflow").
        """
        try:
            ctx.write_event_to_stream(AgentStream(delta=text))
        except ValidationError:
            ctx.write_event_to_stream(
                AgentStream(
                    delta=text,
                    response=text,
                    current_agent_name=agent_name,
                    tool_calls=[],
                    raw={},
                )
            )

    def _to_text(self, resp) -> str:
        """
        Convert the response object to a text representation.

        :param resp: The response object to convert, which can be a string, list, or other types.
        :return: A string representation of the response content.
        """
        try:
            if resp is None or str(resp) == "assistant: None":
                return ""
            msg = getattr(resp, "message", None)
            if msg is not None:
                return getattr(msg, "content", "") or ""
            c = getattr(resp, "content", None)
            if c is not None:
                if isinstance(c, list):
                    parts = []
                    for s in c:
                        parts.append(getattr(s, "text", s if isinstance(s, str) else str(s)))
                    return "".join(parts)
                return c if isinstance(c, str) else str(c)
            return str(resp)
        except Exception:
            return str(resp)

    def _truncate(self, text: str, limit: int) -> str:
        """
        Truncate the text to a specified character limit, adding a prefix if truncated.

        :param text: The text to truncate.
        :param limit: The maximum number of characters to keep in the text.
        :return: Truncated text
        """
        if not text or not limit or limit <= 0:
            return text or ""
        if len(text) <= limit:
            return text
        return "...[truncated]...\n" + text[-limit:]

    def _build_context_for_subtask(
        self,
        completed: list[tuple[str, str]],
        dependencies: list[str],
        char_limit: int,
    ) -> str:
        """
        Build context for a sub-task based on completed tasks and dependencies.

        :param completed: List of completed sub-tasks with their outputs.
        :param dependencies: List of sub-task names that this sub-task depends on.
        :param char_limit: Character limit for the context text.
        :return: A formatted string containing the context for the sub-task.
        """
        if not completed:
            return ""

        if dependencies:
            selected = [(n, out) for (n, out) in completed if n in set(dependencies)]
            if not selected:
                return ""
        else:
            selected = completed

        parts = []
        for idx, (name, output) in enumerate(selected, 1):
            clean = (output or "").strip()
            if not clean:
                continue
            parts.append(f"[{idx}] {name} -> {clean}")

        if not parts:
            return ""

        ctx_text = "Completed sub-tasks context:\n" + "\n".join(parts)
        return self._truncate(ctx_text, char_limit or 8000)

    async def _run_subtask(self, ctx: Context, prompt: str) -> str:
        """
        Run a sub-task using the executor agent.

        :param ctx: The context in which the sub-task is executed.
        :param prompt: The prompt for the sub-task.
        """
        if self._clear_exec_mem_between_subtasks:
            self._reset_executor_memory()

        sig = inspect.signature(self._executor.run)
        kwargs = {}
        if "user_msg" in sig.parameters:
            kwargs["user_msg"] = prompt
        elif "input" in sig.parameters:
            kwargs["input"] = prompt
        elif "query" in sig.parameters:
            kwargs["query"] = prompt
        elif "task" in sig.parameters:
            kwargs["task"] = prompt
        if "max_steps" in sig.parameters:
            kwargs["max_steps"] = self._max_steps

        handler = self._executor.run(**kwargs)
        last_answer = ""
        has_stream = False
        stream_buf = []

        async def _stream():
            nonlocal last_answer, has_stream

            async for e in handler.stream_events():
                if isinstance(e, StopEvent):
                    continue

                # stop callback
                if self._stopped():
                    ctx.write_event_to_stream(StopEvent())
                    await handler.cancel_run()
                    return last_answer or ("".join(stream_buf).strip() if stream_buf else "")

                if isinstance(e, AgentStream):
                    delta = getattr(e, "delta", None)
                    if delta:
                        has_stream = True
                        stream_buf.append(str(delta))
                    if not getattr(e, "current_agent_name", None):
                        try:
                            e.current_agent_name = self._executor.name
                        except Exception:
                            pass
                    ctx.write_event_to_stream(e)
                    continue

                if isinstance(e, AgentOutput):
                    resp = getattr(e, "response", None)
                    content = self._to_text(resp).strip()
                    last_answer = content or ("".join(stream_buf).strip() if stream_buf else "")
                    if not has_stream and last_answer:
                        ctx.write_event_to_stream(
                            AgentStream(
                                delta=last_answer,
                                response=last_answer,
                                current_agent_name=f"{self._executor.name} (subtask)",
                                tool_calls=e.tool_calls,
                                raw=e.raw,
                            )
                        )
                    continue

                if isinstance(e, (ToolCall, ToolCallResult)):
                    ctx.write_event_to_stream(e)
                    continue

                if isinstance(e, Event):
                    ctx.write_event_to_stream(e)

            try:
                await handler
            except Exception:
                pass

            return last_answer or ("".join(stream_buf).strip() if stream_buf else "")

        try:
            return await _stream()
        except Exception as ex:
            await self._emit_text(ctx, f"\n`Sub-task failed: {ex}`")
            return last_answer or ("".join(stream_buf).strip() if stream_buf else "")

    @step
    async def make_plan(self, ctx: Context, ev: QueryEvent) -> PlanReady:
        """
        Create a plan based on the provided query and available tools.

        :param ctx: Context in which the plan is created.
        :param ev: QueryEvent containing the query to process.
        :return: PlanReady event containing the generated plan and query.
        """
        tools_str = ""
        for t in self._tools:
            tools_str += f"{(t.metadata.name or '').strip()}: {(t.metadata.description or '').strip()}\n"

        memory_context = self._memory_to_text(self._memory)

        try:
            plan = await self._planner_llm.astructured_predict(
                Plan,
                self._initial_plan_prompt,
                tools_str=tools_str,
                task=ev.query,
                memory_context=memory_context,
            )
        except (ValueError, ValidationError):
            plan = Plan(sub_tasks=[SubTask(name="default", input=ev.query, expected_output="", dependencies=[])])

        lines = ["`Current plan:`"]
        for i, st in enumerate(plan.sub_tasks, 1):
            lines.append(
                f"\n**===== Sub Task {i}: {st.name} =====**\n"
                f"Expected output: {st.expected_output}\n"
                f"Dependencies: {st.dependencies}\n"
            )
        await self._emit_text(ctx, "\n".join(lines))
        return PlanReady(plan=plan, query=ev.query)

    @step
    async def execute_plan(self, ctx: Context, ev: PlanReady) -> FinalEvent:
        """
        Execute the plan created in the previous step.

        :param ctx: Context in which the plan is executed.
        :param ev: PlanReady event containing the plan and query.
        """
        plan_sub_tasks = list(ev.plan.sub_tasks)
        total = len(plan_sub_tasks)

        last_answer = ""
        completed: list[tuple[str, str]] = []  # (name, output)

        await self._emit_text(ctx, "\n\n`Executing plan...`")

        for i, st in enumerate(plan_sub_tasks, 1):
            self._emit_step_event(
                ctx,
                name="subtask",
                index=i,
                total=total,
                meta={
                    "name": st.name,
                    "expected_output": st.expected_output,
                    "dependencies": st.dependencies,
                    "input": st.input,
                },
            )

            header = (
                f"\n\n**===== Sub Task {i}/{total}: {st.name} =====**\n"
                f"Expected output: {st.expected_output}\n"
                f"Dependencies: {st.dependencies}\n"
            )

            # stop callback
            if self._stopped():
                await self._emit_text(ctx, "\n`Plan execution stopped.`")
                return FinalEvent(result=last_answer or "Plan execution stopped.")

            await self._emit_text(ctx, header)

            # build context for sub-task
            ctx_text = self._build_context_for_subtask(
                completed=completed,
                dependencies=st.dependencies or [],
                char_limit=self._memory_char_limit,
            )

            # make composed prompt for sub-task
            if ctx_text:
                composed_prompt = (
                    f"{ctx_text}\n\n"
                    f"Now execute the next sub-task: {st.name}\n"
                    f"Instructions:\n{st.input}\n"
                    f"Return only the final output."
                )
            else:
                composed_prompt = st.input

            # run the sub-task
            sub_answer = await self._run_subtask(ctx, composed_prompt)
            sub_answer = (sub_answer or "").strip()

            await self._emit_text(ctx, f"\n\n`Finished Sub Task {i}/{total}: {st.name}`")

            # save completed sub-task
            completed.append((st.name, sub_answer))
            if sub_answer:
                last_answer = sub_answer

        # TODO: refine plan if needed

        await self._emit_text(ctx, "\n\n`Plan execution finished.`")
        return FinalEvent(result=last_answer or "Plan finished.")
```
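Taken together, the constructor and the two `@step` methods suggest a call pattern along the following lines. This is a minimal sketch under stated assumptions: the `add` tool, the model name, and the `main` wrapper are illustrative and not part of the diff, and any LlamaIndex LLM and `BaseTool` list should be substitutable.

```python
import asyncio

from llama_index.core.agent.workflow import AgentStream
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI  # illustrative; any LlamaIndex LLM

from pygpt_net.provider.agents.llama_index.workflow.events import StepEvent
from pygpt_net.provider.agents.llama_index.workflow.planner import PlannerWorkflow


def add(a: int, b: int) -> int:
    """Add two integers (illustrative tool)."""
    return a + b


async def main() -> None:
    workflow = PlannerWorkflow(
        tools=[FunctionTool.from_defaults(fn=add)],
        llm=OpenAI(model="gpt-4o-mini"),
        verbose=True,
        on_stop=lambda: False,  # return True here to abort between events/sub-tasks
    )
    # run() stores the optional memory, resets executor memory, then
    # dispatches QueryEvent -> make_plan -> PlanReady -> execute_plan
    handler = workflow.run(query="What is 2 + 3? Use the add tool.")
    async for ev in handler.stream_events():
        if isinstance(ev, StepEvent):
            print(f"\n[step {ev.index}/{ev.total}] {ev.meta.get('name')}")
        elif isinstance(ev, AgentStream) and ev.delta:
            print(ev.delta, end="")
    print("\nfinal:", await handler)


asyncio.run(main())
```

Since `_stopped()` is polled both inside the sub-task event loop and between sub-tasks, a GUI can wire `on_stop` to a stop flag and the workflow will emit a `StopEvent` and cancel the executor run mid-stream.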
```diff
@@ -348,6 +348,7 @@ class Agent(BaseAgent):
         handler.to_buffer(title)
         async for event in result.stream_events():
             if bridge.stopped():
+                result.cancel()
                 bridge.on_stop(ctx)
                 break
             final_output, response_id = handler.handle(event, ctx)
@@ -391,6 +392,7 @@ class Agent(BaseAgent):
         handler.to_buffer(title)
         async for event in result.stream_events():
             if bridge.stopped():
+                result.cancel()
                 bridge.on_stop(ctx)
                 break
             final_output, response_id = handler.handle(event, ctx)
```
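Both hunks guard the same event-streaming loop: when the bridge reports a stop, the run is now cancelled before the loop breaks, instead of only notifying the bridge. A minimal sketch of the cooperative-cancellation pattern these hunks apply; `stopped` and `process` are stand-in names, not identifiers from the diff:

```python
async def drain(result, stopped, process) -> None:
    """Consume a streaming agent run, cancelling it on a stop request."""
    async for event in result.stream_events():
        if stopped():
            # Without cancel(), the producer keeps running in the
            # background after the consumer breaks out of the loop.
            result.cancel()
            break
        process(event)
```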