pygpt-net 2.6.62__py3-none-any.whl → 2.6.63__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +5 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/presets/editor.py +65 -1
- pygpt_net/core/agents/custom/llama_index/runner.py +15 -52
- pygpt_net/core/agents/custom/runner.py +194 -76
- pygpt_net/core/agents/runners/llama_workflow.py +60 -10
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
- pygpt_net/data/config/presets/agent_openai_coder.json +1 -15
- pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
- pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
- pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
- pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
- pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
- pygpt_net/data/config/presets/agent_supervisor.json +1 -11
- pygpt_net/data/js/app/runtime.js +4 -1
- pygpt_net/data/js/app.min.js +3 -2
- pygpt_net/data/locale/locale.en.ini +5 -0
- pygpt_net/js_rc.py +13 -10
- pygpt_net/provider/agents/base.py +0 -0
- pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/codeact.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +229 -29
- pygpt_net/provider/agents/llama_index/workflow/supervisor.py +0 -0
- pygpt_net/provider/agents/openai/agent.py +0 -0
- pygpt_net/provider/agents/openai/agent_b2b.py +4 -4
- pygpt_net/provider/agents/openai/agent_planner.py +617 -262
- pygpt_net/provider/agents/openai/agent_with_experts.py +0 -0
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
- pygpt_net/provider/agents/openai/evolve.py +6 -6
- pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/openai/supervisor.py +290 -37
- pygpt_net/provider/api/x_ai/__init__.py +0 -0
- pygpt_net/provider/core/agent/__init__.py +0 -0
- pygpt_net/provider/core/agent/base.py +0 -0
- pygpt_net/provider/core/agent/json_file.py +0 -0
- pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
- pygpt_net/provider/llms/base.py +0 -0
- pygpt_net/provider/llms/deepseek_api.py +0 -0
- pygpt_net/provider/llms/google.py +0 -0
- pygpt_net/provider/llms/hugging_face_api.py +0 -0
- pygpt_net/provider/llms/hugging_face_router.py +0 -0
- pygpt_net/provider/llms/mistral.py +0 -0
- pygpt_net/provider/llms/perplexity.py +0 -0
- pygpt_net/provider/llms/x_ai.py +0 -0
- pygpt_net/ui/widget/dialog/confirm.py +34 -8
- pygpt_net/ui/widget/textarea/input.py +1 -1
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/METADATA +7 -2
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/RECORD +34 -34
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/entry_points.txt +0 -0
```diff
@@ -6,11 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.27 14:40:00 #
 # ================================================== #
 
 from dataclasses import dataclass
-from typing import Dict, Any, Tuple,
+from typing import Dict, Any, Tuple, Optional, List
 
 from agents import (
     Agent as OpenAIAgent,
@@ -38,56 +38,111 @@ from pygpt_net.utils import trans
 
 from ..base import BaseAgent
 
+
+# ---------- Structured types to mirror the LlamaIndex Planner ----------
 @dataclass
-class
-
-
+class SubTask:
+    name: str
+    input: str
+    expected_output: str
+    dependencies: List[str]
+
 
 @dataclass
-class
-
+class Plan:
+    sub_tasks: List[SubTask]
 
-class Agent(BaseAgent):
 
-
-
-
-
-
-    Format the plan using markdown.
-
-    Example:
-    --------
-
-    **Sub-task 1: <name 1>**
-
-    - Description: <subtask description 1>
-    - Expected output: <expected output 1>
-    - Dependencies: []
-    - Required Tools: []
-
-    **Sub-task 2: <name 2>**
-
-    - Description: <subtask description 2>
-    - Expected output: <expected output 2>
-    - Dependencies: [<name 1>]
-    - Required Tools: [WebSearch]
-
-    ...
-    """
+@dataclass
+class PlanRefinement:
+    is_done: bool
+    reason: Optional[str]
+    plan: Optional[Plan]
 
-    PROMPT = (
-        "Prepare a comprehensive and detailed response to the question based on the action plan. "
-        "Follow each step outlined in the plan. "
-        "If any feedback is provided, use it to improve the response."
-    )
 
-
-
-
-
-
-
+class Agent(BaseAgent):
+    # System prompts used as templates, exposed in options (planner.initial_prompt, refine.prompt).
+    DEFAULT_INITIAL_PLAN_PROMPT = """\
+You have the following prior context/memory (may be empty):
+{memory_context}
+
+Think step-by-step. Given a task and a set of tools, create a comprehensive, end-to-end plan to accomplish the task.
+Keep in mind not every task needs to be decomposed into multiple sub-tasks if it is simple enough.
+The plan should end with a sub-task that can achieve the overall task.
+
+The tools available are:
+{tools_str}
+
+Overall Task: {task}
+
+Return a JSON object that matches this schema exactly:
+{
+  "sub_tasks": [
+    {
+      "name": "string",
+      "input": "string",
+      "expected_output": "string",
+      "dependencies": ["string", "..."]
+    }
+  ]
+}
+"""
+
+    DEFAULT_PLAN_REFINE_PROMPT = """\
+You have the following prior context/memory (may be empty):
+{memory_context}
+
+Think step-by-step. Given an overall task, a set of tools, and completed sub-tasks, decide whether the overall task is already satisfied.
+If not, update the remaining sub-tasks so that the overall task can still be completed.
+
+Completion criteria (ALL must be true to set is_done=true):
+- A final, user-facing answer that directly satisfies "Overall Task" already exists within "Completed Sub-Tasks + Outputs".
+- The final answer matches any explicit format and language requested in "Overall Task".
+- No critical transformation/summarization/finalization step remains among "Remaining Sub-Tasks" (e.g., steps like: provide/present/report/answer/summarize/finalize/deliver the result).
+- The final answer does not rely on placeholders such as "will be provided later" or "see plan above".
+
+If ANY of the above is false, set is_done=false.
+
+Update policy:
+- If the remaining sub-tasks are already reasonable and correctly ordered, do not propose changes: set is_done=false and omit "plan".
+- Only propose a new "plan" if you need to REPLACE the "Remaining Sub-Tasks" (e.g., wrong order, missing critical steps, or new info from completed outputs).
+- Do NOT repeat any completed sub-task. New sub-tasks must replace only the "Remaining Sub-Tasks".
+
+Output schema (strict JSON):
+{
+  "is_done": true|false,
+  "reason": "string or null",
+  "plan": {
+    "sub_tasks": [
+      {
+        "name": "string",
+        "input": "string",
+        "expected_output": "string",
+        "dependencies": ["string", "..."]
+      }
+    ]
+  } | null
+}
+
+The tools available are:
+{tools_str}
+
+Completed Sub-Tasks + Outputs:
+{completed_outputs}
+
+Remaining Sub-Tasks:
+{remaining_sub_tasks}
+
+Overall Task: {task}
+"""
+
+    # Base executor instruction used by the main execution agent (internal default).
+    # Note: keep this concise but explicit that tools must be used for any external action.
+    PROMPT = (
+        "You are an execution agent. Follow each sub-task strictly and use the available tools to take actions. "
+        "Do not claim that you cannot access files or the web; instead, invoke the appropriate tool. "
+        "For local files prefer the sequence: cwd -> find (pattern, path, recursive=true) -> read_file(path). "
+        "Return only the final output unless explicitly asked for intermediate thoughts."
     )
 
     def __init__(self, *args, **kwargs):
```
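These hunks appear to come from the `agent_planner.py +617 -262` entry listed above: the old markdown-formatted plan prompt is replaced with `SubTask`/`Plan`/`PlanRefinement` dataclasses that the planner returns as structured output. A minimal sketch of how a planner JSON payload maps onto these types — the payload and the `parse_plan` helper are illustrative, not part of the package:

```python
import json
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class SubTask:
    name: str
    input: str
    expected_output: str
    dependencies: List[str]

@dataclass
class Plan:
    sub_tasks: List[SubTask]

def parse_plan(raw: str) -> Optional[Plan]:
    # Malformed output degrades to None, mirroring the fallback in run(),
    # which replaces a failed parse with a single "default" sub-task.
    try:
        data = json.loads(raw)
        return Plan(sub_tasks=[SubTask(**st) for st in data["sub_tasks"]])
    except (KeyError, TypeError, json.JSONDecodeError):
        return None

payload = (
    '{"sub_tasks": [{"name": "search", "input": "find docs", '
    '"expected_output": "list of links", "dependencies": []}]}'
)
plan = parse_plan(payload)
print(plan.sub_tasks[0].name if plan else "fallback")  # -> search
```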
```diff
@@ -96,6 +151,179 @@ class Agent(BaseAgent):
         self.type = AGENT_TYPE_OPENAI
         self.mode = AGENT_MODE_OPENAI
         self.name = "Planner"
+        self._memory_char_limit = 8000  # consistent with the LlamaIndex workflow
+
+    # ---------- Helpers: planning/execution parity with LlamaIndex + bridge persistence ----------
+
+    def _truncate(self, text: str, limit: int) -> str:
+        if not text or not limit or limit <= 0:
+            return text or ""
+        if len(text) <= limit:
+            return text
+        return "...[truncated]...\n" + text[-limit:]
+
+    def _memory_to_text(self, messages: Optional[List[Dict[str, Any]]]) -> str:
+        if not messages:
+            return ""
+        try:
+            parts = []
+            for m in messages:
+                role = m.get("role", "user")
+                content = m.get("content", "")
+                parts.append(f"{role}: {content}")
+            text = "\n".join(parts)
+        except Exception:
+            try:
+                text = str(messages)
+            except Exception:
+                text = ""
+        return self._truncate(text, self._memory_char_limit)
+
+    def _tools_to_str(self, tools: List[Any]) -> str:
+        out = []
+        for t in tools or []:
+            try:
+                meta = getattr(t, "metadata", None)
+                if meta is not None:
+                    name = (getattr(meta, "name", "") or "").strip()
+                    desc = (getattr(meta, "description", "") or "").strip()
+                    if name or desc:
+                        out.append(f"{name}: {desc}")
+                        continue
+                # Fallback for function-style tools
+                name = (getattr(t, "name", "") or "").strip()
+                desc = (getattr(t, "description", "") or "").strip()
+                if name or desc:
+                    out.append(f"{name}: {desc}")
+                    continue
+                if isinstance(t, dict):
+                    name = (t.get("name") or "").strip()
+                    desc = (t.get("description") or "").strip()
+                    if name or desc:
+                        out.append(f"{name}: {desc}")
+                        continue
+                out.append(str(t))
+            except Exception:
+                out.append(str(t))
+        return "\n".join(out)
+
+    def _format_subtasks(self, sub_tasks: List[SubTask]) -> str:
+        parts = []
+        for i, st in enumerate(sub_tasks or [], 1):
+            parts.append(
+                f"[{i}] name={st.name}\n"
+                f"  input={st.input}\n"
+                f"  expected_output={st.expected_output}\n"
+                f"  dependencies={st.dependencies}"
+            )
+        return "\n".join(parts) if parts else "(none)"
+
+    def _format_completed(self, completed: List[Tuple[str, str]]) -> str:
+        if not completed:
+            return "(none)"
+        parts = []
+        for i, (name, out) in enumerate(completed, 1):
+            parts.append(f"[{i}] {name} -> {self._truncate((out or '').strip(), 2000)}")
+        joined = "\n".join(parts)
+        return self._truncate(joined, self._memory_char_limit or 8000)
+
+    def _build_context_for_subtask(
+        self,
+        completed: List[Tuple[str, str]],
+        dependencies: List[str],
+        char_limit: int,
+    ) -> str:
+        if not completed:
+            return ""
+        if dependencies:
+            selected = [(n, out) for (n, out) in completed if n in set(dependencies)]
+            if not selected:
+                return ""
+        else:
+            selected = completed
+
+        parts = []
+        for idx, (name, output) in enumerate(selected, 1):
+            clean = (output or "").strip()
+            if not clean:
+                continue
+            parts.append(f"[{idx}] {name} -> {clean}")
+
+        if not parts:
+            return ""
+        ctx_text = "Completed sub-tasks context:\n" + "\n".join(parts)
+        return self._truncate(ctx_text, char_limit or 8000)
+
+    def _compose_subtask_prompt(self, st: SubTask, completed: List[Tuple[str, str]]) -> str:
+        """
+        Compose the prompt for a single sub-task. Keep it explicit that tools should be used.
+        """
+        ctx_text = self._build_context_for_subtask(
+            completed=completed,
+            dependencies=st.dependencies or [],
+            char_limit=self._memory_char_limit,
+        )
+
+        # Small, generic tool usage hint keeps the model from refusing actions.
+        tool_hint = (
+            "Use tools to take actions. For file operations use: "
+            "'cwd' -> 'find' (pattern, path, recursive=true) -> 'read_file(path)'."
+        )
+
+        if ctx_text:
+            return (
+                f"{ctx_text}\n\n"
+                f"{tool_hint}\n"
+                f"Now execute the next sub-task: {st.name}\n"
+                f"Instructions:\n{st.input}\n"
+                f"Return only the final output."
+            )
+        return (
+            f"{tool_hint}\n"
+            f"{st.input}\n\n"
+            f"Return only the final output."
+        )
+
+    def _agent_label(
+        self,
+        step: str,
+        index: Optional[int] = None,
+        total: Optional[int] = None,
+        subtask_name: Optional[str] = None,
+    ) -> str:
+        if step == "subtask":
+            if index and total:
+                base = f"Sub-task {index}/{total}"
+            elif index:
+                base = f"Sub-task {index}"
+            else:
+                base = "Sub-task"
+            return f"{base}: {subtask_name}" if subtask_name else base
+        if step == "refine":
+            if index and total:
+                return f"Refine {index}/{total}"
+            return "Refine" if not index else f"Refine {index}"
+        if step in {"make_plan", "plan"}:
+            return "Plan"
+        if step in {"execute", "execute_plan"}:
+            return "Execute"
+        return step or "Step"
+
+    def prepare_model(
+        self,
+        model: ModelItem,
+        window: Any,
+        previous_response_id: Optional[str],
+        kwargs: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """
+        Prepare per-run kwargs (keep parity with other agents).
+        """
+        if model.provider == "openai" and previous_response_id:
+            kwargs["previous_response_id"] = previous_response_id
+        return kwargs
+
+    # ---------- OpenAI Agents providers ----------
 
     def get_agent(self, window, kwargs: Dict[str, Any]):
         """
```
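The new helpers trim context from the front, not the back: `_truncate()` keeps the most recent `limit` characters and prepends a marker, so long histories lose their oldest content first. A standalone sketch of the same strategy:

```python
# Standalone sketch of the tail-truncation behind _truncate(): when text
# exceeds the limit, keep the most recent characters, since the end of a
# conversation history is usually the most relevant part.
def truncate_tail(text: str, limit: int) -> str:
    if not text or not limit or limit <= 0:
        return text or ""
    if len(text) <= limit:
        return text
    return "...[truncated]...\n" + text[-limit:]

history = "\n".join(f"user: message {i}" for i in range(1000))
print(truncate_tail(history, 80))  # marker followed by the last 80 characters
```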
```diff
@@ -107,56 +335,69 @@ class Agent(BaseAgent):
         """
         context = kwargs.get("context", BridgeContext())
         preset = context.preset
-
+        # Keep a stable display name; fallback to 'Executor' if no preset
+        agent_name = (preset.name if preset and getattr(preset, "name", None) else "Executor")
         model = kwargs.get("model", ModelItem())
         tools = kwargs.get("function_tools", [])
         handoffs = kwargs.get("handoffs", [])
-
+
+        # Use internal default prompt, not options
+        base_instructions = self.PROMPT
+
+        allow_local_tools = bool(kwargs.get("allow_local_tools", False))
+        allow_remote_tools = bool(kwargs.get("allow_remote_tools", False))
+
+        cfg = {
             "name": agent_name,
-            "instructions":
+            "instructions": base_instructions,
             "model": window.core.agents.provider.get_openai_model(model),
         }
         if handoffs:
-
+            cfg["handoffs"] = handoffs
 
         tool_kwargs = append_tools(
             tools=tools,
             window=window,
             model=model,
             preset=preset,
-            allow_local_tools=
-            allow_remote_tools=
+            allow_local_tools=allow_local_tools,
+            allow_remote_tools=allow_remote_tools,
         )
-
-
+        # NOTE: do not remove this update; it attaches tools so the agent can invoke them.
+        cfg.update(tool_kwargs)
+
+        # Optional: expose tool names inside instructions to gently steer the model.
+        try:
+            tool_names = [getattr(t, "name", "").strip() for t in tool_kwargs.get("tools", [])]
+            tool_names = [n for n in tool_names if n]
+            if tool_names:
+                cfg["instructions"] = (
+                    f"{cfg['instructions']} "
+                    f"Available tools: {', '.join(tool_names)}."
+                )
+        except Exception:
+            pass
 
-
+        return OpenAIAgent(**cfg)
+
+    def get_planner(
         self,
         window,
         model: ModelItem,
-        instructions: str,
         preset: PresetItem,
         tools: list,
         allow_local_tools: bool = False,
         allow_remote_tools: bool = False,
     ) -> OpenAIAgent:
         """
-        Return Agent provider instance
-
-        :param window: window instance
-        :param model: Model item for the evaluator agent
-        :param instructions: Instructions for the evaluator agent
-        :param preset: Preset item for additional context
-        :param tools: List of function tools to use
-        :param allow_local_tools: Whether to allow local tools
-        :param allow_remote_tools: Whether to allow remote tools
-        :return: Agent provider instance
+        Return Agent provider instance producing a structured Plan.
         """
         kwargs = {
-            "name": "
-
+            "name": "StructuredPlanner",
+            # Minimal instructions; the full template is injected as user content.
+            "instructions": "Return a JSON object matching the provided schema.",
             "model": window.core.agents.provider.get_openai_model(model),
-            "output_type":
+            "output_type": Plan,
         }
         tool_kwargs = append_tools(
             tools=tools,
```
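`get_agent()` now builds the executor config from the internal `PROMPT`, attaches the tools via `append_tools()`, and best-effort appends the discovered tool names to the instructions. A small sketch of that augmentation step in isolation — the `Tool` class is a hypothetical stand-in for the objects produced by `append_tools()`:

```python
# Appending discovered tool names to the base instructions nudges the model
# to call them instead of refusing actions.
from dataclasses import dataclass
from typing import List

@dataclass
class Tool:
    name: str

def with_tool_hint(instructions: str, tools: List[Tool]) -> str:
    names = [t.name.strip() for t in tools if t.name.strip()]
    if not names:
        return instructions
    return f"{instructions} Available tools: {', '.join(names)}."

base = "You are an execution agent. Follow each sub-task strictly."
print(with_tool_hint(base, [Tool("cwd"), Tool("find"), Tool("read_file")]))
# -> ... Available tools: cwd, find, read_file.
```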
```diff
@@ -166,36 +407,26 @@ class Agent(BaseAgent):
             allow_local_tools=allow_local_tools,
             allow_remote_tools=allow_remote_tools,
         )
-        kwargs.update(tool_kwargs)
+        kwargs.update(tool_kwargs)  # update kwargs with tools
         return OpenAIAgent(**kwargs)
 
-    def
+    def get_refiner(
         self,
         window,
         model: ModelItem,
-        instructions: str,
         preset: PresetItem,
         tools: list,
         allow_local_tools: bool = False,
         allow_remote_tools: bool = False,
     ) -> OpenAIAgent:
         """
-        Return Agent provider instance
-
-        :param window: window instance
-        :param model: Model item for the evaluator agent
-        :param instructions: Instructions for the evaluator agent
-        :param preset: Preset item for additional context
-        :param tools: List of function tools to use
-        :param allow_local_tools: Whether to allow local tools
-        :param allow_remote_tools: Whether to allow remote tools
-        :return: Agent provider instance
+        Return Agent provider instance producing a structured PlanRefinement.
         """
         kwargs = {
-            "name": "
-            "instructions":
+            "name": "PlanRefiner",
+            "instructions": "Refine remaining plan steps and return a strict JSON object as instructed.",
             "model": window.core.agents.provider.get_openai_model(model),
-            "output_type":
+            "output_type": PlanRefinement,
         }
         tool_kwargs = append_tools(
             tools=tools,
```
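Both factories rely on the SDK parsing model output into the declared `output_type`. A condensed sketch of driving such a planner outside PyGPT, using only the calls visible in this diff (`Agent(output_type=...)`, `Runner.run`, `result.final_output`); it assumes the `openai-agents` SDK and an `OPENAI_API_KEY`, and the model name is a placeholder:

```python
import asyncio
from dataclasses import dataclass
from typing import List

from agents import Agent, Runner  # openai-agents SDK, as imported in the diff

@dataclass
class SubTask:
    name: str
    input: str
    expected_output: str
    dependencies: List[str]

@dataclass
class Plan:
    sub_tasks: List[SubTask]

async def make_plan(task: str, tools_str: str) -> Plan:
    planner = Agent(
        name="StructuredPlanner",
        instructions="Return a JSON object matching the provided schema.",
        model="gpt-4o",    # placeholder; PyGPT resolves this from the preset
        output_type=Plan,  # the SDK parses the response into the dataclass
    )
    prompt = f"The tools available are:\n{tools_str}\n\nOverall Task: {task}"
    result = await Runner.run(planner, [{"role": "user", "content": prompt}])
    return result.final_output  # a Plan instance

# asyncio.run(make_plan("Summarize README.md", "read_file: read a local file"))
```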
```diff
@@ -205,7 +436,7 @@ class Agent(BaseAgent):
             allow_local_tools=allow_local_tools,
             allow_remote_tools=allow_remote_tools,
         )
-        kwargs.update(tool_kwargs)
+        kwargs.update(tool_kwargs)
         return OpenAIAgent(**kwargs)
 
     async def run(
@@ -237,7 +468,7 @@ class Agent(BaseAgent):
         model = agent_kwargs.get("model", ModelItem())
         verbose = agent_kwargs.get("verbose", False)
         context = agent_kwargs.get("context", BridgeContext())
-        max_steps = agent_kwargs.get("max_iterations", 10)
+        max_steps = int(agent_kwargs.get("max_iterations", 10))
         tools = agent_kwargs.get("function_tools", [])
         preset = context.preset
 
@@ -251,185 +482,328 @@ class Agent(BaseAgent):
         if experts:
             agent_kwargs["handoffs"] = experts
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Executor must have access to the same tool set as planner/refiner.
+        # If not explicitly provided, inherit allow_* flags from planner options.
+        exec_allow_local_tools = agent_kwargs.get("allow_local_tools")
+        exec_allow_remote_tools = agent_kwargs.get("allow_remote_tools")
+        if exec_allow_local_tools is None:
+            exec_allow_local_tools = bool(self.get_option(preset, "planner", "allow_local_tools"))
+        if exec_allow_remote_tools is None:
+            exec_allow_remote_tools = bool(self.get_option(preset, "planner", "allow_remote_tools"))
+
+        # executor agent (FunctionAgent equivalent)
+        agent_exec_kwargs = dict(agent_kwargs)
+        agent_exec_kwargs["allow_local_tools"] = bool(exec_allow_local_tools)
+        agent_exec_kwargs["allow_remote_tools"] = bool(exec_allow_remote_tools)
+        agent = self.get_agent(window, agent_exec_kwargs)
+
+        # options
+        planner_model_name = self.get_option(preset, "planner", "model")
+        planner_model = window.core.models.get(planner_model_name) if planner_model_name else agent_kwargs.get("model",
+                                                                                                               ModelItem())
+        planner_allow_local_tools = bool(self.get_option(preset, "planner", "allow_local_tools"))
+        planner_allow_remote_tools = bool(self.get_option(preset, "planner", "allow_remote_tools"))
+        planner_prompt_tpl = self.get_option(preset, "planner", "initial_prompt") or self.DEFAULT_INITIAL_PLAN_PROMPT
+
+        refine_model_name = self.get_option(preset, "refine", "model") or planner_model_name
+        refine_allow_local_tools = bool(self.get_option(preset, "refine", "allow_local_tools"))
+        refine_allow_remote_tools = bool(self.get_option(preset, "refine", "allow_remote_tools"))
+        refine_prompt_tpl = self.get_option(preset, "refine", "prompt") or self.DEFAULT_PLAN_REFINE_PROMPT
+        _after_each_val = self.get_option(preset, "refine", "after_each_subtask")
+        refine_after_each = True if _after_each_val is None else bool(_after_each_val)
+
+        # Common Runner kwargs baseline
+        common_kwargs: Dict[str, Any] = {
+            "max_turns": max_steps,
         }
         if model.provider != "openai":
             custom_provider = get_custom_model_provider(window, model)
-
+            common_kwargs["run_config"] = RunConfig(model_provider=custom_provider)
         else:
             set_openai_env(window)
-            if previous_response_id:
-                kwargs["previous_response_id"] = previous_response_id
 
-
+        # Build tool list description and memory context for prompts
+        tools_str = self._tools_to_str(tools)
+        query = messages[-1]["content"] if messages else ""
+        memory_context = self._memory_to_text(messages)
+
+        # Step lifecycle control for bridge
+        begin = True  # first block only
+
+        # ---------- Make plan (structured) ----------
         planner = self.get_planner(
             window=window,
-            model=
-            instructions=planner_instructions,
+            model=planner_model,
             preset=preset,
             tools=tools,
             allow_local_tools=planner_allow_local_tools,
             allow_remote_tools=planner_allow_remote_tools,
         )
 
-
-
-
-
-            instructions=feedback_instructions,
-            preset=preset,
-            tools=tools,
-            allow_local_tools=feedback_allow_local_tools,
-            allow_remote_tools=feedback_allow_remote_tools,
+        plan_prompt = planner_prompt_tpl.format(
+            memory_context=memory_context,
+            tools_str=tools_str,
+            task=query,
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            kwargs["input"] = input_items
-            ctx.set_agent_name(agent.name)
-            if bridge.stopped():
-                bridge.on_stop(ctx)
-                break
-
-            result = await Runner.run(
-                agent,
-                **kwargs
+        plan_input_items: List[TResponseInputItem] = [{"role": "user", "content": plan_prompt}]
+
+        try:
+            planner_result = await Runner.run(planner, plan_input_items)
+            plan_obj: Optional[Plan] = planner_result.final_output  # type: ignore
+        except Exception:
+            plan_obj = None
+
+        if not plan_obj or not getattr(plan_obj, "sub_tasks", None):
+            plan_obj = Plan(sub_tasks=[
+                SubTask(
+                    name="default",
+                    input=f"{query}",
+                    expected_output="",
+                    dependencies=[],
             )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            print("Response is good enough, exiting.")
-            break
-
-            print("Re-running with feedback")
-            input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})
-
-            if use_partial_ctx:
-                ctx = bridge.on_next_ctx(
-                    ctx=ctx,
-                    input=result.feedback,  # new ctx: input
-                    output=final_output,  # prev ctx: output
-                    response_id=response_id,
-                    stream=False,
-                )
+            ])
+
+        # Present current plan as a dedicated step
+        plan_lines = ["`Current plan:`"]
+        for i, st in enumerate(plan_obj.sub_tasks, 1):
+            plan_lines.append(
+                f"\n**===== Sub Task {i}: {st.name} =====**\n"
+                f"Expected output: {st.expected_output}\n"
+                f"Dependencies: {st.dependencies}\n\n"
+            )
+        plan_text = "\n".join(plan_lines)
+
+        ctx.set_agent_name(self._agent_label("make_plan"))
+        ctx.stream = plan_text
+        bridge.on_step(ctx, begin)
+        begin = False
+
+        # Persist plan step boundary without leaking inputs
+        if use_partial_ctx:
+            ctx = bridge.on_next_ctx(
+                ctx=ctx,
+                input="",
+                output=plan_text,
+                response_id="",
+                finish=False,
+                stream=stream,
+            )
         else:
-
-
-
-
-
-
-
-
-
+            bridge.on_next(ctx)
+
+        # ---------- Execute plan with optional refinement after each sub-task ----------
+        plan_sub_tasks: List[SubTask] = list(plan_obj.sub_tasks)
+        last_answer = ""
+        completed: List[Tuple[str, str]] = []  # (name, output)
+
+        # Prepare static prompt parts for refinement
+        memory_context = self._memory_to_text(messages)  # re-evaluate after plan message
+
+        # shared stream handler for sub-task streaming
+        handler = StreamHandler(window, bridge)
+
+        # keep track of previous response id for provider continuity
+        prev_rid: Optional[str] = previous_response_id
+
+        i = 0
+        while i < len(plan_sub_tasks):
+            if bridge.stopped():
+                bridge.on_stop(ctx)
+                break
+
+            st = plan_sub_tasks[i]
+            total = len(plan_sub_tasks)
+
+            # UI header for the sub-task
+            subtask_label = self._agent_label("subtask", index=i + 1, total=total, subtask_name=st.name)
+            header = (
+                f"\n\n**===== Sub Task {i + 1}/{total}: {st.name} =====**\n"
+                f"Expected output: {st.expected_output}\n"
+                f"Dependencies: {st.dependencies}\n\n"
+            )
+
+            # Compose sub-task prompt and open a new persisted step
+            composed_prompt = self._compose_subtask_prompt(st, completed)
+            ctx.set_agent_name(subtask_label)
+            ctx.stream = header
+            bridge.on_step(ctx, False)  # open a new step block
+
+            exec_kwargs = dict(common_kwargs)
+            exec_items: List[TResponseInputItem] = [{"role": "user", "content": composed_prompt}]
+            exec_kwargs["input"] = exec_items
+            exec_kwargs = self.prepare_model(model, window, prev_rid, exec_kwargs)
+
+            sub_answer = ""
+            sub_rid = ""
+
+            if not stream:
+                try:
+                    result = await Runner.run(agent, **exec_kwargs)
+                    sub_rid = getattr(result, "last_response_id", "") or ""
+                    sub_answer = str(getattr(result, "final_output", "") or "")
+                except Exception as ex:
+                    sub_answer = f"Sub-task failed: {ex}"
+
+                if sub_answer:
+                    ctx.stream = sub_answer
+                    bridge.on_step(ctx, True)
+            else:
+                result = Runner.run_streamed(agent, **exec_kwargs)
                 handler.reset()
+                handler.begin = False
                 async for event in result.stream_events():
                     if bridge.stopped():
                         result.cancel()
                         bridge.on_stop(ctx)
                         break
-
-
-
-
+                    sub_answer, sub_rid = handler.handle(event, ctx)
+
+            # Save completed sub-task
+            sub_answer = (sub_answer or "").strip()
+            completed.append((st.name, sub_answer))
+            if sub_answer:
+                last_answer = sub_answer
+            if sub_rid:
+                prev_rid = sub_rid
+                response_id = sub_rid  # keep latest rid for return
+
+            # Close persisted step (finish only if last and no refine)
+            is_last_subtask = (i + 1 == len(plan_sub_tasks))
+            will_refine = (refine_after_each and not is_last_subtask)
+            if use_partial_ctx:
+                ctx = bridge.on_next_ctx(
+                    ctx=ctx,
+                    input="",
+                    output=sub_answer if sub_answer else header.strip(),
+                    response_id=sub_rid,
+                    finish=(is_last_subtask and not will_refine),
+                    stream=stream,
+                )
+                if stream:
+                    handler.new()
+            else:
+                bridge.on_next(ctx)
+
+            if bridge.stopped():
+                bridge.on_stop(ctx)
+                break
+
+            # Optional legacy-style refine after each sub-task (if there are remaining ones)
+            i += 1
+            if refine_after_each and i < len(plan_sub_tasks):
+                remaining = plan_sub_tasks[i:]
+                refine_label = self._agent_label("refine", index=i, total=len(plan_sub_tasks))
+
+                # Start refine step
+                refine_display = "\n`Refining remaining plan...`"
+                ctx.set_agent_name(refine_label)
+                ctx.stream = refine_display
+                bridge.on_step(ctx, False)
+
+                # Build refine prompt
+                completed_text = self._format_completed(completed)
+                remaining_text = self._format_subtasks(remaining)
+                refine_prompt = refine_prompt_tpl.format(
+                    memory_context=memory_context,
+                    tools_str=tools_str,
+                    completed_outputs=completed_text,
+                    remaining_sub_tasks=remaining_text,
+                    task=query,
+                )
+                model_refiner = window.core.models.get(refine_model_name) if refine_model_name else planner_model
+                refiner = self.get_refiner(
+                    window=window,
+                    model=model_refiner,
+                    preset=preset,
+                    tools=tools,
+                    allow_local_tools=refine_allow_local_tools,
+                    allow_remote_tools=refine_allow_remote_tools,
+                )
 
-
-
-
+                refinement: Optional[PlanRefinement] = None
+                refine_rid = ""
+                try:
+                    refinement_result = await Runner.run(refiner, [{"role": "user", "content": refine_prompt}])
+                    refinement = refinement_result.final_output  # type: ignore
+                    refine_rid = getattr(refinement_result, "last_response_id", "") or ""
+                except Exception:
+                    refinement = None
+
+                if refinement is None:
+                    refine_display += "\n`Refine step failed to parse; continuing without changes.`"
+                    ctx.stream = "\n`Refine step failed to parse; continuing without changes.`"
+                    bridge.on_step(ctx, True)
+                    # finalize refine step
+                    if use_partial_ctx:
+                        ctx = bridge.on_next_ctx(
+                            ctx=ctx,
+                            input="",
+                            output=refine_display,
+                            response_id=refine_rid,
+                            finish=False,
+                            stream=False,
+                        )
+                    else:
+                        bridge.on_next(ctx)
+                    continue
 
-
-
-
-
+                if getattr(refinement, "is_done", False):
+                    reason = getattr(refinement, "reason", "") or "Planner judged the task as satisfied."
+                    done_msg = f"\n`Planner marked the plan as complete: {reason}`"
+                    refine_display += done_msg
+                    ctx.stream = done_msg
+                    bridge.on_step(ctx, True)
 
-
-            if result.score == "pass":
-                info += f"\n\n**{trans('agent.eval.score.good')}**\n"
+                    # finalize refine step as the last block
                     if use_partial_ctx:
                         ctx = bridge.on_next_ctx(
                             ctx=ctx,
-                            input=
-                            output=
-                            response_id=response_id,
+                            input="",
+                            output=refine_display,
+                            response_id=refine_rid or (response_id or ""),
                             finish=True,
-                            stream=
+                            stream=False,
                         )
                     else:
-                        ctx
-                        bridge.on_step(ctx, False)
-                        final_output += info
+                        bridge.on_next(ctx)
                     break
 
-
-
-
+                if refinement.plan and getattr(refinement.plan, "sub_tasks", None):
+                    completed_names = {n for (n, _) in completed}
+                    new_remaining = [st for st in refinement.plan.sub_tasks if st.name not in completed_names]
+
+                    current_remaining_repr = self._format_subtasks(remaining)
+                    new_remaining_repr = self._format_subtasks(new_remaining)
+                    if new_remaining_repr.strip() != current_remaining_repr.strip():
+                        plan_sub_tasks = plan_sub_tasks[:i] + new_remaining
+                        # Present the updated tail of the plan
+                        lines = ["`Updated remaining plan:`"]
+                        for k, st_upd in enumerate(new_remaining, i + 1):
+                            lines.append(
+                                f"\n**===== Sub Task {k}/{len(plan_sub_tasks)}: {st_upd.name} =====**\n"
+                                f"Expected output: {st_upd.expected_output}\n"
+                                f"Dependencies: {st_upd.dependencies}\n\n"
+                            )
+                        upd_text = "\n".join(lines)
+                        refine_display += "\n" + upd_text
+                        ctx.stream = upd_text
+                        bridge.on_step(ctx, True)
+
+                # finalize refine step (no extra noise)
                 if use_partial_ctx:
                     ctx = bridge.on_next_ctx(
                         ctx=ctx,
-                        input=
-                        output=
-                        response_id=
-
+                        input="",
+                        output=refine_display,
+                        response_id=refine_rid,
+                        finish=False,
+                        stream=False,
                     )
-                    handler.new()
                 else:
-                    ctx
-                    bridge.on_step(ctx, False)
-                    handler.to_buffer(info)
-
-                    return ctx, final_output, response_id
+                    bridge.on_next(ctx)
 
+        # Return last answer (final block already closed in the loop)
+        return ctx, (last_answer or "Plan finished."), (response_id or "")
 
     def get_options(self) -> Dict[str, Any]:
         """
```
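Stripped of the bridge and streaming plumbing, the new `run()` reduces to: execute sub-tasks in order, feeding each its dependencies' outputs, and after each one let a refiner optionally rewrite the remaining tail of the plan. A self-contained sketch of that control flow, with `execute` and `refine` as hypothetical stand-ins for the `Runner.run` calls on the executor and refiner agents:

```python
from dataclasses import dataclass, field
from typing import Callable, List, Optional, Tuple

@dataclass
class SubTask:
    name: str
    input: str
    expected_output: str = ""
    dependencies: List[str] = field(default_factory=list)

def run_plan(
    sub_tasks: List[SubTask],
    execute: Callable[[SubTask, List[Tuple[str, str]]], str],
    refine: Callable[[List[Tuple[str, str]], List[SubTask]], Optional[List[SubTask]]],
    refine_after_each: bool = True,
) -> str:
    completed: List[Tuple[str, str]] = []  # (name, output) pairs
    last_answer = ""
    i = 0
    while i < len(sub_tasks):
        answer = execute(sub_tasks[i], completed).strip()
        completed.append((sub_tasks[i].name, answer))
        if answer:
            last_answer = answer
        i += 1
        if refine_after_each and i < len(sub_tasks):
            new_tail = refine(completed, sub_tasks[i:])
            if new_tail is not None:  # None means "keep the current tail"
                sub_tasks = sub_tasks[:i] + new_tail
    return last_answer or "Plan finished."

plan = [SubTask("a", "step one"), SubTask("b", "step two")]
print(run_plan(plan, lambda st, done: f"did {st.name}", lambda done, rest: None))
# -> did b
```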
```diff
@@ -438,29 +812,6 @@ class Agent(BaseAgent):
         :return: dict of options
         """
         return {
-            "base": {
-                "label": trans("agent.option.section.base"),
-                "options": {
-                    "prompt": {
-                        "type": "textarea",
-                        "label": trans("agent.option.prompt"),
-                        "description": trans("agent.option.prompt.base.desc"),
-                        "default": self.PROMPT,
-                    },
-                    "allow_local_tools": {
-                        "type": "bool",
-                        "label": trans("agent.option.tools.local"),
-                        "description": trans("agent.option.tools.local.desc"),
-                        "default": False,
-                    },
-                    "allow_remote_tools": {
-                        "type": "bool",
-                        "label": trans("agent.option.tools.remote"),
-                        "description": trans("agent.option.tools.remote.desc"),
-                        "default": False,
-                    },
-                }
-            },
             "planner": {
                 "label": trans("agent.option.section.planner"),
                 "options": {
@@ -468,30 +819,30 @@ class Agent(BaseAgent):
                         "label": trans("agent.option.model"),
                         "type": "combo",
                         "use": "models",
-                        "default": "
+                        "default": "gpt-4o",
                     },
-                    "
+                    "initial_prompt": {
                         "type": "textarea",
                         "label": trans("agent.option.prompt"),
                         "description": trans("agent.option.prompt.planner.desc"),
-                        "default": self.
+                        "default": self.DEFAULT_INITIAL_PLAN_PROMPT,
                     },
                     "allow_local_tools": {
                         "type": "bool",
                         "label": trans("agent.option.tools.local"),
                         "description": trans("agent.option.tools.local.desc"),
-                        "default":
+                        "default": True,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
                         "label": trans("agent.option.tools.remote"),
                         "description": trans("agent.option.tools.remote.desc"),
-                        "default":
+                        "default": True,
                     },
                 }
             },
-            "
-                "label": trans("agent.option.section.
+            "refine": {
+                "label": trans("agent.option.section.refine"),
                 "options": {
                     "model": {
                         "label": trans("agent.option.model"),
@@ -502,23 +853,27 @@ class Agent(BaseAgent):
                     "prompt": {
                         "type": "textarea",
                         "label": trans("agent.option.prompt"),
-                        "description": trans("agent.option.prompt.
-                        "default": self.
+                        "description": trans("agent.option.prompt.refine.desc"),
+                        "default": self.DEFAULT_PLAN_REFINE_PROMPT,
+                    },
+                    "after_each_subtask": {
+                        "type": "bool",
+                        "label": trans("agent.option.refine.after_each"),
+                        "description": trans("agent.option.refine.after_each.desc"),
+                        "default": True,
+                    },
                     "allow_local_tools": {
                         "type": "bool",
                         "label": trans("agent.option.tools.local"),
                         "description": trans("agent.option.tools.local.desc"),
-                        "default":
+                        "default": True,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
                         "label": trans("agent.option.tools.remote"),
                         "description": trans("agent.option.tools.remote.desc"),
-                        "default":
+                        "default": True,
                     },
                 }
             },
-        }
-
-
+        }
```