pygpt-net 2.6.62__py3-none-any.whl → 2.6.63__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +5 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/presets/editor.py +65 -1
- pygpt_net/core/agents/custom/llama_index/runner.py +15 -52
- pygpt_net/core/agents/custom/runner.py +194 -76
- pygpt_net/core/agents/runners/llama_workflow.py +60 -10
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
- pygpt_net/data/config/presets/agent_openai_coder.json +1 -15
- pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
- pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
- pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
- pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
- pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
- pygpt_net/data/config/presets/agent_supervisor.json +1 -11
- pygpt_net/data/js/app/runtime.js +4 -1
- pygpt_net/data/js/app.min.js +3 -2
- pygpt_net/data/locale/locale.en.ini +5 -0
- pygpt_net/js_rc.py +13 -10
- pygpt_net/provider/agents/base.py +0 -0
- pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/codeact.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +229 -29
- pygpt_net/provider/agents/llama_index/workflow/supervisor.py +0 -0
- pygpt_net/provider/agents/openai/agent.py +0 -0
- pygpt_net/provider/agents/openai/agent_b2b.py +4 -4
- pygpt_net/provider/agents/openai/agent_planner.py +617 -262
- pygpt_net/provider/agents/openai/agent_with_experts.py +0 -0
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
- pygpt_net/provider/agents/openai/evolve.py +6 -6
- pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/openai/supervisor.py +290 -37
- pygpt_net/provider/api/x_ai/__init__.py +0 -0
- pygpt_net/provider/core/agent/__init__.py +0 -0
- pygpt_net/provider/core/agent/base.py +0 -0
- pygpt_net/provider/core/agent/json_file.py +0 -0
- pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
- pygpt_net/provider/llms/base.py +0 -0
- pygpt_net/provider/llms/deepseek_api.py +0 -0
- pygpt_net/provider/llms/google.py +0 -0
- pygpt_net/provider/llms/hugging_face_api.py +0 -0
- pygpt_net/provider/llms/hugging_face_router.py +0 -0
- pygpt_net/provider/llms/mistral.py +0 -0
- pygpt_net/provider/llms/perplexity.py +0 -0
- pygpt_net/provider/llms/x_ai.py +0 -0
- pygpt_net/ui/widget/dialog/confirm.py +34 -8
- pygpt_net/ui/widget/textarea/input.py +1 -1
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/METADATA +7 -2
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/RECORD +34 -34
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/entry_points.txt +0 -0
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.27 10:00:00 #
 # ================================================== #
 
 from typing import List, Optional, Callable
@@ -22,6 +22,7 @@ from llama_index.core.workflow import (
     step,
 )
 from llama_index.core.llms.llm import LLM
+# noqa
 from llama_index.core.prompts import PromptTemplate
 from llama_index.core.tools.types import BaseTool
 
@@ -56,6 +57,16 @@ class Plan(BaseModel):
     sub_tasks: List[SubTask] = Field(..., description="The sub-tasks in the plan.")
 
 
+# Structured refinement output to emulate the legacy Planner's refine behavior.
+class PlanRefinement(BaseModel):
+    is_done: bool = Field(..., description="Whether the overall task is already satisfied.")
+    reason: Optional[str] = Field(None, description="Short justification why the plan is complete or needs update.")
+    plan: Optional[Plan] = Field(
+        default=None,
+        description="An updated plan that replaces the remaining sub-tasks. Omit if is_done=True or no update is needed.",
+    )
+
+
 DEFAULT_INITIAL_PLAN_PROMPT = """\
 You have the following prior context/memory (may be empty):
 {memory_context}
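
For orientation (not part of the diff): a minimal sketch of how the new `PlanRefinement` schema composes the existing `Plan`/`SubTask` models from this module. Field names follow the hunk above; the concrete values are made up.

```python
# Hypothetical example values; PlanRefinement, Plan and SubTask as defined in planner.py.
refinement = PlanRefinement(
    is_done=False,
    reason="A user-facing summary step is still missing.",
    plan=Plan(sub_tasks=[
        SubTask(
            name="summarize_results",
            input="Summarize the collected findings for the user.",
            expected_output="A short, user-facing summary.",
            dependencies=["collect_findings"],
        ),
    ]),
)
assert refinement.plan is not None and not refinement.is_done
```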
@@ -70,14 +81,29 @@ The tools available are:
 Overall Task: {task}
 """
 
+# Refinement prompt tuned to prevent premature completion and enforce "final deliverable present" rule.
 DEFAULT_PLAN_REFINE_PROMPT = """\
 You have the following prior context/memory (may be empty):
 {memory_context}
 
-Think step-by-step. Given an overall task, a set of tools, and completed sub-tasks,
-
-
-
+Think step-by-step. Given an overall task, a set of tools, and completed sub-tasks, decide whether the overall task is already satisfied.
+If not, update the remaining sub-tasks so that the overall task can still be completed.
+
+Completion criteria (ALL must be true to set is_done=true):
+- A final, user-facing answer that directly satisfies "Overall Task" already exists within "Completed Sub-Tasks + Outputs".
+- The final answer matches any explicit format and language requested in "Overall Task".
+- No critical transformation/summarization/finalization step remains among "Remaining Sub-Tasks" (e.g., steps like: provide/present/report/answer/summarize/finalize/deliver the result).
+- The final answer does not rely on placeholders such as "will be provided later" or "see plan above".
+
+If ANY of the above is false, set is_done=false.
+
+Update policy:
+- If the remaining sub-tasks are already reasonable and correctly ordered, do not propose changes: set is_done=false and omit "plan".
+- Only propose a new "plan" if you need to REPLACE the "Remaining Sub-Tasks" (e.g., wrong order, missing critical steps, or new info from completed outputs).
+- Do NOT repeat any completed sub-task. New sub-tasks must replace only the "Remaining Sub-Tasks".
+
+Output schema:
+- Return a JSON object matching the schema with fields: is_done (bool), reason (string), and optional plan (Plan).
 
 The tools available are:
 {tools_str}
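
A rough sketch of how this template is consumed: the `_refine_plan` hunk further down calls `astructured_predict` with the `PlanRefinement` schema and the variables below. The `completed_outputs` and `remaining_sub_tasks` placeholders are assumed to live in the unchanged part of the template, which this diff does not show; all values here are hypothetical.

```python
from llama_index.core.prompts import PromptTemplate

# Sketch only: `llm` stands for any llama_index LLM instance.
refine_prompt = PromptTemplate(DEFAULT_PLAN_REFINE_PROMPT)
refinement = llm.structured_predict(   # the workflow itself uses the async astructured_predict
    PlanRefinement,
    refine_prompt,
    tools_str="search: web search tool",
    task="Write a three-sentence summary of topic X.",
    completed_outputs="[1] research -> notes about topic X",
    remaining_sub_tasks="[2] name=summarize ...",
    memory_context="(empty)",
)
print(refinement.is_done, refinement.reason)
```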
@@ -122,6 +148,7 @@ class PlannerWorkflow(Workflow):
         clear_executor_memory_between_subtasks: bool = False,
         executor_memory_factory: Optional[Callable[[], object]] = None,
         on_stop: Optional[Callable] = None,
+        refine_after_each_subtask: bool = True,
     ):
         super().__init__(timeout=None, verbose=verbose)
         self._planner_llm = llm
@@ -158,6 +185,9 @@ class PlannerWorkflow(Workflow):
 
         self._clear_exec_mem_between_subtasks = clear_executor_memory_between_subtasks
 
+        # Controls whether the legacy-style refine happens after every sub-task execution.
+        self._refine_after_each_subtask = refine_after_each_subtask
+
     def _stopped(self) -> bool:
         """
         Check if the workflow has been stopped.
@@ -171,6 +201,32 @@ class PlannerWorkflow(Workflow):
             return False
         return False
 
+    # Build human-friendly, step-scoped labels to display in the UI instead of agent names.
+    def _agent_label(
+        self,
+        step: str,
+        index: Optional[int] = None,
+        total: Optional[int] = None,
+        subtask_name: Optional[str] = None,
+    ) -> str:
+        if step == "subtask":
+            if index and total:
+                base = f"Sub-task {index}/{total}"
+            elif index:
+                base = f"Sub-task {index}"
+            else:
+                base = "Sub-task"
+            return f"{base}: {subtask_name}" if subtask_name else base
+        if step == "refine":
+            if index and total:
+                return f"Refine {index}/{total}"
+            return "Refine" if not index else f"Refine {index}"
+        if step in {"make_plan", "plan"}:
+            return "Plan"
+        if step in {"execute", "execute_plan"}:
+            return "Execute"
+        return step or "Step"
+
     def _emit_step_event(
         self,
         ctx: Context,
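
To illustrate the labels this helper produces (following the branches above; `wf` is assumed to be an already-constructed `PlannerWorkflow`, construction omitted):

```python
# Illustration only; wf is assumed to be a PlannerWorkflow instance.
assert wf._agent_label("subtask", index=2, total=5, subtask_name="research") == "Sub-task 2/5: research"
assert wf._agent_label("refine", index=2, total=5) == "Refine 2/5"
assert wf._agent_label("make_plan") == "Plan"
assert wf._agent_label("execute") == "Execute"
assert wf._agent_label("") == "Step"
```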
@@ -188,9 +244,15 @@
         :param total: The total number of steps (optional).
         :param meta: Additional metadata for the step (optional).
         """
-        # Always pass a friendly
+        # Always pass a friendly per-step label as "agent_name".
         m = dict(meta or {})
-
+        label = self._agent_label(
+            step=name,
+            index=index,
+            total=total,
+            subtask_name=m.get("name"),
+        )
+        m["agent_name"] = label
 
         try:
             ctx.write_event_to_stream(
@@ -203,7 +265,7 @@
                 AgentStream(
                     delta="",
                     response="",
-                    current_agent_name=
+                    current_agent_name=label,
                     tool_calls=[],
                     raw={"StepEvent": {"name": name, "index": index, "total": total, "meta": m}}
                 )
@@ -314,7 +376,7 @@
 
         :param ctx: The context to write the event to
         :param text: The text message to emit.
-        :param agent_name: The name
+        :param agent_name: The name/label to display in UI (we pass per-step labels here).
         """
         # Always try to include agent name; fall back to minimal event for older validators.
         try:
@@ -406,12 +468,13 @@
         ctx_text = "Completed sub-tasks context:\n" + "\n".join(parts)
         return self._truncate(ctx_text, char_limit or 8000)
 
-    async def _run_subtask(self, ctx: Context, prompt: str) -> str:
+    async def _run_subtask(self, ctx: Context, prompt: str, agent_label: Optional[str] = None) -> str:
         """
         Run a sub-task using the executor agent.
 
         :param ctx: The context in which the sub-task is executed.
         :param prompt: The prompt for the sub-task.
+        :param agent_label: Per-step UI label (e.g., 'Sub-task 1/7: ...') used instead of agent name.
         """
         if self._clear_exec_mem_between_subtasks:
             self._reset_executor_memory()
@@ -452,15 +515,15 @@
                 if delta:
                     has_stream = True
                     stream_buf.append(str(delta))
-                #
+                # Force the per-step label for executor events.
                 try:
-                    e.current_agent_name = self._display_executor_name
+                    e.current_agent_name = agent_label or self._display_executor_name
                 except Exception:
                     try:
                         e = AgentStream(
                             delta=getattr(e, "delta", ""),
                             response=getattr(e, "response", ""),
-                            current_agent_name=self._display_executor_name,
+                            current_agent_name=agent_label or self._display_executor_name,
                             tool_calls=getattr(e, "tool_calls", []),
                             raw=getattr(e, "raw", {}),
                         )
@@ -478,7 +541,7 @@
                     AgentStream(
                         delta=last_answer,
                         response=last_answer,
-                        current_agent_name=self._display_executor_name,
+                        current_agent_name=agent_label or self._display_executor_name,
                         tool_calls=e.tool_calls,
                         raw=e.raw,
                     )
@@ -502,9 +565,65 @@
         try:
             return await _stream()
         except Exception as ex:
-            await self._emit_text(ctx, f"\n`Sub-task failed: {ex}`", agent_name=self._display_executor_name)
+            await self._emit_text(ctx, f"\n`Sub-task failed: {ex}`", agent_name=agent_label or self._display_executor_name)
             return last_answer or ("".join(stream_buf).strip() if stream_buf else "")
 
+    # Helper to render sub-tasks into a readable string for prompts and UI.
+    def _format_subtasks(self, sub_tasks: List[SubTask]) -> str:
+        parts = []
+        for i, st in enumerate(sub_tasks, 1):
+            parts.append(
+                f"[{i}] name={st.name}\n"
+                f" input={st.input}\n"
+                f" expected_output={st.expected_output}\n"
+                f" dependencies={st.dependencies}"
+            )
+        return "\n".join(parts) if parts else "(none)"
+
+    # Helper to render completed outputs for refinement prompt.
+    def _format_completed(self, completed: list[tuple[str, str]]) -> str:
+        if not completed:
+            return "(none)"
+        parts = []
+        for i, (name, out) in enumerate(completed, 1):
+            parts.append(f"[{i}] {name} -> {self._truncate((out or '').strip(), 2000)}")
+        joined = "\n".join(parts)
+        return self._truncate(joined, self._memory_char_limit or 8000)
+
+    async def _refine_plan(
+        self,
+        ctx: Context,
+        task: str,
+        tools_str: str,
+        completed: list[tuple[str, str]],
+        remaining: List[SubTask],
+        memory_context: str,
+        agent_label: Optional[str] = None,
+    ) -> Optional[PlanRefinement]:
+        """
+        Ask the planner LLM to refine the plan. Returns a PlanRefinement or None on failure.
+        """
+        completed_text = self._format_completed(completed)
+        remaining_text = self._format_subtasks(remaining)
+
+        # Emit a lightweight status line to the UI.
+        await self._emit_text(ctx, "\n`Refining remaining plan...`", agent_name=agent_label or "Refine")
+
+        try:
+            refinement = await self._planner_llm.astructured_predict(
+                PlanRefinement,
+                self._plan_refine_prompt,
+                tools_str=tools_str,
+                task=task,
+                completed_outputs=completed_text,
+                remaining_sub_tasks=remaining_text,
+                memory_context=memory_context,
+            )
+            return refinement
+        except (ValueError, ValidationError):
+            # Graceful fallback if the model fails to conform to schema.
+            return None
+
     @step
     async def make_plan(self, ctx: Context, ev: QueryEvent) -> PlanReady:
         """
@@ -538,7 +657,8 @@
                 f"Expected output: {st.expected_output}\n"
                 f"Dependencies: {st.dependencies}\n\n"
             )
-
+        # Use a per-step label for plan creation
+        await self._emit_text(ctx, "\n".join(lines), agent_name=self._agent_label("make_plan"))
         return PlanReady(plan=plan, query=ev.query)
 
     @step
@@ -555,36 +675,51 @@
         last_answer = ""
         completed: list[tuple[str, str]] = []  # (name, output)
 
-
+        # Start executing with a per-step label
+        execute_label = self._agent_label("execute")
+        await self._emit_text(ctx, "\n\n`Executing plan...`", agent_name=execute_label)
+
+        # Prepare static prompt parts for refinement.
+        tools_str = ""
+        for t in self._tools:
+            tools_str += f"{(t.metadata.name or '').strip()}: {(t.metadata.description or '').strip()}\n"
+        memory_context = self._memory_to_text(self._memory)
+
+        i = 0  # manual index to allow in-place plan updates during refinement
+        while i < len(plan_sub_tasks):
+            st = plan_sub_tasks[i]
+            total = len(plan_sub_tasks)
+
+            # Compute label for this sub-task
+            subtask_label = self._agent_label("subtask", index=i + 1, total=total, subtask_name=st.name)
 
-        for i, st in enumerate(plan_sub_tasks, 1):
             self._emit_step_event(
                 ctx,
                 name="subtask",
-                index=i,
+                index=i + 1,
                 total=total,
                 meta={
                     "name": st.name,
                     "expected_output": st.expected_output,
                     "dependencies": st.dependencies,
                     "input": st.input,
-                    #
-                    "agent_name":
+                    # UI label for this sub-task step
+                    "agent_name": subtask_label,
                 },
             )
 
             header = (
-                f"\n\n**===== Sub Task {i}/{total}: {st.name} =====**\n"
+                f"\n\n**===== Sub Task {i + 1}/{total}: {st.name} =====**\n"
                 f"Expected output: {st.expected_output}\n"
                 f"Dependencies: {st.dependencies}\n\n"
             )
 
             # stop callback
             if self._stopped():
-                await self._emit_text(ctx, "\n`Plan execution stopped.`", agent_name=
+                await self._emit_text(ctx, "\n`Plan execution stopped.`", agent_name=execute_label)
                 return FinalEvent(result=last_answer or "Plan execution stopped.")
 
-            await self._emit_text(ctx, header, agent_name=
+            await self._emit_text(ctx, header, agent_name=subtask_label)
 
             # build context for sub-task
             ctx_text = self._build_context_for_subtask(
@@ -604,18 +739,83 @@
             else:
                 composed_prompt = st.input
 
-            # run the sub-task
-            sub_answer = await self._run_subtask(ctx, composed_prompt)
+            # run the sub-task with the per-step label
+            sub_answer = await self._run_subtask(ctx, composed_prompt, agent_label=subtask_label)
             sub_answer = (sub_answer or "").strip()
 
-            await self._emit_text(ctx, f"\n\n`Finished Sub Task {i}/{total}: {st.name}`", agent_name=
+            await self._emit_text(ctx, f"\n\n`Finished Sub Task {i + 1}/{total}: {st.name}`", agent_name=subtask_label)
 
             # save completed sub-task
             completed.append((st.name, sub_answer))
             if sub_answer:
                 last_answer = sub_answer
 
-            #
+            # Early stop check (external cancel)
+            if self._stopped():
+                await self._emit_text(ctx, "\n`Plan execution stopped.`", agent_name=execute_label)
+                return FinalEvent(result=last_answer or "Plan execution stopped.")
+
+            # Optional legacy-style refine after each sub-task
+            i += 1  # move pointer to the next item before potential replacement of tail
+            if self._refine_after_each_subtask and i < len(plan_sub_tasks):
+                remaining = plan_sub_tasks[i:]
+                # Label for refine step
+                refine_label = self._agent_label("refine", index=i, total=len(plan_sub_tasks))
+
+                # Emit a step event for refine to keep UI parity with the legacy Planner.
+                self._emit_step_event(
+                    ctx,
+                    name="refine",
+                    index=i,
+                    total=len(plan_sub_tasks),
+                    meta={"agent_name": refine_label},
+                )
+
+                refinement = await self._refine_plan(
+                    ctx=ctx,
+                    task=ev.query,
+                    tools_str=tools_str,
+                    completed=completed,
+                    remaining=remaining,
+                    memory_context=memory_context,
+                    agent_label=refine_label,
+                )
+
+                # If refinement failed to parse, skip gracefully.
+                if refinement is None:
+                    continue
+
+                # If the planner states the task is complete, stop early.
+                if getattr(refinement, "is_done", False):
+                    reason = getattr(refinement, "reason", "") or "Planner judged the task as satisfied."
+                    await self._emit_text(
+                        ctx,
+                        f"\n`Planner marked the plan as complete: {reason}`",
+                        agent_name=refine_label,
+                    )
+                    await self._emit_text(ctx, "\n\n`Plan execution finished.`", agent_name=execute_label)
+                    return FinalEvent(result=last_answer or "Plan finished.")
+
+                # If an updated plan was provided, replace the remaining sub-tasks.
+                if refinement.plan and refinement.plan.sub_tasks is not None:
+                    # Filter out any sub-tasks that repeat completed names to avoid loops.
+                    completed_names = {n for (n, _) in completed}
+                    new_remaining = [st for st in refinement.plan.sub_tasks if st.name not in completed_names]
+
+                    # If nothing changes, continue.
+                    current_remaining_repr = self._format_subtasks(remaining)
+                    new_remaining_repr = self._format_subtasks(new_remaining)
+                    if new_remaining_repr.strip() != current_remaining_repr.strip():
+                        plan_sub_tasks = plan_sub_tasks[:i] + new_remaining
+                        # Present the updated tail of the plan to the UI.
+                        lines = ["`Updated remaining plan:`"]
+                        for k, st_upd in enumerate(new_remaining, i + 1):
+                            lines.append(
+                                f"\n**===== Sub Task {k}/{len(plan_sub_tasks)}: {st_upd.name} =====**\n"
+                                f"Expected output: {st_upd.expected_output}\n"
+                                f"Dependencies: {st_upd.dependencies}\n\n"
+                            )
+                        await self._emit_text(ctx, "\n".join(lines), agent_name=refine_label)
 
-        await self._emit_text(ctx, "\n\n`Plan execution finished.`", agent_name=
+        await self._emit_text(ctx, "\n\n`Plan execution finished.`", agent_name=execute_label)
         return FinalEvent(result=last_answer or "Plan finished.")
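
The tail replacement performed in the refine branch above can be shown in isolation; a self-contained sketch with made-up sub-task names (plain strings stand in for `SubTask` objects):

```python
# Plan as a list of sub-task names; i points at the next sub-task to run.
plan = ["research", "draft", "review", "publish"]
completed = [("research", "notes"), ("draft", "v1")]
i = 2  # "review" and "publish" remain

# The refinement proposes a new tail; anything already completed is filtered out.
proposed_tail = ["draft", "polish", "publish"]
completed_names = {name for name, _ in completed}
new_remaining = [name for name in proposed_tail if name not in completed_names]

# Replace only the remaining tail, mirroring `plan_sub_tasks[:i] + new_remaining`.
plan = plan[:i] + new_remaining
assert plan == ["research", "draft", "polish", "publish"]
```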
File without changes
File without changes
@@ -449,13 +449,13 @@ class Agent(BaseAgent):
                 "type": "bool",
                 "label": trans("agent.option.tools.local"),
                 "description": trans("agent.option.tools.local.desc"),
-                "default":
+                "default": True,
             },
             "allow_remote_tools": {
                 "type": "bool",
                 "label": trans("agent.option.tools.remote"),
                 "description": trans("agent.option.tools.remote.desc"),
-                "default":
+                "default": True,
             },
         }
     },
@@ -483,13 +483,13 @@ class Agent(BaseAgent):
                 "type": "bool",
                 "label": trans("agent.option.tools.local"),
                 "description": trans("agent.option.tools.local.desc"),
-                "default":
+                "default": True,
             },
             "allow_remote_tools": {
                 "type": "bool",
                 "label": trans("agent.option.tools.remote"),
                 "description": trans("agent.option.tools.remote.desc"),
-                "default":
+                "default": True,
             },
         }
     },