pygpt-net 2.6.61__py3-none-any.whl → 2.6.63__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/response.py +8 -2
  4. pygpt_net/controller/presets/editor.py +65 -1
  5. pygpt_net/controller/settings/profile.py +16 -4
  6. pygpt_net/controller/settings/workdir.py +30 -5
  7. pygpt_net/controller/theme/common.py +4 -2
  8. pygpt_net/controller/theme/markdown.py +2 -2
  9. pygpt_net/controller/theme/theme.py +2 -1
  10. pygpt_net/controller/ui/ui.py +31 -3
  11. pygpt_net/core/agents/custom/llama_index/runner.py +30 -52
  12. pygpt_net/core/agents/custom/runner.py +199 -76
  13. pygpt_net/core/agents/runners/llama_workflow.py +122 -12
  14. pygpt_net/core/agents/runners/openai_workflow.py +2 -1
  15. pygpt_net/core/node_editor/types.py +13 -1
  16. pygpt_net/core/render/web/renderer.py +76 -11
  17. pygpt_net/data/config/config.json +3 -3
  18. pygpt_net/data/config/models.json +3 -3
  19. pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
  20. pygpt_net/data/config/presets/agent_openai_coder.json +1 -15
  21. pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
  22. pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
  23. pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
  24. pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
  25. pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
  26. pygpt_net/data/config/presets/agent_supervisor.json +1 -11
  27. pygpt_net/data/css/style.dark.css +18 -0
  28. pygpt_net/data/css/style.light.css +20 -1
  29. pygpt_net/data/js/app/runtime.js +4 -1
  30. pygpt_net/data/js/app.min.js +3 -2
  31. pygpt_net/data/locale/locale.de.ini +2 -0
  32. pygpt_net/data/locale/locale.en.ini +7 -0
  33. pygpt_net/data/locale/locale.es.ini +2 -0
  34. pygpt_net/data/locale/locale.fr.ini +2 -0
  35. pygpt_net/data/locale/locale.it.ini +2 -0
  36. pygpt_net/data/locale/locale.pl.ini +3 -1
  37. pygpt_net/data/locale/locale.uk.ini +2 -0
  38. pygpt_net/data/locale/locale.zh.ini +2 -0
  39. pygpt_net/item/ctx.py +23 -1
  40. pygpt_net/js_rc.py +13 -10
  41. pygpt_net/provider/agents/base.py +0 -0
  42. pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
  43. pygpt_net/provider/agents/llama_index/workflow/codeact.py +9 -6
  44. pygpt_net/provider/agents/llama_index/workflow/openai.py +38 -11
  45. pygpt_net/provider/agents/llama_index/workflow/planner.py +248 -28
  46. pygpt_net/provider/agents/llama_index/workflow/supervisor.py +60 -10
  47. pygpt_net/provider/agents/openai/agent.py +3 -1
  48. pygpt_net/provider/agents/openai/agent_b2b.py +17 -13
  49. pygpt_net/provider/agents/openai/agent_planner.py +617 -258
  50. pygpt_net/provider/agents/openai/agent_with_experts.py +4 -1
  51. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +8 -6
  52. pygpt_net/provider/agents/openai/agent_with_feedback.py +8 -6
  53. pygpt_net/provider/agents/openai/evolve.py +12 -8
  54. pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
  55. pygpt_net/provider/agents/openai/supervisor.py +292 -37
  56. pygpt_net/provider/api/openai/agents/response.py +1 -0
  57. pygpt_net/provider/api/x_ai/__init__.py +0 -0
  58. pygpt_net/provider/core/agent/__init__.py +0 -0
  59. pygpt_net/provider/core/agent/base.py +0 -0
  60. pygpt_net/provider/core/agent/json_file.py +0 -0
  61. pygpt_net/provider/core/config/patch.py +8 -0
  62. pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
  63. pygpt_net/provider/llms/base.py +0 -0
  64. pygpt_net/provider/llms/deepseek_api.py +0 -0
  65. pygpt_net/provider/llms/google.py +0 -0
  66. pygpt_net/provider/llms/hugging_face_api.py +0 -0
  67. pygpt_net/provider/llms/hugging_face_router.py +0 -0
  68. pygpt_net/provider/llms/mistral.py +0 -0
  69. pygpt_net/provider/llms/perplexity.py +0 -0
  70. pygpt_net/provider/llms/x_ai.py +0 -0
  71. pygpt_net/tools/agent_builder/tool.py +6 -0
  72. pygpt_net/tools/agent_builder/ui/dialogs.py +0 -41
  73. pygpt_net/ui/layout/toolbox/presets.py +14 -2
  74. pygpt_net/ui/main.py +2 -2
  75. pygpt_net/ui/widget/dialog/confirm.py +55 -5
  76. pygpt_net/ui/widget/draw/painter.py +90 -1
  77. pygpt_net/ui/widget/lists/preset.py +289 -25
  78. pygpt_net/ui/widget/node_editor/editor.py +53 -15
  79. pygpt_net/ui/widget/node_editor/node.py +82 -104
  80. pygpt_net/ui/widget/node_editor/view.py +4 -5
  81. pygpt_net/ui/widget/textarea/input.py +155 -21
  82. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/METADATA +22 -8
  83. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/RECORD +70 -70
  84. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/LICENSE +0 -0
  85. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/WHEEL +0 -0
  86. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/agents/llama_index/workflow/planner.py
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.24 02:00:00 #
+ # Updated Date: 2025.09.27 10:00:00 #
  # ================================================== #

  from typing import List, Optional, Callable
@@ -22,6 +22,7 @@ from llama_index.core.workflow import (
  step,
  )
  from llama_index.core.llms.llm import LLM
+ # noqa
  from llama_index.core.prompts import PromptTemplate
  from llama_index.core.tools.types import BaseTool

@@ -56,6 +57,16 @@ class Plan(BaseModel):
  sub_tasks: List[SubTask] = Field(..., description="The sub-tasks in the plan.")


+ # Structured refinement output to emulate the legacy Planner's refine behavior.
+ class PlanRefinement(BaseModel):
+ is_done: bool = Field(..., description="Whether the overall task is already satisfied.")
+ reason: Optional[str] = Field(None, description="Short justification why the plan is complete or needs update.")
+ plan: Optional[Plan] = Field(
+ default=None,
+ description="An updated plan that replaces the remaining sub-tasks. Omit if is_done=True or no update is needed.",
+ )
+
+
  DEFAULT_INITIAL_PLAN_PROMPT = """\
  You have the following prior context/memory (may be empty):
  {memory_context}
@@ -70,14 +81,29 @@ The tools available are:
  Overall Task: {task}
  """

+ # Refinement prompt tuned to prevent premature completion and enforce "final deliverable present" rule.
  DEFAULT_PLAN_REFINE_PROMPT = """\
  You have the following prior context/memory (may be empty):
  {memory_context}

- Think step-by-step. Given an overall task, a set of tools, and completed sub-tasks, update (if needed) the remaining sub-tasks so that the overall task can still be completed.
- The plan should end with a sub-task that can achieve and satisfy the overall task.
- If you do update the plan, only create new sub-tasks that will replace the remaining sub-tasks, do NOT repeat tasks that are already completed.
- If the remaining sub-tasks are enough to achieve the overall task, it is ok to skip this step, and instead explain why the plan is complete.
+ Think step-by-step. Given an overall task, a set of tools, and completed sub-tasks, decide whether the overall task is already satisfied.
+ If not, update the remaining sub-tasks so that the overall task can still be completed.
+
+ Completion criteria (ALL must be true to set is_done=true):
+ - A final, user-facing answer that directly satisfies "Overall Task" already exists within "Completed Sub-Tasks + Outputs".
+ - The final answer matches any explicit format and language requested in "Overall Task".
+ - No critical transformation/summarization/finalization step remains among "Remaining Sub-Tasks" (e.g., steps like: provide/present/report/answer/summarize/finalize/deliver the result).
+ - The final answer does not rely on placeholders such as "will be provided later" or "see plan above".
+
+ If ANY of the above is false, set is_done=false.
+
+ Update policy:
+ - If the remaining sub-tasks are already reasonable and correctly ordered, do not propose changes: set is_done=false and omit "plan".
+ - Only propose a new "plan" if you need to REPLACE the "Remaining Sub-Tasks" (e.g., wrong order, missing critical steps, or new info from completed outputs).
+ - Do NOT repeat any completed sub-task. New sub-tasks must replace only the "Remaining Sub-Tasks".
+
+ Output schema:
+ - Return a JSON object matching the schema with fields: is_done (bool), reason (string), and optional plan (Plan).

  The tools available are:
  {tools_str}
@@ -122,6 +148,7 @@ class PlannerWorkflow(Workflow):
  clear_executor_memory_between_subtasks: bool = False,
  executor_memory_factory: Optional[Callable[[], object]] = None,
  on_stop: Optional[Callable] = None,
+ refine_after_each_subtask: bool = True,
  ):
  super().__init__(timeout=None, verbose=verbose)
  self._planner_llm = llm
@@ -134,6 +161,10 @@ class PlannerWorkflow(Workflow):
  self._memory_char_limit = memory_char_limit
  self._on_stop = on_stop

+ # Human-friendly display names propagated to UI via workflow events.
+ self._display_planner_name: str = "PlannerWorkflow"
+ self._display_executor_name: str = "FunctionAgent"
+
  self._executor = FunctionAgent(
  name="PlannerExecutor",
  description="Executes planner sub-tasks using available tools.",
@@ -154,6 +185,9 @@ class PlannerWorkflow(Workflow):

  self._clear_exec_mem_between_subtasks = clear_executor_memory_between_subtasks

+ # Controls whether the legacy-style refine happens after every sub-task execution.
+ self._refine_after_each_subtask = refine_after_each_subtask
+
  def _stopped(self) -> bool:
  """
  Check if the workflow has been stopped.
@@ -167,6 +201,32 @@ class PlannerWorkflow(Workflow):
  return False
  return False

+ # Build human-friendly, step-scoped labels to display in the UI instead of agent names.
+ def _agent_label(
+ self,
+ step: str,
+ index: Optional[int] = None,
+ total: Optional[int] = None,
+ subtask_name: Optional[str] = None,
+ ) -> str:
+ if step == "subtask":
+ if index and total:
+ base = f"Sub-task {index}/{total}"
+ elif index:
+ base = f"Sub-task {index}"
+ else:
+ base = "Sub-task"
+ return f"{base}: {subtask_name}" if subtask_name else base
+ if step == "refine":
+ if index and total:
+ return f"Refine {index}/{total}"
+ return "Refine" if not index else f"Refine {index}"
+ if step in {"make_plan", "plan"}:
+ return "Plan"
+ if step in {"execute", "execute_plan"}:
+ return "Execute"
+ return step or "Step"
+
  def _emit_step_event(
  self,
  ctx: Context,
@@ -184,9 +244,19 @@ class PlannerWorkflow(Workflow):
  :param total: The total number of steps (optional).
  :param meta: Additional metadata for the step (optional).
  """
+ # Always pass a friendly per-step label as "agent_name".
+ m = dict(meta or {})
+ label = self._agent_label(
+ step=name,
+ index=index,
+ total=total,
+ subtask_name=m.get("name"),
+ )
+ m["agent_name"] = label
+
  try:
  ctx.write_event_to_stream(
- StepEvent(name=name, index=index, total=total, meta=meta or {})
+ StepEvent(name=name, index=index, total=total, meta=m)
  )
  except Exception:
  # fallback for older versions of AgentStream
@@ -195,9 +265,9 @@ class PlannerWorkflow(Workflow):
  AgentStream(
  delta="",
  response="",
- current_agent_name="PlannerWorkflow",
+ current_agent_name=label,
  tool_calls=[],
- raw={"StepEvent": {"name": name, "index": index, "total": total, "meta": meta or {}}}
+ raw={"StepEvent": {"name": name, "index": index, "total": total, "meta": m}}
  )
  )
  except Exception:
@@ -306,11 +376,10 @@ class PlannerWorkflow(Workflow):

  :param ctx: The context to write the event to
  :param text: The text message to emit.
- :param agent_name: The name of the agent emitting the text (default: "PlannerWorkflow").
+ :param agent_name: The name/label to display in UI (we pass per-step labels here).
  """
+ # Always try to include agent name; fall back to minimal event for older validators.
  try:
- ctx.write_event_to_stream(AgentStream(delta=text))
- except ValidationError:
  ctx.write_event_to_stream(
  AgentStream(
  delta=text,
@@ -320,6 +389,8 @@ class PlannerWorkflow(Workflow):
  raw={},
  )
  )
+ except ValidationError:
+ ctx.write_event_to_stream(AgentStream(delta=text))

  def _to_text(self, resp) -> str:
  """
@@ -397,12 +468,13 @@ class PlannerWorkflow(Workflow):
  ctx_text = "Completed sub-tasks context:\n" + "\n".join(parts)
  return self._truncate(ctx_text, char_limit or 8000)

- async def _run_subtask(self, ctx: Context, prompt: str) -> str:
+ async def _run_subtask(self, ctx: Context, prompt: str, agent_label: Optional[str] = None) -> str:
  """
  Run a sub-task using the executor agent.

  :param ctx: The context in which the sub-task is executed.
  :param prompt: The prompt for the sub-task.
+ :param agent_label: Per-step UI label (e.g., 'Sub-task 1/7: ...') used instead of agent name.
  """
  if self._clear_exec_mem_between_subtasks:
  self._reset_executor_memory()
@@ -443,9 +515,18 @@ class PlannerWorkflow(Workflow):
  if delta:
  has_stream = True
  stream_buf.append(str(delta))
- if not getattr(e, "current_agent_name", None):
+ # Force the per-step label for executor events.
+ try:
+ e.current_agent_name = agent_label or self._display_executor_name
+ except Exception:
  try:
- e.current_agent_name = self._executor.name
+ e = AgentStream(
+ delta=getattr(e, "delta", ""),
+ response=getattr(e, "response", ""),
+ current_agent_name=agent_label or self._display_executor_name,
+ tool_calls=getattr(e, "tool_calls", []),
+ raw=getattr(e, "raw", {}),
+ )
  except Exception:
  pass
  ctx.write_event_to_stream(e)
@@ -460,7 +541,7 @@ class PlannerWorkflow(Workflow):
  AgentStream(
  delta=last_answer,
  response=last_answer,
- current_agent_name=f"{self._executor.name} (subtask)",
+ current_agent_name=agent_label or self._display_executor_name,
  tool_calls=e.tool_calls,
  raw=e.raw,
  )
@@ -484,9 +565,65 @@ class PlannerWorkflow(Workflow):
  try:
  return await _stream()
  except Exception as ex:
- await self._emit_text(ctx, f"\n`Sub-task failed: {ex}`")
+ await self._emit_text(ctx, f"\n`Sub-task failed: {ex}`", agent_name=agent_label or self._display_executor_name)
  return last_answer or ("".join(stream_buf).strip() if stream_buf else "")

+ # Helper to render sub-tasks into a readable string for prompts and UI.
+ def _format_subtasks(self, sub_tasks: List[SubTask]) -> str:
+ parts = []
+ for i, st in enumerate(sub_tasks, 1):
+ parts.append(
+ f"[{i}] name={st.name}\n"
+ f" input={st.input}\n"
+ f" expected_output={st.expected_output}\n"
+ f" dependencies={st.dependencies}"
+ )
+ return "\n".join(parts) if parts else "(none)"
+
+ # Helper to render completed outputs for refinement prompt.
+ def _format_completed(self, completed: list[tuple[str, str]]) -> str:
+ if not completed:
+ return "(none)"
+ parts = []
+ for i, (name, out) in enumerate(completed, 1):
+ parts.append(f"[{i}] {name} -> {self._truncate((out or '').strip(), 2000)}")
+ joined = "\n".join(parts)
+ return self._truncate(joined, self._memory_char_limit or 8000)
+
+ async def _refine_plan(
+ self,
+ ctx: Context,
+ task: str,
+ tools_str: str,
+ completed: list[tuple[str, str]],
+ remaining: List[SubTask],
+ memory_context: str,
+ agent_label: Optional[str] = None,
+ ) -> Optional[PlanRefinement]:
+ """
+ Ask the planner LLM to refine the plan. Returns a PlanRefinement or None on failure.
+ """
+ completed_text = self._format_completed(completed)
+ remaining_text = self._format_subtasks(remaining)
+
+ # Emit a lightweight status line to the UI.
+ await self._emit_text(ctx, "\n`Refining remaining plan...`", agent_name=agent_label or "Refine")
+
+ try:
+ refinement = await self._planner_llm.astructured_predict(
+ PlanRefinement,
+ self._plan_refine_prompt,
+ tools_str=tools_str,
+ task=task,
+ completed_outputs=completed_text,
+ remaining_sub_tasks=remaining_text,
+ memory_context=memory_context,
+ )
+ return refinement
+ except (ValueError, ValidationError):
+ # Graceful fallback if the model fails to conform to schema.
+ return None
+
  @step
  async def make_plan(self, ctx: Context, ev: QueryEvent) -> PlanReady:
  """
@@ -520,7 +657,8 @@ class PlannerWorkflow(Workflow):
  f"Expected output: {st.expected_output}\n"
  f"Dependencies: {st.dependencies}\n\n"
  )
- await self._emit_text(ctx, "\n".join(lines))
+ # Use a per-step label for plan creation
+ await self._emit_text(ctx, "\n".join(lines), agent_name=self._agent_label("make_plan"))
  return PlanReady(plan=plan, query=ev.query)

  @step
@@ -537,34 +675,51 @@ class PlannerWorkflow(Workflow):
  last_answer = ""
  completed: list[tuple[str, str]] = [] # (name, output)

- await self._emit_text(ctx, "\n\n`Executing plan...`")
+ # Start executing with a per-step label
+ execute_label = self._agent_label("execute")
+ await self._emit_text(ctx, "\n\n`Executing plan...`", agent_name=execute_label)
+
+ # Prepare static prompt parts for refinement.
+ tools_str = ""
+ for t in self._tools:
+ tools_str += f"{(t.metadata.name or '').strip()}: {(t.metadata.description or '').strip()}\n"
+ memory_context = self._memory_to_text(self._memory)
+
+ i = 0 # manual index to allow in-place plan updates during refinement
+ while i < len(plan_sub_tasks):
+ st = plan_sub_tasks[i]
+ total = len(plan_sub_tasks)
+
+ # Compute label for this sub-task
+ subtask_label = self._agent_label("subtask", index=i + 1, total=total, subtask_name=st.name)

- for i, st in enumerate(plan_sub_tasks, 1):
  self._emit_step_event(
  ctx,
  name="subtask",
- index=i,
+ index=i + 1,
  total=total,
  meta={
  "name": st.name,
  "expected_output": st.expected_output,
  "dependencies": st.dependencies,
  "input": st.input,
+ # UI label for this sub-task step
+ "agent_name": subtask_label,
  },
  )

  header = (
- f"\n\n**===== Sub Task {i}/{total}: {st.name} =====**\n"
+ f"\n\n**===== Sub Task {i + 1}/{total}: {st.name} =====**\n"
  f"Expected output: {st.expected_output}\n"
  f"Dependencies: {st.dependencies}\n\n"
  )

  # stop callback
  if self._stopped():
- await self._emit_text(ctx, "\n`Plan execution stopped.`")
+ await self._emit_text(ctx, "\n`Plan execution stopped.`", agent_name=execute_label)
  return FinalEvent(result=last_answer or "Plan execution stopped.")

- await self._emit_text(ctx, header)
+ await self._emit_text(ctx, header, agent_name=subtask_label)

  # build context for sub-task
  ctx_text = self._build_context_for_subtask(
@@ -584,18 +739,83 @@ class PlannerWorkflow(Workflow):
  else:
  composed_prompt = st.input

- # run the sub-task
- sub_answer = await self._run_subtask(ctx, composed_prompt)
+ # run the sub-task with the per-step label
+ sub_answer = await self._run_subtask(ctx, composed_prompt, agent_label=subtask_label)
  sub_answer = (sub_answer or "").strip()

- await self._emit_text(ctx, f"\n\n`Finished Sub Task {i}/{total}: {st.name}`")
+ await self._emit_text(ctx, f"\n\n`Finished Sub Task {i + 1}/{total}: {st.name}`", agent_name=subtask_label)

  # save completed sub-task
  completed.append((st.name, sub_answer))
  if sub_answer:
  last_answer = sub_answer

- # TODO: refine plan if needed
+ # Early stop check (external cancel)
+ if self._stopped():
+ await self._emit_text(ctx, "\n`Plan execution stopped.`", agent_name=execute_label)
+ return FinalEvent(result=last_answer or "Plan execution stopped.")
+
+ # Optional legacy-style refine after each sub-task
+ i += 1 # move pointer to the next item before potential replacement of tail
+ if self._refine_after_each_subtask and i < len(plan_sub_tasks):
+ remaining = plan_sub_tasks[i:]
+ # Label for refine step
+ refine_label = self._agent_label("refine", index=i, total=len(plan_sub_tasks))
+
+ # Emit a step event for refine to keep UI parity with the legacy Planner.
+ self._emit_step_event(
+ ctx,
+ name="refine",
+ index=i,
+ total=len(plan_sub_tasks),
+ meta={"agent_name": refine_label},
+ )
+
+ refinement = await self._refine_plan(
+ ctx=ctx,
+ task=ev.query,
+ tools_str=tools_str,
+ completed=completed,
+ remaining=remaining,
+ memory_context=memory_context,
+ agent_label=refine_label,
+ )
+
+ # If refinement failed to parse, skip gracefully.
+ if refinement is None:
+ continue
+
+ # If the planner states the task is complete, stop early.
+ if getattr(refinement, "is_done", False):
+ reason = getattr(refinement, "reason", "") or "Planner judged the task as satisfied."
+ await self._emit_text(
+ ctx,
+ f"\n`Planner marked the plan as complete: {reason}`",
+ agent_name=refine_label,
+ )
+ await self._emit_text(ctx, "\n\n`Plan execution finished.`", agent_name=execute_label)
+ return FinalEvent(result=last_answer or "Plan finished.")
+
+ # If an updated plan was provided, replace the remaining sub-tasks.
+ if refinement.plan and refinement.plan.sub_tasks is not None:
+ # Filter out any sub-tasks that repeat completed names to avoid loops.
+ completed_names = {n for (n, _) in completed}
+ new_remaining = [st for st in refinement.plan.sub_tasks if st.name not in completed_names]
+
+ # If nothing changes, continue.
+ current_remaining_repr = self._format_subtasks(remaining)
+ new_remaining_repr = self._format_subtasks(new_remaining)
+ if new_remaining_repr.strip() != current_remaining_repr.strip():
+ plan_sub_tasks = plan_sub_tasks[:i] + new_remaining
+ # Present the updated tail of the plan to the UI.
+ lines = ["`Updated remaining plan:`"]
+ for k, st_upd in enumerate(new_remaining, i + 1):
+ lines.append(
+ f"\n**===== Sub Task {k}/{len(plan_sub_tasks)}: {st_upd.name} =====**\n"
+ f"Expected output: {st_upd.expected_output}\n"
+ f"Dependencies: {st_upd.dependencies}\n\n"
+ )
+ await self._emit_text(ctx, "\n".join(lines), agent_name=refine_label)

- await self._emit_text(ctx, "\n\n`Plan execution finished.`")
+ await self._emit_text(ctx, "\n\n`Plan execution finished.`", agent_name=execute_label)
  return FinalEvent(result=last_answer or "Plan finished.")
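For context on the new refine step above: it relies on llama-index structured prediction, i.e. llm.astructured_predict(...) with a Pydantic output class. Below is a minimal, self-contained sketch of that call pattern, assuming an OpenAI-backed LLM and simplified Plan/SubTask models; the names mirror the diff, but this setup is illustrative and not part of the package:

from typing import List, Optional
from pydantic import BaseModel, Field
from llama_index.core.prompts import PromptTemplate
from llama_index.llms.openai import OpenAI  # assumed backend for this sketch only

class SubTask(BaseModel):
    name: str
    input: str
    expected_output: str
    dependencies: List[str] = Field(default_factory=list)

class Plan(BaseModel):
    sub_tasks: List[SubTask]

class PlanRefinement(BaseModel):
    is_done: bool
    reason: Optional[str] = None
    plan: Optional[Plan] = None

async def refine(llm: OpenAI, refine_prompt: str, **prompt_args) -> Optional[PlanRefinement]:
    # astructured_predict asks the model for JSON matching PlanRefinement and
    # validates it with pydantic; on failure the caller keeps the current plan.
    try:
        return await llm.astructured_predict(
            PlanRefinement, PromptTemplate(refine_prompt), **prompt_args
        )
    except Exception:
        return None

In the workflow itself, the same call fills the refine prompt with tools_str, task, completed_outputs, remaining_sub_tasks and memory_context, and a None result simply leaves the current plan unchanged.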
pygpt_net/provider/agents/llama_index/workflow/supervisor.py
@@ -1,3 +1,5 @@
+ # workflow/supervisor.py
+
  #!/usr/bin/env python3
  # -*- coding: utf-8 -*-
  # ================================================== #
@@ -6,11 +8,11 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.17 02:00:00 #
+ # Updated Date: 2025.09.26 22:25:00 #
  # ================================================== #

  import re
- from typing import Optional, Literal, List
+ from typing import Optional, Literal, List, Callable, Any
  from pydantic import BaseModel, ValidationError
  from llama_index.core.workflow import Workflow, Context, StartEvent, StopEvent, Event, step
  from llama_index.core.agent.workflow import FunctionAgent, AgentStream
@@ -173,8 +175,6 @@
  :param agent_name: The name of the agent emitting the text (default: "PlannerWorkflow").
  """
  try:
- ctx.write_event_to_stream(AgentStream(delta=text))
- except ValidationError:
  ctx.write_event_to_stream(
  AgentStream(
  delta=text,
@@ -184,11 +184,47 @@
  raw={},
  )
  )
+ except ValidationError:
+ ctx.write_event_to_stream(AgentStream(delta=text))
+
+ async def _emit_step(self, ctx: Context, agent_name: str, index: int, total: int, meta: Optional[dict] = None):
+ """
+ Emit a StepEvent that your runner uses to split UI into blocks.
+ Mirrors the behavior used by the schema-driven workflow.
+ """
+ from pygpt_net.provider.agents.llama_index.workflow.events import StepEvent
+ try:
+ ctx.write_event_to_stream(
+ StepEvent(
+ name="next",
+ index=index,
+ total=total,
+ meta={"agent_name": agent_name, **(meta or {})},
+ )
+ )
+ except Exception:
+ pass
+
+ async def _run_muted(self, ctx: Context, awaitable) -> Any:
+ """
+ Execute an agent call while muting all events sent to ctx.
+ Matches schema-style emission: we control all UI events ourselves.
+ """
+ orig_write = ctx.write_event_to_stream
+
+ def _noop(ev: Any) -> None:
+ return None
+
+ ctx.write_event_to_stream = _noop
+ try:
+ return await awaitable
+ finally:
+ ctx.write_event_to_stream = orig_write

  @step
  async def supervisor_step(self, ctx: Context, ev: InputEvent) -> ExecuteEvent | OutputEvent:
  """
- Supervisor step to process the user's input and generate an instruction for the Worker.
+ Supervisor step: run Supervisor silently, then emit exactly one UI block like schema.

  :param ctx: Context for the workflow
  :param ev: InputEvent containing the user's message and context.
@@ -206,21 +242,33 @@
  "Return ONE JSON following the schema.\n</control>"
  )
  sup_input = "\n".join(parts)
- sup_resp = await self._supervisor.run(user_msg=sup_input, memory=self._supervisor_memory)
+
+ # Run Supervisor with stream muted to avoid extra blocks/finishes.
+ sup_resp = await self._run_muted(ctx, self._supervisor.run(user_msg=sup_input, memory=self._supervisor_memory))
  directive = parse_supervisor_json(str(sup_resp))

+ # Final/ask_user/max_rounds -> emit single Supervisor block and stop (schema-like).
  if directive.action == "final":
+ await self._emit_step(ctx, agent_name=self._supervisor.name, index=ev.round_idx + 1, total=ev.max_rounds)
  await self._emit_text(ctx, f"\n\n{directive.final_answer or str(sup_resp)}", agent_name=self._supervisor.name)
  return OutputEvent(status="final", final_answer=directive.final_answer or str(sup_resp), rounds_used=ev.round_idx)
+
  if directive.action == "ask_user" and ev.stop_on_ask_user:
+ await self._emit_step(ctx, agent_name=self._supervisor.name, index=ev.round_idx + 1, total=ev.max_rounds)
  q = directive.question or "I need more information, please clarify."
  await self._emit_text(ctx, f"\n\n{q}", agent_name=self._supervisor.name)
  return OutputEvent(status="ask_user", final_answer=q, rounds_used=ev.round_idx)
+
  if ev.round_idx >= ev.max_rounds:
+ await self._emit_step(ctx, agent_name=self._supervisor.name, index=ev.round_idx + 1, total=ev.max_rounds)
  await self._emit_text(ctx, "\n\nMax rounds exceeded.", agent_name=self._supervisor.name)
  return OutputEvent(status="max_rounds", final_answer="Exceeded maximum number of iterations.", rounds_used=ev.round_idx)

+ # Emit exactly one Supervisor block with the instruction (no JSON leakage, no duplicates).
  instruction = (directive.instruction or "").strip() or "Perform a step that gets closest to fulfilling the DoD."
+ await self._emit_step(ctx, agent_name=self._supervisor.name, index=ev.round_idx + 1, total=ev.max_rounds)
+ await self._emit_text(ctx, f"\n\n{instruction}", agent_name=self._supervisor.name)
+
  return ExecuteEvent(
  instruction=instruction,
  round_idx=ev.round_idx,
@@ -232,17 +280,19 @@
  @step
  async def worker_step(self, ctx: Context, ev: ExecuteEvent) -> InputEvent:
  """
- Worker step to execute the Supervisor's instruction.
+ Worker step: run Worker silently and emit exactly one UI block like schema.

  :param ctx: Context for the workflow
  :param ev: ExecuteEvent containing the instruction and context.
  :return: InputEvent for the next round or final output.
  """
+ # Run Worker with stream muted; we will emit a single block with the final text.
  worker_input = f"Instruction from Supervisor:\n{ev.instruction}\n"
- await self._emit_text(ctx, f"\n\n**Supervisor:** {ev.instruction}", agent_name=self._worker.name)
+ worker_resp = await self._run_muted(ctx, self._worker.run(user_msg=worker_input, memory=self._worker_memory))

- worker_resp = await self._worker.run(user_msg=worker_input, memory=self._worker_memory)
- await self._emit_text(ctx, f"\n\n**Worker:** {worker_resp}", agent_name=self._worker.name)
+ # Emit exactly one Worker block (schema-style: one AgentStream per node).
+ await self._emit_step(ctx, agent_name=self._worker.name, index=ev.round_idx + 1, total=ev.max_rounds)
+ await self._emit_text(ctx, f"\n\n{str(worker_resp)}", agent_name=self._worker.name)

  return InputEvent(
  user_msg="",
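For context on _run_muted and _emit_step above: the supervisor now runs its agents with the event stream muted and then emits exactly one block per step itself. Below is a minimal runnable sketch of that pattern, using a plain stand-in object instead of llama-index's Context; all names here are illustrative, not from the package:

import asyncio
from typing import Any, Awaitable, List

class FakeStream:
    # Stand-in for an object exposing write_event_to_stream, as the workflow Context does.
    def __init__(self) -> None:
        self.events: List[Any] = []
    def write_event_to_stream(self, ev: Any) -> None:
        self.events.append(ev)

async def run_muted(stream: FakeStream, awaitable: Awaitable[Any]) -> Any:
    # Swap the writer for a no-op so the nested call cannot emit partial blocks,
    # then always restore the original writer.
    orig = stream.write_event_to_stream
    stream.write_event_to_stream = lambda ev: None
    try:
        return await awaitable
    finally:
        stream.write_event_to_stream = orig

async def main() -> None:
    stream = FakeStream()
    async def noisy_agent() -> str:
        stream.write_event_to_stream("partial chunk")  # muted, never reaches the UI
        return "final answer"
    result = await run_muted(stream, noisy_agent())
    stream.write_event_to_stream({"agent_name": "Worker", "text": result})  # exactly one block
    print(stream.events)  # [{'agent_name': 'Worker', 'text': 'final answer'}]

asyncio.run(main())

The try/finally guarantees the original writer is restored even if the agent call raises, which is what keeps later steps streaming normally.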
pygpt_net/provider/agents/openai/agent.py
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.24 03:00:00 #
+ # Updated Date: 2025.09.26 17:00:00 #
  # ================================================== #

  from typing import Dict, Any, Tuple, Optional
@@ -133,6 +133,8 @@ class Agent(BaseAgent):
  "preset": preset,
  "is_expert_call": False,
  }
+
+ ctx.set_agent_name(agent.name)
  # call computer agent if computer tool is enabled
  if is_computer_tool(**tool_kwargs):
  computer = LocalComputer(window)