pygpt-net 2.6.63__py3-none-any.whl → 2.6.64__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those package versions.
- pygpt_net/CHANGELOG.txt +6 -0
- pygpt_net/__init__.py +1 -1
- pygpt_net/controller/attachment/attachment.py +17 -8
- pygpt_net/controller/camera/camera.py +4 -4
- pygpt_net/controller/lang/custom.py +2 -2
- pygpt_net/controller/ui/mode.py +18 -3
- pygpt_net/core/render/web/renderer.py +11 -0
- pygpt_net/data/config/config.json +2 -2
- pygpt_net/data/config/models.json +2 -2
- pygpt_net/data/config/presets/agent_openai_coder.json +15 -1
- pygpt_net/data/js/app/runtime.js +11 -4
- pygpt_net/data/js/app/scroll.js +14 -0
- pygpt_net/data/js/app.min.js +7 -6
- pygpt_net/data/locale/locale.de.ini +32 -0
- pygpt_net/data/locale/locale.en.ini +34 -2
- pygpt_net/data/locale/locale.es.ini +32 -0
- pygpt_net/data/locale/locale.fr.ini +32 -0
- pygpt_net/data/locale/locale.it.ini +32 -0
- pygpt_net/data/locale/locale.pl.ini +34 -2
- pygpt_net/data/locale/locale.uk.ini +32 -0
- pygpt_net/data/locale/locale.zh.ini +32 -0
- pygpt_net/js_rc.py +7574 -7505
- pygpt_net/provider/agents/llama_index/planner_workflow.py +15 -3
- pygpt_net/provider/agents/llama_index/workflow/planner.py +69 -41
- pygpt_net/provider/agents/openai/agent_planner.py +57 -35
- pygpt_net/provider/agents/openai/evolve.py +0 -3
- pygpt_net/provider/api/google/__init__.py +9 -3
- pygpt_net/provider/api/google/image.py +11 -1
- pygpt_net/provider/api/google/music.py +375 -0
- pygpt_net/ui/widget/option/combo.py +149 -11
- pygpt_net/ui/widget/textarea/web.py +1 -1
- pygpt_net/ui/widget/vision/camera.py +135 -12
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.64.dist-info}/METADATA +8 -2
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.64.dist-info}/RECORD +37 -36
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.64.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.64.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.64.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/agents/llama_index/planner_workflow.py:

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.27 17:00:00 #
 # ================================================== #
 
 from typing import Dict, Any, List
@@ -56,12 +56,16 @@ class PlannerAgent(BaseAgent):
         prompt_step = self.get_option(preset, "step", "prompt")
         prompt_plan_initial = self.get_option(preset, "plan", "prompt")
         prompt_plan_refine = self.get_option(preset, "plan_refine", "prompt")
+        prompt_plan_refine_each_step = self.get_option(preset, "plan_refine", "after_each_subtask")
         if not prompt_step:
             prompt_step = DEFAULT_EXECUTE_PROMPT
         if not prompt_plan_initial:
             prompt_plan_initial = DEFAULT_INITIAL_PLAN_PROMPT
         if not prompt_plan_refine:
             prompt_plan_refine = DEFAULT_PLAN_REFINE_PROMPT
+        if prompt_plan_refine_each_step is None:
+            prompt_plan_refine_each_step = True
+
 
         return PlannerWorkflow(
             tools=tools,
@@ -69,8 +73,9 @@ class PlannerAgent(BaseAgent):
             verbose=verbose,
             max_steps=max_steps,
             system_prompt=prompt_step,
-            initial_plan_prompt=
-            plan_refine_prompt=
+            initial_plan_prompt=prompt_plan_initial,
+            plan_refine_prompt=prompt_plan_refine,
+            refine_after_each_subtask=prompt_plan_refine_each_step,
         )
 
     def get_options(self) -> Dict[str, Any]:
@@ -79,6 +84,7 @@ class PlannerAgent(BaseAgent):
 
         :return: dict of options
         """
+        # step model -> from globals
         return {
             "step": {
                 "label": trans("agent.planner.step.label"),
@@ -111,6 +117,12 @@ class PlannerAgent(BaseAgent):
                     "description": trans("agent.planner.refine.prompt.desc"),
                     "default": DEFAULT_PLAN_REFINE_PROMPT,
                 },
+                "after_each_subtask": {
+                    "type": "bool",
+                    "label": trans("agent.option.refine.after_each"),
+                    "description": trans("agent.option.refine.after_each.desc"),
+                    "default": True,
+                },
             }
         },
     }
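The hunks above add an `after_each_subtask` boolean to the plan-refine options and thread it into the workflow constructor as `refine_after_each_subtask`. The sketch below illustrates that plumbing in isolation; the `Preset` stub and the `get_option` helper are simplified stand-ins for this example, not pygpt-net's real classes.

```python
# A minimal sketch of the option plumbing added above (simplified stand-ins;
# pygpt-net's real Preset/BaseAgent classes are richer than this).
from typing import Any, Dict, Optional


class Preset:
    """Hypothetical preset holding per-section option overrides."""
    def __init__(self, extra: Optional[Dict[str, Dict[str, Any]]] = None):
        # section name -> {option key -> value}, mirroring the "plan_refine" section
        self.extra = extra or {}


def get_option(preset: Optional[Preset], section: str, key: str) -> Any:
    """Return a stored option value, or None when the preset has no override."""
    if preset is None:
        return None
    return preset.extra.get(section, {}).get(key)


def build_workflow_kwargs(preset: Optional[Preset]) -> Dict[str, Any]:
    # New boolean option: refine the remaining plan after each finished sub-task.
    refine_each_step = get_option(preset, "plan_refine", "after_each_subtask")
    if refine_each_step is None:  # option absent -> keep the previous behaviour (True)
        refine_each_step = True
    return {"refine_after_each_subtask": refine_each_step}


if __name__ == "__main__":
    print(build_workflow_kwargs(None))  # {'refine_after_each_subtask': True}
    print(build_workflow_kwargs(Preset({"plan_refine": {"after_each_subtask": False}})))
```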
pygpt_net/provider/agents/llama_index/workflow/planner.py:

@@ -44,6 +44,10 @@ except Exception:
 except Exception:
     ChatMemoryBuffer = None
 
+# Translation utility
+from pygpt_net.utils import trans
+
+
 class SubTask(BaseModel):
     name: str = Field(..., description="The name of the sub-task.")
     input: str = Field(..., description="The input prompt for the sub-task.")
@@ -162,8 +166,8 @@ class PlannerWorkflow(Workflow):
         self._on_stop = on_stop
 
         # Human-friendly display names propagated to UI via workflow events.
-        self._display_planner_name: str = "
-        self._display_executor_name: str = "
+        self._display_planner_name: str = trans("agent.planner.display.planner")  # UI label
+        self._display_executor_name: str = trans("agent.planner.display.executor_agent")  # UI label
 
         self._executor = FunctionAgent(
             name="PlannerExecutor",
@@ -211,21 +215,21 @@ class PlannerWorkflow(Workflow):
     ) -> str:
         if step == "subtask":
             if index and total:
-                base =
+                base = trans("agent.planner.label.subtask.index_total").format(index=index, total=total)
             elif index:
-                base =
+                base = trans("agent.planner.label.subtask.index").format(index=index)
             else:
-                base = "
-            return
+                base = trans("agent.planner.label.subtask")
+            return trans("agent.planner.label.with_name").format(base=base, name=subtask_name) if subtask_name else base
         if step == "refine":
             if index and total:
-                return
-            return "
+                return trans("agent.planner.label.refine.index_total").format(index=index, total=total)
+            return trans("agent.planner.label.refine.index").format(index=index) if index else trans("agent.planner.label.refine")
         if step in {"make_plan", "plan"}:
-            return "
+            return trans("agent.planner.label.plan")
         if step in {"execute", "execute_plan"}:
-            return "
-        return step
+            return trans("agent.planner.label.execute")
+        return trans("agent.planner.label.step")
 
     def _emit_step_event(
         self,
@@ -369,22 +373,23 @@ class PlannerWorkflow(Workflow):
         self,
         ctx: Context,
         text: str,
-        agent_name: str =
+        agent_name: Optional[str] = None
     ):
         """
        Emit a text message to the context stream.
 
-        :param ctx: The context to write the event to
+        :param ctx: The context to write the event to.
         :param text: The text message to emit.
         :param agent_name: The name/label to display in UI (we pass per-step labels here).
         """
+        label = agent_name or self._display_planner_name
         # Always try to include agent name; fall back to minimal event for older validators.
         try:
             ctx.write_event_to_stream(
                 AgentStream(
                     delta=text,
                     response=text,
-                    current_agent_name=
+                    current_agent_name=label,
                     tool_calls=[],
                     raw={},
                 )
@@ -565,7 +570,11 @@ class PlannerWorkflow(Workflow):
         try:
             return await _stream()
         except Exception as ex:
-            await self._emit_text(
+            await self._emit_text(
+                ctx,
+                f"\n`{trans('agent.planner.ui.subtask_failed').format(error=ex)}`",
+                agent_name=agent_label or self._display_executor_name,
+            )
         return last_answer or ("".join(stream_buf).strip() if stream_buf else "")
 
     # Helper to render sub-tasks into a readable string for prompts and UI.
@@ -607,7 +616,11 @@ class PlannerWorkflow(Workflow):
         remaining_text = self._format_subtasks(remaining)
 
         # Emit a lightweight status line to the UI.
-        await self._emit_text(
+        await self._emit_text(
+            ctx,
+            f"\n`{trans('agent.planner.ui.refining_remaining_plan')}`",
+            agent_name=agent_label or trans("agent.planner.label.refine"),
+        )
 
         try:
             refinement = await self._planner_llm.astructured_predict(
@@ -650,12 +663,13 @@ class PlannerWorkflow(Workflow):
         except (ValueError, ValidationError):
             plan = Plan(sub_tasks=[SubTask(name="default", input=ev.query, expected_output="", dependencies=[])])
 
-        lines = ["`
+        lines = [f"`{trans('agent.planner.ui.current_plan')}`"]
         for i, st in enumerate(plan.sub_tasks, 1):
+            header = trans("agent.planner.ui.subtask_header.one").format(index=i, name=st.name)
             lines.append(
-                f"\n
-                f"
-                f"
+                f"\n{header}\n"
+                f"{trans('agent.planner.ui.expected_output')} {st.expected_output}\n"
+                f"{trans('agent.planner.ui.dependencies')} {st.dependencies}\n\n"
             )
         # Use a per-step label for plan creation
         await self._emit_text(ctx, "\n".join(lines), agent_name=self._agent_label("make_plan"))
@@ -677,7 +691,7 @@ class PlannerWorkflow(Workflow):
 
         # Start executing with a per-step label
         execute_label = self._agent_label("execute")
-        await self._emit_text(ctx, "\n\n`
+        await self._emit_text(ctx, f"\n\n`{trans('agent.planner.ui.executing_plan')}`", agent_name=execute_label)
 
         # Prepare static prompt parts for refinement.
         tools_str = ""
@@ -708,18 +722,21 @@ class PlannerWorkflow(Workflow):
                 },
             )
 
-            header = (
-
-
-
+            header = trans("agent.planner.ui.subtask_header.progress").format(
+                index=i + 1, total=total, name=st.name
+            )
+            header_block = (
+                f"\n\n{header}\n"
+                f"{trans('agent.planner.ui.expected_output')} {st.expected_output}\n"
+                f"{trans('agent.planner.ui.dependencies')} {st.dependencies}\n\n"
             )
 
            # stop callback
            if self._stopped():
-                await self._emit_text(ctx, "\n`
-                return FinalEvent(result=last_answer or "
+                await self._emit_text(ctx, f"\n`{trans('agent.planner.ui.execution_stopped')}`", agent_name=execute_label)
+                return FinalEvent(result=last_answer or trans("agent.planner.ui.execution_stopped"))
 
-            await self._emit_text(ctx,
+            await self._emit_text(ctx, header_block, agent_name=subtask_label)
 
            # build context for sub-task
            ctx_text = self._build_context_for_subtask(
@@ -728,7 +745,7 @@ class PlannerWorkflow(Workflow):
                 char_limit=self._memory_char_limit,
             )
 
-            # make composed prompt for sub-task
+            # make composed prompt for sub-task (internal; do not translate)
             if ctx_text:
                 composed_prompt = (
                     f"{ctx_text}\n\n"
@@ -743,7 +760,11 @@ class PlannerWorkflow(Workflow):
             sub_answer = await self._run_subtask(ctx, composed_prompt, agent_label=subtask_label)
             sub_answer = (sub_answer or "").strip()
 
-            await self._emit_text(
+            await self._emit_text(
+                ctx,
+                f"\n\n`{trans('agent.planner.ui.subtask_finished').format(index=i + 1, total=total, name=st.name)}`",
+                agent_name=subtask_label,
+            )
 
             # save completed sub-task
             completed.append((st.name, sub_answer))
@@ -752,8 +773,8 @@ class PlannerWorkflow(Workflow):
 
             # Early stop check (external cancel)
             if self._stopped():
-                await self._emit_text(ctx, "\n`
-                return FinalEvent(result=last_answer or "
+                await self._emit_text(ctx, f"\n`{trans('agent.planner.ui.execution_stopped')}`", agent_name=execute_label)
+                return FinalEvent(result=last_answer or trans("agent.planner.ui.execution_stopped"))
 
             # Optional legacy-style refine after each sub-task
             i += 1  # move pointer to the next item before potential replacement of tail
@@ -790,11 +811,15 @@ class PlannerWorkflow(Workflow):
                 reason = getattr(refinement, "reason", "") or "Planner judged the task as satisfied."
                 await self._emit_text(
                     ctx,
-                    f"\n`
+                    f"\n`{trans('agent.planner.ui.plan_marked_complete').format(reason=reason)}`",
                     agent_name=refine_label,
                 )
-                await self._emit_text(
-
+                await self._emit_text(
+                    ctx,
+                    f"\n\n`{trans('agent.planner.ui.plan_execution_finished')}`",
+                    agent_name=execute_label,
+                )
+                return FinalEvent(result=last_answer or trans("agent.planner.ui.plan_finished"))
 
         # If an updated plan was provided, replace the remaining sub-tasks.
         if refinement.plan and refinement.plan.sub_tasks is not None:
@@ -808,14 +833,17 @@ class PlannerWorkflow(Workflow):
             if new_remaining_repr.strip() != current_remaining_repr.strip():
                 plan_sub_tasks = plan_sub_tasks[:i] + new_remaining
                 # Present the updated tail of the plan to the UI.
-                lines = ["`
+                lines = [f"`{trans('agent.planner.ui.updated_remaining_plan')}`"]
                 for k, st_upd in enumerate(new_remaining, i + 1):
+                    upd_header = trans("agent.planner.ui.subtask_header.progress").format(
+                        index=k, total=len(plan_sub_tasks), name=st_upd.name
+                    )
                     lines.append(
-                        f"\n
-                        f"
-                        f"
+                        f"\n{upd_header}\n"
+                        f"{trans('agent.planner.ui.expected_output')} {st_upd.expected_output}\n"
+                        f"{trans('agent.planner.ui.dependencies')} {st_upd.dependencies}\n\n"
                     )
                 await self._emit_text(ctx, "\n".join(lines), agent_name=refine_label)
 
-        await self._emit_text(ctx, "\n\n`
-        return FinalEvent(result=last_answer or "
+        await self._emit_text(ctx, f"\n\n`{trans('agent.planner.ui.plan_execution_finished')}`", agent_name=execute_label)
+        return FinalEvent(result=last_answer or trans("agent.planner.ui.plan_finished"))
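This file replaces hard-coded UI strings with locale keys resolved via `trans()` and filled with `.format()`. The sketch below mirrors the sub-task branch of the label helper in isolation; the `trans()` stub and the English template strings are placeholders invented for this example, while the real values come from `pygpt_net.utils.trans` and the `data/locale/locale.*.ini` files updated in this release.

```python
# Sketch of the locale-key pattern used above: labels are looked up by key via
# trans() and then .format()-ed with runtime values. This trans() stub and the
# English templates are placeholders, not the project's real locale entries.
_FAKE_LOCALE = {
    "agent.planner.label.subtask.index_total": "Subtask {index}/{total}",
    "agent.planner.label.subtask.index": "Subtask {index}",
    "agent.planner.label.subtask": "Subtask",
    "agent.planner.label.with_name": "{base}: {name}",
}


def trans(key: str) -> str:
    """Resolve a locale key; fall back to the key itself when unknown."""
    return _FAKE_LOCALE.get(key, key)


def subtask_label(index=None, total=None, subtask_name=None) -> str:
    # Mirrors the step == "subtask" branch of _agent_label() in the diff above.
    if index and total:
        base = trans("agent.planner.label.subtask.index_total").format(index=index, total=total)
    elif index:
        base = trans("agent.planner.label.subtask.index").format(index=index)
    else:
        base = trans("agent.planner.label.subtask")
    return trans("agent.planner.label.with_name").format(base=base, name=subtask_name) if subtask_name else base


if __name__ == "__main__":
    print(subtask_label(2, 5, "Collect data"))  # -> "Subtask 2/5: Collect data"
    print(subtask_label())                      # -> "Subtask"
```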
pygpt_net/provider/agents/openai/agent_planner.py:

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.27
+# Updated Date: 2025.09.27 17:00:00 #
 # ================================================== #
 
 from dataclasses import dataclass
@@ -293,21 +293,22 @@ Overall Task: {task}
     ) -> str:
         if step == "subtask":
             if index and total:
-                base =
+                base = trans("agent.planner.label.subtask.index_total").format(index=index, total=total)
             elif index:
-                base =
+                base = trans("agent.planner.label.subtask.index").format(index=index)
             else:
-                base = "
-            return
+                base = trans("agent.planner.label.subtask")
+            return trans("agent.planner.label.with_name").format(base=base, name=subtask_name) if subtask_name else base
         if step == "refine":
             if index and total:
-                return
-            return "
+                return trans("agent.planner.label.refine.index_total").format(index=index, total=total)
+            return trans("agent.planner.label.refine.index").format(index=index) if index else trans(
+                "agent.planner.label.refine")
         if step in {"make_plan", "plan"}:
-            return "
+            return trans("agent.planner.label.plan")
         if step in {"execute", "execute_plan"}:
-            return "
-        return step
+            return trans("agent.planner.label.execute")
+            return trans("agent.planner.label.step")
 
     def prepare_model(
         self,
@@ -335,14 +336,16 @@ Overall Task: {task}
         """
         context = kwargs.get("context", BridgeContext())
         preset = context.preset
-        # Keep a stable display name; fallback to 'Executor' if no preset
-        agent_name = (
+        # Keep a stable display name; fallback to translated 'Executor' if no preset
+        agent_name = (
+            preset.name if preset and getattr(preset, "name", None) else trans("agent.planner.display.executor"))
         model = kwargs.get("model", ModelItem())
         tools = kwargs.get("function_tools", [])
         handoffs = kwargs.get("handoffs", [])
 
-        # Use
-
+        # Use prompt from options if provided; fallback to internal default.
+        step_prompt = self.get_option(preset, "step", "prompt") if preset else None
+        base_instructions = step_prompt or self.PROMPT
 
         allow_local_tools = bool(kwargs.get("allow_local_tools", False))
         allow_remote_tools = bool(kwargs.get("allow_remote_tools", False))
@@ -564,12 +567,13 @@ Overall Task: {task}
         ])
 
         # Present current plan as a dedicated step
-        plan_lines = ["`
+        plan_lines = [f"`{trans('agent.planner.ui.current_plan')}`"]
         for i, st in enumerate(plan_obj.sub_tasks, 1):
+            header = trans("agent.planner.ui.subtask_header.one").format(index=i, name=st.name)
             plan_lines.append(
-                f"\n
-                f"
-                f"
+                f"\n{header}\n"
+                f"{trans('agent.planner.ui.expected_output')} {st.expected_output}\n"
+                f"{trans('agent.planner.ui.dependencies')} {st.dependencies}\n\n"
             )
         plan_text = "\n".join(plan_lines)
 
@@ -616,16 +620,19 @@ Overall Task: {task}
 
             # UI header for the sub-task
             subtask_label = self._agent_label("subtask", index=i + 1, total=total, subtask_name=st.name)
-            header = (
-
-
-
+            header = trans("agent.planner.ui.subtask_header.progress").format(
+                index=i + 1, total=total, name=st.name
+            )
+            header_block = (
+                f"\n\n{header}\n"
+                f"{trans('agent.planner.ui.expected_output')} {st.expected_output}\n"
+                f"{trans('agent.planner.ui.dependencies')} {st.dependencies}\n\n"
             )
 
             # Compose sub-task prompt and open a new persisted step
             composed_prompt = self._compose_subtask_prompt(st, completed)
             ctx.set_agent_name(subtask_label)
-            ctx.stream =
+            ctx.stream = header_block
             bridge.on_step(ctx, False)  # open a new step block
 
             exec_kwargs = dict(common_kwargs)
@@ -642,7 +649,7 @@ Overall Task: {task}
                 sub_rid = getattr(result, "last_response_id", "") or ""
                 sub_answer = str(getattr(result, "final_output", "") or "")
             except Exception as ex:
-                sub_answer =
+                sub_answer = trans("agent.planner.ui.subtask_failed").format(error=ex)
 
             if sub_answer:
                 ctx.stream = sub_answer
@@ -674,7 +681,7 @@ Overall Task: {task}
             ctx = bridge.on_next_ctx(
                 ctx=ctx,
                 input="",
-                output=sub_answer if sub_answer else
+                output=sub_answer if sub_answer else header_block.strip(),
                 response_id=sub_rid,
                 finish=(is_last_subtask and not will_refine),
                 stream=stream,
@@ -695,7 +702,7 @@ Overall Task: {task}
             refine_label = self._agent_label("refine", index=i, total=len(plan_sub_tasks))
 
             # Start refine step
-            refine_display = "\n`
+            refine_display = f"\n`{trans('agent.planner.ui.refining_remaining_plan')}`"
             ctx.set_agent_name(refine_label)
             ctx.stream = refine_display
             bridge.on_step(ctx, False)
@@ -730,8 +737,8 @@ Overall Task: {task}
                 refinement = None
 
             if refinement is None:
-                refine_display += "\n`
-                ctx.stream = "\n`
+                refine_display += f"\n`{trans('agent.planner.ui.refine_failed_parse')}`"
+                ctx.stream = f"\n`{trans('agent.planner.ui.refine_failed_parse')}`"
                 bridge.on_step(ctx, True)
                 # finalize refine step
                 if use_partial_ctx:
@@ -749,7 +756,7 @@ Overall Task: {task}
 
             if getattr(refinement, "is_done", False):
                 reason = getattr(refinement, "reason", "") or "Planner judged the task as satisfied."
-                done_msg = f"\n`
+                done_msg = f"\n`{trans('agent.planner.ui.plan_marked_complete').format(reason=reason)}`"
                 refine_display += done_msg
                 ctx.stream = done_msg
                 bridge.on_step(ctx, True)
@@ -777,12 +784,15 @@ Overall Task: {task}
             if new_remaining_repr.strip() != current_remaining_repr.strip():
                 plan_sub_tasks = plan_sub_tasks[:i] + new_remaining
                 # Present the updated tail of the plan
-                lines = ["`
+                lines = [f"`{trans('agent.planner.ui.updated_remaining_plan')}`"]
                 for k, st_upd in enumerate(new_remaining, i + 1):
+                    upd_header = trans("agent.planner.ui.subtask_header.progress").format(
+                        index=k, total=len(plan_sub_tasks), name=st_upd.name
+                    )
                     lines.append(
-                        f"\n
-                        f"
-                        f"
+                        f"\n{upd_header}\n"
+                        f"{trans('agent.planner.ui.expected_output')} {st_upd.expected_output}\n"
+                        f"{trans('agent.planner.ui.dependencies')} {st_upd.dependencies}\n\n"
                     )
                 upd_text = "\n".join(lines)
                 refine_display += "\n" + upd_text
@@ -803,7 +813,7 @@ Overall Task: {task}
         bridge.on_next(ctx)
 
         # Return last answer (final block already closed in the loop)
-        return ctx, (last_answer or "
+        return ctx, (last_answer or trans("agent.planner.ui.plan_finished")), (response_id or "")
 
     def get_options(self) -> Dict[str, Any]:
         """
@@ -811,7 +821,19 @@ Overall Task: {task}
 
         :return: dict of options
         """
+        # step model -> from globals
         return {
+            "step": {
+                "label": trans("agent.planner.step.label"),
+                "options": {
+                    "prompt": {
+                        "type": "textarea",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.planner.step.prompt.desc"),
+                        "default": self.PROMPT,
+                    },
+                }
+            },
             "planner": {
                 "label": trans("agent.option.section.planner"),
                 "options": {
@@ -842,7 +864,7 @@ Overall Task: {task}
                 }
             },
             "refine": {
-                "label": trans("agent.
+                "label": trans("agent.planner.refine.label"),
                 "options": {
                     "model": {
                         "label": trans("agent.option.model"),
pygpt_net/provider/agents/openai/evolve.py:

@@ -330,7 +330,6 @@ class Agent(BaseAgent):
             choose_query = self.make_choose_query(results)
             choose_items.append(choose_query)
 
-            ctx.set_agent_name(chooser.name)
             chooser_result = await Runner.run(chooser, choose_items)
             result: ChooseFeedback = chooser_result.final_output
             choose = result.answer_number
@@ -348,7 +347,6 @@ class Agent(BaseAgent):
                 bridge.on_stop(ctx)
                 break
 
-            ctx.set_agent_name(evaluator.name)
             evaluator_result = await Runner.run(evaluator, input_items)
             result: EvaluationFeedback = evaluator_result.final_output
 
@@ -443,7 +441,6 @@ class Agent(BaseAgent):
             window.core.api.openai.responses.unpack_agent_response(results[choose], ctx)
             input_items = results[choose].to_input_list()
 
-            ctx.set_agent_name(evaluator.name)
             evaluator_result = await Runner.run(evaluator, input_items)
             result: EvaluationFeedback = evaluator_result.final_output
 
pygpt_net/provider/api/google/__init__.py:

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.14 00:00:00 #
 # ================================================== #
 
 import os
@@ -33,6 +33,7 @@ from .audio import Audio
 from .image import Image
 from .realtime import Realtime
 from .video import Video
+from .music import Music
 
 class ApiGoogle:
     def __init__(self, window=None):
@@ -49,6 +50,7 @@ class ApiGoogle:
         self.image = Image(window)
         self.realtime = Realtime(window)
         self.video = Video(window)
+        self.music = Music(window)
         self.client: Optional[genai.Client] = None
         self.locked = False
         self.last_client_args: Optional[Dict[str, Any]] = None
@@ -135,10 +137,14 @@ class ApiGoogle:
             self.vision.append_images(ctx)
 
         elif mode == MODE_IMAGE:
+            # Route to video / music / image based on selected model.
            if context.model.is_video_output():
                return self.video.generate(context=context, extra=extra)  # veo, etc.
-
-
+            # Lyria / music models
+            if self.music.is_music_model(model.id if model else ""):
+                return self.music.generate(context=context, extra=extra)  # lyria, etc.
+            # Default: image
+            return self.image.generate(context=context, extra=extra)  # imagen, etc.
 
         elif mode == MODE_ASSISTANT:
             return False  # not implemented for Google
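In `MODE_IMAGE`, the Google provider now dispatches by model: video models go to `Video.generate`, music (Lyria-style) models to the new `Music.generate`, and everything else falls through to `Image.generate`. Below is a small illustrative sketch of that dispatch order; the `Model` dataclass and the `"lyria"` substring check are assumptions made for this example, not the project's real implementation.

```python
# Illustrative sketch of the MODE_IMAGE dispatch shown above: video first, then
# music (Lyria-style models), otherwise the default image path. The Model
# dataclass and the "lyria" substring check are assumptions for this example.
from dataclasses import dataclass


@dataclass
class Model:
    id: str
    video_output: bool = False

    def is_video_output(self) -> bool:
        return self.video_output


def is_music_model(model_id: str) -> bool:
    # Hypothetical predicate; the real check lives in provider/api/google/music.py.
    return "lyria" in (model_id or "").lower()


def route_image_mode(model: Model) -> str:
    if model.is_video_output():
        return "video"   # veo, etc.
    if is_music_model(model.id):
        return "music"   # lyria, etc.
    return "image"       # imagen, etc. (default)


if __name__ == "__main__":
    print(route_image_mode(Model("imagen-3.0")))                 # image
    print(route_image_mode(Model("lyria-realtime")))             # music
    print(route_image_mode(Model("veo-2", video_output=True)))   # video
```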
pygpt_net/provider/api/google/image.py:

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.14 00:00:00 #
 # ================================================== #
 
 import mimetypes
@@ -45,6 +45,16 @@ class Image:
         :param sync: run synchronously (blocking) if True
         :return: True if started
         """
+        # Music fast-path: delegate to Music flow if a music model is selected (e.g., Lyria).
+        # This keeps image flow unchanged while enabling music in the same "image" mode.
+        try:
+            model_id = (context.model.id if context and context.model else "") or ""
+            if self.window and hasattr(self.window.core.api.google, "music"):
+                if self.window.core.api.google.music.is_music_model(model_id):
+                    return self.window.core.api.google.music.generate(context=context, extra=extra, sync=sync)
+        except Exception:
+            pass
+
         extra = extra or {}
         ctx = context.ctx or CtxItem()
         model = context.model