pygpt-net 2.6.62__py3-none-any.whl → 2.6.64__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +11 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/attachment/attachment.py +17 -8
- pygpt_net/controller/camera/camera.py +4 -4
- pygpt_net/controller/lang/custom.py +2 -2
- pygpt_net/controller/presets/editor.py +65 -1
- pygpt_net/controller/ui/mode.py +18 -3
- pygpt_net/core/agents/custom/llama_index/runner.py +15 -52
- pygpt_net/core/agents/custom/runner.py +194 -76
- pygpt_net/core/agents/runners/llama_workflow.py +60 -10
- pygpt_net/core/render/web/renderer.py +11 -0
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
- pygpt_net/data/config/presets/agent_openai_coder.json +0 -0
- pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
- pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
- pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
- pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
- pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
- pygpt_net/data/config/presets/agent_supervisor.json +1 -11
- pygpt_net/data/js/app/runtime.js +10 -0
- pygpt_net/data/js/app/scroll.js +14 -0
- pygpt_net/data/js/app.min.js +6 -4
- pygpt_net/data/locale/locale.de.ini +32 -0
- pygpt_net/data/locale/locale.en.ini +37 -0
- pygpt_net/data/locale/locale.es.ini +32 -0
- pygpt_net/data/locale/locale.fr.ini +32 -0
- pygpt_net/data/locale/locale.it.ini +32 -0
- pygpt_net/data/locale/locale.pl.ini +34 -2
- pygpt_net/data/locale/locale.uk.ini +32 -0
- pygpt_net/data/locale/locale.zh.ini +32 -0
- pygpt_net/js_rc.py +7571 -7499
- pygpt_net/provider/agents/base.py +0 -0
- pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/llama_index/planner_workflow.py +15 -3
- pygpt_net/provider/agents/llama_index/workflow/codeact.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +272 -44
- pygpt_net/provider/agents/llama_index/workflow/supervisor.py +0 -0
- pygpt_net/provider/agents/openai/agent.py +0 -0
- pygpt_net/provider/agents/openai/agent_b2b.py +4 -4
- pygpt_net/provider/agents/openai/agent_planner.py +631 -254
- pygpt_net/provider/agents/openai/agent_with_experts.py +0 -0
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
- pygpt_net/provider/agents/openai/evolve.py +6 -9
- pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/openai/supervisor.py +290 -37
- pygpt_net/provider/api/google/__init__.py +9 -3
- pygpt_net/provider/api/google/image.py +11 -1
- pygpt_net/provider/api/google/music.py +375 -0
- pygpt_net/provider/api/x_ai/__init__.py +0 -0
- pygpt_net/provider/core/agent/__init__.py +0 -0
- pygpt_net/provider/core/agent/base.py +0 -0
- pygpt_net/provider/core/agent/json_file.py +0 -0
- pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
- pygpt_net/provider/llms/base.py +0 -0
- pygpt_net/provider/llms/deepseek_api.py +0 -0
- pygpt_net/provider/llms/google.py +0 -0
- pygpt_net/provider/llms/hugging_face_api.py +0 -0
- pygpt_net/provider/llms/hugging_face_router.py +0 -0
- pygpt_net/provider/llms/mistral.py +0 -0
- pygpt_net/provider/llms/perplexity.py +0 -0
- pygpt_net/provider/llms/x_ai.py +0 -0
- pygpt_net/ui/widget/dialog/confirm.py +34 -8
- pygpt_net/ui/widget/option/combo.py +149 -11
- pygpt_net/ui/widget/textarea/input.py +1 -1
- pygpt_net/ui/widget/textarea/web.py +1 -1
- pygpt_net/ui/widget/vision/camera.py +135 -12
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/METADATA +13 -2
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/RECORD +53 -52
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/entry_points.txt +0 -0
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.27 06:00:00 #
 # ================================================== #

 from __future__ import annotations
@@ -62,13 +62,133 @@ class FlowOrchestrator:
     - First agent (in the whole flow) gets full initial messages from the app.
     - Next agent WITHOUT memory gets only last step's displayed content as a single 'user' message.
     - Agent WITH memory:
-        * if memory has items -> use
-        * if memory empty -> seed
+        * if memory has items -> use base history (items[:-1]) and pass last displayed content as a single 'user' baton;
+        * if memory empty -> seed baton from last displayed content (or initial messages as fallback).
     """
     def __init__(self, window, logger: Optional[Logger] = None) -> None:
         self.window = window
         self.logger = logger or NullLogger()

+    # ---------- Helpers (production-ready) ----------
+
+    def _extract_text_from_item(self, item: TResponseInputItem) -> str:
+        """Best-effort extract plain text from TResponseInputItem."""
+        if isinstance(item, dict):
+            content = item.get("content", "")
+            if isinstance(content, str):
+                return content
+            if isinstance(content, list):
+                parts = []
+                for p in content:
+                    if isinstance(p, dict):
+                        t = p.get("text")
+                        if isinstance(t, str):
+                            parts.append(t)
+                return "\n".join(parts)
+            return ""
+        if isinstance(item, str):
+            return item
+        return ""
+
+    def _build_baton_input(
+        self,
+        *,
+        node_id: str,
+        g: FlowGraph,
+        mem: MemoryManager,
+        initial_messages: List[TResponseInputItem],
+        first_dispatch_done: bool,
+        last_plain_output: str,
+        dbg: DebugConfig,
+    ) -> tuple[List[TResponseInputItem], str, Optional[str], Any, str]:
+        """
+        Returns: (prepared_items, baton_user_text, mem_id, mem_state, source_tag)
+        Mirrors LI baton/memory policy.
+        """
+        mem_id = g.agent_to_memory.get(node_id)
+        mem_state = mem.get(mem_id) if mem_id else None
+
+        baton_user_text = ""
+        source = ""
+
+        if mem_state and mem_state.items:
+            # memory with history -> base history + baton from last output (preferred)
+            base_items = list(mem_state.items[:-1]) if len(mem_state.items) >= 1 else []
+            if last_plain_output and last_plain_output.strip():
+                baton_user_text = last_plain_output
+                prepared = base_items + [{"role": "user", "content": baton_user_text}]
+                source = "memory:existing_to_user_baton"
+            else:
+                # fallback: use last assistant content as baton
+                last_ass = mem_state.items[-1] if isinstance(mem_state.items[-1], dict) else {}
+                if isinstance(last_ass.get("content"), str):
+                    baton_user_text = last_ass.get("content", "")
+                elif isinstance(last_ass.get("content"), list) and last_ass["content"]:
+                    baton_user_text = last_ass["content"][0].get("text", "") or ""
+                else:
+                    baton_user_text = ""
+                prepared = base_items + [{"role": "user", "content": baton_user_text}]
+                source = "memory:existing_to_last_assistant"
+            return sanitize_input_items(prepared), baton_user_text, mem_id, mem_state, source
+
+        if mem_state:
+            # memory attached but empty -> seed from last output else from initial (use last user msg as baton)
+            if last_plain_output and last_plain_output.strip():
+                baton_user_text = last_plain_output
+                prepared = [{"role": "user", "content": baton_user_text}]
+                source = "memory:seed_from_last_output"
+            else:
+                base_items = list(initial_messages[:-1]) if initial_messages else []
+                last_item = initial_messages[-1] if initial_messages else {"role": "user", "content": ""}
+                baton_user_text = self._extract_text_from_item(last_item)
+                prepared = base_items + [{"role": "user", "content": baton_user_text}]
+                source = "memory:seed_from_initial"
+            return sanitize_input_items(prepared), baton_user_text, mem_id, mem_state, source
+
+        # no memory attached
+        if not first_dispatch_done:
+            # first agent: pass initial messages as-is; baton is last user text (for potential external memory)
+            last_item = initial_messages[-1] if initial_messages else {"role": "user", "content": ""}
+            baton_user_text = self._extract_text_from_item(last_item)
+            return sanitize_input_items(list(initial_messages)), baton_user_text, None, None, "no-mem:first_initial"
+        else:
+            baton_user_text = last_plain_output if last_plain_output and last_plain_output.strip() else (
+                self._extract_text_from_item(initial_messages[-1]) if initial_messages else ""
+            )
+            prepared = [{"role": "user", "content": baton_user_text}]
+            return sanitize_input_items(prepared), baton_user_text, None, None, "no-mem:last_output"
+
+    def _update_memory_after_step(
+        self,
+        *,
+        node_id: str,
+        mem_state: Any,
+        baton_user_text: str,
+        display_text: str,
+        last_response_id: Optional[str],
+        dbg: DebugConfig,
+    ) -> None:
+        """Update memory strictly with [user baton, assistant display_text], mirroring LI semantics."""
+        if not mem_state:
+            return
+        base_items = list(mem_state.items[:-1]) if getattr(mem_state, "items", None) else []
+        new_mem = (base_items or []) + [
+            {"role": "user", "content": baton_user_text or ""},
+            {"role": "assistant", "content": [{"type": "output_text", "text": display_text or ""}]},
+        ]
+        try:
+            mem_state.set_from(new_mem, last_response_id)
+            if dbg.log_inputs:
+                self.logger.debug(
+                    f"[memory] {node_id} updated len {len(base_items)} -> {len(new_mem)} "
+                    f"user='{ellipsize(baton_user_text or '', dbg.preview_chars)}' "
+                    f"assist='{ellipsize(display_text or '', dbg.preview_chars)}'"
+                )
+        except Exception as e:
+            self.logger.error(f"[memory] update failed for {node_id}: {e}")
+
+    # ---------- Main flow ----------
+
     async def run_flow(
         self,
         schema: List[Dict[str, Any]],
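The docstring and the `_build_baton_input` / `_update_memory_after_step` helpers above define the baton policy in full. For quick orientation, here is a minimal sketch that replays the same decision table; `MemoryState` and the plain message dicts are hypothetical stand-ins for the real MemoryManager/FlowGraph types, and the assistant-content fallback branches are omitted:

```python
# Simplified, runnable sketch of the baton policy (not part of the package).
from typing import Any, Dict, List, Optional, Tuple


class MemoryState:
    """Hypothetical stand-in for the orchestrator's per-node memory state."""
    def __init__(self, items: Optional[List[Dict[str, Any]]] = None) -> None:
        self.items = items or []


def build_baton_input(
    mem_state: Optional[MemoryState],
    initial_messages: List[Dict[str, Any]],
    first_dispatch_done: bool,
    last_plain_output: str,
) -> Tuple[List[Dict[str, Any]], str]:
    """Return (input items, baton text); fallback branches of the real helper are left out."""
    if mem_state and mem_state.items:
        # memory with history: keep base history, baton = last displayed output
        baton = last_plain_output.strip()
        return mem_state.items[:-1] + [{"role": "user", "content": baton}], baton
    if mem_state is not None:
        # memory attached but empty: seed from last output, else from the initial messages
        baton = last_plain_output.strip() or (initial_messages[-1]["content"] if initial_messages else "")
        return [{"role": "user", "content": baton}], baton
    if not first_dispatch_done:
        # first agent in the whole flow: full initial messages
        baton = initial_messages[-1]["content"] if initial_messages else ""
        return list(initial_messages), baton
    # later agent without memory: only the previous step's displayed content
    return [{"role": "user", "content": last_plain_output}], last_plain_output


initial = [{"role": "user", "content": "Summarize the report"}]
# second agent in the flow, no memory attached, previous step displayed "Draft summary..."
items, baton = build_baton_input(None, initial, True, "Draft summary...")
print(items)  # [{'role': 'user', 'content': 'Draft summary...'}]
```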
@@ -174,55 +294,34 @@ class FlowOrchestrator:
                 f" role='{node_rt.role}'"
             )

-            #
-            mem_id =
-
-
-
-
-
-
-
-
-            if mem_state and not mem_state.is_empty():
-                # memory present and already has history
-                input_items = list(mem_state.items)
-                input_source = "memory:existing"
-            elif mem_state:
-                # memory present but empty -> seed from last output or initial messages
-                if last_plain_output and last_plain_output.strip():
-                    input_items = [{"role": "user", "content": last_plain_output}]
-                    input_source = "memory:seeded_from_last"
-                else:
-                    input_items = list(initial_messages)
-                    input_source = "memory:seeded_from_initial"
-            else:
-                # no memory -> first agent gets initial messages; others get only last output
-                if not first_dispatch_done:
-                    input_items = list(initial_messages)
-                    input_source = "no-mem:initial"
-                else:
-                    if last_plain_output and last_plain_output.strip():
-                        input_items = [{"role": "user", "content": last_plain_output}]
-                        input_source = "no-mem:last_output"
-                    else:
-                        input_items = list(initial_messages)
-                        input_source = "no-mem:fallback_initial"
-
-            prepared_items = sanitize_input_items(input_items)
+            # Input build using baton policy (LI parity)
+            prepared_items, baton_user_text, mem_id, mem_state, input_source = self._build_baton_input(
+                node_id=current_id,
+                g=g,
+                mem=mem,
+                initial_messages=initial_messages,
+                first_dispatch_done=first_dispatch_done,
+                last_plain_output=last_plain_output,
+                dbg=dbg,
+            )

             if dbg.log_inputs:
                 self.logger.debug(f"[input] source={input_source} items={len(prepared_items)} "
                                   f"preview={items_preview(prepared_items, dbg.preview_chars)}")
+                if mem_id:
+                    mem_info = f"{mem_id} (len={len(mem_state.items) if mem_state else 0})"
+                    self.logger.debug(f"[memory] attached={bool(mem_id)} mem_id={mem_info}")

             # Build agent with per-node runtime
+            # Restrict friendly_map only to allowed outgoing routes of current node
+            allowed_map = {rid: fs.agents[rid].name or rid for rid in (node.outputs or []) if rid in fs.agents}
             built = factory.build(
                 node=node,
                 node_runtime=node_rt,
                 preset=preset,
                 function_tools=function_tools,
                 force_router=False,  # auto on multi-output
-                friendly_map=
+                friendly_map=allowed_map,
                 handoffs_enabled=True,
                 context=agent_kwargs.get("context"),
             )
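The `allowed_map` comprehension added above restricts the router's `friendly_map` to the current node's outgoing edges, so an agent cannot hand off to nodes it is not connected to. A self-contained illustration of that restriction, with hypothetical `AgentDef` and `Node` stand-ins for the flow-schema types:

```python
# Illustrative only; AgentDef/Node are assumed stand-ins for fs.agents entries and schema nodes.
from dataclasses import dataclass, field
from typing import Dict, List


@dataclass
class AgentDef:
    name: str = ""


@dataclass
class Node:
    id: str
    outputs: List[str] = field(default_factory=list)


agents: Dict[str, AgentDef] = {
    "researcher": AgentDef("Researcher"),
    "writer": AgentDef("Writer"),
    "critic": AgentDef(""),  # unnamed -> falls back to its id
}
node = Node(id="researcher", outputs=["writer", "critic", "missing_node"])

# Same shape as the comprehension in the diff: skip routes that are not defined agents,
# and fall back to the raw id when the agent has no display name.
allowed_map = {rid: agents[rid].name or rid for rid in (node.outputs or []) if rid in agents}
print(allowed_map)  # {'writer': 'Writer', 'critic': 'critic'}
```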
@@ -243,37 +342,42 @@ class FlowOrchestrator:

             # Header for UI
             ctx.set_agent_name(agent.name)
-            # title = f"\n\n**{built.name}**\n\n"
-            # ctx.stream = title
             bridge.on_step(ctx, begin)
             begin = False
             handler.begin = begin
-            # if not use_partial_ctx:
-            # handler.to_buffer(title)

             display_text = ""  # what we show to UI for this step
             next_id: Optional[str] = None

             # --- EXECUTION ---
             if stream and not multi_output:
-                # Full token streaming (single-output agent)
+                # Full token streaming (single-output agent) – collect full buffer for baton
                 result = Runner.run_streamed(agent, **run_kwargs)
                 handler.reset()
+                # Optional local accumulator; prefer handler.buffer after loop
+                last_chunk = ""

                 async for event in result.stream_events():
                     if bridge.stopped():
                         result.cancel()
                         bridge.on_stop(ctx)
                         break
-
-
-
-
-
-
-
-
-
+                    chunk, last_response_id = handler.handle(event, ctx)
+                    if chunk:
+                        last_chunk = chunk
+
+                # Use full buffer if available (ensures baton sees complete output)
+                display_text = getattr(handler, "buffer", "") or last_chunk or ""
+
+                # Update memory strictly with baton + displayed text
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )

                 # Route: first edge or END
                 outs = g.get_next(current_id)
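The streaming branch above now records both the last delta returned by `handler.handle()` and the handler's accumulated buffer, preferring the buffer when building `display_text`, so the baton carries the complete output rather than only the final chunk. A toy illustration of that preference (the handler here is an assumed stand-in, not the package's real stream handler):

```python
# Toy accumulator showing why the full buffer is preferred over the last chunk.
class ToyStreamHandler:
    def __init__(self) -> None:
        self.buffer = ""

    def handle(self, delta: str) -> str:
        self.buffer += delta
        return delta  # a single event only carries the newest fragment


handler = ToyStreamHandler()
last_chunk = ""
for delta in ["The ", "final ", "answer."]:
    chunk = handler.handle(delta)
    if chunk:
        last_chunk = chunk

display_text = handler.buffer or last_chunk  # same preference order as in the diff
print(display_text)  # The final answer.
```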
@@ -308,13 +412,15 @@ class FlowOrchestrator:
                 decision = parse_route_output(raw_text, allowed_routes)
                 display_text = decision.content or ""

-                #
-
-
-
-
-
-
+                # Update memory with baton + displayed content
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )

                 # Route decision
                 if decision.valid:
@@ -348,11 +454,15 @@ class FlowOrchestrator:
                 if not use_partial_ctx:
                     handler.to_buffer(display_text)

-
-
-
-
-
+                # Update memory with baton + displayed content
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )

                 if decision.valid:
                     next_id = decision.route
@@ -373,11 +483,15 @@ class FlowOrchestrator:
                 if not use_partial_ctx:
                     handler.to_buffer(display_text)

-
-
-
-
-
+                # Update memory with baton + displayed content
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )

                 if decision.valid:
                     next_id = decision.route
@@ -397,11 +511,15 @@ class FlowOrchestrator:
                 if not use_partial_ctx:
                     handler.to_buffer(display_text)

-
-
-
-
-
+                # Update memory with baton + displayed text
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )

                 outs = g.get_next(current_id)
                 next_id = outs[0] if outs else g.first_connected_end(current_id)
@@ -1,3 +1,5 @@
+# core/agents/runners/llama_workflow.py
+
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 # ================================================== #
@@ -6,7 +8,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.27 06:00:00 #
 # ================================================== #

 import re
@@ -260,6 +262,11 @@ class LlamaWorkflow(BaseRunner):
         # Keep last known agent name to avoid redundant ctx updates.
         last_agent_name: Optional[str] = None

+        # Track whether current block has already produced user-visible tokens.
+        # This prevents creating empty DB items and preserves order.
+        content_written: bool = False
+        block_open: bool = False  # logical "block" opened after first StepEvent
+
         async for event in handler.stream_events():
             if self.is_stopped():
                 # persist current output on stop
@@ -269,6 +276,7 @@ class LlamaWorkflow(BaseRunner):
                 self.end_stream(item_ctx, signals)
                 await handler.cancel_run()  # cancel, will raise WorkflowCancelledByUser
                 break
+
             if isinstance(event, ToolCallResult):
                 output = f"\n-----------\nExecution result:\n{event.tool_output}"
                 if verbose:
@@ -276,8 +284,11 @@ class LlamaWorkflow(BaseRunner):
                 formatted = "\n```output\n" + str(event.tool_output) + "\n```\n"
                 item_ctx.live_output += formatted
                 item_ctx.stream = formatted
+                content_written = True
                 if item_ctx.stream_agent_output and flush:
                     self.send_stream(item_ctx, signals, begin)
+                    begin = False
+
             elif isinstance(event, ToolCall):
                 if "code" in event.tool_kwargs:
                     output = f"\n-----------\nTool call code:\n{event.tool_kwargs['code']}"
@@ -286,23 +297,43 @@ class LlamaWorkflow(BaseRunner):
                     formatted = "\n```python\n" + str(event.tool_kwargs['code']) + "\n```\n"
                     item_ctx.live_output += formatted
                     item_ctx.stream = formatted
+                    content_written = True
                     if item_ctx.stream_agent_output and flush:
                         self.send_stream(item_ctx, signals, begin)
+                        begin = False
+
             elif isinstance(event, StepEvent):
+                # UI splitting strategy aligned with OpenAI flow:
+                # - do NOT start a new DB item at the first StepEvent
+                # - only finalize the previous item if it already produced content
+                #   (prevents empty items and ordering glitches)
                 self.set_busy(signals)
                 if not use_partials:
+                    # We still want to propagate the name early if provided.
+                    try:
+                        meta = getattr(event, "meta", {}) or {}
+                        next_name = meta.get("agent_name")
+                        if next_name:
+                            last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
+                    except Exception:
+                        pass
+                    begin = True
                     continue
+
                 if verbose:
                     print("\n\n-----STEP-----\n\n")
                     print(f"[{event.name}] {event.index}/{event.total} meta={event.meta}")
-
-
-
-
-
-
-
-
+
+                # If there was an open block with content -> finalize it to a new DB item.
+                if block_open and content_written:
+                    if flush:
+                        item_ctx = self.on_next_ctx(
+                            item_ctx,
+                            signals=signals,
+                            begin=begin,
+                            stream=True,
+                        )
+                    # Apply next agent name on the fresh ctx (so UI header is correct from token #1).
                     try:
                         meta = getattr(event, "meta", {}) or {}
                         next_name = meta.get("agent_name")
@@ -310,8 +341,22 @@ class LlamaWorkflow(BaseRunner):
                             last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
                     except Exception:
                         pass
-
+                else:
+                    # First step or previous step had no visible content: just propagate the name.
+                    try:
+                        meta = getattr(event, "meta", {}) or {}
+                        next_name = meta.get("agent_name")
+                        if next_name:
+                            last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
+                    except Exception:
+                        pass
+
+                # Prepare for the upcoming tokens (new block begins).
+                block_open = True
+                content_written = False
                 begin = True
+                continue
+
             elif isinstance(event, AgentStream):
                 # Update agent name from event if present; fallback to header parsing.
                 name = getattr(event, "current_agent_name", None)
@@ -325,9 +370,11 @@ class LlamaWorkflow(BaseRunner):
                 if event.delta:
                     item_ctx.live_output += event.delta
                     item_ctx.stream = event.delta
+                    content_written = True
                     if item_ctx.stream_agent_output and flush:
                         self.send_stream(item_ctx, signals, begin)  # send stream to webview
                         begin = False
+
             elif isinstance(event, AgentOutput):
                 # Ensure final agent name is applied as well.
                 name = getattr(event, "current_agent_name", None)
@@ -338,6 +385,9 @@ class LlamaWorkflow(BaseRunner):
                 item_ctx.set_agent_final_response(answer)
                 if verbose:
                     print(f"\nFinal response: {answer}")
+                # Do not split the block here – we will either:
+                #  - split on the next StepEvent, or
+                #  - finalize once at the end (make_response), just like OpenAI flow does.

         return item_ctx
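The `block_open` / `content_written` flags introduced above implement a simple rule: a `StepEvent` only finalizes the previous UI/DB item when that block actually produced visible output, which avoids creating empty items and keeps ordering stable. A reduced sketch of the same rule over a synthetic event stream (the `(kind, payload)` tuples are illustrative, not the real LlamaIndex event types):

```python
# Toy model of the block-splitting rule; the real code reacts to StepEvent/AgentStream events.
from typing import Iterable, List, Tuple


def split_into_blocks(events: Iterable[Tuple[str, str]]) -> List[str]:
    blocks: List[str] = []
    current = ""
    block_open = False
    content_written = False
    for kind, payload in events:
        if kind == "step":
            if block_open and content_written:
                blocks.append(current)  # finalize previous item only if it has content
                current = ""
            block_open = True
            content_written = False
            continue
        current += payload  # token / tool output
        content_written = True
    if current:
        blocks.append(current)  # final flush, mirroring make_response at the end
    return blocks


print(split_into_blocks([
    ("step", ""), ("token", "plan..."),
    ("step", ""),                        # block with no tokens -> no empty item
    ("step", ""), ("token", "answer"),
]))
# ['plan...', 'answer']
```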
@@ -386,6 +386,11 @@ class Renderer(BaseRenderer):
         except Exception:
             pass

+        try:
+            self.get_output_node(meta).page().runJavaScript("if (typeof window.begin !== 'undefined') begin();")
+        except Exception:
+            pass
+
     def end(self, meta: CtxMeta, ctx: CtxItem, stream: bool = False):
         """
         Render end
@@ -402,6 +407,12 @@ class Renderer(BaseRenderer):
             self.pids[pid].item = None
         else:
             self.reload()
+
+        try:
+            self.get_output_node(meta).page().runJavaScript("if (typeof window.end !== 'undefined') end();")
+        except Exception:
+            pass
+
         self.pids[pid].clear()
         self.auto_cleanup(meta)
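Both renderer additions above wrap the call in a `typeof` guard, so pages that do not define the `begin()` / `end()` hooks simply ignore the call instead of raising a JavaScript error. A tiny sketch of the same guard pattern, with `run_js` standing in for `get_output_node(meta).page().runJavaScript`:

```python
# Hypothetical helper; run_js stands in for QWebEnginePage.runJavaScript.
def call_optional_js_hook(run_js, name: str) -> None:
    """Invoke window.<name>() only if the loaded page actually defines it."""
    run_js(f"if (typeof window.{name} !== 'undefined') {name}();")


call_optional_js_hook(print, "begin")  # prints: if (typeof window.begin !== 'undefined') begin();
call_optional_js_hook(print, "end")
```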
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.
-        "app.version": "2.6.
-        "updated_at": "2025-09-
+        "version": "2.6.64",
+        "app.version": "2.6.64",
+        "updated_at": "2025-09-27T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.
-        "app.version": "2.6.
-        "updated_at": "2025-09-
+        "version": "2.6.64",
+        "app.version": "2.6.64",
+        "updated_at": "2025-09-27T00:00:00"
     },
     "items": {
         "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
@@ -31,21 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_b2b": {
-            "bot_1": {
-                "prompt": "You're an advanced AI assistant and an expert in every field. Imagine that I am also such an AI assistant and converse with me in an expert manner. As two assistants, let's brainstorm and arrive at some advanced solutions.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "bot_2": {
-                "model": "gpt-4o",
-                "prompt": "You're an advanced AI assistant and an expert in every field. Imagine that I am also such an AI assistant and converse with me in an expert manner. As two assistants, let's brainstorm and arrive at some advanced solutions.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.94",
         "app.version": "2.5.94",
File without changes
@@ -31,29 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_evolve": {
-            "base": {
-                "num_parents": 2,
-                "max_generations": 10,
-                "prompt": "You generate a response based on the user's input. If there is any feedback provided, use it to improve the response.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "chooser": {
-                "model": "gpt-4o",
-                "prompt": "I will give you a list of different answers to the given question. From the provided list, choose the best and most accurate answer and return the number of that answer to me, without any explanation, just the number of the answer.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "gpt-4o",
-                "prompt": "You evaluate a result and decide if it's good enough. If it's not good enough, you provide feedback on what needs to be improved. Never give it a pass on the first try. After 5 attempts, you can give it a pass if the result is good enough - do not go for perfection.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.85",
         "app.version": "2.5.85",
@@ -31,27 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_planner": {
-            "base": {
-                "prompt": "Prepare a comprehensive and detailed response to the question based on the action plan. Follow each step outlined in the plan. If any feedback is provided, use it to improve the response.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "planner": {
-                "model": "o3-mini-high",
-                "prompt": "Make a plan of task execution for the query by dividing a task into smaller steps. Do not provide any solutions here. The plan should only contain a list of steps as instructions for someone else to follow. Prepare a plan in the language in which the query was made. Format the plan using markdown.\n\nExample:\n\n----------------\n\n**Sub-task 1: <name>**\n\n- Description: <subtask description>\n- Expected output: <expected output>\n- Dependencies: []\n- Required Tools: []\n\n**Sub-task 2: <name>**\n\n- Description: <subtask description>\n- Expected output: <expected output>\n- Dependencies: [<subtask's 1 name>]\n- Required Tools: [WebSearch]\n\n[...]",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "gpt-4o",
-                "prompt": "You evaluate a result and decide if it's good enough. If it's not good enough, you provide feedback on what needs to be improved. Never give it a pass on the first try. After 5 attempts, you can give it a pass if the result is good enough - do not go for perfection, but ensure all tasks are completed.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.81",
         "app.version": "2.5.81",