pygpt-net 2.6.62__py3-none-any.whl → 2.6.63__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +5 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/presets/editor.py +65 -1
- pygpt_net/core/agents/custom/llama_index/runner.py +15 -52
- pygpt_net/core/agents/custom/runner.py +194 -76
- pygpt_net/core/agents/runners/llama_workflow.py +60 -10
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
- pygpt_net/data/config/presets/agent_openai_coder.json +1 -15
- pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
- pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
- pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
- pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
- pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
- pygpt_net/data/config/presets/agent_supervisor.json +1 -11
- pygpt_net/data/js/app/runtime.js +4 -1
- pygpt_net/data/js/app.min.js +3 -2
- pygpt_net/data/locale/locale.en.ini +5 -0
- pygpt_net/js_rc.py +13 -10
- pygpt_net/provider/agents/base.py +0 -0
- pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/codeact.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +229 -29
- pygpt_net/provider/agents/llama_index/workflow/supervisor.py +0 -0
- pygpt_net/provider/agents/openai/agent.py +0 -0
- pygpt_net/provider/agents/openai/agent_b2b.py +4 -4
- pygpt_net/provider/agents/openai/agent_planner.py +617 -262
- pygpt_net/provider/agents/openai/agent_with_experts.py +0 -0
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
- pygpt_net/provider/agents/openai/evolve.py +6 -6
- pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/openai/supervisor.py +290 -37
- pygpt_net/provider/api/x_ai/__init__.py +0 -0
- pygpt_net/provider/core/agent/__init__.py +0 -0
- pygpt_net/provider/core/agent/base.py +0 -0
- pygpt_net/provider/core/agent/json_file.py +0 -0
- pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
- pygpt_net/provider/llms/base.py +0 -0
- pygpt_net/provider/llms/deepseek_api.py +0 -0
- pygpt_net/provider/llms/google.py +0 -0
- pygpt_net/provider/llms/hugging_face_api.py +0 -0
- pygpt_net/provider/llms/hugging_face_router.py +0 -0
- pygpt_net/provider/llms/mistral.py +0 -0
- pygpt_net/provider/llms/perplexity.py +0 -0
- pygpt_net/provider/llms/x_ai.py +0 -0
- pygpt_net/ui/widget/dialog/confirm.py +34 -8
- pygpt_net/ui/widget/textarea/input.py +1 -1
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/METADATA +7 -2
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/RECORD +34 -34
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt
CHANGED
pygpt_net/__init__.py
CHANGED
```diff
@@ -6,15 +6,15 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.…
+# Updated Date: 2025.09.27 00:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.6.62"
-__build__ = "2025-09-…"
+__version__ = "2.6.63"
+__build__ = "2025-09-27"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
 __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
```
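The bump is visible at runtime through the package's own metadata; a quick check (assuming the new wheel is installed):

```python
# Verify the installed wheel matches this release.
import pygpt_net

print(pygpt_net.__version__)  # expected: 2.6.63
print(pygpt_net.__build__)    # expected: 2025-09-27
```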
pygpt_net/controller/presets/editor.py
CHANGED
```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.…
+# Updated Date: 2025.09.27 00:00:00 #
 # ================================================== #
 
 import datetime
@@ -393,6 +393,9 @@ class Editor:
                 value=extra_options[key].get('default', None),
             )
 
+        # ensure combo defaults are effectively applied for this tab (only empty values are updated)
+        self._apply_combo_defaults_for_group(option_key, extra_options)
+
     def load_extra_defaults(self):
         """Load extra options defaults for preset editor"""
         if not self.tab_options_idx:
@@ -423,6 +426,8 @@ class Editor:
                     option=extra_options[key],
                     value=value,
                 )
+        # ensure combo defaults are effectively applied for this tab (only empty values are updated)
+        self._apply_combo_defaults_for_group(option_key, extra_options)
 
     def load_extra_defaults_current(self):
         """Load extra options defaults on mode change"""
@@ -479,6 +484,8 @@ class Editor:
                     option=extra_options[key],
                     value=value,
                 )
+        # ensure combo defaults are effectively applied for this tab (only empty values are updated)
+        self._apply_combo_defaults_for_group(option_key, extra_options)
 
     def append_extra_options(self, preset: PresetItem):
         """
@@ -785,6 +792,9 @@ class Editor:
                     value=opt_schema.get('default'),
                 )
 
+        # ensure combo defaults are effectively applied for this tab (only empty values are updated)
+        self._apply_combo_defaults_for_group(config_id, schema_options)
+
         # 4) Recompute mapping fully based on actual tabs and their 'agent_id' properties.
         self._rebuild_tab_index_mapping()
 
@@ -1521,6 +1531,9 @@ class Editor:
                     value=opt_schema.get('default'),
                 )
 
+        # ensure combo defaults are effectively applied for this tab (only empty values are updated)
+        self._apply_combo_defaults_for_group(config_id, schema_options)
+
         # 7) Recompute the index mapping strictly from the QTabWidget
         self._rebuild_tab_index_mapping()
 
@@ -1529,3 +1542,54 @@ class Editor:
 
         finally:
             tabs.setUpdatesEnabled(True)
+
+    # ---------- Helpers for reliable combo defaults in agent extra options ----------
+
+    def _apply_combo_defaults_for_group(self, parent_id: str, schema_options: Dict[str, Any]) -> None:
+        """
+        Ensure that combo-type inputs inside a given UI config group have their default values applied
+        when the current value is empty ("", None or "_"). This avoids the situation where combo boxes
+        remain uninitialized while other field types receive defaults correctly.
+
+        This function never overrides a non-empty value set by the user or loaded from a preset.
+        """
+        if not schema_options:
+            return
+
+        get_value = self.window.controller.config.get_value
+        apply_value = self.window.controller.config.apply_value
+
+        for key, opt_schema in schema_options.items():
+            if not isinstance(opt_schema, dict):
+                continue
+            if opt_schema.get('type') != 'combo':
+                continue
+
+            default_val = opt_schema.get('default', None)
+            if default_val is None:
+                continue
+
+            current_val = get_value(
+                parent_id=parent_id,
+                key=key,
+                option=opt_schema,
+            )
+
+            # Treat "_", "", None as empty and safe to replace with default
+            if current_val in (None, "", "_"):
+                # First try apply_value (standard path)
+                apply_value(
+                    parent_id=parent_id,
+                    key=key,
+                    option=opt_schema,
+                    value=default_val,
+                )
+                # Additionally set directly on widget if accessible to guard against timing of key population
+                try:
+                    widget_group = self.window.ui.config.get(parent_id, {})
+                    widget = widget_group.get(key)
+                    if widget and hasattr(widget, "set_value"):
+                        widget.set_value(default_val)
+                except Exception:
+                    # Silent fallback; apply_value above should already handle most cases
+                    pass
```
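The helper's core policy is small enough to test in isolation; a minimal sketch on plain dicts (`apply_combo_defaults` and its arguments are illustrative stand-ins, not pygpt-net API):

```python
# Minimal sketch of the combo-default policy above, on plain dicts.
# "options" stands in for schema_options; "values" for the stored group values.
EMPTY = (None, "", "_")  # same sentinel set the helper treats as "unset"

def apply_combo_defaults(options: dict, values: dict) -> dict:
    for key, opt in options.items():
        if not isinstance(opt, dict) or opt.get("type") != "combo":
            continue
        default = opt.get("default")
        if default is None:
            continue
        if values.get(key) in EMPTY:  # never override a non-empty user value
            values[key] = default
    return values

schema = {
    "model": {"type": "combo", "default": "gpt-4o"},
    "mode": {"type": "combo", "default": "auto"},
}
print(apply_combo_defaults(schema, {"model": "_", "mode": "chat"}))
# -> {'model': 'gpt-4o', 'mode': 'chat'}  (placeholder filled, user choice kept)
```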
pygpt_net/core/agents/custom/llama_index/runner.py
CHANGED
```diff
@@ -1,3 +1,5 @@
+# core/agents/runners/llama_workflow.py
+
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 # ================================================== #
@@ -6,7 +8,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.…
+# Updated Date: 2025.09.27 06:00:00 #
 # ================================================== #
 
 from __future__ import annotations
@@ -207,37 +209,6 @@ class DynamicFlowWorkflowLI(Workflow):
                 return False
         return False
 
-    def _friendly_map(self) -> Dict[str, str]:
-        return {aid: a.name or aid for aid, a in self.fs.agents.items()}
-
-    def _friendly_map_for_routes(self, route_ids: List[str]) -> Dict[str, Any]:
-        """
-        Build a friendly map for the given route ids:
-        - Always include a human-friendly name.
-        - Include role only if provided in preset options or schema and non-empty.
-        """
-        out: Dict[str, Any] = {}
-        for rid in route_ids or []:
-            a = self.fs.agents.get(rid)
-            name = (a.name if a and a.name else rid)
-            # Prefer preset option, then schema role
-            role_opt = None
-            try:
-                role_opt = self.option_get(rid, "role", None)
-            except Exception:
-                role_opt = None
-            role_schema = getattr(a, "role", None) if a is not None else None
-            role_val = None
-            if isinstance(role_opt, str) and role_opt.strip():
-                role_val = role_opt.strip()
-            elif isinstance(role_schema, str) and role_schema.strip():
-                role_val = role_schema.strip()
-            item = {"name": name}
-            if role_val:
-                item["role"] = role_val
-            out[rid] = item
-        return out
-
     async def _emit(self, ctx: Context, ev: Any):
         if self.dbg.event_echo:
             self.logger.debug(f"[event] emit {ev.__class__.__name__}")
@@ -245,8 +216,8 @@ class DynamicFlowWorkflowLI(Workflow):
 
     async def _emit_agent_text(self, ctx: Context, text: str, agent_name: str = "Agent"):
         """
-        Emit AgentStream(delta=text) robustly. If …
-        fall back to extended AgentStream …
+        Emit AgentStream(delta=text) robustly. If env requires extra fields,
+        fall back to extended AgentStream.
         """
         try:
             if self.dbg.event_echo:
@@ -266,16 +237,11 @@ class DynamicFlowWorkflowLI(Workflow):
         )
 
     async def _emit_header(self, ctx: Context, name: str):
-
-        self.logger.debug(f"[event] header emit begin name='{name}'")
+        # Lightweight header to ensure agent name is known before tokens.
         await self._emit_agent_text(ctx, "", agent_name=name)
-        # await self._emit_agent_text(ctx, f"\n\n**{name}**\n\n", agent_name=name)
-        if self.dbg.event_echo:
-            self.logger.debug("[event] header emit done")
 
     async def _emit_step_sep(self, ctx: Context, node_id: str):
         try:
-            # Include human-friendly agent name in StepEvent meta for downstream ctx propagation.
             a = self.fs.agents.get(node_id)
             friendly_name = (a.name if a and a.name else node_id)
             await self._emit(
@@ -350,6 +316,9 @@ class DynamicFlowWorkflowLI(Workflow):
             return user_msg, [], "no-mem:last_output"
 
     async def _update_memory_after_step(self, node_id: str, user_msg_text: str, display_text: str):
+        """
+        Update per-node memory after a step, storing baton user message and assistant output.
+        """
         mem_id = self.g.agent_to_memory.get(node_id)
         mem_state = self.mem.get(mem_id) if mem_id else None
         if not mem_state:
@@ -374,7 +343,7 @@ class DynamicFlowWorkflowLI(Workflow):
     # ============== Workflow steps ==============
 
     def run(self, query: str, ctx: Optional[Context] = None, memory: Any = None, verbose: bool = False, on_stop=None):
-        """Entry point used by …"""
+        """Entry point used by LlamaWorkflow runner."""
        self._on_stop = on_stop
 
         # Build initial chat once
@@ -444,8 +413,9 @@ class DynamicFlowWorkflowLI(Workflow):
             return FlowTickEvent() if self._current_ids else FlowStopEvent(final_answer=self._last_plain_output or "")
 
         node: AgentNode = self.fs.agents[current_id]
-
-
+
+        # IMPORTANT: emit StepEvent also for the very first agent step.
+        await self._emit_step_sep(ctx, current_id)
         await self._emit_header(ctx, node.name or current_id)
 
         # Resolve runtime + per-node LLM/tools
@@ -474,11 +444,10 @@ class DynamicFlowWorkflowLI(Workflow):
             f"user='{ellipsize(user_msg_text, self.dbg.preview_chars)}'"
         )
 
-        # …
+        # Build agent
         allowed_routes_now = list(node.outputs or [])
-        friendly_map = self.…
+        friendly_map = {rid: self.fs.agents.get(rid).name or rid for rid in allowed_routes_now if rid in self.fs.agents}
 
-        # Build agent (chat_history/max_iterations in ctor – best practice)
         built = self.factory.build(
             node=node,
             node_runtime=node_rt,
@@ -528,9 +497,6 @@ class DynamicFlowWorkflowLI(Workflow):
             display_text = decision.content or ""
             if display_text:
                 await self._emit_agent_text(ctx, display_text, agent_name=(node.name or current_id))
-            if self.dbg.log_memory_dump:
-                self.logger.debug(f"[mem.prep] node={current_id} save user='{ellipsize(user_msg_text, self.dbg.preview_chars)}' "
-                                  f"assist='{ellipsize(display_text, self.dbg.preview_chars)}'")
             await self._update_memory_after_step(current_id, user_msg_text, display_text)
             next_id = decision.route if decision.valid else (allowed_routes[0] if allowed_routes else None)
             if self.dbg.log_routes:
@@ -541,9 +507,6 @@ class DynamicFlowWorkflowLI(Workflow):
             display_text = raw_text_clean or ""
             if display_text:
                 await self._emit_agent_text(ctx, display_text, agent_name=(node.name or current_id))
-            if self.dbg.log_memory_dump:
-                self.logger.debug(f"[mem.prep] node={current_id} save user='{ellipsize(user_msg_text, self.dbg.preview_chars)}' "
-                                  f"assist='{ellipsize(display_text, self.dbg.preview_chars)}'")
             await self._update_memory_after_step(current_id, user_msg_text, display_text)
             outs = self.g.get_next(current_id)
             next_id = outs[0] if outs else self.g.first_connected_end(current_id)
```
pygpt_net/core/agents/custom/runner.py
CHANGED
```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.…
+# Updated Date: 2025.09.27 06:00:00 #
 # ================================================== #
 
 from __future__ import annotations
@@ -62,13 +62,133 @@ class FlowOrchestrator:
     - First agent (in the whole flow) gets full initial messages from the app.
     - Next agent WITHOUT memory gets only last step's displayed content as a single 'user' message.
     - Agent WITH memory:
-      * if memory has items -> use …
-      * if memory empty -> seed …
+      * if memory has items -> use base history (items[:-1]) and pass last displayed content as a single 'user' baton;
+      * if memory empty -> seed baton from last displayed content (or initial messages as fallback).
     """
     def __init__(self, window, logger: Optional[Logger] = None) -> None:
         self.window = window
         self.logger = logger or NullLogger()
 
+    # ---------- Helpers (production-ready) ----------
+
+    def _extract_text_from_item(self, item: TResponseInputItem) -> str:
+        """Best-effort extract plain text from TResponseInputItem."""
+        if isinstance(item, dict):
+            content = item.get("content", "")
+            if isinstance(content, str):
+                return content
+            if isinstance(content, list):
+                parts = []
+                for p in content:
+                    if isinstance(p, dict):
+                        t = p.get("text")
+                        if isinstance(t, str):
+                            parts.append(t)
+                return "\n".join(parts)
+            return ""
+        if isinstance(item, str):
+            return item
+        return ""
+
+    def _build_baton_input(
+        self,
+        *,
+        node_id: str,
+        g: FlowGraph,
+        mem: MemoryManager,
+        initial_messages: List[TResponseInputItem],
+        first_dispatch_done: bool,
+        last_plain_output: str,
+        dbg: DebugConfig,
+    ) -> tuple[List[TResponseInputItem], str, Optional[str], Any, str]:
+        """
+        Returns: (prepared_items, baton_user_text, mem_id, mem_state, source_tag)
+        Mirrors LI baton/memory policy.
+        """
+        mem_id = g.agent_to_memory.get(node_id)
+        mem_state = mem.get(mem_id) if mem_id else None
+
+        baton_user_text = ""
+        source = ""
+
+        if mem_state and mem_state.items:
+            # memory with history -> base history + baton from last output (preferred)
+            base_items = list(mem_state.items[:-1]) if len(mem_state.items) >= 1 else []
+            if last_plain_output and last_plain_output.strip():
+                baton_user_text = last_plain_output
+                prepared = base_items + [{"role": "user", "content": baton_user_text}]
+                source = "memory:existing_to_user_baton"
+            else:
+                # fallback: use last assistant content as baton
+                last_ass = mem_state.items[-1] if isinstance(mem_state.items[-1], dict) else {}
+                if isinstance(last_ass.get("content"), str):
+                    baton_user_text = last_ass.get("content", "")
+                elif isinstance(last_ass.get("content"), list) and last_ass["content"]:
+                    baton_user_text = last_ass["content"][0].get("text", "") or ""
+                else:
+                    baton_user_text = ""
+                prepared = base_items + [{"role": "user", "content": baton_user_text}]
+                source = "memory:existing_to_last_assistant"
+            return sanitize_input_items(prepared), baton_user_text, mem_id, mem_state, source
+
+        if mem_state:
+            # memory attached but empty -> seed from last output else from initial (use last user msg as baton)
+            if last_plain_output and last_plain_output.strip():
+                baton_user_text = last_plain_output
+                prepared = [{"role": "user", "content": baton_user_text}]
+                source = "memory:seed_from_last_output"
+            else:
+                base_items = list(initial_messages[:-1]) if initial_messages else []
+                last_item = initial_messages[-1] if initial_messages else {"role": "user", "content": ""}
+                baton_user_text = self._extract_text_from_item(last_item)
+                prepared = base_items + [{"role": "user", "content": baton_user_text}]
+                source = "memory:seed_from_initial"
+            return sanitize_input_items(prepared), baton_user_text, mem_id, mem_state, source
+
+        # no memory attached
+        if not first_dispatch_done:
+            # first agent: pass initial messages as-is; baton is last user text (for potential external memory)
+            last_item = initial_messages[-1] if initial_messages else {"role": "user", "content": ""}
+            baton_user_text = self._extract_text_from_item(last_item)
+            return sanitize_input_items(list(initial_messages)), baton_user_text, None, None, "no-mem:first_initial"
+        else:
+            baton_user_text = last_plain_output if last_plain_output and last_plain_output.strip() else (
+                self._extract_text_from_item(initial_messages[-1]) if initial_messages else ""
+            )
+            prepared = [{"role": "user", "content": baton_user_text}]
+            return sanitize_input_items(prepared), baton_user_text, None, None, "no-mem:last_output"
+
+    def _update_memory_after_step(
+        self,
+        *,
+        node_id: str,
+        mem_state: Any,
+        baton_user_text: str,
+        display_text: str,
+        last_response_id: Optional[str],
+        dbg: DebugConfig,
+    ) -> None:
+        """Update memory strictly with [user baton, assistant display_text], mirroring LI semantics."""
+        if not mem_state:
+            return
+        base_items = list(mem_state.items[:-1]) if getattr(mem_state, "items", None) else []
+        new_mem = (base_items or []) + [
+            {"role": "user", "content": baton_user_text or ""},
+            {"role": "assistant", "content": [{"type": "output_text", "text": display_text or ""}]},
+        ]
+        try:
+            mem_state.set_from(new_mem, last_response_id)
+            if dbg.log_inputs:
+                self.logger.debug(
+                    f"[memory] {node_id} updated len {len(base_items)} -> {len(new_mem)} "
+                    f"user='{ellipsize(baton_user_text or '', dbg.preview_chars)}' "
+                    f"assist='{ellipsize(display_text or '', dbg.preview_chars)}'"
+                )
+        except Exception as e:
+            self.logger.error(f"[memory] update failed for {node_id}: {e}")
+
+    # ---------- Main flow ----------
+
     async def run_flow(
         self,
         schema: List[Dict[str, Any]],
```
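For orientation, a toy trace of the baton policy described in the docstring above, reduced to plain dicts and a stand-in `MemState` (illustrative only, not the pygpt-net types, and with the fallback branches simplified):

```python
# Toy trace of the baton/memory policy above (stand-in types, not pygpt-net API).
from dataclasses import dataclass, field

@dataclass
class MemState:  # stand-in for the per-node memory state
    items: list = field(default_factory=list)

def build_baton(mem_state, first_done, last_output, initial):
    """Mirror of the four branches in _build_baton_input, on plain data."""
    if mem_state and mem_state.items:      # memory with history
        return mem_state.items[:-1] + [{"role": "user", "content": last_output}]
    if mem_state:                          # memory attached but empty
        seed = last_output or initial[-1]["content"]
        return [{"role": "user", "content": seed}]
    if not first_done:                     # no memory, first agent in the flow
        return list(initial)
    return [{"role": "user", "content": last_output}]  # no memory, later agent

initial = [{"role": "user", "content": "Plan a trip"}]
print(build_baton(None, False, "", initial))               # first agent -> full initial messages
print(build_baton(None, True, "Day 1: Rome", initial))     # later agent -> last output as baton
print(build_baton(MemState(), True, "Day 1: Rome", initial))  # empty memory -> seeded baton
```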
```diff
@@ -174,55 +294,34 @@ class FlowOrchestrator:
                     f" role='{node_rt.role}'"
                 )
 
-            # …
-            mem_id = …
-            …
-            if mem_state and not mem_state.is_empty():
-                # memory present and already has history
-                input_items = list(mem_state.items)
-                input_source = "memory:existing"
-            elif mem_state:
-                # memory present but empty -> seed from last output or initial messages
-                if last_plain_output and last_plain_output.strip():
-                    input_items = [{"role": "user", "content": last_plain_output}]
-                    input_source = "memory:seeded_from_last"
-                else:
-                    input_items = list(initial_messages)
-                    input_source = "memory:seeded_from_initial"
-            else:
-                # no memory -> first agent gets initial messages; others get only last output
-                if not first_dispatch_done:
-                    input_items = list(initial_messages)
-                    input_source = "no-mem:initial"
-                else:
-                    if last_plain_output and last_plain_output.strip():
-                        input_items = [{"role": "user", "content": last_plain_output}]
-                        input_source = "no-mem:last_output"
-                    else:
-                        input_items = list(initial_messages)
-                        input_source = "no-mem:fallback_initial"
-
-            prepared_items = sanitize_input_items(input_items)
+            # Input build using baton policy (LI parity)
+            prepared_items, baton_user_text, mem_id, mem_state, input_source = self._build_baton_input(
+                node_id=current_id,
+                g=g,
+                mem=mem,
+                initial_messages=initial_messages,
+                first_dispatch_done=first_dispatch_done,
+                last_plain_output=last_plain_output,
+                dbg=dbg,
+            )
 
             if dbg.log_inputs:
                 self.logger.debug(f"[input] source={input_source} items={len(prepared_items)} "
                                   f"preview={items_preview(prepared_items, dbg.preview_chars)}")
+                if mem_id:
+                    mem_info = f"{mem_id} (len={len(mem_state.items) if mem_state else 0})"
+                    self.logger.debug(f"[memory] attached={bool(mem_id)} mem_id={mem_info}")
 
             # Build agent with per-node runtime
+            # Restrict friendly_map only to allowed outgoing routes of current node
+            allowed_map = {rid: fs.agents[rid].name or rid for rid in (node.outputs or []) if rid in fs.agents}
             built = factory.build(
                 node=node,
                 node_runtime=node_rt,
                 preset=preset,
                 function_tools=function_tools,
                 force_router=False,  # auto on multi-output
-                friendly_map=…,
+                friendly_map=allowed_map,
                 handoffs_enabled=True,
                 context=agent_kwargs.get("context"),
             )
```
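The `allowed_map` comprehension above quietly drops route ids that are not real agents (e.g. END nodes); a minimal illustration with plain data (names here are made up):

```python
# Illustrative only: restrict the router's choices to a node's outgoing edges.
agents = {"research": "Researcher", "write": "Writer"}  # id -> friendly name
node_outputs = ["write", "end"]                          # declared edges; "end" is not an agent

allowed_map = {rid: agents[rid] or rid for rid in node_outputs if rid in agents}
print(allowed_map)  # {'write': 'Writer'} – non-agent targets are filtered out
```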
```diff
@@ -243,37 +342,42 @@ class FlowOrchestrator:
 
             # Header for UI
             ctx.set_agent_name(agent.name)
-            # title = f"\n\n**{built.name}**\n\n"
-            # ctx.stream = title
             bridge.on_step(ctx, begin)
             begin = False
             handler.begin = begin
-            # if not use_partial_ctx:
-            #     handler.to_buffer(title)
 
             display_text = ""  # what we show to UI for this step
             next_id: Optional[str] = None
 
             # --- EXECUTION ---
             if stream and not multi_output:
-                # Full token streaming (single-output agent)
+                # Full token streaming (single-output agent) – collect full buffer for baton
                 result = Runner.run_streamed(agent, **run_kwargs)
                 handler.reset()
+                # Optional local accumulator; prefer handler.buffer after loop
+                last_chunk = ""
 
                 async for event in result.stream_events():
                     if bridge.stopped():
                         result.cancel()
                         bridge.on_stop(ctx)
                         break
-                    …
+                    chunk, last_response_id = handler.handle(event, ctx)
+                    if chunk:
+                        last_chunk = chunk
+
+                # Use full buffer if available (ensures baton sees complete output)
+                display_text = getattr(handler, "buffer", "") or last_chunk or ""
+
+                # Update memory strictly with baton + displayed text
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 # Route: first edge or END
                 outs = g.get_next(current_id)
```
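The streaming branch above prefers the handler's full buffer over the last delta, so the baton and memory always see the complete step output. A self-contained sketch of that pattern (`StreamHandler` here is a stand-in, not the pygpt-net handler):

```python
# Sketch of the "prefer full buffer over last chunk" pattern above (stand-in class).
class StreamHandler:
    def __init__(self) -> None:
        self.buffer = ""
    def handle(self, event: str) -> str:
        self.buffer += event  # accumulate every delta
        return event          # return the chunk just seen

handler = StreamHandler()
last_chunk = ""
for event in ["Hel", "lo ", "world"]:
    chunk = handler.handle(event)
    if chunk:
        last_chunk = chunk

# Baton must see the complete output, not just the final delta:
display_text = getattr(handler, "buffer", "") or last_chunk or ""
assert display_text == "Hello world"
```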
```diff
@@ -308,13 +412,15 @@ class FlowOrchestrator:
                 decision = parse_route_output(raw_text, allowed_routes)
                 display_text = decision.content or ""
 
-                # …
-                …
+                # Update memory with baton + displayed content
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 # Route decision
                 if decision.valid:
@@ -348,11 +454,15 @@ class FlowOrchestrator:
                 if not use_partial_ctx:
                     handler.to_buffer(display_text)
 
-                …
+                # Update memory with baton + displayed content
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 if decision.valid:
                     next_id = decision.route
@@ -373,11 +483,15 @@ class FlowOrchestrator:
                 if not use_partial_ctx:
                     handler.to_buffer(display_text)
 
-                …
+                # Update memory with baton + displayed content
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 if decision.valid:
                     next_id = decision.route
@@ -397,11 +511,15 @@ class FlowOrchestrator:
                 if not use_partial_ctx:
                     handler.to_buffer(display_text)
 
-                …
+                # Update memory with baton + displayed text
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 outs = g.get_next(current_id)
                 next_id = outs[0] if outs else g.first_connected_end(current_id)
```