pygpt-net 2.6.59__py3-none-any.whl → 2.6.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +4 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +9 -5
  4. pygpt_net/controller/__init__.py +1 -0
  5. pygpt_net/controller/presets/editor.py +442 -39
  6. pygpt_net/core/agents/custom/__init__.py +275 -0
  7. pygpt_net/core/agents/custom/debug.py +64 -0
  8. pygpt_net/core/agents/custom/factory.py +109 -0
  9. pygpt_net/core/agents/custom/graph.py +71 -0
  10. pygpt_net/core/agents/custom/llama_index/__init__.py +10 -0
  11. pygpt_net/core/agents/custom/llama_index/factory.py +89 -0
  12. pygpt_net/core/agents/custom/llama_index/router_streamer.py +106 -0
  13. pygpt_net/core/agents/custom/llama_index/runner.py +529 -0
  14. pygpt_net/core/agents/custom/llama_index/stream.py +56 -0
  15. pygpt_net/core/agents/custom/llama_index/utils.py +242 -0
  16. pygpt_net/core/agents/custom/logging.py +50 -0
  17. pygpt_net/core/agents/custom/memory.py +51 -0
  18. pygpt_net/core/agents/custom/router.py +116 -0
  19. pygpt_net/core/agents/custom/router_streamer.py +187 -0
  20. pygpt_net/core/agents/custom/runner.py +454 -0
  21. pygpt_net/core/agents/custom/schema.py +125 -0
  22. pygpt_net/core/agents/custom/utils.py +181 -0
  23. pygpt_net/core/agents/provider.py +72 -7
  24. pygpt_net/core/agents/runner.py +7 -4
  25. pygpt_net/core/agents/runners/helpers.py +1 -1
  26. pygpt_net/core/agents/runners/llama_workflow.py +3 -0
  27. pygpt_net/core/agents/runners/openai_workflow.py +8 -1
  28. pygpt_net/{ui/widget/builder → core/node_editor}/__init__.py +2 -2
  29. pygpt_net/core/{builder → node_editor}/graph.py +11 -218
  30. pygpt_net/core/node_editor/models.py +111 -0
  31. pygpt_net/core/node_editor/types.py +76 -0
  32. pygpt_net/core/node_editor/utils.py +17 -0
  33. pygpt_net/core/render/web/renderer.py +10 -8
  34. pygpt_net/data/config/config.json +3 -3
  35. pygpt_net/data/config/models.json +3 -3
  36. pygpt_net/data/locale/locale.en.ini +4 -4
  37. pygpt_net/item/agent.py +5 -1
  38. pygpt_net/item/preset.py +19 -1
  39. pygpt_net/provider/agents/base.py +33 -2
  40. pygpt_net/provider/agents/llama_index/flow_from_schema.py +92 -0
  41. pygpt_net/provider/agents/openai/flow_from_schema.py +96 -0
  42. pygpt_net/provider/core/agent/json_file.py +11 -5
  43. pygpt_net/tools/agent_builder/tool.py +217 -52
  44. pygpt_net/tools/agent_builder/ui/dialogs.py +119 -24
  45. pygpt_net/tools/agent_builder/ui/list.py +37 -10
  46. pygpt_net/ui/dialog/preset.py +16 -1
  47. pygpt_net/ui/main.py +1 -1
  48. pygpt_net/{core/builder → ui/widget/node_editor}/__init__.py +2 -2
  49. pygpt_net/ui/widget/node_editor/command.py +373 -0
  50. pygpt_net/ui/widget/node_editor/editor.py +2038 -0
  51. pygpt_net/ui/widget/node_editor/item.py +492 -0
  52. pygpt_net/ui/widget/node_editor/node.py +1205 -0
  53. pygpt_net/ui/widget/node_editor/utils.py +17 -0
  54. pygpt_net/ui/widget/node_editor/view.py +247 -0
  55. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/METADATA +72 -2
  56. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/RECORD +59 -33
  57. pygpt_net/core/agents/custom.py +0 -150
  58. pygpt_net/ui/widget/builder/editor.py +0 -2001
  59. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,242 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from dataclasses import dataclass
14
+ from typing import Any, Callable, Dict, List, Optional, Union
15
+
16
+ from agents import TResponseInputItem
17
+ from pygpt_net.item.model import ModelItem
18
+ from pygpt_net.item.preset import PresetItem
19
+
20
+ try:
21
+ from llama_index.core.llms import ChatMessage, MessageRole
22
+ from llama_index.core.tools import BaseTool, FunctionTool
23
+ from llama_index.core.llms.llm import LLM
24
+ except Exception:
25
+ ChatMessage = None
26
+ MessageRole = None
27
+ BaseTool = object # type: ignore
28
+ FunctionTool = None
29
+ LLM = object # type: ignore
30
+
31
+ OptionGetter = Callable[[str, str, Any], Any]
32
+
33
+
34
def make_option_getter(base_agent, preset: Optional[PresetItem]) -> OptionGetter:
    """Build an option getter bound to *base_agent* and *preset*.

    The returned callable reads a preset option via ``base_agent.get_option``
    and falls back to the supplied default when no preset is set, the stored
    value is ``None``/empty string, or the lookup raises.
    """
    def option_get(section: str, key: str, default: Any = None) -> Any:
        # No preset selected -> nothing to read from.
        if preset is None:
            return default
        try:
            value = base_agent.get_option(preset, section, key)
        except Exception:
            return default
        # Treat None and empty string as "not configured".
        return value if value not in (None, "") else default
    return option_get
44
+
45
+
46
@dataclass
class NodeRuntime:
    # Resolved per-node runtime configuration (model, prompt, tool flags).
    model: ModelItem            # model this node runs with
    instructions: str           # effective instructions / system prompt
    allow_local_tools: bool     # local (function) tools enabled for this node
    allow_remote_tools: bool    # remote tools enabled for this node
52
+
53
+
54
def resolve_node_runtime(
    *,
    window,
    node,
    option_get: OptionGetter,
    default_model: ModelItem,
    base_prompt: Optional[str],
    schema_allow_local: Optional[bool],
    schema_allow_remote: Optional[bool],
    default_allow_local: bool,
    default_allow_remote: bool,
) -> NodeRuntime:
    """Resolve the effective model, instructions and tool flags for a node.

    Per-node preset options win over schema-level values, which in turn win
    over the supplied defaults.
    """
    # Per-node model override; fall back to the default on any lookup failure.
    model_name = option_get(node.id, "model", None)
    resolved_model: ModelItem = default_model
    try:
        if model_name:
            candidate = window.core.models.get(model_name)
            if candidate:
                resolved_model = candidate
    except Exception:
        resolved_model = default_model

    # Instructions: preset option > node schema instruction > base prompt.
    instructions = (
        option_get(node.id, "prompt", None)
        or getattr(node, "instruction", None)
        or base_prompt
        or ""
    ).strip()

    # Tool flags: per-node option > schema value > default.
    local_fallback = default_allow_local if schema_allow_local is None else schema_allow_local
    remote_fallback = default_allow_remote if schema_allow_remote is None else schema_allow_remote
    allow_local = bool(option_get(node.id, "allow_local_tools", local_fallback))
    allow_remote = bool(option_get(node.id, "allow_remote_tools", remote_fallback))

    return NodeRuntime(
        model=resolved_model,
        instructions=instructions,
        allow_local_tools=allow_local,
        allow_remote_tools=allow_remote,
    )
98
+
99
+
100
def sanitize_input_items(items: List[TResponseInputItem]) -> List[TResponseInputItem]:
    """Return a copy of *items* with volatile identifiers stripped.

    Removes top-level "id"/"message_id" keys and "id" keys inside
    list-typed "content" parts; non-dict items pass through untouched.
    """
    result: List[TResponseInputItem] = []
    for item in items or []:
        if not isinstance(item, dict):
            result.append(item)
            continue
        cleaned: Dict[str, Any] = {
            k: v for k, v in item.items() if k not in ("id", "message_id")
        }
        content = cleaned.get("content")
        if isinstance(content, list):
            cleaned["content"] = [
                {k: v for k, v in part.items() if k != "id"}
                if isinstance(part, dict) else part
                for part in content
            ]
        result.append(cleaned)
    return result
120
+
121
+
122
def content_to_str(content: Union[str, List[Dict[str, Any]], None]) -> str:
    """Flatten message content into plain text.

    A string is returned as-is; a list joins the "text" field of each
    dict part with newlines; anything else yields an empty string.
    """
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        return "\n".join(
            str(part["text"])
            for part in content
            if isinstance(part, dict) and "text" in part
        )
    return ""
132
+
133
+
134
def to_li_chat_messages(items: List[TResponseInputItem]) -> List[ChatMessage]:
    """Convert response input items to LlamaIndex ChatMessage objects.

    Non-dict items and items without text content are skipped; unknown
    roles map to USER. Returns [] when llama_index is not installed.
    """
    if ChatMessage is None or MessageRole is None:
        return []
    role_map = {
        "user": MessageRole.USER,
        "assistant": MessageRole.ASSISTANT,
        "system": MessageRole.SYSTEM,
    }
    out: List[ChatMessage] = []
    for item in items or []:
        if not isinstance(item, dict):
            continue
        text = content_to_str(item.get("content"))
        if not text:
            continue
        role = role_map.get(str(item.get("role", "")).lower(), MessageRole.USER)
        out.append(ChatMessage(role=role, content=text))
    return out
154
+
155
+
156
def single_user_msg(text: str) -> List[ChatMessage]:
    """Wrap *text* in a single USER ChatMessage; [] if llama_index is missing."""
    if ChatMessage is not None and MessageRole is not None:
        return [ChatMessage(role=MessageRole.USER, content=text or "")]
    return []
160
+
161
+
162
def coerce_li_tools(function_tools: List[Any]) -> List[Any]:
    """Normalize a heterogeneous tool list into LlamaIndex tool instances.

    Accepts ready BaseTool objects, bare callables, and dicts with an
    "fn"/"callable" key; anything else (or any conversion error) is
    skipped. Returns [] when llama_index is not installed.
    """
    if BaseTool is object:
        # llama_index missing -> no tool support available.
        return []
    coerced: List[Any] = []
    for tool in function_tools or []:
        try:
            if isinstance(tool, BaseTool):
                coerced.append(tool)
                continue
            if callable(tool) and FunctionTool is not None:
                coerced.append(FunctionTool.from_defaults(tool))
                continue
            if isinstance(tool, dict):
                fn = tool.get("fn") or tool.get("callable")
                if callable(fn) and FunctionTool is not None:
                    coerced.append(FunctionTool.from_defaults(fn))
        except Exception:
            # Best-effort conversion: skip broken entries.
            continue
    return coerced
179
+
180
+
181
def resolve_llm(window, node_model: ModelItem, base_llm: Any, stream: bool) -> Any:
    """Return the LLM to use for a node.

    If a per-node model is set and the app exposes ``window.core.idx``,
    resolve through ``window.core.idx.llm.get(model, stream)``; otherwise
    (or on any error) reuse *base_llm* supplied by the app.
    """
    try:
        idx_core = getattr(window.core, "idx", None)
        if node_model and hasattr(node_model, "name") and idx_core:
            return idx_core.llm.get(node_model, stream=stream)
    except Exception:
        # Any resolution failure falls back to the shared base LLM.
        pass
    return base_llm
192
+
193
+
194
def patch_last_assistant_output(items: List[TResponseInputItem], text: str) -> List[TResponseInputItem]:
    """Replace the content of the most recent assistant message with *text*.

    Returns the input unchanged when the list is empty, a shallow copy when
    no assistant message exists, and a sanitized patched copy otherwise.
    """
    if not items:
        return items
    out = list(items)
    # Walk backwards to find the latest assistant entry.
    for pos in range(len(out) - 1, -1, -1):
        entry = out[pos]
        if isinstance(entry, dict) and entry.get("role") == "assistant":
            out[pos] = {
                "role": "assistant",
                "content": [{"type": "output_text", "text": text or ""}],
            }
            return sanitize_input_items(out)
    return out
208
+
209
def extract_agent_text(ret: Any) -> str:
    """Extract plain text from assorted LlamaIndex agent return types.

    Prefers ``ret.response.message.content``/``.text``, then
    ``ret.response.text``; avoids str(ret), which may prepend 'assistant:'.
    """
    try:
        resp = getattr(ret, "response", None)
        if resp is None:
            # Plain string (or string-like) return from the agent.
            return str(ret or "")
        msg = getattr(resp, "message", None)
        if msg is not None:
            # ChatResponse(message=LLMMessage(content=...)) variants.
            body = getattr(msg, "content", None) or getattr(msg, "text", None)
            if isinstance(body, str):
                return body
        plain = getattr(resp, "text", None)
        if isinstance(plain, str):
            return plain
        # Last resort: stringify the response object itself.
        return str(resp)
    except Exception:
        return str(ret or "")
233
+
234
+
235
def strip_role_prefixes(text: str) -> str:
    """Strip a leading 'assistant:' / 'user:' / 'system:' label, if present.

    Such prefixes can leak from stringified chat outputs.
    """
    if not text:
        return ""
    import re
    pattern = r"^\s*(assistant|user|system)\s*:\s*"
    return re.sub(pattern, "", text.strip(), flags=re.IGNORECASE)
@@ -0,0 +1,50 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from typing import Protocol, Optional, Any
14
+
15
+
16
class Logger(Protocol):
    """Minimal logger protocol used across the flow core.

    Structural interface: any object providing these four methods can be
    passed wherever a Logger is expected. Each method accepts arbitrary
    keyword arguments, which implementations may ignore.
    """
    def debug(self, msg: str, **kwargs: Any) -> None: ...
    def info(self, msg: str, **kwargs: Any) -> None: ...
    def warning(self, msg: str, **kwargs: Any) -> None: ...
    def error(self, msg: str, **kwargs: Any) -> None: ...
22
+
23
+
24
class NullLogger:
    """Logger that silently discards every message."""

    def debug(self, msg: str, **kwargs: Any) -> None:
        pass

    def info(self, msg: str, **kwargs: Any) -> None:
        pass

    def warning(self, msg: str, **kwargs: Any) -> None:
        pass

    def error(self, msg: str, **kwargs: Any) -> None:
        pass
30
+
31
+
32
class StdLogger:
    """Simple stdout logger; can be swapped for an app-provided logger."""

    def __init__(self, prefix: str = "[flow]"):
        # Tag prepended to every line, e.g. "[flow] INFO: message".
        self.prefix = prefix

    def _fmt(self, level: str, msg: str) -> str:
        """Format one line as '<prefix> LEVEL: message'."""
        return f"{self.prefix} {level.upper()}: {msg}"

    def _emit(self, level: str, msg: str) -> None:
        """Print a formatted line to stdout."""
        print(self._fmt(level, msg))

    def debug(self, msg: str, **kwargs: Any) -> None:
        self._emit("debug", msg)

    def info(self, msg: str, **kwargs: Any) -> None:
        self._emit("info", msg)

    def warning(self, msg: str, **kwargs: Any) -> None:
        self._emit("warning", msg)

    def error(self, msg: str, **kwargs: Any) -> None:
        self._emit("error", msg)
@@ -0,0 +1,51 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from dataclasses import dataclass, field
14
+ from typing import List, Optional, Dict, Any
15
+
16
+ from agents import TResponseInputItem
17
+
18
+
19
@dataclass
class MemoryState:
    """Conversation items plus last response id for one memory node."""
    mem_id: str                                        # owning memory node id
    items: List[TResponseInputItem] = field(default_factory=list)
    last_response_id: Optional[str] = None

    def is_empty(self) -> bool:
        """True when no conversation items are stored."""
        return not self.items

    def set_from(self, items: List[TResponseInputItem], last_response_id: Optional[str]) -> None:
        """Replace stored items (copied) and the last response id."""
        self.items = [] if not items else list(items)
        self.last_response_id = last_response_id

    def update_from_result(self, items: List[TResponseInputItem], last_response_id: Optional[str]) -> None:
        """Alias for :meth:`set_from`, applied after a run completes."""
        self.set_from(items, last_response_id)
35
+
36
+
37
class MemoryManager:
    """Registry of MemoryState objects keyed by memory node id."""

    def __init__(self) -> None:
        self._mem: Dict[str, MemoryState] = {}

    def get(self, mem_id: str) -> MemoryState:
        """Return the state for *mem_id*, creating it on first access."""
        state = self._mem.get(mem_id)
        if state is None:
            state = MemoryState(mem_id=mem_id)
            self._mem[mem_id] = state
        return state

    def set(self, mem_id: str, items: List[TResponseInputItem], last_response_id: Optional[str]) -> None:
        """Overwrite the stored items / response id for *mem_id*."""
        self.get(mem_id).set_from(items, last_response_id)

    def snapshot(self) -> Dict[str, MemoryState]:
        """Shallow copy of the id -> state mapping."""
        return dict(self._mem)
@@ -0,0 +1,116 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ import json
14
+ import re
15
+ from dataclasses import dataclass
16
+ from typing import List, Optional, Tuple, Any
17
+
18
+
19
@dataclass
class RouteDecision:
    # Outcome of parsing a router agent's {route, content} JSON output.
    route: Optional[str] # next node id, the string "end", or None when invalid
    content: str # human-facing content to display
    raw: str # raw model output
    valid: bool # True when the output matched the schema with an allowed route
    error: Optional[str] = None # parse/validation failure description
26
+
27
+
28
def build_router_instruction(agent_name: str, current_id: str, allowed_routes: List[str], friendly_map: dict[str, str]) -> str:
    """
    Builds an instruction that forces the model to output JSON with next route and content.

    :param agent_name: human-readable name of the current agent
    :param current_id: node id of the current agent
    :param allowed_routes: node ids the model may route to (besides "end")
    :param friendly_map: id -> display-name mapping; missing ids fall back to the id
    :return: system instruction enforcing the {"route", "content"} JSON schema
    """
    allowed = ", ".join(allowed_routes)
    # Only expose friendly names for routes that are actually allowed.
    friendly = {rid: friendly_map.get(rid, rid) for rid in allowed_routes}
    return (
        "You are a routing-capable agent in a multi-agent flow.\n"
        f"Your id is: {current_id}, name: {agent_name}.\n"
        "You MUST respond ONLY with a single JSON object and nothing else.\n"
        "Schema:\n"
        '{\n'
        ' "route": "<ID of the next agent from allowed_routes OR the string \'end\'>",\n'
        ' "content": "<final response text for the user (or tool result)>"\n'
        '}\n'
        "Rules:\n"
        f"- allowed_routes: [{allowed}]\n"
        "- If you want to finish the flow, set route to \"end\".\n"
        "- content must contain the user-facing answer (you may include structured data as JSON or Markdown inside content).\n"
        "- Do NOT add any commentary outside of the JSON. No leading or trailing text.\n"
        "- If using tools, still return the final JSON with tool results summarized in content.\n"
        f"- Human-friendly route names: {json.dumps(friendly)}\n"
    )
51
+
52
+
53
+ def _extract_json_block(text: str) -> Optional[str]:
54
+ """
55
+ Extract JSON block from the text. Supports raw JSON or fenced ```json blocks.
56
+ """
57
+ # fenced block
58
+ m = re.search(r"```json\s*(\{.*?\})\s*```", text, flags=re.DOTALL | re.IGNORECASE)
59
+ if m:
60
+ return m.group(1).strip()
61
+
62
+ # first JSON object occurrence fallback
63
+ brace_idx = text.find("{")
64
+ if brace_idx != -1:
65
+ # naive attempt to find matching closing brace
66
+ snippet = text[brace_idx:]
67
+ # try to trim trailing content after last closing brace
68
+ last_close = snippet.rfind("}")
69
+ if last_close != -1:
70
+ return snippet[: last_close + 1].strip()
71
+ return None
72
+
73
+
74
def parse_route_output(raw_text: str, allowed_routes: List[str]) -> RouteDecision:
    """
    Parse model output enforcing the {route, content} schema.

    :param raw_text: full raw text produced by the router agent
    :param allowed_routes: node ids considered valid targets (besides "end")
    :return: RouteDecision; ``valid`` is False on malformed JSON or a
             disallowed route, with ``error`` describing the failure
    """
    text = raw_text.strip()
    candidate = text

    # Two-pass parse: try the whole text first; on failure, retry once with
    # an extracted JSON block (fenced or brace-delimited).
    parsed: Optional[Any] = None
    for attempt in range(2):
        try:
            parsed = json.loads(candidate)
            break
        except Exception:
            if attempt == 0:
                block = _extract_json_block(text)
                if block:
                    candidate = block
                    continue
            parsed = None
            break

    if isinstance(parsed, dict):
        route = parsed.get("route")
        content = parsed.get("content", "")
        # Normalize route strings
        if isinstance(route, str):
            route_norm = route.strip().lower()
            if route_norm == "end":
                return RouteDecision(route="end", content=str(content), raw=raw_text, valid=True)
            # exact match against allowed
            if route in allowed_routes:
                return RouteDecision(route=route, content=str(content), raw=raw_text, valid=True)
        # JSON parsed but the route is missing, non-string, or not allowed.
        return RouteDecision(
            route=None,
            content=str(parsed.get("content", "")),
            raw=raw_text,
            valid=False,
            error=f"Invalid or disallowed route: {route}",
        )

    # Not a valid JSON – fallback, pass through content as-is
    return RouteDecision(route=None, content=raw_text, raw=raw_text, valid=False, error="Malformed JSON")
@@ -0,0 +1,187 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ import re
14
+ import json
15
+ from typing import Optional, Tuple, Any
16
+
17
+ from pygpt_net.core.agents.bridge import ConnectionContext
18
+ from pygpt_net.item.ctx import CtxItem
19
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
20
+
21
+ from openai.types.responses import (
22
+ ResponseTextDeltaEvent,
23
+ ResponseCreatedEvent,
24
+ ResponseCompletedEvent,
25
+ )
26
+
27
+ from .logging import Logger, NullLogger
28
+
29
+
30
class DelayedRouterStreamer:
    """
    Delayed streaming for multi-output router agents:
    - Collect tokens silently (no UI flush).
    - After completion, reveal only parsed `content` to UI.
    """
    def __init__(self, window, bridge: ConnectionContext):
        self.window = window
        self.bridge = bridge
        # Underlying stream handler, used in buffer-only mode (flush=False).
        self.handler = StreamHandler(window, bridge)
        self._last_response_id: Optional[str] = None

    def reset(self) -> None:
        """Clear handler state and buffered text before a new response."""
        self.handler.reset()
        # NOTE(review): buffer is cleared explicitly in addition to reset() —
        # presumably handler.reset() does not empty it; confirm against StreamHandler.
        self.handler.buffer = ""
        self._last_response_id = None

    def handle_event(self, event: Any, ctx: CtxItem) -> Tuple[str, Optional[str]]:
        """Feed one stream event into the handler (buffered, not flushed to UI).

        :return: (full buffered text so far, last response id seen if any)
        """
        text, resp_id = self.handler.handle(event, ctx, flush=False, buffer=True)
        if resp_id:
            self._last_response_id = resp_id
        return self.handler.buffer, self._last_response_id

    @property
    def buffer(self) -> str:
        # Full raw text collected so far (delegated to the handler).
        return self.handler.buffer

    @property
    def last_response_id(self) -> Optional[str]:
        # Id of the most recent response observed in the stream, if any.
        return self._last_response_id
60
+
61
+
62
class RealtimeRouterStreamer:
    """
    Realtime streaming for multi-output router agents:
    - Detect `"content": "<...>"` in streamed JSON and emit decoded content incrementally to UI.
    - After completion, caller parses the final route from full buffer.
    """
    # Matches the opening of the JSON "content" string field (whitespace-tolerant).
    CONTENT_PATTERN = re.compile(r'"content"\s*:\s*"')

    def __init__(
        self,
        window,
        bridge: ConnectionContext,
        handler: Optional[StreamHandler] = None,
        buffer_to_handler: bool = True,
        logger: Optional[Logger] = None,
    ):
        self.window = window
        self.bridge = bridge
        # Optional external StreamHandler that mirrors the decoded content.
        self.handler = handler
        # When True and a handler is set, decoded content is also buffered there.
        self.buffer_to_handler = buffer_to_handler
        self.logger = logger or NullLogger()

        self._raw: str = ""  # full raw JSON text streamed so far
        self._last_response_id: Optional[str] = None

        # Incremental parse state for the `"content"` string value:
        self._content_started: bool = False   # opening quote of content seen
        self._content_closed: bool = False    # closing (unescaped) quote seen
        self._content_start_idx: int = -1     # index in _raw just past the opening quote
        self._content_raw: str = ""           # escaped (JSON-encoded) content seen so far
        self._content_decoded: str = ""       # decoded content already emitted to the UI

    def reset(self) -> None:
        """Clear raw buffer and content-parsing state before a new response."""
        self._raw = ""
        self._last_response_id = None
        self._content_started = False
        self._content_closed = False
        self._content_start_idx = -1
        self._content_raw = ""
        self._content_decoded = ""

    def handle_event(self, event: Any, ctx: CtxItem) -> None:
        """Consume one agent stream event; emit any new content text to the UI."""
        if event.type != "raw_response_event":
            return

        data = event.data
        if isinstance(data, ResponseCreatedEvent):
            self._last_response_id = data.response.id
            return

        if isinstance(data, ResponseTextDeltaEvent):
            delta = data.delta or ""
            if not delta:
                return
            prev_len = len(self._raw)  # NOTE(review): currently unused
            self._raw += delta

            if not self._content_started:
                # Look for the opening of the "content" field in the raw JSON.
                m = self.CONTENT_PATTERN.search(self._raw)
                if m:
                    self._content_started = True
                    self._content_start_idx = m.end()
                    self.logger.debug("[router-realtime] content field detected in stream.")
                    if len(self._raw) > self._content_start_idx:
                        self._process_new_content(ctx)
                return

            if self._content_started and not self._content_closed:
                self._process_new_content(ctx)
            return

        if isinstance(data, ResponseCompletedEvent):
            if self._content_started:
                self.logger.debug("[router-realtime] stream completed; final JSON will be parsed by runner.")
            return

    def _process_new_content(self, ctx: CtxItem) -> None:
        """Decode newly-arrived content characters and push them to the UI."""
        sub = self._raw[self._content_start_idx:]
        close_idx = self._find_unescaped_quote(sub)
        if close_idx is not None:
            # Closing quote of the content string reached: stop at it.
            content_portion = sub[:close_idx]
            self._content_closed = True
            self.logger.debug("[router-realtime] content field closed in stream.")
        else:
            content_portion = sub

        # Only the suffix beyond what was already consumed is new.
        new_raw_piece = content_portion[len(self._content_raw):]
        if not new_raw_piece:
            return

        self._content_raw += new_raw_piece

        try:
            # Decode by wrapping the escaped fragment in quotes; this raises
            # while a JSON escape sequence is still incomplete at the edge.
            decoded_full: str = json.loads(f'"{self._content_raw}"')
            new_suffix = decoded_full[len(self._content_decoded):]
            if new_suffix:
                ctx.stream = new_suffix
                self.bridge.on_step(ctx, False)
                if self.handler is not None and self.buffer_to_handler:
                    self.handler.to_buffer(new_suffix)
            self._content_decoded = decoded_full
        except Exception:
            # wait for more tokens
            pass

    @staticmethod
    def _find_unescaped_quote(s: str) -> Optional[int]:
        """Index of the first '"' not preceded by an odd run of backslashes."""
        i = 0
        while i < len(s):
            if s[i] == '"':
                j = i - 1
                bs = 0
                # Count the run of backslashes immediately before the quote.
                while j >= 0 and s[j] == '\\':
                    bs += 1
                    j -= 1
                if bs % 2 == 0:
                    return i
            i += 1
        return None

    @property
    def buffer(self) -> str:
        # Full raw JSON streamed so far.
        return self._raw

    @property
    def last_response_id(self) -> Optional[str]:
        # Most recent response id observed in the stream, if any.
        return self._last_response_id