pygpt-net 2.6.59__py3-none-any.whl → 2.6.61__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91) hide show
  1. pygpt_net/CHANGELOG.txt +11 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +9 -5
  4. pygpt_net/controller/__init__.py +1 -0
  5. pygpt_net/controller/chat/common.py +115 -6
  6. pygpt_net/controller/chat/input.py +4 -1
  7. pygpt_net/controller/presets/editor.py +442 -39
  8. pygpt_net/controller/presets/presets.py +121 -6
  9. pygpt_net/controller/settings/editor.py +0 -15
  10. pygpt_net/controller/theme/markdown.py +2 -5
  11. pygpt_net/controller/ui/ui.py +4 -7
  12. pygpt_net/core/agents/custom/__init__.py +281 -0
  13. pygpt_net/core/agents/custom/debug.py +64 -0
  14. pygpt_net/core/agents/custom/factory.py +109 -0
  15. pygpt_net/core/agents/custom/graph.py +71 -0
  16. pygpt_net/core/agents/custom/llama_index/__init__.py +10 -0
  17. pygpt_net/core/agents/custom/llama_index/factory.py +100 -0
  18. pygpt_net/core/agents/custom/llama_index/router_streamer.py +106 -0
  19. pygpt_net/core/agents/custom/llama_index/runner.py +562 -0
  20. pygpt_net/core/agents/custom/llama_index/stream.py +56 -0
  21. pygpt_net/core/agents/custom/llama_index/utils.py +253 -0
  22. pygpt_net/core/agents/custom/logging.py +50 -0
  23. pygpt_net/core/agents/custom/memory.py +51 -0
  24. pygpt_net/core/agents/custom/router.py +155 -0
  25. pygpt_net/core/agents/custom/router_streamer.py +187 -0
  26. pygpt_net/core/agents/custom/runner.py +455 -0
  27. pygpt_net/core/agents/custom/schema.py +127 -0
  28. pygpt_net/core/agents/custom/utils.py +193 -0
  29. pygpt_net/core/agents/provider.py +72 -7
  30. pygpt_net/core/agents/runner.py +7 -4
  31. pygpt_net/core/agents/runners/helpers.py +1 -1
  32. pygpt_net/core/agents/runners/llama_workflow.py +3 -0
  33. pygpt_net/core/agents/runners/openai_workflow.py +8 -1
  34. pygpt_net/core/db/viewer.py +11 -5
  35. pygpt_net/{ui/widget/builder → core/node_editor}/__init__.py +2 -2
  36. pygpt_net/core/{builder → node_editor}/graph.py +28 -226
  37. pygpt_net/core/node_editor/models.py +118 -0
  38. pygpt_net/core/node_editor/types.py +78 -0
  39. pygpt_net/core/node_editor/utils.py +17 -0
  40. pygpt_net/core/presets/presets.py +216 -29
  41. pygpt_net/core/render/markdown/parser.py +0 -2
  42. pygpt_net/core/render/web/renderer.py +10 -8
  43. pygpt_net/data/config/config.json +5 -6
  44. pygpt_net/data/config/models.json +3 -3
  45. pygpt_net/data/config/settings.json +2 -38
  46. pygpt_net/data/locale/locale.de.ini +64 -1
  47. pygpt_net/data/locale/locale.en.ini +63 -4
  48. pygpt_net/data/locale/locale.es.ini +64 -1
  49. pygpt_net/data/locale/locale.fr.ini +64 -1
  50. pygpt_net/data/locale/locale.it.ini +64 -1
  51. pygpt_net/data/locale/locale.pl.ini +65 -2
  52. pygpt_net/data/locale/locale.uk.ini +64 -1
  53. pygpt_net/data/locale/locale.zh.ini +64 -1
  54. pygpt_net/data/locale/plugin.cmd_system.en.ini +62 -66
  55. pygpt_net/item/agent.py +5 -1
  56. pygpt_net/item/preset.py +19 -1
  57. pygpt_net/provider/agents/base.py +33 -2
  58. pygpt_net/provider/agents/llama_index/flow_from_schema.py +92 -0
  59. pygpt_net/provider/agents/openai/flow_from_schema.py +96 -0
  60. pygpt_net/provider/core/agent/json_file.py +11 -5
  61. pygpt_net/provider/core/config/patch.py +10 -1
  62. pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -6
  63. pygpt_net/tools/agent_builder/tool.py +233 -52
  64. pygpt_net/tools/agent_builder/ui/dialogs.py +172 -28
  65. pygpt_net/tools/agent_builder/ui/list.py +37 -10
  66. pygpt_net/ui/__init__.py +2 -4
  67. pygpt_net/ui/dialog/about.py +58 -38
  68. pygpt_net/ui/dialog/db.py +142 -3
  69. pygpt_net/ui/dialog/preset.py +62 -8
  70. pygpt_net/ui/layout/toolbox/presets.py +52 -16
  71. pygpt_net/ui/main.py +1 -1
  72. pygpt_net/ui/widget/dialog/db.py +0 -0
  73. pygpt_net/ui/widget/lists/preset.py +644 -60
  74. pygpt_net/{core/builder → ui/widget/node_editor}/__init__.py +2 -2
  75. pygpt_net/ui/widget/node_editor/command.py +373 -0
  76. pygpt_net/ui/widget/node_editor/config.py +157 -0
  77. pygpt_net/ui/widget/node_editor/editor.py +2070 -0
  78. pygpt_net/ui/widget/node_editor/item.py +493 -0
  79. pygpt_net/ui/widget/node_editor/node.py +1460 -0
  80. pygpt_net/ui/widget/node_editor/utils.py +17 -0
  81. pygpt_net/ui/widget/node_editor/view.py +364 -0
  82. pygpt_net/ui/widget/tabs/output.py +1 -1
  83. pygpt_net/ui/widget/textarea/input.py +2 -2
  84. pygpt_net/utils.py +114 -2
  85. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/METADATA +80 -93
  86. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/RECORD +88 -61
  87. pygpt_net/core/agents/custom.py +0 -150
  88. pygpt_net/ui/widget/builder/editor.py +0 -2001
  89. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/LICENSE +0 -0
  90. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/WHEEL +0 -0
  91. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,253 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.25 14:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from dataclasses import dataclass
14
+ from typing import Any, Callable, Dict, List, Optional, Union
15
+
16
+ from agents import TResponseInputItem
17
+ from pygpt_net.item.model import ModelItem
18
+ from pygpt_net.item.preset import PresetItem
19
+
20
+ try:
21
+ from llama_index.core.llms import ChatMessage, MessageRole
22
+ from llama_index.core.tools import BaseTool, FunctionTool
23
+ from llama_index.core.llms.llm import LLM
24
+ except Exception:
25
+ ChatMessage = None
26
+ MessageRole = None
27
+ BaseTool = object # type: ignore
28
+ FunctionTool = None
29
+ LLM = object # type: ignore
30
+
31
+ OptionGetter = Callable[[str, str, Any], Any]
32
+
33
+
34
def make_option_getter(base_agent, preset: Optional[PresetItem]) -> OptionGetter:
    """Build a getter that reads a preset option with a fallback default.

    The returned callable resolves ``(section, key)`` through
    ``base_agent.get_option`` and treats ``None``/empty-string values — or
    any lookup error — as "unset", returning the supplied default instead.
    With no preset at all, the default is always returned.
    """
    def _get(section: str, key: str, default: Any = None) -> Any:
        if preset is None:
            return default
        try:
            value = base_agent.get_option(preset, section, key)
        except Exception:
            return default
        return default if value in (None, "") else value
    return _get
44
+
45
+
46
@dataclass
class NodeRuntime:
    """Resolved per-node runtime settings for a flow agent node."""
    model: ModelItem              # effective model (per-node override or flow default)
    instructions: str             # system prompt resolved for this node (may be "")
    role: Optional[str]           # optional routing role hint, None when unset
    allow_local_tools: bool       # whether local (function) tools are enabled
    allow_remote_tools: bool      # whether remote tools are enabled
53
+
54
+
55
def resolve_node_runtime(
    *,
    window,
    node,
    option_get: OptionGetter,
    default_model: ModelItem,
    base_prompt: Optional[str],
    schema_allow_local: Optional[bool],
    schema_allow_remote: Optional[bool],
    default_allow_local: bool,
    default_allow_remote: bool,
) -> NodeRuntime:
    """Resolve the effective runtime settings for one flow node.

    Precedence, per setting:
      - model: preset option -> default_model (any lookup error also falls back)
      - instructions: preset option -> node.instruction -> base_prompt -> ""
      - role: preset option -> node.role (schema) -> None (non-empty strings only)
      - allow_*_tools: preset option -> schema flag (when not None) -> default flag

    :param window: app window; used to resolve a per-node model by name
    :param node: schema node; must expose ``id``, may expose ``instruction``/``role``
    :param option_get: getter produced by ``make_option_getter``
    :return: a fully-populated NodeRuntime
    """
    model_name = option_get(node.id, "model", None)
    model_item: ModelItem = default_model
    try:
        if model_name:
            cand = window.core.models.get(model_name)
            if cand:
                model_item = cand
    except Exception:
        # unknown model name or registry failure: keep the flow default
        model_item = default_model

    prompt_opt = option_get(node.id, "prompt", None)
    instructions = (prompt_opt or getattr(node, "instruction", None) or base_prompt or "").strip()

    # Role resolve (optional): preset option wins over the schema-provided role
    role_opt = option_get(node.id, "role", None)
    role_from_schema = getattr(node, "role", None) if hasattr(node, "role") else None
    role: Optional[str] = None
    if isinstance(role_opt, str) and role_opt.strip():
        role = role_opt.strip()
    elif isinstance(role_from_schema, str) and role_from_schema.strip():
        role = role_from_schema.strip()

    # Tool flags: the schema value (when explicitly set) replaces the default
    # used as the option fallback; the preset option still has top priority.
    allow_local_tools = bool(
        option_get(
            node.id, "allow_local_tools",
            schema_allow_local if schema_allow_local is not None else default_allow_local
        )
    )
    allow_remote_tools = bool(
        option_get(
            node.id, "allow_remote_tools",
            schema_allow_remote if schema_allow_remote is not None else default_allow_remote
        )
    )

    return NodeRuntime(
        model=model_item,
        instructions=instructions,
        role=role,
        allow_local_tools=allow_local_tools,
        allow_remote_tools=allow_remote_tools,
    )
109
+
110
+
111
def sanitize_input_items(items: List[TResponseInputItem]) -> List[TResponseInputItem]:
    """Return a copy of *items* with volatile identifiers stripped.

    Removes top-level ``id``/``message_id`` keys and any ``id`` keys inside
    list-valued ``content`` parts. Non-dict entries (and non-dict content
    parts) pass through untouched. The input list is never mutated.
    """
    result: List[TResponseInputItem] = []
    for item in items or []:
        if not isinstance(item, dict):
            result.append(item)
            continue
        cleaned: Dict[str, Any] = dict(item)
        cleaned.pop("id", None)
        cleaned.pop("message_id", None)
        content = cleaned.get("content")
        if isinstance(content, list):
            cleaned["content"] = [
                {k: v for k, v in part.items() if k != "id"}
                if isinstance(part, dict) else part
                for part in content
            ]
        result.append(cleaned)
    return result
131
+
132
+
133
def content_to_str(content: Union[str, List[Dict[str, Any]], None]) -> str:
    """Flatten message content into plain text.

    Strings pass through unchanged; lists contribute the ``text`` field of
    each dict part, joined with newlines; anything else yields "".
    """
    if isinstance(content, str):
        return content
    if not isinstance(content, list):
        return ""
    texts = [
        str(part["text"])
        for part in content
        if isinstance(part, dict) and "text" in part
    ]
    return "\n".join(texts)
143
+
144
+
145
def to_li_chat_messages(items: List[TResponseInputItem]) -> List[ChatMessage]:
    """Convert generic input items into LlamaIndex ChatMessage objects.

    Items that are not dicts or have no extractable text are skipped;
    unknown roles default to USER. Returns an empty list when the
    LlamaIndex message types are unavailable.
    """
    if ChatMessage is None or MessageRole is None:
        return []
    role_map = {
        "user": MessageRole.USER,
        "assistant": MessageRole.ASSISTANT,
        "system": MessageRole.SYSTEM,
    }
    messages: List[ChatMessage] = []
    for item in items or []:
        if not isinstance(item, dict):
            continue
        text = content_to_str(item.get("content"))
        if not text:
            continue
        role_key = str(item.get("role", "")).lower()
        messages.append(
            ChatMessage(role=role_map.get(role_key, MessageRole.USER), content=text)
        )
    return messages
165
+
166
+
167
def single_user_msg(text: str) -> List[ChatMessage]:
    """Build a one-element history holding *text* as a USER message.

    Returns an empty list when LlamaIndex message types are unavailable.
    """
    li_available = ChatMessage is not None and MessageRole is not None
    if not li_available:
        return []
    return [ChatMessage(role=MessageRole.USER, content=text or "")]
171
+
172
+
173
def coerce_li_tools(function_tools: List[Any]) -> List[Any]:
    """Normalize a heterogeneous tool list into LlamaIndex tool objects.

    Accepts BaseTool instances (kept as-is), bare callables, and dicts with
    a ``fn``/``callable`` entry (both wrapped via FunctionTool). Entries that
    cannot be coerced, or whose conversion raises, are silently skipped.
    Returns [] when LlamaIndex is not installed (BaseTool fell back to
    ``object``).
    """
    if BaseTool is object:
        return []
    coerced: List[Any] = []
    for candidate in function_tools or []:
        try:
            if isinstance(candidate, BaseTool):
                coerced.append(candidate)
                continue
            if callable(candidate) and FunctionTool is not None:
                coerced.append(FunctionTool.from_defaults(candidate))
                continue
            if isinstance(candidate, dict):
                fn = candidate.get("fn") or candidate.get("callable")
                if callable(fn) and FunctionTool is not None:
                    coerced.append(FunctionTool.from_defaults(fn))
        except Exception:
            continue
    return coerced
190
+
191
+
192
def resolve_llm(window, node_model: ModelItem, base_llm: Any, stream: bool) -> Any:
    """Resolve the LLM for a node: per-node model wins, else the shared base.

    When *node_model* looks like a model item and the app exposes
    ``window.core.idx``, the index layer is asked for a (possibly streaming)
    LLM for that model; any failure silently falls back to *base_llm*.
    """
    try:
        if node_model and hasattr(node_model, "name"):
            if getattr(window.core, "idx", None):
                return window.core.idx.llm.get(node_model, stream=stream)
    except Exception:
        pass
    return base_llm
203
+
204
+
205
def patch_last_assistant_output(items: List[TResponseInputItem], text: str) -> List[TResponseInputItem]:
    """Replace the content of the most recent assistant message with *text*.

    Scans from the end for the last dict item whose role is "assistant" and
    swaps its content for a single output_text part, then re-sanitizes the
    list. When no assistant item exists the (shallow-copied) input is
    returned unchanged; an empty input is returned as-is.
    """
    if not items:
        return items
    updated = list(items)
    for pos in reversed(range(len(updated))):
        entry = updated[pos]
        if isinstance(entry, dict) and entry.get("role") == "assistant":
            updated[pos] = {
                "role": "assistant",
                "content": [{"type": "output_text", "text": text or ""}],
            }
            return sanitize_input_items(updated)
    return updated
219
+
220
def extract_agent_text(ret: Any) -> str:
    """
    Pull plain text out of assorted LlamaIndex agent return types.

    Preference order: ``ret.response.message.content`` (or ``.text``), then
    ``ret.response.text``, then ``str(ret.response)``. Returns without a
    ``response`` attribute are stringified directly. This deliberately avoids
    ``str(ret)`` on rich objects, which may prepend 'assistant:'.
    """
    try:
        response = getattr(ret, "response", None)
        if response is None:
            # Some agents return a plain string (or None)
            return str(ret or "")
        message = getattr(response, "message", None)
        if message is not None:
            # LLMMessage variants expose either .content or .text
            body = getattr(message, "content", None) or getattr(message, "text", None)
            if isinstance(body, str):
                return body
        plain = getattr(response, "text", None)
        if isinstance(plain, str):
            return plain
        # Fallback: stringify the response object itself
        return str(response)
    except Exception:
        return str(ret or "")
244
+
245
+
246
def strip_role_prefixes(text: str) -> str:
    """
    Drop a single leading 'assistant:'/'user:'/'system:' label (any case)
    that can leak into stringified model outputs; also trims whitespace.
    """
    if not text:
        return ""
    import re
    label = r"^\s*(assistant|user|system)\s*:\s*"
    return re.sub(label, "", text.strip(), flags=re.IGNORECASE)
@@ -0,0 +1,50 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from typing import Protocol, Optional, Any
14
+
15
+
16
class Logger(Protocol):
    """Minimal logger protocol used across the flow core.

    Structural typing only: any object exposing these four methods
    satisfies the protocol; extra keyword arguments are accepted and
    may be ignored by implementations.
    """
    def debug(self, msg: str, **kwargs: Any) -> None: ...
    def info(self, msg: str, **kwargs: Any) -> None: ...
    def warning(self, msg: str, **kwargs: Any) -> None: ...
    def error(self, msg: str, **kwargs: Any) -> None: ...
22
+
23
+
24
class NullLogger:
    """No-op logger.

    Satisfies the Logger protocol while discarding every message; used as
    the default when no external logger is supplied.
    """
    def debug(self, msg: str, **kwargs: Any) -> None: pass
    def info(self, msg: str, **kwargs: Any) -> None: pass
    def warning(self, msg: str, **kwargs: Any) -> None: pass
    def error(self, msg: str, **kwargs: Any) -> None: pass
30
+
31
+
32
class StdLogger:
    """Stdout logger with a configurable prefix.

    Drop-in implementation of the Logger protocol; can be swapped for an
    application-provided logger.
    """
    def __init__(self, prefix: str = "[flow]"):
        # Prepended to every emitted line, e.g. "[flow] INFO: msg".
        self.prefix = prefix

    def _fmt(self, level: str, msg: str) -> str:
        """Render one log line as '<prefix> <LEVEL>: <msg>'."""
        return f"{self.prefix} {level.upper()}: {msg}"

    def _emit(self, level: str, msg: str) -> None:
        """Format and write a single line to stdout."""
        print(self._fmt(level, msg))

    def debug(self, msg: str, **kwargs: Any) -> None:
        self._emit("debug", msg)

    def info(self, msg: str, **kwargs: Any) -> None:
        self._emit("info", msg)

    def warning(self, msg: str, **kwargs: Any) -> None:
        self._emit("warning", msg)

    def error(self, msg: str, **kwargs: Any) -> None:
        self._emit("error", msg)
@@ -0,0 +1,51 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from dataclasses import dataclass, field
14
+ from typing import List, Optional, Dict, Any
15
+
16
+ from agents import TResponseInputItem
17
+
18
+
19
@dataclass
class MemoryState:
    """Holds conversation items and last response id for a memory node."""
    mem_id: str                                            # id of the owning memory node
    items: List[TResponseInputItem] = field(default_factory=list)  # conversation history
    last_response_id: Optional[str] = None                 # id of the most recent response

    def is_empty(self) -> bool:
        """Return True when no conversation items are stored yet."""
        return not self.items

    def set_from(self, items: List[TResponseInputItem], last_response_id: Optional[str]) -> None:
        """Overwrite state with a defensive copy of *items* and the given id."""
        self.items = list(items or [])
        self.last_response_id = last_response_id

    def update_from_result(self, items: List[TResponseInputItem], last_response_id: Optional[str]) -> None:
        """Alias for set_from(); kept for call-site readability after a run."""
        self.set_from(items, last_response_id)
35
+
36
+
37
class MemoryManager:
    """Registry of MemoryState objects, one per memory node id."""
    def __init__(self) -> None:
        # mem_id -> MemoryState; states are created lazily on first access.
        self._mem: Dict[str, MemoryState] = {}

    def get(self, mem_id: str) -> MemoryState:
        """Return the state for *mem_id*, creating an empty one if missing."""
        state = self._mem.get(mem_id)
        if state is None:
            state = MemoryState(mem_id=mem_id)
            self._mem[mem_id] = state
        return state

    def set(self, mem_id: str, items: List[TResponseInputItem], last_response_id: Optional[str]) -> None:
        """Replace the stored conversation for *mem_id*."""
        self.get(mem_id).set_from(items, last_response_id)

    def snapshot(self) -> Dict[str, MemoryState]:
        """Shallow copy of the id -> state mapping (states themselves shared)."""
        return dict(self._mem)
@@ -0,0 +1,155 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.25 14:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ import json
14
+ import re
15
+ from dataclasses import dataclass
16
+ from typing import List, Optional, Tuple, Any, Dict
17
+
18
+
19
@dataclass
class RouteDecision:
    """Outcome of parsing a router agent's JSON reply."""
    route: Optional[str]  # next node id or "end"; None when parsing/validation failed
    content: str          # human-facing content to display
    raw: str              # raw model output
    valid: bool           # True when output matched the {route, content} schema
    error: Optional[str] = None  # failure description when valid is False
26
+
27
+
28
def build_router_instruction(
    agent_name: str,
    current_id: str,
    allowed_routes: List[str],
    friendly_map: Dict[str, Any],
) -> str:
    """
    Build the system instruction that forces a router agent to answer with a
    single JSON object of shape {"route": ..., "content": ...}.

    When a friendly_map entry for an allowed route is a dict carrying a
    non-empty "role" string, those roles are appended as an optional hint to
    improve routing; the input map itself is never modified.
    """
    allowed = ", ".join(allowed_routes)

    def _name_of(entry: Any, fallback: str) -> str:
        # Accept plain strings or dicts keyed by common name fields.
        if isinstance(entry, str):
            return entry
        if isinstance(entry, dict):
            return str(entry.get("name") or entry.get("title") or entry.get("label") or fallback)
        return fallback

    # Collect display names (always) and roles (only non-empty strings).
    friendly_names: Dict[str, str] = {}
    friendly_roles: Dict[str, str] = {}
    for rid in allowed_routes:
        entry = friendly_map.get(rid)
        friendly_names[rid] = _name_of(entry, rid)
        role = entry.get("role") if isinstance(entry, dict) else None
        if isinstance(role, str) and role.strip():
            friendly_roles[rid] = role.strip()

    # Base instruction
    instr = (
        "You are a routing-capable agent in a multi-agent flow.\n"
        f"Your id is: {current_id}, name: {agent_name}.\n"
        "You MUST respond ONLY with a single JSON object and nothing else.\n"
        "Schema:\n"
        '{\n'
        ' "route": "<ID of the next agent from allowed_routes OR the string \'end\'>",\n'
        ' "content": "<final response text for the user (or tool result)>"\n'
        '}\n'
        "Rules:\n"
        f"- allowed_routes: [{allowed}]\n"
        "- If you want to finish the flow, set route to \"end\".\n"
        "- content must contain the user-facing answer (you may include structured data as JSON or Markdown inside content).\n"
        "- Do NOT add any commentary outside of the JSON. No leading or trailing text.\n"
        "- If using tools, still return the final JSON with tool results summarized in content.\n"
        f"- Human-friendly route names: {json.dumps(friendly_names)}\n"
    )

    # Append roles only if any are present (optional).
    if friendly_roles:
        instr += f"- Human-friendly route roles (optional): {json.dumps(friendly_roles)}\n"

    return instr
90
+
91
+
92
+ def _extract_json_block(text: str) -> Optional[str]:
93
+ """
94
+ Extract JSON block from the text. Supports raw JSON or fenced ```json blocks.
95
+ """
96
+ # fenced block
97
+ m = re.search(r"```json\s*(\{.*?\})\s*```", text, flags=re.DOTALL | re.IGNORECASE)
98
+ if m:
99
+ return m.group(1).strip()
100
+
101
+ # first JSON object occurrence fallback
102
+ brace_idx = text.find("{")
103
+ if brace_idx != -1:
104
+ # naive attempt to find matching closing brace
105
+ snippet = text[brace_idx:]
106
+ # try to trim trailing content after last closing brace
107
+ last_close = snippet.rfind("}")
108
+ if last_close != -1:
109
+ return snippet[: last_close + 1].strip()
110
+ return None
111
+
112
+
113
def parse_route_output(raw_text: str, allowed_routes: List[str]) -> RouteDecision:
    """
    Parse router model output enforcing the {route, content} schema.

    A direct JSON parse of the whole text is attempted first; on failure an
    extracted JSON block is parsed instead. A valid decision requires route
    to be "end" (case-insensitive) or an exact member of *allowed_routes*;
    any other parsed route yields an invalid decision with an error, and
    malformed JSON passes the raw text through as content.
    """
    text = raw_text.strip()

    parsed: Optional[Any] = None
    try:
        parsed = json.loads(text)
    except Exception:
        block = _extract_json_block(text)
        if block:
            try:
                parsed = json.loads(block)
            except Exception:
                parsed = None

    if isinstance(parsed, dict):
        route = parsed.get("route")
        content = str(parsed.get("content", ""))
        if isinstance(route, str):
            # "end" terminates the flow regardless of case/whitespace
            if route.strip().lower() == "end":
                return RouteDecision(route="end", content=content, raw=raw_text, valid=True)
            # otherwise only an exact match against allowed ids is accepted
            if route in allowed_routes:
                return RouteDecision(route=route, content=content, raw=raw_text, valid=True)
        return RouteDecision(
            route=None,
            content=content,
            raw=raw_text,
            valid=False,
            error=f"Invalid or disallowed route: {route}",
        )

    # Not a valid JSON – fallback, pass through content as-is
    return RouteDecision(route=None, content=raw_text, raw=raw_text, valid=False, error="Malformed JSON")
@@ -0,0 +1,187 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ import re
14
+ import json
15
+ from typing import Optional, Tuple, Any
16
+
17
+ from pygpt_net.core.agents.bridge import ConnectionContext
18
+ from pygpt_net.item.ctx import CtxItem
19
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
20
+
21
+ from openai.types.responses import (
22
+ ResponseTextDeltaEvent,
23
+ ResponseCreatedEvent,
24
+ ResponseCompletedEvent,
25
+ )
26
+
27
+ from .logging import Logger, NullLogger
28
+
29
+
30
class DelayedRouterStreamer:
    """
    Delayed streaming for multi-output router agents.

    Tokens are accumulated silently through a StreamHandler (no UI flush);
    after the run completes, the caller reveals only the parsed `content`.
    """
    def __init__(self, window, bridge: ConnectionContext):
        self.window = window
        self.bridge = bridge
        self.handler = StreamHandler(window, bridge)
        self._last_response_id: Optional[str] = None

    def reset(self) -> None:
        """Clear the underlying handler, its buffer, and the last response id."""
        self.handler.reset()
        self.handler.buffer = ""
        self._last_response_id = None

    def handle_event(self, event: Any, ctx: CtxItem) -> Tuple[str, Optional[str]]:
        """Feed one event into the handler; return (buffer, last_response_id)."""
        _, resp_id = self.handler.handle(event, ctx, flush=False, buffer=True)
        if resp_id:
            self._last_response_id = resp_id
        return self.handler.buffer, self._last_response_id

    @property
    def buffer(self) -> str:
        """Raw text collected so far (never flushed to the UI)."""
        return self.handler.buffer

    @property
    def last_response_id(self) -> Optional[str]:
        """Id of the most recent response seen in the stream, if any."""
        return self._last_response_id
60
+
61
+
62
class RealtimeRouterStreamer:
    """
    Realtime streaming for multi-output router agents:
    - Detect `"content": "<...>"` in streamed JSON and emit decoded content incrementally to UI.
    - After completion, caller parses the final route from full buffer.

    The streamed text is raw JSON; this class incrementally locates the
    `content` string value, JSON-decodes only the newly arrived portion,
    and pushes the decoded suffix to the UI via the bridge.
    """
    # Locates the opening quote of the "content" value in the streamed JSON.
    CONTENT_PATTERN = re.compile(r'"content"\s*:\s*"')

    def __init__(
        self,
        window,
        bridge: ConnectionContext,
        handler: Optional[StreamHandler] = None,
        buffer_to_handler: bool = True,
        logger: Optional[Logger] = None,
    ):
        self.window = window
        self.bridge = bridge
        self.handler = handler                      # optional sink mirroring emitted content
        self.buffer_to_handler = buffer_to_handler  # mirror decoded content into handler buffer
        self.logger = logger or NullLogger()

        self._raw: str = ""                         # full raw JSON text received so far
        self._last_response_id: Optional[str] = None

        # Incremental-content parser state:
        self._content_started: bool = False   # saw the `"content": "` opener
        self._content_closed: bool = False    # saw the closing unescaped quote
        self._content_start_idx: int = -1     # index in _raw just past the opening quote
        self._content_raw: str = ""           # escaped content consumed so far
        self._content_decoded: str = ""       # JSON-decoded content emitted so far

    def reset(self) -> None:
        """Reset all stream and parser state for a fresh run."""
        self._raw = ""
        self._last_response_id = None
        self._content_started = False
        self._content_closed = False
        self._content_start_idx = -1
        self._content_raw = ""
        self._content_decoded = ""

    def handle_event(self, event: Any, ctx: CtxItem) -> None:
        """Consume one agent stream event, emitting decoded content deltas.

        Only raw_response_event wrappers are processed: created events record
        the response id, text deltas grow the raw buffer and drive the
        incremental content parser, completed events just log.
        """
        if event.type != "raw_response_event":
            return

        data = event.data
        if isinstance(data, ResponseCreatedEvent):
            self._last_response_id = data.response.id
            return

        if isinstance(data, ResponseTextDeltaEvent):
            delta = data.delta or ""
            if not delta:
                return
            prev_len = len(self._raw)
            self._raw += delta

            if not self._content_started:
                # Search the accumulated text for the content opener; it may
                # arrive split across several deltas.
                m = self.CONTENT_PATTERN.search(self._raw)
                if m:
                    self._content_started = True
                    self._content_start_idx = m.end()
                    self.logger.debug("[router-realtime] content field detected in stream.")
                    if len(self._raw) > self._content_start_idx:
                        self._process_new_content(ctx)
                return

            if self._content_started and not self._content_closed:
                self._process_new_content(ctx)
            return

        if isinstance(data, ResponseCompletedEvent):
            if self._content_started:
                self.logger.debug("[router-realtime] stream completed; final JSON will be parsed by runner.")
            return

    def _process_new_content(self, ctx: CtxItem) -> None:
        """Emit the newly arrived portion of the content value, if decodable.

        Works on the escaped JSON string: finds the (possibly absent) closing
        quote, diffs against what was already consumed, then JSON-decodes the
        full escaped prefix and emits only the decoded suffix. Decoding can
        fail mid-escape-sequence; in that case we wait for more tokens.
        """
        sub = self._raw[self._content_start_idx:]
        close_idx = self._find_unescaped_quote(sub)
        if close_idx is not None:
            content_portion = sub[:close_idx]
            self._content_closed = True
            self.logger.debug("[router-realtime] content field closed in stream.")
        else:
            content_portion = sub

        # Only the part beyond what was already consumed is new.
        new_raw_piece = content_portion[len(self._content_raw):]
        if not new_raw_piece:
            return

        self._content_raw += new_raw_piece

        try:
            # Re-wrap the escaped prefix as a JSON string literal to decode it.
            decoded_full: str = json.loads(f'"{self._content_raw}"')
            new_suffix = decoded_full[len(self._content_decoded):]
            if new_suffix:
                ctx.stream = new_suffix
                self.bridge.on_step(ctx, False)
                if self.handler is not None and self.buffer_to_handler:
                    self.handler.to_buffer(new_suffix)
            self._content_decoded = decoded_full
        except Exception:
            # wait for more tokens
            pass

    @staticmethod
    def _find_unescaped_quote(s: str) -> Optional[int]:
        """Return the index of the first quote in *s* not escaped by backslashes.

        A quote preceded by an even number of consecutive backslashes is a
        real (unescaped) string terminator; odd means it is escaped.
        """
        i = 0
        while i < len(s):
            if s[i] == '"':
                j = i - 1
                bs = 0
                while j >= 0 and s[j] == '\\':
                    bs += 1
                    j -= 1
                if bs % 2 == 0:
                    return i
            i += 1
        return None

    @property
    def buffer(self) -> str:
        """Full raw JSON text received so far."""
        return self._raw

    @property
    def last_response_id(self) -> Optional[str]:
        """Id of the most recent response seen in the stream, if any."""
        return self._last_response_id