pygpt-net 2.6.59__py3-none-any.whl → 2.6.60__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +4 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +9 -5
  4. pygpt_net/controller/__init__.py +1 -0
  5. pygpt_net/controller/presets/editor.py +442 -39
  6. pygpt_net/core/agents/custom/__init__.py +275 -0
  7. pygpt_net/core/agents/custom/debug.py +64 -0
  8. pygpt_net/core/agents/custom/factory.py +109 -0
  9. pygpt_net/core/agents/custom/graph.py +71 -0
  10. pygpt_net/core/agents/custom/llama_index/__init__.py +10 -0
  11. pygpt_net/core/agents/custom/llama_index/factory.py +89 -0
  12. pygpt_net/core/agents/custom/llama_index/router_streamer.py +106 -0
  13. pygpt_net/core/agents/custom/llama_index/runner.py +529 -0
  14. pygpt_net/core/agents/custom/llama_index/stream.py +56 -0
  15. pygpt_net/core/agents/custom/llama_index/utils.py +242 -0
  16. pygpt_net/core/agents/custom/logging.py +50 -0
  17. pygpt_net/core/agents/custom/memory.py +51 -0
  18. pygpt_net/core/agents/custom/router.py +116 -0
  19. pygpt_net/core/agents/custom/router_streamer.py +187 -0
  20. pygpt_net/core/agents/custom/runner.py +454 -0
  21. pygpt_net/core/agents/custom/schema.py +125 -0
  22. pygpt_net/core/agents/custom/utils.py +181 -0
  23. pygpt_net/core/agents/provider.py +72 -7
  24. pygpt_net/core/agents/runner.py +7 -4
  25. pygpt_net/core/agents/runners/helpers.py +1 -1
  26. pygpt_net/core/agents/runners/llama_workflow.py +3 -0
  27. pygpt_net/core/agents/runners/openai_workflow.py +8 -1
  28. pygpt_net/{ui/widget/builder → core/node_editor}/__init__.py +2 -2
  29. pygpt_net/core/{builder → node_editor}/graph.py +11 -218
  30. pygpt_net/core/node_editor/models.py +111 -0
  31. pygpt_net/core/node_editor/types.py +76 -0
  32. pygpt_net/core/node_editor/utils.py +17 -0
  33. pygpt_net/core/render/web/renderer.py +10 -8
  34. pygpt_net/data/config/config.json +3 -3
  35. pygpt_net/data/config/models.json +3 -3
  36. pygpt_net/data/locale/locale.en.ini +4 -4
  37. pygpt_net/item/agent.py +5 -1
  38. pygpt_net/item/preset.py +19 -1
  39. pygpt_net/provider/agents/base.py +33 -2
  40. pygpt_net/provider/agents/llama_index/flow_from_schema.py +92 -0
  41. pygpt_net/provider/agents/openai/flow_from_schema.py +96 -0
  42. pygpt_net/provider/core/agent/json_file.py +11 -5
  43. pygpt_net/tools/agent_builder/tool.py +217 -52
  44. pygpt_net/tools/agent_builder/ui/dialogs.py +119 -24
  45. pygpt_net/tools/agent_builder/ui/list.py +37 -10
  46. pygpt_net/ui/dialog/preset.py +16 -1
  47. pygpt_net/ui/main.py +1 -1
  48. pygpt_net/{core/builder → ui/widget/node_editor}/__init__.py +2 -2
  49. pygpt_net/ui/widget/node_editor/command.py +373 -0
  50. pygpt_net/ui/widget/node_editor/editor.py +2038 -0
  51. pygpt_net/ui/widget/node_editor/item.py +492 -0
  52. pygpt_net/ui/widget/node_editor/node.py +1205 -0
  53. pygpt_net/ui/widget/node_editor/utils.py +17 -0
  54. pygpt_net/ui/widget/node_editor/view.py +247 -0
  55. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/METADATA +72 -2
  56. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/RECORD +59 -33
  57. pygpt_net/core/agents/custom.py +0 -150
  58. pygpt_net/ui/widget/builder/editor.py +0 -2001
  59. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/entry_points.txt +0 -0
pygpt_net/core/agents/custom/llama_index/router_streamer.py
@@ -0,0 +1,106 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.09.24 23:00:00 #
+ # ================================================== #
+
+ from __future__ import annotations
+ import json
+ import re
+ from typing import Optional
+
+
+ class DelayedRouterStreamerLI:
+     """Collect raw JSON stream (no UI)."""
+     def __init__(self):
+         self._raw = ""
+     def reset(self):
+         self._raw = ""
+     def handle_delta(self, delta: str):
+         self._raw += delta or ""
+     @property
+     def buffer(self) -> str:
+         return self._raw
+
+
+ class RealtimeRouterStreamerLI:
+     """
+     Stream only JSON 'content' string incrementally.
+     handle_delta(delta) -> returns decoded content suffix to emit (may be '').
+     """
+     CONTENT_PATTERN = re.compile(r'"content"\s*:\s*"')
+
+     def __init__(self):
+         self._raw = ""
+         self._content_started = False
+         self._content_closed = False
+         self._content_start_idx = -1
+         self._content_raw = ""
+         self._content_decoded = ""
+
+     def reset(self):
+         self._raw = ""
+         self._content_started = False
+         self._content_closed = False
+         self._content_start_idx = -1
+         self._content_raw = ""
+         self._content_decoded = ""
+
+     def handle_delta(self, delta: str) -> str:
+         if not delta:
+             return ""
+         self._raw += delta
+         if not self._content_started:
+             m = self.CONTENT_PATTERN.search(self._raw)
+             if m:
+                 self._content_started = True
+                 self._content_start_idx = m.end()
+             else:
+                 return ""
+         if self._content_started and not self._content_closed:
+             return self._process()
+         return ""
+
+     def _process(self) -> str:
+         sub = self._raw[self._content_start_idx:]
+         close_idx = self._find_unescaped_quote(sub)
+         if close_idx is not None:
+             portion = sub[:close_idx]
+             self._content_closed = True
+         else:
+             portion = sub
+         new_raw_piece = portion[len(self._content_raw):]
+         if not new_raw_piece:
+             return ""
+         self._content_raw += new_raw_piece
+         try:
+             decoded_full = json.loads(f'"{self._content_raw}"')
+             new_suffix = decoded_full[len(self._content_decoded):]
+             self._content_decoded = decoded_full
+             return new_suffix
+         except Exception:
+             return ""
+
+     @staticmethod
+     def _find_unescaped_quote(s: str) -> Optional[int]:
+         i = 0
+         while i < len(s):
+             if s[i] == '"':
+                 j = i - 1
+                 bs = 0
+                 while j >= 0 and s[j] == '\\':
+                     bs += 1
+                     j -= 1
+                 if bs % 2 == 0:
+                     return i
+             i += 1
+         return None
+
+     @property
+     def buffer(self) -> str:
+         return self._raw
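
A minimal usage sketch (not part of the diff) of RealtimeRouterStreamerLI, assuming the router emits JSON of the shape {"route": ..., "content": ...} in arbitrary chunks; only the decoded content value is surfaced, the JSON envelope never leaks (chunk boundaries below are illustrative):

from pygpt_net.core.agents.custom.llama_index.router_streamer import RealtimeRouterStreamerLI

streamer = RealtimeRouterStreamerLI()
chunks = ['{"route": "agent_2", "cont', 'ent": "Hel', 'lo\\nworld"}']
# handle_delta() returns only the newly decoded suffix of the "content" value
emitted = "".join(streamer.handle_delta(c) for c in chunks)
assert emitted == "Hello\nworld"           # JSON escapes decoded, closing quote excluded
assert streamer.buffer == "".join(chunks)  # raw JSON kept for later route parsing
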
pygpt_net/core/agents/custom/llama_index/runner.py
@@ -0,0 +1,529 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.09.24 23:00:00 #
+ # ================================================== #
+
+ from __future__ import annotations
+ import asyncio
+ from dataclasses import dataclass
+ from typing import Any, Dict, List, Optional
+ from time import perf_counter
+ from pydantic import ValidationError
+
+ from agents import TResponseInputItem
+ from pygpt_net.item.model import ModelItem
+ from pygpt_net.item.preset import PresetItem
+
+ # Shared (reused) components from OpenAI flow core
+ from ..logging import Logger, NullLogger
+ from ..schema import FlowSchema, AgentNode, parse_schema
+ from ..graph import FlowGraph, build_graph
+ from ..memory import MemoryManager
+ from ..router import parse_route_output
+ from ..debug import ellipsize
+
+ # LI-specific utils/factory
+ from .utils import (
+     sanitize_input_items,
+     NodeRuntime,
+     OptionGetter,
+     resolve_node_runtime,
+     to_li_chat_messages,
+     resolve_llm,
+     extract_agent_text,
+     strip_role_prefixes,
+ )
+ from .factory import AgentFactoryLI
+
+ # LlamaIndex Workflow primitives + events
+ from llama_index.core.workflow import Workflow, Context, StartEvent, StopEvent, Event, step
+ from llama_index.core.agent.workflow import AgentStream
+
+ # App-specific event used as visual separator between agents
+ from pygpt_net.provider.agents.llama_index.workflow.events import StepEvent
+
+ # Cooperative cancellation error used by main runner
+ from workflows.errors import WorkflowCancelledByUser
+
+
+ @dataclass
+ class DebugConfig:
+     """Config values controlled via get_option(..., 'debug', ...)."""
+     log_runtime: bool = True
+     log_routes: bool = True
+     log_inputs: bool = True
+     log_outputs: bool = True
+     log_tools: bool = True
+     log_llm: bool = True
+     log_schema: bool = True
+     log_memory_dump: bool = True
+     preview_chars: int = 280
+     step_timeout_sec: int = 0
+     timeit_agent_run: bool = True
+     event_echo: bool = True
+
+
+ class FlowStartEvent(StartEvent):
+     query: str
+
+
+ class FlowTickEvent(Event):
+     """Tick event drives one step of the flow loop."""
+     pass
+
+
+ class FlowStopEvent(StopEvent):
+     final_answer: str = ""
+
+
+ class DynamicFlowWorkflowLI(Workflow):
+     """
+     LlamaIndex Workflow mirroring OpenAI dynamic flow:
+     - Emits AgentStream header + AgentStream content per step (no token-by-token from single LI agents).
+     - Emits StepEvent between agents (your runner uses it to separate UI blocks).
+     - Routing via JSON {route, content} (UI sees only content; JSON never leaks).
+     - Memory/no-memory with baton policy:
+       * first agent w/o memory: initial messages (or query),
+       * next w/o memory: only last displayed content as single user message,
+       * agent with memory: if empty -> seed from last output (fallback initial); else use its history.
+       After step, memory is updated with [user baton, assistant content].
+     - Per-agent model/prompt/allow_* via get_option(node_id, ...).
+     """
+
+     def __init__(
+         self,
+         *,
+         window,
+         logger: Optional[Logger],
+         schema: List[Dict[str, Any]],
+         initial_messages: Optional[List[TResponseInputItem]],
+         preset: Optional[PresetItem],
+         default_model: ModelItem,
+         option_get: OptionGetter,
+         router_stream_mode: str,
+         allow_local_tools_default: bool,
+         allow_remote_tools_default: bool,
+         max_iterations: int,
+         llm: Any,
+         tools: List[Any],
+         stream: bool,
+         base_prompt: Optional[str],
+         timeout: int = 120,
+         verbose: bool = True,
+     ):
+         super().__init__(timeout=timeout, verbose=verbose)
+         self.window = window
+         self.logger = logger or NullLogger()
+
+         # Graph/schema
+         self.fs: FlowSchema = parse_schema(schema or [])
+         self.g: FlowGraph = build_graph(self.fs)
+         self.mem = MemoryManager()
+         self.factory = AgentFactoryLI(window, self.logger)
+
+         # Options
+         self.preset = preset
+         self.default_model = default_model
+         self.option_get = option_get
+         self.router_stream_mode = (router_stream_mode or "off").lower()  # LI: agents don't token-stream
+         self.allow_local_tools_default = allow_local_tools_default
+         self.allow_remote_tools_default = allow_remote_tools_default
+         self.max_iterations = int(max_iterations or 20)
+
+         # Base LLM/tools from app (per-node model override via resolve_llm)
+         self.llm_base = llm
+         self.tools_base = tools or []
+         self.stream = bool(stream)  # kept for symmetry with OpenAI; LI agents don't stream tokens
+         self.base_prompt = base_prompt or ""
+
+         # Runtime
+         self._on_stop = None
+         self._cancelled = False
+         self._current_ids: List[str] = []
+         self._steps = 0
+         self._first_dispatch_done = False
+         self._last_plain_output = ""
+         self._initial_items = sanitize_input_items(initial_messages or [])
+         self._initial_chat = []  # List[ChatMessage]
+
+         # Debug config (read once)
+         self.dbg = DebugConfig(
+             log_runtime=bool(self.option_get("debug", "log_runtime", True)),
+             log_routes=bool(self.option_get("debug", "log_routes", True)),
+             log_inputs=bool(self.option_get("debug", "log_inputs", True)),
+             log_outputs=bool(self.option_get("debug", "log_outputs", True)),
+             log_tools=bool(self.option_get("debug", "log_tools", True)),
+             log_llm=bool(self.option_get("debug", "log_llm", True)),
+             log_schema=bool(self.option_get("debug", "log_schema", True)),
+             log_memory_dump=bool(self.option_get("debug", "log_memory_dump", True)),
+             preview_chars=int(self.option_get("debug", "preview_chars", 280)),
+             step_timeout_sec=int(self.option_get("debug", "step_timeout_sec", 0)),
+             timeit_agent_run=bool(self.option_get("debug", "timeit_agent_run", True)),
+             event_echo=bool(self.option_get("debug", "event_echo", True)),
+         )
+
+         # One-shot schema/graph dump
+         if self.dbg.log_schema:
+             try:
+                 self._dump_schema_debug()
+             except Exception as e:
+                 self.logger.error(f"[debug] schema dump failed: {e}")
+
+     # ============== Debug helpers ==============
+
+     def _dump_schema_debug(self):
+         self.logger.info("[debug] ===== Schema/Graph =====")
+         self.logger.info(f"[debug] agents={len(self.fs.agents)} memories={len(self.fs.memories)} "
+                          f"starts={len(self.fs.starts)} ends={len(self.fs.ends)}")
+         self.logger.info(f"[debug] start_targets={self.g.start_targets} end_nodes={self.g.end_nodes}")
+         for nid, outs in self.g.adjacency.items():
+             self.logger.info(f"[debug] edge {nid} -> {outs}")
+         for aid, mem_id in self.g.agent_to_memory.items():
+             self.logger.info(f"[debug] agent_to_memory {aid} -> {mem_id}")
+         self.logger.info("[debug] ========================")
+
+     def _tool_names(self, tools: List[Any]) -> List[str]:
+         names = []
+         for t in tools or []:
+             n = getattr(getattr(t, "metadata", None), "name", None) or getattr(t, "name", None) or t.__class__.__name__
+             names.append(str(n))
+         return names
+
+     # ============== Internal helpers ==============
+
+     def _is_stopped(self) -> bool:
+         if self._cancelled:
+             return True
+         if callable(self._on_stop):
+             try:
+                 return bool(self._on_stop())
+             except Exception:
+                 return False
+         return False
+
+     def _friendly_map(self) -> Dict[str, str]:
+         return {aid: a.name or aid for aid, a in self.fs.agents.items()}
+
+     async def _emit(self, ctx: Context, ev: Any):
+         if self.dbg.event_echo:
+             self.logger.debug(f"[event] emit {ev.__class__.__name__}")
+         ctx.write_event_to_stream(ev)
+
+     async def _emit_agent_text(self, ctx: Context, text: str, agent_name: str = "Agent"):
+         """
+         Emit AgentStream(delta=text) robustly. If your env requires extra fields,
+         fall back to extended AgentStream like in your SupervisorWorkflow.
+         """
+         try:
+             if self.dbg.event_echo:
+                 self.logger.debug(f"[event] AgentStream delta len={len(text or '')}")
+             ctx.write_event_to_stream(AgentStream(delta=text or ""))
+         except ValidationError:
+             if self.dbg.event_echo:
+                 self.logger.debug("[event] AgentStream ValidationError -> using extended fields")
+             ctx.write_event_to_stream(
+                 AgentStream(
+                     delta=text or "",
+                     response=text or "",
+                     current_agent_name=agent_name or "Agent",
+                     tool_calls=[],
+                     raw={},
+                 )
+             )
+
+     async def _emit_header(self, ctx: Context, name: str):
+         if self.dbg.event_echo:
+             self.logger.debug(f"[event] header emit begin name='{name}'")
+         await self._emit_agent_text(ctx, f"\n\n**{name}**\n\n", agent_name=name)
+         if self.dbg.event_echo:
+             self.logger.debug("[event] header emit done")
+
+     async def _emit_step_sep(self, ctx: Context, node_id: str):
+         try:
+             await self._emit(ctx, StepEvent(name="next", index=self._steps, total=self.max_iterations, meta={"node": node_id}))
+         except Exception as e:
+             self.logger.error(f"[event] StepEvent emit failed: {e}")
+
+     def _resolve_node_runtime(self, node: AgentNode) -> NodeRuntime:
+         return resolve_node_runtime(
+             window=self.window,
+             node=node,
+             option_get=self.option_get,
+             default_model=self.default_model,
+             base_prompt=self.base_prompt,
+             schema_allow_local=node.allow_local_tools,
+             schema_allow_remote=node.allow_remote_tools,
+             default_allow_local=self.allow_local_tools_default,
+             default_allow_remote=self.allow_remote_tools_default,
+         )
+
+     def _build_input_for_node(self, node_id: str) -> tuple[str, list, str]:
+         """
+         Return (user_msg_text, chat_history_msgs, source_tag) using baton/memory policy.
+         """
+         mem_id = self.g.agent_to_memory.get(node_id)
+         mem_state = self.mem.get(mem_id) if mem_id else None
+
+         # memory with history
+         if mem_state and mem_state.items:
+             base_items = list(mem_state.items[:-1]) if len(mem_state.items) >= 1 else []
+             if self._last_plain_output.strip():
+                 user_msg_text = self._last_plain_output
+             else:
+                 last_ass = mem_state.items[-1] if isinstance(mem_state.items[-1], dict) else {}
+                 if isinstance(last_ass.get("content"), str):
+                     user_msg_text = last_ass.get("content", "")
+                 elif isinstance(last_ass.get("content"), list) and last_ass["content"]:
+                     user_msg_text = last_ass["content"][0].get("text", "")
+                 else:
+                     user_msg_text = ""
+             chat_history_msgs = to_li_chat_messages(base_items)
+             return user_msg_text, chat_history_msgs, "memory:existing_to_user_baton"
+
+         # memory empty
+         if mem_state:
+             if self._last_plain_output.strip():
+                 return self._last_plain_output, [], "memory:seed_from_last_output"
+             else:
+                 chat_hist = self._initial_chat[:-1] if self._initial_chat else []
+                 user_msg = self._initial_chat[-1].content if self._initial_chat else ""
+                 return user_msg, chat_hist, "memory:seed_from_initial"
+
+         # no memory
+         if not self._first_dispatch_done:
+             chat_hist = self._initial_chat[:-1] if self._initial_chat else []
+             user_msg = self._initial_chat[-1].content if self._initial_chat else ""
+             return user_msg, chat_hist, "no-mem:first_initial"
+         else:
+             user_msg = self._last_plain_output if self._last_plain_output.strip() else (
+                 self._initial_chat[-1].content if self._initial_chat else ""
+             )
+             return user_msg, [], "no-mem:last_output"
+
+     async def _update_memory_after_step(self, node_id: str, user_msg_text: str, display_text: str):
+         mem_id = self.g.agent_to_memory.get(node_id)
+         mem_state = self.mem.get(mem_id) if mem_id else None
+         if not mem_state:
+             if self.dbg.log_memory_dump:
+                 self.logger.debug(f"[memory] no memory for {node_id}; skip update.")
+             return
+         before_len = len(mem_state.items)
+         base_items = list(mem_state.items[:-1]) if mem_state.items else []
+         new_mem = (base_items or []) + [
+             {"role": "user", "content": user_msg_text},
+             {"role": "assistant", "content": [{"type": "output_text", "text": display_text}]},
+         ]
+         mem_state.set_from(new_mem, None)
+         after_len = len(mem_state.items)
+         if self.dbg.log_memory_dump:
+             self.logger.debug(
+                 f"[memory] {node_id} updated mem_id={mem_id} len {before_len} -> {after_len} "
+                 f"user='{ellipsize(user_msg_text, self.dbg.preview_chars)}' "
+                 f"assist='{ellipsize(display_text, self.dbg.preview_chars)}'"
+             )
+
+     # ============== Workflow steps ==============
+
+     def run(self, query: str, ctx: Optional[Context] = None, memory: Any = None, verbose: bool = False, on_stop=None):
+         """Entry point used by your LlamaWorkflow runner."""
+         self._on_stop = on_stop
+
+         # Build initial chat once
+         if self._initial_items:
+             self._initial_chat = to_li_chat_messages(self._initial_items)
+             if self.dbg.log_inputs:
+                 prev = ellipsize(str(self._initial_items), self.dbg.preview_chars)
+                 self.logger.debug(f"[debug] initial_items count={len(self._initial_items)} preview={prev}")
+         else:
+             from llama_index.core.llms import ChatMessage, MessageRole
+             self._initial_chat = [ChatMessage(role=MessageRole.USER, content=query or "")]
+             if self.dbg.log_inputs:
+                 self.logger.debug(f"[debug] initial from query='{ellipsize(query, self.dbg.preview_chars)}'")
+
+         # Pick START
+         if self.g.start_targets:
+             self._current_ids = [self.g.start_targets[0]]
+             self.logger.info(f"[step] START -> {self._current_ids[0]}")
+         else:
+             default_agent = self.g.pick_default_start_agent()
+             if default_agent is None:
+                 self.logger.error("[step] No START and no agents in schema.")
+                 return super().run(ctx=ctx, start_event=FlowStartEvent(query=query or ""))
+             self._current_ids = [default_agent]
+             self.logger.info(f"[step] START (auto lowest-id) -> {default_agent}")
+
+         self._steps = 0
+         self._first_dispatch_done = False
+         self._last_plain_output = ""
+
+         return super().run(ctx=ctx, start_event=FlowStartEvent(query=query or ""))
+
+     @step
+     async def start_step(self, ctx: Context, ev: FlowStartEvent) -> FlowTickEvent | FlowStopEvent:
+         self.logger.debug("[step] start_step")
+         if not self._current_ids:
+             await self._emit_agent_text(ctx, "Flow has no start and no agents.\n", agent_name="Flow")
+             return FlowStopEvent(final_answer="")
+         return FlowTickEvent()
+
+     @step
+     async def loop_step(self, ctx: Context, ev: FlowTickEvent) -> FlowTickEvent | FlowStopEvent:
+         # Cooperative stop
+         if self._is_stopped():
+             self.logger.info("[step] loop_step: stopped() -> cancelling")
+             raise WorkflowCancelledByUser()
+
+         # Termination conditions
+         if not self._current_ids or (self._steps >= self.max_iterations > 0):
+             self.logger.info(f"[step] loop_step: done (ids={self._current_ids}, steps={self._steps})")
+             return FlowStopEvent(final_answer=self._last_plain_output or "")
+
+         current_id = self._current_ids[0]
+         self._steps += 1
+         self.logger.debug(f"[step] loop_step#{self._steps} current_id={current_id}")
+
+         # Reached END?
+         if current_id in self.fs.ends:
+             self.logger.info(f"[step] loop_step: reached END {current_id}")
+             return FlowStopEvent(final_answer=self._last_plain_output or "")
+
+         # If unknown id: jump to END if any
+         if current_id not in self.fs.agents:
+             self.logger.warning(f"[step] loop_step: {current_id} not an agent; jumping to END if any.")
+             end_id = self.g.end_nodes[0] if self.g.end_nodes else None
+             self._current_ids = [end_id] if end_id else []
+             return FlowTickEvent() if self._current_ids else FlowStopEvent(final_answer=self._last_plain_output or "")
+
+         node: AgentNode = self.fs.agents[current_id]
+         if self._steps > 1:
+             await self._emit_step_sep(ctx, current_id)
+         await self._emit_header(ctx, node.name or current_id)
+
+         # Resolve runtime + per-node LLM/tools
+         node_rt = self._resolve_node_runtime(node)
+         if self.dbg.log_runtime:
+             self.logger.debug(
+                 f"[runtime] model={getattr(node_rt.model,'name',str(node_rt.model))} "
+                 f"allow_local={node_rt.allow_local_tools} allow_remote={node_rt.allow_remote_tools} "
+                 f"instructions='{ellipsize(node_rt.instructions, self.dbg.preview_chars)}'"
+             )
+
+         llm_node = resolve_llm(self.window, node_rt.model, self.llm_base, self.stream)
+         if self.dbg.log_llm:
+             self.logger.debug(f"[llm] using={llm_node.__class__.__name__} id={getattr(llm_node,'model',None) or getattr(llm_node,'_model',None)}")
+
+         tools_node = self.tools_base if (node_rt.allow_local_tools or node_rt.allow_remote_tools) else []
+         if self.dbg.log_tools:
+             self.logger.debug(f"[tools] count={len(tools_node)} names={self._tool_names(tools_node)}")
+
+         # Build input (baton/memory)
+         user_msg_text, chat_history_msgs, src = self._build_input_for_node(current_id)
+         if self.dbg.log_inputs:
+             self.logger.debug(
+                 f"[input] src={src} chat_hist={len(chat_history_msgs)} "
+                 f"user='{ellipsize(user_msg_text, self.dbg.preview_chars)}'"
+             )
+
+         # Build agent (chat_history/max_iterations in ctor – best practice)
+         built = self.factory.build(
+             node=node,
+             node_runtime=node_rt,
+             llm=llm_node,
+             tools=tools_node,
+             friendly_map=self._friendly_map(),
+             chat_history=chat_history_msgs,
+             max_iterations=self.max_iterations,
+         )
+         agent = built.instance
+         multi_output = built.multi_output
+         allowed_routes = built.allowed_routes
+
+         if self.dbg.log_routes:
+             self.logger.debug(f"[routing] multi_output={multi_output} routes={allowed_routes} mode={self.router_stream_mode}")
+
+         display_text = ""
+         next_id: Optional[str] = None
+
+         # Execute (single LI agent doesn't token-stream; Workflow emits blocks)
+         try:
+             t0 = perf_counter()
+             if self.dbg.step_timeout_sec > 0:
+                 ret = await asyncio.wait_for(agent.run(user_msg=user_msg_text), timeout=self.dbg.step_timeout_sec)
+             else:
+                 ret = await agent.run(user_msg=user_msg_text)
+             dt_ms = (perf_counter() - t0) * 1000.0
+             if self.dbg.timeit_agent_run:
+                 self.logger.debug(f"[time] agent.run took {dt_ms:.1f} ms")
+         except asyncio.TimeoutError:
+             self.logger.error(f"[error] agent.run timeout after {self.dbg.step_timeout_sec}s on node={current_id}")
+             ret = None
+         except Exception as e:
+             self.logger.error(f"[error] agent.run failed on node={current_id}: {e}")
+             ret = None
+
+         # Extract and sanitize text
+         raw_text = extract_agent_text(ret) if ret is not None else ""
+         raw_text_clean = strip_role_prefixes(raw_text)
+
+         if self.dbg.log_outputs:
+             self.logger.debug(f"[out.raw] len={len(raw_text)} preview='{ellipsize(raw_text, self.dbg.preview_chars)}'")
+             self.logger.debug(f"[out.cln] len={len(raw_text_clean)} preview='{ellipsize(raw_text_clean, self.dbg.preview_chars)}'")
+
+         if multi_output:
+             decision = parse_route_output(raw_text_clean or "", allowed_routes)
+             display_text = decision.content or ""
+             if display_text:
+                 await self._emit_agent_text(ctx, display_text, agent_name=(node.name or current_id))
+             if self.dbg.log_memory_dump:
+                 self.logger.debug(f"[mem.prep] node={current_id} save user='{ellipsize(user_msg_text, self.dbg.preview_chars)}' "
+                                   f"assist='{ellipsize(display_text, self.dbg.preview_chars)}'")
+             await self._update_memory_after_step(current_id, user_msg_text, display_text)
+             next_id = decision.route if decision.valid else (allowed_routes[0] if allowed_routes else None)
+             if self.dbg.log_routes:
+                 self.logger.debug(
+                     f"[route] node={current_id} valid={decision.valid} next={next_id} content_len={len(display_text)}"
+                 )
+         else:
+             display_text = raw_text_clean or ""
+             if display_text:
+                 await self._emit_agent_text(ctx, display_text, agent_name=(node.name or current_id))
+             if self.dbg.log_memory_dump:
+                 self.logger.debug(f"[mem.prep] node={current_id} save user='{ellipsize(user_msg_text, self.dbg.preview_chars)}' "
+                                   f"assist='{ellipsize(display_text, self.dbg.preview_chars)}'")
+             await self._update_memory_after_step(current_id, user_msg_text, display_text)
+             outs = self.g.get_next(current_id)
+             next_id = outs[0] if outs else self.g.first_connected_end(current_id)
+             if self.dbg.log_routes:
+                 self.logger.debug(f"[route] node={current_id} next={next_id} (first edge or END)")
+
+         if self.dbg.log_outputs:
+             self.logger.debug(f"[output] preview='{ellipsize(display_text, self.dbg.preview_chars)}'")
+
+         # Update baton and next
+         self._first_dispatch_done = True
+         self._last_plain_output = display_text
+
+         if isinstance(next_id, str) and next_id.lower() == "end":
+             end_id = self.g.first_connected_end(current_id) or (self.g.end_nodes[0] if self.g.end_nodes else None)
+             self._current_ids = [end_id] if end_id else []
+             if not self._current_ids:
+                 self.logger.info("[step] next=END (no concrete END node); stopping now.")
+                 return FlowStopEvent(final_answer=self._last_plain_output or "")
+         elif next_id:
+             self._current_ids = [next_id]
+         else:
+             end_id = self.g.first_connected_end(current_id)
+             if end_id:
+                 self._current_ids = [end_id]
+             else:
+                 self._current_ids = []
+                 self.logger.info("[step] no next, no END; stopping.")
+                 return FlowStopEvent(final_answer=self._last_plain_output or "")
+
+         return FlowTickEvent()
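
A sketch (under assumptions, not from the diff) of how this workflow might be driven from async code: the wf instance is assumed to be constructed elsewhere by the app with its window/schema/llm/tools wiring, and the consumption loop uses the stock LlamaIndex WorkflowHandler.stream_events() API. Note that option_get is expected to have the shape option_get(section_or_node_id, key, default), judging by the DebugConfig reads above.

import asyncio
from llama_index.core.agent.workflow import AgentStream

async def drive(wf: "DynamicFlowWorkflowLI") -> None:
    # run() seeds the initial chat, picks the START node,
    # then defers to Workflow.run(), which returns a handler
    handler = wf.run(query="Summarize today's findings", on_stop=lambda: False)
    async for ev in handler.stream_events():
        if isinstance(ev, AgentStream):
            print(ev.delta, end="")  # per-agent header + content blocks
        # StepEvent instances arrive between agents and can delimit UI blocks
    await handler  # completes once a FlowStopEvent is produced

# asyncio.run(drive(wf))  # wf: a fully wired DynamicFlowWorkflowLI
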
pygpt_net/core/agents/custom/llama_index/stream.py
@@ -0,0 +1,56 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.09.24 23:00:00 #
+ # ================================================== #
+
+ from __future__ import annotations
+ import io
+ from typing import Tuple
+
+ from pygpt_net.core.agents.bridge import ConnectionContext
+ from pygpt_net.item.ctx import CtxItem
+
+
+ class LIStreamHandler:
+     """
+     Minimal streaming helper for LlamaIndex events, API-compatible with your usage:
+     - reset(), new(), to_buffer(text)
+     - begin flag
+     - handle_token(delta, ctx) -> returns (buffer, None)
+     """
+     def __init__(self, bridge: ConnectionContext):
+         self.bridge = bridge
+         self._buf = io.StringIO()
+         self.begin = True
+
+     @property
+     def buffer(self) -> str:
+         return self._buf.getvalue()
+
+     def reset(self):
+         self._buf = io.StringIO()
+
+     def new(self):
+         self.reset()
+         self.begin = True
+
+     def to_buffer(self, text: str):
+         if text:
+             self._buf.write(text)
+
+     def handle_token(self, delta: str, ctx: CtxItem, flush: bool = True, buffer: bool = True) -> Tuple[str, None]:
+         if not delta:
+             return self.buffer, None
+         ctx.stream = delta
+         if flush:
+             self.bridge.on_step(ctx, self.begin)
+         if buffer:
+             self._buf.write(delta)
+         self.begin = False
+         return self.buffer, None
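
A duck-typed sketch (assumptions, not part of the package) showing the handle_token() contract; the stubs below stand in for ConnectionContext, of which only on_step(ctx, begin) is called, and for CtxItem, of which only the stream attribute is touched:

from pygpt_net.core.agents.custom.llama_index.stream import LIStreamHandler

class _StubBridge:
    def on_step(self, ctx, begin: bool):
        # the real bridge forwards ctx.stream to the UI; begin marks the first chunk
        print(("[begin] " if begin else "") + ctx.stream, end="")

class _StubCtx:
    stream = ""  # handle_token() assigns the current delta here

handler = LIStreamHandler(bridge=_StubBridge())  # duck-typed stand-in
ctx = _StubCtx()
for delta in ("Hel", "lo"):
    buf, _ = handler.handle_token(delta, ctx)  # flush=True emits, buffer=True accumulates
assert handler.buffer == "Hello"
handler.new()  # clears the buffer and re-arms the begin flag for the next turn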