pygpt-net 2.6.59__py3-none-any.whl → 2.6.61__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (91)
  1. pygpt_net/CHANGELOG.txt +11 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +9 -5
  4. pygpt_net/controller/__init__.py +1 -0
  5. pygpt_net/controller/chat/common.py +115 -6
  6. pygpt_net/controller/chat/input.py +4 -1
  7. pygpt_net/controller/presets/editor.py +442 -39
  8. pygpt_net/controller/presets/presets.py +121 -6
  9. pygpt_net/controller/settings/editor.py +0 -15
  10. pygpt_net/controller/theme/markdown.py +2 -5
  11. pygpt_net/controller/ui/ui.py +4 -7
  12. pygpt_net/core/agents/custom/__init__.py +281 -0
  13. pygpt_net/core/agents/custom/debug.py +64 -0
  14. pygpt_net/core/agents/custom/factory.py +109 -0
  15. pygpt_net/core/agents/custom/graph.py +71 -0
  16. pygpt_net/core/agents/custom/llama_index/__init__.py +10 -0
  17. pygpt_net/core/agents/custom/llama_index/factory.py +100 -0
  18. pygpt_net/core/agents/custom/llama_index/router_streamer.py +106 -0
  19. pygpt_net/core/agents/custom/llama_index/runner.py +562 -0
  20. pygpt_net/core/agents/custom/llama_index/stream.py +56 -0
  21. pygpt_net/core/agents/custom/llama_index/utils.py +253 -0
  22. pygpt_net/core/agents/custom/logging.py +50 -0
  23. pygpt_net/core/agents/custom/memory.py +51 -0
  24. pygpt_net/core/agents/custom/router.py +155 -0
  25. pygpt_net/core/agents/custom/router_streamer.py +187 -0
  26. pygpt_net/core/agents/custom/runner.py +455 -0
  27. pygpt_net/core/agents/custom/schema.py +127 -0
  28. pygpt_net/core/agents/custom/utils.py +193 -0
  29. pygpt_net/core/agents/provider.py +72 -7
  30. pygpt_net/core/agents/runner.py +7 -4
  31. pygpt_net/core/agents/runners/helpers.py +1 -1
  32. pygpt_net/core/agents/runners/llama_workflow.py +3 -0
  33. pygpt_net/core/agents/runners/openai_workflow.py +8 -1
  34. pygpt_net/core/db/viewer.py +11 -5
  35. pygpt_net/{ui/widget/builder → core/node_editor}/__init__.py +2 -2
  36. pygpt_net/core/{builder → node_editor}/graph.py +28 -226
  37. pygpt_net/core/node_editor/models.py +118 -0
  38. pygpt_net/core/node_editor/types.py +78 -0
  39. pygpt_net/core/node_editor/utils.py +17 -0
  40. pygpt_net/core/presets/presets.py +216 -29
  41. pygpt_net/core/render/markdown/parser.py +0 -2
  42. pygpt_net/core/render/web/renderer.py +10 -8
  43. pygpt_net/data/config/config.json +5 -6
  44. pygpt_net/data/config/models.json +3 -3
  45. pygpt_net/data/config/settings.json +2 -38
  46. pygpt_net/data/locale/locale.de.ini +64 -1
  47. pygpt_net/data/locale/locale.en.ini +63 -4
  48. pygpt_net/data/locale/locale.es.ini +64 -1
  49. pygpt_net/data/locale/locale.fr.ini +64 -1
  50. pygpt_net/data/locale/locale.it.ini +64 -1
  51. pygpt_net/data/locale/locale.pl.ini +65 -2
  52. pygpt_net/data/locale/locale.uk.ini +64 -1
  53. pygpt_net/data/locale/locale.zh.ini +64 -1
  54. pygpt_net/data/locale/plugin.cmd_system.en.ini +62 -66
  55. pygpt_net/item/agent.py +5 -1
  56. pygpt_net/item/preset.py +19 -1
  57. pygpt_net/provider/agents/base.py +33 -2
  58. pygpt_net/provider/agents/llama_index/flow_from_schema.py +92 -0
  59. pygpt_net/provider/agents/openai/flow_from_schema.py +96 -0
  60. pygpt_net/provider/core/agent/json_file.py +11 -5
  61. pygpt_net/provider/core/config/patch.py +10 -1
  62. pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -6
  63. pygpt_net/tools/agent_builder/tool.py +233 -52
  64. pygpt_net/tools/agent_builder/ui/dialogs.py +172 -28
  65. pygpt_net/tools/agent_builder/ui/list.py +37 -10
  66. pygpt_net/ui/__init__.py +2 -4
  67. pygpt_net/ui/dialog/about.py +58 -38
  68. pygpt_net/ui/dialog/db.py +142 -3
  69. pygpt_net/ui/dialog/preset.py +62 -8
  70. pygpt_net/ui/layout/toolbox/presets.py +52 -16
  71. pygpt_net/ui/main.py +1 -1
  72. pygpt_net/ui/widget/dialog/db.py +0 -0
  73. pygpt_net/ui/widget/lists/preset.py +644 -60
  74. pygpt_net/{core/builder → ui/widget/node_editor}/__init__.py +2 -2
  75. pygpt_net/ui/widget/node_editor/command.py +373 -0
  76. pygpt_net/ui/widget/node_editor/config.py +157 -0
  77. pygpt_net/ui/widget/node_editor/editor.py +2070 -0
  78. pygpt_net/ui/widget/node_editor/item.py +493 -0
  79. pygpt_net/ui/widget/node_editor/node.py +1460 -0
  80. pygpt_net/ui/widget/node_editor/utils.py +17 -0
  81. pygpt_net/ui/widget/node_editor/view.py +364 -0
  82. pygpt_net/ui/widget/tabs/output.py +1 -1
  83. pygpt_net/ui/widget/textarea/input.py +2 -2
  84. pygpt_net/utils.py +114 -2
  85. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/METADATA +80 -93
  86. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/RECORD +88 -61
  87. pygpt_net/core/agents/custom.py +0 -150
  88. pygpt_net/ui/widget/builder/editor.py +0 -2001
  89. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/LICENSE +0 -0
  90. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/WHEEL +0 -0
  91. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/entry_points.txt +0 -0
pygpt_net/core/agents/custom/llama_index/runner.py (new file)
@@ -0,0 +1,562 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub: https://github.com/szczyglis-dev/py-gpt    #
+# MIT License                                        #
+# Created By : Marcin Szczygliński                   #
+# Updated Date: 2025.09.25 14:00:00                  #
+# ================================================== #
+
+from __future__ import annotations
+import asyncio
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional
+from time import perf_counter
+from pydantic import ValidationError
+
+from agents import TResponseInputItem
+from pygpt_net.item.model import ModelItem
+from pygpt_net.item.preset import PresetItem
+
+# Shared (reused) components from OpenAI flow core
+from ..logging import Logger, NullLogger
+from ..schema import FlowSchema, AgentNode, parse_schema
+from ..graph import FlowGraph, build_graph
+from ..memory import MemoryManager
+from ..router import parse_route_output
+from ..debug import ellipsize
+
+# LI-specific utils/factory
+from .utils import (
+    sanitize_input_items,
+    NodeRuntime,
+    OptionGetter,
+    resolve_node_runtime,
+    to_li_chat_messages,
+    resolve_llm,
+    extract_agent_text,
+    strip_role_prefixes,
+)
+from .factory import AgentFactoryLI
+
+# LlamaIndex Workflow primitives + events
+from llama_index.core.workflow import Workflow, Context, StartEvent, StopEvent, Event, step
+from llama_index.core.agent.workflow import AgentStream
+
+# App-specific event used as visual separator between agents
+from pygpt_net.provider.agents.llama_index.workflow.events import StepEvent
+
+# Cooperative cancellation error used by main runner
+from workflows.errors import WorkflowCancelledByUser
+
+
+@dataclass
+class DebugConfig:
+    """Config values controlled via get_option(..., 'debug', ...)."""
+    log_runtime: bool = True
+    log_routes: bool = True
+    log_inputs: bool = True
+    log_outputs: bool = True
+    log_tools: bool = True
+    log_llm: bool = True
+    log_schema: bool = True
+    log_memory_dump: bool = True
+    preview_chars: int = 280
+    step_timeout_sec: int = 0
+    timeit_agent_run: bool = True
+    event_echo: bool = True
+
+
+class FlowStartEvent(StartEvent):
+    query: str
+
+
+class FlowTickEvent(Event):
+    """Tick event that drives one step of the flow loop."""
+    pass
+
+
+class FlowStopEvent(StopEvent):
+    final_answer: str = ""
+
+
+class DynamicFlowWorkflowLI(Workflow):
+    """
+    LlamaIndex Workflow mirroring the OpenAI dynamic flow:
+    - Emits an AgentStream header + AgentStream content per step (no token-by-token streaming from single LI agents).
+    - Emits a StepEvent between agents (the runner uses it to separate UI blocks).
+    - Routing via JSON {route, content} (the UI sees only content; the JSON never leaks).
+    - Memory/no-memory with baton policy:
+        * first agent w/o memory: initial messages (or query),
+        * next w/o memory: only the last displayed content, as a single user message,
+        * agent with memory: if empty -> seed from last output (fallback: initial); else use its history.
+      After each step, memory is updated with [user baton, assistant content].
+    - Per-agent model/prompt/allow_* via get_option(node_id, ...).
+    """
+
+    def __init__(
+        self,
+        *,
+        window,
+        logger: Optional[Logger],
+        schema: List[Dict[str, Any]],
+        initial_messages: Optional[List[TResponseInputItem]],
+        preset: Optional[PresetItem],
+        default_model: ModelItem,
+        option_get: OptionGetter,
+        router_stream_mode: str,
+        allow_local_tools_default: bool,
+        allow_remote_tools_default: bool,
+        max_iterations: int,
+        llm: Any,
+        tools: List[Any],
+        stream: bool,
+        base_prompt: Optional[str],
+        timeout: int = 120,
+        verbose: bool = True,
+    ):
+        super().__init__(timeout=timeout, verbose=verbose)
+        self.window = window
+        self.logger = logger or NullLogger()
+
+        # Graph/schema
+        self.fs: FlowSchema = parse_schema(schema or [])
+        self.g: FlowGraph = build_graph(self.fs)
+        self.mem = MemoryManager()
+        self.factory = AgentFactoryLI(window, self.logger)
+
+        # Options
+        self.preset = preset
+        self.default_model = default_model
+        self.option_get = option_get
+        self.router_stream_mode = (router_stream_mode or "off").lower()  # LI: agents don't token-stream
+        self.allow_local_tools_default = allow_local_tools_default
+        self.allow_remote_tools_default = allow_remote_tools_default
+        self.max_iterations = int(max_iterations or 20)
+
+        # Base LLM/tools from app (per-node model override via resolve_llm)
+        self.llm_base = llm
+        self.tools_base = tools or []
+        self.stream = bool(stream)  # kept for symmetry with OpenAI; LI agents don't stream tokens
+        self.base_prompt = base_prompt or ""
+
+        # Runtime
+        self._on_stop = None
+        self._cancelled = False
+        self._current_ids: List[str] = []
+        self._steps = 0
+        self._first_dispatch_done = False
+        self._last_plain_output = ""
+        self._initial_items = sanitize_input_items(initial_messages or [])
+        self._initial_chat = []  # List[ChatMessage]
+
+        # Debug config (read once)
+        self.dbg = DebugConfig(
+            log_runtime=bool(self.option_get("debug", "log_runtime", True)),
+            log_routes=bool(self.option_get("debug", "log_routes", True)),
+            log_inputs=bool(self.option_get("debug", "log_inputs", True)),
+            log_outputs=bool(self.option_get("debug", "log_outputs", True)),
+            log_tools=bool(self.option_get("debug", "log_tools", True)),
+            log_llm=bool(self.option_get("debug", "log_llm", True)),
+            log_schema=bool(self.option_get("debug", "log_schema", True)),
+            log_memory_dump=bool(self.option_get("debug", "log_memory_dump", True)),
+            preview_chars=int(self.option_get("debug", "preview_chars", 280)),
+            step_timeout_sec=int(self.option_get("debug", "step_timeout_sec", 0)),
+            timeit_agent_run=bool(self.option_get("debug", "timeit_agent_run", True)),
+            event_echo=bool(self.option_get("debug", "event_echo", True)),
+        )
+
+        # One-shot schema/graph dump
+        if self.dbg.log_schema:
+            try:
+                self._dump_schema_debug()
+            except Exception as e:
+                self.logger.error(f"[debug] schema dump failed: {e}")
+
+    # ============== Debug helpers ==============
+
+    def _dump_schema_debug(self):
+        self.logger.info("[debug] ===== Schema/Graph =====")
+        self.logger.info(f"[debug] agents={len(self.fs.agents)} memories={len(self.fs.memories)} "
+                         f"starts={len(self.fs.starts)} ends={len(self.fs.ends)}")
+        self.logger.info(f"[debug] start_targets={self.g.start_targets} end_nodes={self.g.end_nodes}")
+        for nid, outs in self.g.adjacency.items():
+            self.logger.info(f"[debug] edge {nid} -> {outs}")
+        for aid, mem_id in self.g.agent_to_memory.items():
+            self.logger.info(f"[debug] agent_to_memory {aid} -> {mem_id}")
+        self.logger.info("[debug] ========================")
+
+    def _tool_names(self, tools: List[Any]) -> List[str]:
+        names = []
+        for t in tools or []:
+            n = getattr(getattr(t, "metadata", None), "name", None) or getattr(t, "name", None) or t.__class__.__name__
+            names.append(str(n))
+        return names
+
+    # ============== Internal helpers ==============
+
+    def _is_stopped(self) -> bool:
+        if self._cancelled:
+            return True
+        if callable(self._on_stop):
+            try:
+                return bool(self._on_stop())
+            except Exception:
+                return False
+        return False
+
+    def _friendly_map(self) -> Dict[str, str]:
+        return {aid: a.name or aid for aid, a in self.fs.agents.items()}
+
+    def _friendly_map_for_routes(self, route_ids: List[str]) -> Dict[str, Any]:
+        """
+        Build a friendly map for the given route ids:
+        - Always include a human-friendly name.
+        - Include a role only if provided in preset options or schema and non-empty.
+        """
+        out: Dict[str, Any] = {}
+        for rid in route_ids or []:
+            a = self.fs.agents.get(rid)
+            name = (a.name if a and a.name else rid)
+            # Prefer the preset option, then the schema role
+            role_opt = None
+            try:
+                role_opt = self.option_get(rid, "role", None)
+            except Exception:
+                role_opt = None
+            role_schema = getattr(a, "role", None) if a is not None else None
+            role_val = None
+            if isinstance(role_opt, str) and role_opt.strip():
+                role_val = role_opt.strip()
+            elif isinstance(role_schema, str) and role_schema.strip():
+                role_val = role_schema.strip()
+            item = {"name": name}
+            if role_val:
+                item["role"] = role_val
+            out[rid] = item
+        return out
+
+    async def _emit(self, ctx: Context, ev: Any):
+        if self.dbg.event_echo:
+            self.logger.debug(f"[event] emit {ev.__class__.__name__}")
+        ctx.write_event_to_stream(ev)
+
+    async def _emit_agent_text(self, ctx: Context, text: str, agent_name: str = "Agent"):
+        """
+        Emit AgentStream(delta=text) robustly. If the environment requires extra fields,
+        fall back to the extended AgentStream (as in SupervisorWorkflow).
+        """
+        try:
+            if self.dbg.event_echo:
+                self.logger.debug(f"[event] AgentStream delta len={len(text or '')}")
+            ctx.write_event_to_stream(AgentStream(delta=text or ""))
+        except ValidationError:
+            if self.dbg.event_echo:
+                self.logger.debug("[event] AgentStream ValidationError -> using extended fields")
+            ctx.write_event_to_stream(
+                AgentStream(
+                    delta=text or "",
+                    response=text or "",
+                    current_agent_name=agent_name or "Agent",
+                    tool_calls=[],
+                    raw={},
+                )
+            )
+
+    async def _emit_header(self, ctx: Context, name: str):
+        if self.dbg.event_echo:
+            self.logger.debug(f"[event] header emit begin name='{name}'")
+        await self._emit_agent_text(ctx, f"\n\n**{name}**\n\n", agent_name=name)
+        if self.dbg.event_echo:
+            self.logger.debug("[event] header emit done")
+
+    async def _emit_step_sep(self, ctx: Context, node_id: str):
+        try:
+            await self._emit(ctx, StepEvent(name="next", index=self._steps, total=self.max_iterations, meta={"node": node_id}))
+        except Exception as e:
+            self.logger.error(f"[event] StepEvent emit failed: {e}")
+
+    def _resolve_node_runtime(self, node: AgentNode) -> NodeRuntime:
+        return resolve_node_runtime(
+            window=self.window,
+            node=node,
+            option_get=self.option_get,
+            default_model=self.default_model,
+            base_prompt=self.base_prompt,
+            schema_allow_local=node.allow_local_tools,
+            schema_allow_remote=node.allow_remote_tools,
+            default_allow_local=self.allow_local_tools_default,
+            default_allow_remote=self.allow_remote_tools_default,
+        )
+
+    def _build_input_for_node(self, node_id: str) -> tuple[str, list, str]:
+        """
+        Return (user_msg_text, chat_history_msgs, source_tag) using the baton/memory policy.
+        """
+        mem_id = self.g.agent_to_memory.get(node_id)
+        mem_state = self.mem.get(mem_id) if mem_id else None
+
+        # memory with history
+        if mem_state and mem_state.items:
+            base_items = list(mem_state.items[:-1]) if len(mem_state.items) >= 1 else []
+            if self._last_plain_output.strip():
+                user_msg_text = self._last_plain_output
+            else:
+                last_ass = mem_state.items[-1] if isinstance(mem_state.items[-1], dict) else {}
+                if isinstance(last_ass.get("content"), str):
+                    user_msg_text = last_ass.get("content", "")
+                elif isinstance(last_ass.get("content"), list) and last_ass["content"]:
+                    user_msg_text = last_ass["content"][0].get("text", "")
+                else:
+                    user_msg_text = ""
+            chat_history_msgs = to_li_chat_messages(base_items)
+            return user_msg_text, chat_history_msgs, "memory:existing_to_user_baton"
+
+        # memory empty
+        if mem_state:
+            if self._last_plain_output.strip():
+                return self._last_plain_output, [], "memory:seed_from_last_output"
+            else:
+                chat_hist = self._initial_chat[:-1] if self._initial_chat else []
+                user_msg = self._initial_chat[-1].content if self._initial_chat else ""
+                return user_msg, chat_hist, "memory:seed_from_initial"
+
+        # no memory
+        if not self._first_dispatch_done:
+            chat_hist = self._initial_chat[:-1] if self._initial_chat else []
+            user_msg = self._initial_chat[-1].content if self._initial_chat else ""
+            return user_msg, chat_hist, "no-mem:first_initial"
+        else:
+            user_msg = self._last_plain_output if self._last_plain_output.strip() else (
+                self._initial_chat[-1].content if self._initial_chat else ""
+            )
+            return user_msg, [], "no-mem:last_output"
+
+    async def _update_memory_after_step(self, node_id: str, user_msg_text: str, display_text: str):
+        mem_id = self.g.agent_to_memory.get(node_id)
+        mem_state = self.mem.get(mem_id) if mem_id else None
+        if not mem_state:
+            if self.dbg.log_memory_dump:
+                self.logger.debug(f"[memory] no memory for {node_id}; skip update.")
+            return
+        before_len = len(mem_state.items)
+        base_items = list(mem_state.items[:-1]) if mem_state.items else []
+        new_mem = (base_items or []) + [
+            {"role": "user", "content": user_msg_text},
+            {"role": "assistant", "content": [{"type": "output_text", "text": display_text}]},
+        ]
+        mem_state.set_from(new_mem, None)
+        after_len = len(mem_state.items)
+        if self.dbg.log_memory_dump:
+            self.logger.debug(
+                f"[memory] {node_id} updated mem_id={mem_id} len {before_len} -> {after_len} "
+                f"user='{ellipsize(user_msg_text, self.dbg.preview_chars)}' "
+                f"assist='{ellipsize(display_text, self.dbg.preview_chars)}'"
+            )
+
+    # ============== Workflow steps ==============
+
+    def run(self, query: str, ctx: Optional[Context] = None, memory: Any = None, verbose: bool = False, on_stop=None):
+        """Entry point used by the LlamaWorkflow runner."""
+        self._on_stop = on_stop
+
+        # Build initial chat once
+        if self._initial_items:
+            self._initial_chat = to_li_chat_messages(self._initial_items)
+            if self.dbg.log_inputs:
+                prev = ellipsize(str(self._initial_items), self.dbg.preview_chars)
+                self.logger.debug(f"[debug] initial_items count={len(self._initial_items)} preview={prev}")
+        else:
+            from llama_index.core.llms import ChatMessage, MessageRole
+            self._initial_chat = [ChatMessage(role=MessageRole.USER, content=query or "")]
+            if self.dbg.log_inputs:
+                self.logger.debug(f"[debug] initial from query='{ellipsize(query, self.dbg.preview_chars)}'")
+
+        # Pick START
+        if self.g.start_targets:
+            self._current_ids = [self.g.start_targets[0]]
+            self.logger.info(f"[step] START -> {self._current_ids[0]}")
+        else:
+            default_agent = self.g.pick_default_start_agent()
+            if default_agent is None:
+                self.logger.error("[step] No START and no agents in schema.")
+                return super().run(ctx=ctx, start_event=FlowStartEvent(query=query or ""))
+            self._current_ids = [default_agent]
+            self.logger.info(f"[step] START (auto lowest-id) -> {default_agent}")
+
+        self._steps = 0
+        self._first_dispatch_done = False
+        self._last_plain_output = ""
+
+        return super().run(ctx=ctx, start_event=FlowStartEvent(query=query or ""))
+
+    @step
+    async def start_step(self, ctx: Context, ev: FlowStartEvent) -> FlowTickEvent | FlowStopEvent:
+        self.logger.debug("[step] start_step")
+        if not self._current_ids:
+            await self._emit_agent_text(ctx, "Flow has no start and no agents.\n", agent_name="Flow")
+            return FlowStopEvent(final_answer="")
+        return FlowTickEvent()
+
+    @step
+    async def loop_step(self, ctx: Context, ev: FlowTickEvent) -> FlowTickEvent | FlowStopEvent:
+        # Cooperative stop
+        if self._is_stopped():
+            self.logger.info("[step] loop_step: stopped() -> cancelling")
+            raise WorkflowCancelledByUser()
+
+        # Termination conditions
+        if not self._current_ids or (self._steps >= self.max_iterations > 0):
+            self.logger.info(f"[step] loop_step: done (ids={self._current_ids}, steps={self._steps})")
+            return FlowStopEvent(final_answer=self._last_plain_output or "")
+
+        current_id = self._current_ids[0]
+        self._steps += 1
+        self.logger.debug(f"[step] loop_step#{self._steps} current_id={current_id}")
+
+        # Reached END?
+        if current_id in self.fs.ends:
+            self.logger.info(f"[step] loop_step: reached END {current_id}")
+            return FlowStopEvent(final_answer=self._last_plain_output or "")
+
+        # If unknown id: jump to END if any
+        if current_id not in self.fs.agents:
+            self.logger.warning(f"[step] loop_step: {current_id} not an agent; jumping to END if any.")
+            end_id = self.g.end_nodes[0] if self.g.end_nodes else None
+            self._current_ids = [end_id] if end_id else []
+            return FlowTickEvent() if self._current_ids else FlowStopEvent(final_answer=self._last_plain_output or "")
+
+        node: AgentNode = self.fs.agents[current_id]
+        if self._steps > 1:
+            await self._emit_step_sep(ctx, current_id)
+        await self._emit_header(ctx, node.name or current_id)
+
+        # Resolve runtime + per-node LLM/tools
+        node_rt = self._resolve_node_runtime(node)
+        if self.dbg.log_runtime:
+            self.logger.debug(
+                f"[runtime] model={getattr(node_rt.model, 'name', str(node_rt.model))} "
+                f"allow_local={node_rt.allow_local_tools} allow_remote={node_rt.allow_remote_tools} "
+                f"instructions='{ellipsize(node_rt.instructions, self.dbg.preview_chars)}'"
+                f" role='{ellipsize(node_rt.role or '', self.dbg.preview_chars)}'"
+            )
+
+        llm_node = resolve_llm(self.window, node_rt.model, self.llm_base, self.stream)
+        if self.dbg.log_llm:
+            self.logger.debug(f"[llm] using={llm_node.__class__.__name__} id={getattr(llm_node, 'model', None) or getattr(llm_node, '_model', None)}")
+
+        tools_node = self.tools_base if (node_rt.allow_local_tools or node_rt.allow_remote_tools) else []
+        if self.dbg.log_tools:
+            self.logger.debug(f"[tools] count={len(tools_node)} names={self._tool_names(tools_node)}")
+
+        # Build input (baton/memory)
+        user_msg_text, chat_history_msgs, src = self._build_input_for_node(current_id)
+        if self.dbg.log_inputs:
+            self.logger.debug(
                f"[input] src={src} chat_hist={len(chat_history_msgs)} "
+                f"user='{ellipsize(user_msg_text, self.dbg.preview_chars)}'"
+            )
+
+        # Prepare friendly map with optional roles for this node's allowed routes
+        allowed_routes_now = list(node.outputs or [])
+        friendly_map = self._friendly_map_for_routes(allowed_routes_now)
+
+        # Build agent (chat_history/max_iterations in ctor – best practice)
+        built = self.factory.build(
+            node=node,
+            node_runtime=node_rt,
+            llm=llm_node,
+            tools=tools_node,
+            friendly_map=friendly_map,
+            chat_history=chat_history_msgs,
+            max_iterations=self.max_iterations,
+        )
+        agent = built.instance
+        multi_output = built.multi_output
+        allowed_routes = built.allowed_routes
+
+        if self.dbg.log_routes:
+            self.logger.debug(f"[routing] multi_output={multi_output} routes={allowed_routes} mode={self.router_stream_mode}")
+
+        display_text = ""
+        next_id: Optional[str] = None
+
+        # Execute (single LI agent doesn't token-stream; Workflow emits blocks)
+        try:
+            t0 = perf_counter()
+            if self.dbg.step_timeout_sec > 0:
+                ret = await asyncio.wait_for(agent.run(user_msg=user_msg_text), timeout=self.dbg.step_timeout_sec)
+            else:
+                ret = await agent.run(user_msg=user_msg_text)
+            dt_ms = (perf_counter() - t0) * 1000.0
+            if self.dbg.timeit_agent_run:
+                self.logger.debug(f"[time] agent.run took {dt_ms:.1f} ms")
+        except asyncio.TimeoutError:
+            self.logger.error(f"[error] agent.run timeout after {self.dbg.step_timeout_sec}s on node={current_id}")
+            ret = None
+        except Exception as e:
+            self.logger.error(f"[error] agent.run failed on node={current_id}: {e}")
+            ret = None
+
+        # Extract and sanitize text
+        raw_text = extract_agent_text(ret) if ret is not None else ""
+        raw_text_clean = strip_role_prefixes(raw_text)
+
+        if self.dbg.log_outputs:
+            self.logger.debug(f"[out.raw] len={len(raw_text)} preview='{ellipsize(raw_text, self.dbg.preview_chars)}'")
+            self.logger.debug(f"[out.cln] len={len(raw_text_clean)} preview='{ellipsize(raw_text_clean, self.dbg.preview_chars)}'")
+
+        if multi_output:
+            decision = parse_route_output(raw_text_clean or "", allowed_routes)
+            display_text = decision.content or ""
+            if display_text:
+                await self._emit_agent_text(ctx, display_text, agent_name=(node.name or current_id))
+                if self.dbg.log_memory_dump:
+                    self.logger.debug(f"[mem.prep] node={current_id} save user='{ellipsize(user_msg_text, self.dbg.preview_chars)}' "
+                                      f"assist='{ellipsize(display_text, self.dbg.preview_chars)}'")
+                await self._update_memory_after_step(current_id, user_msg_text, display_text)
+            next_id = decision.route if decision.valid else (allowed_routes[0] if allowed_routes else None)
+            if self.dbg.log_routes:
+                self.logger.debug(
+                    f"[route] node={current_id} valid={decision.valid} next={next_id} content_len={len(display_text)}"
+                )
+        else:
+            display_text = raw_text_clean or ""
+            if display_text:
+                await self._emit_agent_text(ctx, display_text, agent_name=(node.name or current_id))
+                if self.dbg.log_memory_dump:
+                    self.logger.debug(f"[mem.prep] node={current_id} save user='{ellipsize(user_msg_text, self.dbg.preview_chars)}' "
+                                      f"assist='{ellipsize(display_text, self.dbg.preview_chars)}'")
+                await self._update_memory_after_step(current_id, user_msg_text, display_text)
+            outs = self.g.get_next(current_id)
+            next_id = outs[0] if outs else self.g.first_connected_end(current_id)
+            if self.dbg.log_routes:
+                self.logger.debug(f"[route] node={current_id} next={next_id} (first edge or END)")
+
+        if self.dbg.log_outputs:
+            self.logger.debug(f"[output] preview='{ellipsize(display_text, self.dbg.preview_chars)}'")
+
+        # Update baton and next
+        self._first_dispatch_done = True
+        self._last_plain_output = display_text
+
+        if isinstance(next_id, str) and next_id.lower() == "end":
+            end_id = self.g.first_connected_end(current_id) or (self.g.end_nodes[0] if self.g.end_nodes else None)
+            self._current_ids = [end_id] if end_id else []
+            if not self._current_ids:
+                self.logger.info("[step] next=END (no concrete END node); stopping now.")
+                return FlowStopEvent(final_answer=self._last_plain_output or "")
+        elif next_id:
+            self._current_ids = [next_id]
+        else:
+            end_id = self.g.first_connected_end(current_id)
+            if end_id:
+                self._current_ids = [end_id]
+            else:
+                self._current_ids = []
+                self.logger.info("[step] no next, no END; stopping.")
+                return FlowStopEvent(final_answer=self._last_plain_output or "")
+
+        return FlowTickEvent()
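
For orientation, here is a minimal, hypothetical driver for the workflow added above, based only on the constructor and run() signatures visible in this hunk. The window, schema, llm, tools, model, and get_option values are stand-ins that the host app would supply (the real caller is the LlamaWorkflow runner elsewhere in the package); stream_events() and awaiting the handler are standard llama_index.core.workflow usage. Router agents answer with JSON of the form {"route": ..., "content": ...}, which parse_route_output turns into a routing decision; only content is displayed.

# Hypothetical driver sketch (not part of the package); all argument values
# below are assumptions supplied by the host application.
import asyncio
from llama_index.core.agent.workflow import AgentStream

async def drive_flow(window, schema, llm, tools, model, get_option):
    wf = DynamicFlowWorkflowLI(
        window=window,                      # assumption: host app container
        logger=None,                        # falls back to NullLogger()
        schema=schema,                      # node dicts consumed by parse_schema()
        initial_messages=None,              # None -> seeded from the query below
        preset=None,
        default_model=model,                # assumption: a ModelItem
        option_get=get_option,              # assumption: (node_id, key, default) -> value
        router_stream_mode="off",
        allow_local_tools_default=False,
        allow_remote_tools_default=False,
        max_iterations=20,
        llm=llm,
        tools=tools,
        stream=False,
        base_prompt="",
    )
    handler = wf.run(query="Hello", on_stop=lambda: False)
    # AgentStream / StepEvent events are the ones emitted by the steps above.
    async for ev in handler.stream_events():
        if isinstance(ev, AgentStream):
            print(ev.delta, end="")
    result = await handler                  # resolves when a FlowStopEvent is returned
    print("\nfinal:", getattr(result, "final_answer", result))

# The host app would run it with, e.g.: asyncio.run(drive_flow(...))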
pygpt_net/core/agents/custom/llama_index/stream.py (new file)
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub: https://github.com/szczyglis-dev/py-gpt    #
+# MIT License                                        #
+# Created By : Marcin Szczygliński                   #
+# Updated Date: 2025.09.24 23:00:00                  #
+# ================================================== #
+
+from __future__ import annotations
+import io
+from typing import Tuple
+
+from pygpt_net.core.agents.bridge import ConnectionContext
+from pygpt_net.item.ctx import CtxItem
+
+
+class LIStreamHandler:
+    """
+    Minimal streaming helper for LlamaIndex events, API-compatible with the existing call sites:
+    - reset(), new(), to_buffer(text)
+    - begin flag
+    - handle_token(delta, ctx) -> returns (buffer, None)
+    """
+    def __init__(self, bridge: ConnectionContext):
+        self.bridge = bridge
+        self._buf = io.StringIO()
+        self.begin = True
+
+    @property
+    def buffer(self) -> str:
+        return self._buf.getvalue()
+
+    def reset(self):
+        self._buf = io.StringIO()
+
+    def new(self):
+        self.reset()
+        self.begin = True
+
+    def to_buffer(self, text: str):
+        if text:
+            self._buf.write(text)
+
+    def handle_token(self, delta: str, ctx: CtxItem, flush: bool = True, buffer: bool = True) -> Tuple[str, None]:
+        if not delta:
+            return self.buffer, None
+        ctx.stream = delta
+        if flush:
+            self.bridge.on_step(ctx, self.begin)
+        if buffer:
+            self._buf.write(delta)
+        self.begin = False
+        return self.buffer, None
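
And a small, hypothetical usage sketch for the handler above; bridge (a ConnectionContext) and ctx (a CtxItem) are stand-ins that the host app would provide:

# Hypothetical wiring (not part of the package): feed AgentStream deltas through
# the handler so the UI bridge sees each one and the buffer accumulates them.
handler = LIStreamHandler(bridge)        # assumption: `bridge` is a ConnectionContext
handler.new()                            # reset buffer; next token is flagged as "begin"
for delta in ("Hel", "lo ", "world"):    # e.g. AgentStream.delta values
    buf, _ = handler.handle_token(delta, ctx)   # assumption: `ctx` is a CtxItem
print(handler.buffer)                    # -> "Hello world"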