pygpt-net 2.6.59__py3-none-any.whl → 2.6.61__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. pygpt_net/CHANGELOG.txt +11 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +9 -5
  4. pygpt_net/controller/__init__.py +1 -0
  5. pygpt_net/controller/chat/common.py +115 -6
  6. pygpt_net/controller/chat/input.py +4 -1
  7. pygpt_net/controller/presets/editor.py +442 -39
  8. pygpt_net/controller/presets/presets.py +121 -6
  9. pygpt_net/controller/settings/editor.py +0 -15
  10. pygpt_net/controller/theme/markdown.py +2 -5
  11. pygpt_net/controller/ui/ui.py +4 -7
  12. pygpt_net/core/agents/custom/__init__.py +281 -0
  13. pygpt_net/core/agents/custom/debug.py +64 -0
  14. pygpt_net/core/agents/custom/factory.py +109 -0
  15. pygpt_net/core/agents/custom/graph.py +71 -0
  16. pygpt_net/core/agents/custom/llama_index/__init__.py +10 -0
  17. pygpt_net/core/agents/custom/llama_index/factory.py +100 -0
  18. pygpt_net/core/agents/custom/llama_index/router_streamer.py +106 -0
  19. pygpt_net/core/agents/custom/llama_index/runner.py +562 -0
  20. pygpt_net/core/agents/custom/llama_index/stream.py +56 -0
  21. pygpt_net/core/agents/custom/llama_index/utils.py +253 -0
  22. pygpt_net/core/agents/custom/logging.py +50 -0
  23. pygpt_net/core/agents/custom/memory.py +51 -0
  24. pygpt_net/core/agents/custom/router.py +155 -0
  25. pygpt_net/core/agents/custom/router_streamer.py +187 -0
  26. pygpt_net/core/agents/custom/runner.py +455 -0
  27. pygpt_net/core/agents/custom/schema.py +127 -0
  28. pygpt_net/core/agents/custom/utils.py +193 -0
  29. pygpt_net/core/agents/provider.py +72 -7
  30. pygpt_net/core/agents/runner.py +7 -4
  31. pygpt_net/core/agents/runners/helpers.py +1 -1
  32. pygpt_net/core/agents/runners/llama_workflow.py +3 -0
  33. pygpt_net/core/agents/runners/openai_workflow.py +8 -1
  34. pygpt_net/core/db/viewer.py +11 -5
  35. pygpt_net/{ui/widget/builder → core/node_editor}/__init__.py +2 -2
  36. pygpt_net/core/{builder → node_editor}/graph.py +28 -226
  37. pygpt_net/core/node_editor/models.py +118 -0
  38. pygpt_net/core/node_editor/types.py +78 -0
  39. pygpt_net/core/node_editor/utils.py +17 -0
  40. pygpt_net/core/presets/presets.py +216 -29
  41. pygpt_net/core/render/markdown/parser.py +0 -2
  42. pygpt_net/core/render/web/renderer.py +10 -8
  43. pygpt_net/data/config/config.json +5 -6
  44. pygpt_net/data/config/models.json +3 -3
  45. pygpt_net/data/config/settings.json +2 -38
  46. pygpt_net/data/locale/locale.de.ini +64 -1
  47. pygpt_net/data/locale/locale.en.ini +63 -4
  48. pygpt_net/data/locale/locale.es.ini +64 -1
  49. pygpt_net/data/locale/locale.fr.ini +64 -1
  50. pygpt_net/data/locale/locale.it.ini +64 -1
  51. pygpt_net/data/locale/locale.pl.ini +65 -2
  52. pygpt_net/data/locale/locale.uk.ini +64 -1
  53. pygpt_net/data/locale/locale.zh.ini +64 -1
  54. pygpt_net/data/locale/plugin.cmd_system.en.ini +62 -66
  55. pygpt_net/item/agent.py +5 -1
  56. pygpt_net/item/preset.py +19 -1
  57. pygpt_net/provider/agents/base.py +33 -2
  58. pygpt_net/provider/agents/llama_index/flow_from_schema.py +92 -0
  59. pygpt_net/provider/agents/openai/flow_from_schema.py +96 -0
  60. pygpt_net/provider/core/agent/json_file.py +11 -5
  61. pygpt_net/provider/core/config/patch.py +10 -1
  62. pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -6
  63. pygpt_net/tools/agent_builder/tool.py +233 -52
  64. pygpt_net/tools/agent_builder/ui/dialogs.py +172 -28
  65. pygpt_net/tools/agent_builder/ui/list.py +37 -10
  66. pygpt_net/ui/__init__.py +2 -4
  67. pygpt_net/ui/dialog/about.py +58 -38
  68. pygpt_net/ui/dialog/db.py +142 -3
  69. pygpt_net/ui/dialog/preset.py +62 -8
  70. pygpt_net/ui/layout/toolbox/presets.py +52 -16
  71. pygpt_net/ui/main.py +1 -1
  72. pygpt_net/ui/widget/dialog/db.py +0 -0
  73. pygpt_net/ui/widget/lists/preset.py +644 -60
  74. pygpt_net/{core/builder → ui/widget/node_editor}/__init__.py +2 -2
  75. pygpt_net/ui/widget/node_editor/command.py +373 -0
  76. pygpt_net/ui/widget/node_editor/config.py +157 -0
  77. pygpt_net/ui/widget/node_editor/editor.py +2070 -0
  78. pygpt_net/ui/widget/node_editor/item.py +493 -0
  79. pygpt_net/ui/widget/node_editor/node.py +1460 -0
  80. pygpt_net/ui/widget/node_editor/utils.py +17 -0
  81. pygpt_net/ui/widget/node_editor/view.py +364 -0
  82. pygpt_net/ui/widget/tabs/output.py +1 -1
  83. pygpt_net/ui/widget/textarea/input.py +2 -2
  84. pygpt_net/utils.py +114 -2
  85. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/METADATA +80 -93
  86. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/RECORD +88 -61
  87. pygpt_net/core/agents/custom.py +0 -150
  88. pygpt_net/ui/widget/builder/editor.py +0 -2001
  89. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/LICENSE +0 -0
  90. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/WHEEL +0 -0
  91. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.61.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,455 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.25 14:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from dataclasses import dataclass
14
+ from typing import Any, Dict, List, Optional
15
+ from time import perf_counter
16
+
17
+ from agents import Runner, TResponseInputItem
18
+ from pygpt_net.core.agents.bridge import ConnectionContext
19
+ from pygpt_net.item.ctx import CtxItem
20
+ from pygpt_net.item.model import ModelItem
21
+ from pygpt_net.item.preset import PresetItem
22
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
23
+
24
+ from .logging import Logger, NullLogger
25
+ from .schema import FlowSchema, AgentNode, parse_schema
26
+ from .graph import FlowGraph, build_graph
27
+ from .memory import MemoryManager
28
+ from .factory import AgentFactory
29
+ from .router import parse_route_output
30
+ from .utils import (
31
+ sanitize_input_items,
32
+ extract_text_output,
33
+ patch_last_assistant_output,
34
+ OptionGetter,
35
+ resolve_node_runtime,
36
+ )
37
+ from .router_streamer import DelayedRouterStreamer, RealtimeRouterStreamer
38
+ from .debug import items_preview, ellipsize
39
+
40
+
41
@dataclass
class FlowResult:
    """Outcome of a completed (or aborted) flow run returned by FlowOrchestrator.run_flow()."""
    # Final context item; when use_partial_ctx is enabled this is the last
    # partial ctx produced by bridge.on_next_ctx(), not necessarily the input ctx.
    ctx: CtxItem
    # Displayed text of the last executed agent step ("" when no step ran).
    final_output: str
    # Provider response id of the last step, if the runner reported one.
    last_response_id: Optional[str]
46
+
47
+
48
@dataclass
class DebugConfig:
    """Debug-logging switches for FlowOrchestrator, populated from option_get()."""
    # Log per-node resolved runtime (model, tool flags, instruction preview).
    log_runtime: bool = True
    # Log routing decisions between nodes.
    log_routes: bool = True
    # Log prepared input items per step.
    # NOTE(review): run_flow() builds this config with option_get defaults of True
    # for log_inputs/log_outputs, which disagrees with the False defaults here —
    # confirm which default is intended.
    log_inputs: bool = False
    # Log output previews per step.
    log_outputs: bool = False
    # Maximum characters for ellipsized previews in debug logs.
    preview_chars: int = 280
55
+
56
+
57
class FlowOrchestrator:
    """
    Orchestrates dynamic multi-agent flow based on NodeEditor schema.

    UI semantics follow "bot-to-bot" and supports router stream modes: off/delayed/realtime.
    Memory/no-memory input policy:
    - First agent (in the whole flow) gets full initial messages from the app.
    - Next agent WITHOUT memory gets only last step's displayed content as a single 'user' message.
    - Agent WITH memory:
        * if memory has items -> use them;
        * if memory empty -> seed input from last displayed content (or initial messages as fallback).
    """
    def __init__(self, window, logger: Optional[Logger] = None) -> None:
        """
        :param window: application window / service locator used by the factory and runtime resolution
        :param logger: optional logger; falls back to a no-op NullLogger
        """
        self.window = window
        self.logger = logger or NullLogger()

    async def run_flow(
        self,
        schema: List[Dict[str, Any]],
        messages: List[TResponseInputItem],
        ctx: CtxItem,
        bridge: ConnectionContext,
        agent_kwargs: Dict[str, Any],
        preset: Optional[PresetItem],
        model: ModelItem,
        stream: bool,
        use_partial_ctx: bool,
        base_prompt: Optional[str],
        allow_local_tools_default: bool,
        allow_remote_tools_default: bool,
        function_tools: List[dict],
        trace_id: Optional[str],
        max_iterations: int = 20,
        router_stream_mode: str = "off",  # "off" | "delayed" | "realtime"
        option_get: Optional[OptionGetter] = None,
    ) -> FlowResult:
        """
        Execute the agent flow described by *schema* until an END node is reached,
        the iteration budget is exhausted, or the bridge signals a stop.

        :param schema: NodeEditor-exported list of raw node dicts (parsed via parse_schema)
        :param messages: initial input items handed to the very first dispatched agent
        :param ctx: current context item; streamed output and step headers are pushed through it
        :param bridge: connection context used for UI callbacks (on_step/on_next/on_stop/stopped)
        :param agent_kwargs: extra agent build/run options; "context" and "max_iterations" are read here
        :param preset: optional preset forwarded to the agent factory
        :param model: default model used when a node does not override it
        :param stream: enable token streaming for single-output agents (routers follow router_stream_mode)
        :param use_partial_ctx: when True, each step ends with bridge.on_next_ctx() producing a fresh ctx
        :param base_prompt: base system prompt used by per-node runtime resolution
        :param allow_local_tools_default / allow_remote_tools_default: fallbacks for per-node tool flags
        :param function_tools: function-tool specs passed to every built agent
        :param trace_id: optional trace id forwarded to the Runner
        :param max_iterations: step budget; 0 means unlimited (see loop condition)
        :param router_stream_mode: router output streaming mode: "off" | "delayed" | "realtime"
        :param option_get: optional (section, key, default) -> value getter for node/debug options
        :return: FlowResult with the final ctx, last displayed output and last response id
        """
        fs: FlowSchema = parse_schema(schema)
        g: FlowGraph = build_graph(fs)
        mem = MemoryManager()
        factory = AgentFactory(self.window, self.logger)
        # Default option getter simply returns the supplied default.
        option_get = option_get or (lambda s, k, d=None: d)

        # Debug config (all switches sourced from the "debug" option section)
        dbg = DebugConfig(
            log_runtime=bool(option_get("debug", "log_runtime", True)),
            log_routes=bool(option_get("debug", "log_routes", True)),
            log_inputs=bool(option_get("debug", "log_inputs", True)),
            log_outputs=bool(option_get("debug", "log_outputs", True)),
            preview_chars=int(option_get("debug", "preview_chars", 280)),
        )

        # Entry: explicit START node wins; otherwise fall back to the default start agent.
        if g.start_targets:
            current_ids: List[str] = [g.start_targets[0]]
            self.logger.info(f"Using explicit START -> {current_ids[0]}")
        else:
            default_agent = g.pick_default_start_agent()
            if default_agent is None:
                self.logger.error("No START and no agents in schema.")
                return FlowResult(ctx=ctx, final_output="", last_response_id=None)
            current_ids = [default_agent]
            self.logger.info(f"No START found, using lowest-id agent: {default_agent}")

        # State
        final_output = ""
        last_response_id: Optional[str] = None

        # Initial messages for the very first agent in flow
        initial_messages: List[TResponseInputItem] = sanitize_input_items(list(messages or []))
        first_dispatch_done: bool = False
        last_plain_output: str = ""  # what was displayed to UI in the previous step

        steps = 0

        # Shared stream handler (bot-to-bot style)
        handler = StreamHandler(self.window, bridge)
        begin = True

        # max_iterations == 0 disables the step budget entirely.
        while current_ids and (steps < max_iterations or max_iterations == 0) and not bridge.stopped():
            step_start = perf_counter()
            current_id = current_ids[0]
            steps += 1

            # END node
            if current_id in fs.ends:
                self.logger.info(f"Reached END node: {current_id}")
                break

            # Validate agent: unknown ids jump to the first END node if one exists.
            if current_id not in fs.agents:
                self.logger.warning(f"Next id {current_id} is not an agent; stopping or jumping to END.")
                if g.end_nodes:
                    current_ids = [g.end_nodes[0]]
                    continue
                break

            node: AgentNode = fs.agents[current_id]
            self.logger.debug(f"[step {steps}] agent_id={node.id} name={node.name} outs={node.outputs}")

            # Resolve per-node runtime (model, instructions, tool permissions)
            node_rt = resolve_node_runtime(
                window=self.window,
                node=node,
                option_get=option_get,
                default_model=model,
                base_prompt=base_prompt,
                schema_allow_local=node.allow_local_tools,
                schema_allow_remote=node.allow_remote_tools,
                default_allow_local=allow_local_tools_default,
                default_allow_remote=allow_remote_tools_default,
            )

            if dbg.log_runtime:
                instr_preview = ellipsize(node_rt.instructions, dbg.preview_chars)
                self.logger.debug(
                    f"[runtime] model={getattr(node_rt.model,'name',str(node_rt.model))} "
                    f"allow_local={node_rt.allow_local_tools} allow_remote={node_rt.allow_remote_tools} "
                    f"instructions='{instr_preview}'"
                    f" role='{node_rt.role}'"
                )

            # Memory selection and INPUT BUILD (memory-first policy)
            mem_id = g.agent_to_memory.get(current_id)
            mem_state = mem.get(mem_id) if mem_id else None
            if dbg.log_runtime:
                mem_info = f"{mem_id} (len={len(mem_state.items)})" if mem_state and mem_state.items else (mem_id or "-")
                self.logger.debug(f"[memory] attached={bool(mem_id)} mem_id={mem_info}")

            input_items: List[TResponseInputItem]
            input_source = ""  # debug tag describing which policy branch built the input

            if mem_state and not mem_state.is_empty():
                # memory present and already has history
                input_items = list(mem_state.items)
                input_source = "memory:existing"
            elif mem_state:
                # memory present but empty -> seed from last output or initial messages
                if last_plain_output and last_plain_output.strip():
                    input_items = [{"role": "user", "content": last_plain_output}]
                    input_source = "memory:seeded_from_last"
                else:
                    input_items = list(initial_messages)
                    input_source = "memory:seeded_from_initial"
            else:
                # no memory -> first agent gets initial messages; others get only last output
                if not first_dispatch_done:
                    input_items = list(initial_messages)
                    input_source = "no-mem:initial"
                else:
                    if last_plain_output and last_plain_output.strip():
                        input_items = [{"role": "user", "content": last_plain_output}]
                        input_source = "no-mem:last_output"
                    else:
                        input_items = list(initial_messages)
                        input_source = "no-mem:fallback_initial"

            prepared_items = sanitize_input_items(input_items)

            if dbg.log_inputs:
                self.logger.debug(f"[input] source={input_source} items={len(prepared_items)} "
                                  f"preview={items_preview(prepared_items, dbg.preview_chars)}")

            # Build agent with per-node runtime
            built = factory.build(
                node=node,
                node_runtime=node_rt,
                preset=preset,
                function_tools=function_tools,
                force_router=False,  # auto on multi-output
                friendly_map={aid: a.name or aid for aid, a in fs.agents.items()},
                handoffs_enabled=True,
                context=agent_kwargs.get("context"),
            )
            agent = built.instance
            multi_output = built.multi_output
            allowed_routes = built.allowed_routes

            if dbg.log_runtime and multi_output:
                self.logger.debug(f"[routing] multi_output=True routes={allowed_routes} mode={router_stream_mode}")

            # Prepare run kwargs; agent_kwargs["max_iterations"] overrides the flow budget for max_turns.
            run_kwargs: Dict[str, Any] = {
                "input": prepared_items,
                "max_turns": int(agent_kwargs.get("max_iterations", max_iterations)),
            }
            if trace_id:
                run_kwargs["trace_id"] = trace_id

            # Header for UI (agent name as a bold markdown title before its output)
            title = f"\n\n**{built.name}**\n\n"
            ctx.stream = title
            bridge.on_step(ctx, begin)
            begin = False
            handler.begin = begin
            if not use_partial_ctx:
                handler.to_buffer(title)

            display_text = ""  # what we show to UI for this step
            next_id: Optional[str] = None

            # --- EXECUTION ---
            if stream and not multi_output:
                # Full token streaming (single-output agent)
                result = Runner.run_streamed(agent, **run_kwargs)
                handler.reset()

                async for event in result.stream_events():
                    if bridge.stopped():
                        result.cancel()
                        bridge.on_stop(ctx)
                        break
                    display_text, last_response_id = handler.handle(event, ctx)

                # Prepare next inputs from result for memory update (if any)
                input_items_next = result.to_input_list()
                input_items_next = sanitize_input_items(input_items_next)

                # Update memory only if attached
                if mem_state:
                    mem_state.update_from_result(input_items_next, last_response_id)

                # Route: first edge or END
                outs = g.get_next(current_id)
                next_id = outs[0] if outs else g.first_connected_end(current_id)

            else:
                # Multi-output or non-stream
                if multi_output:
                    mode = (router_stream_mode or "off").lower()
                    if stream and mode == "realtime":
                        # Realtime router streaming: stream content as tokens arrive
                        result = Runner.run_streamed(agent, **run_kwargs)
                        rts = RealtimeRouterStreamer(
                            window=self.window,
                            bridge=bridge,
                            handler=handler if not use_partial_ctx else None,
                            buffer_to_handler=(not use_partial_ctx),
                            logger=self.logger,
                        )
                        rts.reset()

                        async for event in result.stream_events():
                            if bridge.stopped():
                                result.cancel()
                                bridge.on_stop(ctx)
                                break
                            rts.handle_event(event, ctx)
                            if rts.last_response_id:
                                last_response_id = rts.last_response_id

                        raw_text = rts.buffer or ""
                        decision = parse_route_output(raw_text, allowed_routes)
                        display_text = decision.content or ""

                        # Prepare next inputs from streamed result, patch assistant content -> content
                        input_items_next = result.to_input_list()
                        input_items_next = patch_last_assistant_output(input_items_next, decision.content or "")

                        # Update memory if attached
                        if mem_state:
                            mem_state.update_from_result(input_items_next, last_response_id)

                        # Route decision: invalid router JSON falls back to the first allowed route.
                        if decision.valid:
                            next_id = decision.route
                        else:
                            if dbg.log_routes:
                                self.logger.warning("[router-realtime] Invalid JSON; fallback to first route.")
                            next_id = allowed_routes[0] if allowed_routes else None

                    elif stream and mode == "delayed":
                        # Delayed router streaming: collect tokens silently, reveal once
                        result = Runner.run_streamed(agent, **run_kwargs)
                        delayed = DelayedRouterStreamer(self.window, bridge)
                        delayed.reset()

                        async for event in result.stream_events():
                            if bridge.stopped():
                                result.cancel()
                                bridge.on_stop(ctx)
                                break
                            _, rid = delayed.handle_event(event, ctx)
                            if rid:
                                last_response_id = rid

                        raw_text = delayed.buffer or ""
                        decision = parse_route_output(raw_text, allowed_routes)
                        display_text = decision.content or ""
                        if display_text:
                            ctx.stream = display_text
                            bridge.on_step(ctx, False)
                            if not use_partial_ctx:
                                handler.to_buffer(display_text)

                        input_items_next = result.to_input_list()
                        input_items_next = patch_last_assistant_output(input_items_next, display_text)

                        if mem_state:
                            mem_state.update_from_result(input_items_next, last_response_id)

                        if decision.valid:
                            next_id = decision.route
                        else:
                            if dbg.log_routes:
                                self.logger.warning(f"[router-delayed] Invalid JSON: {decision.error}; fallback first route.")
                            next_id = allowed_routes[0] if allowed_routes else None
                    else:
                        # No streaming for router: run to completion, then emit only content
                        result = await Runner.run(agent, **run_kwargs)
                        last_response_id = getattr(result, "last_response_id", None)
                        raw_text = extract_text_output(result)
                        decision = parse_route_output(raw_text, allowed_routes)
                        display_text = decision.content or ""
                        if display_text:
                            ctx.stream = display_text
                            bridge.on_step(ctx, False)
                            if not use_partial_ctx:
                                handler.to_buffer(display_text)

                        input_items_next = result.to_input_list()
                        input_items_next = patch_last_assistant_output(input_items_next, display_text)

                        if mem_state:
                            mem_state.update_from_result(input_items_next, last_response_id)

                        if decision.valid:
                            next_id = decision.route
                        else:
                            if dbg.log_routes:
                                self.logger.warning(f"[router-off] Invalid JSON: {decision.error}; fallback first route.")
                            next_id = allowed_routes[0] if allowed_routes else None
                else:
                    # Single-output, non-stream path
                    result = await Runner.run(agent, **run_kwargs)
                    last_response_id = getattr(result, "last_response_id", None)
                    raw_text = extract_text_output(result)
                    display_text = raw_text or ""
                    if display_text:
                        ctx.stream = display_text
                        bridge.on_step(ctx, False)
                        if not use_partial_ctx:
                            handler.to_buffer(display_text)

                    input_items_next = result.to_input_list()
                    input_items_next = sanitize_input_items(input_items_next)

                    if mem_state:
                        mem_state.update_from_result(input_items_next, last_response_id)

                    outs = g.get_next(current_id)
                    next_id = outs[0] if outs else g.first_connected_end(current_id)

            # DEBUG: output + route
            if dbg.log_outputs:
                self.logger.debug(f"[output] preview='{ellipsize(display_text, dbg.preview_chars)}' "
                                  f"last_response_id={last_response_id}")

            if dbg.log_routes:
                self.logger.debug(f"[route] current={current_id} -> next={next_id} "
                                  f"(end_connected={g.first_connected_end(current_id)})")

            # Mark dispatch and remember last plain output (for next no-memory agent)
            first_dispatch_done = True
            last_plain_output = display_text
            final_output = display_text

            # Resolve next id / END: a literal "end" route resolves to a connected END node.
            if isinstance(next_id, str) and next_id.lower() == "end":
                end_id = g.first_connected_end(current_id) or (g.end_nodes[0] if g.end_nodes else None)
                current_ids = [end_id] if end_id else []
            elif next_id:
                current_ids = [next_id]
            else:
                end_id = g.first_connected_end(current_id)
                current_ids = [end_id] if end_id else []

            # UI separation after each agent step
            is_end = True if not current_ids else (current_ids[0] in fs.ends)
            if use_partial_ctx:
                # Start a fresh partial ctx for the next step; the handler buffer is reset too.
                ctx = bridge.on_next_ctx(
                    ctx=ctx,
                    input="",
                    output=display_text,
                    response_id=last_response_id or "",
                    finish=is_end,
                    stream=True,
                )
                handler.new()
            else:
                bridge.on_next(ctx)

            # Step duration
            dur = perf_counter() - step_start
            self.logger.debug(f"[step {steps}] duration={dur:.3f}s")

        if bridge.stopped():
            bridge.on_stop(ctx)

        self.logger.info(f"Flow finished. steps={steps} final_len={len(final_output)}")
        return FlowResult(ctx=ctx, final_output=final_output, last_response_id=last_response_id)
@@ -0,0 +1,127 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.25 14:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from dataclasses import dataclass, field
14
+ from typing import Dict, List, Optional, Any
15
+
16
+
17
@dataclass
class BaseNode:
    """Common fields shared by every parsed NodeEditor schema node."""
    # Unique node id from the exported schema.
    id: str
    # Raw node type string: "agent" | "start" | "end" | "memory".
    type: str
    # Raw slot payload as exported by the editor (kept verbatim for reference).
    slots: Dict[str, Any] = field(default_factory=dict)
22
+
23
+
24
@dataclass
class AgentNode(BaseNode):
    """Agent node: one LLM step with routing edges, tool flags and optional memory."""
    # Display name (used for UI headers and friendly route maps).
    name: str = ""
    # System instruction text for this agent.
    instruction: str = ""
    # Tool permissions, parsed from "remote_tools" / "local_tools" slots.
    allow_remote_tools: bool = True
    allow_local_tools: bool = True
    # Ids of downstream / upstream connected nodes.
    outputs: List[str] = field(default_factory=list)
    inputs: List[str] = field(default_factory=list)
    memory_out: Optional[str] = None  # single mem by spec
    memory_in: List[str] = field(default_factory=list)  # not used, but kept for completeness
    role: str = ""  # Optional short description of agent's purpose
35
+
36
+
37
@dataclass
class StartNode(BaseNode):
    """START node: marks the flow entry; outputs point at the first agent(s)."""
    # Ids of nodes connected to this START's output.
    outputs: List[str] = field(default_factory=list)
40
+
41
+
42
@dataclass
class EndNode(BaseNode):
    """END node: terminates the flow; inputs list the nodes that can finish here."""
    # Ids of nodes connected into this END.
    inputs: List[str] = field(default_factory=list)
45
+
46
+
47
@dataclass
class MemoryNode(BaseNode):
    """Memory node: shared conversation memory attachable to agents."""
    # Display name of the memory.
    name: str = ""
    agents: List[str] = field(default_factory=list)  # agents connected to this memory
51
+
52
+
53
@dataclass
class FlowSchema:
    """Parsed flow: all schema nodes bucketed by type, keyed by node id."""
    agents: Dict[str, AgentNode] = field(default_factory=dict)
    memories: Dict[str, MemoryNode] = field(default_factory=dict)
    starts: Dict[str, StartNode] = field(default_factory=dict)
    ends: Dict[str, EndNode] = field(default_factory=dict)
59
+
60
+
61
+ def _safe_get(d: Dict[str, Any], *keys, default=None):
62
+ cur = d
63
+ for k in keys:
64
+ if not isinstance(cur, dict):
65
+ return default
66
+ if k not in cur:
67
+ return default
68
+ cur = cur[k]
69
+ return cur
70
+
71
+
72
def parse_schema(schema: List[Dict[str, Any]]) -> FlowSchema:
    """
    Parse a NodeEditor-exported schema (list of raw node dicts) into a FlowSchema.

    Recognized node types are "agent", "start", "end" and "memory"; entries
    with any other type are silently skipped.
    """
    flow = FlowSchema()
    for entry in schema:
        kind = entry.get("type")
        node_id = entry.get("id")
        slot_data = entry.get("slots", {}) or {}

        if kind == "agent":
            flow.agents[node_id] = AgentNode(
                id=node_id,
                type=kind,
                slots=slot_data,
                name=_safe_get(slot_data, "name", default=""),
                instruction=_safe_get(slot_data, "instruction", default=""),
                allow_remote_tools=bool(_safe_get(slot_data, "remote_tools", default=True)),
                allow_local_tools=bool(_safe_get(slot_data, "local_tools", default=True)),
                outputs=list(_safe_get(slot_data, "output", "out", default=[])) or [],
                inputs=list(_safe_get(slot_data, "input", "in", default=[])) or [],
                # single memory slot by spec: take the first connected id (or None)
                memory_out=(_safe_get(slot_data, "memory", "out", default=[None]) or [None])[0],
                memory_in=list(_safe_get(slot_data, "memory", "in", default=[])) or [],
                role=_safe_get(slot_data, "role", default="") or "",
            )
        elif kind == "start":
            flow.starts[node_id] = StartNode(
                id=node_id,
                type=kind,
                slots=slot_data,
                outputs=list(_safe_get(slot_data, "output", "out", default=[])) or [],
            )
        elif kind == "end":
            flow.ends[node_id] = EndNode(
                id=node_id,
                type=kind,
                slots=slot_data,
                inputs=list(_safe_get(slot_data, "input", "in", default=[])) or [],
            )
        elif kind == "memory":
            flow.memories[node_id] = MemoryNode(
                id=node_id,
                type=kind,
                slots=slot_data,
                name=_safe_get(slot_data, "name", default=""),
                agents=list(_safe_get(slot_data, "input", "in", default=[])) or [],
            )
    return flow