pygpt-net 2.6.59__py3-none-any.whl → 2.6.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +4 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +9 -5
  4. pygpt_net/controller/__init__.py +1 -0
  5. pygpt_net/controller/presets/editor.py +442 -39
  6. pygpt_net/core/agents/custom/__init__.py +275 -0
  7. pygpt_net/core/agents/custom/debug.py +64 -0
  8. pygpt_net/core/agents/custom/factory.py +109 -0
  9. pygpt_net/core/agents/custom/graph.py +71 -0
  10. pygpt_net/core/agents/custom/llama_index/__init__.py +10 -0
  11. pygpt_net/core/agents/custom/llama_index/factory.py +89 -0
  12. pygpt_net/core/agents/custom/llama_index/router_streamer.py +106 -0
  13. pygpt_net/core/agents/custom/llama_index/runner.py +529 -0
  14. pygpt_net/core/agents/custom/llama_index/stream.py +56 -0
  15. pygpt_net/core/agents/custom/llama_index/utils.py +242 -0
  16. pygpt_net/core/agents/custom/logging.py +50 -0
  17. pygpt_net/core/agents/custom/memory.py +51 -0
  18. pygpt_net/core/agents/custom/router.py +116 -0
  19. pygpt_net/core/agents/custom/router_streamer.py +187 -0
  20. pygpt_net/core/agents/custom/runner.py +454 -0
  21. pygpt_net/core/agents/custom/schema.py +125 -0
  22. pygpt_net/core/agents/custom/utils.py +181 -0
  23. pygpt_net/core/agents/provider.py +72 -7
  24. pygpt_net/core/agents/runner.py +7 -4
  25. pygpt_net/core/agents/runners/helpers.py +1 -1
  26. pygpt_net/core/agents/runners/llama_workflow.py +3 -0
  27. pygpt_net/core/agents/runners/openai_workflow.py +8 -1
  28. pygpt_net/{ui/widget/builder → core/node_editor}/__init__.py +2 -2
  29. pygpt_net/core/{builder → node_editor}/graph.py +11 -218
  30. pygpt_net/core/node_editor/models.py +111 -0
  31. pygpt_net/core/node_editor/types.py +76 -0
  32. pygpt_net/core/node_editor/utils.py +17 -0
  33. pygpt_net/core/render/web/renderer.py +10 -8
  34. pygpt_net/data/config/config.json +3 -3
  35. pygpt_net/data/config/models.json +3 -3
  36. pygpt_net/data/locale/locale.en.ini +4 -4
  37. pygpt_net/item/agent.py +5 -1
  38. pygpt_net/item/preset.py +19 -1
  39. pygpt_net/provider/agents/base.py +33 -2
  40. pygpt_net/provider/agents/llama_index/flow_from_schema.py +92 -0
  41. pygpt_net/provider/agents/openai/flow_from_schema.py +96 -0
  42. pygpt_net/provider/core/agent/json_file.py +11 -5
  43. pygpt_net/tools/agent_builder/tool.py +217 -52
  44. pygpt_net/tools/agent_builder/ui/dialogs.py +119 -24
  45. pygpt_net/tools/agent_builder/ui/list.py +37 -10
  46. pygpt_net/ui/dialog/preset.py +16 -1
  47. pygpt_net/ui/main.py +1 -1
  48. pygpt_net/{core/builder → ui/widget/node_editor}/__init__.py +2 -2
  49. pygpt_net/ui/widget/node_editor/command.py +373 -0
  50. pygpt_net/ui/widget/node_editor/editor.py +2038 -0
  51. pygpt_net/ui/widget/node_editor/item.py +492 -0
  52. pygpt_net/ui/widget/node_editor/node.py +1205 -0
  53. pygpt_net/ui/widget/node_editor/utils.py +17 -0
  54. pygpt_net/ui/widget/node_editor/view.py +247 -0
  55. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/METADATA +72 -2
  56. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/RECORD +59 -33
  57. pygpt_net/core/agents/custom.py +0 -150
  58. pygpt_net/ui/widget/builder/editor.py +0 -2001
  59. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.59.dist-info → pygpt_net-2.6.60.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,454 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from dataclasses import dataclass
14
+ from typing import Any, Dict, List, Optional
15
+ from time import perf_counter
16
+
17
+ from agents import Runner, TResponseInputItem
18
+ from pygpt_net.core.agents.bridge import ConnectionContext
19
+ from pygpt_net.item.ctx import CtxItem
20
+ from pygpt_net.item.model import ModelItem
21
+ from pygpt_net.item.preset import PresetItem
22
+ from pygpt_net.provider.api.openai.agents.response import StreamHandler
23
+
24
+ from .logging import Logger, NullLogger
25
+ from .schema import FlowSchema, AgentNode, parse_schema
26
+ from .graph import FlowGraph, build_graph
27
+ from .memory import MemoryManager
28
+ from .factory import AgentFactory
29
+ from .router import parse_route_output
30
+ from .utils import (
31
+ sanitize_input_items,
32
+ extract_text_output,
33
+ patch_last_assistant_output,
34
+ OptionGetter,
35
+ resolve_node_runtime,
36
+ )
37
+ from .router_streamer import DelayedRouterStreamer, RealtimeRouterStreamer
38
+ from .debug import items_preview, ellipsize
39
+
40
+
41
@dataclass
class FlowResult:
    """Final outcome of a single flow run returned by FlowOrchestrator.run_flow()."""
    ctx: CtxItem                     # last context item used/produced by the flow
    final_output: str                # last displayed agent output (display_text of final step)
    last_response_id: Optional[str]  # provider response id of the last step, if any
46
+
47
+
48
@dataclass
class DebugConfig:
    """Flags controlling which debug details FlowOrchestrator.run_flow() logs."""
    log_runtime: bool = True   # log per-node resolved runtime (model/tools/instructions) and memory info
    log_routes: bool = True    # log routing decisions between agents
    log_inputs: bool = False   # log the input items fed to each agent
    log_outputs: bool = False  # log each agent's output preview
    preview_chars: int = 280   # max chars for logged previews (ellipsized beyond this)
55
+
56
+
57
class FlowOrchestrator:
    """
    Orchestrates dynamic multi-agent flow based on NodeEditor schema.
    UI semantics follow "bot-to-bot" and support router stream modes: off/delayed/realtime.
    Memory/no-memory input policy:
      - First agent (in the whole flow) gets full initial messages from the app.
      - Next agent WITHOUT memory gets only last step's displayed content as a single 'user' message.
      - Agent WITH memory:
          * if memory has items -> use them;
          * if memory empty -> seed input from last displayed content (or initial messages as fallback).
    """
    def __init__(self, window, logger: Optional[Logger] = None) -> None:
        """
        :param window: application window / service locator passed through to helpers
        :param logger: optional logger; defaults to a no-op NullLogger
        """
        self.window = window
        self.logger = logger or NullLogger()

    async def run_flow(
        self,
        schema: List[Dict[str, Any]],
        messages: List[TResponseInputItem],
        ctx: CtxItem,
        bridge: ConnectionContext,
        agent_kwargs: Dict[str, Any],
        preset: Optional[PresetItem],
        model: ModelItem,
        stream: bool,
        use_partial_ctx: bool,
        base_prompt: Optional[str],
        allow_local_tools_default: bool,
        allow_remote_tools_default: bool,
        function_tools: List[dict],
        trace_id: Optional[str],
        max_iterations: int = 20,
        router_stream_mode: str = "off",  # "off" | "delayed" | "realtime"
        option_get: Optional[OptionGetter] = None,
    ) -> FlowResult:
        """
        Execute the flow described by ``schema`` until an END node is reached,
        no next route exists, the iteration cap is hit, or the bridge signals stop.

        :param schema: NodeEditor-exported node list (parsed via parse_schema)
        :param messages: initial input items for the very first agent in the flow
        :param ctx: current context item (replaced per step when use_partial_ctx is True)
        :param bridge: UI/stop bridge (on_step/on_next/on_next_ctx/on_stop/stopped)
        :param agent_kwargs: extra agent options; "context" and "max_iterations" are read here
        :param preset: optional preset forwarded to the agent factory
        :param model: default model used when a node does not override it
        :param stream: enable token streaming (full streaming only for single-output agents)
        :param use_partial_ctx: emit a fresh ctx per step via bridge.on_next_ctx
        :param base_prompt: fallback instructions for nodes without their own
        :param allow_local_tools_default: default local-tools flag for nodes
        :param allow_remote_tools_default: default remote-tools flag for nodes
        :param function_tools: function tool specs passed to built agents
        :param trace_id: optional trace id forwarded to Runner
        :param max_iterations: step cap for the outer loop; 0 means unlimited
        :param router_stream_mode: "off" | "delayed" | "realtime" (multi-output agents only)
        :param option_get: per-node option getter (section, key, default) -> value
        :return: FlowResult with final ctx, last displayed output and last response id
        """
        fs: FlowSchema = parse_schema(schema)
        g: FlowGraph = build_graph(fs)
        mem = MemoryManager()
        factory = AgentFactory(self.window, self.logger)
        # Fallback getter simply returns the supplied default.
        option_get = option_get or (lambda s, k, d=None: d)

        # Debug config
        # NOTE(review): log_inputs/log_outputs fall back to True here but
        # DebugConfig declares False defaults — confirm which is intended.
        dbg = DebugConfig(
            log_runtime=bool(option_get("debug", "log_runtime", True)),
            log_routes=bool(option_get("debug", "log_routes", True)),
            log_inputs=bool(option_get("debug", "log_inputs", True)),
            log_outputs=bool(option_get("debug", "log_outputs", True)),
            preview_chars=int(option_get("debug", "preview_chars", 280)),
        )

        # Entry: explicit START target wins; otherwise fall back to lowest-id agent.
        if g.start_targets:
            current_ids: List[str] = [g.start_targets[0]]
            self.logger.info(f"Using explicit START -> {current_ids[0]}")
        else:
            default_agent = g.pick_default_start_agent()
            if default_agent is None:
                self.logger.error("No START and no agents in schema.")
                return FlowResult(ctx=ctx, final_output="", last_response_id=None)
            current_ids = [default_agent]
            self.logger.info(f"No START found, using lowest-id agent: {default_agent}")

        # State
        final_output = ""
        last_response_id: Optional[str] = None

        # Initial messages for the very first agent in flow
        initial_messages: List[TResponseInputItem] = sanitize_input_items(list(messages or []))
        first_dispatch_done: bool = False
        last_plain_output: str = ""  # what was displayed to UI in the previous step

        steps = 0

        # Shared stream handler (bot-to-bot style)
        handler = StreamHandler(self.window, bridge)
        begin = True

        # max_iterations == 0 disables the step cap entirely.
        while current_ids and (steps < max_iterations or max_iterations == 0) and not bridge.stopped():
            step_start = perf_counter()
            current_id = current_ids[0]
            steps += 1

            # END node
            if current_id in fs.ends:
                self.logger.info(f"Reached END node: {current_id}")
                break

            # Validate agent: unknown ids jump to the first END node if one exists.
            if current_id not in fs.agents:
                self.logger.warning(f"Next id {current_id} is not an agent; stopping or jumping to END.")
                if g.end_nodes:
                    current_ids = [g.end_nodes[0]]
                    continue
                break

            node: AgentNode = fs.agents[current_id]
            self.logger.debug(f"[step {steps}] agent_id={node.id} name={node.name} outs={node.outputs}")

            # Resolve per-node runtime (model, instructions, tool permissions)
            node_rt = resolve_node_runtime(
                window=self.window,
                node=node,
                option_get=option_get,
                default_model=model,
                base_prompt=base_prompt,
                schema_allow_local=node.allow_local_tools,
                schema_allow_remote=node.allow_remote_tools,
                default_allow_local=allow_local_tools_default,
                default_allow_remote=allow_remote_tools_default,
            )

            if dbg.log_runtime:
                instr_preview = ellipsize(node_rt.instructions, dbg.preview_chars)
                self.logger.debug(
                    f"[runtime] model={getattr(node_rt.model,'name',str(node_rt.model))} "
                    f"allow_local={node_rt.allow_local_tools} allow_remote={node_rt.allow_remote_tools} "
                    f"instructions='{instr_preview}'"
                )

            # Memory selection and INPUT BUILD (memory-first policy)
            mem_id = g.agent_to_memory.get(current_id)
            mem_state = mem.get(mem_id) if mem_id else None
            if dbg.log_runtime:
                mem_info = f"{mem_id} (len={len(mem_state.items)})" if mem_state and mem_state.items else (mem_id or "-")
                self.logger.debug(f"[memory] attached={bool(mem_id)} mem_id={mem_info}")

            input_items: List[TResponseInputItem]
            input_source = ""  # tag describing where the input came from (debug only)

            if mem_state and not mem_state.is_empty():
                # memory present and already has history
                input_items = list(mem_state.items)
                input_source = "memory:existing"
            elif mem_state:
                # memory present but empty -> seed from last output or initial messages
                if last_plain_output and last_plain_output.strip():
                    input_items = [{"role": "user", "content": last_plain_output}]
                    input_source = "memory:seeded_from_last"
                else:
                    input_items = list(initial_messages)
                    input_source = "memory:seeded_from_initial"
            else:
                # no memory -> first agent gets initial messages; others get only last output
                if not first_dispatch_done:
                    input_items = list(initial_messages)
                    input_source = "no-mem:initial"
                else:
                    if last_plain_output and last_plain_output.strip():
                        input_items = [{"role": "user", "content": last_plain_output}]
                        input_source = "no-mem:last_output"
                    else:
                        input_items = list(initial_messages)
                        input_source = "no-mem:fallback_initial"

            prepared_items = sanitize_input_items(input_items)

            if dbg.log_inputs:
                self.logger.debug(f"[input] source={input_source} items={len(prepared_items)} "
                                  f"preview={items_preview(prepared_items, dbg.preview_chars)}")

            # Build agent with per-node runtime
            built = factory.build(
                node=node,
                node_runtime=node_rt,
                preset=preset,
                function_tools=function_tools,
                force_router=False,  # auto on multi-output
                friendly_map={aid: a.name or aid for aid, a in fs.agents.items()},
                handoffs_enabled=True,
                context=agent_kwargs.get("context"),
            )
            agent = built.instance
            multi_output = built.multi_output
            allowed_routes = built.allowed_routes

            if dbg.log_runtime and multi_output:
                self.logger.debug(f"[routing] multi_output=True routes={allowed_routes} mode={router_stream_mode}")

            # Prepare run kwargs
            run_kwargs: Dict[str, Any] = {
                "input": prepared_items,
                "max_turns": int(agent_kwargs.get("max_iterations", max_iterations)),
            }
            if trace_id:
                run_kwargs["trace_id"] = trace_id

            # Header for UI
            title = f"\n\n**{built.name}**\n\n"
            ctx.stream = title
            bridge.on_step(ctx, begin)
            begin = False
            handler.begin = begin  # begin is False from here on; header already emitted
            if not use_partial_ctx:
                handler.to_buffer(title)

            display_text = ""  # what we show to UI for this step
            next_id: Optional[str] = None

            # --- EXECUTION ---
            if stream and not multi_output:
                # Full token streaming (single-output agent)
                result = Runner.run_streamed(agent, **run_kwargs)
                handler.reset()

                async for event in result.stream_events():
                    if bridge.stopped():
                        result.cancel()
                        bridge.on_stop(ctx)
                        break
                    display_text, last_response_id = handler.handle(event, ctx)

                # Prepare next inputs from result for memory update (if any)
                input_items_next = result.to_input_list()
                input_items_next = sanitize_input_items(input_items_next)

                # Update memory only if attached
                if mem_state:
                    mem_state.update_from_result(input_items_next, last_response_id)

                # Route: first edge or END
                outs = g.get_next(current_id)
                next_id = outs[0] if outs else g.first_connected_end(current_id)

            else:
                # Multi-output or non-stream
                if multi_output:
                    mode = (router_stream_mode or "off").lower()
                    if stream and mode == "realtime":
                        # Realtime router streaming: stream content as tokens arrive
                        result = Runner.run_streamed(agent, **run_kwargs)
                        rts = RealtimeRouterStreamer(
                            window=self.window,
                            bridge=bridge,
                            handler=handler if not use_partial_ctx else None,
                            buffer_to_handler=(not use_partial_ctx),
                            logger=self.logger,
                        )
                        rts.reset()

                        async for event in result.stream_events():
                            if bridge.stopped():
                                result.cancel()
                                bridge.on_stop(ctx)
                                break
                            rts.handle_event(event, ctx)
                            if rts.last_response_id:
                                last_response_id = rts.last_response_id

                        raw_text = rts.buffer or ""
                        decision = parse_route_output(raw_text, allowed_routes)
                        display_text = decision.content or ""

                        # Prepare next inputs from streamed result, patch assistant content -> content
                        input_items_next = result.to_input_list()
                        input_items_next = patch_last_assistant_output(input_items_next, decision.content or "")

                        # Update memory if attached
                        if mem_state:
                            mem_state.update_from_result(input_items_next, last_response_id)

                        # Route decision
                        if decision.valid:
                            next_id = decision.route
                        else:
                            if dbg.log_routes:
                                self.logger.warning("[router-realtime] Invalid JSON; fallback to first route.")
                            next_id = allowed_routes[0] if allowed_routes else None

                    elif stream and mode == "delayed":
                        # Delayed router streaming: collect tokens silently, reveal once
                        result = Runner.run_streamed(agent, **run_kwargs)
                        delayed = DelayedRouterStreamer(self.window, bridge)
                        delayed.reset()

                        async for event in result.stream_events():
                            if bridge.stopped():
                                result.cancel()
                                bridge.on_stop(ctx)
                                break
                            _, rid = delayed.handle_event(event, ctx)
                            if rid:
                                last_response_id = rid

                        raw_text = delayed.buffer or ""
                        decision = parse_route_output(raw_text, allowed_routes)
                        display_text = decision.content or ""
                        if display_text:
                            # Reveal the collected content to the UI in one shot.
                            ctx.stream = display_text
                            bridge.on_step(ctx, False)
                            if not use_partial_ctx:
                                handler.to_buffer(display_text)

                        input_items_next = result.to_input_list()
                        input_items_next = patch_last_assistant_output(input_items_next, display_text)

                        if mem_state:
                            mem_state.update_from_result(input_items_next, last_response_id)

                        if decision.valid:
                            next_id = decision.route
                        else:
                            if dbg.log_routes:
                                self.logger.warning(f"[router-delayed] Invalid JSON: {decision.error}; fallback first route.")
                            next_id = allowed_routes[0] if allowed_routes else None
                    else:
                        # No streaming for router: run to completion, then emit only content
                        result = await Runner.run(agent, **run_kwargs)
                        last_response_id = getattr(result, "last_response_id", None)
                        raw_text = extract_text_output(result)
                        decision = parse_route_output(raw_text, allowed_routes)
                        display_text = decision.content or ""
                        if display_text:
                            ctx.stream = display_text
                            bridge.on_step(ctx, False)
                            if not use_partial_ctx:
                                handler.to_buffer(display_text)

                        input_items_next = result.to_input_list()
                        input_items_next = patch_last_assistant_output(input_items_next, display_text)

                        if mem_state:
                            mem_state.update_from_result(input_items_next, last_response_id)

                        if decision.valid:
                            next_id = decision.route
                        else:
                            if dbg.log_routes:
                                self.logger.warning(f"[router-off] Invalid JSON: {decision.error}; fallback first route.")
                            next_id = allowed_routes[0] if allowed_routes else None
                else:
                    # Single-output, non-stream path
                    result = await Runner.run(agent, **run_kwargs)
                    last_response_id = getattr(result, "last_response_id", None)
                    raw_text = extract_text_output(result)
                    display_text = raw_text or ""
                    if display_text:
                        ctx.stream = display_text
                        bridge.on_step(ctx, False)
                        if not use_partial_ctx:
                            handler.to_buffer(display_text)

                    input_items_next = result.to_input_list()
                    input_items_next = sanitize_input_items(input_items_next)

                    if mem_state:
                        mem_state.update_from_result(input_items_next, last_response_id)

                    outs = g.get_next(current_id)
                    next_id = outs[0] if outs else g.first_connected_end(current_id)

            # DEBUG: output + route
            if dbg.log_outputs:
                self.logger.debug(f"[output] preview='{ellipsize(display_text, dbg.preview_chars)}' "
                                  f"last_response_id={last_response_id}")

            if dbg.log_routes:
                self.logger.debug(f"[route] current={current_id} -> next={next_id} "
                                  f"(end_connected={g.first_connected_end(current_id)})")

            # Mark dispatch and remember last plain output (for next no-memory agent)
            first_dispatch_done = True
            last_plain_output = display_text
            final_output = display_text

            # Resolve next id / END: a literal "end" route maps to a concrete END node id.
            if isinstance(next_id, str) and next_id.lower() == "end":
                end_id = g.first_connected_end(current_id) or (g.end_nodes[0] if g.end_nodes else None)
                current_ids = [end_id] if end_id else []
            elif next_id:
                current_ids = [next_id]
            else:
                end_id = g.first_connected_end(current_id)
                current_ids = [end_id] if end_id else []

            # UI separation after each agent step
            is_end = True if not current_ids else (current_ids[0] in fs.ends)
            if use_partial_ctx:
                # Partial-ctx mode: close out this step's ctx and get a fresh one.
                ctx = bridge.on_next_ctx(
                    ctx=ctx,
                    input="",
                    output=display_text,
                    response_id=last_response_id or "",
                    finish=is_end,
                    stream=True,
                )
                handler.new()
            else:
                bridge.on_next(ctx)

            # Step duration
            dur = perf_counter() - step_start
            self.logger.debug(f"[step {steps}] duration={dur:.3f}s")

        if bridge.stopped():
            bridge.on_stop(ctx)

        self.logger.info(f"Flow finished. steps={steps} final_len={len(final_output)}")
        return FlowResult(ctx=ctx, final_output=final_output, last_response_id=last_response_id)
@@ -0,0 +1,125 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from dataclasses import dataclass, field
14
+ from typing import Dict, List, Optional, Any
15
+
16
+
17
@dataclass
class BaseNode:
    """Common fields shared by all NodeEditor schema nodes."""
    id: str    # unique node id within the schema
    type: str  # node type tag: "agent" | "start" | "end" | "memory"
    slots: Dict[str, Any] = field(default_factory=dict)  # raw slot payload from the exported schema
22
+
23
+
24
@dataclass
class AgentNode(BaseNode):
    """An agent node: name, instruction, tool permissions and its connections."""
    name: str = ""               # display name from the "name" slot
    instruction: str = ""        # system instruction from the "instruction" slot
    allow_remote_tools: bool = True  # from "remote_tools" slot; default allows remote tools
    allow_local_tools: bool = True   # from "local_tools" slot; default allows local tools
    outputs: List[str] = field(default_factory=list)  # outgoing connection ids ("output"/"out")
    inputs: List[str] = field(default_factory=list)   # incoming connection ids ("input"/"in")
    memory_out: Optional[str] = None  # single mem by spec
    memory_in: List[str] = field(default_factory=list)  # not used, but kept for completeness
34
+
35
+
36
@dataclass
class StartNode(BaseNode):
    """Flow entry node; only carries outgoing connections."""
    outputs: List[str] = field(default_factory=list)  # outgoing connection ids ("output"/"out")
39
+
40
+
41
@dataclass
class EndNode(BaseNode):
    """Flow terminal node; only carries incoming connections."""
    inputs: List[str] = field(default_factory=list)  # incoming connection ids ("input"/"in")
44
+
45
+
46
@dataclass
class MemoryNode(BaseNode):
    """Shared memory node referenced by one or more agents."""
    name: str = ""  # display name from the "name" slot
    agents: List[str] = field(default_factory=list)  # agents connected to this memory
50
+
51
+
52
@dataclass
class FlowSchema:
    """Parsed NodeEditor schema, indexed by node id per node kind."""
    agents: Dict[str, AgentNode] = field(default_factory=dict)    # agent nodes by id
    memories: Dict[str, MemoryNode] = field(default_factory=dict) # memory nodes by id
    starts: Dict[str, StartNode] = field(default_factory=dict)    # start nodes by id
    ends: Dict[str, EndNode] = field(default_factory=dict)        # end nodes by id
58
+
59
+
60
+ def _safe_get(d: Dict[str, Any], *keys, default=None):
61
+ cur = d
62
+ for k in keys:
63
+ if not isinstance(cur, dict):
64
+ return default
65
+ if k not in cur:
66
+ return default
67
+ cur = cur[k]
68
+ return cur
69
+
70
+
71
def parse_schema(schema: List[Dict[str, Any]]) -> FlowSchema:
    """
    Parse NodeEditor-exported schema list into FlowSchema.

    Slot values are read defensively via _safe_get; entries with an
    unrecognized "type" are skipped.
    """
    result = FlowSchema()
    for entry in schema:
        node_type = entry.get("type")
        node_id = entry.get("id")
        node_slots = entry.get("slots", {}) or {}

        def slot_list(*path):
            # Read a list-valued slot; a missing/empty value collapses to [].
            return list(_safe_get(node_slots, *path, default=[])) or []

        if node_type == "agent":
            result.agents[node_id] = AgentNode(
                id=node_id,
                type=node_type,
                slots=node_slots,
                name=_safe_get(node_slots, "name", default=""),
                instruction=_safe_get(node_slots, "instruction", default=""),
                allow_remote_tools=bool(_safe_get(node_slots, "remote_tools", default=True)),
                allow_local_tools=bool(_safe_get(node_slots, "local_tools", default=True)),
                outputs=slot_list("output", "out"),
                inputs=slot_list("input", "in"),
                # Spec allows a single memory output; take the first entry or None.
                memory_out=(_safe_get(node_slots, "memory", "out", default=[None]) or [None])[0],
                memory_in=slot_list("memory", "in"),
            )
        elif node_type == "start":
            result.starts[node_id] = StartNode(
                id=node_id,
                type=node_type,
                slots=node_slots,
                outputs=slot_list("output", "out"),
            )
        elif node_type == "end":
            result.ends[node_id] = EndNode(
                id=node_id,
                type=node_type,
                slots=node_slots,
                inputs=slot_list("input", "in"),
            )
        elif node_type == "memory":
            result.memories[node_id] = MemoryNode(
                id=node_id,
                type=node_type,
                slots=node_slots,
                name=_safe_get(node_slots, "name", default=""),
                agents=slot_list("input", "in"),
            )

    return result