pygpt_net-2.6.61-py3-none-any.whl → pygpt_net-2.6.63-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/response.py +8 -2
  4. pygpt_net/controller/presets/editor.py +65 -1
  5. pygpt_net/controller/settings/profile.py +16 -4
  6. pygpt_net/controller/settings/workdir.py +30 -5
  7. pygpt_net/controller/theme/common.py +4 -2
  8. pygpt_net/controller/theme/markdown.py +2 -2
  9. pygpt_net/controller/theme/theme.py +2 -1
  10. pygpt_net/controller/ui/ui.py +31 -3
  11. pygpt_net/core/agents/custom/llama_index/runner.py +30 -52
  12. pygpt_net/core/agents/custom/runner.py +199 -76
  13. pygpt_net/core/agents/runners/llama_workflow.py +122 -12
  14. pygpt_net/core/agents/runners/openai_workflow.py +2 -1
  15. pygpt_net/core/node_editor/types.py +13 -1
  16. pygpt_net/core/render/web/renderer.py +76 -11
  17. pygpt_net/data/config/config.json +3 -3
  18. pygpt_net/data/config/models.json +3 -3
  19. pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
  20. pygpt_net/data/config/presets/agent_openai_coder.json +1 -15
  21. pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
  22. pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
  23. pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
  24. pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
  25. pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
  26. pygpt_net/data/config/presets/agent_supervisor.json +1 -11
  27. pygpt_net/data/css/style.dark.css +18 -0
  28. pygpt_net/data/css/style.light.css +20 -1
  29. pygpt_net/data/js/app/runtime.js +4 -1
  30. pygpt_net/data/js/app.min.js +3 -2
  31. pygpt_net/data/locale/locale.de.ini +2 -0
  32. pygpt_net/data/locale/locale.en.ini +7 -0
  33. pygpt_net/data/locale/locale.es.ini +2 -0
  34. pygpt_net/data/locale/locale.fr.ini +2 -0
  35. pygpt_net/data/locale/locale.it.ini +2 -0
  36. pygpt_net/data/locale/locale.pl.ini +3 -1
  37. pygpt_net/data/locale/locale.uk.ini +2 -0
  38. pygpt_net/data/locale/locale.zh.ini +2 -0
  39. pygpt_net/item/ctx.py +23 -1
  40. pygpt_net/js_rc.py +13 -10
  41. pygpt_net/provider/agents/base.py +0 -0
  42. pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
  43. pygpt_net/provider/agents/llama_index/workflow/codeact.py +9 -6
  44. pygpt_net/provider/agents/llama_index/workflow/openai.py +38 -11
  45. pygpt_net/provider/agents/llama_index/workflow/planner.py +248 -28
  46. pygpt_net/provider/agents/llama_index/workflow/supervisor.py +60 -10
  47. pygpt_net/provider/agents/openai/agent.py +3 -1
  48. pygpt_net/provider/agents/openai/agent_b2b.py +17 -13
  49. pygpt_net/provider/agents/openai/agent_planner.py +617 -258
  50. pygpt_net/provider/agents/openai/agent_with_experts.py +4 -1
  51. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +8 -6
  52. pygpt_net/provider/agents/openai/agent_with_feedback.py +8 -6
  53. pygpt_net/provider/agents/openai/evolve.py +12 -8
  54. pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
  55. pygpt_net/provider/agents/openai/supervisor.py +292 -37
  56. pygpt_net/provider/api/openai/agents/response.py +1 -0
  57. pygpt_net/provider/api/x_ai/__init__.py +0 -0
  58. pygpt_net/provider/core/agent/__init__.py +0 -0
  59. pygpt_net/provider/core/agent/base.py +0 -0
  60. pygpt_net/provider/core/agent/json_file.py +0 -0
  61. pygpt_net/provider/core/config/patch.py +8 -0
  62. pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
  63. pygpt_net/provider/llms/base.py +0 -0
  64. pygpt_net/provider/llms/deepseek_api.py +0 -0
  65. pygpt_net/provider/llms/google.py +0 -0
  66. pygpt_net/provider/llms/hugging_face_api.py +0 -0
  67. pygpt_net/provider/llms/hugging_face_router.py +0 -0
  68. pygpt_net/provider/llms/mistral.py +0 -0
  69. pygpt_net/provider/llms/perplexity.py +0 -0
  70. pygpt_net/provider/llms/x_ai.py +0 -0
  71. pygpt_net/tools/agent_builder/tool.py +6 -0
  72. pygpt_net/tools/agent_builder/ui/dialogs.py +0 -41
  73. pygpt_net/ui/layout/toolbox/presets.py +14 -2
  74. pygpt_net/ui/main.py +2 -2
  75. pygpt_net/ui/widget/dialog/confirm.py +55 -5
  76. pygpt_net/ui/widget/draw/painter.py +90 -1
  77. pygpt_net/ui/widget/lists/preset.py +289 -25
  78. pygpt_net/ui/widget/node_editor/editor.py +53 -15
  79. pygpt_net/ui/widget/node_editor/node.py +82 -104
  80. pygpt_net/ui/widget/node_editor/view.py +4 -5
  81. pygpt_net/ui/widget/textarea/input.py +155 -21
  82. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/METADATA +22 -8
  83. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/RECORD +70 -70
  84. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/LICENSE +0 -0
  85. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/WHEEL +0 -0
  86. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/entry_points.txt +0 -0
pygpt_net/core/agents/custom/runner.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.25 14:00:00 #
+# Updated Date: 2025.09.27 06:00:00 #
 # ================================================== #
 
 from __future__ import annotations
@@ -62,13 +62,133 @@ class FlowOrchestrator:
     - First agent (in the whole flow) gets full initial messages from the app.
     - Next agent WITHOUT memory gets only last step's displayed content as a single 'user' message.
     - Agent WITH memory:
-      * if memory has items -> use them;
-      * if memory empty -> seed input from last displayed content (or initial messages as fallback).
+      * if memory has items -> use base history (items[:-1]) and pass last displayed content as a single 'user' baton;
+      * if memory empty -> seed baton from last displayed content (or initial messages as fallback).
     """
     def __init__(self, window, logger: Optional[Logger] = None) -> None:
        self.window = window
        self.logger = logger or NullLogger()
 
+    # ---------- Helpers (production-ready) ----------
+
+    def _extract_text_from_item(self, item: TResponseInputItem) -> str:
+        """Best-effort extract plain text from TResponseInputItem."""
+        if isinstance(item, dict):
+            content = item.get("content", "")
+            if isinstance(content, str):
+                return content
+            if isinstance(content, list):
+                parts = []
+                for p in content:
+                    if isinstance(p, dict):
+                        t = p.get("text")
+                        if isinstance(t, str):
+                            parts.append(t)
+                return "\n".join(parts)
+            return ""
+        if isinstance(item, str):
+            return item
+        return ""
+
+    def _build_baton_input(
+        self,
+        *,
+        node_id: str,
+        g: FlowGraph,
+        mem: MemoryManager,
+        initial_messages: List[TResponseInputItem],
+        first_dispatch_done: bool,
+        last_plain_output: str,
+        dbg: DebugConfig,
+    ) -> tuple[List[TResponseInputItem], str, Optional[str], Any, str]:
+        """
+        Returns: (prepared_items, baton_user_text, mem_id, mem_state, source_tag)
+        Mirrors LI baton/memory policy.
+        """
+        mem_id = g.agent_to_memory.get(node_id)
+        mem_state = mem.get(mem_id) if mem_id else None
+
+        baton_user_text = ""
+        source = ""
+
+        if mem_state and mem_state.items:
+            # memory with history -> base history + baton from last output (preferred)
+            base_items = list(mem_state.items[:-1]) if len(mem_state.items) >= 1 else []
+            if last_plain_output and last_plain_output.strip():
+                baton_user_text = last_plain_output
+                prepared = base_items + [{"role": "user", "content": baton_user_text}]
+                source = "memory:existing_to_user_baton"
+            else:
+                # fallback: use last assistant content as baton
+                last_ass = mem_state.items[-1] if isinstance(mem_state.items[-1], dict) else {}
+                if isinstance(last_ass.get("content"), str):
+                    baton_user_text = last_ass.get("content", "")
+                elif isinstance(last_ass.get("content"), list) and last_ass["content"]:
+                    baton_user_text = last_ass["content"][0].get("text", "") or ""
+                else:
+                    baton_user_text = ""
+                prepared = base_items + [{"role": "user", "content": baton_user_text}]
+                source = "memory:existing_to_last_assistant"
+            return sanitize_input_items(prepared), baton_user_text, mem_id, mem_state, source
+
+        if mem_state:
+            # memory attached but empty -> seed from last output else from initial (use last user msg as baton)
+            if last_plain_output and last_plain_output.strip():
+                baton_user_text = last_plain_output
+                prepared = [{"role": "user", "content": baton_user_text}]
+                source = "memory:seed_from_last_output"
+            else:
+                base_items = list(initial_messages[:-1]) if initial_messages else []
+                last_item = initial_messages[-1] if initial_messages else {"role": "user", "content": ""}
+                baton_user_text = self._extract_text_from_item(last_item)
+                prepared = base_items + [{"role": "user", "content": baton_user_text}]
+                source = "memory:seed_from_initial"
+            return sanitize_input_items(prepared), baton_user_text, mem_id, mem_state, source
+
+        # no memory attached
+        if not first_dispatch_done:
+            # first agent: pass initial messages as-is; baton is last user text (for potential external memory)
+            last_item = initial_messages[-1] if initial_messages else {"role": "user", "content": ""}
+            baton_user_text = self._extract_text_from_item(last_item)
+            return sanitize_input_items(list(initial_messages)), baton_user_text, None, None, "no-mem:first_initial"
+        else:
+            baton_user_text = last_plain_output if last_plain_output and last_plain_output.strip() else (
+                self._extract_text_from_item(initial_messages[-1]) if initial_messages else ""
+            )
+            prepared = [{"role": "user", "content": baton_user_text}]
+            return sanitize_input_items(prepared), baton_user_text, None, None, "no-mem:last_output"
+
+    def _update_memory_after_step(
+        self,
+        *,
+        node_id: str,
+        mem_state: Any,
+        baton_user_text: str,
+        display_text: str,
+        last_response_id: Optional[str],
+        dbg: DebugConfig,
+    ) -> None:
+        """Update memory strictly with [user baton, assistant display_text], mirroring LI semantics."""
+        if not mem_state:
+            return
+        base_items = list(mem_state.items[:-1]) if getattr(mem_state, "items", None) else []
+        new_mem = (base_items or []) + [
+            {"role": "user", "content": baton_user_text or ""},
+            {"role": "assistant", "content": [{"type": "output_text", "text": display_text or ""}]},
+        ]
+        try:
+            mem_state.set_from(new_mem, last_response_id)
+            if dbg.log_inputs:
+                self.logger.debug(
+                    f"[memory] {node_id} updated len {len(base_items)} -> {len(new_mem)} "
+                    f"user='{ellipsize(baton_user_text or '', dbg.preview_chars)}' "
+                    f"assist='{ellipsize(display_text or '', dbg.preview_chars)}'"
+                )
+        except Exception as e:
+            self.logger.error(f"[memory] update failed for {node_id}: {e}")
+
+    # ---------- Main flow ----------
+
     async def run_flow(
         self,
         schema: List[Dict[str, Any]],
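For context, the baton policy these helpers implement can be condensed into a minimal, runnable sketch; the `MemState` class and plain-dict message shapes below are illustrative stand-ins, not the package's actual types:

```python
# Illustrative sketch of the baton policy; MemState is a hypothetical stand-in.
from typing import Any, Dict, List, Optional


class MemState:
    """Toy per-agent memory state."""
    def __init__(self, items: Optional[List[Dict[str, Any]]] = None) -> None:
        self.items = items or []


def build_baton_input(
    mem_state: Optional[MemState],
    initial_messages: List[Dict[str, Any]],
    first_dispatch_done: bool,
    last_plain_output: str,
) -> List[Dict[str, Any]]:
    """Base history (items[:-1]) plus the previous step's output as a 'user' baton."""
    if mem_state and mem_state.items:
        base = mem_state.items[:-1]
        baton = last_plain_output or str(mem_state.items[-1].get("content", ""))
        return base + [{"role": "user", "content": baton}]
    if mem_state:  # attached but empty -> seed from last output, else from initial
        if last_plain_output.strip():
            return [{"role": "user", "content": last_plain_output}]
        return list(initial_messages)
    if not first_dispatch_done:  # first agent in the whole flow
        return list(initial_messages)
    return [{"role": "user", "content": last_plain_output}]


# An agent with existing memory gets its base history plus the baton:
state = MemState([
    {"role": "user", "content": "Plan a trip"},
    {"role": "assistant", "content": "Day 1: ..."},
])
print(build_baton_input(state, [], True, "Refine day 2"))
# [{'role': 'user', 'content': 'Plan a trip'}, {'role': 'user', 'content': 'Refine day 2'}]
```

The point of `items[:-1]` is that the previous assistant turn is superseded by the baton: the next agent receives the last displayed content as its incoming 'user' message rather than as stale assistant history.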
@@ -174,55 +294,34 @@
                 f" role='{node_rt.role}'"
             )
 
-            # Memory selection and INPUT BUILD (memory-first policy)
-            mem_id = g.agent_to_memory.get(current_id)
-            mem_state = mem.get(mem_id) if mem_id else None
-            if dbg.log_runtime:
-                mem_info = f"{mem_id} (len={len(mem_state.items)})" if mem_state and mem_state.items else (mem_id or "-")
-                self.logger.debug(f"[memory] attached={bool(mem_id)} mem_id={mem_info}")
-
-            input_items: List[TResponseInputItem]
-            input_source = ""
-
-            if mem_state and not mem_state.is_empty():
-                # memory present and already has history
-                input_items = list(mem_state.items)
-                input_source = "memory:existing"
-            elif mem_state:
-                # memory present but empty -> seed from last output or initial messages
-                if last_plain_output and last_plain_output.strip():
-                    input_items = [{"role": "user", "content": last_plain_output}]
-                    input_source = "memory:seeded_from_last"
-                else:
-                    input_items = list(initial_messages)
-                    input_source = "memory:seeded_from_initial"
-            else:
-                # no memory -> first agent gets initial messages; others get only last output
-                if not first_dispatch_done:
-                    input_items = list(initial_messages)
-                    input_source = "no-mem:initial"
-                else:
-                    if last_plain_output and last_plain_output.strip():
-                        input_items = [{"role": "user", "content": last_plain_output}]
-                        input_source = "no-mem:last_output"
-                    else:
-                        input_items = list(initial_messages)
-                        input_source = "no-mem:fallback_initial"
-
-            prepared_items = sanitize_input_items(input_items)
+            # Input build using baton policy (LI parity)
+            prepared_items, baton_user_text, mem_id, mem_state, input_source = self._build_baton_input(
+                node_id=current_id,
+                g=g,
+                mem=mem,
+                initial_messages=initial_messages,
+                first_dispatch_done=first_dispatch_done,
+                last_plain_output=last_plain_output,
+                dbg=dbg,
+            )
 
             if dbg.log_inputs:
                 self.logger.debug(f"[input] source={input_source} items={len(prepared_items)} "
                                   f"preview={items_preview(prepared_items, dbg.preview_chars)}")
+                if mem_id:
+                    mem_info = f"{mem_id} (len={len(mem_state.items) if mem_state else 0})"
+                    self.logger.debug(f"[memory] attached={bool(mem_id)} mem_id={mem_info}")
 
             # Build agent with per-node runtime
+            # Restrict friendly_map only to allowed outgoing routes of current node
+            allowed_map = {rid: fs.agents[rid].name or rid for rid in (node.outputs or []) if rid in fs.agents}
             built = factory.build(
                 node=node,
                 node_runtime=node_rt,
                 preset=preset,
                 function_tools=function_tools,
                 force_router=False,  # auto on multi-output
-                friendly_map={aid: a.name or aid for aid, a in fs.agents.items()},
+                friendly_map=allowed_map,
                 handoffs_enabled=True,
                 context=agent_kwargs.get("context"),
             )
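A toy demonstration of the `allowed_map` comprehension above (the `AgentSpec`/`Node` dataclasses are hypothetical stand-ins); ids without a matching agent in the schema simply drop out, so the router can only pick real outgoing edges:

```python
# Hypothetical shapes; only the dict comprehension mirrors the diff above.
from dataclasses import dataclass, field
from typing import Dict, List


@dataclass
class AgentSpec:
    name: str


@dataclass
class Node:
    outputs: List[str] = field(default_factory=list)


agents: Dict[str, AgentSpec] = {
    "agent_a": AgentSpec("Researcher"),
    "agent_b": AgentSpec("Writer"),
}
node = Node(outputs=["agent_b", "agent_missing"])

# Unknown ids are filtered; only actual outgoing routes remain eligible.
allowed_map = {rid: agents[rid].name or rid for rid in (node.outputs or []) if rid in agents}
print(allowed_map)  # {'agent_b': 'Writer'}
```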
@@ -242,37 +341,43 @@
                 run_kwargs["trace_id"] = trace_id
 
             # Header for UI
-            title = f"\n\n**{built.name}**\n\n"
-            ctx.stream = title
+            ctx.set_agent_name(agent.name)
             bridge.on_step(ctx, begin)
             begin = False
             handler.begin = begin
-            if not use_partial_ctx:
-                handler.to_buffer(title)
 
             display_text = ""  # what we show to UI for this step
             next_id: Optional[str] = None
 
             # --- EXECUTION ---
             if stream and not multi_output:
-                # Full token streaming (single-output agent)
+                # Full token streaming (single-output agent) – collect full buffer for baton
                 result = Runner.run_streamed(agent, **run_kwargs)
                 handler.reset()
+                # Optional local accumulator; prefer handler.buffer after loop
+                last_chunk = ""
 
                 async for event in result.stream_events():
                     if bridge.stopped():
                         result.cancel()
                         bridge.on_stop(ctx)
                         break
-                    display_text, last_response_id = handler.handle(event, ctx)
-
-                # Prepare next inputs from result for memory update (if any)
-                input_items_next = result.to_input_list()
-                input_items_next = sanitize_input_items(input_items_next)
-
-                # Update memory only if attached
-                if mem_state:
-                    mem_state.update_from_result(input_items_next, last_response_id)
+                    chunk, last_response_id = handler.handle(event, ctx)
+                    if chunk:
+                        last_chunk = chunk
+
+                # Use full buffer if available (ensures baton sees complete output)
+                display_text = getattr(handler, "buffer", "") or last_chunk or ""
+
+                # Update memory strictly with baton + displayed text
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 # Route: first edge or END
                 outs = g.get_next(current_id)
@@ -307,13 +412,15 @@
                 decision = parse_route_output(raw_text, allowed_routes)
                 display_text = decision.content or ""
 
-                # Prepare next inputs from streamed result, patch assistant content -> content
-                input_items_next = result.to_input_list()
-                input_items_next = patch_last_assistant_output(input_items_next, decision.content or "")
-
-                # Update memory if attached
-                if mem_state:
-                    mem_state.update_from_result(input_items_next, last_response_id)
+                # Update memory with baton + displayed content
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 # Route decision
                 if decision.valid:
@@ -347,11 +454,15 @@
                 if not use_partial_ctx:
                     handler.to_buffer(display_text)
 
-                input_items_next = result.to_input_list()
-                input_items_next = patch_last_assistant_output(input_items_next, display_text)
-
-                if mem_state:
-                    mem_state.update_from_result(input_items_next, last_response_id)
+                # Update memory with baton + displayed content
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 if decision.valid:
                     next_id = decision.route
@@ -372,11 +483,15 @@
                 if not use_partial_ctx:
                     handler.to_buffer(display_text)
 
-                input_items_next = result.to_input_list()
-                input_items_next = patch_last_assistant_output(input_items_next, display_text)
-
-                if mem_state:
-                    mem_state.update_from_result(input_items_next, last_response_id)
+                # Update memory with baton + displayed content
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 if decision.valid:
                     next_id = decision.route
@@ -396,11 +511,15 @@
                 if not use_partial_ctx:
                     handler.to_buffer(display_text)
 
-                input_items_next = result.to_input_list()
-                input_items_next = sanitize_input_items(input_items_next)
-
-                if mem_state:
-                    mem_state.update_from_result(input_items_next, last_response_id)
+                # Update memory with baton + displayed text
+                self._update_memory_after_step(
+                    node_id=current_id,
+                    mem_state=mem_state,
+                    baton_user_text=baton_user_text,
+                    display_text=display_text,
+                    last_response_id=last_response_id,
+                    dbg=dbg,
+                )
 
                 outs = g.get_next(current_id)
                 next_id = outs[0] if outs else g.first_connected_end(current_id)
@@ -444,6 +563,10 @@
             else:
                 bridge.on_next(ctx)
 
+            # set next agent name if not at the end
+            if current_ids and current_ids[0] in fs.agents:
+                ctx.set_agent_name(fs.agents[current_ids[0]].name)
+
             # Step duration
             dur = perf_counter() - step_start
             self.logger.debug(f"[step {steps}] duration={dur:.3f}s")
pygpt_net/core/agents/runners/llama_workflow.py
@@ -1,3 +1,5 @@
+# core/agents/runners/llama_workflow.py
+
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 # ================================================== #
@@ -6,7 +8,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.24 02:00:00 #
+# Updated Date: 2025.09.27 06:00:00 #
 # ================================================== #
 
 import re
@@ -18,6 +20,7 @@ from llama_index.core.agent.workflow import (
     ToolCallResult,
     AgentStream,
     AgentOutput,
+    # AgentInput,  # not needed currently
 )
 from workflows.errors import WorkflowCancelledByUser
 
@@ -38,6 +41,7 @@ class LlamaWorkflow(BaseRunner):
         """
         super(LlamaWorkflow, self).__init__(window)
         self.window = window
+        self.last_response_id = None
 
     async def run(
         self,
@@ -177,12 +181,13 @@
 
         prev_output = ctx.live_output
         if prev_output:
-            prev_output = self.filter_output(prev_output)  # remove all <execute>...</execute>
+            prev_output = self.filter_output(prev_output)  # remove all [!exec]...[/!exec]
 
         response_ctx.set_agent_final_response(ctx.agent_final_response)  # always set to further use
         response_ctx.set_output(prev_output)  # append from stream
         response_ctx.extra["agent_output"] = True  # mark as output response
         response_ctx.extra["agent_finish"] = True  # mark as finished
+        response_ctx.set_agent_name(ctx.get_agent_name())  # store last agent name
 
         if "agent_input" in response_ctx.extra:
             del response_ctx.extra["agent_input"]  # remove agent input from extra
@@ -254,8 +259,15 @@
         item_ctx.output = ""  # empty to prevent render
         item_ctx.stream = ""  # for stream
 
+        # Keep last known agent name to avoid redundant ctx updates.
+        last_agent_name: Optional[str] = None
+
+        # Track whether current block has already produced user-visible tokens.
+        # This prevents creating empty DB items and preserves order.
+        content_written: bool = False
+        block_open: bool = False  # logical "block" opened after first StepEvent
+
         async for event in handler.stream_events():
-            print(event)
             if self.is_stopped():
                 # persist current output on stop
                 item_ctx.output = item_ctx.live_output
@@ -264,6 +276,7 @@
                 self.end_stream(item_ctx, signals)
                 await handler.cancel_run()  # cancel, will raise WorkflowCancelledByUser
                 break
+
             if isinstance(event, ToolCallResult):
                 output = f"\n-----------\nExecution result:\n{event.tool_output}"
                 if verbose:
@@ -271,8 +284,11 @@
                 formatted = "\n```output\n" + str(event.tool_output) + "\n```\n"
                 item_ctx.live_output += formatted
                 item_ctx.stream = formatted
+                content_written = True
                 if item_ctx.stream_agent_output and flush:
                     self.send_stream(item_ctx, signals, begin)
+                    begin = False
+
             elif isinstance(event, ToolCall):
                 if "code" in event.tool_kwargs:
                     output = f"\n-----------\nTool call code:\n{event.tool_kwargs['code']}"
@@ -281,37 +297,97 @@
                 formatted = "\n```python\n" + str(event.tool_kwargs['code']) + "\n```\n"
                 item_ctx.live_output += formatted
                 item_ctx.stream = formatted
+                content_written = True
                 if item_ctx.stream_agent_output and flush:
                     self.send_stream(item_ctx, signals, begin)
+                    begin = False
+
             elif isinstance(event, StepEvent):
+                # UI splitting strategy aligned with OpenAI flow:
+                # - do NOT start a new DB item at the first StepEvent
+                # - only finalize the previous item if it already produced content
+                #   (prevents empty items and ordering glitches)
                 self.set_busy(signals)
                 if not use_partials:
+                    # We still want to propagate the name early if provided.
+                    try:
+                        meta = getattr(event, "meta", {}) or {}
+                        next_name = meta.get("agent_name")
+                        if next_name:
+                            last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
+                    except Exception:
+                        pass
+                    begin = True
                     continue
+
                 if verbose:
                     print("\n\n-----STEP-----\n\n")
                     print(f"[{event.name}] {event.index}/{event.total} meta={event.meta}")
-                if flush:
-                    item_ctx = self.on_next_ctx(
-                        item_ctx,
-                        signals=signals,
-                        begin=begin,
-                        stream=True,
-                    )
+
+                # If there was an open block with content -> finalize it to a new DB item.
+                if block_open and content_written:
+                    if flush:
+                        item_ctx = self.on_next_ctx(
+                            item_ctx,
+                            signals=signals,
+                            begin=begin,
+                            stream=True,
+                        )
+                    # Apply next agent name on the fresh ctx (so UI header is correct from token #1).
+                    try:
+                        meta = getattr(event, "meta", {}) or {}
+                        next_name = meta.get("agent_name")
+                        if next_name:
+                            last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
+                    except Exception:
+                        pass
+                else:
+                    # First step or previous step had no visible content: just propagate the name.
+                    try:
+                        meta = getattr(event, "meta", {}) or {}
+                        next_name = meta.get("agent_name")
+                        if next_name:
+                            last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
+                    except Exception:
+                        pass
+
+                # Prepare for the upcoming tokens (new block begins).
+                block_open = True
+                content_written = False
+                begin = True
+                continue
+
             elif isinstance(event, AgentStream):
+                # Update agent name from event if present; fallback to header parsing.
+                name = getattr(event, "current_agent_name", None)
+                if not name:
+                    name = self._guess_agent_name_from_text(getattr(event, "delta", "") or "")
+                if name:
+                    last_agent_name = self._apply_agent_name_to_ctx(item_ctx, name, last_agent_name)
+
                 if verbose:
                     print(f"{event.delta}", end="", flush=True)
                 if event.delta:
                     item_ctx.live_output += event.delta
                     item_ctx.stream = event.delta
+                    content_written = True
                     if item_ctx.stream_agent_output and flush:
                         self.send_stream(item_ctx, signals, begin)  # send stream to webview
                         begin = False
+
             elif isinstance(event, AgentOutput):
+                # Ensure final agent name is applied as well.
+                name = getattr(event, "current_agent_name", None)
+                if name:
+                    last_agent_name = self._apply_agent_name_to_ctx(item_ctx, name, last_agent_name)
                 thought, answer = self.extract_final_response(str(event))
                 if answer:
                     item_ctx.set_agent_final_response(answer)
                     if verbose:
                         print(f"\nFinal response: {answer}")
+                # Do not split the block here – we will either:
+                # - split on the next StepEvent, or
+                # - finalize once at the end (make_response), just like OpenAI flow does.
 
         return item_ctx
 
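The `block_open`/`content_written` pair introduced above acts as a small state machine; a simplified replay with string stand-ins for the workflow events:

```python
# Simplified replay of the split rule: a StepEvent finalizes the previous
# DB item only if that block actually emitted visible tokens.
events = ["step", "token", "token", "step", "step", "token"]

block_open = False
content_written = False
finalized = 0

for ev in events:
    if ev == "step":
        if block_open and content_written:
            finalized += 1  # previous block had output -> close it as a DB item
        block_open = True
        content_written = False
    else:
        content_written = True  # a visible token arrived in the current block

print(finalized)  # 1 -- the empty block between the last two steps creates nothing
```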
@@ -348,6 +424,40 @@
         next_ctx.set_output("")
         next_ctx.partial = True
         next_ctx.extra["agent_output"] = True  # allow usage in history
-
+        next_ctx.set_agent_name(ctx.get_agent_name())  # propagate agent name
         self.send_response(next_ctx, signals, KernelEvent.APPEND_DATA)
-        return next_ctx
+
+        return next_ctx
+
+    # ===== helpers for agent name propagation =====
+
+    def _apply_agent_name_to_ctx(self, ctx: CtxItem, name: str, last_known: Optional[str]) -> str:
+        """
+        Apply agent name to the context, avoiding redundant updates.
+        Falls back to ctx.extra['agent_name'] if set_agent_name is unavailable.
+        """
+        if not name:
+            return last_known or ""
+        if last_known and last_known == name:
+            return last_known
+        try:
+            if hasattr(ctx, "set_agent_name") and callable(getattr(ctx, "set_agent_name")):
+                ctx.set_agent_name(name)
+            # Always mirror into extra for downstream consumers
+            ctx.extra["agent_name"] = name
+        except Exception:
+            ctx.extra["agent_name"] = name
+        return name
+
+    def _guess_agent_name_from_text(self, text: str) -> Optional[str]:
+        """
+        Try to infer the agent name from a header like '**Name**' which our workflow emits
+        before each agent block.
+        """
+        if not text:
+            return None
+        # Look for the first bold segment – keep it lenient
+        m = re.search(r"\*\*([^*]+?)\*\*", text)
+        if m:
+            return m.group(1).strip()
+        return None
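A quick standalone check of the header-parsing fallback used by `_guess_agent_name_from_text` (same regex as the diff):

```python
# Lenient first-bold-segment match, as in _guess_agent_name_from_text.
import re
from typing import Optional


def guess_agent_name(text: str) -> Optional[str]:
    m = re.search(r"\*\*([^*]+?)\*\*", text or "")
    return m.group(1).strip() if m else None


print(guess_agent_name("\n\n**Researcher**\n\nLooking into it..."))  # Researcher
print(guess_agent_name("no header here"))  # None
```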
pygpt_net/core/agents/runners/openai_workflow.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.24 03:00:00 #
+# Updated Date: 2025.09.26 17:00:00 #
 # ================================================== #
 
 from typing import Dict, Any, List, Optional
@@ -237,6 +237,7 @@
         response_ctx.set_agent_final_response(output)  # always set to further use
         response_ctx.extra["agent_output"] = True  # mark as output response
         response_ctx.extra["agent_finish"] = True  # mark as finished
+        response_ctx.set_agent_name(ctx.get_agent_name())  # store last agent name
         response_ctx.msg_id = response_id  # set response id for OpenAI
 
         if ctx.agent_final_response:  # only if not empty
pygpt_net/core/node_editor/types.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.25 15:00:00 #
+# Updated Date: 2025.09.26 12:00:00 #
 # ================================================== #
 
 from __future__ import annotations
@@ -34,11 +34,14 @@ class PropertySpec:
 class NodeTypeSpec:
     type_name: str
     title: Optional[str] = None
+    # UI-only human-readable label used for menus; never persisted nor used as an identifier
+    display_name: Optional[str] = None
     properties: List[PropertySpec] = field(default_factory=list)
     # Below are optional extensions for agent-flow needs:
     base_id: Optional[str] = None  # base prefix for friendly ids, e.g. "agent"
     export_kind: Optional[str] = None  # short kind for export, e.g. "agent", "start"
     bg_color: Optional[str] = None  # optional per-type background color (CSS/hex)
+    max_num: Optional[int] = None  # optional per-layout cap; None or <=0 means unlimited
 
 class NodeTypeRegistry:
     """Registry for node type specifications. Extend/override in subclasses."""
@@ -56,6 +59,15 @@
     def get(self, type_name: str) -> Optional[NodeTypeSpec]:
         return self._types.get(type_name)
 
+    def display_name(self, type_name: str) -> str:
+        """Return UI label for a type: spec.display_name if non-empty, otherwise type_name."""
+        spec = self.get(type_name)
+        if spec:
+            dn = getattr(spec, "display_name", None)
+            if isinstance(dn, str) and dn.strip():
+                return dn
+        return type_name
+
     def _install_default_types(self):
         # Example/basic nodes kept intact
         self.register(NodeTypeSpec(
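Assuming `NodeTypeSpec` remains a dataclass and `register`/`display_name` behave as the diff suggests, usage would look roughly like this (import path per the files-changed list above; treat it as a sketch, not confirmed API):

```python
# Hypothetical usage sketch of the new display_name/max_num fields.
from pygpt_net.core.node_editor.types import NodeTypeRegistry, NodeTypeSpec

registry = NodeTypeRegistry()
registry.register(NodeTypeSpec(
    type_name="agent",
    display_name="Agent",  # UI menu label only; never used as an identifier
    max_num=1,             # per-layout cap; None or <= 0 means unlimited
))

print(registry.display_name("agent"))    # 'Agent'
print(registry.display_name("unknown"))  # falls back to the type name itself
```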