pygpt-net 2.6.61__py3-none-any.whl → 2.6.62__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. pygpt_net/CHANGELOG.txt +7 -0
  2. pygpt_net/__init__.py +1 -1
  3. pygpt_net/controller/chat/response.py +8 -2
  4. pygpt_net/controller/settings/profile.py +16 -4
  5. pygpt_net/controller/settings/workdir.py +30 -5
  6. pygpt_net/controller/theme/common.py +4 -2
  7. pygpt_net/controller/theme/markdown.py +2 -2
  8. pygpt_net/controller/theme/theme.py +2 -1
  9. pygpt_net/controller/ui/ui.py +31 -3
  10. pygpt_net/core/agents/custom/llama_index/runner.py +18 -3
  11. pygpt_net/core/agents/custom/runner.py +10 -5
  12. pygpt_net/core/agents/runners/llama_workflow.py +65 -5
  13. pygpt_net/core/agents/runners/openai_workflow.py +2 -1
  14. pygpt_net/core/node_editor/types.py +13 -1
  15. pygpt_net/core/render/web/renderer.py +76 -11
  16. pygpt_net/data/config/config.json +2 -2
  17. pygpt_net/data/config/models.json +2 -2
  18. pygpt_net/data/css/style.dark.css +18 -0
  19. pygpt_net/data/css/style.light.css +20 -1
  20. pygpt_net/data/locale/locale.de.ini +2 -0
  21. pygpt_net/data/locale/locale.en.ini +2 -0
  22. pygpt_net/data/locale/locale.es.ini +2 -0
  23. pygpt_net/data/locale/locale.fr.ini +2 -0
  24. pygpt_net/data/locale/locale.it.ini +2 -0
  25. pygpt_net/data/locale/locale.pl.ini +3 -1
  26. pygpt_net/data/locale/locale.uk.ini +2 -0
  27. pygpt_net/data/locale/locale.zh.ini +2 -0
  28. pygpt_net/item/ctx.py +23 -1
  29. pygpt_net/provider/agents/llama_index/workflow/codeact.py +9 -6
  30. pygpt_net/provider/agents/llama_index/workflow/openai.py +38 -11
  31. pygpt_net/provider/agents/llama_index/workflow/planner.py +36 -16
  32. pygpt_net/provider/agents/llama_index/workflow/supervisor.py +60 -10
  33. pygpt_net/provider/agents/openai/agent.py +3 -1
  34. pygpt_net/provider/agents/openai/agent_b2b.py +13 -9
  35. pygpt_net/provider/agents/openai/agent_planner.py +6 -2
  36. pygpt_net/provider/agents/openai/agent_with_experts.py +4 -1
  37. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -2
  38. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -2
  39. pygpt_net/provider/agents/openai/evolve.py +6 -2
  40. pygpt_net/provider/agents/openai/supervisor.py +3 -1
  41. pygpt_net/provider/api/openai/agents/response.py +1 -0
  42. pygpt_net/provider/core/config/patch.py +8 -0
  43. pygpt_net/tools/agent_builder/tool.py +6 -0
  44. pygpt_net/tools/agent_builder/ui/dialogs.py +0 -41
  45. pygpt_net/ui/layout/toolbox/presets.py +14 -2
  46. pygpt_net/ui/main.py +2 -2
  47. pygpt_net/ui/widget/dialog/confirm.py +27 -3
  48. pygpt_net/ui/widget/draw/painter.py +90 -1
  49. pygpt_net/ui/widget/lists/preset.py +289 -25
  50. pygpt_net/ui/widget/node_editor/editor.py +53 -15
  51. pygpt_net/ui/widget/node_editor/node.py +82 -104
  52. pygpt_net/ui/widget/node_editor/view.py +4 -5
  53. pygpt_net/ui/widget/textarea/input.py +155 -21
  54. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.62.dist-info}/METADATA +17 -8
  55. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.62.dist-info}/RECORD +58 -58
  56. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.62.dist-info}/LICENSE +0 -0
  57. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.62.dist-info}/WHEEL +0 -0
  58. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.62.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,10 @@
+ 2.6.62 (2025-09-26)
+
+ - Enhanced agent workflow execution.
+ - Improved preset list handling by adding a drop field indicator and fixing auto-scroll.
+ - Added middle-mouse button panning to Painter.
+ - Added an input character counter.
+
  2.6.61 (2025-09-26)

  - Enhanced the agents node editor, custom agent flow, and instruction following.
pygpt_net/__init__.py CHANGED
@@ -13,7 +13,7 @@ __author__ = "Marcin Szczygliński"
  __copyright__ = "Copyright 2025, Marcin Szczygliński"
  __credits__ = ["Marcin Szczygliński"]
  __license__ = "MIT"
- __version__ = "2.6.61"
+ __version__ = "2.6.62"
  __build__ = "2025-09-26"
  __maintainer__ = "Marcin Szczygliński"
  __github__ = "https://github.com/szczyglis-dev/py-gpt"
pygpt_net/controller/chat/response.py CHANGED
@@ -36,6 +36,7 @@ class Response:
  """
  super(Response, self).__init__()
  self.window = window
+ self.last_response_id = None

  def handle(
  self,
@@ -273,9 +274,14 @@ class Response:
  self.window.update_status(trans("status.agent.reasoning"))
  controller.chat.common.lock_input() # lock input, re-enable stop button

- # agent final response
+ # agent final response, with fix for async delayed finish (prevent multiple calls for the same response)
  if ctx.extra is not None and (isinstance(ctx.extra, dict) and "agent_finish" in ctx.extra):
- controller.agent.llama.on_finish(ctx) # evaluate response and continue if needed
+ consume = False
+ if self.last_response_id is None or self.last_response_id < ctx.id:
+ consume = True
+ self.last_response_id = ctx.id
+ if consume:
+ controller.agent.llama.on_finish(ctx) # evaluate response and continue if needed

  def end(
  self,
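
Note on the change above: on_finish() is now invoked at most once per response, using the monotonically increasing ctx.id as a guard against async-delayed duplicate "agent_finish" callbacks. A minimal standalone sketch of that guard, with illustrative names rather than the project's API:

    class FinishGuard:
        """Toy stand-in for the last_response_id check added to Response."""

        def __init__(self):
            self.last_response_id = None  # highest ctx id consumed so far

        def should_consume(self, ctx_id: int) -> bool:
            # Consume only responses with an id greater than anything seen before.
            if self.last_response_id is None or self.last_response_id < ctx_id:
                self.last_response_id = ctx_id
                return True
            return False  # duplicate or stale finish event is dropped

    guard = FinishGuard()
    assert guard.should_consume(10) is True   # first finish for ctx 10
    assert guard.should_consume(10) is False  # async-delayed duplicate is ignored
    assert guard.should_consume(11) is True   # next response proceeds normally
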
pygpt_net/controller/settings/profile.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.24 23:00:00 #
+ # Updated Date: 2025.09.26 13:00:00 #
  # ================================================== #

  import os
@@ -33,6 +33,8 @@ class Profile:
  self.height = 500
  self.initialized = False
  self.dialog_initialized = False
+ self.before_theme = None
+ self.before_language = None

  def setup(self):
  """Setup profile"""
@@ -54,7 +56,8 @@
  uuid: str,
  force: bool = False,
  save_current: bool = True,
- on_finish: Optional[callable] = None
+ on_finish: Optional[callable] = None,
+ is_create: bool = False,
  ):
  """
  Switch profile
@@ -63,6 +66,7 @@
  :param force: Force switch
  :param save_current: Save current profile
  :param on_finish: Callback function to call after switch
+ :param is_create: Is called from create profile
  """
  current = self.window.core.config.profile.get_current()
  if uuid == current and not force:
@@ -85,7 +89,8 @@
  self.window.controller.settings.workdir.update(
  path,
  force=True,
- profile_name=profile['name']
+ profile_name=profile['name'],
+ is_create=is_create,
  )
  else:
  self.after_update(profile['name'])
@@ -288,7 +293,14 @@

  :param uuid: profile UUID
  """
- self.switch(uuid, force=True, on_finish=self.after_create_finish)
+ self.before_theme = self.window.core.config.get("theme")
+ self.before_language = self.window.core.config.get("lang")
+ self.switch(
+ uuid,
+ force=True,
+ on_finish=self.after_create_finish,
+ is_create=True
+ )

  def after_create_finish(self, uuid: str):
  """
pygpt_net/controller/settings/workdir.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.20 23:00:00 #
+ # Updated Date: 2025.09.26 13:00:00 #
  # ================================================== #

  import copy
@@ -288,13 +288,15 @@
  def update_workdir(
  self,
  force: bool = False,
- path: str = None
+ path: str = None,
+ is_create: bool = False,
  ):
  """
  Update working directory

  :param force: boolean indicating if update should be forced (confirm)
  :param path: new working directory to set
+ :param is_create: True if called on profile creation
  """
  print("\n====================")
  print(f"Changing workdir to: {path}")
@@ -313,8 +315,25 @@
  # update path in current profile
  self.window.core.config.profile.update_current_workdir(path)

+ # save previous theme and language to retain them after workdir change
+ prev_theme = None
+ prev_lang = None
+ if is_create:
+ prev_theme = self.window.core.config.get('theme')
+ prev_lang = self.window.core.config.get('lang')
+
  # reload config
  self.window.core.config.set_workdir(path, reload=True)
+
+ # if profile is just created, use current theme and language
+ if is_create:
+ print("Using current theme and language: ", prev_theme, prev_lang)
+ if prev_theme is not None:
+ self.window.core.config.set('theme', prev_theme)
+ if prev_lang is not None:
+ self.window.core.config.set('lang', prev_lang)
+ self.window.core.config.save()
+
  self.window.core.config.set('license.accepted', True) # accept license to prevent show dialog again

  @Slot(bool, str, str, str)
@@ -323,7 +342,8 @@
  force: bool,
  profile_name: str,
  current_path: str,
- new_path: str
+ new_path: str,
+ is_create: bool = False
  ) -> bool:
  """
  Update working directory
@@ -332,18 +352,20 @@
  :param profile_name: profile name to update after workdir change
  :param current_path: current working directory before update
  :param new_path: new working directory to set
+ :param is_create: if True, skip check for existing workdir in path
  :return: boolean indicating if update was successful
  """
  self.update_workdir(
  force=force,
  path=new_path,
+ is_create=is_create,
  )
  rollback = False
  success = False
  if force:
  try:
  self.window.ui.dialogs.workdir.show_status(trans("dialog.workdir.result.wait"))
- self.window.controller.reload()
+ self.window.controller.reload() # reload all
  self.window.ui.dialogs.workdir.show_status(trans("dialog.workdir.result.wait"))
  msg = trans("dialog.workdir.result.success").format(path=new_path)
  self.window.ui.dialogs.workdir.show_status(msg)
@@ -498,7 +520,8 @@
  self,
  path: str,
  force: bool = False,
- profile_name: str = None
+ profile_name: str = None,
+ is_create: bool = False,
  ):
  """
  Switch working directory to the existing one
@@ -506,12 +529,14 @@
  :param path: existing working directory
  :param force: force update (confirm)
  :param profile_name: profile name (optional, for future use)
+ :param is_create: if True, skip check for existing workdir in path
  """
  self.do_update(
  force=force,
  profile_name=profile_name,
  current_path=self.window.core.config.get_user_path(),
  new_path=path,
+ is_create=is_create,
  )

  def migrate(
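
Note on the changes above: the new is_create flag is threaded from Profile.switch() through Workdir.update() and do_update() into update_workdir(), where the theme and language captured before the reload are written back so a freshly created profile keeps the current UI settings. A condensed sketch of that save/reload/restore pattern, using a toy dict-based config rather than the real Config class:

    def reload_preserving(config: dict, reload_fn, keys=("theme", "lang")) -> dict:
        """Reload a config while carrying selected keys over from the old one."""
        saved = {k: config.get(k) for k in keys}  # snapshot before the reload
        config = reload_fn()                      # reload from the new workdir
        for key, value in saved.items():
            if value is not None:
                config[key] = value               # restore the snapshot
        return config

    cfg = {"theme": "dark_teal", "lang": "en"}
    cfg = reload_preserving(cfg, lambda: {"theme": "dark", "lang": "en"})
    assert cfg["theme"] == "dark_teal"  # new profile keeps the current theme
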
pygpt_net/controller/theme/common.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.07.22 15:00:00 #
+ # Updated Date: 2025.09.26 13:00:00 #
  # ================================================== #

  import os
@@ -32,6 +32,8 @@ class Common:
  :return: custom css filename (e.g. style.dark.css)
  """
  # check per theme style css
+ if name is None:
+ name = ""
  filename = 'style.css'
  if filename is not None:
  # per theme mode (light / dark)
@@ -58,7 +60,7 @@

  :return: True if light theme, False otherwise
  """
- theme = self.window.core.config.get('theme')
+ theme = str(self.window.core.config.get('theme'))
  return theme.startswith('light_') or theme == 'light'

  def toggle_tooltips(self):
pygpt_net/controller/theme/markdown.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2024.12.14 08:00:00 #
+ # Updated Date: 2025.09.26 13:00:00 #
  # ================================================== #

  import os
@@ -92,7 +92,7 @@ class Markdown:
  if base_name == 'web':
  suffix = "-" + web_style
  self.web_style = web_style
- theme = self.window.core.config.get('theme')
+ theme = str(self.window.core.config.get('theme'))
  name = str(base_name)
  if theme.startswith('light'):
  color = '.light'
pygpt_net/controller/theme/theme.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.27 00:00:00 #
+ # Updated Date: 2025.09.26 13:00:00 #
  # ================================================== #

  import os
@@ -83,6 +83,7 @@ class Theme:
  :param name: theme name
  :param force: force theme change (manual trigger)
  """
+ self.current_theme = name
  window = self.window
  core = window.core
  controller = window.controller
pygpt_net/controller/ui/ui.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.25 12:00:00 #
+ # Updated Date: 2025.09.26 17:00:00 #
  # ================================================== #

  from typing import Optional
@@ -51,6 +51,9 @@ class UI:
  self._last_chat_model = None
  self._last_chat_label = None

+ # Cache for Input tab tooltip to avoid redundant updates
+ self._last_input_tab_tooltip = None
+
  def setup(self):
  """Setup UI"""
  self.update_font_size()
@@ -150,7 +153,11 @@
  def update_tokens(self):
  """Update tokens counter in real-time"""
  ui_nodes = self.window.ui.nodes
- prompt = ui_nodes['input'].toPlainText().strip()
+
+ # Read raw input for accurate character count (without trimming)
+ raw_text = ui_nodes['input'].toPlainText()
+ prompt = raw_text.strip()
+
  input_tokens, system_tokens, extra_tokens, ctx_tokens, ctx_len, ctx_len_all, \
  sum_tokens, max_current, threshold = self.window.core.tokens.get_current(prompt)
  attachments_tokens = self.window.controller.chat.attachment.get_current_tokens()
@@ -161,11 +168,32 @@
  ui_nodes['prompt.context'].setText(ctx_string)
  self._last_ctx_string = ctx_string

- input_string = f"{short_num(input_tokens)} + {short_num(system_tokens)} + {short_num(ctx_tokens)} + {short_num(extra_tokens)} + {short_num(attachments_tokens)} = {short_num(sum_tokens)} / {short_num(max_current)}"
+ if max_current > 0:
+ max_str = short_num(max_current)
+ else:
+ max_str = "∞"
+
+ input_string = f"{short_num(input_tokens)} + {short_num(system_tokens)} + {short_num(ctx_tokens)} + {short_num(extra_tokens)} + {short_num(attachments_tokens)} = {short_num(sum_tokens)} / {max_str}"
  if input_string != self._last_input_string:
  ui_nodes['input.counter'].setText(input_string)
  self._last_input_string = input_string

+ # Update Input tab tooltip with live "<chars> chars (~<tokens> tokens)" string
+ try:
+ tabs = self.window.ui.tabs.get('input')
+ except Exception:
+ tabs = None
+
+ if tabs is not None:
+ try:
+ tooltip = trans("input.tab.tooltip").format(chars=short_num(len(raw_text)), tokens=short_num(input_tokens))
+ except Exception:
+ tooltip = ""
+ #tooltip = f"{short_num(len(raw_text))} chars (~{short_num(input_tokens)} tokens)"
+ if tooltip != self._last_input_tab_tooltip:
+ tabs.setTabToolTip(0, tooltip)
+ self._last_input_tab_tooltip = tooltip
+
  def store_state(self):
  """Store UI state"""
  self.window.controller.layout.scroll_save()
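
Note on the changes above: update_tokens() now renders the limit as "∞" when max_current is 0 and caches both the counter string and the Input tab tooltip, so the Qt widgets are only touched when the text actually changes. A rough sketch of that change-detection pattern, with plain callables standing in for the widgets and a simplified short_num():

    def short_num(n: int) -> str:
        # Simplified stand-in for the project's short_num() formatter.
        return f"{n / 1000:.1f}k" if n >= 1000 else str(n)

    class CounterCache:
        def __init__(self, set_counter, set_tooltip):
            self._set_counter = set_counter
            self._set_tooltip = set_tooltip
            self._last_counter = None
            self._last_tooltip = None

        def update(self, used_tokens: int, max_tokens: int, chars: int):
            max_str = short_num(max_tokens) if max_tokens > 0 else "∞"  # 0 means no limit
            counter = f"{short_num(used_tokens)} / {max_str}"
            tooltip = f"{short_num(chars)} chars (~{short_num(used_tokens)} tokens)"
            if counter != self._last_counter:   # touch the widget only on change
                self._set_counter(counter)
                self._last_counter = counter
            if tooltip != self._last_tooltip:
                self._set_tooltip(tooltip)
                self._last_tooltip = tooltip

    cache = CounterCache(print, print)
    cache.update(used_tokens=1200, max_tokens=0, chars=4800)  # prints "1.2k / ∞" and the tooltip once
    cache.update(used_tokens=1200, max_tokens=0, chars=4800)  # unchanged, no widget update
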
pygpt_net/core/agents/custom/llama_index/runner.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.25 14:00:00 #
+ # Updated Date: 2025.09.26 17:00:00 #
  # ================================================== #

  from __future__ import annotations
@@ -268,13 +268,28 @@ class DynamicFlowWorkflowLI(Workflow):
  async def _emit_header(self, ctx: Context, name: str):
  if self.dbg.event_echo:
  self.logger.debug(f"[event] header emit begin name='{name}'")
- await self._emit_agent_text(ctx, f"\n\n**{name}**\n\n", agent_name=name)
+ await self._emit_agent_text(ctx, "", agent_name=name)
+ # await self._emit_agent_text(ctx, f"\n\n**{name}**\n\n", agent_name=name)
  if self.dbg.event_echo:
  self.logger.debug("[event] header emit done")

  async def _emit_step_sep(self, ctx: Context, node_id: str):
  try:
- await self._emit(ctx, StepEvent(name="next", index=self._steps, total=self.max_iterations, meta={"node": node_id}))
+ # Include human-friendly agent name in StepEvent meta for downstream ctx propagation.
+ a = self.fs.agents.get(node_id)
+ friendly_name = (a.name if a and a.name else node_id)
+ await self._emit(
+ ctx,
+ StepEvent(
+ name="next",
+ index=self._steps,
+ total=self.max_iterations,
+ meta={
+ "node": node_id,
+ "agent_name": friendly_name, # pass current agent display name
+ },
+ ),
+ )
  except Exception as e:
  self.logger.error(f"[event] StepEvent emit failed: {e}")

pygpt_net/core/agents/custom/runner.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.25 14:00:00 #
+ # Updated Date: 2025.09.26 17:00:00 #
  # ================================================== #

  from __future__ import annotations
@@ -242,13 +242,14 @@ class FlowOrchestrator:
  run_kwargs["trace_id"] = trace_id

  # Header for UI
- title = f"\n\n**{built.name}**\n\n"
- ctx.stream = title
+ ctx.set_agent_name(agent.name)
+ # title = f"\n\n**{built.name}**\n\n"
+ # ctx.stream = title
  bridge.on_step(ctx, begin)
  begin = False
  handler.begin = begin
- if not use_partial_ctx:
- handler.to_buffer(title)
+ # if not use_partial_ctx:
+ # handler.to_buffer(title)

  display_text = "" # what we show to UI for this step
  next_id: Optional[str] = None
@@ -444,6 +445,10 @@
  else:
  bridge.on_next(ctx)

+ # set next agent name if not at the end
+ if current_ids and current_ids[0] in fs.agents:
+ ctx.set_agent_name(fs.agents[current_ids[0]].name)
+
  # Step duration
  dur = perf_counter() - step_start
  self.logger.debug(f"[step {steps}] duration={dur:.3f}s")
pygpt_net/core/agents/runners/llama_workflow.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.24 02:00:00 #
+ # Updated Date: 2025.09.26 17:00:00 #
  # ================================================== #

  import re
@@ -18,6 +18,7 @@ from llama_index.core.agent.workflow import (
  ToolCallResult,
  AgentStream,
  AgentOutput,
+ # AgentInput, # not needed currently
  )
  from workflows.errors import WorkflowCancelledByUser

@@ -38,6 +39,7 @@ class LlamaWorkflow(BaseRunner):
  """
  super(LlamaWorkflow, self).__init__(window)
  self.window = window
+ self.last_response_id = None

  async def run(
  self,
@@ -177,12 +179,13 @@

  prev_output = ctx.live_output
  if prev_output:
- prev_output = self.filter_output(prev_output) # remove all <execute>...</execute>
+ prev_output = self.filter_output(prev_output) # remove all [!exec]...[/!exec]

  response_ctx.set_agent_final_response(ctx.agent_final_response) # always set to further use
  response_ctx.set_output(prev_output) # append from stream
  response_ctx.extra["agent_output"] = True # mark as output response
  response_ctx.extra["agent_finish"] = True # mark as finished
+ response_ctx.set_agent_name(ctx.get_agent_name()) # store last agent name

  if "agent_input" in response_ctx.extra:
  del response_ctx.extra["agent_input"] # remove agent input from extra
@@ -254,8 +257,10 @@
  item_ctx.output = "" # empty to prevent render
  item_ctx.stream = "" # for stream

+ # Keep last known agent name to avoid redundant ctx updates.
+ last_agent_name: Optional[str] = None
+
  async for event in handler.stream_events():
- print(event)
  if self.is_stopped():
  # persist current output on stop
  item_ctx.output = item_ctx.live_output
@@ -297,7 +302,24 @@
  begin=begin,
  stream=True,
  )
+ # Propagate agent name early based on StepEvent meta, if available.
+ try:
+ meta = getattr(event, "meta", {}) or {}
+ next_name = meta.get("agent_name")
+ if next_name:
+ last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
+ except Exception:
+ pass
+ # Optional: mark start of a new stream block
+ begin = True
  elif isinstance(event, AgentStream):
+ # Update agent name from event if present; fallback to header parsing.
+ name = getattr(event, "current_agent_name", None)
+ if not name:
+ name = self._guess_agent_name_from_text(getattr(event, "delta", "") or "")
+ if name:
+ last_agent_name = self._apply_agent_name_to_ctx(item_ctx, name, last_agent_name)
+
  if verbose:
  print(f"{event.delta}", end="", flush=True)
  if event.delta:
@@ -307,6 +329,10 @@
  self.send_stream(item_ctx, signals, begin) # send stream to webview
  begin = False
  elif isinstance(event, AgentOutput):
+ # Ensure final agent name is applied as well.
+ name = getattr(event, "current_agent_name", None)
+ if name:
+ last_agent_name = self._apply_agent_name_to_ctx(item_ctx, name, last_agent_name)
  thought, answer = self.extract_final_response(str(event))
  if answer:
  item_ctx.set_agent_final_response(answer)
@@ -348,6 +374,40 @@
  next_ctx.set_output("")
  next_ctx.partial = True
  next_ctx.extra["agent_output"] = True # allow usage in history
-
+ next_ctx.set_agent_name(ctx.get_agent_name()) # propagate agent name
  self.send_response(next_ctx, signals, KernelEvent.APPEND_DATA)
- return next_ctx
+
+ return next_ctx
+
+ # ===== helpers for agent name propagation =====
+
+ def _apply_agent_name_to_ctx(self, ctx: CtxItem, name: str, last_known: Optional[str]) -> str:
+ """
+ Apply agent name to your context, avoiding redundant updates.
+ Falls back to ctx.extra['agent_name'] if set_agent_name is unavailable.
+ """
+ if not name:
+ return last_known or ""
+ if last_known and last_known == name:
+ return last_known
+ try:
+ if hasattr(ctx, "set_agent_name") and callable(getattr(ctx, "set_agent_name")):
+ ctx.set_agent_name(name)
+ # Always mirror into extra for downstream consumers
+ ctx.extra["agent_name"] = name
+ except Exception:
+ ctx.extra["agent_name"] = name
+ return name
+
+ def _guess_agent_name_from_text(self, text: str) -> Optional[str]:
+ """
+ Try to infer agent name from header like '**Name**' which our workflow emits
+ before each agent block.
+ """
+ if not text:
+ return None
+ # Look for the first bold segment – keep it lenient
+ m = re.search(r"\*\*([^*]+?)\*\*", text)
+ if m:
+ return m.group(1).strip()
+ return None
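
Note on the helpers added above: _apply_agent_name_to_ctx() writes the agent name only when it changes, mirroring it into ctx.extra as a fallback, and _guess_agent_name_from_text() recovers the name from the first bold "**Name**" segment in a stream delta. A standalone illustration of that fallback parser (the runner method applies the same regex to its event deltas):

    import re
    from typing import Optional

    def guess_agent_name(text: str) -> Optional[str]:
        """Return the first bold segment, e.g. '**Planner**' -> 'Planner'."""
        if not text:
            return None
        m = re.search(r"\*\*([^*]+?)\*\*", text)  # first bold segment, kept lenient
        return m.group(1).strip() if m else None

    assert guess_agent_name("\n\n**Planner**\n\nStep 1: ...") == "Planner"
    assert guess_agent_name("plain delta with no header") is None
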
pygpt_net/core/agents/runners/openai_workflow.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.24 03:00:00 #
+ # Updated Date: 2025.09.26 17:00:00 #
  # ================================================== #

  from typing import Dict, Any, List, Optional
@@ -237,6 +237,7 @@ class OpenAIWorkflow(BaseRunner):
  response_ctx.set_agent_final_response(output) # always set to further use
  response_ctx.extra["agent_output"] = True # mark as output response
  response_ctx.extra["agent_finish"] = True # mark as finished
+ response_ctx.set_agent_name(ctx.get_agent_name()) # store last agent name
  response_ctx.msg_id = response_id # set response id for OpenAI

  if ctx.agent_final_response: # only if not empty
pygpt_net/core/node_editor/types.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.25 15:00:00 #
+ # Updated Date: 2025.09.26 12:00:00 #
  # ================================================== #

  from __future__ import annotations
@@ -34,11 +34,14 @@ class PropertySpec:
  class NodeTypeSpec:
  type_name: str
  title: Optional[str] = None
+ # UI-only human-readable label used for menus; never persisted nor used as an identifier
+ display_name: Optional[str] = None
  properties: List[PropertySpec] = field(default_factory=list)
  # Below are optional extensions for agent-flow needs:
  base_id: Optional[str] = None # base prefix for friendly ids, e.g. "agent"
  export_kind: Optional[str] = None # short kind for export, e.g. "agent", "start"
  bg_color: Optional[str] = None # optional per-type background color (CSS/hex)
+ max_num: Optional[int] = None # optional per-layout cap; None or <=0 means unlimited

  class NodeTypeRegistry:
  """Registry for node type specifications. Extend/override in subclasses."""
@@ -56,6 +59,15 @@
  def get(self, type_name: str) -> Optional[NodeTypeSpec]:
  return self._types.get(type_name)

+ def display_name(self, type_name: str) -> str:
+ """Return UI label for a type: spec.display_name if non-empty, otherwise type_name."""
+ spec = self.get(type_name)
+ if spec:
+ dn = getattr(spec, "display_name", None)
+ if isinstance(dn, str) and dn.strip():
+ return dn
+ return type_name
+
  def _install_default_types(self):
  # Example/basic nodes kept intact
  self.register(NodeTypeSpec(
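
Note on the node-editor changes above: display_name is a UI-only label resolved through NodeTypeRegistry.display_name(), which falls back to the type_name identifier, and max_num caps how many nodes of a type a layout may contain. A minimal standalone sketch of that lookup, using simplified stand-ins rather than the real NodeTypeSpec/NodeTypeRegistry classes:

    from dataclasses import dataclass
    from typing import Dict, Optional

    @dataclass
    class NodeTypeSpecSketch:
        type_name: str
        display_name: Optional[str] = None  # UI-only label, never persisted
        max_num: Optional[int] = None       # per-layout cap; None or <=0 means unlimited

    class NodeTypeRegistrySketch:
        def __init__(self):
            self._types: Dict[str, NodeTypeSpecSketch] = {}

        def register(self, spec: NodeTypeSpecSketch):
            self._types[spec.type_name] = spec

        def display_name(self, type_name: str) -> str:
            spec = self._types.get(type_name)
            if spec and isinstance(spec.display_name, str) and spec.display_name.strip():
                return spec.display_name
            return type_name  # fall back to the identifier

    reg = NodeTypeRegistrySketch()
    reg.register(NodeTypeSpecSketch(type_name="agent", display_name="Agent", max_num=None))
    assert reg.display_name("agent") == "Agent"
    assert reg.display_name("start") == "start"  # unknown or unlabeled type keeps its id
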