pygpt-net 2.6.61__py3-none-any.whl → 2.6.63__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/response.py +8 -2
  4. pygpt_net/controller/presets/editor.py +65 -1
  5. pygpt_net/controller/settings/profile.py +16 -4
  6. pygpt_net/controller/settings/workdir.py +30 -5
  7. pygpt_net/controller/theme/common.py +4 -2
  8. pygpt_net/controller/theme/markdown.py +2 -2
  9. pygpt_net/controller/theme/theme.py +2 -1
  10. pygpt_net/controller/ui/ui.py +31 -3
  11. pygpt_net/core/agents/custom/llama_index/runner.py +30 -52
  12. pygpt_net/core/agents/custom/runner.py +199 -76
  13. pygpt_net/core/agents/runners/llama_workflow.py +122 -12
  14. pygpt_net/core/agents/runners/openai_workflow.py +2 -1
  15. pygpt_net/core/node_editor/types.py +13 -1
  16. pygpt_net/core/render/web/renderer.py +76 -11
  17. pygpt_net/data/config/config.json +3 -3
  18. pygpt_net/data/config/models.json +3 -3
  19. pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
  20. pygpt_net/data/config/presets/agent_openai_coder.json +1 -15
  21. pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
  22. pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
  23. pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
  24. pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
  25. pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
  26. pygpt_net/data/config/presets/agent_supervisor.json +1 -11
  27. pygpt_net/data/css/style.dark.css +18 -0
  28. pygpt_net/data/css/style.light.css +20 -1
  29. pygpt_net/data/js/app/runtime.js +4 -1
  30. pygpt_net/data/js/app.min.js +3 -2
  31. pygpt_net/data/locale/locale.de.ini +2 -0
  32. pygpt_net/data/locale/locale.en.ini +7 -0
  33. pygpt_net/data/locale/locale.es.ini +2 -0
  34. pygpt_net/data/locale/locale.fr.ini +2 -0
  35. pygpt_net/data/locale/locale.it.ini +2 -0
  36. pygpt_net/data/locale/locale.pl.ini +3 -1
  37. pygpt_net/data/locale/locale.uk.ini +2 -0
  38. pygpt_net/data/locale/locale.zh.ini +2 -0
  39. pygpt_net/item/ctx.py +23 -1
  40. pygpt_net/js_rc.py +13 -10
  41. pygpt_net/provider/agents/base.py +0 -0
  42. pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
  43. pygpt_net/provider/agents/llama_index/workflow/codeact.py +9 -6
  44. pygpt_net/provider/agents/llama_index/workflow/openai.py +38 -11
  45. pygpt_net/provider/agents/llama_index/workflow/planner.py +248 -28
  46. pygpt_net/provider/agents/llama_index/workflow/supervisor.py +60 -10
  47. pygpt_net/provider/agents/openai/agent.py +3 -1
  48. pygpt_net/provider/agents/openai/agent_b2b.py +17 -13
  49. pygpt_net/provider/agents/openai/agent_planner.py +617 -258
  50. pygpt_net/provider/agents/openai/agent_with_experts.py +4 -1
  51. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +8 -6
  52. pygpt_net/provider/agents/openai/agent_with_feedback.py +8 -6
  53. pygpt_net/provider/agents/openai/evolve.py +12 -8
  54. pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
  55. pygpt_net/provider/agents/openai/supervisor.py +292 -37
  56. pygpt_net/provider/api/openai/agents/response.py +1 -0
  57. pygpt_net/provider/api/x_ai/__init__.py +0 -0
  58. pygpt_net/provider/core/agent/__init__.py +0 -0
  59. pygpt_net/provider/core/agent/base.py +0 -0
  60. pygpt_net/provider/core/agent/json_file.py +0 -0
  61. pygpt_net/provider/core/config/patch.py +8 -0
  62. pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
  63. pygpt_net/provider/llms/base.py +0 -0
  64. pygpt_net/provider/llms/deepseek_api.py +0 -0
  65. pygpt_net/provider/llms/google.py +0 -0
  66. pygpt_net/provider/llms/hugging_face_api.py +0 -0
  67. pygpt_net/provider/llms/hugging_face_router.py +0 -0
  68. pygpt_net/provider/llms/mistral.py +0 -0
  69. pygpt_net/provider/llms/perplexity.py +0 -0
  70. pygpt_net/provider/llms/x_ai.py +0 -0
  71. pygpt_net/tools/agent_builder/tool.py +6 -0
  72. pygpt_net/tools/agent_builder/ui/dialogs.py +0 -41
  73. pygpt_net/ui/layout/toolbox/presets.py +14 -2
  74. pygpt_net/ui/main.py +2 -2
  75. pygpt_net/ui/widget/dialog/confirm.py +55 -5
  76. pygpt_net/ui/widget/draw/painter.py +90 -1
  77. pygpt_net/ui/widget/lists/preset.py +289 -25
  78. pygpt_net/ui/widget/node_editor/editor.py +53 -15
  79. pygpt_net/ui/widget/node_editor/node.py +82 -104
  80. pygpt_net/ui/widget/node_editor/view.py +4 -5
  81. pygpt_net/ui/widget/textarea/input.py +155 -21
  82. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/METADATA +22 -8
  83. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/RECORD +70 -70
  84. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/LICENSE +0 -0
  85. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/WHEEL +0 -0
  86. {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/entry_points.txt +0 -0
@@ -6,12 +6,13 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.26 01:00:00 #
9
+ # Updated Date: 2025.09.27 20:25:00 #
10
10
  # ================================================== #
11
-
11
+ import base64
12
12
  import json
13
13
  import re
14
- from typing import Dict, Any, Tuple, Optional
14
+ import io
15
+ from typing import Dict, Any, Tuple, Optional, Callable
15
16
 
16
17
  from agents import (
17
18
  Agent as OpenAIAgent,
@@ -38,6 +39,16 @@ from pygpt_net.utils import trans
38
39
 
39
40
  from ..base import BaseAgent
40
41
 
42
+ # OpenAI response event types (used by StreamHandler)
43
+ from openai.types.responses import (
44
+ ResponseTextDeltaEvent,
45
+ ResponseCreatedEvent,
46
+ ResponseCodeInterpreterCallCodeDeltaEvent,
47
+ ResponseOutputItemAddedEvent,
48
+ ResponseCompletedEvent,
49
+ ResponseOutputItemDoneEvent,
50
+ )
51
+
41
52
  JSON_RE = re.compile(r"\{[\s\S]*\}$", re.MULTILINE)
42
53
 
43
54
  SUPERVISOR_PROMPT = """
@@ -62,6 +73,200 @@ You are the “Worker”. You execute Supervisor instructions strictly, using yo
62
73
  Respond in the user's language.
63
74
  """
64
75
 
76
+
77
+ class SupervisorStreamHandler(StreamHandler):
78
+ """
79
+ Stream handler that filters JSON from Supervisor output during streaming.
80
+ - Pass-through normal text.
81
+ - Suppress raw JSON (both ```json fenced and bare {...}).
82
+ - When JSON finishes, parse and emit only the human-friendly text via `json_to_text`.
83
+ """
84
+ def __init__(
85
+ self,
86
+ window,
87
+ bridge: ConnectionContext = None,
88
+ message: str = None,
89
+ json_to_text: Optional[Callable[[dict], str]] = None,
90
+ ):
91
+ super().__init__(window, bridge, message)
92
+ self.json_to_text = json_to_text or (lambda d: json.dumps(d, ensure_ascii=False))
93
+ self._json_fenced = False
94
+ self._json_buf = io.StringIO()
95
+ self._json_in_braces = False
96
+ self._brace_depth = 0
97
+ self._in_string = False
98
+ self._escape = False
99
+
100
+ def _emit_text(self, ctx: CtxItem, text: str, flush: bool, buffer: bool):
101
+ if not text:
102
+ return
103
+ self._emit(ctx, text, flush, buffer)
104
+
105
+ def _flush_json(self, ctx: CtxItem, flush: bool, buffer: bool):
106
+ """
107
+ Parse collected JSON and emit only formatted text; reset state.
108
+ """
109
+ raw_json = self._json_buf.getvalue().strip()
110
+ self._json_buf = io.StringIO()
111
+ self._json_fenced = False
112
+ self._json_in_braces = False
113
+ self._brace_depth = 0
114
+ self._in_string = False
115
+ self._escape = False
116
+
117
+ if not raw_json:
118
+ return
119
+ try:
120
+ data = json.loads(raw_json)
121
+ out = self.json_to_text(data) or ""
122
+ except Exception:
123
+ # Fallback: if parsing failed, do not leak JSON; stay silent
124
+ out = ""
125
+ if out:
126
+ self._emit_text(ctx, out, flush, buffer)
127
+
128
+ def _handle_text_delta(self, s: str, ctx: CtxItem, flush: bool, buffer: bool):
129
+ """
130
+ Filter JSON while streaming; emit only non-JSON text or parsed JSON text.
131
+ """
132
+ i = 0
133
+ n = len(s)
134
+ while i < n:
135
+ # Detect fenced JSON start
136
+ if not self._json_fenced and not self._json_in_braces and s.startswith("```json", i):
137
+ # Emit any text before the fence
138
+ # (there shouldn't be in this branch because we check exact start, but keep safe)
139
+ i += len("```json")
140
+ self._json_fenced = True
141
+ # Skip possible newline after fence
142
+ if i < n and s[i] == '\n':
143
+ i += 1
144
+ continue
145
+
146
+ # Detect fenced JSON end
147
+ if self._json_fenced and s.startswith("```", i):
148
+ # Flush JSON collected so far
149
+ self._flush_json(ctx, flush, buffer)
150
+ i += len("```")
151
+ # Optional newline after closing fence
152
+ if i < n and s[i] == '\n':
153
+ i += 1
154
+ continue
155
+
156
+ # While inside fenced JSON -> buffer and continue
157
+ if self._json_fenced:
158
+ self._json_buf.write(s[i])
159
+ i += 1
160
+ continue
161
+
162
+ # Bare JSON detection (naive but effective for supervisor outputs)
163
+ if not self._json_in_braces and s[i] == "{":
164
+ self._json_in_braces = True
165
+ self._brace_depth = 1
166
+ self._in_string = False
167
+ self._escape = False
168
+ self._json_buf.write("{")
169
+ i += 1
170
+ continue
171
+
172
+ if self._json_in_braces:
173
+ ch = s[i]
174
+ # Basic JSON string/escape handling
175
+ if ch == '"' and not self._escape:
176
+ self._in_string = not self._in_string
177
+ if ch == "\\" and not self._escape:
178
+ self._escape = True
179
+ else:
180
+ self._escape = False
181
+ if not self._in_string:
182
+ if ch == "{":
183
+ self._brace_depth += 1
184
+ elif ch == "}":
185
+ self._brace_depth -= 1
186
+ self._json_buf.write(ch)
187
+ i += 1
188
+ if self._brace_depth == 0:
189
+ # JSON closed -> flush parsed text
190
+ self._flush_json(ctx, flush, buffer)
191
+ continue
192
+
193
+ # Normal text path
194
+ # Accumulate until potential fenced start to avoid splitting too often
195
+ next_fence = s.find("```json", i)
196
+ next_bare = s.find("{", i)
197
+ cut = n
198
+ candidates = [x for x in (next_fence, next_bare) if x != -1]
199
+ if candidates:
200
+ cut = min(candidates)
201
+ chunk = s[i:cut]
202
+ if chunk:
203
+ self._emit_text(ctx, chunk, flush, buffer)
204
+ i = cut if cut != n else n
205
+
206
+ def handle(
207
+ self,
208
+ event,
209
+ ctx: CtxItem,
210
+ flush: bool = True,
211
+ buffer: bool = True
212
+ ) -> Tuple[str, str]:
213
+ """
214
+ Override StreamHandler.handle to filter JSON in text deltas.
215
+ For non-text events, fallback to parent handler.
216
+ """
217
+ # ReasoningItem path remains the same (parent prints to stdout), keep parent behavior.
218
+
219
+ if getattr(event, "type", None) == "raw_response_event":
220
+ data = event.data
221
+
222
+ if isinstance(data, ResponseCreatedEvent):
223
+ self.response_id = data.response.id
224
+ return self.buffer, self.response_id
225
+
226
+ if isinstance(data, ResponseTextDeltaEvent):
227
+ # Filter JSON while streaming
228
+ delta = data.delta or ""
229
+ # If a code_interpreter block was started previously, render fence first
230
+ if self.code_block:
231
+ self._emit_text(ctx, "\n```\n", flush, buffer)
232
+ self.code_block = False
233
+ self._handle_text_delta(delta, ctx, flush, buffer)
234
+ return self.buffer, self.response_id
235
+
236
+ if isinstance(data, ResponseOutputItemAddedEvent):
237
+ if data.item.type == "code_interpreter_call":
238
+ self.code_block = True
239
+ s = "\n\n**Code interpreter**\n```python\n"
240
+ self._emit_text(ctx, s, flush, buffer)
241
+ return self.buffer, self.response_id
242
+
243
+ if isinstance(data, ResponseOutputItemDoneEvent):
244
+ if data.item.type == "image_generation_call":
245
+ img_path = self.window.core.image.gen_unique_path(ctx)
246
+ image_base64 = data.item.result
247
+ image_bytes = base64.b64decode(image_base64)
248
+ with open(img_path, "wb") as f:
249
+ f.write(image_bytes)
250
+ self.window.core.debug.info("[chat] Image generation call found")
251
+ ctx.images = [img_path]
252
+ return self.buffer, self.response_id
253
+
254
+ if isinstance(data, ResponseCodeInterpreterCallCodeDeltaEvent):
255
+ self._emit_text(ctx, data.delta or "", flush, buffer)
256
+ return self.buffer, self.response_id
257
+
258
+ if isinstance(data, ResponseCompletedEvent):
259
+ # If we are still buffering JSON, flush it now (emit parsed text only)
260
+ if self._json_fenced or self._json_in_braces:
261
+ self._flush_json(ctx, flush, buffer)
262
+ # Mark finished so parent downloader logic (files) may trigger if needed
263
+ self.finished = True
264
+ return self.buffer, self.response_id
265
+
266
+ # Handoff / other events: fallback to parent, but it won't print JSON since we already filtered in text deltas
267
+ return super().handle(event, ctx, flush, buffer)
268
+
269
+
65
270
  class Agent(BaseAgent):
66
271
 
67
272
  def __init__(self, *args, **kwargs):
@@ -69,7 +274,7 @@ class Agent(BaseAgent):
69
274
  self.id = "openai_agent_supervisor"
70
275
  self.type = AGENT_TYPE_OPENAI
71
276
  self.mode = AGENT_MODE_OPENAI
72
- self.name = "Supervisor + worker"
277
+ self.name = "Supervisor" # use clean name in UI headers
73
278
 
74
279
  def get_agent(self, window, kwargs: Dict[str, Any]):
75
280
  """
@@ -81,8 +286,11 @@ class Agent(BaseAgent):
81
286
  """
82
287
  context = kwargs.get("context", BridgeContext())
83
288
  preset = context.preset
84
- agent_name = preset.name if preset else "Supervisor"
85
289
  model = kwargs.get("model", ModelItem())
290
+
291
+ # Enforce a stable, clean display name for the Supervisor regardless of preset name.
292
+ agent_name = "Supervisor" # hard-coded UI name
293
+
86
294
  worker_tool = kwargs.get("worker_tool", None)
87
295
  kwargs = {
88
296
  "name": agent_name,
@@ -184,63 +392,117 @@ class Agent(BaseAgent):
184
392
  worker_session_id = f"worker_session_{ctx.meta.id}" if ctx.meta else "worker_session_default"
185
393
  worker_session = SQLiteSession(worker_session_id)
186
394
 
187
- handler = StreamHandler(window, bridge)
188
- item_ctx = ctx
395
+ # Use JSON-filtering handler for Supervisor streaming
396
+ handler = SupervisorStreamHandler(
397
+ window,
398
+ bridge,
399
+ json_to_text=self.response_from_json,
400
+ )
401
+ item_ctx = ctx # will reassign on splits
402
+
403
+ supervisor_display_name = None # set after agent is created
189
404
 
190
405
  # tool to run Worker
191
406
  @function_tool(name_override="run_worker")
192
- async def run_worker(ctx: RunContextWrapper[Any], instruction: str) -> str:
407
+ async def run_worker(fn_ctx: RunContextWrapper[Any], instruction: str) -> str:
193
408
  """
194
409
  Run the Worker with an instruction from the Supervisor and return its output.
195
410
 
196
- :param ctx: Run context wrapper
197
- :param instruction: Instruction for the Worker
198
- :return: Output from the Worker
411
+ - Appends the instruction to the current Supervisor block.
412
+ - Finalizes the Supervisor block and opens a new Worker block.
413
+ - Runs the Worker and streams its result into the Worker block.
414
+ - Finalizes the Worker block, then opens a fresh block for the Supervisor to continue.
199
415
  """
200
- item_ctx.stream = f"\n\n**{trans('agent.name.supervisor')} --> {trans('agent.name.worker')}:** {instruction}\n\n"
416
+ nonlocal item_ctx, supervisor_display_name
417
+
418
+ info = f"\n\n**{trans('agent.name.supervisor')} → {trans('agent.name.worker')}:** {instruction}\n\n"
419
+ item_ctx.stream = info
201
420
  bridge.on_step(item_ctx, True)
202
- handler.begin = False
421
+ handler.to_buffer(info)
422
+
423
+ if use_partial_ctx:
424
+ item_ctx = bridge.on_next_ctx(
425
+ ctx=item_ctx,
426
+ input="",
427
+ output=handler.buffer, # finalize current Supervisor content
428
+ response_id=handler.response_id or "",
429
+ stream=True,
430
+ )
431
+ handler.new() # reset handler buffer for next block
432
+
433
+ try:
434
+ item_ctx.set_agent_name(worker.name)
435
+ except Exception:
436
+ pass
437
+
203
438
  result = await Runner.run(
204
439
  worker,
205
440
  input=instruction,
206
441
  session=worker_session,
207
442
  max_turns=max_steps,
208
443
  )
209
- item_ctx.stream = f"\n\n{result.final_output}\n\n"
210
- bridge.on_step(item_ctx, False)
211
- return str(result.final_output)
444
+
445
+ worker_text = str(result.final_output or "")
446
+ if worker_text:
447
+ item_ctx.stream = f"{worker_text}\n"
448
+ bridge.on_step(item_ctx, True)
449
+
450
+ if use_partial_ctx:
451
+ item_ctx = bridge.on_next_ctx(
452
+ ctx=item_ctx,
453
+ input="",
454
+ output=worker_text, # finalize worker output
455
+ response_id="", # worker has no OpenAI response id here
456
+ stream=True,
457
+ )
458
+ try:
459
+ if supervisor_display_name:
460
+ item_ctx.set_agent_name(supervisor_display_name)
461
+ except Exception:
462
+ pass
463
+
464
+ return worker_text
212
465
 
213
466
  agent_kwargs["worker_tool"] = run_worker
214
467
  agent = self.get_agent(window, agent_kwargs)
468
+ supervisor_display_name = agent.name # "Supervisor"
215
469
 
216
470
  if not stream:
217
- result = await Runner.run(
218
- agent,
219
- **kwargs
220
- )
221
- final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
471
+ item_ctx.set_agent_name(agent.name)
472
+ result = await Runner.run(agent, **kwargs)
473
+ final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, item_ctx)
222
474
  response_id = result.last_response_id
223
475
  if verbose:
224
476
  print("Final response:", result)
225
477
  else:
226
- result = Runner.run_streamed(
227
- agent,
228
- **kwargs
229
- )
478
+ item_ctx.set_agent_name(agent.name)
479
+ result = Runner.run_streamed(agent, **kwargs)
230
480
  async for event in result.stream_events():
231
481
  if bridge.stopped():
232
482
  result.cancel()
233
- bridge.on_stop(ctx)
483
+ bridge.on_stop(item_ctx)
234
484
  break
235
- final_output, response_id = handler.handle(event, ctx)
485
+ # Write into current item_ctx (it changes when we split)
486
+ final_output, response_id = handler.handle(event, item_ctx)
236
487
 
237
- # extract final output from JSON
488
+ # extract final output from JSON (Supervisor's last block)
238
489
  if final_output:
239
490
  final_output = self.extract_final_response(final_output)
240
491
  if verbose:
241
492
  print("Final output after extraction:", final_output)
242
493
 
243
- return ctx, final_output, response_id
494
+ # Properly finalize last block
495
+ if use_partial_ctx:
496
+ item_ctx = bridge.on_next_ctx(
497
+ ctx=item_ctx,
498
+ input=final_output or "",
499
+ output=final_output or "",
500
+ response_id=response_id or "",
501
+ finish=True,
502
+ stream=stream,
503
+ )
504
+
505
+ return item_ctx, final_output, response_id
244
506
 
245
507
  def extract_final_response(self, output: str) -> str:
246
508
  """
@@ -255,7 +517,6 @@ class Agent(BaseAgent):
255
517
  fence = re.search(r"```json\s*([\s\S]*?)\s*```", output, re.IGNORECASE)
256
518
  if fence:
257
519
  try:
258
- # Try to parse the fenced JSON
259
520
  json_text = fence.group(1).strip()
260
521
  json_response = json.loads(json_text)
261
522
  return self.response_from_json(json_response)
@@ -265,7 +526,6 @@ class Agent(BaseAgent):
265
526
  tail = JSON_RE.findall(output)
266
527
  for candidate in tail[::-1]:
267
528
  try:
268
- # Try to parse the JSON from the tail
269
529
  json_response = json.loads(candidate)
270
530
  return self.response_from_json(json_response)
271
531
  except Exception:
@@ -273,7 +533,6 @@ class Agent(BaseAgent):
273
533
 
274
534
  if output.startswith("{") and output.endswith("}"):
275
535
  try:
276
- # Try to parse the entire output as JSON
277
536
  response = json.loads(output)
278
537
  return self.response_from_json(response)
279
538
  except Exception as e:
@@ -348,8 +607,4 @@ class Agent(BaseAgent):
348
607
  },
349
608
  }
350
609
  },
351
- }
352
-
353
-
354
-
355
-
610
+ }
@@ -163,6 +163,7 @@ class StreamHandler:
163
163
  elif event.type == "run_item_stream_event":
164
164
  if isinstance(event.item, HandoffOutputItem):
165
165
  s = f"\n\n**Handoff to: {event.item.target_agent.name}**\n\n"
166
+ ctx.set_agent_name(event.item.target_agent.name)
166
167
  self._emit(ctx, s, flush, buffer)
167
168
 
168
169
  if self.finished and not self.files_handled and self.files:
File without changes
File without changes
File without changes
File without changes
@@ -160,6 +160,14 @@ class Patch:
160
160
  data["presets_order"] = {}
161
161
  updated = True
162
162
 
163
+ # < 2.6.62
164
+ if old < parse_version("2.6.62"):
165
+ print("Migrating config from < 2.6.62...")
166
+ # add: node editor css
167
+ patch_css('style.light.css', True)
168
+ patch_css('style.dark.css', True)
169
+ updated = True
170
+
163
171
  # update file
164
172
  migrated = False
165
173
  if updated:
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
@@ -392,10 +392,12 @@ class AgentBuilder(BaseTool):
392
392
  # Start
393
393
  registry.register(NodeTypeSpec(
394
394
  type_name="Flow/Start",
395
+ display_name=trans("node.editor.spec.start.title"),
395
396
  title=trans("node.editor.spec.start.title"),
396
397
  base_id="start",
397
398
  export_kind="start",
398
399
  bg_color="#2D5A27",
400
+ max_num=1, # per-layout limit
399
401
  properties=[
400
402
  PropertySpec(id="output", type="flow", name=trans("node.editor.property.output.name"), editable=False,
401
403
  allowed_inputs=0, allowed_outputs=1),
@@ -406,6 +408,7 @@ class AgentBuilder(BaseTool):
406
408
  # Agent
407
409
  registry.register(NodeTypeSpec(
408
410
  type_name="Flow/Agent",
411
+ display_name=trans("node.editor.spec.agent.title"),
409
412
  title=trans("node.editor.spec.agent.title"),
410
413
  base_id="agent",
411
414
  export_kind="agent",
@@ -435,6 +438,7 @@ class AgentBuilder(BaseTool):
435
438
  # Memory
436
439
  registry.register(NodeTypeSpec(
437
440
  type_name="Flow/Memory",
441
+ display_name=trans("node.editor.spec.memory.title"),
438
442
  title=trans("node.editor.spec.memory.title"),
439
443
  base_id="mem",
440
444
  export_kind="memory",
@@ -449,10 +453,12 @@ class AgentBuilder(BaseTool):
449
453
  # End
450
454
  registry.register(NodeTypeSpec(
451
455
  type_name="Flow/End",
456
+ display_name=trans("node.editor.spec.end.title"),
452
457
  title=trans("node.editor.spec.end.title"),
453
458
  base_id="end",
454
459
  export_kind="end",
455
460
  bg_color="#6B2E2E",
461
+ max_num=1, # per-layout limit
456
462
  properties=[
457
463
  PropertySpec(id="input", type="flow", name=trans("node.editor.property.input.name"), editable=False,
458
464
  allowed_inputs=-1, allowed_outputs=0),
@@ -77,49 +77,8 @@ class Builder:
77
77
  registry=registry
78
78
  ) # parent == dialog
79
79
 
80
- theme = self.window.core.config.get("theme")
81
- if theme.startswith("light"):
82
- style = """
83
- NodeEditor {
84
- qproperty-gridBackColor: #ffffff;
85
- qproperty-gridPenColor: #eaeaea;
86
-
87
- qproperty-nodeBackgroundColor: #2d2f34;
88
- qproperty-nodeBorderColor: #4b4f57;
89
- qproperty-nodeSelectionColor: #ff9900;
90
- qproperty-nodeTitleColor: #3a3d44;
91
-
92
- qproperty-portInputColor: #66b2ff;
93
- qproperty-portOutputColor: #70e070;
94
- qproperty-portConnectedColor: #ffd166;
95
-
96
- qproperty-edgeColor: #c0c0c0;
97
- qproperty-edgeSelectedColor: #ff8a5c;
98
- }
99
- """
100
- else:
101
- style = """
102
- NodeEditor {
103
- qproperty-gridBackColor: #242629;
104
- qproperty-gridPenColor: #3b3f46;
105
-
106
- qproperty-nodeBackgroundColor: #2d2f34;
107
- qproperty-nodeBorderColor: #4b4f57;
108
- qproperty-nodeSelectionColor: #ff9900;
109
- qproperty-nodeTitleColor: #3a3d44;
110
-
111
- qproperty-portInputColor: #66b2ff;
112
- qproperty-portOutputColor: #70e070;
113
- qproperty-portConnectedColor: #ffd166;
114
-
115
- qproperty-edgeColor: #c0c0c0;
116
- qproperty-edgeSelectedColor: #ff8a5c;
117
- }
118
- """
119
- editor.setStyleSheet(style)
120
80
  editor.on_clear = self.tool.clear
121
81
  editor.editing_allowed = self.tool.editing_allowed
122
-
123
82
  u.editor[id] = editor
124
83
 
125
84
  layout = QVBoxLayout()
@@ -1,3 +1,4 @@
1
+ # ui/layout/presets.py
1
2
  #!/usr/bin/env python3
2
3
  # -*- coding: utf-8 -*-
3
4
  # ================================================== #
@@ -6,7 +7,7 @@
6
7
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
8
  # MIT License #
8
9
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.09.26 03:00:00 #
10
+ # Updated Date: 2025.09.26 13:30:00 #
10
11
  # ================================================== #
11
12
 
12
13
  from PySide6 import QtCore
@@ -126,6 +127,16 @@ class Presets:
126
127
  models[self.id] = model
127
128
  view.setModel(model)
128
129
 
130
+ # Preserve current scroll position across model rebuild to avoid a visible jump to the top.
131
+ # This is applied while updates are disabled, then restored just before re-enabling them.
132
+ try:
133
+ v = view.verticalScrollBar().value()
134
+ h = view.horizontalScrollBar().value()
135
+ view.set_pending_v_scroll(v)
136
+ view.set_pending_h_scroll(h)
137
+ except Exception:
138
+ pass
139
+
129
140
  # Block user input during model rebuild to avoid crashes on quick clicks
130
141
  view.begin_model_update()
131
142
 
@@ -193,5 +204,6 @@ class Presets:
193
204
  # Force repaint in case Qt defers layout until next input
194
205
  view.viewport().update()
195
206
 
196
- # Re-enable user interaction after the rebuild is fully done
207
+ # Clear one-shot pending scroll values and re-enable user interaction
208
+ view.clear_pending_scroll()
197
209
  view.end_model_update()
pygpt_net/ui/main.py CHANGED
@@ -22,7 +22,7 @@ from pygpt_net.controller import Controller
22
22
  from pygpt_net.tools import Tools
23
23
  from pygpt_net.ui import UI
24
24
  from pygpt_net.ui.widget.textarea.web import ChatWebOutput
25
- from pygpt_net.utils import get_app_meta, freeze_updates, set_env, has_env, get_env
25
+ from pygpt_net.utils import get_app_meta, freeze_updates, set_env, has_env, get_env, trans
26
26
 
27
27
 
28
28
  class MainWindow(QMainWindow, QtStyleTools):
@@ -356,7 +356,7 @@ class MainWindow(QMainWindow, QtStyleTools):
356
356
  self.core.presets.save_all()
357
357
  print("Exiting...")
358
358
  print("")
359
- print("Do you like PyGPT? Support the development of the project: https://pygpt.net/#donate")
359
+ print(f"{trans('exit.msg')} https://pygpt.net/#donate")
360
360
 
361
361
  def changeEvent(self, event):
362
362
  """