pygpt-net 2.6.62__py3-none-any.whl → 2.6.63__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +5 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/presets/editor.py +65 -1
- pygpt_net/core/agents/custom/llama_index/runner.py +15 -52
- pygpt_net/core/agents/custom/runner.py +194 -76
- pygpt_net/core/agents/runners/llama_workflow.py +60 -10
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
- pygpt_net/data/config/presets/agent_openai_coder.json +1 -15
- pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
- pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
- pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
- pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
- pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
- pygpt_net/data/config/presets/agent_supervisor.json +1 -11
- pygpt_net/data/js/app/runtime.js +4 -1
- pygpt_net/data/js/app.min.js +3 -2
- pygpt_net/data/locale/locale.en.ini +5 -0
- pygpt_net/js_rc.py +13 -10
- pygpt_net/provider/agents/base.py +0 -0
- pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/codeact.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +229 -29
- pygpt_net/provider/agents/llama_index/workflow/supervisor.py +0 -0
- pygpt_net/provider/agents/openai/agent.py +0 -0
- pygpt_net/provider/agents/openai/agent_b2b.py +4 -4
- pygpt_net/provider/agents/openai/agent_planner.py +617 -262
- pygpt_net/provider/agents/openai/agent_with_experts.py +0 -0
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
- pygpt_net/provider/agents/openai/evolve.py +6 -6
- pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/openai/supervisor.py +290 -37
- pygpt_net/provider/api/x_ai/__init__.py +0 -0
- pygpt_net/provider/core/agent/__init__.py +0 -0
- pygpt_net/provider/core/agent/base.py +0 -0
- pygpt_net/provider/core/agent/json_file.py +0 -0
- pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
- pygpt_net/provider/llms/base.py +0 -0
- pygpt_net/provider/llms/deepseek_api.py +0 -0
- pygpt_net/provider/llms/google.py +0 -0
- pygpt_net/provider/llms/hugging_face_api.py +0 -0
- pygpt_net/provider/llms/hugging_face_router.py +0 -0
- pygpt_net/provider/llms/mistral.py +0 -0
- pygpt_net/provider/llms/perplexity.py +0 -0
- pygpt_net/provider/llms/x_ai.py +0 -0
- pygpt_net/ui/widget/dialog/confirm.py +34 -8
- pygpt_net/ui/widget/textarea/input.py +1 -1
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/METADATA +7 -2
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/RECORD +34 -34
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.63.dist-info}/entry_points.txt +0 -0
@@ -342,13 +342,13 @@ class Agent(BaseAgent):
             "type": "bool",
             "label": trans("agent.option.tools.local"),
             "description": trans("agent.option.tools.local.desc"),
-            "default":
+            "default": True,
         },
         "allow_remote_tools": {
             "type": "bool",
             "label": trans("agent.option.tools.remote"),
             "description": trans("agent.option.tools.remote.desc"),
-            "default":
+            "default": True,
         },
     }
 },

@@ -371,13 +371,13 @@ class Agent(BaseAgent):
             "type": "bool",
             "label": trans("agent.option.tools.local"),
             "description": trans("agent.option.tools.local.desc"),
-            "default":
+            "default": True,
         },
         "allow_remote_tools": {
             "type": "bool",
             "label": trans("agent.option.tools.remote"),
             "description": trans("agent.option.tools.remote.desc"),
-            "default":
+            "default": True,
         },
     }
 },

@@ -342,13 +342,13 @@ class Agent(BaseAgent):
             "type": "bool",
             "label": trans("agent.option.tools.local"),
             "description": trans("agent.option.tools.local.desc"),
-            "default":
+            "default": True,
         },
         "allow_remote_tools": {
             "type": "bool",
             "label": trans("agent.option.tools.remote"),
             "description": trans("agent.option.tools.remote.desc"),
-            "default":
+            "default": True,
         },
     }
 },

@@ -371,13 +371,13 @@ class Agent(BaseAgent):
             "type": "bool",
             "label": trans("agent.option.tools.local"),
             "description": trans("agent.option.tools.local.desc"),
-            "default":
+            "default": True,
         },
         "allow_remote_tools": {
             "type": "bool",
             "label": trans("agent.option.tools.remote"),
             "description": trans("agent.option.tools.remote.desc"),
-            "default":
+            "default": True,
         },
     }
 },

@@ -528,13 +528,13 @@ class Agent(BaseAgent):
             "type": "bool",
             "label": trans("agent.option.tools.local"),
             "description": trans("agent.option.tools.local.desc"),
-            "default":
+            "default": True,
         },
         "allow_remote_tools": {
             "type": "bool",
             "label": trans("agent.option.tools.remote"),
             "description": trans("agent.option.tools.remote.desc"),
-            "default":
+            "default": True,
         },
     }
 },

@@ -557,13 +557,13 @@ class Agent(BaseAgent):
             "type": "bool",
             "label": trans("agent.option.tools.local"),
             "description": trans("agent.option.tools.local.desc"),
-            "default":
+            "default": True,
         },
         "allow_remote_tools": {
             "type": "bool",
             "label": trans("agent.option.tools.remote"),
             "description": trans("agent.option.tools.remote.desc"),
-            "default":
+            "default": True,
         },
     }
 },

@@ -586,13 +586,13 @@ class Agent(BaseAgent):
             "type": "bool",
             "label": trans("agent.option.tools.local"),
             "description": trans("agent.option.tools.local.desc"),
-            "default":
+            "default": True,
         },
        "allow_remote_tools": {
             "type": "bool",
             "label": trans("agent.option.tools.remote"),
             "description": trans("agent.option.tools.remote.desc"),
-            "default":
+            "default": True,
         },
     }
 },
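Each of these hunks is the same two-line fix repeated across the OpenAI agent providers (the ±4 and ±6 entries in the file list above): the `allow_local_tools` and `allow_remote_tools` boolean options now explicitly default to `True`. The removed `"default":` values are truncated in this diff, so the old defaults are not visible here. Below is a minimal sketch of the resulting option schema; the enclosing `options` name is an assumption, and `trans` is stubbed (in pygpt-net it resolves locale keys):

```python
def trans(key: str) -> str:
    return key  # stub; pygpt-net's trans() looks the key up in the locale files

# Shape of the per-agent tool options after this change (from the diff context)
options = {
    "allow_local_tools": {
        "type": "bool",
        "label": trans("agent.option.tools.local"),
        "description": trans("agent.option.tools.local.desc"),
        "default": True,  # local tools now enabled by default
    },
    "allow_remote_tools": {
        "type": "bool",
        "label": trans("agent.option.tools.remote"),
        "description": trans("agent.option.tools.remote.desc"),
        "default": True,  # remote tools now enabled by default
    },
}

print(options["allow_remote_tools"]["default"])  # True
```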
pygpt_net/provider/agents/openai/supervisor.py

@@ -6,12 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.27 20:25:00 #
 # ================================================== #
-
+import base64
 import json
 import re
-
+import io
+from typing import Dict, Any, Tuple, Optional, Callable
 
 from agents import (
     Agent as OpenAIAgent,

@@ -38,6 +39,16 @@ from pygpt_net.utils import trans
 
 from ..base import BaseAgent
 
+# OpenAI response event types (used by StreamHandler)
+from openai.types.responses import (
+    ResponseTextDeltaEvent,
+    ResponseCreatedEvent,
+    ResponseCodeInterpreterCallCodeDeltaEvent,
+    ResponseOutputItemAddedEvent,
+    ResponseCompletedEvent,
+    ResponseOutputItemDoneEvent,
+)
+
 JSON_RE = re.compile(r"\{[\s\S]*\}$", re.MULTILINE)
 
 SUPERVISOR_PROMPT = """
@@ -62,6 +73,200 @@ You are the “Worker”. You execute Supervisor instructions strictly, using yo
 Respond in the user's language.
 """
 
+
+class SupervisorStreamHandler(StreamHandler):
+    """
+    Stream handler that filters JSON from Supervisor output during streaming.
+    - Pass-through normal text.
+    - Suppress raw JSON (both ```json fenced and bare {...}).
+    - When JSON finishes, parse and emit only the human-friendly text via `json_to_text`.
+    """
+    def __init__(
+            self,
+            window,
+            bridge: ConnectionContext = None,
+            message: str = None,
+            json_to_text: Optional[Callable[[dict], str]] = None,
+    ):
+        super().__init__(window, bridge, message)
+        self.json_to_text = json_to_text or (lambda d: json.dumps(d, ensure_ascii=False))
+        self._json_fenced = False
+        self._json_buf = io.StringIO()
+        self._json_in_braces = False
+        self._brace_depth = 0
+        self._in_string = False
+        self._escape = False
+
+    def _emit_text(self, ctx: CtxItem, text: str, flush: bool, buffer: bool):
+        if not text:
+            return
+        self._emit(ctx, text, flush, buffer)
+
+    def _flush_json(self, ctx: CtxItem, flush: bool, buffer: bool):
+        """
+        Parse collected JSON and emit only formatted text; reset state.
+        """
+        raw_json = self._json_buf.getvalue().strip()
+        self._json_buf = io.StringIO()
+        self._json_fenced = False
+        self._json_in_braces = False
+        self._brace_depth = 0
+        self._in_string = False
+        self._escape = False
+
+        if not raw_json:
+            return
+        try:
+            data = json.loads(raw_json)
+            out = self.json_to_text(data) or ""
+        except Exception:
+            # Fallback: if parsing failed, do not leak JSON; stay silent
+            out = ""
+        if out:
+            self._emit_text(ctx, out, flush, buffer)
+
+    def _handle_text_delta(self, s: str, ctx: CtxItem, flush: bool, buffer: bool):
+        """
+        Filter JSON while streaming; emit only non-JSON text or parsed JSON text.
+        """
+        i = 0
+        n = len(s)
+        while i < n:
+            # Detect fenced JSON start
+            if not self._json_fenced and not self._json_in_braces and s.startswith("```json", i):
+                # Emit any text before the fence
+                # (there shouldn't be in this branch because we check exact start, but keep safe)
+                i += len("```json")
+                self._json_fenced = True
+                # Skip possible newline after fence
+                if i < n and s[i] == '\n':
+                    i += 1
+                continue
+
+            # Detect fenced JSON end
+            if self._json_fenced and s.startswith("```", i):
+                # Flush JSON collected so far
+                self._flush_json(ctx, flush, buffer)
+                i += len("```")
+                # Optional newline after closing fence
+                if i < n and s[i] == '\n':
+                    i += 1
+                continue
+
+            # While inside fenced JSON -> buffer and continue
+            if self._json_fenced:
+                self._json_buf.write(s[i])
+                i += 1
+                continue
+
+            # Bare JSON detection (naive but effective for supervisor outputs)
+            if not self._json_in_braces and s[i] == "{":
+                self._json_in_braces = True
+                self._brace_depth = 1
+                self._in_string = False
+                self._escape = False
+                self._json_buf.write("{")
+                i += 1
+                continue
+
+            if self._json_in_braces:
+                ch = s[i]
+                # Basic JSON string/escape handling
+                if ch == '"' and not self._escape:
+                    self._in_string = not self._in_string
+                if ch == "\\" and not self._escape:
+                    self._escape = True
+                else:
+                    self._escape = False
+                if not self._in_string:
+                    if ch == "{":
+                        self._brace_depth += 1
+                    elif ch == "}":
+                        self._brace_depth -= 1
+                self._json_buf.write(ch)
+                i += 1
+                if self._brace_depth == 0:
+                    # JSON closed -> flush parsed text
+                    self._flush_json(ctx, flush, buffer)
+                continue
+
+            # Normal text path
+            # Accumulate until potential fenced start to avoid splitting too often
+            next_fence = s.find("```json", i)
+            next_bare = s.find("{", i)
+            cut = n
+            candidates = [x for x in (next_fence, next_bare) if x != -1]
+            if candidates:
+                cut = min(candidates)
+            chunk = s[i:cut]
+            if chunk:
+                self._emit_text(ctx, chunk, flush, buffer)
+            i = cut if cut != n else n
+
+    def handle(
+            self,
+            event,
+            ctx: CtxItem,
+            flush: bool = True,
+            buffer: bool = True
+    ) -> Tuple[str, str]:
+        """
+        Override StreamHandler.handle to filter JSON in text deltas.
+        For non-text events, fallback to parent handler.
+        """
+        # ReasoningItem path remains the same (parent prints to stdout), keep parent behavior.
+
+        if getattr(event, "type", None) == "raw_response_event":
+            data = event.data
+
+            if isinstance(data, ResponseCreatedEvent):
+                self.response_id = data.response.id
+                return self.buffer, self.response_id
+
+            if isinstance(data, ResponseTextDeltaEvent):
+                # Filter JSON while streaming
+                delta = data.delta or ""
+                # If a code_interpreter block was started previously, render fence first
+                if self.code_block:
+                    self._emit_text(ctx, "\n```\n", flush, buffer)
+                    self.code_block = False
+                self._handle_text_delta(delta, ctx, flush, buffer)
+                return self.buffer, self.response_id
+
+            if isinstance(data, ResponseOutputItemAddedEvent):
+                if data.item.type == "code_interpreter_call":
+                    self.code_block = True
+                    s = "\n\n**Code interpreter**\n```python\n"
+                    self._emit_text(ctx, s, flush, buffer)
+                return self.buffer, self.response_id
+
+            if isinstance(data, ResponseOutputItemDoneEvent):
+                if data.item.type == "image_generation_call":
+                    img_path = self.window.core.image.gen_unique_path(ctx)
+                    image_base64 = data.item.result
+                    image_bytes = base64.b64decode(image_base64)
+                    with open(img_path, "wb") as f:
+                        f.write(image_bytes)
+                    self.window.core.debug.info("[chat] Image generation call found")
+                    ctx.images = [img_path]
+                return self.buffer, self.response_id
+
+            if isinstance(data, ResponseCodeInterpreterCallCodeDeltaEvent):
+                self._emit_text(ctx, data.delta or "", flush, buffer)
+                return self.buffer, self.response_id
+
+            if isinstance(data, ResponseCompletedEvent):
+                # If we are still buffering JSON, flush it now (emit parsed text only)
+                if self._json_fenced or self._json_in_braces:
+                    self._flush_json(ctx, flush, buffer)
+                # Mark finished so parent downloader logic (files) may trigger if needed
+                self.finished = True
+                return self.buffer, self.response_id
+
+        # Handoff / other events: fallback to parent, but it won't print JSON since we already filtered in text deltas
+        return super().handle(event, ctx, flush, buffer)
+
+
 class Agent(BaseAgent):
 
     def __init__(self, *args, **kwargs):
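The heart of the new handler is a character-level state machine: ordinary text streams through untouched, a `{` (or a ```` ```json ```` fence) switches the handler into buffering mode, brace depth is tracked until the object closes, and only the parsed, human-readable rendering is emitted. Below is a minimal, self-contained sketch of that idea, not the shipped class (it omits the string/escape and fence handling above, and the `action` key is a made-up example):

```python
import json

def filter_json_stream(deltas):
    """Toy version of the SupervisorStreamHandler idea: pass plain text
    through, buffer bare {...} JSON by brace depth, and emit a parsed
    summary once the object closes."""
    buf, depth, in_json, out = [], 0, False, []
    for delta in deltas:          # deltas arrive in arbitrary chunks
        for ch in delta:
            if not in_json and ch == "{":
                in_json, depth = True, 1      # enter JSON-buffering mode
                buf.append(ch)
            elif in_json:
                buf.append(ch)
                if ch == "{":
                    depth += 1
                elif ch == "}":
                    depth -= 1
                    if depth == 0:            # object closed -> flush
                        try:
                            data = json.loads("".join(buf))
                            out.append(f"[parsed: {data.get('action', '?')}]")
                        except ValueError:
                            pass              # never leak malformed JSON
                        buf, in_json = [], False
            else:
                out.append(ch)                # normal text path
    return "".join(out)

# Chunk boundaries fall mid-JSON on purpose:
print(filter_json_stream(['Thinking... {"acti', 'on": "ask_worker"} done.']))
# -> Thinking... [parsed: ask_worker] done.
```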
@@ -69,7 +274,7 @@ class Agent(BaseAgent):
         self.id = "openai_agent_supervisor"
         self.type = AGENT_TYPE_OPENAI
         self.mode = AGENT_MODE_OPENAI
-        self.name = "Supervisor
+        self.name = "Supervisor"  # use clean name in UI headers
 
     def get_agent(self, window, kwargs: Dict[str, Any]):
         """
@@ -81,8 +286,11 @@ class Agent(BaseAgent):
         """
         context = kwargs.get("context", BridgeContext())
         preset = context.preset
-        agent_name = preset.name if preset else "Supervisor"
         model = kwargs.get("model", ModelItem())
+
+        # Enforce a stable, clean display name for the Supervisor regardless of preset name.
+        agent_name = "Supervisor"  # hard-coded UI name
+
         worker_tool = kwargs.get("worker_tool", None)
         kwargs = {
             "name": agent_name,
@@ -184,65 +392,117 @@ class Agent(BaseAgent):
         worker_session_id = f"worker_session_{ctx.meta.id}" if ctx.meta else "worker_session_default"
         worker_session = SQLiteSession(worker_session_id)
 
-        handler
-
+        # Use JSON-filtering handler for Supervisor streaming
+        handler = SupervisorStreamHandler(
+            window,
+            bridge,
+            json_to_text=self.response_from_json,
+        )
+        item_ctx = ctx  # will reassign on splits
+
+        supervisor_display_name = None  # set after agent is created
 
         # tool to run Worker
         @function_tool(name_override="run_worker")
-        async def run_worker(
+        async def run_worker(fn_ctx: RunContextWrapper[Any], instruction: str) -> str:
             """
             Run the Worker with an instruction from the Supervisor and return its output.
 
-
-
-
+            - Appends the instruction to the current Supervisor block.
+            - Finalizes the Supervisor block and opens a new Worker block.
+            - Runs the Worker and streams its result into the Worker block.
+            - Finalizes the Worker block, then opens a fresh block for the Supervisor to continue.
             """
-            item_ctx
+            nonlocal item_ctx, supervisor_display_name
+
+            info = f"\n\n**{trans('agent.name.supervisor')} → {trans('agent.name.worker')}:** {instruction}\n\n"
+            item_ctx.stream = info
             bridge.on_step(item_ctx, True)
-            handler.
+            handler.to_buffer(info)
+
+            if use_partial_ctx:
+                item_ctx = bridge.on_next_ctx(
+                    ctx=item_ctx,
+                    input="",
+                    output=handler.buffer,  # finalize current Supervisor content
+                    response_id=handler.response_id or "",
+                    stream=True,
+                )
+                handler.new()  # reset handler buffer for next block
+
+            try:
+                item_ctx.set_agent_name(worker.name)
+            except Exception:
+                pass
+
             result = await Runner.run(
                 worker,
                 input=instruction,
                 session=worker_session,
                 max_turns=max_steps,
             )
-
-
-
+
+            worker_text = str(result.final_output or "")
+            if worker_text:
+                item_ctx.stream = f"{worker_text}\n"
+                bridge.on_step(item_ctx, True)
+
+            if use_partial_ctx:
+                item_ctx = bridge.on_next_ctx(
+                    ctx=item_ctx,
+                    input="",
+                    output=worker_text,  # finalize worker output
+                    response_id="",  # worker has no OpenAI response id here
+                    stream=True,
+                )
+                try:
+                    if supervisor_display_name:
+                        item_ctx.set_agent_name(supervisor_display_name)
+                except Exception:
+                    pass
+
+            return worker_text
 
         agent_kwargs["worker_tool"] = run_worker
         agent = self.get_agent(window, agent_kwargs)
+        supervisor_display_name = agent.name  # "Supervisor"
 
         if not stream:
             item_ctx.set_agent_name(agent.name)
-            result = await Runner.run(
-
-                **kwargs
-            )
-            final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, ctx)
+            result = await Runner.run(agent, **kwargs)
+            final_output, last_response_id = window.core.api.openai.responses.unpack_agent_response(result, item_ctx)
             response_id = result.last_response_id
             if verbose:
                 print("Final response:", result)
         else:
             item_ctx.set_agent_name(agent.name)
-            result = Runner.run_streamed(
-                agent,
-                **kwargs
-            )
+            result = Runner.run_streamed(agent, **kwargs)
             async for event in result.stream_events():
                 if bridge.stopped():
                     result.cancel()
-                    bridge.on_stop(
+                    bridge.on_stop(item_ctx)
                     break
-
+                # Write into current item_ctx (it changes when we split)
+                final_output, response_id = handler.handle(event, item_ctx)
 
-        # extract final output from JSON
+        # extract final output from JSON (Supervisor's last block)
         if final_output:
             final_output = self.extract_final_response(final_output)
             if verbose:
                 print("Final output after extraction:", final_output)
 
-
+        # Properly finalize last block
+        if use_partial_ctx:
+            item_ctx = bridge.on_next_ctx(
+                ctx=item_ctx,
+                input=final_output or "",
+                output=final_output or "",
+                response_id=response_id or "",
+                finish=True,
+                stream=stream,
+            )
+
+        return item_ctx, final_output, response_id
 
     def extract_final_response(self, output: str) -> str:
         """
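The rewritten flow keeps a single mutable `item_ctx` that the `run_worker` closure rebinds via `nonlocal` whenever output is split into separate Supervisor and Worker blocks. Below is a rough sketch of that closure-tool pattern using the openai-agents SDK; `build_supervisor` and the `transcript` list are illustrative names, not pygpt-net code:

```python
from typing import Any
from agents import Agent, Runner, RunContextWrapper, function_tool

def build_supervisor(worker: Agent) -> Agent:
    transcript: list[str] = []  # shared state captured by the closure,
                                # playing the role of item_ctx above

    @function_tool(name_override="run_worker")
    async def run_worker(fn_ctx: RunContextWrapper[Any], instruction: str) -> str:
        """Run the Worker with an instruction and return its output."""
        result = await Runner.run(worker, input=instruction)
        text = str(result.final_output or "")
        transcript.append(text)  # mutate enclosing state, like the ctx splits
        return text

    return Agent(
        name="Supervisor",
        instructions="Delegate concrete tasks to the run_worker tool.",
        tools=[run_worker],
    )
```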
@@ -257,7 +517,6 @@ class Agent(BaseAgent):
         fence = re.search(r"```json\s*([\s\S]*?)\s*```", output, re.IGNORECASE)
         if fence:
             try:
-                # Try to parse the fenced JSON
                 json_text = fence.group(1).strip()
                 json_response = json.loads(json_text)
                 return self.response_from_json(json_response)
@@ -267,7 +526,6 @@ class Agent(BaseAgent):
         tail = JSON_RE.findall(output)
         for candidate in tail[::-1]:
             try:
-                # Try to parse the JSON from the tail
                 json_response = json.loads(candidate)
                 return self.response_from_json(json_response)
             except Exception:
@@ -275,7 +533,6 @@ class Agent(BaseAgent):
 
         if output.startswith("{") and output.endswith("}"):
             try:
-                # Try to parse the entire output as JSON
                 response = json.loads(output)
                 return self.response_from_json(response)
             except Exception as e:
@@ -350,8 +607,4 @@ class Agent(BaseAgent):
             },
         }
     },
-}
-
-
-
-
+}
pygpt_net/ui/widget/dialog/confirm.py

@@ -6,11 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.27 09:30:00 #
 # ================================================== #
 
 import sys
-from PySide6.QtCore import Qt
+from PySide6.QtCore import Qt, QTimer
 from PySide6.QtWidgets import QDialog, QLabel, QHBoxLayout, QVBoxLayout, QPushButton
 
 from pygpt_net.utils import trans
@@ -44,11 +44,7 @@ class ConfirmDialog(QDialog):
 
         # Always make the neutral action (No/Cancel) the default/active one.
         # This ensures Enter triggers the safe option by default.
-        self.
-        self.window.ui.nodes['dialog.confirm.btn.no'].setDefault(True)
-        self.window.ui.nodes['dialog.confirm.btn.no'].setFocus()
-        self.window.ui.nodes['dialog.confirm.btn.yes'].setAutoDefault(False)
-        self.window.ui.nodes['dialog.confirm.btn.yes'].setDefault(False)
+        self._apply_neutral_default()
 
         # Bottom button row with platform-specific ordering
         # Windows: affirmative on the left, neutral on the right
@@ -85,4 +81,34 @@ class ConfirmDialog(QDialog):
         :param event: close event
         """
         self.window.controller.dialogs.confirm.dismiss(self.type, self.id)
-        super(ConfirmDialog, self).closeEvent(event)
+        super(ConfirmDialog, self).closeEvent(event)
+
+    def showEvent(self, event):
+        """
+        Ensure neutral button is default/active on every show.
+
+        Using a single-shot timer defers focus/default restoration until
+        after the dialog becomes visible, which prevents focus being stolen
+        by the window manager.
+        """
+        super(ConfirmDialog, self).showEvent(event)
+        QTimer.singleShot(0, self._apply_neutral_default)
+
+    def _apply_neutral_default(self):
+        """
+        Set the neutral action (No/Cancel) as default and active.
+        Always called on construction and each time the dialog is shown.
+        """
+        btn_no = self.window.ui.nodes.get('dialog.confirm.btn.no')
+        btn_yes = self.window.ui.nodes.get('dialog.confirm.btn.yes')
+        if not btn_no or not btn_yes:
+            return
+
+        # Make sure affirmative button cannot become default by leftover state
+        btn_yes.setAutoDefault(False)
+        btn_yes.setDefault(False)
+
+        # Make neutral (No/Cancel) the active default and take focus
+        btn_no.setAutoDefault(True)
+        btn_no.setDefault(True)
+        btn_no.setFocus()
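The `QTimer.singleShot(0, ...)` call is the key detail: it defers the default/focus assignment until the event loop has actually shown the dialog, so the window manager cannot steal focus back. Below is a standalone PySide6 sketch of the same pattern (widget names are illustrative):

```python
import sys
from PySide6.QtCore import QTimer
from PySide6.QtWidgets import QApplication, QDialog, QHBoxLayout, QPushButton

class SafeConfirm(QDialog):
    """Dialog where Enter always hits the neutral (safe) button."""
    def __init__(self):
        super().__init__()
        layout = QHBoxLayout(self)
        self.yes = QPushButton("Yes", self)
        self.no = QPushButton("No", self)
        layout.addWidget(self.yes)
        layout.addWidget(self.no)

    def showEvent(self, event):
        super().showEvent(event)
        # Defer until after the dialog is actually visible
        QTimer.singleShot(0, self._apply_neutral_default)

    def _apply_neutral_default(self):
        self.yes.setAutoDefault(False)
        self.yes.setDefault(False)
        self.no.setAutoDefault(True)
        self.no.setDefault(True)
        self.no.setFocus()

if __name__ == "__main__":
    app = QApplication(sys.argv)
    dlg = SafeConfirm()
    dlg.show()
    sys.exit(app.exec())
```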
pygpt_net/ui/widget/textarea/input.py

@@ -125,7 +125,7 @@ class ChatInput(QTextEdit):
         self.textChanged.connect(self._on_text_changed_tokens)
 
         # Paste/input safety limits
-        self._paste_max_chars =
+        self._paste_max_chars = 1000000000  # hard cap to prevent pathological pastes from freezing/crashing
 
     def _on_text_changed_tokens(self):
         """Schedule token count update with debounce."""