pygpt-net 2.5.95__py3-none-any.whl → 2.5.97__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
- pygpt_net/CHANGELOG.txt +8 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/chat/output.py +4 -3
- pygpt_net/controller/chat/response.py +1 -1
- pygpt_net/controller/chat/stream.py +171 -207
- pygpt_net/controller/ctx/ctx.py +5 -5
- pygpt_net/controller/kernel/kernel.py +200 -223
- pygpt_net/core/bridge/worker.py +7 -11
- pygpt_net/core/ctx/bag.py +2 -4
- pygpt_net/core/ctx/container.py +3 -4
- pygpt_net/core/ctx/ctx.py +1 -0
- pygpt_net/core/ctx/output.py +137 -93
- pygpt_net/core/dispatcher/dispatcher.py +60 -80
- pygpt_net/core/idx/chat.py +0 -21
- pygpt_net/core/render/web/body.py +873 -853
- pygpt_net/core/render/web/helpers.py +99 -59
- pygpt_net/core/render/web/parser.py +144 -126
- pygpt_net/core/render/web/renderer.py +354 -296
- pygpt_net/data/config/config.json +4 -4
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/item/ctx.py +11 -1
- pygpt_net/launcher.py +4 -1
- pygpt_net/provider/gpt/__init__.py +0 -3
- pygpt_net/provider/gpt/worker/assistants.py +1 -2
- pygpt_net/ui/layout/chat/input.py +4 -2
- pygpt_net/ui/layout/ctx/ctx_list.py +106 -104
- pygpt_net/ui/layout/toolbox/presets.py +4 -0
- pygpt_net/ui/main.py +6 -1
- pygpt_net/ui/widget/textarea/web.py +162 -164
- pygpt_net/utils.py +48 -2
- {pygpt_net-2.5.95.dist-info → pygpt_net-2.5.97.dist-info}/METADATA +10 -2
- {pygpt_net-2.5.95.dist-info → pygpt_net-2.5.97.dist-info}/RECORD +35 -35
- {pygpt_net-2.5.95.dist-info → pygpt_net-2.5.97.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.95.dist-info → pygpt_net-2.5.97.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.95.dist-info → pygpt_net-2.5.97.dist-info}/entry_points.txt +0 -0
pygpt_net/controller/chat/stream.py CHANGED

@@ -6,14 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.11 00:00:00 #
 # ================================================== #
 
 import base64
-import json
 from typing import Optional
 
-from PySide6.QtCore import QObject, Signal, Slot, QRunnable
+from PySide6.QtCore import QObject, Signal, Slot, QRunnable, QMetaObject, Qt
 
 from pygpt_net.core.bridge import BridgeContext
 from pygpt_net.core.events import RenderEvent
@@ -21,82 +20,95 @@ from pygpt_net.core.types import MODE_ASSISTANT
 from pygpt_net.core.text.utils import has_unclosed_code_tag
 from pygpt_net.item.ctx import CtxItem
 
-
-
+class WorkerSignals(QObject):
+    """
+    Defines the signals available from a running worker thread.
+    - `finished`: No data
+    - `errorOccurred`: Exception
+    - `eventReady`: RenderEvent
+    """
     end = Signal(object)
     errorOccurred = Signal(Exception)
     eventReady = Signal(object)
 
+
+class StreamWorker(QRunnable):
     def __init__(self, ctx: CtxItem, window, parent=None):
-        QObject.__init__(self)
         QRunnable.__init__(self)
+        self.signals = WorkerSignals()
         self.ctx = ctx
         self.window = window
 
     @Slot()
     def run(self):
-
+        ctx = self.ctx
+        win = self.window
+        core = win.core
+        ctrl = win.controller
+
+        emit_event = self.signals.eventReady.emit
+        emit_error = self.signals.errorOccurred.emit
+        emit_end = self.signals.end.emit
+
+        output_parts = []
         output_tokens = 0
         begin = True
        error = None
         fn_args_buffers = {}
         citations = []
         files = []
-        img_path =
+        img_path = core.image.gen_unique_path(ctx)
         is_image = False
         is_code = False
         force_func_call = False
         stopped = False
         chunk_type = "raw"
-
-
-
-
-        "meta":
-        "ctx":
+        generator = ctx.stream
+        ctx.stream = None
+
+        base_data = {
+            "meta": ctx.meta,
+            "ctx": ctx,
         }
-
-        self.eventReady.emit(event)
+        emit_event(RenderEvent(RenderEvent.STREAM_BEGIN, base_data))
 
+        tool_calls = []
         try:
             if generator is not None:
-                tool_calls = []
                 for chunk in generator:
-
-
-                    self.ctx.msg_id = None  # reset message ID
+                    if ctrl.kernel.stopped():
+                        ctx.msg_id = None
                         stopped = True
                         break
+
                     if error is not None:
-
+                        ctx.msg_id = None
                         stopped = True
                         break
 
                     etype = None
                     response = None
 
-                    if
-                    if hasattr(chunk, 'type'):
+                    if ctx.use_responses_api:
+                        if hasattr(chunk, 'type'):
                             etype = chunk.type
-                        chunk_type = "api_chat_responses"
+                            chunk_type = "api_chat_responses"
                         else:
                             continue
                     else:
                         if (hasattr(chunk, 'choices')
-                            and chunk.choices
+                                and chunk.choices
                             and hasattr(chunk.choices[0], 'delta')
                             and chunk.choices[0].delta is not None):
-                        chunk_type = "api_chat"
+                            chunk_type = "api_chat"
                         elif (hasattr(chunk, 'choices')
-                            and chunk.choices
+                                and chunk.choices
                             and hasattr(chunk.choices[0], 'text')
                             and chunk.choices[0].text is not None):
                             chunk_type = "api_completion"
-                        elif
-                            and chunk.content is not None):
+                        elif hasattr(chunk, 'content') and chunk.content is not None:
                             chunk_type = "langchain_chat"
-                        elif
-                            and chunk.delta is not None):
+                        elif hasattr(chunk, 'delta') and chunk.delta is not None:
                             chunk_type = "llama_chat"
                         else:
                             chunk_type = "raw"
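The change above replaces the old `class StreamWorker(QObject, QRunnable)` double inheritance (still visible in the hunk context headers) with a dedicated `WorkerSignals` object: `QRunnable` is not a `QObject` and cannot declare `Signal` attributes, so the signals move onto a small QObject that the worker owns. A minimal sketch of the same pattern, with illustrative names that are not from the package:

from PySide6.QtCore import QObject, QRunnable, QThreadPool, Signal, Slot

class TaskSignals(QObject):
    # Signals must live on a QObject; a plain QRunnable cannot carry them.
    finished = Signal(object)
    failed = Signal(Exception)

class Task(QRunnable):
    def __init__(self, payload):
        QRunnable.__init__(self)
        self.signals = TaskSignals()  # composition instead of dual inheritance
        self.payload = payload

    @Slot()
    def run(self):
        try:
            self.signals.finished.emit(self.payload.upper())  # stand-in work
        except Exception as e:
            self.signals.failed.emit(e)

# Inside a running Qt application: connect to task.signals.* first,
# then hand the runnable to the pool, which calls run() on a pool thread.
task = Task("streamed text")
task.signals.finished.connect(print)
QThreadPool.globalInstance().start(task)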
@@ -104,16 +116,15 @@ class StreamWorker(QObject, QRunnable):
                     # OpenAI chat completion
                     if chunk_type == "api_chat":
                         citations = None
-
-
-
-
-
-                        response =
-
-                        if
-
-                        for tool_chunk in tool_chunks:
+                        delta = chunk.choices[0].delta
+                        if delta and delta.content is not None:
+                            if citations is None and hasattr(chunk, 'citations') and chunk.citations is not None:
+                                citations = chunk.citations
+                                ctx.urls = citations
+                            response = delta.content
+
+                        if delta and delta.tool_calls:
+                            for tool_chunk in delta.tool_calls:
                                 if tool_chunk.index is None:
                                     tool_chunk.index = 0
                                 if len(tool_calls) <= tool_chunk.index:
@@ -121,30 +132,23 @@ class StreamWorker(QObject, QRunnable):
                                     {
                                         "id": "",
                                         "type": "function",
-                                        "function": {
-                                            "name": "",
-                                            "arguments": ""
-                                        }
+                                        "function": {"name": "", "arguments": ""}
                                     }
                                 )
                                 tool_call = tool_calls[tool_chunk.index]
-                                if tool_chunk
+                                if getattr(tool_chunk, "id", None):
                                     tool_call["id"] += tool_chunk.id
-                                if tool_chunk.function
+                                if getattr(tool_chunk.function, "name", None):
                                     tool_call["function"]["name"] += tool_chunk.function.name
-                                if tool_chunk.function
+                                if getattr(tool_chunk.function, "arguments", None):
                                     tool_call["function"]["arguments"] += tool_chunk.function.arguments
 
                     # OpenAI Responses API
                     elif chunk_type == "api_chat_responses":
                         if etype == "response.completed":
-                            # MCP tools
                             for item in chunk.response.output:
-                                # MCP: list tools
                                 if item.type == "mcp_list_tools":
-
-                                    self.window.core.gpt.responses.mcp_tools = tools  # store MCP tools for later use
-                                    # MCP: tool call
+                                    core.gpt.responses.mcp_tools = item.tools
                                 elif item.type == "mcp_call":
                                     call = {
                                         "id": item.id,
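In the `api_chat` branch above, tool calls arrive as partial deltas keyed by `index`, and the id, function name, and argument text must be concatenated across chunks before a call is complete. A standalone sketch of that accumulation over simplified dict-shaped deltas (the real chunks are OpenAI SDK objects):

def accumulate_tool_calls(deltas):
    """Merge streamed tool-call fragments into complete call dicts."""
    calls = []
    for d in deltas:
        idx = d.get("index") or 0
        while len(calls) <= idx:  # first fragment for this index opens a slot
            calls.append({"id": "", "type": "function",
                          "function": {"name": "", "arguments": ""}})
        call = calls[idx]
        call["id"] += d.get("id", "") or ""
        fn = d.get("function", {}) or {}
        call["function"]["name"] += fn.get("name", "") or ""
        call["function"]["arguments"] += fn.get("arguments", "") or ""
    return calls

# fragments of one call: the name arrives first, arguments stream afterwards
chunks = [
    {"index": 0, "id": "call_1", "function": {"name": "get_weather"}},
    {"index": 0, "function": {"arguments": '{"city": '}},
    {"index": 0, "function": {"arguments": '"Paris"}'}},
]
assert accumulate_tool_calls(chunks)[0]["function"]["arguments"] == '{"city": "Paris"}'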
@@ -160,14 +164,10 @@ class StreamWorker(QObject, QRunnable):
                                         "id": item.id,
                                         "call_id": "",
                                         "type": "function",
-                                        "function": {
-                                            "name": item.name,
-                                            "arguments": item.arguments
-                                        }
+                                        "function": {"name": item.name, "arguments": item.arguments}
                                     })
-
-
-                                # MCP: approval request
+                                    ctx.extra["mcp_call"] = call
+                                    core.ctx.update_item(ctx)
                                 elif item.type == "mcp_approval_request":
                                     call = {
                                         "id": item.id,
@@ -176,61 +176,56 @@ class StreamWorker(QObject, QRunnable):
                                         "name": item.name,
                                         "server_label": item.server_label,
                                     }
-
-
+                                    ctx.extra["mcp_approval_request"] = call
+                                    core.ctx.update_item(ctx)
 
-                        # text chunk
                         elif etype == "response.output_text.delta":
                             response = chunk.delta
 
-                        #
+                        # function_call
                         elif etype == "response.output_item.added" and chunk.item.type == "function_call":
                             tool_calls.append({
                                 "id": chunk.item.id,
                                 "call_id": chunk.item.call_id,
                                 "type": "function",
-                                "function": {
-                                    "name": chunk.item.name,
-                                    "arguments": ""
-                                }
+                                "function": {"name": chunk.item.name, "arguments": ""}
                             })
                             fn_args_buffers[chunk.item.id] = ""
                         elif etype == "response.function_call_arguments.delta":
                             fn_args_buffers[chunk.item_id] += chunk.delta
                         elif etype == "response.function_call_arguments.done":
-
-
-
-
-
-
-
+                            buf = fn_args_buffers.pop(chunk.item_id, None)
+                            if buf is not None:
+                                for tc in tool_calls:
+                                    if tc["id"] == chunk.item_id:
+                                        tc["function"]["arguments"] = buf
+                                        break
+
+                        # annotations
                         elif etype == "response.output_text.annotation.added":
-
+                            ann = chunk.annotation
+                            if ann['type'] == "url_citation":
                                 if citations is None:
                                     citations = []
-                                url_citation =
+                                url_citation = ann['url']
                                 citations.append(url_citation)
-
-
-                                container_id = chunk.annotation['container_id']
-                                file_id = chunk.annotation['file_id']
+                                ctx.urls = citations
+                            elif ann['type'] == "container_file_citation":
                                 files.append({
-                                    "container_id": container_id,
-                                    "file_id": file_id,
+                                    "container_id": ann['container_id'],
+                                    "file_id": ann['file_id'],
                                 })
 
-                        #
+                        # computer use
                         elif etype == "response.reasoning_summary_text.delta":
                             response = chunk.delta
 
                         elif etype == "response.output_item.done":
-
-
-
+                            tool_calls, has_calls = core.gpt.computer.handle_stream_chunk(ctx, chunk, tool_calls)
+                            if has_calls:
+                                force_func_call = True
 
-
-                        # ---------- code interpreter ----------
+                        # code interpreter
                         elif etype == "response.code_interpreter_call_code.delta":
                             if not is_code:
                                 response = "\n\n**Code interpreter**\n```python\n" + chunk.delta
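The Responses-API branch above buffers function-call arguments per `item_id`: `response.output_item.added` opens the call, each `response.function_call_arguments.delta` appends text to its buffer, and `response.function_call_arguments.done` seals the buffer into the matching entry in `tool_calls`. A compact replay of that protocol over schematic event dicts (not the SDK's event classes):

def replay(events):
    calls, buffers = [], {}
    for ev in events:
        if ev["type"] == "response.output_item.added":
            calls.append({"id": ev["id"],
                          "function": {"name": ev["name"], "arguments": ""}})
            buffers[ev["id"]] = ""  # open a buffer for this call's arguments
        elif ev["type"] == "response.function_call_arguments.delta":
            buffers[ev["id"]] += ev["delta"]  # argument JSON streams in pieces
        elif ev["type"] == "response.function_call_arguments.done":
            buf = buffers.pop(ev["id"], None)  # seal and discard the buffer
            if buf is not None:
                for call in calls:
                    if call["id"] == ev["id"]:
                        call["function"]["arguments"] = buf
                        break
    return calls

events = [
    {"type": "response.output_item.added", "id": "fc_1", "name": "search"},
    {"type": "response.function_call_arguments.delta", "id": "fc_1", "delta": '{"q":'},
    {"type": "response.function_call_arguments.delta", "id": "fc_1", "delta": ' "py-gpt"}'},
    {"type": "response.function_call_arguments.done", "id": "fc_1"},
]
assert replay(events)[0]["function"]["arguments"] == '{"q": "py-gpt"}'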
@@ -240,27 +235,29 @@ class StreamWorker(QObject, QRunnable):
                         elif etype == "response.code_interpreter_call_code.done":
                             response = "\n\n```\n-----------\n"
 
-                        #
+                        # image gen
                         elif etype == "response.image_generation_call.partial_image":
                             image_base64 = chunk.partial_image_b64
                             image_bytes = base64.b64decode(image_base64)
+                            # simple and safe overwrite (as in the original)
                             with open(img_path, "wb") as f:
                                 f.write(image_bytes)
                             is_image = True
 
-                        #
+                        # response ID
                         elif etype == "response.created":
-
-
+                            ctx.msg_id = str(chunk.response.id)
+                            core.ctx.update_item(ctx)
 
-                        #
+                        # end/error etype – nothing to do
                         elif etype in {"response.done", "response.failed", "error"}:
                             pass
 
                     # OpenAI completion
                     elif chunk_type == "api_completion":
-
-
+                        choice0 = chunk.choices[0]
+                        if choice0.text is not None:
+                            response = choice0.text
 
                     # langchain chat
                     elif chunk_type == "langchain_chat":
@@ -271,121 +268,115 @@ class StreamWorker(QObject, QRunnable):
                     elif chunk_type == "llama_chat":
                         if chunk.delta is not None:
                             response = str(chunk.delta)
-                        tool_chunks = chunk.message
+                        tool_chunks = getattr(chunk.message, "additional_kwargs", {}).get("tool_calls", [])
                         if tool_chunks:
                             for tool_chunk in tool_chunks:
-                                id_val = None
-                                name = None
-                                args =
-                                if
-
-
-                                    args = tool_chunk.function.arguments
-                                if hasattr(tool_chunk, 'call_id'):
-                                    id_val = tool_chunk.call_id
-                                elif hasattr(tool_chunk, 'id'):
-                                    id_val = tool_chunk.id
-                                if hasattr(tool_chunk, 'name'):
-                                    name = tool_chunk.name
-                                elif hasattr(tool_chunk, 'function') and hasattr(tool_chunk.function, 'name'):
-                                    name = tool_chunk.function.name
+                                id_val = getattr(tool_chunk, "call_id", None) or getattr(tool_chunk, "id", None)
+                                name = getattr(tool_chunk, "name", None) or getattr(getattr(tool_chunk, "function", None), "name", None)
+                                args = getattr(tool_chunk, "arguments", None)
+                                if args is None:
+                                    f = getattr(tool_chunk, "function", None)
+                                    args = getattr(f, "arguments", None) if f else None
                                 if id_val:
                                     if not args:
-                                        args = "{}"
+                                        args = "{}"
                                     tool_call = {
                                         "id": id_val,
                                         "type": "function",
-                                        "function": {
-                                            "name": name,
-                                            "arguments": args
-                                        }
+                                        "function": {"name": name, "arguments": args}
                                     }
                                     tool_calls.clear()
                                     tool_calls.append(tool_call)
 
-                    # raw text
+                    # raw text (llama-index / langchain completion)
                     else:
                         if chunk is not None:
                             response = str(chunk)
 
                     if response is not None and response != "" and not stopped:
-                        if begin and response == "":
+                        if begin and response == "":
                             continue
-
+                        output_parts.append(response)
                         output_tokens += 1
-
-
-
-
-
-
-
-
+                        emit_event(
+                            RenderEvent(
+                                RenderEvent.STREAM_APPEND,
+                                {
+                                    "meta": ctx.meta,
+                                    "ctx": ctx,
+                                    "chunk": response,
+                                    "begin": begin,
+                                },
+                            )
+                        )
                         begin = False
 
-
+                    chunk = None
+
+                # tool calls
                 if tool_calls:
-
-
-
+                    ctx.force_call = force_func_call
+                    core.debug.info("[chat] Tool calls found, unpacking...")
+                    core.command.unpack_tool_calls_chunks(ctx, tool_calls)
 
-                #
+                # image
                 if is_image:
-
-
+                    core.debug.info("[chat] Image generation call found")
+                    ctx.images = [img_path]
 
         except Exception as e:
             error = e
 
+        finally:
+            output = "".join(output_parts)
+            output_parts.clear()
+
+            if has_unclosed_code_tag(output):
+                output += "\n```"
+
+            if generator and hasattr(generator, 'close'):
+                try:
+                    generator.close()
+                except Exception:
+                    pass
 
-
-        if has_unclosed_code_tag(output):
-            output += "\n```"
+            del generator
 
-
-
-
-            except Exception as e:
-                pass
-            del generator
+            ctx.output = output
+            ctx.set_tokens(ctx.input_tokens, output_tokens)
+            core.ctx.update_item(ctx)
 
-
-
-
+            if files and not stopped:
+                core.debug.info("[chat] Container files found, downloading...")
+                try:
+                    core.gpt.container.download_files(ctx, files)
+                except Exception as e:
+                    core.debug.error(f"[chat] Error downloading container files: {e}")
 
-
-
-            self.window.core.debug.info("[chat] Container files found, downloading...")
-            try:
-                self.window.core.gpt.container.download_files(self.ctx, files)
-            except Exception as e:
-                self.window.core.debug.error(f"[chat] Error downloading container files: {e}")
+            if error:
+                emit_error(error)
 
-
-            if error:
-                self.errorOccurred.emit(error)
+            emit_end(ctx)
 
-
-
+            fn_args_buffers.clear()
+            files.clear()
+            tool_calls.clear()
+            if citations is not None:
+                citations.clear()
+
+            self.cleanup()
 
     def cleanup(self):
+        """
+        Cleanup resources after worker execution.
+        """
+        self.ctx = None
+        self.window = None
         try:
-            self.
-        except Exception:
-            pass
-        try:
-            self.errorOccurred.disconnect()
-        except Exception:
-            pass
-        try:
-            self.end.disconnect()
+            QMetaObject.invokeMethod(self.signals, "deleteLater", Qt.QueuedConnection)
         except Exception:
             pass
 
-        self.ctx = None
-        self.window = None
-        self.deleteLater()
-
 
 class Stream:
     def __init__(self, window=None):
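Two details of the rewritten `run()` above are worth noting: chunks are gathered in `output_parts` and joined once, avoiding quadratic `str +=` behavior on long streams, and the new `finally` block closes the source generator even when iteration raises, releasing the underlying connection. A reduced sketch of that shape, with hypothetical names:

def consume_stream(generator):
    parts = []
    error = None
    try:
        for chunk in generator:
            parts.append(str(chunk))  # O(1) appends instead of str +=
    except Exception as e:
        error = e
    finally:
        output = "".join(parts)  # single join once the stream ends
        if generator is not None and hasattr(generator, "close"):
            try:
                generator.close()  # always release the underlying response
            except Exception:
                pass
    return output, error

text, err = consume_stream(iter(["Hel", "lo"]))
assert text == "Hello" and err is None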
@@ -417,14 +408,6 @@ class Stream:
     ):
         """
         Asynchronous append of stream worker to the thread.
-
-        :param ctx: CtxItem
-        :param mode: Mode of stream processing
-        :param is_response: Is this a response stream?
-        :param reply: Reply text
-        :param internal: Internal flag for handling
-        :param context: Optional BridgeContext for additional context
-        :param extra: Optional extra data for the stream
         """
         self.ctx = ctx
         self.mode = mode
@@ -435,9 +418,11 @@ class Stream:
         self.extra = extra if extra is not None else {}
 
         worker = StreamWorker(ctx, self.window)
-
-
-        worker.
+        self.worker = worker
+
+        worker.signals.eventReady.connect(self.handleEvent)
+        worker.signals.errorOccurred.connect(self.handleError)
+        worker.signals.end.connect(self.handleEnd)
 
         self.window.core.debug.info("[chat] Stream begin...")
         self.window.threadpool.start(worker)
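The code above now keeps the worker referenced in `self.worker` and connects all three signals before `threadpool.start()`, so nothing emitted early is lost and the Python wrapper stays referenced while the pool runs it. Because the emits happen on a pool thread and the receivers live in the main thread, Qt delivers them as queued connections through the event loop; a self-contained demonstration of that delivery:

import threading
from PySide6.QtCore import QCoreApplication, QObject, QRunnable, QThreadPool, Signal

class Signals(QObject):
    done = Signal(str)

class Job(QRunnable):
    def __init__(self):
        QRunnable.__init__(self)
        self.signals = Signals()

    def run(self):
        # runs on a pool thread; the emit is queued back to the main thread
        self.signals.done.emit(f"ran on: {threading.current_thread().name}")

app = QCoreApplication([])
job = Job()
job.signals.done.connect(lambda msg: (print(msg), app.quit()))  # connect first
QThreadPool.globalInstance().start(job)                         # then start
app.exec()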
@@ -446,10 +431,6 @@ class Stream:
     def handleEnd(self, ctx: CtxItem):
         """
         Slot for handling end of stream
-
-        This method is called when the stream processing is finished.
-
-        :param ctx: CtxItem
         """
         self.window.controller.ui.update_tokens()
 
@@ -462,13 +443,10 @@ class Stream:
             stream=True,
         )
 
-        # finish: assistant thread run
         if self.mode == MODE_ASSISTANT:
             self.window.controller.assistant.threads.handle_output_message_after_stream(ctx)
         else:
-            # finish: KernelEvent.RESPONSE_OK, KernelEvent.RESPONSE_ERROR
             if self.is_response:
-                # post-handle, execute cmd, etc.
                 self.window.controller.chat.response.post_handle(
                     ctx=ctx,
                     mode=self.mode,
@@ -477,27 +455,18 @@ class Stream:
                 internal=self.internal
             )
 
-
-        """
-        Slot for handling RenderEvent
+        self.worker = None
 
-
-        """
+    def handleEvent(self, event):
         self.window.dispatch(event)
 
     def handleError(self, error):
-        """
-        Slot for handling errors
-
-        :param error: Exception
-        """
         self.window.core.debug.log(error)
         if self.is_response:
             if not isinstance(self.extra, dict):
                 self.extra = {}
             self.extra["error"] = error
-            self.window.controller.chat.response.failed(self.context, self.extra)
-            # post-handle, execute cmd, etc.
+            self.window.controller.chat.response.failed(self.context, self.extra)
             self.window.controller.chat.response.post_handle(
                 ctx=self.ctx,
                 mode=self.mode,
@@ -507,9 +476,4 @@ class Stream:
         )
 
     def log(self, data: object):
-        """
-        Save debug log data
-
-        :param data: log data
-        """
         self.window.core.debug.info(data)
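The `cleanup()` method shown earlier also demonstrates how to dispose of a QObject from a pool thread: instead of calling `deleteLater()` directly, the call is queued so the object is destroyed in the thread that owns it. The same idiom in isolation (the helper name is hypothetical):

from PySide6.QtCore import QMetaObject, QObject, Qt

def safe_delete(obj: QObject) -> None:
    # Queue deleteLater() onto the event loop of the thread owning obj;
    # destroying a QObject from a foreign thread is not safe.
    try:
        QMetaObject.invokeMethod(obj, "deleteLater", Qt.QueuedConnection)
    except Exception:
        pass  # best-effort: the object may already be gone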
pygpt_net/controller/ctx/ctx.py CHANGED

@@ -6,9 +6,9 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.11 00:00:00 #
 # ================================================== #
-
+
 from typing import Optional, List
 
 from PySide6.QtCore import QModelIndex, QTimer
@@ -21,8 +21,7 @@ from .common import Common
 from .summarizer import Summarizer
 from .extra import Extra
 
-from pygpt_net.utils import trans
-from pygpt_net.core.tabs.tab import Tab
+from pygpt_net.utils import trans, mem_clean
 
 
 class Ctx:
@@ -943,11 +942,12 @@
         :param child_id: int
         :return: QModelIndex
         """
+        finder = self.find_child_index_by_id
         for row in range(root_item.rowCount()):
             item = root_item.child(row)
             if hasattr(item, 'id') and hasattr(item, 'isFolder') and not item.isFolder and item.id == child_id:
                 return item.index()
-            child_index =
+            child_index = finder(item, child_id)
             if child_index.isValid():
                 return child_index
         return QModelIndex()
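The last hunk touches a depth-first search over the context tree: leaf rows are matched by `id`, folder items are searched recursively, and binding the method to a local `finder` merely saves an attribute lookup per row. A self-contained sketch of the same traversal over a `QStandardItemModel`, where the dynamic `id`/`isFolder` attributes mirror how the tree items are tagged:

from PySide6.QtCore import QModelIndex
from PySide6.QtGui import QStandardItem, QStandardItemModel

def find_child_index_by_id(root_item, child_id) -> QModelIndex:
    finder = find_child_index_by_id  # local alias, as in the patch
    for row in range(root_item.rowCount()):
        item = root_item.child(row)
        if not getattr(item, "isFolder", False) and getattr(item, "id", None) == child_id:
            return item.index()  # leaf row carrying the matching id
        child_index = finder(item, child_id)  # descend into folders
        if child_index.isValid():
            return child_index
    return QModelIndex()

model = QStandardItemModel()
folder = QStandardItem("work")
folder.isFolder, folder.id = True, None
leaf = QStandardItem("ctx 42")
leaf.isFolder, leaf.id = False, 42
folder.appendRow(leaf)
model.appendRow(folder)
assert find_child_index_by_id(model.invisibleRootItem(), 42).isValid()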