pygpt-net 2.7.5__py3-none-any.whl → 2.7.7__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +14 -0
- pygpt_net/__init__.py +4 -4
- pygpt_net/controller/chat/remote_tools.py +3 -9
- pygpt_net/controller/chat/stream.py +2 -2
- pygpt_net/controller/chat/{handler/worker.py → stream_worker.py} +20 -64
- pygpt_net/controller/debug/fixtures.py +3 -2
- pygpt_net/controller/files/files.py +65 -4
- pygpt_net/core/debug/models.py +2 -2
- pygpt_net/core/filesystem/url.py +4 -1
- pygpt_net/core/render/web/body.py +3 -2
- pygpt_net/core/types/chunk.py +27 -0
- pygpt_net/data/config/config.json +14 -4
- pygpt_net/data/config/models.json +192 -4
- pygpt_net/data/config/settings.json +126 -36
- pygpt_net/data/js/app/template.js +1 -1
- pygpt_net/data/js/app.min.js +2 -2
- pygpt_net/data/locale/locale.de.ini +5 -0
- pygpt_net/data/locale/locale.en.ini +35 -8
- pygpt_net/data/locale/locale.es.ini +5 -0
- pygpt_net/data/locale/locale.fr.ini +5 -0
- pygpt_net/data/locale/locale.it.ini +5 -0
- pygpt_net/data/locale/locale.pl.ini +5 -0
- pygpt_net/data/locale/locale.uk.ini +5 -0
- pygpt_net/data/locale/locale.zh.ini +5 -0
- pygpt_net/data/locale/plugin.cmd_mouse_control.en.ini +2 -2
- pygpt_net/item/ctx.py +3 -5
- pygpt_net/js_rc.py +2449 -2447
- pygpt_net/plugin/cmd_mouse_control/config.py +8 -7
- pygpt_net/plugin/cmd_mouse_control/plugin.py +3 -4
- pygpt_net/plugin/cmd_mouse_control/worker.py +2 -1
- pygpt_net/plugin/cmd_mouse_control/worker_sandbox.py +2 -1
- pygpt_net/provider/api/anthropic/__init__.py +16 -9
- pygpt_net/provider/api/anthropic/chat.py +259 -11
- pygpt_net/provider/api/anthropic/computer.py +844 -0
- pygpt_net/provider/api/anthropic/remote_tools.py +172 -0
- pygpt_net/{controller/chat/handler/anthropic_stream.py → provider/api/anthropic/stream.py} +24 -10
- pygpt_net/provider/api/anthropic/tools.py +32 -77
- pygpt_net/provider/api/anthropic/utils.py +30 -0
- pygpt_net/provider/api/google/__init__.py +6 -5
- pygpt_net/provider/api/google/chat.py +3 -8
- pygpt_net/{controller/chat/handler/google_stream.py → provider/api/google/stream.py} +1 -1
- pygpt_net/provider/api/google/utils.py +185 -0
- pygpt_net/{controller/chat/handler → provider/api/langchain}/__init__.py +0 -0
- pygpt_net/{controller/chat/handler/langchain_stream.py → provider/api/langchain/stream.py} +1 -1
- pygpt_net/provider/api/llama_index/__init__.py +0 -0
- pygpt_net/{controller/chat/handler/llamaindex_stream.py → provider/api/llama_index/stream.py} +1 -1
- pygpt_net/provider/api/openai/__init__.py +7 -3
- pygpt_net/provider/api/openai/image.py +2 -2
- pygpt_net/provider/api/openai/responses.py +0 -0
- pygpt_net/{controller/chat/handler/openai_stream.py → provider/api/openai/stream.py} +1 -1
- pygpt_net/provider/api/openai/utils.py +69 -3
- pygpt_net/provider/api/x_ai/__init__.py +117 -17
- pygpt_net/provider/api/x_ai/chat.py +272 -102
- pygpt_net/provider/api/x_ai/image.py +149 -47
- pygpt_net/provider/api/x_ai/{remote.py → remote_tools.py} +165 -70
- pygpt_net/provider/api/x_ai/responses.py +507 -0
- pygpt_net/provider/api/x_ai/stream.py +715 -0
- pygpt_net/provider/api/x_ai/tools.py +59 -8
- pygpt_net/{controller/chat/handler → provider/api/x_ai}/utils.py +1 -2
- pygpt_net/provider/api/x_ai/vision.py +1 -4
- pygpt_net/provider/core/config/patch.py +22 -1
- pygpt_net/provider/core/model/patch.py +26 -1
- pygpt_net/tools/image_viewer/ui/dialogs.py +300 -13
- pygpt_net/tools/text_editor/ui/dialogs.py +3 -2
- pygpt_net/tools/text_editor/ui/widgets.py +5 -1
- pygpt_net/ui/base/context_menu.py +44 -1
- pygpt_net/ui/layout/toolbox/indexes.py +22 -19
- pygpt_net/ui/layout/toolbox/model.py +28 -5
- pygpt_net/ui/widget/dialog/base.py +16 -5
- pygpt_net/ui/widget/image/display.py +25 -8
- pygpt_net/ui/widget/tabs/output.py +9 -1
- pygpt_net/ui/widget/textarea/editor.py +14 -1
- pygpt_net/ui/widget/textarea/input.py +20 -7
- pygpt_net/ui/widget/textarea/notepad.py +24 -1
- pygpt_net/ui/widget/textarea/output.py +23 -1
- pygpt_net/ui/widget/textarea/web.py +16 -1
- {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/METADATA +16 -2
- {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/RECORD +80 -73
- pygpt_net/controller/chat/handler/xai_stream.py +0 -135
- {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/LICENSE +0 -0
- {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/WHEEL +0 -0
- {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/api/x_ai/responses.py
@@ -0,0 +1,507 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2026.01.04 19:00:00                  #
+# ================================================== #
+
+from __future__ import annotations
+
+import json
+from typing import Optional, Dict, Any, List, Tuple
+
+from pygpt_net.core.types import MODE_CHAT
+from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
+from pygpt_net.item.attachment import AttachmentItem
+from pygpt_net.item.ctx import CtxItem
+from pygpt_net.item.model import ModelItem
+
+# xAI SDK chat helpers (system/user/assistant/image) for message building
+from xai_sdk.chat import (
+    system as xsystem,
+    user as xuser,
+    assistant as xassistant,
+    image as ximage,
+    tool_result as xtool_result
+)
+
+# get_tool_call_type helps filter client-side vs server-side tools
+try:
+    from xai_sdk.tools import get_tool_call_type as x_get_tool_call_type
+except Exception:
+    x_get_tool_call_type = None
+
+
+class Responses:
+    def __init__(self, window=None):
+        """
+        Chat wrapper for xAI using Chat Responses via xai_sdk.
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.input_tokens = 0
+        # Image constraints
+        self.allowed_mimes = {"image/jpeg", "image/png"}
+        self.default_image_max_bytes = 10 * 1024 * 1024  # 10 MiB default
+
+    # ---------- SEND (Chat Responses) ----------
+
+    def send(self, context: BridgeContext, extra: Optional[Dict[str, Any]] = None):
+        """
+        Entry point for xAI chat/multimodal using Chat Responses (stateful).
+
+        Streaming:
+        - Uses xai_sdk chat.stream() (tuples of (response, chunk)).
+
+        Non-stream:
+        - Uses xai_sdk chat.sample().
+
+        Client-side tools:
+        - Prepared from app functions and mixed with server-side tools.
+        - Only client-side tool calls are returned in ctx.tool_calls (server-side are executed on xAI).
+
+        :param context: BridgeContext with all parameters
+        :param extra: Extra parameters (not used)
+        :return: SDK response object (non-stream) or an iterable for streaming (generator/iterator)
+        """
+        prompt = context.prompt
+        system_prompt = context.system_prompt
+        model_item = context.model
+        functions = context.external_functions
+        attachments = context.attachments
+        multimodal_ctx = context.multimodal_ctx
+        stream = context.stream
+        history = context.history
+        ctx = context.ctx or CtxItem()
+
+        client = self.window.core.api.xai.get_client(context.mode, model_item)
+
+        # Local input token estimate (best effort)
+        self.reset_tokens()
+        cnt_msgs = self._build_count_messages(prompt, system_prompt, model_item, history)
+        self.input_tokens += self.window.core.tokens.from_messages(cnt_msgs, model_item.id)
+
+        # Remote (server-side) tools and include flags
+        rt_cfg = self.window.core.api.xai.remote.build_for_chat(
+            model=model_item,
+            stream=stream,
+        )
+        srv_tools = rt_cfg.get("tools", []) or []
+        include = rt_cfg.get("include", []) or []
+        use_encrypted = bool(rt_cfg.get("use_encrypted_content", False))
+        max_turns = rt_cfg.get("max_turns", None)
+
+        # Client-side tools (from app functions)
+        client_tools = self.window.core.api.xai.tools.prepare_sdk_tools(functions)
+        all_tools = (srv_tools + client_tools) if (srv_tools or client_tools) else None
+
+        # Vision fallback if needed
+        has_images = self._attachments_have_images(attachments)
+        model_id = model_item.id
+        if has_images and not self._is_vision_model(model_item):
+            fb = self.window.core.config.get("xai_vision_fallback_model") or "grok-2-vision-latest"
+            self.window.core.debug.info(f"[xai] Switching to vision model: {fb} (was: {model_id}) due to image input")
+            model_id = fb
+
+        # Store messages: false when images present (SDK guidance), otherwise configurable (default True)
+        store_messages = True
+        cfg_store = self.window.core.config.get("remote_tools.xai.store_messages")
+        if cfg_store is not None:
+            try:
+                store_messages = bool(cfg_store)
+            except Exception:
+                pass
+        if has_images:
+            store_messages = False
+
+        # previous_response_id from last history item or current ctx
+        prev_id = self._detect_previous_response_id(history, ctx)
+
+        # Create chat session in SDK
+        chat_kwargs: Dict[str, Any] = {
+            "model": model_id,
+            "tools": all_tools,
+            "include": (include if include else None),
+            "store_messages": store_messages,
+            "previous_response_id": prev_id,
+        }
+        if use_encrypted:
+            chat_kwargs["use_encrypted_content"] = True
+        if isinstance(max_turns, int) and max_turns > 0:
+            chat_kwargs["max_turns"] = max_turns
+
+        chat = client.chat.create(**chat_kwargs)
+
+        # Append history (only when not continuing via previous_response_id)
+        self.append_history_sdk(
+            chat=chat,
+            system_prompt=system_prompt,
+            model=model_item,
+            history=history if prev_id is None else None,  # do not duplicate when chaining
+        )
+
+        # If last turn contained client-side tool outputs, append them first
+        self._append_tool_results_from_ctx(chat, history)
+
+        # Append current user message (with images if any)
+        self.append_current_user_sdk(
+            chat=chat,
+            prompt=prompt,
+            attachments=attachments,
+            multimodal_ctx=multimodal_ctx,
+        )
+
+        # STREAMING: return raw iterator (handler attaches ctx.msg_id)
+        if stream:
+            return chat.stream()
+
+        # NON-STREAM
+        response = chat.sample()
+        return response
+
+    # ---------- UNPACK (non-stream) ----------
+
+    def unpack_response(self, mode: str, response, ctx: CtxItem):
+        """
+        Unpack non-streaming xAI response into ctx (text, tool calls, usage, citations, images).
+
+        :param mode: mode (chat, etc)
+        :param response: Response object from SDK or dict
+        :param ctx: CtxItem to fill
+        """
+        # Output text
+        out = ""
+        try:
+            out = getattr(response, "content", None) or ""
+        except Exception:
+            pass
+        if not out and isinstance(response, dict):
+            out = response.get("output_text") or ""
+
+        ctx.output = (str(out or "")).strip()
+
+        # Citations (list of urls)
+        try:
+            cits = getattr(response, "citations", None)
+            if isinstance(cits, list) and cits:
+                if ctx.urls is None:
+                    ctx.urls = []
+                for u in cits:
+                    if isinstance(u, str) and (u.startswith("http://") or u.startswith("https://")):
+                        if u not in ctx.urls:
+                            ctx.urls.append(u)
+        except Exception:
+            pass
+
+        # Tool calls: only return client-side tools to be executed by the app
+        try:
+            raw_calls = getattr(response, "tool_calls", None) or []
+            client_side = []
+            for tc in raw_calls:
+                # filter client-side tool calls using SDK helper when available
+                ttype = None
+                if x_get_tool_call_type is not None:
+                    try:
+                        ttype = x_get_tool_call_type(tc)
+                    except Exception:
+                        ttype = None
+                # treat as client-side when helper says so, or when object looks like function call
+                if ttype == "client_side_tool" or self._looks_like_client_tool(tc):
+                    fn = getattr(tc, "function", None)
+                    name = getattr(fn, "name", "") if fn is not None else ""
+                    args = getattr(fn, "arguments", "") if fn is not None else ""
+                    if isinstance(args, (dict, list)):
+                        try:
+                            args = json.dumps(args, ensure_ascii=False)
+                        except Exception:
+                            args = str(args)
+                    client_side.append({
+                        "id": getattr(tc, "id", "") or "",
+                        "type": "function",
+                        "function": {"name": name or "", "arguments": args or ""},
+                    })
+            if client_side:
+                ctx.tool_calls = client_side
+        except Exception:
+            pass
+
+        # Usage
+        try:
+            usage = getattr(response, "usage", None)
+            if isinstance(usage, dict):
+                u = self._normalize_usage(usage)
+            else:
+                # Sometimes SDK exposes usage as object with attributes
+                u = self._normalize_usage({
+                    "prompt_tokens": getattr(usage, "prompt_tokens", 0) if usage else 0,
+                    "completion_tokens": getattr(usage, "completion_tokens", 0) if usage else 0,
+                    "reasoning_tokens": getattr(usage, "reasoning_tokens", 0) if usage else 0,
+                })
+            if u:
+                ctx.set_tokens(u.get("in", 0), u.get("out", 0))
+                if not isinstance(ctx.extra, dict):
+                    ctx.extra = {}
+                ctx.extra["usage"] = {
+                    "vendor": "xai",
+                    "input_tokens": u.get("in", 0),
+                    "output_tokens": u.get("out", 0),
+                    "reasoning_tokens": u.get("reasoning", 0),
+                    "total_reported": u.get("total"),
+                }
+        except Exception:
+            pass
+
+        # Response ID
+        try:
+            rid = getattr(response, "id", None)
+            if not rid and isinstance(response, dict):
+                rid = response.get("id")
+            if rid:
+                ctx.msg_id = str(rid)
+                if not isinstance(ctx.extra, dict):
+                    ctx.extra = {}
+                ctx.extra["xai_response_id"] = ctx.msg_id
+        except Exception:
+            pass
+
+    # ---------- SDK message building helpers ----------
+
+    def append_history_sdk(
+        self,
+        chat,
+        system_prompt: Optional[str],
+        model: ModelItem,
+        history: Optional[List[CtxItem]] = None,
+    ):
+        """
+        Append history to SDK chat (only when not using previous_response_id).
+
+        :param chat: xai_sdk chat instance
+        :param system_prompt: system prompt
+        :param model: model item
+        :param history: history items
+        """
+        if history is None:
+            # initial turn, append system only (if any)
+            if system_prompt:
+                chat.append(xsystem(system_prompt))
+            return
+
+        # system prompt first (if any)
+        if system_prompt:
+            chat.append(xsystem(system_prompt))
+
+        # history pairs
+        if self.window.core.config.get('use_context'):
+            used = self.window.core.tokens.from_user("", system_prompt or "")
+            items = self.window.core.ctx.get_history(
+                history, model.id, MODE_CHAT, used, self._fit_ctx(model),
+            )
+            for item in items:
+                if item.final_input:
+                    chat.append(xuser(str(item.final_input)))
+                if item.final_output:
+                    chat.append(xassistant(str(item.final_output)))
+
+    def append_current_user_sdk(
+        self,
+        chat,
+        prompt: str,
+        attachments: Optional[Dict[str, AttachmentItem]],
+        multimodal_ctx: Optional[MultimodalContext],
+    ):
+        """
+        Append current user message with optional inline images.
+
+        :param chat: xai_sdk chat instance
+        :param prompt: user text
+        :param attachments: attachments dict (images)
+        :param multimodal_ctx: multimodal context (not used here)
+        """
+        parts = [str(prompt or "")]
+        for img in self.window.core.api.xai.vision.build_images_for_chat(attachments):
+            parts.append(ximage(img))
+        chat.append(xuser(*parts))
+
+    def _append_tool_results_from_ctx(self, chat, history: Optional[List[CtxItem]]):
+        """
+        Append tool results from the last ctx item when function-calling loop is active.
+        """
+        tool_call_native_enabled = self.window.core.config.get('func_call.native', False)
+        if not (history and tool_call_native_enabled):
+            return
+        last = history[-1]
+        if not (last.extra and isinstance(last.extra, dict)):
+            return
+        tool_output = last.extra.get("tool_output")
+        if not (tool_output and isinstance(tool_output, list)):
+            return
+
+        for out in tool_output:
+            # accept direct result value or whole dict
+            if isinstance(out, dict) and "result" in out:
+                chat.append(xtool_result(str(out["result"])))
+            else:
+                chat.append(xtool_result(str(out)))
+
+    # ---------- legacy/local utils ----------
+
+    def _fit_ctx(self, model: ModelItem) -> int:
+        """
+        Fit to max model tokens (uses model.ctx if present).
+        """
+        max_ctx_tokens = self.window.core.config.get('max_total_tokens')
+        if model and model.ctx and 0 < model.ctx < max_ctx_tokens:
+            max_ctx_tokens = model.ctx
+        return max_ctx_tokens
+
+    def _build_count_messages(
+        self,
+        prompt: str,
+        system_prompt: str,
+        model: ModelItem,
+        history: Optional[List[CtxItem]] = None,
+    ) -> List[dict]:
+        """
+        Build simple messages structure for local token estimation.
+        """
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+
+        if self.window.core.config.get('use_context'):
+            used_tokens = self.window.core.tokens.from_user(prompt, system_prompt)
+            items = self.window.core.ctx.get_history(
+                history, model.id, MODE_CHAT, used_tokens, self._fit_ctx(model),
+            )
+            for item in items:
+                if item.final_input:
+                    messages.append({"role": "user", "content": str(item.final_input)})
+                if item.final_output:
+                    messages.append({"role": "assistant", "content": str(item.final_output)})
+
+        messages.append({"role": "user", "content": str(prompt)})
+        return messages
+
+    def _normalize_usage(self, raw) -> Optional[dict]:
+        """
+        Normalize usage to a common dict: {'in','out','reasoning','total'}.
+        Accepts either:
+        - {'input_tokens','output_tokens','total_tokens'}
+        - {'prompt_tokens','completion_tokens','total_tokens'}
+        """
+        if not isinstance(raw, dict):
+            return None
+
+        def _as_int(v) -> int:
+            try:
+                return int(v)
+            except Exception:
+                try:
+                    return int(float(v))
+                except Exception:
+                    return 0
+
+        in_tok = raw.get("input_tokens") if "input_tokens" in raw else raw.get("prompt_tokens")
+        out_tok = raw.get("output_tokens") if "output_tokens" in raw else raw.get("completion_tokens")
+        reasoning_tok = raw.get("reasoning_tokens", 0)
+        tot = raw.get("total_tokens")
+
+        i = _as_int(in_tok or 0)
+        o = _as_int(out_tok or 0)
+        r = _as_int(reasoning_tok or 0)
+        t = _as_int(tot if tot is not None else (i + o + r))
+        return {"in": i, "out": max(0, t - i) if t else o, "reasoning": r, "total": t}
+
+    def _attachments_have_images(self, attachments: Optional[Dict[str, AttachmentItem]]) -> bool:
+        """
+        Detect if attachments contain at least one image file.
+        """
+        if not attachments:
+            return False
+        for _, att in attachments.items():
+            try:
+                if att.path and self.window.core.api.xai.vision.is_image(att.path):
+                    return True
+            except Exception:
+                continue
+        return False
+
+    def _is_vision_model(self, model: ModelItem) -> bool:
+        """
+        Heuristic check for vision-capable model IDs.
+        """
+        model_id = (model.id if model and model.id else "").strip()
+        if not model or not model_id:
+            return False
+        if model.is_image_input():
+            return True
+        mid = model_id.lower()
+        return ("vision" in mid) or ("-v" in mid and "grok" in mid)
+
+    def _looks_like_client_tool(self, tc_obj) -> bool:
+        """
+        Best-effort detection of a client-side tool call when helper is not available.
+        """
+        try:
+            fn = getattr(tc_obj, "function", None)
+            if fn is None:
+                return False
+            name = getattr(fn, "name", None)
+            # arguments can be str/dict/list; presence indicates structured call
+            has_args = hasattr(fn, "arguments")
+            return isinstance(name, str) and name != "" and has_args
+        except Exception:
+            return False
+
+    def _detect_previous_response_id(self, history: Optional[List[CtxItem]], ctx: CtxItem) -> Optional[str]:
+        """
+        Return last response id from history or current ctx when available.
+        """
+        try:
+            if history and len(history) > 0:
+                last = history[-1]
+                if last and last.msg_id:
+                    return str(last.msg_id)
+        except Exception:
+            pass
+        try:
+            if ctx and ctx.msg_id:
+                return str(ctx.msg_id)
+        except Exception:
+            pass
+        return None
+
+    def reset_tokens(self):
+        """Reset input tokens counter."""
+        self.input_tokens = 0
+
+    def get_used_tokens(self) -> int:
+        """
+        Return the locally estimated input tokens count.
+        """
+        return self.input_tokens
+
+    # ---------- helpers for quick_call compatibility ----------
+
+    def quick_collect_response_id(self, response, ctx: CtxItem):
+        """
+        Set response id and xai_response_id after non-stream sample() in quick paths.
+
+        :param response: Response object from SDK or dict
+        :param ctx: CtxItem to fill
+        """
+        try:
+            rid = getattr(response, "id", None)
+            if rid:
+                ctx.msg_id = str(rid)
+                if not isinstance(ctx.extra, dict):
+                    ctx.extra = {}
+                ctx.extra["xai_response_id"] = ctx.msg_id
+        except Exception:
+            pass
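
For reference, a minimal consumption sketch of the two return shapes documented in Responses.send() above: chat.sample() for a single response object, and chat.stream() for (response, chunk) tuples. This is hedged, standalone xai_sdk usage, not the app's actual wiring; in PyGPT the client comes from window.core.api.xai.get_client(), and the API key env var and model id below are assumptions for illustration only.

    # Hedged sketch: standalone xai_sdk usage mirroring Responses.send().
    # XAI_API_KEY and the model id are illustrative assumptions, not app config.
    import os

    from xai_sdk import Client
    from xai_sdk.chat import system, user

    client = Client(api_key=os.environ["XAI_API_KEY"])

    # Non-stream path (context.stream == False): one sampled response,
    # whose .content is what unpack_response() copies into ctx.output.
    chat = client.chat.create(model="grok-4")
    chat.append(system("You are a helpful assistant."))
    chat.append(user("Hello!"))
    response = chat.sample()
    print(response.content)

    # Streaming path (context.stream == True): the raw iterator returned by
    # send() yields (accumulated response, incremental chunk) tuples.
    chat = client.chat.create(model="grok-4")
    chat.append(user("Stream a short poem."))
    for response, chunk in chat.stream():
        print(chunk.content, end="", flush=True)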