pygpt-net 2.7.5__py3-none-any.whl → 2.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82):
  1. pygpt_net/CHANGELOG.txt +14 -0
  2. pygpt_net/__init__.py +4 -4
  3. pygpt_net/controller/chat/remote_tools.py +3 -9
  4. pygpt_net/controller/chat/stream.py +2 -2
  5. pygpt_net/controller/chat/{handler/worker.py → stream_worker.py} +20 -64
  6. pygpt_net/controller/debug/fixtures.py +3 -2
  7. pygpt_net/controller/files/files.py +65 -4
  8. pygpt_net/core/debug/models.py +2 -2
  9. pygpt_net/core/filesystem/url.py +4 -1
  10. pygpt_net/core/render/web/body.py +3 -2
  11. pygpt_net/core/types/chunk.py +27 -0
  12. pygpt_net/data/config/config.json +14 -4
  13. pygpt_net/data/config/models.json +192 -4
  14. pygpt_net/data/config/settings.json +126 -36
  15. pygpt_net/data/js/app/template.js +1 -1
  16. pygpt_net/data/js/app.min.js +2 -2
  17. pygpt_net/data/locale/locale.de.ini +5 -0
  18. pygpt_net/data/locale/locale.en.ini +35 -8
  19. pygpt_net/data/locale/locale.es.ini +5 -0
  20. pygpt_net/data/locale/locale.fr.ini +5 -0
  21. pygpt_net/data/locale/locale.it.ini +5 -0
  22. pygpt_net/data/locale/locale.pl.ini +5 -0
  23. pygpt_net/data/locale/locale.uk.ini +5 -0
  24. pygpt_net/data/locale/locale.zh.ini +5 -0
  25. pygpt_net/data/locale/plugin.cmd_mouse_control.en.ini +2 -2
  26. pygpt_net/item/ctx.py +3 -5
  27. pygpt_net/js_rc.py +2449 -2447
  28. pygpt_net/plugin/cmd_mouse_control/config.py +8 -7
  29. pygpt_net/plugin/cmd_mouse_control/plugin.py +3 -4
  30. pygpt_net/plugin/cmd_mouse_control/worker.py +2 -1
  31. pygpt_net/plugin/cmd_mouse_control/worker_sandbox.py +2 -1
  32. pygpt_net/provider/api/anthropic/__init__.py +16 -9
  33. pygpt_net/provider/api/anthropic/chat.py +259 -11
  34. pygpt_net/provider/api/anthropic/computer.py +844 -0
  35. pygpt_net/provider/api/anthropic/remote_tools.py +172 -0
  36. pygpt_net/{controller/chat/handler/anthropic_stream.py → provider/api/anthropic/stream.py} +24 -10
  37. pygpt_net/provider/api/anthropic/tools.py +32 -77
  38. pygpt_net/provider/api/anthropic/utils.py +30 -0
  39. pygpt_net/provider/api/google/__init__.py +6 -5
  40. pygpt_net/provider/api/google/chat.py +3 -8
  41. pygpt_net/{controller/chat/handler/google_stream.py → provider/api/google/stream.py} +1 -1
  42. pygpt_net/provider/api/google/utils.py +185 -0
  43. pygpt_net/{controller/chat/handler → provider/api/langchain}/__init__.py +0 -0
  44. pygpt_net/{controller/chat/handler/langchain_stream.py → provider/api/langchain/stream.py} +1 -1
  45. pygpt_net/provider/api/llama_index/__init__.py +0 -0
  46. pygpt_net/{controller/chat/handler/llamaindex_stream.py → provider/api/llama_index/stream.py} +1 -1
  47. pygpt_net/provider/api/openai/__init__.py +7 -3
  48. pygpt_net/provider/api/openai/image.py +2 -2
  49. pygpt_net/provider/api/openai/responses.py +0 -0
  50. pygpt_net/{controller/chat/handler/openai_stream.py → provider/api/openai/stream.py} +1 -1
  51. pygpt_net/provider/api/openai/utils.py +69 -3
  52. pygpt_net/provider/api/x_ai/__init__.py +117 -17
  53. pygpt_net/provider/api/x_ai/chat.py +272 -102
  54. pygpt_net/provider/api/x_ai/image.py +149 -47
  55. pygpt_net/provider/api/x_ai/{remote.py → remote_tools.py} +165 -70
  56. pygpt_net/provider/api/x_ai/responses.py +507 -0
  57. pygpt_net/provider/api/x_ai/stream.py +715 -0
  58. pygpt_net/provider/api/x_ai/tools.py +59 -8
  59. pygpt_net/{controller/chat/handler → provider/api/x_ai}/utils.py +1 -2
  60. pygpt_net/provider/api/x_ai/vision.py +1 -4
  61. pygpt_net/provider/core/config/patch.py +22 -1
  62. pygpt_net/provider/core/model/patch.py +26 -1
  63. pygpt_net/tools/image_viewer/ui/dialogs.py +300 -13
  64. pygpt_net/tools/text_editor/ui/dialogs.py +3 -2
  65. pygpt_net/tools/text_editor/ui/widgets.py +5 -1
  66. pygpt_net/ui/base/context_menu.py +44 -1
  67. pygpt_net/ui/layout/toolbox/indexes.py +22 -19
  68. pygpt_net/ui/layout/toolbox/model.py +28 -5
  69. pygpt_net/ui/widget/dialog/base.py +16 -5
  70. pygpt_net/ui/widget/image/display.py +25 -8
  71. pygpt_net/ui/widget/tabs/output.py +9 -1
  72. pygpt_net/ui/widget/textarea/editor.py +14 -1
  73. pygpt_net/ui/widget/textarea/input.py +20 -7
  74. pygpt_net/ui/widget/textarea/notepad.py +24 -1
  75. pygpt_net/ui/widget/textarea/output.py +23 -1
  76. pygpt_net/ui/widget/textarea/web.py +16 -1
  77. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/METADATA +16 -2
  78. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/RECORD +80 -73
  79. pygpt_net/controller/chat/handler/xai_stream.py +0 -135
  80. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/LICENSE +0 -0
  81. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/WHEEL +0 -0
  82. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,185 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2026.01.05 20:00:00 #
10
+ # ================================================== #
11
+
12
+ from typing import Any, Optional
13
+
14
+
15
def safe_get(obj: Any, path: str) -> Any:
    """
    Resolve a dot-separated path against nested dicts, objects and sequences.

    :param obj: Source object or dict
    :param path: Dot-separated path, e.g. 'a.b.0.c'
    :return: Value at path, or None when any step is missing
    """
    node = obj
    for segment in path.split("."):
        if node is None:
            return None
        if isinstance(node, dict):
            node = node.get(segment)
            continue
        if segment.isdigit() and isinstance(node, (list, tuple)):
            index = int(segment)
            if not (0 <= index < len(node)):
                return None
            node = node[index]
        else:
            # plain attribute access for arbitrary objects
            node = getattr(node, segment, None)
    return node
39
+
40
+
41
def as_int(val: Any) -> Optional[int]:
    """
    Best-effort conversion of a value to int.

    :param val: Input value
    :return: int, or None when the value cannot be coerced
    """
    if val is None:
        return None
    # try direct int() first, then the float round-trip for strings like "3.9"
    for convert in (int, lambda v: int(float(v))):
        try:
            return convert(val)
        except Exception:
            continue
    return None
57
+
58
def capture_google_usage(state, um_obj: Any):
    """
    Extract token usage from Google python-genai usage metadata.

    Prefers (total - prompt) for the output count so reasoning tokens are
    included; falls back to the candidates/output count when totals are
    missing.

    :param state: Chat state; ``usage_vendor`` and ``usage_payload`` are written
    :param um_obj: Usage metadata object or dict
    """
    if not um_obj:
        return

    def _first_int(*paths: str) -> Optional[int]:
        # Return the first path that resolves to a real integer. Unlike an
        # `or` chain, this does not wrongly skip a legitimate 0 count.
        for path in paths:
            value = as_int(safe_get(um_obj, path))
            if value is not None:
                return value
        return None

    state.usage_vendor = "google"
    prompt = _first_int("prompt_token_count", "prompt_tokens", "input_tokens")
    total = _first_int("total_token_count", "total_tokens")
    candidates = _first_int("candidates_token_count", "output_tokens")
    reasoning = _first_int(
        "candidates_reasoning_token_count",
        "reasoning_tokens",
    ) or 0
    if total is not None and prompt is not None:
        # total - prompt includes reasoning tokens in the output figure
        out_total = max(0, total - prompt)
    else:
        out_total = candidates
    state.usage_payload = {
        "in": prompt,
        "out": out_total,
        "reasoning": reasoning,
        "total": total,
    }
90
+
91
+
92
def collect_google_citations(ctx, state, chunk: Any):
    """
    Harvest web citation URLs from a Google GenAI streaming chunk.

    URLs found in grounding metadata, citation metadata and per-part
    attributions are de-duplicated into both ``state.citations`` and
    ``ctx.urls``.

    :param ctx: Chat context
    :param state: Chat state
    :param chunk: Incoming streaming chunk
    """
    try:
        candidates = getattr(chunk, "candidates", None) or []
    except Exception:
        candidates = []

    if not isinstance(state.citations, list):
        state.citations = []

    def _register(url: Optional[str]):
        # Accept only absolute http(s) URLs; keep both stores duplicate-free.
        if not url or not isinstance(url, str):
            return
        url = url.strip()
        if not (url.startswith("http://") or url.startswith("https://")):
            return
        if ctx.urls is None:
            ctx.urls = []
        if url not in state.citations:
            state.citations.append(url)
        if url not in ctx.urls:
            ctx.urls.append(url)

    # candidate-level grounding attribution lookups (snake_case + camelCase)
    attribution_paths = (
        "web.uri",
        "web.url",
        "source.web.uri",
        "source.web.url",
        "source.uri",
        "source.url",
        "uri",
        "url",
    )
    entry_point_paths = (
        "search_entry_point.uri",
        "search_entry_point.url",
        "searchEntryPoint.uri",
        "searchEntryPoint.url",
        "search_entry_point.rendered_content_uri",
        "searchEntryPoint.rendered_content_uri",
    )
    citation_paths = ("uri", "url", "source.uri", "source.url", "web.uri", "web.url")
    part_attr_paths = ("web.uri", "web.url", "source.web.uri", "source.web.url", "uri", "url")

    for candidate in candidates:
        grounding = (
            safe_get(candidate, "grounding_metadata")
            or safe_get(candidate, "groundingMetadata")
        )
        if grounding:
            attributions = (
                safe_get(grounding, "grounding_attributions")
                or safe_get(grounding, "groundingAttributions")
                or []
            )
            try:
                for attribution in attributions or []:
                    for path in attribution_paths:
                        _register(safe_get(attribution, path))
            except Exception:
                pass
            for path in entry_point_paths:
                _register(safe_get(grounding, path))

        citation_meta = (
            safe_get(candidate, "citation_metadata")
            or safe_get(candidate, "citationMetadata")
        )
        if citation_meta:
            sources = (
                safe_get(citation_meta, "citation_sources")
                or safe_get(citation_meta, "citationSources")
                or safe_get(citation_meta, "citations")
                or []
            )
            try:
                for citation in sources or []:
                    for path in citation_paths:
                        _register(safe_get(citation, path))
            except Exception:
                pass

        # per-part citation metadata and grounding attributions
        try:
            for part in safe_get(candidate, "content.parts") or []:
                part_citations = (
                    safe_get(part, "citation_metadata")
                    or safe_get(part, "citationMetadata")
                )
                if part_citations:
                    sources = (
                        safe_get(part_citations, "citation_sources")
                        or safe_get(part_citations, "citationSources")
                        or safe_get(part_citations, "citations")
                        or []
                    )
                    for citation in sources or []:
                        for path in citation_paths:
                            _register(safe_get(citation, path))
                part_attributions = (
                    safe_get(part, "grounding_attributions")
                    or safe_get(part, "groundingAttributions")
                    or []
                )
                for attribution in part_attributions or []:
                    for path in part_attr_paths:
                        _register(safe_get(attribution, path))
        except Exception:
            pass

    # mirror collected citations into ctx.urls when it is still empty
    if state.citations and (ctx.urls is None or not ctx.urls):
        ctx.urls = list(state.citations)
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.09.05 00:00:00 #
9
+ # Updated Date: 2026.01.05 20:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional
File without changes
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.09.05 00:00:00 #
9
+ # Updated Date: 2026.01.05 20:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.12.28 00:00:00 #
9
+ # Updated Date: 2026.01.03 17:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from openai import OpenAI
@@ -22,6 +22,7 @@ from pygpt_net.core.types import (
22
22
  MODE_COMPUTER,
23
23
  )
24
24
  from pygpt_net.core.bridge.context import BridgeContext
25
+ from pygpt_net.core.types.chunk import ChunkType
25
26
  from pygpt_net.item.model import ModelItem
26
27
 
27
28
  from .audio import Audio
@@ -116,10 +117,12 @@ class ApiOpenAI:
116
117
  ctx = context.ctx
117
118
  ai_name = ctx.output_name
118
119
  thread_id = ctx.thread # from ctx
120
+ ctx.chunk_type = ChunkType.API_CHAT # default: ChatCompletion API
119
121
 
120
122
  # --- Responses API ----
121
123
  use_responses_api = self.responses.is_enabled(model, mode, parent_mode, is_expert_call, preset)
122
- ctx.use_responses_api = use_responses_api # set in context
124
+ if use_responses_api:
125
+ ctx.chunk_type = ChunkType.API_CHAT_RESPONSES # Responses API
123
126
 
124
127
  fixtures = self.window.controller.debug.fixtures
125
128
 
@@ -137,13 +140,14 @@ class ApiOpenAI:
137
140
 
138
141
  # completion
139
142
  if mode == MODE_COMPLETION:
143
+ ctx.chunk_type = ChunkType.API_COMPLETION
140
144
  response = self.completion.send(
141
145
  context=context,
142
146
  extra=extra,
143
147
  )
144
148
  used_tokens = self.completion.get_used_tokens()
145
149
 
146
- # chat, audio (OpenAI) | research (Perplexity)
150
+ # chat, audio (OpenAI) | research (deep research, Perplexity)
147
151
  elif mode in [
148
152
  MODE_CHAT,
149
153
  MODE_AUDIO,
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.12.31 16:00:00 #
9
+ # Updated Date: 2026.01.05 20:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import base64
@@ -177,7 +177,7 @@ class ImageWorker(QRunnable):
177
177
 
178
178
  def _is_gpt_image_model(self, model_id: Optional[str] = None) -> bool:
179
179
  mid = (model_id or self.model or "").lower()
180
- return mid.startswith("gpt-image-1")
180
+ return mid.startswith("gpt-image-1") or mid.startswith("chatgpt-image")
181
181
 
182
182
  def _is_dalle2(self, model_id: Optional[str] = None) -> bool:
183
183
  mid = (model_id or self.model or "").lower()
File without changes
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.12.26 00:00:00 #
9
+ # Updated Date: 2026.01.05 20:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import base64
@@ -6,11 +6,11 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2024.11.26 19:00:00 #
9
+ # Updated Date: 2026.01.05 20:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import re
13
-
13
+ from typing import Optional, Any
14
14
 
15
15
  def sanitize_name(name: str) -> str:
16
16
  """
@@ -24,4 +24,70 @@ def sanitize_name(name: str) -> str:
24
24
  # allowed characters: a-z, A-Z, 0-9, _, and -
25
25
  name = name.strip().lower()
26
26
  sanitized_name = re.sub(r'[^a-z0-9_-]', '_', name)
27
- return sanitized_name[:64] # limit to 64 characters
27
+ return sanitized_name[:64] # limit to 64 characters
28
+
29
+
30
def capture_openai_usage(state, u_obj: Any):
    """
    Extract token usage from OpenAI/xAI-compatible usage objects.

    :param state: Chat state; ``usage_vendor`` and ``usage_payload`` are written
    :param u_obj: Usage object or dict (Responses or Chat Completions shape)
    """
    if not u_obj:
        return

    def _first_int(*paths: str) -> Optional[int]:
        # First path that resolves to a real integer. Unlike an `or` chain,
        # this does not wrongly skip a legitimate 0 count.
        for path in paths:
            value = as_int(safe_get(u_obj, path))
            if value is not None:
                return value
        return None

    state.usage_vendor = "openai"
    in_tok = _first_int("input_tokens", "prompt_tokens")
    out_tok = _first_int("output_tokens", "completion_tokens")
    total = as_int(safe_get(u_obj, "total_tokens"))
    reasoning = _first_int(
        "output_tokens_details.reasoning_tokens",
        "completion_tokens_details.reasoning_tokens",
        "reasoning_tokens",
    ) or 0
    # report output inclusive of reasoning tokens
    out_with_reason = (out_tok or 0) + reasoning
    state.usage_payload = {
        "in": in_tok,
        "out": out_with_reason,
        "reasoning": reasoning,
        "total": total,
    }
51
+
52
def safe_get(obj: Any, path: str) -> Any:
    """
    Walk a dot-separated path through nested dicts, objects and sequences.

    :param obj: Source object or dict
    :param path: Dot-separated path, e.g. 'a.b.0.c'
    :return: Value at path, or None when any step is missing
    """
    current = obj
    for key in path.split("."):
        if current is None:
            return None
        if isinstance(current, dict):
            current = current.get(key)
        elif key.isdigit() and isinstance(current, (list, tuple)):
            # numeric segments index into sequences; isdigit() guarantees >= 0
            position = int(key)
            if position >= len(current):
                return None
            current = current[position]
        else:
            current = getattr(current, key, None)
    return current
76
+
77
+
78
def as_int(val: Any) -> Optional[int]:
    """
    Coerce a value to int when possible.

    :param val: Input value
    :return: int, or None when coercion fails
    """
    if val is None:
        return None
    try:
        return int(val)
    except Exception:
        pass
    # second chance: strings such as "3.9" go through float first
    try:
        return int(float(val))
    except Exception:
        return None
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.09.15 01:00:00 #
9
+ # Updated Date: 2026.01.04 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, Dict, Any
@@ -23,6 +23,7 @@ from pygpt_net.core.types import (
23
23
  MODE_RESEARCH,
24
24
  )
25
25
  from pygpt_net.core.bridge.context import BridgeContext
26
+ from pygpt_net.core.types.chunk import ChunkType
26
27
  from pygpt_net.item.model import ModelItem
27
28
 
28
29
  import xai_sdk
@@ -32,7 +33,8 @@ from .vision import Vision
32
33
  from .tools import Tools
33
34
  from .audio import Audio
34
35
  from .image import Image
35
- from .remote import Remote
36
+ from .remote_tools import Remote
37
+ from .responses import Responses
36
38
 
37
39
 
38
40
  class ApiXAI:
@@ -48,7 +50,8 @@ class ApiXAI:
48
50
  self.tools = Tools(window)
49
51
  self.audio = Audio(window)
50
52
  self.image = Image(window)
51
- self.remote = Remote(window) # Live Search builder
53
+ self.remote = Remote(window)
54
+ self.responses = Responses(window)
52
55
  self.client: Optional[xai_sdk.Client] = None
53
56
  self.locked = False
54
57
  self.last_client_args: Optional[Dict[str, Any]] = None
@@ -100,8 +103,9 @@ class ApiXAI:
100
103
  """
101
104
  Make an API call to xAI.
102
105
 
103
- Supports chat (stream/non-stream), images (via REST),
104
- and function-calling. Audio is not available in public xAI SDK at this time.
106
+ Uses old API and Chat Responses (stateful) via xai_sdk:
107
+ - Streaming: chat.stream() (tuples of (response, chunk))
108
+ - Non-stream: chat.sample()
105
109
 
106
110
  :param context: BridgeContext
107
111
  :param extra: Extra params (not used)
@@ -109,22 +113,33 @@ class ApiXAI:
109
113
  :return: True on success, False on error
110
114
  """
111
115
  mode = context.mode
112
- model = context.model
113
116
  stream = context.stream
114
117
  ctx = context.ctx
115
118
  ai_name = (ctx.output_name if ctx else "assistant")
116
-
117
- # No Responses API in xAI SDK
118
- if ctx:
119
- ctx.use_responses_api = False
120
-
119
+ model = context.model # model instance (item, not id)
121
120
  used_tokens = 0
122
121
  response = None
122
+ ctx.chunk_type = ChunkType.XAI_SDK
123
+
124
+ use_responses_api = True
125
+ if model and model.id.startswith("grok-3"):
126
+ use_responses_api = False # use old API
127
+
128
+ if mode in (
129
+ MODE_COMPLETION,
130
+ MODE_CHAT,
131
+ MODE_AUDIO,
132
+ MODE_RESEARCH
133
+ ):
134
+ # Audio TTS is not exposed via public SDK; treat MODE_AUDIO as chat input.
135
+ # NOTE: for grok-3 use Chat completions, for > grok-4 use Chat responses
136
+ if use_responses_api:
137
+ response = self.responses.send(context=context, extra=extra) # responses
138
+ used_tokens = self.responses.get_used_tokens()
139
+ else:
140
+ response = self.chat.send(context=context, extra=extra) # completions
141
+ used_tokens = self.chat.get_used_tokens()
123
142
 
124
- if mode in (MODE_COMPLETION, MODE_CHAT, MODE_AUDIO, MODE_RESEARCH):
125
- # There is no public realtime audio in SDK; treat MODE_AUDIO as chat (TTS not supported).
126
- response = self.chat.send(context=context, extra=extra)
127
- used_tokens = self.chat.get_used_tokens()
128
143
  if ctx:
129
144
  self.vision.append_images(ctx)
130
145
 
@@ -150,7 +165,10 @@ class ApiXAI:
150
165
 
151
166
  if ctx:
152
167
  ctx.ai_name = ai_name
153
- self.chat.unpack_response(context.mode, response, ctx)
168
+ if use_responses_api:
169
+ self.responses.unpack_response(context.mode, response, ctx)
170
+ else:
171
+ self.chat.unpack_response(context.mode, response, ctx)
154
172
  try:
155
173
  for tc in getattr(ctx, "tool_calls", []) or []:
156
174
  fn = tc.get("function") or {}
@@ -174,6 +192,88 @@ class ApiXAI:
174
192
 
175
193
  If context.request is set, makes a full call() instead (for consistency).
176
194
 
195
+ :param context: BridgeContext
196
+ :param extra: Extra params (not used)
197
+ :return: Output text or "" on error
198
+ """
199
+ model = context.model or self.window.core.models.from_defaults()
200
+ if model and model.id.startswith("grok-3"):
201
+ return self.quick_call_old(context, extra) # grok-3 uses old path
202
+
203
+ if context.request:
204
+ context.stream = False
205
+ context.mode = MODE_CHAT
206
+ self.locked = True
207
+ self.call(context, extra)
208
+ self.locked = False
209
+ return context.ctx.output
210
+
211
+ self.locked = True
212
+ try:
213
+ ctx = context.ctx
214
+ prompt = context.prompt
215
+ system_prompt = context.system_prompt
216
+ history = context.history
217
+ functions = context.external_functions
218
+ attachments = context.attachments
219
+ multimodal_ctx = context.multimodal_ctx
220
+
221
+ # Prepare client-side tools for SDK (no server-side tools in quick_call)
222
+ client_tools = self.tools.prepare_sdk_tools(functions)
223
+
224
+ client = self.get_client(MODE_CHAT, model)
225
+ # store_messages: false for quick, and false if images present (SDK guidance)
226
+ store_messages = False
227
+ prev_id = None
228
+
229
+ # Create chat session
230
+ include = []
231
+ chat = client.chat.create(
232
+ model=model.id,
233
+ tools=(client_tools if client_tools else None),
234
+ include=(include if include else None),
235
+ store_messages=store_messages,
236
+ previous_response_id=prev_id,
237
+ )
238
+
239
+ # Append history if enabled and no previous_response_id is used
240
+ self.responses.append_history_sdk(
241
+ chat=chat,
242
+ system_prompt=system_prompt,
243
+ model=model,
244
+ history=history,
245
+ )
246
+
247
+ # Append current prompt with optional images
248
+ self.responses.append_current_user_sdk(
249
+ chat=chat,
250
+ prompt=prompt,
251
+ attachments=attachments,
252
+ multimodal_ctx=multimodal_ctx,
253
+ )
254
+
255
+ resp = chat.sample()
256
+ # Extract client-side tool calls if any (leave server-side out)
257
+ out = getattr(resp, "content", "") or ""
258
+ if ctx:
259
+ self.responses.quick_collect_response_id(resp, ctx)
260
+ return out.strip()
261
+ except Exception as e:
262
+ self.window.core.debug.log(e)
263
+ return ""
264
+ finally:
265
+ self.locked = False
266
+
267
+ def quick_call_old(
268
+ self,
269
+ context: BridgeContext,
270
+ extra: dict = None
271
+ ) -> str:
272
+ """
273
+ Quick non-streaming xAI chat call and return output text.
274
+
275
+ If context.request is set, makes a full call() instead (for consistency).
276
+
177
277
  :param context: BridgeContext
178
278
  :param extra: Extra params (not used)
179
279
  :return: Output text or "" on error
@@ -201,7 +301,7 @@ class ApiXAI:
201
301
  # If tools are present, prefer non-streaming HTTP Chat Completions path to extract tool calls reliably.
202
302
  # Otherwise use native SDK chat.sample().
203
303
  if tools:
204
- out, calls, citations, usage = self.chat.call_http_nonstream(
304
+ out, calls, citations, usage = self.chat.call_http_nonstream(
205
305
  model=model.id,
206
306
  prompt=prompt,
207
307
  system_prompt=system_prompt,