pygpt-net 2.6.36__py3-none-any.whl → 2.6.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +5 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/handler/anthropic_stream.py +166 -0
  4. pygpt_net/controller/chat/handler/google_stream.py +181 -0
  5. pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
  6. pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
  7. pygpt_net/controller/chat/handler/openai_stream.py +260 -0
  8. pygpt_net/controller/chat/handler/utils.py +210 -0
  9. pygpt_net/controller/chat/handler/worker.py +566 -0
  10. pygpt_net/controller/chat/handler/xai_stream.py +135 -0
  11. pygpt_net/controller/chat/stream.py +1 -1
  12. pygpt_net/controller/ctx/ctx.py +1 -1
  13. pygpt_net/controller/model/editor.py +3 -0
  14. pygpt_net/core/bridge/context.py +35 -35
  15. pygpt_net/core/bridge/worker.py +40 -16
  16. pygpt_net/core/render/web/body.py +29 -34
  17. pygpt_net/data/config/config.json +10 -3
  18. pygpt_net/data/config/models.json +3 -3
  19. pygpt_net/data/config/settings.json +105 -0
  20. pygpt_net/data/css/style.dark.css +2 -3
  21. pygpt_net/data/css/style.light.css +2 -3
  22. pygpt_net/data/locale/locale.de.ini +3 -1
  23. pygpt_net/data/locale/locale.en.ini +19 -1
  24. pygpt_net/data/locale/locale.es.ini +3 -1
  25. pygpt_net/data/locale/locale.fr.ini +3 -1
  26. pygpt_net/data/locale/locale.it.ini +3 -1
  27. pygpt_net/data/locale/locale.pl.ini +4 -2
  28. pygpt_net/data/locale/locale.uk.ini +3 -1
  29. pygpt_net/data/locale/locale.zh.ini +3 -1
  30. pygpt_net/provider/api/__init__.py +5 -3
  31. pygpt_net/provider/api/anthropic/__init__.py +190 -29
  32. pygpt_net/provider/api/anthropic/audio.py +30 -0
  33. pygpt_net/provider/api/anthropic/chat.py +341 -0
  34. pygpt_net/provider/api/anthropic/image.py +25 -0
  35. pygpt_net/provider/api/anthropic/tools.py +266 -0
  36. pygpt_net/provider/api/anthropic/vision.py +142 -0
  37. pygpt_net/provider/api/google/chat.py +2 -2
  38. pygpt_net/provider/api/google/tools.py +58 -48
  39. pygpt_net/provider/api/google/vision.py +7 -1
  40. pygpt_net/provider/api/openai/chat.py +1 -0
  41. pygpt_net/provider/api/openai/vision.py +6 -0
  42. pygpt_net/provider/api/x_ai/__init__.py +247 -0
  43. pygpt_net/provider/api/x_ai/audio.py +32 -0
  44. pygpt_net/provider/api/x_ai/chat.py +968 -0
  45. pygpt_net/provider/api/x_ai/image.py +208 -0
  46. pygpt_net/provider/api/x_ai/remote.py +262 -0
  47. pygpt_net/provider/api/x_ai/tools.py +120 -0
  48. pygpt_net/provider/api/x_ai/vision.py +119 -0
  49. pygpt_net/provider/core/config/patch.py +28 -0
  50. pygpt_net/provider/llms/anthropic.py +4 -2
  51. pygpt_net/ui/base/config_dialog.py +5 -11
  52. pygpt_net/ui/dialog/models.py +2 -4
  53. pygpt_net/ui/dialog/plugins.py +40 -43
  54. pygpt_net/ui/widget/element/labels.py +19 -3
  55. pygpt_net/ui/widget/textarea/web.py +1 -1
  56. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/METADATA +11 -6
  57. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/RECORD +60 -41
  58. pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
  59. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,142 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.09.05 01:00:00 #
+ # ================================================== #
+
+ import base64
+ import os
+ from typing import Optional, Dict, List, Union
+
+ from pygpt_net.item.attachment import AttachmentItem
+ from pygpt_net.item.ctx import CtxItem
+
+
+ class Vision:
+     def __init__(self, window=None):
+         """
+         Vision helpers for Anthropic (image input blocks).
+
+         :param window: Window instance
+         """
+         self.window = window
+         self.attachments: Dict[str, str] = {}
+         self.urls: List[str] = []
+         self.input_tokens = 0
+
+     def build_blocks(
+             self,
+             content: Union[str, list],
+             attachments: Optional[Dict[str, AttachmentItem]] = None,
+     ) -> List[dict]:
+         """
+         Build image content blocks from local attachments.
+
+         :param content: User message text (unused here)
+         :param attachments: Attachments dict (id -> AttachmentItem)
+         :return: List of Anthropic content blocks
+         """
+         blocks: List[dict] = []
+         self.attachments = {}
+         self.urls = []
+
+         if attachments:
+             for id_, attachment in attachments.items():
+                 if attachment.path and os.path.exists(attachment.path):
+                     if self.is_image(attachment.path):
+                         mime = self._guess_mime(attachment.path)
+                         with open(attachment.path, "rb") as f:
+                             data = f.read()
+                         b64 = base64.b64encode(data).decode("utf-8")
+                         blocks.append({
+                             "type": "image",
+                             "source": {
+                                 "type": "base64",
+                                 "media_type": mime,
+                                 "data": b64,
+                             }
+                         })
+                         self.attachments[id_] = attachment.path
+                         attachment.consumed = True
+
+         return blocks
+
+     def is_image(self, path: str) -> bool:
+         """
+         Check if path looks like an image.
+
+         :param path: File path
+         :return: True if path has image file extension
+         """
+         return path.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif', '.webp'))
+
+     def _guess_mime(self, path: str) -> str:
+         """
+         Guess mime type from file extension.
+
+         :param path: File path
+         :return: MIME type string
+         """
+         ext = os.path.splitext(path)[1].lower().lstrip(".")
+         if ext in ("jpg", "jpeg"):
+             return "image/jpeg"
+         if ext == "png":
+             return "image/png"
+         if ext == "gif":
+             return "image/gif"
+         if ext == "bmp":
+             return "image/bmp"
+         if ext == "webp":
+             return "image/webp"
+         if ext == "tiff":
+             return "image/tiff"
+         return "image/jpeg"
+
+     def append_images(self, ctx: CtxItem):
+         """
+         Append sent images paths to context for UI/history.
+
+         :param ctx: CtxItem
+         """
+         images = self.get_attachments()
+         if len(images) > 0:
+             ctx.images = self.window.core.filesystem.make_local_list(list(images.values()))
+
+     def get_attachments(self) -> Dict[str, str]:
+         """
+         Return attachments dict (id -> path).
+
+         :return: Attachments dictionary
+         """
+         return self.attachments
+
+     def get_urls(self) -> List[str]:
+         """
+         Return image urls (unused).
+
+         :return: List of image URLs
+         """
+         return self.urls
+
+     def reset_tokens(self):
+         """Reset input tokens counter."""
+         self.input_tokens = 0
+
+     def get_used_tokens(self) -> int:
+         """
+         Return input tokens counter.
+
+         :return: Number of input tokens
+         """
+         return self.input_tokens
+
+     def reset(self):
+         """Reset state."""
+         self.attachments = {}
+         self.urls = []
+         self.input_tokens = 0
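
The new anthropic/vision.py helper above converts local image attachments into Anthropic image content blocks. For reference, a minimal standalone sketch of the block shape it produces (path and MIME type are illustrative; the helper additionally records the attachment and marks it as consumed):

import base64

def anthropic_image_block(path: str, media_type: str = "image/jpeg") -> dict:
    # Same shape as emitted by Vision.build_blocks(): a base64-encoded
    # "source" wrapped in an Anthropic "image" content block.
    with open(path, "rb") as f:
        data = base64.b64encode(f.read()).decode("utf-8")
    return {
        "type": "image",
        "source": {"type": "base64", "media_type": media_type, "data": data},
    }

# Usage (illustrative): prepend the block to the user message content.
# content = [anthropic_image_block("photo.jpg"), {"type": "text", "text": "Describe this image."}]
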
@@ -401,8 +401,7 @@ class Chat:
                  # Store only as URL; downloading is out of scope here.
                  if uri.startswith("http://") or uri.startswith("https://"):
                      urls.append(uri)
-             except Exception:
-                 # Best-effort only
+             except Exception as e:
                  pass

          if images:
@@ -497,6 +496,7 @@ class Chat:
          :param multimodal_ctx: MultimodalContext for audio
          :return: List of Part
          """
+         self.window.core.api.google.vision.reset()
          parts: List[Part] = []
          if content:
              parts.append(Part.from_text(text=str(content)))
@@ -28,61 +28,70 @@ class Tools:
      # -------- SANITIZER --------
      def _sanitize_schema(self, schema: Any) -> Any:
          """
-         Sanitize JSON Schema dict by removing unsupported keywords and normalizing types.
-
-         1. Remove unsupported keywords like additionalProperties, patternProperties,
-            dependencies, oneOf, anyOf, allOf, $ref, $defs, examples, readOnly, writeOnly.
-         2. Normalize 'type' to a single value (e.g., if it's a list, take the first non-null type).
-         3. Ensure 'enum' is only present for string types.
-         4. Recursively sanitize nested schemas in 'properties' and 'items'.
-         5. Handle arrays by ensuring 'items' is a single schema.
-         6. Handle objects by ensuring 'properties' is a dict and 'required' is a list of strings.
-
-         :param schema: Any JSON Schema as dict or list
-         :return: Sanitized schema dict
+         Sanitize a JSON Schema dict for Google GenAI (function parameters).
+
+         Key points:
+         - Remove unsupported JSON Schema keywords (additionalProperties, oneOf, $ref, ...).
+         - Normalize "type" so that it's either a single lowercase string or absent.
+           Handle lists (unions), non-string types (e.g., dict), and infer a type when possible.
+         - Keep "enum" only when type is string.
+         - For objects, sanitize only "properties" (each property's schema) and validate "required".
+         - For arrays, sanitize "items" into a single schema (object, not list).
+         - Do not recurse into "properties" itself as a map, nor into "required"/"enum" as they are scalars/lists.
          """
+         # 1) Fast exits
          if isinstance(schema, list):
-             return self._sanitize_schema(schema[0]) if schema else {}
+             # Only descend into lists of dicts (complex schemas). For scalar lists (required/enum), return as is.
+             if schema and all(isinstance(x, dict) for x in schema):
+                 return [self._sanitize_schema(x) for x in schema]
+             return schema

          if not isinstance(schema, dict):
              return schema

+         # 2) Remove unsupported/problematic keywords for Google function parameters
          banned = {
-             "additionalProperties",
-             "additional_properties",
-             "unevaluatedProperties",
-             "patternProperties",
-             "dependencies",
-             "dependentSchemas",
-             "dependentRequired",
-             "oneOf",
-             "anyOf",
-             "allOf",
-             "$defs",
-             "$ref",
-             "$schema",
-             "$id",
-             "examples",
-             "readOnly",
-             "writeOnly",
-             "nullable",
+             "additionalProperties", "additional_properties",
+             "unevaluatedProperties", "patternProperties",
+             "dependencies", "dependentSchemas", "dependentRequired",
+             "oneOf", "anyOf", "allOf",
+             "$defs", "$ref", "$schema", "$id",
+             "examples", "readOnly", "writeOnly", "nullable",
          }
          for k in list(schema.keys()):
              if k in banned:
                  schema.pop(k, None)

-         # Union -> first non-null type
+         # 3) Normalize "type" safely
          t = schema.get("type")
-         if isinstance(t, list):
-             t_no_null = [x for x in t if x != "null"]
-             schema["type"] = t_no_null[0] if t_no_null else "string"

-         # enum only for string
-         if "enum" in schema and schema.get("type") not in ("string", "STRING"):
+         # a) If it's a list (union), pick the first non-null string, otherwise default to "object"
+         if isinstance(t, list):
+             t_no_null = [x for x in t if isinstance(x, str) and x.lower() != "null"]
+             schema["type"] = t_no_null[0] if t_no_null else "object"
+             t = schema["type"]
+
+         # b) If "type" is not a string (could be dict or missing), try to infer; otherwise drop it
+         if not isinstance(t, str):
+             if isinstance(schema.get("properties"), dict):
+                 schema["type"] = "object"
+             elif "items" in schema:
+                 schema["type"] = "array"
+             elif isinstance(schema.get("enum"), list) and all(isinstance(x, str) for x in schema["enum"]):
+                 schema["type"] = "string"
+             else:
+                 schema.pop("type", None)
+         else:
+             schema["type"] = t.lower()
+
+         type_l = schema["type"].lower() if isinstance(schema.get("type"), str) else ""
+
+         # 4) Keep enum only for string-typed schemas
+         if "enum" in schema and type_l != "string":
              schema.pop("enum", None)

-         # object
-         if (schema.get("type") or "").lower() == "object":
+         # 5) Objects: sanitize properties and required
+         if type_l == "object":
              props = schema.get("properties")
              if not isinstance(props, dict):
                  props = {}
@@ -92,25 +101,26 @@ class Tools:
              schema["properties"] = clean_props

              req = schema.get("required")
-             if not isinstance(req, list) or not all(isinstance(x, str) for x in req):
-                 schema.pop("required", None)
-             elif len(req) == 0:
+             if not (isinstance(req, list) and all(isinstance(x, str) for x in req) and len(req) > 0):
                  schema.pop("required", None)

-         # array
-         if (schema.get("type") or "").lower() == "array":
+         # 6) Arrays: ensure "items" is a single dict schema
+         elif type_l == "array":
              items = schema.get("items")
-             if isinstance(items, list) and items:
-                 items = items[0]
+             if isinstance(items, list):
+                 items = items[0] if items else {"type": "string"}
              if not isinstance(items, dict):
                  items = {"type": "string"}
              schema["items"] = self._sanitize_schema(items)

-         # recursive sanitize
+         # 7) Recurse into the remaining nested dict/list values,
+         #    but skip "properties", "items", "required", and "enum" (already handled)
          for k, v in list(schema.items()):
+             if k in ("properties", "items", "required", "enum"):
+                 continue
              if isinstance(v, dict):
                  schema[k] = self._sanitize_schema(v)
-             elif isinstance(v, list):
+             elif isinstance(v, list) and v and all(isinstance(x, dict) for x in v):
                  schema[k] = [self._sanitize_schema(x) for x in v]

          return schema
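
To make the reworked sanitizer in google/tools.py concrete, here is a hedged before/after sketch for a typical tool-parameter schema (the input is illustrative; the commented output is what the rules listed in the docstring above should produce, not a captured result):

raw = {
    "type": ["object", "null"],            # union type -> normalized to "object"
    "additionalProperties": False,         # unsupported keyword -> removed
    "properties": {
        "city": {"type": "string", "enum": ["NY", "LA"]},          # enum kept (string type)
        "days": {"type": ["integer", "null"]},                      # union -> "integer"
        "tags": {"type": "array", "items": [{"type": "string"}]},   # items list -> single schema
    },
    "required": ["city"],                  # non-empty list of strings -> kept
}

# After _sanitize_schema(raw), the result should look roughly like:
# {
#     "type": "object",
#     "properties": {
#         "city": {"type": "string", "enum": ["NY", "LA"]},
#         "days": {"type": "integer"},
#         "tags": {"type": "array", "items": {"type": "string"}},
#     },
#     "required": ["city"],
# }
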
@@ -126,4 +126,10 @@ class Vision:

          :return: Number of input tokens
          """
-         return self.input_tokens
+         return self.input_tokens
+
+     def reset(self):
+         """Reset state"""
+         self.attachments = {}
+         self.urls = []
+         self.input_tokens = 0
@@ -177,6 +177,7 @@ class Chat:
          :param multimodal_ctx: Multimodal context
          :return: messages list
          """
+         self.window.core.api.openai.vision.reset()
          messages = []

          # tokens config
@@ -369,6 +369,12 @@ class Vision:
          """Reset input tokens counter"""
          self.input_tokens = 0

+     def reset(self):
+         """Reset attachments, urls and input tokens"""
+         self.attachments = {}
+         self.urls = []
+         self.input_tokens = 0
+
      def get_attachments(self) -> Dict[str, str]:
          """
          Get attachments
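
The reset() additions in google/vision.py and openai/vision.py mirror the reset() calls added at the top of each provider's message builder (google/chat.py and openai/chat.py above), so per-request vision state does not carry over between turns. A minimal sketch of the pattern (simplified; the class and builder here only echo the diff and are not the full implementations):

class Vision:
    def __init__(self):
        self.attachments: dict = {}
        self.urls: list = []
        self.input_tokens = 0

    def reset(self):
        # Clear attachments, URLs and the token counter before a new request.
        self.attachments = {}
        self.urls = []
        self.input_tokens = 0


def build_messages(vision: Vision, prompt: str) -> list:
    vision.reset()  # same idea as the reset() calls added in chat.py
    return [{"role": "user", "content": prompt}]
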
@@ -0,0 +1,247 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.09.05 01:00:00 #
+ # ================================================== #
+
+ from typing import Optional, Dict, Any
+
+ import os
+ import json
+
+ from pygpt_net.core.types import (
+     MODE_ASSISTANT,
+     MODE_AUDIO,
+     MODE_CHAT,
+     MODE_COMPLETION,
+     MODE_IMAGE,
+     MODE_RESEARCH,
+ )
+ from pygpt_net.core.bridge.context import BridgeContext
+ from pygpt_net.item.model import ModelItem
+
+ import xai_sdk
+
+ from .chat import Chat
+ from .vision import Vision
+ from .tools import Tools
+ from .audio import Audio
+ from .image import Image
+ from .remote import Remote
+
+
+ class ApiXAI:
+     def __init__(self, window=None):
+         """
+         xAI (Grok) Python SDK wrapper.
+
+         :param window: Window instance
+         """
+         self.window = window
+         self.chat = Chat(window)
+         self.vision = Vision(window)
+         self.tools = Tools(window)
+         self.audio = Audio(window)
+         self.image = Image(window)
+         self.remote = Remote(window)  # Live Search builder
+         self.client: Optional[xai_sdk.Client] = None
+         self.locked = False
+         self.last_client_args: Optional[Dict[str, Any]] = None
+
+     def get_client(
+             self,
+             mode: str = MODE_CHAT,
+             model: ModelItem = None
+     ) -> xai_sdk.Client:
+         """
+         Get or create xAI client.
+
+         - Reads api_key from config or XAI_API_KEY env.
+         - Caches the client instance.
+
+         :param mode: One of MODE_*
+         :param model: ModelItem (optional, not used currently)
+         :return: xai_sdk.Client
+         """
+         if self.client is not None:
+             return self.client
+
+         cfg = self.window.core.config
+         api_key = cfg.get("api_key_xai") or os.environ.get("XAI_API_KEY") or ""
+         timeout = cfg.get("api_native_xai.timeout")  # optional
+
+         kwargs: Dict[str, Any] = {}
+         if api_key:
+             kwargs["api_key"] = api_key
+         if timeout is not None:
+             # Official SDK supports setting a global timeout on client init.
+             kwargs["timeout"] = timeout
+
+         self.client = xai_sdk.Client(**kwargs)
+         return self.client
+
+     def call(
+             self,
+             context: BridgeContext,
+             extra: dict = None,
+             rt_signals=None
+     ) -> bool:
+         """
+         Make an API call to xAI.
+
+         Supports chat (stream/non-stream), images (via REST),
+         and function-calling. Audio is not available in public xAI SDK at this time.
+
+         :param context: BridgeContext
+         :param extra: Extra params (not used)
+         :param rt_signals: Realtime signals (not used)
+         :return: True on success, False on error
+         """
+         mode = context.mode
+         model = context.model
+         stream = context.stream
+         ctx = context.ctx
+         ai_name = (ctx.output_name if ctx else "assistant")
+
+         # No Responses API in xAI SDK
+         if ctx:
+             ctx.use_responses_api = False
+
+         used_tokens = 0
+         response = None
+
+         if mode in (MODE_COMPLETION, MODE_CHAT, MODE_AUDIO, MODE_RESEARCH):
+             # There is no public realtime audio in SDK; treat MODE_AUDIO as chat (TTS not supported).
+             response = self.chat.send(context=context, extra=extra)
+             used_tokens = self.chat.get_used_tokens()
+             if ctx:
+                 self.vision.append_images(ctx)
+
+         elif mode == MODE_IMAGE:
+             # Image generation via REST /v1/images/generations (OpenAI-compatible)
+             return self.image.generate(context=context, extra=extra)
+
+         elif mode == MODE_ASSISTANT:
+             return False  # not implemented for xAI
+
+         if stream:
+             if ctx:
+                 ctx.stream = response
+                 ctx.set_output("", ai_name)
+                 ctx.input_tokens = used_tokens
+             return True
+
+         if response is None:
+             return False
+
+         if isinstance(response, dict) and "error" in response:
+             return False
+
+         if ctx:
+             ctx.ai_name = ai_name
+             self.chat.unpack_response(context.mode, response, ctx)
+             try:
+                 for tc in getattr(ctx, "tool_calls", []) or []:
+                     fn = tc.get("function") or {}
+                     args = fn.get("arguments")
+                     if isinstance(args, str):
+                         try:
+                             fn["arguments"] = json.loads(args)
+                         except Exception:
+                             fn["arguments"] = {}
+             except Exception:
+                 pass
+         return True
+
+     def quick_call(
+             self,
+             context: BridgeContext,
+             extra: dict = None
+     ) -> str:
+         """
+         Quick non-streaming xAI chat call and return output text.
+
+         If context.request is set, makes a full call() instead (for consistency).
+
+         :param context: BridgeContext
+         :param extra: Extra params (not used)
+         :return: Output text or "" on error
+         """
+         if context.request:
+             context.stream = False
+             context.mode = MODE_CHAT
+             self.locked = True
+             self.call(context, extra)
+             self.locked = False
+             return context.ctx.output
+
+         self.locked = True
+         try:
+             ctx = context.ctx
+             prompt = context.prompt
+             system_prompt = context.system_prompt
+             temperature = context.temperature
+             history = context.history
+             functions = context.external_functions
+             model = context.model or self.window.core.models.from_defaults()
+
+             tools = self.tools.prepare(functions)
+
+             # If tools are present, prefer non-streaming HTTP Chat Completions path to extract tool calls reliably.
+             # Otherwise use native SDK chat.sample().
+             if tools:
+                 out, calls, citations, usage = self.chat.call_http_nonstream(
+                     model=model.id,
+                     prompt=prompt,
+                     system_prompt=system_prompt,
+                     history=history,
+                     attachments=context.attachments,
+                     multimodal_ctx=context.multimodal_ctx,
+                     tools=tools,
+                     temperature=temperature,
+                     max_tokens=context.max_tokens,
+                 )
+                 if ctx:
+                     if calls:
+                         ctx.tool_calls = calls
+                 return out or ""
+
+             # Native SDK path (no tools)
+             client = self.get_client(MODE_CHAT, model)
+             messages = self.chat.build_messages(
+                 prompt=prompt,
+                 system_prompt=system_prompt,
+                 model=model,
+                 history=history,
+                 attachments=context.attachments,
+                 multimodal_ctx=context.multimodal_ctx,
+             )
+             chat = client.chat.create(model=model.id, messages=messages)
+             resp = chat.sample()
+             return getattr(resp, "content", "") or ""
+         except Exception as e:
+             self.window.core.debug.log(e)
+             return ""
+         finally:
+             self.locked = False
+
+     def stop(self):
+         """On global event stop."""
+         pass
+
+     def close(self):
+         """Close xAI client."""
+         if self.locked:
+             return
+         self.client = None  # xai-sdk gRPC channels close on GC; explicit close not exposed.
+
+     def safe_close(self):
+         """Close client."""
+         if self.locked:
+             return
+         self.client = None
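
For orientation, the native-SDK branch of quick_call() above reduces to roughly the following flow. This is a sketch based only on the calls visible in the diff (xai_sdk.Client, client.chat.create, chat.sample); the message shape and model id are assumptions here, since the real wrapper builds its messages via chat.build_messages():

import os
import xai_sdk

# Assumes XAI_API_KEY is set in the environment (the same fallback get_client() uses).
client = xai_sdk.Client(api_key=os.environ["XAI_API_KEY"])

# Hypothetical OpenAI-style message list; in the wrapper this comes from chat.build_messages().
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello in one sentence."},
]

chat = client.chat.create(model="grok-4", messages=messages)  # model id is illustrative
resp = chat.sample()  # non-streaming sample, as in quick_call()
print(getattr(resp, "content", "") or "")
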
@@ -0,0 +1,32 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.09.05 01:00:00 #
+ # ================================================== #
+
+ from typing import Tuple
+
+
+ class Audio:
+     def __init__(self, window=None):
+         """
+         Audio helpers for xAI.
+
+         Note: As of now, the public xAI Python SDK does not expose TTS/STT or realtime audio APIs.
+         This class exists to keep provider surface compatible.
+
+         :param window: Window instance
+         """
+         self.window = window
+
+     # Placeholders to keep interface parity
+     def build_part(self, multimodal_ctx) -> None:
+         return None
+
+     def extract_first_audio_part(self, response) -> Tuple[None, None]:
+         return None, None