pygpt-net 2.6.36__py3-none-any.whl → 2.6.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. pygpt_net/CHANGELOG.txt +5 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/handler/anthropic_stream.py +166 -0
  4. pygpt_net/controller/chat/handler/google_stream.py +181 -0
  5. pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
  6. pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
  7. pygpt_net/controller/chat/handler/openai_stream.py +260 -0
  8. pygpt_net/controller/chat/handler/utils.py +210 -0
  9. pygpt_net/controller/chat/handler/worker.py +566 -0
  10. pygpt_net/controller/chat/handler/xai_stream.py +135 -0
  11. pygpt_net/controller/chat/stream.py +1 -1
  12. pygpt_net/controller/ctx/ctx.py +1 -1
  13. pygpt_net/controller/model/editor.py +3 -0
  14. pygpt_net/core/bridge/context.py +35 -35
  15. pygpt_net/core/bridge/worker.py +40 -16
  16. pygpt_net/core/render/web/body.py +29 -34
  17. pygpt_net/data/config/config.json +10 -3
  18. pygpt_net/data/config/models.json +3 -3
  19. pygpt_net/data/config/settings.json +105 -0
  20. pygpt_net/data/css/style.dark.css +2 -3
  21. pygpt_net/data/css/style.light.css +2 -3
  22. pygpt_net/data/locale/locale.de.ini +3 -1
  23. pygpt_net/data/locale/locale.en.ini +19 -1
  24. pygpt_net/data/locale/locale.es.ini +3 -1
  25. pygpt_net/data/locale/locale.fr.ini +3 -1
  26. pygpt_net/data/locale/locale.it.ini +3 -1
  27. pygpt_net/data/locale/locale.pl.ini +4 -2
  28. pygpt_net/data/locale/locale.uk.ini +3 -1
  29. pygpt_net/data/locale/locale.zh.ini +3 -1
  30. pygpt_net/provider/api/__init__.py +5 -3
  31. pygpt_net/provider/api/anthropic/__init__.py +190 -29
  32. pygpt_net/provider/api/anthropic/audio.py +30 -0
  33. pygpt_net/provider/api/anthropic/chat.py +341 -0
  34. pygpt_net/provider/api/anthropic/image.py +25 -0
  35. pygpt_net/provider/api/anthropic/tools.py +266 -0
  36. pygpt_net/provider/api/anthropic/vision.py +142 -0
  37. pygpt_net/provider/api/google/chat.py +2 -2
  38. pygpt_net/provider/api/google/tools.py +58 -48
  39. pygpt_net/provider/api/google/vision.py +7 -1
  40. pygpt_net/provider/api/openai/chat.py +1 -0
  41. pygpt_net/provider/api/openai/vision.py +6 -0
  42. pygpt_net/provider/api/x_ai/__init__.py +247 -0
  43. pygpt_net/provider/api/x_ai/audio.py +32 -0
  44. pygpt_net/provider/api/x_ai/chat.py +968 -0
  45. pygpt_net/provider/api/x_ai/image.py +208 -0
  46. pygpt_net/provider/api/x_ai/remote.py +262 -0
  47. pygpt_net/provider/api/x_ai/tools.py +120 -0
  48. pygpt_net/provider/api/x_ai/vision.py +119 -0
  49. pygpt_net/provider/core/config/patch.py +28 -0
  50. pygpt_net/provider/llms/anthropic.py +4 -2
  51. pygpt_net/ui/base/config_dialog.py +5 -11
  52. pygpt_net/ui/dialog/models.py +2 -4
  53. pygpt_net/ui/dialog/plugins.py +40 -43
  54. pygpt_net/ui/widget/element/labels.py +19 -3
  55. pygpt_net/ui/widget/textarea/web.py +1 -1
  56. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/METADATA +11 -6
  57. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/RECORD +60 -41
  58. pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
  59. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/entry_points.txt +0 -0
@@ -18,7 +18,7 @@ from pygpt_net.core.events import RenderEvent
18
18
  from pygpt_net.core.types import MODE_ASSISTANT
19
19
  from pygpt_net.item.ctx import CtxItem
20
20
 
21
- from .handler.stream_worker import StreamWorker
21
+ from .handler.worker import StreamWorker
22
22
 
23
23
  class Stream:
24
24
  def __init__(self, window=None):
@@ -295,7 +295,7 @@ class Ctx:
295
295
 
296
296
  def clean_memory(self):
297
297
  """Clean memory"""
298
- self.window.core.api.openai.close() # clear gpt client
298
+ pass
299
299
 
300
300
  def new_in_group(
301
301
  self,
@@ -37,15 +37,18 @@ class Editor:
37
37
  "id": {
38
38
  "type": "text",
39
39
  "label": "model.id",
40
+ "description": "model.id.desc",
40
41
  },
41
42
  "name": {
42
43
  "type": "text",
43
44
  "label": "model.name",
45
+ "description": "model.name.desc",
44
46
  },
45
47
  "provider": {
46
48
  "type": "combo",
47
49
  "use": "llm_providers",
48
50
  "label": "model.provider",
51
+ "description": "model.provider.desc",
49
52
  },
50
53
  "ctx": {
51
54
  "type": "int",
@@ -22,10 +22,10 @@ class MultimodalContext:
22
22
  """
23
23
  Multimodal context
24
24
  """
25
- is_audio_input: bool = False
26
- is_audio_output: bool = False
27
- audio_data: Optional[Any] = None
28
- audio_format: str = "wav"
25
+ is_audio_input: bool = False # is audio input
26
+ is_audio_output: bool = False # is audio output
27
+ audio_data: Optional[Any] = None # audio data (bytes or file-like object)
28
+ audio_format: str = "wav" # audio format (wav, mp3, etc.)
29
29
 
30
30
  def __init__(self, **kwargs):
31
31
  """
@@ -59,32 +59,32 @@ class BridgeContext:
59
59
  """
60
60
  Bridge context
61
61
  """
62
- assistant_id: str = ""
63
- attachments: dict = field(default_factory=dict)
64
- ctx: Optional[CtxItem] = None
65
- external_functions: list = field(default_factory=list)
66
- file_ids: list = field(default_factory=list)
67
- force: bool = False # Force mode flag
68
- force_sync: bool = False # Force sync flag
69
- history: list = field(default_factory=list)
70
- idx: Optional[Any] = None
71
- idx_mode: str = "chat"
72
- is_expert_call: bool = False # Expert call flag
73
- max_tokens: int = 0
74
- mode: Optional[Any] = None
62
+ assistant_id: str = "" # OpenAI Assistant ID
63
+ attachments: dict = field(default_factory=dict) # id -> AttachmentItem
64
+ ctx: Optional[CtxItem] = None # CtxItem instance
65
+ external_functions: list = field(default_factory=list) # list of tools definitions
66
+ file_ids: list = field(default_factory=list) # list of uploaded file IDs
67
+ force: bool = False # force send
68
+ force_sync: bool = False # force synchronous plugin call
69
+ history: list = field(default_factory=list) # list of messages
70
+ idx: Optional[Any] = None # index name for LlamaIndex
71
+ idx_mode: str = "chat" # sub-mode for LlamaIndex
72
+ is_expert_call: bool = False # is expert call
73
+ max_tokens: int = 0 # max tokens
74
+ mode: Optional[Any] = None # mode to use
75
75
  model: Optional[ModelItem] = None # model instance, not model name
76
76
  multimodal_ctx: MultimodalContext = field(default_factory=lambda: MultimodalContext()) # AudioContext
77
77
  parent_mode: Optional[Any] = None # real mode (global)
78
78
  preset: Optional[Any] = None # PresetItem
79
- prompt: str = ""
79
+ prompt: str = "" # user input prompt
80
80
  reply_context: Optional[Any] = None # ReplyContext
81
- request: bool = False # Use normal request instead of quick call
82
- stream: bool = False
83
- system_prompt: str = ""
84
- system_prompt_raw: str = "" # without plugins addons
85
- temperature: float = 1.0
86
- thread_id: str = ""
87
- tools_outputs: list = field(default_factory=list)
81
+ request: bool = False # use normal request instead of quick call
82
+ stream: bool = False # stream enabled
83
+ system_prompt: str = "" # system prompt
84
+ system_prompt_raw: str = "" # system prompt without plugins addons
85
+ temperature: float = 1.0 # temperature
86
+ thread_id: str = "" # OpenAI Assistants thread ID for chat mode
87
+ tools_outputs: list = field(default_factory=list) # list of tools outputs
88
88
 
89
89
  def __init__(self, **kwargs):
90
90
  """
@@ -98,24 +98,24 @@ class BridgeContext:
98
98
  self.ctx = kwargs.get("ctx", None)
99
99
  self.external_functions = list(kwargs.get("external_functions", []))
100
100
  self.file_ids = list(kwargs.get("file_ids", []))
101
- self.force = kwargs.get("force", False) # Force mode flag
102
- self.force_sync = kwargs.get("force_sync", False) # Force sync flag
101
+ self.force = kwargs.get("force", False)
102
+ self.force_sync = kwargs.get("force_sync", False)
103
103
  self.history = list(kwargs.get("history", []))
104
104
  self.idx = kwargs.get("idx", None)
105
105
  self.idx_mode = kwargs.get("idx_mode", "chat")
106
- self.is_expert_call = kwargs.get("is_expert_call", False) # Expert call flag
106
+ self.is_expert_call = kwargs.get("is_expert_call", False)
107
107
  self.max_tokens = kwargs.get("max_tokens", 0)
108
108
  self.mode = kwargs.get("mode", None)
109
- self.model = kwargs.get("model", None) # model instance, not model name
110
- self.multimodal_ctx = kwargs.get("multimodal_ctx", MultimodalContext()) # AudioContext
111
- self.parent_mode = kwargs.get("parent_mode", None) # real mode (global)
112
- self.preset = kwargs.get("preset", None) # PresetItem
109
+ self.model = kwargs.get("model", None)
110
+ self.multimodal_ctx = kwargs.get("multimodal_ctx", MultimodalContext())
111
+ self.parent_mode = kwargs.get("parent_mode", None)
112
+ self.preset = kwargs.get("preset", None)
113
113
  self.prompt = kwargs.get("prompt", "")
114
- self.reply_context = kwargs.get("reply_ctx", kwargs.get("reply_context", None)) # ReplyContext
115
- self.request = kwargs.get("request", False) # Use normal request instead of quick call
114
+ self.reply_context = kwargs.get("reply_ctx", kwargs.get("reply_context", None))
115
+ self.request = kwargs.get("request", False)
116
116
  self.stream = kwargs.get("stream", False)
117
117
  self.system_prompt = kwargs.get("system_prompt", "")
118
- self.system_prompt_raw = kwargs.get("system_prompt_raw", "") # without plugins addons
118
+ self.system_prompt_raw = kwargs.get("system_prompt_raw", "")
119
119
  self.temperature = kwargs.get("temperature", 1.0)
120
120
  self.thread_id = kwargs.get("thread_id", "")
121
121
  self.tools_outputs = list(kwargs.get("tools_outputs", []))
@@ -46,7 +46,8 @@ class BridgeWorker(QRunnable):
46
46
  @Slot()
47
47
  def run(self):
48
48
  """Run bridge worker"""
49
- self.window.core.debug.info("[bridge] Worker started.")
49
+ core = self.window.core
50
+ core.debug.info("[bridge] Worker started.")
50
51
  result = False
51
52
 
52
53
  try:
@@ -64,7 +65,7 @@ class BridgeWorker(QRunnable):
64
65
  if self.mode == MODE_LANGCHAIN:
65
66
  raise Exception("Langchain mode is deprecated from v2.5.20 and no longer supported. ")
66
67
  """
67
- result = self.window.core.chain.call(
68
+ result = core.chain.call(
68
69
  context=self.context,
69
70
  extra=self.extra,
70
71
  )
@@ -74,15 +75,18 @@ class BridgeWorker(QRunnable):
74
75
 
75
76
  # LlamaIndex: chat with files
76
77
  if self.mode == MODE_LLAMA_INDEX:
77
- result = self.window.core.idx.chat.call(
78
+ result = core.idx.chat.call(
78
79
  context=self.context,
79
80
  extra=self.extra,
80
81
  signals=self.signals,
81
82
  )
82
83
 
83
- # LlamaIndex: agents
84
- elif self.mode in [MODE_AGENT_LLAMA, MODE_AGENT_OPENAI]:
85
- result = self.window.core.agents.runner.call(
84
+ # Agents (OpenAI, Llama)
85
+ elif self.mode in (
86
+ MODE_AGENT_LLAMA,
87
+ MODE_AGENT_OPENAI
88
+ ):
89
+ result = core.agents.runner.call(
86
90
  context=self.context,
87
91
  extra=self.extra,
88
92
  signals=self.signals,
@@ -91,11 +95,11 @@ class BridgeWorker(QRunnable):
91
95
  self.cleanup()
92
96
  return # don't emit any signals (handled in agent runner, step by step)
93
97
  else:
94
- self.extra["error"] = str(self.window.core.agents.runner.get_error())
98
+ self.extra["error"] = str(core.agents.runner.get_error())
95
99
 
96
- # Loop: next step
100
+ # Agents loop: next step
97
101
  elif self.mode == MODE_LOOP_NEXT: # virtual mode
98
- result = self.window.core.agents.runner.loop.run_next(
102
+ result = core.agents.runner.loop.run_next(
99
103
  context=self.context,
100
104
  extra=self.extra,
101
105
  signals=self.signals,
@@ -103,27 +107,47 @@ class BridgeWorker(QRunnable):
103
107
  if result:
104
108
  return # don't emit any signals (handled in agent runner, step by step)
105
109
  else:
106
- self.extra["error"] = str(self.window.core.agents.runner.get_error())
110
+ self.extra["error"] = str(core.agents.runner.get_error())
107
111
 
108
112
  # API SDK: chat, completion, vision, image, assistants
109
113
  else:
110
- sdk = "openai"
114
+ sdk = "openai" # default to OpenAI SDK
111
115
  model = self.context.model
112
116
  if model.provider == "google":
113
- if self.window.core.config.get("api_native_google", False):
117
+ if core.config.get("api_native_google", False):
114
118
  sdk = "google"
119
+ elif model.provider == "anthropic":
120
+ if core.config.get("api_native_anthropic", False):
121
+ sdk = "anthropic"
122
+ elif model.provider == "x_ai":
123
+ if core.config.get("api_native_xai", False):
124
+ sdk = "x_ai"
115
125
 
116
126
  # call appropriate SDK
117
127
  if sdk == "google":
118
- # print("Using Google SDK")
119
- result = self.window.core.api.google.call(
128
+ core.debug.info("[bridge] Using Google SDK.")
129
+ result = core.api.google.call(
130
+ context=self.context,
131
+ extra=self.extra,
132
+ rt_signals=self.rt_signals,
133
+ )
134
+ elif sdk == "anthropic":
135
+ core.debug.info("[bridge] Using Anthropic SDK.")
136
+ result = core.api.anthropic.call(
137
+ context=self.context,
138
+ extra=self.extra,
139
+ rt_signals=self.rt_signals,
140
+ )
141
+ elif sdk == "x_ai":
142
+ core.debug.info("[bridge] Using xAI SDK.")
143
+ result = core.api.xai.call(
120
144
  context=self.context,
121
145
  extra=self.extra,
122
146
  rt_signals=self.rt_signals,
123
147
  )
124
148
  elif sdk == "openai":
125
- # print("Using OpenAI SDK")
126
- result = self.window.core.api.openai.call(
149
+ core.debug.info("[bridge] Using OpenAI SDK.")
150
+ result = core.api.openai.call(
127
151
  context=self.context,
128
152
  extra=self.extra,
129
153
  rt_signals=self.rt_signals,
@@ -46,10 +46,12 @@ class Body:
46
46
  <script type="text/javascript" src="qrc:///js/highlight.min.js"></script>
47
47
  <script type="text/javascript" src="qrc:///js/katex.min.js"></script>
48
48
  <script>
49
- hljs.configure({
50
- ignoreUnescapedHTML: true,
51
- });
52
- const DEBUG_MODE = false;
49
+ if (hljs) {
50
+ hljs.configure({
51
+ ignoreUnescapedHTML: true,
52
+ });
53
+ }
54
+ let DEBUG_MODE = false; // allow dynamic enabling via debug console
53
55
  let bridgeConnected = false;
54
56
  let streamHandler;
55
57
  let nodeHandler;
@@ -1351,6 +1353,27 @@ class Body:
1351
1353
  }
1352
1354
  """
1353
1355
 
1356
+ _PERFORMANCE_CSS = """
1357
+ #container, #_nodes_, #_append_output_, #_append_output_before_ {
1358
+ contain: layout paint;
1359
+ overscroll-behavior: contain;
1360
+ backface-visibility: hidden;
1361
+ transform: translateZ(0);
1362
+ }
1363
+ .msg-box {
1364
+ contain: layout paint style;
1365
+ contain-intrinsic-size: 1px 600px;
1366
+ box-shadow: none !important;
1367
+ filter: none !important;
1368
+ }
1369
+ .msg-box:not(:last-child) {
1370
+ content-visibility: auto;
1371
+ }
1372
+ .msg {
1373
+ text-rendering: optimizeSpeed;
1374
+ }
1375
+ """
1376
+
1354
1377
  def __init__(self, window=None):
1355
1378
  """
1356
1379
  HTML Body
@@ -1398,42 +1421,14 @@ class Body:
1398
1421
  cfg = self.window.core.config
1399
1422
  fonts_path = os.path.join(cfg.get_app_path(), "data", "fonts").replace("\\", "/")
1400
1423
  syntax_style = self.window.core.config.get("render.code_syntax") or "default"
1401
- perf_css = """
1402
- #container, #_nodes_, #_append_output_, #_append_output_before_ {
1403
- contain: layout paint;
1404
- overscroll-behavior: contain;
1405
- }
1406
- .msg-box {
1407
- contain: layout paint style;
1408
- contain-intrinsic-size: 1px 600px;
1409
- }
1410
- .msg-box:not(:last-child) {
1411
- content-visibility: auto;
1412
- }
1413
- #container,
1414
- #_nodes_,
1415
- #_append_output_,
1416
- #_append_output_before_ {
1417
- backface-visibility: hidden;
1418
- transform: translateZ(0);
1419
- }
1420
- .msg-box {
1421
- box-shadow: none !important;
1422
- filter: none !important;
1423
- }
1424
- .msg {
1425
- text-rendering: optimizeSpeed;
1426
- }
1427
- """
1428
-
1429
1424
  theme_css = self.window.controller.theme.markdown.get_web_css().replace('%fonts%', fonts_path)
1430
1425
  parts = [
1431
1426
  self._SPINNER,
1427
+ self._SCROLL_FAB_CSS,
1432
1428
  theme_css,
1433
1429
  "pre { color: #fff; }" if syntax_style in self._syntax_dark else "pre { color: #000; }",
1434
1430
  self.highlight.get_style_defs(),
1435
- perf_css,
1436
- self._SCROLL_FAB_CSS, # keep FAB styles last to ensure precedence
1431
+ self._PERFORMANCE_CSS # performance improvements
1437
1432
  ]
1438
1433
  return "\n".join(parts)
1439
1434
 
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.36",
4
- "app.version": "2.6.36",
5
- "updated_at": "2025-09-04T00:00:00"
3
+ "version": "2.6.37",
4
+ "app.version": "2.6.37",
5
+ "updated_at": "2025-09-05T00:00:00"
6
6
  },
7
7
  "access.audio.event.speech": false,
8
8
  "access.audio.event.speech.disabled": [],
@@ -88,11 +88,13 @@
88
88
  "api_key_perplexity": "",
89
89
  "api_key_voyage": "",
90
90
  "api_key_xai": "",
91
+ "api_native_anthropic": true,
91
92
  "api_native_google": true,
92
93
  "api_native_google.app_credentials": "",
93
94
  "api_native_google.cloud_location": "us-central1",
94
95
  "api_native_google.cloud_project": "",
95
96
  "api_native_google.use_vertex": false,
97
+ "api_native_xai": true,
96
98
  "api_proxy": "",
97
99
  "api_use_responses": true,
98
100
  "api_use_responses_llama": false,
@@ -415,6 +417,7 @@
415
417
  "render.engine": "web",
416
418
  "render.open_gl": false,
417
419
  "render.plain": false,
420
+ "remote_tools.anthropic.web_search": true,
418
421
  "remote_tools.code_interpreter": false,
419
422
  "remote_tools.computer_use.env": "",
420
423
  "remote_tools.file_search": false,
@@ -426,6 +429,10 @@
426
429
  "remote_tools.mcp": false,
427
430
  "remote_tools.mcp.args": "{\n \"type\": \"mcp\",\n \"server_label\": \"deepwiki\",\n \"server_url\": \"https://mcp.deepwiki.com/mcp\",\n \"require_approval\": \"never\",\n \"allowed_tools\": [\"ask_question\"]\n}",
428
431
  "remote_tools.web_search": true,
432
+ "remote_tools.xai.mode": "auto",
433
+ "remote_tools.xai.sources.web": true,
434
+ "remote_tools.xai.sources.x": true,
435
+ "remote_tools.xai.sources.news": false,
429
436
  "send_clear": true,
430
437
  "send_mode": 2,
431
438
  "store_history": true,
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.36",
4
- "app.version": "2.6.36",
5
- "updated_at": "2025-09-04T08:03:34"
3
+ "version": "2.6.37",
4
+ "app.version": "2.6.37",
5
+ "updated_at": "2025-09-05T08:03:34"
6
6
  },
7
7
  "items": {
8
8
  "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
@@ -242,6 +242,21 @@
242
242
  "advanced": false,
243
243
  "tab": "Anthropic"
244
244
  },
245
+ "api_native_anthropic": {
246
+ "section": "api_keys",
247
+ "type": "bool",
248
+ "slider": false,
249
+ "label": "settings.api_native_anthropic",
250
+ "description": "settings.api_native_anthropic.desc",
251
+ "value": true,
252
+ "min": null,
253
+ "max": null,
254
+ "multiplier": null,
255
+ "step": null,
256
+ "secret": false,
257
+ "advanced": false,
258
+ "tab": "Anthropic"
259
+ },
245
260
  "api_key_hugging_face": {
246
261
  "section": "api_keys",
247
262
  "type": "text",
@@ -344,6 +359,21 @@
344
359
  "advanced": false,
345
360
  "tab": "xAI"
346
361
  },
362
+ "api_native_xai": {
363
+ "section": "api_keys",
364
+ "type": "bool",
365
+ "slider": false,
366
+ "label": "settings.api_native_xai",
367
+ "description": "settings.api_native_xai.desc",
368
+ "value": true,
369
+ "min": null,
370
+ "max": null,
371
+ "multiplier": null,
372
+ "step": null,
373
+ "secret": false,
374
+ "advanced": false,
375
+ "tab": "xAI"
376
+ },
347
377
  "api_azure_version": {
348
378
  "section": "api_keys",
349
379
  "type": "text",
@@ -1886,6 +1916,81 @@
1886
1916
  "advanced": false,
1887
1917
  "tab": "Google"
1888
1918
  },
1919
+ "remote_tools.anthropic.web_search": {
1920
+ "section": "remote_tools",
1921
+ "type": "bool",
1922
+ "slider": false,
1923
+ "label": "settings.remote_tools.anthropic.web_search",
1924
+ "description": "settings.remote_tools.anthropic.web_search.desc",
1925
+ "value": true,
1926
+ "min": null,
1927
+ "max": null,
1928
+ "multiplier": null,
1929
+ "step": null,
1930
+ "advanced": false,
1931
+ "tab": "Anthropic"
1932
+ },
1933
+ "remote_tools.xai.mode": {
1934
+ "section": "remote_tools",
1935
+ "type": "combo",
1936
+ "slider": false,
1937
+ "label": "settings.remote_tools.xai.mode",
1938
+ "description": "settings.remote_tools.xai.mode.desc",
1939
+ "value": "auto",
1940
+ "keys": [
1941
+ {"auto": "auto"},
1942
+ {"on": "on"},
1943
+ {"off": "off"}
1944
+ ],
1945
+ "min": null,
1946
+ "max": null,
1947
+ "multiplier": null,
1948
+ "step": null,
1949
+ "advanced": false,
1950
+ "tab": "xAI"
1951
+ },
1952
+ "remote_tools.xai.sources.web": {
1953
+ "section": "remote_tools",
1954
+ "type": "bool",
1955
+ "slider": false,
1956
+ "label": "settings.remote_tools.xai.sources.web",
1957
+ "description": "settings.remote_tools.xai.sources.web.desc",
1958
+ "value": true,
1959
+ "min": null,
1960
+ "max": null,
1961
+ "multiplier": null,
1962
+ "step": null,
1963
+ "advanced": false,
1964
+ "tab": "xAI"
1965
+ },
1966
+ "remote_tools.xai.sources.x": {
1967
+ "section": "remote_tools",
1968
+ "type": "bool",
1969
+ "slider": false,
1970
+ "label": "settings.remote_tools.xai.sources.x",
1971
+ "description": "settings.remote_tools.xai.sources.x.desc",
1972
+ "value": true,
1973
+ "min": null,
1974
+ "max": null,
1975
+ "multiplier": null,
1976
+ "step": null,
1977
+ "advanced": false,
1978
+ "tab": "xAI"
1979
+ },
1980
+ "remote_tools.xai.sources.news": {
1981
+ "section": "remote_tools",
1982
+ "type": "bool",
1983
+ "slider": false,
1984
+ "label": "settings.remote_tools.xai.sources.news",
1985
+ "description": "settings.remote_tools.xai.sources.news.desc",
1986
+ "value": true,
1987
+ "min": null,
1988
+ "max": null,
1989
+ "multiplier": null,
1990
+ "step": null,
1991
+ "advanced": false,
1992
+ "tab": "xAI"
1993
+ },
1889
1994
  "llama.idx.list": {
1890
1995
  "section": "llama-index",
1891
1996
  "type": "dict",
@@ -67,9 +67,8 @@ QHeaderView::section {{
67
67
  .label-title {{
68
68
  font-weight: bold;
69
69
  }}
70
- .label-help {{
71
- color: #999;
72
- }}
70
+ .label-help,
71
+ .label-desc,
73
72
  .label-chat-status {{
74
73
  color: #999;
75
74
  }}
@@ -181,9 +181,8 @@ QPushButton {{
181
181
  .label-title {{
182
182
  font-weight: bold;
183
183
  }}
184
- .label-help {{
185
- color: #5d5d5d;
186
- }}
184
+ .label-help,
185
+ .label-desc,
187
186
  .label-chat-status {{
188
187
  color: #5d5d5d;
189
188
  }}
@@ -865,6 +865,7 @@ model.default = Standard im Modus
865
865
  model.extra = Zusätzliche Parameter (JSON)
866
866
  model.extra.desc = Ein JSON-Objekt, das zusätzliche Parameter für das Modell enthält (wie z.B. den intellektuellen Aufwand usw.).
867
867
  model.id = Modell-ID
868
+ model.id.desc = Geben Sie die genaue Modell-ID ein, die vom Anbieter bereitgestellt wird
868
869
  model.input = Eingabe
869
870
  mode.llama_index = Chat mit Dateien
870
871
  mode.llama_index.tooltip = Chat mit zusätzlichem Kontext von LlamaIndex
@@ -888,11 +889,12 @@ model.mode = Modus(e)
888
889
  model.mode.desc = Verfügbare Modi: Chat, Llama_Index, Audio, Research, Completion, Bild, Vision, Assistent, Agent_Llama, Agent, Experte
889
890
  model.multimodal = Multimodal
890
891
  model.name = Name
892
+ model.name.desc = Anzeigename in der Liste, kann beliebig sein
891
893
  model.openai = OpenAI API
892
894
  model.openai.desc = Unterstützt OpenAI API (oder kompatibel)
893
895
  model.output = Ausgabe
894
896
  model.provider = Anbieter
895
- model.provider.desc = LLM-Anbieter
897
+ model.provider.desc = Wählen Sie den Anbieter für das Modell
896
898
  models.importer.all = Alle anzeigen
897
899
  models.importer.available.label = Verfügbare Modelle
898
900
  models.importer.current.default = Bitte wählen Sie einen Anbieter aus der Liste.
@@ -868,6 +868,7 @@ model.default = Default in mode
868
868
  model.extra = Extra parameters (JSON)
869
869
  model.extra.desc = A JSON object containing additional parameters for the model (such as reasoning effort, etc.).
870
870
  model.id = Model ID
871
+ model.id.desc = Enter the exact model ID provided by the provider
871
872
  model.input = Input
872
873
  mode.llama_index = Chat with Files
873
874
  mode.llama_index.tooltip = Chat with additional context provided by LlamaIndex
@@ -891,11 +892,12 @@ model.mode = Mode(s)
891
892
  model.mode.desc = Available modes: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
892
893
  model.multimodal = Multimodal
893
894
  model.name = Name
895
+ model.name.desc = Display name on the list, can be anything
894
896
  model.openai = OpenAI API
895
897
  model.openai.desc = Supports OpenAI API (or compatible)
896
898
  model.output = Output
897
899
  model.provider = Provider
898
- model.provider.desc = LLM provider
900
+ model.provider.desc = Choose the provider for the model
899
901
  models.importer.all = Show all
900
902
  models.importer.available.label = Available models
901
903
  models.importer.current.default = Please select a provider from the list.
@@ -1124,6 +1126,8 @@ settings.api_key.voyage = VoyageAI API KEY
1124
1126
  settings.api_key.voyage.desc = Required for the Voyage API - embeddings for Anthropic and DeepSeek API.
1125
1127
  settings.api_key.xai = xAI API KEY
1126
1128
  settings.api_key.xai.desc = Required for the xAI API and Grok models.
1129
+ settings.api_native_anthropic = Use native API SDK
1130
+ settings.api_native_anthropic.desc = Use native Anthropic SDK instead of compatible OpenAI client
1127
1131
  settings.api_native_google = Use native API SDK
1128
1132
  settings.api_native_google.app_credentials = Google Application credentials (path)
1129
1133
  settings.api_native_google.app_credentials.desc = Absolute path to credentials.json, e.g. /home/user/credentials.json
@@ -1134,6 +1138,8 @@ settings.api_native_google.cloud_project.desc = Provide your Google Cloud projec
1134
1138
  settings.api_native_google.desc = Use native GenAI SDK instead of compatible OpenAI client
1135
1139
  settings.api_native_google.use_vertex = Use VertexAI
1136
1140
  settings.api_native_google.use_vertex.desc = Enable to use VertexAI in Google GenAI SDK
1141
+ settings.api_native_xai = Use native API SDK
1142
+ settings.api_native_xai.desc = Use native xAI SDK instead of compatible OpenAI client
1137
1143
  settings.api_proxy = Proxy address
1138
1144
  settings.api_proxy.desc = Optional, proxy for OpenAI API, e.g. http://proxy.example.com or socks5://user:pass@host:port
1139
1145
  settings.api_use_responses = Use Responses API in Chat mode
@@ -1339,6 +1345,8 @@ settings.prompt.img = Image generation
1339
1345
  settings.prompt.img.desc = Prompt for generating prompts for image model (if raw-mode is disabled). Image / Video modes only.
1340
1346
  settings.prompt.video = Video generation
1341
1347
  settings.prompt.video.desc = Prompt for generating prompts for video model (if raw-mode is disabled). Image / Video modes only.
1348
+ settings.remote_tools.anthropic.web_search = Web Search
1349
+ settings.remote_tools.anthropic.web_search.desc = Enable Web Search remote tool.
1342
1350
  settings.remote_tools.code_interpreter = Code Interpreter
1343
1351
  settings.remote_tools.code_interpreter.desc = Enable `code_interpreter` remote tool - Responses API only.
1344
1352
  settings.remote_tools.file_search = File search
@@ -1359,6 +1367,14 @@ settings.remote_tools.mcp.args.desc = Configuration in JSON format (will be used
1359
1367
  settings.remote_tools.mcp.desc = Enable `mcp` remote tool - Responses API only.
1360
1368
  settings.remote_tools.web_search = Web Search
1361
1369
  settings.remote_tools.web_search.desc = Enable `web_search` remote tool - Responses API only.
1370
+ settings.remote_tools.xai.mode = Live Search mode
1371
+ settings.remote_tools.xai.mode.desc = Select Live Search mode: auto | on | off
1372
+ settings.remote_tools.xai.sources.news = Source: News
1373
+ settings.remote_tools.xai.sources.news.desc = Enable News in Live Search
1374
+ settings.remote_tools.xai.sources.web = Source: Web
1375
+ settings.remote_tools.xai.sources.web.desc = Enable Web in Live Search
1376
+ settings.remote_tools.xai.sources.x = Source: X / Twitter
1377
+ settings.remote_tools.xai.sources.x.desc = Enable X / Twitter in Live Search
1362
1378
  settings.render.code_syntax = Code syntax highlight
1363
1379
  settings.render.engine = Rendering engine
1364
1380
  settings.render.open_gl = OpenGL hardware acceleration
@@ -1405,8 +1421,10 @@ settings.section.model = Models
1405
1421
  settings.section.personalize = Personalize
1406
1422
  settings.section.prompts = Prompts
1407
1423
  settings.section.remote_tools = Remote tools
1424
+ settings.section.remote_tools.Anthropic = Anthropic
1408
1425
  settings.section.remote_tools.google = Google
1409
1426
  settings.section.remote_tools.openai = OpenAI
1427
+ settings.section.remote_tools.xAI = xAI
1410
1428
  settings.section.tab.general = General
1411
1429
  settings.section.updates = Updates
1412
1430
  settings.section.vision = Vision and camera