pygpt-net 2.6.35__py3-none-any.whl → 2.6.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. pygpt_net/CHANGELOG.txt +9 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/handler/anthropic_stream.py +166 -0
  4. pygpt_net/controller/chat/handler/google_stream.py +181 -0
  5. pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
  6. pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
  7. pygpt_net/controller/chat/handler/openai_stream.py +260 -0
  8. pygpt_net/controller/chat/handler/utils.py +210 -0
  9. pygpt_net/controller/chat/handler/worker.py +566 -0
  10. pygpt_net/controller/chat/handler/xai_stream.py +135 -0
  11. pygpt_net/controller/chat/stream.py +1 -1
  12. pygpt_net/controller/ctx/ctx.py +1 -1
  13. pygpt_net/controller/model/editor.py +3 -0
  14. pygpt_net/core/bridge/context.py +35 -35
  15. pygpt_net/core/bridge/worker.py +40 -16
  16. pygpt_net/core/render/web/body.py +39 -15
  17. pygpt_net/core/render/web/renderer.py +4 -4
  18. pygpt_net/data/config/config.json +10 -3
  19. pygpt_net/data/config/models.json +3 -3
  20. pygpt_net/data/config/settings.json +105 -0
  21. pygpt_net/data/css/style.dark.css +2 -3
  22. pygpt_net/data/css/style.light.css +2 -3
  23. pygpt_net/data/css/web-blocks.css +1 -1
  24. pygpt_net/data/css/web-chatgpt.css +1 -1
  25. pygpt_net/data/css/web-chatgpt_wide.css +1 -1
  26. pygpt_net/data/locale/locale.de.ini +3 -1
  27. pygpt_net/data/locale/locale.en.ini +19 -1
  28. pygpt_net/data/locale/locale.es.ini +3 -1
  29. pygpt_net/data/locale/locale.fr.ini +3 -1
  30. pygpt_net/data/locale/locale.it.ini +3 -1
  31. pygpt_net/data/locale/locale.pl.ini +4 -2
  32. pygpt_net/data/locale/locale.uk.ini +3 -1
  33. pygpt_net/data/locale/locale.zh.ini +3 -1
  34. pygpt_net/provider/api/__init__.py +5 -3
  35. pygpt_net/provider/api/anthropic/__init__.py +190 -29
  36. pygpt_net/provider/api/anthropic/audio.py +30 -0
  37. pygpt_net/provider/api/anthropic/chat.py +341 -0
  38. pygpt_net/provider/api/anthropic/image.py +25 -0
  39. pygpt_net/provider/api/anthropic/tools.py +266 -0
  40. pygpt_net/provider/api/anthropic/vision.py +142 -0
  41. pygpt_net/provider/api/google/chat.py +2 -2
  42. pygpt_net/provider/api/google/tools.py +58 -48
  43. pygpt_net/provider/api/google/vision.py +7 -1
  44. pygpt_net/provider/api/openai/chat.py +1 -0
  45. pygpt_net/provider/api/openai/vision.py +6 -0
  46. pygpt_net/provider/api/x_ai/__init__.py +247 -0
  47. pygpt_net/provider/api/x_ai/audio.py +32 -0
  48. pygpt_net/provider/api/x_ai/chat.py +968 -0
  49. pygpt_net/provider/api/x_ai/image.py +208 -0
  50. pygpt_net/provider/api/x_ai/remote.py +262 -0
  51. pygpt_net/provider/api/x_ai/tools.py +120 -0
  52. pygpt_net/provider/api/x_ai/vision.py +119 -0
  53. pygpt_net/provider/core/config/patch.py +37 -0
  54. pygpt_net/provider/llms/anthropic.py +4 -2
  55. pygpt_net/ui/base/config_dialog.py +5 -11
  56. pygpt_net/ui/dialog/models.py +2 -4
  57. pygpt_net/ui/dialog/plugins.py +40 -43
  58. pygpt_net/ui/widget/element/labels.py +19 -3
  59. pygpt_net/ui/widget/textarea/web.py +1 -1
  60. {pygpt_net-2.6.35.dist-info → pygpt_net-2.6.37.dist-info}/METADATA +15 -6
  61. {pygpt_net-2.6.35.dist-info → pygpt_net-2.6.37.dist-info}/RECORD +64 -45
  62. pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
  63. {pygpt_net-2.6.35.dist-info → pygpt_net-2.6.37.dist-info}/LICENSE +0 -0
  64. {pygpt_net-2.6.35.dist-info → pygpt_net-2.6.37.dist-info}/WHEEL +0 -0
  65. {pygpt_net-2.6.35.dist-info → pygpt_net-2.6.37.dist-info}/entry_points.txt +0 -0
@@ -18,7 +18,7 @@ from pygpt_net.core.events import RenderEvent
18
18
  from pygpt_net.core.types import MODE_ASSISTANT
19
19
  from pygpt_net.item.ctx import CtxItem
20
20
 
21
- from .handler.stream_worker import StreamWorker
21
+ from .handler.worker import StreamWorker
22
22
 
23
23
  class Stream:
24
24
  def __init__(self, window=None):
@@ -295,7 +295,7 @@ class Ctx:
295
295
 
296
296
  def clean_memory(self):
297
297
  """Clean memory"""
298
- self.window.core.api.openai.close() # clear gpt client
298
+ pass
299
299
 
300
300
  def new_in_group(
301
301
  self,
@@ -37,15 +37,18 @@ class Editor:
37
37
  "id": {
38
38
  "type": "text",
39
39
  "label": "model.id",
40
+ "description": "model.id.desc",
40
41
  },
41
42
  "name": {
42
43
  "type": "text",
43
44
  "label": "model.name",
45
+ "description": "model.name.desc",
44
46
  },
45
47
  "provider": {
46
48
  "type": "combo",
47
49
  "use": "llm_providers",
48
50
  "label": "model.provider",
51
+ "description": "model.provider.desc",
49
52
  },
50
53
  "ctx": {
51
54
  "type": "int",
@@ -22,10 +22,10 @@ class MultimodalContext:
22
22
  """
23
23
  Multimodal context
24
24
  """
25
- is_audio_input: bool = False
26
- is_audio_output: bool = False
27
- audio_data: Optional[Any] = None
28
- audio_format: str = "wav"
25
+ is_audio_input: bool = False # is audio input
26
+ is_audio_output: bool = False # is audio output
27
+ audio_data: Optional[Any] = None # audio data (bytes or file-like object)
28
+ audio_format: str = "wav" # audio format (wav, mp3, etc.)
29
29
 
30
30
  def __init__(self, **kwargs):
31
31
  """
@@ -59,32 +59,32 @@ class BridgeContext:
59
59
  """
60
60
  Bridge context
61
61
  """
62
- assistant_id: str = ""
63
- attachments: dict = field(default_factory=dict)
64
- ctx: Optional[CtxItem] = None
65
- external_functions: list = field(default_factory=list)
66
- file_ids: list = field(default_factory=list)
67
- force: bool = False # Force mode flag
68
- force_sync: bool = False # Force sync flag
69
- history: list = field(default_factory=list)
70
- idx: Optional[Any] = None
71
- idx_mode: str = "chat"
72
- is_expert_call: bool = False # Expert call flag
73
- max_tokens: int = 0
74
- mode: Optional[Any] = None
62
+ assistant_id: str = "" # OpenAI Assistant ID
63
+ attachments: dict = field(default_factory=dict) # id -> AttachmentItem
64
+ ctx: Optional[CtxItem] = None # CtxItem instance
65
+ external_functions: list = field(default_factory=list) # list of tools definitions
66
+ file_ids: list = field(default_factory=list) # list of uploaded file IDs
67
+ force: bool = False # force send
68
+ force_sync: bool = False # force synchronous plugin call
69
+ history: list = field(default_factory=list) # list of messages
70
+ idx: Optional[Any] = None # index name for LlamaIndex
71
+ idx_mode: str = "chat" # sub-mode for LlamaIndex
72
+ is_expert_call: bool = False # is expert call
73
+ max_tokens: int = 0 # max tokens
74
+ mode: Optional[Any] = None # mode to use
75
75
  model: Optional[ModelItem] = None # model instance, not model name
76
76
  multimodal_ctx: MultimodalContext = field(default_factory=lambda: MultimodalContext()) # AudioContext
77
77
  parent_mode: Optional[Any] = None # real mode (global)
78
78
  preset: Optional[Any] = None # PresetItem
79
- prompt: str = ""
79
+ prompt: str = "" # user input prompt
80
80
  reply_context: Optional[Any] = None # ReplyContext
81
- request: bool = False # Use normal request instead of quick call
82
- stream: bool = False
83
- system_prompt: str = ""
84
- system_prompt_raw: str = "" # without plugins addons
85
- temperature: float = 1.0
86
- thread_id: str = ""
87
- tools_outputs: list = field(default_factory=list)
81
+ request: bool = False # use normal request instead of quick call
82
+ stream: bool = False # stream enabled
83
+ system_prompt: str = "" # system prompt
84
+ system_prompt_raw: str = "" # system prompt without plugins addons
85
+ temperature: float = 1.0 # temperature
86
+ thread_id: str = "" # OpenAI Assistants thread ID for chat mode
87
+ tools_outputs: list = field(default_factory=list) # list of tools outputs
88
88
 
89
89
  def __init__(self, **kwargs):
90
90
  """
@@ -98,24 +98,24 @@ class BridgeContext:
98
98
  self.ctx = kwargs.get("ctx", None)
99
99
  self.external_functions = list(kwargs.get("external_functions", []))
100
100
  self.file_ids = list(kwargs.get("file_ids", []))
101
- self.force = kwargs.get("force", False) # Force mode flag
102
- self.force_sync = kwargs.get("force_sync", False) # Force sync flag
101
+ self.force = kwargs.get("force", False)
102
+ self.force_sync = kwargs.get("force_sync", False)
103
103
  self.history = list(kwargs.get("history", []))
104
104
  self.idx = kwargs.get("idx", None)
105
105
  self.idx_mode = kwargs.get("idx_mode", "chat")
106
- self.is_expert_call = kwargs.get("is_expert_call", False) # Expert call flag
106
+ self.is_expert_call = kwargs.get("is_expert_call", False)
107
107
  self.max_tokens = kwargs.get("max_tokens", 0)
108
108
  self.mode = kwargs.get("mode", None)
109
- self.model = kwargs.get("model", None) # model instance, not model name
110
- self.multimodal_ctx = kwargs.get("multimodal_ctx", MultimodalContext()) # AudioContext
111
- self.parent_mode = kwargs.get("parent_mode", None) # real mode (global)
112
- self.preset = kwargs.get("preset", None) # PresetItem
109
+ self.model = kwargs.get("model", None)
110
+ self.multimodal_ctx = kwargs.get("multimodal_ctx", MultimodalContext())
111
+ self.parent_mode = kwargs.get("parent_mode", None)
112
+ self.preset = kwargs.get("preset", None)
113
113
  self.prompt = kwargs.get("prompt", "")
114
- self.reply_context = kwargs.get("reply_ctx", kwargs.get("reply_context", None)) # ReplyContext
115
- self.request = kwargs.get("request", False) # Use normal request instead of quick call
114
+ self.reply_context = kwargs.get("reply_ctx", kwargs.get("reply_context", None))
115
+ self.request = kwargs.get("request", False)
116
116
  self.stream = kwargs.get("stream", False)
117
117
  self.system_prompt = kwargs.get("system_prompt", "")
118
- self.system_prompt_raw = kwargs.get("system_prompt_raw", "") # without plugins addons
118
+ self.system_prompt_raw = kwargs.get("system_prompt_raw", "")
119
119
  self.temperature = kwargs.get("temperature", 1.0)
120
120
  self.thread_id = kwargs.get("thread_id", "")
121
121
  self.tools_outputs = list(kwargs.get("tools_outputs", []))
@@ -46,7 +46,8 @@ class BridgeWorker(QRunnable):
46
46
  @Slot()
47
47
  def run(self):
48
48
  """Run bridge worker"""
49
- self.window.core.debug.info("[bridge] Worker started.")
49
+ core = self.window.core
50
+ core.debug.info("[bridge] Worker started.")
50
51
  result = False
51
52
 
52
53
  try:
@@ -64,7 +65,7 @@ class BridgeWorker(QRunnable):
64
65
  if self.mode == MODE_LANGCHAIN:
65
66
  raise Exception("Langchain mode is deprecated from v2.5.20 and no longer supported. ")
66
67
  """
67
- result = self.window.core.chain.call(
68
+ result = core.chain.call(
68
69
  context=self.context,
69
70
  extra=self.extra,
70
71
  )
@@ -74,15 +75,18 @@ class BridgeWorker(QRunnable):
74
75
 
75
76
  # LlamaIndex: chat with files
76
77
  if self.mode == MODE_LLAMA_INDEX:
77
- result = self.window.core.idx.chat.call(
78
+ result = core.idx.chat.call(
78
79
  context=self.context,
79
80
  extra=self.extra,
80
81
  signals=self.signals,
81
82
  )
82
83
 
83
- # LlamaIndex: agents
84
- elif self.mode in [MODE_AGENT_LLAMA, MODE_AGENT_OPENAI]:
85
- result = self.window.core.agents.runner.call(
84
+ # Agents (OpenAI, Llama)
85
+ elif self.mode in (
86
+ MODE_AGENT_LLAMA,
87
+ MODE_AGENT_OPENAI
88
+ ):
89
+ result = core.agents.runner.call(
86
90
  context=self.context,
87
91
  extra=self.extra,
88
92
  signals=self.signals,
@@ -91,11 +95,11 @@ class BridgeWorker(QRunnable):
91
95
  self.cleanup()
92
96
  return # don't emit any signals (handled in agent runner, step by step)
93
97
  else:
94
- self.extra["error"] = str(self.window.core.agents.runner.get_error())
98
+ self.extra["error"] = str(core.agents.runner.get_error())
95
99
 
96
- # Loop: next step
100
+ # Agents loop: next step
97
101
  elif self.mode == MODE_LOOP_NEXT: # virtual mode
98
- result = self.window.core.agents.runner.loop.run_next(
102
+ result = core.agents.runner.loop.run_next(
99
103
  context=self.context,
100
104
  extra=self.extra,
101
105
  signals=self.signals,
@@ -103,27 +107,47 @@ class BridgeWorker(QRunnable):
103
107
  if result:
104
108
  return # don't emit any signals (handled in agent runner, step by step)
105
109
  else:
106
- self.extra["error"] = str(self.window.core.agents.runner.get_error())
110
+ self.extra["error"] = str(core.agents.runner.get_error())
107
111
 
108
112
  # API SDK: chat, completion, vision, image, assistants
109
113
  else:
110
- sdk = "openai"
114
+ sdk = "openai" # default to OpenAI SDK
111
115
  model = self.context.model
112
116
  if model.provider == "google":
113
- if self.window.core.config.get("api_native_google", False):
117
+ if core.config.get("api_native_google", False):
114
118
  sdk = "google"
119
+ elif model.provider == "anthropic":
120
+ if core.config.get("api_native_anthropic", False):
121
+ sdk = "anthropic"
122
+ elif model.provider == "x_ai":
123
+ if core.config.get("api_native_xai", False):
124
+ sdk = "x_ai"
115
125
 
116
126
  # call appropriate SDK
117
127
  if sdk == "google":
118
- # print("Using Google SDK")
119
- result = self.window.core.api.google.call(
128
+ core.debug.info("[bridge] Using Google SDK.")
129
+ result = core.api.google.call(
130
+ context=self.context,
131
+ extra=self.extra,
132
+ rt_signals=self.rt_signals,
133
+ )
134
+ elif sdk == "anthropic":
135
+ core.debug.info("[bridge] Using Anthropic SDK.")
136
+ result = core.api.anthropic.call(
137
+ context=self.context,
138
+ extra=self.extra,
139
+ rt_signals=self.rt_signals,
140
+ )
141
+ elif sdk == "x_ai":
142
+ core.debug.info("[bridge] Using xAI SDK.")
143
+ result = core.api.xai.call(
120
144
  context=self.context,
121
145
  extra=self.extra,
122
146
  rt_signals=self.rt_signals,
123
147
  )
124
148
  elif sdk == "openai":
125
- # print("Using OpenAI SDK")
126
- result = self.window.core.api.openai.call(
149
+ core.debug.info("[bridge] Using OpenAI SDK.")
150
+ result = core.api.openai.call(
127
151
  context=self.context,
128
152
  extra=self.extra,
129
153
  rt_signals=self.rt_signals,
@@ -46,7 +46,12 @@ class Body:
46
46
  <script type="text/javascript" src="qrc:///js/highlight.min.js"></script>
47
47
  <script type="text/javascript" src="qrc:///js/katex.min.js"></script>
48
48
  <script>
49
- const DEBUG_MODE = false;
49
+ if (hljs) {
50
+ hljs.configure({
51
+ ignoreUnescapedHTML: true,
52
+ });
53
+ }
54
+ let DEBUG_MODE = false; // allow dynamic enabling via debug console
50
55
  let bridgeConnected = false;
51
56
  let streamHandler;
52
57
  let nodeHandler;
@@ -248,8 +253,7 @@ class Body:
248
253
  });
249
254
  }
250
255
  function highlightCodeInternal(root, withMath) {
251
- (root || document).querySelectorAll('pre code').forEach(el => {
252
- try { if (el.dataset) delete el.dataset.highlighted; } catch (e) {}
256
+ (root || document).querySelectorAll('pre code:not(.hljs)').forEach(el => {
253
257
  hljs.highlightElement(el);
254
258
  });
255
259
  if (withMath) {
@@ -342,7 +346,7 @@ class Body:
342
346
  el.scrollTop = el.scrollHeight; // no behavior, no RAF, deterministic
343
347
  prevScroll = el.scrollHeight;
344
348
  }
345
- function scrollToBottom(live = false) {
349
+ function scrollToBottom(live = false, force = false) {
346
350
  const el = document.scrollingElement || document.documentElement;
347
351
  const marginPx = 450;
348
352
  const behavior = (live === true) ? 'instant' : 'smooth';
@@ -355,7 +359,7 @@ class Body:
355
359
  }
356
360
 
357
361
  // Allow initial auto-follow before any user interaction
358
- if ((live === true && userInteracted === false) || isNearBottom(marginPx) || live == false) {
362
+ if ((live === true && userInteracted === false) || isNearBottom(marginPx) || live == false || force) {
359
363
  el.scrollTo({ top: el.scrollHeight, behavior });
360
364
  }
361
365
  prevScroll = el.scrollHeight;
@@ -380,6 +384,7 @@ class Body:
380
384
  return element;
381
385
  }
382
386
  function appendNode(content) {
387
+ userInteracted = false;
383
388
  if (DEBUG_MODE) {
384
389
  log("APPEND NODE: {" + content + "}");
385
390
  }
@@ -396,6 +401,7 @@ class Body:
396
401
  clearHighlightCache();
397
402
  }
398
403
  function replaceNodes(content) {
404
+ userInteracted = false;
399
405
  if (DEBUG_MODE) {
400
406
  log("REPLACE NODES: {" + content + "}");
401
407
  }
@@ -407,7 +413,7 @@ class Body:
407
413
  element.replaceChildren();
408
414
  element.insertAdjacentHTML('beforeend', content);
409
415
  highlightCode(true, element);
410
- scrollToBottom(false); // without schedule
416
+ scrollToBottom(false, true); // without schedule
411
417
  scheduleScrollFabUpdate();
412
418
  }
413
419
  clearHighlightCache();
@@ -434,10 +440,7 @@ class Body:
434
440
  */
435
441
  }
436
442
  function clearHighlightCache() {
437
- const elements = document.querySelectorAll('pre code');
438
- elements.forEach(function(el) {
439
- try { if (el.dataset) delete el.dataset.highlighted; } catch (e) {}
440
- });
443
+ //
441
444
  }
442
445
  function appendExtra(id, content) {
443
446
  hideTips();
@@ -522,14 +525,14 @@ class Body:
522
525
  clearOutput();
523
526
  // Ensure initial auto-follow baseline before any chunks overflow
524
527
  forceScrollToBottomImmediate();
525
- scheduleScroll(); // keep existing logic
528
+ scheduleScroll();
526
529
  }
527
530
  function endStream() {
528
531
  if (DEBUG_MODE) {
529
532
  log("STREAM END");
530
533
  }
531
534
  clearOutput();
532
- bridgeReconnect();
535
+ bridgeReconnect();
533
536
  }
534
537
  function enqueueStream(name_header, content, chunk, replace = false, is_code_block = false) {
535
538
  // Push incoming chunk; scheduling is done with RAF to batch DOM ops
@@ -1154,7 +1157,7 @@ class Body:
1154
1157
  window.addEventListener('resize', scheduleScrollFabUpdate, { passive: true });
1155
1158
 
1156
1159
  // Initial state
1157
- scheduleScrollFabUpdate();
1160
+ scheduleScrollFabUpdate();
1158
1161
 
1159
1162
  container.addEventListener('click', function(event) {
1160
1163
  const copyButton = event.target.closest('.code-header-copy');
@@ -1350,6 +1353,27 @@ class Body:
1350
1353
  }
1351
1354
  """
1352
1355
 
1356
+ _PERFORMANCE_CSS = """
1357
+ #container, #_nodes_, #_append_output_, #_append_output_before_ {
1358
+ contain: layout paint;
1359
+ overscroll-behavior: contain;
1360
+ backface-visibility: hidden;
1361
+ transform: translateZ(0);
1362
+ }
1363
+ .msg-box {
1364
+ contain: layout paint style;
1365
+ contain-intrinsic-size: 1px 600px;
1366
+ box-shadow: none !important;
1367
+ filter: none !important;
1368
+ }
1369
+ .msg-box:not(:last-child) {
1370
+ content-visibility: auto;
1371
+ }
1372
+ .msg {
1373
+ text-rendering: optimizeSpeed;
1374
+ }
1375
+ """
1376
+
1353
1377
  def __init__(self, window=None):
1354
1378
  """
1355
1379
  HTML Body
@@ -1397,14 +1421,14 @@ class Body:
1397
1421
  cfg = self.window.core.config
1398
1422
  fonts_path = os.path.join(cfg.get_app_path(), "data", "fonts").replace("\\", "/")
1399
1423
  syntax_style = self.window.core.config.get("render.code_syntax") or "default"
1400
-
1401
1424
  theme_css = self.window.controller.theme.markdown.get_web_css().replace('%fonts%', fonts_path)
1402
1425
  parts = [
1403
1426
  self._SPINNER,
1427
+ self._SCROLL_FAB_CSS,
1404
1428
  theme_css,
1405
1429
  "pre { color: #fff; }" if syntax_style in self._syntax_dark else "pre { color: #000; }",
1406
1430
  self.highlight.get_style_defs(),
1407
- self._SCROLL_FAB_CSS, # keep FAB styles last to ensure precedence
1431
+ self._PERFORMANCE_CSS # performance improvements
1408
1432
  ]
1409
1433
  return "\n".join(parts)
1410
1434
 
@@ -1488,12 +1488,12 @@ class Renderer(BaseRenderer):
1488
1488
  """
1489
1489
  try:
1490
1490
  if replace:
1491
- self.get_output_node_by_pid(pid).page().bridge.nodeReplace.emit(
1492
- self.sanitize_html(html)
1491
+ self.get_output_node_by_pid(pid).page().runJavaScript(
1492
+ f"if (typeof window.replaceNodes !== 'undefined') replaceNodes({self.to_json(self.sanitize_html(html))});"
1493
1493
  )
1494
1494
  else:
1495
- self.get_output_node_by_pid(pid).page().bridge.node.emit(
1496
- self.sanitize_html(html)
1495
+ self.get_output_node_by_pid(pid).page().runJavaScript(
1496
+ f"if (typeof window.appendNode !== 'undefined') appendNode({self.to_json(self.sanitize_html(html))});"
1497
1497
  )
1498
1498
  except Exception:
1499
1499
  pass
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.35",
4
- "app.version": "2.6.35",
5
- "updated_at": "2025-09-04T00:00:00"
3
+ "version": "2.6.37",
4
+ "app.version": "2.6.37",
5
+ "updated_at": "2025-09-05T00:00:00"
6
6
  },
7
7
  "access.audio.event.speech": false,
8
8
  "access.audio.event.speech.disabled": [],
@@ -88,11 +88,13 @@
88
88
  "api_key_perplexity": "",
89
89
  "api_key_voyage": "",
90
90
  "api_key_xai": "",
91
+ "api_native_anthropic": true,
91
92
  "api_native_google": true,
92
93
  "api_native_google.app_credentials": "",
93
94
  "api_native_google.cloud_location": "us-central1",
94
95
  "api_native_google.cloud_project": "",
95
96
  "api_native_google.use_vertex": false,
97
+ "api_native_xai": true,
96
98
  "api_proxy": "",
97
99
  "api_use_responses": true,
98
100
  "api_use_responses_llama": false,
@@ -415,6 +417,7 @@
415
417
  "render.engine": "web",
416
418
  "render.open_gl": false,
417
419
  "render.plain": false,
420
+ "remote_tools.anthropic.web_search": true,
418
421
  "remote_tools.code_interpreter": false,
419
422
  "remote_tools.computer_use.env": "",
420
423
  "remote_tools.file_search": false,
@@ -426,6 +429,10 @@
426
429
  "remote_tools.mcp": false,
427
430
  "remote_tools.mcp.args": "{\n \"type\": \"mcp\",\n \"server_label\": \"deepwiki\",\n \"server_url\": \"https://mcp.deepwiki.com/mcp\",\n \"require_approval\": \"never\",\n \"allowed_tools\": [\"ask_question\"]\n}",
428
431
  "remote_tools.web_search": true,
432
+ "remote_tools.xai.mode": "auto",
433
+ "remote_tools.xai.sources.web": true,
434
+ "remote_tools.xai.sources.x": true,
435
+ "remote_tools.xai.sources.news": false,
429
436
  "send_clear": true,
430
437
  "send_mode": 2,
431
438
  "store_history": true,
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.35",
4
- "app.version": "2.6.35",
5
- "updated_at": "2025-09-04T08:03:34"
3
+ "version": "2.6.37",
4
+ "app.version": "2.6.37",
5
+ "updated_at": "2025-09-05T08:03:34"
6
6
  },
7
7
  "items": {
8
8
  "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
@@ -242,6 +242,21 @@
242
242
  "advanced": false,
243
243
  "tab": "Anthropic"
244
244
  },
245
+ "api_native_anthropic": {
246
+ "section": "api_keys",
247
+ "type": "bool",
248
+ "slider": false,
249
+ "label": "settings.api_native_anthropic",
250
+ "description": "settings.api_native_anthropic.desc",
251
+ "value": true,
252
+ "min": null,
253
+ "max": null,
254
+ "multiplier": null,
255
+ "step": null,
256
+ "secret": false,
257
+ "advanced": false,
258
+ "tab": "Anthropic"
259
+ },
245
260
  "api_key_hugging_face": {
246
261
  "section": "api_keys",
247
262
  "type": "text",
@@ -344,6 +359,21 @@
344
359
  "advanced": false,
345
360
  "tab": "xAI"
346
361
  },
362
+ "api_native_xai": {
363
+ "section": "api_keys",
364
+ "type": "bool",
365
+ "slider": false,
366
+ "label": "settings.api_native_xai",
367
+ "description": "settings.api_native_xai.desc",
368
+ "value": true,
369
+ "min": null,
370
+ "max": null,
371
+ "multiplier": null,
372
+ "step": null,
373
+ "secret": false,
374
+ "advanced": false,
375
+ "tab": "xAI"
376
+ },
347
377
  "api_azure_version": {
348
378
  "section": "api_keys",
349
379
  "type": "text",
@@ -1886,6 +1916,81 @@
1886
1916
  "advanced": false,
1887
1917
  "tab": "Google"
1888
1918
  },
1919
+ "remote_tools.anthropic.web_search": {
1920
+ "section": "remote_tools",
1921
+ "type": "bool",
1922
+ "slider": false,
1923
+ "label": "settings.remote_tools.anthropic.web_search",
1924
+ "description": "settings.remote_tools.anthropic.web_search.desc",
1925
+ "value": true,
1926
+ "min": null,
1927
+ "max": null,
1928
+ "multiplier": null,
1929
+ "step": null,
1930
+ "advanced": false,
1931
+ "tab": "Anthropic"
1932
+ },
1933
+ "remote_tools.xai.mode": {
1934
+ "section": "remote_tools",
1935
+ "type": "combo",
1936
+ "slider": false,
1937
+ "label": "settings.remote_tools.xai.mode",
1938
+ "description": "settings.remote_tools.xai.mode.desc",
1939
+ "value": "auto",
1940
+ "keys": [
1941
+ {"auto": "auto"},
1942
+ {"on": "on"},
1943
+ {"off": "off"}
1944
+ ],
1945
+ "min": null,
1946
+ "max": null,
1947
+ "multiplier": null,
1948
+ "step": null,
1949
+ "advanced": false,
1950
+ "tab": "xAI"
1951
+ },
1952
+ "remote_tools.xai.sources.web": {
1953
+ "section": "remote_tools",
1954
+ "type": "bool",
1955
+ "slider": false,
1956
+ "label": "settings.remote_tools.xai.sources.web",
1957
+ "description": "settings.remote_tools.xai.sources.web.desc",
1958
+ "value": true,
1959
+ "min": null,
1960
+ "max": null,
1961
+ "multiplier": null,
1962
+ "step": null,
1963
+ "advanced": false,
1964
+ "tab": "xAI"
1965
+ },
1966
+ "remote_tools.xai.sources.x": {
1967
+ "section": "remote_tools",
1968
+ "type": "bool",
1969
+ "slider": false,
1970
+ "label": "settings.remote_tools.xai.sources.x",
1971
+ "description": "settings.remote_tools.xai.sources.x.desc",
1972
+ "value": true,
1973
+ "min": null,
1974
+ "max": null,
1975
+ "multiplier": null,
1976
+ "step": null,
1977
+ "advanced": false,
1978
+ "tab": "xAI"
1979
+ },
1980
+ "remote_tools.xai.sources.news": {
1981
+ "section": "remote_tools",
1982
+ "type": "bool",
1983
+ "slider": false,
1984
+ "label": "settings.remote_tools.xai.sources.news",
1985
+ "description": "settings.remote_tools.xai.sources.news.desc",
1986
+ "value": true,
1987
+ "min": null,
1988
+ "max": null,
1989
+ "multiplier": null,
1990
+ "step": null,
1991
+ "advanced": false,
1992
+ "tab": "xAI"
1993
+ },
1889
1994
  "llama.idx.list": {
1890
1995
  "section": "llama-index",
1891
1996
  "type": "dict",
@@ -67,9 +67,8 @@ QHeaderView::section {{
67
67
  .label-title {{
68
68
  font-weight: bold;
69
69
  }}
70
- .label-help {{
71
- color: #999;
72
- }}
70
+ .label-help,
71
+ .label-desc,
73
72
  .label-chat-status {{
74
73
  color: #999;
75
74
  }}
@@ -181,9 +181,8 @@ QPushButton {{
181
181
  .label-title {{
182
182
  font-weight: bold;
183
183
  }}
184
- .label-help {{
185
- color: #5d5d5d;
186
- }}
184
+ .label-help,
185
+ .label-desc,
187
186
  .label-chat-status {{
188
187
  color: #5d5d5d;
189
188
  }}
@@ -23,7 +23,7 @@ body {{
23
23
  }}
24
24
  ::-webkit-scrollbar-thumb {{
25
25
  -webkit-border-radius: 1ex;
26
- -webkit-box-shadow: 0px 1px 2px rgba(0, 0, 0, 0.75);
26
+ -webkit-box-shadow: none;
27
27
  }}
28
28
  #container {{
29
29
  will-change: transform, opacity;
@@ -24,7 +24,7 @@ body {{
24
24
  }}
25
25
  ::-webkit-scrollbar-thumb {{
26
26
  -webkit-border-radius: 1ex;
27
- -webkit-box-shadow: 0px 1px 2px rgba(0, 0, 0, 0.75);
27
+ -webkit-box-shadow: none;
28
28
  }}
29
29
  #container {{
30
30
  will-change: transform, opacity;
@@ -24,7 +24,7 @@ body {{
24
24
  }}
25
25
  ::-webkit-scrollbar-thumb {{
26
26
  -webkit-border-radius: 1ex;
27
- -webkit-box-shadow: 0px 1px 2px rgba(0, 0, 0, 0.75);
27
+ -webkit-box-shadow: none;
28
28
  }}
29
29
  #container {{
30
30
  will-change: transform, opacity;