pygpt-net 2.7.5__py3-none-any.whl → 2.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. pygpt_net/CHANGELOG.txt +14 -0
  2. pygpt_net/__init__.py +4 -4
  3. pygpt_net/controller/chat/remote_tools.py +3 -9
  4. pygpt_net/controller/chat/stream.py +2 -2
  5. pygpt_net/controller/chat/{handler/worker.py → stream_worker.py} +20 -64
  6. pygpt_net/controller/debug/fixtures.py +3 -2
  7. pygpt_net/controller/files/files.py +65 -4
  8. pygpt_net/core/debug/models.py +2 -2
  9. pygpt_net/core/filesystem/url.py +4 -1
  10. pygpt_net/core/render/web/body.py +3 -2
  11. pygpt_net/core/types/chunk.py +27 -0
  12. pygpt_net/data/config/config.json +14 -4
  13. pygpt_net/data/config/models.json +192 -4
  14. pygpt_net/data/config/settings.json +126 -36
  15. pygpt_net/data/js/app/template.js +1 -1
  16. pygpt_net/data/js/app.min.js +2 -2
  17. pygpt_net/data/locale/locale.de.ini +5 -0
  18. pygpt_net/data/locale/locale.en.ini +35 -8
  19. pygpt_net/data/locale/locale.es.ini +5 -0
  20. pygpt_net/data/locale/locale.fr.ini +5 -0
  21. pygpt_net/data/locale/locale.it.ini +5 -0
  22. pygpt_net/data/locale/locale.pl.ini +5 -0
  23. pygpt_net/data/locale/locale.uk.ini +5 -0
  24. pygpt_net/data/locale/locale.zh.ini +5 -0
  25. pygpt_net/data/locale/plugin.cmd_mouse_control.en.ini +2 -2
  26. pygpt_net/item/ctx.py +3 -5
  27. pygpt_net/js_rc.py +2449 -2447
  28. pygpt_net/plugin/cmd_mouse_control/config.py +8 -7
  29. pygpt_net/plugin/cmd_mouse_control/plugin.py +3 -4
  30. pygpt_net/plugin/cmd_mouse_control/worker.py +2 -1
  31. pygpt_net/plugin/cmd_mouse_control/worker_sandbox.py +2 -1
  32. pygpt_net/provider/api/anthropic/__init__.py +16 -9
  33. pygpt_net/provider/api/anthropic/chat.py +259 -11
  34. pygpt_net/provider/api/anthropic/computer.py +844 -0
  35. pygpt_net/provider/api/anthropic/remote_tools.py +172 -0
  36. pygpt_net/{controller/chat/handler/anthropic_stream.py → provider/api/anthropic/stream.py} +24 -10
  37. pygpt_net/provider/api/anthropic/tools.py +32 -77
  38. pygpt_net/provider/api/anthropic/utils.py +30 -0
  39. pygpt_net/provider/api/google/__init__.py +6 -5
  40. pygpt_net/provider/api/google/chat.py +3 -8
  41. pygpt_net/{controller/chat/handler/google_stream.py → provider/api/google/stream.py} +1 -1
  42. pygpt_net/provider/api/google/utils.py +185 -0
  43. pygpt_net/{controller/chat/handler → provider/api/langchain}/__init__.py +0 -0
  44. pygpt_net/{controller/chat/handler/langchain_stream.py → provider/api/langchain/stream.py} +1 -1
  45. pygpt_net/provider/api/llama_index/__init__.py +0 -0
  46. pygpt_net/{controller/chat/handler/llamaindex_stream.py → provider/api/llama_index/stream.py} +1 -1
  47. pygpt_net/provider/api/openai/__init__.py +7 -3
  48. pygpt_net/provider/api/openai/image.py +2 -2
  49. pygpt_net/provider/api/openai/responses.py +0 -0
  50. pygpt_net/{controller/chat/handler/openai_stream.py → provider/api/openai/stream.py} +1 -1
  51. pygpt_net/provider/api/openai/utils.py +69 -3
  52. pygpt_net/provider/api/x_ai/__init__.py +117 -17
  53. pygpt_net/provider/api/x_ai/chat.py +272 -102
  54. pygpt_net/provider/api/x_ai/image.py +149 -47
  55. pygpt_net/provider/api/x_ai/{remote.py → remote_tools.py} +165 -70
  56. pygpt_net/provider/api/x_ai/responses.py +507 -0
  57. pygpt_net/provider/api/x_ai/stream.py +715 -0
  58. pygpt_net/provider/api/x_ai/tools.py +59 -8
  59. pygpt_net/{controller/chat/handler → provider/api/x_ai}/utils.py +1 -2
  60. pygpt_net/provider/api/x_ai/vision.py +1 -4
  61. pygpt_net/provider/core/config/patch.py +22 -1
  62. pygpt_net/provider/core/model/patch.py +26 -1
  63. pygpt_net/tools/image_viewer/ui/dialogs.py +300 -13
  64. pygpt_net/tools/text_editor/ui/dialogs.py +3 -2
  65. pygpt_net/tools/text_editor/ui/widgets.py +5 -1
  66. pygpt_net/ui/base/context_menu.py +44 -1
  67. pygpt_net/ui/layout/toolbox/indexes.py +22 -19
  68. pygpt_net/ui/layout/toolbox/model.py +28 -5
  69. pygpt_net/ui/widget/dialog/base.py +16 -5
  70. pygpt_net/ui/widget/image/display.py +25 -8
  71. pygpt_net/ui/widget/tabs/output.py +9 -1
  72. pygpt_net/ui/widget/textarea/editor.py +14 -1
  73. pygpt_net/ui/widget/textarea/input.py +20 -7
  74. pygpt_net/ui/widget/textarea/notepad.py +24 -1
  75. pygpt_net/ui/widget/textarea/output.py +23 -1
  76. pygpt_net/ui/widget/textarea/web.py +16 -1
  77. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/METADATA +16 -2
  78. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/RECORD +80 -73
  79. pygpt_net/controller/chat/handler/xai_stream.py +0 -135
  80. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/LICENSE +0 -0
  81. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/WHEEL +0 -0
  82. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,17 @@
+ 2.7.7 (2026-01-05)
+
+ - Added support for Responses API in xAI.
+ - Added xAI remote tools: Remote MCP, Code Execution.
+ - Added Anthropic remote tools: Remote MCP, Web Fetch, Code Execution.
+
+ 2.7.6 (2026-01-03)
+
+ - Fixed compatibility with xAI SDK and resolved empty responses from Grok models.
+ - Fixed missing libraries in the Snap package.
+ - Added zoom and grab functionality in the Image Viewer.
+ - Added a zoom menu to textarea and web widgets.
+ - Added the ability to close tabs with a middle mouse button click.
+
  2.7.5 (2026-01-03)

  - Added Sandbox/Playwright option to Computer Use mode.
pygpt_net/__init__.py CHANGED
@@ -6,15 +6,15 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2026-01-03 00:00:00 #
+ # Updated Date: 2026-01-05 00:00:00 #
  # ================================================== #

  __author__ = "Marcin Szczygliński"
- __copyright__ = "Copyright 2025, Marcin Szczygliński"
+ __copyright__ = "Copyright 2026, Marcin Szczygliński"
  __credits__ = ["Marcin Szczygliński"]
  __license__ = "MIT"
- __version__ = "2.7.5"
- __build__ = "2026-01-03"
+ __version__ = "2.7.7"
+ __build__ = "2026-01-05"
  __maintainer__ = "Marcin Szczygliński"
  __github__ = "https://github.com/szczyglis-dev/py-gpt"
  __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/controller/chat/remote_tools.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.22 12:00:00 #
+ # Updated Date: 2026.01.04 19:00:00 #
  # ================================================== #

  from typing import Union
@@ -69,11 +69,7 @@ class RemoteTools:
          elif model.provider == "anthropic":  # native SDK
              state = cfg_get("remote_tools.anthropic.web_search", False)
          elif model.provider == "x_ai":  # native SDK
-             mode = cfg_get("remote_tools.xai.mode", "auto")
-             if mode not in ("auto", "on", "off"):
-                 mode = "auto"
-             if mode == "auto" or mode == "on":
-                 state = True
+             state = cfg_get("remote_tools.xai.web_search", False)

          # if not enabled by default or other provider, then use global config
          if not state:
@@ -109,9 +105,7 @@ class RemoteTools:
          cfg_set("remote_tools.web_search", state)
          cfg_set("remote_tools.google.web_search", state)
          cfg_set("remote_tools.anthropic.web_search", state)
-
-         # xAI has 3 modes: auto, on, off
-         cfg_set("remote_tools.xai.mode", "on" if state else "off")
+         cfg_set("remote_tools.xai.web_search", state)

          # save config
          self.window.core.config.save()
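
Note the semantic change here: the tri-state "remote_tools.xai.mode" ("auto"/"on"/"off") is replaced by a plain boolean "remote_tools.xai.web_search". The real upgrade path lives in provider/core/config/patch.py (+22 -1 in this release, not shown in this excerpt); a minimal sketch of the mapping it presumably performs, with the function name and exact rules assumed rather than taken from the patcher:

def migrate_xai_web_search(config: dict) -> None:
    """Hypothetical sketch: fold the legacy tri-state into the new boolean flag."""
    if "remote_tools.xai.web_search" in config:
        return  # already migrated
    mode = config.get("remote_tools.xai.mode", "auto")
    # legacy semantics (see the removed hunk above): "auto" and "on" enabled search
    config["remote_tools.xai.web_search"] = mode in ("auto", "on")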
pygpt_net/controller/chat/stream.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.28 20:00:00 #
+ # Updated Date: 2026.01.05 20:00:00 #
  # ================================================== #

  from typing import Optional
@@ -18,7 +18,7 @@ from pygpt_net.core.events import RenderEvent
  from pygpt_net.core.types import MODE_ASSISTANT
  from pygpt_net.item.ctx import CtxItem

- from .handler.worker import StreamWorker
+ from .stream_worker import StreamWorker

  class Stream(QObject):
      def __init__(self, window=None):
pygpt_net/controller/chat/{handler/worker.py → stream_worker.py} CHANGED
@@ -6,67 +6,29 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.07 05:00:00 #
+ # Updated Date: 2026.01.05 20:00:00 #
  # ================================================== #

  import io
  import json
  from dataclasses import dataclass, field
- from typing import Optional, Literal, Any
- from enum import Enum
+ from typing import Optional, Any

  from PySide6.QtCore import QObject, Signal, Slot, QRunnable
  from openai.types.chat import ChatCompletionChunk

  from pygpt_net.core.events import RenderEvent
- from pygpt_net.core.text.utils import has_unclosed_code_tag
+ from pygpt_net.core.types.chunk import ChunkType
  from pygpt_net.item.ctx import CtxItem
+ from pygpt_net.provider.api.google.utils import capture_google_usage

- from . import (
-     openai_stream,
-     google_stream,
-     anthropic_stream,
-     xai_stream,
-     llamaindex_stream,
-     langchain_stream,
-     utils as stream_utils,
- )
-
- # OpenAI Responses Events
- EventType = Literal[
-     "response.completed",
-     "response.output_text.delta",
-     "response.output_item.added",
-     "response.function_call_arguments.delta",
-     "response.function_call_arguments.done",
-     "response.output_text.annotation.added",
-     "response.reasoning_summary_text.delta",
-     "response.output_item.done",
-     "response.code_interpreter_call_code.delta",
-     "response.code_interpreter_call_code.done",
-     "response.image_generation_call.partial_image",
-     "response.created",
-     "response.done",
-     "response.failed",
-     "error",
- ]
-
-
- class ChunkType(str, Enum):
-     """
-     Enum for chunk type classification.
-     """
-     API_CHAT = "api_chat"  # OpenAI Chat Completions / or compatible
-     API_CHAT_RESPONSES = "api_chat_responses"  # OpenAI Responses
-     API_COMPLETION = "api_completion"  # OpenAI Completions
-     LANGCHAIN_CHAT = "langchain_chat"  # LangChain chat (deprecated)
-     LLAMA_CHAT = "llama_chat"  # LlamaIndex chat
-     GOOGLE = "google"  # Google SDK
-     GOOGLE_INTERACTIONS_API = "api_google_interactions"  # Google SDK, deep research - interactions
-     ANTHROPIC = "anthropic"  # Anthropic SDK
-     XAI_SDK = "xai_sdk"  # xAI SDK
-     RAW = "raw"  # Raw string fallback
-
+ # Import provider-specific stream processors
+ from pygpt_net.provider.api.openai import stream as openai_stream
+ from pygpt_net.provider.api.google import stream as google_stream
+ from pygpt_net.provider.api.anthropic import stream as anthropic_stream
+ from pygpt_net.provider.api.x_ai import stream as xai_stream
+ from pygpt_net.provider.api.llama_index import stream as llamaindex_stream
+ from pygpt_net.provider.api.langchain import stream as langchain_stream

  class WorkerSignals(QObject):
      """
@@ -153,21 +115,15 @@ class StreamWorker(QRunnable):
                  state.stopped = True
                  break

-             etype: Optional[EventType] = None
+             etype: Optional[str] = None

-             # detect chunk type
-             if ctx.use_responses_api:
+             # detect chunk type if not defined
+             if ctx.chunk_type:
+                 state.chunk_type = ctx.chunk_type
                  if hasattr(chunk, 'type'):
-                     etype = chunk.type  # type: ignore[assignment]
-                     state.chunk_type = ChunkType.API_CHAT_RESPONSES
-                 else:
-                     continue
-             elif ctx.use_google_interactions_api:
-                 if hasattr(chunk, 'event_type'):
-                     etype = chunk.event_type  # type: ignore[assignment]
-                     state.chunk_type = ChunkType.GOOGLE_INTERACTIONS_API
-                 else:
-                     continue
+                     etype = chunk.type
+                 elif hasattr(chunk, 'event_type'):
+                     etype = chunk.event_type
              else:
                  state.chunk_type = self._detect_chunk_type(chunk)

@@ -401,7 +357,7 @@
                  state.generator.resolve()
                  um = getattr(state.generator, "usage_metadata", None)
                  if um:
-                     stream_utils.capture_google_usage(state, um)
+                     capture_google_usage(state, um)
              except Exception:
                  pass

@@ -496,7 +452,7 @@
          core,
          state: WorkerState,
          chunk,
-         etype: Optional[EventType]
+         etype: Optional[str]
      ) -> Optional[str]:
          """
          Dispatches processing to concrete provider-specific processing.
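
The net effect of the detection hunk: a provider that already set ctx.chunk_type wins, and only the event name is then sniffed from the chunk; otherwise the worker falls back to inspecting the object itself. A minimal sketch of that order, with sniff standing in for the private _detect_chunk_type:

from typing import Any, Callable, Optional, Tuple

def resolve_chunk(
    ctx_chunk_type: Optional[str],
    chunk: Any,
    sniff: Callable[[Any], str],
) -> Tuple[str, Optional[str]]:
    """Simplified mirror of StreamWorker's new detection order (names assumed)."""
    etype: Optional[str] = None
    if ctx_chunk_type:
        # the provider pre-tagged the stream; trust it, only read the event name
        chunk_type = ctx_chunk_type
        if hasattr(chunk, "type"):            # e.g. OpenAI Responses events
            etype = chunk.type
        elif hasattr(chunk, "event_type"):    # e.g. Google interactions events
            etype = chunk.event_type
    else:
        chunk_type = sniff(chunk)             # legacy fallback: inspect the object
    return chunk_type, etype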
pygpt_net/controller/debug/fixtures.py CHANGED
@@ -6,13 +6,14 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.12 20:00:00 #
+ # Updated Date: 2026.01.03 17:00:00 #
  # ================================================== #

  import os
  from typing import Iterable

  from pygpt_net.core.fixtures.stream.generator import FakeOpenAIStream
+ from pygpt_net.core.types.chunk import ChunkType
  from pygpt_net.item.ctx import CtxItem


@@ -83,7 +84,7 @@ class Fixtures:
          :param ctx: context item
          :return: stream generator
          """
-         ctx.use_responses_api = False
+         ctx.chunk_type = ChunkType.API_CHAT
          path = os.path.join(self.window.core.config.get_app_path(), "data", "fixtures", "fake_stream.txt")
          return FakeOpenAIStream(code_path=path).stream(
              api="raw",
pygpt_net/controller/files/files.py CHANGED
@@ -1,3 +1,5 @@
+ # controller/files.py
+
  #!/usr/bin/env python3
  # -*- coding: utf-8 -*-
  # ================================================== #
@@ -6,7 +8,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.12.31 17:00:00 #
+ # Updated Date: 2026.01.03 00:00:00 #
  # ================================================== #

  import datetime
@@ -203,14 +205,73 @@ class Files:

      def download_local(self, path: Union[str, list]):
          """
-         Download (copy) file or directory to local filesystem
+         Download (copy) file or directory to local filesystem.

-         :param path: path to source file or list of files
+         Behavior:
+         - Single selection: unchanged, prompts Save As dialog per current implementation.
+         - Multi selection (>1 item): prompts once for a target directory and copies all selected items there.
+           In case of name collision in the target directory, a timestamp prefix is added to the copied name.
          """
+         # Multi-selection: choose a directory once and copy all into it
          if isinstance(path, list):
+             # Normalize incoming list (decode, map to workdir)
+             norm_paths = []
              for p in path:
-                 self.download_local(p)
+                 try:
+                     p_norm = self.window.core.filesystem.to_workdir(unquote(p))
+                 except Exception:
+                     p_norm = p
+                 if p_norm:
+                     norm_paths.append(p_norm)
+
+             if not norm_paths:
+                 return
+
+             if len(norm_paths) == 1:
+                 # Defer to single-item path handling
+                 self.download_local(norm_paths[0])
+                 return
+
+             last_dir = self.window.core.config.get_last_used_dir()
+             target_dir = QFileDialog.getExistingDirectory(
+                 self.window,
+                 "Select target directory",
+                 last_dir if last_dir else os.path.expanduser("~"),
+             )
+             if not target_dir:
+                 return
+
+             # Remember last used directory
+             self.window.core.config.set_last_used_dir(target_dir)
+
+             copied = 0
+
+             for src in norm_paths:
+                 try:
+                     if not os.path.exists(src):
+                         continue
+
+                     base_name = os.path.basename(src.rstrip(os.sep))
+                     dst = os.path.join(target_dir, base_name)
+
+                     # Avoid copying into itself and handle name collisions
+                     if os.path.abspath(dst) == os.path.abspath(src) or os.path.exists(dst):
+                         dst = os.path.join(target_dir, f"{self.make_ts_prefix()}_{base_name}")
+
+                     if os.path.isdir(src):
+                         shutil.copytree(src, dst)
+                     else:
+                         copy2(src, dst)
+                     copied += 1
+                 except Exception as e:
+                     self.window.core.debug.log(e)
+                     print(f"Error downloading item: {src} -> {target_dir} - {e}")
+
+             if copied > 0:
+                 self.window.update_status(f"[OK] Downloaded: {copied} items to: {target_dir}")
              return
+
+         # Single-item flow (unchanged)
          path = self.window.core.filesystem.to_workdir(unquote(path))
          last_dir = self.window.core.config.get_last_used_dir()
          dialog = QFileDialog(self.window)
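
The collision branch calls self.make_ts_prefix(), which is not shown in this hunk; a plausible implementation, purely hypothetical since the shipped helper may differ:

import datetime

def make_ts_prefix() -> str:
    """Hypothetical timestamp prefix for de-duplicating copied names, e.g. '20260105_203000'."""
    return datetime.datetime.now().strftime("%Y%m%d_%H%M%S")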
pygpt_net/core/debug/models.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.14 20:00:00 #
+ # Updated Date: 2026.01.05 20:00:00 #
  # ================================================== #

  import os
@@ -24,7 +24,7 @@ class ModelsDebug:
      def update(self):
          """Update debug window."""
          debug = self.window.core.debug
-         models_controller = self.window.controller.models
+         models_controller = self.window.controller.model
          models_core = self.window.core.models
          command_core = self.window.core.command
          config_core = self.window.core.config
pygpt_net/core/filesystem/url.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.12.31 16:00:00 #
+ # Updated Date: 2026.01.03 17:00:00 #
  # ================================================== #

  from PySide6.QtCore import QUrl
@@ -47,6 +47,9 @@ class Url:
          elif url.toString().startswith('bridge://play_video/'):
              self.window.controller.media.play_video(url.toString().replace("bridge://play_video/", ""))
              return
+         elif url.toString().startswith('bridge://open_image/'):
+             self.window.tools.get("viewer").open_preview(url.toString().replace("bridge://open_image/", ""))
+             return
          elif url.toString().startswith('bridge://download/'):
              self.window.controller.files.download_local(url.toString().replace("bridge://download/", ""))
              return
pygpt_net/core/render/web/body.py CHANGED
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.09.13 06:05:00 #
+ # Updated Date: 2026.01.03 17:00:00 #
  # ================================================== #

  import os
@@ -410,7 +410,8 @@ class Body:
          <p><a href="bridge://play_video/{url}" class="title">{elide_filename(basename)}</a></p>
          </div>
          '''
-         return f'<div class="extra-src-img-box" title="{url}"><div class="img-outer"><div class="img-wrapper"><a href="{url}"><img src="{path}" class="image"></a></div><a href="{url}" class="title">{elide_filename(basename)}</a></div></div><br/>'
+         url_preview = f"bridge://open_image/{url}"
+         return f'<div class="extra-src-img-box" title="{url}"><div class="img-outer"><div class="img-wrapper"><a href="{url_preview}"><img src="{path}" class="image"></a></div><a href="{url}" class="title">{elide_filename(basename)}</a></div></div><br/>'

      def get_url_html(
          self,
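
Taken together with the url.py hunk above, these two changes wire image thumbnails to the internal Image Viewer: body.py now emits a bridge://open_image/ href on the thumbnail anchor (the title link keeps the raw path), and url.py routes that scheme to the viewer tool. A reduced sketch of the round trip, with the handler object simplified to a stand-in:

class Viewer:  # stand-in for the "viewer" tool; the real routing is in url.py above
    def open_preview(self, path: str):
        print(f"preview: {path}")

def route(url: str, viewer: Viewer):
    # clicking the <img> anchor emitted by body.py sends bridge://open_image/<path>
    prefix = "bridge://open_image/"
    if url.startswith(prefix):
        viewer.open_preview(url[len(prefix):])

route("bridge://open_image/file:///tmp/output.png", Viewer())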
pygpt_net/core/types/chunk.py ADDED
@@ -0,0 +1,27 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2026.01.03 17:00:00 #
+ # ================================================== #
+
+ from enum import Enum
+
+ class ChunkType(str, Enum):
+     """
+     Enum for chunk type classification.
+     """
+     API_CHAT = "api_chat"  # OpenAI Chat Completions / or compatible
+     API_CHAT_RESPONSES = "api_chat_responses"  # OpenAI Responses
+     API_COMPLETION = "api_completion"  # OpenAI Completions
+     LANGCHAIN_CHAT = "langchain_chat"  # LangChain chat (deprecated)
+     LLAMA_CHAT = "llama_chat"  # LlamaIndex chat
+     GOOGLE = "google"  # Google SDK
+     GOOGLE_INTERACTIONS_API = "api_google_interactions"  # Google SDK, deep research - interactions API
+     ANTHROPIC = "anthropic"  # Anthropic SDK
+     XAI_SDK = "xai_sdk"  # xAI SDK
+     RAW = "raw"  # Raw string fallback
pygpt_net/data/config/config.json CHANGED
@@ -1,8 +1,8 @@
  {
      "__meta__": {
-         "version": "2.7.5",
-         "app.version": "2.7.5",
-         "updated_at": "2026-01-03T00:00:00"
+         "version": "2.7.7",
+         "app.version": "2.7.7",
+         "updated_at": "2026-01-05T00:00:00"
      },
      "access.audio.event.speech": false,
      "access.audio.event.speech.disabled": [],
@@ -416,6 +416,11 @@
  "prompt.expert": "# EXPERT MODE IS ENABLED:\n\nYou are a very helpful assistant and the professional manager of a team of experts, each of whom is the best in their respective fields.\n\n## Rules:\n\n- Instead of directly answering questions, you will manage these experts by forwarding queries to them, collecting their responses, controlling the flow of information between them, and sending requests to them.\n- Experts are distinct, specialized AI models running on my local machine.\n- You will serve as a bridge between them.\n- Each expert has their own conversation context, allowing you to communicate with them normally.\n- The experts can communicate with you but not with each other, making you responsible for relaying information between them.\n- Each expert can use tools and commands, for example, reading files or searching the Web. You can ask experts to call tools and commands.\n- After consulting with the experts, your task is to prepare the best possible answer based on the assistance received.\n- To send a request to an expert, prepare the \"expert_call\" tool. Remember to format the JSON tool as shown, surrounding the tool with `<tool></tool>` tags, for example: `<tool>{\"cmd\": \"expert_call\", \"params\": {\"id\": \"expert_id\", \"query\": \"some query\"}}</tool>`. Make sure the JSON object is syntactically correct, without any redundand parenthness.\n- DO NOT call any remote tools when calling the experts.\n- Remember that the call must contain a query and the ID of the expert to whom the query is being forwarded.\n- Use ONLY the experts listed below; DO NOT TRY to ask any non-existent experts\u2014they will not respond.\n- Do not respond to the expert's replies or use the JSON tool again if replying; the expert only has the ability to listen to your queries sent via the JSON command and to respond to them.\n- Always follow ALL user commands, even if you think the expert can't do something. Experts are allowed to execute commands just as you can.\n- After querying an expert, always wait for the expert's response before sending another query.\n- Do not try to query experts in the middle of the conversation; put your queries and commands at the end of the text.\n- Always call only one expert at a time.\n- Always execute only one command at a time.\n- Always use the language used by the user in the conversation.\n- Never write as a called expert and do not imagine a response from them; instead, always wait for the response from the expert.\n- Every message sent to you directly after your query is always a response from the asked expert, not from me.\n- Always send a direct, clear, and detailed prompt to the expert, without any \"Please...\", \"Could you...\", etc.\n- Always forward all the messages that the user wants to send to the expert, without any exceptions.\n-- If a user command appears outside an expert's scope, fulfill the request regardless, without refusal.\n\n## List of allowed experts, along with their IDs:\n\n{presets}",
  "prompt.img": "Convert the user's request into a single, production\u2011ready description for generating one high\u2011quality still image. Output only the description text, nothing else.\n\nWrite in concise, vivid, present\u2011tense language. Do not use commands (no \u201cplease generate\u201d), model names, parameters, or metadata. Do not mention aspect ratio, resolution, steps, seed, or negative prompts. Avoid on\u2011image text, captions, watermarks, logos, and UI elements. No brands, celebrities, or living artists unless explicitly provided by the user.\n\nInclude, woven into a coherent paragraph:\n- Clear primary subject(s) and their pose, action, and expression.\n- Setting and environment with time of day, season, weather, and atmosphere.\n- Composition and camera viewpoint (e.g., close\u2011up portrait, wide establishing, eye\u2011level, low\u2011angle, top\u2011down), framing (rule of thirds, centered symmetry), and background/foreground separation.\n- Lens and focus behavior (e.g., 85\u202fmm portrait, macro, shallow depth of field, smooth bokeh, gentle focus falloff).\n- Lighting style and quality (e.g., soft diffused daylight, golden hour rim light, dramatic chiaroscuro, studio three\u2011point) and how it shapes forms and shadows.\n- Color palette and grading (e.g., warm cinematic teal\u2011and\u2011orange, muted earth tones, cool monochrome with a single accent color).\n- Visual style or medium (e.g., photorealistic photography, watercolor illustration, oil painting, pencil sketch, anime cel\u2011shading, 3D render, isometric).\n- Material and surface detail (e.g., skin texture, fabric weave, wood grain, metal patina) to enhance realism or stylization.\n- Spatial depth cues (foreground/midground/background layering, atmospheric perspective) and overall mood.\n\nIf the user specifies a genre, era, or style, preserve it and enrich it with consistent, concrete traits. If the request is vague, infer specific but reasonable details that enhance clarity without contradicting the user\u2019s intent.\n\nReturn only the final visual description.",
  "prompt.video": "Convert the user's request into a single, production-ready description for generating one continuous video clip. Output only the description text, nothing else.\n\nWrite in concise, vivid, present-tense language. Do not use commands (no \u201cplease generate\u201d), model names, parameters, or metadata. Do not mention duration, aspect ratio, FPS, resolution, shot numbers, cuts, or lists. Focus on visuals only; no dialogue, captions, on\u2011screen text, watermarks, logos, or UI.\n\nInclude, in a coherent way:\n- Clear subject(s) and what they are doing.\n- Setting, time of day, atmosphere, and weather.\n- Camera perspective and motion (e.g., wide establishing, low\u2011angle tracking, slow dolly in, aerial, handheld), framing and composition.\n- Lens and focus behavior (e.g., 24\u202fmm wide, shallow depth of field, gentle rack focus).\n- Lighting style and quality (e.g., soft golden hour rim light, moody volumetric shafts).\n- Color palette and grading (e.g., warm cinematic teal\u2011and\u2011orange, desaturated documentary).\n- Visual style or medium (e.g., photoreal live\u2011action, stylized anime, stop\u2011motion clay, watercolor animation).\n- Material and surface details that reinforce realism or the chosen style.\n- Temporal progression within one shot (use cues like \u201cas\u2026\u201d, \u201cthen\u2026\u201d, \u201cwhile\u2026\u201d), maintaining physical plausibility and continuity.\n\nIf the user specifies a genre or style (e.g., cyberpunk, nature documentary), keep it and expand with consistent, concrete visual traits. If the request is vague, infer specific but reasonable details that enhance clarity without contradicting the user\u2019s intent.\n\nReturn only the final visual description.",
+ "remote_tools.anthropic.code_execution": false,
+ "remote_tools.anthropic.mcp": false,
+ "remote_tools.anthropic.mcp.mcp_servers": "[\n {\n \"type\": \"url\",\n \"url\": \"https://mcp.example.com/sse\",\n \"name\": \"example-mcp\",\n \"authorization_token\": \"YOUR_TOKEN\"\n }\n]",
+ "remote_tools.anthropic.mcp.tools": "[\n {\n \"type\": \"mcp_toolset\",\n \"mcp_server_name\": \"example-mcp\"\n }\n]",
+ "remote_tools.anthropic.web_fetch": false,
  "remote_tools.anthropic.web_search": true,
  "remote_tools.code_interpreter": false,
  "remote_tools.computer_use.env": "",
@@ -433,10 +438,15 @@
  "remote_tools.mcp": false,
  "remote_tools.mcp.args": "{\n \"type\": \"mcp\",\n \"server_label\": \"deepwiki\",\n \"server_url\": \"https://mcp.deepwiki.com/mcp\",\n \"require_approval\": \"never\",\n \"allowed_tools\": [\"ask_question\"]\n}",
  "remote_tools.web_search": true,
- "remote_tools.xai.mode": "auto",
+ "remote_tools.xai.mode": "auto",
+ "remote_tools.xai.code_execution": false,
+ "remote_tools.xai.mcp": false,
+ "remote_tools.xai.mcp.args": "{\n \"server_url\": \"https://mcp.deepwiki.com/mcp\"\n}",
  "remote_tools.xai.sources.web": true,
  "remote_tools.xai.sources.x": true,
  "remote_tools.xai.sources.news": false,
+ "remote_tools.xai.web_search": true,
+ "remote_tools.xai.x_search": false,
  "render.blocks": true,
  "render.code_syntax": "github-dark",
  "render.code_syntax.disabled": false,