pygpt-net 2.5.17__py3-none-any.whl → 2.5.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. pygpt_net/CHANGELOG.txt +7 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/common.py +4 -2
  4. pygpt_net/controller/chat/input.py +36 -27
  5. pygpt_net/controller/chat/stream.py +22 -2
  6. pygpt_net/controller/config/placeholder.py +1 -1
  7. pygpt_net/controller/model/__init__.py +1 -1
  8. pygpt_net/controller/model/editor.py +6 -1
  9. pygpt_net/controller/model/importer.py +4 -3
  10. pygpt_net/core/bridge/__init__.py +8 -4
  11. pygpt_net/core/command/__init__.py +10 -1
  12. pygpt_net/core/idx/chat.py +6 -1
  13. pygpt_net/core/image/__init__.py +15 -0
  14. pygpt_net/core/models/__init__.py +14 -6
  15. pygpt_net/core/models/ollama.py +4 -3
  16. pygpt_net/data/config/config.json +4 -3
  17. pygpt_net/data/config/models.json +205 -34
  18. pygpt_net/data/config/modes.json +10 -10
  19. pygpt_net/data/config/settings.json +22 -0
  20. pygpt_net/data/locale/locale.de.ini +1 -1
  21. pygpt_net/data/locale/locale.en.ini +6 -2
  22. pygpt_net/data/locale/locale.es.ini +1 -1
  23. pygpt_net/data/locale/locale.fr.ini +1 -1
  24. pygpt_net/data/locale/locale.pl.ini +1 -1
  25. pygpt_net/data/locale/locale.uk.ini +1 -1
  26. pygpt_net/data/locale/locale.zh.ini +1 -1
  27. pygpt_net/item/model.py +35 -1
  28. pygpt_net/provider/core/config/patch.py +7 -0
  29. pygpt_net/provider/core/model/json_file.py +4 -1
  30. pygpt_net/provider/core/model/patch.py +17 -1
  31. pygpt_net/provider/gpt/__init__.py +14 -0
  32. pygpt_net/provider/gpt/image.py +42 -8
  33. pygpt_net/provider/gpt/responses.py +22 -16
  34. pygpt_net/provider/llms/anthropic.py +3 -1
  35. pygpt_net/provider/llms/google.py +3 -1
  36. pygpt_net/provider/llms/hugging_face.py +3 -1
  37. pygpt_net/provider/llms/hugging_face_api.py +3 -1
  38. pygpt_net/provider/llms/ollama.py +9 -3
  39. pygpt_net/provider/llms/openai.py +7 -1
  40. pygpt_net/ui/dialog/preset.py +1 -1
  41. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.18.dist-info}/METADATA +13 -6
  42. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.18.dist-info}/RECORD +45 -45
  43. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.18.dist-info}/LICENSE +0 -0
  44. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.18.dist-info}/WHEEL +0 -0
  45. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.18.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,10 @@
+2.5.18 (2025-06-26)
+
+- Non-GPT models are now available in standard Chat mode.
+- Added a new remote tool: `image_generation` in Responses API -> disabled by default, enable in `Config -> Settings -> Remote Tools`. Enables native image generation and editing of uploaded images in Chat mode.
+- Added a new model `gpt-image-1` and improved image generation.
+- Other small fixes.
+
 2.5.17 (2025-06-25)
 
 - Added settings for enable/disable Remote Tools via Responses API in Chat mode: Config -> Settings -> Remote tools. Currently only web-search-preview tool is available, rest of tools coming soon.
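Note: the `image_generation` remote tool introduced here is the built-in image generation tool of the OpenAI Responses API. A minimal standalone sketch of enabling it (model name, prompt, and output file are illustrative, not taken from this release):

```python
# Hedged sketch: calling the Responses API with the image_generation tool enabled.
# Assumes the official openai SDK and OPENAI_API_KEY in the environment.
import base64
from openai import OpenAI

client = OpenAI()
response = client.responses.create(
    model="gpt-4.1-mini",  # illustrative model choice
    input="Generate an image of a lighthouse at dusk",
    tools=[{"type": "image_generation"}],  # the tool this release wires into Chat mode
)

# generated images come back base64-encoded in image_generation_call output items
for item in response.output:
    if item.type == "image_generation_call":
        with open("output.png", "wb") as f:
            f.write(base64.b64decode(item.result))
```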
pygpt_net/__init__.py CHANGED
@@ -6,15 +6,15 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025-06-25 02:00:00 #
+# Updated Date: 2025-06-26 02:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.5.17"
-__build__ = "2025-06-25"
+__version__ = "2.5.18"
+__build__ = "2025-06-26"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
 __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/controller/chat/common.py CHANGED
@@ -282,15 +282,17 @@ class Common:
         if not exit:
             self.window.dispatch(AppEvent(AppEvent.INPUT_STOPPED))  # app event
 
-    def check_api_key(self) -> bool:
+    def check_api_key(self, monit: bool = True) -> bool:
         """
         Check if API KEY is set
 
+        :param monit: True if monitor should be shown
         :return: True if API KEY is set, False otherwise
         """
         result = True
         if self.window.core.config.get('api_key') is None or self.window.core.config.get('api_key') == '':
-            self.window.controller.launcher.show_api_monit()
+            if monit:
+                self.window.controller.launcher.show_api_monit()
             self.window.update_status("Missing API KEY!")
             result = False
         return result
pygpt_net/controller/chat/input.py CHANGED
@@ -6,8 +6,9 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.24 16:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #
+
 import os
 from typing import Optional, Any, Dict
 
@@ -20,6 +21,7 @@ from pygpt_net.core.types import (
     MODE_LLAMA_INDEX,
     MODE_ASSISTANT,
     MODE_IMAGE,
+    MODE_CHAT,
 )
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.utils import trans
@@ -76,27 +78,29 @@ class Input:
 
         # check ollama model
         model = self.window.core.config.get('model')
-        if mode == MODE_LLAMA_INDEX and model is not None:
+        if model:
             model_data = self.window.core.models.get(model)
             if model_data is not None and model_data.is_ollama():
-                model_id = model_data.get_ollama_model()
-                # load ENV vars first
-                if ('env' in model_data.llama_index
-                        and model_data.llama_index['env'] is not None):
-                    for item in model_data.llama_index['env']:
-                        key = item.get('name', '').strip()
-                        value = item.get('value', '').strip()
-                        os.environ[key] = value
-                status = self.window.core.models.ollama.check_model(model_id)
-                is_installed = status.get('is_installed', False)
-                is_model = status.get('is_model', False)
-                if not is_installed:
-                    self.window.ui.dialogs.alert(trans("dialog.ollama.not_installed"))
-                    return
-                if not is_model:
-                    self.window.ui.dialogs.alert(
-                        trans("dialog.ollama.model_not_found").replace("{model}", model_id))
-                    return
+                if (mode == MODE_LLAMA_INDEX or
+                        (mode == MODE_CHAT and not model_data.is_openai() and model_data.is_ollama())):
+                    model_id = model_data.get_ollama_model()
+                    # load ENV vars first
+                    if ('env' in model_data.llama_index
+                            and model_data.llama_index['env'] is not None):
+                        for item in model_data.llama_index['env']:
+                            key = item.get('name', '').strip()
+                            value = item.get('value', '').strip()
+                            os.environ[key] = value
+                    status = self.window.core.models.ollama.check_model(model_id)
+                    is_installed = status.get('is_installed', False)
+                    is_model = status.get('is_model', False)
+                    if not is_installed:
+                        self.window.ui.dialogs.alert(trans("dialog.ollama.not_installed"))
+                        return
+                    if not is_model:
+                        self.window.ui.dialogs.alert(
+                            trans("dialog.ollama.model_not_found").replace("{model}", model_id))
+                        return
 
         # listen for stop command
         if self.generating \
@@ -247,14 +251,19 @@ class Input:
             self.generating = False  # unlock as not generating
             return
 
-        # check API key, show monit if no API key
+        # check OpenAI API key, show monit if no API key
         if mode not in self.window.controller.launcher.no_api_key_allowed:
-            if not self.window.controller.chat.common.check_api_key():
-                self.generating = False
-                self.window.dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
-                    "id": "chat",
-                }))
-                return
+            if not self.window.controller.chat.common.check_api_key(monit=False):
+                model = self.window.core.config.get('model')
+                if model:
+                    model_data = self.window.core.models.get(model)
+                    if model_data is not None and model_data.is_openai():
+                        self.window.controller.chat.common.check_api_key(monit=True)
+                        self.generating = False
+                        self.window.dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
+                            "id": "chat",
+                        }))
+                        return
 
         # set state to: busy
         self.window.dispatch(KernelEvent(KernelEvent.STATE_BUSY, {
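The two-pass key check above probes silently first (`monit=False`) and only raises the API-key dialog when the selected model is actually an OpenAI one; local models proceed without a key. A simplified sketch of that decision (helper names are hypothetical stand-ins, not the project's API):

```python
# Hypothetical condensation of the check_api_key(monit=...) flow above.
def may_send(has_key: bool, model_is_openai: bool, show_key_dialog) -> bool:
    """Return True if the prompt may be sent."""
    if has_key:
        return True
    if model_is_openai:
        show_key_dialog()   # alert only when a key is actually required
        return False
    return True             # e.g. local Ollama models run keyless
```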
pygpt_net/controller/chat/stream.py CHANGED
@@ -8,7 +8,8 @@
 # Created By : Marcin Szczygliński #
 # Updated Date: 2025.06.25 02:00:00 #
 # ================================================== #
-
+import base64
+import uuid
 from typing import Any
 
 from PySide6.QtWidgets import QApplication
@@ -39,6 +40,8 @@ class Stream:
         tool_calls = []
         fn_args_buffers = {}
         citations = []
+        img_path = self.window.core.image.gen_unique_path(ctx)
+        is_image = False
 
         # chunks: stream begin
         data = {
@@ -121,8 +124,8 @@
                     if tool_chunk.function.arguments:
                         tool_call["function"]["arguments"] += tool_chunk.function.arguments
 
+            # OpenAI Responses API
             elif chunk_type == "api_chat_responses":
-
                 if etype == "response.output_text.delta":
                     response = chunk.delta
 
@@ -154,6 +157,19 @@
                         citations.append(url_citation)
                     ctx.urls = citations
 
+                # ---------- image gen ----------
+                elif etype == "response.image_generation_call.partial_image":
+                    idx = chunk.partial_image_index
+                    image_base64 = chunk.partial_image_b64
+                    image_bytes = base64.b64decode(image_base64)
+                    with open(img_path, "wb") as f:
+                        f.write(image_bytes)
+                    is_image = True
+
+                # ---------- response ID ----------
+                elif etype == "response.created":
+                    ctx.msg_id = chunk.response.id  # store previous response ID
+
                 # ---------- end / error ----------
                 elif etype in {"response.done", "response.failed", "error"}:
                     pass
@@ -214,6 +230,10 @@
             if tool_calls:
                 self.window.core.command.unpack_tool_calls_chunks(ctx, tool_calls)
 
+            # append images
+            if is_image:
+                ctx.images = [img_path]  # save image path to ctx
+
         except Exception as e:
             self.window.core.debug.log(e)
             error = e
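For reference, the partial-image branch above follows the Responses API streaming events. A self-contained consumer might look like this (a sketch assuming the official openai SDK; model, prompt, and file name are illustrative):

```python
import base64
from openai import OpenAI

client = OpenAI()
stream = client.responses.create(
    model="gpt-4.1-mini",  # illustrative
    input="Draw a gray tabby cat hugging an otter",
    tools=[{"type": "image_generation", "partial_images": 2}],
    stream=True,
)
for event in stream:
    if event.type == "response.image_generation_call.partial_image":
        # each partial image replaces the previous one, so overwriting is fine
        with open("image.png", "wb") as f:
            f.write(base64.b64decode(event.partial_image_b64))
    elif event.type == "response.created":
        previous_id = event.response.id  # stored as ctx.msg_id in the code above
```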
pygpt_net/controller/config/placeholder.py CHANGED
@@ -270,7 +270,7 @@ class Placeholder:
             model = models[id]
             suffix = ""
             if "provider" in model.llama_index and model.llama_index["provider"] == "ollama":
-                suffix = " [Ollama]"
+                suffix = " (Ollama)"
             name = model.name + suffix
             data.append({id: name})
         return data
pygpt_net/controller/model/__init__.py CHANGED
@@ -161,7 +161,7 @@ class Model:
         for k in data:
             suffix = ""
             if "provider" in data[k].llama_index and data[k].llama_index["provider"] == "ollama":
-                suffix = " [Ollama]"
+                suffix = " (Ollama)"
             items[k] = data[k].name + suffix
         items = dict(sorted(items.items(), key=lambda item: item[1]))  # sort items by name
         self.window.ui.nodes["prompt.model"].set_keys(items)
pygpt_net/controller/model/editor.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 08:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #
 
 import copy
@@ -56,6 +56,11 @@ class Editor:
                 "type": "bool",
                 "label": "model.default",
             },
+            "openai": {
+                "type": "bool",
+                "label": "model.openai",
+                "description": "model.openai.desc",
+            },
             "langchain.provider": {
                 "type": "combo",
                 "use": "langchain_providers",
pygpt_net/controller/model/importer.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.24 02:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #
 
 import copy
@@ -200,7 +200,7 @@ class Importer:
         """
         Get current ollama models
 
-        :return: ollama models dictionary
+        :return: PyGPT ollama models dictionary
         """
         items = copy.deepcopy(self.window.core.models.items)
         for key in list(items.keys()):
@@ -214,7 +214,7 @@ class Importer:
         """
         Get available ollama models
 
-        :return: ollama models dictionary
+        :return: Ollama API models dictionary
         """
         models = {}
         status = self.window.core.models.ollama.get_status()
@@ -233,6 +233,7 @@ class Importer:
             m.id = name
             m.name = name
             m.mode = [
+                "chat",
                 "llama_index",
                 "agent",
                 "agent_llama",
pygpt_net/core/bridge/__init__.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.03.02 19:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #
 
 import time
@@ -93,9 +93,11 @@ class Bridge:
         if model is not None:
             if not model.is_supported(mode):  # check selected mode
                 mode = self.window.core.models.get_supported_mode(model, mode)  # switch
+                if base_mode == MODE_CHAT and mode == MODE_LLAMA_INDEX:
+                    context.idx = None  # disable index if in Chat mode and switch to Llama Index
 
         if mode == MODE_LLAMA_INDEX and base_mode != MODE_LLAMA_INDEX:
-            context.idx_mode = MODE_CHAT
+            context.idx_mode = MODE_CHAT  # default in sub-mode
 
         if is_virtual:
             if mode == MODE_LLAMA_INDEX:  # after switch
@@ -192,11 +194,13 @@ class Bridge:
             debug = {k: str(v) for k, v in context.to_dict().items()}
             self.window.core.debug.debug(str(debug))
 
+        # --- DEBUG ONLY ---
         self.last_context_quick = context  # store last context for quick call (debug)
 
         if context.model is not None:
-            # check if model is supported by chat API, if not then try to use llama-index or langchain call
-            if not context.model.is_supported(MODE_CHAT) and not context.model.is_supported(MODE_RESEARCH):
+            # check if model is supported by OpenAI API, if not then try to use llama-index or langchain call
+            if ((not context.model.is_supported(MODE_CHAT) or not context.model.is_openai())
+                    and not context.model.is_supported(MODE_RESEARCH)):
 
                 # tmp switch to: llama-index
                 if context.model.is_supported(MODE_LLAMA_INDEX):
pygpt_net/core/command/__init__.py CHANGED
@@ -241,6 +241,8 @@ class Command:
         parsed = []
         for tool_call in tool_calls:
             try:
+                if not hasattr(tool_call, 'name'):
+                    continue
                 parsed.append(
                     {
                         "id": tool_call.id,
@@ -615,7 +617,7 @@
         :return: True if enabled
         """
         disabled_modes = [
-            MODE_LLAMA_INDEX,
+            #MODE_LLAMA_INDEX,
             MODE_LANGCHAIN,
             MODE_COMPLETION,
         ]
@@ -624,6 +626,13 @@
             return False  # disabled for specific modes
         if self.window.controller.agent.legacy.enabled() or self.window.controller.agent.experts.enabled():
             return False
+        model = self.window.core.config.get('model')
+        if model:
+            model_data = self.window.core.models.get(model)
+            if model_data:
+                llama_provider = model_data.get_llama_provider()
+                if llama_provider in self.window.core.idx.chat.tool_calls_not_allowed_providers:
+                    return False
         return self.window.core.config.get('func_call.native', False)  # otherwise check config
 
     def is_enabled(self, cmd: str) -> bool:
pygpt_net/core/idx/chat.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.21 23:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #
 
 import json
@@ -39,6 +39,7 @@ class Chat:
         self.context = Context(window)
         self.tool_calls_not_allowed_providers = [
             "ollama",
+            "google",
             "hugging_face_api",
             "deepseek_api",
         ]
@@ -237,6 +238,7 @@ class Chat:
         use_index = True
        verbose = self.window.core.config.get("log.llama", False)
         allow_native_tool_calls = True
+        response = None
         if ('provider' in model.llama_index
                 and model.llama_index['provider'] in self.tool_calls_not_allowed_providers):
             allow_native_tool_calls = False
@@ -246,10 +248,12 @@
             use_index = False
 
         # disable index if no api key
+        """
         if self.window.core.config.get("api_key") == "" and self.window.core.config.get("llama.idx.embeddings.provider") == "openai":
             print("Warning: no api key! Disabling index...")
             chat_mode = "simple"  # do not use query engine if no index
             use_index = False
+        """
 
         if model is None or not isinstance(model, ModelItem):
             raise Exception("Model config not provided")
@@ -322,6 +326,7 @@
         if stream:
             # IMPORTANT: stream chat with tools not supported by all providers
             if allow_native_tool_calls and hasattr(llm, "stream_chat_with_tools"):
+                self.log("Using with tools...")
                 response = llm.stream_chat_with_tools(
                     tools=tools,
                     messages=history,
pygpt_net/core/image/__init__.py CHANGED
@@ -10,6 +10,8 @@
 # ================================================== #
 
 import os
+import uuid
+from time import strftime
 from typing import List
 
 from PySide6.QtCore import Slot, QObject
@@ -126,3 +128,16 @@ class Image(QObject):
             else:
                 return "_"
         return "".join(safe_char(c) for c in name).rstrip("_")[:30]
+
+    def gen_unique_path(self, ctx: CtxItem):
+        """
+        Generate unique image path based on context
+
+        :param ctx: CtxItem
+        :return: unique image path
+        """
+        img_id = uuid.uuid4()
+        dt_prefix = strftime("%Y%m%d_%H%M%S")
+        img_dir = self.window.core.config.get_user_dir("img")
+        filename = f"{dt_prefix}_{img_id}.png"
+        return os.path.join(img_dir, filename)
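The new helper avoids name collisions by pairing a sortable timestamp with a UUID. A standalone equivalent for illustration (the target directory is illustrative):

```python
import os
import uuid
from time import strftime

def gen_unique_png_path(img_dir: str) -> str:
    # e.g. img/20250626_160000_9f1c....png -- time-sortable, collision-safe
    return os.path.join(img_dir, f"{strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4()}.png")

print(gen_unique_png_path("img"))
```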
pygpt_net/core/models/__init__.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.24 02:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #
 
 import copy
@@ -385,18 +385,26 @@ class Models:
         :param mode: mode (initial)
         :return: mode (supported)
         """
-        if model.is_supported(MODE_CHAT):
-            self.window.core.debug.info(
-                "WARNING: Switching to chat mode (model not supported in: {})".format(mode))
-            mode = MODE_CHAT
-        elif model.is_supported(MODE_RESEARCH):
+        # if OpenAI API model and not llama_index mode, switch to Chat mode
+        if model.is_openai():
+            if model.is_supported(MODE_CHAT) and mode != MODE_LLAMA_INDEX:  # do not switch if llama_index mode!
+                self.window.core.debug.info(
+                    "WARNING: Switching to chat mode (model not supported in: {})".format(mode))
+                return MODE_CHAT
+
+        # Research / Perplexity
+        if model.is_supported(MODE_RESEARCH):
             self.window.core.debug.info(
                 "WARNING: Switching to research mode (model not supported in: {})".format(mode))
             mode = MODE_RESEARCH
+
+        # Llama Index / Chat with Files
         elif model.is_supported(MODE_LLAMA_INDEX):
             self.window.core.debug.info(
                 "WARNING: Switching to llama_index mode (model not supported in: {})".format(mode))
             mode = MODE_LLAMA_INDEX
+
+        # LangChain
         elif model.is_supported(MODE_LANGCHAIN):
             self.window.core.debug.info(
                 "WARNING: Switching to langchain mode (model not supported in: {})".format(mode))
pygpt_net/core/models/ollama.py CHANGED
@@ -6,10 +6,10 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.24 16:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #
-import os
 
+import os
 import requests
 
 class Ollama:
@@ -31,7 +31,8 @@ class Ollama:
         api_base = "http://localhost:11434"
         if 'OLLAMA_API_BASE' in os.environ:
             api_base = os.environ['OLLAMA_API_BASE']
-        print("Using Ollama base URL:", api_base)
+        self.window.core.idx.log("Using Ollama base URL: {}".format(api_base))
+
         url = api_base + "/api/tags"
         try:
             response = requests.get(url, timeout=2)
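The status check talks to Ollama's standard `/api/tags` endpoint, which lists locally installed models. A minimal standalone probe using the same default base URL and env override as the code above:

```python
import os
import requests

def ollama_models() -> list:
    """Return names of locally installed Ollama models, or [] if unreachable."""
    base = os.environ.get("OLLAMA_API_BASE", "http://localhost:11434")
    try:
        resp = requests.get(base + "/api/tags", timeout=2)
        resp.raise_for_status()
        return [m["name"] for m in resp.json().get("models", [])]
    except requests.RequestException:
        return []  # daemon not running or unreachable
```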
pygpt_net/data/config/config.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.5.17",
-        "app.version": "2.5.17",
-        "updated_at": "2025-06-25T00:00:00"
+        "version": "2.5.18",
+        "app.version": "2.5.18",
+        "updated_at": "2025-06-26T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
@@ -308,6 +308,7 @@
     "render.plain": false,
     "render.code_syntax": "github-dark",
     "remote_tools.web_search": true,
+    "remote_tools.image": false,
     "send_clear": true,
     "send_mode": 2,
     "store_history": true,