pygpt-net 2.6.28__py3-none-any.whl → 2.6.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115):
  1. pygpt_net/CHANGELOG.txt +13 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/{container.py → app_core.py} +5 -6
  4. pygpt_net/controller/access/control.py +1 -9
  5. pygpt_net/controller/assistant/assistant.py +4 -4
  6. pygpt_net/controller/assistant/batch.py +7 -7
  7. pygpt_net/controller/assistant/files.py +4 -4
  8. pygpt_net/controller/assistant/threads.py +3 -3
  9. pygpt_net/controller/attachment/attachment.py +4 -7
  10. pygpt_net/controller/chat/common.py +1 -1
  11. pygpt_net/controller/chat/stream.py +961 -294
  12. pygpt_net/controller/chat/vision.py +11 -19
  13. pygpt_net/controller/config/placeholder.py +1 -1
  14. pygpt_net/controller/ctx/ctx.py +1 -1
  15. pygpt_net/controller/ctx/summarizer.py +1 -1
  16. pygpt_net/controller/mode/mode.py +21 -12
  17. pygpt_net/controller/plugins/settings.py +3 -2
  18. pygpt_net/controller/presets/editor.py +112 -99
  19. pygpt_net/controller/theme/common.py +2 -0
  20. pygpt_net/controller/theme/theme.py +6 -2
  21. pygpt_net/controller/ui/vision.py +4 -4
  22. pygpt_net/core/agents/legacy.py +2 -2
  23. pygpt_net/core/agents/runners/openai_workflow.py +2 -2
  24. pygpt_net/core/assistants/files.py +5 -5
  25. pygpt_net/core/assistants/store.py +4 -4
  26. pygpt_net/core/bridge/bridge.py +3 -3
  27. pygpt_net/core/bridge/worker.py +28 -9
  28. pygpt_net/core/debug/console/console.py +2 -2
  29. pygpt_net/core/debug/presets.py +2 -2
  30. pygpt_net/core/experts/experts.py +2 -2
  31. pygpt_net/core/idx/llm.py +21 -3
  32. pygpt_net/core/modes/modes.py +2 -2
  33. pygpt_net/core/presets/presets.py +3 -3
  34. pygpt_net/core/tokens/tokens.py +4 -4
  35. pygpt_net/core/types/mode.py +5 -2
  36. pygpt_net/core/vision/analyzer.py +1 -1
  37. pygpt_net/data/config/config.json +6 -3
  38. pygpt_net/data/config/models.json +75 -3
  39. pygpt_net/data/config/modes.json +3 -9
  40. pygpt_net/data/config/settings.json +112 -55
  41. pygpt_net/data/config/settings_section.json +2 -2
  42. pygpt_net/data/locale/locale.de.ini +2 -2
  43. pygpt_net/data/locale/locale.en.ini +9 -2
  44. pygpt_net/data/locale/locale.es.ini +2 -2
  45. pygpt_net/data/locale/locale.fr.ini +2 -2
  46. pygpt_net/data/locale/locale.it.ini +2 -2
  47. pygpt_net/data/locale/locale.pl.ini +3 -3
  48. pygpt_net/data/locale/locale.uk.ini +2 -2
  49. pygpt_net/data/locale/locale.zh.ini +2 -2
  50. pygpt_net/item/model.py +23 -3
  51. pygpt_net/plugin/openai_dalle/plugin.py +4 -4
  52. pygpt_net/plugin/openai_vision/plugin.py +12 -13
  53. pygpt_net/provider/agents/openai/agent.py +5 -5
  54. pygpt_net/provider/agents/openai/agent_b2b.py +5 -5
  55. pygpt_net/provider/agents/openai/agent_planner.py +5 -6
  56. pygpt_net/provider/agents/openai/agent_with_experts.py +5 -5
  57. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
  58. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
  59. pygpt_net/provider/agents/openai/bot_researcher.py +2 -2
  60. pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
  61. pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -1
  62. pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
  63. pygpt_net/provider/agents/openai/evolve.py +5 -5
  64. pygpt_net/provider/agents/openai/supervisor.py +4 -4
  65. pygpt_net/provider/api/__init__.py +27 -0
  66. pygpt_net/provider/api/anthropic/__init__.py +68 -0
  67. pygpt_net/provider/api/google/__init__.py +262 -0
  68. pygpt_net/provider/api/google/audio.py +114 -0
  69. pygpt_net/provider/api/google/chat.py +552 -0
  70. pygpt_net/provider/api/google/image.py +287 -0
  71. pygpt_net/provider/api/google/tools.py +222 -0
  72. pygpt_net/provider/api/google/vision.py +129 -0
  73. pygpt_net/provider/{gpt → api/openai}/__init__.py +2 -2
  74. pygpt_net/provider/{gpt → api/openai}/agents/computer.py +1 -1
  75. pygpt_net/provider/{gpt → api/openai}/agents/experts.py +1 -1
  76. pygpt_net/provider/{gpt → api/openai}/agents/response.py +1 -1
  77. pygpt_net/provider/{gpt → api/openai}/assistants.py +1 -1
  78. pygpt_net/provider/{gpt → api/openai}/chat.py +15 -8
  79. pygpt_net/provider/{gpt → api/openai}/completion.py +1 -1
  80. pygpt_net/provider/{gpt → api/openai}/image.py +1 -1
  81. pygpt_net/provider/{gpt → api/openai}/remote_tools.py +1 -1
  82. pygpt_net/provider/{gpt → api/openai}/responses.py +34 -20
  83. pygpt_net/provider/{gpt → api/openai}/store.py +2 -2
  84. pygpt_net/provider/{gpt → api/openai}/vision.py +1 -1
  85. pygpt_net/provider/{gpt → api/openai}/worker/assistants.py +4 -4
  86. pygpt_net/provider/{gpt → api/openai}/worker/importer.py +10 -10
  87. pygpt_net/provider/audio_input/openai_whisper.py +1 -1
  88. pygpt_net/provider/audio_output/google_tts.py +12 -0
  89. pygpt_net/provider/audio_output/openai_tts.py +1 -1
  90. pygpt_net/provider/core/config/patch.py +11 -0
  91. pygpt_net/provider/core/model/patch.py +9 -0
  92. pygpt_net/provider/core/preset/json_file.py +2 -4
  93. pygpt_net/provider/llms/anthropic.py +2 -5
  94. pygpt_net/provider/llms/base.py +4 -3
  95. pygpt_net/provider/llms/openai.py +1 -1
  96. pygpt_net/provider/loaders/hub/image_vision/base.py +1 -1
  97. pygpt_net/ui/dialog/preset.py +71 -55
  98. pygpt_net/ui/main.py +6 -4
  99. pygpt_net/utils.py +9 -0
  100. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/METADATA +42 -48
  101. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/RECORD +115 -107
  102. /pygpt_net/provider/{gpt → api/openai}/agents/__init__.py +0 -0
  103. /pygpt_net/provider/{gpt → api/openai}/agents/client.py +0 -0
  104. /pygpt_net/provider/{gpt → api/openai}/agents/remote_tools.py +0 -0
  105. /pygpt_net/provider/{gpt → api/openai}/agents/utils.py +0 -0
  106. /pygpt_net/provider/{gpt → api/openai}/audio.py +0 -0
  107. /pygpt_net/provider/{gpt → api/openai}/computer.py +0 -0
  108. /pygpt_net/provider/{gpt → api/openai}/container.py +0 -0
  109. /pygpt_net/provider/{gpt → api/openai}/summarizer.py +0 -0
  110. /pygpt_net/provider/{gpt → api/openai}/tools.py +0 -0
  111. /pygpt_net/provider/{gpt → api/openai}/utils.py +0 -0
  112. /pygpt_net/provider/{gpt → api/openai}/worker/__init__.py +0 -0
  113. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/LICENSE +0 -0
  114. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/WHEEL +0 -0
  115. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/entry_points.txt +0 -0

pygpt_net/core/assistants/store.py CHANGED
@@ -106,7 +106,7 @@ class Store:
         :return: store item
         """
         name = "New vector store"
-        vector_store = self.window.core.gpt.store.create_store(name, 0)
+        vector_store = self.window.core.api.openai.store.create_store(name, 0)
         if vector_store is None:
             return None
         store = AssistantStoreItem()
@@ -124,7 +124,7 @@ class Store:
         :param store: store
         :return: updated store or None if failed
         """
-        vector_store = self.window.core.gpt.store.update_store(store.id, store.name, store.expire_days)
+        vector_store = self.window.core.api.openai.store.update_store(store.id, store.name, store.expire_days)
         if vector_store is None:
             return None
         self.items[store.id] = store
@@ -139,7 +139,7 @@ class Store:
         :return: status data, store data
         """
         status = {}
-        data = self.window.core.gpt.store.get_store(id)
+        data = self.window.core.api.openai.store.get_store(id)
         if data is not None:
             status = self.parse_status(data)
         return status, data
@@ -236,7 +236,7 @@ class Store:
         if id in self.items:
             store = self.items[id]
             self.provider.delete_by_id(store.record_id)
-            self.window.core.gpt.store.remove_store(id)
+            self.window.core.api.openai.store.remove_store(id)
             del self.items[id]
             return True
         return False
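
The four hunks above are part of a wider rename in this release: the OpenAI client wrappers move from `window.core.gpt.*` to `window.core.api.openai.*` (see the `provider/{gpt → api/openai}` moves in the file list). Below is a minimal sketch of how downstream code or a plugin could stay compatible with both layouts; `get_openai_store` and its `window` parameter are hypothetical helpers, not part of pygpt-net:

```python
def get_openai_store(window):
    """Return the OpenAI vector-store wrapper on either side of the rename.

    Hypothetical compatibility helper: pygpt-net >= 2.6.30 exposes the wrapper
    as window.core.api.openai.store, older releases as window.core.gpt.store.
    """
    api = getattr(window.core, "api", None)
    if api is not None and hasattr(api, "openai"):
        return api.openai.store   # 2.6.30+ layout
    return window.core.gpt.store  # pre-2.6.30 layout
```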

pygpt_net/core/bridge/bridge.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.15 23:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #

 import time
@@ -59,7 +59,7 @@ class Bridge:
         if self.window.controller.kernel.stopped():
             return False

-        allowed_model_change = MODE_VISION
+        allowed_model_change = [MODE_CHAT]
         is_virtual = False
         force_sync = False

@@ -254,7 +254,7 @@ class Bridge:
             context.mode = MODE_RESEARCH

         # default: OpenAI API call
-        return self.window.core.gpt.quick_call(
+        return self.window.core.api.openai.quick_call(
             context=context,
             extra=extra,
         )

pygpt_net/core/bridge/worker.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.11 14:00:00 #
+# Updated Date: 2025.08.28 20:00:00 #
 # ================================================== #

 from PySide6.QtCore import QObject, Signal, QRunnable, Slot
@@ -17,6 +17,8 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_ASSISTANT,
+    MODE_VISION,
+    MODE_LOOP_NEXT,
 )
 from pygpt_net.core.events import KernelEvent, Event

@@ -29,7 +31,7 @@ class BridgeSignals(QObject):
 class BridgeWorker(QRunnable):
     """Bridge worker"""
     def __init__(self, *args, **kwargs):
-        QRunnable.__init__(self)
+        super().__init__()
         self.signals = BridgeSignals()
         self.args = args
         self.kwargs = kwargs
@@ -64,6 +66,8 @@ class BridgeWorker(QRunnable):
                     extra=self.extra,
                 )
             """
+            elif self.mode == MODE_VISION:
+                raise Exception("Vision mode is deprecated from v2.6.30 and integrated into Chat. ")

            # LlamaIndex: chat with files
            if self.mode == MODE_LLAMA_INDEX:
@@ -87,7 +91,7 @@ class BridgeWorker(QRunnable):
                    self.extra["error"] = str(self.window.core.agents.runner.get_error())

            # Loop: next step
-            elif self.mode == "loop_next":  # virtual mode
+            elif self.mode == MODE_LOOP_NEXT:  # virtual mode
                result = self.window.core.agents.runner.loop.run_next(
                    context=self.context,
                    extra=self.extra,
@@ -98,12 +102,27 @@ class BridgeWorker(QRunnable):
                else:
                    self.extra["error"] = str(self.window.core.agents.runner.get_error())

-            # API OpenAI: chat, completion, vision, image, assistants
+            # API SDK: chat, completion, vision, image, assistants
            else:
-                result = self.window.core.gpt.call(
-                    context=self.context,
-                    extra=self.extra,
-                )
+                sdk = "openai"
+                model = self.context.model
+                if model.provider == "google":
+                    if self.window.core.config.get("api_native_google", False):
+                        sdk = "google"
+
+                # call appropriate SDK
+                if sdk == "google":
+                    # print("Using Google SDK")
+                    result = self.window.core.api.google.call(
+                        context=self.context,
+                        extra=self.extra,
+                    )
+                elif sdk == "openai":
+                    # print("Using OpenAI SDK")
+                    result = self.window.core.api.openai.call(
+                        context=self.context,
+                        extra=self.extra,
+                    )
        except Exception as e:
            if self.signals:
                self.extra["error"] = e
@@ -170,7 +189,7 @@ class BridgeWorker(QRunnable):
        ad_context = self.window.controller.chat.attachment.get_context(ctx, self.context.history)
        ad_mode = self.window.controller.chat.attachment.get_mode()
        if ad_context:
-            self.context.prompt += "\n\n" + ad_context  # append to input text
+            self.context.prompt += f"\n\n{ad_context}"  # append to input text
        if (ad_mode == self.window.controller.chat.attachment.MODE_QUERY_CONTEXT
                or self.mode in [MODE_AGENT_LLAMA, MODE_AGENT_OPENAI]):
            ctx.hidden_input = ad_context  # store for future use, only if query context
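
The largest hunk above adds per-provider SDK routing to `BridgeWorker.run()`: models whose `provider` is `"google"` go through the new native Google client (`core.api.google`) when the new `api_native_google` option is enabled, and everything else keeps using the OpenAI-compatible client (`core.api.openai`). A condensed sketch of that decision, with a hypothetical `pick_sdk` helper standing in for the inline code:

```python
def pick_sdk(window, context) -> str:
    """Mirror of the routing added in BridgeWorker.run() (names from the diff).

    Returns "google" only for models with provider == "google" and the
    "api_native_google" config flag enabled; otherwise "openai".
    """
    model = context.model
    if model.provider == "google" and window.core.config.get("api_native_google", False):
        return "google"
    return "openai"

# Dispatch as in the diff:
#   window.core.api.google.call(context=context, extra=extra)   # sdk == "google"
#   window.core.api.openai.call(context=context, extra=extra)   # sdk == "openai"
```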

pygpt_net/core/debug/console/console.py CHANGED
@@ -69,8 +69,8 @@ class Console:
         elif msg.lower() == "mpkfa":
             self.log("GOD MODE ACTIVATED ;)")
         elif msg == "oclr":
-            if self.window.core.gpt.client:
-                self.window.core.gpt.client.close()
+            if self.window.core.api.openai.client:
+                self.window.core.api.openai.client.close()
                 self.log("OpenAI client closed")
             else:
                 self.log("OpenAI client not initialized")

pygpt_net/core/debug/presets.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.07.30 00:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #

 import os
@@ -62,7 +62,7 @@ class PresetsDebug:
                 MODE_CHAT: preset.chat,
                 MODE_COMPLETION: preset.completion,
                 MODE_IMAGE: preset.img,
-                MODE_VISION: preset.vision,
+                # MODE_VISION: preset.vision,
                 # MODE_LANGCHAIN: preset.langchain,
                 MODE_ASSISTANT: preset.assistant,
                 MODE_LLAMA_INDEX: preset.llama_index,

pygpt_net/core/experts/experts.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.23 15:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #

 import json
@@ -49,7 +49,7 @@ class Experts:
         self.allowed_modes = [
             MODE_CHAT,
             MODE_COMPLETION,
-            MODE_VISION,
+            # MODE_VISION,
             # MODE_LANGCHAIN,
             MODE_LLAMA_INDEX,
             MODE_AUDIO,

pygpt_net/core/idx/llm.py CHANGED
@@ -10,7 +10,7 @@
 # ================================================== #

 import os.path
-from typing import Optional, Union
+from typing import Optional, Union, List, Dict

 from llama_index.core.llms.llm import BaseLLM
 from llama_index.core.multi_modal_llms import MultiModalLLM
@@ -119,6 +119,8 @@ class Llm:
             window=self.window,
             env=env,
         )
+        model_name = self.extract_model_name_from_args(args)
+        self.window.core.idx.log(f"Embeddings: using global provider: {provider}, model_name: {model_name}")
         return self.window.core.llm.llms[provider].get_embeddings_model(
             window=self.window,
             config=args,
@@ -162,8 +164,8 @@ class Llm:

         # try to get custom args from config for the model provider
         is_custom_provider = False
-        default = self.window.core.config.get("llama.idx.embeddings.default", [])
-        for item in default:
+        defaults = self.window.core.config.get("llama.idx.embeddings.default", [])
+        for item in defaults:
             provider = item.get("provider", "")
             if provider and provider == model.provider:
                 is_custom_provider = True
@@ -188,6 +190,7 @@ class Llm:
                         "value": client_args.get("api_key", ""),
                     }
                 )
+                self.window.core.idx.log(f"Embeddings: trying to use {m.provider}, model_name: {model_name}")
                 break

         if is_custom_provider:
@@ -196,5 +199,20 @@ class Llm:
                 config=args,
             )
         if not embed_model:
+            self.window.core.idx.log(f"Embeddings: not configured for {model.provider}. Fallback: using global provider.")
             embed_model = self.get_embeddings_provider()
         return embed_model
+
+    def extract_model_name_from_args(self, args: List[Dict]) -> str:
+        """
+        Extract model name from provider args
+
+        :param args: List of args
+        :return: Model name if configured
+        """
+        model_name = ""
+        for item in args:
+            if item.get("name") in ["model", "model_name"]:
+                model_name = item.get("value")
+                break
+        return model_name
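
The new `extract_model_name_from_args()` helper exists to make the embeddings log lines above informative: it scans the provider args (the same `name`/`value` dictionaries used under `llama_index.args` in models.json) for a `model` or `model_name` entry. A small standalone usage sketch; the argument values below are illustrative, not taken from the package defaults:

```python
# Illustrative args in the models.json "llama_index.args" shape.
args = [
    {"name": "api_key", "value": "sk-...", "type": "str"},
    {"name": "model", "value": "text-embedding-3-small", "type": "str"},
]

def extract_model_name_from_args(args) -> str:
    """Same logic as the new Llm method: first model/model_name entry wins."""
    for item in args:
        if item.get("name") in ["model", "model_name"]:
            return item.get("value")
    return ""

print(extract_model_name_from_args(args))  # -> text-embedding-3-small
```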

pygpt_net/core/modes/modes.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.15 23:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #

 from typing import Dict, List
@@ -53,7 +53,7 @@ class Modes:
             MODE_IMAGE,
             # MODE_LANGCHAIN,
             MODE_LLAMA_INDEX,
-            MODE_VISION,
+            # MODE_VISION,
             MODE_RESEARCH,
             MODE_COMPUTER,
         )

pygpt_net/core/presets/presets.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.15 23:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #

 import copy
@@ -165,8 +165,8 @@ class Presets:
             return MODE_COMPLETION
         if preset.img:
             return MODE_IMAGE
-        if preset.vision:
-            return MODE_VISION
+        # if preset.vision:
+        #     return MODE_VISION
         # if preset.langchain:
         #     return MODE_LANGCHAIN
         if preset.assistant:

pygpt_net/core/tokens/tokens.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.15 23:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #

 from typing import Tuple, List
@@ -36,8 +36,8 @@ from pygpt_net.item.ctx import CtxItem

 CHAT_MODES = [
     MODE_CHAT,
-    MODE_VISION,
-    MODE_LANGCHAIN,
+    # MODE_VISION,
+    # MODE_LANGCHAIN,
     MODE_ASSISTANT,
     MODE_LLAMA_INDEX,
     MODE_AGENT,
@@ -328,7 +328,7 @@ class Tokens:
         model_id = self.window.core.models.get_id(model)
         mode = self.window.core.config.get('mode')
         tokens = 0
-        if mode in [MODE_CHAT, MODE_VISION, MODE_AUDIO, MODE_RESEARCH]:
+        if mode in [MODE_CHAT, MODE_AUDIO, MODE_RESEARCH]:
             tokens += self.from_prompt(system_prompt, "", model_id)
             tokens += self.from_text("system", model_id)
             tokens += self.from_prompt(input_prompt, "", model_id)

pygpt_net/core/types/mode.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.23 21:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #

 MODE_AGENT = "agent"
@@ -22,4 +22,7 @@ MODE_IMAGE = "img"
 MODE_LANGCHAIN = "langchain"
 MODE_LLAMA_INDEX = "llama_index"
 MODE_RESEARCH = "research"
-MODE_VISION = "vision"
+MODE_VISION = "vision"
+
+# virtual modes
+MODE_LOOP_NEXT = "loop_next"

pygpt_net/core/vision/analyzer.py CHANGED
@@ -51,7 +51,7 @@ class Analyzer:

         extra = {}
         output = ""
-        response = self.window.core.gpt.vision.send(context, extra)
+        response = self.window.core.api.openai.vision.send(context, extra)
         if response.choices[0] and response.choices[0].message.content:
             output = response.choices[0].message.content.strip()
         for id in files:

pygpt_net/data/config/config.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.28",
-        "app.version": "2.6.28",
-        "updated_at": "2025-08-27T00:00:00"
+        "version": "2.6.30",
+        "app.version": "2.6.30",
+        "updated_at": "2025-08-29T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
@@ -88,6 +88,7 @@
     "api_key_mistral": "",
     "api_key_voyage": "",
     "api_key_open_router": "",
+    "api_native_google": true,
     "api_proxy": "",
     "api_use_responses": true,
     "api_use_responses_llama": false,
@@ -396,6 +397,8 @@
     "remote_tools.file_search": false,
     "remote_tools.file_search.args": "",
     "remote_tools.computer_use.env": "",
+    "remote_tools.google.web_search": true,
+    "remote_tools.google.code_interpreter": false,
     "send_clear": true,
     "send_mode": 2,
     "store_history": true,

pygpt_net/data/config/models.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.28",
-        "app.version": "2.6.28",
-        "updated_at": "2025-08-27T23:07:35"
+        "version": "2.6.30",
+        "app.version": "2.6.30",
+        "updated_at": "2025-08-29T23:07:35"
     },
     "items": {
         "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
@@ -2565,6 +2565,78 @@
             "provider": "x_ai",
             "tool_calls": true
         },
+        "imagen-3.0-generate-002": {
+            "id": "imagen-3.0-generate-002",
+            "name": "imagen-3.0-generate-002",
+            "mode": [
+                "img"
+            ],
+            "llama_index": {
+                "args": [
+                    {
+                        "name": "model",
+                        "value": "models/imagen-3.0-generate-002",
+                        "type": "str"
+                    }
+                ],
+                "env": [
+                    {
+                        "name": "GOOGLE_API_KEY",
+                        "value": "{api_key_google}",
+                        "type": "str"
+                    }
+                ]
+            },
+            "ctx": 128000,
+            "tokens": 0,
+            "default": false,
+            "input": [
+                "text"
+            ],
+            "output": [
+                "image"
+            ],
+            "extra": {},
+            "imported": true,
+            "provider": "google",
+            "tool_calls": true
+        },
+        "imagen-4.0-generate-001": {
+            "id": "imagen-4.0-generate-001",
+            "name": "imagen-4.0-generate-001",
+            "mode": [
+                "img"
+            ],
+            "llama_index": {
+                "args": [
+                    {
+                        "name": "model",
+                        "value": "models/imagen-4.0-generate-001",
+                        "type": "str"
+                    }
+                ],
+                "env": [
+                    {
+                        "name": "GOOGLE_API_KEY",
+                        "value": "{api_key_google}",
+                        "type": "str"
+                    }
+                ]
+            },
+            "ctx": 128000,
+            "tokens": 0,
+            "default": false,
+            "input": [
+                "text"
+            ],
+            "output": [
+                "image"
+            ],
+            "extra": {},
+            "imported": true,
+            "provider": "google",
+            "tool_calls": true
+        },
         "llama2-uncensored": {
             "id": "llama2-uncensored",
             "name": "llama2-uncensored",

pygpt_net/data/config/modes.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.5.28",
-        "app.version": "2.5.28",
-        "updated_at": "2025-07-08T00:00:00"
+        "version": "2.6.30",
+        "app.version": "2.6.30",
+        "updated_at": "2025-08-28T09:00:00"
     },
     "items": {
         "chat": {
@@ -41,12 +41,6 @@
             "label": "mode.img",
             "default": false
         },
-        "vision": {
-            "id": "vision",
-            "name": "vision",
-            "label": "mode.vision",
-            "default": false
-        },
         "assistant": {
             "id": "assistant",
             "name": "assistant",