pygpt-net 2.5.17__py3-none-any.whl → 2.5.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/common.py +4 -2
  4. pygpt_net/controller/chat/input.py +36 -27
  5. pygpt_net/controller/chat/stream.py +22 -2
  6. pygpt_net/controller/config/placeholder.py +1 -1
  7. pygpt_net/controller/model/__init__.py +1 -1
  8. pygpt_net/controller/model/editor.py +6 -1
  9. pygpt_net/controller/model/importer.py +4 -3
  10. pygpt_net/core/bridge/__init__.py +10 -4
  11. pygpt_net/core/command/__init__.py +10 -1
  12. pygpt_net/core/idx/chat.py +6 -1
  13. pygpt_net/core/image/__init__.py +15 -0
  14. pygpt_net/core/models/__init__.py +14 -6
  15. pygpt_net/core/models/ollama.py +4 -3
  16. pygpt_net/data/config/config.json +7 -3
  17. pygpt_net/data/config/models.json +437 -34
  18. pygpt_net/data/config/modes.json +10 -10
  19. pygpt_net/data/config/settings.json +56 -0
  20. pygpt_net/data/locale/locale.de.ini +1 -1
  21. pygpt_net/data/locale/locale.en.ini +13 -2
  22. pygpt_net/data/locale/locale.es.ini +1 -1
  23. pygpt_net/data/locale/locale.fr.ini +1 -1
  24. pygpt_net/data/locale/locale.pl.ini +1 -1
  25. pygpt_net/data/locale/locale.uk.ini +1 -1
  26. pygpt_net/data/locale/locale.zh.ini +1 -1
  27. pygpt_net/item/model.py +43 -1
  28. pygpt_net/provider/core/config/patch.py +19 -1
  29. pygpt_net/provider/core/model/json_file.py +4 -1
  30. pygpt_net/provider/core/model/patch.py +21 -1
  31. pygpt_net/provider/gpt/__init__.py +31 -6
  32. pygpt_net/provider/gpt/chat.py +2 -2
  33. pygpt_net/provider/gpt/image.py +42 -8
  34. pygpt_net/provider/gpt/responses.py +22 -16
  35. pygpt_net/provider/llms/anthropic.py +3 -1
  36. pygpt_net/provider/llms/google.py +3 -1
  37. pygpt_net/provider/llms/hugging_face.py +3 -1
  38. pygpt_net/provider/llms/hugging_face_api.py +3 -1
  39. pygpt_net/provider/llms/ollama.py +9 -3
  40. pygpt_net/provider/llms/openai.py +7 -1
  41. pygpt_net/ui/dialog/preset.py +1 -1
  42. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/METADATA +25 -7
  43. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/RECORD +46 -46
  44. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/LICENSE +0 -0
  45. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/WHEEL +0 -0
  46. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/gpt/image.py

@@ -6,9 +6,9 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.06.26 18:00:00 #
 # ================================================== #
-
+import base64
 import datetime
 import os
 from typing import Optional, Dict, Any
@@ -132,6 +132,7 @@ class ImageWorker(QObject, QRunnable):
         self.allowed_max_num = {
             "dall-e-2": 4,
             "dall-e-3": 1,
+            "gpt-image-1": 1,
         }
         self.allowed_resolutions = {
             "dall-e-2": [
@@ -144,6 +145,27 @@ class ImageWorker(QObject, QRunnable):
                 "1024x1792",
                 "1024x1024",
             ],
+            "gpt-image-1": [
+                "1536x1024",
+                "1024x1536",
+                "1024x1024",
+                "auto",
+            ],
+        }
+        self.allowed_quality = {
+            "dall-e-2": [
+                "standard",
+            ],
+            "dall-e-3": [
+                "standard",
+                "hd",
+            ],
+            "gpt-image-1": [
+                "auto",
+                "high",
+                "medium",
+                "low",
+            ],
         }

     @Slot()
@@ -188,6 +210,11 @@ class ImageWorker(QObject, QRunnable):
             if resolution not in self.allowed_resolutions[self.model]:
                 resolution = self.allowed_resolutions[self.model][0]

+        quality = self.quality
+        if self.model in self.allowed_quality:
+            if quality not in self.allowed_quality[self.model]:
+                quality = self.allowed_quality[self.model][0]
+
         # send to API
         response = None
         if self.model == "dall-e-2":
@@ -197,12 +224,12 @@ class ImageWorker(QObject, QRunnable):
                 n=self.num,
                 size=resolution,
             )
-        elif self.model == "dall-e-3":
+        elif self.model == "dall-e-3" or self.model == "gpt-image-1":
             response = self.client.images.generate(
                 model=self.model,
                 prompt=self.input_prompt,
                 n=self.num,
-                quality=self.quality,
+                quality=quality,
                 size=resolution,
             )

@@ -215,20 +242,27 @@ class ImageWorker(QObject, QRunnable):
         for i in range(self.num):
             if i >= len(response.data):
                 break
-            url = response.data[i].url
-            res = requests.get(url)

             # generate filename
             name = datetime.date.today().strftime(
                 "%Y-%m-%d") + "_" + datetime.datetime.now().strftime("%H-%M-%S") + "-" \
-                + self.window.core.image.make_safe_filename(self.input_prompt) + "-" + str(i + 1) + ".png"
+                   + self.window.core.image.make_safe_filename(self.input_prompt) + "-" + str(i + 1) + ".png"
             path = os.path.join(self.window.core.config.get_user_dir("img"), name)

             msg = trans('img.status.downloading') + " (" + str(i + 1) + " / " + str(self.num) + ") -> " + str(path)
             self.signals.status.emit(msg)

+            if response.data[i] is None:
+                self.signals.error.emit("API Error: empty image data")
+                return
+            if response.data[i].url:  # dall-e 2 and 3 returns URL
+                res = requests.get(response.data[i].url)
+                data = res.content
+            else:  # gpt-image-1 returns base64 encoded image
+                data = base64.b64decode(response.data[i].b64_json)
+
             # save image
-            if self.window.core.image.save_image(path, res.content):
+            if data and self.window.core.image.save_image(path, data):
                 paths.append(path)
             else:
                 self.signals.error.emit("Error saving image")
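The hunk above makes the worker handle both delivery formats of the Images API: a download URL for the DALL-E models and inline base64 data for gpt-image-1. A minimal standalone sketch of the same logic, assuming the official openai and requests packages (fetch_image_bytes is an illustrative helper, not part of PyGPT):

    import base64

    import requests
    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    def fetch_image_bytes(model: str, prompt: str) -> bytes:
        """Return raw image bytes for either delivery format."""
        response = client.images.generate(model=model, prompt=prompt, n=1, size="1024x1024")
        item = response.data[0]
        if item.url:  # dall-e-2 / dall-e-3 return a downloadable URL
            return requests.get(item.url).content
        return base64.b64decode(item.b64_json)  # gpt-image-1 returns base64 data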
pygpt_net/provider/gpt/responses.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.25 02:00:00 #
+# Updated Date: 2025.06.26 18:00:00 #
 # ================================================== #

 import json
@@ -17,13 +17,11 @@ from pygpt_net.core.types import (
     MODE_CHAT,
     MODE_VISION,
     MODE_AUDIO,
-    MODE_RESEARCH,
 )
 from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem

-from .utils import sanitize_name
 from pygpt_net.item.attachment import AttachmentItem


@@ -38,6 +36,7 @@ class Responses:
         self.input_tokens = 0
         self.audio_prev_id = None
         self.audio_prev_expires_ts = None
+        self.prev_response_id = None

     def send(
         self,
@@ -80,6 +79,7 @@ class Responses:
             user_name=user_name,
             multimodal_ctx=multimodal_ctx,
         )
+
         msg_tokens = self.window.core.tokens.from_messages(
             messages,
             model.id,
@@ -116,9 +116,15 @@ class Responses:
             response_kwargs['reasoning']['effort'] = model.extra["reasoning_effort"]

         # extend tools with external tools
-        if not model.id.startswith("o1") and not model.id.startswith("o3"):
+        if (not model.id.startswith("o1")
+                and not model.id.startswith("o3")):
             if self.window.core.config.get("remote_tools.web_search", False):
                 tools.append({"type": "web_search_preview"})
+            if self.window.core.config.get("remote_tools.image", False):
+                tool = {"type": "image_generation"}
+                if stream:
+                    tool["partial_images"] = 1  # required for streaming
+                tools.append(tool)

         # tool calls are not supported for o1-mini and o1-preview
         if (model.id is not None
@@ -126,18 +132,9 @@ class Responses:
             if len(tools) > 0:
                 response_kwargs['tools'] = tools

-        # audio mode
-        if mode in [MODE_AUDIO]:
-            stream = False
-            voice_id = "alloy"
-            tmp_voice = self.window.core.plugins.get_option("audio_output", "openai_voice")
-            if tmp_voice:
-                voice_id = tmp_voice
-            response_kwargs["modalities"] = ["text", "audio"]
-            response_kwargs["audio"] = {
-                "voice": voice_id,
-                "format": "wav"
-            }
+        # attach previous response ID if available
+        if self.prev_response_id:
+            response_kwargs['previous_response_id'] = self.prev_response_id

         response = client.responses.create(
             input=messages,
@@ -145,6 +142,11 @@ class Responses:
             stream=stream,
             **response_kwargs,
         )
+
+        # store previous response ID
+        if not stream and response:
+            ctx.msg_id = response.id
+
         return response

     def build(
@@ -172,6 +174,7 @@ class Responses:
         :return: messages list
         """
        messages = []
+       self.prev_response_id = None  # reset

        # tokens config
        mode = MODE_CHAT
@@ -240,6 +243,9 @@ class Responses:
                }
                messages.append(msg)

+           if item.msg_id and (item.cmds is None or len(item.cmds) == 0):  # if no cmds before
+               self.prev_response_id = item.msg_id  # previous response ID to use in current input
+
        # use vision and audio if available in current model
        content = str(prompt)
        if MODE_VISION in model.mode:
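The previous_response_id plumbing above lets the Responses API keep conversation state server-side instead of resending the full history. A minimal sketch of that chaining, assuming the official openai package (model name is illustrative):

    from openai import OpenAI

    client = OpenAI()

    first = client.responses.create(model="gpt-4o", input="My name is Ada.")

    # The stored ID carries the prior turn's context forward, so the
    # follow-up request does not need to include the earlier messages.
    followup = client.responses.create(
        model="gpt-4o",
        input="What is my name?",
        previous_response_id=first.id,
    )
    print(followup.output_text)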
pygpt_net/provider/llms/anthropic.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #

 from llama_index.llms.anthropic import Anthropic
@@ -47,4 +47,6 @@ class AnthropicLLM(BaseLLM):
         :return: LLM provider instance
         """
         args = self.parse_args(model.llama_index)
+        if "model" not in args:
+            args["model"] = model.id
         return Anthropic(**args)
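This guard, repeated across the provider classes below, fills in the model ID from the ModelItem only when the parsed user args omit it, so explicit user configuration always wins. For illustration, dict.setdefault would express the same fallback in one line:

    # equivalent to the added guard: a user-supplied "model" wins, model.id fills the gap
    args.setdefault("model", model.id)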
pygpt_net/provider/llms/google.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.01.16 01:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #

 from typing import Optional, List, Dict
@@ -51,6 +51,8 @@ class GoogleLLM(BaseLLM):
         :return: LLM provider instance
         """
         args = self.parse_args(model.llama_index)
+        if "model" not in args:
+            args["model"] = model.id
         return Gemini(**args)

     def get_embeddings_model(
pygpt_net/provider/llms/hugging_face.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #

 from langchain_community.llms import HuggingFaceHub
@@ -39,6 +39,8 @@ class HuggingFaceLLM(BaseLLM):
         :return: LLM provider instance
         """
         args = self.parse_args(model.langchain)
+        if "model" not in args:
+            args["model"] = model.id
         return HuggingFaceHub(**args)

     def chat(
pygpt_net/provider/llms/hugging_face_api.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.12.14 22:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #

 import os
@@ -44,6 +44,8 @@ class HuggingFaceApiLLM(BaseLLM):
         :return: LLM provider instance
         """
         args = self.parse_args(model.llama_index)
+        if "model" not in args:
+            args["model"] = model.id
         return HuggingFaceInferenceAPI(**args)

     def get_embeddings_model(
pygpt_net/provider/llms/ollama.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.24 16:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #

 import os
@@ -66,6 +66,8 @@ class OllamaLLM(BaseLLM):
         :return: LLM provider instance
         """
         args = self.parse_args(model.langchain)
+        if "model" not in args:
+            args["model"] = model.id
         return ChatOllama(**args)

     def llama(
@@ -87,7 +89,10 @@ class OllamaLLM(BaseLLM):
         if "request_timeout" not in args:
             args["request_timeout"] = 120
         if 'OLLAMA_API_BASE' in os.environ:
-            args["base_url"] = os.environ['OLLAMA_API_BASE']
+            if "base_url" not in args:
+                args["base_url"] = os.environ['OLLAMA_API_BASE']
+        if "model" not in args:
+            args["model"] = model.id
         return Ollama(**args)

     def get_embeddings_model(
@@ -108,7 +113,8 @@ class OllamaLLM(BaseLLM):
             "args": config,
         })
         if 'OLLAMA_API_BASE' in os.environ:
-            args["base_url"] = os.environ['OLLAMA_API_BASE']
+            if "base_url" not in args:
+                args["base_url"] = os.environ['OLLAMA_API_BASE']
         return OllamaEmbedding(**args)

     def init_embeddings(
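Taken together, the Ollama changes make every default non-destructive: explicit args always win, and the OLLAMA_API_BASE environment override no longer clobbers a user-supplied base_url. A condensed sketch of the resulting precedence (apply_ollama_defaults is a hypothetical helper, extracted here for illustration):

    import os

    def apply_ollama_defaults(args: dict, model_id: str) -> dict:
        # explicit user args always win; the env var and model ID only fill gaps
        if "request_timeout" not in args:
            args["request_timeout"] = 120
        if "OLLAMA_API_BASE" in os.environ and "base_url" not in args:
            args["base_url"] = os.environ["OLLAMA_API_BASE"]
        if "model" not in args:
            args["model"] = model_id
        return args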
pygpt_net/provider/llms/openai.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.01.16 01:00:00 #
+# Updated Date: 2025.06.26 16:00:00 #
 # ================================================== #

 from typing import Optional, List, Dict
@@ -50,6 +50,8 @@ class OpenAILLM(BaseLLM):
         :return: LLM provider instance
         """
         args = self.parse_args(model.langchain)
+        if "model" not in args:
+            args["model"] = model.id
         return OpenAI(**args)

     def chat(
@@ -84,6 +86,8 @@ class OpenAILLM(BaseLLM):
         :return: LLM provider instance
         """
         args = self.parse_args(model.llama_index)
+        if "model" not in args:
+            args["model"] = model.id
         return LlamaOpenAI(**args)

     def llama_multimodal(
@@ -101,6 +105,8 @@ class OpenAILLM(BaseLLM):
         :return: LLM provider instance
         """
         args = self.parse_args(model.llama_index)
+        if "model" not in args:
+            args["model"] = model.id
         return LlamaOpenAIMultiModal(**args)

     def get_embeddings_model(
pygpt_net/ui/dialog/preset.py

@@ -103,12 +103,12 @@ class Preset(BaseConfigDialog):
         # modes
         mode_keys = [
             MODE_CHAT,
+            MODE_LLAMA_INDEX,
             MODE_AUDIO,
             MODE_COMPLETION,
             MODE_IMAGE,
             MODE_VISION,
             MODE_LANGCHAIN,
-            MODE_LLAMA_INDEX,
             MODE_AGENT_LLAMA,
             MODE_AGENT,
             MODE_EXPERT,
{pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: pygpt-net
-Version: 2.5.17
+Version: 2.5.19
 Summary: Desktop AI Assistant powered by models: OpenAI o1, GPT-4o, GPT-4, GPT-4 Vision, GPT-3.5, DALL-E 3, Llama 3, Mistral, Gemini, Claude, DeepSeek, Bielik, and other models supported by Langchain, Llama Index, and Ollama. Features include chatbot, text completion, image generation, vision analysis, speech-to-text, internet access, file handling, command execution and more.
 License: MIT
 Keywords: py_gpt,py-gpt,pygpt,desktop,app,o1,gpt,gpt4,gpt-4o,gpt-4v,gpt3.5,gpt-4,gpt-4-vision,gpt-3.5,llama3,mistral,gemini,deepseek,bielik,claude,tts,whisper,vision,chatgpt,dall-e,chat,chatbot,assistant,text completion,image generation,ai,api,openai,api key,langchain,llama-index,ollama,presets,ui,qt,pyside
@@ -100,7 +100,7 @@ Description-Content-Type: text/markdown

 [![pygpt](https://snapcraft.io/pygpt/badge.svg)](https://snapcraft.io/pygpt)

-Release: **2.5.17** | build: **2025-06-25** | Python: **>=3.10, <3.13**
+Release: **2.5.19** | build: **2025-06-27** | Python: **>=3.10, <3.13**

 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -462,9 +462,9 @@ Your API keys will be available here:

 **+ Inline Vision and Image generation**

-This mode in **PyGPT** mirrors `ChatGPT`, allowing you to chat with models such as `o1`, `GPT-4`, `GPT-4o` and `GPT-3.5`. It works by using the `ChatCompletion` OpenAI API.
+This mode in **PyGPT** mirrors `ChatGPT`, allowing you to chat with models such as `o1`, `o3`, `GPT-4`, `GPT-4o` and `GPT-3.5`. It works by using the `Responses` and `ChatCompletions` OpenAI API. You can select the API to use in: `Config -> Settings -> API Keys -> OpenAI`.

-**Tip: This mode directly uses the OpenAI API. If you want to use models other than GPT (such as Gemini, Claude, or Llama3), use `Chat with Files` mode.**
+**Tip: This mode directly uses the OpenAI API. Other models, such as Gemini, Claude, or Llama3, are supported in Chat mode via LlamaIndex, which the application switches to in the background when working with models other than OpenAI.**

 The main part of the interface is a chat window where you see your conversations. Below it is a message box for typing. On the right side, you can set up or change the model and system prompt. You can also save these settings as presets to easily switch between models or tasks.

@@ -520,7 +520,7 @@ From version `2.0.107` the `davinci` models are deprecated and has been replaced

 ### DALL-E 3

-**PyGPT** enables quick and easy image creation with `DALL-E 3`.
+**PyGPT** enables quick and easy image creation with `DALL-E 3` or `gpt-image-1`.
 The older model version, `DALL-E 2`, is also accessible. Generating images is akin to a chat conversation - a user's prompt triggers the generation, followed by downloading, saving to the computer,
 and displaying the image onscreen. You can send raw prompt to `DALL-E` in `Image generation` mode or ask the model for the best prompt.

@@ -1003,7 +1003,7 @@ The name of the currently active profile is shown as (Profile Name) in the windo

 ## Built-in models

-PyGPT has built-in support for models (as of 2025-06-24):
+PyGPT has built-in support for models (as of 2025-06-27):

 - `bielik-11b-v2.3-instruct:Q4_K_M`
 - `chatgpt-4o-latest`
@@ -1049,6 +1049,10 @@ PyGPT has built-in support for models (as of 2025-06-24):
 - `gpt-4o-2024-11-20`
 - `gpt-4o-audio-preview`
 - `gpt-4o-mini`
+- `grok-3`
+- `grok-3-fast`
+- `grok-3-mini`
+- `grok-3-mini-fast`
 - `llama2-uncensored`
 - `llama3.1`
 - `llama3.1:405b`
@@ -1064,6 +1068,8 @@ PyGPT has built-in support for models (as of 2025-06-24):
 - `qwen:7b`
 - `qwen2:7b`
 - `qwen2.5-coder:7b`
+- `qwen3:8b`
+- `qwen3:30b-a3b`
 - `r1` (Perplexity)
 - `sonar` (Perplexity)
 - `sonar-deep-research` (Perplexity)
@@ -1095,7 +1101,7 @@ There is built-in support for those LLM providers:

 How to use locally installed Llama 3 or Mistral models:

-1) Choose a working mode: `Chat with Files`.
+1) Choose a working mode: `Chat` or `Chat with Files`.

 2) On the models list - select, edit, or add a new model (with `ollama` provider). You can edit the model settings through the menu `Config -> Models -> Edit`, then configure the model parameters in the `advanced` section.

@@ -4124,6 +4130,18 @@ may consume additional tokens that are not displayed in the main window.

 ## Recent changes:

+**2.5.19 (2025-06-27)**
+
+- Added option to enable/disable `Responses API` in `Config -> Settings -> API Keys -> OpenAI`.
+- Added support for xAI / Grok models, added grok-3 models.
+
+**2.5.18 (2025-06-26)**
+
+- Non-GPT models are now available in standard Chat mode.
+- Added a new remote tool: `image_generation` in Responses API -> disabled by default, enable in `Config -> Settings -> Remote Tools`. Enables native image generation and editing of uploaded images in Chat mode.
+- Added a new model `gpt-image-1` and improved image generation.
+- Other small fixes.
+
 **2.5.17 (2025-06-25)**

 - Added settings for enable/disable Remote Tools via Responses API in Chat mode: Config -> Settings -> Remote tools. Currently only web-search-preview tool is available, rest of tools coming soon.
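The 2.5.18 `image_generation` remote tool corresponds to the tool list built in the responses.py hunk above. A standalone sketch of the equivalent Responses API call, assuming the official openai package (model name is illustrative):

    from openai import OpenAI

    client = OpenAI()

    # non-streaming call; the diff adds "partial_images": 1 to the image tool
    # only when streaming, where the API requires it for partial image events
    tools = [{"type": "web_search_preview"}, {"type": "image_generation"}]
    response = client.responses.create(
        model="gpt-4o",
        input="Generate a small pixel-art rocket.",
        tools=tools,
    )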