pygpt-net 2.5.17__py3-none-any.whl → 2.5.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/common.py +4 -2
  4. pygpt_net/controller/chat/input.py +36 -27
  5. pygpt_net/controller/chat/stream.py +22 -2
  6. pygpt_net/controller/config/placeholder.py +1 -1
  7. pygpt_net/controller/model/__init__.py +1 -1
  8. pygpt_net/controller/model/editor.py +6 -1
  9. pygpt_net/controller/model/importer.py +4 -3
  10. pygpt_net/core/bridge/__init__.py +10 -4
  11. pygpt_net/core/command/__init__.py +10 -1
  12. pygpt_net/core/idx/chat.py +6 -1
  13. pygpt_net/core/image/__init__.py +15 -0
  14. pygpt_net/core/models/__init__.py +14 -6
  15. pygpt_net/core/models/ollama.py +4 -3
  16. pygpt_net/data/config/config.json +7 -3
  17. pygpt_net/data/config/models.json +437 -34
  18. pygpt_net/data/config/modes.json +10 -10
  19. pygpt_net/data/config/settings.json +56 -0
  20. pygpt_net/data/locale/locale.de.ini +1 -1
  21. pygpt_net/data/locale/locale.en.ini +13 -2
  22. pygpt_net/data/locale/locale.es.ini +1 -1
  23. pygpt_net/data/locale/locale.fr.ini +1 -1
  24. pygpt_net/data/locale/locale.pl.ini +1 -1
  25. pygpt_net/data/locale/locale.uk.ini +1 -1
  26. pygpt_net/data/locale/locale.zh.ini +1 -1
  27. pygpt_net/item/model.py +43 -1
  28. pygpt_net/provider/core/config/patch.py +19 -1
  29. pygpt_net/provider/core/model/json_file.py +4 -1
  30. pygpt_net/provider/core/model/patch.py +21 -1
  31. pygpt_net/provider/gpt/__init__.py +31 -6
  32. pygpt_net/provider/gpt/chat.py +2 -2
  33. pygpt_net/provider/gpt/image.py +42 -8
  34. pygpt_net/provider/gpt/responses.py +22 -16
  35. pygpt_net/provider/llms/anthropic.py +3 -1
  36. pygpt_net/provider/llms/google.py +3 -1
  37. pygpt_net/provider/llms/hugging_face.py +3 -1
  38. pygpt_net/provider/llms/hugging_face_api.py +3 -1
  39. pygpt_net/provider/llms/ollama.py +9 -3
  40. pygpt_net/provider/llms/openai.py +7 -1
  41. pygpt_net/ui/dialog/preset.py +1 -1
  42. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/METADATA +25 -7
  43. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/RECORD +46 -46
  44. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/LICENSE +0 -0
  45. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/WHEEL +0 -0
  46. {pygpt_net-2.5.17.dist-info → pygpt_net-2.5.19.dist-info}/entry_points.txt +0 -0
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.5.14",
4
- "app.version": "2.5.14",
5
- "updated_at": "2025-06-23T00:00:00"
3
+ "version": "2.5.19",
4
+ "app.version": "2.5.19",
5
+ "updated_at": "2025-06-27T00:00:00"
6
6
  },
7
7
  "items": {
8
8
  "chat": {
@@ -11,6 +11,12 @@
11
11
  "label": "mode.chat",
12
12
  "default": true
13
13
  },
14
+ "llama_index": {
15
+ "id": "llama_index",
16
+ "name": "Index (LlamaIndex)",
17
+ "label": "mode.llama_index",
18
+ "default": false
19
+ },
14
20
  "audio": {
15
21
  "id": "audio",
16
22
  "name": "audio",
@@ -52,13 +58,7 @@
52
58
  "name": "langchain",
53
59
  "label": "mode.langchain",
54
60
  "default": false
55
- },
56
- "llama_index": {
57
- "id": "llama_index",
58
- "name": "Index (LlamaIndex)",
59
- "label": "mode.llama_index",
60
- "default": false
61
- },
61
+ },
62
62
  "agent_llama": {
63
63
  "id": "agent_llama",
64
64
  "name": "Agent (LlamaIndex)",
@@ -69,6 +69,21 @@
69
69
  "advanced": false,
70
70
  "tab": "OpenAI"
71
71
  },
72
+ "api_use_responses": {
73
+ "section": "api_keys",
74
+ "type": "bool",
75
+ "slider": false,
76
+ "label": "settings.api_use_responses",
77
+ "description": "settings.api_use_responses.desc",
78
+ "value": true,
79
+ "min": null,
80
+ "max": null,
81
+ "multiplier": null,
82
+ "step": null,
83
+ "secret": false,
84
+ "advanced": false,
85
+ "tab": "OpenAI"
86
+ },
72
87
  "api_key_google": {
73
88
  "section": "api_keys",
74
89
  "type": "text",
@@ -145,6 +160,25 @@
145
160
  "advanced": false,
146
161
  "tab": "DeepSeek"
147
162
  },
163
+ "api_key_xai": {
164
+ "section": "api_keys",
165
+ "type": "text",
166
+ "slider": false,
167
+ "label": "settings.api_key.xai",
168
+ "description": "settings.api_key.xai.desc",
169
+ "value": "",
170
+ "min": null,
171
+ "max": null,
172
+ "multiplier": null,
173
+ "step": null,
174
+ "extra": {
175
+ "bold": true
176
+ },
177
+ "secret": true,
178
+ "persist": true,
179
+ "advanced": false,
180
+ "tab": "xAI"
181
+ },
148
182
  "api_azure_version": {
149
183
  "section": "api_keys",
150
184
  "type": "text",
@@ -1027,6 +1061,11 @@
1027
1061
  "step": null,
1028
1062
  "advanced": false,
1029
1063
  "keys": [
1064
+ {"auto": "[gpt-image-1] auto"},
1065
+ {"1024x1024": "[gpt-image-1] 1024x1024"},
1066
+ {"1536x1024": "[gpt-image-1] 1536x1024"},
1067
+ {"1024x1536": "[gpt-image-1] 1024x1536"},
1068
+ {"1536x1024": "[gpt-image-1] 1536x1024"},
1030
1069
  {"1792x1024": "[DALL-E 3] 1792x1024"},
1031
1070
  {"1024x1792": "[DALL-E 3] 1024x1792"},
1032
1071
  {"1024x1024": "[DALL-E 3] 1024x1024"},
@@ -1047,6 +1086,10 @@
1047
1086
  "step": null,
1048
1087
  "advanced": false,
1049
1088
  "keys": [
1089
+ {"auto": "[gpt-image-1] auto"},
1090
+ {"high": "[gpt-image-1] high"},
1091
+ {"medium": "[gpt-image-1] medium"},
1092
+ {"low": "[gpt-image-1] low"},
1050
1093
  {"standard": "[DALL-E 3] standard"},
1051
1094
  {"hd": "[DALL-E 3] hd"},
1052
1095
  {"standard": "[DALL-E 2] standard"}
@@ -1211,6 +1254,19 @@
1211
1254
  "step": null,
1212
1255
  "advanced": false
1213
1256
  },
1257
+ "remote_tools.image": {
1258
+ "section": "remote_tools",
1259
+ "type": "bool",
1260
+ "slider": false,
1261
+ "label": "settings.remote_tools.image",
1262
+ "description": "settings.remote_tools.image.desc",
1263
+ "value": true,
1264
+ "min": null,
1265
+ "max": null,
1266
+ "multiplier": null,
1267
+ "step": null,
1268
+ "advanced": false
1269
+ },
1214
1270
  "llama.idx.list": {
1215
1271
  "section": "llama-index",
1216
1272
  "type": "dict",
@@ -989,7 +989,7 @@ tip.tokens.input = Token: Benutzereingabeaufforderung + Systemaufforderung + Kon
989
989
  tip.toolbox.assistants = Die Liste der Assistenten zeigt die erstellten Assistenten, die auf dem entfernten Server arbeiten. Alle Änderungen werden mit dem entfernten Assistenten synchronisiert.
990
990
  tip.toolbox.ctx = Erstellen Sie so viele Gesprächskontexte, wie Sie benötigen; Sie können jederzeit zu ihnen zurückkehren.
991
991
  tip.toolbox.indexes = Durch das Indizieren von Gesprächen und Dateien können Sie das verfügbare Wissen mit Ihren eigenen Daten und Gesprächsverläufen erweitern.
992
- tip.toolbox.mode = Sie können den Arbeitsmodus und das Modell in Echtzeit ändern. Um andere Modelle als GPT zu verwenden, nutzen Sie den Modus Chat mit Dateien.
992
+ tip.toolbox.mode = Sie können den Arbeitsmodus und das Modell in Echtzeit ändern.
993
993
  tip.toolbox.presets = Erstellen Sie Voreinstellungen mit verschiedenen Konfigurationen, um schnell zwischen verschiedenen Einstellungen wie dem Systemprompt und anderen zu wechseln.
994
994
  tip.toolbox.prompt = Die aktuelle Systemeingabeaufforderung kann in Echtzeit geändert werden. Um Werkzeuge aus Plugins zu aktivieren, aktivieren Sie die Option "+ Werkzeuge."
995
995
  toolbox.agent.auto_stop.label = Automatischer Stopp
@@ -816,7 +816,7 @@ model.llama_index.mode.desc = Available sub-modes: chat
816
816
  model.llama_index.provider = [LlamaIndex] Provider
817
817
  model.llama_index.provider.desc = LLM provider to use in "Chat with Files" mode
818
818
  model.mode = Mode(s)
819
- model.mode.desc = Available modes: chat, completion, img, audio, vision, assistant, langchain, llama_index, agent, agent_llama, research
819
+ model.mode.desc = Available modes: chat (Chat), llama_index (Chat with Files), audio (Chat with Audio), research (Research), completion (Completion), img (Image), vision (Vision), assistant (Assistants), langchain (Langchain), agent_llama (Agent LlamaIndex), agent (Agent Autonomous), expert (Experts)
820
820
  model.name = Name
821
821
  models.importer.all = Show all
822
822
  models.importer.available.label = Ollama models
@@ -829,6 +829,8 @@ models.importer.error.remove.no_model = No model selected to remove
829
829
  models.importer.error.remove.not_exists = Model already exists in current list
830
830
  models.importer.loaded = Ollama models loaded successfully.
831
831
  models.importer.status.imported = Models imported successfully.
832
+ model.openai = OpenAI API
833
+ model.openai.desc = Supports OpenAI API (or compatible)
832
834
  model.tokens = Output tokens
833
835
  model.tokens.desc = Max model output tokens
834
836
  mode.research = Research (Perplexity)
@@ -951,6 +953,8 @@ settings.api_azure_version = OpenAI API version
951
953
  settings.api_azure_version.desc = Azure OpenAI API version, e.g. 2023-07-01-preview
952
954
  settings.api_endpoint = API Endpoint
953
955
  settings.api_endpoint.desc = OpenAI API (or compatible) endpoint URL, default: https://api.openai.com/v1
956
+ settings.api_endpoint_xai = API Endpoint
957
+ settings.api_endpoint_xai.desc = xAI API endpoint URL, default: https://api.x.ai/v1
954
958
  settings.api_key = OpenAI API KEY
955
959
  settings.api_key.anthropic = Anthropic API KEY
956
960
  settings.api_key.anthropic.desc = Required for the Anthropic API and Claude models.
@@ -963,8 +967,12 @@ settings.api_key.hugging_face = HuggingFace API KEY
963
967
  settings.api_key.hugging_face.desc = Required for the HuggingFace API.
964
968
  settings.api_key.perplexity = Perplexity API KEY
965
969
  settings.api_key.perplexity.desc = Required for the Perplexity API.
970
+ settings.api_key.xai = xAI API KEY
971
+ settings.api_key.xai.desc = Required for the xAI API and Grok models.
966
972
  settings.api_proxy = Proxy address
967
973
  settings.api_proxy.desc = Optional, proxy for OpenAI API, e.g. http://proxy.example.com or socks5://user:pass@host:port
974
+ settings.api_use_responses = Use Responses API
975
+ settings.api_use_responses.desc = Use Responses API instead of ChatCompletions API
968
976
  settings.app.env = Application environment (os.environ)
969
977
  settings.app.env.desc = Additional environment vars to set on application start
970
978
  settings.audio.input.channels = Channels
@@ -1129,6 +1137,8 @@ settings.prompt.img = DALL-E: image generation
1129
1137
  settings.prompt.img.desc = Prompt for generating prompts for DALL-E (if raw-mode is disabled). Image mode only.
1130
1138
  settings.remote_tools.web_search = Web Search
1131
1139
  settings.remote_tools.web_search.desc = Enable `web_search` remote tool in Chat mode / via OpenAI Responses API.
1140
+ settings.remote_tools.image = Image generation
1141
+ settings.remote_tools.image.desc = Enable `image_generation` remote tool in Chat mode / via OpenAI Responses API.
1132
1142
  settings.render.code_syntax = Code syntax highlight
1133
1143
  settings.render.engine = Rendering engine
1134
1144
  settings.render.open_gl = OpenGL hardware acceleration
@@ -1148,6 +1158,7 @@ settings.section.api_keys.google = Google
1148
1158
  settings.section.api_keys.huggingface = HuggingFace
1149
1159
  settings.section.api_keys.openai = OpenAI
1150
1160
  settings.section.api_keys.perplexity = Perplexity
1161
+ settings.section.api_keys.xai = xAI
1151
1162
  settings.section.audio = Audio
1152
1163
  settings.section.ctx = Context
1153
1164
  settings.section.developer = Developer
@@ -1238,7 +1249,7 @@ tip.tokens.input = Tokens: input prompt + system prompt + context + extra + atta
1238
1249
  tip.toolbox.assistants = The list of assistants shows the assistants created and operating on the remote server. Any changes will be synchronized with the remote assistant.
1239
1250
  tip.toolbox.ctx = Create as many conversation contexts as you need; you can return to them at any time.
1240
1251
  tip.toolbox.indexes = By indexing conversations and files, you can expand the available knowledge with your own data and conversation history.
1241
- tip.toolbox.mode = You can change the working mode and model in real-time. To use models other than GPT, use the Chat with Files mode.
1252
+ tip.toolbox.mode = You can change the working mode and model in real-time.
1242
1253
  tip.toolbox.presets = Create presets with different configurations to quickly switch between various settings, such as the system prompt and others.
1243
1254
  tip.toolbox.prompt = The current system prompt can be modified in real-time. To enable tools from plugins, enable the option "+ Tools."
1244
1255
  toolbox.agent.auto_stop.label = Auto-stop
@@ -989,7 +989,7 @@ tip.tokens.input = Fichas: indicación del usuario + indicación del sistema + c
989
989
  tip.toolbox.assistants = La lista de asistentes muestra los asistentes creados y operando en el servidor remoto. Cualquier cambio se sincronizará con el asistente remoto.
990
990
  tip.toolbox.ctx = Crea tantos contextos de conversación como necesites; puedes volver a ellos en cualquier momento.
991
991
  tip.toolbox.indexes = Al indexar conversaciones y archivos, puedes ampliar el conocimiento disponible con tus propios datos e historial de conversaciones.
992
- tip.toolbox.mode = Puedes cambiar el modo de trabajo y el modelo en tiempo real. Para usar modelos distintos a GPT, utiliza el modo Chat con archivos.
992
+ tip.toolbox.mode = Puedes cambiar el modo de trabajo y el modelo en tiempo real.
993
993
  tip.toolbox.presets = Crea preajustes con diferentes configuraciones para cambiar rápidamente entre varios ajustes, como el prompt del sistema y otros.
994
994
  tip.toolbox.prompt = La solicitud del sistema actual se puede modificar en tiempo real. Para habilitar herramientas desde complementos, habilite la opción "+ Herramientas."
995
995
  toolbox.agent.auto_stop.label = Auto-parada
@@ -989,7 +989,7 @@ tip.tokens.input = Jetons: invite de l'utilisateur + invite système + contexte
989
989
  tip.toolbox.assistants = La liste des assistants montre les assistants créés et opérant sur le serveur distant. Tout changement sera synchronisé avec l'assistant distant.
990
990
  tip.toolbox.ctx = Créez autant de contextes de conversation que vous en avez besoin ; vous pouvez y revenir à tout moment.
991
991
  tip.toolbox.indexes = En indexant des conversations et des fichiers, vous pouvez étendre les connaissances disponibles avec vos propres données et historique de conversation.
992
- tip.toolbox.mode = Vous pouvez changer le mode de travail et le modèle en temps réel. Pour utiliser des modèles autres que GPT, utilisez le mode Chat avec fichiers.
992
+ tip.toolbox.mode = Vous pouvez changer le mode de travail et le modèle en temps réel.
993
993
  tip.toolbox.presets = Créez des préréglages avec différentes configurations pour basculer rapidement entre divers réglages, tels que l'invite système et d'autres.
994
994
  tip.toolbox.prompt = L'invite système actuelle peut être modifiée en temps réel. Pour activer les outils à partir des plugins, activez l'option "+ Outils."
995
995
  toolbox.agent.auto_stop.label = Arrêt automatique
@@ -990,7 +990,7 @@ tip.tokens.input = Tokeny: prompt użytkownika + systemowy prompt + kontekst + d
990
990
  tip.toolbox.assistants = Lista asystentów pokazuje asystentów stworzonych i działających na zdalnym serwerze. Wszelkie zmiany zostaną zsynchronizowane ze zdalnym asystentem.
991
991
  tip.toolbox.ctx = Twórz tyle kontekstów rozmów, ile potrzebujesz; możesz do nich wrócić w dowolnym momencie.
992
992
  tip.toolbox.indexes = Indeksując rozmowy i pliki, możesz rozszerzyć dostępną wiedzę o własne dane i historię rozmów.
993
- tip.toolbox.mode = Możesz zmienić tryb pracy i model w czasie rzeczywistym. Aby użyć modeli innych niż GPT, użyj trybu Czat z plikami.
993
+ tip.toolbox.mode = Możesz zmienić tryb pracy i model w czasie rzeczywistym.
994
994
  tip.toolbox.presets = Twórz presety z różnymi konfiguracjami, aby szybko przełączać się między różnymi ustawieniami, takimi jak prompt systemowy i inne.
995
995
  tip.toolbox.prompt = Aktualna podpowiedź systemu może być modyfikowana w czasie rzeczywistym. Aby włączyć narzędzia z wtyczek, włącz opcję "+ Narzędzia."
996
996
  toolbox.agent.auto_stop.label = Auto-stop
@@ -989,7 +989,7 @@ tip.tokens.input = Токени: запит користувача + систе
989
989
  tip.toolbox.assistants = Список асистентів показує асистентів, створених і що працюють на віддаленому сервері. Будь-які зміни будуть синхронізовані з віддаленим асистентом.
990
990
  tip.toolbox.ctx = Створіть стільки контекстів розмов, як вам потрібно; ви можете повернутися до них у будь-який час.
991
991
  tip.toolbox.indexes = Індексуючи розмови та файли, ви можете розширити доступні знання зі своїми власними даними та історією розмов.
992
- tip.toolbox.mode = Ви можете змінити робочий режим та модель в реальному часі. Щоб використовувати моделі, відмінні від GPT, використовуйте режим Чат з файлами.
992
+ tip.toolbox.mode = Ви можете змінити робочий режим та модель в реальному часі.
993
993
  tip.toolbox.presets = Створіть пресети з різними конфігураціями для швидкого перемикання між різними налаштуваннями, такими як системний сповіщення та інші.
994
994
  tip.toolbox.prompt = Поточну системну підказку можна змінювати в режимі реального часу. Щоб увімкнути інструменти з плагінів, увімкніть опцію "+ Інструменти."
995
995
  toolbox.agent.auto_stop.label = Авто-стоп
@@ -1104,7 +1104,7 @@ tip.tokens.input = 代币:用户输入提示 + 系统提示 + 上下文 + 额
1104
1104
  tip.toolbox.assistants = 助手列表顯示在遠程服務器上創建和運行的助手。任何更改都將與遠程助手同步。
1105
1105
  tip.toolbox.ctx = 創建所需數量的對話上下文;您隨時可以返回它們。
1106
1106
  tip.toolbox.indexes = 通過索引對話和文件,您可以用自己的數據和對話歷史擴展可用知識。
1107
- tip.toolbox.mode = 您可以實時更換工作模式和模型。要使用非GPT模型,请使用“文件聊天模式”模式。
1107
+ tip.toolbox.mode = 您可以實時更換工作模式和模型。
1108
1108
  tip.toolbox.presets = 創建具有不同配置的預設,以便快速切換不同設置,例如系統提示等。
1109
1109
  tip.toolbox.prompt = 当前系统提示可以实时修改。要启用来自插件的工具,请启用“+ 工具”选项。
1110
1110
  toolbox.agent.auto_stop.label = 自動停止
pygpt_net/item/model.py CHANGED
@@ -6,11 +6,13 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.06.24 16:00:00 #
9
+ # Updated Date: 2025.06.27 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
13
13
 
14
+ from pygpt_net.core.types import MODE_CHAT
15
+
14
16
 
15
17
  class ModelItem:
16
18
  def __init__(self, id=None):
@@ -29,6 +31,7 @@ class ModelItem:
29
31
  self.tokens = 0
30
32
  self.default = False
31
33
  self.imported = False
34
+ self.openai = False # OpenAI API supported model
32
35
  self.extra = {}
33
36
 
34
37
  def from_dict(self, data: dict):
@@ -54,6 +57,8 @@ class ModelItem:
54
57
  self.extra = data['extra']
55
58
  if 'imported' in data:
56
59
  self.imported = data['imported']
60
+ if 'openai' in data:
61
+ self.openai = data['openai']
57
62
 
58
63
  # multimodal
59
64
  if 'multimodal' in data:
@@ -105,6 +110,7 @@ class ModelItem:
105
110
  data['multimodal'] = ','.join(self.multimodal)
106
111
  data['extra'] = self.extra
107
112
  data['imported'] = self.imported
113
+ data['openai'] = self.openai
108
114
 
109
115
  data['langchain.provider'] = None
110
116
  data['langchain.mode'] = ""
@@ -178,6 +184,9 @@ class ModelItem:
178
184
  :param mode: Mode
179
185
  :return: True if supported
180
186
  """
187
+ if mode == MODE_CHAT and not self.is_openai():
188
+ # only OpenAI models are supported for chat mode
189
+ return False
181
190
  return mode in self.mode
182
191
 
183
192
  def is_multimodal(self) -> bool:
@@ -188,6 +197,29 @@ class ModelItem:
188
197
  """
189
198
  return len(self.multimodal) > 0
190
199
 
200
+ def is_openai(self) -> bool:
201
+ """
202
+ Check if model is supported by OpenAI API
203
+
204
+ :return: True if OpenAI
205
+ """
206
+ return self.openai
207
+
208
+ def is_gpt(self) -> bool:
209
+ """
210
+ Check if model is supported by OpenAI Responses API
211
+
212
+ :return: True if OpenAI Responses API compatible
213
+ """
214
+ if (self.id.startswith("gpt-")
215
+ or self.id.startswith("chatgpt")
216
+ or self.id.startswith("o1")
217
+ or self.id.startswith("o3")
218
+ or self.id.startswith("o4")
219
+ or self.id.startswith("o5")):
220
+ return True
221
+ return False
222
+
191
223
  def is_ollama(self) -> bool:
192
224
  """
193
225
  Check if model is Ollama
@@ -196,6 +228,8 @@ class ModelItem:
196
228
  """
197
229
  if self.llama_index is None:
198
230
  return False
231
+ if self.llama_index.get("provider") is None:
232
+ return False
199
233
  return "ollama" in self.llama_index.get("provider", "")
200
234
 
201
235
  def get_ollama_model(self) -> str:
@@ -210,6 +244,14 @@ class ModelItem:
210
244
  return arg["value"]
211
245
  return ""
212
246
 
247
+ def get_llama_provider(self) -> str:
248
+ """
249
+ Get Llama Index provider
250
+
251
+ :return: provider name
252
+ """
253
+ return self.llama_index.get("provider", "")
254
+
213
255
  def has_mode(self, mode: str) -> bool:
214
256
  """
215
257
  Check if model has mode
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.03.02 19:00:00 #
9
+ # Updated Date: 2025.06.27 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -1862,6 +1862,24 @@ class Patch:
1862
1862
  data["remote_tools.web_search"] = True
1863
1863
  updated = True
1864
1864
 
1865
+ # < 2.5.18
1866
+ if old < parse_version("2.5.18"):
1867
+ print("Migrating config from < 2.5.18...")
1868
+ if 'remote_tools.image' not in data:
1869
+ data["remote_tools.image"] = False
1870
+ updated = True
1871
+
1872
+ # < 2.5.19
1873
+ if old < parse_version("2.5.19"):
1874
+ print("Migrating config from < 2.5.19...")
1875
+ if 'api_use_responses' not in data:
1876
+ data["api_use_responses"] = True
1877
+ if 'api_key_xai' not in data:
1878
+ data["api_key_xai"] = ""
1879
+ if 'api_endpoint_xai' not in data:
1880
+ data["api_endpoint_xai"] = "https://api.x.ai/v1"
1881
+ updated = True
1882
+
1865
1883
  # update file
1866
1884
  migrated = False
1867
1885
  if updated:
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.02.02 02:00:00 #
9
+ # Updated Date: 2025.06.26 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
@@ -166,6 +166,7 @@ class JsonFileProvider(BaseProvider):
166
166
  'multimodal': item.multimodal,
167
167
  'extra': item.extra,
168
168
  'imported': item.imported,
169
+ 'openai': item.openai,
169
170
  }
170
171
 
171
172
  @staticmethod
@@ -198,6 +199,8 @@ class JsonFileProvider(BaseProvider):
198
199
  item.extra = data['extra']
199
200
  if 'imported' in data:
200
201
  item.imported = data['imported']
202
+ if 'openai' in data:
203
+ item.openai = data['openai']
201
204
 
202
205
  def dump(self, item: ModelItem) -> str:
203
206
  """
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.06.24 16:00:00 #
9
+ # Updated Date: 2025.06.27 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from packaging.version import parse as parse_version, Version
@@ -550,6 +550,26 @@ class Patch:
550
550
  del data[name_to_replace]
551
551
  updated = True
552
552
 
553
+ # < 2.5.18 <--- update openai flag
554
+ if old < parse_version("2.5.18"):
555
+ print("Migrating models from < 2.5.18...")
556
+ for id in data:
557
+ model = data[id]
558
+ if (model.id.startswith("o1")
559
+ or model.id.startswith("o3")
560
+ or model.id.startswith("gpt-")
561
+ or model.id.startswith("chatgpt")
562
+ or model.id.startswith("dall-e")):
563
+ model.openai = True
564
+ if model.is_supported("llama_index"):
565
+ if "chat" not in model.mode:
566
+ model.mode.append("chat")
567
+ updated = True
568
+
569
+ # < 2.5.19 <--- add Grok models
570
+ if old < parse_version("2.5.19"):
571
+ updated = True
572
+
553
573
  # update file
554
574
  if updated:
555
575
  data = dict(sorted(data.items()))
@@ -6,8 +6,9 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.06.25 02:00:00 #
9
+ # Updated Date: 2025.06.27 16:00:00 #
10
10
  # ================================================== #
11
+ import base64
11
12
 
12
13
  from httpx_socks import SyncProxyTransport
13
14
 
@@ -33,6 +34,7 @@ from .responses import Responses
33
34
  from .store import Store
34
35
  from .summarizer import Summarizer
35
36
  from .vision import Vision
37
+ from pygpt_net.item.model import ModelItem
36
38
 
37
39
 
38
40
  class Gpt:
@@ -53,11 +55,12 @@ class Gpt:
53
55
  self.summarizer = Summarizer(window)
54
56
  self.vision = Vision(window)
55
57
 
56
- def get_client(self, mode: str = MODE_CHAT) -> OpenAI:
58
+ def get_client(self, mode: str = MODE_CHAT, model: ModelItem = None) -> OpenAI:
57
59
  """
58
60
  Return OpenAI client
59
61
 
60
62
  :param mode: Mode
63
+ :param model: Model
61
64
  :return: OpenAI client
62
65
  """
63
66
  args = {
@@ -86,6 +89,12 @@ class Gpt:
86
89
  endpoint = self.window.core.config.get('api_endpoint_perplexity')
87
90
  if endpoint:
88
91
  args["base_url"] = endpoint
92
+ elif mode == MODE_CHAT:
93
+ if model is not None:
94
+ # xAI / grok
95
+ if model.id.startswith("grok"):
96
+ args["api_key"] = self.window.core.config.get('api_key_xai')
97
+ args["base_url"] = self.window.core.config.get('api_endpoint_xai')
89
98
 
90
99
  return OpenAI(**args)
91
100
 
@@ -112,9 +121,6 @@ class Gpt:
112
121
 
113
122
  # --- Responses API ---- /beta/
114
123
  use_responses_api = False
115
- if mode == MODE_CHAT:
116
- use_responses_api = True # use responses API for chat, audio, research modes
117
- ctx.use_responses_api = use_responses_api # set in context
118
124
 
119
125
  # get model id
120
126
  model_id = None
@@ -123,6 +129,12 @@ class Gpt:
123
129
  if max_tokens > model.tokens: # check max output tokens
124
130
  max_tokens = model.tokens
125
131
 
132
+ if model.is_gpt():
133
+ if mode == MODE_CHAT and self.window.core.config.get('api_use_responses', False):
134
+ use_responses_api = True # use responses API for chat mode, only OpenAI models
135
+
136
+ ctx.use_responses_api = use_responses_api # set in context
137
+
126
138
  response = None
127
139
  used_tokens = 0
128
140
  context.max_tokens = max_tokens # update max output tokens
@@ -271,6 +283,19 @@ class Gpt:
271
283
  response.usage.input_tokens,
272
284
  response.usage.output_tokens,
273
285
  )
286
+ if mode == MODE_CHAT:
287
+ # if image generation call in responses API
288
+ image_data = [
289
+ output.result
290
+ for output in response.output
291
+ if output.type == "image_generation_call"
292
+ ]
293
+ if image_data:
294
+ img_path = self.window.core.image.gen_unique_path(ctx)
295
+ image_base64 = image_data[0]
296
+ with open(img_path, "wb") as f:
297
+ f.write(base64.b64decode(image_base64))
298
+ ctx.images = [img_path]
274
299
  return True
275
300
 
276
301
  def quick_call(self, context: BridgeContext, extra: dict = None) -> str:
@@ -290,7 +315,7 @@ class Gpt:
290
315
  if model is None:
291
316
  model = self.window.core.models.from_defaults()
292
317
 
293
- client = self.get_client(mode)
318
+ client = self.get_client(mode, model)
294
319
  messages = []
295
320
  messages.append({"role": "system", "content": system_prompt})
296
321
  messages.append({"role": "user", "content": prompt})
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.02.26 23:00:00 #
9
+ # Updated Date: 2025.06.27 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
@@ -67,7 +67,7 @@ class Chat:
67
67
  user_name = ctx.input_name # from ctx
68
68
  ai_name = ctx.output_name # from ctx
69
69
 
70
- client = self.window.core.gpt.get_client(mode)
70
+ client = self.window.core.gpt.get_client(mode, context.model)
71
71
 
72
72
  # build chat messages
73
73
  messages = self.build(