pygpt-net 2.5.18__py3-none-any.whl → 2.5.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. pygpt_net/CHANGELOG.txt +13 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +8 -4
  4. pygpt_net/container.py +3 -3
  5. pygpt_net/controller/chat/command.py +4 -4
  6. pygpt_net/controller/chat/input.py +3 -3
  7. pygpt_net/controller/chat/stream.py +6 -2
  8. pygpt_net/controller/config/placeholder.py +28 -14
  9. pygpt_net/controller/lang/custom.py +2 -2
  10. pygpt_net/controller/mode/__init__.py +22 -1
  11. pygpt_net/controller/model/__init__.py +2 -2
  12. pygpt_net/controller/model/editor.py +6 -63
  13. pygpt_net/controller/model/importer.py +9 -7
  14. pygpt_net/controller/presets/editor.py +8 -8
  15. pygpt_net/core/agents/legacy.py +2 -2
  16. pygpt_net/core/bridge/__init__.py +6 -3
  17. pygpt_net/core/bridge/worker.py +5 -2
  18. pygpt_net/core/command/__init__.py +10 -8
  19. pygpt_net/core/debug/presets.py +2 -2
  20. pygpt_net/core/experts/__init__.py +2 -2
  21. pygpt_net/core/idx/chat.py +7 -20
  22. pygpt_net/core/idx/llm.py +27 -28
  23. pygpt_net/core/llm/__init__.py +25 -3
  24. pygpt_net/core/models/__init__.py +83 -9
  25. pygpt_net/core/modes/__init__.py +2 -2
  26. pygpt_net/core/presets/__init__.py +3 -3
  27. pygpt_net/core/prompt/__init__.py +5 -5
  28. pygpt_net/core/tokens/__init__.py +3 -3
  29. pygpt_net/core/updater/__init__.py +5 -3
  30. pygpt_net/data/config/config.json +8 -3
  31. pygpt_net/data/config/models.json +1051 -2605
  32. pygpt_net/data/config/modes.json +4 -10
  33. pygpt_net/data/config/settings.json +94 -0
  34. pygpt_net/data/locale/locale.en.ini +17 -2
  35. pygpt_net/item/model.py +56 -33
  36. pygpt_net/plugin/base/plugin.py +6 -5
  37. pygpt_net/provider/core/config/patch.py +23 -1
  38. pygpt_net/provider/core/model/json_file.py +7 -7
  39. pygpt_net/provider/core/model/patch.py +60 -7
  40. pygpt_net/provider/core/preset/json_file.py +4 -4
  41. pygpt_net/provider/gpt/__init__.py +18 -15
  42. pygpt_net/provider/gpt/chat.py +91 -21
  43. pygpt_net/provider/gpt/responses.py +58 -21
  44. pygpt_net/provider/llms/anthropic.py +2 -1
  45. pygpt_net/provider/llms/azure_openai.py +11 -7
  46. pygpt_net/provider/llms/base.py +3 -2
  47. pygpt_net/provider/llms/deepseek_api.py +3 -1
  48. pygpt_net/provider/llms/google.py +2 -1
  49. pygpt_net/provider/llms/hugging_face.py +8 -5
  50. pygpt_net/provider/llms/hugging_face_api.py +3 -1
  51. pygpt_net/provider/llms/local.py +2 -1
  52. pygpt_net/provider/llms/ollama.py +8 -6
  53. pygpt_net/provider/llms/openai.py +11 -7
  54. pygpt_net/provider/llms/perplexity.py +109 -0
  55. pygpt_net/provider/llms/x_ai.py +108 -0
  56. pygpt_net/ui/dialog/about.py +5 -5
  57. pygpt_net/ui/dialog/preset.py +5 -5
  58. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/METADATA +65 -178
  59. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/RECORD +62 -60
  60. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/LICENSE +0 -0
  61. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/WHEEL +0 -0
  62. {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/entry_points.txt +0 -0
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.5.18",
4
- "app.version": "2.5.18",
5
- "updated_at": "2025-06-26T00:00:00"
3
+ "version": "2.5.19",
4
+ "app.version": "2.5.19",
5
+ "updated_at": "2025-06-27T00:00:00"
6
6
  },
7
7
  "items": {
8
8
  "chat": {
@@ -52,13 +52,7 @@
52
52
  "name": "assistant",
53
53
  "label": "mode.assistant",
54
54
  "default": false
55
- },
56
- "langchain": {
57
- "id": "langchain",
58
- "name": "langchain",
59
- "label": "mode.langchain",
60
- "default": false
61
- },
55
+ },
62
56
  "agent_llama": {
63
57
  "id": "agent_llama",
64
58
  "name": "Agent (LlamaIndex)",
@@ -69,6 +69,21 @@
69
69
  "advanced": false,
70
70
  "tab": "OpenAI"
71
71
  },
72
+ "api_use_responses": {
73
+ "section": "api_keys",
74
+ "type": "bool",
75
+ "slider": false,
76
+ "label": "settings.api_use_responses",
77
+ "description": "settings.api_use_responses.desc",
78
+ "value": true,
79
+ "min": null,
80
+ "max": null,
81
+ "multiplier": null,
82
+ "step": null,
83
+ "secret": false,
84
+ "advanced": false,
85
+ "tab": "OpenAI"
86
+ },
72
87
  "api_key_google": {
73
88
  "section": "api_keys",
74
89
  "type": "text",
@@ -88,6 +103,21 @@
88
103
  "advanced": false,
89
104
  "tab": "Google"
90
105
  },
106
+ "api_endpoint_google": {
107
+ "section": "api_keys",
108
+ "type": "text",
109
+ "slider": false,
110
+ "label": "settings.api_endpoint_google",
111
+ "description": "settings.api_endpoint_google.desc",
112
+ "value": "https://generativelanguage.googleapis.com/v1beta/openai",
113
+ "min": null,
114
+ "max": null,
115
+ "multiplier": null,
116
+ "step": null,
117
+ "secret": false,
118
+ "advanced": false,
119
+ "tab": "Google"
120
+ },
91
121
  "api_key_anthropic": {
92
122
  "section": "api_keys",
93
123
  "type": "text",
@@ -145,6 +175,55 @@
145
175
  "advanced": false,
146
176
  "tab": "DeepSeek"
147
177
  },
178
+ "api_endpoint_deepseek": {
179
+ "section": "api_keys",
180
+ "type": "text",
181
+ "slider": false,
182
+ "label": "settings.api_endpoint_deepseek",
183
+ "description": "settings.api_endpoint_deepseek.desc",
184
+ "value": "https://api.deepseek.com/v1",
185
+ "min": null,
186
+ "max": null,
187
+ "multiplier": null,
188
+ "step": null,
189
+ "secret": false,
190
+ "advanced": false,
191
+ "tab": "DeepSeek"
192
+ },
193
+ "api_key_xai": {
194
+ "section": "api_keys",
195
+ "type": "text",
196
+ "slider": false,
197
+ "label": "settings.api_key.xai",
198
+ "description": "settings.api_key.xai.desc",
199
+ "value": "",
200
+ "min": null,
201
+ "max": null,
202
+ "multiplier": null,
203
+ "step": null,
204
+ "extra": {
205
+ "bold": true
206
+ },
207
+ "secret": true,
208
+ "persist": true,
209
+ "advanced": false,
210
+ "tab": "xAI"
211
+ },
212
+ "api_endpoint_xai": {
213
+ "section": "api_keys",
214
+ "type": "text",
215
+ "slider": false,
216
+ "label": "settings.api_endpoint_xai",
217
+ "description": "settings.api_endpoint_xai.desc",
218
+ "value": "https://api.x.ai/v1",
219
+ "min": null,
220
+ "max": null,
221
+ "multiplier": null,
222
+ "step": null,
223
+ "secret": false,
224
+ "advanced": false,
225
+ "tab": "xAI"
226
+ },
148
227
  "api_azure_version": {
149
228
  "section": "api_keys",
150
229
  "type": "text",
@@ -196,6 +275,21 @@
196
275
  "advanced": false,
197
276
  "tab": "Perplexity"
198
277
  },
278
+ "api_endpoint_perplexity": {
279
+ "section": "api_keys",
280
+ "type": "text",
281
+ "slider": false,
282
+ "label": "settings.api_endpoint_perplexity",
283
+ "description": "settings.api_endpoint_perplexity.desc",
284
+ "value": "https://api.perplexity.ai",
285
+ "min": null,
286
+ "max": null,
287
+ "multiplier": null,
288
+ "step": null,
289
+ "secret": false,
290
+ "advanced": false,
291
+ "tab": "Perplexity"
292
+ },
199
293
  "app.env": {
200
294
  "section": "general",
201
295
  "type": "dict",
@@ -816,8 +816,10 @@ model.llama_index.mode.desc = Available sub-modes: chat
816
816
  model.llama_index.provider = [LlamaIndex] Provider
817
817
  model.llama_index.provider.desc = LLM provider to use in "Chat with Files" mode
818
818
  model.mode = Mode(s)
819
- model.mode.desc = Available modes: chat (Chat), llama_index (Chat with Files), audio (Chat with Audio), research (Research), completion (Completion), img (Image), vision (Vision), assistant (Assistants), langchain (Langchain), agent_llama (Agent LlamaIndex), agent (Agent Autonomous), expert (Experts)
819
+ model.mode.desc = Available modes: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
820
820
  model.name = Name
821
+ model.provider = Provider
822
+ model.provider.desc = LLM provider
821
823
  models.importer.all = Show all
822
824
  models.importer.available.label = Ollama models
823
825
  models.importer.current.label = PyGPT models
@@ -830,7 +832,7 @@ models.importer.error.remove.not_exists = Model already exists in current list
830
832
  models.importer.loaded = Ollama models loaded successfully.
831
833
  models.importer.status.imported = Models imported successfully.
832
834
  model.openai = OpenAI API
833
- model.openai.desc = Supports native OpenAI API
835
+ model.openai.desc = Supports OpenAI API (or compatible)
834
836
  model.tokens = Output tokens
835
837
  model.tokens.desc = Max model output tokens
836
838
  mode.research = Research (Perplexity)
@@ -953,6 +955,14 @@ settings.api_azure_version = OpenAI API version
953
955
  settings.api_azure_version.desc = Azure OpenAI API version, e.g. 2023-07-01-preview
954
956
  settings.api_endpoint = API Endpoint
955
957
  settings.api_endpoint.desc = OpenAI API (or compatible) endpoint URL, default: https://api.openai.com/v1
958
+ settings.api_endpoint_deepseek = API Endpoint
959
+ settings.api_endpoint_deepseek.desc = DeepSeek API endpoint URL, default: https://api.deepseek.com/v1
960
+ settings.api_endpoint_google = API Endpoint
961
+ settings.api_endpoint_google.desc = Google API endpoint URL, default: https://generativelanguage.googleapis.com/v1beta/openai
962
+ settings.api_endpoint_perplexity = API Endpoint
963
+ settings.api_endpoint_perplexity.desc = Perplexity API endpoint URL, default: https://api.perplexity.ai
964
+ settings.api_endpoint_xai = API Endpoint
965
+ settings.api_endpoint_xai.desc = xAI API endpoint URL, default: https://api.x.ai/v1
956
966
  settings.api_key = OpenAI API KEY
957
967
  settings.api_key.anthropic = Anthropic API KEY
958
968
  settings.api_key.anthropic.desc = Required for the Anthropic API and Claude models.
@@ -965,8 +975,12 @@ settings.api_key.hugging_face = HuggingFace API KEY
965
975
  settings.api_key.hugging_face.desc = Required for the HuggingFace API.
966
976
  settings.api_key.perplexity = Perplexity API KEY
967
977
  settings.api_key.perplexity.desc = Required for the Perplexity API.
978
+ settings.api_key.xai = xAI API KEY
979
+ settings.api_key.xai.desc = Required for the xAI API and Grok models.
968
980
  settings.api_proxy = Proxy address
969
981
  settings.api_proxy.desc = Optional, proxy for OpenAI API, e.g. http://proxy.example.com or socks5://user:pass@host:port
982
+ settings.api_use_responses = Use Responses API
983
+ settings.api_use_responses.desc = Use Responses API instead of ChatCompletions API
970
984
  settings.app.env = Application environment (os.environ)
971
985
  settings.app.env.desc = Additional environment vars to set on application start
972
986
  settings.audio.input.channels = Channels
@@ -1152,6 +1166,7 @@ settings.section.api_keys.google = Google
1152
1166
  settings.section.api_keys.huggingface = HuggingFace
1153
1167
  settings.section.api_keys.openai = OpenAI
1154
1168
  settings.section.api_keys.perplexity = Perplexity
1169
+ settings.section.api_keys.xai = xAI
1155
1170
  settings.section.audio = Audio
1156
1171
  settings.section.ctx = Context
1157
1172
  settings.section.developer = Developer
pygpt_net/item/model.py CHANGED
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.06.26 16:00:00 #
9
+ # Updated Date: 2025.06.28 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
@@ -15,6 +15,17 @@ from pygpt_net.core.types import MODE_CHAT
15
15
 
16
16
 
17
17
  class ModelItem:
18
+
19
+ OPENAI_COMPATIBLE = [
20
+ "openai",
21
+ "azure_openai",
22
+ "google",
23
+ "local_ai",
24
+ "perplexity",
25
+ "deepseek_api",
26
+ "x_ai",
27
+ ]
28
+
18
29
  def __init__(self, id=None):
19
30
  """
20
31
  Model data item
@@ -31,7 +42,7 @@ class ModelItem:
31
42
  self.tokens = 0
32
43
  self.default = False
33
44
  self.imported = False
34
- self.openai = False # OpenAI API supported model
45
+ self.provider = ""
35
46
  self.extra = {}
36
47
 
37
48
  def from_dict(self, data: dict):
@@ -57,8 +68,8 @@ class ModelItem:
57
68
  self.extra = data['extra']
58
69
  if 'imported' in data:
59
70
  self.imported = data['imported']
60
- if 'openai' in data:
61
- self.openai = data['openai']
71
+ if 'provider' in data:
72
+ self.provider = data['provider']
62
73
 
63
74
  # multimodal
64
75
  if 'multimodal' in data:
@@ -66,6 +77,7 @@ class ModelItem:
66
77
  self.multimodal = options.split(',')
67
78
 
68
79
  # langchain
80
+ """
69
81
  if 'langchain.provider' in data:
70
82
  self.langchain['provider'] = data['langchain.provider']
71
83
  if 'langchain.mode' in data:
@@ -78,16 +90,19 @@ class ModelItem:
78
90
  self.langchain['args'] = data['langchain.args']
79
91
  if 'langchain.env' in data:
80
92
  self.langchain['env'] = data['langchain.env']
81
-
93
+ """
94
+
82
95
  # llama index
83
96
  if 'llama_index.provider' in data:
84
- self.llama_index['provider'] = data['llama_index.provider']
97
+ self.llama_index['provider'] = data['llama_index.provider'] # backward compatibility < v2.5.20
98
+ """
85
99
  if 'llama_index.mode' in data:
86
100
  if data['llama_index.mode'] is None or data['llama_index.mode'] == "":
87
101
  self.llama_index['mode'] = []
88
102
  else:
89
103
  mode = data['llama_index.mode'].replace(' ', '')
90
104
  self.llama_index['mode'] = mode.split(',')
105
+ """
91
106
  if 'llama_index.args' in data:
92
107
  self.llama_index['args'] = data['llama_index.args']
93
108
  if 'llama_index.env' in data:
@@ -103,27 +118,29 @@ class ModelItem:
103
118
  data['id'] = self.id
104
119
  data['name'] = self.name
105
120
  data['mode'] = ','.join(self.mode)
106
- data['langchain'] = self.langchain
121
+ # data['langchain'] = self.langchain
107
122
  data['ctx'] = self.ctx
108
123
  data['tokens'] = self.tokens
109
124
  data['default'] = self.default
110
125
  data['multimodal'] = ','.join(self.multimodal)
111
126
  data['extra'] = self.extra
112
127
  data['imported'] = self.imported
113
- data['openai'] = self.openai
114
-
115
- data['langchain.provider'] = None
116
- data['langchain.mode'] = ""
117
- data['langchain.args'] = []
118
- data['langchain.env'] = []
119
- data['llama_index.provider'] = None
120
- data['llama_index.mode'] = ""
128
+ data['provider'] = self.provider
129
+
130
+ # data['langchain.provider'] = None
131
+ # data['langchain.mode'] = ""
132
+ # data['langchain.args'] = []
133
+ # data['langchain.env'] = []
134
+ # data['llama_index.provider'] = None
135
+ # data['llama_index.mode'] = ""
121
136
  data['llama_index.args'] = []
122
137
  data['llama_index.env'] = []
123
138
 
139
+
124
140
  # langchain
141
+ """
125
142
  if 'provider' in self.langchain:
126
- data['langchain.provider'] = self.langchain['provider']
143
+ data['langchain.provider'] = self.langchain['provider']
127
144
  if 'mode' in self.langchain:
128
145
  data['langchain.mode'] = ",".join(self.langchain['mode'])
129
146
  if 'args' in self.langchain:
@@ -147,12 +164,13 @@ class ModelItem:
147
164
  data['langchain.env'].append(item)
148
165
  elif isinstance(self.langchain['env'], list):
149
166
  data['langchain.env'] = self.langchain['env']
167
+ """
150
168
 
151
169
  # llama_index
152
- if 'provider' in self.llama_index:
153
- data['llama_index.provider'] = self.llama_index['provider']
154
- if 'mode' in self.llama_index:
155
- data['llama_index.mode'] = ",".join(self.llama_index['mode'])
170
+ # if 'provider' in self.llama_index:
171
+ # data['llama_index.provider'] = self.llama_index['provider']
172
+ # if 'mode' in self.llama_index:
173
+ # data['llama_index.mode'] = ",".join(self.llama_index['mode'])
156
174
  if 'args' in self.llama_index:
157
175
  # old versions support
158
176
  if isinstance(self.llama_index['args'], dict):
@@ -184,8 +202,8 @@ class ModelItem:
184
202
  :param mode: Mode
185
203
  :return: True if supported
186
204
  """
187
- if mode == MODE_CHAT and not self.is_openai():
188
- # only OpenAI models are supported for chat mode
205
+ if mode == MODE_CHAT and not self.is_openai_supported():
206
+ # only OpenAI API compatible models are supported in Chat mode
189
207
  return False
190
208
  return mode in self.mode
191
209
 
@@ -197,11 +215,19 @@ class ModelItem:
197
215
  """
198
216
  return len(self.multimodal) > 0
199
217
 
200
- def is_openai(self) -> bool:
218
+ def is_openai_supported(self) -> bool:
201
219
  """
202
- Check if model is supported by OpenAI API
220
+ Check if model is supported by OpenAI API (or compatible)
203
221
 
204
- :return: True if OpenAI
222
+ :return: True if OpenAI compatible
223
+ """
224
+ return self.provider in self.OPENAI_COMPATIBLE
225
+
226
+ def is_gpt(self) -> bool:
227
+ """
228
+ Check if model is supported by OpenAI Responses API
229
+
230
+ :return: True if OpenAI Responses API compatible
205
231
  """
206
232
  if (self.id.startswith("gpt-")
207
233
  or self.id.startswith("chatgpt")
@@ -218,12 +244,17 @@ class ModelItem:
218
244
 
219
245
  :return: True if Ollama
220
246
  """
247
+ if self.provider == "ollama":
248
+ return True
221
249
  if self.llama_index is None:
222
250
  return False
223
251
  if self.llama_index.get("provider") is None:
224
252
  return False
225
253
  return "ollama" in self.llama_index.get("provider", "")
226
254
 
255
+ def get_provider(self):
256
+ return self.provider
257
+
227
258
  def get_ollama_model(self) -> str:
228
259
  """
229
260
  Get Ollama model ID
@@ -236,14 +267,6 @@ class ModelItem:
236
267
  return arg["value"]
237
268
  return ""
238
269
 
239
- def get_llama_provider(self) -> str:
240
- """
241
- Get Llama Index provider
242
-
243
- :return: provider name
244
- """
245
- return self.llama_index.get("provider", "")
246
-
247
270
  def has_mode(self, mode: str) -> bool:
248
271
  """
249
272
  Check if model has mode
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2024.12.14 22:00:00 #
9
+ # Updated Date: 2025.06.28 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -289,13 +289,14 @@ class BasePlugin(QObject):
289
289
  msg = self.window.core.debug.parse_alert(err)
290
290
  self.window.ui.dialogs.alert("{}: {}".format(self.name, msg))
291
291
 
292
- def debug(self, data: Any):
292
+ def debug(self, data: Any, console: bool = True):
293
293
  """
294
294
  Send debug message to logger window
295
295
 
296
296
  :param data: data to send
297
+ :param console: print in console
297
298
  """
298
- self.window.core.debug.info(data)
299
+ self.window.core.debug.info(data, console)
299
300
 
300
301
  def reply(
301
302
  self,
@@ -347,7 +348,7 @@ class BasePlugin(QObject):
347
348
  :param msg: message to log
348
349
  """
349
350
  msg = "[{}] {}".format(self.prefix, msg)
350
- self.debug(msg)
351
+ self.debug(msg, not self.is_log())
351
352
  if self.is_threaded():
352
353
  return
353
354
  self.window.update_status(msg.replace("\n", " "))
@@ -439,7 +440,7 @@ class BasePlugin(QObject):
439
440
  :param ctx: context (CtxItem)
440
441
  :return: response dict
441
442
  """
442
- ignore_extra = ["request", "result", "context"]
443
+ ignore_extra = ["request", "context"]
443
444
  allow_output = ["request", "result", "context"]
444
445
  clean_response = {}
445
446
  for key in response:
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.03.02 19:00:00 #
9
+ # Updated Date: 2025.06.28 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -1869,6 +1869,28 @@ class Patch:
1869
1869
  data["remote_tools.image"] = False
1870
1870
  updated = True
1871
1871
 
1872
+ # < 2.5.19
1873
+ if old < parse_version("2.5.19"):
1874
+ print("Migrating config from < 2.5.19...")
1875
+ if 'api_use_responses' not in data:
1876
+ data["api_use_responses"] = True
1877
+ if 'api_key_xai' not in data:
1878
+ data["api_key_xai"] = ""
1879
+ if 'api_endpoint_xai' not in data:
1880
+ data["api_endpoint_xai"] = "https://api.x.ai/v1"
1881
+ updated = True
1882
+
1883
+ # < 2.5.20
1884
+ if old < parse_version("2.5.20"):
1885
+ print("Migrating config from < 2.5.20...")
1886
+ if 'api_endpoint_deepseek' not in data:
1887
+ data["api_endpoint_deepseek"] = "https://api.deepseek.com/v1"
1888
+ if 'api_endpoint_google' not in data:
1889
+ data["api_endpoint_google"] = "https://generativelanguage.googleapis.com/v1beta/openai"
1890
+ if "mode" in data and data["mode"] == "langchain": # deprecated mode
1891
+ data["mode"] = "chat"
1892
+ updated = True
1893
+
1872
1894
  # update file
1873
1895
  migrated = False
1874
1896
  if updated:
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.06.26 16:00:00 #
9
+ # Updated Date: 2025.06.28 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
@@ -158,7 +158,7 @@ class JsonFileProvider(BaseProvider):
158
158
  'id': item.id,
159
159
  'name': item.name,
160
160
  'mode': item.mode,
161
- 'langchain': item.langchain,
161
+ # 'langchain': item.langchain,
162
162
  'llama_index': item.llama_index,
163
163
  'ctx': item.ctx,
164
164
  'tokens': item.tokens,
@@ -166,7 +166,7 @@ class JsonFileProvider(BaseProvider):
166
166
  'multimodal': item.multimodal,
167
167
  'extra': item.extra,
168
168
  'imported': item.imported,
169
- 'openai': item.openai,
169
+ 'provider': item.provider,
170
170
  }
171
171
 
172
172
  @staticmethod
@@ -183,8 +183,8 @@ class JsonFileProvider(BaseProvider):
183
183
  item.name = data['name']
184
184
  if 'mode' in data:
185
185
  item.mode = data['mode']
186
- if 'langchain' in data:
187
- item.langchain = data['langchain']
186
+ # if 'langchain' in data:
187
+ # item.langchain = data['langchain']
188
188
  if 'llama_index' in data:
189
189
  item.llama_index = data['llama_index']
190
190
  if 'ctx' in data:
@@ -199,8 +199,8 @@ class JsonFileProvider(BaseProvider):
199
199
  item.extra = data['extra']
200
200
  if 'imported' in data:
201
201
  item.imported = data['imported']
202
- if 'openai' in data:
203
- item.openai = data['openai']
202
+ if 'provider' in data:
203
+ item.provider = data['provider']
204
204
 
205
205
  def dump(self, item: ModelItem) -> str:
206
206
  """
@@ -6,11 +6,13 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.06.26 16:00:00 #
9
+ # Updated Date: 2025.06.28 16:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from packaging.version import parse as parse_version, Version
13
13
 
14
+ from pygpt_net.core.types import MODE_RESEARCH
15
+
14
16
 
15
17
  class Patch:
16
18
  def __init__(self, window=None):
@@ -328,6 +330,7 @@ class Patch:
328
330
  # langchain
329
331
  is_endpoint = False
330
332
  is_version = False
333
+ """
331
334
  for item in model.langchain["env"]:
332
335
  if item["name"] == "AZURE_OPENAI_ENDPOINT":
333
336
  is_endpoint = True
@@ -355,6 +358,7 @@ class Patch:
355
358
  "value": "{api_azure_version}",
356
359
  }
357
360
  )
361
+ """
358
362
 
359
363
  # llama
360
364
  is_endpoint = False
@@ -390,6 +394,7 @@ class Patch:
390
394
  # Anthropic
391
395
  elif model.id.startswith("claude-"):
392
396
  is_key = False
397
+ """
393
398
  for item in model.langchain["env"]:
394
399
  if item["name"] == "ANTHROPIC_API_KEY":
395
400
  is_key = True
@@ -404,6 +409,7 @@ class Patch:
404
409
  "value": "{api_key_anthropic}",
405
410
  }
406
411
  )
412
+ """
407
413
  is_key = False
408
414
  for item in model.llama_index["env"]:
409
415
  if item["name"] == "ANTHROPIC_API_KEY":
@@ -422,6 +428,7 @@ class Patch:
422
428
  # Google
423
429
  elif model.id.startswith("gemini-"):
424
430
  is_key = False
431
+ """
425
432
  for item in model.langchain["env"]:
426
433
  if item["name"] == "GOOGLE_API_KEY":
427
434
  is_key = True
@@ -436,6 +443,7 @@ class Patch:
436
443
  "value": "{api_key_google}",
437
444
  }
438
445
  )
446
+ """
439
447
  is_key = False
440
448
  for item in model.llama_index["env"]:
441
449
  if item["name"] == "GOOGLE_API_KEY":
@@ -555,17 +563,62 @@ class Patch:
555
563
  print("Migrating models from < 2.5.18...")
556
564
  for id in data:
557
565
  model = data[id]
558
- if (model.id.startswith("o1")
559
- or model.id.startswith("o3")
560
- or model.id.startswith("gpt-")
561
- or model.id.startswith("chatgpt")
562
- or model.id.startswith("dall-e")):
563
- model.openai = True
564
566
  if model.is_supported("llama_index"):
565
567
  if "chat" not in model.mode:
566
568
  model.mode.append("chat")
567
569
  updated = True
568
570
 
571
+ # < 2.5.19 <--- add Grok models
572
+ if old < parse_version("2.5.19"):
573
+ updated = True
574
+
575
+ # < 2.5.20 <--- add provider field
576
+ if old < parse_version("2.5.20"):
577
+ print("Migrating models from < 2.5.20...")
578
+ for id in data:
579
+ model = data[id]
580
+
581
+ # add global providers
582
+ if model.is_ollama():
583
+ model.provider = "ollama"
584
+ if (model.id.startswith("gpt-")
585
+ or model.id.startswith("chatgpt")
586
+ or model.id.startswith("o1")
587
+ or model.id.startswith("o3")
588
+ or model.id.startswith("o4")
589
+ or model.id.startswith("o5")
590
+ or model.id.startswith("dall-e")):
591
+ model.provider = "openai"
592
+ if model.id.startswith("claude-"):
593
+ model.provider = "anthropic"
594
+ if model.id.startswith("gemini-"):
595
+ model.provider = "google"
596
+ if MODE_RESEARCH in model.mode:
597
+ model.provider = "perplexity"
598
+ if model.id.startswith("grok-"):
599
+ model.provider = "x_ai"
600
+ if id.startswith("deepseek_api"):
601
+ model.provider = "deepseek_api"
602
+ if model.provider is None or model.provider == "":
603
+ model.provider = "local_ai"
604
+
605
+ # patch llama_index config
606
+ if model.llama_index:
607
+ if 'mode' in model.llama_index:
608
+ del model.llama_index['mode']
609
+ if 'provider' in model.llama_index:
610
+ del model.llama_index['provider']
611
+
612
+ # add llama_index mode to o1, o3
613
+ if model.id.startswith("o1") or model.id.startswith("o3"):
614
+ if "llama_index" not in model.mode:
615
+ model.mode.append("llama_index")
616
+
617
+ # del langchain config
618
+ if 'langchain' in model.mode:
619
+ model.mode.remove("langchain")
620
+ updated = True
621
+
569
622
  # update file
570
623
  if updated:
571
624
  data = dict(sorted(data.items()))