pygpt-net 2.5.19__py3-none-any.whl → 2.5.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +13 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +8 -4
- pygpt_net/container.py +3 -3
- pygpt_net/controller/chat/command.py +4 -4
- pygpt_net/controller/chat/input.py +2 -2
- pygpt_net/controller/chat/stream.py +6 -2
- pygpt_net/controller/config/placeholder.py +28 -14
- pygpt_net/controller/lang/custom.py +2 -2
- pygpt_net/controller/mode/__init__.py +22 -1
- pygpt_net/controller/model/__init__.py +2 -2
- pygpt_net/controller/model/editor.py +6 -63
- pygpt_net/controller/model/importer.py +9 -7
- pygpt_net/controller/presets/editor.py +8 -8
- pygpt_net/core/agents/legacy.py +2 -2
- pygpt_net/core/bridge/__init__.py +5 -4
- pygpt_net/core/bridge/worker.py +5 -2
- pygpt_net/core/command/__init__.py +10 -8
- pygpt_net/core/debug/presets.py +2 -2
- pygpt_net/core/experts/__init__.py +2 -2
- pygpt_net/core/idx/chat.py +7 -20
- pygpt_net/core/idx/llm.py +27 -28
- pygpt_net/core/llm/__init__.py +25 -3
- pygpt_net/core/models/__init__.py +83 -9
- pygpt_net/core/modes/__init__.py +2 -2
- pygpt_net/core/presets/__init__.py +3 -3
- pygpt_net/core/prompt/__init__.py +5 -5
- pygpt_net/core/render/web/renderer.py +16 -16
- pygpt_net/core/tokens/__init__.py +3 -3
- pygpt_net/core/updater/__init__.py +5 -3
- pygpt_net/data/config/config.json +5 -3
- pygpt_net/data/config/models.json +1302 -3088
- pygpt_net/data/config/modes.json +1 -7
- pygpt_net/data/config/settings.json +60 -0
- pygpt_net/data/css/web-chatgpt.css +2 -2
- pygpt_net/data/locale/locale.de.ini +2 -2
- pygpt_net/data/locale/locale.en.ini +12 -4
- pygpt_net/data/locale/locale.es.ini +2 -2
- pygpt_net/data/locale/locale.fr.ini +2 -2
- pygpt_net/data/locale/locale.it.ini +2 -2
- pygpt_net/data/locale/locale.pl.ini +2 -2
- pygpt_net/data/locale/locale.uk.ini +2 -2
- pygpt_net/data/locale/locale.zh.ini +2 -2
- pygpt_net/item/model.py +49 -34
- pygpt_net/plugin/base/plugin.py +6 -5
- pygpt_net/provider/core/config/patch.py +18 -1
- pygpt_net/provider/core/model/json_file.py +7 -7
- pygpt_net/provider/core/model/patch.py +56 -7
- pygpt_net/provider/core/preset/json_file.py +4 -4
- pygpt_net/provider/gpt/__init__.py +9 -17
- pygpt_net/provider/gpt/chat.py +90 -20
- pygpt_net/provider/gpt/responses.py +58 -21
- pygpt_net/provider/llms/anthropic.py +2 -1
- pygpt_net/provider/llms/azure_openai.py +11 -7
- pygpt_net/provider/llms/base.py +3 -2
- pygpt_net/provider/llms/deepseek_api.py +3 -1
- pygpt_net/provider/llms/google.py +2 -1
- pygpt_net/provider/llms/hugging_face.py +8 -5
- pygpt_net/provider/llms/hugging_face_api.py +3 -1
- pygpt_net/provider/llms/local.py +2 -1
- pygpt_net/provider/llms/ollama.py +8 -6
- pygpt_net/provider/llms/openai.py +11 -7
- pygpt_net/provider/llms/perplexity.py +109 -0
- pygpt_net/provider/llms/x_ai.py +108 -0
- pygpt_net/ui/dialog/about.py +5 -5
- pygpt_net/ui/dialog/preset.py +5 -5
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.21.dist-info}/METADATA +173 -285
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.21.dist-info}/RECORD +71 -69
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.21.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.21.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.21.dist-info}/entry_points.txt +0 -0
pygpt_net/data/config/modes.json
CHANGED
@@ -52,13 +52,7 @@
         "name": "assistant",
         "label": "mode.assistant",
         "default": false
-    },
-    "langchain": {
-        "id": "langchain",
-        "name": "langchain",
-        "label": "mode.langchain",
-        "default": false
-    },
+    },
     "agent_llama": {
         "id": "agent_llama",
         "name": "Agent (LlamaIndex)",
pygpt_net/data/config/settings.json
CHANGED
@@ -103,6 +103,21 @@
         "advanced": false,
         "tab": "Google"
     },
+    "api_endpoint_google": {
+        "section": "api_keys",
+        "type": "text",
+        "slider": false,
+        "label": "settings.api_endpoint_google",
+        "description": "settings.api_endpoint_google.desc",
+        "value": "https://generativelanguage.googleapis.com/v1beta/openai",
+        "min": null,
+        "max": null,
+        "multiplier": null,
+        "step": null,
+        "secret": false,
+        "advanced": false,
+        "tab": "Google"
+    },
     "api_key_anthropic": {
         "section": "api_keys",
         "type": "text",
@@ -160,6 +175,21 @@
         "advanced": false,
         "tab": "DeepSeek"
     },
+    "api_endpoint_deepseek": {
+        "section": "api_keys",
+        "type": "text",
+        "slider": false,
+        "label": "settings.api_endpoint_deepseek",
+        "description": "settings.api_endpoint_deepseek.desc",
+        "value": "https://api.deepseek.com/v1",
+        "min": null,
+        "max": null,
+        "multiplier": null,
+        "step": null,
+        "secret": false,
+        "advanced": false,
+        "tab": "DeepSeek"
+    },
     "api_key_xai": {
         "section": "api_keys",
         "type": "text",
@@ -179,6 +209,21 @@
         "advanced": false,
         "tab": "xAI"
     },
+    "api_endpoint_xai": {
+        "section": "api_keys",
+        "type": "text",
+        "slider": false,
+        "label": "settings.api_endpoint_xai",
+        "description": "settings.api_endpoint_xai.desc",
+        "value": "https://api.x.ai/v1",
+        "min": null,
+        "max": null,
+        "multiplier": null,
+        "step": null,
+        "secret": false,
+        "advanced": false,
+        "tab": "xAI"
+    },
     "api_azure_version": {
         "section": "api_keys",
         "type": "text",
@@ -230,6 +275,21 @@
         "advanced": false,
         "tab": "Perplexity"
     },
+    "api_endpoint_perplexity": {
+        "section": "api_keys",
+        "type": "text",
+        "slider": false,
+        "label": "settings.api_endpoint_perplexity",
+        "description": "settings.api_endpoint_perplexity.desc",
+        "value": "https://api.perplexity.ai",
+        "min": null,
+        "max": null,
+        "multiplier": null,
+        "step": null,
+        "secret": false,
+        "advanced": false,
+        "tab": "Perplexity"
+    },
     "app.env": {
         "section": "general",
         "type": "dict",
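The four new settings entries above only add configurable base URLs for providers that already speak the OpenAI wire protocol. A minimal sketch of how such an endpoint could be consumed, assuming the value is passed as `base_url` to an OpenAI-compatible client; the `load_config()` helper and the `api_key_deepseek` key name are illustrative, not taken from the diff:

```python
# Hedged sketch: read one of the new endpoint settings and use it as an
# OpenAI-compatible base_url. load_config() stands in for however the
# application actually loads config.json; the endpoint key and default
# value match the settings.json hunk above.
import json
from openai import OpenAI  # official openai>=1.x client

def load_config(path: str = "config.json") -> dict:
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)

cfg = load_config()
client = OpenAI(
    base_url=cfg.get("api_endpoint_deepseek", "https://api.deepseek.com/v1"),
    api_key=cfg.get("api_key_deepseek", ""),  # assumed key name for the example
)
```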
pygpt_net/data/css/web-chatgpt.css
CHANGED
@@ -269,7 +269,7 @@ code {{
     margin: 0;
     padding-top: 10px;
     display: block;
-    font-size: 0.
+    font-size: 0.8rem;
 }}
 .extra-src-img-box .img-outer {{
     display: flex;
@@ -298,7 +298,7 @@ code {{
     display: block;
     width: 100%;
     height: 100%;
-    object-fit:
+    object-fit: contain;
 }}

 /* common */
pygpt_net/data/locale/locale.de.ini
CHANGED
@@ -205,8 +205,8 @@ ctx.delete.confirm = Sind Sie sicher, dass Sie löschen möchten?
 ctx.delete.item.confirm = Gesprächselement löschen?
 ctx.delete.item.link = löschen
 ctx.extra.audio = Antwort vorlesen (Audio)
-ctx.extra.copy =
-ctx.extra.copy_code =
+ctx.extra.copy = Kopieren
+ctx.extra.copy_code = kopieren
 ctx.extra.delete = Diesen Eintrag löschen
 ctx.extra.edit = Bearbeiten und Antwort neu generieren (ab diesem Punkt)
 ctx.extra.join = An vorherige Antwort anhängen (zusammenführen)
pygpt_net/data/locale/locale.en.ini
CHANGED
@@ -222,8 +222,8 @@ ctx.delete.confirm = Are you sure you want to delete?
 ctx.delete.item.confirm = Delete conversation item?
 ctx.delete.item.link = delete
 ctx.extra.audio = Read response (audio)
-ctx.extra.copy = Copy
-ctx.extra.copy_code = Copy
+ctx.extra.copy = Copy
+ctx.extra.copy_code = Copy
 ctx.extra.delete = Delete this item
 ctx.extra.edit = Edit and regenerate (from this point)
 ctx.extra.join = Append to previous response (merge)
@@ -816,8 +816,10 @@ model.llama_index.mode.desc = Available sub-modes: chat
 model.llama_index.provider = [LlamaIndex] Provider
 model.llama_index.provider.desc = LLM provider to use in "Chat with Files" mode
 model.mode = Mode(s)
-model.mode.desc = Available modes: chat
+model.mode.desc = Available modes: chat, llama_index, audio, research, completion, img, vision, assistant, agent_llama, agent, expert
 model.name = Name
+model.provider = Provider
+model.provider.desc = LLM provider
 models.importer.all = Show all
 models.importer.available.label = Ollama models
 models.importer.current.label = PyGPT models
@@ -953,8 +955,14 @@ settings.api_azure_version = OpenAI API version
 settings.api_azure_version.desc = Azure OpenAI API version, e.g. 2023-07-01-preview
 settings.api_endpoint = API Endpoint
 settings.api_endpoint.desc = OpenAI API (or compatible) endpoint URL, default: https://api.openai.com/v1
+settings.api_endpoint_deepseek = API Endpoint
+settings.api_endpoint_deepseek.desc = Deepseek API endpoint URL, default: https://api.deepseek.com/v1
+settings.api_endpoint_google = API Endpoint
+settings.api_endpoint_google.desc = Google API endpoint URL, default: https://generativelanguage.googleapis.com/v1beta/openai
+settings.api_endpoint_perplexity = API Endpoint
+settings.api_endpoint_perplexity.desc = Perplexity API endpoint URL, default: https://api.perplexity.ai
 settings.api_endpoint_xai = API Endpoint
-settings.api_endpoint_xai.desc =
+settings.api_endpoint_xai.desc = xAI API endpoint URL, default: https://api.x.ai
 settings.api_key = OpenAI API KEY
 settings.api_key.anthropic = Anthropic API KEY
 settings.api_key.anthropic.desc = Required for the Anthropic API and Claude models.
pygpt_net/data/locale/locale.es.ini
CHANGED
@@ -205,8 +205,8 @@ ctx.delete.confirm = ¿Está seguro de querer eliminar?
 ctx.delete.item.confirm = ¿Eliminar elemento de la conversación?
 ctx.delete.item.link = eliminar
 ctx.extra.audio = Leer respuesta (audio)
-ctx.extra.copy = Copiar
-ctx.extra.copy_code = copiar
+ctx.extra.copy = Copiar
+ctx.extra.copy_code = copiar
 ctx.extra.delete = Eliminar este elemento
 ctx.extra.edit = Editar y regenerar (desde este punto)
 ctx.extra.join = Adjuntar a la respuesta anterior (combinar)
pygpt_net/data/locale/locale.fr.ini
CHANGED
@@ -205,8 +205,8 @@ ctx.delete.confirm = Êtes-vous sûr de vouloir supprimer ?
 ctx.delete.item.confirm = Supprimer l'élément de la conversation ?
 ctx.delete.item.link = supprimer
 ctx.extra.audio = Lire la réponse (audio)
-ctx.extra.copy = Copier
-ctx.extra.copy_code = copier
+ctx.extra.copy = Copier
+ctx.extra.copy_code = copier
 ctx.extra.delete = Supprimer cet élément
 ctx.extra.edit = Modifier et régénérer (à partir de ce point)
 ctx.extra.join = Joindre à la réponse précédente (fusionner)
pygpt_net/data/locale/locale.it.ini
CHANGED
@@ -205,8 +205,8 @@ ctx.delete.confirm = Sei sicuro di voler eliminare?
 ctx.delete.item.confirm = Eliminare l'elemento della conversazione?
 ctx.delete.item.link = elimina
 ctx.extra.audio = Leggi la risposta (audio)
-ctx.extra.copy = Copia
-ctx.extra.copy_code = copia
+ctx.extra.copy = Copia
+ctx.extra.copy_code = copia
 ctx.extra.delete = Elimina questo elemento
 ctx.extra.edit = Modifica e rigenera (da questo punto)
 ctx.extra.join = Aggiungi alla risposta precedente (unisci)
pygpt_net/data/locale/locale.pl.ini
CHANGED
@@ -205,8 +205,8 @@ ctx.delete.confirm = Czy na pewno usunąć?
 ctx.delete.item.confirm = Usunąć element rozmowy?
 ctx.delete.item.link = usuń
 ctx.extra.audio = Odczytaj odpowiedź (audio)
-ctx.extra.copy = Kopiuj
-ctx.extra.copy_code = kopiuj
+ctx.extra.copy = Kopiuj
+ctx.extra.copy_code = kopiuj
 ctx.extra.delete = Usuń ten element
 ctx.extra.edit = Edytuj i wygeneruj ponownie (od tego miejsca)
 ctx.extra.join = Dołącz do poprzedniej odpowiedzi (połącz)
pygpt_net/data/locale/locale.uk.ini
CHANGED
@@ -205,8 +205,8 @@ ctx.delete.confirm = Ви впевнені, що хочете видалити?
 ctx.delete.item.confirm = Видалити елемент розмови?
 ctx.delete.item.link = видалити
 ctx.extra.audio = Читати відповідь (аудіо)
-ctx.extra.copy =
-ctx.extra.copy_code =
+ctx.extra.copy = Копіювати
+ctx.extra.copy_code = kопіювати
 ctx.extra.delete = Видалити цей елемент
 ctx.extra.edit = Редагувати та згенерувати знову (з цього місця)
 ctx.extra.join = Долучити до попередньої відповіді (об'єднати)
pygpt_net/data/locale/locale.zh.ini
CHANGED
@@ -205,8 +205,8 @@ ctx.delete.confirm = 您確定要刪除嗎?
 ctx.delete.item.confirm = 刪除對話項目?
 ctx.delete.item.link = 刪除
 ctx.extra.audio = 朗读回应(音频)
-ctx.extra.copy =
-ctx.extra.copy_code =
+ctx.extra.copy = 复制
+ctx.extra.copy_code = 复制
 ctx.extra.delete = 删除这一项
 ctx.extra.edit = 编辑并重新生成响应(从这一点开始)
 ctx.extra.join = 附加到前一个回应(合并)
pygpt_net/item/model.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import json
@@ -15,6 +15,17 @@ from pygpt_net.core.types import MODE_CHAT


 class ModelItem:
+
+    OPENAI_COMPATIBLE = [
+        "openai",
+        "azure_openai",
+        "google",
+        "local_ai",
+        "perplexity",
+        "deepseek_api",
+        "x_ai",
+    ]
+
     def __init__(self, id=None):
         """
         Model data item
@@ -31,7 +42,7 @@
         self.tokens = 0
         self.default = False
         self.imported = False
-        self.
+        self.provider = ""
         self.extra = {}

     def from_dict(self, data: dict):
@@ -57,8 +68,8 @@
             self.extra = data['extra']
         if 'imported' in data:
             self.imported = data['imported']
-        if '
-            self.
+        if 'provider' in data:
+            self.provider = data['provider']

         # multimodal
         if 'multimodal' in data:
@@ -66,6 +77,7 @@
             self.multimodal = options.split(',')

         # langchain
+        """
         if 'langchain.provider' in data:
             self.langchain['provider'] = data['langchain.provider']
         if 'langchain.mode' in data:
@@ -78,16 +90,19 @@
             self.langchain['args'] = data['langchain.args']
         if 'langchain.env' in data:
             self.langchain['env'] = data['langchain.env']
-
+        """
+
         # llama index
         if 'llama_index.provider' in data:
-            self.llama_index['provider'] = data['llama_index.provider']
+            self.llama_index['provider'] = data['llama_index.provider']  # backward compatibility < v2.5.20
+        """
         if 'llama_index.mode' in data:
             if data['llama_index.mode'] is None or data['llama_index.mode'] == "":
                 self.llama_index['mode'] = []
             else:
                 mode = data['llama_index.mode'].replace(' ', '')
                 self.llama_index['mode'] = mode.split(',')
+        """
         if 'llama_index.args' in data:
             self.llama_index['args'] = data['llama_index.args']
         if 'llama_index.env' in data:
@@ -103,27 +118,29 @@
         data['id'] = self.id
         data['name'] = self.name
         data['mode'] = ','.join(self.mode)
-        data['langchain'] = self.langchain
+        # data['langchain'] = self.langchain
         data['ctx'] = self.ctx
         data['tokens'] = self.tokens
         data['default'] = self.default
         data['multimodal'] = ','.join(self.multimodal)
         data['extra'] = self.extra
         data['imported'] = self.imported
-        data['
-
-        data['langchain.provider'] = None
-        data['langchain.mode'] = ""
-        data['langchain.args'] = []
-        data['langchain.env'] = []
-        data['llama_index.provider'] = None
-        data['llama_index.mode'] = ""
+        data['provider'] = self.provider
+
+        # data['langchain.provider'] = None
+        # data['langchain.mode'] = ""
+        # data['langchain.args'] = []
+        # data['langchain.env'] = []
+        # data['llama_index.provider'] = None
+        # data['llama_index.mode'] = ""
         data['llama_index.args'] = []
         data['llama_index.env'] = []

+
         # langchain
+        """
         if 'provider' in self.langchain:
-            data['langchain.provider'] = self.langchain['provider']
+            data['langchain.provider'] = self.langchain['provider']
         if 'mode' in self.langchain:
             data['langchain.mode'] = ",".join(self.langchain['mode'])
         if 'args' in self.langchain:
@@ -147,12 +164,13 @@
                         data['langchain.env'].append(item)
             elif isinstance(self.langchain['env'], list):
                 data['langchain.env'] = self.langchain['env']
+        """

         # llama_index
-        if 'provider' in self.llama_index:
-            data['llama_index.provider'] = self.llama_index['provider']
-        if 'mode' in self.llama_index:
-            data['llama_index.mode'] = ",".join(self.llama_index['mode'])
+        # if 'provider' in self.llama_index:
+        #     data['llama_index.provider'] = self.llama_index['provider']
+        # if 'mode' in self.llama_index:
+        #     data['llama_index.mode'] = ",".join(self.llama_index['mode'])
         if 'args' in self.llama_index:
             # old versions support
             if isinstance(self.llama_index['args'], dict):
@@ -184,8 +202,8 @@
         :param mode: Mode
         :return: True if supported
         """
-        if mode == MODE_CHAT and not self.
-            # only OpenAI models are supported
+        if mode == MODE_CHAT and not self.is_openai_supported():
+            # only OpenAI API compatible models are supported in Chat mode
             return False
         return mode in self.mode

@@ -197,13 +215,13 @@
         """
         return len(self.multimodal) > 0

-    def
+    def is_openai_supported(self) -> bool:
         """
-        Check if model is supported by OpenAI API
+        Check if model is supported by OpenAI API (or compatible)

-        :return: True if OpenAI
+        :return: True if OpenAI compatible
         """
-        return self.
+        return self.provider in self.OPENAI_COMPATIBLE

     def is_gpt(self) -> bool:
         """
@@ -226,12 +244,17 @@

         :return: True if Ollama
         """
+        if self.provider == "ollama":
+            return True
         if self.llama_index is None:
             return False
         if self.llama_index.get("provider") is None:
             return False
         return "ollama" in self.llama_index.get("provider", "")

+    def get_provider(self):
+        return self.provider
+
     def get_ollama_model(self) -> str:
         """
         Get Ollama model ID
@@ -244,14 +267,6 @@
             return arg["value"]
         return ""

-    def get_llama_provider(self) -> str:
-        """
-        Get Llama Index provider
-
-        :return: provider name
-        """
-        return self.llama_index.get("provider", "")
-
     def has_mode(self, mode: str) -> bool:
         """
         Check if model has mode
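The net effect of the ModelItem changes: provider routing now hangs off a single `provider` field plus the `OPENAI_COMPATIBLE` whitelist, instead of the removed `langchain.*` keys and `get_llama_provider()`. A minimal usage sketch based only on the hunks above (the model id and provider value are illustrative):

```python
# Sketch of the new provider-based checks on ModelItem (names per the hunks above).
from pygpt_net.item.model import ModelItem

model = ModelItem("grok-3")          # illustrative id
model.provider = "x_ai"              # single provider field, new in 2.5.20
print(model.is_openai_supported())   # True: "x_ai" is in ModelItem.OPENAI_COMPATIBLE
print(model.is_ollama())             # False unless provider == "ollama" or llama_index says so

data = model.to_dict()
print(data["provider"])              # "x_ai" -- serialized instead of the old langchain.* keys
```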
pygpt_net/plugin/base/plugin.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import copy
@@ -289,13 +289,14 @@
         msg = self.window.core.debug.parse_alert(err)
         self.window.ui.dialogs.alert("{}: {}".format(self.name, msg))

-    def debug(self, data: Any):
+    def debug(self, data: Any, console: bool = True):
         """
         Send debug message to logger window

         :param data: data to send
+        :param console: print in console
         """
-        self.window.core.debug.info(data)
+        self.window.core.debug.info(data, console)

     def reply(
             self,
@@ -347,7 +348,7 @@
         :param msg: message to log
         """
         msg = "[{}] {}".format(self.prefix, msg)
-        self.debug(msg)
+        self.debug(msg, not self.is_log())
         if self.is_threaded():
             return
         self.window.update_status(msg.replace("\n", " "))
@@ -439,7 +440,7 @@
         :param ctx: context (CtxItem)
         :return: response dict
         """
-        ignore_extra = ["request", "
+        ignore_extra = ["request", "context"]
         allow_output = ["request", "result", "context"]
         clean_response = {}
         for key in response:
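BasePlugin.debug() gains an optional `console` flag that is forwarded to `debug.info()`, and `log()` now passes `not self.is_log()` for that flag. A hedged sketch of calling it from a plugin subclass; the subclass, handler, and import path (derived from the file location shown above) are assumptions:

```python
# Hypothetical plugin using the new debug() signature from the hunk above.
from typing import Any
from pygpt_net.plugin.base.plugin import BasePlugin  # path per pygpt_net/plugin/base/plugin.py

class ExamplePlugin(BasePlugin):
    def handle(self, data: Any):
        # console=False sends the message to the logger window without the console print,
        # per the new ":param console: print in console" docstring.
        self.debug("handling event: {}".format(data), console=False)
```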
pygpt_net/provider/core/config/patch.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import copy
@@ -1880,6 +1880,23 @@
                 data["api_endpoint_xai"] = "https://api.x.ai/v1"
             updated = True

+        # < 2.5.20
+        if old < parse_version("2.5.20"):
+            print("Migrating config from < 2.5.20...")
+            if 'api_endpoint_deepseek' not in data:
+                data["api_endpoint_deepseek"] = "https://api.deepseek.com/v1"
+            if 'api_endpoint_google' not in data:
+                data["api_endpoint_google"] = "https://generativelanguage.googleapis.com/v1beta/openai"
+            if "mode" in data and "mode" == "langchain":  # deprecated mode
+                data["mode"] = "chat"
+            updated = True
+
+        # < 2.5.21
+        if old < parse_version("2.5.21"):
+            print("Migrating config from < 2.5.21...")
+            self.window.core.updater.patch_css('web-chatgpt.css', True)  # force replace file
+            updated = True
+
         # update file
         migrated = False
         if updated:
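The migration above follows the file's usual pattern: compare the stored version against a threshold with `parse_version`, backfill any missing keys, and flag the config as updated. A standalone sketch of that pattern; the function name and data layout are illustrative, not the actual Patch API:

```python
# Illustrative version-gated migration, mirroring the pattern in the hunk above.
from packaging.version import parse as parse_version

def migrate(data: dict, old_version: str) -> bool:
    """Backfill new config keys for upgrades from older releases."""
    updated = False
    old = parse_version(old_version)
    if old < parse_version("2.5.20"):
        # new endpoint keys introduced in settings.json
        data.setdefault("api_endpoint_deepseek", "https://api.deepseek.com/v1")
        data.setdefault("api_endpoint_google",
                        "https://generativelanguage.googleapis.com/v1beta/openai")
        updated = True
    if old < parse_version("2.5.21"):
        # 2.5.21 additionally force-replaces web-chatgpt.css via the updater
        updated = True
    return updated

print(migrate({}, "2.5.19"))  # True
```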
pygpt_net/provider/core/model/json_file.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import json
@@ -158,7 +158,7 @@
             'id': item.id,
             'name': item.name,
             'mode': item.mode,
-            'langchain': item.langchain,
+            # 'langchain': item.langchain,
             'llama_index': item.llama_index,
             'ctx': item.ctx,
             'tokens': item.tokens,
@@ -166,7 +166,7 @@
             'multimodal': item.multimodal,
             'extra': item.extra,
             'imported': item.imported,
-            '
+            'provider': item.provider,
         }

     @staticmethod
@@ -183,8 +183,8 @@
             item.name = data['name']
         if 'mode' in data:
             item.mode = data['mode']
-        if 'langchain' in data:
-            item.langchain = data['langchain']
+        # if 'langchain' in data:
+            # item.langchain = data['langchain']
         if 'llama_index' in data:
             item.llama_index = data['llama_index']
         if 'ctx' in data:
@@ -199,8 +199,8 @@
             item.extra = data['extra']
         if 'imported' in data:
             item.imported = data['imported']
-        if '
-            item.
+        if 'provider' in data:
+            item.provider = data['provider']

     def dump(self, item: ModelItem) -> str:
         """
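Taken together with the ModelItem changes, an entry persisted by this provider now carries a `provider` key in place of the old `langchain` block. An illustrative entry; only the key set is taken from the serializer hunks above, the values are invented for the example:

```python
# Illustrative models.json entry after the change (values are made up).
entry = {
    "id": "deepseek-chat",
    "name": "deepseek-chat",
    "mode": ["chat", "llama_index"],
    "llama_index": {"args": [], "env": []},
    "ctx": 64000,
    "tokens": 8192,
    "default": False,
    "multimodal": [],
    "extra": {},
    "imported": False,
    "provider": "deepseek_api",   # replaces the removed 'langchain' block
}
```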