pygpt-net 2.5.18__py3-none-any.whl → 2.5.20__py3-none-any.whl
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +13 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +8 -4
- pygpt_net/container.py +3 -3
- pygpt_net/controller/chat/command.py +4 -4
- pygpt_net/controller/chat/input.py +3 -3
- pygpt_net/controller/chat/stream.py +6 -2
- pygpt_net/controller/config/placeholder.py +28 -14
- pygpt_net/controller/lang/custom.py +2 -2
- pygpt_net/controller/mode/__init__.py +22 -1
- pygpt_net/controller/model/__init__.py +2 -2
- pygpt_net/controller/model/editor.py +6 -63
- pygpt_net/controller/model/importer.py +9 -7
- pygpt_net/controller/presets/editor.py +8 -8
- pygpt_net/core/agents/legacy.py +2 -2
- pygpt_net/core/bridge/__init__.py +6 -3
- pygpt_net/core/bridge/worker.py +5 -2
- pygpt_net/core/command/__init__.py +10 -8
- pygpt_net/core/debug/presets.py +2 -2
- pygpt_net/core/experts/__init__.py +2 -2
- pygpt_net/core/idx/chat.py +7 -20
- pygpt_net/core/idx/llm.py +27 -28
- pygpt_net/core/llm/__init__.py +25 -3
- pygpt_net/core/models/__init__.py +83 -9
- pygpt_net/core/modes/__init__.py +2 -2
- pygpt_net/core/presets/__init__.py +3 -3
- pygpt_net/core/prompt/__init__.py +5 -5
- pygpt_net/core/tokens/__init__.py +3 -3
- pygpt_net/core/updater/__init__.py +5 -3
- pygpt_net/data/config/config.json +8 -3
- pygpt_net/data/config/models.json +1051 -2605
- pygpt_net/data/config/modes.json +4 -10
- pygpt_net/data/config/settings.json +94 -0
- pygpt_net/data/locale/locale.en.ini +17 -2
- pygpt_net/item/model.py +56 -33
- pygpt_net/plugin/base/plugin.py +6 -5
- pygpt_net/provider/core/config/patch.py +23 -1
- pygpt_net/provider/core/model/json_file.py +7 -7
- pygpt_net/provider/core/model/patch.py +60 -7
- pygpt_net/provider/core/preset/json_file.py +4 -4
- pygpt_net/provider/gpt/__init__.py +18 -15
- pygpt_net/provider/gpt/chat.py +91 -21
- pygpt_net/provider/gpt/responses.py +58 -21
- pygpt_net/provider/llms/anthropic.py +2 -1
- pygpt_net/provider/llms/azure_openai.py +11 -7
- pygpt_net/provider/llms/base.py +3 -2
- pygpt_net/provider/llms/deepseek_api.py +3 -1
- pygpt_net/provider/llms/google.py +2 -1
- pygpt_net/provider/llms/hugging_face.py +8 -5
- pygpt_net/provider/llms/hugging_face_api.py +3 -1
- pygpt_net/provider/llms/local.py +2 -1
- pygpt_net/provider/llms/ollama.py +8 -6
- pygpt_net/provider/llms/openai.py +11 -7
- pygpt_net/provider/llms/perplexity.py +109 -0
- pygpt_net/provider/llms/x_ai.py +108 -0
- pygpt_net/ui/dialog/about.py +5 -5
- pygpt_net/ui/dialog/preset.py +5 -5
- {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/METADATA +65 -178
- {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/RECORD +62 -60
- {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.18.dist-info → pygpt_net-2.5.20.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt
CHANGED
@@ -1,3 +1,16 @@
+2.5.20 (2025-06-28)
+
+- LlamaIndex upgraded to 0.12.44.
+- Langchain removed from the list of modes and dependencies.
+- Improved tools execution.
+- Simplified model configuration.
+- Added endpoint configuration for non-OpenAI APIs.
+
+2.5.19 (2025-06-27)
+
+- Added option to enable/disable `Responses API` in `Config -> Settings -> API Keys -> OpenAI`.
+- Added support for xAI / Grok models, added grok-3 models.
+
 2.5.18 (2025-06-26)
 
 - Non-GPT models are now available in standard Chat mode.
pygpt_net/__init__.py
CHANGED
@@ -6,15 +6,15 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.5.18"
-__build__ = "2025-06-26"
+__version__ = "2.5.20"
+__build__ = "2025-06-28"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
 __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
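These dunder attributes are the package's runtime version metadata; a quick sanity check after upgrading (a minimal sketch using only the attributes shown in the diff above):

```python
# Verify the installed build matches the bumped metadata shown in the diff.
import pygpt_net

assert pygpt_net.__version__ == "2.5.20"
assert pygpt_net.__build__ == "2025-06-28"
print(pygpt_net.__version__, pygpt_net.__build__)
```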
pygpt_net/app.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 import os
@@ -55,6 +55,8 @@ from pygpt_net.provider.llms.hugging_face_api import HuggingFaceApiLLM
 from pygpt_net.provider.llms.local import LocalLLM
 from pygpt_net.provider.llms.ollama import OllamaLLM
 from pygpt_net.provider.llms.openai import OpenAILLM
+from pygpt_net.provider.llms.perplexity import PerplexityLLM
+from pygpt_net.provider.llms.x_ai import xAILLM
 
 # vector store providers (llama-index)
 from pygpt_net.provider.vector_stores.chroma import ChromaProvider
@@ -336,18 +338,20 @@ def run(**kwargs):
     for plugin in plugins:
         launcher.add_plugin(plugin)
 
-    # register
+    # register LLMs
     launcher.add_llm(OpenAILLM())
     launcher.add_llm(AzureOpenAILLM())
     launcher.add_llm(AnthropicLLM())
     launcher.add_llm(GoogleLLM())
-    launcher.add_llm(HuggingFaceLLM())
+    # launcher.add_llm(HuggingFaceLLM())
     launcher.add_llm(HuggingFaceApiLLM())
     launcher.add_llm(LocalLLM())
     launcher.add_llm(OllamaLLM())
    launcher.add_llm(DeepseekApiLLM())
+    launcher.add_llm(PerplexityLLM())
+    launcher.add_llm(xAILLM())
 
-    # register
+    # register LLMs
     llms = kwargs.get('llms', None)
     if isinstance(llms, list):
         for llm in llms:
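The second `# register LLMs` block confirms that `run()` still accepts external providers through the `llms` keyword and hands each one to `launcher.add_llm()`. A minimal sketch of plugging in a custom provider; the `MyLLM` class, its `id` attribute, and the `BaseLLM` class name are assumptions based on the `pygpt_net/provider/llms/base.py` module listed above, not confirmed API:

```python
# Hypothetical custom provider passed to run() via the `llms` kwarg;
# only `run(llms=[...])` and `launcher.add_llm(llm)` are taken from the diff.
from pygpt_net.app import run
from pygpt_net.provider.llms.base import BaseLLM  # assumed base class


class MyLLM(BaseLLM):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.id = "my_llm"  # provider id (assumed attribute)


if __name__ == "__main__":
    run(llms=[MyLLM()])  # each list item is registered with launcher.add_llm()
```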
pygpt_net/container.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 from pygpt_net.config import Config
@@ -18,7 +18,7 @@ from pygpt_net.core.audio import Audio
 from pygpt_net.core.bridge import Bridge
 from pygpt_net.core.calendar import Calendar
 from pygpt_net.core.camera import Camera
-from pygpt_net.core.chain import Chain
+# from pygpt_net.core.chain import Chain
 from pygpt_net.core.command import Command
 from pygpt_net.core.ctx import Ctx
 from pygpt_net.core.db import Database
@@ -65,7 +65,7 @@ class Container:
         self.bridge = Bridge(window)
         self.calendar = Calendar(window)
         self.camera = Camera(window)
-        self.chain = Chain(window)
+        # self.chain = Chain(window)  # deprecated from v2.5.20
         self.command = Command(window)
         self.config = Config(window)
         self.ctx = Ctx(window)
pygpt_net/controller/chat/command.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 from typing import Any
@@ -50,13 +50,13 @@ class Command:
         for cmd in cmds:
             cmd_id = str(cmd["cmd"])
             if not self.window.core.command.is_enabled(cmd_id):
-                self.log("Command not allowed: " + cmd_id)
+                self.log("[cmd] Command not allowed: " + cmd_id)
                 cmds.remove(cmd)  # remove command from execution list
         if len(cmds) == 0:
             return  # abort if no commands
 
         ctx.cmds = cmds  # append commands to ctx
-        self.log("Command call received...")
+        self.log("[cmd] Command call received...")
 
         # agent mode
         if mode == MODE_AGENT:
@@ -67,7 +67,7 @@ class Command:
         )
 
         # plugins
-        self.log("Preparing command reply context...")
+        self.log("[cmd] Preparing command reply context...")
 
         reply = ReplyContext()
         reply.ctx = ctx
pygpt_net/controller/chat/input.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 import os
@@ -82,7 +82,7 @@ class Input:
         model_data = self.window.core.models.get(model)
         if model_data is not None and model_data.is_ollama():
             if (mode == MODE_LLAMA_INDEX or
-                    (mode == MODE_CHAT and not model_data.
+                    (mode == MODE_CHAT and not model_data.is_openai_supported() and model_data.is_ollama())):
                 model_id = model_data.get_ollama_model()
                 # load ENV vars first
                 if ('env' in model_data.llama_index
@@ -257,7 +257,7 @@ class Input:
         model = self.window.core.config.get('model')
         if model:
             model_data = self.window.core.models.get(model)
-            if model_data is not None and model_data.
+            if model_data is not None and model_data.is_gpt():
                 self.window.controller.chat.common.check_api_key(monit=True)
                 self.generating = False
                 self.window.dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
pygpt_net/controller/chat/stream.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 import base64
 import uuid
@@ -37,7 +37,6 @@ class Stream:
         output_tokens = 0
         begin = True
         error = None
-        tool_calls = []
         fn_args_buffers = {}
         citations = []
         img_path = self.window.core.image.gen_unique_path(ctx)
@@ -105,6 +104,8 @@ class Stream:
             if chunk.choices[0].delta and chunk.choices[0].delta.tool_calls:
                 tool_chunks = chunk.choices[0].delta.tool_calls
                 for tool_chunk in tool_chunks:
+                    if tool_chunk.index is None:
+                        tool_chunk.index = 0
                     if len(tool_calls) <= tool_chunk.index:
                         tool_calls.append(
                             {
@@ -133,6 +134,7 @@ class Stream:
             elif etype == "response.output_item.added" and chunk.item.type == "function_call":
                 tool_calls.append({
                     "id": chunk.item.id,
+                    "call_id": chunk.item.call_id,
                     "type": "function",
                     "function": {"name": chunk.item.name, "arguments": ""}
                 })
@@ -228,10 +230,12 @@ class Stream:
 
         # unpack and store tool calls
         if tool_calls:
+            self.window.core.debug.info("[chat] Tool calls found, unpacking...")
             self.window.core.command.unpack_tool_calls_chunks(ctx, tool_calls)
 
         # append images
         if is_image:
+            self.window.core.debug.info("[chat] Image generation call found")
             ctx.images = [img_path]  # save image path to ctx
 
         except Exception as e:
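The new `tool_chunk.index is None` guard matters because streamed tool-call deltas are merged by index: each chunk either opens a new entry in `tool_calls` or appends argument fragments to an existing one, and with a `None` index the comparison `len(tool_calls) <= tool_chunk.index` raises a `TypeError` in Python 3. A simplified sketch of the accumulation pattern (names simplified; not the actual pygpt-net code):

```python
# Simplified accumulator for streamed tool-call deltas (sketch).
def merge_tool_chunk(tool_calls: list, tool_chunk) -> None:
    if tool_chunk.index is None:
        tool_chunk.index = 0  # some providers omit the index on deltas
    if len(tool_calls) <= tool_chunk.index:
        # first delta for this call: create its slot
        tool_calls.append({
            "id": "",
            "type": "function",
            "function": {"name": "", "arguments": ""},
        })
    call = tool_calls[tool_chunk.index]
    if tool_chunk.id:
        call["id"] = tool_chunk.id
    if tool_chunk.function.name:
        call["function"]["name"] = tool_chunk.function.name
    if tool_chunk.function.arguments:
        # JSON arguments arrive in fragments; concatenate them in order
        call["function"]["arguments"] += tool_chunk.function.arguments
```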
pygpt_net/controller/config/placeholder.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 from typing import Dict, Any, List
@@ -68,6 +68,8 @@ class Placeholder:
             return self.get_langchain_providers()
         elif id == "llama_index_providers":
             return self.get_llama_index_providers()
+        elif id == "llm_providers":
+            return self.get_llm_providers()
         elif id == "embeddings_providers":
             return self.get_embeddings_providers()
         elif id == "llama_index_loaders":
@@ -139,10 +141,10 @@ class Placeholder:
 
         :return: placeholders list
         """
-
+        choices = self.window.core.llm.get_choices(MODE_LANGCHAIN)
         data = []
-        for id in
-            data.append({id: id})
+        for id in choices:
+            data.append({id: choices[id]})
         return data
 
     def get_llama_index_providers(self) -> List[Dict[str, str]]:
@@ -151,22 +153,22 @@ class Placeholder:
 
         :return: placeholders list
         """
-
+        choices = self.window.core.llm.get_choices(MODE_LLAMA_INDEX)
         data = []
-        for id in
-            data.append({id: id})
+        for id in choices:
+            data.append({id: choices[id]})
         return data
 
-    def
+    def get_llm_providers(self) -> List[Dict[str, str]]:
         """
-        Get
+        Get all LLM provider placeholders list
 
         :return: placeholders list
         """
-
+        choices = self.window.core.llm.get_choices()
         data = []
-        for id in
-            data.append({id: id})
+        for id in choices:
+            data.append({id: choices[id]})
         return data
 
     def get_embeddings_providers(self) -> List[Dict[str, str]]:
@@ -175,7 +177,19 @@ class Placeholder:
 
         :return: placeholders list
         """
-
+        choices = self.window.core.llm.get_choices("embeddings")
+        data = []
+        for id in choices:
+            data.append({id: choices[id]})
+        return data
+
+    def get_agent_providers(self) -> List[Dict[str, str]]:
+        """
+        Get Llama-index agent provider placeholders list
+
+        :return: placeholders list
+        """
+        ids = self.window.core.agents.provider.get_providers()
         data = []
         for id in ids:
             data.append({id: id})
@@ -269,7 +283,7 @@ class Placeholder:
         for id in models:
             model = models[id]
             suffix = ""
-            if
+            if model.provider == "ollama":
                 suffix = " (Ollama)"
             name = model.name + suffix
             data.append({id: name})
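The recurring pattern in these provider methods is the same: `get_choices()` now returns an id-to-display-name mapping, and each placeholder entry becomes a one-key dict of `{id: name}` instead of `{id: id}`. Illustrated with assumed example data:

```python
# Shape of the new placeholder lists (example data is assumed).
choices = {"openai": "OpenAI", "ollama": "Ollama"}  # id -> display name
data = [{id_: choices[id_]} for id_ in choices]
# before this change: [{"openai": "openai"}, {"ollama": "ollama"}]
# after this change:  [{"openai": "OpenAI"}, {"ollama": "Ollama"}]
```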
pygpt_net/controller/lang/custom.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 from PySide6.QtCore import Qt
@@ -56,7 +56,7 @@ class Custom:
         self.window.ui.config['preset'][MODE_COMPLETION].box.setText(trans("preset.completion"))
         self.window.ui.config['preset'][MODE_IMAGE].box.setText(trans("preset.img"))
         self.window.ui.config['preset'][MODE_VISION].box.setText(trans("preset.vision"))
-        self.window.ui.config['preset'][MODE_LANGCHAIN].box.setText(trans("preset.langchain"))
+        #self.window.ui.config['preset'][MODE_LANGCHAIN].box.setText(trans("preset.langchain"))
         self.window.ui.config['preset'][MODE_LLAMA_INDEX].box.setText(trans("preset.llama_index"))
         self.window.ui.config['preset'][MODE_AGENT].box.setText(trans("preset.agent"))
         self.window.ui.config['preset'][MODE_AGENT_LLAMA].box.setText(trans("preset.agent_llama"))
pygpt_net/controller/mode/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 from pygpt_net.core.events import Event, AppEvent
@@ -30,6 +30,13 @@ class Mode:
 
         :param mode
         """
+        # --- deprecated from v2.5.20 ---
+        if mode == "langchain":
+            print("Langchain mode is deprecated from v2.5.20 and no longer supported. "
+                  "Please use LlamaIndex or Chat mode instead.")
+            mode = "chat"
+        # --- end of deprecated ---
+
         # check if mode change is not locked
         if self.change_locked() or mode is None:
             return
@@ -50,6 +57,13 @@ class Mode:
 
         :param mode: mode name
         """
+        # --- deprecated from v2.5.20 ---
+        if mode == "langchain":
+            print("Langchain mode is deprecated from v2.5.20 and no longer supported. "
+                  "Please use LlamaIndex or Chat mode instead.")
+            mode = "chat"
+        # --- end of deprecated ---
+
         self.locked = True
         # if ctx loaded with assistant ID assigned then switch to assistant from ctx
         if mode == "assistant":
@@ -93,6 +107,13 @@ class Mode:
 
         :param mode: mode name
         """
+        # --- deprecated from v2.5.20 ---
+        if mode == "langchain":
+            print("Langchain mode is deprecated from v2.5.20 and no longer supported. "
+                  "Please use LlamaIndex or Chat mode instead.")
+            mode = "chat"
+        # --- end of deprecated ---
+
         self.window.ui.nodes["prompt.mode"].set_value(mode)
 
     def init_list(self):
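The same seven-line guard is pasted into all three methods; the pattern is a plain remap-and-warn shim that silently routes a removed mode to a supported one. A hypothetical helper that would express it once (not in the diff; for illustration only):

```python
# Hypothetical shared helper for the repeated deprecation guard above.
def remap_deprecated_mode(mode: str) -> str:
    """Map removed modes to their replacements, warning once per call."""
    if mode == "langchain":
        print("Langchain mode is deprecated from v2.5.20 and no longer supported. "
              "Please use LlamaIndex or Chat mode instead.")
        return "chat"
    return mode
```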
pygpt_net/controller/model/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 from typing import Optional
@@ -160,7 +160,7 @@ class Model:
         data = self.window.core.models.get_by_mode(mode)
         for k in data:
             suffix = ""
-            if
+            if data[k].provider == "ollama":
                 suffix = " (Ollama)"
             items[k] = data[k].name + suffix
         items = dict(sorted(items.items(), key=lambda item: item[1]))  # sort items by name
pygpt_net/controller/model/editor.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 import copy
@@ -52,61 +52,15 @@ class Editor:
             "label": "model.mode",
             "description": "model.mode.desc",
         },
+        "provider": {
+            "type": "combo",
+            "use": "llm_providers",
+            "label": "model.provider",
+        },
         "default": {
             "type": "bool",
             "label": "model.default",
         },
-        "openai": {
-            "type": "bool",
-            "label": "model.openai",
-            "description": "model.openai.desc",
-        },
-        "langchain.provider": {
-            "type": "combo",
-            "use": "langchain_providers",
-            "label": "model.langchain.provider",
-            "description": "model.langchain.provider.desc",
-        },
-        "langchain.mode": {
-            "type": "text",  # list of comma separated values
-            "label": "model.langchain.mode",
-            "description": "model.langchain.mode.desc",
-        },
-        "langchain.args": {
-            "type": "dict",
-            "keys": {
-                'name': 'text',
-                'value': 'text',
-                'type': {
-                    "type": "combo",
-                    "use": "var_types",
-                },
-            },
-            "label": "model.langchain.args",
-            "description": "model.langchain.args.desc",
-            "advanced": True,
-        },
-        "langchain.env": {
-            "type": "dict",
-            "keys": {
-                'name': 'text',
-                'value': 'text',
-            },
-            "label": "model.langchain.env",
-            "description": "model.langchain.env.desc",
-            "advanced": True,
-        },
-        "llama_index.provider": {
-            "type": "combo",
-            "use": "llama_index_providers",
-            "label": "model.llama_index.provider",
-            "description": "model.llama_index.provider.desc",
-        },
-        "llama_index.mode": {
-            "type": "text",  # list of comma separated values
-            "label": "model.llama_index.mode",
-            "description": "model.llama_index.mode.desc",
-        },
         "llama_index.args": {
             "type": "dict",
             "keys": {
@@ -155,17 +109,6 @@ class Editor:
         """Set up editor"""
         idx = None
         self.window.model_settings.setup(idx)  # widget dialog setup
-        parent = "model"
-        keys = [
-            "langchain.args",
-            "langchain.env",
-        ]
-        for key in keys:
-            self.window.ui.dialogs.register_dictionary(
-                key,
-                parent,
-                self.get_option(key),
-            )
 
     def toggle_editor(self):
         """Toggle models editor dialog"""
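Net effect of this hunk: the `openai` flag, the four `langchain.*` options, and the `llama_index.provider` / `llama_index.mode` combos collapse into a single `provider` combo fed by the new `llm_providers` placeholder, so one field now selects a model's backend. Roughly (a sketch; the option keys are taken from the diff above):

```python
# The surviving per-model backend selector after this change (sketch).
options = {
    "provider": {
        "type": "combo",
        "use": "llm_providers",  # filled by Placeholder.get_llm_providers()
        "label": "model.provider",
    },
}
```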
pygpt_net/controller/model/importer.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 import copy
@@ -205,8 +205,7 @@ class Importer:
         items = copy.deepcopy(self.window.core.models.items)
         for key in list(items.keys()):
             if (items[key].llama_index is None
-                    or
-                    or items[key].llama_index['provider'] != 'ollama'):
+                    or items[key].provider != 'ollama'):
                 del items[key]
         return items
 
@@ -239,8 +238,9 @@ class Importer:
             "agent_llama",
             "expert",
         ]
-        m.llama_index['provider'] = 'ollama'
-        m.llama_index['mode'] = ['chat']
+        m.provider = 'ollama'
+        # m.llama_index['provider'] = 'ollama'
+        # m.llama_index['mode'] = ['chat']
         m.llama_index['args'] = [
             {
                 'name': 'model',
@@ -248,6 +248,7 @@ class Importer:
                 'type': 'str'
             }
         ]
+        """
        m.langchain['provider'] = 'ollama'
         m.langchain['mode'] = ['chat']
         m.langchain['args'] = [
@@ -257,11 +258,12 @@ class Importer:
                 'type': 'str'
             }
         ]
+        """
         m.imported = True
         m.ctx = 32000  # default
         key = m.id
-        #if key in self.items_current:
-        #key += "_imported"
+        # if key in self.items_current:
+        #     key += "_imported"
         models[key] = m
         self.set_status(trans('models.importer.loaded'))
         return models
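The filter change mirrors the new top-level `provider` field on model items: imported Ollama models are now identified by `item.provider == 'ollama'` instead of digging into `llama_index['provider']`. The same filter, restated as a comprehension (a sketch using the names from the diff):

```python
# Equivalent filter for keeping only Ollama-backed models (sketch).
items = {
    key: item for key, item in items.items()
    if item.llama_index is not None and item.provider == "ollama"
}
```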
pygpt_net/controller/presets/editor.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 import datetime
@@ -74,10 +74,10 @@ class Editor:
             "type": "bool",
             "label": "preset.vision",
         },
-        MODE_LANGCHAIN: {
-            "type": "bool",
-            "label": "preset.langchain",
-        },
+        #MODE_LANGCHAIN: {
+        #    "type": "bool",
+        #    "label": "preset.langchain",
+        #},
         MODE_EXPERT: {
             "type": "bool",
             "label": "preset.expert",
@@ -293,8 +293,8 @@ class Editor:
             data.img = True
         elif mode == MODE_VISION:
             data.vision = True
-        elif mode == MODE_LANGCHAIN:
-            data.langchain = True
+        # elif mode == MODE_LANGCHAIN:
+        #     data.langchain = True
         # elif mode == MODE_ASSISTANT:
         #     data.assistant = True
         elif mode == MODE_LLAMA_INDEX:
@@ -373,7 +373,7 @@ class Editor:
         MODE_COMPLETION,
         MODE_IMAGE,
         MODE_VISION,
-        MODE_LANGCHAIN,
+        # MODE_LANGCHAIN,
         MODE_LLAMA_INDEX,
         MODE_EXPERT,
         MODE_AGENT_LLAMA,
pygpt_net/core/agents/legacy.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 from typing import List
@@ -33,7 +33,7 @@ class Legacy:
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_VISION,
-    MODE_LANGCHAIN,
+    # MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_AUDIO,
     MODE_RESEARCH,
pygpt_net/core/bridge/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #
 
 import time
@@ -96,6 +96,8 @@ class Bridge:
         if base_mode == MODE_CHAT and mode == MODE_LLAMA_INDEX:
             context.idx = None  # disable index if in Chat mode and switch to Llama Index
 
+        self.window.core.debug.info("[bridge] Using mode: " + str(mode))
+
         if mode == MODE_LLAMA_INDEX and base_mode != MODE_LLAMA_INDEX:
             context.idx_mode = MODE_CHAT  # default in sub-mode
 
@@ -199,8 +201,7 @@ class Bridge:
 
         if context.model is not None:
             # check if model is supported by OpenAI API, if not then try to use llama-index or langchain call
-            if (not context.model.is_supported(MODE_CHAT)
-                    and not context.model.is_supported(MODE_RESEARCH)):
+            if not context.model.is_supported(MODE_CHAT):
 
                 # tmp switch to: llama-index
                 if context.model.is_supported(MODE_LLAMA_INDEX):
@@ -220,6 +221,7 @@ class Bridge:
                     return ""
 
             # tmp switch to: langchain
+            """
             elif context.model.is_supported(MODE_LANGCHAIN):
                 context.stream = False
                 ctx = context.ctx
@@ -235,6 +237,7 @@ class Bridge:
                     self.window.core.debug.error("Error in Langchain quick call: " + str(e))
                     self.window.core.debug.error(e)
                     return ""
+            """
 
         # if model is research model, then switch to research / Perplexity endpoint
         if context.mode is None or context.mode == MODE_CHAT: