pygpt-net 2.5.19__py3-none-any.whl → 2.5.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +8 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +8 -4
- pygpt_net/container.py +3 -3
- pygpt_net/controller/chat/command.py +4 -4
- pygpt_net/controller/chat/input.py +2 -2
- pygpt_net/controller/chat/stream.py +6 -2
- pygpt_net/controller/config/placeholder.py +28 -14
- pygpt_net/controller/lang/custom.py +2 -2
- pygpt_net/controller/mode/__init__.py +22 -1
- pygpt_net/controller/model/__init__.py +2 -2
- pygpt_net/controller/model/editor.py +6 -63
- pygpt_net/controller/model/importer.py +9 -7
- pygpt_net/controller/presets/editor.py +8 -8
- pygpt_net/core/agents/legacy.py +2 -2
- pygpt_net/core/bridge/__init__.py +5 -4
- pygpt_net/core/bridge/worker.py +5 -2
- pygpt_net/core/command/__init__.py +10 -8
- pygpt_net/core/debug/presets.py +2 -2
- pygpt_net/core/experts/__init__.py +2 -2
- pygpt_net/core/idx/chat.py +7 -20
- pygpt_net/core/idx/llm.py +27 -28
- pygpt_net/core/llm/__init__.py +25 -3
- pygpt_net/core/models/__init__.py +83 -9
- pygpt_net/core/modes/__init__.py +2 -2
- pygpt_net/core/presets/__init__.py +3 -3
- pygpt_net/core/prompt/__init__.py +5 -5
- pygpt_net/core/tokens/__init__.py +3 -3
- pygpt_net/core/updater/__init__.py +5 -3
- pygpt_net/data/config/config.json +5 -3
- pygpt_net/data/config/models.json +1302 -3088
- pygpt_net/data/config/modes.json +1 -7
- pygpt_net/data/config/settings.json +60 -0
- pygpt_net/data/locale/locale.en.ini +10 -2
- pygpt_net/item/model.py +49 -34
- pygpt_net/plugin/base/plugin.py +6 -5
- pygpt_net/provider/core/config/patch.py +12 -1
- pygpt_net/provider/core/model/json_file.py +7 -7
- pygpt_net/provider/core/model/patch.py +56 -7
- pygpt_net/provider/core/preset/json_file.py +4 -4
- pygpt_net/provider/gpt/__init__.py +9 -17
- pygpt_net/provider/gpt/chat.py +90 -20
- pygpt_net/provider/gpt/responses.py +58 -21
- pygpt_net/provider/llms/anthropic.py +2 -1
- pygpt_net/provider/llms/azure_openai.py +11 -7
- pygpt_net/provider/llms/base.py +3 -2
- pygpt_net/provider/llms/deepseek_api.py +3 -1
- pygpt_net/provider/llms/google.py +2 -1
- pygpt_net/provider/llms/hugging_face.py +8 -5
- pygpt_net/provider/llms/hugging_face_api.py +3 -1
- pygpt_net/provider/llms/local.py +2 -1
- pygpt_net/provider/llms/ollama.py +8 -6
- pygpt_net/provider/llms/openai.py +11 -7
- pygpt_net/provider/llms/perplexity.py +109 -0
- pygpt_net/provider/llms/x_ai.py +108 -0
- pygpt_net/ui/dialog/about.py +5 -5
- pygpt_net/ui/dialog/preset.py +5 -5
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/METADATA +52 -176
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/RECORD +62 -60
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/gpt/chat.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import json
@@ -17,7 +17,6 @@ from pygpt_net.core.types import (
     MODE_CHAT,
     MODE_VISION,
     MODE_AUDIO,
-    MODE_RESEARCH,
 )
 from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.ctx import CtxItem
@@ -113,7 +112,10 @@ class Chat:
         })

         # fix: o1 compatibility
-        if model.id is not None
+        if (model.id is not None
+                and not model.id.startswith("o1")
+                and not model.id.startswith("o3")
+                and model.is_gpt()):
             response_kwargs['presence_penalty'] = self.window.core.config.get('presence_penalty')
             response_kwargs['frequency_penalty'] = self.window.core.config.get('frequency_penalty')
             response_kwargs['temperature'] = self.window.core.config.get('temperature')
@@ -184,6 +186,8 @@ class Chat:

         # tokens config
         mode = MODE_CHAT
+        is_tool_output = False
+        tool_call_native_enabled = self.window.core.config.get('func_call.native', False)
         allowed_system = True
         if (model.id is not None
                 and model.id in ["o1-mini", "o1-preview"]):
@@ -257,24 +261,90 @@
             }
             messages.append(msg)

-            …
+            # ---- tool output ----
+            is_tool_output = False
+            if tool_call_native_enabled and item.extra and isinstance(item.extra, dict):
+                if "tool_calls" in item.extra and isinstance(item.extra["tool_calls"], list):
+                    for tool_call in item.extra["tool_calls"]:
+                        if "function" in tool_call:
+                            if "id" not in tool_call or "name" not in tool_call["function"]:
+                                continue
+                            if tool_call["id"] and tool_call["function"]["name"]:
+                                if "tool_output" in item.extra and isinstance(item.extra["tool_output"], list):
+                                    for tool_output in item.extra["tool_output"]:
+                                        if ("cmd" in tool_output
+                                                and tool_output["cmd"] == tool_call["function"]["name"]):
+                                            msg = {
+                                                "role": "tool",
+                                                "tool_call_id": tool_call["id"],
+                                                "content": str(tool_output),
+                                            }
+                                            last_msg = messages[-1] if messages else None
+                                            if last_msg and last_msg.get(
+                                                    "role") == "assistant":
+                                                last_msg["tool_calls"] = []
+                                                for call in item.extra["tool_calls"]:
+                                                    last_msg["tool_calls"].append(
+                                                        {
+                                                            "id": call["id"],
+                                                            "type": "function",
+                                                            "function": {
+                                                                "name": call["function"]["name"],
+                                                                "arguments": json.dumps(
+                                                                    call["function"]["arguments"]),
+                                                            }
+                                                        }
+                                                    )
+                                                last_msg["content"] = ""
+                                            messages.append(msg)
+                                            is_tool_output = True
+                                            break
+                                        elif "result" in tool_output:
+                                            # if result is present, append it as function call output
+                                            msg = {
+                                                "role": "tool",
+                                                "tool_call_id": tool_call["id"],
+                                                "content": str(tool_output["result"]),
+                                            }
+                                            last_msg = messages[-1] if messages else None
+                                            if last_msg and last_msg.get(
+                                                    "role") == "assistant":
+                                                last_msg["tool_calls"] = []
+                                                for call in item.extra["tool_calls"]:
+                                                    last_msg["tool_calls"].append(
+                                                        {
+                                                            "id": call["id"],
+                                                            "type": "function",
+                                                            "function": {
+                                                                "name": call["function"]["name"],
+                                                                "arguments": json.dumps(call["function"]["arguments"]),
+                                                            }
+                                                        }
+                                                    )
+                                                last_msg["content"] = ""
+                                            messages.append(msg)
+                                            is_tool_output = True
+                                            break

-        # …
+        # use vision and audio if available in current model
+        if not is_tool_output:  # append current prompt only if not tool output
+            content = str(prompt)
+            if MODE_VISION in model.mode:
+                content = self.window.core.gpt.vision.build_content(
+                    content=content,
+                    attachments=attachments,
+                )
+            if MODE_AUDIO in model.mode:
+                content = self.window.core.gpt.audio.build_content(
+                    content=content,
+                    multimodal_ctx=multimodal_ctx,
+                )
+
+            # append current prompt
+            messages.append({
+                "role": "user",
+                "content": content,
+            })

         # input tokens: update
         self.input_tokens += self.window.core.tokens.from_messages(
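
Note: the tool-output hunk above rewrites conversation history into the shape the OpenAI Chat Completions API expects for native function calling: the preceding assistant message carries the `tool_calls` list, and the stored command output follows as a `role: "tool"` message referencing the same call id. A minimal, runnable sketch of that target shape (the values stand in for what pygpt-net keeps in `item.extra`; names are illustrative):

    import json

    # illustrative stand-ins for the stored call and its output
    tool_call = {"id": "call_1", "function": {"name": "get_time", "arguments": {"tz": "UTC"}}}
    tool_output = {"cmd": "get_time", "result": "12:00 UTC"}

    messages = [{"role": "assistant", "content": "..."}]

    # attach the call to the last assistant message, as the hunk does
    last_msg = messages[-1]
    last_msg["tool_calls"] = [{
        "id": tool_call["id"],
        "type": "function",
        "function": {
            "name": tool_call["function"]["name"],
            # the API expects arguments as a JSON string, hence json.dumps()
            "arguments": json.dumps(tool_call["function"]["arguments"]),
        },
    }]
    last_msg["content"] = ""

    # then append the output as a "tool" message with the matching id
    messages.append({
        "role": "tool",
        "tool_call_id": tool_call["id"],
        "content": str(tool_output["result"]),
    })

    print(json.dumps(messages, indent=2))

When such a tool message was appended, the current user prompt is skipped (`is_tool_output`), so the follow-up request continues the function-call round instead of opening a new turn.
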
pygpt_net/provider/gpt/responses.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import json
@@ -66,7 +66,7 @@ class Responses:
         user_name = ctx.input_name  # from ctx
         ai_name = ctx.output_name  # from ctx

-        client = self.window.core.gpt.get_client(mode)
+        client = self.window.core.gpt.get_client(mode, model)

         # build chat messages
         messages = self.build(
@@ -175,9 +175,11 @@ class Responses:
         """
         messages = []
         self.prev_response_id = None  # reset
+        is_tool_output = False  # reset

         # tokens config
         mode = MODE_CHAT
+        tool_call_native_enabled = self.window.core.config.get('func_call.native', False)
         allowed_system = True
         if (model.id is not None
                 and model.id in ["o1-mini", "o1-preview"]):
@@ -243,28 +245,63 @@
             }
             messages.append(msg)

-            …
+            # ---- tool output ----
+            is_tool_output = False  # reset tool output flag
+            if tool_call_native_enabled and item.extra and isinstance(item.extra, dict):
+                if "tool_calls" in item.extra and isinstance(item.extra["tool_calls"], list):
+                    for tool_call in item.extra["tool_calls"]:
+                        if "function" in tool_call:
+                            if "call_id" not in tool_call or "name" not in tool_call["function"]:
+                                continue
+                            if tool_call["call_id"] and tool_call["function"]["name"]:
+                                if "tool_output" in item.extra and isinstance(item.extra["tool_output"], list):
+                                    for tool_output in item.extra["tool_output"]:
+                                        if ("cmd" in tool_output
+                                                and tool_output["cmd"] == tool_call["function"]["name"]):
+                                            msg = {
+                                                "type": "function_call_output",
+                                                "call_id": tool_call["call_id"],
+                                                "output": str(tool_output),
+                                            }
+                                            is_tool_output = True
+                                            messages.append(msg)
+                                            break
+                                        elif "result" in tool_output:
+                                            # if result is present, append it as function call output
+                                            msg = {
+                                                "type": "function_call_output",
+                                                "call_id": tool_call["call_id"],
+                                                "output": str(tool_output["result"]),
+                                            }
+                                            is_tool_output = True
+                                            messages.append(msg)
+                                            break
+
+            # --- previous message ID ---
+            if (item.msg_id
+                    and ((item.cmds is None or len(item.cmds) == 0) or is_tool_output)):  # if no cmds before or tool output
                 self.prev_response_id = item.msg_id  # previous response ID to use in current input

         # use vision and audio if available in current model
-        …
+        if not is_tool_output:  # append current prompt only if not tool output
+            content = str(prompt)
+            if MODE_VISION in model.mode:
+                content = self.window.core.gpt.vision.build_content(
+                    content=content,
+                    attachments=attachments,
+                    responses_api=True,
+                )
+            if MODE_AUDIO in model.mode:
+                content = self.window.core.gpt.audio.build_content(
+                    content=content,
+                    multimodal_ctx=multimodal_ctx,
+                )
+
+            # append current prompt
+            messages.append({
+                "role": "user",
+                "content": content,
+            })

         # input tokens: update
         self.input_tokens += self.window.core.tokens.from_messages(
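
Note: the Responses API variant above differs from the Chat Completions one in two ways: tool results are appended as typed `function_call_output` input items matched by `call_id` (there is no `role: "tool"` message), and `prev_response_id` chains the new request to the previous server-side response. A small sketch of the item shape, with illustrative stand-in values:

    import json

    tool_call = {"call_id": "call_1", "function": {"name": "get_time", "arguments": {"tz": "UTC"}}}
    tool_output = {"cmd": "get_time", "result": "12:00 UTC"}

    # Responses API: a typed input item instead of a chat message
    messages = [{
        "type": "function_call_output",
        "call_id": tool_call["call_id"],
        "output": str(tool_output["result"]),
    }]
    print(json.dumps(messages, indent=2))

The previous response ID is now only reused when the item had no pending commands or a tool output was just appended, which keeps the server-side thread consistent with the locally rebuilt history.
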
pygpt_net/provider/llms/anthropic.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from llama_index.llms.anthropic import Anthropic
@@ -30,6 +30,7 @@ class AnthropicLLM(BaseLLM):
         - api_key: API key for Anthropic API
         """
         self.id = "anthropic"
+        self.name = "Anthropic"
         self.type = [MODE_LLAMA_INDEX]

     def llama(
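
Note: the change above is the first of a series repeated across the provider files below: each provider gains a human-readable `self.name` next to its `self.id`. A minimal sketch of the pattern (the `BaseLLM` stub here is illustrative, not the actual pygpt_net class):

    # illustrative stub; the real BaseLLM lives in pygpt_net/provider/llms/base.py
    class BaseLLM:
        def __init__(self, *args, **kwargs):
            self.id = ""    # internal key referenced by model/config entries
            self.name = ""  # display label, introduced in 2.5.20
            self.type = []  # supported backends, e.g. llama_index, embeddings

    class AnthropicLLM(BaseLLM):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.id = "anthropic"
            self.name = "Anthropic"
            self.type = ["llama_index"]  # stand-in for the MODE_LLAMA_INDEX constant
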
pygpt_net/provider/llms/azure_openai.py
CHANGED
@@ -6,13 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from typing import Optional, List, Dict

-from langchain_openai import AzureOpenAI
-from langchain_openai import AzureChatOpenAI
+# from langchain_openai import AzureOpenAI
+# from langchain_openai import AzureChatOpenAI

 from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
 from llama_index.core.base.embeddings.base import BaseEmbedding
@@ -20,7 +20,6 @@ from llama_index.llms.azure_openai import AzureOpenAI as LlamaAzureOpenAI
 from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding

 from pygpt_net.core.types import (
-    MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
 )
 from pygpt_net.provider.llms.base import BaseLLM
@@ -39,7 +38,8 @@ class AzureOpenAILLM(BaseLLM):
         - api_key: API key for Azure OpenAI API
         """
         self.id = "azure_openai"
-        self.
+        self.name = "Azure OpenAI"
+        self.type = [MODE_LLAMA_INDEX, "embeddings"]

     def completion(
         self,
@@ -54,9 +54,11 @@ class AzureOpenAILLM(BaseLLM):
         :param model: model instance
         :param stream: stream mode
         :return: LLM provider instance
-        """
+
         args = self.parse_args(model.langchain)
         return AzureOpenAI(**args)
+        """
+        pass

     def chat(
         self,
@@ -71,9 +73,11 @@ class AzureOpenAILLM(BaseLLM):
         :param model: model instance
         :param stream: stream mode
         :return: LLM provider instance
-        """
+
         args = self.parse_args(model.langchain)
         return AzureChatOpenAI(**args)
+        """
+        pass

     def llama(
         self,
pygpt_net/provider/llms/base.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import os
@@ -48,7 +48,8 @@ class BaseLLM:
         """
         options = {}
         if mode == MODE_LANGCHAIN:
-            …
+            pass
+            # options = model.langchain
         elif mode == MODE_LLAMA_INDEX:
             options = model.llama_index
         if 'env' in options:
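
Note: with LangChain support dead-ended, `init()` now sources provider options only from the model's `llama_index` configuration before exporting any `env` entries. A rough sketch of that selection logic under these assumptions (the body following `if 'env' in options:` is outside the hunk, so the env handling here is hypothetical):

    import os

    MODE_LANGCHAIN = "langchain"
    MODE_LLAMA_INDEX = "llama_index"

    def select_options(model_options: dict, mode: str) -> dict:
        options = {}
        if mode == MODE_LANGCHAIN:
            pass  # formerly: options = model.langchain (disabled in 2.5.20)
        elif mode == MODE_LLAMA_INDEX:
            options = model_options  # mirrors: options = model.llama_index
        # hypothetical env handling; the real body is not shown in the diff
        for name, value in options.get("env", {}).items():
            os.environ[name] = str(value)
        return options
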
pygpt_net/provider/llms/deepseek_api.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from pygpt_net.core.types import (
@@ -14,6 +14,7 @@ from pygpt_net.core.types import (
 )
 from llama_index.llms.deepseek import DeepSeek
 from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
+
 from pygpt_net.provider.llms.base import BaseLLM
 from pygpt_net.item.model import ModelItem

@@ -22,6 +23,7 @@ class DeepseekApiLLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(DeepseekApiLLM, self).__init__(*args, **kwargs)
         self.id = "deepseek_api"
+        self.name = "Deepseek API"
         self.type = [MODE_LLAMA_INDEX]

     def llama(
pygpt_net/provider/llms/google.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from typing import Optional, List, Dict
@@ -34,6 +34,7 @@ class GoogleLLM(BaseLLM):
         - api_key: API key for Google API
         """
         self.id = "google"
+        self.name = "Google"
         self.type = [MODE_LLAMA_INDEX, "embeddings"]

     def llama(
pygpt_net/provider/llms/hugging_face.py
CHANGED
@@ -6,10 +6,10 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

-from langchain_community.llms import HuggingFaceHub
+# from langchain_community.llms import HuggingFaceHub

 from pygpt_net.core.types import (
     MODE_LANGCHAIN,
@@ -22,7 +22,8 @@ class HuggingFaceLLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(HuggingFaceLLM, self).__init__(*args, **kwargs)
         self.id = "huggingface"
-        self.
+        self.name = "HuggingFace"
+        self.type = []

     def completion(
         self,
@@ -37,11 +38,13 @@ class HuggingFaceLLM(BaseLLM):
         :param model: model instance
         :param stream: stream mode
         :return: LLM provider instance
-        """
+
         args = self.parse_args(model.langchain)
         if "model" not in args:
             args["model"] = model.id
         return HuggingFaceHub(**args)
+        """
+        pass

     def chat(
         self,
@@ -57,4 +60,4 @@ class HuggingFaceLLM(BaseLLM):
         :param stream: stream mode
         :return: LLM provider instance
         """
-        …
+        pass
pygpt_net/provider/llms/hugging_face_api.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import os
@@ -19,6 +19,7 @@ from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding as HuggingFaceAPIEmbedding
 from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
 from llama_index.core.base.embeddings.base import BaseEmbedding
+
 from pygpt_net.provider.llms.base import BaseLLM
 from pygpt_net.item.model import ModelItem

@@ -27,6 +28,7 @@ class HuggingFaceApiLLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(HuggingFaceApiLLM, self).__init__(*args, **kwargs)
         self.id = "huggingface_api"
+        self.name = "HuggingFace API"
         self.type = [MODE_LLAMA_INDEX, "embeddings"]

     def llama(
pygpt_net/provider/llms/local.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from llama_index.llms.openai_like import OpenAILike
@@ -23,6 +23,7 @@ class LocalLLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(LocalLLM, self).__init__(*args, **kwargs)
         self.id = "local_ai"
+        self.name = "Local model (OpenAI API compatible)"
         self.type = [MODE_LLAMA_INDEX]

     def llama(
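
Note: `LocalLLM` keeps routing through llama-index's `OpenAILike` wrapper, so any server speaking the OpenAI wire protocol can back it. A hedged usage sketch (endpoint, key, and model name are placeholders; `OpenAILike` is the llama-index class imported above):

    from llama_index.llms.openai_like import OpenAILike

    llm = OpenAILike(
        model="my-local-model",                # placeholder model id
        api_base="http://localhost:8080/v1",   # placeholder OpenAI-compatible endpoint
        api_key="not-needed-locally",
        is_chat_model=True,
    )
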
pygpt_net/provider/llms/ollama.py
CHANGED
@@ -6,13 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import os
 from typing import Optional, List, Dict

-from langchain_community.chat_models import ChatOllama
+# from langchain_community.chat_models import ChatOllama

 from .ollama_custom import Ollama

@@ -21,7 +21,6 @@ from llama_index.core.base.embeddings.base import BaseEmbedding
 from llama_index.embeddings.ollama import OllamaEmbedding

 from pygpt_net.core.types import (
-    MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
 )
 from pygpt_net.provider.llms.base import BaseLLM
@@ -33,7 +32,8 @@ class OllamaLLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(OllamaLLM, self).__init__(*args, **kwargs)
         self.id = "ollama"
-        self.
+        self.name = "Ollama"
+        self.type = [MODE_LLAMA_INDEX, "embeddings"]

     def completion(
         self,
@@ -49,7 +49,7 @@ class OllamaLLM(BaseLLM):
         :param stream: stream mode
         :return: LLM provider instance
         """
-        …
+        pass

     def chat(
         self,
@@ -64,11 +64,13 @@ class OllamaLLM(BaseLLM):
         :param model: model instance
         :param stream: stream mode
         :return: LLM provider instance
-        """
+
         args = self.parse_args(model.langchain)
         if "model" not in args:
             args["model"] = model.id
         return ChatOllama(**args)
+        """
+        pass

     def llama(
         self,
pygpt_net/provider/llms/openai.py
CHANGED
@@ -6,13 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from typing import Optional, List, Dict

-from langchain_openai import OpenAI
-from langchain_openai import ChatOpenAI
+# from langchain_openai import OpenAI
+# from langchain_openai import ChatOpenAI

 from llama_index.core.llms.llm import BaseLLM as LlamaBaseLLM
 from llama_index.core.multi_modal_llms import MultiModalLLM as LlamaMultiModalLLM
@@ -22,7 +22,6 @@ from llama_index.multi_modal_llms.openai import OpenAIMultiModal as LlamaOpenAIM
 from llama_index.embeddings.openai import OpenAIEmbedding

 from pygpt_net.core.types import (
-    MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
 )
 from pygpt_net.provider.llms.base import BaseLLM
@@ -33,7 +32,8 @@ class OpenAILLM(BaseLLM):
     def __init__(self, *args, **kwargs):
         super(OpenAILLM, self).__init__(*args, **kwargs)
         self.id = "openai"
-        self.
+        self.name = "OpenAI"
+        self.type = [MODE_LLAMA_INDEX, "embeddings"]

     def completion(
         self,
@@ -48,11 +48,13 @@ class OpenAILLM(BaseLLM):
         :param model: model instance
         :param stream: stream mode
         :return: LLM provider instance
-        """
+
         args = self.parse_args(model.langchain)
         if "model" not in args:
             args["model"] = model.id
         return OpenAI(**args)
+        """
+        pass

     def chat(
         self,
@@ -67,9 +69,11 @@ class OpenAILLM(BaseLLM):
         :param model: model instance
         :param stream: stream mode
         :return: LLM provider instance
-        """
+
         args = self.parse_args(model.langchain)
         return ChatOpenAI(**args)
+        """
+        pass

     def llama(
         self,
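
Note: the completion/chat edits in this file (and in azure_openai.py, hugging_face.py, and ollama.py) all use the same neutralization trick: instead of deleting the LangChain-era bodies, the closing docstring quotes are moved below them, so the old code survives as documentation while `pass` becomes the effective body. In miniature:

    class Example:
        def chat(self, window=None, model=None, stream=False):
            """
            Return LLM provider instance for chat

            args = self.parse_args(model.langchain)   # former body, now inert
            return ChatOpenAI(**args)                 # inside the docstring
            """
            pass
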