pygpt-net 2.5.19__py3-none-any.whl → 2.5.20__py3-none-any.whl
This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in their public registries.
- pygpt_net/CHANGELOG.txt +8 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +8 -4
- pygpt_net/container.py +3 -3
- pygpt_net/controller/chat/command.py +4 -4
- pygpt_net/controller/chat/input.py +2 -2
- pygpt_net/controller/chat/stream.py +6 -2
- pygpt_net/controller/config/placeholder.py +28 -14
- pygpt_net/controller/lang/custom.py +2 -2
- pygpt_net/controller/mode/__init__.py +22 -1
- pygpt_net/controller/model/__init__.py +2 -2
- pygpt_net/controller/model/editor.py +6 -63
- pygpt_net/controller/model/importer.py +9 -7
- pygpt_net/controller/presets/editor.py +8 -8
- pygpt_net/core/agents/legacy.py +2 -2
- pygpt_net/core/bridge/__init__.py +5 -4
- pygpt_net/core/bridge/worker.py +5 -2
- pygpt_net/core/command/__init__.py +10 -8
- pygpt_net/core/debug/presets.py +2 -2
- pygpt_net/core/experts/__init__.py +2 -2
- pygpt_net/core/idx/chat.py +7 -20
- pygpt_net/core/idx/llm.py +27 -28
- pygpt_net/core/llm/__init__.py +25 -3
- pygpt_net/core/models/__init__.py +83 -9
- pygpt_net/core/modes/__init__.py +2 -2
- pygpt_net/core/presets/__init__.py +3 -3
- pygpt_net/core/prompt/__init__.py +5 -5
- pygpt_net/core/tokens/__init__.py +3 -3
- pygpt_net/core/updater/__init__.py +5 -3
- pygpt_net/data/config/config.json +5 -3
- pygpt_net/data/config/models.json +1302 -3088
- pygpt_net/data/config/modes.json +1 -7
- pygpt_net/data/config/settings.json +60 -0
- pygpt_net/data/locale/locale.en.ini +10 -2
- pygpt_net/item/model.py +49 -34
- pygpt_net/plugin/base/plugin.py +6 -5
- pygpt_net/provider/core/config/patch.py +12 -1
- pygpt_net/provider/core/model/json_file.py +7 -7
- pygpt_net/provider/core/model/patch.py +56 -7
- pygpt_net/provider/core/preset/json_file.py +4 -4
- pygpt_net/provider/gpt/__init__.py +9 -17
- pygpt_net/provider/gpt/chat.py +90 -20
- pygpt_net/provider/gpt/responses.py +58 -21
- pygpt_net/provider/llms/anthropic.py +2 -1
- pygpt_net/provider/llms/azure_openai.py +11 -7
- pygpt_net/provider/llms/base.py +3 -2
- pygpt_net/provider/llms/deepseek_api.py +3 -1
- pygpt_net/provider/llms/google.py +2 -1
- pygpt_net/provider/llms/hugging_face.py +8 -5
- pygpt_net/provider/llms/hugging_face_api.py +3 -1
- pygpt_net/provider/llms/local.py +2 -1
- pygpt_net/provider/llms/ollama.py +8 -6
- pygpt_net/provider/llms/openai.py +11 -7
- pygpt_net/provider/llms/perplexity.py +109 -0
- pygpt_net/provider/llms/x_ai.py +108 -0
- pygpt_net/ui/dialog/about.py +5 -5
- pygpt_net/ui/dialog/preset.py +5 -5
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/METADATA +52 -176
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/RECORD +62 -60
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.19.dist-info → pygpt_net-2.5.20.dist-info}/entry_points.txt +0 -0
pygpt_net/core/command/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import copy
@@ -617,8 +617,8 @@ class Command:
         :return: True if enabled
         """
         disabled_modes = [
-            #MODE_LLAMA_INDEX,
-            MODE_LANGCHAIN,
+            # MODE_LLAMA_INDEX,
+            # MODE_LANGCHAIN,
             MODE_COMPLETION,
         ]
         mode = self.window.core.config.get('mode')
@@ -630,10 +630,12 @@ class Command:
         if model:
             model_data = self.window.core.models.get(model)
             if model_data:
-
-                if llama_provider in self.window.core.idx.chat.tool_calls_not_allowed_providers:
+                if not self.window.core.models.is_tool_call_allowed(mode, model_data):
                     return False
-
+        enabled = self.window.core.config.get('func_call.native', False)  # otherwise check config
+        # if enabled:
+        #     self.window.core.debug.info("[cmd] Native tool calls enabled")
+        return enabled

     def is_enabled(self, cmd: str) -> bool:
         """
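These two hunks replace Command's inline provider lookup with the centralized Models.is_tool_call_allowed() helper added later in this diff. A standalone mirror of that gate, as a minimal sketch only: the mode constant is reduced to a plain string and a provider id stands in for the real ModelItem.

    # Hedged mirror of Models.is_tool_call_allowed(); the provider list is
    # copied from the hunk further down, window plumbing is omitted.
    MODE_LLAMA_INDEX = "llama_index"
    NOT_ALLOWED = ["ollama", "hugging_face_api", "deepseek_api", "perplexity"]

    def is_tool_call_allowed(mode: str, provider: str) -> bool:
        blocked = list(NOT_ALLOWED)
        if mode == MODE_LLAMA_INDEX and provider == "google":
            blocked.append("google")  # google-generativeai==0.8.5 type bug, per the diff comment
        return provider not in blocked

    print(is_tool_call_allowed("chat", "ollama"))         # False
    print(is_tool_call_allowed("llama_index", "google"))  # False
    print(is_tool_call_allowed("chat", "google"))         # True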
@@ -693,12 +695,12 @@ class Command:
             "deepseek-r1:1.5b",
             "deepseek-r1:7b",
             "llama2",
-            "llama3.1",
+            #"llama3.1",
             "codellama",
         ]
         if model.id is not None:
             for disabled_model in disabled_models:
-                if (model.
+                if (model.get_provider() == "ollama"
                         and model.id.startswith(disabled_model)):
                     return False
         return True
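This hunk re-enables "llama3.1" and keys the check on model.get_provider(), so only Ollama models are filtered by id prefix. A runnable sketch of the resulting match; the disabled list is copied from the hunk, the function name is illustrative.

    DISABLED = ["deepseek-r1:1.5b", "deepseek-r1:7b", "llama2", "codellama"]

    def supports_tools(provider: str, model_id: str) -> bool:
        # only Ollama-provided models are checked against the disabled prefixes
        if provider == "ollama":
            return not any(model_id.startswith(d) for d in DISABLED)
        return True

    print(supports_tools("ollama", "llama2:13b"))   # False (prefix match)
    print(supports_tools("ollama", "llama3.1"))     # True (re-enabled in 2.5.20)
    print(supports_tools("openai", "gpt-4o-mini"))  # True (non-Ollama ids pass)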
pygpt_net/core/debug/presets.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import os
@@ -60,7 +60,7 @@ class PresetsDebug:
             MODE_COMPLETION: preset.completion,
             MODE_IMAGE: preset.img,
             MODE_VISION: preset.vision,
-            MODE_LANGCHAIN: preset.langchain,
+            # MODE_LANGCHAIN: preset.langchain,
             MODE_ASSISTANT: preset.assistant,
             MODE_LLAMA_INDEX: preset.llama_index,
             MODE_AGENT: preset.agent,
pygpt_net/core/experts/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from typing import Dict, List
@@ -40,7 +40,7 @@ class Experts:
             MODE_CHAT,
             MODE_COMPLETION,
             MODE_VISION,
-            MODE_LANGCHAIN,
+            # MODE_LANGCHAIN,
             MODE_LLAMA_INDEX,
             MODE_AUDIO,
             MODE_RESEARCH,
pygpt_net/core/idx/chat.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import json
@@ -37,12 +37,6 @@ class Chat:
         self.window = window
         self.storage = storage
         self.context = Context(window)
-        self.tool_calls_not_allowed_providers = [
-            "ollama",
-            "google",
-            "hugging_face_api",
-            "deepseek_api",
-        ]

     def call(
         self,
@@ -73,17 +67,11 @@ class Chat:
                 extra=extra,
             )

-        #
-
-
-
-
-            )
-        else:
-            return self.query(
-                context=context,
-                extra=extra,
-            ) # if not, use query mode
+        # chat
+        return self.chat(
+            context=context,
+            extra=extra,
+        )

     def raw_query(
         self,
@@ -239,8 +227,7 @@ class Chat:
         verbose = self.window.core.config.get("log.llama", False)
         allow_native_tool_calls = True
        response = None
-        if (
-                and model.llama_index['provider'] in self.tool_calls_not_allowed_providers):
+        if not self.window.core.models.is_tool_call_allowed(context.mode, model):
             allow_native_tool_calls = False

         if idx is None or idx == "_":
pygpt_net/core/idx/llm.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import os.path
@@ -31,7 +31,7 @@ class Llm:
         :param window: Window instance
         """
         self.window = window
-        self.default_model = "gpt-
+        self.default_model = "gpt-4o-mini"
         self.default_embed = "openai"
         self.initialized = False
@@ -56,39 +56,38 @@ class Llm:
         # TMP: deprecation warning fix
         # https://github.com/DataDog/dd-trace-py/issues/8212#issuecomment-1971063988
         if not self.initialized:
-            import warnings
-            from langchain._api import LangChainDeprecationWarning
-            warnings.simplefilter("ignore", category=LangChainDeprecationWarning)
+            # import warnings
+            # from langchain._api import LangChainDeprecationWarning
+            # warnings.simplefilter("ignore", category=LangChainDeprecationWarning)
             self.initialized = True

         llm = None
         if model is not None:
-
-
-
-
-            self.window
+            provider = model.get_provider()
+            if provider in self.window.core.llm.llms:
+                # init env vars
+                self.window.core.llm.llms[provider].init(
+                    window=self.window,
+                    model=model,
+                    mode=MODE_LLAMA_INDEX,
+                    sub_mode="",
+                )
+                # get llama LLM instance
+                if multimodal and model.is_multimodal():
+                    # at first, try to get multimodal provider
+                    llm = self.window.core.llm.llms[provider].llama_multimodal(
+                        window=self.window,
+                        model=model,
+                    )
+                    if llm is not None:
+                        print("Using multimodal.")
+
+                if llm is None:
+                    # if no multimodal, get default llama provider
+                    llm = self.window.core.llm.llms[provider].llama(
                         window=self.window,
                         model=model,
-                mode=MODE_LLAMA_INDEX,
-                sub_mode="",
                     )
-            # get llama LLM instance
-            if multimodal and model.is_multimodal():
-                # at first, try to get multimodal provider
-                llm = self.window.core.llm.llms[provider].llama_multimodal(
-                    window=self.window,
-                    model=model,
-                )
-                if llm is not None:
-                    print("Using multimodal.")
-
-            if llm is None:
-                # if no multimodal, get default llama provider
-                llm = self.window.core.llm.llms[provider].llama(
-                    window=self.window,
-                    model=model,
-                )

         # default model
         if llm is None:
CHANGED
@@ -6,10 +6,10 @@
|
|
6
6
|
# GitHub: https://github.com/szczyglis-dev/py-gpt #
|
7
7
|
# MIT License #
|
8
8
|
# Created By : Marcin Szczygliński #
|
9
|
-
# Updated Date:
|
9
|
+
# Updated Date: 2025.06.28 16:00:00 #
|
10
10
|
# ================================================== #
|
11
11
|
|
12
|
-
from typing import Optional, List
|
12
|
+
from typing import Optional, List, Dict
|
13
13
|
|
14
14
|
|
15
15
|
class LLM:
|
@@ -34,7 +34,29 @@ class LLM:
|
|
34
34
|
"""
|
35
35
|
if type is not None:
|
36
36
|
return [id for id in self.llms.keys() if type in self.llms[id].type]
|
37
|
-
return list(self.llms.keys())
|
37
|
+
return list(self.llms.keys()) # get all
|
38
|
+
|
39
|
+
def get_choices(
|
40
|
+
self,
|
41
|
+
type: Optional[str] = None
|
42
|
+
) -> Dict[str, str]:
|
43
|
+
"""
|
44
|
+
Get providers choices
|
45
|
+
|
46
|
+
:param type: provider type
|
47
|
+
:return: providers choices
|
48
|
+
"""
|
49
|
+
choices = {}
|
50
|
+
if type is not None:
|
51
|
+
for id in list(self.llms.keys()):
|
52
|
+
if type in self.llms[id].type:
|
53
|
+
choices[id] = self.llms[id].name
|
54
|
+
else:
|
55
|
+
for id in list(self.llms.keys()):
|
56
|
+
choices[id] = self.llms[id].name
|
57
|
+
|
58
|
+
# sorted by name
|
59
|
+
return dict(sorted(choices.items(), key=lambda item: item[1].lower()))
|
38
60
|
|
39
61
|
def register(
|
40
62
|
self,
|
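The new get_choices() returns display names keyed by provider id, sorted case-insensitively by name, ready for a selection widget. The sorting idiom at its core, shown with invented registry data (the real entries come from self.llms[id].name):

    llms = {"x_ai": "xAI", "openai": "OpenAI", "deepseek_api": "Deepseek", "anthropic": "Anthropic"}
    choices = dict(sorted(llms.items(), key=lambda item: item[1].lower()))
    print(choices)
    # {'anthropic': 'Anthropic', 'deepseek_api': 'Deepseek', 'openai': 'OpenAI', 'x_ai': 'xAI'}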
pygpt_net/core/models/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.06.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import copy
@@ -385,32 +385,106 @@ class Models:
         :param mode: mode (initial)
         :return: mode (supported)
         """
+        prev_mode = mode
         # if OpenAI API model and not llama_index mode, switch to Chat mode
-        if model.
-            if
+        if model.is_supported(MODE_CHAT) and mode != MODE_LLAMA_INDEX:  # do not switch if llama_index mode!
+            if prev_mode != MODE_CHAT:
                 self.window.core.debug.info(
-                "WARNING: Switching to chat mode (model not supported in: {})".format(
-
+                    "WARNING: Switching to chat mode (model not supported in: {})".format(prev_mode))
+            return MODE_CHAT

         # Research / Perplexity
         if model.is_supported(MODE_RESEARCH):
-
-
+            if prev_mode != MODE_RESEARCH:
+                self.window.core.debug.info(
+                    "WARNING: Switching to research mode (model not supported in: {})".format(mode))
             mode = MODE_RESEARCH

         # Llama Index / Chat with Files
         elif model.is_supported(MODE_LLAMA_INDEX):
-
-
+            if prev_mode != MODE_LLAMA_INDEX:
+                self.window.core.debug.info(
+                    "WARNING: Switching to llama_index mode (model not supported in: {})".format(mode))
             mode = MODE_LLAMA_INDEX

         # LangChain
+        """
         elif model.is_supported(MODE_LANGCHAIN):
             self.window.core.debug.info(
                 "WARNING: Switching to langchain mode (model not supported in: {})".format(mode))
             mode = MODE_LANGCHAIN
+        """
         return mode

+    def prepare_client_args(
+            self,
+            args: dict,
+            mode: str = MODE_CHAT,
+            model: ModelItem = None
+    ) -> Dict[str, str]:
+        """
+        Prepare chat client arguments
+
+        :param args: client arguments
+        :param mode: mode name
+        :param model: ModelItem
+        :return: client arguments dict
+        """
+        # research mode endpoint - Perplexity
+        if mode == MODE_RESEARCH:
+            args["api_key"] = self.window.core.config.get('api_key_perplexity', "")
+            args["base_url"] = self.window.core.config.get('api_endpoint_perplexity', "")
+            self.window.core.debug.info("[api] Using client: Perplexity")
+        elif mode == MODE_CHAT:
+            if model is not None:
+                # xAI / grok
+                if model.provider == "x_ai":
+                    args["api_key"] = self.window.core.config.get('api_key_xai', "")
+                    args["base_url"] = self.window.core.config.get('api_endpoint_xai', "")
+                    self.window.core.debug.info("[api] Using client: xAI")
+                # Perplexity
+                elif model.provider == "perplexity":
+                    args["api_key"] = self.window.core.config.get('api_key_perplexity', "")
+                    args["base_url"] = self.window.core.config.get('api_endpoint_perplexity', "")
+                    self.window.core.debug.info("[api] Using client: Perplexity")
+                # Google
+                elif model.provider == "google":
+                    args["api_key"] = self.window.core.config.get('api_key_google', "")
+                    args["base_url"] = self.window.core.config.get('api_endpoint_google', "")
+                    self.window.core.debug.info("[api] Using client: Google")
+                # Deepseek
+                elif model.provider == "deepseek_api":
+                    args["api_key"] = self.window.core.config.get('api_key_deepseek_api', "")
+                    args["base_url"] = self.window.core.config.get('api_endpoint_deepseek_api', "")
+                    self.window.core.debug.info("[api] Using client: Deepseek API")
+                else:
+                    self.window.core.debug.info("[api] Using client: OpenAI default")
+            else:
+                self.window.core.debug.info("[api] No model provided, using default OpenAI client")
+        return args
+
+    def is_tool_call_allowed(self, mode: str, model: ModelItem) -> bool:
+        """
+        Check if native tool call is allowed for model and mode
+
+        :param mode: Mode name
+        :param model: ModelItem
+        :return: True if tool call is allowed, False otherwise
+        """
+        not_allowed_providers = [
+            "ollama",
+            "hugging_face_api",
+            "deepseek_api",
+            "perplexity",
+            # "x_ai",
+        ]
+        if mode == MODE_LLAMA_INDEX:
+            if model.provider == "google":
+                not_allowed_providers.append("google")  # bug in types in google-generativeai==0.8.5
+        if model.provider in not_allowed_providers:
+            return False
+        return True
+
     def get_version(self) -> str:
         """
         Get config version
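prepare_client_args() maps each OpenAI-compatible provider onto its own api_key/base_url config entries, and is_tool_call_allowed() centralizes the blocklist previously duplicated across classes. A condensed, table-driven sketch of the argument selection; the config keys come from the hunk, everything else is illustrative:

    PROVIDER_KEYS = {
        "x_ai": ("api_key_xai", "api_endpoint_xai"),
        "perplexity": ("api_key_perplexity", "api_endpoint_perplexity"),
        "google": ("api_key_google", "api_endpoint_google"),
        "deepseek_api": ("api_key_deepseek_api", "api_endpoint_deepseek_api"),
    }

    def prepare_client_args(args: dict, config: dict, provider: str) -> dict:
        if provider in PROVIDER_KEYS:
            key_name, endpoint_name = PROVIDER_KEYS[provider]
            args["api_key"] = config.get(key_name, "")
            args["base_url"] = config.get(endpoint_name, "")
        return args  # any other provider keeps the default OpenAI client args

    cfg = {"api_key_xai": "xai-<key>", "api_endpoint_xai": "https://api.x.ai/v1"}
    print(prepare_client_args({}, cfg, "x_ai"))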
pygpt_net/core/modes/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from typing import Dict, List
@@ -48,7 +48,7 @@ class Modes:
             MODE_COMPLETION,
             MODE_EXPERT,
             MODE_IMAGE,
-            MODE_LANGCHAIN,
+            # MODE_LANGCHAIN,
             MODE_LLAMA_INDEX,
             MODE_VISION,
             MODE_RESEARCH,
pygpt_net/core/presets/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import copy
@@ -220,8 +220,8 @@ class Presets:
             return MODE_IMAGE
         if preset.vision:
             return MODE_VISION
-        if preset.langchain:
-            return MODE_LANGCHAIN
+        # if preset.langchain:
+        #     return MODE_LANGCHAIN
         if preset.assistant:
             return MODE_ASSISTANT
         if preset.llama_index:
pygpt_net/core/prompt/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from pygpt_net.core.events import Event
@@ -81,8 +81,8 @@ class Prompt:
             return prompt

         # abort if model not supported
-        if not self.window.core.command.is_model_supports_tools(mode, model):
-            return prompt
+        # if not self.window.core.command.is_model_supports_tools(mode, model):
+        #     return prompt

         # cmd syntax tokens
         data = {
@@ -169,8 +169,8 @@ class Prompt:
             return sys_prompt  # abort if native func call enabled

         # abort if model not supported
-        if not self.window.core.command.is_model_supports_tools(mode, model):
-            return sys_prompt
+        # if not self.window.core.command.is_model_supports_tools(mode, model):
+        #     return sys_prompt

         data = {
             'mode': mode,
pygpt_net/core/tokens/__init__.py
CHANGED
@@ -6,14 +6,14 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 from typing import Tuple, List

 import tiktoken

-from langchain_core.messages import ChatMessage as ChatMessageLangchain
+# from langchain_core.messages import ChatMessage as ChatMessageLangchain
 from llama_index.core.base.llms.types import ChatMessage as ChatMessageLlama

 from pygpt_net.core.types import (
@@ -186,7 +186,7 @@ class Tokens:

     @staticmethod
     def from_langchain_messages(
-            messages: List
+            messages: List,
             model: str = "gpt-4"
     ) -> int:
         """
pygpt_net/core/updater/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.06.28 16:00:00 #
 # ================================================== #

 import copy
@@ -16,6 +16,7 @@ import json
 import ssl
 import time
 from typing import Tuple
+import locale

 from urllib.request import urlopen, Request

@@ -333,8 +334,9 @@ class Updater:
         :param people: people list
         :return: parsed people list
         """
-
-
+        people_list = [x.strip() for x in people.split(",")]
+        sorted_people = sorted(people_list, key=locale.strxfrm)
+        return ", ".join(sorted_people)

     def check(self, force: bool = False) -> bool:
         """
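The people parser now splits the comma-separated credits string and sorts it with locale-aware collation via locale.strxfrm. A self-contained sketch; the explicit setlocale call is an assumption here (the app presumably configures the locale elsewhere):

    import locale

    locale.setlocale(locale.LC_COLLATE, "")  # strxfrm honors the LC_COLLATE setting

    def parse_people(people: str) -> str:
        people_list = [x.strip() for x in people.split(",")]
        return ", ".join(sorted(people_list, key=locale.strxfrm))

    print(parse_people("Łukasz, Anna, Zofia"))  # e.g. "Anna, Łukasz, Zofia" under pl_PL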
pygpt_net/data/config/config.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.5.19",
-        "app.version": "2.5.19",
-        "updated_at": "2025-06-
+        "version": "2.5.20",
+        "app.version": "2.5.20",
+        "updated_at": "2025-06-28T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
@@ -61,6 +61,8 @@
     "api_azure_version": "2023-07-01-preview",
     "api_azure_endpoint": "https://<your-resource-name>.openai.azure.com/",
     "api_endpoint": "https://api.openai.com/v1",
+    "api_endpoint_deepseek": "https://api.deepseek.com/v1",
+    "api_endpoint_google": "https://generativelanguage.googleapis.com/v1beta/openai",
     "api_endpoint_perplexity": "https://api.perplexity.ai",
     "api_endpoint_xai": "https://api.x.ai/v1",
     "api_key": "",
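Both new endpoints are OpenAI-compatible, so they can be handed to a standard OpenAI client as base_url, which is what prepare_client_args() above does. A minimal sketch with the openai SDK; the key and model name are placeholders, not values from this diff:

    from openai import OpenAI

    client = OpenAI(
        api_key="<your-gemini-api-key>",  # placeholder
        base_url="https://generativelanguage.googleapis.com/v1beta/openai",
    )
    # e.g. client.chat.completions.create(model="gemini-2.0-flash", messages=[...])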