pygpt-net 2.6.45__py3-none-any.whl → 2.6.47__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +12 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/__init__.py +1 -3
- pygpt_net/controller/audio/audio.py +2 -0
- pygpt_net/controller/chat/text.py +2 -1
- pygpt_net/controller/debug/debug.py +11 -9
- pygpt_net/controller/dialogs/debug.py +40 -29
- pygpt_net/controller/notepad/notepad.py +0 -2
- pygpt_net/controller/theme/theme.py +5 -5
- pygpt_net/controller/ui/tabs.py +40 -2
- pygpt_net/core/debug/agent.py +19 -14
- pygpt_net/core/debug/assistants.py +22 -24
- pygpt_net/core/debug/attachments.py +11 -7
- pygpt_net/core/debug/config.py +22 -23
- pygpt_net/core/debug/context.py +63 -63
- pygpt_net/core/debug/db.py +1 -4
- pygpt_net/core/debug/events.py +14 -11
- pygpt_net/core/debug/indexes.py +41 -76
- pygpt_net/core/debug/kernel.py +11 -8
- pygpt_net/core/debug/models.py +20 -15
- pygpt_net/core/debug/plugins.py +9 -6
- pygpt_net/core/debug/presets.py +16 -11
- pygpt_net/core/debug/tabs.py +28 -22
- pygpt_net/core/debug/ui.py +25 -22
- pygpt_net/core/render/web/renderer.py +5 -2
- pygpt_net/core/tabs/tab.py +16 -3
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/settings.json +15 -17
- pygpt_net/data/css/style.dark.css +6 -0
- pygpt_net/data/css/web-blocks.css +4 -0
- pygpt_net/data/css/web-blocks.light.css +1 -1
- pygpt_net/data/css/web-chatgpt.css +4 -0
- pygpt_net/data/css/web-chatgpt.light.css +1 -1
- pygpt_net/data/css/web-chatgpt_wide.css +4 -0
- pygpt_net/data/css/web-chatgpt_wide.light.css +1 -1
- pygpt_net/data/js/app.js +1804 -1688
- pygpt_net/data/locale/locale.de.ini +1 -1
- pygpt_net/data/locale/locale.en.ini +1 -1
- pygpt_net/data/locale/locale.es.ini +1 -1
- pygpt_net/data/locale/locale.fr.ini +1 -1
- pygpt_net/data/locale/locale.it.ini +1 -1
- pygpt_net/data/locale/locale.pl.ini +2 -2
- pygpt_net/data/locale/locale.uk.ini +1 -1
- pygpt_net/data/locale/locale.zh.ini +1 -1
- pygpt_net/item/model.py +4 -1
- pygpt_net/js_rc.py +14303 -14540
- pygpt_net/provider/api/anthropic/__init__.py +3 -1
- pygpt_net/provider/api/anthropic/tools.py +1 -1
- pygpt_net/provider/api/google/__init__.py +7 -1
- pygpt_net/provider/api/x_ai/__init__.py +5 -1
- pygpt_net/provider/core/config/patch.py +14 -1
- pygpt_net/provider/llms/anthropic.py +37 -5
- pygpt_net/provider/llms/azure_openai.py +3 -1
- pygpt_net/provider/llms/base.py +13 -1
- pygpt_net/provider/llms/deepseek_api.py +13 -3
- pygpt_net/provider/llms/google.py +14 -1
- pygpt_net/provider/llms/hugging_face_api.py +105 -24
- pygpt_net/provider/llms/hugging_face_embedding.py +88 -0
- pygpt_net/provider/llms/hugging_face_router.py +28 -16
- pygpt_net/provider/llms/local.py +2 -0
- pygpt_net/provider/llms/mistral.py +60 -3
- pygpt_net/provider/llms/open_router.py +4 -2
- pygpt_net/provider/llms/openai.py +4 -1
- pygpt_net/provider/llms/perplexity.py +66 -5
- pygpt_net/provider/llms/utils.py +39 -0
- pygpt_net/provider/llms/voyage.py +50 -0
- pygpt_net/provider/llms/x_ai.py +70 -10
- pygpt_net/ui/widget/lists/db.py +1 -0
- pygpt_net/ui/widget/lists/debug.py +1 -0
- pygpt_net/ui/widget/tabs/body.py +23 -4
- pygpt_net/ui/widget/textarea/notepad.py +0 -4
- {pygpt_net-2.6.45.dist-info → pygpt_net-2.6.47.dist-info}/METADATA +16 -4
- {pygpt_net-2.6.45.dist-info → pygpt_net-2.6.47.dist-info}/RECORD +77 -74
- {pygpt_net-2.6.45.dist-info → pygpt_net-2.6.47.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.45.dist-info → pygpt_net-2.6.47.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.45.dist-info → pygpt_net-2.6.47.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/llms/mistral.py
CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 import os
@@ -44,12 +44,41 @@ class MistralAILLM(BaseLLM):
         :return: LLM provider instance
         """
         from llama_index.llms.mistralai import MistralAI
+        class MistralAIWithProxy(MistralAI):
+            def __init__(self, *args, proxy: Optional[str] = None, **kwargs):
+                endpoint = kwargs.get("endpoint")
+                super().__init__(*args, **kwargs)
+                if not proxy:
+                    return
+
+                import httpx
+                from mistralai import Mistral
+                timeout = getattr(self, "timeout", 120)
+
+                try:
+                    sync_client = httpx.Client(proxy=proxy, timeout=timeout, follow_redirects=True)
+                    async_client = httpx.AsyncClient(proxy=proxy, timeout=timeout, follow_redirects=True)
+                except TypeError:
+                    sync_client = httpx.Client(proxies=proxy, timeout=timeout, follow_redirects=True)
+                    async_client = httpx.AsyncClient(proxies=proxy, timeout=timeout, follow_redirects=True)
+
+                sdk_kwargs = {
+                    "api_key": self.api_key,
+                    "client": sync_client,
+                    "async_client": async_client,
+                }
+                if endpoint:
+                    sdk_kwargs["server_url"] = endpoint
+
+                self._client = Mistral(**sdk_kwargs)
+
         args = self.parse_args(model.llama_index, window)
+        proxy = window.core.config.get("api_proxy") or None
         if "model" not in args:
             args["model"] = model.id
         if "api_key" not in args or args["api_key"] == "":
             args["api_key"] = window.core.config.get("api_key_mistral", "")
-        return
+        return MistralAIWithProxy(**args, proxy=proxy)
 
     def get_embeddings_model(
         self,
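MistralAIWithProxy above builds its httpx clients twice because httpx renamed the proxy keyword: recent releases take a single proxy= argument, while older ones only accept proxies= and raise TypeError on the new name. A minimal standalone sketch of that compatibility pattern (the helper name is illustrative, not part of the package):

import httpx

def make_proxied_clients(proxy: str, timeout: float = 120.0):
    # httpx >= 0.26 accepts proxy=...; older releases raise TypeError
    # on the unknown keyword and only accept proxies=...
    try:
        sync_client = httpx.Client(proxy=proxy, timeout=timeout, follow_redirects=True)
        async_client = httpx.AsyncClient(proxy=proxy, timeout=timeout, follow_redirects=True)
    except TypeError:
        sync_client = httpx.Client(proxies=proxy, timeout=timeout, follow_redirects=True)
        async_client = httpx.AsyncClient(proxies=proxy, timeout=timeout, follow_redirects=True)
    return sync_client, async_client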
@@ -64,6 +93,32 @@ class MistralAILLM(BaseLLM):
         :return: Embedding provider instance
         """
         from llama_index.embeddings.mistralai import MistralAIEmbedding
+        class MistralAIEmbeddingWithProxy(MistralAIEmbedding):
+            def __init__(self, *args, proxy: Optional[str] = None, api_key: Optional[str] = None, **kwargs):
+                captured_key = api_key or os.environ.get("MISTRAL_API_KEY", "")
+                super().__init__(*args, api_key=api_key, **kwargs)
+
+                if not proxy:
+                    return
+
+                import httpx
+                try:
+                    sync_client = httpx.Client(proxy=proxy, timeout=60.0, follow_redirects=True)
+                    async_client = httpx.AsyncClient(proxy=proxy, timeout=60.0, follow_redirects=True)
+                except TypeError:
+                    sync_client = httpx.Client(proxies=proxy, timeout=60.0, follow_redirects=True)
+                    async_client = httpx.AsyncClient(proxies=proxy, timeout=60.0, follow_redirects=True)
+
+                from mistralai import Mistral
+                server_url = os.environ.get("MISTRAL_ENDPOINT") or None
+                self._client = Mistral(
+                    api_key=captured_key,
+                    client=sync_client,
+                    async_client=async_client,
+                    **({"server_url": server_url} if server_url else {}),
+                )
+                if hasattr(self, "_mistralai_client"):
+                    self._mistralai_client = self._client
         args = {}
         if config is not None:
             args = self.parse_args({
@@ -73,7 +128,9 @@ class MistralAILLM(BaseLLM):
             args["api_key"] = window.core.config.get("api_key_mistral", "")
         if "model" in args and "model_name" not in args:
             args["model_name"] = args.pop("model")
-
+
+        proxy = window.core.config.get("api_proxy") or None
+        return MistralAIEmbeddingWithProxy(**args, proxy=proxy)
 
     def init_embeddings(
         self,
pygpt_net/provider/llms/open_router.py
CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 from typing import Optional, Dict, List
@@ -52,6 +52,7 @@ class OpenRouterLLM(BaseLLM):
             args["api_base"] = window.core.config.get("api_endpoint_open_router", "")
         if "model" in args and "model_name" not in args:
             args["model_name"] = args.pop("model")
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         return OpenAILikeEmbedding(**args)
 
     def llama(
@@ -80,6 +81,7 @@ class OpenRouterLLM(BaseLLM):
             args["is_chat_model"] = True
         if "is_function_calling_model" not in args:
             args["is_function_calling_model"] = model.tool_calls
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         return OpenAILike(**args)
 
     def get_models(
@@ -101,4 +103,4 @@ class OpenRouterLLM(BaseLLM):
                 "id": item.id,
                 "name": item.id,
             })
-        return items
+        return items
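Several providers in this release route their keyword arguments through self.inject_llamaindex_http_clients(args, window.core.config) before instantiating the LlamaIndex client, but the helper itself (added to pygpt_net/provider/llms/base.py, +13 -1 in the file list) is not shown in this diff. A plausible sketch of what such a hook does, assuming it wires the global api_proxy into httpx clients that LlamaIndex's OpenAI-style constructors accept as http_client / async_http_client:

from typing import Any, Dict
import httpx

def inject_llamaindex_http_clients(args: Dict[str, Any], config) -> Dict[str, Any]:
    # sketch only: the real helper's body is not part of this diff
    proxy = config.get("api_proxy") or None
    if not proxy:
        return args
    try:
        args.setdefault("http_client", httpx.Client(proxy=proxy))
        args.setdefault("async_http_client", httpx.AsyncClient(proxy=proxy))
    except TypeError:  # older httpx: 'proxies' instead of 'proxy'
        args.setdefault("http_client", httpx.Client(proxies=proxy))
        args.setdefault("async_http_client", httpx.AsyncClient(proxies=proxy))
    return args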
pygpt_net/provider/llms/openai.py
CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 from typing import Optional, List, Dict
@@ -97,6 +97,7 @@ class OpenAILLM(BaseLLM):
         if "model" not in args:
             args["model"] = model.id
 
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         if window.core.config.get('api_use_responses_llama', False):
             tools = []
             tools = window.core.api.openai.remote_tools.append_to_tools(
@@ -153,6 +154,8 @@ class OpenAILLM(BaseLLM):
             args["api_key"] = window.core.config.get("api_key", "")
         if "model" in args and "model_name" not in args:
             args["model_name"] = args.pop("model")
+
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         return OpenAIEmbedding(**args)
 
     def get_models(
pygpt_net/provider/llms/perplexity.py
CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 from typing import Optional, List, Dict
@@ -69,7 +69,7 @@ class PerplexityLLM(BaseLLM):
         stream: bool = False
     ) -> LlamaBaseLLM:
         """
-        Return LLM provider instance for llama
+        Return LLM provider instance for llama (Perplexity)
 
         :param window: window instance
         :param model: model instance
@@ -77,12 +77,73 @@ class PerplexityLLM(BaseLLM):
         :return: LLM provider instance
         """
         from llama_index.llms.perplexity import Perplexity as LlamaPerplexity
+        from .utils import ProxyEnv
+
+        cfg = window.core.config
         args = self.parse_args(model.llama_index, window)
-
-
+
+        if "api_key" not in args or not args["api_key"]:
+            args["api_key"] = cfg.get("api_key_perplexity", "")
         if "model" not in args:
             args["model"] = model.id
-
+
+        custom_base = cfg.get("api_endpoint_perplexity", "").strip()
+        if custom_base and "api_base" not in args:
+            args["api_base"] = custom_base
+
+        # httpx.Client/AsyncClient (proxy, timeout, socks etc.)
+        try:
+            args_injected = self.inject_llamaindex_http_clients(dict(args), cfg)
+            return LlamaPerplexity(**args_injected)
+        except TypeError:
+            return LlamaPerplexity(**args)
+
+        # -----------------------------------
+        # TODO: fallback
+        proxy = cfg.get("api_proxy") or cfg.get("api_native_perplexity.proxy")
+
+        class PerplexityWithProxy(LlamaPerplexity):
+            def __init__(self, *a, **kw):
+                super().__init__(*a, **kw)
+                self._proxy = proxy
+
+            # sync
+            def complete(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return super().complete(*a, **kw)
+
+            def chat(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return super().chat(*a, **kw)
+
+            def stream_complete(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return super().stream_complete(*a, **kw)
+
+            def stream_chat(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return super().stream_chat(*a, **kw)
+
+            # async
+            async def acomplete(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return await super().acomplete(*a, **kw)
+
+            async def achat(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return await super().achat(*a, **kw)
+
+            async def astream_complete(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    async for chunk in super().astream_complete(*a, **kw):
+                        yield chunk
+
+            async def astream_chat(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    async for chunk in super().astream_chat(*a, **kw):
+                        yield chunk
+
+        return PerplexityWithProxy(**args)
 
     def llama_multimodal(
         self,
pygpt_net/provider/llms/utils.py
ADDED

@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.09.15 01:00:00 #
+# ================================================== #
+
+import os
+from contextlib import ContextDecorator
+from typing import Optional
+
+class ProxyEnv(ContextDecorator):
+    def __init__(self, proxy: Optional[str]):
+        self.proxy = proxy
+        self._saved = {}
+
+    def __enter__(self):
+        if not self.proxy:
+            return self
+        for key in ("HTTP_PROXY", "HTTPS_PROXY", "ALL_PROXY"):
+            self._saved[key] = os.environ.get(key)
+        os.environ["HTTP_PROXY"] = self.proxy
+        os.environ["HTTPS_PROXY"] = self.proxy
+        os.environ["ALL_PROXY"] = self.proxy
+        return self
+
+    def __exit__(self, exc_type, exc, tb):
+        if not self.proxy:
+            return False
+        for key, val in self._saved.items():
+            if val is None:
+                os.environ.pop(key, None)
+            else:
+                os.environ[key] = val
+        return False
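ProxyEnv above subclasses contextlib.ContextDecorator, so it works both as a context manager and as a function decorator: it sets HTTP_PROXY / HTTPS_PROXY / ALL_PROXY for the duration and restores the previous values on exit. A minimal usage sketch with a placeholder proxy URL:

from pygpt_net.provider.llms.utils import ProxyEnv

proxy = "socks5://user:pass@127.0.0.1:1080"  # placeholder

# as a context manager: clients created inside read the proxy from env
with ProxyEnv(proxy):
    pass  # e.g. call an SDK that honors HTTP(S)_PROXY

# as a decorator: env is set around every call, then restored
@ProxyEnv(proxy)
def embed_batch(texts):
    return texts

Note that mutating os.environ is process-wide, so concurrent requests running outside the block briefly see the proxy too; that trade-off is what keeps the wrapper SDK-agnostic.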
pygpt_net/provider/llms/voyage.py
ADDED

@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.09.15 01:00:00 #
+# ================================================== #
+
+from typing import Optional, List
+import voyageai
+from llama_index.embeddings.voyageai import VoyageEmbedding
+from .utils import ProxyEnv
+
+class VoyageEmbeddingWithProxy(VoyageEmbedding):
+    def __init__(
+        self,
+        *args,
+        proxy: Optional[str] = None,
+        voyage_api_key: Optional[str] = None,
+        timeout: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        **kwargs
+    ):
+        super().__init__(*args, voyage_api_key=voyage_api_key, **kwargs)
+        self._proxy = proxy
+
+        if timeout is not None or max_retries is not None:
+            self._client = voyageai.Client(
+                api_key=voyage_api_key,
+                timeout=timeout,
+                max_retries=max_retries,
+            )
+            self._aclient = voyageai.AsyncClient(
+                api_key=voyage_api_key,
+                timeout=timeout,
+                max_retries=max_retries,
+            )
+
+    # sync batch
+    def get_text_embedding_batch(self, texts: List[str], show_progress: bool = False, **kwargs):
+        with ProxyEnv(self._proxy):
+            return super().get_text_embedding_batch(texts, show_progress=show_progress, **kwargs)
+
+    # async batch
+    async def aget_text_embedding_batch(self, texts: List[str], show_progress: bool = False):
+        with ProxyEnv(self._proxy):
+            return await super().aget_text_embedding_batch(texts, show_progress=show_progress)
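A hypothetical wiring example for the new Voyage wrapper; the model name, key, and proxy below are placeholders, and the constructor otherwise mirrors llama_index's VoyageEmbedding:

from pygpt_net.provider.llms.voyage import VoyageEmbeddingWithProxy

embed_model = VoyageEmbeddingWithProxy(
    model_name="voyage-3",                  # placeholder model id
    voyage_api_key="YOUR_VOYAGE_KEY",       # placeholder
    proxy="http://proxy.example.com:8080",  # placeholder
    timeout=60.0,
    max_retries=3,
)
vectors = embed_model.get_text_embedding_batch(["hello", "world"])

Only the batch entry points are wrapped in ProxyEnv here; single-text calls would bypass the proxy unless the base class routes them through the batch methods.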
pygpt_net/provider/llms/x_ai.py
CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 from typing import Optional, List, Dict
@@ -84,6 +84,11 @@ class xAILLM(BaseLLM):
             args["api_key"] = window.core.config.get("api_key_xai", "")
         if "api_base" not in args or args["api_base"] == "":
             args["api_base"] = window.core.config.get("api_endpoint_xai", "https://api.x.ai/v1")
+        if "is_chat_model" not in args:
+            args["is_chat_model"] = True
+        if "is_function_calling_model" not in args:
+            args["is_function_calling_model"] = model.tool_calls
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         return OpenAILike(**args)
 
     def llama_multimodal(
@@ -108,23 +113,78 @@ class xAILLM(BaseLLM):
         config: Optional[List[Dict]] = None
     ) -> BaseEmbedding:
         """
-        Return provider instance for embeddings
+        Return provider instance for embeddings (xAI)
 
         :param window: window instance
         :param config: config keyword arguments list
         :return: Embedding provider instance
         """
-        from .llama_index.x_ai.embedding import XAIEmbedding
-
+        from .llama_index.x_ai.embedding import XAIEmbedding as BaseXAIEmbedding
+
+        cfg = window.core.config
+
+        args: Dict = {}
         if config is not None:
-            args = self.parse_args({
-
-
-
-
+            args = self.parse_args({"args": config}, window)
+
+        if "api_key" not in args or not args["api_key"]:
+            args["api_key"] = cfg.get("api_key_xai", "")
+
         if "model" in args and "model_name" not in args:
             args["model_name"] = args.pop("model")
-
+
+        # if OpenAI-compatible
+        if "api_base" not in args or not args["api_base"]:
+            args["api_base"] = cfg.get("api_endpoint_xai", "https://api.x.ai/v1")
+
+        proxy = cfg.get("api_proxy") or cfg.get("api_native_xai.proxy")
+        timeout = cfg.get("api_native_xai.timeout")
+
+        # 1) REST (OpenAI-compatible)
+        try_args = dict(args)
+        try:
+            try_args = self.inject_llamaindex_http_clients(try_args, cfg)
+            return BaseXAIEmbedding(**try_args)
+        except TypeError:
+            # goto gRPC
+            pass
+
+        # 2) Fallback: gRPC (xai_sdk)
+        def _build_xai_grpc_client(api_key: str, proxy_url: Optional[str], timeout_val: Optional[float]):
+            import os
+            import xai_sdk
+            kwargs = {"api_key": api_key}
+            if timeout_val is not None:
+                kwargs["timeout"] = timeout_val
+
+            # channel_options - 'grpc.http_proxy'
+            if proxy_url:
+                try:
+                    kwargs["channel_options"] = [("grpc.http_proxy", proxy_url)]
+                except TypeError:
+                    # ENV
+                    os.environ["grpc_proxy"] = proxy_url
+
+            try:
+                return xai_sdk.Client(**kwargs)
+            except TypeError:
+                if proxy_url:
+                    os.environ["grpc_proxy"] = proxy_url
+                return xai_sdk.Client(api_key=api_key)
+
+        xai_client = _build_xai_grpc_client(args.get("api_key", ""), proxy, timeout)
+
+        # gRPC
+        class XAIEmbeddingWithProxy(BaseXAIEmbedding):
+            def __init__(self, *a, injected_client=None, **kw):
+                super().__init__(*a, **kw)
+                if injected_client is not None:
+                    for attr in ("client", "_client", "_xai_client"):
+                        if hasattr(self, attr):
+                            setattr(self, attr, injected_client)
+                            break
+
+        return XAIEmbeddingWithProxy(**args, injected_client=xai_client)
 
     def get_models(
         self,
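The gRPC fallback above has to cope with xai_sdk.Client signatures that differ between releases, hence the nested TypeError guards. The two proxy strategies it tries are standard gRPC mechanisms: the grpc.http_proxy channel argument, and the grpc_proxy environment variable that gRPC core consults when no channel override is given. A condensed sketch of that decision (names mirror the diff; xai_sdk's support for channel_options is an assumption the code itself probes at runtime):

import os
from typing import Optional

def grpc_proxy_kwargs(api_key: str, proxy_url: Optional[str]) -> dict:
    kwargs = {"api_key": api_key}
    if proxy_url:
        # preferred: per-channel proxy via gRPC channel args
        kwargs["channel_options"] = [("grpc.http_proxy", proxy_url)]
        # fallback consulted by gRPC core if channel args are unsupported
        os.environ.setdefault("grpc_proxy", proxy_url)
    return kwargs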
pygpt_net/ui/widget/lists/db.py
CHANGED

pygpt_net/ui/widget/tabs/body.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.14 20:00:00 #
 # ================================================== #
 
 from typing import Any
@@ -31,7 +31,7 @@ class TabBody(QTabWidget):
         """
         Clean up on delete
         """
-        if self.on_delete:
+        if self.on_delete and callable(self.on_delete):
             self.on_delete(self)
         self.delete_refs()
 
@@ -49,12 +49,31 @@ class TabBody(QTabWidget):
         Delete all references to widgets in this tab
         """
         for ref in self.refs:
+            if ref is None:
+                continue
             if ref and hasattr(ref, 'on_delete'):
-
+                try:
+                    ref.on_delete()
+                except Exception:
+                    pass
             if ref and hasattr(ref, 'deleteLater'):
-
+                try:
+                    ref.deleteLater()
+                except Exception:
+                    pass
         del self.refs[:]
 
+    def delete_ref(self, widget: Any) -> None:
+        """
+        Unpin reference to widget in this tab
+
+        :param widget: widget reference
+        """
+        for ref in self.refs:
+            if ref and ref is widget:
+                self.refs.remove(ref)
+                break
+
     def append(self, body: QWidget):
         """
         Append tab body (parent widget)
pygpt_net/ui/widget/textarea/notepad.py
CHANGED

@@ -120,10 +120,6 @@ class NotepadOutput(QTextEdit):
         if self.finder:
             self.finder.disconnect()  # disconnect finder
             self.finder = None  # delete finder
-        try:
-            self._vscroll.valueChanged.disconnect(self._on_scrollbar_value_changed)
-        except Exception:
-            pass
         self.deleteLater()
 
     def showEvent(self, event):
{pygpt_net-2.6.45.dist-info → pygpt_net-2.6.47.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: pygpt-net
-Version: 2.6.
+Version: 2.6.47
 Summary: Desktop AI Assistant powered by: OpenAI GPT-5, GPT-4, o1, o3, Gemini, Claude, Grok, DeepSeek, and other models supported by Llama Index, and Ollama. Chatbot, agents, completion, image generation, vision analysis, speech-to-text, plugins, internet access, file handling, command execution and more.
 License: MIT
 Keywords: ai,api,api key,app,assistant,bielik,chat,chatbot,chatgpt,claude,dall-e,deepseek,desktop,gemini,gpt,gpt-3.5,gpt-4,gpt-4-vision,gpt-4o,gpt-5,gpt-oss,gpt3.5,gpt4,grok,langchain,llama-index,llama3,mistral,o1,o3,ollama,openai,presets,py-gpt,py_gpt,pygpt,pyside,qt,text completion,tts,ui,vision,whisper
@@ -118,7 +118,7 @@ Description-Content-Type: text/markdown
 
 [](https://snapcraft.io/pygpt)
 
-Release: **2.6.
+Release: **2.6.47** | build: **2025-09-15** | Python: **>=3.10, <3.14**
 
 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -2287,6 +2287,8 @@ Config -> Settings...
 
 - `OpenGL hardware acceleration`: enables hardware acceleration in `WebEngine / Chromium` renderer. Default: False.
 
+- `Proxy address`: Proxy address to be used for connection in API SDKs; supports HTTP/SOCKS, e.g. http://proxy.example.com or socks5://user:pass@host:port
+
 - `Application environment (os.environ)`: Additional environment vars to set on application start.
 
 - `Memory Limit`: Renderer memory limit; set to 0 to disable. If > 0, the app will try to free memory after the limit is reached. Accepted formats: 3.5GB, 2GB, 2048MB, 1_000_000. Minimum: 2GB.
@@ -2299,8 +2301,6 @@ Config -> Settings...
 
 - `API Endpoint`: OpenAI API endpoint URL, default: https://api.openai.com/v1.
 
-- `Proxy address`: Proxy address to be used for connection; supports HTTP/SOCKS.
-
 - `Anthropic API KEY`: Required for the Anthropic API and Claude models.
 
 - `Deepseek API KEY`: Required for the Deepseek API.
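The relocated `Proxy address` setting above advertises SOCKS support; with the httpx-based SDKs that only works when httpx's optional SOCKS extra is installed (per the httpx docs: pip install "httpx[socks]"). A one-line sanity check with a placeholder URL:

import httpx

# raises at construction if the SOCKS backend (socksio) is missing;
# the proxy= keyword itself requires httpx >= 0.26
client = httpx.Client(proxy="socks5://user:pass@127.0.0.1:1080")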
@@ -3612,6 +3612,18 @@ may consume additional tokens that are not displayed in the main window.
 
 ## Recent changes:
 
+**2.6.47 (2025-09-15)**
+
+- Improved: Parsing of custom markup tags.
+- Optimized: Switching profiles.
+
+**2.6.46 (2025-09-15)**
+
+- Added: Global proxy settings for all API SDKs.
+- Fixed: xAI client configuration in Chat with Files.
+- Fixed: Top margin in streaming container.
+- Refactored: Debug workers.
+
 **2.6.45 (2025-09-13)**
 
 - Improved: Parsing of custom markup in the stream.