pygpt-net 2.6.44__py3-none-any.whl → 2.6.46__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +12 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +0 -5
- pygpt_net/controller/ctx/ctx.py +6 -0
- pygpt_net/controller/debug/debug.py +11 -9
- pygpt_net/controller/debug/fixtures.py +1 -1
- pygpt_net/controller/dialogs/debug.py +40 -29
- pygpt_net/core/debug/agent.py +19 -14
- pygpt_net/core/debug/assistants.py +22 -24
- pygpt_net/core/debug/attachments.py +11 -7
- pygpt_net/core/debug/config.py +22 -23
- pygpt_net/core/debug/console/console.py +2 -1
- pygpt_net/core/debug/context.py +63 -63
- pygpt_net/core/debug/db.py +1 -4
- pygpt_net/core/debug/debug.py +1 -1
- pygpt_net/core/debug/events.py +14 -11
- pygpt_net/core/debug/indexes.py +41 -76
- pygpt_net/core/debug/kernel.py +11 -8
- pygpt_net/core/debug/models.py +20 -15
- pygpt_net/core/debug/plugins.py +9 -6
- pygpt_net/core/debug/presets.py +16 -11
- pygpt_net/core/debug/tabs.py +28 -22
- pygpt_net/core/debug/ui.py +25 -22
- pygpt_net/core/fixtures/stream/generator.py +1 -2
- pygpt_net/core/render/web/body.py +290 -23
- pygpt_net/core/render/web/helpers.py +26 -0
- pygpt_net/core/render/web/renderer.py +459 -705
- pygpt_net/core/tabs/tab.py +14 -1
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/settings.json +15 -17
- pygpt_net/data/css/style.dark.css +6 -0
- pygpt_net/data/css/web-blocks.css +4 -0
- pygpt_net/data/css/web-blocks.light.css +1 -1
- pygpt_net/data/css/web-chatgpt.css +4 -0
- pygpt_net/data/css/web-chatgpt.light.css +1 -1
- pygpt_net/data/css/web-chatgpt_wide.css +4 -0
- pygpt_net/data/css/web-chatgpt_wide.light.css +1 -1
- pygpt_net/data/fixtures/fake_stream.txt +5733 -0
- pygpt_net/data/js/app.js +1921 -901
- pygpt_net/data/locale/locale.de.ini +1 -1
- pygpt_net/data/locale/locale.en.ini +5 -5
- pygpt_net/data/locale/locale.es.ini +1 -1
- pygpt_net/data/locale/locale.fr.ini +1 -1
- pygpt_net/data/locale/locale.it.ini +1 -1
- pygpt_net/data/locale/locale.pl.ini +2 -2
- pygpt_net/data/locale/locale.uk.ini +1 -1
- pygpt_net/data/locale/locale.zh.ini +1 -1
- pygpt_net/item/model.py +4 -1
- pygpt_net/js_rc.py +13076 -10198
- pygpt_net/provider/api/anthropic/__init__.py +3 -1
- pygpt_net/provider/api/anthropic/tools.py +1 -1
- pygpt_net/provider/api/google/__init__.py +7 -1
- pygpt_net/provider/api/x_ai/__init__.py +5 -1
- pygpt_net/provider/core/config/patch.py +14 -1
- pygpt_net/provider/llms/anthropic.py +37 -5
- pygpt_net/provider/llms/azure_openai.py +3 -1
- pygpt_net/provider/llms/base.py +13 -1
- pygpt_net/provider/llms/deepseek_api.py +13 -3
- pygpt_net/provider/llms/google.py +14 -1
- pygpt_net/provider/llms/hugging_face_api.py +105 -24
- pygpt_net/provider/llms/hugging_face_embedding.py +88 -0
- pygpt_net/provider/llms/hugging_face_router.py +28 -16
- pygpt_net/provider/llms/local.py +2 -0
- pygpt_net/provider/llms/mistral.py +60 -3
- pygpt_net/provider/llms/open_router.py +4 -2
- pygpt_net/provider/llms/openai.py +4 -1
- pygpt_net/provider/llms/perplexity.py +66 -5
- pygpt_net/provider/llms/utils.py +39 -0
- pygpt_net/provider/llms/voyage.py +50 -0
- pygpt_net/provider/llms/x_ai.py +70 -10
- pygpt_net/ui/layout/chat/output.py +1 -1
- pygpt_net/ui/widget/lists/db.py +1 -0
- pygpt_net/ui/widget/lists/debug.py +1 -0
- pygpt_net/ui/widget/tabs/body.py +12 -1
- pygpt_net/ui/widget/textarea/web.py +4 -4
- pygpt_net/utils.py +3 -2
- {pygpt_net-2.6.44.dist-info → pygpt_net-2.6.46.dist-info}/METADATA +73 -16
- {pygpt_net-2.6.44.dist-info → pygpt_net-2.6.46.dist-info}/RECORD +82 -78
- {pygpt_net-2.6.44.dist-info → pygpt_net-2.6.46.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.44.dist-info → pygpt_net-2.6.46.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.44.dist-info → pygpt_net-2.6.46.dist-info}/entry_points.txt +0 -0

pygpt_net/provider/llms/mistral.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 import os
@@ -44,12 +44,41 @@ class MistralAILLM(BaseLLM)
         :return: LLM provider instance
         """
         from llama_index.llms.mistralai import MistralAI
+        class MistralAIWithProxy(MistralAI):
+            def __init__(self, *args, proxy: Optional[str] = None, **kwargs):
+                endpoint = kwargs.get("endpoint")
+                super().__init__(*args, **kwargs)
+                if not proxy:
+                    return
+
+                import httpx
+                from mistralai import Mistral
+                timeout = getattr(self, "timeout", 120)
+
+                try:
+                    sync_client = httpx.Client(proxy=proxy, timeout=timeout, follow_redirects=True)
+                    async_client = httpx.AsyncClient(proxy=proxy, timeout=timeout, follow_redirects=True)
+                except TypeError:
+                    sync_client = httpx.Client(proxies=proxy, timeout=timeout, follow_redirects=True)
+                    async_client = httpx.AsyncClient(proxies=proxy, timeout=timeout, follow_redirects=True)
+
+                sdk_kwargs = {
+                    "api_key": self.api_key,
+                    "client": sync_client,
+                    "async_client": async_client,
+                }
+                if endpoint:
+                    sdk_kwargs["server_url"] = endpoint
+
+                self._client = Mistral(**sdk_kwargs)
+
         args = self.parse_args(model.llama_index, window)
+        proxy = window.core.config.get("api_proxy") or None
         if "model" not in args:
             args["model"] = model.id
         if "api_key" not in args or args["api_key"] == "":
             args["api_key"] = window.core.config.get("api_key_mistral", "")
-        return
+        return MistralAIWithProxy(**args, proxy=proxy)
 
     def get_embeddings_model(
         self,
@@ -64,6 +93,32 @@ class MistralAILLM(BaseLLM)
         :return: Embedding provider instance
         """
         from llama_index.embeddings.mistralai import MistralAIEmbedding
+        class MistralAIEmbeddingWithProxy(MistralAIEmbedding):
+            def __init__(self, *args, proxy: Optional[str] = None, api_key: Optional[str] = None, **kwargs):
+                captured_key = api_key or os.environ.get("MISTRAL_API_KEY", "")
+                super().__init__(*args, api_key=api_key, **kwargs)
+
+                if not proxy:
+                    return
+
+                import httpx
+                try:
+                    sync_client = httpx.Client(proxy=proxy, timeout=60.0, follow_redirects=True)
+                    async_client = httpx.AsyncClient(proxy=proxy, timeout=60.0, follow_redirects=True)
+                except TypeError:
+                    sync_client = httpx.Client(proxies=proxy, timeout=60.0, follow_redirects=True)
+                    async_client = httpx.AsyncClient(proxies=proxy, timeout=60.0, follow_redirects=True)
+
+                from mistralai import Mistral
+                server_url = os.environ.get("MISTRAL_ENDPOINT") or None
+                self._client = Mistral(
+                    api_key=captured_key,
+                    client=sync_client,
+                    async_client=async_client,
+                    **({"server_url": server_url} if server_url else {}),
+                )
+                if hasattr(self, "_mistralai_client"):
+                    self._mistralai_client = self._client
         args = {}
         if config is not None:
             args = self.parse_args({
@@ -73,7 +128,9 @@ class MistralAILLM(BaseLLM)
             args["api_key"] = window.core.config.get("api_key_mistral", "")
         if "model" in args and "model_name" not in args:
             args["model_name"] = args.pop("model")
-
+
+        proxy = window.core.config.get("api_proxy") or None
+        return MistralAIEmbeddingWithProxy(**args, proxy=proxy)
 
     def init_embeddings(
         self,
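
The try/except TypeError around the httpx constructors above handles the keyword rename between httpx releases: newer versions accept proxy=, older ones only proxies=. A minimal standalone sketch of that fallback (the function name is illustrative, not part of the package):

import httpx

def make_proxied_clients(proxy: str, timeout: float = 120.0):
    # Newer httpx accepts proxy=; older releases raise TypeError and expect proxies=.
    try:
        sync_client = httpx.Client(proxy=proxy, timeout=timeout, follow_redirects=True)
        async_client = httpx.AsyncClient(proxy=proxy, timeout=timeout, follow_redirects=True)
    except TypeError:
        sync_client = httpx.Client(proxies=proxy, timeout=timeout, follow_redirects=True)
        async_client = httpx.AsyncClient(proxies=proxy, timeout=timeout, follow_redirects=True)
    return sync_client, async_client

The wrapper then rebuilds the mistralai SDK client with these proxied transports, so both the LLM and the embedding paths honor the api_proxy setting.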

pygpt_net/provider/llms/open_router.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 from typing import Optional, Dict, List
@@ -52,6 +52,7 @@ class OpenRouterLLM(BaseLLM)
             args["api_base"] = window.core.config.get("api_endpoint_open_router", "")
         if "model" in args and "model_name" not in args:
             args["model_name"] = args.pop("model")
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         return OpenAILikeEmbedding(**args)
 
     def llama(
@@ -80,6 +81,7 @@ class OpenRouterLLM(BaseLLM)
             args["is_chat_model"] = True
         if "is_function_calling_model" not in args:
             args["is_function_calling_model"] = model.tool_calls
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         return OpenAILike(**args)
 
     def get_models(
@@ -101,4 +103,4 @@ class OpenRouterLLM(BaseLLM)
                 "id": item.id,
                 "name": item.id,
             })
-        return items
+        return items
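
Both call sites above delegate to inject_llamaindex_http_clients(), which lives in the updated provider/llms/base.py (+13 -1 in the file list) and whose body is not shown in this diff. A hedged sketch of what such a helper plausibly does, assuming the api_proxy config key used elsewhere in this release and the http_client/async_http_client kwargs accepted by the OpenAI-style LlamaIndex classes:

from typing import Any, Dict
import httpx

def inject_llamaindex_http_clients_sketch(args: Dict[str, Any], config) -> Dict[str, Any]:
    # Illustrative only: pass proxied httpx clients through to the LlamaIndex constructor.
    proxy = config.get("api_proxy") or None
    if not proxy:
        return args
    try:
        args["http_client"] = httpx.Client(proxy=proxy, follow_redirects=True)
        args["async_http_client"] = httpx.AsyncClient(proxy=proxy, follow_redirects=True)
    except TypeError:  # older httpx releases use proxies=
        args["http_client"] = httpx.Client(proxies=proxy, follow_redirects=True)
        args["async_http_client"] = httpx.AsyncClient(proxies=proxy, follow_redirects=True)
    return args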

pygpt_net/provider/llms/openai.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 from typing import Optional, List, Dict
@@ -97,6 +97,7 @@ class OpenAILLM(BaseLLM)
         if "model" not in args:
             args["model"] = model.id
 
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         if window.core.config.get('api_use_responses_llama', False):
             tools = []
             tools = window.core.api.openai.remote_tools.append_to_tools(
@@ -153,6 +154,8 @@
             args["api_key"] = window.core.config.get("api_key", "")
         if "model" in args and "model_name" not in args:
             args["model_name"] = args.pop("model")
+
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         return OpenAIEmbedding(**args)
 
     def get_models(

pygpt_net/provider/llms/perplexity.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 from typing import Optional, List, Dict
@@ -69,7 +69,7 @@ class PerplexityLLM(BaseLLM)
         stream: bool = False
     ) -> LlamaBaseLLM:
         """
-        Return LLM provider instance for llama
+        Return LLM provider instance for llama (Perplexity)
 
         :param window: window instance
         :param model: model instance
@@ -77,12 +77,73 @@ class PerplexityLLM(BaseLLM)
         :return: LLM provider instance
         """
         from llama_index.llms.perplexity import Perplexity as LlamaPerplexity
+        from .utils import ProxyEnv
+
+        cfg = window.core.config
         args = self.parse_args(model.llama_index, window)
-
-
+
+        if "api_key" not in args or not args["api_key"]:
+            args["api_key"] = cfg.get("api_key_perplexity", "")
         if "model" not in args:
             args["model"] = model.id
-
+
+        custom_base = cfg.get("api_endpoint_perplexity", "").strip()
+        if custom_base and "api_base" not in args:
+            args["api_base"] = custom_base
+
+        # httpx.Client/AsyncClient (proxy, timeout, socks etc.)
+        try:
+            args_injected = self.inject_llamaindex_http_clients(dict(args), cfg)
+            return LlamaPerplexity(**args_injected)
+        except TypeError:
+            return LlamaPerplexity(**args)
+
+        # -----------------------------------
+        # TODO: fallback
+        proxy = cfg.get("api_proxy") or cfg.get("api_native_perplexity.proxy")
+
+        class PerplexityWithProxy(LlamaPerplexity):
+            def __init__(self, *a, **kw):
+                super().__init__(*a, **kw)
+                self._proxy = proxy
+
+            # sync
+            def complete(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return super().complete(*a, **kw)
+
+            def chat(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return super().chat(*a, **kw)
+
+            def stream_complete(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return super().stream_complete(*a, **kw)
+
+            def stream_chat(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return super().stream_chat(*a, **kw)
+
+            # async
+            async def acomplete(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return await super().acomplete(*a, **kw)
+
+            async def achat(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    return await super().achat(*a, **kw)
+
+            async def astream_complete(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    async for chunk in super().astream_complete(*a, **kw):
+                        yield chunk
+
+            async def astream_chat(self, *a, **kw):
+                with ProxyEnv(self._proxy):
+                    async for chunk in super().astream_chat(*a, **kw):
+                        yield chunk
+
+        return PerplexityWithProxy(**args)
 
     def llama_multimodal(
         self,

pygpt_net/provider/llms/utils.py
ADDED
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.09.15 01:00:00 #
+# ================================================== #
+
+import os
+from contextlib import ContextDecorator
+from typing import Optional
+
+class ProxyEnv(ContextDecorator):
+    def __init__(self, proxy: Optional[str]):
+        self.proxy = proxy
+        self._saved = {}
+
+    def __enter__(self):
+        if not self.proxy:
+            return self
+        for key in ("HTTP_PROXY", "HTTPS_PROXY", "ALL_PROXY"):
+            self._saved[key] = os.environ.get(key)
+        os.environ["HTTP_PROXY"] = self.proxy
+        os.environ["HTTPS_PROXY"] = self.proxy
+        os.environ["ALL_PROXY"] = self.proxy
+        return self
+
+    def __exit__(self, exc_type, exc, tb):
+        if not self.proxy:
+            return False
+        for key, val in self._saved.items():
+            if val is None:
+                os.environ.pop(key, None)
+            else:
+                os.environ[key] = val
+        return False
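
ProxyEnv only touches the process environment (HTTP_PROXY, HTTPS_PROXY, ALL_PROXY), so it works with any SDK that reads the proxy variables at request time, and it restores the previous values on exit. A short usage sketch; the import path assumes the module location added above:

import os
from pygpt_net.provider.llms.utils import ProxyEnv

with ProxyEnv("http://127.0.0.1:8080"):
    # inside the block all three proxy variables point at the proxy
    assert os.environ["HTTPS_PROXY"] == "http://127.0.0.1:8080"
# on exit the saved values (or their absence) are restored

@ProxyEnv("http://127.0.0.1:8080")  # ContextDecorator also allows decorator use
def fetch_with_proxy():
    ...  # any HTTP call made here sees the proxy environment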

pygpt_net/provider/llms/voyage.py
ADDED
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.09.15 01:00:00 #
+# ================================================== #
+
+from typing import Optional, List
+import voyageai
+from llama_index.embeddings.voyageai import VoyageEmbedding
+from .utils import ProxyEnv
+
+class VoyageEmbeddingWithProxy(VoyageEmbedding):
+    def __init__(
+        self,
+        *args,
+        proxy: Optional[str] = None,
+        voyage_api_key: Optional[str] = None,
+        timeout: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        **kwargs
+    ):
+        super().__init__(*args, voyage_api_key=voyage_api_key, **kwargs)
+        self._proxy = proxy
+
+        if timeout is not None or max_retries is not None:
+            self._client = voyageai.Client(
+                api_key=voyage_api_key,
+                timeout=timeout,
+                max_retries=max_retries,
+            )
+            self._aclient = voyageai.AsyncClient(
+                api_key=voyage_api_key,
+                timeout=timeout,
+                max_retries=max_retries,
+            )
+
+    # sync batch
+    def get_text_embedding_batch(self, texts: List[str], show_progress: bool = False, **kwargs):
+        with ProxyEnv(self._proxy):
+            return super().get_text_embedding_batch(texts, show_progress=show_progress, **kwargs)
+
+    # async batch
+    async def aget_text_embedding_batch(self, texts: List[str], show_progress: bool = False):
+        with ProxyEnv(self._proxy):
+            return await super().aget_text_embedding_batch(texts, show_progress=show_progress)
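
A construction sketch for the new wrapper: the proxy is applied per batch call through ProxyEnv, while passing timeout or max_retries rebuilds the underlying voyageai clients. The model name and key below are placeholders:

from pygpt_net.provider.llms.voyage import VoyageEmbeddingWithProxy

embed_model = VoyageEmbeddingWithProxy(
    model_name="voyage-3",              # placeholder model id
    voyage_api_key="YOUR_KEY",          # normally taken from app config
    proxy="http://127.0.0.1:8080",      # applied only around the batch methods
    timeout=60.0,
    max_retries=2,
)
vectors = embed_model.get_text_embedding_batch(["hello", "world"])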
pygpt_net/provider/llms/x_ai.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.15 01:00:00 #
 # ================================================== #
 
 from typing import Optional, List, Dict
@@ -84,6 +84,11 @@ class xAILLM(BaseLLM)
             args["api_key"] = window.core.config.get("api_key_xai", "")
         if "api_base" not in args or args["api_base"] == "":
             args["api_base"] = window.core.config.get("api_endpoint_xai", "https://api.x.ai/v1")
+        if "is_chat_model" not in args:
+            args["is_chat_model"] = True
+        if "is_function_calling_model" not in args:
+            args["is_function_calling_model"] = model.tool_calls
+        args = self.inject_llamaindex_http_clients(args, window.core.config)
         return OpenAILike(**args)
 
     def llama_multimodal(
@@ -108,23 +113,78 @@ class xAILLM(BaseLLM)
         config: Optional[List[Dict]] = None
     ) -> BaseEmbedding:
         """
-        Return provider instance for embeddings
+        Return provider instance for embeddings (xAI)
 
         :param window: window instance
         :param config: config keyword arguments list
         :return: Embedding provider instance
         """
-        from .llama_index.x_ai.embedding import XAIEmbedding
-
+        from .llama_index.x_ai.embedding import XAIEmbedding as BaseXAIEmbedding
+
+        cfg = window.core.config
+
+        args: Dict = {}
         if config is not None:
-            args = self.parse_args({
-
-
-
-
+            args = self.parse_args({"args": config}, window)
+
+        if "api_key" not in args or not args["api_key"]:
+            args["api_key"] = cfg.get("api_key_xai", "")
+
         if "model" in args and "model_name" not in args:
             args["model_name"] = args.pop("model")
-
+
+        # if OpenAI-compatible
+        if "api_base" not in args or not args["api_base"]:
+            args["api_base"] = cfg.get("api_endpoint_xai", "https://api.x.ai/v1")
+
+        proxy = cfg.get("api_proxy") or cfg.get("api_native_xai.proxy")
+        timeout = cfg.get("api_native_xai.timeout")
+
+        # 1) REST (OpenAI-compatible)
+        try_args = dict(args)
+        try:
+            try_args = self.inject_llamaindex_http_clients(try_args, cfg)
+            return BaseXAIEmbedding(**try_args)
+        except TypeError:
+            # goto gRPC
+            pass
+
+        # 2) Fallback: gRPC (xai_sdk)
+        def _build_xai_grpc_client(api_key: str, proxy_url: Optional[str], timeout_val: Optional[float]):
+            import os
+            import xai_sdk
+            kwargs = {"api_key": api_key}
+            if timeout_val is not None:
+                kwargs["timeout"] = timeout_val
+
+            # channel_options - 'grpc.http_proxy'
+            if proxy_url:
+                try:
+                    kwargs["channel_options"] = [("grpc.http_proxy", proxy_url)]
+                except TypeError:
+                    # ENV
+                    os.environ["grpc_proxy"] = proxy_url
+
+            try:
+                return xai_sdk.Client(**kwargs)
+            except TypeError:
+                if proxy_url:
+                    os.environ["grpc_proxy"] = proxy_url
+                return xai_sdk.Client(api_key=api_key)
+
+        xai_client = _build_xai_grpc_client(args.get("api_key", ""), proxy, timeout)
+
+        # gRPC
+        class XAIEmbeddingWithProxy(BaseXAIEmbedding):
+            def __init__(self, *a, injected_client=None, **kw):
+                super().__init__(*a, **kw)
+                if injected_client is not None:
+                    for attr in ("client", "_client", "_xai_client"):
+                        if hasattr(self, attr):
+                            setattr(self, attr, injected_client)
+                            break
+
+        return XAIEmbeddingWithProxy(**args, injected_client=xai_client)
 
     def get_models(
         self,
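
grpc.http_proxy is a standard gRPC channel argument, which is why the fallback above first tries channel_options and only then the grpc_proxy environment variable. A minimal sketch of the same idea with plain grpc, independent of xai_sdk (endpoint and proxy are placeholders):

import grpc

proxy_url = "http://127.0.0.1:8080"
channel = grpc.secure_channel(
    "api.x.ai:443",
    grpc.ssl_channel_credentials(),
    options=[("grpc.http_proxy", proxy_url)],  # route this channel through the proxy
)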

pygpt_net/ui/layout/chat/output.py
CHANGED
@@ -118,7 +118,7 @@ class Output
 
         nodes['inline.vision'] = HelpLabel(trans('inline.vision'))
         nodes['inline.vision'].setVisible(False)
-        nodes['inline.vision'].setContentsMargins(
+        nodes['inline.vision'].setContentsMargins(3, 2, 0, 0)
 
         opts_layout = QHBoxLayout()
         opts_layout.setContentsMargins(0, 0, 0, 0)
pygpt_net/ui/widget/lists/db.py
CHANGED
pygpt_net/ui/widget/tabs/body.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.14 20:00:00 #
 # ================================================== #
 
 from typing import Any
@@ -55,6 +55,17 @@ class TabBody(QTabWidget)
                 ref.deleteLater()
         del self.refs[:]
 
+    def delete_ref(self, widget: Any) -> None:
+        """
+        Unpin reference to widget in this tab
+
+        :param widget: widget reference
+        """
+        for ref in self.refs:
+            if ref and ref is widget:
+                self.refs.remove(ref)
+                break
+
     def append(self, body: QWidget):
         """
         Append tab body (parent widget)

pygpt_net/ui/widget/textarea/web.py
CHANGED
@@ -505,10 +505,10 @@ class Bridge(QObject)
         super(Bridge, self).__init__(parent)
         self.window = window
 
-    chunk = Signal(str, str)  # name, chunk
-    node = Signal(str)  #
-    nodeReplace = Signal(str)  #
-    nodeInput = Signal(str)  #
+    chunk = Signal(str, str, str)  # name, chunk, type
+    node = Signal(str)  # JSON payload
+    nodeReplace = Signal(str)  # JSON payload
+    nodeInput = Signal(str)  # raw text
     readyChanged = Signal(bool)
 
     @Slot(int)
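
On the Python side, anything connected to Bridge.chunk must now accept the third argument. A hedged sketch of a compatible PySide6 slot; the bridge variable is assumed to be a Bridge instance from this module:

from PySide6.QtCore import Slot

@Slot(str, str, str)
def on_chunk(name: str, chunk: str, chunk_type: str) -> None:
    # name: target output node, chunk: appended text, chunk_type: stream type
    print(f"[{name}] ({chunk_type}) {chunk}")

# bridge.chunk.connect(on_chunk)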
pygpt_net/utils.py
CHANGED
@@ -285,9 +285,10 @@ def natsort(l: list) -> list
     alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
     return sorted(l, key=alphanum_key)
 
-def mem_clean():
+def mem_clean(force: bool = False) -> bool:
     """Clean memory by removing unused objects"""
-
+    if not force:
+        return False
     import sys, gc
     ok = False
     try:
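
With the new signature, mem_clean() becomes a no-op returning False unless explicitly forced; a minimal usage sketch based on the visible part of the hunk:

from pygpt_net.utils import mem_clean

mem_clean()            # returns False immediately, no cleanup
mem_clean(force=True)  # runs the gc-based cleanup path shown above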