pygpt-net 2.5.6__py3-none-any.whl → 2.5.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +14 -0
- README.md +25 -3
- pygpt_net/CHANGELOG.txt +14 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/chat/stream.py +6 -1
- pygpt_net/controller/chat/text.py +2 -2
- pygpt_net/controller/chat/vision.py +2 -0
- pygpt_net/controller/config/placeholder.py +3 -2
- pygpt_net/controller/lang/custom.py +3 -1
- pygpt_net/controller/notepad/__init__.py +2 -2
- pygpt_net/controller/presets/editor.py +8 -1
- pygpt_net/controller/theme/menu.py +5 -1
- pygpt_net/controller/ui/__init__.py +17 -7
- pygpt_net/core/agents/legacy.py +2 -0
- pygpt_net/core/bridge/__init__.py +10 -2
- pygpt_net/core/ctx/__init__.py +4 -1
- pygpt_net/core/debug/presets.py +3 -1
- pygpt_net/core/events/control.py +2 -1
- pygpt_net/core/experts/__init__.py +3 -1
- pygpt_net/core/models/__init__.py +6 -1
- pygpt_net/core/modes/__init__.py +3 -1
- pygpt_net/core/presets/__init__.py +5 -1
- pygpt_net/core/render/web/helpers.py +8 -1
- pygpt_net/core/tokens/__init__.py +5 -3
- pygpt_net/core/types/mode.py +3 -2
- pygpt_net/data/config/config.json +6 -4
- pygpt_net/data/config/models.json +424 -3
- pygpt_net/data/config/modes.json +9 -3
- pygpt_net/data/config/presets/current.research.json +35 -0
- pygpt_net/data/config/settings.json +19 -0
- pygpt_net/data/css/web-blocks.css +8 -0
- pygpt_net/data/css/web-chatgpt.css +8 -0
- pygpt_net/data/css/web-chatgpt_wide.css +8 -0
- pygpt_net/data/locale/locale.en.ini +6 -0
- pygpt_net/item/preset.py +5 -1
- pygpt_net/plugin/openai_dalle/__init__.py +3 -1
- pygpt_net/plugin/openai_vision/__init__.py +3 -1
- pygpt_net/provider/core/config/patch.py +18 -1
- pygpt_net/provider/core/model/patch.py +7 -1
- pygpt_net/provider/core/preset/json_file.py +4 -0
- pygpt_net/provider/gpt/__init__.py +30 -6
- pygpt_net/provider/gpt/chat.py +4 -7
- pygpt_net/ui/dialog/preset.py +3 -1
- pygpt_net/ui/layout/ctx/__init__.py +1 -5
- pygpt_net/ui/layout/ctx/ctx_list.py +6 -1
- {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/METADATA +26 -4
- {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/RECORD +50 -49
- {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/entry_points.txt +0 -0
pygpt_net/plugin/openai_dalle/__init__.py CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.02
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (
@@ -18,6 +18,7 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
+    MODE_RESEARCH,
 )
 from pygpt_net.plugin.base.plugin import BasePlugin
 from pygpt_net.core.bridge.context import BridgeContext
@@ -46,6 +47,7 @@ class Plugin(BasePlugin):
             MODE_ASSISTANT,
             MODE_AGENT,
             MODE_AUDIO,
+            MODE_RESEARCH,
         ]
         self.allowed_cmds = [
             "image",

pygpt_net/plugin/openai_vision/__init__.py CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (
@@ -17,6 +17,7 @@ from pygpt_net.core.types import (
     MODE_LLAMA_INDEX,
     MODE_VISION,
     MODE_CHAT,
+    MODE_RESEARCH,
 )
 from pygpt_net.plugin.base.plugin import BasePlugin
 from pygpt_net.item.ctx import CtxItem
@@ -61,6 +62,7 @@ class Plugin(BasePlugin):
             MODE_LLAMA_INDEX,
             MODE_LANGCHAIN,
             MODE_AUDIO,
+            MODE_RESEARCH,
         ]
         self.worker = None
         self.config = Config(self)

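Both plugin diffs above come down to the same change: `MODE_RESEARCH` is appended to the plugin's `allowed_modes`, the list a plugin consults before acting in the current work mode. A minimal, hypothetical sketch of that gating pattern (the class and method names below are stand-ins, not the actual pygpt-net `BasePlugin` API):

```python
# Hypothetical sketch of mode-gated plugin handling (not the real BasePlugin API).
MODE_CHAT = "chat"
MODE_RESEARCH = "research"


class ImagePlugin:
    def __init__(self):
        # modes in which this plugin is allowed to act
        self.allowed_modes = [MODE_CHAT, MODE_RESEARCH]

    def handle(self, mode: str, prompt: str) -> str:
        # ignore events coming from modes the plugin does not support
        if mode not in self.allowed_modes:
            return prompt
        return prompt + "\n(image commands enabled)"


plugin = ImagePlugin()
print(plugin.handle(MODE_RESEARCH, "draw a chart"))   # handled
print(plugin.handle("completion", "draw a chart"))    # passed through unchanged
```
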
pygpt_net/provider/core/config/patch.py CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 import copy
@@ -1838,6 +1838,23 @@ class Patch:
             data["api_key_deepseek"] = ""
             updated = True
 
+            # < 2.5.7
+            if old < parse_version("2.5.7"):
+                print("Migrating config from < 2.5.7...")
+                self.window.core.updater.patch_css('web-blocks.css', True)  # force update
+                self.window.core.updater.patch_css('web-chatgpt.css', True)  # force update
+                self.window.core.updater.patch_css('web-chatgpt_wide.css', True)  # force update
+                updated = True
+
+            # < 2.5.8
+            if old < parse_version("2.5.8"):
+                print("Migrating config from < 2.5.8...")
+                if 'api_key_perplexity' not in data:
+                    data["api_key_perplexity"] = ""
+                if 'api_endpoint_perplexity' not in data:
+                    data["api_endpoint_perplexity"] = "https://api.perplexity.ai"
+                updated = True
+
         # update file
         migrated = False
         if updated:

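The migration blocks above follow a simple version-gate pattern: compare the previously installed version using `packaging.version` and apply new defaults only when upgrading across the boundary. A standalone sketch of that pattern, using a plain dict in place of the real config object:

```python
from packaging.version import parse as parse_version


def migrate_config(data: dict, old_version: str) -> bool:
    """Apply defaults added in newer releases; return True if anything changed."""
    old = parse_version(old_version)
    updated = False

    # < 2.5.8: add Perplexity API settings with safe defaults
    if old < parse_version("2.5.8"):
        if 'api_key_perplexity' not in data:
            data["api_key_perplexity"] = ""
        if 'api_endpoint_perplexity' not in data:
            data["api_endpoint_perplexity"] = "https://api.perplexity.ai"
        updated = True

    return updated


cfg = {"api_key": "sk-..."}
if migrate_config(cfg, "2.5.6"):
    print(cfg["api_endpoint_perplexity"])  # https://api.perplexity.ai
```
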
pygpt_net/provider/core/model/patch.py CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from packaging.version import parse as parse_version, Version
@@ -503,6 +503,12 @@ class Patch:
             model.tokens = 65536
             updated = True
 
+        # < 2.5.8 <--- add gpt-4.5-preview and sonar models (Perplexity)
+        if old < parse_version("2.5.8"):
+            print("Migrating models from < 2.5.8...")
+            # add gpt-4.5-preview, sonar, R1
+            updated = True
+
         # update file
         if updated:
             data = dict(sorted(data.items()))

pygpt_net/provider/core/preset/json_file.py CHANGED

@@ -28,6 +28,7 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
+    MODE_RESEARCH,
 )
 from pygpt_net.provider.core.preset.base import BaseProvider
 from pygpt_net.item.preset import PresetItem
@@ -195,6 +196,7 @@ class JsonFileProvider(BaseProvider):
             MODE_AGENT_LLAMA: item.agent_llama,
             MODE_EXPERT: item.expert,
             MODE_AUDIO: item.audio,
+            MODE_RESEARCH: item.research,
             'temperature': item.temperature,
             'filename': item.filename,
             'model': item.model,
@@ -236,6 +238,8 @@ class JsonFileProvider(BaseProvider):
             item.expert = data[MODE_EXPERT]
         if MODE_AUDIO in data:
             item.audio = data[MODE_AUDIO]
+        if MODE_RESEARCH in data:
+            item.research = data[MODE_RESEARCH]
 
         if 'uuid' in data:
             item.uuid = data['uuid']

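The preset provider mirrors each mode flag into the JSON document and reads it back defensively, so presets saved by older versions (which lack the `research` key) still load. A reduced, hypothetical round-trip sketch, with a small dataclass standing in for `PresetItem`:

```python
import json
from dataclasses import dataclass

MODE_RESEARCH = "research"


@dataclass
class Preset:
    name: str = ""
    research: bool = False  # flag added for Research (Perplexity) mode


def serialize(item: Preset) -> dict:
    return {"name": item.name, MODE_RESEARCH: item.research}


def deserialize(data: dict) -> Preset:
    item = Preset(name=data.get("name", ""))
    # older preset files may not contain the key at all
    if MODE_RESEARCH in data:
        item.research = data[MODE_RESEARCH]
    return item


raw = json.dumps(serialize(Preset(name="sonar-preset", research=True)))
print(deserialize(json.loads(raw)).research)  # True
```
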
pygpt_net/provider/gpt/__init__.py CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from httpx_socks import SyncProxyTransport
@@ -20,6 +20,7 @@ from pygpt_net.core.types import (
     MODE_COMPLETION,
     MODE_IMAGE,
     MODE_VISION,
+    MODE_RESEARCH,
 )
 from pygpt_net.core.bridge.context import BridgeContext
 
@@ -50,10 +51,11 @@ class Gpt:
         self.summarizer = Summarizer(window)
         self.vision = Vision(window)
 
-    def get_client(self) -> OpenAI:
+    def get_client(self, mode: str = MODE_CHAT) -> OpenAI:
         """
         Return OpenAI client
 
+        :param mode: Mode
         :return: OpenAI client
         """
         args = {
@@ -73,6 +75,16 @@ class Gpt:
             args["http_client"] = DefaultHttpxClient(
                 transport=transport,
             )
+
+        # research mode endpoint - Perplexity
+        if mode == MODE_RESEARCH:
+            if self.window.core.config.has('api_key_perplexity'):
+                args["api_key"] = self.window.core.config.get('api_key_perplexity')
+            if self.window.core.config.has('api_endpoint_perplexity'):
+                endpoint = self.window.core.config.get('api_endpoint_perplexity')
+                if endpoint:
+                    args["base_url"] = endpoint
+
         return OpenAI(**args)
 
     def call(self, context: BridgeContext, extra: dict = None) -> bool:
@@ -116,12 +128,19 @@ class Gpt:
             )
             used_tokens = self.completion.get_used_tokens()
 
-        # chat
-        elif mode in [
+        # chat (OpenAI) | research (Perplexity)
+        elif mode in [
+            MODE_CHAT,
+            MODE_AUDIO,
+            MODE_RESEARCH
+        ]:
             response = self.chat.send(
                 context=context,
                 extra=extra,
             )
+            if hasattr(response, "citations"):
+                if response.citations:
+                    ctx.urls = response.citations
             used_tokens = self.chat.get_used_tokens()
             self.vision.append_images(ctx)  # append images to ctx if provided
 
@@ -182,7 +201,11 @@ class Gpt:
         output = ""
         if mode == MODE_COMPLETION:
             output = response.choices[0].text.strip()
-        elif mode in [
+        elif mode in [
+            MODE_CHAT,
+            MODE_VISION,
+            MODE_RESEARCH
+        ]:
             if response.choices[0]:
                 if response.choices[0].message.content:
                     output = response.choices[0].message.content.strip()
@@ -225,6 +248,7 @@ class Gpt:
         :param extra: Extra arguments
         :return: response content
         """
+        mode = context.mode
         prompt = context.prompt
         system_prompt = context.system_prompt
         max_tokens = context.max_tokens
@@ -233,7 +257,7 @@ class Gpt:
         if model is None:
             model = self.window.core.models.from_defaults()
 
-        client = self.get_client()
+        client = self.get_client(mode)
         messages = []
         messages.append({"role": "system", "content": system_prompt})
         messages.append({"role": "user", "content": prompt})

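Because Perplexity exposes an OpenAI-compatible API, the new Research mode can reuse the same `openai` client by swapping the API key and `base_url`. A hedged sketch of what `get_client(MODE_RESEARCH)` effectively builds, plus how the `citations` attribute is read off the response, mirroring the `hasattr()` check above (the model name `sonar` matches the models added in 2.5.8; `citations` is only present when the backend returns it):

```python
from openai import OpenAI

# Research mode: point the standard OpenAI client at the Perplexity endpoint.
client = OpenAI(
    api_key="pplx-...",                    # value of api_key_perplexity
    base_url="https://api.perplexity.ai",  # value of api_endpoint_perplexity
)

response = client.chat.completions.create(
    model="sonar",
    messages=[
        {"role": "system", "content": "Be precise and cite sources."},
        {"role": "user", "content": "What changed in pygpt-net 2.5.8?"},
    ],
)

print(response.choices[0].message.content)

# Perplexity attaches source URLs to the response; plain OpenAI responses
# do not carry this attribute, hence the defensive access.
citations = getattr(response, "citations", None)
if citations:
    for url in citations:
        print("source:", url)
```
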
pygpt_net/provider/gpt/chat.py CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.02.
+# Updated Date: 2025.02.26 23:00:00 #
 # ================================================== #
 
 import json
@@ -17,13 +17,14 @@ from pygpt_net.core.types import (
     MODE_CHAT,
     MODE_VISION,
     MODE_AUDIO,
+    MODE_RESEARCH,
 )
 from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 
 from .utils import sanitize_name
-from
+from pygpt_net.item.attachment import AttachmentItem
 
 
 class Chat:
@@ -66,7 +67,7 @@ class Chat:
         user_name = ctx.input_name  # from ctx
         ai_name = ctx.output_name  # from ctx
 
-        client = self.window.core.gpt.get_client()
+        client = self.window.core.gpt.get_client(mode)
 
         # build chat messages
         messages = self.build(
@@ -134,10 +135,6 @@ class Chat:
         else:
             response_kwargs['max_completion_tokens'] = max_tokens
 
-        # o1 models do not support streaming
-        if model.id is not None and model.id.startswith("o1"):
-            stream = False
-
         # audio mode
         if mode in [MODE_AUDIO]:
             stream = False

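Dropping the `startswith("o1")` guard means o1 models now go through the same streaming path as other chat models ("Stream mode has been enabled in o1 models" in the 2.5.7 changelog below). A generic sketch of consuming a streamed chat completion with the `openai` client, assuming your own model and API key:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

stream = client.chat.completions.create(
    model="gpt-4o",  # with 2.5.7+ the same streaming path is used for o1 models
    messages=[{"role": "user", "content": "Say hello in three languages."}],
    stream=True,
)

# Each chunk carries a delta; content may be None for role/tool-call chunks.
for chunk in stream:
    if not chunk.choices:
        continue
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)
print()
```
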
pygpt_net/ui/dialog/preset.py CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from PySide6.QtCore import Qt
@@ -23,6 +23,7 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
+    MODE_RESEARCH,
 )
 from pygpt_net.ui.base.config_dialog import BaseConfigDialog
 from pygpt_net.ui.widget.dialog.editor import EditorDialog
@@ -111,6 +112,7 @@ class Preset(BaseConfigDialog):
             MODE_AGENT_LLAMA,
             MODE_AGENT,
             MODE_EXPERT,
+            MODE_RESEARCH,
         ]
         rows_mode = QVBoxLayout()
         rows_mode.addStretch()

pygpt_net/ui/layout/ctx/__init__.py CHANGED

@@ -6,13 +6,12 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.02.26 23:00:00 #
 # ================================================== #
 
 from PySide6.QtCore import Qt
 from PySide6.QtWidgets import QVBoxLayout, QWidget
 
-from pygpt_net.ui.layout.ctx.search_input import SearchInput
 from pygpt_net.ui.layout.ctx.ctx_list import CtxList
 from pygpt_net.ui.layout.ctx.video import Video
 from pygpt_net.ui.widget.element.labels import HelpLabel
@@ -27,7 +26,6 @@ class CtxMain:
         :param window: Window instance
         """
         self.window = window
-        self.search_input = SearchInput(window)
         self.ctx_list = CtxList(window)
         self.video = Video(window)
 
@@ -39,7 +37,6 @@ class CtxMain:
         """
         ctx = self.ctx_list.setup()
         video = self.video.setup()
-        search_input = self.search_input.setup()
 
         self.window.ui.nodes['tip.toolbox.ctx'] = HelpLabel(trans('tip.toolbox.ctx'), self.window)
         self.window.ui.nodes['tip.toolbox.ctx'].setAlignment(Qt.AlignCenter)
@@ -48,7 +45,6 @@ class CtxMain:
         layout.addWidget(ctx)
 
         layout.addWidget(self.window.ui.nodes['tip.toolbox.ctx'])
-        layout.addWidget(search_input)
         layout.addWidget(video)
         layout.setContentsMargins(5, 5, 2, 5)

pygpt_net/ui/layout/ctx/ctx_list.py CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.02.26 23:00:00 #
 # ================================================== #
 
 from PySide6 import QtCore
@@ -15,6 +15,7 @@ from PySide6.QtWidgets import QVBoxLayout, QWidget
 from datetime import datetime, timedelta
 
 from pygpt_net.item.ctx import CtxMeta
+from pygpt_net.ui.layout.ctx.search_input import SearchInput
 from pygpt_net.ui.widget.element.button import NewCtxButton
 from pygpt_net.ui.widget.element.labels import TitleLabel
 from pygpt_net.ui.widget.lists.context import ContextList, Item, GroupItem, SectionItem
@@ -30,6 +31,7 @@ class CtxList:
         :param window: Window instance
         """
         self.window = window
+        self.search_input = SearchInput(window)
 
     def setup(self) -> QWidget:
         """
@@ -42,9 +44,12 @@ class CtxList:
         self.window.ui.nodes[id] = ContextList(self.window, id)
         self.window.ui.nodes[id].selection_locked = self.window.controller.ctx.context_change_locked
         self.window.ui.nodes['ctx.label'] = TitleLabel(trans("ctx.list.label"))
+        self.window.ui.nodes['ctx.new'].setContentsMargins(0,0,0,0)
+        search_input = self.search_input.setup()
 
         layout = QVBoxLayout()
         layout.addWidget(self.window.ui.nodes['ctx.new'])
+        layout.addWidget(search_input)
         layout.addWidget(self.window.ui.nodes[id])
         layout.setContentsMargins(0, 0, 0, 0)

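The visible effect of these two UI diffs is widget ordering: the `SearchInput` is now owned by `CtxList` and added to the layout between the "new context" button and the list itself, which is why the search box appears at the top. A tiny PySide6 sketch of that ordering, using generic widgets rather than the pygpt-net classes:

```python
import sys
from PySide6.QtWidgets import (
    QApplication, QLineEdit, QListWidget, QPushButton, QVBoxLayout, QWidget,
)

app = QApplication(sys.argv)

new_btn = QPushButton("New context")
search = QLineEdit()                 # stands in for SearchInput
search.setPlaceholderText("Search...")
ctx_list = QListWidget()
ctx_list.addItems(["Chat 1", "Chat 2"])

layout = QVBoxLayout()
layout.addWidget(new_btn)   # button first
layout.addWidget(search)    # search input directly below it (i.e. at the top)
layout.addWidget(ctx_list)  # the context list fills the rest
layout.setContentsMargins(0, 0, 0, 0)

root = QWidget()
root.setLayout(layout)
root.show()
sys.exit(app.exec())
```
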
{pygpt_net-2.5.6.dist-info → pygpt_net-2.5.8.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pygpt-net
-Version: 2.5.
+Version: 2.5.8
 Summary: Desktop AI Assistant powered by models: OpenAI o1, GPT-4o, GPT-4, GPT-4 Vision, GPT-3.5, DALL-E 3, Llama 3, Mistral, Gemini, Claude, DeepSeek, Bielik, and other models supported by Langchain, Llama Index, and Ollama. Features include chatbot, text completion, image generation, vision analysis, speech-to-text, internet access, file handling, command execution and more.
 Home-page: https://pygpt.net
 License: MIT
@@ -94,7 +94,7 @@ Description-Content-Type: text/markdown
 
 [](https://snapcraft.io/pygpt)
 
-Release: **2.5.
+Release: **2.5.8** | build: **2025.03.02** | Python: **>=3.10, <3.13**
 
 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -130,7 +130,7 @@ You can download compiled 64-bit versions for Windows and Linux here: https://py
 
 - Desktop AI Assistant for `Linux`, `Windows` and `Mac`, written in Python.
 - Works similarly to `ChatGPT`, but locally (on a desktop computer).
--
+- 12 modes of operation: Chat, Vision, Completion, Assistant, Image generation, LangChain, Chat with Files, Chat with Audio, Research (Perplexity), Experts, Autonomous Mode and Agents.
 - Supports multiple models: `o1`, `GPT-4o`, `GPT-4`, `GPT-3.5`, and any model accessible through `LangChain`, `LlamaIndex` and `Ollama` such as `Llama 3`, `Mistral`, `Google Gemini`, `Anthropic Claude`, `DeepSeek V3/R1`, `Bielik`, etc.
 - Chat with your own Files: integrated `LlamaIndex` support: chat with data such as: `txt`, `pdf`, `csv`, `html`, `md`, `docx`, `json`, `epub`, `xlsx`, `xml`, webpages, `Google`, `GitHub`, video/audio, images and other data types, or use conversation history as additional context provided to the model.
 - Built-in vector databases support and automated files and data embedding.
@@ -490,7 +490,15 @@ More info: https://platform.openai.com/docs/guides/audio/quickstart
 
 Currently, in beta. Tool and function calls are not enabled in this mode.
 
-
+## Research (Perplexity)
+
+2025-03-02: currently in beta.
+
+Mode operates using the Perplexity API: https://perplexity.ai.
+
+It allows for deep web searching and utilizes Sonar models, available in `Perplexity AI`.
+
+It requires a Perplexity API key, which can be generated at: https://perplexity.ai.
 
 ## Completion
 
@@ -4056,6 +4064,20 @@ may consume additional tokens that are not displayed in the main window.
 
 ## Recent changes:
 
+**2.5.8 (2025-03-02)**
+
+- Added a new mode: Research (Perplexity) powered by: https://perplexity.ai - beta.
+- Added Perplexity models: sonar, sonar-pro, sonar-deep-research, sonar-reasoning, sonar-reasoning-pro, r1-1776.
+- Added a new OpenAI model: gpt-4.5-preview.
+
+**2.5.7 (2025-02-26)**
+
+- Stream mode has been enabled in o1 models.
+- CSS styling for <think> tags (reasoning models) has been added.
+- The search input has been moved to the top.
+- The ChatGPT-based style is now set as default.
+- Fix: Display of max tokens in models with a context window greater than 128k.
+
 **2.5.6 (2025-02-03)**
 
 - Fix: disabled index initialization if embedding provider is OpenAI and no API KEY is provided.
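
The 2.5.7 item "CSS styling for `<think>` tags (reasoning models)" refers to models (such as the new `sonar-reasoning` and `r1-1776` entries) that emit their chain of thought wrapped in `<think>...</think>` before the final answer; the changed CSS files and `pygpt_net/core/render/web/helpers.py` in the file list above relate to rendering that block. A small, hypothetical sketch of splitting such output so the reasoning can be styled separately from the answer (this is an illustration, not the actual renderer code):

```python
import re

THINK_RE = re.compile(r"<think>(.*?)</think>", re.DOTALL)


def split_reasoning(output: str) -> tuple[str, str]:
    """Return (reasoning, answer) from a reasoning-model response."""
    match = THINK_RE.search(output)
    if not match:
        return "", output.strip()
    reasoning = match.group(1).strip()
    answer = THINK_RE.sub("", output, count=1).strip()
    return reasoning, answer


sample = "<think>The user wants a greeting.</think>Hello!"
reasoning, answer = split_reasoning(sample)
print(reasoning)  # The user wants a greeting.
print(answer)     # Hello!
```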
|