pygpt-net 2.5.7__py3-none-any.whl → 2.5.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +13 -1
- README.md +24 -68
- pygpt_net/CHANGELOG.txt +13 -1
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/chat/stream.py +6 -1
- pygpt_net/controller/chat/vision.py +2 -0
- pygpt_net/controller/lang/custom.py +3 -1
- pygpt_net/controller/notepad/__init__.py +6 -2
- pygpt_net/controller/presets/editor.py +8 -1
- pygpt_net/core/agents/legacy.py +2 -0
- pygpt_net/core/bridge/__init__.py +10 -2
- pygpt_net/core/ctx/__init__.py +4 -1
- pygpt_net/core/debug/presets.py +3 -1
- pygpt_net/core/events/control.py +2 -1
- pygpt_net/core/experts/__init__.py +3 -1
- pygpt_net/core/models/__init__.py +6 -1
- pygpt_net/core/modes/__init__.py +3 -1
- pygpt_net/core/presets/__init__.py +5 -1
- pygpt_net/core/render/web/helpers.py +12 -7
- pygpt_net/core/render/web/parser.py +7 -5
- pygpt_net/core/tokens/__init__.py +4 -2
- pygpt_net/core/types/mode.py +3 -2
- pygpt_net/data/config/config.json +5 -3
- pygpt_net/data/config/models.json +424 -3
- pygpt_net/data/config/modes.json +9 -3
- pygpt_net/data/config/presets/current.research.json +35 -0
- pygpt_net/data/config/settings.json +19 -0
- pygpt_net/data/locale/locale.en.ini +6 -0
- pygpt_net/item/preset.py +5 -1
- pygpt_net/plugin/openai_dalle/__init__.py +3 -1
- pygpt_net/plugin/openai_vision/__init__.py +3 -1
- pygpt_net/provider/core/config/patch.py +10 -1
- pygpt_net/provider/core/model/patch.py +7 -1
- pygpt_net/provider/core/preset/json_file.py +4 -0
- pygpt_net/provider/gpt/__init__.py +30 -6
- pygpt_net/provider/gpt/chat.py +3 -6
- pygpt_net/ui/dialog/preset.py +3 -1
- {pygpt_net-2.5.7.dist-info → pygpt_net-2.5.9.dist-info}/METADATA +25 -69
- {pygpt_net-2.5.7.dist-info → pygpt_net-2.5.9.dist-info}/RECORD +42 -41
- {pygpt_net-2.5.7.dist-info → pygpt_net-2.5.9.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.7.dist-info → pygpt_net-2.5.9.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.7.dist-info → pygpt_net-2.5.9.dist-info}/entry_points.txt +0 -0
pygpt_net/plugin/openai_dalle/__init__.py
CHANGED
```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.02
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (
@@ -18,6 +18,7 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
+    MODE_RESEARCH,
 )
 from pygpt_net.plugin.base.plugin import BasePlugin
 from pygpt_net.core.bridge.context import BridgeContext
@@ -46,6 +47,7 @@ class Plugin(BasePlugin):
             MODE_ASSISTANT,
             MODE_AGENT,
             MODE_AUDIO,
+            MODE_RESEARCH,
         ]
         self.allowed_cmds = [
             "image",
```
pygpt_net/plugin/openai_vision/__init__.py
CHANGED
```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (
@@ -17,6 +17,7 @@ from pygpt_net.core.types import (
     MODE_LLAMA_INDEX,
     MODE_VISION,
     MODE_CHAT,
+    MODE_RESEARCH,
 )
 from pygpt_net.plugin.base.plugin import BasePlugin
 from pygpt_net.item.ctx import CtxItem
@@ -61,6 +62,7 @@ class Plugin(BasePlugin):
             MODE_LLAMA_INDEX,
             MODE_LANGCHAIN,
             MODE_AUDIO,
+            MODE_RESEARCH,
         ]
         self.worker = None
         self.config = Config(self)
```
pygpt_net/provider/core/config/patch.py
CHANGED
```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.02
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 import copy
@@ -1846,6 +1846,15 @@ class Patch:
             self.window.core.updater.patch_css('web-chatgpt_wide.css', True)  # force update
             updated = True
 
+        # < 2.5.8
+        if old < parse_version("2.5.8"):
+            print("Migrating config from < 2.5.8...")
+            if 'api_key_perplexity' not in data:
+                data["api_key_perplexity"] = ""
+            if 'api_endpoint_perplexity' not in data:
+                data["api_endpoint_perplexity"] = "https://api.perplexity.ai"
+            updated = True
+
         # update file
         migrated = False
         if updated:
```
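The hunk above is a version-gated config migration: when the stored config predates 2.5.8, the two new Perplexity keys are seeded with defaults and the file is flagged for rewrite. A minimal standalone sketch of the same pattern (the `migrate_config` helper and the sample values are illustrative, not pygpt-net code):

```python
from packaging.version import parse as parse_version


def migrate_config(data: dict, old_version: str) -> bool:
    """Seed new keys when upgrading from a version older than 2.5.8."""
    updated = False
    if parse_version(old_version) < parse_version("2.5.8"):
        # only add missing keys, so user-provided values are never overwritten
        data.setdefault("api_key_perplexity", "")
        data.setdefault("api_endpoint_perplexity", "https://api.perplexity.ai")
        updated = True
    return updated


if __name__ == "__main__":
    cfg = {"api_key": "sk-..."}              # pre-2.5.8 config without the new keys
    print(migrate_config(cfg, "2.5.7"))      # True
    print(cfg["api_endpoint_perplexity"])    # https://api.perplexity.ai
```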
pygpt_net/provider/core/model/patch.py
CHANGED
```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from packaging.version import parse as parse_version, Version
@@ -503,6 +503,12 @@ class Patch:
             model.tokens = 65536
             updated = True
 
+        # < 2.5.8 <--- add gpt-4.5-preview and sonar models (Perplexity)
+        if old < parse_version("2.5.8"):
+            print("Migrating models from < 2.5.8...")
+            # add gpt-4.5-preview, sonar, R1
+            updated = True
+
         # update file
         if updated:
             data = dict(sorted(data.items()))
```
pygpt_net/provider/core/preset/json_file.py
CHANGED
```diff
@@ -28,6 +28,7 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
+    MODE_RESEARCH,
 )
 from pygpt_net.provider.core.preset.base import BaseProvider
 from pygpt_net.item.preset import PresetItem
@@ -195,6 +196,7 @@ class JsonFileProvider(BaseProvider):
             MODE_AGENT_LLAMA: item.agent_llama,
             MODE_EXPERT: item.expert,
             MODE_AUDIO: item.audio,
+            MODE_RESEARCH: item.research,
             'temperature': item.temperature,
             'filename': item.filename,
             'model': item.model,
@@ -236,6 +238,8 @@ class JsonFileProvider(BaseProvider):
             item.expert = data[MODE_EXPERT]
         if MODE_AUDIO in data:
             item.audio = data[MODE_AUDIO]
+        if MODE_RESEARCH in data:
+            item.research = data[MODE_RESEARCH]
 
         if 'uuid' in data:
             item.uuid = data['uuid']
```
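The preset provider stores each mode as a flag in the preset's JSON, and deserialization only touches keys that are present, so older presets without the new research flag simply keep their defaults. A rough, self-contained sketch of that round-trip shape (the `MiniPreset` dataclass is a made-up stand-in for the real `PresetItem`):

```python
from dataclasses import dataclass, asdict


@dataclass
class MiniPreset:
    # illustrative subset of a preset record; the real PresetItem has many more fields
    name: str = ""
    chat: bool = False
    research: bool = False   # new flag serialized as of 2.5.8
    temperature: float = 1.0
    model: str = ""


def from_dict(data: dict) -> MiniPreset:
    item = MiniPreset()
    for key, value in data.items():
        if hasattr(item, key):
            setattr(item, key, value)   # absent keys keep their defaults, as in the patch above
    return item


if __name__ == "__main__":
    old_record = {"name": "current.research", "chat": False, "model": "sonar"}
    restored = from_dict(old_record)
    print(restored.research)   # False: missing flag falls back to the default
    print(asdict(restored))    # full dict, including the new research key
```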
pygpt_net/provider/gpt/__init__.py
CHANGED
```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from httpx_socks import SyncProxyTransport
@@ -20,6 +20,7 @@ from pygpt_net.core.types import (
     MODE_COMPLETION,
     MODE_IMAGE,
     MODE_VISION,
+    MODE_RESEARCH,
 )
 from pygpt_net.core.bridge.context import BridgeContext
 
@@ -50,10 +51,11 @@ class Gpt:
         self.summarizer = Summarizer(window)
         self.vision = Vision(window)
 
-    def get_client(self) -> OpenAI:
+    def get_client(self, mode: str = MODE_CHAT) -> OpenAI:
         """
         Return OpenAI client
 
+        :param mode: Mode
         :return: OpenAI client
         """
         args = {
@@ -73,6 +75,16 @@ class Gpt:
             args["http_client"] = DefaultHttpxClient(
                 transport=transport,
             )
+
+        # research mode endpoint - Perplexity
+        if mode == MODE_RESEARCH:
+            if self.window.core.config.has('api_key_perplexity'):
+                args["api_key"] = self.window.core.config.get('api_key_perplexity')
+            if self.window.core.config.has('api_endpoint_perplexity'):
+                endpoint = self.window.core.config.get('api_endpoint_perplexity')
+                if endpoint:
+                    args["base_url"] = endpoint
+
         return OpenAI(**args)
 
     def call(self, context: BridgeContext, extra: dict = None) -> bool:
@@ -116,12 +128,19 @@ class Gpt:
             )
             used_tokens = self.completion.get_used_tokens()
 
-        # chat
-        elif mode in [
+        # chat (OpenAI) | research (Perplexity)
+        elif mode in [
+            MODE_CHAT,
+            MODE_AUDIO,
+            MODE_RESEARCH
+        ]:
             response = self.chat.send(
                 context=context,
                 extra=extra,
             )
+            if hasattr(response, "citations"):
+                if response.citations:
+                    ctx.urls = response.citations
             used_tokens = self.chat.get_used_tokens()
             self.vision.append_images(ctx)  # append images to ctx if provided
 
@@ -182,7 +201,11 @@ class Gpt:
         output = ""
         if mode == MODE_COMPLETION:
             output = response.choices[0].text.strip()
-        elif mode in [
+        elif mode in [
+            MODE_CHAT,
+            MODE_VISION,
+            MODE_RESEARCH
+        ]:
             if response.choices[0]:
                 if response.choices[0].message.content:
                     output = response.choices[0].message.content.strip()
@@ -225,6 +248,7 @@ class Gpt:
         :param extra: Extra arguments
         :return: response content
         """
+        mode = context.mode
         prompt = context.prompt
         system_prompt = context.system_prompt
         max_tokens = context.max_tokens
@@ -233,7 +257,7 @@ class Gpt:
         if model is None:
            model = self.window.core.models.from_defaults()
 
-        client = self.get_client()
+        client = self.get_client(mode)
         messages = []
         messages.append({"role": "system", "content": system_prompt})
         messages.append({"role": "user", "content": prompt})
```
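The net effect of the `get_client()` change is that Research mode reuses the OpenAI SDK but points it at Perplexity's OpenAI-compatible endpoint, and any citations returned with the response are copied onto the context as URLs. A standalone sketch of the same idea outside the app (the environment variable, model id, and `citations` attribute are assumptions based on the Perplexity API, not guaranteed by this diff):

```python
import os

from openai import OpenAI


def research_client() -> OpenAI:
    # Perplexity exposes an OpenAI-compatible endpoint, so the same SDK is reused
    # with a different API key and base_url (mirrors the config keys added in 2.5.8)
    return OpenAI(
        api_key=os.environ.get("PERPLEXITY_API_KEY", ""),
        base_url="https://api.perplexity.ai",
    )


if __name__ == "__main__":
    client = research_client()
    response = client.chat.completions.create(
        model="sonar",  # one of the Perplexity models added in 2.5.8
        messages=[
            {"role": "system", "content": "Be precise and concise."},
            {"role": "user", "content": "What is new in Python 3.13?"},
        ],
    )
    print(response.choices[0].message.content)
    # Perplexity attaches source URLs; guard with hasattr/getattr as the patched code does
    urls = getattr(response, "citations", None)
    if urls:
        print("Sources:", urls)
```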
pygpt_net/provider/gpt/chat.py
CHANGED
```diff
@@ -17,13 +17,14 @@ from pygpt_net.core.types import (
     MODE_CHAT,
     MODE_VISION,
     MODE_AUDIO,
+    MODE_RESEARCH,
 )
 from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 
 from .utils import sanitize_name
-from
+from pygpt_net.item.attachment import AttachmentItem
 
 
 class Chat:
@@ -66,7 +67,7 @@ class Chat:
         user_name = ctx.input_name  # from ctx
         ai_name = ctx.output_name  # from ctx
 
-        client = self.window.core.gpt.get_client()
+        client = self.window.core.gpt.get_client(mode)
 
         # build chat messages
         messages = self.build(
@@ -134,10 +135,6 @@ class Chat:
         else:
             response_kwargs['max_completion_tokens'] = max_tokens
 
-        # o1 models do not support streaming
-        if model.id is not None and model.id.startswith("o1-preview"):
-            stream = False
-
         # audio mode
         if mode in [MODE_AUDIO]:
             stream = False
```
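The removed block above used to force streaming off for `o1-preview` models; with it gone, those models stream like the others (see "Stream mode has been enabled in o1 models" in the changelog further down). A minimal sketch of consuming a streamed chat completion with the OpenAI SDK (model id and prompt are placeholders, not taken from this diff):

```python
from openai import OpenAI


def stream_answer(prompt: str, model: str = "o1-preview") -> str:
    """Stream a chat completion and return the assembled text."""
    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    output = []
    stream = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        stream=True,  # the removed guard used to force this off for o1-preview
    )
    for chunk in stream:
        # each chunk carries an incremental delta; content may be None
        if chunk.choices and chunk.choices[0].delta.content:
            output.append(chunk.choices[0].delta.content)
    return "".join(output)


if __name__ == "__main__":
    print(stream_answer("Summarize the benefits of streaming responses."))
```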
pygpt_net/ui/dialog/preset.py
CHANGED
```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.03.02 19:00:00 #
 # ================================================== #
 
 from PySide6.QtCore import Qt
@@ -23,6 +23,7 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
+    MODE_RESEARCH,
 )
 from pygpt_net.ui.base.config_dialog import BaseConfigDialog
 from pygpt_net.ui.widget.dialog.editor import EditorDialog
@@ -111,6 +112,7 @@ class Preset(BaseConfigDialog):
             MODE_AGENT_LLAMA,
             MODE_AGENT,
             MODE_EXPERT,
+            MODE_RESEARCH,
         ]
         rows_mode = QVBoxLayout()
         rows_mode.addStretch()
```
{pygpt_net-2.5.7.dist-info → pygpt_net-2.5.9.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pygpt-net
-Version: 2.5.7
+Version: 2.5.9
 Summary: Desktop AI Assistant powered by models: OpenAI o1, GPT-4o, GPT-4, GPT-4 Vision, GPT-3.5, DALL-E 3, Llama 3, Mistral, Gemini, Claude, DeepSeek, Bielik, and other models supported by Langchain, Llama Index, and Ollama. Features include chatbot, text completion, image generation, vision analysis, speech-to-text, internet access, file handling, command execution and more.
 Home-page: https://pygpt.net
 License: MIT
@@ -94,7 +94,7 @@ Description-Content-Type: text/markdown
 
 [](https://snapcraft.io/pygpt)
 
-Release: **2.5.
+Release: **2.5.9** | build: **2025.03.05** | Python: **>=3.10, <3.13**
 
 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -130,7 +130,7 @@ You can download compiled 64-bit versions for Windows and Linux here: https://py
 
 - Desktop AI Assistant for `Linux`, `Windows` and `Mac`, written in Python.
 - Works similarly to `ChatGPT`, but locally (on a desktop computer).
--
+- 12 modes of operation: Chat, Vision, Completion, Assistant, Image generation, LangChain, Chat with Files, Chat with Audio, Research (Perplexity), Experts, Autonomous Mode and Agents.
 - Supports multiple models: `o1`, `GPT-4o`, `GPT-4`, `GPT-3.5`, and any model accessible through `LangChain`, `LlamaIndex` and `Ollama` such as `Llama 3`, `Mistral`, `Google Gemini`, `Anthropic Claude`, `DeepSeek V3/R1`, `Bielik`, etc.
 - Chat with your own Files: integrated `LlamaIndex` support: chat with data such as: `txt`, `pdf`, `csv`, `html`, `md`, `docx`, `json`, `epub`, `xlsx`, `xml`, webpages, `Google`, `GitHub`, video/audio, images and other data types, or use conversation history as additional context provided to the model.
 - Built-in vector databases support and automated files and data embedding.
@@ -490,7 +490,15 @@ More info: https://platform.openai.com/docs/guides/audio/quickstart
 
 Currently, in beta. Tool and function calls are not enabled in this mode.
 
-
+## Research (Perplexity)
+
+2025-03-02: currently in beta.
+
+Mode operates using the Perplexity API: https://perplexity.ai.
+
+It allows for deep web searching and utilizes Sonar models, available in `Perplexity AI`.
+
+It requires a Perplexity API key, which can be generated at: https://perplexity.ai.
 
 ## Completion
 
@@ -4056,9 +4064,21 @@ may consume additional tokens that are not displayed in the main window.
 
 ## Recent changes:
 
+**2.5.9 (2025-03-05)**
+
+- Improved formatting of HTML code in the output.
+- Disabled automatic indentation parsing as code blocks.
+- Disabled automatic scrolling of the notepad when opening a tab.
+
+**2.5.8 (2025-03-02)**
+
+- Added a new mode: Research (Perplexity) powered by: https://perplexity.ai - beta.
+- Added Perplexity models: sonar, sonar-pro, sonar-deep-research, sonar-reasoning, sonar-reasoning-pro, r1-1776.
+- Added a new OpenAI model: gpt-4.5-preview.
+
 **2.5.7 (2025-02-26)**
 
--
+- Stream mode has been enabled in o1 models.
 - CSS styling for <think> tags (reasoning models) has been added.
 - The search input has been moved to the top.
 - The ChatGPT-based style is now set as default.
@@ -4107,70 +4127,6 @@ may consume additional tokens that are not displayed in the main window.
 - Fix: error handling in stream mode.
 - Fix: added check for active plugin tools before tool call.
 
-**2.4.57 (2025-01-19)**
-
-- Logging fix.
-
-**2.4.56 (2025-01-19)**
-
-- Improved tab switching and focus change.
-- Improved global keyboard shortcuts handling.
-
-**2.4.55 (2025-01-18)**
-
-- Added a new option in settings: Audio -> Recording timeout.
-- Added a new option in settings: Audio -> Enable timeout in continuous mode.
-
-**2.4.54 (2025-01-18)**
-
-- Audio output switched from PyGame to PyAudio. It may be necessary to manually connect Alsa in Snap version with: "sudo snap connect pygpt:alsa".
-- Added audio output volume progress bar.
-
-**2.4.53 (2025-01-17)**
-
-- Fix: issue #89
-
-**2.4.52 (2025-01-17)**
-
-- Improved audio input button visibility toggle.
-- Fix: check for required arguments - issue #88.
-- UI Fixes.
-
-**2.4.51 (2025-01-17)**
-
-- Added a "Continuous recording" mode under Audio Input in the Notepad tab, allowing for recording long voice notes and real-time auto-transcription. (beta)
-- A new option has been added in Settings -> Audio -> Continuous recording auto-transcribe interval.
-
-**2.4.50 (2025-01-16)**
-
-- Refactored audio input core.
-- Added audio input volume progress bar.
-
-**2.4.49 (2025-01-16)**
-
-- Fix: stream render in Assistants mode.
-- Fix: items remove in context regen/edit.
-
-**2.4.48 (2025-01-16)**
-
-- Fix: parsing lists in data loaders configuration.
-- Fix: crash on Windows on PySide6 v6.6.0.
-- Added Gemini embeddings to LlamaIndex settings.
-- LlamaIndex upgraded to 0.12.11.
-- Security updates.
-
-**2.4.47 (2025-01-14)**
-
-- Added support for Python 3.12.
-- Added a new model to Chat with Files: gemini-2.0-flash-exp.
-- PySide6 upgraded to 6.6.0.
-
-**2.4.46 (2024-12-16)**
-
-- Added a new tab in Settings: "API Keys", where the API keys configuration for Google and Anthropic models has been relocated.
-- Introduced a new mode in "Chat with Files": "Retrieve Only", which allows for retrieving raw documents from the index.
-- Fixed a bug related to tool calls in the Gemini provider when using Chat with Files mode.
-
 # Credits and links
 
 **Official website:** <https://pygpt.net>
```