pygpt-net 2.6.19.post1__py3-none-any.whl → 2.6.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +14 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +3 -1
- pygpt_net/controller/agent/agent.py +130 -2
- pygpt_net/controller/agent/experts.py +93 -96
- pygpt_net/controller/agent/llama.py +2 -1
- pygpt_net/controller/assistant/assistant.py +18 -1
- pygpt_net/controller/attachment/attachment.py +17 -1
- pygpt_net/controller/camera/camera.py +15 -7
- pygpt_net/controller/chat/chat.py +2 -2
- pygpt_net/controller/chat/common.py +50 -33
- pygpt_net/controller/chat/image.py +67 -77
- pygpt_net/controller/chat/input.py +94 -166
- pygpt_net/controller/chat/output.py +83 -140
- pygpt_net/controller/chat/response.py +83 -102
- pygpt_net/controller/chat/text.py +116 -149
- pygpt_net/controller/ctx/common.py +2 -1
- pygpt_net/controller/ctx/ctx.py +86 -6
- pygpt_net/controller/files/files.py +13 -1
- pygpt_net/controller/idx/idx.py +26 -2
- pygpt_net/controller/kernel/reply.py +53 -66
- pygpt_net/controller/kernel/stack.py +16 -16
- pygpt_net/controller/model/importer.py +2 -1
- pygpt_net/controller/model/model.py +62 -3
- pygpt_net/controller/settings/editor.py +4 -4
- pygpt_net/controller/ui/ui.py +16 -2
- pygpt_net/core/agents/observer/evaluation.py +3 -3
- pygpt_net/core/agents/provider.py +25 -3
- pygpt_net/core/agents/runner.py +4 -1
- pygpt_net/core/agents/runners/llama_workflow.py +19 -7
- pygpt_net/core/agents/runners/loop.py +3 -1
- pygpt_net/core/agents/runners/openai_workflow.py +17 -3
- pygpt_net/core/agents/tools.py +4 -1
- pygpt_net/core/bridge/context.py +34 -37
- pygpt_net/core/ctx/ctx.py +1 -1
- pygpt_net/core/db/database.py +2 -2
- pygpt_net/core/debug/debug.py +12 -1
- pygpt_net/core/dispatcher/dispatcher.py +24 -1
- pygpt_net/core/events/app.py +7 -7
- pygpt_net/core/events/control.py +26 -26
- pygpt_net/core/events/event.py +6 -3
- pygpt_net/core/events/kernel.py +2 -2
- pygpt_net/core/events/render.py +13 -13
- pygpt_net/core/experts/experts.py +76 -82
- pygpt_net/core/experts/worker.py +12 -12
- pygpt_net/core/models/models.py +5 -1
- pygpt_net/core/models/ollama.py +14 -5
- pygpt_net/core/render/web/helpers.py +2 -2
- pygpt_net/core/render/web/renderer.py +4 -4
- pygpt_net/core/types/__init__.py +2 -1
- pygpt_net/core/types/agent.py +4 -4
- pygpt_net/core/types/base.py +19 -0
- pygpt_net/core/types/console.py +6 -6
- pygpt_net/core/types/mode.py +8 -8
- pygpt_net/core/types/multimodal.py +3 -3
- pygpt_net/core/types/openai.py +2 -1
- pygpt_net/data/config/config.json +4 -4
- pygpt_net/data/config/models.json +19 -3
- pygpt_net/data/config/settings.json +14 -14
- pygpt_net/data/locale/locale.en.ini +2 -2
- pygpt_net/item/ctx.py +256 -240
- pygpt_net/item/model.py +59 -116
- pygpt_net/item/preset.py +122 -105
- pygpt_net/plugin/server/__init__.py +12 -0
- pygpt_net/plugin/server/config.py +301 -0
- pygpt_net/plugin/server/plugin.py +111 -0
- pygpt_net/plugin/server/worker.py +1057 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +3 -3
- pygpt_net/provider/agents/openai/agent.py +4 -12
- pygpt_net/provider/agents/openai/agent_b2b.py +10 -15
- pygpt_net/provider/agents/openai/agent_planner.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_experts.py +3 -7
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -8
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -8
- pygpt_net/provider/agents/openai/bot_researcher.py +2 -18
- pygpt_net/provider/agents/openai/bots/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/manager.py +1 -10
- pygpt_net/provider/agents/openai/evolve.py +5 -9
- pygpt_net/provider/agents/openai/supervisor.py +4 -8
- pygpt_net/provider/core/config/patch.py +10 -3
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +43 -43
- pygpt_net/provider/core/model/patch.py +11 -1
- pygpt_net/provider/core/preset/json_file.py +47 -49
- pygpt_net/provider/gpt/agents/experts.py +2 -2
- pygpt_net/ui/base/config_dialog.py +17 -3
- pygpt_net/ui/widget/option/checkbox.py +16 -2
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/METADATA +30 -6
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/RECORD +93 -88
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/core/ctx/db_sqlite/utils.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #

 import json
@@ -136,35 +136,35 @@ def unpack_item(
     :param row: DB row
     :return: context item
     """
-    item.
-    item.
+    item.additional_ctx = unpack_item_value(row['additional_ctx_json'])
+    item.attachments = unpack_item_value(row['attachments_json'])
+    item.audio_expires_ts = row['audio_expires_ts']
+    item.audio_id = row['audio_id']
+    item.cmds = unpack_item_value(row['cmds_json'])
+    item.doc_ids = unpack_item_value(row['docs_json'])
     item.external_id = row['external_id']
+    item.extra = unpack_item_value(row['extra'])
+    item.files = unpack_item_value(row['files_json'])
+    item.id = unpack_var(row['id'], 'int')
+    item.images = unpack_item_value(row['images_json'])
     item.input = row['input']
-    item.output = row['output']
     item.input_name = row['input_name']
-    item.output_name = row['output_name']
     item.input_timestamp = unpack_var(row['input_ts'], 'int')
-    item.
+    item.input_tokens = unpack_var(row['input_tokens'], 'int')
+    item.internal = unpack_var(row['is_internal'], 'bool')
+    item.meta_id = unpack_var(row['meta_id'], 'int')
     item.mode = row['mode']
     item.model = row['model']
-    item.thread = row['thread_id']
     item.msg_id = row['msg_id']
-    item.
-    item.
-    item.
-    item.urls = unpack_item_value(row['urls_json'])
-    item.images = unpack_item_value(row['images_json'])
-    item.files = unpack_item_value(row['files_json'])
-    item.attachments = unpack_item_value(row['attachments_json'])
-    item.additional_ctx = unpack_item_value(row['additional_ctx_json'])
-    item.extra = unpack_item_value(row['extra'])
-    item.input_tokens = unpack_var(row['input_tokens'], 'int')
+    item.output = row['output']
+    item.output_name = row['output_name']
+    item.output_timestamp = unpack_var(row['output_ts'], 'int')
     item.output_tokens = unpack_var(row['output_tokens'], 'int')
+    item.results = unpack_item_value(row['results_json'])
+    item.run_id = row['run_id']
+    item.thread = row['thread_id']
     item.total_tokens = unpack_var(row['total_tokens'], 'int')
-    item.
-    item.doc_ids = unpack_item_value(row['docs_json'])
-    item.audio_id = row['audio_id']
-    item.audio_expires_ts = row['audio_expires_ts']
+    item.urls = unpack_item_value(row['urls_json'])

     # set defaults
     if item.cmds is None:
@@ -199,31 +199,31 @@ def unpack_meta(
     :param row: DB row
     :return: context meta
     """
-    meta.
-    meta.
-    meta.
+    meta.additional_ctx = unpack_item_value(row['additional_ctx_json'])
+    meta.archived = unpack_var(row['is_archived'], 'bool')
+    meta.assistant = row['assistant_id']
     meta.created = unpack_var(row['created_ts'], 'int')
-    meta.
+    meta.deleted = unpack_var(row['is_deleted'], 'bool')
+    meta.external_id = row['external_id']
+    meta.extra = row['extra']
+    meta.group_id = unpack_var(row['group_id'], 'int')
+    meta.id = unpack_var(row['id'], 'int')
     meta.indexed = unpack_var(row['indexed_ts'], 'int')
-    meta.
-    meta.
-    meta.
+    meta.indexes = unpack_item_value(row['indexes_json'])
+    meta.initialized = unpack_var(row['is_initialized'], 'bool')
+    meta.important = unpack_var(row['is_important'], 'bool')
+    meta.label = unpack_var(row['label'], 'int')
     meta.last_mode = row['last_mode']
     meta.last_model = row['last_model']
-    meta.
-    meta.
+    meta.mode = row['mode']
+    meta.model = row['model']
+    meta.name = row['name']
     meta.preset = row['preset_id']
     meta.run = row['run_id']
     meta.status = row['status']
-    meta.
-    meta.
-    meta.
-    meta.important = unpack_var(row['is_important'], 'bool')
-    meta.archived = unpack_var(row['is_archived'], 'bool')
-    meta.label = unpack_var(row['label'], 'int')
-    meta.indexes = unpack_item_value(row['indexes_json'])
-    meta.group_id = unpack_var(row['group_id'], 'int')
-    meta.additional_ctx = unpack_item_value(row['additional_ctx_json'])
+    meta.thread = row['thread_id']
+    meta.updated = unpack_var(row['updated_ts'], 'int')
+    meta.uuid = row['uuid']

     if meta.additional_ctx is None:
         meta.additional_ctx = []
@@ -253,12 +253,12 @@ def unpack_group(
     :param row: DB row
     :return: context group
     """
-    group.
-    group.uuid = row['uuid']
+    group.additional_ctx = unpack_item_value(row['additional_ctx_json'])
     group.created = unpack_var(row['created_ts'], 'int')
-    group.
+    group.id = unpack_var(row['id'], 'int')
     group.name = row['name']
-    group.
+    group.updated = unpack_var(row['updated_ts'], 'int')
+    group.uuid = row['uuid']
     if group.additional_ctx is None:
         group.additional_ctx = []
     return group
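All of the reordered assignments above go through the same two helpers, `unpack_var` and `unpack_item_value`. Their implementations live elsewhere in `pygpt_net/provider/core/ctx/db_sqlite/utils.py` and are not part of this diff; the following is only a rough sketch of what such helpers typically do, inferred from how they are called here. The hunks themselves mostly alphabetize the assignments; the decoding is unchanged.

import json

def unpack_var(value, type: str):
    # Sketch only: coerce a raw DB column to the requested type; None passes through.
    if value is None:
        return None
    if type == 'int':
        return int(value)
    if type == 'bool':
        return bool(int(value))
    return value

def unpack_item_value(value):
    # Sketch only: decode a *_json column back into a Python object, tolerating NULL/garbage.
    if value is None:
        return None
    try:
        return json.loads(value)
    except (TypeError, ValueError):
        return None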
pygpt_net/provider/core/model/patch.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #

 from packaging.version import parse as parse_version, Version

@@ -753,6 +753,16 @@ class Patch:
             data["gpt-4.1-nano"] = base_data["gpt-4.1-nano"]
             updated = True

+        # < 2.6.21 <-- add OpenAI Agents to Ollama
+        if old < parse_version("2.6.21"):
+            print("Migrating models from < 2.6.21...")
+            for id in data:
+                model = data[id]
+                if model.provider in ["ollama"]:
+                    if "agent_openai" not in model.mode:
+                        model.mode.append(MODE_AGENT_OPENAI)
+                        updated = True
+
         # update file
         if updated:
             data = dict(sorted(data.items()))
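The migration above is a plain append guard, so it is idempotent: re-running it on already-migrated data changes nothing. As an illustration only (the `provider` and `mode` fields come from the diff; the stand-in model class and sample data are hypothetical):

class FakeModel:
    # Minimal stand-in for the model item: only the two fields the migration touches.
    def __init__(self, provider, mode):
        self.provider = provider
        self.mode = mode

MODE_AGENT_OPENAI = "agent_openai"  # constant value assumed from the string check in the diff

def migrate(models: dict) -> bool:
    updated = False
    for id in models:
        model = models[id]
        if model.provider in ["ollama"]:
            if "agent_openai" not in model.mode:
                model.mode.append(MODE_AGENT_OPENAI)
                updated = True
    return updated

data = {"llama3": FakeModel("ollama", ["chat", "llama_index"])}
print(migrate(data), data["llama3"].mode)  # True ['chat', 'llama_index', 'agent_openai']
print(migrate(data))                       # False - already migrated, nothing to do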
pygpt_net/provider/core/preset/json_file.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #

 import json
@@ -226,75 +226,73 @@ class JsonFileProvider(BaseProvider):
         :param data: serialized item
         :param item: item to deserialize
         """
-        if MODE_CHAT in data:
-            item.chat = data[MODE_CHAT]
-        if MODE_COMPLETION in data:
-            item.completion = data[MODE_COMPLETION]
-        if MODE_IMAGE in data:
-            item.img = data[MODE_IMAGE]
-        if MODE_VISION in data:
-            item.vision = data[MODE_VISION]
-        # if MODE_LANGCHAIN in data:
-        #     item.langchain = data[MODE_LANGCHAIN]
-        if MODE_ASSISTANT in data:
-            item.assistant = data[MODE_ASSISTANT]
-        if MODE_LLAMA_INDEX in data:
-            item.llama_index = data[MODE_LLAMA_INDEX]
         if MODE_AGENT in data:
             item.agent = data[MODE_AGENT]
         if MODE_AGENT_LLAMA in data:
             item.agent_llama = data[MODE_AGENT_LLAMA]
         if MODE_AGENT_OPENAI in data:
             item.agent_openai = data[MODE_AGENT_OPENAI]
-        if
-            item.
+        if MODE_ASSISTANT in data:
+            item.assistant = data[MODE_ASSISTANT]
         if MODE_AUDIO in data:
             item.audio = data[MODE_AUDIO]
-        if
-            item.
+        if MODE_CHAT in data:
+            item.chat = data[MODE_CHAT]
+        if MODE_COMPLETION in data:
+            item.completion = data[MODE_COMPLETION]
         if MODE_COMPUTER in data:
             item.computer = data[MODE_COMPUTER]
+        if MODE_EXPERT in data:
+            item.expert = data[MODE_EXPERT]
+        if MODE_IMAGE in data:
+            item.img = data[MODE_IMAGE]
+        if MODE_LLAMA_INDEX in data:
+            item.llama_index = data[MODE_LLAMA_INDEX]
+        if MODE_RESEARCH in data:
+            item.research = data[MODE_RESEARCH]
+        if MODE_VISION in data:
+            item.vision = data[MODE_VISION]

-        if 'uuid' in data:
-            item.uuid = data['uuid']
-        if 'name' in data:
-            item.name = data['name']
-        if 'ai_name' in data:
-            item.ai_name = data['ai_name']
-        if 'ai_avatar' in data:
-            item.ai_avatar = data['ai_avatar']
-        if 'ai_personalize' in data:
-            item.ai_personalize = data['ai_personalize']
-        if 'user_name' in data:
-            item.user_name = data['user_name']
-        if 'prompt' in data:
-            item.prompt = data['prompt']
-        if 'temperature' in data:
-            item.temperature = data['temperature']
-        if 'filename' in data:
-            item.filename = data['filename']
-        if 'model' in data:
-            item.model = data['model']
-        if 'tools' in data:
-            item.tools = data['tools']
-        if 'experts' in data:
-            item.experts = data['experts']
-        if 'idx' in data:
-            item.idx = data['idx']
         if 'agent_provider' in data:
             item.agent_provider = data['agent_provider']
         if 'agent_provider_openai' in data:
             item.agent_provider_openai = data['agent_provider_openai']
+        if 'ai_avatar' in data:
+            item.ai_avatar = data['ai_avatar']
+        if 'ai_name' in data:
+            item.ai_name = data['ai_name']
+        if 'ai_personalize' in data:
+            item.ai_personalize = data['ai_personalize']
         if 'assistant_id' in data:
             item.assistant_id = data['assistant_id']
-        if 'enabled' in data:
-            item.enabled = data['enabled']
         if 'description' in data:
             item.description = data['description']
-        if '
-            item.
+        if 'enabled' in data:
+            item.enabled = data['enabled']
+        if 'experts' in data:
+            item.experts = data['experts']
         if 'extra' in data:
             item.extra = data['extra']
+        if 'filename' in data:
+            item.filename = data['filename']
+        if 'idx' in data:
+            item.idx = data['idx']
+        if 'model' in data:
+            item.model = data['model']
+        if 'name' in data:
+            item.name = data['name']
+        if 'prompt' in data:
+            item.prompt = data['prompt']
+        if 'remote_tools' in data:
+            item.remote_tools = data['remote_tools']
+        if 'temperature' in data:
+            item.temperature = data['temperature']
+        if 'tools' in data:
+            item.tools = data['tools']
+        if 'user_name' in data:
+            item.user_name = data['user_name']
+        if 'uuid' in data:
+            item.uuid = data['uuid']

         # get version
         if '__meta__' in data and 'version' in data['__meta__']:
pygpt_net/provider/gpt/agents/experts.py

@@ -3,7 +3,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #

 from agents import (

@@ -74,7 +74,7 @@ def get_expert(
     kwargs = {
         "name": agent_name,
         "instructions": prompt,
-        "model": model
+        "model": window.core.agents.provider.get_openai_model(model),
     }
     tool_kwargs = append_tools(
         tools=tools,
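The one-line change in `get_expert` stops passing the raw model item straight to the Agents SDK and instead resolves it through `window.core.agents.provider.get_openai_model(model)`, which is what lets non-OpenAI backends (per the 2.6.21 changelog, Ollama) drive OpenAI Agents mode. The resolver itself lives in `pygpt_net/core/agents/provider.py` and is not shown in this diff; a minimal sketch of the general mechanism, assuming an Ollama server exposing its OpenAI-compatible endpoint at the default local URL, could look like this:

from openai import AsyncOpenAI
from agents import OpenAIChatCompletionsModel

def get_openai_model_sketch(model_name: str, base_url: str = "http://localhost:11434/v1"):
    # Hypothetical illustration: wrap a locally served Ollama model behind an
    # OpenAI-compatible client so the openai-agents SDK can use it like any hosted chat model.
    client = AsyncOpenAI(base_url=base_url, api_key="ollama")  # Ollama accepts any key
    return OpenAIChatCompletionsModel(model=model_name, openai_client=client)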
pygpt_net/ui/base/config_dialog.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.22 10:00:00 #
 # ================================================== #

 from PySide6.QtCore import Qt

@@ -91,6 +91,20 @@ class BaseConfigDialog:

         return widgets

+    def trans_or_not(self, label: str):
+        """
+        Translate label or return it as is if translation is not available
+
+        :param label: Label to translate
+        :return: Translated label or original if not found
+        """
+        txt = trans(label)
+        if txt == label:
+            if txt.startswith("dictionary."):
+                # get only last part after the dot
+                txt = txt.split('.')[-1].capitalize()
+        return txt
+
     def add_option(self, widget: QWidget, option: dict) -> QHBoxLayout:
         """
         Add option

@@ -105,7 +119,7 @@ class BaseConfigDialog:
         label_key = f'{label}.label'
         nodes = self.window.ui.nodes

-        txt =
+        txt = self.trans_or_not(label)
         if extra.get('bold'):
             nodes[label_key] = TitleLabel(txt)
         else:

@@ -151,7 +165,7 @@ class BaseConfigDialog:
         extra = option.get('extra') or {}
         nodes = self.window.ui.nodes

-        txt =
+        txt = self.trans_or_not(label)
         if extra.get('bold'):
             nodes[label_key] = TitleLabel(txt)
         else:
pygpt_net/ui/widget/option/checkbox.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.22 10:00:00 #
 # ================================================== #

 from PySide6.QtGui import QIcon

@@ -50,7 +50,7 @@ class OptionCheckbox(QWidget):
         if self.option is not None:
            if "label" in self.option and self.option["label"] is not None \
                    and self.option["label"] != "":
-                self.title =
+                self.title = self.trans_or_not(self.option["label"])
            if "value" in self.option:
                self.value = self.option["value"]
            if "real_time" in self.option:

@@ -90,6 +90,20 @@ class OptionCheckbox(QWidget):

         #self.setLayout(self.layout)

+    def trans_or_not(self, label: str):
+        """
+        Translate label or return it as is if translation is not available
+
+        :param label: Label to translate
+        :return: Translated label or original if not found
+        """
+        txt = trans(label)
+        if txt == label:
+            if txt.startswith("dictionary."):
+                # get only last part after the dot
+                txt = txt.split('.')[-1].capitalize()
+        return txt
+
     def setIcon(self, icon: str):
         """
         Set icon
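The same `trans_or_not` helper is duplicated in both UI files above. Its effect is easiest to see in isolation; here is a standalone rendering of the fallback with a stubbed `trans` that, as in the untranslated case, echoes the key back unchanged:

def trans(label: str) -> str:
    # Stub standing in for pygpt_net's locale lookup: no translation found,
    # so the key is returned unchanged.
    return label

def trans_or_not(label: str) -> str:
    txt = trans(label)
    if txt == label and txt.startswith("dictionary."):
        # fall back to the last segment of the key, capitalized
        txt = txt.split('.')[-1].capitalize()
    return txt

print(trans_or_not("dictionary.params.enabled"))  # -> "Enabled"
print(trans_or_not("settings.api_key"))           # -> "settings.api_key" (left as-is)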
{pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: pygpt-net
-Version: 2.6.
+Version: 2.6.21
 Summary: Desktop AI Assistant powered by: OpenAI GPT-5, o1, o3, GPT-4, Gemini, Claude, Grok, DeepSeek, and other models supported by Llama Index, and Ollama. Chatbot, agents, completion, image generation, vision analysis, speech-to-text, plugins, internet access, file handling, command execution and more.
 License: MIT
 Keywords: py_gpt,py-gpt,pygpt,desktop,app,o1,o3,gpt-5,gpt,gpt4,gpt-4o,gpt-4v,gpt3.5,gpt-4,gpt-4-vision,gpt-3.5,llama3,mistral,gemini,grok,deepseek,bielik,claude,tts,whisper,vision,chatgpt,dall-e,chat,chatbot,assistant,text completion,image generation,ai,api,openai,api key,langchain,llama-index,ollama,presets,ui,qt,pyside

@@ -23,6 +23,7 @@ Requires-Dist: PySide6 (==6.9.1)
 Requires-Dist: Pygments (>=2.19.2,<3.0.0)
 Requires-Dist: SQLAlchemy (>=2.0.41,<3.0.0)
 Requires-Dist: SpeechRecognition (>=3.14.3,<4.0.0)
+Requires-Dist: Telethon (>=1.40.0,<2.0.0)
 Requires-Dist: anthropic (>=0.54.0,<0.55.0)
 Requires-Dist: azure-core (>=1.34.0,<2.0.0)
 Requires-Dist: beautifulsoup4 (>=4.13.4,<5.0.0)

@@ -80,6 +81,7 @@ Requires-Dist: openai-agents (>=0.2.3,<0.3.0)
 Requires-Dist: opencv-python (>=4.11.0.86,<5.0.0.0)
 Requires-Dist: packaging (>=24.2,<25.0)
 Requires-Dist: pandas (>=2.2.3,<3.0.0)
+Requires-Dist: paramiko (>=4.0.0,<5.0.0)
 Requires-Dist: pillow (>=10.4.0,<11.0.0)
 Requires-Dist: pinecone-client (>=3.2.2,<4.0.0)
 Requires-Dist: psutil (>=7.0.0,<8.0.0)

@@ -109,7 +111,7 @@ Description-Content-Type: text/markdown

 [](https://snapcraft.io/pygpt)

-Release: **2.6.
+Release: **2.6.21** | build: **2025-08-24** | Python: **>=3.10, <3.14**

 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >

@@ -928,8 +930,6 @@ Below is a pattern for how different types of agents work. You can use these pat

 - When the `Computer use` tool is selected for an expert or when the `computer-use` model is chosen, all other tools will not be available for that model.

-- Ollama models are not supported in this mode.
-
 ## Agent (Autonomous)

 This is an older version of the Agent mode, still available as legacy. However, it is recommended to use the newer mode: `Agent (LlamaIndex)`.

@@ -1454,6 +1454,8 @@ The following plugins are currently available, and model can use them instantly:

 - `Serial port / USB` - plugin provides commands for reading and sending data to USB ports.

+- `Server (SSH/FTP)` - Connect to remote servers using FTP, SFTP, and SSH. Execute remote commands, upload, download, and more.
+
 - `Slack` - Handle users, conversations, messages, and files on Slack.

 - `System Prompt Extra (append)` - appends additional system prompts (extra data) from a list to every current system prompt. You can enhance every system prompt with extra instructions that will be automatically appended to the system prompt.

@@ -1802,6 +1804,14 @@ You can send commands to, for example, an Arduino or any other controllers using

 Documentation: https://pygpt.readthedocs.io/en/latest/plugins.html#serial-port-usb

+## Server (SSH/FTP)
+
+The Server plugin provides integration for remote server management via SSH, SFTP, and FTP protocols. This plugin allows executing commands, transferring files, and managing directories on remote servers.
+
+For security reasons, the model will not see any credentials, only the server name and port fields (see the docs)
+
+Documentation: https://pygpt.readthedocs.io/en/latest/plugins.html#server-ssh-ftp
+
 ## Slack

 The Slack plugin integrates with the Slack Web API, enabling interaction with Slack workspaces through the application. This plugin supports OAuth2 for authentication, which allows for seamless integration with Slack services, enabling actions such as posting messages, retrieving users, and managing conversations.
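The new `paramiko` dependency listed above is what a plugin of this kind would typically build on for the SSH/SFTP side (the actual implementation lives in `pygpt_net/plugin/server/worker.py` and is not part of this README). A minimal, hypothetical sketch of remote command execution and file transfer with paramiko, using placeholder host, credentials, and paths:

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname="example.com", port=22, username="user", password="secret")

# run a remote command and read its output
stdin, stdout, stderr = client.exec_command("uname -a")
print(stdout.read().decode())

# upload / download over SFTP on the same connection
sftp = client.open_sftp()
sftp.put("local.txt", "/remote/local.txt")
sftp.get("/remote/report.log", "report.log")
sftp.close()

client.close()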
@@ -2477,8 +2487,6 @@ Enable/disable remote tools, like Web Search or Image generation to use in OpenA

 - `Use ReAct agent for Tool calls in Chat with Files mode`: Enable ReAct agent for tool calls in Chat with Files mode.

-- `Display full agent output in chat window`: If enabled, a real-time output from agent reasoning will be displayed with the response.
-
 - `Auto-retrieve additional context`: Enable automatic retrieve of additional context from vector store in every query.

 **Embeddings**

@@ -2533,6 +2541,8 @@ Enable/disable remote tools, like Web Search or Image generation to use in OpenA

 - `Display a tray notification when the goal is achieved.`: If enabled, a notification will be displayed after goal achieved / finished run.

+- `Display full agent output in chat window`: If enabled, a real-time output from agent reasoning will be displayed with the response.
+
 **Agents (LlamaIndex / OpenAI)**

 - `Max steps (per iteration)` - Max steps is one iteration before goal achieved

@@ -3526,6 +3536,20 @@ may consume additional tokens that are not displayed in the main window.

 ## Recent changes:

+**2.6.21 (2025-08-24)**
+
+- Ollama models are now available in OpenAI Agents mode.
+- Improved parsing of responses from Agents.
+- Fix: do not initialize index in Agents mode if not provided.
+- Fix: agent response evaluation steps limit.
+- Fix: do not execute code in agents if Tools are disabled.
+- Refactoring.
+
+**2.6.20 (2025-08-22)**
+
+- Added a new plugin: Server (FTP/SSH) - connect to remote servers using FTP, SFTP, and SSH. Execute remote commands, upload, download, and more (beta).
+- Added support for Wayland in Snap/compiled versions.
+
 **2.6.19 (2025-08-22)**

 - Fixed: added prevention for summarizing an empty context.
|