pygpt-net 2.6.22__py3-none-any.whl → 2.6.24__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in the public registry. It is provided for informational purposes only.
- pygpt_net/CHANGELOG.txt +16 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/agent/llama.py +3 -0
- pygpt_net/controller/chat/response.py +6 -1
- pygpt_net/controller/files/files.py +24 -55
- pygpt_net/controller/theme/theme.py +3 -3
- pygpt_net/core/agents/observer/evaluation.py +2 -2
- pygpt_net/core/agents/runners/loop.py +1 -0
- pygpt_net/core/attachments/context.py +4 -4
- pygpt_net/core/bridge/bridge.py +2 -0
- pygpt_net/core/filesystem/opener.py +261 -0
- pygpt_net/core/filesystem/url.py +13 -10
- pygpt_net/core/idx/chat.py +1 -1
- pygpt_net/core/idx/indexing.py +3 -3
- pygpt_net/core/idx/llm.py +61 -2
- pygpt_net/core/platforms/platforms.py +5 -4
- pygpt_net/data/config/config.json +21 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/settings.json +18 -0
- pygpt_net/data/css/web-blocks.dark.css +7 -1
- pygpt_net/data/css/web-blocks.light.css +5 -2
- pygpt_net/data/css/web-chatgpt.dark.css +7 -1
- pygpt_net/data/css/web-chatgpt.light.css +3 -0
- pygpt_net/data/css/web-chatgpt_wide.dark.css +7 -1
- pygpt_net/data/css/web-chatgpt_wide.light.css +3 -0
- pygpt_net/data/locale/locale.de.ini +47 -0
- pygpt_net/data/locale/locale.en.ini +50 -1
- pygpt_net/data/locale/locale.es.ini +47 -0
- pygpt_net/data/locale/locale.fr.ini +47 -0
- pygpt_net/data/locale/locale.it.ini +47 -0
- pygpt_net/data/locale/locale.pl.ini +47 -0
- pygpt_net/data/locale/locale.uk.ini +47 -0
- pygpt_net/data/locale/locale.zh.ini +47 -0
- pygpt_net/provider/agents/llama_index/codeact_workflow.py +8 -7
- pygpt_net/provider/agents/llama_index/planner_workflow.py +11 -10
- pygpt_net/provider/agents/llama_index/supervisor_workflow.py +9 -8
- pygpt_net/provider/agents/openai/agent_b2b.py +30 -17
- pygpt_net/provider/agents/openai/agent_planner.py +29 -29
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +21 -23
- pygpt_net/provider/agents/openai/agent_with_feedback.py +21 -23
- pygpt_net/provider/agents/openai/bot_researcher.py +25 -30
- pygpt_net/provider/agents/openai/evolve.py +37 -39
- pygpt_net/provider/agents/openai/supervisor.py +16 -18
- pygpt_net/provider/core/config/patch.py +20 -1
- pygpt_net/provider/llms/anthropic.py +5 -4
- pygpt_net/provider/llms/google.py +2 -2
- pygpt_net/ui/layout/toolbox/agent_llama.py +2 -3
- pygpt_net/ui/widget/tabs/layout.py +6 -4
- pygpt_net/ui/widget/tabs/output.py +348 -13
- pygpt_net/ui/widget/textarea/input.py +74 -8
- {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/METADATA +34 -25
- {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/RECORD +55 -54
- {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/entry_points.txt +0 -0

pygpt_net/provider/agents/openai/supervisor.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.26 01:00:00 #
 # ================================================== #

 import json
@@ -16,10 +16,8 @@ from typing import Dict, Any, Tuple, Optional
 from agents import (
     Agent as OpenAIAgent,
     Runner,
-    RunConfig,
     RunContextWrapper,
     SQLiteSession,
-    ModelSettings,
     function_tool,
 )

@@ -33,10 +31,10 @@ from pygpt_net.core.types import (
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem

-from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
 from pygpt_net.provider.gpt.agents.remote_tools import append_tools
 from pygpt_net.provider.gpt.agents.response import StreamHandler
 from pygpt_net.provider.gpt.agents.experts import get_experts
+from pygpt_net.utils import trans

 from ..base import BaseAgent

@@ -199,7 +197,7 @@ class Agent(BaseAgent):
             :param instruction: Instruction for the Worker
             :return: Output from the Worker
             """
-            item_ctx.stream = f"\n\n**
+            item_ctx.stream = f"\n\n**{trans('agent.name.supervisor')} --> {trans('agent.name.worker')}:** {instruction}\n\n"
             bridge.on_step(item_ctx, True)
             handler.begin = False
             result = await Runner.run(
@@ -295,11 +293,11 @@
         if action == "ask_user":
             question = response.get("question", "")
             reasoning = response.get("reasoning", "")
-            return f"**
+            return f"**{trans('agent.name.supervisor')}:** {reasoning}\n\n{question}"
         elif action == "final":
             final_answer = response.get("final_answer", "")
             reasoning = response.get("reasoning", "")
-            return f"**
+            return f"**{trans('agent.name.supervisor')}:** {reasoning}\n\n{final_answer}\n\n"
         else:
             return response.get("final_answer", "")

@@ -311,41 +309,41 @@
         """
         return {
             "supervisor": {
-                "label": "
+                "label": trans("agent.option.section.supervisor"),
                 "options": {
                     "prompt": {
                         "type": "textarea",
-                        "label": "
-                        "description": "
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.supervisor.desc"),
                         "default": SUPERVISOR_PROMPT,
                     },
                 }
             },
             "worker": {
-                "label": "
+                "label": trans("agent.option.section.worker"),
                 "options": {
                     "model": {
-                        "label": "
+                        "label": trans("agent.option.model"),
                         "type": "combo",
                         "use": "models",
                         "default": "gpt-4o",
                     },
                     "prompt": {
                         "type": "textarea",
-                        "label": "
-                        "description": "
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.worker.desc"),
                         "default": WORKER_PROMPT,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "
-                        "description": "
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": True,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "
-                        "description": "
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": True,
                     },
                 }

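The theme running through this file is localization: hardcoded labels, descriptions, and role names are replaced with trans() lookups, which matches the ~47 lines added to each locale .ini file in this release. Below is a minimal sketch of how such a key-based lookup can work; the function name, .ini layout, and fallback behavior are assumptions for illustration and are not taken from the actual pygpt_net.utils.trans implementation.

```python
# Illustrative sketch only: a key-based locale lookup in the spirit of trans().
# The ini path, section handling, and fallback are assumptions, not the real API.
import configparser

def translate(key: str, ini_path: str = "data/locale/locale.en.ini") -> str:
    """Return the localized string for `key`, or the key itself if missing."""
    parser = configparser.ConfigParser(interpolation=None)
    parser.read(ini_path, encoding="utf-8")
    for section in parser.sections():
        if parser.has_option(section, key):
            return parser.get(section, key)
    return key  # graceful fallback keeps untranslated keys visible in the UI

# Example, mirroring the option labels in the diff above:
# label = translate("agent.option.section.supervisor")
```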
pygpt_net/provider/core/config/patch.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.25 20:00:00 #
 # ================================================== #

 import copy
@@ -2280,6 +2280,25 @@ class Patch:
         data["agent.output.render.all"] = True
         updated = True

+        # < 2.6.23 -- fix: restore <p> color
+        if old < parse_version("2.6.23"):
+            print("Migrating config from < 2.6.23...")
+            self.window.core.updater.patch_css('web-chatgpt.dark.css', True)
+            self.window.core.updater.patch_css('web-chatgpt_wide.dark.css', True)
+            self.window.core.updater.patch_css('web-chatgpt.light.css', True)
+            self.window.core.updater.patch_css('web-chatgpt_wide.light.css', True)
+            self.window.core.updater.patch_css('web-blocks.dark.css', True)
+            self.window.core.updater.patch_css('web-blocks.light.css', True)
+            updated = True
+
+        # < 2.6.24
+        if old < parse_version("2.6.24"):
+            print("Migrating config from < 2.6.24...")
+            if "llama.idx.embeddings.default" not in data:
+                data["llama.idx.embeddings.default"] = self.window.core.config.get_base(
+                    'llama.idx.embeddings.default')
+            updated = True
+
         # update file
         migrated = False
         if updated:

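Both new migration steps follow the same version-gate pattern used throughout the Patch class: compare the version that last wrote the config against a threshold, apply the fix once, and mark the config as updated. A self-contained sketch of that pattern is below; the plain dict, the defaults mapping, and the parse_version import shown here are simplifications standing in for the real window/config plumbing.

```python
# Minimal sketch of a version-gated config migration in the style of the Patch class.
# The dict-based config and defaults are placeholders for the real config objects.
from packaging.version import parse as parse_version

def migrate(data: dict, old_version: str, base_defaults: dict) -> bool:
    """Backfill keys introduced after `old_version`; return True if anything changed."""
    old = parse_version(old_version)
    updated = False
    if old < parse_version("2.6.24"):
        # new key in 2.6.24: default embeddings provider for the llama-index integration
        if "llama.idx.embeddings.default" not in data:
            data["llama.idx.embeddings.default"] = base_defaults.get("llama.idx.embeddings.default")
        updated = True
    return updated
```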
pygpt_net/provider/llms/anthropic.py

@@ -63,11 +63,12 @@ class AnthropicLLM(BaseLLM):
         :param window: window instance
         :return: list of models
         """
-
-
-
-
+        import anthropic
+        client = anthropic.Anthropic(
+            api_key=window.core.config.get('api_key_anthropic', "")
+        )
         models_list = client.models.list()
+        items = []
         if models_list.data:
             for item in models_list.data:
                 items.append({

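The rewritten body constructs the Anthropic client inline and enumerates available models through the SDK. A condensed, standalone version of that flow is shown below; reading the API key from an environment variable (rather than the pygpt-net config) and the two dict fields are assumptions made to keep the sketch self-contained.

```python
# Standalone sketch mirroring the updated model listing; the env-var key source
# and the item dict fields are assumptions for this example.
import os
import anthropic

client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", ""))
models_list = client.models.list()

items = []
if models_list.data:
    for item in models_list.data:
        items.append({
            "id": item.id,    # model identifier returned by the API
            "name": item.id,  # the real code may prefer a display name field
        })
print(items)
```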
pygpt_net/provider/llms/google.py

@@ -67,13 +67,13 @@ class GoogleLLM(BaseLLM):
         :param config: config keyword arguments list
         :return: Embedding provider instance
         """
-        from llama_index.embeddings.
+        from llama_index.embeddings.google_genai import GoogleGenAIEmbedding
         args = {}
         if config is not None:
             args = self.parse_args({
                 "args": config,
             }, window)
-        return
+        return GoogleGenAIEmbedding(**args)

     def get_models(
         self,

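The embedding provider now comes from the llama-index Google GenAI integration. A minimal instantiation sketch follows; the model name and the explicit keyword arguments are illustrative assumptions, since the real code simply forwards whatever arguments the user configured via parse_args().

```python
# Minimal sketch of building the Google GenAI embedding provider.
# The model_name and api_key values below are assumptions for illustration.
from llama_index.embeddings.google_genai import GoogleGenAIEmbedding

embed_model = GoogleGenAIEmbedding(
    model_name="text-embedding-004",
    api_key="YOUR_GOOGLE_API_KEY",
)
vector = embed_model.get_text_embedding("hello world")
print(len(vector))  # dimensionality of the returned embedding
```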
pygpt_net/ui/layout/toolbox/agent_llama.py

@@ -40,7 +40,6 @@ class AgentLlama:

         container = QWidget(win)

-        nodes['agent.llama.loop.score.label'] = QLabel(trans("toolbox.agent.llama.loop.score.label"), parent=container)
         nodes['agent.llama.loop.score'] = OptionSlider(
             win,
             'global',
@@ -50,7 +49,7 @@ class AgentLlama:
         nodes['agent.llama.loop.score'].setToolTip(trans("toolbox.agent.llama.loop.score.tooltip"))
         config_global['agent.llama.loop.score'] = nodes['agent.llama.loop.score']

-        nodes['agent.llama.loop.mode.label'] = QLabel(trans("toolbox.agent.llama.loop.mode.label"), parent=
+        nodes['agent.llama.loop.mode.label'] = QLabel(trans("toolbox.agent.llama.loop.mode.label"), parent=win)
         nodes['agent.llama.loop.mode'] = OptionCombo(
             win,
             'global',
@@ -66,7 +65,7 @@ class AgentLlama:
         )
         config_global['agent.llama.loop.enabled'] = nodes['agent.llama.loop.enabled']

-        nodes['agent.llama.loop.label'] = QLabel(trans("toolbox.agent.llama.loop.label"), parent=
+        nodes['agent.llama.loop.label'] = QLabel(trans("toolbox.agent.llama.loop.label"), parent=win)

         cols = QHBoxLayout()
         cols.addWidget(config_global['agent.llama.loop.enabled'])

pygpt_net/ui/widget/tabs/layout.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.25 18:00:00 #
 # ================================================== #

 from typing import Optional
@@ -19,7 +19,7 @@ from pygpt_net.ui.widget.tabs.output import OutputTabs


 class OutputColumn(QWidget):
-    def __init__(self, window=None):
+    def __init__(self, window=None, idx: Optional[int] = None):
         """
         Output column

@@ -28,6 +28,8 @@ class OutputColumn(QWidget):
         super(OutputColumn, self).__init__(window)
         self.window = window
         self.idx = -1
+        if idx is not None:
+            self.idx = idx
         self.tabs = OutputTabs(self.window, column=self)
         self.layout = QVBoxLayout()
         self.layout.addWidget(self.tabs)
@@ -92,8 +94,8 @@ class OutputLayout(QWidget):
         self.columns = []
         self._was_width_zero = None

-        column1 = OutputColumn(self.window)
-        column2 = OutputColumn(self.window)
+        column1 = OutputColumn(self.window, idx=0)
+        column2 = OutputColumn(self.window, idx=1)
         self.add_column(column1)
         self.add_column(column2)

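The column index is now passed at construction time instead of being assigned after the widget is built, so each OutputColumn knows its position before it creates its child widgets. A stripped-down sketch of the same wiring is below; the real OutputTabs child and window plumbing are omitted, so this only demonstrates how idx is threaded through.

```python
# Stripped-down sketch of the constructor change; OutputTabs and the rest of the
# widget tree are omitted, so this only shows the idx wiring.
from typing import Optional
from PySide6.QtWidgets import QWidget, QVBoxLayout

class OutputColumn(QWidget):
    def __init__(self, window=None, idx: Optional[int] = None):
        super().__init__(window)
        self.window = window
        self.idx = -1
        if idx is not None:
            self.idx = idx  # position is known before child widgets are created
        self.layout = QVBoxLayout()
        self.setLayout(self.layout)

# As in OutputLayout above:
# column1 = OutputColumn(window, idx=0)
# column2 = OutputColumn(window, idx=1)
```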