pygpt-net 2.5.98.post1__py3-none-any.whl → 2.6.0.post1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registry.
- pygpt_net/CHANGELOG.txt +9 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/ctx/ctx.py +7 -2
- pygpt_net/core/agents/runners/openai_workflow.py +9 -6
- pygpt_net/core/render/plain/pid.py +3 -2
- pygpt_net/core/render/web/body.py +21 -5
- pygpt_net/core/render/web/pid.py +26 -6
- pygpt_net/core/render/web/renderer.py +4 -10
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/css/style.dark.css +13 -0
- pygpt_net/data/css/style.light.css +29 -0
- pygpt_net/data/icon.ico +0 -0
- pygpt_net/data/icon.png +0 -0
- pygpt_net/data/locale/locale.de.ini +1 -1
- pygpt_net/data/locale/locale.en.ini +1 -1
- pygpt_net/data/locale/locale.es.ini +1 -1
- pygpt_net/data/locale/locale.fr.ini +1 -1
- pygpt_net/data/locale/locale.it.ini +1 -1
- pygpt_net/data/locale/locale.pl.ini +1 -1
- pygpt_net/data/locale/locale.uk.ini +1 -1
- pygpt_net/data/locale/locale.zh.ini +1 -1
- pygpt_net/provider/agents/llama_index/code_act.py +5 -4
- pygpt_net/provider/agents/llama_index/openai.py +3 -3
- pygpt_net/provider/agents/llama_index/openai_assistant.py +3 -3
- pygpt_net/provider/agents/llama_index/planner.py +6 -6
- pygpt_net/provider/agents/llama_index/react.py +3 -7
- pygpt_net/provider/agents/llama_index/react_workflow.py +4 -7
- pygpt_net/provider/agents/openai/agent_b2b.py +45 -10
- pygpt_net/provider/agents/openai/agent_planner.py +24 -2
- pygpt_net/provider/agents/openai/agent_with_experts.py +1 -38
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +23 -38
- pygpt_net/provider/agents/openai/agent_with_feedback.py +23 -2
- pygpt_net/provider/agents/openai/bot_researcher.py +2 -6
- pygpt_net/provider/agents/openai/evolve.py +23 -2
- pygpt_net/provider/core/config/patch.py +7 -0
- pygpt_net/provider/gpt/__init__.py +21 -3
- pygpt_net/tools/html_canvas/ui/widgets.py +4 -0
- pygpt_net/tools/media_player/tool.py +11 -2
- pygpt_net/tools/media_player/ui/widgets.py +99 -94
- pygpt_net/ui/widget/calendar/select.py +10 -2
- pygpt_net/ui/widget/filesystem/explorer.py +1 -0
- {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.post1.dist-info}/METADATA +14 -147
- {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.post1.dist-info}/RECORD +47 -47
- {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.post1.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.post1.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.98.post1.dist-info → pygpt_net-2.6.0.post1.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt
CHANGED
@@ -1,3 +1,12 @@
+2.6.0 (2025-08-13)
+
+- Added split responses to the OpenAI Agents in non-streaming mode.
+- Disabled auto-scroll when manually scrolled to the top.
+- Increased scrollbar width in the light theme.
+- Optimized the clearing of the streaming buffer.
+- Optimized imports.
+- Made CSS improvements.
+
 2.5.98 (2025-08-12)
 
 - Added support for GPT-5 in LlamaIndex/Chat with Files mode.

pygpt_net/__init__.py
CHANGED
@@ -6,15 +6,15 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.13 00:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.
-__build__ = "2025-08-
+__version__ = "2.6.0"
+__build__ = "2025-08-13"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
 __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"

pygpt_net/controller/ctx/ctx.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Optional, List
@@ -21,7 +21,7 @@ from .common import Common
 from .summarizer import Summarizer
 from .extra import Extra
 
-from pygpt_net.utils import trans
+from pygpt_net.utils import trans
 
 
 class Ctx:
@@ -172,6 +172,7 @@ class Ctx:
         # update additional context attachments
         self.window.controller.chat.attachment.update()
         self.set_selected(id)
+        self.clean_memory()  # clean memory
 
     def select_on_list_only(self, id: int):
         """
@@ -251,6 +252,10 @@ class Ctx:
         self.group_id = None
         self.new()
 
+    def clean_memory(self):
+        """Clean memory"""
+        self.window.core.gpt.close()  # clear gpt client
+
     def new(
             self,
             force: bool = False,

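Note: clean_memory() delegates to window.core.gpt.close(), which is extended in pygpt_net/provider/gpt/__init__.py (that hunk is not shown above). A minimal sketch of the underlying idea, using hypothetical names and assuming the OpenAI SDK client exposes close():

    from openai import OpenAI

    class GptWrapper:
        """Hypothetical wrapper: lazily create the API client, release it on demand."""

        def __init__(self):
            self._client = None  # created on first use

        def get_client(self) -> OpenAI:
            if self._client is None:
                self._client = OpenAI()  # reads OPENAI_API_KEY from the environment
            return self._client

        def close(self):
            # Drop the cached client (and its HTTP connection pool) so that
            # switching contexts does not keep stale sessions in memory.
            if self._client is not None:
                self._client.close()
                self._client = None
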
pygpt_net/core/agents/runners/openai_workflow.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Dict, Any, List
@@ -112,7 +112,8 @@ class OpenAIWorkflow(BaseRunner):
         input: str,
         output: str,
         response_id: str,
-        finish: bool = False
+        finish: bool = False,
+        stream: bool = True,
     ) -> CtxItem:
         """
         Callback for next context in cycle
@@ -122,6 +123,7 @@ class OpenAIWorkflow(BaseRunner):
         :param output: output text
         :param response_id: response id for OpenAI
         :param finish: If
+        :param stream: is streaming enabled
         :return: CtxItem - the next context item in the cycle
         """
         # finish current stream
@@ -129,8 +131,10 @@ class OpenAIWorkflow(BaseRunner):
         ctx.extra["agent_output"] = True  # allow usage in history
         ctx.output = output  # set output to current context
         self.window.core.ctx.update_item(ctx)
-…
-…
+
+        if stream:
+            self.send_stream(ctx, signals, False)
+            self.end_stream(ctx, signals)
 
         # create and return next context item
         next_ctx = self.add_next_ctx(ctx)
@@ -181,12 +185,11 @@ class OpenAIWorkflow(BaseRunner):
         # run agent
         ctx, output, response_id = await run(**run_kwargs)
 
-        if not ctx.partial:
+        if not ctx.partial or self.is_stopped():
             response_ctx = self.make_response(ctx, prompt, output, response_id)
             self.send_response(response_ctx, signals, KernelEvent.APPEND_DATA)
         else:
             ctx.partial = False  # last part, not partial anymore
-            # already handled in next_ctx(), so do not return response
 
         self.set_idle(signals)
         return True

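The new stream flag is what drives the "split responses in non-streaming mode" changelog entry: when streaming is off, each finished step is kept as its own context item instead of being flushed through the live stream. A simplified, self-contained sketch of that branching (helper names are stand-ins, not the package's API):

    from dataclasses import dataclass, field

    @dataclass
    class Ctx:
        output: str = ""
        extra: dict = field(default_factory=dict)

    def next_step(ctx: Ctx, output: str, history: list, stream: bool = True) -> Ctx:
        """Close the current step and hand back a fresh context (illustrative only)."""
        ctx.extra["agent_output"] = True   # keep the step usable in history
        ctx.output = output                # persist the step's output
        history.append(ctx)                # stands in for context update/persistence
        if stream:
            print(f"[stream] flush + end stream for: {output!r}")
        else:
            print(f"[split] step stored as a separate context item: {output!r}")
        return Ctx()                       # next context item in the cycle

    history = []
    ctx = Ctx()
    for step in ("plan", "research", "final answer"):
        ctx = next_step(ctx, step, history, stream=False)
    print(len(history), "split context items")
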
pygpt_net/core/render/plain/pid.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 import io
@@ -29,7 +29,8 @@ class PidData:
 
     @buffer.setter
     def buffer(self, value: str):
-        self._buffer
+        self._buffer.seek(0)
+        self._buffer.truncate(0)
         if value:
             self._buffer.write(value)

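The setter above is the "optimized clearing of the streaming buffer" from the changelog: the existing io.StringIO is rewound and truncated in place instead of being replaced with a new object. A standalone illustration of the idiom:

    import io

    buf = io.StringIO()
    buf.write("partial streamed chunk")

    # Clear in place: rewind the cursor, then drop everything after it.
    # The same StringIO object is reused, so no new buffer is allocated per reset.
    buf.seek(0)
    buf.truncate(0)

    buf.write("next chunk")
    print(buf.getvalue())  # -> "next chunk"
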
pygpt_net/core/render/web/body.py
CHANGED
@@ -148,10 +148,25 @@ class Body:
         script.parentNode.replaceChild(element, script);
     });
 }
-function
-…
-…
-…
+function isNearBottom(marginPx = 100) {
+    const el = document.scrollingElement || document.documentElement;
+    const distanceToBottom = el.scrollHeight - el.clientHeight - el.scrollTop;
+    return distanceToBottom <= marginPx;
+}
+function scrollToBottom(live = false) {
+    const el = document.scrollingElement || document.documentElement;
+    const marginPx = 300;
+    let behavior = 'instant';
+    if (live == true) {
+        behavior = 'instant'; // no smooth scroll for live updates
+    } else {
+        behavior = 'smooth'; // smooth scroll for normal updates, TODO: implement in Chromium
+    }
+    if (isNearBottom(marginPx) || live == false) {
+        el.scrollTo({ top: el.scrollHeight, behavior });
+    }
+    prevScroll = el.scrollHeight;
+    getScrollPosition(); // store using bridge
 }
 function appendToInput(content) {
     const element = document.getElementById('_append_input_');
@@ -291,6 +306,7 @@ class Body:
 function beginStream() {
     hideTips();
     clearOutput();
+    scrollToBottom();
 }
 function endStream() {
     clearOutput();
@@ -381,7 +397,7 @@ class Body:
             highlightCode(doMath); // with or without math
         }
     }
-    scrollToBottom();
+    scrollToBottom(true);
 }
 function replaceOutput(name_header, content) {
     hideTips();

pygpt_net/core/render/web/pid.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 import io
@@ -43,7 +43,8 @@ class PidData:
 
     @buffer.setter
     def buffer(self, value: str):
-        self._buffer
+        self._buffer.seek(0)
+        self._buffer.truncate(0)
         if value:
             self._buffer.write(value)
 
@@ -56,7 +57,8 @@ class PidData:
 
     @live_buffer.setter
     def live_buffer(self, value: str):
-        self._live_buffer
+        self._live_buffer.seek(0)
+        self._live_buffer.truncate(0)
         if value:
             self._live_buffer.write(value)
 
@@ -69,7 +71,8 @@ class PidData:
 
     @html.setter
     def html(self, value: str):
-        self._html
+        self._html.seek(0)
+        self._html.truncate(0)
         if value:
             self._html.write(value)
 
@@ -82,9 +85,26 @@ class PidData:
 
     @document.setter
     def document(self, value: str):
-        self._document
+        self._document.seek(0)
+        self._document.truncate(0)
         if value:
             self._document.write(value)
 
     def append_document(self, text: str):
-        self._document.write(text)
+        self._document.write(text)
+
+    def clear(self, all: bool = False):
+        """
+        Clear buffers and other data
+
+        :param all: If True, clear all data, otherwise only buffers
+        """
+        for buf in (self._html, self._document, self._buffer, self._live_buffer):
+            buf.seek(0)
+            buf.truncate(0)
+
+        if all:
+            self.item = None
+            self.images_appended.clear()
+            self.urls_appended.clear()
+            self.files_appended.clear()

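The new clear() centralizes what the renderer previously reset field by field: clear() empties only the StringIO-backed buffers between stream parts, while clear(all=True) also drops the tracked item and the appended images/urls/files. A condensed sketch of that split, with simplified state standing in for PidData:

    import io

    class PidState:
        """Simplified stand-in for PidData, showing clear() vs clear(all=True)."""

        def __init__(self):
            self._buffer = io.StringIO()
            self._html = io.StringIO()
            self.item = None
            self.images_appended = []
            self.urls_appended = []
            self.files_appended = []

        def clear(self, all: bool = False):
            for buf in (self._buffer, self._html):
                buf.seek(0)
                buf.truncate(0)
            if all:
                self.item = None
                self.images_appended.clear()
                self.urls_appended.clear()
                self.files_appended.clear()

    p = PidState()
    p._buffer.write("chunk")
    p.images_appended.append("img.png")

    p.clear()              # between stream parts: buffers emptied, appended assets kept
    assert p.images_appended == ["img.png"]

    p.clear(all=True)      # full output reset: everything dropped
    assert p.images_appended == []
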
pygpt_net/core/render/web/renderer.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 import html
@@ -293,10 +293,7 @@ class Renderer(BaseRenderer):
         self.append_context_item(meta, p.item)
         p.item = None
 
-        p.
-        p.live_buffer = ""  # reset live buffer
-        p.html = ""  # reset html buffer
-
+        p.clear()  # reset buffers
         try:
             self.get_output_node(meta).page().runJavaScript("endStream();")
         except Exception as e:
@@ -992,13 +989,9 @@
         """
         p = self.pids.get(pid)
         self.parser.reset()
-        p.
-        p.html = ""
+        p.clear(all=True)
         self.clear_nodes(pid)
         self.clear_chunks(pid)
-        p.images_appended.clear()
-        p.urls_appended.clear()
-        p.files_appended.clear()
         self.get_output_node_by_pid(pid).reset_current_content()
         self.reset_names_by_pid(pid)
         self.prev_chunk_replace = False
@@ -1392,6 +1385,7 @@
             return
         p = self.pids[pid]
         html = self.body.get_html(pid)
+        p.clear(all=True)
        p.loaded = False
        p.document = html
        node = self.get_output_node_by_pid(pid)

pygpt_net/data/config/config.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.
-        "app.version": "2.
-        "updated_at": "2025-08-
+        "version": "2.6.0",
+        "app.version": "2.6.0",
+        "updated_at": "2025-08-13T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],

pygpt_net/data/css/style.dark.css
CHANGED
@@ -118,4 +118,17 @@ QLineEdit {{
 }}
 QComboBox::item:checked {{
     font-weight: bold;
+}}
+QCalendarWidget QAbstractItemView::item:selected:focus,
+QCalendarWidget QAbstractItemView::item:selected {{
+    background: #3a4045;
+}}
+QCalendarWidget QAbstractItemView::item:hover {{
+    background: #3a4045;
+}}
+.file-explorer QHeaderView::section {{
+    background: #262a2e;
+    padding-top: 6px;
+    padding-bottom: 2px;
+    height: 20px;
 }}

pygpt_net/data/css/style.light.css
CHANGED
@@ -228,4 +228,33 @@ QLineEdit {{
 }}
 QComboBox::item:checked {{
     font-weight: bold;
+}}
+
+QScrollBar:vertical {{
+    width: 6px;
+}}
+QCalendarWidget QToolButton,
+QCalendarWidget QToolButton:hover {{
+    border: none;
+}}
+QCalendarWidget QAbstractItemView::item:selected:focus,
+QCalendarWidget QAbstractItemView::item:selected {{
+    background: #ffffd4;
+}}
+QCalendarWidget QAbstractItemView::item:hover {{
+    background: #f6f6f6;
+}}
+.file-explorer QTreeView,
+.file-explorer QHeaderView::section,
+.file-explorer QTreeView::branch,
+.file-explorer QTreeView::branch:open {{
+    background: #fff;
+}}
+.file-explorer QHeaderView::section {{
+    padding-top: 6px;
+    padding-bottom: 2px;
+    height: 20px;
+}}
+QMenu::indicator {{
+    border: 1px solid gray;
 }}

pygpt_net/data/icon.ico
CHANGED
Binary file

pygpt_net/data/icon.png
CHANGED
Binary file

pygpt_net/data/locale/locale.de.ini
CHANGED
@@ -1012,7 +1012,7 @@ settings.agent.llama.verbose = Ausführlich (Protokollausgabe in die Konsole)
 settings.agent.mode = Interner Modus für Agenten
 settings.agent.mode.desc = Interner Modus für die Verwendung im Agentenmodus
 settings.agent.openai.response.split = Agentenantworten aufteilen
-settings.agent.openai.response.split.desc = Agentenantworten in getrennte Kontextelemente im OpenAI-Agentenmodus aufteilen.
+settings.agent.openai.response.split.desc = Agentenantworten in getrennte Kontextelemente im OpenAI-Agentenmodus aufteilen.
 settings.api_azure_endpoint = API-Endpunkt
 settings.api_azure_endpoint.desc = Azure OpenAI API-Endpunkt, https://<Ihr-Ressourcenname>.openai.azure.com/
 settings.api_azure_version = OpenAI API-Version

pygpt_net/data/locale/locale.en.ini
CHANGED
@@ -1018,7 +1018,7 @@ settings.agent.llama.verbose = Verbose (log output to console)
 settings.agent.mode = Sub-mode for agents
 settings.agent.mode.desc = Sub-mode to use in Agent (Autonomous) mode
 settings.agent.openai.response.split = Split response messages
-settings.agent.openai.response.split.desc = Split response messages to separated context items in OpenAI Agents mode.
+settings.agent.openai.response.split.desc = Split response messages to separated context items in OpenAI Agents mode.
 settings.api_azure_endpoint = API Endpoint
 settings.api_azure_endpoint.desc = Azure OpenAI API endpoint, https://<your-resource-name>.openai.azure.com/
 settings.api_azure_version = OpenAI API version

pygpt_net/data/locale/locale.es.ini
CHANGED
@@ -1013,7 +1013,7 @@ settings.agent.llama.verbose = Verboso (salida de registro a la consola)
 settings.agent.mode = Modo interno para agentes
 settings.agent.mode.desc = Modo interno para usar en modo Agente
 settings.agent.openai.response.split = Dividir respuestas de agentes
-settings.agent.openai.response.split.desc = Dividir respuestas de agentes en elementos de contexto separados en el modo Agentes de OpenAI.
+settings.agent.openai.response.split.desc = Dividir respuestas de agentes en elementos de contexto separados en el modo Agentes de OpenAI.
 settings.api_azure_endpoint = Punto final de la API
 settings.api_azure_endpoint.desc = Punto final de la API de Azure OpenAI, https://<tu-nombre-de-recurso>.openai.azure.com/
 settings.api_azure_version = Versión de la API de OpenAI

pygpt_net/data/locale/locale.fr.ini
CHANGED
@@ -1012,7 +1012,7 @@ settings.agent.llama.verbose = Verbeux (sortie du journal vers la console)
 settings.agent.mode = Mode interne pour les agents
 settings.agent.mode.desc = Mode interne à utiliser en mode Agent
 settings.agent.openai.response.split = Diviser les réponses des agents
-settings.agent.openai.response.split.desc = Diviser les réponses des agents en éléments de contexte séparés dans le mode Agents OpenAI.
+settings.agent.openai.response.split.desc = Diviser les réponses des agents en éléments de contexte séparés dans le mode Agents OpenAI.
 settings.api_azure_endpoint = Point de terminaison de l'API
 settings.api_azure_endpoint.desc = Point de terminaison de l'API Azure OpenAI, https://<votre-nom-de-ressource>.openai.azure.com/
 settings.api_azure_version = Version de l'API OpenAI

pygpt_net/data/locale/locale.it.ini
CHANGED
@@ -1012,7 +1012,7 @@ settings.agent.llama.verbose = Verboso (output del log sulla console)
 settings.agent.mode = Modalità interna per agenti
 settings.agent.mode.desc = Modalità interna da utilizzare in modalità Agente
 settings.agent.openai.response.split = Dividi le risposte degli agenti
-settings.agent.openai.response.split.desc = Dividi le risposte degli agenti in elementi di contesto separati nella modalità Agenti OpenAI.
+settings.agent.openai.response.split.desc = Dividi le risposte degli agenti in elementi di contesto separati nella modalità Agenti OpenAI.
 settings.api_azure_endpoint = Endpoint API
 settings.api_azure_endpoint.desc = Endpoint API Azure OpenAI, https://<il-tuo-nome-risorsa>.openai.azure.com/
 settings.api_azure_version = Versione API OpenAI

pygpt_net/data/locale/locale.pl.ini
CHANGED
@@ -1015,7 +1015,7 @@ settings.agent.llama.verbose = Szczegółowy (logowanie do konsoli)
 settings.agent.mode = Tryb wewnętrzny dla agentów
 settings.agent.mode.desc = Tryb wewnętrzny do użycia w trybie Agenta
 settings.agent.openai.response.split = Podziel odpowiedzi agentów
-settings.agent.openai.response.split.desc = Podziel odpowiedzi agentów na oddzielne elementy kontekstu w trybie Agentów OpenAI.
+settings.agent.openai.response.split.desc = Podziel odpowiedzi agentów na oddzielne elementy kontekstu w trybie Agentów OpenAI.
 settings.api_azure_endpoint = Punkt końcowy API
 settings.api_azure_endpoint.desc = Punkt końcowy Azure OpenAI API, https://<twoja-nazwa-zasobu>.openai.azure.com/
 settings.api_azure_version = Wersja API OpenAI

pygpt_net/data/locale/locale.uk.ini
CHANGED
@@ -1012,7 +1012,7 @@ settings.agent.llama.verbose = Докладно (вивід журналу в к
 settings.agent.mode = Внутрішній режим для агентів
 settings.agent.mode.desc = Внутрішній режим для використання в режимі Агента
 settings.agent.openai.response.split = Розділити відповіді агентів
-settings.agent.openai.response.split.desc = Розділити відповіді агентів на окремі елементи контексту в режимі агентів OpenAI.
+settings.agent.openai.response.split.desc = Розділити відповіді агентів на окремі елементи контексту в режимі агентів OpenAI.
 settings.api_azure_endpoint = Кінцева точка API
 settings.api_azure_endpoint.desc = Кінцева точка Azure OpenAI API, https://<ваше-імена-ресурсу>.openai.azure.com/
 settings.api_azure_version = Версія API OpenAI

pygpt_net/data/locale/locale.zh.ini
CHANGED
@@ -1012,7 +1012,7 @@ settings.agent.llama.verbose = 详细(日志输出到控制台)
 settings.agent.mode = 代理的内部模式
 settings.agent.mode.desc = 在代理模式下使用的子模式
 settings.agent.openai.response.split = 拆分代理响应
-settings.agent.openai.response.split.desc = 在 OpenAI
+settings.agent.openai.response.split.desc = 在 OpenAI 代理模式下将代理响应拆分为独立的上下文项。
 settings.api_azure_endpoint = API 端点
 settings.api_azure_endpoint.desc = Azure OpenAI API 端点,https://<您的资源名称>.openai.azure.com/
 settings.api_azure_version = OpenAI API 版本

pygpt_net/provider/agents/llama_index/code_act.py
CHANGED
@@ -6,14 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Dict, Any
 
-# from llama_index.core.agent.workflow import CodeActAgent as Agent
-from .codeact_agent_custom import DEFAULT_CODE_ACT_PROMPT, CodeActAgent as Agent  # <-- custom version with tools
-
 from pygpt_net.core.types import (
     AGENT_MODE_WORKFLOW,
     AGENT_TYPE_LLAMA,
@@ -37,6 +34,10 @@ class CodeActAgent(BaseAgent):
         :param kwargs: keyword arguments
         :return: Agent provider instance
         """
+        # from llama_index.core.agent.workflow import CodeActAgent as Agent
+        from .codeact_agent_custom import DEFAULT_CODE_ACT_PROMPT, \
+            CodeActAgent as Agent  # <-- custom version with tools
+
         tools = kwargs.get("plugin_tools", {})
         specs = kwargs.get("plugin_specs", [])
         retriever_tool = kwargs.get("retriever_tools", None)

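This and the following agent-provider diffs implement the "Optimized imports" changelog entry: heavy llama_index imports move from module level into get_agent(), so they load only when an agent is actually built. A generic sketch of the deferred-import pattern (wave is just a stand-in for a heavy dependency):

    import sys

    class LazyProvider:
        """Deferred-import pattern: heavy dependencies load on first use, not at import time."""

        def get_agent(self, **kwargs):
            # Imported inside the method instead of at module level; Python caches
            # the module in sys.modules after the first call, so repeated calls
            # pay nothing extra.
            import wave  # stand-in for a heavy framework import such as llama_index
            return wave.__name__, kwargs.get("tools", [])

    print("wave" in sys.modules)   # typically False: nothing loaded at startup
    provider = LazyProvider()
    provider.get_agent(tools=[])
    print("wave" in sys.modules)   # True: loaded only when the agent was built
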
pygpt_net/provider/agents/llama_index/openai.py
CHANGED
@@ -6,13 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Dict, Any
 
-from llama_index.agent.openai import OpenAIAgent as Agent
-
 from pygpt_net.core.types import (
     AGENT_MODE_STEP,
     AGENT_TYPE_LLAMA,
@@ -36,6 +34,8 @@ class OpenAIAgent(BaseAgent):
         :param kwargs: keyword arguments
         :return: Agent provider instance
         """
+        from llama_index.agent.openai import OpenAIAgent as Agent
+
         tools = kwargs.get("tools", [])
         verbose = kwargs.get("verbose", False)
         llm = kwargs.get("llm", None)

pygpt_net/provider/agents/llama_index/openai_assistant.py
CHANGED
@@ -6,13 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Dict, Any
 
-from llama_index.agent.openai import OpenAIAssistantAgent as Agent
-
 from pygpt_net.core.types import (
     AGENT_MODE_ASSISTANT,
     AGENT_TYPE_LLAMA,
@@ -38,6 +36,8 @@ class OpenAIAssistantAgent(BaseAgent):
         :param kwargs: keyword arguments
         :return: Agent provider instance
         """
+        from llama_index.agent.openai import OpenAIAssistantAgent as Agent
+
         context = kwargs.get("context", BridgeContext())
         tools = kwargs.get("tools", [])
         verbose = kwargs.get("verbose", False)

pygpt_net/provider/agents/llama_index/planner.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Dict, Any
@@ -15,11 +15,6 @@ from pygpt_net.core.types import (
     AGENT_MODE_PLAN,
     AGENT_TYPE_LLAMA,
 )
-from llama_index.core.agent import (
-    StructuredPlannerAgent,
-    FunctionCallingAgentWorker,
-    ReActAgentWorker,
-)
 
 from ..base import BaseAgent
 
@@ -39,6 +34,11 @@ class PlannerAgent(BaseAgent):
         :param kwargs: keyword arguments
         :return: Agent provider instance
         """
+        from llama_index.core.agent import (
+            StructuredPlannerAgent,
+            FunctionCallingAgentWorker,
+        )
+
         tools = kwargs.get("tools", [])
         verbose = kwargs.get("verbose", False)
         llm = kwargs.get("llm", None)

pygpt_net/provider/agents/llama_index/react.py
CHANGED
@@ -6,18 +6,12 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Dict, Any
 
-from llama_index.core.agent import ReActAgent as Agent
-from llama_index.core.agent.react_multimodal.step import (
-    MultimodalReActAgentWorker,
-)
-
 from pygpt_net.core.types import (
-    MODE_VISION,
     AGENT_TYPE_LLAMA,
 )
 
@@ -43,6 +37,8 @@ class ReactAgent(BaseAgent):
         :param kwargs: keyword arguments
         :return: Agent provider instance
         """
+        from llama_index.core.agent import ReActAgent as Agent
+
         tools = kwargs.get("tools", [])
         verbose = kwargs.get("verbose", False)
         llm = kwargs.get("llm", None)

pygpt_net/provider/agents/llama_index/react_workflow.py
CHANGED
@@ -6,18 +6,12 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.12 19:00:00 #
 # ================================================== #
 
 from typing import Dict, Any
 
-from llama_index.core.agent.workflow import ReActAgent as Agent
-from llama_index.core.agent.react_multimodal.step import (
-    MultimodalReActAgentWorker,
-)
-
 from pygpt_net.core.types import (
-    MODE_VISION,
     AGENT_TYPE_LLAMA,
 )
 from pygpt_net.core.types import (
@@ -42,6 +36,9 @@ class ReactWorkflowAgent(BaseAgent):
         :param kwargs: keyword arguments
         :return: Agent provider instance
         """
+
+        from llama_index.core.agent.workflow import ReActAgent as Agent
+
         tools = kwargs.get("tools", [])
         verbose = kwargs.get("verbose", False)
         llm = kwargs.get("llm", None)