pygpt-net 2.6.0.post2__py3-none-any.whl → 2.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +8 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +27 -9
- pygpt_net/controller/chat/response.py +10 -4
- pygpt_net/controller/chat/stream.py +40 -2
- pygpt_net/controller/model/editor.py +45 -4
- pygpt_net/controller/plugins/plugins.py +25 -0
- pygpt_net/controller/presets/editor.py +100 -100
- pygpt_net/controller/presets/experts.py +20 -1
- pygpt_net/controller/presets/presets.py +5 -4
- pygpt_net/controller/ui/mode.py +17 -66
- pygpt_net/core/agents/provider.py +2 -1
- pygpt_net/core/agents/runner.py +123 -9
- pygpt_net/core/agents/runners/helpers.py +3 -2
- pygpt_net/core/agents/runners/llama_workflow.py +176 -22
- pygpt_net/core/agents/runners/loop.py +22 -13
- pygpt_net/core/experts/experts.py +19 -25
- pygpt_net/core/idx/chat.py +24 -34
- pygpt_net/core/idx/response.py +5 -2
- pygpt_net/core/locale/locale.py +73 -45
- pygpt_net/core/render/web/body.py +152 -207
- pygpt_net/core/render/web/renderer.py +4 -2
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/locale/locale.de.ini +12 -8
- pygpt_net/data/locale/locale.en.ini +12 -8
- pygpt_net/data/locale/locale.es.ini +12 -8
- pygpt_net/data/locale/locale.fr.ini +12 -8
- pygpt_net/data/locale/locale.it.ini +12 -8
- pygpt_net/data/locale/locale.pl.ini +12 -8
- pygpt_net/data/locale/locale.uk.ini +12 -8
- pygpt_net/data/locale/locale.zh.ini +12 -8
- pygpt_net/item/ctx.py +2 -1
- pygpt_net/plugin/base/plugin.py +35 -3
- pygpt_net/plugin/bitbucket/__init__.py +12 -0
- pygpt_net/plugin/bitbucket/config.py +267 -0
- pygpt_net/plugin/bitbucket/plugin.py +125 -0
- pygpt_net/plugin/bitbucket/worker.py +569 -0
- pygpt_net/plugin/cmd_files/worker.py +19 -16
- pygpt_net/plugin/facebook/__init__.py +12 -0
- pygpt_net/plugin/facebook/config.py +359 -0
- pygpt_net/plugin/facebook/plugin.py +114 -0
- pygpt_net/plugin/facebook/worker.py +698 -0
- pygpt_net/plugin/github/__init__.py +12 -0
- pygpt_net/plugin/github/config.py +441 -0
- pygpt_net/plugin/github/plugin.py +124 -0
- pygpt_net/plugin/github/worker.py +674 -0
- pygpt_net/plugin/google/__init__.py +12 -0
- pygpt_net/plugin/google/config.py +367 -0
- pygpt_net/plugin/google/plugin.py +126 -0
- pygpt_net/plugin/google/worker.py +826 -0
- pygpt_net/plugin/slack/__init__.py +12 -0
- pygpt_net/plugin/slack/config.py +349 -0
- pygpt_net/plugin/slack/plugin.py +116 -0
- pygpt_net/plugin/slack/worker.py +639 -0
- pygpt_net/plugin/telegram/__init__.py +12 -0
- pygpt_net/plugin/telegram/config.py +308 -0
- pygpt_net/plugin/telegram/plugin.py +118 -0
- pygpt_net/plugin/telegram/worker.py +563 -0
- pygpt_net/plugin/twitter/__init__.py +12 -0
- pygpt_net/plugin/twitter/config.py +491 -0
- pygpt_net/plugin/twitter/plugin.py +126 -0
- pygpt_net/plugin/twitter/worker.py +837 -0
- pygpt_net/provider/agents/base.py +4 -1
- pygpt_net/provider/agents/llama_index/codeact_workflow.py +95 -0
- pygpt_net/provider/agents/llama_index/legacy/__init__.py +0 -0
- pygpt_net/provider/agents/llama_index/{openai.py → legacy/openai.py} +2 -2
- pygpt_net/provider/agents/llama_index/{openai_assistant.py → legacy/openai_assistant.py} +37 -5
- pygpt_net/provider/agents/llama_index/{planner.py → legacy/planner.py} +3 -3
- pygpt_net/provider/agents/llama_index/{react.py → legacy/react.py} +3 -3
- pygpt_net/provider/agents/llama_index/openai_workflow.py +52 -0
- pygpt_net/provider/agents/llama_index/planner_workflow.py +115 -0
- pygpt_net/provider/agents/llama_index/react_workflow.py +6 -4
- pygpt_net/provider/agents/llama_index/workflow/__init__.py +0 -0
- pygpt_net/provider/agents/llama_index/{codeact_agent_custom.py → workflow/codeact.py} +124 -8
- pygpt_net/provider/agents/llama_index/workflow/events.py +24 -0
- pygpt_net/provider/agents/llama_index/workflow/openai.py +634 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +601 -0
- pygpt_net/provider/agents/openai/agent.py +1 -0
- pygpt_net/provider/agents/openai/agent_b2b.py +2 -0
- pygpt_net/provider/agents/openai/agent_planner.py +1 -0
- pygpt_net/provider/agents/openai/agent_with_experts.py +1 -0
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +1 -0
- pygpt_net/provider/agents/openai/agent_with_feedback.py +1 -0
- pygpt_net/provider/agents/openai/evolve.py +1 -0
- pygpt_net/provider/core/preset/patch.py +11 -17
- pygpt_net/ui/base/config_dialog.py +4 -0
- pygpt_net/ui/dialog/preset.py +34 -77
- pygpt_net/ui/layout/toolbox/presets.py +2 -2
- pygpt_net/ui/main.py +3 -1
- pygpt_net/ui/widget/lists/experts.py +3 -2
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/METADATA +155 -4
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/RECORD +96 -62
- pygpt_net/data/config/presets/agent_react_workflow.json +0 -34
- pygpt_net/provider/agents/llama_index/code_act.py +0 -58
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/entry_points.txt +0 -0
pygpt_net/core/agents/runner.py
CHANGED
@@ -6,11 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.14 13:00:00 #
 # ================================================== #

 import asyncio
-from typing import Optional, Dict, Any
+from typing import Optional, Dict, Any, Union

 from llama_index.core.base.llms.types import ChatMessage, MessageRole

@@ -24,6 +24,8 @@ from pygpt_net.core.types import (
     AGENT_MODE_OPENAI,
 )

+from pygpt_net.item.ctx import CtxItem
+
 from .runners.llama_assistant import LlamaAssistant
 from .runners.llama_plan import LlamaPlan
 from .runners.llama_steps import LlamaSteps
@@ -33,6 +35,11 @@ from .runners.helpers import Helpers
 from .runners.loop import Loop

 class Runner:
+
+    APPEND_SYSTEM_PROMPT_TO_MSG = [
+        "react",  # llama-index
+    ]
+
     def __init__(self, window=None):
         """
         Agent runner
@@ -80,8 +87,9 @@

             # prepare agent
             model = context.model
-            vector_store_idx = extra.get("agent_idx", None)
+            vector_store_idx = extra.get("agent_idx", None)
             system_prompt = context.system_prompt
+            preset = context.preset
             max_steps = self.window.core.config.get("agent.llama.steps", 10)
             is_stream = self.window.core.config.get("stream", False)
             is_cmd = self.window.core.command.is_cmd(inline=False)
@@ -89,6 +97,10 @@
             llm = self.window.core.idx.llm.get(model, stream=False)
             workdir = self.window.core.config.get_workdir_prefix()

+            # vector store idx from preset
+            if preset:
+                vector_store_idx = preset.idx
+
             # tools
             self.window.core.agents.tools.context = context
             self.window.core.agents.tools.agent_idx = vector_store_idx
@@ -107,12 +119,7 @@
                 tools = []

             # append system prompt
-            if agent_id
-                "openai_agent_base",  # openai-agents
-                "openai_agent_experts",  # openai-agents
-                "openai_assistant",  # llama-index
-                "code_act",  # llama-index
-            ]:
+            if agent_id in self.APPEND_SYSTEM_PROMPT_TO_MSG:
                 if system_prompt:
                     msg = ChatMessage(
                         role=MessageRole.SYSTEM,
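
The change above swaps a hard-coded provider list for the class-level `APPEND_SYSTEM_PROMPT_TO_MSG` constant: providers named there receive the system prompt as a leading chat message rather than only via the `system_prompt` kwarg. A minimal sketch of the resulting guard (the standalone helper name is illustrative, not part of the package):

```python
from llama_index.core.base.llms.types import ChatMessage, MessageRole

APPEND_SYSTEM_PROMPT_TO_MSG = [
    "react",  # llama-index
]

def maybe_inject_system_prompt(agent_id: str, system_prompt: str, history: list) -> list:
    # for providers listed above, prepend the system prompt to the chat history
    if agent_id in APPEND_SYSTEM_PROMPT_TO_MSG and system_prompt:
        history.insert(0, ChatMessage(role=MessageRole.SYSTEM, content=system_prompt))
    return history
```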
@@ -174,10 +181,117 @@
                 return asyncio.run(self.openai_workflow.run(**kwargs))

         except Exception as e:
+            print("Error in agent runner:", e)
             self.window.core.debug.error(e)
             self.last_error = e
             return False

+    def call_once(
+            self,
+            context: BridgeContext,
+            extra: Dict[str, Any],
+            signals: BridgeSignals
+    ) -> Union[CtxItem, bool, None]:
+        """
+        Call an agent once (quick call to the agent)
+
+        :param context: BridgeContext
+        :param extra: extra data
+        :param signals: BridgeSignals
+        :return: CtxItem if success, True if stopped, None on error
+        """
+        if self.window.controller.kernel.stopped():
+            return True  # abort if stopped
+
+        agent_id = extra.get("agent_provider", "openai")
+        verbose = self.window.core.config.get("agent.llama.verbose", False)
+
+        try:
+            # prepare input ctx
+            ctx = context.ctx
+            ctx.extra["agent_input"] = True  # mark as user input
+            ctx.extra["agent_output"] = True  # mark as agent output
+            ctx.agent_call = True  # disables reply from plugin commands
+            prompt = context.prompt
+
+            # prepare agent
+            model = context.model
+            vector_store_idx = extra.get("agent_idx", None)
+            system_prompt = context.system_prompt
+            max_steps = self.window.core.config.get("agent.llama.steps", 10)
+            is_cmd = self.window.core.command.is_cmd(inline=False)
+            llm = self.window.core.idx.llm.get(model, stream=False)
+            workdir = self.window.core.config.get_workdir_prefix()
+
+            # tools
+            self.window.core.agents.tools.context = context
+            self.window.core.agents.tools.agent_idx = vector_store_idx
+
+            if "agent_tools" in extra:
+                tools = extra["agent_tools"]  # use tools from extra if provided
+            else:
+                tools = self.window.core.agents.tools.prepare(context, extra, force=True)
+
+            if "agent_history" in extra:
+                history = extra["agent_history"]
+            else:
+                history = self.window.core.agents.memory.prepare(context)
+
+            # disable tools if cmd is not enabled
+            if not is_cmd:
+                tools = []
+
+            # append system prompt
+            if agent_id in self.APPEND_SYSTEM_PROMPT_TO_MSG:
+                if system_prompt:
+                    msg = ChatMessage(
+                        role=MessageRole.SYSTEM,
+                        content=system_prompt,
+                    )
+                    history.insert(0, msg)
+
+            agent_kwargs = {
+                "context": context,
+                "tools": tools,
+                "llm": llm,
+                "model": model,
+                "chat_history": history,
+                "max_iterations": max_steps,
+                "verbose": verbose,
+                "system_prompt": system_prompt,
+                "are_commands": is_cmd,
+                "workdir": workdir,
+                "preset": context.preset if context else None,
+            }
+
+            if self.window.core.agents.provider.has(agent_id):
+                provider = self.window.core.agents.provider.get(agent_id)
+                agent = provider.get_agent(self.window, agent_kwargs)
+                if verbose:
+                    print("Using Agent: " + str(agent_id) + ", model: " + str(model.id))
+            else:
+                raise Exception("Agent not found: " + str(agent_id))
+
+            # run agent and return result
+            mode = provider.get_mode()
+            kwargs = {
+                "agent": agent,
+                "ctx": ctx,
+                "prompt": prompt,
+                "signals": signals,
+                "verbose": verbose,
+                "history": history,
+                "llm": llm,
+            }
+            # TODO: add support for other modes
+            if mode == AGENT_MODE_WORKFLOW:
+                return asyncio.run(self.llama_workflow.run_once(**kwargs))
+
+        except Exception as e:
+            self.window.core.debug.error(e)
+            self.last_error = e
+            return None
+
     def get_error(self) -> Optional[Exception]:
         """
         Get last error
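
The new `call_once()` entry point gives callers a synchronous, one-shot agent invocation that returns the response `CtxItem` directly; the evaluator loop and the expert worker below are its first users. A hedged usage sketch, with the provider id and `extra` keys taken from the diffs below (`window`, `model`, and `tools` are assumed to be already resolved):

```python
from pygpt_net.core.bridge.context import BridgeContext
from pygpt_net.item.ctx import CtxItem

ctx = CtxItem()
bridge_context = BridgeContext(
    ctx=ctx,
    model=model,                  # resolved model item (assumed)
    prompt="Evaluate the last answer",
    stream=False,
)
extra = {
    "agent_provider": "react",    # route to the React workflow provider
    "agent_tools": tools,         # optional: skip tool preparation
}
result = window.core.agents.runner.call_once(bridge_context, extra, signals=None)
if result is True:
    pass                          # kernel was stopped
elif result is None:
    print(window.core.agents.runner.get_error())  # error path
else:
    print(result.output)          # CtxItem with the agent's output
```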
pygpt_net/core/agents/runners/helpers.py
CHANGED

@@ -6,12 +6,12 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.14 01:00:00 #
 # ================================================== #

 import copy
 import re
-import
+import time
 from typing import Optional, Tuple

 from pygpt_net.core.bridge.context import BridgeContext
@@ -82,6 +82,7 @@ class Helpers:
         # ctx.attachments = from_ctx.attachments  # copy from parent if appended from plugins
         # ctx.files = from_ctx.files  # copy from parent if appended from plugins
         ctx.extra = from_ctx.extra.copy()  # copy extra data
+        ctx.output_timestamp = int(time.time())  # set output timestamp
         return ctx

     def send_stream(
pygpt_net/core/agents/runners/llama_workflow.py
CHANGED

@@ -6,11 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.14 01:00:00 #
 # ================================================== #

 import re
-from typing import Optional, Any, List
+from typing import Optional, Any, List, Union

 from llama_index.core.workflow import Context
 from llama_index.core.agent.workflow import (
@@ -19,9 +19,12 @@ from llama_index.core.agent.workflow import (
     AgentStream,
     AgentOutput,
 )
+from workflows.errors import WorkflowCancelledByUser
+
 from pygpt_net.core.bridge.worker import BridgeSignals
 from pygpt_net.core.events import KernelEvent
 from pygpt_net.item.ctx import CtxItem
+from pygpt_net.provider.agents.llama_index.workflow.events import StepEvent

 from .base import BaseRunner

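`StepEvent` comes from the new `workflow/events.py` module (+24 lines in the file list). Its definition is not shown in this diff; a plausible shape, inferred only from how the runner reads it below (`event.name`, `event.index`, `event.total`, `event.meta`):

```python
# Inferred sketch, not the actual pygpt_net source.
from typing import Any, Optional
from llama_index.core.workflow import Event

class StepEvent(Event):
    name: str                    # step label emitted by the workflow
    index: int                   # current step number
    total: int                   # total planned steps
    meta: Optional[Any] = None   # provider-specific details
```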
@@ -64,22 +67,103 @@ class LlamaWorkflow(BaseRunner):
         agent_ctx = Context(agent)
         memory = self.window.core.idx.chat.get_memory_buffer(history, llm)
         self.set_busy(signals)
-        ctx = await self.run_agent(
-            agent=agent,
-            ctx=agent_ctx,
-            query=prompt,
-            memory=memory,
-            verbose=verbose,
-            item_ctx=ctx,
-            signals=signals,
-        )
+
+        try:
+            ctx = await self.run_agent(
+                agent=agent,
+                ctx=agent_ctx,
+                query=prompt,
+                memory=memory,
+                verbose=verbose,
+                item_ctx=ctx,
+                signals=signals,
+                use_partials=self.window.core.config.get("agent.openai.response.split", True)
+            )
+        except WorkflowCancelledByUser:
+            print("\n\n[STOP] Workflow stopped by user.")
+        except Exception as e:
+            self.window.core.debug.log(f"Error running agent workflow: {e}")
+            ctx.extra["error"] = str(e)
+            self.set_idle(signals)
+            return False
+
+        if ctx.partial:
+            ctx.partial = False  # reset partial flag
+
+        response_ctx = self.make_response(ctx)
+        self.end_stream(response_ctx, signals)
+        self.send_response(response_ctx, signals, KernelEvent.APPEND_DATA)  # send response
+
+        self.set_idle(signals)
+        return True
+
+    async def run_once(
+            self,
+            agent: Any,
+            ctx: CtxItem,
+            prompt: str,
+            signals: BridgeSignals,
+            verbose: bool = False,
+            history: List[CtxItem] = None,
+            llm: Any = None,
+    ) -> Union[CtxItem, None]:
+        """
+        Run agent workflow once
+
+        :param agent: Agent instance
+        :param ctx: Input context
+        :param prompt: input text
+        :param signals: BridgeSignals
+        :param verbose: verbose mode
+        :param history: chat history
+        :param llm: LLM instance
+        :return: CtxItem on success, None on error
+        """
+        if self.is_stopped():
+            return None  # abort if stopped
+
+        memory = self.window.core.idx.chat.get_memory_buffer(history, llm)
+        agent_ctx = Context(agent)
+        try:
+            ctx = await self.run_agent(
+                agent=agent,
+                ctx=agent_ctx,
+                query=prompt,
+                memory=memory,
+                verbose=verbose,
+                item_ctx=ctx,
+                signals=signals,
+                use_partials=False,  # do not split output into partial ctx items
+            )
+        except WorkflowCancelledByUser:
+            print("\n\n[STOP] Workflow stopped by user.")
+        except Exception as e:
+            self.window.core.debug.log(f"Error running agent workflow: {e}")
+            ctx.extra["error"] = str(e)
+            return None
+
+        if ctx.agent_final_response:
+            ctx.output = ctx.agent_final_response  # set output to current context
+        else:
+            ctx.output = ctx.live_output
+
+        return ctx
+
+    def make_response(
+            self,
+            ctx: CtxItem
+    ) -> CtxItem:
+        """
+        Create a response context item with the given input and output.
+
+        :param ctx: CtxItem - the context item to use as a base
+        """
         response_ctx = self.add_ctx(ctx, with_tool_outputs=True)
-        response_ctx.set_input("
+        response_ctx.set_input("")

         prev_output = ctx.live_output
-        # remove all <execute>...</execute>
         if prev_output:
-            prev_output =
+            prev_output = self.filter_output(prev_output)  # remove all <execute>...</execute>

         response_ctx.set_agent_final_response(ctx.agent_final_response)  # always set to further use
         response_ctx.set_output(prev_output)  # append from stream
@@ -94,12 +178,19 @@ class LlamaWorkflow(BaseRunner):
             self.window.core.agents.tools.append_tool_outputs(response_ctx)
         else:
             self.window.core.agents.tools.extract_tool_outputs(response_ctx)
-        self.end_stream(response_ctx, signals)

-
-
-
-
+        return response_ctx
+
+    def filter_output(self, output: str) -> str:
+        """
+        Filter output to remove unwanted tags
+
+        :param output: Output string
+        :return: Filtered output string
+        """
+        # Remove <execute>...</execute> tags
+        filtered_output = re.sub(r'<execute>.*?</execute>', '', output, flags=re.DOTALL)
+        return filtered_output

     async def run_agent(
             self,
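
The `<execute>...</execute>` blocks stripped by `filter_output` are, judging by the CodeAct files elsewhere in this release, the inline code sections emitted by the CodeAct-style workflow; `re.DOTALL` lets a block span newlines and the non-greedy `.*?` keeps separate blocks from merging. For example:

```python
import re

text = "Plan:\n<execute>\nprint('hi')\n</execute>\nDone. <execute>x = 1</execute>"
print(repr(re.sub(r'<execute>.*?</execute>', '', text, flags=re.DOTALL)))
# 'Plan:\n\nDone. '
```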
@@ -109,7 +200,9 @@
             memory,
             verbose=False,
             item_ctx: Optional[CtxItem] = None,
-            signals: Optional[BridgeSignals] = None
+            signals: Optional[BridgeSignals] = None,
+            use_partials: bool = True,
     ):
         """
         Run agent workflow
         This method runs the agent's workflow, processes tool calls, and streams events.
@@ -121,9 +214,16 @@
         :param verbose: Verbose mode (default: False)
         :param item_ctx: Optional CtxItem for additional context
         :param signals: Optional BridgeSignals for communication
+        :param use_partials: If True, use partial context items for streaming
         :return: handler for the agent workflow
         """
-        handler = agent.run(
+        handler = agent.run(
+            query,
+            ctx=ctx,
+            memory=memory,
+            verbose=verbose,
+            on_stop=self.is_stopped,
+        )
         if verbose:
             print(f"User: {query}")

@@ -134,7 +234,11 @@

         async for event in handler.stream_events():
             if self.is_stopped():
+                # persist current output on stop
+                item_ctx.output = item_ctx.live_output
+                self.window.core.ctx.update_item(item_ctx)
                 self.end_stream(item_ctx, signals)
+                await handler.cancel_run()  # cancel, will raise WorkflowCancelledByUser
                 break
             if isinstance(event, ToolCallResult):
                 output = f"\n-----------\nExecution result:\n{event.tool_output}"
@@ -155,6 +259,19 @@
                     item_ctx.stream = formatted
                     if item_ctx.stream_agent_output:
                         self.send_stream(item_ctx, signals, begin)
+            elif isinstance(event, StepEvent):
+                self.set_busy(signals)
+                if not use_partials:
+                    continue
+                if verbose:
+                    print("\n\n-----STEP-----\n\n")
+                    print(f"[{event.name}] {event.index}/{event.total} meta={event.meta}")
+                item_ctx = self.on_next_ctx(
+                    item_ctx,
+                    signals=signals,
+                    begin=begin,
+                    stream=True,
+                )
             elif isinstance(event, AgentStream):
                 if verbose:
                     print(f"{event.delta}", end="", flush=True)
@@ -171,4 +288,41 @@
         if verbose:
             print(f"\nFinal response: {answer}")

-        return
+        return item_ctx
+
+    def on_next_ctx(
+            self,
+            ctx: CtxItem,
+            signals: BridgeSignals,
+            begin: bool = False,
+            stream: bool = True,
+    ) -> CtxItem:
+        """
+        Callback for next context in cycle
+
+        :param ctx: CtxItem
+        :param signals: BridgeSignals
+        :param begin: if True, flush current output to the before-buffer and clear the current buffer
+        :param stream: is streaming enabled
+        :return: CtxItem - the next context item in the cycle
+        """
+        # finish current stream
+        ctx.stream = "\n"
+        ctx.extra["agent_output"] = True  # allow usage in history
+        ctx.output = ctx.live_output  # set output to current context
+        ctx.output = self.filter_output(ctx.output)
+        self.window.core.ctx.update_item(ctx)
+
+        if stream:
+            self.send_stream(ctx, signals, begin)
+            self.end_stream(ctx, signals)
+
+        # create and return next context item
+        next_ctx = self.add_next_ctx(ctx)
+        next_ctx.set_input("")
+        next_ctx.set_output("")
+        next_ctx.partial = True
+        next_ctx.extra["agent_output"] = True  # allow usage in history
+
+        self.send_response(next_ctx, signals, KernelEvent.APPEND_DATA)
+        return next_ctx
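
Cancellation now works in two stages: when a stop is detected inside the event loop, the runner persists whatever has streamed so far and awaits `handler.cancel_run()`; the cancellation then surfaces as `WorkflowCancelledByUser`, which `run()` and `run_once()` catch. A condensed sketch of that control flow (the helper callables stand in for the runner's own methods):

```python
from workflows.errors import WorkflowCancelledByUser

async def consume(handler, item_ctx, is_stopped, persist, end_stream):
    try:
        async for event in handler.stream_events():
            if is_stopped():
                item_ctx.output = item_ctx.live_output  # keep partial output
                persist(item_ctx)
                end_stream(item_ctx)
                await handler.cancel_run()              # triggers cancellation
                break
            # ... handle ToolCallResult / StepEvent / AgentStream events ...
    except WorkflowCancelledByUser:
        print("[STOP] Workflow stopped by user.")
```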
pygpt_net/core/agents/runners/loop.py
CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.03
+# Updated Date: 2025.08.14 03:00:00 #
 # ================================================== #

 from typing import Optional, List
@@ -51,20 +51,28 @@
         if self.is_stopped():
             return ""  # abort if stopped

-        verbose = self.window.core.config.get("agent.llama.verbose", False)
         model = self.window.core.models.get(model_name)
-
-
-
-
-
-
-
-
+        ctx = CtxItem()
+        bridge_context = BridgeContext(
+            ctx=ctx,
+            history=[],
+            model=model,
+            prompt=self.prepare_input(input),
+            stream=False,
+        )
+        extra = {
+            "agent_provider": "react",  # use React workflow provider
+            "agent_tools": tools,
         }
-
-
-
+        response_ctx = self.window.core.agents.runner.call_once(
+            context=bridge_context,
+            extra=extra,
+            signals=None,
+        )
+        if response_ctx:
+            return str(response_ctx.output)
+        else:
+            return "No response from evaluator."

     def run_next(
             self,
@@ -166,6 +174,7 @@
         context.history = self.window.core.ctx.all(meta_id=ctx.meta.id)
         context.prompt = instruction  # use instruction as prompt
         preset = self.window.controller.presets.get_current()
+        context.preset = preset
         extra = {
             "agent_idx": preset.idx,
             "agent_provider": preset.agent_provider,
pygpt_net/core/experts/experts.py
CHANGED

@@ -6,14 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.14 13:00:00 #
 # ================================================== #

 import json
 from typing import Dict, List, Optional

 from PySide6.QtCore import QRunnable, QObject, Signal, Slot
-from llama_index.core.base.llms.types import ChatMessage, MessageRole
 from llama_index.core.tools import QueryEngineTool

 from pygpt_net.core.types import (
@@ -578,7 +577,7 @@ class ExpertWorker(QRunnable):
         try:
             # get or create children (slave) meta
             slave = self.window.core.ctx.get_or_create_slave_meta(master_ctx, expert_id)
-            expert = self.window.core.experts.get_expert(expert_id)
+            expert = self.window.core.experts.get_expert(expert_id)  # preset
             reply = True
             hidden = False
             internal = False
@@ -605,7 +604,7 @@ class ExpertWorker(QRunnable):
         stream_mode = self.window.core.config.get('stream')
         verbose = self.window.core.config.get('agent.llama.verbose')
         use_agent = self.window.core.config.get('experts.use_agent', False)
-        db_idx =
+        db_idx = expert.idx  # get idx from expert preset

         mode = MODE_EXPERT  # force expert mode, mode will change in bridge

@@ -858,29 +857,24 @@ class ExpertWorker(QRunnable):
        :return: True if success, False otherwise
        """
        history = self.window.core.agents.memory.prepare(context)
-
-       msg = ChatMessage(
-           role=MessageRole.SYSTEM,
-           content=system_prompt
-       )
-       history.insert(0, msg)
-       kwargs = {
-           "context": context,
-           "tools": tools,
-           "llm": llm,
-           "chat_history": history,
-           "max_iterations": 30,
-           "verbose": verbose,
-           "system_prompt": system_prompt,
-           "are_commands": self.window.core.config.get("cmd"),
-       }
-       provider = self.window.core.agents.provider.get("planner")
-       agent = provider.get_agent(self.window, kwargs)
-       response_ctx = self.window.core.agents.runner.llama_plan.run_once(
-           agent=agent,
+       bridge_context = BridgeContext(
            ctx=ctx,
+           system_prompt=system_prompt,
+           model=context.model,
            prompt=query,
-
+           stream=False,
+           is_expert_call=True,  # mark as expert call
+       )
+       extra = {
+           "agent_provider": "react",  # use react workflow provider
+           "agent_idx": context.idx,  # index to use
+           "agent_tools": tools,  # tools to use
+           "agent_history": history,  # already prepared history
+       }
+       response_ctx = self.window.core.agents.runner.call_once(
+           context=bridge_context,
+           extra=extra,
+           signals=None,
        )
        if response_ctx:
            return str(response_ctx.output)