pygpt-net 2.6.0.post2__py3-none-any.whl → 2.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +8 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +27 -9
- pygpt_net/controller/chat/response.py +10 -4
- pygpt_net/controller/chat/stream.py +40 -2
- pygpt_net/controller/model/editor.py +45 -4
- pygpt_net/controller/plugins/plugins.py +25 -0
- pygpt_net/controller/presets/editor.py +100 -100
- pygpt_net/controller/presets/experts.py +20 -1
- pygpt_net/controller/presets/presets.py +5 -4
- pygpt_net/controller/ui/mode.py +17 -66
- pygpt_net/core/agents/provider.py +2 -1
- pygpt_net/core/agents/runner.py +123 -9
- pygpt_net/core/agents/runners/helpers.py +3 -2
- pygpt_net/core/agents/runners/llama_workflow.py +176 -22
- pygpt_net/core/agents/runners/loop.py +22 -13
- pygpt_net/core/experts/experts.py +19 -25
- pygpt_net/core/idx/chat.py +24 -34
- pygpt_net/core/idx/response.py +5 -2
- pygpt_net/core/locale/locale.py +73 -45
- pygpt_net/core/render/web/body.py +152 -207
- pygpt_net/core/render/web/renderer.py +4 -2
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/locale/locale.de.ini +12 -8
- pygpt_net/data/locale/locale.en.ini +12 -8
- pygpt_net/data/locale/locale.es.ini +12 -8
- pygpt_net/data/locale/locale.fr.ini +12 -8
- pygpt_net/data/locale/locale.it.ini +12 -8
- pygpt_net/data/locale/locale.pl.ini +12 -8
- pygpt_net/data/locale/locale.uk.ini +12 -8
- pygpt_net/data/locale/locale.zh.ini +12 -8
- pygpt_net/item/ctx.py +2 -1
- pygpt_net/plugin/base/plugin.py +35 -3
- pygpt_net/plugin/bitbucket/__init__.py +12 -0
- pygpt_net/plugin/bitbucket/config.py +267 -0
- pygpt_net/plugin/bitbucket/plugin.py +125 -0
- pygpt_net/plugin/bitbucket/worker.py +569 -0
- pygpt_net/plugin/cmd_files/worker.py +19 -16
- pygpt_net/plugin/facebook/__init__.py +12 -0
- pygpt_net/plugin/facebook/config.py +359 -0
- pygpt_net/plugin/facebook/plugin.py +114 -0
- pygpt_net/plugin/facebook/worker.py +698 -0
- pygpt_net/plugin/github/__init__.py +12 -0
- pygpt_net/plugin/github/config.py +441 -0
- pygpt_net/plugin/github/plugin.py +124 -0
- pygpt_net/plugin/github/worker.py +674 -0
- pygpt_net/plugin/google/__init__.py +12 -0
- pygpt_net/plugin/google/config.py +367 -0
- pygpt_net/plugin/google/plugin.py +126 -0
- pygpt_net/plugin/google/worker.py +826 -0
- pygpt_net/plugin/slack/__init__.py +12 -0
- pygpt_net/plugin/slack/config.py +349 -0
- pygpt_net/plugin/slack/plugin.py +116 -0
- pygpt_net/plugin/slack/worker.py +639 -0
- pygpt_net/plugin/telegram/__init__.py +12 -0
- pygpt_net/plugin/telegram/config.py +308 -0
- pygpt_net/plugin/telegram/plugin.py +118 -0
- pygpt_net/plugin/telegram/worker.py +563 -0
- pygpt_net/plugin/twitter/__init__.py +12 -0
- pygpt_net/plugin/twitter/config.py +491 -0
- pygpt_net/plugin/twitter/plugin.py +126 -0
- pygpt_net/plugin/twitter/worker.py +837 -0
- pygpt_net/provider/agents/base.py +4 -1
- pygpt_net/provider/agents/llama_index/codeact_workflow.py +95 -0
- pygpt_net/provider/agents/llama_index/legacy/__init__.py +0 -0
- pygpt_net/provider/agents/llama_index/{openai.py → legacy/openai.py} +2 -2
- pygpt_net/provider/agents/llama_index/{openai_assistant.py → legacy/openai_assistant.py} +37 -5
- pygpt_net/provider/agents/llama_index/{planner.py → legacy/planner.py} +3 -3
- pygpt_net/provider/agents/llama_index/{react.py → legacy/react.py} +3 -3
- pygpt_net/provider/agents/llama_index/openai_workflow.py +52 -0
- pygpt_net/provider/agents/llama_index/planner_workflow.py +115 -0
- pygpt_net/provider/agents/llama_index/react_workflow.py +6 -4
- pygpt_net/provider/agents/llama_index/workflow/__init__.py +0 -0
- pygpt_net/provider/agents/llama_index/{codeact_agent_custom.py → workflow/codeact.py} +124 -8
- pygpt_net/provider/agents/llama_index/workflow/events.py +24 -0
- pygpt_net/provider/agents/llama_index/workflow/openai.py +634 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +601 -0
- pygpt_net/provider/agents/openai/agent.py +1 -0
- pygpt_net/provider/agents/openai/agent_b2b.py +2 -0
- pygpt_net/provider/agents/openai/agent_planner.py +1 -0
- pygpt_net/provider/agents/openai/agent_with_experts.py +1 -0
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +1 -0
- pygpt_net/provider/agents/openai/agent_with_feedback.py +1 -0
- pygpt_net/provider/agents/openai/evolve.py +1 -0
- pygpt_net/provider/core/preset/patch.py +11 -17
- pygpt_net/ui/base/config_dialog.py +4 -0
- pygpt_net/ui/dialog/preset.py +34 -77
- pygpt_net/ui/layout/toolbox/presets.py +2 -2
- pygpt_net/ui/main.py +3 -1
- pygpt_net/ui/widget/lists/experts.py +3 -2
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/METADATA +155 -4
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/RECORD +96 -62
- pygpt_net/data/config/presets/agent_react_workflow.json +0 -34
- pygpt_net/provider/agents/llama_index/code_act.py +0 -58
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.0.post2.dist-info → pygpt_net-2.6.2.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,634 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
# ================================================== #
|
|
4
|
+
# This file is a part of PYGPT package #
|
|
5
|
+
# Website: https://pygpt.net #
|
|
6
|
+
# GitHub: https://github.com/szczyglis-dev/py-gpt #
|
|
7
|
+
# MIT License #
|
|
8
|
+
# Created By : Marcin Szczygliński #
|
|
9
|
+
# Updated Date: 2025.08.14 01:00:00 #
|
|
10
|
+
# ================================================== #
|
|
11
|
+
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
|
|
14
|
+
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple
|
|
15
|
+
import inspect
|
|
16
|
+
from pydantic import ValidationError
|
|
17
|
+
|
|
18
|
+
# LlamaIndex workflow / agent
|
|
19
|
+
from llama_index.core.workflow import (
|
|
20
|
+
Workflow,
|
|
21
|
+
Context,
|
|
22
|
+
StartEvent,
|
|
23
|
+
StopEvent,
|
|
24
|
+
Event,
|
|
25
|
+
step,
|
|
26
|
+
)
|
|
27
|
+
from llama_index.core.llms.llm import LLM
|
|
28
|
+
from llama_index.core.tools.types import BaseTool
|
|
29
|
+
|
|
30
|
+
# v12/v13 agent workflow events + agent
|
|
31
|
+
from llama_index.core.agent.workflow import (
|
|
32
|
+
FunctionAgent,
|
|
33
|
+
ToolCall,
|
|
34
|
+
ToolCallResult,
|
|
35
|
+
AgentStream,
|
|
36
|
+
AgentOutput,
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
# v12/v13 compatibility imports
|
|
40
|
+
try:
|
|
41
|
+
# v13+
|
|
42
|
+
from llama_index.core.memory import ChatMemoryBuffer
|
|
43
|
+
except Exception: # pragma: no cover
|
|
44
|
+
try:
|
|
45
|
+
# v12
|
|
46
|
+
from llama_index.memory import ChatMemoryBuffer
|
|
47
|
+
except Exception:
|
|
48
|
+
ChatMemoryBuffer = None # type: ignore
|
|
49
|
+
|
|
50
|
+
try:
|
|
51
|
+
from llama_index.core.objects.base import ObjectRetriever
|
|
52
|
+
except Exception: # pragma: no cover
|
|
53
|
+
ObjectRetriever = None # type: ignore
|
|
54
|
+
|
|
55
|
+
try:
|
|
56
|
+
from llama_index.core.settings import Settings
|
|
57
|
+
except Exception: # pragma: no cover
|
|
58
|
+
Settings = None # type: ignore
|
|
59
|
+
|
|
60
|
+
# optional: OpenAI type for hints only
|
|
61
|
+
try:
|
|
62
|
+
from llama_index.llms.openai import OpenAI # noqa: F401
|
|
63
|
+
except Exception:
|
|
64
|
+
pass
|
|
65
|
+
|
|
66
|
+
try:
|
|
67
|
+
from .events import StepEvent # local helper, same as in your Planner
|
|
68
|
+
except Exception: # pragma: no cover
|
|
69
|
+
StepEvent = None # type: ignore
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
# Hard cap on tool (function) invocations per agent run; the workflow maps this
# onto FunctionAgent.max_steps.
DEFAULT_MAX_FUNCTION_CALLS = 5

# Fallback system prompt used when the caller does not supply one.
DEFAULT_SYSTEM_PROMPT = (
    "You are an OpenAI function-calling agent. "
    "Use tools when helpful, reason step-by-step, and produce concise, correct answers."
)
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class QueryEvent(StartEvent):
    """Start event carrying the single user query that kicks off the workflow."""
    # user message text; read by OpenAIWorkflowAgent.answer()
    query: str
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
class FinalEvent(StopEvent):
    """Stop event; its ``result`` field holds the agent's final text answer."""
    pass
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def _safe_tool_name(t: BaseTool) -> str:
|
|
88
|
+
"""
|
|
89
|
+
Get a safe tool name from the BaseTool instance.
|
|
90
|
+
|
|
91
|
+
:param t: BaseTool instance
|
|
92
|
+
:return: str: Tool name or class name if name is not available
|
|
93
|
+
"""
|
|
94
|
+
try:
|
|
95
|
+
# v13 BaseTool.metadata.name
|
|
96
|
+
n = (getattr(t, "metadata", None) or {}).get("name") if isinstance(getattr(t, "metadata", None), dict) else None
|
|
97
|
+
if not n and hasattr(t, "metadata") and hasattr(t.metadata, "name"):
|
|
98
|
+
n = t.metadata.name # pydantic model
|
|
99
|
+
if not n:
|
|
100
|
+
n = getattr(t, "name", None)
|
|
101
|
+
if not n:
|
|
102
|
+
n = t.__class__.__name__
|
|
103
|
+
return str(n)
|
|
104
|
+
except Exception:
|
|
105
|
+
return t.__class__.__name__
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def _list_tool_names(tools: Sequence[BaseTool]) -> List[str]:
    """
    Resolve the display name of every tool in *tools*.

    :param tools: Sequence of BaseTool instances
    :return: List[str]: List of tool names
    """
    return list(map(_safe_tool_name, tools))
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
class OpenAIWorkflowAgent(Workflow):
    """
    Workflow-based replacement for the legacy OpenAIAgent (v12) using FunctionAgent (v12/v13).

    - memory: tries to set FunctionAgent.memory; falls back to injecting memory text into system prompt.
    - tools: accepts static list or dynamic tool_retriever (query-aware).
    - default_tool_choice: 'auto' | 'none' | '<tool_name>' -> filters visible tools for this run.
    - max_function_calls: mapped to FunctionAgent.max_steps.
    - streaming: forwards AgentStream/ToolCall/ToolCallResult/AgentOutput; emits StepEvent when available.
    """
    def __init__(
        self,
        tools: List[BaseTool],
        llm: LLM,
        memory: Optional[Any] = None,
        system_prompt: Optional[str] = None,
        prefix_messages: Optional[Sequence[Any]] = None,
        verbose: bool = False,
        max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
        default_tool_choice: str = "auto",
        tool_retriever: Optional[Any] = None,
        memory_char_limit: int = 8000,
        on_stop: Optional[Callable[[], bool]] = None,
    ):
        """
        Initialize the OpenAIWorkflowAgent.

        :param tools: List of BaseTool instances to use in the agent.
        :param llm: LLM instance to use for the agent.
        :param memory: Optional memory object to use for the agent. If provided, it will be set on FunctionAgent.memory.
        :param system_prompt: System prompt to use for the agent. If not provided, a default will be used.
        :param prefix_messages: List of messages to prepend to the system prompt for context.
        :param verbose: Verbosity flag for the agent.
        :param max_function_calls: Maximum number of function calls allowed in the agent run. This maps to FunctionAgent.max_steps.
        :param default_tool_choice: Default tool choice for the agent run. Can be 'auto', 'none', or a specific tool name.
        :param tool_retriever: Optional tool retriever to dynamically select tools based on the query.
        :param memory_char_limit: Optional character limit for the memory text representation. If set, will truncate memory text to this limit.
        :param on_stop: Optional callback function that returns a boolean indicating whether the agent should stop running.
        """
        # timeout=None: the workflow itself never times out; caller controls via on_stop
        super().__init__(timeout=None, verbose=verbose)
        self._llm = llm
        self._base_system_prompt = system_prompt or DEFAULT_SYSTEM_PROMPT
        self._prefix_messages = list(prefix_messages or [])
        self._tools = tools or []
        self._tool_retriever = tool_retriever
        self._memory = memory
        self._memory_char_limit = memory_char_limit
        # normalized lowercase so per-run filtering is case-insensitive
        self._default_tool_choice = (default_tool_choice or "auto").strip().lower()
        self._max_steps = int(max_function_calls or DEFAULT_MAX_FUNCTION_CALLS)
        self._on_stop = on_stop
        self.verbose = verbose

        # construct FunctionAgent once, will override tools/system_prompt/memory per run
        self._agent = FunctionAgent(
            name="OpenAIWorkflowAgent",
            description="Workflow-based OpenAI function-calling agent.",
            tools=self._tools,
            llm=self._llm,
            system_prompt=self._base_system_prompt,
            max_steps=self._max_steps,
        )

        # try attach memory now (can be overridden in run())
        if self._memory is not None:
            self._set_agent_memory(self._memory)

    def run(
        self,
        query: str,
        ctx: Optional[Context] = None,
        memory: Optional[Any] = None,
        verbose: Optional[bool] = None,
        **kwargs: Any,
    ):
        """
        Start the workflow answering a single user query.

        :param query: user message
        :param ctx: workflow context
        :param memory: optional memory object to use for this run
        :param verbose: override verbosity
        :return: Workflow run handler (stream_events() supported)
        """
        if verbose is not None:
            self.verbose = bool(verbose)
        if memory is not None:
            # per-run memory override; picked up by answer() via _set_agent_memory
            self._memory = memory
        # system handles rest via steps
        return super().run(ctx=ctx, query=query)

    # ---------- steps ----------

    @step
    async def answer(self, ctx: Context, ev: QueryEvent) -> FinalEvent:
        """
        Single-step: select tools -> prepare prompt/memory -> run FunctionAgent with streaming.

        :param ctx: Context for the workflow
        :param ev: QueryEvent containing the user query
        :return: FinalEvent with the last answer from the agent
        """
        self._emit_step_event(ctx, name="run", meta={"query": ev.query})

        # prepare memory + prompt
        self._set_agent_memory(self._memory)
        effective_system_prompt = self._compose_system_prompt(self._base_system_prompt, self._prefix_messages, self._memory)

        # select tools for this query
        tools_for_run, selection_reason = await self._select_tools_for_query(ev.query)

        # apply default_tool_choice filter
        tools_for_run = self._apply_default_tool_choice_filter(tools_for_run)

        # update agent config for this run; best-effort because these attributes
        # may be read-only / absent depending on llama-index version
        try:
            self._agent.system_prompt = effective_system_prompt  # type: ignore[attr-defined]
        except Exception:
            pass
        try:
            self._agent.tools = tools_for_run  # type: ignore[attr-defined]
        except Exception:
            pass

        # log tool selection
        self._emit_step_event(
            ctx,
            name="tools_selected",
            meta={
                "available": _list_tool_names(self._tools),
                "selected": _list_tool_names(tools_for_run),
                "reason": selection_reason,
                "default_tool_choice": self._default_tool_choice,
                "max_steps": self._max_steps,
            },
        )

        # run agent and stream
        last_answer = await self._run_agent_once(ctx, ev.query)
        return FinalEvent(result=last_answer or "")

    # ---------- internals ----------

    def _stopped(self) -> bool:
        """
        Check if the agent should stop running based on the provided callback.

        :return: bool: True if the agent should stop, False otherwise.
        """
        if self._on_stop:
            try:
                return bool(self._on_stop())
            except Exception:
                # a broken callback must never kill the run
                return False
        return False

    def _emit_step_event(
        self,
        ctx: Context,
        name: str,
        index: Optional[int] = None,
        total: Optional[int] = None,
        meta: Optional[dict] = None,
    ) -> None:
        """
        Emit a step event to the context stream.

        :param ctx: Context for the workflow
        :param name: Name of the step event
        :param index: Index of the step (optional)
        :param total: Total number of steps (optional)
        :param meta: Optional metadata dictionary for the step event
        """
        try:
            if StepEvent is not None:
                ctx.write_event_to_stream(StepEvent(name=name, index=index, total=total, meta=meta or {}))
                return
        except Exception:
            pass

        # Fallback: embed in AgentStream.raw for older AgentStream validators
        try:
            ctx.write_event_to_stream(
                AgentStream(
                    delta="",
                    response="",
                    current_agent_name="OpenAIWorkflowAgent",
                    tool_calls=[],
                    raw={"StepEvent": {"name": name, "index": index, "total": total, "meta": meta or {}}},
                )
            )
        except Exception:
            pass

    def _set_agent_memory(self, memory: Optional[Any]) -> None:
        """
        Set the memory for the FunctionAgent instance.

        :param memory: Optional memory object to set on the agent.
        """
        if memory is None:
            return
        try:
            # Prefer native memory on the FunctionAgent if present
            if hasattr(self._agent, "memory"):
                self._agent.memory = memory  # type: ignore[attr-defined]
        except Exception:
            # best-effort: memory text is also injected into the system prompt
            pass

    def _memory_to_text(self, memory: Any) -> str:
        """
        Convert memory to a text representation, handling various types and structures.

        :param memory: Memory object or content to convert
        :return: str: Text representation of the memory
        """
        if not memory:
            return ""
        try:
            if isinstance(memory, str):
                text = memory
            elif isinstance(memory, list):
                parts = []
                for m in memory:
                    if isinstance(m, str):
                        parts.append(m)
                    elif isinstance(m, dict) and ("content" in m or "text" in m):
                        role = m.get("role", "user")
                        content = m.get("content", m.get("text", ""))
                        parts.append(f"{role}: {content}")
                    else:
                        # chat-message-like object; attribute names vary by version
                        role = getattr(m, "role", None) or getattr(m, "sender", "user")
                        content = getattr(m, "content", None) or getattr(m, "text", "")
                        parts.append(f"{role}: {content}")
                text = "\n".join(parts)
            else:
                # opaque memory object: probe serializers, then accessors, then str()
                for attr in ("to_string", "to_str"):
                    fn = getattr(memory, attr, None)
                    if callable(fn):
                        text = fn()
                        break
                else:
                    for attr in ("get", "messages", "get_all", "dump"):
                        fn = getattr(memory, attr, None)
                        if callable(fn):
                            data = fn()
                            # recurse on whatever the accessor returned
                            text = self._memory_to_text(data)
                            break
                    else:
                        text = str(memory)
        except Exception:
            text = str(memory)

        # keep the tail: recent context is assumed most relevant — TODO confirm
        if self._memory_char_limit and len(text) > self._memory_char_limit:
            text = "...[truncated]...\n" + text[-self._memory_char_limit:]
        return text

    def _prefix_to_text(self, prefix_messages: Sequence[Any]) -> str:
        """
        Convert a sequence of prefix messages to a formatted text representation.

        :param prefix_messages: Sequence of messages to convert, can be strings or objects with 'role' and 'content' attributes.
        :return: str: Formatted text representation of the prefix messages.
        """
        if not prefix_messages:
            return ""
        parts: List[str] = []
        for m in prefix_messages:
            if isinstance(m, str):
                parts.append(m.strip())
                continue

            # chat-like
            role = getattr(m, "role", None) or getattr(m, "sender", "system")
            content = getattr(m, "content", None) or getattr(m, "text", "")
            if not content and isinstance(m, dict):
                content = m.get("content", m.get("text", ""))
            if content:
                parts.append(f"{role}: {content}")
        return "\n".join([p for p in parts if p])

    def _compose_system_prompt(
        self,
        base: str,
        prefix_messages: Sequence[Any],
        memory: Optional[Any],
    ) -> str:
        """
        Compose the system prompt for the FunctionAgent, including base prompt,
        prefix messages, and memory context.

        :param base: Base system prompt text.
        :param prefix_messages: Sequence of messages to prepend to the system prompt.
        :param memory: Optional memory object to include in the system prompt.
        :return: str: Composed system prompt text.
        """
        out = [base.strip()]
        prefix_text = self._prefix_to_text(prefix_messages)
        if prefix_text:
            out += ["", "Additional preface:", prefix_text]
        mem_text = self._memory_to_text(memory)
        if mem_text:
            out += ["", "Relevant past memory/context:", mem_text]
        return "\n".join(out).strip()

    async def _select_tools_for_query(self, query: str) -> Tuple[List[BaseTool], str]:
        """
        Select tools for the given query, either from static tools or dynamically retrieved tools.

        :param query: User query to select tools for
        :return: Tuple containing the list of selected tools and the reason for selection.
        """
        # default: use provided static tools
        selected = list(self._tools)
        reason = "static tools"

        if not self._tool_retriever:
            return selected, reason

        retriever = self._tool_retriever
        candidates: Optional[Iterable[Any]] = None

        # try a range of method names for compatibility
        for name in ("aretrieve", "aget_retrieved_objects", "retrieve", "get_retrieved_objects"):
            fn = getattr(retriever, name, None)
            if not fn:
                continue
            try:
                if inspect.iscoroutinefunction(fn):
                    candidates = await fn(query)  # type: ignore[misc]
                else:
                    candidates = fn(query)  # type: ignore[misc]
                break
            except Exception:
                candidates = None

        if candidates is None:
            return selected, "retriever failed; using static tools"

        tools: List[BaseTool] = []
        for item in candidates:
            if isinstance(item, BaseTool):
                tools.append(item)
                continue
            # common wrappers
            for attr in ("obj", "object", "tool"):
                cand = getattr(item, attr, None)
                if isinstance(cand, BaseTool):
                    tools.append(cand)
                    break

        if tools:
            selected = tools
            reason = "retrieved tools"
        else:
            reason = "retriever returned no tools; using static tools"

        return selected, reason

    def _apply_default_tool_choice_filter(self, tools: List[BaseTool]) -> List[BaseTool]:
        """
        Apply the default tool choice filter to the list of tools.

        :param tools: List of BaseTool instances to filter.
        :return: List[BaseTool]: Filtered list of tools based on the default tool choice.
        """
        choice = (self._default_tool_choice or "auto").strip().lower()
        if choice in ("auto", "", "default"):
            return tools
        if choice in ("none", "no", "off"):
            return []

        # filter by name: keep only the single named tool, if present
        wanted = choice
        filtered: List[BaseTool] = []
        for t in tools:
            name = _safe_tool_name(t).strip().lower()
            if name == wanted:
                filtered = [t]
                break
        return filtered or tools  # if not found, keep original

    async def _emit_text(
        self,
        ctx: Context,
        text: str,
        agent_name: str = "OpenAIWorkflowAgent"
    ):
        """
        Emit text to the context stream, handling validation errors gracefully.

        :param ctx: Context for the workflow
        :param text: Text to emit to the stream
        :param agent_name: Name of the agent to set in the event (default: "OpenAIWorkflowAgent")
        """
        try:
            ctx.write_event_to_stream(AgentStream(delta=text))
        except ValidationError:
            # older AgentStream models require every field to be present
            ctx.write_event_to_stream(
                AgentStream(
                    delta=text,
                    response=text,
                    current_agent_name=agent_name,
                    tool_calls=[],
                    raw={},
                )
            )

    async def _run_agent_once(self, ctx: Context, prompt: str) -> str:
        """
        Run FunctionAgent for a single user message, streaming events.

        :param ctx: Context for the workflow
        :param prompt: User message to process
        :return: Last answer from the agent (text response)
        """
        # FunctionAgent.run() kwargs differ across llama-index versions;
        # inspect the signature instead of guessing
        sig = inspect.signature(self._agent.run)
        kwargs: Dict[str, Any] = {}
        if "user_msg" in sig.parameters:
            kwargs["user_msg"] = prompt
        elif "query" in sig.parameters:
            kwargs["query"] = prompt
        if "max_steps" in sig.parameters:
            kwargs["max_steps"] = self._max_steps

        handler = self._agent.run(**kwargs)
        last_answer = ""
        has_stream = False

        async def _stream():
            nonlocal last_answer, has_stream

            async for e in handler.stream_events():
                if isinstance(e, StopEvent):
                    continue

                # external stop callback
                if self._stopped():
                    try:
                        ctx.write_event_to_stream(StopEvent())
                    except Exception:
                        pass
                    try:
                        await handler.cancel_run()
                    except Exception:
                        pass
                    return last_answer

                if isinstance(e, AgentStream):
                    # NOTE(review): empty deltas appear to be dropped (not forwarded)
                    if getattr(e, "delta", None):
                        has_stream = True
                        if not getattr(e, "current_agent_name", None):
                            try:
                                e.current_agent_name = "OpenAIWorkflowAgent"
                            except Exception:
                                pass
                        ctx.write_event_to_stream(e)
                    continue

                if isinstance(e, AgentOutput):
                    resp = getattr(e, "response", None)
                    content = self._to_text(resp).strip()
                    last_answer = content
                    # if nothing was streamed incrementally, emit the full answer once
                    if not has_stream and content:
                        ctx.write_event_to_stream(
                            AgentStream(
                                delta=content,
                                response=content,
                                current_agent_name="OpenAIWorkflowAgent",
                                tool_calls=e.tool_calls,
                                raw=e.raw,
                            )
                        )
                    continue

                if isinstance(e, (ToolCall, ToolCallResult)):
                    ctx.write_event_to_stream(e)
                    continue

                # forward any other workflow event transparently
                if isinstance(e, Event):
                    ctx.write_event_to_stream(e)

            # drain the handler so exceptions/cleanup inside the run resolve
            try:
                await handler
            except Exception:
                pass

            return last_answer

        try:
            return await _stream()
        except Exception as ex:
            await self._emit_text(ctx, f"\n`Agent run failed: {ex}`")
            return last_answer

    def _to_text(self, resp: Any) -> str:
        """
        Convert response to text, handling various types and structures.

        :param resp: Response object or content to convert
        :return: str: Text representation of the response
        """
        try:
            # "assistant: None" is the str() of an empty chat response — treat as empty
            if resp is None or str(resp) == "assistant: None":
                return ""
            msg = getattr(resp, "message", None)
            if msg is not None:
                return getattr(msg, "content", "") or ""
            c = getattr(resp, "content", None)
            if c is not None:
                if isinstance(c, list):
                    # content blocks: join their text parts
                    parts = []
                    for s in c:
                        parts.append(getattr(s, "text", s if isinstance(s, str) else str(s)))
                    return "".join(parts)
                return c if isinstance(c, str) else str(c)
            return str(resp)
        except Exception:
            return str(resp)