agno 1.7.4__py3-none-any.whl → 1.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +28 -15
- agno/app/agui/async_router.py +5 -5
- agno/app/agui/sync_router.py +5 -5
- agno/app/agui/utils.py +84 -14
- agno/app/fastapi/app.py +1 -1
- agno/app/fastapi/async_router.py +67 -16
- agno/app/fastapi/sync_router.py +80 -14
- agno/document/chunking/row.py +39 -0
- agno/document/reader/base.py +0 -7
- agno/embedder/jina.py +73 -0
- agno/knowledge/agent.py +39 -2
- agno/knowledge/combined.py +1 -1
- agno/memory/agent.py +2 -2
- agno/memory/team.py +2 -2
- agno/models/aws/bedrock.py +311 -15
- agno/models/litellm/chat.py +12 -3
- agno/models/openai/chat.py +1 -22
- agno/models/openai/responses.py +5 -5
- agno/models/portkey/__init__.py +3 -0
- agno/models/portkey/portkey.py +88 -0
- agno/models/xai/xai.py +54 -0
- agno/run/v2/workflow.py +4 -0
- agno/storage/mysql.py +1 -0
- agno/storage/postgres.py +1 -0
- agno/storage/session/v2/workflow.py +29 -5
- agno/storage/singlestore.py +4 -1
- agno/storage/sqlite.py +0 -1
- agno/team/team.py +52 -22
- agno/tools/bitbucket.py +292 -0
- agno/tools/daytona.py +411 -63
- agno/tools/decorator.py +45 -2
- agno/tools/evm.py +123 -0
- agno/tools/function.py +16 -12
- agno/tools/linkup.py +54 -0
- agno/tools/mcp.py +10 -3
- agno/tools/mem0.py +15 -2
- agno/tools/postgres.py +175 -162
- agno/utils/log.py +16 -0
- agno/utils/pprint.py +2 -0
- agno/utils/string.py +14 -0
- agno/vectordb/pgvector/pgvector.py +4 -5
- agno/vectordb/surrealdb/__init__.py +3 -0
- agno/vectordb/surrealdb/surrealdb.py +493 -0
- agno/workflow/v2/workflow.py +144 -19
- agno/workflow/workflow.py +90 -63
- {agno-1.7.4.dist-info → agno-1.7.6.dist-info}/METADATA +19 -1
- {agno-1.7.4.dist-info → agno-1.7.6.dist-info}/RECORD +51 -42
- {agno-1.7.4.dist-info → agno-1.7.6.dist-info}/WHEEL +0 -0
- {agno-1.7.4.dist-info → agno-1.7.6.dist-info}/entry_points.txt +0 -0
- {agno-1.7.4.dist-info → agno-1.7.6.dist-info}/licenses/LICENSE +0 -0
- {agno-1.7.4.dist-info → agno-1.7.6.dist-info}/top_level.txt +0 -0
agno/agent/agent.py
CHANGED
@@ -119,6 +119,8 @@ class Agent:
     session_state: Optional[Dict[str, Any]] = None
     search_previous_sessions_history: Optional[bool] = False
     num_history_sessions: Optional[int] = None
+    # If True, cache the session in memory
+    cache_session: bool = True

     # --- Agent Context ---
     # Context available for tools and prompt functions
@@ -351,6 +353,7 @@ class Agent:
         session_state: Optional[Dict[str, Any]] = None,
         search_previous_sessions_history: Optional[bool] = False,
         num_history_sessions: Optional[int] = None,
+        cache_session: bool = True,
         context: Optional[Dict[str, Any]] = None,
         add_context: bool = False,
         resolve_context: bool = True,
@@ -441,6 +444,8 @@ class Agent:
         self.search_previous_sessions_history = search_previous_sessions_history
         self.num_history_sessions = num_history_sessions

+        self.cache_session = cache_session
+
         self.context = context
         self.add_context = add_context
         self.resolve_context = resolve_context
@@ -754,9 +759,6 @@

         self._initialize_session_state(user_id=user_id, session_id=session_id)

-        # Read existing session from storage
-        self.read_from_storage(session_id=session_id)
-
         return session_id, user_id

     def _run(
@@ -996,16 +998,18 @@
         **kwargs: Any,
     ) -> Union[RunResponse, Iterator[RunResponseEvent]]:
         """Run the Agent and return the response."""
-
         session_id, user_id = self._initialize_session(
             session_id=session_id, user_id=user_id, session_state=session_state
         )

-        log_debug(f"Session ID: {session_id}", center=True)
-
         # Initialize the Agent
         self.initialize_agent()

+        # Read existing session from storage
+        self.read_from_storage(session_id=session_id)
+
+        log_debug(f"Session ID: {session_id}", center=True)
+
         # Initialize Knowledge Filters
         effective_filters = knowledge_filters
@@ -1382,6 +1386,9 @@
         # Initialize the Agent
         self.initialize_agent()

+        # Read existing session from storage
+        self.read_from_storage(session_id=session_id)
+
         effective_filters = knowledge_filters
         # When filters are passed manually
         if self.knowledge_filters or knowledge_filters:
@@ -1606,6 +1613,9 @@
             retries: The number of retries to continue the run for.
             knowledge_filters: The knowledge filters to use for the run.
         """
+        # Initialize the Agent
+        self.initialize_agent()
+
         if session_id is not None:
             self.reset_run_state()
             # Reset session state if a session_id is provided. Session name and session state will be loaded from storage.
@@ -1614,9 +1624,6 @@
             if self.session_id is not None and session_id != self.session_id:
                 self.session_state = None

-        # Initialize the Agent
-        self.initialize_agent()
-
         # Initialize Session
         # Use the default user_id and session_id when necessary
         user_id = user_id if user_id is not None else self.user_id
@@ -1996,6 +2003,9 @@
             retries: The number of retries to continue the run for.
             knowledge_filters: The knowledge filters to use for the run.
         """
+        # Initialize the Agent
+        self.initialize_agent()
+
         if session_id is not None:
             self.reset_run_state()
             # Reset session state if a session_id is provided. Session name and session state will be loaded from storage.
@@ -2004,9 +2014,6 @@
             if self.session_id is not None and session_id != self.session_id:
                 self.session_state = None

-        # Initialize the Agent
-        self.initialize_agent()
-
         # Initialize Session
         # Use the default user_id and session_id when necessary
         user_id = user_id if user_id is not None else self.user_id
@@ -4056,6 +4063,7 @@
         # Convert dict to Memory
         elif isinstance(self.memory, dict):
             memory_dict = self.memory
+
             memory_dict.pop("runs")
             self.memory = Memory(**memory_dict)
         else:
@@ -4111,6 +4119,7 @@
                 self.memory.runs[session.session_id] = []
                 for run in session.memory["runs"]:
                     run_session_id = run["session_id"]
+
                     if "team_id" in run:
                         self.memory.runs[run_session_id].append(TeamRunResponse.from_dict(run))
                     else:
@@ -4179,10 +4188,10 @@
         if not self.storage:
             return

-        agent_session_from_db = self.storage.read(session_id=session_id)
+        agent_session_from_db = self.storage.read(session_id=session_id)  # type: ignore
         if (
             agent_session_from_db is not None
-            and agent_session_from_db.memory is not None
+            and agent_session_from_db.memory is not None  # type: ignore
             and "runs" in agent_session_from_db.memory  # type: ignore
         ):
             if isinstance(self.memory, AgentMemory):
@@ -4224,6 +4233,11 @@
                 AgentSession,
                 self.storage.upsert(session=self.get_agent_session(session_id=session_id, user_id=user_id)),
             )
+
+        if not self.cache_session:
+            if self.memory is not None and self.memory.runs is not None and session_id in self.memory.runs:
+                self.memory.runs.pop(session_id)  # type: ignore
+
         return self.agent_session

     def add_introduction(self, introduction: str) -> None:
@@ -4355,7 +4369,6 @@
         # Format the system message with the session state variables
         if self.add_state_in_messages:
             sys_message_content = self.format_message_with_state_variables(sys_message_content)
-            print("HELLO", sys_message_content)

         # Add the JSON output prompt if response_model is provided and the model does not support native structured outputs or JSON schema outputs
         # or if use_json_mode is True
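
The new cache_session flag controls whether an agent keeps a session's runs cached in memory after the storage upsert shown above; with cache_session=False the session's runs are evicted from self.memory.runs once they are persisted. A minimal sketch of opting out of the cache (the SqliteStorage setup below is illustrative and not part of this diff):

from agno.agent import Agent
from agno.storage.sqlite import SqliteStorage

# Illustrative storage backend; any agno storage class is persisted the same way.
agent = Agent(
    storage=SqliteStorage(table_name="agent_sessions", db_file="tmp/agents.db"),
    cache_session=False,  # drop the session's cached runs after each write to storage
)
agent.run("Hello", session_id="session-1")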
agno/app/agui/async_router.py
CHANGED
@@ -16,7 +16,7 @@ from fastapi import APIRouter
 from fastapi.responses import StreamingResponse

 from agno.agent.agent import Agent
-from agno.app.agui.utils import async_stream_agno_response_as_agui_events,
+from agno.app.agui.utils import async_stream_agno_response_as_agui_events, convert_agui_messages_to_agno_messages
 from agno.team.team import Team

 logger = logging.getLogger(__name__)
@@ -28,12 +28,12 @@ async def run_agent(agent: Agent, run_input: RunAgentInput) -> AsyncIterator[BaseEvent]:

     try:
         # Preparing the input for the Agent and emitting the run started event
-
+        messages = convert_agui_messages_to_agno_messages(run_input.messages or [])
         yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=run_input.thread_id, run_id=run_id)

         # Request streaming response from agent
         response_stream = await agent.arun(
-
+            messages=messages,
             session_id=run_input.thread_id,
             stream=True,
             stream_intermediate_steps=True,
@@ -56,12 +56,12 @@ async def run_team(team: Team, input: RunAgentInput) -> AsyncIterator[BaseEvent]:
     run_id = input.run_id or str(uuid.uuid4())
     try:
         # Extract the last user message for team execution
-
+        messages = convert_agui_messages_to_agno_messages(input.messages or [])
         yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=input.thread_id, run_id=run_id)

         # Request streaming response from team
         response_stream = await team.arun(
-            message=
+            message=messages,
             session_id=input.thread_id,
             stream=True,
             stream_intermediate_steps=True,
agno/app/agui/sync_router.py
CHANGED
@@ -16,7 +16,7 @@ from fastapi import APIRouter
 from fastapi.responses import StreamingResponse

 from agno.agent.agent import Agent
-from agno.app.agui.utils import
+from agno.app.agui.utils import convert_agui_messages_to_agno_messages, stream_agno_response_as_agui_events
 from agno.team.team import Team

 logger = logging.getLogger(__name__)
@@ -28,12 +28,12 @@ def run_agent(agent: Agent, run_input: RunAgentInput) -> Iterator[BaseEvent]:

     try:
         # Preparing the input for the Agent and emitting the run started event
-
+        messages = convert_agui_messages_to_agno_messages(run_input.messages or [])
         yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=run_input.thread_id, run_id=run_id)

         # Request streaming response from agent
         response_stream = agent.run(
-
+            messages=messages,
             session_id=run_input.thread_id,
             stream=True,
             stream_intermediate_steps=True,
@@ -56,12 +56,12 @@ def run_team(team: Team, input: RunAgentInput) -> Iterator[BaseEvent]:
     run_id = input.run_id or str(uuid.uuid4())
     try:
         # Extract the last user message for team execution
-
+        messages = convert_agui_messages_to_agno_messages(input.messages or [])
         yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=input.thread_id, run_id=run_id)

         # Request streaming response from team
         response_stream = team.run(
-            message=
+            message=messages,
             session_id=input.thread_id,
             stream=True,
             stream_intermediate_steps=True,
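
With this change both AG-UI routers forward the full converted conversation (user, assistant, and tool messages) to the agent via messages=..., instead of a single message. A minimal serving sketch; the AGUIApp wrapper and its get_app() call are assumptions about the surrounding agno API, not taken from this diff:

from agno.agent import Agent
from agno.app.agui.app import AGUIApp  # assumed entry point that mounts these AG-UI routers

agent = Agent(name="agui-agent", add_history_to_messages=True)
agui_app = AGUIApp(agent=agent)
# app = agui_app.get_app()  # FastAPI app whose AG-UI endpoints call run_agent/run_team above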
agno/app/agui/utils.py
CHANGED
@@ -1,5 +1,6 @@
 """Logic used by the AG-UI router."""

+import json
 import uuid
 from collections import deque
 from collections.abc import Iterator
@@ -17,11 +18,13 @@ from ag_ui.core import (
     TextMessageStartEvent,
     ToolCallArgsEvent,
     ToolCallEndEvent,
+    ToolCallResultEvent,
     ToolCallStartEvent,
 )
 from ag_ui.core.types import Message as AGUIMessage

-from agno.
+from agno.models.message import Message
+from agno.run.response import RunEvent, RunResponseContentEvent, RunResponseEvent, RunResponsePausedEvent
 from agno.run.team import RunResponseContentEvent as TeamRunResponseContentEvent
 from agno.run.team import TeamRunEvent, TeamRunResponseEvent
@@ -64,13 +67,26 @@ class EventBuffer:
         return False


-def
-
-
-    for msg in
-        if msg.role == "
-
-
+def convert_agui_messages_to_agno_messages(messages: List[AGUIMessage]) -> List[Message]:
+    """Convert AG-UI messages to Agno messages."""
+    result = []
+    for msg in messages:
+        if msg.role == "tool":
+            result.append(Message(role="tool", tool_call_id=msg.tool_call_id, content=msg.content))
+        elif msg.role == "assistant":
+            tool_calls = None
+            if msg.tool_calls:
+                tool_calls = [call.model_dump() for call in msg.tool_calls]
+            result.append(
+                Message(
+                    role="assistant",
+                    content=msg.content,
+                    tool_calls=tool_calls,
+                )
+            )
+        elif msg.role == "user":
+            result.append(Message(role="user", content=msg.content))
+    return result


 def extract_team_response_chunk_content(response: TeamRunResponseContentEvent) -> str:
@@ -159,7 +175,7 @@ def _create_events_from_chunk(
             args_event = ToolCallArgsEvent(
                 type=EventType.TOOL_CALL_ARGS,
                 tool_call_id=tool_call.tool_call_id,  # type: ignore
-                delta=
+                delta=json.dumps(tool_call.tool_args),
             )
             events_to_emit.append(args_event)

@@ -174,6 +190,16 @@ def _create_events_from_chunk(
             )
             events_to_emit.append(end_event)

+            if tool_call.result is not None:
+                result_event = ToolCallResultEvent(
+                    type=EventType.TOOL_CALL_RESULT,
+                    tool_call_id=tool_call.tool_call_id,  # type: ignore
+                    content=str(tool_call.result),
+                    role="tool",
+                    message_id=str(uuid.uuid4()),
+                )
+                events_to_emit.append(result_event)
+
         # Handle reasoning
         elif chunk.event == RunEvent.reasoning_started:
             step_event = StepStartedEvent(type=EventType.STEP_STARTED, step_name="reasoning")
@@ -186,7 +212,12 @@ def _create_events_from_chunk(


 def _create_completion_events(
-
+    chunk: Union[RunResponseEvent, TeamRunResponseEvent],
+    event_buffer: EventBuffer,
+    message_started: bool,
+    message_id: str,
+    thread_id: str,
+    run_id: str,
 ) -> List[BaseEvent]:
     """Create events for run completion."""
     events_to_emit = []
@@ -205,6 +236,33 @@ def _create_completion_events(
         end_message_event = TextMessageEndEvent(type=EventType.TEXT_MESSAGE_END, message_id=message_id)
         events_to_emit.append(end_message_event)

+    # emit frontend tool calls, i.e. external_execution=True
+    if isinstance(chunk, RunResponsePausedEvent) and chunk.tools is not None:
+        for tool in chunk.tools:
+            if tool.tool_call_id is None or tool.tool_name is None:
+                continue
+
+            start_event = ToolCallStartEvent(
+                type=EventType.TOOL_CALL_START,
+                tool_call_id=tool.tool_call_id,
+                tool_call_name=tool.tool_name,
+                parent_message_id=message_id,
+            )
+            events_to_emit.append(start_event)
+
+            args_event = ToolCallArgsEvent(
+                type=EventType.TOOL_CALL_ARGS,
+                tool_call_id=tool.tool_call_id,
+                delta=json.dumps(tool.tool_args),
+            )
+            events_to_emit.append(args_event)
+
+            end_event = ToolCallEndEvent(
+                type=EventType.TOOL_CALL_END,
+                tool_call_id=tool.tool_call_id,
+            )
+            events_to_emit.append(end_event)
+
     run_finished_event = RunFinishedEvent(type=EventType.RUN_FINISHED, thread_id=thread_id, run_id=run_id)
     events_to_emit.append(run_finished_event)

@@ -271,8 +329,14 @@ def stream_agno_response_as_agui_events(

     for chunk in response_stream:
         # Handle the lifecycle end event
-        if
-
+        if (
+            chunk.event == RunEvent.run_completed
+            or chunk.event == TeamRunEvent.run_completed
+            or chunk.event == RunEvent.run_paused
+        ):
+            completion_events = _create_completion_events(
+                chunk, event_buffer, message_started, message_id, thread_id, run_id
+            )
             for event in completion_events:
                 events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
                 for emit_event in events_to_emit:
@@ -302,8 +366,14 @@ async def async_stream_agno_response_as_agui_events(

     async for chunk in response_stream:
         # Handle the lifecycle end event
-        if
-
+        if (
+            chunk.event == RunEvent.run_completed
+            or chunk.event == TeamRunEvent.run_completed
+            or chunk.event == RunEvent.run_paused
+        ):
+            completion_events = _create_completion_events(
+                chunk, event_buffer, message_started, message_id, thread_id, run_id
+            )
             for event in completion_events:
                 events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
                 for emit_event in events_to_emit:
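
convert_agui_messages_to_agno_messages maps AG-UI protocol messages onto Agno Message objects role by role, preserving assistant tool calls and tool results. A hedged sketch of the conversion; the UserMessage/ToolMessage constructors and their field names are assumptions about the ag_ui package, not taken from this diff:

from ag_ui.core.types import ToolMessage, UserMessage  # assumed types and field names

from agno.app.agui.utils import convert_agui_messages_to_agno_messages

agui_messages = [
    UserMessage(id="1", role="user", content="What is the weather in Tokyo?"),
    ToolMessage(id="2", role="tool", tool_call_id="call_1", content='{"temp_c": 21}'),
]
agno_messages = convert_agui_messages_to_agno_messages(agui_messages)
# Each entry is an agno.models.message.Message with the same role and content,
# ready to pass to agent.run(messages=...) as the routers above now do.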
agno/app/fastapi/app.py
CHANGED
@@ -77,7 +77,7 @@ class FastAPIApp(BaseAPIApp):

         if self.workflows:
             for workflow in self.workflows:
-                if not workflow.app_id:
+                if hasattr(workflow, "app_id") and not workflow.app_id:
                     workflow.app_id = self.app_id
                 if not workflow.workflow_id:
                     workflow.workflow_id = generate_id(workflow.name)
agno/app/fastapi/async_router.py
CHANGED
@@ -1,7 +1,7 @@
 import json
 from dataclasses import asdict
 from io import BytesIO
-from typing import AsyncGenerator, List, Optional, cast
+from typing import Any, AsyncGenerator, Dict, List, Optional, Union, cast
 from uuid import uuid4

 from fastapi import APIRouter, File, Form, HTTPException, Query, UploadFile
@@ -14,8 +14,10 @@ from agno.media import File as FileMedia
 from agno.run.response import RunResponseErrorEvent
 from agno.run.team import RunResponseErrorEvent as TeamRunResponseErrorEvent
 from agno.run.team import TeamRunResponseEvent
+from agno.run.v2.workflow import WorkflowErrorEvent
 from agno.team.team import Team
 from agno.utils.log import logger
+from agno.workflow.v2.workflow import Workflow as WorkflowV2
 from agno.workflow.workflow import Workflow

@@ -83,6 +85,42 @@ async def team_chat_response_streamer(
         return


+async def workflow_response_streamer(
+    workflow: WorkflowV2,
+    body: Union[Dict[str, Any], str],
+    session_id: Optional[str] = None,
+    user_id: Optional[str] = None,
+) -> AsyncGenerator:
+    try:
+        if isinstance(body, dict):
+            run_response = await workflow.arun(  # type: ignore
+                **body,
+                user_id=user_id,
+                session_id=session_id,
+                stream=True,
+                stream_intermediate_steps=True,
+            )
+        else:
+            run_response = await workflow.arun(  # type: ignore
+                body,
+                user_id=user_id,
+                session_id=session_id,
+                stream=True,
+                stream_intermediate_steps=True,
+            )
+        async for run_response_chunk in run_response:
+            yield run_response_chunk.to_json()
+    except Exception as e:
+        import traceback
+
+        traceback.print_exc(limit=3)
+        error_response = WorkflowErrorEvent(
+            error=str(e),
+        )
+        yield error_response.to_json()
+        return
+
+
 def get_async_router(
     agents: Optional[List[Agent]] = None, teams: Optional[List[Team]] = None, workflows: Optional[List[Workflow]] = None
 ) -> APIRouter:
@@ -351,17 +389,24 @@
                 media_type="text/event-stream",
             )
         elif workflow:
-
-
-
-
-
-
-
-
+            if isinstance(workflow, Workflow):
+                workflow_instance = workflow.deep_copy(update={"workflow_id": workflow_id})
+                workflow_instance.user_id = user_id
+                workflow_instance.session_name = None
+
+                if isinstance(workflow_input, dict):
+                    return StreamingResponse(
+                        (json.dumps(asdict(result)) for result in await workflow_instance.arun(**workflow_input)),
+                        media_type="text/event-stream",
+                    )
+                else:
+                    return StreamingResponse(
+                        (json.dumps(asdict(result)) for result in await workflow_instance.arun(workflow_input)),  # type: ignore
+                        media_type="text/event-stream",
+                    )
             else:
                 return StreamingResponse(
-                    (
+                    workflow_response_streamer(workflow, workflow_input, session_id=session_id, user_id=user_id),  # type: ignore
                     media_type="text/event-stream",
                 )
         else:
@@ -392,12 +437,18 @@
             )
             return team_run_response.to_dict()
         elif workflow:
-
-
-
-
-
+            if isinstance(workflow, Workflow):
+                workflow_instance = workflow.deep_copy(update={"workflow_id": workflow_id})
+                workflow_instance.user_id = user_id
+                workflow_instance.session_name = None
+                if isinstance(workflow_input, dict):
+                    return (await workflow_instance.arun(**workflow_input)).to_dict()
+                else:
+                    return (await workflow_instance.arun(workflow_input)).to_dict()  # type: ignore
            else:
-
+                if isinstance(workflow_input, dict):
+                    return (await workflow.arun(**workflow_input, session_id=session_id, user_id=user_id)).to_dict()
+                else:
+                    return (await workflow.arun(workflow_input, session_id=session_id, user_id=user_id)).to_dict()  # type: ignore

     return router
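
Taken on its own, the new workflow_response_streamer is an async generator of JSON strings: one chunk per workflow run event, with a serialized WorkflowErrorEvent yielded instead of an exception on failure. A hedged sketch of consuming it outside FastAPI; it assumes wf is an already-constructed v2 Workflow whose run accepts a "message" keyword, since dict bodies are splatted into workflow.arun(**body):

import asyncio

from agno.app.fastapi.async_router import workflow_response_streamer

async def print_stream(wf) -> None:
    # Each yielded chunk is a JSON-serialized run event; errors arrive as a
    # WorkflowErrorEvent payload rather than propagating as exceptions.
    async for chunk in workflow_response_streamer(wf, {"message": "hello"}, session_id="s-1", user_id="u-1"):
        print(chunk)

# asyncio.run(print_stream(wf))  # wf: a v2 Workflow instance; construction not shown here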
agno/app/fastapi/sync_router.py
CHANGED
@@ -1,7 +1,7 @@
 import json
 from dataclasses import asdict
 from io import BytesIO
-from typing import Any, Dict, Generator, List, Optional, cast
+from typing import Any, Dict, Generator, List, Optional, Union, cast
 from uuid import uuid4

 from fastapi import APIRouter, File, Form, HTTPException, Query, UploadFile
@@ -15,8 +15,10 @@ from agno.run.base import RunStatus
 from agno.run.response import RunResponseEvent
 from agno.run.team import RunResponseErrorEvent as TeamRunResponseErrorEvent
 from agno.run.team import TeamRunResponseEvent
+from agno.run.v2.workflow import WorkflowErrorEvent
 from agno.team.team import Team
 from agno.utils.log import logger
+from agno.workflow.v2.workflow import Workflow as WorkflowV2
 from agno.workflow.workflow import Workflow

@@ -82,6 +84,42 @@ def team_chat_response_streamer(
         return


+def workflow_response_streamer(
+    workflow: WorkflowV2,
+    body: Union[Dict[str, Any], str],
+    session_id: Optional[str] = None,
+    user_id: Optional[str] = None,
+) -> Generator:
+    try:
+        if isinstance(body, dict):
+            run_response = workflow.run(
+                **body,
+                user_id=user_id,
+                session_id=session_id,
+                stream=True,
+                stream_intermediate_steps=True,
+            )
+        else:
+            run_response = workflow.run(
+                body,
+                user_id=user_id,
+                session_id=session_id,
+                stream=True,
+                stream_intermediate_steps=True,
+            )
+        for run_response_chunk in run_response:
+            yield run_response_chunk.to_json()
+    except Exception as e:
+        import traceback
+
+        traceback.print_exc(limit=3)
+        error_response = WorkflowErrorEvent(
+            error=str(e),
+        )
+        yield error_response.to_json()
+        return
+
+
 def get_sync_router(
     agents: Optional[List[Agent]] = None, teams: Optional[List[Team]] = None, workflows: Optional[List[Workflow]] = None
 ) -> APIRouter:
@@ -251,12 +289,12 @@
     @router.post("/runs")
     def run_agent_or_team_or_workflow(
         message: str = Form(None),
-        stream: bool = Form(
+        stream: bool = Form(False),
         monitor: bool = Form(False),
         agent_id: Optional[str] = Query(None),
         team_id: Optional[str] = Query(None),
         workflow_id: Optional[str] = Query(None),
-        workflow_input: Optional[
+        workflow_input: Optional[str] = Form(None),
         session_id: Optional[str] = Form(None),
         user_id: Optional[str] = Form(None),
         files: Optional[List[UploadFile]] = File(None),
@@ -297,6 +335,13 @@
         if not workflow_input:
             raise HTTPException(status_code=400, detail="Workflow input is required")

+        # Parse workflow_input into a dict if it is a valid JSON
+        try:
+            parsed_workflow_input = json.loads(workflow_input)
+            workflow_input = parsed_workflow_input
+        except json.JSONDecodeError:
+            pass
+
         if agent:
             agent.monitoring = bool(monitor)
         elif team:
@@ -339,13 +384,25 @@
                 media_type="text/event-stream",
             )
         elif workflow:
-
-
-
-
-
-
-
+            if isinstance(workflow, Workflow):
+                workflow_instance = workflow.deep_copy(update={"workflow_id": workflow_id})
+                workflow_instance.user_id = user_id
+                workflow_instance.session_name = None
+                if isinstance(workflow_input, dict):
+                    return StreamingResponse(
+                        (json.dumps(asdict(result)) for result in workflow_instance.run(**workflow_input)),
+                        media_type="text/event-stream",
+                    )
+                else:
+                    return StreamingResponse(
+                        (json.dumps(asdict(result)) for result in workflow_instance.run(workflow_input)),  # type: ignore
+                        media_type="text/event-stream",
+                    )
+            else:
+                return StreamingResponse(
+                    workflow_response_streamer(workflow, workflow_input, session_id=session_id, user_id=user_id),
+                    media_type="text/event-stream",
+                )
         else:
             if agent:
                 run_response = cast(
@@ -374,9 +431,18 @@
             )
             return team_run_response.to_dict()
         elif workflow:
-
-
-
-
+            if isinstance(workflow, Workflow):
+                workflow_instance = workflow.deep_copy(update={"workflow_id": workflow_id})
+                workflow_instance.user_id = user_id
+                workflow_instance.session_name = None
+                if isinstance(workflow_input, dict):
+                    return workflow_instance.run(**workflow_input).to_dict()
+                else:
+                    return workflow_instance.run(workflow_input).to_dict()  # type: ignore
+            else:
+                if isinstance(workflow_input, dict):
+                    return workflow.run(**workflow_input, session_id=session_id, user_id=user_id).to_dict()
+                else:
+                    return workflow.run(workflow_input, session_id=session_id, user_id=user_id).to_dict()

     return router
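
Because the sync /runs endpoint now tries json.loads on workflow_input, a client can send either a plain string or a JSON object whose keys become keyword arguments of the workflow run. A hedged client sketch; the base URL, route prefix, and workflow id are placeholders, while the query and form fields mirror run_agent_or_team_or_workflow above:

import requests

# A JSON-encoded workflow_input is parsed into a dict and splatted into workflow.run(**input).
resp = requests.post(
    "http://localhost:8000/runs",  # adjust for the prefix the router is mounted under
    params={"workflow_id": "my-workflow"},
    data={"workflow_input": '{"topic": "agno 1.7.6"}', "stream": "false"},
)
print(resp.json())  # the workflow run response serialized via .to_dict()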