agno 2.0.6__py3-none-any.whl → 2.0.8__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- agno/agent/agent.py +94 -48
- agno/db/migrations/v1_to_v2.py +140 -11
- agno/knowledge/chunking/semantic.py +33 -6
- agno/knowledge/embedder/sentence_transformer.py +3 -3
- agno/knowledge/knowledge.py +152 -31
- agno/knowledge/types.py +8 -0
- agno/media.py +2 -0
- agno/models/base.py +38 -9
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +57 -0
- agno/models/google/gemini.py +4 -8
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/nexus/__init__.py +1 -1
- agno/models/nexus/nexus.py +2 -5
- agno/models/ollama/chat.py +24 -1
- agno/models/openai/chat.py +2 -7
- agno/models/openai/responses.py +21 -17
- agno/os/app.py +4 -10
- agno/os/interfaces/agui/agui.py +2 -2
- agno/os/interfaces/agui/utils.py +81 -18
- agno/os/interfaces/slack/slack.py +2 -2
- agno/os/interfaces/whatsapp/whatsapp.py +2 -2
- agno/os/router.py +3 -4
- agno/os/routers/evals/evals.py +1 -1
- agno/os/routers/memory/memory.py +1 -1
- agno/os/schema.py +3 -4
- agno/os/utils.py +55 -12
- agno/reasoning/default.py +3 -1
- agno/run/agent.py +4 -0
- agno/run/team.py +3 -1
- agno/session/agent.py +8 -5
- agno/session/team.py +14 -10
- agno/team/team.py +239 -115
- agno/tools/decorator.py +4 -2
- agno/tools/function.py +43 -4
- agno/tools/mcp.py +61 -38
- agno/tools/memori.py +1 -53
- agno/utils/events.py +7 -1
- agno/utils/gemini.py +147 -19
- agno/utils/models/claude.py +9 -0
- agno/utils/print_response/agent.py +16 -0
- agno/utils/print_response/team.py +16 -0
- agno/vectordb/base.py +2 -2
- agno/vectordb/langchaindb/langchaindb.py +5 -7
- agno/vectordb/llamaindex/llamaindexdb.py +25 -6
- agno/workflow/workflow.py +59 -15
- {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/METADATA +1 -1
- {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/RECORD +52 -48
- {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/WHEEL +0 -0
- {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/top_level.txt +0 -0
agno/models/llama_cpp/llama_cpp.py ADDED

```diff
@@ -0,0 +1,22 @@
+from dataclasses import dataclass
+
+from agno.models.openai.like import OpenAILike
+
+
+@dataclass
+class LlamaCpp(OpenAILike):
+    """
+    A class for interacting with LLMs using Llama CPP.
+
+    Attributes:
+        id (str): The id of the Llama CPP model. Default is "ggml-org/gpt-oss-20b-GGUF".
+        name (str): The name of this chat model instance. Default is "LlamaCpp".
+        provider (str): The provider of the model. Default is "LlamaCpp".
+        base_url (str): The base url to which the requests are sent.
+    """
+
+    id: str = "ggml-org/gpt-oss-20b-GGUF"
+    name: str = "LlamaCpp"
+    provider: str = "LlamaCpp"
+
+    base_url: str = "http://127.0.0.1:8080/v1"
```
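The new `LlamaCpp` class is a thin `OpenAILike` wrapper preconfigured for a local llama.cpp server. A minimal usage sketch, assuming the new `agno/models/llama_cpp/__init__.py` re-exports the class (the file list above suggests it does) and that a llama.cpp server is listening on the default port:

```python
from agno.models.llama_cpp import LlamaCpp

# Defaults from the diff: the gpt-oss-20b GGUF build served at http://127.0.0.1:8080/v1
model = LlamaCpp()

# Any other GGUF model or server address can be set explicitly.
remote = LlamaCpp(id="my-local-model", base_url="http://192.168.1.20:8080/v1")
```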
agno/models/nexus/__init__.py CHANGED

agno/models/nexus/nexus.py CHANGED

```diff
@@ -6,13 +6,12 @@ from agno.models.openai.like import OpenAILike
 @dataclass
 class Nexus(OpenAILike):
     """
-    A class for interacting with
+    A class for interacting with LLMs using Nexus.
 
     Attributes:
-        id (str): The id of the Nexus model to use. Default is "
+        id (str): The id of the Nexus model to use. Default is "openai/gpt-4".
         name (str): The name of this chat model instance. Default is "Nexus"
         provider (str): The provider of the model. Default is "Nexus".
-        api_key (str): The api key to authorize request to Nexus.
         base_url (str): The base url to which the requests are sent.
     """
 
@@ -21,5 +20,3 @@ class Nexus(OpenAILike):
     provider: str = "Nexus"
 
     base_url: str = "http://localhost:8000/llm/v1/"
-
-    supports_native_structured_outputs: bool = False
```
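One behavioral note, inferred from the second hunk rather than stated anywhere in this diff: with the explicit `supports_native_structured_outputs: bool = False` override deleted, `Nexus` now inherits whatever default `OpenAILike` defines for that flag. A quick way to verify against an installed build:

```python
from agno.models.nexus import Nexus

model = Nexus()
# Previously hard-coded to False; now inherited from OpenAILike.
print(model.supports_native_structured_outputs)
```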
agno/models/ollama/chat.py CHANGED

```diff
@@ -84,7 +84,8 @@ class Ollama(Model):
         if self.async_client is not None:
             return self.async_client
 
-
+        self.async_client = AsyncOllamaClient(**self._get_client_params())
+        return self.async_client
 
     def get_request_params(
         self,
@@ -144,6 +145,28 @@ class Ollama(Model):
             "role": message.role,
             "content": message.content,
         }
+
+        if message.role == "assistant" and message.tool_calls is not None:
+            # Format tool calls for assistant messages
+            formatted_tool_calls = []
+            for tool_call in message.tool_calls:
+                if "function" in tool_call:
+                    function_data = tool_call["function"]
+                    formatted_tool_call = {
+                        "id": tool_call.get("id"),
+                        "type": "function",
+                        "function": {
+                            "name": function_data["name"],
+                            "arguments": json.loads(function_data["arguments"])
+                            if isinstance(function_data["arguments"], str)
+                            else function_data["arguments"],
+                        },
+                    }
+                    formatted_tool_calls.append(formatted_tool_call)
+
+            if formatted_tool_calls:
+                _message["tool_calls"] = formatted_tool_calls
+
         if message.role == "user":
             if message.images is not None:
                 message_images = []
```
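The subtle part of the second hunk is the `arguments` normalization: Ollama expects tool-call arguments as a mapping, while stored messages may carry them as a JSON string. A standalone sketch of that transformation with stand-in data (the real code operates on agno `Message` objects):

```python
import json

tool_call = {
    "id": "call_0",
    "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
}

function_data = tool_call["function"]
formatted_tool_call = {
    "id": tool_call.get("id"),
    "type": "function",
    "function": {
        "name": function_data["name"],
        # Parse stringified JSON arguments; pass dicts through unchanged.
        "arguments": json.loads(function_data["arguments"])
        if isinstance(function_data["arguments"], str)
        else function_data["arguments"],
    },
}

print(formatted_tool_call["function"]["arguments"])  # {'city': 'Paris'}
```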
agno/models/openai/chat.py CHANGED

```diff
@@ -22,13 +22,8 @@ try:
     from openai import AsyncOpenAI as AsyncOpenAIClient
     from openai import OpenAI as OpenAIClient
     from openai.types import CompletionUsage
-    from openai.types.chat import ChatCompletionAudio
-    from openai.types.chat.
-    from openai.types.chat.chat_completion_chunk import (
-        ChatCompletionChunk,
-        ChoiceDelta,
-        ChoiceDeltaToolCall,
-    )
+    from openai.types.chat import ChatCompletion, ChatCompletionAudio, ChatCompletionChunk
+    from openai.types.chat.chat_completion_chunk import ChoiceDelta, ChoiceDeltaToolCall
 except (ImportError, ModuleNotFoundError):
     raise ImportError("`openai` not installed. Please install using `pip install openai`")
 
```
agno/models/openai/responses.py CHANGED

```diff
@@ -19,10 +19,7 @@ from agno.utils.models.schema_utils import get_response_schema_for_provider
 
 try:
     from openai import APIConnectionError, APIStatusError, AsyncOpenAI, OpenAI, RateLimitError
-    from openai.types.responses
-    from openai.types.responses.response_reasoning_item import ResponseReasoningItem
-    from openai.types.responses.response_stream_event import ResponseStreamEvent
-    from openai.types.responses.response_usage import ResponseUsage
+    from openai.types.responses import Response, ResponseReasoningItem, ResponseStreamEvent, ResponseUsage
 except ImportError as e:
     raise ImportError("`openai` not installed. Please install using `pip install openai -U`") from e
 
@@ -407,21 +404,28 @@ class OpenAIResponses(Model):
         """
         formatted_messages: List[Union[Dict[str, Any], ResponseReasoningItem]] = []
 
-
+        messages_to_format = messages
+        previous_response_id: Optional[str] = None
+
+        if self._using_reasoning_model() and self.store is not False:
             # Detect whether we're chaining via previous_response_id. If so, we should NOT
             # re-send prior function_call items; the Responses API already has the state and
             # expects only the corresponding function_call_output items.
-
-
-
-
-
-
-
-
-
-
-
+
+            for msg in reversed(messages):
+                if (
+                    msg.role == "assistant"
+                    and hasattr(msg, "provider_data")
+                    and msg.provider_data
+                    and "response_id" in msg.provider_data
+                ):
+                    previous_response_id = msg.provider_data["response_id"]
+                    msg_index = messages.index(msg)
+
+                    # Include messages after this assistant message
+                    messages_to_format = messages[msg_index + 1:]
+
+                    break
 
         # Build a mapping from function_call id (fc_*) → call_id (call_*) from prior assistant tool_calls
         fc_id_to_call_id: Dict[str, str] = {}
@@ -434,7 +438,7 @@ class OpenAIResponses(Model):
         if isinstance(fc_id, str) and isinstance(call_id, str):
             fc_id_to_call_id[fc_id] = call_id
 
-        for message in
+        for message in messages_to_format:
             if message.role in ["user", "system"]:
                 message_dict: Dict[str, Any] = {
                     "role": self.role_map[message.role],
```
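The heart of the second hunk is a reverse scan for the most recent assistant message that carries a `response_id`, so that only the messages after it are re-sent. A standalone sketch with a stand-in message type (the real code runs on agno `Message` objects inside `OpenAIResponses`):

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class Msg:
    """Stand-in for agno's Message class in this sketch."""
    role: str
    content: str = ""
    provider_data: Optional[dict] = None

messages = [
    Msg("user", "What is 2 + 2?"),
    Msg("assistant", "4", provider_data={"response_id": "resp_123"}),
    Msg("tool", "function_call_output"),
]

messages_to_format = messages
previous_response_id = None
for msg in reversed(messages):
    if msg.role == "assistant" and msg.provider_data and "response_id" in msg.provider_data:
        previous_response_id = msg.provider_data["response_id"]
        # Only messages after the chained response still need to be sent.
        messages_to_format = messages[messages.index(msg) + 1 :]
        break

print(previous_response_id)                  # resp_123
print([m.role for m in messages_to_format])  # ['tool']
```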
agno/os/app.py CHANGED

```diff
@@ -9,7 +9,6 @@ from fastapi.responses import JSONResponse
 from fastapi.routing import APIRoute
 from rich import box
 from rich.panel import Panel
-from starlette.middleware.cors import CORSMiddleware
 from starlette.requests import Request
 
 from agno.agent.agent import Agent
@@ -37,6 +36,7 @@ from agno.os.routers.memory import get_memory_router
 from agno.os.routers.metrics import get_metrics_router
 from agno.os.routers.session import get_session_router
 from agno.os.settings import AgnoAPISettings
+from agno.os.utils import update_cors_middleware
 from agno.team.team import Team
 from agno.utils.log import logger
 from agno.utils.string import generate_id, generate_id_from_name
@@ -286,14 +286,8 @@ class AgentOS:
 
         self.fastapi_app.middleware("http")(general_exception_handler)
 
-
-
-            allow_origins=self.settings.cors_origin_list,  # type: ignore
-            allow_credentials=True,
-            allow_methods=["*"],
-            allow_headers=["*"],
-            expose_headers=["*"],
-        )
+        # Update CORS middleware
+        update_cors_middleware(self.fastapi_app, self.settings.cors_origin_list)  # type: ignore
 
         return self.fastapi_app
 
@@ -368,7 +362,7 @@ class AgentOS:
         for route in self.fastapi_app.routes:
             for conflict in conflicts:
                 if isinstance(route, APIRoute):
-                    if route.path == conflict["path"] and list(route.methods) == list(conflict["methods"]):
+                    if route.path == conflict["path"] and list(route.methods) == list(conflict["methods"]):  # type: ignore
                         self.fastapi_app.routes.pop(self.fastapi_app.routes.index(route))
 
         self.fastapi_app.include_router(router)
```
agno/os/interfaces/agui/agui.py CHANGED

```diff
@@ -19,8 +19,8 @@ class AGUI(BaseInterface):
         self.agent = agent
         self.team = team
 
-        if not self.agent
-            raise ValueError("AGUI requires an agent
+        if not (self.agent or self.team):
+            raise ValueError("AGUI requires an agent or a team")
 
     def get_router(self, **kwargs) -> APIRouter:
         # Cannot be overridden
```
agno/os/interfaces/agui/utils.py CHANGED

```diff
@@ -35,10 +35,16 @@ class EventBuffer:
 
     active_tool_call_ids: Set[str]  # All currently active tool calls
     ended_tool_call_ids: Set[str]  # All tool calls that have ended
+    current_text_message_id: str = ""  # ID of the current text message context (for tool call parenting)
+    next_text_message_id: str = ""  # Pre-generated ID for the next text message
+    pending_tool_calls_parent_id: str = ""  # Parent message ID for pending tool calls
 
     def __init__(self):
         self.active_tool_call_ids = set()
         self.ended_tool_call_ids = set()
+        self.current_text_message_id = ""
+        self.next_text_message_id = str(uuid.uuid4())
+        self.pending_tool_calls_parent_id = ""
 
     def start_tool_call(self, tool_call_id: str) -> None:
         """Start a new tool call."""
@@ -49,6 +55,29 @@ class EventBuffer:
         self.active_tool_call_ids.discard(tool_call_id)
         self.ended_tool_call_ids.add(tool_call_id)
 
+    def start_text_message(self) -> str:
+        """Start a new text message and return its ID."""
+        # Use the pre-generated next ID as current, and generate a new next ID
+        self.current_text_message_id = self.next_text_message_id
+        self.next_text_message_id = str(uuid.uuid4())
+        return self.current_text_message_id
+
+    def get_parent_message_id_for_tool_call(self) -> str:
+        """Get the message ID to use as parent for tool calls."""
+        # If we have a pending parent ID set (from text message end), use that
+        if self.pending_tool_calls_parent_id:
+            return self.pending_tool_calls_parent_id
+        # Otherwise use current text message ID
+        return self.current_text_message_id
+
+    def set_pending_tool_calls_parent_id(self, parent_id: str) -> None:
+        """Set the parent message ID for upcoming tool calls."""
+        self.pending_tool_calls_parent_id = parent_id
+
+    def clear_pending_tool_calls_parent_id(self) -> None:
+        """Clear the pending parent ID when a new text message starts."""
+        self.pending_tool_calls_parent_id = ""
+
 
 def convert_agui_messages_to_agno_messages(messages: List[AGUIMessage]) -> List[Message]:
     """Convert AG-UI messages to Agno messages."""
@@ -113,10 +142,18 @@ def _create_events_from_chunk(
     message_id: str,
     message_started: bool,
     event_buffer: EventBuffer,
-) -> Tuple[List[BaseEvent], bool]:
+) -> Tuple[List[BaseEvent], bool, str]:
     """
     Process a single chunk and return events to emit + updated message_started state.
-
+
+    Args:
+        chunk: The event chunk to process
+        message_id: Current message identifier
+        message_started: Whether a message is currently active
+        event_buffer: Event buffer for tracking tool call state
+
+    Returns:
+        Tuple of (events_to_emit, new_message_started_state, message_id)
     """
     events_to_emit: List[BaseEvent] = []
 
@@ -133,6 +170,11 @@ def _create_events_from_chunk(
     # Handle the message start event, emitted once per message
     if not message_started:
         message_started = True
+        message_id = event_buffer.start_text_message()
+
+        # Clear pending tool calls parent ID when starting new text message
+        event_buffer.clear_pending_tool_calls_parent_id()
+
         start_event = TextMessageStartEvent(
             type=EventType.TEXT_MESSAGE_START,
             message_id=message_id,
@@ -149,21 +191,37 @@ def _create_events_from_chunk(
         )
         events_to_emit.append(content_event)  # type: ignore
 
-    # Handle starting a new tool
-    elif chunk.event == RunEvent.tool_call_started:
-        # End the current text message if one is active before starting tool calls
-        if message_started:
-            end_message_event = TextMessageEndEvent(type=EventType.TEXT_MESSAGE_END, message_id=message_id)
-            events_to_emit.append(end_message_event)
-            message_started = False  # Reset message_started state
-
+    # Handle starting a new tool
+    elif chunk.event == RunEvent.tool_call_started or chunk.event == TeamRunEvent.tool_call_started:
         if chunk.tool is not None:  # type: ignore
             tool_call = chunk.tool  # type: ignore
+
+            # End current text message and handle for tool calls
+            current_message_id = message_id
+            if message_started:
+                # End the current text message
+                end_message_event = TextMessageEndEvent(type=EventType.TEXT_MESSAGE_END, message_id=current_message_id)
+                events_to_emit.append(end_message_event)
+
+                # Set this message as the parent for any upcoming tool calls
+                # This ensures multiple sequential tool calls all use the same parent
+                event_buffer.set_pending_tool_calls_parent_id(current_message_id)
+
+                # Reset message started state and generate new message_id for future messages
+                message_started = False
+                message_id = str(uuid.uuid4())
+
+            # Get the parent message ID - this will use pending parent if set, ensuring multiple tool calls in sequence have the same parent
+            parent_message_id = event_buffer.get_parent_message_id_for_tool_call()
+
+            if not parent_message_id:
+                parent_message_id = current_message_id
+
             start_event = ToolCallStartEvent(
                 type=EventType.TOOL_CALL_START,
                 tool_call_id=tool_call.tool_call_id,  # type: ignore
                 tool_call_name=tool_call.tool_name,  # type: ignore
-                parent_message_id=
+                parent_message_id=parent_message_id,
             )
             events_to_emit.append(start_event)
 
@@ -175,7 +233,7 @@ def _create_events_from_chunk(
         events_to_emit.append(args_event)  # type: ignore
 
     # Handle tool call completion
-    elif chunk.event == RunEvent.tool_call_completed:
+    elif chunk.event == RunEvent.tool_call_completed or chunk.event == TeamRunEvent.tool_call_completed:
         if chunk.tool is not None:  # type: ignore
             tool_call = chunk.tool  # type: ignore
             if tool_call.tool_call_id not in event_buffer.ended_tool_call_ids:
@@ -203,7 +261,7 @@ def _create_events_from_chunk(
         step_finished_event = StepFinishedEvent(type=EventType.STEP_FINISHED, step_name="reasoning")
         events_to_emit.append(step_finished_event)
 
-    return events_to_emit, message_started
+    return events_to_emit, message_started, message_id
 
 
 def _create_completion_events(
@@ -237,11 +295,16 @@ def _create_completion_events(
         if tool.tool_call_id is None or tool.tool_name is None:
             continue
 
+        # Use the current text message ID from event buffer as parent
+        parent_message_id = event_buffer.get_parent_message_id_for_tool_call()
+        if not parent_message_id:
+            parent_message_id = message_id  # Fallback to the passed message_id
+
         start_event = ToolCallStartEvent(
             type=EventType.TOOL_CALL_START,
             tool_call_id=tool.tool_call_id,
             tool_call_name=tool.tool_name,
-            parent_message_id=
+            parent_message_id=parent_message_id,
         )
         events_to_emit.append(start_event)
 
@@ -285,7 +348,7 @@ def stream_agno_response_as_agui_events(
    response_stream: Iterator[Union[RunOutputEvent, TeamRunOutputEvent]], thread_id: str, run_id: str
 ) -> Iterator[BaseEvent]:
     """Map the Agno response stream to AG-UI format, handling event ordering constraints."""
-    message_id =
+    message_id = ""  # Will be set by EventBuffer when text message starts
     message_started = False
     event_buffer = EventBuffer()
     stream_completed = False
@@ -304,7 +367,7 @@ def stream_agno_response_as_agui_events(
             stream_completed = True
         else:
             # Process regular chunk immediately
-            events_from_chunk, message_started = _create_events_from_chunk(
+            events_from_chunk, message_started, message_id = _create_events_from_chunk(
                 chunk, message_id, message_started, event_buffer
             )
@@ -345,7 +408,7 @@ async def async_stream_agno_response_as_agui_events(
     run_id: str,
 ) -> AsyncIterator[BaseEvent]:
     """Map the Agno response stream to AG-UI format, handling event ordering constraints."""
-    message_id =
+    message_id = ""  # Will be set by EventBuffer when text message starts
     message_started = False
     event_buffer = EventBuffer()
     stream_completed = False
@@ -364,7 +427,7 @@ async def async_stream_agno_response_as_agui_events(
             stream_completed = True
         else:
             # Process regular chunk immediately
-            events_from_chunk, message_started = _create_events_from_chunk(
+            events_from_chunk, message_started, message_id = _create_events_from_chunk(
                 chunk, message_id, message_started, event_buffer
             )
```
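The thread running through all of these hunks is tool-call parenting: when a text message ends because tool calls begin, every sequential tool call should report that message as its parent. A minimal sketch of the new `EventBuffer` flow (assuming the AG-UI dependency is installed so the module imports):

```python
from agno.os.interfaces.agui.utils import EventBuffer

buffer = EventBuffer()

# A text message starts: the pre-generated ID becomes current.
message_id = buffer.start_text_message()

# The text message ends because a tool call arrived; pin it as the parent.
buffer.set_pending_tool_calls_parent_id(message_id)

# Sequential tool calls all resolve to the same parent message.
assert buffer.get_parent_message_id_for_tool_call() == message_id
assert buffer.get_parent_message_id_for_tool_call() == message_id

# A new text message clears the pending parent before taking over.
buffer.clear_pending_tool_calls_parent_id()
```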
agno/os/interfaces/slack/slack.py CHANGED

```diff
@@ -20,8 +20,8 @@ class Slack(BaseInterface):
         self.agent = agent
         self.team = team
 
-        if not self.agent
-            raise ValueError("Slack requires an agent
+        if not (self.agent or self.team):
+            raise ValueError("Slack requires an agent or a team")
 
     def get_router(self, **kwargs) -> APIRouter:
         # Cannot be overridden
```
agno/os/interfaces/whatsapp/whatsapp.py CHANGED

```diff
@@ -17,8 +17,8 @@ class Whatsapp(BaseInterface):
         self.agent = agent
         self.team = team
 
-        if not self.agent
-            raise ValueError("Whatsapp requires an agent
+        if not (self.agent or self.team):
+            raise ValueError("Whatsapp requires an agent or a team")
 
     def get_router(self, **kwargs) -> APIRouter:
         # Cannot be overridden
```
agno/os/router.py CHANGED

```diff
@@ -731,10 +731,9 @@ def get_base_router(
     ]:
         # Process document files
         try:
-
-
-
-            )
+            input_file = process_document(file)
+            if input_file is not None:
+                input_files.append(input_file)
         except Exception as e:
             log_error(f"Error processing file {file.filename}: {e}")
             continue
```
agno/os/routers/evals/evals.py CHANGED

```diff
@@ -380,7 +380,7 @@ def parse_eval_types_filter(
     eval_types: Optional[str] = Query(
         default=None,
         description="Comma-separated eval types (accuracy,performance,reliability)",
-
+        examples=["accuracy,performance"],
     ),
 ) -> Optional[List[EvalType]]:
     """Parse comma-separated eval types into EvalType enums for filtering evaluation runs."""
```
agno/os/routers/memory/memory.py CHANGED

```diff
@@ -396,7 +396,7 @@ def parse_topics(
     topics: Optional[List[str]] = Query(
         default=None,
         description="Comma-separated list of topics to filter by",
-
+        examples=["preferences,technical,communication_style"],
     ),
 ) -> Optional[List[str]]:
     """Parse comma-separated topics into a list for filtering memories by topic."""
```
agno/os/schema.py CHANGED

```diff
@@ -461,11 +461,8 @@ class TeamResponse(BaseModel):
                 "stream_member_events": False,
             }
 
-        if team.model is None:
-            raise ValueError("Team model is required")
-
         team.determine_tools_for_model(
-            model=team.model,
+            model=team.model,  # type: ignore
             session=TeamSession(session_id=str(uuid4()), session_data={}),
             run_response=TeamRunOutput(run_id=str(uuid4())),
             async_mode=True,
@@ -763,6 +760,7 @@ class TeamSessionDetailSchema(BaseModel):
     session_state: Optional[dict]
     metrics: Optional[dict]
     team_data: Optional[dict]
+    chat_history: Optional[List[dict]]
     created_at: Optional[datetime]
     updated_at: Optional[datetime]
     total_tokens: Optional[int]
@@ -784,6 +782,7 @@ class TeamSessionDetailSchema(BaseModel):
             if session.session_data
             else None,
             metrics=session.session_data.get("session_metrics", {}) if session.session_data else None,
+            chat_history=[message.to_dict() for message in session.get_chat_history()],
             created_at=datetime.fromtimestamp(session.created_at, tz=timezone.utc) if session.created_at else None,
             updated_at=datetime.fromtimestamp(session.updated_at, tz=timezone.utc) if session.updated_at else None,
         )
```
agno/os/utils.py CHANGED

```diff
@@ -1,6 +1,7 @@
 from typing import Any, Callable, Dict, List, Optional, Union
 
-from fastapi import HTTPException, UploadFile
+from fastapi import FastAPI, HTTPException, UploadFile
+from starlette.middleware.cors import CORSMiddleware
 
 from agno.agent.agent import Agent
 from agno.db.base import BaseDb
```
```diff
@@ -60,6 +61,14 @@ def get_run_input(run_dict: Dict[str, Any], is_workflow_run: bool = False) -> st
         if message.get("role") == "user":
             return message.get("content", "")
 
+    # Check the input field directly as final fallback
+    if run_dict.get("input") is not None:
+        input_value = run_dict.get("input")
+        if isinstance(input_value, str):
+            return input_value
+        else:
+            return str(input_value)
+
     if run_dict.get("messages") is not None:
         for message in run_dict["messages"]:
             if message.get("role") == "user":
```
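A standalone sketch of the new fallback: when no user message is found, the raw `input` field is returned, stringified if it is not already a `str`:

```python
run_dict = {"input": {"query": "summarize the report"}}

run_input = ""
input_value = run_dict.get("input")
if input_value is not None:
    run_input = input_value if isinstance(input_value, str) else str(input_value)

print(run_input)  # {'query': 'summarize the report'}
```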
```diff
@@ -109,27 +118,21 @@ def process_image(file: UploadFile) -> Image:
     content = file.file.read()
     if not content:
         raise HTTPException(status_code=400, detail="Empty file")
-    return Image(content=content)
+    return Image(content=content, format=extract_format(file), mime_type=file.content_type)
 
 
 def process_audio(file: UploadFile) -> Audio:
     content = file.file.read()
     if not content:
         raise HTTPException(status_code=400, detail="Empty file")
-    format =
-    if file.filename and "." in file.filename:
-        format = file.filename.split(".")[-1].lower()
-    elif file.content_type:
-        format = file.content_type.split("/")[-1]
-
-    return Audio(content=content, format=format)
+    return Audio(content=content, format=extract_format(file), mime_type=file.content_type)
 
 
 def process_video(file: UploadFile) -> Video:
     content = file.file.read()
     if not content:
         raise HTTPException(status_code=400, detail="Empty file")
-    return Video(content=content, format=file.content_type)
+    return Video(content=content, format=extract_format(file), mime_type=file.content_type)
 
 
 def process_document(file: UploadFile) -> Optional[FileMedia]:
```
```diff
@@ -137,13 +140,23 @@ def process_document(file: UploadFile) -> Optional[FileMedia]:
         content = file.file.read()
         if not content:
             raise HTTPException(status_code=400, detail="Empty file")
-
-
+        return FileMedia(
+            content=content, filename=file.filename, format=extract_format(file), mime_type=file.content_type
+        )
     except Exception as e:
         logger.error(f"Error processing document {file.filename}: {e}")
         return None
 
 
+def extract_format(file: UploadFile):
+    format = None
+    if file.filename and "." in file.filename:
+        format = file.filename.split(".")[-1].lower()
+    elif file.content_type:
+        format = file.content_type.split("/")[-1]
+    return format
+
+
 def format_tools(agent_tools: List[Union[Dict[str, Any], Toolkit, Function, Callable]]):
     formatted_tools = []
     if agent_tools is not None:
```
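The new `extract_format` helper prefers the filename extension and falls back to the MIME subtype. A standalone sketch of that precedence, using a stand-in for `UploadFile` so it runs without FastAPI:

```python
class FakeUpload:
    """Stand-in for fastapi.UploadFile in this sketch."""

    def __init__(self, filename=None, content_type=None):
        self.filename = filename
        self.content_type = content_type

def extract_format(file):
    format = None
    if file.filename and "." in file.filename:
        format = file.filename.split(".")[-1].lower()
    elif file.content_type:
        format = file.content_type.split("/")[-1]
    return format

print(extract_format(FakeUpload("report.PDF", "application/pdf")))  # pdf
print(extract_format(FakeUpload(None, "audio/wav")))                # wav
print(extract_format(FakeUpload()))                                 # None
```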
```diff
@@ -260,3 +273,33 @@ def _generate_schema_from_params(params: Dict[str, Any]) -> Dict[str, Any]:
     schema["required"] = required
 
     return schema
+
+
+def update_cors_middleware(app: FastAPI, new_origins: list):
+    existing_origins: List[str] = []
+
+    # TODO: Allow more options where CORS is properly merged and user can disable this behaviour
+
+    # Extract existing origins from current CORS middleware
+    for middleware in app.user_middleware:
+        if middleware.cls == CORSMiddleware:
+            if hasattr(middleware, "kwargs"):
+                existing_origins = middleware.kwargs.get("allow_origins", [])
+            break
+    # Merge origins
+    merged_origins = list(set(new_origins + existing_origins))
+    final_origins = [origin for origin in merged_origins if origin != "*"]
+
+    # Remove existing CORS
+    app.user_middleware = [m for m in app.user_middleware if m.cls != CORSMiddleware]
+    app.middleware_stack = None
+
+    # Add updated CORS
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=final_origins,
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+        expose_headers=["*"],
+    )
```
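This is what `AgentOS` now calls from `app.py` instead of unconditionally adding a second `CORSMiddleware`: origins the user registered on the app are preserved and merged with the platform's own. A minimal usage sketch (the origin URLs are invented for illustration):

```python
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware

from agno.os.utils import update_cors_middleware

app = FastAPI()
# Origins the user configured before handing the app to AgentOS.
app.add_middleware(CORSMiddleware, allow_origins=["https://myapp.example"])

# Merges the new origins with the existing ones instead of replacing them.
# Wildcard "*" entries are dropped, which matters because allow_credentials=True.
update_cors_middleware(app, ["https://os.example.com"])
```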
agno/reasoning/default.py CHANGED

```diff
@@ -14,6 +14,7 @@ def get_default_reasoning_agent(
     min_steps: int,
     max_steps: int,
     tools: Optional[List[Union[Toolkit, Callable, Function, Dict]]] = None,
+    tool_call_limit: Optional[int] = None,
     use_json_mode: bool = False,
     telemetry: bool = True,
     debug_mode: bool = False,
@@ -56,7 +57,7 @@ def get_default_reasoning_agent(
     - **validate**: When you reach a potential answer, signaling it's ready for validation.
     - **final_answer**: Only if you have confidently validated the solution.
     - **reset**: Immediately restart analysis if a critical error or incorrect result is identified.
-    6. **Confidence Score**: Provide a numeric confidence score (0.0–1.0) indicating your certainty in the step
+    6. **Confidence Score**: Provide a numeric confidence score (0.0–1.0) indicating your certainty in the step's correctness and its outcome.
 
     Step 5 - Validation (mandatory before finalizing an answer):
     - Explicitly validate your solution by:
@@ -82,6 +83,7 @@ def get_default_reasoning_agent(
         - Only create a single instance of ReasoningSteps for your response.\
         """),
         tools=tools,
+        tool_call_limit=tool_call_limit,
        output_schema=ReasoningSteps,
         use_json_mode=use_json_mode,
         telemetry=telemetry,
```