agno 2.0.0rc1__py3-none-any.whl → 2.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +101 -140
- agno/db/mongo/mongo.py +8 -3
- agno/eval/accuracy.py +12 -5
- agno/knowledge/chunking/strategy.py +14 -14
- agno/knowledge/knowledge.py +156 -120
- agno/knowledge/reader/arxiv_reader.py +5 -5
- agno/knowledge/reader/csv_reader.py +6 -77
- agno/knowledge/reader/docx_reader.py +5 -5
- agno/knowledge/reader/firecrawl_reader.py +5 -5
- agno/knowledge/reader/json_reader.py +5 -5
- agno/knowledge/reader/markdown_reader.py +31 -9
- agno/knowledge/reader/pdf_reader.py +10 -123
- agno/knowledge/reader/reader_factory.py +65 -72
- agno/knowledge/reader/s3_reader.py +44 -114
- agno/knowledge/reader/text_reader.py +5 -5
- agno/knowledge/reader/url_reader.py +75 -31
- agno/knowledge/reader/web_search_reader.py +6 -29
- agno/knowledge/reader/website_reader.py +5 -5
- agno/knowledge/reader/wikipedia_reader.py +5 -5
- agno/knowledge/reader/youtube_reader.py +6 -6
- agno/knowledge/reranker/__init__.py +9 -0
- agno/knowledge/utils.py +10 -10
- agno/media.py +269 -268
- agno/models/aws/bedrock.py +3 -7
- agno/models/base.py +50 -54
- agno/models/google/gemini.py +11 -10
- agno/models/message.py +4 -4
- agno/models/ollama/chat.py +1 -1
- agno/models/openai/chat.py +33 -14
- agno/models/response.py +5 -5
- agno/os/app.py +40 -29
- agno/os/mcp.py +39 -59
- agno/os/router.py +547 -16
- agno/os/routers/evals/evals.py +197 -12
- agno/os/routers/knowledge/knowledge.py +428 -14
- agno/os/routers/memory/memory.py +250 -28
- agno/os/routers/metrics/metrics.py +125 -7
- agno/os/routers/session/session.py +393 -25
- agno/os/schema.py +55 -2
- agno/run/agent.py +37 -28
- agno/run/base.py +9 -19
- agno/run/team.py +110 -19
- agno/run/workflow.py +41 -28
- agno/team/team.py +808 -1080
- agno/tools/brightdata.py +3 -3
- agno/tools/cartesia.py +3 -5
- agno/tools/dalle.py +7 -4
- agno/tools/desi_vocal.py +2 -2
- agno/tools/e2b.py +6 -6
- agno/tools/eleven_labs.py +3 -3
- agno/tools/fal.py +4 -4
- agno/tools/function.py +7 -7
- agno/tools/giphy.py +2 -2
- agno/tools/lumalab.py +3 -3
- agno/tools/mcp.py +1 -2
- agno/tools/models/azure_openai.py +2 -2
- agno/tools/models/gemini.py +3 -3
- agno/tools/models/groq.py +3 -5
- agno/tools/models/nebius.py +2 -2
- agno/tools/models_labs.py +5 -5
- agno/tools/openai.py +4 -9
- agno/tools/opencv.py +3 -3
- agno/tools/replicate.py +7 -7
- agno/utils/events.py +5 -5
- agno/utils/gemini.py +1 -1
- agno/utils/log.py +52 -2
- agno/utils/mcp.py +57 -5
- agno/utils/models/aws_claude.py +1 -1
- agno/utils/models/claude.py +0 -8
- agno/utils/models/cohere.py +1 -1
- agno/utils/models/watsonx.py +1 -1
- agno/utils/openai.py +1 -1
- agno/utils/print_response/team.py +177 -73
- agno/utils/streamlit.py +27 -0
- agno/vectordb/lancedb/lance_db.py +82 -25
- agno/workflow/step.py +7 -7
- agno/workflow/types.py +13 -13
- agno/workflow/workflow.py +37 -28
- {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/METADATA +140 -1
- {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/RECORD +83 -84
- agno-2.0.1.dist-info/licenses/LICENSE +201 -0
- agno/knowledge/reader/gcs_reader.py +0 -67
- agno-2.0.0rc1.dist-info/licenses/LICENSE +0 -375
- {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/WHEEL +0 -0
- {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/top_level.txt +0 -0
agno/models/base.py
CHANGED
@@ -21,11 +21,11 @@ from uuid import uuid4
 from pydantic import BaseModel

 from agno.exceptions import AgentRunException
-from agno.media import Audio,
+from agno.media import Audio, Image, Video
 from agno.models.message import Citations, Message
 from agno.models.metrics import Metrics
 from agno.models.response import ModelResponse, ModelResponseEvent, ToolExecution
-from agno.run.agent import RunContentEvent, RunOutput, RunOutputEvent
+from agno.run.agent import CustomEvent, RunContentEvent, RunOutput, RunOutputEvent
 from agno.run.team import RunContentEvent as TeamRunContentEvent
 from agno.run.team import TeamRunOutputEvent
 from agno.tools.function import Function, FunctionCall, FunctionExecutionResult, UserInputField

@@ -43,9 +43,9 @@ class MessageData:
     response_citations: Optional[Citations] = None
     response_tool_calls: List[Dict[str, Any]] = field(default_factory=list)

-    response_audio: Optional[
-    response_image: Optional[
-    response_video: Optional[
+    response_audio: Optional[Audio] = None
+    response_image: Optional[Image] = None
+    response_video: Optional[Video] = None

     # Data from the provider that we might need on subsequent messages
     response_provider_data: Optional[Dict[str, Any]] = None

@@ -502,9 +502,7 @@ class Model(ABC):
         if assistant_message.citations is not None:
             model_response.citations = assistant_message.citations
         if assistant_message.audio_output is not None:
-            if isinstance(assistant_message.audio_output,
-                model_response.audios = [assistant_message.audio_output]
-            elif isinstance(assistant_message.audio_output, AudioResponse):
+            if isinstance(assistant_message.audio_output, Audio):
                 model_response.audio = assistant_message.audio_output
         if assistant_message.image_output is not None:
             model_response.images = [assistant_message.image_output]

@@ -557,9 +555,7 @@ class Model(ABC):
         if assistant_message.citations is not None:
             model_response.citations = assistant_message.citations
         if assistant_message.audio_output is not None:
-            if isinstance(assistant_message.audio_output,
-                model_response.audios = [assistant_message.audio_output]
-            elif isinstance(assistant_message.audio_output, AudioResponse):
+            if isinstance(assistant_message.audio_output, Audio):
                 model_response.audio = assistant_message.audio_output
         if assistant_message.image_output is not None:
             model_response.images = [assistant_message.image_output]

@@ -993,13 +989,13 @@ class Model(ABC):
             stream_data.response_tool_calls.extend(model_response_delta.tool_calls)
             should_yield = True

-        if model_response_delta.audio is not None and isinstance(model_response_delta.audio,
+        if model_response_delta.audio is not None and isinstance(model_response_delta.audio, Audio):
             if stream_data.response_audio is None:
-                stream_data.response_audio =
+                stream_data.response_audio = Audio(id=str(uuid4()), content="", transcript="")

             from typing import cast

-            audio_response = cast(
+            audio_response = cast(Audio, model_response_delta.audio)

             # Update the stream data with audio information
             if audio_response.id is not None:

@@ -1104,41 +1100,10 @@ class Model(ABC):
         audios = None

         if success and function_execution_result:
-            #
-
-
-
-            images = []
-            for img_artifact in function_execution_result.images:
-                if img_artifact.url:
-                    images.append(Image(url=img_artifact.url))
-                elif img_artifact.content:
-                    images.append(Image(content=img_artifact.content))
-
-            # Convert VideoArtifacts to Videos for message compatibility
-            if function_execution_result.videos:
-                from agno.media import Video
-
-                videos = []
-                for vid_artifact in function_execution_result.videos:
-                    if vid_artifact.url:
-                        videos.append(Video(url=vid_artifact.url))
-                    elif vid_artifact.content:
-                        videos.append(Video(content=vid_artifact.content))
-
-            # Convert AudioArtifacts to Audio for message compatibility
-            if function_execution_result.audios:
-                from agno.media import Audio
-
-                audios = []
-                for aud_artifact in function_execution_result.audios:
-                    if aud_artifact.url:
-                        audios.append(Audio(url=aud_artifact.url))
-                    elif aud_artifact.base64_audio:
-                        import base64
-
-                        audio_bytes = base64.b64decode(aud_artifact.base64_audio)
-                        audios.append(Audio(content=audio_bytes))
+            # With unified classes, no conversion needed - use directly
+            images = function_execution_result.images
+            videos = function_execution_result.videos
+            audios = function_execution_result.audios

         return Message(
             role=self.tool_message_role,

@@ -1223,6 +1188,9 @@ class Model(ABC):
                     if function_call.function.show_result:
                         yield ModelResponse(content=item.content)

+                if isinstance(item, CustomEvent):
+                    function_call_output += str(item)
+
                 # Yield the event itself to bubble it up
                 yield item

@@ -1389,7 +1357,7 @@ class Model(ABC):
     async def arun_function_call(
         self,
         function_call: FunctionCall,
-    ) -> Tuple[Union[bool, AgentRunException], Timer, FunctionCall,
+    ) -> Tuple[Union[bool, AgentRunException], Timer, FunctionCall, FunctionExecutionResult]:
        """Run a single function call and return its success status, timer, and the FunctionCall object."""
        from inspect import isasyncgenfunction, iscoroutine, iscoroutinefunction

@@ -1423,7 +1391,7 @@ class Model(ABC):
             raise e

         function_call_timer.stop()
-        return success, function_call_timer, function_call, result
+        return success, function_call_timer, function_call, result

     async def arun_function_calls(
         self,

@@ -1569,7 +1537,9 @@ class Model(ABC):
                 raise result

             # Unpack result
-            function_call_success, function_call_timer, function_call,
+            function_call_success, function_call_timer, function_call, function_execution_result = result
+
+            updated_session_state = function_execution_result.updated_session_state

             # Handle AgentRunException
             if isinstance(function_call_success, AgentRunException):

@@ -1623,20 +1593,43 @@ class Model(ABC):
                         yield ModelResponse(content=item.content)
                         continue

+                    if isinstance(item, CustomEvent):
+                        function_call_output += str(item)
+
                     # Yield the event itself to bubble it up
                     yield item
+
+                # Yield custom events emitted by the tool
                 else:
                     function_call_output += str(item)
                     if function_call.function.show_result:
                         yield ModelResponse(content=str(item))
             else:
-
+                from agno.tools.function import ToolResult
+
+                if isinstance(function_execution_result.result, ToolResult):
+                    tool_result = function_execution_result.result
+                    function_call_output = tool_result.content
+
+                    if tool_result.images:
+                        function_execution_result.images = tool_result.images
+                    if tool_result.videos:
+                        function_execution_result.videos = tool_result.videos
+                    if tool_result.audios:
+                        function_execution_result.audios = tool_result.audios
+                else:
+                    function_call_output = str(function_call.result)
+
                 if function_call.function.show_result:
                     yield ModelResponse(content=function_call_output)

             # Create and yield function call result
             function_call_result = self.create_function_call_result(
-                function_call,
+                function_call,
+                success=function_call_success,
+                output=function_call_output,
+                timer=function_call_timer,
+                function_execution_result=function_execution_result,
             )
             yield ModelResponse(
                 content=f"{function_call.get_call_str()} completed in {function_call_timer.elapsed:.4f}s.",

@@ -1653,6 +1646,9 @@ class Model(ABC):
                 ],
                 event=ModelResponseEvent.tool_call_completed.value,
                 updated_session_state=updated_session_state,
+                images=function_execution_result.images,
+                videos=function_execution_result.videos,
+                audios=function_execution_result.audios,
             )

             # Add function call result to function call results
agno/models/google/gemini.py
CHANGED
@@ -10,7 +10,7 @@ from uuid import uuid4
 from pydantic import BaseModel

 from agno.exceptions import ModelProviderError
-from agno.media import Audio, File,
+from agno.media import Audio, File, Image, Video
 from agno.models.base import Model
 from agno.models.message import Citations, Message, UrlCitation
 from agno.models.metrics import Metrics

@@ -559,9 +559,14 @@ class Gemini(Model):
             return Part.from_bytes(mime_type=mime_type, data=audio.content)

         # Case 2: Audio is an url
-        elif audio.url is not None
-
-
+        elif audio.url is not None:
+            audio_bytes = audio.get_content_bytes()  # type: ignore
+            if audio_bytes is not None:
+                mime_type = f"audio/{audio.format}" if audio.format else "audio/mp3"
+                return Part.from_bytes(mime_type=mime_type, data=audio_bytes)
+            else:
+                log_warning(f"Failed to download audio from {audio}")
+                return None

         # Case 3: Audio is a local file path
         elif audio.filepath is not None:

@@ -815,9 +820,7 @@ class Gemini(Model):
             if model_response.images is None:
                 model_response.images = []
             model_response.images.append(
-
-                id=str(uuid4()), content=part.inline_data.data, mime_type=part.inline_data.mime_type
-            )
+                Image(id=str(uuid4()), content=part.inline_data.data, mime_type=part.inline_data.mime_type)
             )

             # Extract function call if present

@@ -929,9 +932,7 @@ class Gemini(Model):
             if model_response.images is None:
                 model_response.images = []
             model_response.images.append(
-
-                id=str(uuid4()), content=part.inline_data.data, mime_type=part.inline_data.mime_type
-            )
+                Image(id=str(uuid4()), content=part.inline_data.data, mime_type=part.inline_data.mime_type)
             )

             # Extract function call if present
agno/models/message.py
CHANGED
@@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional, Sequence, Union

 from pydantic import BaseModel, ConfigDict, Field

-from agno.media import Audio,
+from agno.media import Audio, File, Image, Video
 from agno.models.metrics import Metrics
 from agno.utils.log import log_debug, log_error, log_info, log_warning

@@ -71,9 +71,9 @@ class Message(BaseModel):
     files: Optional[Sequence[File]] = None

     # Output from the models
-    audio_output: Optional[
-    image_output: Optional[
-    video_output: Optional[
+    audio_output: Optional[Audio] = None
+    image_output: Optional[Image] = None
+    video_output: Optional[Video] = None

     # The thinking content from the model
     redacted_reasoning_content: Optional[str] = None
agno/models/ollama/chat.py
CHANGED
@@ -149,7 +149,7 @@ class Ollama(Model):
             message_images = []
             for image in message.images:
                 if image.url is not None:
-                    message_images.append(image.
+                    message_images.append(image.get_content_bytes())
                 if image.filepath is not None:
                     message_images.append(image.filepath)  # type: ignore
                 if image.content is not None and isinstance(image.content, bytes):
agno/models/openai/chat.py
CHANGED
@@ -2,12 +2,13 @@ from collections.abc import AsyncIterator
 from dataclasses import dataclass
 from os import getenv
 from typing import Any, Dict, Iterator, List, Literal, Optional, Type, Union
+from uuid import uuid4

 import httpx
 from pydantic import BaseModel

 from agno.exceptions import ModelProviderError
-from agno.media import
+from agno.media import Audio
 from agno.models.base import Model
 from agno.models.message import Message
 from agno.models.metrics import Metrics

@@ -729,14 +730,14 @@ class OpenAIChat(Model):
         # If the audio output modality is requested, we can extract an audio response
         try:
             if isinstance(response_message.audio, dict):
-                model_response.audio =
+                model_response.audio = Audio(
                     id=response_message.audio.get("id"),
                     content=response_message.audio.get("data"),
                     expires_at=response_message.audio.get("expires_at"),
                     transcript=response_message.audio.get("transcript"),
                 )
             else:
-                model_response.audio =
+                model_response.audio = Audio(
                     id=response_message.audio.id,
                     content=response_message.audio.data,
                     expires_at=response_message.audio.expires_at,

@@ -783,21 +784,39 @@ class OpenAIChat(Model):
         # Add audio if present
         if hasattr(choice_delta, "audio") and choice_delta.audio is not None:
             try:
+                audio_data = None
+                audio_id = None
+                audio_expires_at = None
+                audio_transcript = None
+
                 if isinstance(choice_delta.audio, dict):
-
-
-
-
-
+                    audio_data = choice_delta.audio.get("data")
+                    audio_id = choice_delta.audio.get("id")
+                    audio_expires_at = choice_delta.audio.get("expires_at")
+                    audio_transcript = choice_delta.audio.get("transcript")
+                else:
+                    audio_data = choice_delta.audio.data
+                    audio_id = choice_delta.audio.id
+                    audio_expires_at = choice_delta.audio.expires_at
+                    audio_transcript = choice_delta.audio.transcript
+
+                # Only create Audio object if there's actual content
+                if audio_data is not None:
+                    model_response.audio = Audio(
+                        id=audio_id,
+                        content=audio_data,
+                        expires_at=audio_expires_at,
+                        transcript=audio_transcript,
                         sample_rate=24000,
                         mime_type="pcm16",
                     )
-
-
-
-
-
-
+                # If no content but there's transcript/metadata, create minimal Audio object
+                elif audio_transcript is not None or audio_id is not None:
+                    model_response.audio = Audio(
+                        id=audio_id or str(uuid4()),
+                        content=b"",
+                        expires_at=audio_expires_at,
+                        transcript=audio_transcript,
                         sample_rate=24000,
                         mime_type="pcm16",
                     )
agno/models/response.py
CHANGED
@@ -3,7 +3,7 @@ from enum import Enum
 from time import time
 from typing import Any, Dict, List, Optional

-from agno.media import
+from agno.media import Audio, Image, Video
 from agno.models.message import Citations
 from agno.models.metrics import Metrics
 from agno.tools.function import UserInputField

@@ -87,12 +87,12 @@ class ModelResponse:

     content: Optional[Any] = None
     parsed: Optional[Any] = None
-    audio: Optional[
+    audio: Optional[Audio] = None

     # Unified media fields for LLM-generated and tool-generated media artifacts
-    images: Optional[List[
-    videos: Optional[List[
-    audios: Optional[List[
+    images: Optional[List[Image]] = None
+    videos: Optional[List[Video]] = None
+    audios: Optional[List[Audio]] = None

     # Model tool calls
     tool_calls: List[Dict[str, Any]] = field(default_factory=list)
agno/os/app.py
CHANGED
@@ -36,12 +36,11 @@ from agno.os.routers.session import get_session_router
 from agno.os.settings import AgnoAPISettings
 from agno.os.utils import generate_id
 from agno.team.team import Team
-from agno.tools.mcp import MCPTools, MultiMCPTools
 from agno.workflow.workflow import Workflow


 @asynccontextmanager
-async def mcp_lifespan(app, mcp_tools
+async def mcp_lifespan(app, mcp_tools):
     """Manage MCP connection lifecycle inside a FastAPI app"""
     # Startup logic: connect to all contextual MCP servers
     for tool in mcp_tools:

@@ -83,7 +82,12 @@ class AgentOS:
         self.interfaces = interfaces or []

         self.settings: AgnoAPISettings = settings or AgnoAPISettings()
-
+
+        self._app_set = False
+        self.fastapi_app: Optional[FastAPI] = None
+        if fastapi_app:
+            self.fastapi_app = fastapi_app
+            self._app_set = True

         self.interfaces = interfaces or []

@@ -98,14 +102,16 @@ class AgentOS:
         self.lifespan = lifespan

         # List of all MCP tools used inside the AgentOS
-        self.mcp_tools
+        self.mcp_tools = []

         if self.agents:
             for agent in self.agents:
                 # Track all MCP tools to later handle their connection
                 if agent.tools:
                     for tool in agent.tools:
-                        if
+                        # Checking if the tool is a MCPTools or MultiMCPTools instance
+                        type_name = type(tool).__name__
+                        if type_name in ("MCPTools", "MultiMCPTools"):
                             self.mcp_tools.append(tool)

                 agent.initialize_agent()

@@ -118,7 +124,9 @@ class AgentOS:
                 # Track all MCP tools to later handle their connection
                 if team.tools:
                     for tool in team.tools:
-                        if
+                        # Checking if the tool is a MCPTools or MultiMCPTools instance
+                        type_name = type(tool).__name__
+                        if type_name in ("MCPTools", "MultiMCPTools"):
                             self.mcp_tools.append(tool)

                 team.initialize_team()

@@ -213,33 +221,35 @@ class AgentOS:
         if self.enable_mcp and self.mcp_app:
             self.fastapi_app.mount("/", self.mcp_app)

-        # Add middleware
-
-        async def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:
-            return JSONResponse(
-                status_code=exc.status_code,
-                content={"detail": str(exc.detail)},
-            )
+        # Add middleware (only if app is not set)
+        if not self._app_set:

-
-
-                return await call_next(request)
-            except Exception as e:
+            @self.fastapi_app.exception_handler(HTTPException)
+            async def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:
                 return JSONResponse(
-                    status_code=
-                    content={"detail": str(
+                    status_code=exc.status_code,
+                    content={"detail": str(exc.detail)},
                 )

-
+            async def general_exception_handler(request: Request, call_next):
+                try:
+                    return await call_next(request)
+                except Exception as e:
+                    return JSONResponse(
+                        status_code=e.status_code if hasattr(e, "status_code") else 500,  # type: ignore
+                        content={"detail": str(e)},
+                    )
+
+            self.fastapi_app.middleware("http")(general_exception_handler)

-
-
-
-
-
-
-
-
+            self.fastapi_app.add_middleware(
+                CORSMiddleware,
+                allow_origins=self.settings.cors_origin_list,  # type: ignore
+                allow_credentials=True,
+                allow_methods=["*"],
+                allow_headers=["*"],
+                expose_headers=["*"],
+            )

         return self.fastapi_app

@@ -458,6 +468,7 @@ class AgentOS:
         host: str = "localhost",
         port: int = 7777,
         reload: bool = False,
+        workers: Optional[int] = None,
         **kwargs,
     ):
         import uvicorn

@@ -486,4 +497,4 @@ class AgentOS:
             )
         )

-        uvicorn.run(app=app, host=host, port=port, reload=reload, **kwargs)
+        uvicorn.run(app=app, host=host, port=port, reload=reload, workers=workers, **kwargs)