agno 2.0.5__py3-none-any.whl → 2.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +67 -17
- agno/db/dynamo/dynamo.py +7 -5
- agno/db/firestore/firestore.py +4 -2
- agno/db/gcs_json/gcs_json_db.py +4 -2
- agno/db/json/json_db.py +8 -4
- agno/db/mongo/mongo.py +6 -4
- agno/db/mysql/mysql.py +2 -1
- agno/db/postgres/postgres.py +2 -1
- agno/db/redis/redis.py +1 -1
- agno/db/singlestore/singlestore.py +2 -2
- agno/db/sqlite/sqlite.py +1 -1
- agno/knowledge/chunking/semantic.py +33 -6
- agno/knowledge/embedder/openai.py +19 -11
- agno/knowledge/knowledge.py +4 -3
- agno/knowledge/reader/website_reader.py +33 -16
- agno/media.py +72 -0
- agno/models/aimlapi/aimlapi.py +2 -2
- agno/models/base.py +68 -12
- agno/models/cerebras/cerebras_openai.py +2 -2
- agno/models/deepinfra/deepinfra.py +2 -2
- agno/models/deepseek/deepseek.py +2 -2
- agno/models/fireworks/fireworks.py +2 -2
- agno/models/internlm/internlm.py +2 -2
- agno/models/langdb/langdb.py +4 -4
- agno/models/litellm/litellm_openai.py +2 -2
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/message.py +26 -0
- agno/models/meta/llama_openai.py +2 -2
- agno/models/nebius/nebius.py +2 -2
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/nvidia.py +2 -2
- agno/models/openrouter/openrouter.py +2 -2
- agno/models/perplexity/perplexity.py +2 -2
- agno/models/portkey/portkey.py +3 -3
- agno/models/response.py +2 -1
- agno/models/sambanova/sambanova.py +2 -2
- agno/models/together/together.py +2 -2
- agno/models/vercel/v0.py +2 -2
- agno/models/xai/xai.py +2 -2
- agno/os/app.py +4 -10
- agno/os/router.py +3 -2
- agno/os/routers/evals/evals.py +1 -1
- agno/os/routers/memory/memory.py +1 -1
- agno/os/schema.py +3 -4
- agno/os/utils.py +47 -12
- agno/run/agent.py +20 -0
- agno/run/team.py +18 -1
- agno/run/workflow.py +10 -0
- agno/team/team.py +58 -18
- agno/tools/decorator.py +4 -2
- agno/tools/e2b.py +14 -7
- agno/tools/file_generation.py +350 -0
- agno/tools/function.py +2 -0
- agno/tools/mcp.py +1 -1
- agno/tools/memori.py +1 -53
- agno/utils/events.py +7 -1
- agno/utils/gemini.py +24 -4
- agno/vectordb/chroma/chromadb.py +66 -25
- agno/vectordb/lancedb/lance_db.py +15 -4
- agno/vectordb/milvus/milvus.py +6 -0
- agno/workflow/workflow.py +32 -0
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/METADATA +4 -1
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/RECORD +68 -63
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/WHEEL +0 -0
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/top_level.txt +0 -0
agno/models/nexus/nexus.py
ADDED
@@ -0,0 +1,22 @@
+from dataclasses import dataclass
+
+from agno.models.openai.like import OpenAILike
+
+
+@dataclass
+class Nexus(OpenAILike):
+    """
+    A class for interacting with LLMs using Nexus.
+
+    Attributes:
+        id (str): The id of the Nexus model to use. Default is "openai/gpt-4".
+        name (str): The name of this chat model instance. Default is "Nexus"
+        provider (str): The provider of the model. Default is "Nexus".
+        base_url (str): The base url to which the requests are sent.
+    """
+
+    id: str = "openai/gpt-4"
+    name: str = "Nexus"
+    provider: str = "Nexus"
+
+    base_url: str = "http://localhost:8000/llm/v1/"
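The new Nexus provider is a thin OpenAILike wrapper pointed at a local gateway. A minimal usage sketch, assuming agno.models.nexus re-exports Nexus (as the new __init__.py suggests) and the usual Agent API; the model id and URL are just the defaults shown above:

from agno.agent import Agent
from agno.models.nexus import Nexus

# Route requests through an OpenAI-compatible Nexus gateway running locally.
agent = Agent(model=Nexus(id="openai/gpt-4", base_url="http://localhost:8000/llm/v1/"))
agent.print_response("Hello from Nexus")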
agno/models/nvidia/nvidia.py
CHANGED
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from os import getenv
 from typing import Optional
 
@@ -22,7 +22,7 @@ class Nvidia(OpenAILike):
     name: str = "Nvidia"
     provider: str = "Nvidia"
 
-    api_key: Optional[str] = getenv("NVIDIA_API_KEY")
+    api_key: Optional[str] = field(default_factory=lambda: getenv("NVIDIA_API_KEY"))
     base_url: str = "https://integrate.api.nvidia.com/v1"
 
     supports_native_structured_outputs: bool = False
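The same one-line change repeats across the provider classes that follow: a bare getenv(...) used as a dataclass default is evaluated once, when the module is imported, while field(default_factory=...) defers the lookup to instance creation. A standalone sketch of the difference (plain stdlib, not agno code):

import os
from dataclasses import dataclass, field
from typing import Optional

os.environ.pop("DEMO_API_KEY", None)

@dataclass
class Eager:
    # Default is computed while the class body runs (import time): later env changes are ignored.
    api_key: Optional[str] = os.getenv("DEMO_API_KEY")

@dataclass
class Lazy:
    # Default is computed every time an instance is created.
    api_key: Optional[str] = field(default_factory=lambda: os.getenv("DEMO_API_KEY"))

os.environ["DEMO_API_KEY"] = "set-after-import"
print(Eager().api_key)  # None
print(Lazy().api_key)   # set-after-import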
agno/models/openrouter/openrouter.py
CHANGED
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from os import getenv
 from typing import Optional
 
@@ -23,6 +23,6 @@ class OpenRouter(OpenAILike):
     name: str = "OpenRouter"
     provider: str = "OpenRouter"
 
-    api_key: Optional[str] = getenv("OPENROUTER_API_KEY")
+    api_key: Optional[str] = field(default_factory=lambda: getenv("OPENROUTER_API_KEY"))
     base_url: str = "https://openrouter.ai/api/v1"
     max_tokens: int = 1024
agno/models/perplexity/perplexity.py
CHANGED
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from os import getenv
 from typing import Any, Dict, List, Optional, Type, Union
 
@@ -42,7 +42,7 @@ class Perplexity(OpenAILike):
     name: str = "Perplexity"
     provider: str = "Perplexity"
 
-    api_key: Optional[str] = getenv("PERPLEXITY_API_KEY")
+    api_key: Optional[str] = field(default_factory=lambda: getenv("PERPLEXITY_API_KEY"))
     base_url: str = "https://api.perplexity.ai/"
     max_tokens: int = 1024
     top_k: Optional[float] = None
agno/models/portkey/portkey.py
CHANGED
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from os import getenv
 from typing import Any, Dict, Optional, cast
 
@@ -30,8 +30,8 @@ class Portkey(OpenAILike):
     name: str = "Portkey"
     provider: str = "Portkey"
 
-    portkey_api_key: Optional[str] = getenv("PORTKEY_API_KEY")
-    virtual_key: Optional[str] = getenv("PORTKEY_VIRTUAL_KEY")
+    portkey_api_key: Optional[str] = field(default_factory=lambda: getenv("PORTKEY_API_KEY"))
+    virtual_key: Optional[str] = field(default_factory=lambda: getenv("PORTKEY_VIRTUAL_KEY"))
     config: Optional[Dict[str, Any]] = None
     base_url: str = PORTKEY_GATEWAY_URL
 
agno/models/response.py
CHANGED
@@ -3,7 +3,7 @@ from enum import Enum
 from time import time
 from typing import Any, Dict, List, Optional
 
-from agno.media import Audio, Image, Video
+from agno.media import Audio, File, Image, Video
 from agno.models.message import Citations
 from agno.models.metrics import Metrics
 from agno.tools.function import UserInputField
@@ -98,6 +98,7 @@ class ModelResponse:
     images: Optional[List[Image]] = None
     videos: Optional[List[Video]] = None
     audios: Optional[List[Audio]] = None
+    files: Optional[List[File]] = None
 
     # Model tool calls
     tool_calls: List[Dict[str, Any]] = field(default_factory=list)
agno/models/sambanova/sambanova.py
CHANGED
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from os import getenv
 from typing import Optional
 
@@ -22,7 +22,7 @@ class Sambanova(OpenAILike):
     name: str = "Sambanova"
     provider: str = "Sambanova"
 
-    api_key: Optional[str] = getenv("SAMBANOVA_API_KEY")
+    api_key: Optional[str] = field(default_factory=lambda: getenv("SAMBANOVA_API_KEY"))
     base_url: str = "https://api.sambanova.ai/v1"
 
     supports_native_structured_outputs: bool = False
agno/models/together/together.py
CHANGED
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from os import getenv
 from typing import Optional
 
@@ -21,5 +21,5 @@ class Together(OpenAILike):
     id: str = "mistralai/Mixtral-8x7B-Instruct-v0.1"
     name: str = "Together"
     provider: str = "Together"
-    api_key: Optional[str] = getenv("TOGETHER_API_KEY")
+    api_key: Optional[str] = field(default_factory=lambda: getenv("TOGETHER_API_KEY"))
     base_url: str = "https://api.together.xyz/v1"
agno/models/vercel/v0.py
CHANGED
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from os import getenv
 from typing import Optional
 
@@ -22,5 +22,5 @@ class V0(OpenAILike):
     name: str = "v0"
     provider: str = "Vercel"
 
-    api_key: Optional[str] = getenv("V0_API_KEY")
+    api_key: Optional[str] = field(default_factory=lambda: getenv("V0_API_KEY"))
     base_url: str = "https://api.v0.dev/v1/"
agno/models/xai/xai.py
CHANGED
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from os import getenv
 from typing import Any, Dict, List, Optional, Type, Union
 
@@ -34,7 +34,7 @@ class xAI(OpenAILike):
     name: str = "xAI"
     provider: str = "xAI"
 
-    api_key: Optional[str] = getenv("XAI_API_KEY")
+    api_key: Optional[str] = field(default_factory=lambda: getenv("XAI_API_KEY"))
     base_url: str = "https://api.x.ai/v1"
 
     search_parameters: Optional[Dict[str, Any]] = None
agno/os/app.py
CHANGED
@@ -9,7 +9,6 @@ from fastapi.responses import JSONResponse
 from fastapi.routing import APIRoute
 from rich import box
 from rich.panel import Panel
-from starlette.middleware.cors import CORSMiddleware
 from starlette.requests import Request
 
 from agno.agent.agent import Agent
@@ -37,6 +36,7 @@ from agno.os.routers.memory import get_memory_router
 from agno.os.routers.metrics import get_metrics_router
 from agno.os.routers.session import get_session_router
 from agno.os.settings import AgnoAPISettings
+from agno.os.utils import update_cors_middleware
 from agno.team.team import Team
 from agno.utils.log import logger
 from agno.utils.string import generate_id, generate_id_from_name
@@ -286,14 +286,8 @@ class AgentOS:
 
         self.fastapi_app.middleware("http")(general_exception_handler)
 
-
-
-            allow_origins=self.settings.cors_origin_list,  # type: ignore
-            allow_credentials=True,
-            allow_methods=["*"],
-            allow_headers=["*"],
-            expose_headers=["*"],
-        )
+        # Update CORS middleware
+        update_cors_middleware(self.fastapi_app, self.settings.cors_origin_list)  # type: ignore
 
         return self.fastapi_app
 
@@ -368,7 +362,7 @@ class AgentOS:
         for route in self.fastapi_app.routes:
             for conflict in conflicts:
                 if isinstance(route, APIRoute):
-                    if route.path == conflict["path"] and list(route.methods) == list(conflict["methods"]):
+                    if route.path == conflict["path"] and list(route.methods) == list(conflict["methods"]):  # type: ignore
                         self.fastapi_app.routes.pop(self.fastapi_app.routes.index(route))
 
         self.fastapi_app.include_router(router)
agno/os/router.py
CHANGED
@@ -731,8 +731,9 @@ def get_base_router(
         ]:
             # Process document files
             try:
-
-
+                input_file = process_document(file)
+                if input_file is not None:
+                    input_files.append(input_file)
             except Exception as e:
                 log_error(f"Error processing file {file.filename}: {e}")
                 continue
agno/os/routers/evals/evals.py
CHANGED
@@ -380,7 +380,7 @@ def parse_eval_types_filter(
     eval_types: Optional[str] = Query(
         default=None,
         description="Comma-separated eval types (accuracy,performance,reliability)",
-
+        examples=["accuracy,performance"],
     ),
 ) -> Optional[List[EvalType]]:
    """Parse comma-separated eval types into EvalType enums for filtering evaluation runs."""
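Both this filter and the memory topics filter below now pass an examples list to Query. A hedged sketch of the effect on a generic FastAPI route (the endpoint name is illustrative; the examples list only feeds the generated OpenAPI docs, it does not validate input):

from typing import Optional
from fastapi import FastAPI, Query

app = FastAPI()

@app.get("/evals")
def list_evals(
    eval_types: Optional[str] = Query(
        default=None,
        description="Comma-separated eval types (accuracy,performance,reliability)",
        examples=["accuracy,performance"],  # surfaced as an example value in /docs
    ),
):
    return {"eval_types": eval_types.split(",") if eval_types else []}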
agno/os/routers/memory/memory.py
CHANGED
@@ -396,7 +396,7 @@ def parse_topics(
     topics: Optional[List[str]] = Query(
         default=None,
         description="Comma-separated list of topics to filter by",
-
+        examples=["preferences,technical,communication_style"],
     ),
 ) -> Optional[List[str]]:
    """Parse comma-separated topics into a list for filtering memories by topic."""
agno/os/schema.py
CHANGED
@@ -461,11 +461,8 @@ class TeamResponse(BaseModel):
             "stream_member_events": False,
         }
 
-        if team.model is None:
-            raise ValueError("Team model is required")
-
         team.determine_tools_for_model(
-            model=team.model,
+            model=team.model,  # type: ignore
             session=TeamSession(session_id=str(uuid4()), session_data={}),
             run_response=TeamRunOutput(run_id=str(uuid4())),
             async_mode=True,
@@ -763,6 +760,7 @@ class TeamSessionDetailSchema(BaseModel):
     session_state: Optional[dict]
     metrics: Optional[dict]
     team_data: Optional[dict]
+    chat_history: Optional[List[dict]]
     created_at: Optional[datetime]
     updated_at: Optional[datetime]
     total_tokens: Optional[int]
@@ -784,6 +782,7 @@ class TeamSessionDetailSchema(BaseModel):
             if session.session_data
             else None,
             metrics=session.session_data.get("session_metrics", {}) if session.session_data else None,
+            chat_history=[message.to_dict() for message in session.get_chat_history()],
             created_at=datetime.fromtimestamp(session.created_at, tz=timezone.utc) if session.created_at else None,
             updated_at=datetime.fromtimestamp(session.updated_at, tz=timezone.utc) if session.updated_at else None,
         )
agno/os/utils.py
CHANGED
@@ -1,6 +1,7 @@
 from typing import Any, Callable, Dict, List, Optional, Union
 
-from fastapi import HTTPException, UploadFile
+from fastapi import FastAPI, HTTPException, UploadFile
+from starlette.middleware.cors import CORSMiddleware
 
 from agno.agent.agent import Agent
 from agno.db.base import BaseDb
@@ -109,27 +110,21 @@ def process_image(file: UploadFile) -> Image:
     content = file.file.read()
     if not content:
         raise HTTPException(status_code=400, detail="Empty file")
-    return Image(content=content)
+    return Image(content=content, format=extract_format(file), mime_type=file.content_type)
 
 
 def process_audio(file: UploadFile) -> Audio:
     content = file.file.read()
     if not content:
         raise HTTPException(status_code=400, detail="Empty file")
-    format = None
-    if file.filename and "." in file.filename:
-        format = file.filename.split(".")[-1].lower()
-    elif file.content_type:
-        format = file.content_type.split("/")[-1]
-
-    return Audio(content=content, format=format)
+    return Audio(content=content, format=extract_format(file), mime_type=file.content_type)
 
 
 def process_video(file: UploadFile) -> Video:
     content = file.file.read()
     if not content:
         raise HTTPException(status_code=400, detail="Empty file")
-    return Video(content=content, format=file.content_type)
+    return Video(content=content, format=extract_format(file), mime_type=file.content_type)
 
 
 def process_document(file: UploadFile) -> Optional[FileMedia]:
@@ -137,13 +132,23 @@ def process_document(file: UploadFile) -> Optional[FileMedia]:
         content = file.file.read()
         if not content:
             raise HTTPException(status_code=400, detail="Empty file")
-
-
+        return FileMedia(
+            content=content, filename=file.filename, format=extract_format(file), mime_type=file.content_type
+        )
     except Exception as e:
         logger.error(f"Error processing document {file.filename}: {e}")
         return None
 
 
+def extract_format(file: UploadFile):
+    format = None
+    if file.filename and "." in file.filename:
+        format = file.filename.split(".")[-1].lower()
+    elif file.content_type:
+        format = file.content_type.split("/")[-1]
+    return format
+
+
 def format_tools(agent_tools: List[Union[Dict[str, Any], Toolkit, Function, Callable]]):
     formatted_tools = []
     if agent_tools is not None:
@@ -260,3 +265,33 @@ def _generate_schema_from_params(params: Dict[str, Any]) -> Dict[str, Any]:
     schema["required"] = required
 
     return schema
+
+
+def update_cors_middleware(app: FastAPI, new_origins: list):
+    existing_origins: List[str] = []
+
+    # TODO: Allow more options where CORS is properly merged and user can disable this behaviour
+
+    # Extract existing origins from current CORS middleware
+    for middleware in app.user_middleware:
+        if middleware.cls == CORSMiddleware:
+            if hasattr(middleware, "kwargs"):
+                existing_origins = middleware.kwargs.get("allow_origins", [])
+            break
+    # Merge origins
+    merged_origins = list(set(new_origins + existing_origins))
+    final_origins = [origin for origin in merged_origins if origin != "*"]
+
+    # Remove existing CORS
+    app.user_middleware = [m for m in app.user_middleware if m.cls != CORSMiddleware]
+    app.middleware_stack = None
+
+    # Add updated CORS
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=final_origins,
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+        expose_headers=["*"],
+    )
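update_cors_middleware replaces whatever CORS middleware is already registered on the app with a single one whose origin list is the union of the existing origins and the ones passed in, dropping a bare "*". A rough usage sketch, assuming the helper behaves as in the diff above (middleware.kwargs is available on recent Starlette versions; both origin URLs are placeholders):

from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware

from agno.os.utils import update_cors_middleware

app = FastAPI()
app.add_middleware(CORSMiddleware, allow_origins=["https://myapp.example"])

# Merge the app's origins with the AgentOS ones instead of stacking a second CORS layer.
update_cors_middleware(app, ["https://control-plane.example"])

cors = next(m for m in app.user_middleware if m.cls is CORSMiddleware)
print(cors.kwargs["allow_origins"])  # both origins, order not guaranteed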
agno/run/agent.py
CHANGED
@@ -96,6 +96,7 @@ class RunContentEvent(BaseAgentRunEvent):
     content: Optional[Any] = None
     content_type: str = "str"
     reasoning_content: Optional[str] = None
+    model_provider_data: Optional[Dict[str, Any]] = None
     citations: Optional[Citations] = None
     response_audio: Optional[Audio] = None  # Model audio response
     image: Optional[Image] = None  # Image attached to the response
@@ -119,6 +120,7 @@ class RunCompletedEvent(BaseAgentRunEvent):
     content_type: str = "str"
     reasoning_content: Optional[str] = None
     citations: Optional[Citations] = None
+    model_provider_data: Optional[Dict[str, Any]] = None
     images: Optional[List[Image]] = None  # Images attached to the response
     videos: Optional[List[Video]] = None  # Videos attached to the response
     audio: Optional[List[Audio]] = None  # Audio attached to the response
@@ -337,6 +339,8 @@ class RunInput:
             result["videos"] = [vid.to_dict() for vid in self.videos]
         if self.audios:
             result["audios"] = [aud.to_dict() for aud in self.audios]
+        if self.files:
+            result["files"] = [file.to_dict() for file in self.files]
 
         return result
 
@@ -381,6 +385,8 @@ class RunOutput:
     reasoning_steps: Optional[List[ReasoningStep]] = None
     reasoning_messages: Optional[List[Message]] = None
 
+    model_provider_data: Optional[Dict[str, Any]] = None
+
     model: Optional[str] = None
     model_provider: Optional[str] = None
     messages: Optional[List[Message]] = None
@@ -392,6 +398,7 @@ class RunOutput:
     images: Optional[List[Image]] = None  # Images attached to the response
     videos: Optional[List[Video]] = None  # Videos attached to the response
     audio: Optional[List[Audio]] = None  # Audio attached to the response
+    files: Optional[List[File]] = None  # Files attached to the response
     response_audio: Optional[Audio] = None  # Model audio response
 
     # Input media and messages from user
@@ -446,6 +453,7 @@ class RunOutput:
            "images",
            "videos",
            "audio",
+           "files",
            "response_audio",
            "input",
            "citations",
@@ -508,6 +516,14 @@ class RunOutput:
                 else:
                     _dict["audio"].append(aud)
 
+        if self.files is not None:
+            _dict["files"] = []
+            for file in self.files:
+                if isinstance(file, File):
+                    _dict["files"].append(file.to_dict())
+                else:
+                    _dict["files"].append(file)
+
         if self.response_audio is not None:
             if isinstance(self.response_audio, Audio):
                 _dict["response_audio"] = self.response_audio.to_dict()
@@ -576,6 +592,9 @@ class RunOutput:
         audio = data.pop("audio", [])
         audio = [Audio.model_validate(audio) for audio in audio] if audio else None
 
+        files = data.pop("files", [])
+        files = [File.model_validate(file) for file in files] if files else None
+
         response_audio = data.pop("response_audio", None)
         response_audio = Audio.model_validate(response_audio) if response_audio else None
 
@@ -613,6 +632,7 @@ class RunOutput:
             images=images,
             audio=audio,
             videos=videos,
+            files=files,
             response_audio=response_audio,
             input=input_obj,
             events=events,
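Run input and output now carry files alongside images, videos and audio, and they survive the to_dict/from_dict round trip. A hedged sketch of passing a file into an agent run, assuming Agent.run accepts a files argument and that agno.media.File takes raw bytes with a filename and mime_type (as the new media.py fields suggest); the model id and file name are placeholders:

from agno.agent import Agent
from agno.media import File
from agno.models.openai import OpenAIChat

agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))

with open("report.pdf", "rb") as f:
    run = agent.run(
        "Summarize the attached report.",
        files=[File(content=f.read(), filename="report.pdf", mime_type="application/pdf")],
    )

# Files attached to the response (e.g. by generation tools) are serialized as well.
print(run.to_dict().get("files"))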
agno/run/team.py
CHANGED
@@ -98,6 +98,7 @@ class RunContentEvent(BaseTeamRunEvent):
     content: Optional[Any] = None
     content_type: str = "str"
     reasoning_content: Optional[str] = None
+    model_provider_data: Optional[Dict[str, Any]] = None
     citations: Optional[Citations] = None
     response_audio: Optional[Audio] = None  # Model audio response
     image: Optional[Image] = None  # Image attached to the response
@@ -121,6 +122,7 @@ class RunCompletedEvent(BaseTeamRunEvent):
     content_type: str = "str"
     reasoning_content: Optional[str] = None
     citations: Optional[Citations] = None
+    model_provider_data: Optional[Dict[str, Any]] = None
     images: Optional[List[Image]] = None  # Images attached to the response
     videos: Optional[List[Video]] = None  # Videos attached to the response
     audio: Optional[List[Audio]] = None  # Audio attached to the response
@@ -321,6 +323,8 @@ class TeamRunInput:
             result["videos"] = [vid.to_dict() for vid in self.videos]
         if self.audios:
             result["audios"] = [aud.to_dict() for aud in self.audios]
+        if self.files:
+            result["files"] = [file.to_dict() for file in self.files]
 
         return result
 
@@ -370,6 +374,7 @@ class TeamRunOutput:
     images: Optional[List[Image]] = None  # Images from member runs
     videos: Optional[List[Video]] = None  # Videos from member runs
     audio: Optional[List[Audio]] = None  # Audio from member runs
+    files: Optional[List[File]] = None  # Files from member runs
 
     response_audio: Optional[Audio] = None  # Model audio response
 
@@ -379,7 +384,7 @@ class TeamRunOutput:
     reasoning_content: Optional[str] = None
 
     citations: Optional[Citations] = None
-
+    model_provider_data: Optional[Dict[str, Any]] = None
     metadata: Optional[Dict[str, Any]] = None
 
     references: Optional[List[MessageReferences]] = None
@@ -419,6 +424,7 @@ class TeamRunOutput:
            "images",
            "videos",
            "audio",
+           "files",
            "response_audio",
            "citations",
            "events",
@@ -461,6 +467,9 @@ class TeamRunOutput:
         if self.audio is not None:
             _dict["audio"] = [aud.to_dict() for aud in self.audio]
 
+        if self.files is not None:
+            _dict["files"] = [file.to_dict() for file in self.files]
+
         if self.response_audio is not None:
             _dict["response_audio"] = self.response_audio.to_dict()
 
@@ -555,6 +564,9 @@ class TeamRunOutput:
         audio = data.pop("audio", [])
         audio = [Audio.model_validate(audio) for audio in audio] if audio else None
 
+        files = data.pop("files", [])
+        files = [File.model_validate(file) for file in files] if files else None
+
         tools = data.pop("tools", [])
         tools = [ToolExecution.from_dict(tool) for tool in tools] if tools else None
 
@@ -584,6 +596,7 @@ class TeamRunOutput:
             images=images,
             videos=videos,
             audio=audio,
+            files=files,
             response_audio=response_audio,
             input=input_obj,
             citations=citations,
@@ -618,3 +631,7 @@ class TeamRunOutput:
             if self.audio is None:
                 self.audio = []
             self.audio.extend(run_response.audio)
+        if run_response.files is not None:
+            if self.files is None:
+                self.files = []
+            self.files.extend(run_response.files)
agno/run/workflow.py
CHANGED
@@ -462,6 +462,7 @@ def workflow_run_output_event_from_dict(data: dict) -> BaseWorkflowRunOutputEvent:
 class WorkflowRunOutput:
     """Response returned by Workflow.run() functions - kept for backwards compatibility"""
 
+    input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None
     content: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, Any]] = None
     content_type: str = "str"
 
@@ -553,6 +554,12 @@ class WorkflowRunOutput:
         if self.metrics is not None:
             _dict["metrics"] = self.metrics.to_dict()
 
+        if self.input is not None:
+            if isinstance(self.input, BaseModel):
+                _dict["input"] = self.input.model_dump(exclude_none=True)
+            else:
+                _dict["input"] = self.input
+
         if self.content and isinstance(self.content, BaseModel):
             _dict["content"] = self.content.model_dump(exclude_none=True)
 
@@ -624,6 +631,8 @@ class WorkflowRunOutput:
             final_events.append(event)
         events = final_events
 
+        input_data = data.pop("input", None)
+
         return cls(
             step_results=parsed_step_results,
             metadata=metadata,
@@ -634,6 +643,7 @@ class WorkflowRunOutput:
             events=events,
             metrics=workflow_metrics,
             step_executor_runs=step_executor_runs,
+            input=input_data,
             **data,
         )