agno 2.0.4__py3-none-any.whl → 2.0.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +127 -102
- agno/db/dynamo/dynamo.py +9 -7
- agno/db/firestore/firestore.py +7 -4
- agno/db/gcs_json/gcs_json_db.py +6 -4
- agno/db/json/json_db.py +10 -6
- agno/db/migrations/v1_to_v2.py +191 -23
- agno/db/mongo/mongo.py +67 -6
- agno/db/mysql/mysql.py +7 -6
- agno/db/mysql/schemas.py +27 -27
- agno/db/postgres/postgres.py +7 -6
- agno/db/redis/redis.py +3 -3
- agno/db/singlestore/singlestore.py +4 -4
- agno/db/sqlite/sqlite.py +7 -6
- agno/db/utils.py +0 -14
- agno/integrations/discord/client.py +1 -0
- agno/knowledge/embedder/openai.py +19 -11
- agno/knowledge/knowledge.py +11 -10
- agno/knowledge/reader/reader_factory.py +7 -3
- agno/knowledge/reader/web_search_reader.py +12 -6
- agno/knowledge/reader/website_reader.py +33 -16
- agno/media.py +70 -0
- agno/models/aimlapi/aimlapi.py +2 -2
- agno/models/base.py +31 -4
- agno/models/cerebras/cerebras_openai.py +2 -2
- agno/models/deepinfra/deepinfra.py +2 -2
- agno/models/deepseek/deepseek.py +2 -2
- agno/models/fireworks/fireworks.py +2 -2
- agno/models/internlm/internlm.py +2 -2
- agno/models/langdb/langdb.py +4 -4
- agno/models/litellm/litellm_openai.py +2 -2
- agno/models/message.py +135 -0
- agno/models/meta/llama_openai.py +2 -2
- agno/models/nebius/nebius.py +2 -2
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +25 -0
- agno/models/nvidia/nvidia.py +2 -2
- agno/models/openai/responses.py +6 -0
- agno/models/openrouter/openrouter.py +2 -2
- agno/models/perplexity/perplexity.py +2 -2
- agno/models/portkey/portkey.py +3 -3
- agno/models/response.py +2 -1
- agno/models/sambanova/sambanova.py +2 -2
- agno/models/together/together.py +2 -2
- agno/models/vercel/v0.py +2 -2
- agno/models/xai/xai.py +2 -2
- agno/os/app.py +162 -42
- agno/os/interfaces/agui/utils.py +98 -134
- agno/os/router.py +3 -1
- agno/os/routers/health.py +0 -1
- agno/os/routers/home.py +52 -0
- agno/os/routers/knowledge/knowledge.py +2 -2
- agno/os/schema.py +21 -0
- agno/os/utils.py +1 -9
- agno/run/agent.py +19 -3
- agno/run/team.py +18 -3
- agno/run/workflow.py +10 -0
- agno/team/team.py +70 -45
- agno/tools/duckduckgo.py +15 -11
- agno/tools/e2b.py +14 -7
- agno/tools/file_generation.py +350 -0
- agno/tools/function.py +2 -0
- agno/tools/googlesearch.py +1 -1
- agno/utils/gemini.py +24 -4
- agno/utils/string.py +32 -0
- agno/utils/tools.py +1 -1
- agno/vectordb/chroma/chromadb.py +66 -25
- agno/vectordb/lancedb/lance_db.py +15 -4
- agno/vectordb/milvus/milvus.py +6 -0
- agno/workflow/step.py +4 -3
- agno/workflow/workflow.py +4 -0
- {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/METADATA +9 -5
- {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/RECORD +75 -72
- agno/knowledge/reader/url_reader.py +0 -128
- {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/WHEEL +0 -0
- {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/top_level.txt +0 -0
agno/os/router.py
CHANGED
@@ -732,7 +732,9 @@ def get_base_router(
             # Process document files
             try:
                 file_content = await file.read()
-                input_files.append(
+                input_files.append(
+                    FileMedia(content=file_content, filename=file.filename, mime_type=file.content_type)
+                )
             except Exception as e:
                 log_error(f"Error processing file {file.filename}: {e}")
                 continue
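
The change above means uploaded documents now keep their original filename and MIME type when they become input files. A minimal sketch of the resulting pattern (the FileMedia import path and the surrounding upload loop are assumptions, not shown in this hunk):

```python
# Minimal sketch, assuming FileMedia is the File class from agno.media and
# `files` is a list of FastAPI UploadFile objects (both assumptions).
from typing import List

from fastapi import UploadFile

from agno.media import File as FileMedia  # assumed import path


async def collect_input_files(files: List[UploadFile]) -> List[FileMedia]:
    input_files: List[FileMedia] = []
    for file in files:
        try:
            file_content = await file.read()
            input_files.append(
                FileMedia(content=file_content, filename=file.filename, mime_type=file.content_type)
            )
        except Exception as e:
            # The router logs and skips unreadable uploads; mirrored here with a plain print.
            print(f"Error processing file {file.filename}: {e}")
            continue
    return input_files
```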
agno/os/routers/health.py
CHANGED
agno/os/routers/home.py
ADDED
@@ -0,0 +1,52 @@
+from typing import TYPE_CHECKING
+
+from fastapi import APIRouter
+
+if TYPE_CHECKING:
+    from agno.os.app import AgentOS
+
+
+def get_home_router(os: "AgentOS") -> APIRouter:
+    router = APIRouter(tags=["Home"])
+
+    @router.get(
+        "/",
+        operation_id="get_api_info",
+        summary="API Information",
+        description=(
+            "Get basic information about this AgentOS API instance, including:\n\n"
+            "- API metadata and version\n"
+            "- Available capabilities overview\n"
+            "- Links to key endpoints and documentation"
+        ),
+        responses={
+            200: {
+                "description": "API information retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "examples": {
+                            "home": {
+                                "summary": "Example home response",
+                                "value": {
+                                    "name": "AgentOS API",
+                                    "description": "AI Agent Operating System API",
+                                    "os_id": "demo-os",
+                                    "version": "1.0.0",
+                                },
+                            }
+                        }
+                    }
+                },
+            }
+        },
+    )
+    async def get_api_info():
+        """Get basic API information and available capabilities"""
+        return {
+            "name": "AgentOS API",
+            "description": os.description or "AI Agent Operating System API",
+            "os_id": os.os_id or "agno-agentos",
+            "version": os.version or "1.0.0",
+        }
+
+    return router
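
The new home router is a plain APIRouter factory, so outside of AgentOS it can be mounted on any FastAPI app. A rough sketch (the SimpleNamespace stand-in only mimics the three attributes get_api_info reads; the real wiring lives in agno/os/app.py and is not shown here):

```python
# Rough sketch: mounting the new home router directly on a FastAPI app.
# SimpleNamespace stands in for an AgentOS instance; only the attributes read by
# get_api_info (description, os_id, version) are needed for this demo.
from types import SimpleNamespace

from fastapi import FastAPI

from agno.os.routers.home import get_home_router

agent_os = SimpleNamespace(description="Demo OS", os_id="demo-os", version="1.0.0")

app = FastAPI()
app.include_router(get_home_router(agent_os))
# GET / -> {"name": "AgentOS API", "description": "Demo OS", "os_id": "demo-os", "version": "1.0.0"}
```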
agno/os/routers/knowledge/knowledge.py
CHANGED

@@ -5,7 +5,6 @@ from typing import Dict, List, Optional

 from fastapi import APIRouter, BackgroundTasks, Depends, File, Form, HTTPException, Path, Query, UploadFile

-from agno.db.utils import generate_deterministic_id
 from agno.knowledge.content import Content, FileData
 from agno.knowledge.knowledge import Knowledge
 from agno.knowledge.reader import ReaderFactory
@@ -34,6 +33,7 @@ from agno.os.schema import (
 from agno.os.settings import AgnoAPISettings
 from agno.os.utils import get_knowledge_instance_by_db_id
 from agno.utils.log import log_debug, log_info
+from agno.utils.string import generate_id

 logger = logging.getLogger(__name__)

@@ -167,7 +167,7 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> AP
         )
         content_hash = knowledge._build_content_hash(content)
         content.content_hash = content_hash
-        content.id =
+        content.id = generate_id(content_hash)

         background_tasks.add_task(process_content, knowledge, content, reader_id, chunker)

agno/os/schema.py
CHANGED

@@ -765,6 +765,7 @@ class TeamSessionDetailSchema(BaseModel):
     team_data: Optional[dict]
     created_at: Optional[datetime]
     updated_at: Optional[datetime]
+    total_tokens: Optional[int]

     @classmethod
     def from_session(cls, session: TeamSession) -> "TeamSessionDetailSchema":
@@ -833,11 +834,14 @@ class RunSchema(BaseModel):
     content: Optional[Union[str, dict]]
     run_response_format: Optional[str]
     reasoning_content: Optional[str]
+    reasoning_steps: Optional[List[dict]]
     metrics: Optional[dict]
     messages: Optional[List[dict]]
     tools: Optional[List[dict]]
     events: Optional[List[dict]]
     created_at: Optional[datetime]
+    references: Optional[List[dict]]
+    reasoning_messages: Optional[List[dict]]

     @classmethod
     def from_dict(cls, run_dict: Dict[str, Any]) -> "RunSchema":
@@ -852,10 +856,13 @@ class RunSchema(BaseModel):
             content=run_dict.get("content", ""),
             run_response_format=run_response_format,
             reasoning_content=run_dict.get("reasoning_content", ""),
+            reasoning_steps=run_dict.get("reasoning_steps", []),
             metrics=run_dict.get("metrics", {}),
             messages=[message for message in run_dict.get("messages", [])] if run_dict.get("messages") else None,
             tools=[tool for tool in run_dict.get("tools", [])] if run_dict.get("tools") else None,
             events=[event for event in run_dict["events"]] if run_dict.get("events") else None,
+            references=run_dict.get("references", []),
+            reasoning_messages=run_dict.get("reasoning_messages", []),
             created_at=datetime.fromtimestamp(run_dict.get("created_at", 0), tz=timezone.utc)
             if run_dict.get("created_at") is not None
             else None,
@@ -868,6 +875,7 @@ class TeamRunSchema(BaseModel):
     team_id: Optional[str]
     content: Optional[Union[str, dict]]
     reasoning_content: Optional[str]
+    reasoning_steps: Optional[List[dict]]
     run_input: Optional[str]
     run_response_format: Optional[str]
     metrics: Optional[dict]
@@ -875,6 +883,8 @@ class TeamRunSchema(BaseModel):
     messages: Optional[List[dict]]
     events: Optional[List[dict]]
     created_at: Optional[datetime]
+    references: Optional[List[dict]]
+    reasoning_messages: Optional[List[dict]]

     @classmethod
     def from_dict(cls, run_dict: Dict[str, Any]) -> "TeamRunSchema":
@@ -888,6 +898,7 @@ class TeamRunSchema(BaseModel):
             content=run_dict.get("content", ""),
             run_response_format=run_response_format,
             reasoning_content=run_dict.get("reasoning_content", ""),
+            reasoning_steps=run_dict.get("reasoning_steps", []),
             metrics=run_dict.get("metrics", {}),
             messages=[message for message in run_dict.get("messages", [])] if run_dict.get("messages") else None,
             tools=[tool for tool in run_dict.get("tools", [])] if run_dict.get("tools") else None,
@@ -895,6 +906,8 @@ class TeamRunSchema(BaseModel):
             created_at=datetime.fromtimestamp(run_dict.get("created_at", 0), tz=timezone.utc)
             if run_dict.get("created_at") is not None
             else None,
+            references=run_dict.get("references", []),
+            reasoning_messages=run_dict.get("reasoning_messages", []),
         )

@@ -910,6 +923,10 @@ class WorkflowRunSchema(BaseModel):
     step_executor_runs: Optional[list[dict]]
     metrics: Optional[dict]
     created_at: Optional[int]
+    reasoning_content: Optional[str]
+    reasoning_steps: Optional[List[dict]]
+    references: Optional[List[dict]]
+    reasoning_messages: Optional[List[dict]]

     @classmethod
     def from_dict(cls, run_response: Dict[str, Any]) -> "WorkflowRunSchema":
@@ -926,6 +943,10 @@ class WorkflowRunSchema(BaseModel):
             step_results=run_response.get("step_results", []),
             step_executor_runs=run_response.get("step_executor_runs", []),
             created_at=run_response["created_at"],
+            reasoning_content=run_response.get("reasoning_content", ""),
+            reasoning_steps=run_response.get("reasoning_steps", []),
+            references=run_response.get("references", []),
+            reasoning_messages=run_response.get("reasoning_messages", []),
         )

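For the session and run endpoints this means reasoning and reference data are now part of the serialized run payloads. A hedged sketch of the shape involved (the payload below is illustrative, not a real recorded run):

```python
# Illustrative payload only: the field names come from the schema changes above,
# the values are made up. Older payloads without these keys default to [] in from_dict.
run_payload = {
    "run_id": "run_123",
    "content": "final answer",
    "reasoning_content": "step-by-step notes",
    "reasoning_steps": [{"title": "Plan", "action": "search the knowledge base"}],
    "references": [{"query": "pricing", "references": []}],
    "reasoning_messages": [{"role": "assistant", "content": "Thinking about pricing..."}],
    "created_at": 1714000000,
}
# RunSchema.from_dict(run_payload) now carries these fields through instead of dropping them.
```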
agno/os/utils.py
CHANGED

@@ -1,5 +1,4 @@
 from typing import Any, Callable, Dict, List, Optional, Union
-from uuid import uuid4

 from fastapi import HTTPException, UploadFile

@@ -139,7 +138,7 @@ def process_document(file: UploadFile) -> Optional[FileMedia]:
         if not content:
             raise HTTPException(status_code=400, detail="Empty file")

-        return FileMedia(content=content)
+        return FileMedia(content=content, filename=file.filename, mime_type=file.content_type)
     except Exception as e:
         logger.error(f"Error processing document {file.filename}: {e}")
         return None
@@ -261,10 +260,3 @@ def _generate_schema_from_params(params: Dict[str, Any]) -> Dict[str, Any]:
         schema["required"] = required

     return schema
-
-
-def generate_id(name: Optional[str] = None) -> str:
-    if name:
-        return name.lower().replace(" ", "-").replace("_", "-")
-    else:
-        return str(uuid4())

agno/run/agent.py
CHANGED

@@ -337,6 +337,8 @@ class RunInput:
             result["videos"] = [vid.to_dict() for vid in self.videos]
         if self.audios:
             result["audios"] = [aud.to_dict() for aud in self.audios]
+        if self.files:
+            result["files"] = [file.to_dict() for file in self.files]

         return result

@@ -392,6 +394,7 @@ class RunOutput:
     images: Optional[List[Image]] = None  # Images attached to the response
     videos: Optional[List[Video]] = None  # Videos attached to the response
     audio: Optional[List[Audio]] = None  # Audio attached to the response
+    files: Optional[List[File]] = None  # Files attached to the response
     response_audio: Optional[Audio] = None  # Model audio response

     # Input media and messages from user
@@ -446,6 +449,7 @@ class RunOutput:
             "images",
             "videos",
             "audio",
+            "files",
             "response_audio",
             "input",
             "citations",
@@ -508,6 +512,14 @@ class RunOutput:
                 else:
                     _dict["audio"].append(aud)

+        if self.files is not None:
+            _dict["files"] = []
+            for file in self.files:
+                if isinstance(file, File):
+                    _dict["files"].append(file.to_dict())
+                else:
+                    _dict["files"].append(file)
+
         if self.response_audio is not None:
             if isinstance(self.response_audio, Audio):
                 _dict["response_audio"] = self.response_audio.to_dict()
@@ -559,7 +571,7 @@ class RunOutput:
         events = [run_output_event_from_dict(event) for event in events] if events else None

         messages = data.pop("messages", None)
-        messages = [Message.
+        messages = [Message.from_dict(message) for message in messages] if messages else None

         citations = data.pop("citations", None)
         citations = Citations.model_validate(citations) if citations else None
@@ -576,6 +588,9 @@ class RunOutput:
         audio = data.pop("audio", [])
         audio = [Audio.model_validate(audio) for audio in audio] if audio else None

+        files = data.pop("files", [])
+        files = [File.model_validate(file) for file in files] if files else None
+
         response_audio = data.pop("response_audio", None)
         response_audio = Audio.model_validate(response_audio) if response_audio else None

@@ -591,7 +606,7 @@ class RunOutput:
         additional_input = data.pop("additional_input", None)

         if additional_input is not None:
-            additional_input = [Message.
+            additional_input = [Message.from_dict(message) for message in additional_input]

         reasoning_steps = data.pop("reasoning_steps", None)
         if reasoning_steps is not None:
@@ -599,7 +614,7 @@ class RunOutput:

         reasoning_messages = data.pop("reasoning_messages", None)
         if reasoning_messages is not None:
-            reasoning_messages = [Message.
+            reasoning_messages = [Message.from_dict(message) for message in reasoning_messages]

         references = data.pop("references", None)
         if references is not None:
@@ -613,6 +628,7 @@ class RunOutput:
             images=images,
             audio=audio,
             videos=videos,
+            files=files,
             response_audio=response_audio,
             input=input_obj,
             events=events,
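
Files now survive the RunOutput serialization round trip alongside images, videos and audio. A short sketch of that round trip; `run_output` is assumed to be an existing RunOutput instance (for example the return value of Agent.run()):

```python
# Sketch: files round-trip through RunOutput serialization like the other media lists.
# `run_output` is an assumed, pre-existing RunOutput instance.
from agno.run.agent import RunOutput

payload = run_output.to_dict()           # payload["files"] holds File.to_dict() entries
restored = RunOutput.from_dict(payload)  # restored.files is rebuilt via File.model_validate(...)
assert (restored.files is None) == (run_output.files is None)
```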
agno/run/team.py
CHANGED

@@ -321,6 +321,8 @@ class TeamRunInput:
             result["videos"] = [vid.to_dict() for vid in self.videos]
         if self.audios:
             result["audios"] = [aud.to_dict() for aud in self.audios]
+        if self.files:
+            result["files"] = [file.to_dict() for file in self.files]

         return result

@@ -370,6 +372,7 @@ class TeamRunOutput:
     images: Optional[List[Image]] = None  # Images from member runs
     videos: Optional[List[Video]] = None  # Videos from member runs
     audio: Optional[List[Audio]] = None  # Audio from member runs
+    files: Optional[List[File]] = None  # Files from member runs

     response_audio: Optional[Audio] = None  # Model audio response

@@ -419,6 +422,7 @@ class TeamRunOutput:
             "images",
             "videos",
             "audio",
+            "files",
             "response_audio",
             "citations",
             "events",
@@ -461,6 +465,9 @@ class TeamRunOutput:
         if self.audio is not None:
             _dict["audio"] = [aud.to_dict() for aud in self.audio]

+        if self.files is not None:
+            _dict["files"] = [file.to_dict() for file in self.files]
+
         if self.response_audio is not None:
             _dict["response_audio"] = self.response_audio.to_dict()

@@ -519,7 +526,7 @@ class TeamRunOutput:
         events = final_events

         messages = data.pop("messages", None)
-        messages = [Message.
+        messages = [Message.from_dict(message) for message in messages] if messages else None

         member_responses = data.pop("member_responses", [])
         parsed_member_responses: List[Union["TeamRunOutput", RunOutput]] = []
@@ -532,7 +539,7 @@ class TeamRunOutput:

         additional_input = data.pop("additional_input", None)
         if additional_input is not None:
-            additional_input = [Message.
+            additional_input = [Message.from_dict(message) for message in additional_input]

         reasoning_steps = data.pop("reasoning_steps", None)
         if reasoning_steps is not None:
@@ -540,7 +547,7 @@ class TeamRunOutput:

         reasoning_messages = data.pop("reasoning_messages", None)
         if reasoning_messages is not None:
-            reasoning_messages = [Message.
+            reasoning_messages = [Message.from_dict(message) for message in reasoning_messages]

         references = data.pop("references", None)
         if references is not None:
@@ -555,6 +562,9 @@ class TeamRunOutput:
         audio = data.pop("audio", [])
         audio = [Audio.model_validate(audio) for audio in audio] if audio else None

+        files = data.pop("files", [])
+        files = [File.model_validate(file) for file in files] if files else None
+
         tools = data.pop("tools", [])
         tools = [ToolExecution.from_dict(tool) for tool in tools] if tools else None

@@ -584,6 +594,7 @@ class TeamRunOutput:
             images=images,
             videos=videos,
             audio=audio,
+            files=files,
             response_audio=response_audio,
             input=input_obj,
             citations=citations,
@@ -618,3 +629,7 @@ class TeamRunOutput:
             if self.audio is None:
                 self.audio = []
             self.audio.extend(run_response.audio)
+        if run_response.files is not None:
+            if self.files is None:
+                self.files = []
+            self.files.extend(run_response.files)
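
TeamRunOutput gets the same treatment, and the final hunk also folds member-run files into the team-level output. A hedged usage sketch (`team` is assumed to be an already configured agno Team instance):

```python
# Hedged sketch: files produced during a team run are now surfaced on the team output.
# `team` is an assumed, already configured agno Team instance.
team_output = team.run("Generate the quarterly report")

for f in team_output.files or []:  # aggregated from the team run and its member runs
    print(f.id)
```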
agno/run/workflow.py
CHANGED

@@ -462,6 +462,7 @@ def workflow_run_output_event_from_dict(data: dict) -> BaseWorkflowRunOutputEven
 class WorkflowRunOutput:
     """Response returned by Workflow.run() functions - kept for backwards compatibility"""

+    input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None
     content: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, Any]] = None
     content_type: str = "str"

@@ -553,6 +554,12 @@ class WorkflowRunOutput:
         if self.metrics is not None:
             _dict["metrics"] = self.metrics.to_dict()

+        if self.input is not None:
+            if isinstance(self.input, BaseModel):
+                _dict["input"] = self.input.model_dump(exclude_none=True)
+            else:
+                _dict["input"] = self.input
+
         if self.content and isinstance(self.content, BaseModel):
             _dict["content"] = self.content.model_dump(exclude_none=True)

@@ -624,6 +631,8 @@ class WorkflowRunOutput:
                 final_events.append(event)
             events = final_events

+        input_data = data.pop("input", None)
+
         return cls(
             step_results=parsed_step_results,
             metadata=metadata,
@@ -634,6 +643,7 @@ class WorkflowRunOutput:
             events=events,
             metrics=workflow_metrics,
             step_executor_runs=step_executor_runs,
+            input=input_data,
             **data,
         )

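WorkflowRunOutput now records the original input, dumping Pydantic models with model_dump(exclude_none=True) in to_dict() and restoring the raw value in from_dict(). A small sketch of what lands in the serialized dict (the ReportRequest model is hypothetical, not part of agno):

```python
# Sketch of the input serialization rule added above.
# ReportRequest is a hypothetical input model used only for illustration.
from typing import Optional

from pydantic import BaseModel


class ReportRequest(BaseModel):
    topic: str
    depth: int = 1
    notes: Optional[str] = None


original = ReportRequest(topic="pricing")
serialized = original.model_dump(exclude_none=True)  # what to_dict() stores under _dict["input"]
print(serialized)  # {'topic': 'pricing', 'depth': 1} -- from_dict() hands this dict back as-is
```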
agno/team/team.py
CHANGED

@@ -104,7 +104,7 @@ from agno.utils.response import (
     generator_wrapper,
 )
 from agno.utils.safe_formatter import SafeFormatter
-from agno.utils.string import parse_response_model_str
+from agno.utils.string import generate_id_from_name, parse_response_model_str
 from agno.utils.team import format_member_agent_task, get_member_id
 from agno.utils.timer import Timer

@@ -580,10 +580,7 @@ class Team:
         If the name is not provided, generate a random UUID.
         """
         if self.id is None:
-
-            self.id = self.name.lower().replace(" ", "-")
-        else:
-            self.id = str(uuid4())
+            self.id = generate_id_from_name(self.name)

     def _set_debug(self, debug_mode: Optional[bool] = None) -> None:
         if self.debug_mode or debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
@@ -659,8 +656,6 @@ class Team:
             member.set_id()

         elif isinstance(member, Team):
-            if member.id is None:
-                member.id = str(uuid4())
             member.parent_team_id = self.id
             for sub_member in member.members:
                 self._initialize_member(sub_member, debug_mode=debug_mode)
@@ -853,7 +848,15 @@ class Team:
             else:
                 self._scrub_media_from_run_output(run_response)

-
+            run_response.status = RunStatus.completed
+
+            # Parse team response model
+            self._convert_response_to_structured_format(run_response=run_response)
+
+            # 3. Add the RunOutput to Team Session
+            session.upsert_run(run_response=run_response)
+
+            # 4. Update Team Memory
             response_iterator = self._make_memories_and_summaries(
                 run_response=run_response,
                 run_messages=run_messages,
@@ -862,14 +865,6 @@ class Team:
             )
             deque(response_iterator, maxlen=0)

-            run_response.status = RunStatus.completed
-
-            # Parse team response model
-            self._convert_response_to_structured_format(run_response=run_response)
-
-            # 4. Add the RunOutput to Team Session
-            session.upsert_run(run_response=run_response)
-
             # 5. Calculate session metrics
             self._update_session_metrics(session=session)

@@ -978,7 +973,12 @@ class Team:
             session=session, run_response=run_response, stream_intermediate_steps=stream_intermediate_steps
         )

-
+        run_response.status = RunStatus.completed
+
+        # 3. Add the run to Team Session
+        session.upsert_run(run_response=run_response)
+
+        # 4. Update Team Memory
         yield from self._make_memories_and_summaries(
             run_response=run_response,
             run_messages=run_messages,
@@ -986,11 +986,6 @@ class Team:
             user_id=user_id,
         )

-        run_response.status = RunStatus.completed
-
-        # 4. Add the run to memory
-        session.upsert_run(run_response=run_response)
-
         # 5. Calculate session metrics
         self._update_session_metrics(session=session)

@@ -1126,13 +1121,17 @@ class Team:
         # Initialize Team
         self.initialize_team(debug_mode=debug_mode)

-        image_artifacts, video_artifacts, audio_artifacts = self._validate_media_object_id(
-            images=images, videos=videos, audios=audio
+        image_artifacts, video_artifacts, audio_artifacts, file_artifacts = self._validate_media_object_id(
+            images=images, videos=videos, audios=audio, files=files
         )

         # Create RunInput to capture the original user input
         run_input = TeamRunInput(
-            input_content=input,
+            input_content=input,
+            images=image_artifacts,
+            videos=video_artifacts,
+            audios=audio_artifacts,
+            files=file_artifacts,
         )

         # Read existing session from database
@@ -1435,7 +1434,15 @@ class Team:
             else:
                 self._scrub_media_from_run_output(run_response)

-
+            run_response.status = RunStatus.completed
+
+            # Parse team response model
+            self._convert_response_to_structured_format(run_response=run_response)
+
+            # 5. Add the run to memory
+            session.upsert_run(run_response=run_response)
+
+            # 6. Update Team Memory
             async for _ in self._amake_memories_and_summaries(
                 run_response=run_response,
                 session=session,
@@ -1444,14 +1451,6 @@ class Team:
             ):
                 pass

-            run_response.status = RunStatus.completed
-
-            # Parse team response model
-            self._convert_response_to_structured_format(run_response=run_response)
-
-            # 6. Add the run to memory
-            session.upsert_run(run_response=run_response)
-
             # 7. Calculate session metrics
             self._update_session_metrics(session=session)

@@ -1599,6 +1598,11 @@ class Team:
         ):
             yield event

+        run_response.status = RunStatus.completed
+
+        # 5. Add the run to Team Session
+        session.upsert_run(run_response=run_response)
+
         # 6. Update Team Memory
         async for event in self._amake_memories_and_summaries(
             run_response=run_response,
@@ -1608,19 +1612,14 @@ class Team:
         ):
             yield event

-
-
-        # 7. Add the run to memory
-        session.upsert_run(run_response=run_response)
-
-        # 8. Calculate session metrics
+        # 7. Calculate session metrics
         self._update_session_metrics(session=session)

         completed_event = self._handle_event(
             create_team_run_completed_event(from_run_response=run_response), run_response, workflow_context
         )

-        #
+        # 8. Save session to storage
         self.save_session(session=session)

         if stream_intermediate_steps:
@@ -1744,13 +1743,17 @@ class Team:
         # Initialize Team
         self.initialize_team(debug_mode=debug_mode)

-        image_artifacts, video_artifacts, audio_artifacts = self._validate_media_object_id(
-            images=images, videos=videos, audios=audio
+        image_artifacts, video_artifacts, audio_artifacts, file_artifacts = self._validate_media_object_id(
+            images=images, videos=videos, audios=audio, files=files
        )

         # Create RunInput to capture the original user input
         run_input = TeamRunInput(
-            input_content=input,
+            input_content=input,
+            images=image_artifacts,
+            videos=video_artifacts,
+            audios=audio_artifacts,
+            files=file_artifacts,
         )

         team_session = self._read_or_create_session(session_id=session_id, user_id=user_id)
@@ -1968,6 +1971,10 @@ class Team:
             for audio in model_response.audios:
                 self._add_audio(audio, run_response)  # Generated audio go to run_response.audio

+        if model_response.files is not None:
+            for file in model_response.files:
+                self._add_file(file, run_response)  # Generated files go to run_response.files
+
     def _update_run_response(
         self, model_response: ModelResponse, run_response: TeamRunOutput, run_messages: RunMessages
     ):
@@ -3123,6 +3130,7 @@ class Team:
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
         audios: Optional[Sequence[Audio]] = None,
+        files: Optional[Sequence[File]] = None,
     ) -> tuple:
         image_list = None
         if images:
@@ -3154,7 +3162,17 @@ class Team:
                     aud.id = str(uuid4())
                 audio_list.append(aud)

-
+        file_list = None
+        if files:
+            file_list = []
+            for file in files:
+                if not file.id:
+                    from uuid import uuid4
+
+                    file.id = str(uuid4())
+                file_list.append(file)
+
+        return image_list, video_list, audio_list, file_list

     def cli_app(
         self,
@@ -6264,6 +6282,13 @@ class Team:
                 run_response.audio = []
             run_response.audio.append(audio)

+    def _add_file(self, file: File, run_response: TeamRunOutput) -> None:
+        """Add file to both the agent's stateful storage and the current run response"""
+        # Add to run response
+        if run_response.files is None:
+            run_response.files = []
+        run_response.files.append(file)
+
     def _update_reasoning_content_from_tool_call(
         self, run_response: TeamRunOutput, tool_name: str, tool_args: Dict[str, Any]
     ) -> Optional[ReasoningStep]: