agno 2.2.9__py3-none-any.whl → 2.2.11__py3-none-any.whl
- agno/agent/agent.py +27 -5
- agno/db/dynamo/utils.py +1 -1
- agno/db/firestore/utils.py +1 -1
- agno/db/gcs_json/utils.py +1 -1
- agno/db/in_memory/utils.py +1 -1
- agno/db/json/utils.py +1 -1
- agno/db/mongo/utils.py +3 -3
- agno/db/mysql/utils.py +1 -1
- agno/db/postgres/utils.py +1 -1
- agno/db/redis/utils.py +1 -1
- agno/db/singlestore/utils.py +1 -1
- agno/db/sqlite/utils.py +1 -1
- agno/knowledge/chunking/agentic.py +8 -9
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/sentence_transformer.py +6 -2
- agno/knowledge/reader/base.py +6 -2
- agno/knowledge/utils.py +20 -0
- agno/models/anthropic/claude.py +45 -9
- agno/models/base.py +4 -0
- agno/os/app.py +35 -19
- agno/os/routers/health.py +5 -3
- agno/os/routers/knowledge/knowledge.py +43 -17
- agno/os/routers/knowledge/schemas.py +4 -3
- agno/run/agent.py +11 -1
- agno/team/team.py +20 -3
- agno/tools/file_generation.py +4 -4
- agno/tools/gmail.py +179 -0
- agno/tools/parallel.py +314 -0
- agno/utils/models/claude.py +2 -1
- agno/workflow/agent.py +2 -2
- agno/workflow/condition.py +26 -4
- agno/workflow/loop.py +9 -0
- agno/workflow/parallel.py +39 -16
- agno/workflow/router.py +25 -4
- agno/workflow/step.py +163 -91
- agno/workflow/steps.py +9 -0
- agno/workflow/types.py +20 -1
- agno/workflow/workflow.py +117 -30
- {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/METADATA +4 -1
- {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/RECORD +43 -42
- {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/WHEEL +0 -0
- {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/licenses/LICENSE +0 -0
- {agno-2.2.9.dist-info → agno-2.2.11.dist-info}/top_level.txt +0 -0
agno/models/base.py
CHANGED
```diff
@@ -692,6 +692,8 @@ class Model(ABC):
         if model_response.extra is None:
             model_response.extra = {}
         model_response.extra.update(provider_response.extra)
+        if provider_response.provider_data is not None:
+            model_response.provider_data = provider_response.provider_data
 
     async def _aprocess_model_response(
         self,
@@ -745,6 +747,8 @@ class Model(ABC):
         if model_response.extra is None:
             model_response.extra = {}
         model_response.extra.update(provider_response.extra)
+        if provider_response.provider_data is not None:
+            model_response.provider_data = provider_response.provider_data
 
     def _populate_assistant_message(
         self,
```
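For reference, a minimal standalone sketch of the propagation rule these two hunks add: `extra` is merged unconditionally, while `provider_data` is copied only when the provider actually returned one, so a previously set value is never overwritten with `None`. The dataclasses below are simplified stand-ins, not the real agno types.

```python
from dataclasses import dataclass, field
from typing import Any, Dict, Optional


@dataclass
class ProviderResponse:  # stand-in for the provider-level response
    extra: Dict[str, Any] = field(default_factory=dict)
    provider_data: Optional[Dict[str, Any]] = None


@dataclass
class ModelResponse:  # stand-in for agno's ModelResponse
    extra: Dict[str, Any] = field(default_factory=dict)
    provider_data: Optional[Dict[str, Any]] = None


def merge(model_response: ModelResponse, provider_response: ProviderResponse) -> None:
    # Mirror of the diff: extra is always merged, provider_data is copied
    # only when present, so an earlier value is never clobbered with None.
    model_response.extra.update(provider_response.extra)
    if provider_response.provider_data is not None:
        model_response.provider_data = provider_response.provider_data
```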
agno/os/app.py
CHANGED
```diff
@@ -232,34 +232,53 @@ class AgentOS:
         self._initialize_workflows()
         self._auto_discover_databases()
         self._auto_discover_knowledge_instances()
+
+        if self.enable_mcp_server:
+            from agno.os.mcp import get_mcp_server
+
+            self._mcp_app = get_mcp_server(self)
+
         self._reprovision_routers(app=app)
 
     def _reprovision_routers(self, app: FastAPI) -> None:
         """Re-provision all routes for the AgentOS."""
         updated_routers = [
             get_session_router(dbs=self.dbs),
-            get_memory_router(dbs=self.dbs),
-            get_eval_router(dbs=self.dbs, agents=self.agents, teams=self.teams),
             get_metrics_router(dbs=self.dbs),
             get_knowledge_router(knowledge_instances=self.knowledge_instances),
+            get_memory_router(dbs=self.dbs),
+            get_eval_router(dbs=self.dbs, agents=self.agents, teams=self.teams),
         ]
 
         # Clear all previously existing routes
-        app.router.routes = [
+        app.router.routes = [
+            route
+            for route in app.router.routes
+            if hasattr(route, "path")
+            and route.path in ["/docs", "/redoc", "/openapi.json", "/docs/oauth2-redirect"]
+            or route.path.startswith("/mcp")  # type: ignore
+        ]
+
+        # Add the built-in routes
+        self._add_built_in_routes(app=app)
 
         # Add the updated routes
         for router in updated_routers:
             self._add_router(app, router)
 
-        #
-        self.
+        # Mount MCP if needed
+        if self.enable_mcp_server and self._mcp_app:
+            app.mount("/", self._mcp_app)
 
     def _add_built_in_routes(self, app: FastAPI) -> None:
         """Add all AgentOSbuilt-in routes to the given app."""
+        # Add the home router if MCP server is not enabled
+        if not self.enable_mcp_server:
+            self._add_router(app, get_home_router(self))
+
+        self._add_router(app, get_health_router(health_endpoint="/health"))
         self._add_router(app, get_base_router(self, settings=self.settings))
         self._add_router(app, get_websocket_router(self, settings=self.settings))
-        self._add_router(app, get_health_router())
-        self._add_router(app, get_home_router(self))
 
         # Add A2A interface if relevant
         has_a2a_interface = False
@@ -275,10 +294,6 @@ class AgentOS:
         self.interfaces.append(a2a_interface)
         self._add_router(app, a2a_interface.get_router())
 
-        # Add the home router if MCP server is not enabled
-        if not self.enable_mcp_server:
-            self._add_router(app, get_home_router(self))
-
     def _make_app(self, lifespan: Optional[Any] = None) -> FastAPI:
         # Adjust the FastAPI app lifespan to handle MCP connections if relevant
         app_lifespan = lifespan
@@ -378,20 +393,24 @@ class AgentOS:
         # Collect all lifespans that need to be combined
         lifespans = []
 
+        # The user provided lifespan
+        if self.lifespan:
+            # Wrap the user lifespan with agent_os parameter
+            wrapped_lifespan = self._add_agent_os_to_lifespan_function(self.lifespan)
+            lifespans.append(wrapped_lifespan)
+
+        # The provided app's existing lifespan
         if fastapi_app.router.lifespan_context:
             lifespans.append(fastapi_app.router.lifespan_context)
 
+        # The MCP tools lifespan
         if self.mcp_tools:
             lifespans.append(partial(mcp_lifespan, mcp_tools=self.mcp_tools))
 
+        # The /mcp server lifespan
         if self.enable_mcp_server and self._mcp_app:
            lifespans.append(self._mcp_app.lifespan)
 
-        if self.lifespan:
-            # Wrap the user lifespan with agent_os parameter
-            wrapped_lifespan = self._add_agent_os_to_lifespan_function(self.lifespan)
-            lifespans.append(wrapped_lifespan)
-
         # Combine lifespans and set them in the app
         if lifespans:
             fastapi_app.router.lifespan_context = _combine_app_lifespans(lifespans)
@@ -447,9 +466,6 @@ class AgentOS:
         # Mount MCP if needed
         if self.enable_mcp_server and self._mcp_app:
             fastapi_app.mount("/", self._mcp_app)
-        else:
-            # Add the home router
-            self._add_router(fastapi_app, get_home_router(self))
 
         if not self._app_set:
 
```
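A hedged sketch of how the reordered wiring is exercised. The `AgentOS` and `Agent` import paths and constructor arguments below are assumptions based on the public API and the attributes used in the diff (`self.lifespan`, `self.enable_mcp_server`); model configuration is omitted for brevity. With `enable_mcp_server=True`, the MCP app is now built during initialization, mounted at `/`, its `/mcp` routes survive re-provisioning, and the home router is skipped.

```python
from contextlib import asynccontextmanager

from agno.agent import Agent
from agno.os import AgentOS


@asynccontextmanager
async def my_lifespan(app, agent_os=None):  # agent_os injected by the wrapper, per the diff comment
    print("AgentOS starting")
    yield
    print("AgentOS stopping")


agent = Agent(name="demo")  # model configuration omitted
agent_os = AgentOS(agents=[agent], enable_mcp_server=True, lifespan=my_lifespan)
app = agent_os.get_app()  # MCP mounted at "/", built-in routes re-provisioned
```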
agno/os/routers/health.py
CHANGED
```diff
@@ -5,13 +5,13 @@ from fastapi import APIRouter
 from agno.os.schema import HealthResponse
 
 
-def get_health_router() -> APIRouter:
+def get_health_router(health_endpoint: str = "/health") -> APIRouter:
     router = APIRouter(tags=["Health"])
 
     started_time_stamp = datetime.now(timezone.utc).timestamp()
 
     @router.get(
-
+        health_endpoint,
         operation_id="health_check",
         summary="Health Check",
         description="Check the health status of the AgentOS API. Returns a simple status indicator.",
@@ -19,7 +19,9 @@ def get_health_router() -> APIRouter:
         responses={
             200: {
                 "description": "API is healthy and operational",
-                "content": {
+                "content": {
+                    "application/json": {"example": {"status": "ok", "instantiated_at": str(started_time_stamp)}}
+                },
             }
         },
     )
```
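Since `get_health_router()` now takes the path as a parameter (defaulting to the previous `/health` behavior), callers can relocate the health check. A minimal FastAPI wiring sketch, where the custom path is a hypothetical example:

```python
from fastapi import FastAPI

from agno.os.routers.health import get_health_router

app = FastAPI()
app.include_router(get_health_router())  # default: GET /health
# or at a custom path (hypothetical):
# app.include_router(get_health_router(health_endpoint="/livez"))
```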
agno/os/routers/knowledge/knowledge.py
CHANGED
```diff
@@ -102,6 +102,8 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
     text_content: Optional[str] = Form(None, description="Raw text content to process"),
     reader_id: Optional[str] = Form(None, description="ID of the reader to use for content processing"),
     chunker: Optional[str] = Form(None, description="Chunking strategy to apply during processing"),
+    chunk_size: Optional[int] = Form(None, description="Chunk size to use for processing"),
+    chunk_overlap: Optional[int] = Form(None, description="Chunk overlap to use for processing"),
     db_id: Optional[str] = Query(default=None, description="Database ID to use for content storage"),
 ):
     knowledge = get_knowledge_instance_by_db_id(knowledge_instances, db_id)
@@ -172,7 +174,7 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
     content.content_hash = content_hash
     content.id = generate_id(content_hash)
 
-    background_tasks.add_task(process_content, knowledge, content, reader_id, chunker)
+    background_tasks.add_task(process_content, knowledge, content, reader_id, chunker, chunk_size, chunk_overlap)
 
     response = ContentResponseSchema(
         id=content.id,
@@ -801,36 +803,55 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
             "key": "AgenticChunker",
             "name": "AgenticChunker",
             "description": "Chunking strategy that uses an LLM to determine natural breakpoints in the text",
+            "metadata": {"chunk_size": 5000},
         },
         "DocumentChunker": {
             "key": "DocumentChunker",
             "name": "DocumentChunker",
             "description": "A chunking strategy that splits text based on document structure like paragraphs and sections",
-
-
-
-
-            "description": "Chunking strategy that recursively splits text into chunks by finding natural break points",
-        },
-        "SemanticChunker": {
-            "key": "SemanticChunker",
-            "name": "SemanticChunker",
-            "description": "Chunking strategy that splits text into semantic chunks using chonkie",
+            "metadata": {
+                "chunk_size": 5000,
+                "chunk_overlap": 0,
+            },
         },
         "FixedSizeChunker": {
             "key": "FixedSizeChunker",
             "name": "FixedSizeChunker",
             "description": "Chunking strategy that splits text into fixed-size chunks with optional overlap",
+            "metadata": {
+                "chunk_size": 5000,
+                "chunk_overlap": 0,
+            },
+        },
+        "MarkdownChunker": {
+            "key": "MarkdownChunker",
+            "name": "MarkdownChunker",
+            "description": "A chunking strategy that splits markdown based on structure like headers, paragraphs and sections",
+            "metadata": {
+                "chunk_size": 5000,
+                "chunk_overlap": 0,
+            },
+        },
+        "RecursiveChunker": {
+            "key": "RecursiveChunker",
+            "name": "RecursiveChunker",
+            "description": "Chunking strategy that recursively splits text into chunks by finding natural break points",
+            "metadata": {
+                "chunk_size": 5000,
+                "chunk_overlap": 0,
+            },
         },
         "RowChunker": {
             "key": "RowChunker",
             "name": "RowChunker",
             "description": "RowChunking chunking strategy",
+            "metadata": {},
         },
-        "
-            "key": "
-            "name": "
-            "description": "
+        "SemanticChunker": {
+            "key": "SemanticChunker",
+            "name": "SemanticChunker",
+            "description": "Chunking strategy that splits text into semantic chunks using chonkie",
+            "metadata": {"chunk_size": 5000},
         },
     },
     "vector_dbs": [
@@ -896,7 +917,10 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
         chunker_key = chunker_info.get("key")
         if chunker_key:
             chunkers_dict[chunker_key] = ChunkerSchema(
-                key=chunker_key,
+                key=chunker_key,
+                name=chunker_info.get("name"),
+                description=chunker_info.get("description"),
+                metadata=chunker_info.get("metadata", {}),
             )
 
     vector_dbs = []
@@ -929,6 +953,8 @@ async def process_content(
     content: Content,
     reader_id: Optional[str] = None,
    chunker: Optional[str] = None,
+    chunk_size: Optional[int] = None,
+    chunk_overlap: Optional[int] = None,
 ):
     """Background task to process the content"""
 
@@ -951,7 +977,7 @@ async def process_content(
         content.reader = reader
     if chunker and content.reader:
         # Set the chunker name on the reader - let the reader handle it internally
-        content.reader.set_chunking_strategy_from_string(chunker)
+        content.reader.set_chunking_strategy_from_string(chunker, chunk_size=chunk_size, overlap=chunk_overlap)
         log_debug(f"Set chunking strategy: {chunker}")
 
     log_debug(f"Using reader: {content.reader.__class__.__name__}")
```
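A hypothetical client call against the upload route, showing how the two new form fields travel alongside `chunker`. Host, port, and route path are assumptions; the field names mirror the `Form(...)` parameters added above.

```python
import httpx

# chunk_size / chunk_overlap are forwarded through process_content to the
# reader's chunking strategy on the server side.
resp = httpx.post(
    "http://localhost:7777/knowledge/content",  # endpoint path assumed
    data={
        "text_content": "A long document to ingest...",
        "chunker": "FixedSizeChunker",
        "chunk_size": 1000,
        "chunk_overlap": 100,
    },
)
print(resp.status_code, resp.json())
```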
agno/os/routers/knowledge/schemas.py
CHANGED
```diff
@@ -106,9 +106,10 @@ class ReaderSchema(BaseModel):
 
 
 class ChunkerSchema(BaseModel):
-    key: str
-    name: Optional[str] =
-    description: Optional[str] =
+    key: str
+    name: Optional[str] = None
+    description: Optional[str] = None
+    metadata: Optional[Dict[str, Any]] = None
 
 
 class VectorDbSchema(BaseModel):
```
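The schema now round-trips chunker metadata. A quick construction check against a local mirror of the model (assuming pydantic's `BaseModel`, consistent with the class definition above):

```python
from typing import Any, Dict, Optional

from pydantic import BaseModel


class ChunkerSchema(BaseModel):  # local mirror of the updated schema
    key: str
    name: Optional[str] = None
    description: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None


print(
    ChunkerSchema(
        key="FixedSizeChunker",
        name="FixedSizeChunker",
        description="Splits text into fixed-size chunks with optional overlap",
        metadata={"chunk_size": 5000, "chunk_overlap": 0},
    ).model_dump()
)
```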
agno/run/agent.py
CHANGED
```diff
@@ -696,7 +696,17 @@ class RunOutput:
             data = data.pop("run")
 
         events = data.pop("events", None)
-
+        final_events = []
+        for event in events or []:
+            if "agent_id" in event:
+                event = run_output_event_from_dict(event)
+            else:
+                # Use the factory from response.py for agent events
+                from agno.run.team import team_run_output_event_from_dict
+
+                event = team_run_output_event_from_dict(event)
+            final_events.append(event)
+        events = final_events
 
         messages = data.pop("messages", None)
         messages = [Message.from_dict(message) for message in messages] if messages else None
```
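The dispatch rule is small enough to show standalone: the presence of `agent_id` selects the agent-event factory, and anything else falls through to the team-event factory. The `parse_*` functions below are stand-ins for `run_output_event_from_dict` and `team_run_output_event_from_dict`.

```python
from typing import Any, Dict, List


def parse_agent_event(event: Dict[str, Any]) -> Dict[str, Any]:
    return {"kind": "agent", **event}  # stand-in for run_output_event_from_dict


def parse_team_event(event: Dict[str, Any]) -> Dict[str, Any]:
    return {"kind": "team", **event}  # stand-in for team_run_output_event_from_dict


def rehydrate_events(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    # Same branching as the diff: "agent_id" marks an agent event.
    final_events = []
    for event in events or []:
        if "agent_id" in event:
            final_events.append(parse_agent_event(event))
        else:
            final_events.append(parse_team_event(event))
    return final_events


print(rehydrate_events([{"agent_id": "a1"}, {"team_id": "t1"}]))
```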
agno/team/team.py
CHANGED
```diff
@@ -1762,6 +1762,7 @@ class Team:
         stream_intermediate_steps: Optional[bool] = None,
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
+        run_context: Optional[RunContext] = None,
         user_id: Optional[str] = None,
         retries: Optional[int] = None,
         audio: Optional[Sequence[Audio]] = None,
@@ -1789,6 +1790,7 @@ class Team:
         stream_intermediate_steps: Optional[bool] = None,
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
+        run_context: Optional[RunContext] = None,
         user_id: Optional[str] = None,
         retries: Optional[int] = None,
         audio: Optional[Sequence[Audio]] = None,
@@ -1862,7 +1864,7 @@ class Team:
         dependencies = dependencies if dependencies is not None else self.dependencies
 
         # Initialize run context
-        run_context = RunContext(
+        run_context = run_context or RunContext(
             run_id=run_id,
             session_id=session_id,
             user_id=user_id,
@@ -2593,6 +2595,7 @@ class Team:
         stream_intermediate_steps: Optional[bool] = None,
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
+        run_context: Optional[RunContext] = None,
         user_id: Optional[str] = None,
         retries: Optional[int] = None,
         audio: Optional[Sequence[Audio]] = None,
@@ -2619,6 +2622,7 @@ class Team:
         stream_intermediate_steps: Optional[bool] = None,
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
+        run_context: Optional[RunContext] = None,
         user_id: Optional[str] = None,
         retries: Optional[int] = None,
         audio: Optional[Sequence[Audio]] = None,
@@ -2646,6 +2650,7 @@ class Team:
         stream_intermediate_steps: Optional[bool] = None,
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
+        run_context: Optional[RunContext] = None,
         user_id: Optional[str] = None,
         retries: Optional[int] = None,
         audio: Optional[Sequence[Audio]] = None,
@@ -2750,7 +2755,7 @@ class Team:
         effective_filters = self._get_effective_filters(knowledge_filters)
 
         # Initialize run context
-        run_context = RunContext(
+        run_context = run_context or RunContext(
             run_id=run_id,
             session_id=session_id,
             user_id=user_id,
@@ -4266,7 +4271,7 @@ class Team:
         """Calculate session metrics"""
 
         session_messages: List[Message] = []
-        for run in session.runs
+        for run in session.runs or []:
             if run.messages is not None:
                 for m in run.messages:
                     # Skipping messages from history to avoid duplicates
@@ -7514,6 +7519,9 @@ class Team:
             session = self.db.get_session(session_id=session_id, session_type=session_type)
             return session  # type: ignore
         except Exception as e:
+            import traceback
+
+            traceback.print_exc(limit=3)
             log_warning(f"Error getting session from db: {e}")
             return None
 
@@ -7528,6 +7536,9 @@ class Team:
             session = await self.db.get_session(session_id=session_id, session_type=session_type)
             return session  # type: ignore
         except Exception as e:
+            import traceback
+
+            traceback.print_exc(limit=3)
             log_warning(f"Error getting session from db: {e}")
             return None
 
@@ -7539,6 +7550,9 @@ class Team:
                 raise ValueError("Db not initialized")
             return self.db.upsert_session(session=session)  # type: ignore
         except Exception as e:
+            import traceback
+
+            traceback.print_exc(limit=3)
             log_warning(f"Error upserting session into db: {e}")
             return None
 
@@ -7550,6 +7564,9 @@ class Team:
                 raise ValueError("Db not initialized")
             return await self.db.upsert_session(session=session)  # type: ignore
         except Exception as e:
+            import traceback
+
+            traceback.print_exc(limit=3)
             log_warning(f"Error upserting session into db: {e}")
             return None
 
```
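A hedged sketch of the new parameter in use: a caller that already owns a context (for example, a workflow step) can pass `run_context` so `Team.run()` reuses it via `run_context = run_context or RunContext(...)` instead of minting a fresh one. The `RunContext` import path is an assumption; the constructor fields (`run_id`, `session_id`, `user_id`) come from the diff, and `team` is assumed to be an existing `Team` instance.

```python
from agno.run.base import RunContext  # import path assumed

# Build one context up front and share it across the run.
ctx = RunContext(run_id="run-1", session_id="sess-1", user_id="user-1")
team.run("Summarize the report", run_context=ctx)  # `team` is an existing Team
```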
agno/tools/file_generation.py
CHANGED
```diff
@@ -116,7 +116,7 @@ class FileGenerationTools(Toolkit):
             file_type="json",
             filename=filename,
             size=len(json_content.encode("utf-8")),
-
+            filepath=file_path if file_path else None,
         )
 
         log_debug("JSON file generated successfully")
@@ -203,7 +203,7 @@ class FileGenerationTools(Toolkit):
             file_type="csv",
             filename=filename,
             size=len(csv_content.encode("utf-8")),
-
+            filepath=file_path if file_path else None,
         )
 
         log_debug("CSV file generated successfully")
@@ -287,7 +287,7 @@ class FileGenerationTools(Toolkit):
             file_type="pdf",
             filename=filename,
             size=len(pdf_content),
-
+            filepath=file_path if file_path else None,
         )
 
         log_debug("PDF file generated successfully")
@@ -333,7 +333,7 @@ class FileGenerationTools(Toolkit):
             file_type="txt",
             filename=filename,
             size=len(content.encode("utf-8")),
-
+            filepath=file_path if file_path else None,
         )
 
         log_debug("Text file generated successfully")
```
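All four generators (JSON, CSV, PDF, text) now populate the previously blank `filepath` argument, and only when the file was actually written to disk. A stand-in sketch of the pattern, where `FileArtifact` is a simplified stand-in rather than the real agno result type:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class FileArtifact:  # simplified stand-in for the generated-file result
    file_type: str
    filename: str
    size: int
    filepath: Optional[str] = None


def build_artifact(content: str, filename: str, file_path: Optional[str]) -> FileArtifact:
    return FileArtifact(
        file_type="txt",
        filename=filename,
        size=len(content.encode("utf-8")),
        filepath=file_path if file_path else None,  # mirrors the diff: None for in-memory output
    )


print(build_artifact("hello", "hello.txt", "/tmp/hello.txt"))
```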
agno/tools/gmail.py
CHANGED
```diff
@@ -141,6 +141,11 @@ class GmailTools(Toolkit):
             self.create_draft_email,
             self.send_email,
             self.send_email_reply,
+            # Label management
+            self.list_custom_labels,
+            self.apply_label,
+            self.remove_label,
+            self.delete_custom_label,
         ]
 
         super().__init__(name="gmail_tools", tools=tools, **kwargs)
@@ -161,6 +166,7 @@ class GmailTools(Toolkit):
             "get_emails_by_date",
             "get_emails_by_thread",
             "search_emails",
+            "list_custom_labels",
         ]
         modify_operations = ["mark_email_as_read", "mark_email_as_unread"]
         if any(read_operation in self.functions for read_operation in read_operations):
@@ -600,6 +606,179 @@ class GmailTools(Toolkit):
         except Exception as error:
             return f"Error marking email {message_id} as unread: {type(error).__name__}: {error}"
 
+    @authenticate
+    def list_custom_labels(self) -> str:
+        """
+        List only user-created custom labels (filters out system labels) in a numbered format.
+
+        Returns:
+            str: A numbered list of custom labels only
+        """
+        try:
+            results = self.service.users().labels().list(userId="me").execute()  # type: ignore
+            labels = results.get("labels", [])
+
+            # Filter out only user-created labels
+            custom_labels = [label["name"] for label in labels if label.get("type") == "user"]
+
+            if not custom_labels:
+                return "No custom labels found.\nCreate labels using apply_label function!"
+
+            # Create numbered list
+            numbered_labels = [f"{i}. {name}" for i, name in enumerate(custom_labels, 1)]
+            return f"Your Custom Labels ({len(custom_labels)} total):\n\n" + "\n".join(numbered_labels)
+
+        except HttpError as e:
+            return f"Error fetching labels: {e}"
+        except Exception as e:
+            return f"Unexpected error: {type(e).__name__}: {e}"
+
+    @authenticate
+    def apply_label(self, context: str, label_name: str, count: int = 10) -> str:
+        """
+        Find emails matching a context (search query) and apply a label, creating it if necessary.
+
+        Args:
+            context (str): Gmail search query (e.g., 'is:unread category:promotions')
+            label_name (str): Name of the label to apply
+            count (int): Maximum number of emails to process
+        Returns:
+            str: Summary of labeled emails
+        """
+        try:
+            # Fetch messages matching context
+            results = self.service.users().messages().list(userId="me", q=context, maxResults=count).execute()  # type: ignore
+
+            messages = results.get("messages", [])
+            if not messages:
+                return f"No emails found matching: '{context}'"
+
+            # Check if label exists, create if not
+            labels = self.service.users().labels().list(userId="me").execute().get("labels", [])  # type: ignore
+            label_id = None
+            for label in labels:
+                if label["name"].lower() == label_name.lower():
+                    label_id = label["id"]
+                    break
+
+            if not label_id:
+                label = (
+                    self.service.users()  # type: ignore
+                    .labels()
+                    .create(
+                        userId="me",
+                        body={"name": label_name, "labelListVisibility": "labelShow", "messageListVisibility": "show"},
+                    )
+                    .execute()
+                )
+                label_id = label["id"]
+
+            # Apply label to all matching messages
+            for msg in messages:
+                self.service.users().messages().modify(  # type: ignore
+                    userId="me", id=msg["id"], body={"addLabelIds": [label_id]}
+                ).execute()  # type: ignore
+
+            return f"Applied label '{label_name}' to {len(messages)} emails matching '{context}'."
+
+        except HttpError as e:
+            return f"Error applying label '{label_name}': {e}"
+        except Exception as e:
+            return f"Unexpected error: {type(e).__name__}: {e}"
+
+    @authenticate
+    def remove_label(self, context: str, label_name: str, count: int = 10) -> str:
+        """
+        Remove a label from emails matching a context (search query).
+
+        Args:
+            context (str): Gmail search query (e.g., 'is:unread category:promotions')
+            label_name (str): Name of the label to remove
+            count (int): Maximum number of emails to process
+        Returns:
+            str: Summary of emails with label removed
+        """
+        try:
+            # Get all labels to find the target label
+            labels = self.service.users().labels().list(userId="me").execute().get("labels", [])  # type: ignore
+            label_id = None
+
+            for label in labels:
+                if label["name"].lower() == label_name.lower():
+                    label_id = label["id"]
+                    break
+
+            if not label_id:
+                return f"Label '{label_name}' not found."
+
+            # Fetch messages matching context that have this label
+            results = (
+                self.service.users()  # type: ignore
+                .messages()
+                .list(userId="me", q=f"{context} label:{label_name}", maxResults=count)
+                .execute()
+            )
+
+            messages = results.get("messages", [])
+            if not messages:
+                return f"No emails found matching: '{context}' with label '{label_name}'"
+
+            # Remove label from all matching messages
+            removed_count = 0
+            for msg in messages:
+                self.service.users().messages().modify(  # type: ignore
+                    userId="me", id=msg["id"], body={"removeLabelIds": [label_id]}
+                ).execute()  # type: ignore
+                removed_count += 1
+
+            return f"Removed label '{label_name}' from {removed_count} emails matching '{context}'."
+
+        except HttpError as e:
+            return f"Error removing label '{label_name}': {e}"
+        except Exception as e:
+            return f"Unexpected error: {type(e).__name__}: {e}"
+
+    @authenticate
+    def delete_custom_label(self, label_name: str, confirm: bool = False) -> str:
+        """
+        Delete a custom label (with safety confirmation).
+
+        Args:
+            label_name (str): Name of the label to delete
+            confirm (bool): Must be True to actually delete the label
+        Returns:
+            str: Confirmation message or warning
+        """
+        if not confirm:
+            return f"LABEL DELETION REQUIRES CONFIRMATION. This will permanently delete the label '{label_name}' from all emails. Set confirm=True to proceed."
+
+        try:
+            # Get all labels to find the target label
+            labels = self.service.users().labels().list(userId="me").execute().get("labels", [])  # type: ignore
+            target_label = None
+
+            for label in labels:
+                if label["name"].lower() == label_name.lower():
+                    target_label = label
+                    break
+
+            if not target_label:
+                return f"Label '{label_name}' not found."
+
+            # Check if it's a system label using the type field
+            if target_label.get("type") != "user":
+                return f"Cannot delete system label '{label_name}'. Only user-created labels can be deleted."
+
+            # Delete the label
+            self.service.users().labels().delete(userId="me", id=target_label["id"]).execute()  # type: ignore
+
+            return f"Successfully deleted label '{label_name}'. This label has been removed from all emails."
+
+        except HttpError as e:
+            return f"Error deleting label '{label_name}': {e}"
+        except Exception as e:
+            return f"Unexpected error: {type(e).__name__}: {e}"
+
     def _validate_email_params(self, to: str, subject: str, body: str) -> None:
         """Validate email parameters."""
         if not to:
```