agno-2.1.4-py3-none-any.whl → agno-2.1.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. agno/agent/agent.py +1775 -538
  2. agno/db/async_postgres/__init__.py +3 -0
  3. agno/db/async_postgres/async_postgres.py +1668 -0
  4. agno/db/async_postgres/schemas.py +124 -0
  5. agno/db/async_postgres/utils.py +289 -0
  6. agno/db/base.py +237 -2
  7. agno/db/dynamo/dynamo.py +2 -2
  8. agno/db/firestore/firestore.py +2 -2
  9. agno/db/firestore/utils.py +4 -2
  10. agno/db/gcs_json/gcs_json_db.py +2 -2
  11. agno/db/in_memory/in_memory_db.py +2 -2
  12. agno/db/json/json_db.py +2 -2
  13. agno/db/migrations/v1_to_v2.py +43 -13
  14. agno/db/mongo/mongo.py +14 -6
  15. agno/db/mongo/utils.py +0 -4
  16. agno/db/mysql/mysql.py +23 -13
  17. agno/db/postgres/postgres.py +17 -6
  18. agno/db/redis/redis.py +2 -2
  19. agno/db/singlestore/singlestore.py +19 -10
  20. agno/db/sqlite/sqlite.py +22 -12
  21. agno/db/sqlite/utils.py +8 -3
  22. agno/db/surrealdb/__init__.py +3 -0
  23. agno/db/surrealdb/metrics.py +292 -0
  24. agno/db/surrealdb/models.py +259 -0
  25. agno/db/surrealdb/queries.py +71 -0
  26. agno/db/surrealdb/surrealdb.py +1193 -0
  27. agno/db/surrealdb/utils.py +87 -0
  28. agno/eval/accuracy.py +50 -43
  29. agno/eval/performance.py +6 -3
  30. agno/eval/reliability.py +6 -3
  31. agno/eval/utils.py +33 -16
  32. agno/exceptions.py +8 -2
  33. agno/knowledge/knowledge.py +260 -46
  34. agno/knowledge/reader/pdf_reader.py +4 -6
  35. agno/knowledge/reader/reader_factory.py +2 -3
  36. agno/memory/manager.py +254 -46
  37. agno/models/anthropic/claude.py +37 -0
  38. agno/os/app.py +8 -7
  39. agno/os/interfaces/a2a/router.py +3 -5
  40. agno/os/interfaces/agui/router.py +4 -1
  41. agno/os/interfaces/agui/utils.py +27 -6
  42. agno/os/interfaces/slack/router.py +2 -4
  43. agno/os/mcp.py +98 -41
  44. agno/os/router.py +23 -0
  45. agno/os/routers/evals/evals.py +52 -20
  46. agno/os/routers/evals/utils.py +14 -14
  47. agno/os/routers/knowledge/knowledge.py +130 -9
  48. agno/os/routers/knowledge/schemas.py +57 -0
  49. agno/os/routers/memory/memory.py +116 -44
  50. agno/os/routers/metrics/metrics.py +16 -6
  51. agno/os/routers/session/session.py +65 -22
  52. agno/os/schema.py +36 -0
  53. agno/os/utils.py +64 -11
  54. agno/reasoning/anthropic.py +80 -0
  55. agno/reasoning/gemini.py +73 -0
  56. agno/reasoning/openai.py +5 -0
  57. agno/reasoning/vertexai.py +76 -0
  58. agno/session/workflow.py +3 -3
  59. agno/team/team.py +968 -179
  60. agno/tools/googlesheets.py +20 -5
  61. agno/tools/mcp_toolbox.py +3 -3
  62. agno/tools/scrapegraph.py +1 -1
  63. agno/utils/models/claude.py +3 -1
  64. agno/utils/streamlit.py +1 -1
  65. agno/vectordb/base.py +22 -1
  66. agno/vectordb/cassandra/cassandra.py +9 -0
  67. agno/vectordb/chroma/chromadb.py +26 -6
  68. agno/vectordb/clickhouse/clickhousedb.py +9 -1
  69. agno/vectordb/couchbase/couchbase.py +11 -0
  70. agno/vectordb/lancedb/lance_db.py +20 -0
  71. agno/vectordb/langchaindb/langchaindb.py +11 -0
  72. agno/vectordb/lightrag/lightrag.py +9 -0
  73. agno/vectordb/llamaindex/llamaindexdb.py +15 -1
  74. agno/vectordb/milvus/milvus.py +23 -0
  75. agno/vectordb/mongodb/mongodb.py +22 -0
  76. agno/vectordb/pgvector/pgvector.py +19 -0
  77. agno/vectordb/pineconedb/pineconedb.py +35 -4
  78. agno/vectordb/qdrant/qdrant.py +24 -0
  79. agno/vectordb/singlestore/singlestore.py +25 -17
  80. agno/vectordb/surrealdb/surrealdb.py +18 -2
  81. agno/vectordb/upstashdb/upstashdb.py +26 -1
  82. agno/vectordb/weaviate/weaviate.py +18 -0
  83. agno/workflow/condition.py +4 -0
  84. agno/workflow/loop.py +4 -0
  85. agno/workflow/parallel.py +4 -0
  86. agno/workflow/router.py +4 -0
  87. agno/workflow/step.py +30 -14
  88. agno/workflow/steps.py +4 -0
  89. agno/workflow/types.py +2 -2
  90. agno/workflow/workflow.py +328 -61
  91. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/METADATA +100 -41
  92. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/RECORD +95 -82
  93. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/WHEEL +0 -0
  94. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/licenses/LICENSE +0 -0
  95. {agno-2.1.4.dist-info → agno-2.1.6.dist-info}/top_level.txt +0 -0
agno/os/routers/knowledge/knowledge.py
@@ -19,6 +19,9 @@ from agno.os.routers.knowledge.schemas import (
     ContentStatusResponse,
     ContentUpdateSchema,
     ReaderSchema,
+    VectorDbSchema,
+    VectorSearchRequestSchema,
+    VectorSearchResult,
 )
 from agno.os.schema import (
     BadRequestResponse,
@@ -303,7 +306,7 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
             }
         },
     )
-    def get_content(
+    async def get_content(
         limit: Optional[int] = Query(default=20, description="Number of content entries to return"),
         page: Optional[int] = Query(default=1, description="Page number"),
         sort_by: Optional[str] = Query(default="created_at", description="Field to sort by"),
@@ -311,7 +314,7 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
         db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
     ) -> PaginatedResponse[ContentResponseSchema]:
         knowledge = get_knowledge_instance_by_db_id(knowledge_instances, db_id)
-        contents, count = knowledge.get_content(limit=limit, page=page, sort_by=sort_by, sort_order=sort_order)
+        contents, count = await knowledge.aget_content(limit=limit, page=page, sort_by=sort_by, sort_order=sort_order)

         return PaginatedResponse(
             data=[
@@ -371,13 +374,13 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
             404: {"description": "Content not found", "model": NotFoundResponse},
         },
     )
-    def get_content_by_id(
+    async def get_content_by_id(
         content_id: str,
         db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
     ) -> ContentResponseSchema:
         log_info(f"Getting content by id: {content_id}")
         knowledge = get_knowledge_instance_by_db_id(knowledge_instances, db_id)
-        content = knowledge.get_content_by_id(content_id=content_id)
+        content = await knowledge.aget_content_by_id(content_id=content_id)
         if not content:
             raise HTTPException(status_code=404, detail=f"Content not found: {content_id}")
         response = ContentResponseSchema.from_dict(
@@ -411,12 +414,12 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
             500: {"description": "Failed to delete content", "model": InternalServerErrorResponse},
         },
     )
-    def delete_content_by_id(
+    async def delete_content_by_id(
         content_id: str,
         db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
     ) -> ContentResponseSchema:
         knowledge = get_knowledge_instance_by_db_id(knowledge_instances, db_id)
-        knowledge.remove_content_by_id(content_id=content_id)
+        await knowledge.aremove_content_by_id(content_id=content_id)
         log_info(f"Deleting content by id: {content_id}")

         return ContentResponseSchema(
@@ -443,7 +446,6 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
         knowledge = get_knowledge_instance_by_db_id(knowledge_instances, db_id)
         log_info("Deleting all content")
         knowledge.remove_all_content()
-
         return "success"

     @router.get(
@@ -476,13 +478,13 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
             404: {"description": "Content not found", "model": NotFoundResponse},
         },
     )
-    def get_content_status(
+    async def get_content_status(
         content_id: str,
         db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
     ) -> ContentStatusResponse:
         log_info(f"Getting content status: {content_id}")
         knowledge = get_knowledge_instance_by_db_id(knowledge_instances, db_id)
-        knowledge_status, status_message = knowledge.get_content_status(content_id=content_id)
+        knowledge_status, status_message = await knowledge.aget_content_status(content_id=content_id)

         # Handle the case where content is not found
         if knowledge_status is None:
@@ -513,6 +515,102 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:

         return ContentStatusResponse(status=status, status_message=status_message or "")

+    @router.post(
+        "/knowledge/search",
+        status_code=200,
+        operation_id="search_vectors",
+        summary="Search Vectors",
+        description="Search the knowledge base for relevant vectors using query, filters and search type.",
+        response_model=PaginatedResponse[VectorSearchResult],
+        responses={
+            200: {
+                "description": "Search results retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "data": [
+                                {
+                                    "id": "doc_123",
+                                    "content": "Jordan Mitchell - Software Engineer with skills in JavaScript, React, Python",
+                                    "name": "cv_1",
+                                    "meta_data": {"page": 1, "chunk": 1},
+                                    "usage": {"total_tokens": 14},
+                                    "reranking_score": 0.95,
+                                    "content_id": "content_456",
+                                }
+                            ],
+                            "meta": {"page": 1, "limit": 20, "total_pages": 2, "total_count": 35},
+                        }
+                    }
+                },
+            },
+            400: {"description": "Invalid search parameters"},
+            404: {"description": "No documents found"},
+        },
+    )
+    def search_vectors(request: VectorSearchRequestSchema) -> PaginatedResponse[VectorSearchResult]:
+        import time
+
+        start_time = time.time()
+
+        knowledge = get_knowledge_instance_by_db_id(knowledge_instances, request.db_id)
+
+        # For now, validate the vector db ids exist in the knowledge base
+        # We will add more logic around this once we have multi vectordb support
+        # If vector db ids are provided, check if any of them match the knowledge's vector db
+        if request.vector_db_ids:
+            if knowledge.vector_db and knowledge.vector_db.id:
+                if knowledge.vector_db.id not in request.vector_db_ids:
+                    raise HTTPException(
+                        status_code=400,
+                        detail=f"None of the provided Vector DB IDs {request.vector_db_ids} match the knowledge base Vector DB ID {knowledge.vector_db.id}",
+                    )
+            else:
+                raise HTTPException(status_code=400, detail="Knowledge base has no vector database configured")
+
+        # Calculate pagination parameters
+        meta = request.meta
+        limit = meta.limit if meta and meta.limit is not None else 20
+        page = meta.page if meta and meta.page is not None else 1
+
+        # Use max_results if specified, otherwise use a higher limit for search then paginate
+        search_limit = request.max_results
+
+        results = knowledge.search(
+            query=request.query, max_results=search_limit, filters=request.filters, search_type=request.search_type
+        )
+
+        # Calculate pagination
+        total_results = len(results)
+        start_idx = (page - 1) * limit
+
+        # Ensure start_idx doesn't exceed the total results
+        if start_idx >= total_results and total_results > 0:
+            # If page is beyond available results, return empty results
+            paginated_results = []
+        else:
+            end_idx = min(start_idx + limit, total_results)
+            paginated_results = results[start_idx:end_idx]
+
+        search_time_ms = (time.time() - start_time) * 1000
+
+        # Convert Document objects to serializable format
+        document_results = [VectorSearchResult.from_document(doc) for doc in paginated_results]
+
+        # Calculate pagination info
+        total_pages = (total_results + limit - 1) // limit  # Ceiling division
+
+        return PaginatedResponse(
+            data=document_results,
+            meta=PaginationInfo(
+                page=page,
+                limit=limit,
+                total_pages=total_pages,
+                total_count=total_results,
+                search_time_ms=search_time_ms,
+            ),
+        )
+
     @router.get(
         "/knowledge/config",
         status_code=200,
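The new POST /knowledge/search route added above can be exercised with a plain HTTP client once an AgentOS app is running. A minimal sketch, assuming the API is served at http://localhost:7777 with authentication disabled and the knowledge routes mounted at the root; the payload fields mirror VectorSearchRequestSchema, and all values are illustrative only.

import requests

# Body fields follow VectorSearchRequestSchema: query, search_type, max_results,
# and meta (limit/page). The query text and limits below are made up.
payload = {
    "query": "software engineers with React experience",
    "search_type": "hybrid",
    "max_results": 50,
    "meta": {"limit": 10, "page": 1},
}

resp = requests.post("http://localhost:7777/knowledge/search", json=payload, timeout=30)
resp.raise_for_status()

for hit in resp.json()["data"]:
    # Each item is a serialized VectorSearchResult
    print(hit["id"], hit.get("reranking_score"), hit["content"][:80])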
@@ -735,6 +833,14 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
                                 "description": "A chunking strategy that splits markdown based on structure like headers, paragraphs and sections",
                             },
                         },
+                        "vector_dbs": [
+                            {
+                                "id": "vector_db_1",
+                                "name": "Vector DB 1",
+                                "description": "Vector DB 1 description",
+                                "search_types": ["vector", "keyword", "hybrid"],
+                            }
+                        ],
                         "filters": ["filter_tag_1", "filter_tag2"],
                     }
                 }
@@ -793,8 +899,23 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
                 key=chunker_key, name=chunker_info.get("name"), description=chunker_info.get("description")
             )

+        vector_dbs = []
+        if knowledge.vector_db:
+            search_types = knowledge.vector_db.get_supported_search_types()
+            name = knowledge.vector_db.name
+            db_id = knowledge.vector_db.id
+            vector_dbs.append(
+                VectorDbSchema(
+                    id=db_id,
+                    name=name,
+                    description=knowledge.vector_db.description,
+                    search_types=search_types,
+                )
+            )
+
         return ConfigResponseSchema(
             readers=reader_schemas,
+            vector_dbs=vector_dbs,
             readersForType=types_of_readers,
             chunkers=chunkers_dict,
             filters=knowledge.get_filters(),
agno/os/routers/knowledge/schemas.py
@@ -111,8 +111,65 @@ class ChunkerSchema(BaseModel):
     description: Optional[str] = None


+class VectorDbSchema(BaseModel):
+    id: str
+    name: Optional[str] = None
+    description: Optional[str] = None
+    search_types: Optional[List[str]] = None
+
+
+class VectorSearchResult(BaseModel):
+    """Schema for search result documents."""
+
+    id: str
+    content: str
+    name: Optional[str] = None
+    meta_data: Optional[Dict[str, Any]] = None
+    usage: Optional[Dict[str, Any]] = None
+    reranking_score: Optional[float] = None
+    content_id: Optional[str] = None
+    content_origin: Optional[str] = None
+    size: Optional[int] = None
+
+    @classmethod
+    def from_document(cls, document) -> "VectorSearchResult":
+        """Convert a Document object to a serializable VectorSearchResult."""
+        return cls(
+            id=document.id,
+            content=document.content,
+            name=getattr(document, "name", None),
+            meta_data=getattr(document, "meta_data", None),
+            usage=getattr(document, "usage", None),
+            reranking_score=getattr(document, "reranking_score", None),
+            content_id=getattr(document, "content_id", None),
+            content_origin=getattr(document, "content_origin", None),
+            size=getattr(document, "size", None),
+        )
+
+
+class VectorSearchRequestSchema(BaseModel):
+    """Schema for vector search request."""
+
+    class Meta(BaseModel):
+        """Inline metadata schema for pagination."""
+
+        limit: Optional[int] = Field(20, description="Number of results per page", ge=1, le=100)
+        page: Optional[int] = Field(1, description="Page number", ge=1)
+
+    query: str = Field(..., description="The search query")
+    db_id: Optional[str] = Field(None, description="The content database id")
+    vector_db_ids: Optional[List[str]] = Field(None, description="List of vector database ids to search in")
+    search_type: Optional[str] = Field(None, description="The type of search to perform")
+    max_results: Optional[int] = Field(None, description="The maximum number of results to return")
+    filters: Optional[Dict[str, Any]] = Field(None, description="The filters to apply to the search")
+    meta: Optional[Meta] = Field(
+        None, description="Pagination metadata. Limit and page number to return a subset of results."
+    )
+
+
 class ConfigResponseSchema(BaseModel):
     readers: Optional[Dict[str, ReaderSchema]] = None
     readersForType: Optional[Dict[str, List[str]]] = None
     chunkers: Optional[Dict[str, ChunkerSchema]] = None
     filters: Optional[List[str]] = None
+    vector_dbs: Optional[List[VectorDbSchema]] = None
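On the client side, a request body for the new endpoint can be built and validated with VectorSearchRequestSchema directly. A small sketch with illustrative values only; it assumes Pydantic v2 (model_dump), so use .dict() instead on Pydantic v1.

from agno.os.routers.knowledge.schemas import VectorSearchRequestSchema

request = VectorSearchRequestSchema(
    query="Jordan Mitchell",
    search_type="vector",
    max_results=20,
    meta=VectorSearchRequestSchema.Meta(limit=5, page=1),
)
# Serialize to the JSON body expected by POST /knowledge/search
print(request.model_dump(exclude_none=True))
# {'query': 'Jordan Mitchell', 'search_type': 'vector', 'max_results': 20, 'meta': {'limit': 5, 'page': 1}}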
agno/os/routers/memory/memory.py
@@ -1,12 +1,12 @@
 import logging
 import math
-from typing import List, Optional
+from typing import List, Optional, Union, cast
 from uuid import uuid4

 from fastapi import Depends, HTTPException, Path, Query, Request
 from fastapi.routing import APIRouter

-from agno.db.base import BaseDb
+from agno.db.base import AsyncBaseDb, BaseDb
 from agno.db.schemas import UserMemory
 from agno.os.auth import get_authentication_dependency
 from agno.os.routers.memory.schemas import (
@@ -31,7 +31,9 @@ from agno.os.utils import get_db
 logger = logging.getLogger(__name__)


-def get_memory_router(dbs: dict[str, BaseDb], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs) -> APIRouter:
+def get_memory_router(
+    dbs: dict[str, Union[BaseDb, AsyncBaseDb]], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs
+) -> APIRouter:
     """Create memory router with comprehensive OpenAPI documentation for user memory management endpoints."""
     router = APIRouter(
         dependencies=[Depends(get_authentication_dependency(settings))],
@@ -47,7 +49,7 @@ def get_memory_router(dbs: dict[str, BaseDb], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs) -> APIRouter:
     return attach_routes(router=router, dbs=dbs)


-def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
+def attach_routes(router: APIRouter, dbs: dict[str, Union[BaseDb, AsyncBaseDb]]) -> APIRouter:
     @router.post(
         "/memories",
         response_model=UserMemorySchema,
@@ -92,15 +94,29 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
             raise HTTPException(status_code=400, detail="User ID is required")

         db = get_db(dbs, db_id)
-        user_memory = db.upsert_user_memory(
-            memory=UserMemory(
-                memory_id=str(uuid4()),
-                memory=payload.memory,
-                topics=payload.topics or [],
-                user_id=payload.user_id,
-            ),
-            deserialize=False,
-        )
+
+        if isinstance(db, AsyncBaseDb):
+            db = cast(AsyncBaseDb, db)
+            user_memory = await db.upsert_user_memory(
+                memory=UserMemory(
+                    memory_id=str(uuid4()),
+                    memory=payload.memory,
+                    topics=payload.topics or [],
+                    user_id=payload.user_id,
+                ),
+                deserialize=False,
+            )
+        else:
+            user_memory = db.upsert_user_memory(
+                memory=UserMemory(
+                    memory_id=str(uuid4()),
+                    memory=payload.memory,
+                    topics=payload.topics or [],
+                    user_id=payload.user_id,
+                ),
+                deserialize=False,
+            )
+
         if not user_memory:
             raise HTTPException(status_code=500, detail="Failed to create memory")

@@ -124,7 +140,11 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
         db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
     ) -> None:
         db = get_db(dbs, db_id)
-        db.delete_user_memory(memory_id=memory_id, user_id=user_id)
+        if isinstance(db, AsyncBaseDb):
+            db = cast(AsyncBaseDb, db)
+            await db.delete_user_memory(memory_id=memory_id, user_id=user_id)
+        else:
+            db.delete_user_memory(memory_id=memory_id, user_id=user_id)

     @router.delete(
         "/memories",
@@ -146,7 +166,11 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
         db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
     ) -> None:
         db = get_db(dbs, db_id)
-        db.delete_user_memories(memory_ids=request.memory_ids, user_id=request.user_id)
+        if isinstance(db, AsyncBaseDb):
+            db = cast(AsyncBaseDb, db)
+            await db.delete_user_memories(memory_ids=request.memory_ids, user_id=request.user_id)
+        else:
+            db.delete_user_memories(memory_ids=request.memory_ids, user_id=request.user_id)

     @router.get(
         "/memories",
@@ -199,18 +223,34 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
         if hasattr(request.state, "user_id"):
             user_id = request.state.user_id

-        user_memories, total_count = db.get_user_memories(
-            limit=limit,
-            page=page,
-            user_id=user_id,
-            agent_id=agent_id,
-            team_id=team_id,
-            topics=topics,
-            search_content=search_content,
-            sort_by=sort_by,
-            sort_order=sort_order,
-            deserialize=False,
-        )
+        if isinstance(db, AsyncBaseDb):
+            db = cast(AsyncBaseDb, db)
+            user_memories, total_count = await db.get_user_memories(
+                limit=limit,
+                page=page,
+                user_id=user_id,
+                agent_id=agent_id,
+                team_id=team_id,
+                topics=topics,
+                search_content=search_content,
+                sort_by=sort_by,
+                sort_order=sort_order,
+                deserialize=False,
+            )
+        else:
+            user_memories, total_count = db.get_user_memories(  # type: ignore
+                limit=limit,
+                page=page,
+                user_id=user_id,
+                agent_id=agent_id,
+                team_id=team_id,
+                topics=topics,
+                search_content=search_content,
+                sort_by=sort_by,
+                sort_order=sort_order,
+                deserialize=False,
+            )
+
         return PaginatedResponse(
             data=[UserMemorySchema.from_dict(user_memory) for user_memory in user_memories],  # type: ignore
             meta=PaginationInfo(
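Every handler in this router now branches the same way: when the configured backend is an AsyncBaseDb the call is awaited, otherwise the synchronous BaseDb method is called directly. A condensed, standalone illustration of that dispatch pattern follows; SyncDb and AsyncDb here are hypothetical stand-ins, not agno classes.

import asyncio
from typing import List, Union


class SyncDb:
    def get_all_memory_topics(self) -> List[str]:
        return ["work", "travel"]


class AsyncDb:
    async def get_all_memory_topics(self) -> List[str]:
        return ["work", "travel"]


async def get_topics(db: Union[SyncDb, AsyncDb]) -> List[str]:
    # Mirrors the router pattern: one async endpoint, two kinds of backends.
    if isinstance(db, AsyncDb):
        return await db.get_all_memory_topics()
    return db.get_all_memory_topics()


print(asyncio.run(get_topics(SyncDb())))   # ['work', 'travel']
print(asyncio.run(get_topics(AsyncDb())))  # ['work', 'travel']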
@@ -249,12 +289,21 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
         },
     )
     async def get_memory(
+        request: Request,
         memory_id: str = Path(description="Memory ID to retrieve"),
         user_id: Optional[str] = Query(default=None, description="User ID to query memory for"),
         db_id: Optional[str] = Query(default=None, description="Database ID to query memory from"),
     ) -> UserMemorySchema:
         db = get_db(dbs, db_id)
-        user_memory = db.get_user_memory(memory_id=memory_id, user_id=user_id, deserialize=False)
+
+        if hasattr(request.state, "user_id"):
+            user_id = request.state.user_id
+
+        if isinstance(db, AsyncBaseDb):
+            db = cast(AsyncBaseDb, db)
+            user_memory = await db.get_user_memory(memory_id=memory_id, user_id=user_id, deserialize=False)
+        else:
+            user_memory = db.get_user_memory(memory_id=memory_id, user_id=user_id, deserialize=False)
         if not user_memory:
             raise HTTPException(status_code=404, detail=f"Memory with ID {memory_id} not found")

@@ -295,7 +344,11 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
         db_id: Optional[str] = Query(default=None, description="Database ID to query topics from"),
     ) -> List[str]:
         db = get_db(dbs, db_id)
-        return db.get_all_memory_topics()
+        if isinstance(db, AsyncBaseDb):
+            db = cast(AsyncBaseDb, db)
+            return await db.get_all_memory_topics()
+        else:
+            return db.get_all_memory_topics()

     @router.patch(
         "/memories/{memory_id}",
@@ -336,8 +389,6 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
         memory_id: str = Path(description="Memory ID to update"),
         db_id: Optional[str] = Query(default=None, description="Database ID to use for update"),
     ) -> UserMemorySchema:
-        db = get_db(dbs, db_id)
-
         if hasattr(request.state, "user_id"):
             user_id = request.state.user_id
             payload.user_id = user_id
@@ -345,15 +396,29 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
         if payload.user_id is None:
             raise HTTPException(status_code=400, detail="User ID is required")

-        user_memory = db.upsert_user_memory(
-            memory=UserMemory(
-                memory_id=memory_id,
-                memory=payload.memory,
-                topics=payload.topics or [],
-                user_id=payload.user_id,
-            ),
-            deserialize=False,
-        )
+        db = get_db(dbs, db_id)
+
+        if isinstance(db, AsyncBaseDb):
+            db = cast(AsyncBaseDb, db)
+            user_memory = await db.upsert_user_memory(
+                memory=UserMemory(
+                    memory_id=memory_id,
+                    memory=payload.memory,
+                    topics=payload.topics or [],
+                    user_id=payload.user_id,
+                ),
+                deserialize=False,
+            )
+        else:
+            user_memory = db.upsert_user_memory(
+                memory=UserMemory(
+                    memory_id=memory_id,
+                    memory=payload.memory,
+                    topics=payload.topics or [],
+                    user_id=payload.user_id,
+                ),
+                deserialize=False,
+            )
         if not user_memory:
             raise HTTPException(status_code=500, detail="Failed to update memory")

@@ -396,10 +461,17 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
     ) -> PaginatedResponse[UserStatsSchema]:
         db = get_db(dbs, db_id)
         try:
-            user_stats, total_count = db.get_user_memory_stats(
-                limit=limit,
-                page=page,
-            )
+            if isinstance(db, AsyncBaseDb):
+                db = cast(AsyncBaseDb, db)
+                user_stats, total_count = await db.get_user_memory_stats(
+                    limit=limit,
+                    page=page,
+                )
+            else:
+                user_stats, total_count = db.get_user_memory_stats(
+                    limit=limit,
+                    page=page,
+                )
             return PaginatedResponse(
                 data=[UserStatsSchema.from_dict(stats) for stats in user_stats],
                 meta=PaginationInfo(
agno/os/routers/metrics/metrics.py
@@ -1,11 +1,11 @@
 import logging
 from datetime import date, datetime, timezone
-from typing import List, Optional
+from typing import List, Optional, Union, cast

 from fastapi import Depends, HTTPException, Query
 from fastapi.routing import APIRouter

-from agno.db.base import BaseDb
+from agno.db.base import AsyncBaseDb, BaseDb
 from agno.os.auth import get_authentication_dependency
 from agno.os.routers.metrics.schemas import DayAggregatedMetrics, MetricsResponse
 from agno.os.schema import (
@@ -21,7 +21,9 @@ from agno.os.utils import get_db
 logger = logging.getLogger(__name__)


-def get_metrics_router(dbs: dict[str, BaseDb], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs) -> APIRouter:
+def get_metrics_router(
+    dbs: dict[str, Union[BaseDb, AsyncBaseDb]], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs
+) -> APIRouter:
     """Create metrics router with comprehensive OpenAPI documentation for system metrics and analytics endpoints."""
     router = APIRouter(
         dependencies=[Depends(get_authentication_dependency(settings))],
@@ -37,7 +39,7 @@ def get_metrics_router(dbs: dict[str, BaseDb], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs) -> APIRouter:
     return attach_routes(router=router, dbs=dbs)


-def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
+def attach_routes(router: APIRouter, dbs: dict[str, Union[BaseDb, AsyncBaseDb]]) -> APIRouter:
     @router.get(
         "/metrics",
         response_model=MetricsResponse,
@@ -100,7 +102,11 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
     ) -> MetricsResponse:
         try:
             db = get_db(dbs, db_id)
-            metrics, latest_updated_at = db.get_metrics(starting_date=starting_date, ending_date=ending_date)
+            if isinstance(db, AsyncBaseDb):
+                db = cast(AsyncBaseDb, db)
+                metrics, latest_updated_at = await db.get_metrics(starting_date=starting_date, ending_date=ending_date)
+            else:
+                metrics, latest_updated_at = db.get_metrics(starting_date=starting_date, ending_date=ending_date)

             return MetricsResponse(
                 metrics=[DayAggregatedMetrics.from_dict(metric) for metric in metrics],
@@ -166,7 +172,11 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
     ) -> List[DayAggregatedMetrics]:
         try:
             db = get_db(dbs, db_id)
-            result = db.calculate_metrics()
+            if isinstance(db, AsyncBaseDb):
+                db = cast(AsyncBaseDb, db)
+                result = await db.calculate_metrics()
+            else:
+                result = db.calculate_metrics()
             if result is None:
                 return []
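The metrics endpoints gain the same sync/async branching. For completeness, a hedged client-side sketch of querying the GET /metrics route; the base URL is an assumption, and the starting_date/ending_date query parameter names follow the db.get_metrics() call above rather than anything shown in this hunk.

import requests

resp = requests.get(
    "http://localhost:7777/metrics",
    params={"starting_date": "2025-01-01", "ending_date": "2025-01-31"},
    timeout=30,
)
resp.raise_for_status()
for day in resp.json()["metrics"]:
    # Each entry corresponds to a DayAggregatedMetrics record on the server side
    print(day)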