agno-2.0.0rc1-py3-none-any.whl → agno-2.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +101 -140
- agno/db/mongo/mongo.py +8 -3
- agno/eval/accuracy.py +12 -5
- agno/knowledge/chunking/strategy.py +14 -14
- agno/knowledge/knowledge.py +156 -120
- agno/knowledge/reader/arxiv_reader.py +5 -5
- agno/knowledge/reader/csv_reader.py +6 -77
- agno/knowledge/reader/docx_reader.py +5 -5
- agno/knowledge/reader/firecrawl_reader.py +5 -5
- agno/knowledge/reader/json_reader.py +5 -5
- agno/knowledge/reader/markdown_reader.py +31 -9
- agno/knowledge/reader/pdf_reader.py +10 -123
- agno/knowledge/reader/reader_factory.py +65 -72
- agno/knowledge/reader/s3_reader.py +44 -114
- agno/knowledge/reader/text_reader.py +5 -5
- agno/knowledge/reader/url_reader.py +75 -31
- agno/knowledge/reader/web_search_reader.py +6 -29
- agno/knowledge/reader/website_reader.py +5 -5
- agno/knowledge/reader/wikipedia_reader.py +5 -5
- agno/knowledge/reader/youtube_reader.py +6 -6
- agno/knowledge/reranker/__init__.py +9 -0
- agno/knowledge/utils.py +10 -10
- agno/media.py +269 -268
- agno/models/aws/bedrock.py +3 -7
- agno/models/base.py +50 -54
- agno/models/google/gemini.py +11 -10
- agno/models/message.py +4 -4
- agno/models/ollama/chat.py +1 -1
- agno/models/openai/chat.py +33 -14
- agno/models/response.py +5 -5
- agno/os/app.py +40 -29
- agno/os/mcp.py +39 -59
- agno/os/router.py +547 -16
- agno/os/routers/evals/evals.py +197 -12
- agno/os/routers/knowledge/knowledge.py +428 -14
- agno/os/routers/memory/memory.py +250 -28
- agno/os/routers/metrics/metrics.py +125 -7
- agno/os/routers/session/session.py +393 -25
- agno/os/schema.py +55 -2
- agno/run/agent.py +37 -28
- agno/run/base.py +9 -19
- agno/run/team.py +110 -19
- agno/run/workflow.py +41 -28
- agno/team/team.py +808 -1080
- agno/tools/brightdata.py +3 -3
- agno/tools/cartesia.py +3 -5
- agno/tools/dalle.py +7 -4
- agno/tools/desi_vocal.py +2 -2
- agno/tools/e2b.py +6 -6
- agno/tools/eleven_labs.py +3 -3
- agno/tools/fal.py +4 -4
- agno/tools/function.py +7 -7
- agno/tools/giphy.py +2 -2
- agno/tools/lumalab.py +3 -3
- agno/tools/mcp.py +1 -2
- agno/tools/models/azure_openai.py +2 -2
- agno/tools/models/gemini.py +3 -3
- agno/tools/models/groq.py +3 -5
- agno/tools/models/nebius.py +2 -2
- agno/tools/models_labs.py +5 -5
- agno/tools/openai.py +4 -9
- agno/tools/opencv.py +3 -3
- agno/tools/replicate.py +7 -7
- agno/utils/events.py +5 -5
- agno/utils/gemini.py +1 -1
- agno/utils/log.py +52 -2
- agno/utils/mcp.py +57 -5
- agno/utils/models/aws_claude.py +1 -1
- agno/utils/models/claude.py +0 -8
- agno/utils/models/cohere.py +1 -1
- agno/utils/models/watsonx.py +1 -1
- agno/utils/openai.py +1 -1
- agno/utils/print_response/team.py +177 -73
- agno/utils/streamlit.py +27 -0
- agno/vectordb/lancedb/lance_db.py +82 -25
- agno/workflow/step.py +7 -7
- agno/workflow/types.py +13 -13
- agno/workflow/workflow.py +37 -28
- {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/METADATA +140 -1
- {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/RECORD +83 -84
- agno-2.0.1.dist-info/licenses/LICENSE +201 -0
- agno/knowledge/reader/gcs_reader.py +0 -67
- agno-2.0.0rc1.dist-info/licenses/LICENSE +0 -375
- {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/WHEEL +0 -0
- {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/top_level.txt +0 -0
agno/os/routers/memory/memory.py
CHANGED
@@ -15,7 +15,16 @@ from agno.os.routers.memory.schemas import (
     UserMemorySchema,
     UserStatsSchema,
 )
-from agno.os.schema import
+from agno.os.schema import (
+    BadRequestResponse,
+    InternalServerErrorResponse,
+    NotFoundResponse,
+    PaginatedResponse,
+    PaginationInfo,
+    SortOrder,
+    UnauthenticatedResponse,
+    ValidationErrorResponse,
+)
 from agno.os.settings import AgnoAPISettings
 from agno.os.utils import get_db
 
@@ -23,15 +32,56 @@ logger = logging.getLogger(__name__)
 
 
 def get_memory_router(dbs: dict[str, BaseDb], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs) -> APIRouter:
-    router
+    """Create memory router with comprehensive OpenAPI documentation for user memory management endpoints."""
+    router = APIRouter(
+        dependencies=[Depends(get_authentication_dependency(settings))],
+        tags=["Memory"],
+        responses={
+            400: {"description": "Bad Request", "model": BadRequestResponse},
+            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
+            404: {"description": "Not Found", "model": NotFoundResponse},
+            422: {"description": "Validation Error", "model": ValidationErrorResponse},
+            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
+        },
+    )
     return attach_routes(router=router, dbs=dbs)
 
 
 def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
-    @router.post(
+    @router.post(
+        "/memories",
+        response_model=UserMemorySchema,
+        status_code=200,
+        operation_id="create_memory",
+        summary="Create Memory",
+        description=(
+            "Create a new user memory with content and associated topics. "
+            "Memories are used to store contextual information for users across conversations."
+        ),
+        responses={
+            200: {
+                "description": "Memory created successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "memory_id": "mem-123",
+                            "memory": "User prefers technical explanations with code examples",
+                            "topics": ["preferences", "communication_style", "technical"],
+                            "user_id": "user-456",
+                            "created_at": "2024-01-15T10:30:00Z",
+                            "updated_at": "2024-01-15T10:30:00Z",
+                        }
+                    }
+                },
+            },
+            400: {"description": "Invalid request data", "model": BadRequestResponse},
+            422: {"description": "Validation error in payload", "model": ValidationErrorResponse},
+            500: {"description": "Failed to create memory", "model": InternalServerErrorResponse},
+        },
+    )
     async def create_memory(
         payload: UserMemoryCreateSchema,
-        db_id: Optional[str] = Query(default=None, description="
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for memory storage"),
     ) -> UserMemorySchema:
         db = get_db(dbs, db_id)
         user_memory = db.upsert_user_memory(
@@ -48,35 +98,91 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
 
         return UserMemorySchema.from_dict(user_memory)  # type: ignore
 
-    @router.delete(
+    @router.delete(
+        "/memories/{memory_id}",
+        status_code=204,
+        operation_id="delete_memory",
+        summary="Delete Memory",
+        description="Permanently delete a specific user memory. This action cannot be undone.",
+        responses={
+            204: {"description": "Memory deleted successfully"},
+            404: {"description": "Memory not found", "model": NotFoundResponse},
+            500: {"description": "Failed to delete memory", "model": InternalServerErrorResponse},
+        },
+    )
     async def delete_memory(
-        memory_id: str = Path(
+        memory_id: str = Path(description="Memory ID to delete"),
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
     ) -> None:
         db = get_db(dbs, db_id)
         db.delete_user_memory(memory_id=memory_id)
 
-    @router.delete(
+    @router.delete(
+        "/memories",
+        status_code=204,
+        operation_id="delete_memories",
+        summary="Delete Multiple Memories",
+        description=(
+            "Delete multiple user memories by their IDs in a single operation. "
+            "This action cannot be undone and all specified memories will be permanently removed."
+        ),
+        responses={
+            204: {"description": "Memories deleted successfully"},
+            400: {"description": "Invalid request - empty memory_ids list", "model": BadRequestResponse},
+            500: {"description": "Failed to delete memories", "model": InternalServerErrorResponse},
+        },
+    )
     async def delete_memories(
         request: DeleteMemoriesRequest,
-        db_id: Optional[str] = Query(default=None, description="
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
     ) -> None:
         db = get_db(dbs, db_id)
         db.delete_user_memories(memory_ids=request.memory_ids)
 
     @router.get(
-        "/memories",
+        "/memories",
+        response_model=PaginatedResponse[UserMemorySchema],
+        status_code=200,
+        operation_id="get_memories",
+        summary="List Memories",
+        description=(
+            "Retrieve paginated list of user memories with filtering and search capabilities. "
+            "Filter by user, agent, team, topics, or search within memory content."
+        ),
+        responses={
+            200: {
+                "description": "Memories retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "data": [
+                                {
+                                    "memory_id": "f9361a69-2997-40c7-ae4e-a5861d434047",
+                                    "memory": "User likes coffee.",
+                                    "topics": ["preferences"],
+                                    "agent_id": None,
+                                    "team_id": None,
+                                    "user_id": "123",
+                                    "updated_at": "2025-09-01T07:53:17Z",
+                                }
+                            ]
+                        }
+                    }
                },
            }
        },
     )
     async def get_memories(
         user_id: Optional[str] = Query(default=None, description="Filter memories by user ID"),
         agent_id: Optional[str] = Query(default=None, description="Filter memories by agent ID"),
         team_id: Optional[str] = Query(default=None, description="Filter memories by team ID"),
         topics: Optional[List[str]] = Depends(parse_topics),
-        search_content: Optional[str] = Query(default=None, description="Fuzzy search memory content"),
-        limit: Optional[int] = Query(default=20, description="Number of memories to return"),
-        page: Optional[int] = Query(default=1, description="Page number"),
-        sort_by: Optional[str] = Query(default="updated_at", description="Field to sort by"),
+        search_content: Optional[str] = Query(default=None, description="Fuzzy search within memory content"),
+        limit: Optional[int] = Query(default=20, description="Number of memories to return per page"),
+        page: Optional[int] = Query(default=1, description="Page number for pagination"),
+        sort_by: Optional[str] = Query(default="updated_at", description="Field to sort memories by"),
         sort_order: Optional[SortOrder] = Query(default="desc", description="Sort order (asc or desc)"),
-        db_id: Optional[str] = Query(default=None, description="
+        db_id: Optional[str] = Query(default=None, description="Database ID to query memories from"),
     ) -> PaginatedResponse[UserMemorySchema]:
         db = get_db(dbs, db_id)
         user_memories, total_count = db.get_user_memories(
@@ -101,10 +207,36 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
             ),
         )
 
-    @router.get(
+    @router.get(
+        "/memories/{memory_id}",
+        response_model=UserMemorySchema,
+        status_code=200,
+        operation_id="get_memory",
+        summary="Get Memory by ID",
+        description="Retrieve detailed information about a specific user memory by its ID.",
+        responses={
+            200: {
+                "description": "Memory retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "memory_id": "f9361a69-2997-40c7-ae4e-a5861d434047",
+                            "memory": "User likes coffee.",
+                            "topics": ["preferences"],
+                            "agent_id": None,
+                            "team_id": None,
+                            "user_id": "123",
+                            "updated_at": "2025-09-01T07:53:17Z",
+                        }
+                    }
+                },
+            },
+            404: {"description": "Memory not found", "model": NotFoundResponse},
+        },
+    )
     async def get_memory(
-        memory_id: str = Path(),
-        db_id: Optional[str] = Query(default=None, description="
+        memory_id: str = Path(description="Memory ID to retrieve"),
+        db_id: Optional[str] = Query(default=None, description="Database ID to query memory from"),
     ) -> UserMemorySchema:
         db = get_db(dbs, db_id)
         user_memory = db.get_user_memory(memory_id=memory_id, deserialize=False)
@@ -113,20 +245,80 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
 
         return UserMemorySchema.from_dict(user_memory)  # type: ignore
 
-    @router.get(
+    @router.get(
+        "/memory_topics",
+        response_model=List[str],
+        status_code=200,
+        operation_id="get_memory_topics",
+        summary="Get Memory Topics",
+        description=(
+            "Retrieve all unique topics associated with memories in the system. "
+            "Useful for filtering and categorizing memories by topic."
+        ),
+        responses={
+            200: {
+                "description": "Memory topics retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": [
+                            "preferences",
+                            "communication_style",
+                            "technical",
+                            "industry",
+                            "compliance",
+                            "code_examples",
+                            "requirements",
+                            "healthcare",
+                            "finance",
+                        ]
+                    }
                },
            }
        },
+    )
     async def get_topics(
-        db_id: Optional[str] = Query(default=None, description="
+        db_id: Optional[str] = Query(default=None, description="Database ID to query topics from"),
     ) -> List[str]:
         db = get_db(dbs, db_id)
         return db.get_all_memory_topics()
 
     @router.patch(
-        "/memories/{memory_id}",
+        "/memories/{memory_id}",
+        response_model=UserMemorySchema,
+        status_code=200,
+        operation_id="update_memory",
+        summary="Update Memory",
+        description=(
+            "Update an existing user memory's content and topics. "
+            "Replaces the entire memory content and topic list with the provided values."
+        ),
+        responses={
+            200: {
+                "description": "Memory updated successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "memory_id": "f9361a69-2997-40c7-ae4e-a5861d434047",
+                            "memory": "User likes coffee.",
+                            "topics": ["preferences"],
+                            "agent_id": None,
+                            "team_id": None,
+                            "user_id": "123",
+                            "updated_at": "2025-09-01T07:53:17Z",
+                        }
+                    }
+                },
+            },
+            400: {"description": "Invalid request data", "model": BadRequestResponse},
+            404: {"description": "Memory not found", "model": NotFoundResponse},
+            422: {"description": "Validation error in payload", "model": ValidationErrorResponse},
+            500: {"description": "Failed to update memory", "model": InternalServerErrorResponse},
+        },
     )
     async def update_memory(
         payload: UserMemoryCreateSchema,
-        memory_id: str = Path(),
-        db_id: Optional[str] = Query(default=None, description="
+        memory_id: str = Path(description="Memory ID to update"),
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for update"),
     ) -> UserMemorySchema:
         db = get_db(dbs, db_id)
         user_memory = db.upsert_user_memory(
@@ -148,11 +340,35 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
         response_model=PaginatedResponse[UserStatsSchema],
         status_code=200,
         operation_id="get_user_memory_stats",
+        summary="Get User Memory Statistics",
+        description=(
+            "Retrieve paginated statistics about memory usage by user. "
+            "Provides insights into user engagement and memory distribution across users."
+        ),
+        responses={
+            200: {
+                "description": "User memory statistics retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "data": [
+                                {
+                                    "user_id": "123",
+                                    "total_memories": 3,
+                                    "last_memory_updated_at": "2025-09-01T07:53:17Z",
+                                }
+                            ]
+                        }
+                    }
+                },
+            },
+            500: {"description": "Failed to retrieve user statistics", "model": InternalServerErrorResponse},
+        },
     )
     async def get_user_memory_stats(
-        limit: Optional[int] = Query(default=20, description="Number of
-        page: Optional[int] = Query(default=1, description="Page number"),
-        db_id: Optional[str] = Query(default=None, description="
+        limit: Optional[int] = Query(default=20, description="Number of user statistics to return per page"),
+        page: Optional[int] = Query(default=1, description="Page number for pagination"),
+        db_id: Optional[str] = Query(default=None, description="Database ID to query statistics from"),
     ) -> PaginatedResponse[UserStatsSchema]:
         db = get_db(dbs, db_id)
         try:
@@ -176,8 +392,14 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
     return router
 
 
-def parse_topics(
-
+def parse_topics(
+    topics: Optional[List[str]] = Query(
+        default=None,
+        description="Comma-separated list of topics to filter by",
+        example=["preferences,technical,communication_style"],
+    ),
+) -> Optional[List[str]]:
+    """Parse comma-separated topics into a list for filtering memories by topic."""
     if not topics:
         return None
 
@@ -185,4 +407,4 @@ def parse_topics(topics: Optional[List[str]] = Query(default=None)) -> Optional[
         return [topic.strip() for topic in topics[0].split(",") if topic.strip()]
 
     except Exception as e:
-        raise HTTPException(status_code=422, detail=f"Invalid topics: {e}")
+        raise HTTPException(status_code=422, detail=f"Invalid topics format: {e}")
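
The route changes above are plain FastAPI metadata, so mounting the router is all it takes for the new summaries, descriptions, and response examples to appear in the generated OpenAPI schema. Below is a minimal sketch of that wiring; the `MongoDb` class name, its constructor arguments, and the `"default"` db id are illustrative assumptions rather than part of this diff, and any `BaseDb` implementation can stand in for them.

```python
from fastapi import FastAPI

from agno.db.mongo.mongo import MongoDb  # any BaseDb backend works; class name and args below are assumed
from agno.os.routers.memory.memory import get_memory_router

# Hypothetical database handle -- swap in whichever agno db backend you actually use.
db = MongoDb(db_url="mongodb://localhost:27017")

app = FastAPI()
app.include_router(get_memory_router(dbs={"default": db}))

# The per-route documentation now appears at /docs and /openapi.json.
# Topic filtering uses the comma-separated form handled by parse_topics, e.g.
#   GET /memories?topics=preferences,technical&db_id=default
# which the dependency splits into ["preferences", "technical"] before querying the db.
```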

agno/os/routers/metrics/metrics.py
CHANGED

@@ -8,6 +8,13 @@ from fastapi.routing import APIRouter
 from agno.db.base import BaseDb
 from agno.os.auth import get_authentication_dependency
 from agno.os.routers.metrics.schemas import DayAggregatedMetrics, MetricsResponse
+from agno.os.schema import (
+    BadRequestResponse,
+    InternalServerErrorResponse,
+    NotFoundResponse,
+    UnauthenticatedResponse,
+    ValidationErrorResponse,
+)
 from agno.os.settings import AgnoAPISettings
 from agno.os.utils import get_db
 
@@ -15,16 +22,81 @@ logger = logging.getLogger(__name__)
 
 
 def get_metrics_router(dbs: dict[str, BaseDb], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs) -> APIRouter:
-    router
+    """Create metrics router with comprehensive OpenAPI documentation for system metrics and analytics endpoints."""
+    router = APIRouter(
+        dependencies=[Depends(get_authentication_dependency(settings))],
+        tags=["Metrics"],
+        responses={
+            400: {"description": "Bad Request", "model": BadRequestResponse},
+            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
+            404: {"description": "Not Found", "model": NotFoundResponse},
+            422: {"description": "Validation Error", "model": ValidationErrorResponse},
+            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
+        },
+    )
     return attach_routes(router=router, dbs=dbs)
 
 
 def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
-    @router.get(
+    @router.get(
+        "/metrics",
+        response_model=MetricsResponse,
+        status_code=200,
+        operation_id="get_metrics",
+        summary="Get AgentOS Metrics",
+        description=(
+            "Retrieve AgentOS metrics and analytics data for a specified date range. "
+            "If no date range is specified, returns all available metrics."
+        ),
+        responses={
+            200: {
+                "description": "Metrics retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "metrics": [
+                                {
+                                    "id": "7bf39658-a00a-484c-8a28-67fd8a9ddb2a",
+                                    "agent_runs_count": 5,
+                                    "agent_sessions_count": 5,
+                                    "team_runs_count": 0,
+                                    "team_sessions_count": 0,
+                                    "workflow_runs_count": 0,
+                                    "workflow_sessions_count": 0,
+                                    "users_count": 1,
+                                    "token_metrics": {
+                                        "input_tokens": 448,
+                                        "output_tokens": 148,
+                                        "total_tokens": 596,
+                                        "audio_tokens": 0,
+                                        "input_audio_tokens": 0,
+                                        "output_audio_tokens": 0,
+                                        "cached_tokens": 0,
+                                        "cache_write_tokens": 0,
+                                        "reasoning_tokens": 0,
+                                    },
+                                    "model_metrics": [{"model_id": "gpt-4o", "model_provider": "OpenAI", "count": 5}],
+                                    "date": "2025-07-31T00:00:00",
+                                    "created_at": 1753993132,
+                                    "updated_at": 1753993741,
+                                }
+                            ]
+                        }
+                    }
+                },
+            },
+            400: {"description": "Invalid date range parameters", "model": BadRequestResponse},
+            500: {"description": "Failed to retrieve metrics", "model": InternalServerErrorResponse},
+        },
+    )
     async def get_metrics(
-        starting_date: Optional[date] = Query(
-
-
+        starting_date: Optional[date] = Query(
+            default=None, description="Starting date for metrics range (YYYY-MM-DD format)"
+        ),
+        ending_date: Optional[date] = Query(
+            default=None, description="Ending date for metrics range (YYYY-MM-DD format)"
+        ),
+        db_id: Optional[str] = Query(default=None, description="Database ID to query metrics from"),
     ) -> MetricsResponse:
         try:
             db = get_db(dbs, db_id)
@@ -41,10 +113,56 @@ def attach_routes(router: APIRouter, dbs: dict[str, BaseDb]) -> APIRouter:
             raise HTTPException(status_code=500, detail=f"Error getting metrics: {str(e)}")
 
     @router.post(
-        "/metrics/refresh",
+        "/metrics/refresh",
+        response_model=List[DayAggregatedMetrics],
+        status_code=200,
+        operation_id="refresh_metrics",
+        summary="Refresh Metrics",
+        description=(
+            "Manually trigger recalculation of system metrics from raw data. "
+            "This operation analyzes system activity logs and regenerates aggregated metrics. "
+            "Useful for ensuring metrics are up-to-date or after system maintenance."
+        ),
+        responses={
+            200: {
+                "description": "Metrics refreshed successfully",
+                "content": {
+                    "application/json": {
+                        "example": [
+                            {
+                                "id": "e77c9531-818b-47a5-99cd-59fed61e5403",
+                                "agent_runs_count": 2,
+                                "agent_sessions_count": 2,
+                                "team_runs_count": 0,
+                                "team_sessions_count": 0,
+                                "workflow_runs_count": 0,
+                                "workflow_sessions_count": 0,
+                                "users_count": 1,
+                                "token_metrics": {
+                                    "input_tokens": 256,
+                                    "output_tokens": 441,
+                                    "total_tokens": 697,
+                                    "audio_total_tokens": 0,
+                                    "audio_input_tokens": 0,
+                                    "audio_output_tokens": 0,
+                                    "cache_read_tokens": 0,
+                                    "cache_write_tokens": 0,
+                                    "reasoning_tokens": 0,
+                                },
+                                "model_metrics": [{"model_id": "gpt-4o", "model_provider": "OpenAI", "count": 2}],
+                                "date": "2025-08-12T00:00:00",
+                                "created_at": 1755016907,
+                                "updated_at": 1755016907,
+                            }
+                        ]
+                    }
                },
            },
+            500: {"description": "Failed to refresh metrics", "model": InternalServerErrorResponse},
+        },
     )
     async def calculate_metrics(
-        db_id: Optional[str] = Query(default=None, description="
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for metrics calculation"),
     ) -> List[DayAggregatedMetrics]:
         try:
             db = get_db(dbs, db_id)