agno-2.3.3-py3-none-any.whl → agno-2.3.5-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
Files changed (108)
  1. agno/agent/agent.py +177 -41
  2. agno/culture/manager.py +2 -2
  3. agno/db/base.py +330 -8
  4. agno/db/dynamo/dynamo.py +722 -2
  5. agno/db/dynamo/schemas.py +127 -0
  6. agno/db/firestore/firestore.py +573 -1
  7. agno/db/firestore/schemas.py +40 -0
  8. agno/db/gcs_json/gcs_json_db.py +446 -1
  9. agno/db/in_memory/in_memory_db.py +143 -1
  10. agno/db/json/json_db.py +438 -1
  11. agno/db/mongo/async_mongo.py +522 -0
  12. agno/db/mongo/mongo.py +523 -1
  13. agno/db/mongo/schemas.py +29 -0
  14. agno/db/mysql/mysql.py +536 -3
  15. agno/db/mysql/schemas.py +38 -0
  16. agno/db/postgres/async_postgres.py +546 -14
  17. agno/db/postgres/postgres.py +535 -2
  18. agno/db/postgres/schemas.py +38 -0
  19. agno/db/redis/redis.py +468 -1
  20. agno/db/redis/schemas.py +32 -0
  21. agno/db/singlestore/schemas.py +38 -0
  22. agno/db/singlestore/singlestore.py +523 -1
  23. agno/db/sqlite/async_sqlite.py +548 -9
  24. agno/db/sqlite/schemas.py +38 -0
  25. agno/db/sqlite/sqlite.py +537 -5
  26. agno/db/sqlite/utils.py +6 -8
  27. agno/db/surrealdb/models.py +25 -0
  28. agno/db/surrealdb/surrealdb.py +548 -1
  29. agno/eval/accuracy.py +10 -4
  30. agno/eval/performance.py +10 -4
  31. agno/eval/reliability.py +22 -13
  32. agno/exceptions.py +11 -0
  33. agno/hooks/__init__.py +3 -0
  34. agno/hooks/decorator.py +164 -0
  35. agno/knowledge/chunking/semantic.py +2 -2
  36. agno/models/aimlapi/aimlapi.py +17 -0
  37. agno/models/anthropic/claude.py +19 -12
  38. agno/models/aws/bedrock.py +3 -4
  39. agno/models/aws/claude.py +5 -1
  40. agno/models/azure/ai_foundry.py +2 -2
  41. agno/models/azure/openai_chat.py +8 -0
  42. agno/models/cerebras/cerebras.py +61 -4
  43. agno/models/cerebras/cerebras_openai.py +17 -0
  44. agno/models/cohere/chat.py +5 -1
  45. agno/models/cometapi/cometapi.py +18 -1
  46. agno/models/dashscope/dashscope.py +2 -3
  47. agno/models/deepinfra/deepinfra.py +18 -1
  48. agno/models/deepseek/deepseek.py +2 -3
  49. agno/models/fireworks/fireworks.py +18 -1
  50. agno/models/google/gemini.py +8 -2
  51. agno/models/groq/groq.py +5 -2
  52. agno/models/internlm/internlm.py +18 -1
  53. agno/models/langdb/langdb.py +13 -1
  54. agno/models/litellm/chat.py +2 -2
  55. agno/models/litellm/litellm_openai.py +18 -1
  56. agno/models/meta/llama_openai.py +19 -2
  57. agno/models/nebius/nebius.py +2 -3
  58. agno/models/nvidia/nvidia.py +20 -3
  59. agno/models/openai/chat.py +17 -2
  60. agno/models/openai/responses.py +17 -2
  61. agno/models/openrouter/openrouter.py +21 -2
  62. agno/models/perplexity/perplexity.py +17 -1
  63. agno/models/portkey/portkey.py +7 -6
  64. agno/models/requesty/requesty.py +19 -2
  65. agno/models/response.py +2 -1
  66. agno/models/sambanova/sambanova.py +20 -3
  67. agno/models/siliconflow/siliconflow.py +19 -2
  68. agno/models/together/together.py +20 -3
  69. agno/models/vercel/v0.py +20 -3
  70. agno/models/vllm/vllm.py +19 -14
  71. agno/models/xai/xai.py +19 -2
  72. agno/os/app.py +104 -0
  73. agno/os/config.py +13 -0
  74. agno/os/interfaces/whatsapp/router.py +0 -1
  75. agno/os/mcp.py +1 -0
  76. agno/os/router.py +31 -0
  77. agno/os/routers/traces/__init__.py +3 -0
  78. agno/os/routers/traces/schemas.py +414 -0
  79. agno/os/routers/traces/traces.py +499 -0
  80. agno/os/schema.py +22 -1
  81. agno/os/utils.py +57 -0
  82. agno/run/agent.py +1 -0
  83. agno/run/base.py +17 -0
  84. agno/run/team.py +4 -0
  85. agno/session/team.py +1 -0
  86. agno/table.py +10 -0
  87. agno/team/team.py +215 -65
  88. agno/tools/function.py +10 -8
  89. agno/tools/nano_banana.py +1 -1
  90. agno/tracing/__init__.py +12 -0
  91. agno/tracing/exporter.py +157 -0
  92. agno/tracing/schemas.py +276 -0
  93. agno/tracing/setup.py +111 -0
  94. agno/utils/agent.py +4 -4
  95. agno/utils/hooks.py +56 -1
  96. agno/vectordb/qdrant/qdrant.py +22 -22
  97. agno/workflow/condition.py +8 -0
  98. agno/workflow/loop.py +8 -0
  99. agno/workflow/parallel.py +8 -0
  100. agno/workflow/router.py +8 -0
  101. agno/workflow/step.py +20 -0
  102. agno/workflow/steps.py +8 -0
  103. agno/workflow/workflow.py +83 -17
  104. {agno-2.3.3.dist-info → agno-2.3.5.dist-info}/METADATA +2 -2
  105. {agno-2.3.3.dist-info → agno-2.3.5.dist-info}/RECORD +108 -98
  106. {agno-2.3.3.dist-info → agno-2.3.5.dist-info}/WHEEL +0 -0
  107. {agno-2.3.3.dist-info → agno-2.3.5.dist-info}/licenses/LICENSE +0 -0
  108. {agno-2.3.3.dist-info → agno-2.3.5.dist-info}/top_level.txt +0 -0
agno/os/routers/traces/traces.py ADDED
@@ -0,0 +1,499 @@
+import logging
+from typing import Optional, Union
+
+from fastapi import Depends, HTTPException, Query
+from fastapi.routing import APIRouter
+
+from agno.db.base import AsyncBaseDb, BaseDb
+from agno.os.auth import get_authentication_dependency
+from agno.os.routers.traces.schemas import (
+    TraceDetail,
+    TraceNode,
+    TraceSessionStats,
+    TraceSummary,
+)
+from agno.os.schema import (
+    BadRequestResponse,
+    InternalServerErrorResponse,
+    NotFoundResponse,
+    PaginatedResponse,
+    PaginationInfo,
+    UnauthenticatedResponse,
+    ValidationErrorResponse,
+)
+from agno.os.settings import AgnoAPISettings
+from agno.os.utils import get_db, parse_datetime_to_utc
+from agno.utils.log import log_error
+
+logger = logging.getLogger(__name__)
+
+
+def get_traces_router(
+    dbs: dict[str, list[Union[BaseDb, AsyncBaseDb]]], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs
+) -> APIRouter:
+    """Create traces router with comprehensive OpenAPI documentation for trace endpoints."""
+    router = APIRouter(
+        dependencies=[Depends(get_authentication_dependency(settings))],
+        tags=["Traces"],
+        responses={
+            400: {"description": "Bad Request", "model": BadRequestResponse},
+            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
+            404: {"description": "Not Found", "model": NotFoundResponse},
+            422: {"description": "Validation Error", "model": ValidationErrorResponse},
+            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
+        },
+    )
+    return attach_routes(router=router, dbs=dbs)
+
+
+def attach_routes(router: APIRouter, dbs: dict[str, list[Union[BaseDb, AsyncBaseDb]]]) -> APIRouter:
+    @router.get(
+        "/traces",
+        response_model=PaginatedResponse[TraceSummary],
+        response_model_exclude_none=True,
+        tags=["Traces"],
+        operation_id="get_traces",
+        summary="List Traces",
+        description=(
+            "Retrieve a paginated list of execution traces with optional filtering.\n\n"
+            "**Traces provide observability into:**\n"
+            "- Agent execution flows\n"
+            "- Model invocations and token usage\n"
+            "- Tool calls and their results\n"
+            "- Errors and performance bottlenecks\n\n"
+            "**Filtering Options:**\n"
+            "- By run, session, user, or agent ID\n"
+            "- By status (OK, ERROR)\n"
+            "- By time range\n\n"
+            "**Pagination:**\n"
+            "- Use `page` (1-indexed) and `limit` parameters\n"
+            "- Response includes pagination metadata (total_pages, total_count, etc.)\n\n"
+            "**Response Format:**\n"
+            "Returns summary information for each trace. Use GET `/traces/{trace_id}` for detailed hierarchy."
+        ),
+        responses={
+            200: {
+                "description": "List of traces retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "data": [
+                                {
+                                    "trace_id": "a1b2c3d4",
+                                    "name": "Stock_Price_Agent.run",
+                                    "status": "OK",
+                                    "duration": "1.2s",
+                                    "start_time": "2025-11-19T10:30:00.000000+00:00",
+                                    "total_spans": 4,
+                                    "error_count": 0,
+                                    "input": "What is the stock price of NVDA?",
+                                    "run_id": "run123",
+                                    "session_id": "session456",
+                                    "user_id": "user789",
+                                    "agent_id": "agent_stock",
+                                    "team_id": None,
+                                    "workflow_id": None,
+                                    "created_at": "2025-11-19T10:30:00+00:00",
+                                }
+                            ],
+                            "meta": {
+                                "page": 1,
+                                "limit": 20,
+                                "total_pages": 5,
+                                "total_count": 95,
+                            },
+                        }
+                    }
+                },
+            }
+        },
+    )
+    async def get_traces(
+        run_id: Optional[str] = Query(default=None, description="Filter by run ID"),
+        session_id: Optional[str] = Query(default=None, description="Filter by session ID"),
+        user_id: Optional[str] = Query(default=None, description="Filter by user ID"),
+        agent_id: Optional[str] = Query(default=None, description="Filter by agent ID"),
+        team_id: Optional[str] = Query(default=None, description="Filter by team ID"),
+        workflow_id: Optional[str] = Query(default=None, description="Filter by workflow ID"),
+        status: Optional[str] = Query(default=None, description="Filter by status (OK, ERROR)"),
+        start_time: Optional[str] = Query(
+            default=None,
+            description="Filter traces starting after this time (ISO 8601 format with timezone, e.g., '2025-11-19T10:00:00Z' or '2025-11-19T15:30:00+05:30'). Times are converted to UTC for comparison.",
+        ),
+        end_time: Optional[str] = Query(
+            default=None,
+            description="Filter traces ending before this time (ISO 8601 format with timezone, e.g., '2025-11-19T11:00:00Z' or '2025-11-19T16:30:00+05:30'). Times are converted to UTC for comparison.",
+        ),
+        page: int = Query(default=1, description="Page number (1-indexed)", ge=1),
+        limit: int = Query(default=20, description="Number of traces per page", ge=1, le=100),
+        db_id: Optional[str] = Query(default=None, description="Database ID to query traces from"),
+    ):
+        """Get list of traces with optional filters and pagination"""
+        import time as time_module
+
+        # Get database using db_id or default to first available
+        db = await get_db(dbs, db_id)
+
+        try:
+            start_time_ms = time_module.time() * 1000
+
+            # Convert ISO datetime strings to UTC datetime objects
+            start_time_dt = parse_datetime_to_utc(start_time, "start_time") if start_time else None
+            end_time_dt = parse_datetime_to_utc(end_time, "end_time") if end_time else None
+
+            if isinstance(db, AsyncBaseDb):
+                traces, total_count = await db.get_traces(
+                    run_id=run_id,
+                    session_id=session_id,
+                    user_id=user_id,
+                    agent_id=agent_id,
+                    team_id=team_id,
+                    workflow_id=workflow_id,
+                    status=status,
+                    start_time=start_time_dt,
+                    end_time=end_time_dt,
+                    limit=limit,
+                    page=page,
+                )
+            else:
+                traces, total_count = db.get_traces(
+                    run_id=run_id,
+                    session_id=session_id,
+                    user_id=user_id,
+                    agent_id=agent_id,
+                    team_id=team_id,
+                    workflow_id=workflow_id,
+                    status=status,
+                    start_time=start_time_dt,
+                    end_time=end_time_dt,
+                    limit=limit,
+                    page=page,
+                )
+
+            end_time_ms = time_module.time() * 1000
+            search_time_ms = round(end_time_ms - start_time_ms, 2)
+
+            # Calculate total pages
+            total_pages = (total_count + limit - 1) // limit if limit > 0 else 0
+
+            trace_inputs = {}
+            for trace in traces:
+                if isinstance(db, AsyncBaseDb):
+                    spans = await db.get_spans(trace_id=trace.trace_id)
+                else:
+                    spans = db.get_spans(trace_id=trace.trace_id)
+
+                # Find root span and extract input
+                root_span = next((s for s in spans if not s.parent_span_id), None)
+                if root_span and hasattr(root_span, "attributes"):
+                    trace_inputs[trace.trace_id] = root_span.attributes.get("input.value")
+
+            # Build response
+            trace_summaries = [
+                TraceSummary.from_trace(trace, input=trace_inputs.get(trace.trace_id)) for trace in traces
+            ]
+
+            return PaginatedResponse(
+                data=trace_summaries,
+                meta=PaginationInfo(
+                    page=page,
+                    limit=limit,
+                    total_pages=total_pages,
+                    total_count=total_count,
+                    search_time_ms=search_time_ms,
+                ),
+            )
+
+        except Exception as e:
+            log_error(f"Error retrieving traces: {e}")
+            raise HTTPException(status_code=500, detail=f"Error retrieving traces: {str(e)}")
+
+    @router.get(
+        "/traces/{trace_id}",
+        response_model=Union[TraceDetail, TraceNode],
+        response_model_exclude_none=True,
+        tags=["Traces"],
+        operation_id="get_trace",
+        summary="Get Trace or Span Detail",
+        description=(
+            "Retrieve detailed trace information with hierarchical span tree, or a specific span within the trace.\n\n"
+            "**Without span_id parameter:**\n"
+            "Returns the full trace with hierarchical span tree:\n"
+            "- Trace metadata (ID, status, duration, context)\n"
+            "- Hierarchical tree of all spans\n"
+            "- Each span includes timing, status, and type-specific metadata\n\n"
+            "**With span_id parameter:**\n"
+            "Returns details for a specific span within the trace:\n"
+            "- Span metadata (ID, name, type, timing)\n"
+            "- Status and error information\n"
+            "- Type-specific attributes (model, tokens, tool params, etc.)\n\n"
+            "**Span Hierarchy (full trace):**\n"
+            "The `tree` field contains root spans, each with potential `children`.\n"
+            "This recursive structure represents the execution flow:\n"
+            "```\n"
+            "Agent.run (root)\n"
+            "  ├─ LLM.invoke\n"
+            "  ├─ Tool.execute\n"
+            "  │   └─ LLM.invoke (nested)\n"
+            "  └─ LLM.invoke\n"
+            "```\n\n"
+            "**Span Types:**\n"
+            "- `AGENT`: Agent execution with input/output\n"
+            "- `LLM`: Model invocations with tokens and prompts\n"
+            "- `TOOL`: Tool calls with parameters and results"
+        ),
+        responses={
+            200: {
+                "description": "Trace or span detail retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "examples": {
+                            "full_trace": {
+                                "summary": "Full trace with hierarchy (no span_id)",
+                                "value": {
+                                    "trace_id": "a1b2c3d4",
+                                    "name": "Stock_Price_Agent.run",
+                                    "status": "OK",
+                                    "duration": "1.2s",
+                                    "start_time": "2025-11-19T10:30:00.000000+00:00",
+                                    "end_time": "2025-11-19T10:30:01.200000+00:00",
+                                    "total_spans": 4,
+                                    "error_count": 0,
+                                    "input": "What is Tesla stock price?",
+                                    "output": "The current price of Tesla (TSLA) is $245.67.",
+                                    "error": None,
+                                    "run_id": "run123",
+                                    "session_id": "session456",
+                                    "user_id": "user789",
+                                    "agent_id": "stock_agent",
+                                    "team_id": None,
+                                    "workflow_id": None,
+                                    "created_at": "2025-11-19T10:30:00+00:00",
+                                    "tree": [
+                                        {
+                                            "id": "span1",
+                                            "name": "Stock_Price_Agent.run",
+                                            "type": "AGENT",
+                                            "duration": "1.2s",
+                                            "status": "OK",
+                                            "input": None,
+                                            "output": None,
+                                            "error": None,
+                                            "spans": [],
+                                        }
+                                    ],
+                                },
+                            },
+                            "single_span": {
+                                "summary": "Single span detail (with span_id)",
+                                "value": {
+                                    "id": "span2",
+                                    "name": "gpt-4o-mini.invoke",
+                                    "type": "LLM",
+                                    "duration": "800ms",
+                                    "status": "OK",
+                                    "metadata": {"model": "gpt-4o-mini", "input_tokens": 120},
+                                },
+                            },
+                        }
+                    }
+                },
+            },
+            404: {"description": "Trace or span not found", "model": NotFoundResponse},
+        },
+    )
+    async def get_trace(
+        trace_id: str,
+        span_id: Optional[str] = Query(default=None, description="Optional: Span ID to retrieve specific span"),
+        run_id: Optional[str] = Query(default=None, description="Optional: Run ID to retrieve trace for"),
+        db_id: Optional[str] = Query(default=None, description="Database ID to query trace from"),
+    ):
+        """Get detailed trace with hierarchical span tree, or a specific span within the trace"""
+        # Get database using db_id or default to first available
+        db = await get_db(dbs, db_id)
+
+        try:
+            # If span_id is provided, return just that span
+            if span_id:
+                if isinstance(db, AsyncBaseDb):
+                    span = await db.get_span(span_id)
+                else:
+                    span = db.get_span(span_id)
+
+                if span is None:
+                    raise HTTPException(status_code=404, detail="Span not found")
+
+                # Verify the span belongs to the requested trace
+                if span.trace_id != trace_id:
+                    raise HTTPException(status_code=404, detail=f"Span {span_id} does not belong to trace {trace_id}")
+
+                # Convert to TraceNode (without children since we're fetching a single span)
+                return TraceNode.from_span(span, spans=None)
+
+            # Otherwise, return full trace with hierarchy
+            # Get trace
+            if isinstance(db, AsyncBaseDb):
+                trace = await db.get_trace(trace_id=trace_id, run_id=run_id)
+            else:
+                trace = db.get_trace(trace_id=trace_id, run_id=run_id)
+
+            if trace is None:
+                raise HTTPException(status_code=404, detail="Trace not found")
+
+            # Get all spans for this trace
+            if isinstance(db, AsyncBaseDb):
+                spans = await db.get_spans(trace_id=trace_id)
+            else:
+                spans = db.get_spans(trace_id=trace_id)
+
+            # Build hierarchical response
+            return TraceDetail.from_trace_and_spans(trace, spans)

+        except HTTPException:
+            raise
+        except Exception as e:
+            log_error(f"Error retrieving trace {trace_id}: {e}")
+            raise HTTPException(status_code=500, detail=f"Error retrieving trace: {str(e)}")
+
+    @router.get(
+        "/trace_session_stats",
+        response_model=PaginatedResponse[TraceSessionStats],
+        response_model_exclude_none=True,
+        tags=["Traces"],
+        operation_id="get_trace_stats",
+        summary="Get Trace Statistics by Session",
+        description=(
+            "Retrieve aggregated trace statistics grouped by session ID with pagination.\n\n"
+            "**Provides insights into:**\n"
+            "- Total traces per session\n"
+            "- First and last trace timestamps per session\n"
+            "- Associated user and agent information\n\n"
+            "**Filtering Options:**\n"
+            "- By user ID\n"
+            "- By agent ID\n\n"
+            "**Use Cases:**\n"
+            "- Monitor session-level activity\n"
+            "- Track conversation flows\n"
+            "- Identify high-activity sessions\n"
+            "- Analyze user engagement patterns"
+        ),
+        responses={
+            200: {
+                "description": "Trace statistics retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "data": [
+                                {
+                                    "session_id": "37029bc6-1794-4ba8-a629-1efedc53dcad",
+                                    "user_id": "kaustubh@agno.com",
+                                    "agent_id": "hackernews-agent",
+                                    "team_id": None,
+                                    "total_traces": 5,
+                                    "first_trace_at": "2025-11-19T10:15:16+00:00",
+                                    "last_trace_at": "2025-11-19T10:21:30+00:00",
+                                }
+                            ],
+                            "meta": {
+                                "page": 1,
+                                "limit": 20,
+                                "total_pages": 3,
+                                "total_count": 45,
+                            },
+                        }
+                    }
+                },
+            },
+            500: {"description": "Failed to retrieve statistics", "model": InternalServerErrorResponse},
+        },
+    )
+    async def get_trace_stats(
+        user_id: Optional[str] = Query(default=None, description="Filter by user ID"),
+        agent_id: Optional[str] = Query(default=None, description="Filter by agent ID"),
+        team_id: Optional[str] = Query(default=None, description="Filter by team ID"),
+        workflow_id: Optional[str] = Query(default=None, description="Filter by workflow ID"),
+        start_time: Optional[str] = Query(
+            default=None,
+            description="Filter sessions with traces created after this time (ISO 8601 format with timezone, e.g., '2025-11-19T10:00:00Z' or '2025-11-19T15:30:00+05:30'). Times are converted to UTC for comparison.",
+        ),
+        end_time: Optional[str] = Query(
+            default=None,
+            description="Filter sessions with traces created before this time (ISO 8601 format with timezone, e.g., '2025-11-19T11:00:00Z' or '2025-11-19T16:30:00+05:30'). Times are converted to UTC for comparison.",
+        ),
+        page: int = Query(default=1, description="Page number (1-indexed)", ge=1),
+        limit: int = Query(default=20, description="Number of sessions per page", ge=1, le=100),
+        db_id: Optional[str] = Query(default=None, description="Database ID to query statistics from"),
+    ):
+        """Get trace statistics grouped by session"""
+        import time as time_module
+
+        # Get database using db_id or default to first available
+        db = await get_db(dbs, db_id)
+
+        try:
+            start_time_ms = time_module.time() * 1000
+
+            # Convert ISO datetime strings to UTC datetime objects
+            start_time_dt = parse_datetime_to_utc(start_time, "start_time") if start_time else None
+            end_time_dt = parse_datetime_to_utc(end_time, "end_time") if end_time else None
+
+            if isinstance(db, AsyncBaseDb):
+                stats_list, total_count = await db.get_trace_stats(
+                    user_id=user_id,
+                    agent_id=agent_id,
+                    team_id=team_id,
+                    workflow_id=workflow_id,
+                    start_time=start_time_dt,
+                    end_time=end_time_dt,
+                    limit=limit,
+                    page=page,
+                )
+            else:
+                stats_list, total_count = db.get_trace_stats(
+                    user_id=user_id,
+                    agent_id=agent_id,
+                    team_id=team_id,
+                    workflow_id=workflow_id,
+                    start_time=start_time_dt,
+                    end_time=end_time_dt,
+                    limit=limit,
+                    page=page,
+                )
+
+            end_time_ms = time_module.time() * 1000
+            search_time_ms = round(end_time_ms - start_time_ms, 2)
+
+            # Calculate total pages
+            total_pages = (total_count + limit - 1) // limit if limit > 0 else 0
+
+            # Convert stats to response models (Pydantic auto-serializes datetime to ISO 8601)
+            stats_response = [
+                TraceSessionStats(
+                    session_id=stat["session_id"],
+                    user_id=stat.get("user_id"),
+                    agent_id=stat.get("agent_id"),
+                    team_id=stat.get("team_id"),
+                    workflow_id=stat.get("workflow_id"),
+                    total_traces=stat["total_traces"],
+                    first_trace_at=stat["first_trace_at"],
+                    last_trace_at=stat["last_trace_at"],
+                )
+                for stat in stats_list
+            ]
+
+            return PaginatedResponse(
+                data=stats_response,
+                meta=PaginationInfo(
+                    page=page,
+                    limit=limit,
+                    total_pages=total_pages,
+                    total_count=total_count,
+                    search_time_ms=search_time_ms,
+                ),
+            )
+
+        except Exception as e:
+            log_error(f"Error retrieving trace statistics: {e}")
+            raise HTTPException(status_code=500, detail=f"Error retrieving statistics: {str(e)}")
+
+    return router
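
This router is what exposes `/traces`, `/traces/{trace_id}` and `/trace_session_stats` in AgentOS. A minimal sketch of mounting it on a bare FastAPI app follows; AgentOS normally performs this wiring itself, and the `PostgresDb` connection string and the `dbs` key name below are illustrative assumptions, not documented values:

```python
# Hypothetical wiring sketch -- AgentOS does this automatically; values are illustrative only.
from fastapi import FastAPI

from agno.db.postgres import PostgresDb
from agno.os.routers.traces.traces import get_traces_router

db = PostgresDb(db_url="postgresql+psycopg://ai:ai@localhost:5532/ai")  # assumed connection string
app = FastAPI()
app.include_router(get_traces_router(dbs={"default": [db]}))  # key name is an assumption

# Example requests once mounted:
#   GET /traces?agent_id=stock_agent&status=OK&page=1&limit=20   -> paginated TraceSummary list
#   GET /traces/<trace_id>                                       -> TraceDetail with span tree
#   GET /traces/<trace_id>?span_id=<span_id>                     -> single TraceNode
#   GET /trace_session_stats?user_id=user789                     -> per-session aggregates
```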
agno/os/schema.py CHANGED
@@ -8,7 +8,15 @@ from pydantic import BaseModel, ConfigDict, Field
 from agno.agent import Agent
 from agno.db.base import SessionType
 from agno.models.message import Message
-from agno.os.config import ChatConfig, EvalsConfig, KnowledgeConfig, MemoryConfig, MetricsConfig, SessionConfig
+from agno.os.config import (
+    ChatConfig,
+    EvalsConfig,
+    KnowledgeConfig,
+    MemoryConfig,
+    MetricsConfig,
+    SessionConfig,
+    TracesConfig,
+)
 from agno.os.utils import (
     extract_input_media,
     format_team_tools,
@@ -152,6 +160,7 @@ class ConfigResponse(BaseModel):
     memory: Optional[MemoryConfig] = Field(None, description="Memory configuration")
     knowledge: Optional[KnowledgeConfig] = Field(None, description="Knowledge configuration")
     evals: Optional[EvalsConfig] = Field(None, description="Evaluations configuration")
+    traces: Optional[TracesConfig] = Field(None, description="Traces configuration")
 
     agents: List[AgentSummaryResponse] = Field(..., description="List of registered agents")
     teams: List[TeamSummaryResponse] = Field(..., description="List of registered teams")
@@ -898,6 +907,9 @@ class RunSchema(BaseModel):
     events: Optional[List[dict]] = Field(None, description="Events generated during the run")
     created_at: Optional[datetime] = Field(None, description="Run creation timestamp")
     references: Optional[List[dict]] = Field(None, description="References cited in the run")
+    citations: Optional[Dict[str, Any]] = Field(
+        None, description="Citations from the model (e.g., from Gemini grounding/search)"
+    )
     reasoning_messages: Optional[List[dict]] = Field(None, description="Reasoning process messages")
     session_state: Optional[dict] = Field(None, description="Session state at the end of the run")
     images: Optional[List[dict]] = Field(None, description="Images included in the run")
@@ -926,6 +938,7 @@ class RunSchema(BaseModel):
             tools=[tool for tool in run_dict.get("tools", [])] if run_dict.get("tools") else None,
             events=[event for event in run_dict["events"]] if run_dict.get("events") else None,
             references=run_dict.get("references", []),
+            citations=run_dict.get("citations", None),
             reasoning_messages=run_dict.get("reasoning_messages", []),
             session_state=run_dict.get("session_state"),
             images=run_dict.get("images", []),
@@ -955,6 +968,9 @@ class TeamRunSchema(BaseModel):
     events: Optional[List[dict]] = Field(None, description="Events generated during the run")
     created_at: Optional[datetime] = Field(None, description="Run creation timestamp")
     references: Optional[List[dict]] = Field(None, description="References cited in the run")
+    citations: Optional[Dict[str, Any]] = Field(
+        None, description="Citations from the model (e.g., from Gemini grounding/search)"
+    )
     reasoning_messages: Optional[List[dict]] = Field(None, description="Reasoning process messages")
     session_state: Optional[dict] = Field(None, description="Session state at the end of the run")
     input_media: Optional[Dict[str, Any]] = Field(None, description="Input media attachments")
@@ -985,6 +1001,7 @@ class TeamRunSchema(BaseModel):
             if run_dict.get("created_at") is not None
             else None,
             references=run_dict.get("references", []),
+            citations=run_dict.get("citations", None),
             reasoning_messages=run_dict.get("reasoning_messages", []),
             session_state=run_dict.get("session_state"),
             images=run_dict.get("images", []),
@@ -1012,6 +1029,9 @@ class WorkflowRunSchema(BaseModel):
     reasoning_content: Optional[str] = Field(None, description="Reasoning content if reasoning was enabled")
     reasoning_steps: Optional[List[dict]] = Field(None, description="List of reasoning steps")
     references: Optional[List[dict]] = Field(None, description="References cited in the workflow")
+    citations: Optional[Dict[str, Any]] = Field(
+        None, description="Citations from the model (e.g., from Gemini grounding/search)"
+    )
     reasoning_messages: Optional[List[dict]] = Field(None, description="Reasoning process messages")
     images: Optional[List[dict]] = Field(None, description="Images included in the workflow")
     videos: Optional[List[dict]] = Field(None, description="Videos included in the workflow")
@@ -1038,6 +1058,7 @@ class WorkflowRunSchema(BaseModel):
             reasoning_content=run_response.get("reasoning_content", ""),
             reasoning_steps=run_response.get("reasoning_steps", []),
             references=run_response.get("references", []),
+            citations=run_response.get("citations", None),
             reasoning_messages=run_response.get("reasoning_messages", []),
             images=run_response.get("images", []),
             videos=run_response.get("videos", []),
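
The `citations` passthrough added above is identical on RunSchema, TeamRunSchema and WorkflowRunSchema: whatever the model attached to the run is copied into the API payload unchanged. A small sketch of reading it on the client side; the inner key names are provider-defined (for example Gemini grounding) and are assumptions here, not a fixed schema:

```python
# Sketch only: the inner structure of "citations" depends on the model provider,
# so the keys below are illustrative assumptions.
run_payload = {
    "run_id": "run123",
    "references": [],
    "citations": {"urls": ["https://example.com/source"], "raw": {"provider": "gemini"}},
}

citations = run_payload.get("citations")  # mirrors RunSchema's run_dict.get("citations", None)
if citations:
    for url in citations.get("urls", []):
        print(url)
```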
agno/os/utils.py CHANGED
@@ -1,3 +1,4 @@
+from datetime import datetime, timezone
 from typing import Any, Callable, Dict, List, Optional, Set, Type, Union
 
 from fastapi import FastAPI, HTTPException, UploadFile
@@ -752,3 +753,59 @@ def json_schema_to_pydantic_model(schema: Dict[str, Any]) -> Type[BaseModel]:
         logger.error(f"Failed to create dynamic model '{model_name}': {e}")
         # Return a minimal model as fallback
         return create_model(model_name)
+
+
+def setup_tracing_for_os(db: Union[BaseDb, AsyncBaseDb]) -> None:
+    """Set up OpenTelemetry tracing for this agent/team/workflow."""
+    try:
+        from agno.tracing import setup_tracing
+
+        setup_tracing(db=db)
+    except ImportError:
+        logger.warning(
+            "tracing=True but OpenTelemetry packages not installed. "
+            "Install with: pip install opentelemetry-api opentelemetry-sdk openinference-instrumentation-agno"
+        )
+    except Exception as e:
+        logger.warning(f"Failed to enable tracing: {e}")
+
+
+def format_duration_ms(duration_ms: Optional[int]) -> str:
+    """Format a duration in milliseconds to a human-readable string.
+
+    Args:
+        duration_ms: Duration in milliseconds
+
+    Returns:
+        Formatted string like "150ms" or "1.50s"
+    """
+    if duration_ms is None or duration_ms < 1000:
+        return f"{duration_ms or 0}ms"
+    return f"{duration_ms / 1000:.2f}s"
+
+
+def parse_datetime_to_utc(datetime_str: str, param_name: str = "datetime") -> "datetime":
+    """Parse an ISO 8601 datetime string and convert to UTC.
+
+    Args:
+        datetime_str: ISO 8601 formatted datetime string (e.g., '2025-11-19T10:00:00Z' or '2025-11-19T15:30:00+05:30')
+        param_name: Name of the parameter for error messages
+
+    Returns:
+        datetime object in UTC timezone
+
+    Raises:
+        HTTPException: If the datetime string is invalid
+    """
+    try:
+        dt = datetime.fromisoformat(datetime_str.replace("Z", "+00:00"))
+        # Convert to UTC if timezone-aware, otherwise assume UTC
+        if dt.tzinfo is not None:
+            return dt.astimezone(timezone.utc)
+        else:
+            return dt.replace(tzinfo=timezone.utc)
+    except ValueError as e:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Invalid {param_name} format. Use ISO 8601 format (e.g., '2025-11-19T10:00:00Z' or '2025-11-19T10:00:00+05:30'): {e}",
+        )
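
Together, these helpers drive the timestamp filtering and the human-readable durations used by the traces endpoints. A quick behavioral sketch; the expected values in the comments follow directly from the code above:

```python
from agno.os.utils import format_duration_ms, parse_datetime_to_utc

print(format_duration_ms(150))   # "150ms"  -> sub-second values stay in milliseconds
print(format_duration_ms(1500))  # "1.50s"  -> values >= 1000 ms are rendered in seconds
print(format_duration_ms(None))  # "0ms"    -> missing durations fall back to 0

# Offset-aware inputs are normalized to UTC; naive inputs are assumed to already be UTC.
print(parse_datetime_to_utc("2025-11-19T15:30:00+05:30"))  # 2025-11-19 10:00:00+00:00
print(parse_datetime_to_utc("2025-11-19T10:00:00Z"))       # 2025-11-19 10:00:00+00:00
# Malformed strings raise an HTTPException with status 400, which FastAPI returns to the caller.
```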
agno/run/agent.py CHANGED
@@ -572,6 +572,7 @@ class RunOutput:
                 and k
                 not in [
                     "messages",
+                    "metrics",
                     "tools",
                     "metadata",
                     "images",