agno-2.0.0rc1-py3-none-any.whl → agno-2.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. agno/agent/agent.py +101 -140
  2. agno/db/mongo/mongo.py +8 -3
  3. agno/eval/accuracy.py +12 -5
  4. agno/knowledge/chunking/strategy.py +14 -14
  5. agno/knowledge/knowledge.py +156 -120
  6. agno/knowledge/reader/arxiv_reader.py +5 -5
  7. agno/knowledge/reader/csv_reader.py +6 -77
  8. agno/knowledge/reader/docx_reader.py +5 -5
  9. agno/knowledge/reader/firecrawl_reader.py +5 -5
  10. agno/knowledge/reader/json_reader.py +5 -5
  11. agno/knowledge/reader/markdown_reader.py +31 -9
  12. agno/knowledge/reader/pdf_reader.py +10 -123
  13. agno/knowledge/reader/reader_factory.py +65 -72
  14. agno/knowledge/reader/s3_reader.py +44 -114
  15. agno/knowledge/reader/text_reader.py +5 -5
  16. agno/knowledge/reader/url_reader.py +75 -31
  17. agno/knowledge/reader/web_search_reader.py +6 -29
  18. agno/knowledge/reader/website_reader.py +5 -5
  19. agno/knowledge/reader/wikipedia_reader.py +5 -5
  20. agno/knowledge/reader/youtube_reader.py +6 -6
  21. agno/knowledge/reranker/__init__.py +9 -0
  22. agno/knowledge/utils.py +10 -10
  23. agno/media.py +269 -268
  24. agno/models/aws/bedrock.py +3 -7
  25. agno/models/base.py +50 -54
  26. agno/models/google/gemini.py +11 -10
  27. agno/models/message.py +4 -4
  28. agno/models/ollama/chat.py +1 -1
  29. agno/models/openai/chat.py +33 -14
  30. agno/models/response.py +5 -5
  31. agno/os/app.py +40 -29
  32. agno/os/mcp.py +39 -59
  33. agno/os/router.py +547 -16
  34. agno/os/routers/evals/evals.py +197 -12
  35. agno/os/routers/knowledge/knowledge.py +428 -14
  36. agno/os/routers/memory/memory.py +250 -28
  37. agno/os/routers/metrics/metrics.py +125 -7
  38. agno/os/routers/session/session.py +393 -25
  39. agno/os/schema.py +55 -2
  40. agno/run/agent.py +37 -28
  41. agno/run/base.py +9 -19
  42. agno/run/team.py +110 -19
  43. agno/run/workflow.py +41 -28
  44. agno/team/team.py +808 -1080
  45. agno/tools/brightdata.py +3 -3
  46. agno/tools/cartesia.py +3 -5
  47. agno/tools/dalle.py +7 -4
  48. agno/tools/desi_vocal.py +2 -2
  49. agno/tools/e2b.py +6 -6
  50. agno/tools/eleven_labs.py +3 -3
  51. agno/tools/fal.py +4 -4
  52. agno/tools/function.py +7 -7
  53. agno/tools/giphy.py +2 -2
  54. agno/tools/lumalab.py +3 -3
  55. agno/tools/mcp.py +1 -2
  56. agno/tools/models/azure_openai.py +2 -2
  57. agno/tools/models/gemini.py +3 -3
  58. agno/tools/models/groq.py +3 -5
  59. agno/tools/models/nebius.py +2 -2
  60. agno/tools/models_labs.py +5 -5
  61. agno/tools/openai.py +4 -9
  62. agno/tools/opencv.py +3 -3
  63. agno/tools/replicate.py +7 -7
  64. agno/utils/events.py +5 -5
  65. agno/utils/gemini.py +1 -1
  66. agno/utils/log.py +52 -2
  67. agno/utils/mcp.py +57 -5
  68. agno/utils/models/aws_claude.py +1 -1
  69. agno/utils/models/claude.py +0 -8
  70. agno/utils/models/cohere.py +1 -1
  71. agno/utils/models/watsonx.py +1 -1
  72. agno/utils/openai.py +1 -1
  73. agno/utils/print_response/team.py +177 -73
  74. agno/utils/streamlit.py +27 -0
  75. agno/vectordb/lancedb/lance_db.py +82 -25
  76. agno/workflow/step.py +7 -7
  77. agno/workflow/types.py +13 -13
  78. agno/workflow/workflow.py +37 -28
  79. {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/METADATA +140 -1
  80. {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/RECORD +83 -84
  81. agno-2.0.1.dist-info/licenses/LICENSE +201 -0
  82. agno/knowledge/reader/gcs_reader.py +0 -67
  83. agno-2.0.0rc1.dist-info/licenses/LICENSE +0 -375
  84. {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/WHEEL +0 -0
  85. {agno-2.0.0rc1.dist-info → agno-2.0.1.dist-info}/top_level.txt +0 -0
@@ -16,7 +16,16 @@ from agno.os.routers.evals.schemas import (
     UpdateEvalRunRequest,
 )
 from agno.os.routers.evals.utils import run_accuracy_eval, run_performance_eval, run_reliability_eval
-from agno.os.schema import PaginatedResponse, PaginationInfo, SortOrder
+from agno.os.schema import (
+    BadRequestResponse,
+    InternalServerErrorResponse,
+    NotFoundResponse,
+    PaginatedResponse,
+    PaginationInfo,
+    SortOrder,
+    UnauthenticatedResponse,
+    ValidationErrorResponse,
+)
 from agno.os.settings import AgnoAPISettings
 from agno.os.utils import get_agent_by_id, get_db, get_team_by_id
 from agno.team.team import Team
@@ -30,7 +39,18 @@ def get_eval_router(
     teams: Optional[List[Team]] = None,
     settings: AgnoAPISettings = AgnoAPISettings(),
 ) -> APIRouter:
-    router = APIRouter(dependencies=[Depends(get_authentication_dependency(settings))], tags=["Evals"])
+    """Create eval router with comprehensive OpenAPI documentation for agent/team evaluation endpoints."""
+    router = APIRouter(
+        dependencies=[Depends(get_authentication_dependency(settings))],
+        tags=["Evals"],
+        responses={
+            400: {"description": "Bad Request", "model": BadRequestResponse},
+            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
+            404: {"description": "Not Found", "model": NotFoundResponse},
+            422: {"description": "Validation Error", "model": ValidationErrorResponse},
+            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
+        },
+    )
     return attach_routes(router=router, dbs=dbs, agents=agents, teams=teams)


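The two hunks above are from agno/os/routers/evals/evals.py (+197 -12, item 34 in the file list): the first widens the agno.os.schema imports to the error-response models, and the second attaches a default responses mapping to the eval APIRouter. In FastAPI, a responses dict passed to APIRouter is merged into the OpenAPI documentation of every route registered on that router, so the shared 400/401/404/422/500 schemas are declared once instead of on every endpoint. A minimal, self-contained sketch of that mechanism, using a placeholder error model rather than agno's own schema classes:

from fastapi import APIRouter, FastAPI
from pydantic import BaseModel


class ErrorResponse(BaseModel):
    # Placeholder; agno uses dedicated models such as NotFoundResponse and InternalServerErrorResponse.
    detail: str


# Responses declared on the router become default OpenAPI documentation for every route below.
router = APIRouter(
    tags=["Evals"],
    responses={
        404: {"description": "Not Found", "model": ErrorResponse},
        500: {"description": "Internal Server Error", "model": ErrorResponse},
    },
)


@router.get("/eval-runs", operation_id="get_eval_runs")
async def get_eval_runs() -> list:
    return []


app = FastAPI()
app.include_router(router)

Serving this app and opening /openapi.json (or /docs) shows the 404 and 500 entries attached to GET /eval-runs, which is the effect the new router configuration is after.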
@@ -38,7 +58,47 @@ def attach_routes(
     router: APIRouter, dbs: dict[str, BaseDb], agents: Optional[List[Agent]] = None, teams: Optional[List[Team]] = None
 ) -> APIRouter:
     @router.get(
-        "/eval-runs", response_model=PaginatedResponse[EvalSchema], status_code=200, operation_id="get_eval_runs"
+        "/eval-runs",
+        response_model=PaginatedResponse[EvalSchema],
+        status_code=200,
+        operation_id="get_eval_runs",
+        summary="List Evaluation Runs",
+        description=(
+            "Retrieve paginated evaluation runs with filtering and sorting options. "
+            "Filter by agent, team, workflow, model, or evaluation type."
+        ),
+        responses={
+            200: {
+                "description": "Evaluation runs retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "data": [
+                                {
+                                    "id": "a03fa2f4-900d-482d-afe0-470d4cd8d1f4",
+                                    "agent_id": "basic-agent",
+                                    "model_id": "gpt-4o",
+                                    "model_provider": "OpenAI",
+                                    "team_id": None,
+                                    "workflow_id": None,
+                                    "name": "Test ",
+                                    "evaluated_component_name": None,
+                                    "eval_type": "reliability",
+                                    "eval_data": {
+                                        "eval_status": "PASSED",
+                                        "failed_tool_calls": [],
+                                        "passed_tool_calls": ["multiply"],
+                                    },
+                                    "eval_input": {"expected_tool_calls": ["multiply"]},
+                                    "created_at": "2025-08-27T15:41:59Z",
+                                    "updated_at": "2025-08-27T15:41:59Z",
+                                }
+                            ]
+                        }
+                    }
+                },
+            }
+        },
     )
     async def get_eval_runs(
         agent_id: Optional[str] = Query(default=None, description="Agent ID"),
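On top of the router-level defaults, the hunk above gives the list endpoint a summary, a description, and a literal example payload nested under responses -> 200 -> content -> application/json -> example. FastAPI deep-merges this per-route metadata into the generated OpenAPI document, which is what Swagger UI renders as the sample response body. A rough sketch of inspecting that merge on a toy route (a stand-in, not the actual agno app):

from fastapi import APIRouter, FastAPI

router = APIRouter()


@router.get(
    "/eval-runs/{eval_run_id}",
    responses={
        200: {
            "description": "Evaluation run details retrieved successfully",
            "content": {"application/json": {"example": {"id": "a03fa2f4", "eval_type": "reliability"}}},
        }
    },
)
async def get_eval_run(eval_run_id: str) -> dict:
    return {"id": eval_run_id}


app = FastAPI()
app.include_router(router)

# The example payload is carried through verbatim into the OpenAPI schema that /docs renders.
schema = app.openapi()
example = schema["paths"]["/eval-runs/{eval_run_id}"]["get"]["responses"]["200"]["content"]["application/json"]["example"]
print(example)  # {'id': 'a03fa2f4', 'eval_type': 'reliability'}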
@@ -78,7 +138,43 @@ def attach_routes(
             ),
         )
 
-    @router.get("/eval-runs/{eval_run_id}", response_model=EvalSchema, status_code=200, operation_id="get_eval_run")
+    @router.get(
+        "/eval-runs/{eval_run_id}",
+        response_model=EvalSchema,
+        status_code=200,
+        operation_id="get_eval_run",
+        summary="Get Evaluation Run",
+        description="Retrieve detailed results and metrics for a specific evaluation run.",
+        responses={
+            200: {
+                "description": "Evaluation run details retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "id": "a03fa2f4-900d-482d-afe0-470d4cd8d1f4",
+                            "agent_id": "basic-agent",
+                            "model_id": "gpt-4o",
+                            "model_provider": "OpenAI",
+                            "team_id": None,
+                            "workflow_id": None,
+                            "name": "Test ",
+                            "evaluated_component_name": None,
+                            "eval_type": "reliability",
+                            "eval_data": {
+                                "eval_status": "PASSED",
+                                "failed_tool_calls": [],
+                                "passed_tool_calls": ["multiply"],
+                            },
+                            "eval_input": {"expected_tool_calls": ["multiply"]},
+                            "created_at": "2025-08-27T15:41:59Z",
+                            "updated_at": "2025-08-27T15:41:59Z",
+                        }
+                    }
+                },
+            },
+            404: {"description": "Evaluation run not found", "model": NotFoundResponse},
+        },
+    )
     async def get_eval_run(
         eval_run_id: str,
         db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
@@ -90,10 +186,20 @@ def attach_routes(
 
         return EvalSchema.from_dict(eval_run)  # type: ignore
 
-    @router.delete("/eval-runs", status_code=204, operation_id="delete_eval_runs")
+    @router.delete(
+        "/eval-runs",
+        status_code=204,
+        operation_id="delete_eval_runs",
+        summary="Delete Evaluation Runs",
+        description="Delete multiple evaluation runs by their IDs. This action cannot be undone.",
+        responses={
+            204: {},
+            500: {"description": "Failed to delete evaluation runs", "model": InternalServerErrorResponse},
+        },
+    )
     async def delete_eval_runs(
         request: DeleteEvalRunsRequest,
-        db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
     ) -> None:
         try:
             db = get_db(dbs, db_id)
@@ -102,7 +208,42 @@ def attach_routes(
             raise HTTPException(status_code=500, detail=f"Failed to delete eval runs: {e}")
 
     @router.patch(
-        "/eval-runs/{eval_run_id}", response_model=EvalSchema, status_code=200, operation_id="update_eval_run"
+        "/eval-runs/{eval_run_id}",
+        response_model=EvalSchema,
+        status_code=200,
+        operation_id="update_eval_run",
+        summary="Update Evaluation Run",
+        description="Update the name or other properties of an existing evaluation run.",
+        responses={
+            200: {
+                "description": "Evaluation run updated successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "id": "a03fa2f4-900d-482d-afe0-470d4cd8d1f4",
+                            "agent_id": "basic-agent",
+                            "model_id": "gpt-4o",
+                            "model_provider": "OpenAI",
+                            "team_id": None,
+                            "workflow_id": None,
+                            "name": "Test ",
+                            "evaluated_component_name": None,
+                            "eval_type": "reliability",
+                            "eval_data": {
+                                "eval_status": "PASSED",
+                                "failed_tool_calls": [],
+                                "passed_tool_calls": ["multiply"],
+                            },
+                            "eval_input": {"expected_tool_calls": ["multiply"]},
+                            "created_at": "2025-08-27T15:41:59Z",
+                            "updated_at": "2025-08-27T15:41:59Z",
+                        }
+                    }
+                },
+            },
+            404: {"description": "Evaluation run not found", "model": NotFoundResponse},
+            500: {"description": "Failed to update evaluation run", "model": InternalServerErrorResponse},
+        },
+    )
     async def update_eval_run(
         eval_run_id: str,
@@ -120,10 +261,49 @@ def attach_routes(
 
         return EvalSchema.from_dict(eval_run)  # type: ignore
 
-    @router.post("/eval-runs", response_model=EvalSchema, status_code=200, operation_id="run_eval")
+    @router.post(
+        "/eval-runs",
+        response_model=EvalSchema,
+        status_code=200,
+        operation_id="run_eval",
+        summary="Execute Evaluation",
+        description=(
+            "Run evaluation tests on agents or teams. Supports accuracy, performance, and reliability evaluations. "
+            "Requires either agent_id or team_id, but not both."
+        ),
+        responses={
+            200: {
+                "description": "Evaluation executed successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "id": "f2b2d72f-e9e2-4f0e-8810-0a7e1ff58614",
+                            "agent_id": "basic-agent",
+                            "model_id": "gpt-4o",
+                            "model_provider": "OpenAI",
+                            "team_id": None,
+                            "workflow_id": None,
+                            "name": None,
+                            "evaluated_component_name": None,
+                            "eval_type": "reliability",
+                            "eval_data": {
+                                "eval_status": "PASSED",
+                                "failed_tool_calls": [],
+                                "passed_tool_calls": ["multiply"],
+                            },
+                            "created_at": "2025-08-27T15:41:59Z",
+                            "updated_at": "2025-08-27T15:41:59Z",
+                        }
+                    }
+                },
+            },
+            400: {"description": "Invalid request - provide either agent_id or team_id", "model": BadRequestResponse},
+            404: {"description": "Agent or team not found", "model": NotFoundResponse},
+        },
+    )
     async def run_eval(
         eval_run_input: EvalRunInput,
-        db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for evaluation"),
     ) -> Optional[EvalSchema]:
         db = get_db(dbs, db_id)
 
@@ -197,12 +377,17 @@ def attach_routes(
 
 
 def parse_eval_types_filter(
-    eval_types: Optional[str] = Query(default=None, description="Comma-separated eval types"),
+    eval_types: Optional[str] = Query(
+        default=None,
+        description="Comma-separated eval types (accuracy,performance,reliability)",
+        example="accuracy,performance",
+    ),
 ) -> Optional[List[EvalType]]:
-    """Parse a comma-separated string of eval types into a list of EvalType enums"""
+    """Parse comma-separated eval types into EvalType enums for filtering evaluation runs."""
     if not eval_types:
         return None
     try:
         return [EvalType(item.strip()) for item in eval_types.split(",")]
     except ValueError as e:
-        raise HTTPException(status_code=422, detail=f"Invalid eval_type: {e}")
+        valid_types = ", ".join([t.value for t in EvalType])
+        raise HTTPException(status_code=422, detail=f"Invalid eval_type: {e}. Valid types: {valid_types}")
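The last hunk tightens the documentation of the eval_types filter (a single comma-separated string with valid values accuracy, performance, reliability) and makes the 422 error spell out the allowed values. For orientation, a hypothetical client call against a running AgentOS server; the base URL, port, path prefix, and bearer token below are illustrative assumptions, not details taken from this diff:

import requests

BASE_URL = "http://localhost:7777"  # assumed local AgentOS address; adjust for your deployment

response = requests.get(
    f"{BASE_URL}/eval-runs",
    params={
        "agent_id": "basic-agent",             # optional filter, as documented on the endpoint
        "eval_types": "accuracy,performance",  # comma-separated string parsed by parse_eval_types_filter
    },
    headers={"Authorization": "Bearer <token>"},  # only needed when authentication is enabled
    timeout=30,
)
response.raise_for_status()
for run in response.json()["data"]:  # paginated body, per the 200 example shown earlier
    print(run["id"], run["eval_type"], run["eval_data"]["eval_status"])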