agno-2.0.0a1-py3-none-any.whl → agno-2.0.0rc2-py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
Files changed (79)
  1. agno/agent/agent.py +416 -41
  2. agno/api/agent.py +2 -2
  3. agno/api/evals.py +2 -2
  4. agno/api/os.py +1 -1
  5. agno/api/settings.py +2 -2
  6. agno/api/team.py +2 -2
  7. agno/db/dynamo/dynamo.py +0 -6
  8. agno/db/firestore/firestore.py +0 -6
  9. agno/db/in_memory/in_memory_db.py +0 -6
  10. agno/db/json/json_db.py +0 -6
  11. agno/db/mongo/mongo.py +8 -9
  12. agno/db/mysql/utils.py +0 -1
  13. agno/db/postgres/postgres.py +0 -10
  14. agno/db/postgres/utils.py +0 -1
  15. agno/db/redis/redis.py +0 -4
  16. agno/db/singlestore/singlestore.py +0 -10
  17. agno/db/singlestore/utils.py +0 -1
  18. agno/db/sqlite/sqlite.py +0 -4
  19. agno/db/sqlite/utils.py +0 -1
  20. agno/eval/accuracy.py +12 -5
  21. agno/integrations/discord/client.py +5 -1
  22. agno/knowledge/chunking/strategy.py +14 -14
  23. agno/knowledge/embedder/aws_bedrock.py +2 -2
  24. agno/knowledge/knowledge.py +156 -120
  25. agno/knowledge/reader/arxiv_reader.py +5 -5
  26. agno/knowledge/reader/csv_reader.py +6 -77
  27. agno/knowledge/reader/docx_reader.py +5 -5
  28. agno/knowledge/reader/firecrawl_reader.py +5 -5
  29. agno/knowledge/reader/json_reader.py +5 -5
  30. agno/knowledge/reader/markdown_reader.py +31 -9
  31. agno/knowledge/reader/pdf_reader.py +10 -123
  32. agno/knowledge/reader/reader_factory.py +65 -72
  33. agno/knowledge/reader/s3_reader.py +44 -114
  34. agno/knowledge/reader/text_reader.py +5 -5
  35. agno/knowledge/reader/url_reader.py +75 -31
  36. agno/knowledge/reader/web_search_reader.py +6 -29
  37. agno/knowledge/reader/website_reader.py +5 -5
  38. agno/knowledge/reader/wikipedia_reader.py +5 -5
  39. agno/knowledge/reader/youtube_reader.py +6 -6
  40. agno/knowledge/utils.py +10 -10
  41. agno/models/anthropic/claude.py +2 -49
  42. agno/models/aws/bedrock.py +3 -7
  43. agno/models/base.py +37 -6
  44. agno/models/message.py +7 -6
  45. agno/os/app.py +168 -64
  46. agno/os/interfaces/agui/agui.py +1 -1
  47. agno/os/interfaces/agui/utils.py +16 -9
  48. agno/os/interfaces/slack/slack.py +2 -3
  49. agno/os/interfaces/whatsapp/whatsapp.py +2 -3
  50. agno/os/mcp.py +235 -0
  51. agno/os/router.py +576 -19
  52. agno/os/routers/evals/evals.py +201 -12
  53. agno/os/routers/knowledge/knowledge.py +455 -18
  54. agno/os/routers/memory/memory.py +260 -29
  55. agno/os/routers/metrics/metrics.py +127 -7
  56. agno/os/routers/session/session.py +398 -25
  57. agno/os/schema.py +55 -2
  58. agno/os/settings.py +0 -1
  59. agno/run/agent.py +96 -2
  60. agno/run/cancel.py +0 -2
  61. agno/run/team.py +93 -2
  62. agno/run/workflow.py +25 -12
  63. agno/team/team.py +863 -1053
  64. agno/tools/function.py +65 -7
  65. agno/tools/linear.py +1 -1
  66. agno/tools/mcp.py +1 -2
  67. agno/utils/gemini.py +31 -1
  68. agno/utils/log.py +52 -2
  69. agno/utils/mcp.py +55 -3
  70. agno/utils/models/claude.py +41 -0
  71. agno/utils/print_response/team.py +177 -73
  72. agno/utils/streamlit.py +481 -0
  73. agno/workflow/workflow.py +17 -1
  74. {agno-2.0.0a1.dist-info → agno-2.0.0rc2.dist-info}/METADATA +1 -1
  75. {agno-2.0.0a1.dist-info → agno-2.0.0rc2.dist-info}/RECORD +78 -77
  76. agno/knowledge/reader/gcs_reader.py +0 -67
  77. {agno-2.0.0a1.dist-info → agno-2.0.0rc2.dist-info}/WHEEL +0 -0
  78. {agno-2.0.0a1.dist-info → agno-2.0.0rc2.dist-info}/licenses/LICENSE +0 -0
  79. {agno-2.0.0a1.dist-info → agno-2.0.0rc2.dist-info}/top_level.txt +0 -0
agno/os/routers/evals/evals.py +201 -12

@@ -16,7 +16,16 @@ from agno.os.routers.evals.schemas import (
     UpdateEvalRunRequest,
 )
 from agno.os.routers.evals.utils import run_accuracy_eval, run_performance_eval, run_reliability_eval
-from agno.os.schema import PaginatedResponse, PaginationInfo, SortOrder
+from agno.os.schema import (
+    BadRequestResponse,
+    InternalServerErrorResponse,
+    NotFoundResponse,
+    PaginatedResponse,
+    PaginationInfo,
+    SortOrder,
+    UnauthenticatedResponse,
+    ValidationErrorResponse,
+)
 from agno.os.settings import AgnoAPISettings
 from agno.os.utils import get_agent_by_id, get_db, get_team_by_id
 from agno.team.team import Team
@@ -30,14 +39,67 @@ def get_eval_router(
     teams: Optional[List[Team]] = None,
     settings: AgnoAPISettings = AgnoAPISettings(),
 ) -> APIRouter:
-    router = APIRouter(dependencies=[Depends(get_authentication_dependency(settings))], tags=["Evals"])
+    """Create eval router with comprehensive OpenAPI documentation for agent/team evaluation endpoints."""
+    router = APIRouter(
+        dependencies=[Depends(get_authentication_dependency(settings))],
+        tags=["Evals"],
+        responses={
+            400: {"description": "Bad Request", "model": BadRequestResponse},
+            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
+            404: {"description": "Not Found", "model": NotFoundResponse},
+            422: {"description": "Validation Error", "model": ValidationErrorResponse},
+            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
+        },
+    )
     return attach_routes(router=router, dbs=dbs, agents=agents, teams=teams)
 
 
 def attach_routes(
     router: APIRouter, dbs: dict[str, BaseDb], agents: Optional[List[Agent]] = None, teams: Optional[List[Team]] = None
 ) -> APIRouter:
-    @router.get("/eval-runs", response_model=PaginatedResponse[EvalSchema], status_code=200)
+    @router.get(
+        "/eval-runs",
+        response_model=PaginatedResponse[EvalSchema],
+        status_code=200,
+        operation_id="get_eval_runs",
+        summary="List Evaluation Runs",
+        description=(
+            "Retrieve paginated evaluation runs with filtering and sorting options. "
+            "Filter by agent, team, workflow, model, or evaluation type."
+        ),
+        responses={
+            200: {
+                "description": "Evaluation runs retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "data": [
+                                {
+                                    "id": "a03fa2f4-900d-482d-afe0-470d4cd8d1f4",
+                                    "agent_id": "basic-agent",
+                                    "model_id": "gpt-4o",
+                                    "model_provider": "OpenAI",
+                                    "team_id": None,
+                                    "workflow_id": None,
+                                    "name": "Test ",
+                                    "evaluated_component_name": None,
+                                    "eval_type": "reliability",
+                                    "eval_data": {
+                                        "eval_status": "PASSED",
+                                        "failed_tool_calls": [],
+                                        "passed_tool_calls": ["multiply"],
+                                    },
+                                    "eval_input": {"expected_tool_calls": ["multiply"]},
+                                    "created_at": "2025-08-27T15:41:59Z",
+                                    "updated_at": "2025-08-27T15:41:59Z",
+                                }
+                            ]
+                        }
+                    }
+                },
+            }
+        },
+    )
     async def get_eval_runs(
         agent_id: Optional[str] = Query(default=None, description="Agent ID"),
         team_id: Optional[str] = Query(default=None, description="Team ID"),
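
The hunk above layers OpenAPI metadata at two levels: router-wide `responses` for errors every endpoint can return, and per-route `responses` carrying concrete JSON examples. A minimal, self-contained sketch of how FastAPI merges the two levels (not agno code; `ErrorDetail` is a stand-in for agno's BadRequestResponse/NotFoundResponse/etc. schemas):

```python
from fastapi import APIRouter, FastAPI
from pydantic import BaseModel


class ErrorDetail(BaseModel):  # stand-in for agno's *Response schemas
    detail: str


router = APIRouter(
    tags=["Evals"],
    # Router-level defaults: applied to every route on this router
    responses={401: {"description": "Unauthorized", "model": ErrorDetail}},
)


@router.get(
    "/eval-runs",
    operation_id="get_eval_runs",
    summary="List Evaluation Runs",
    # Merged with the router-level defaults for this route only
    responses={404: {"description": "Not Found", "model": ErrorDetail}},
)
async def get_eval_runs() -> list:
    return []


app = FastAPI()
app.include_router(router)

# The OpenAPI entry for GET /eval-runs documents both status codes:
# app.openapi()["paths"]["/eval-runs"]["get"]["responses"] has keys
# "200", "401" (router default), and "404" (route-specific).
```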
@@ -76,7 +138,43 @@ def attach_routes(
             ),
         )
 
-    @router.get("/eval-runs/{eval_run_id}", response_model=EvalSchema, status_code=200)
+    @router.get(
+        "/eval-runs/{eval_run_id}",
+        response_model=EvalSchema,
+        status_code=200,
+        operation_id="get_eval_run",
+        summary="Get Evaluation Run",
+        description="Retrieve detailed results and metrics for a specific evaluation run.",
+        responses={
+            200: {
+                "description": "Evaluation run details retrieved successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "id": "a03fa2f4-900d-482d-afe0-470d4cd8d1f4",
+                            "agent_id": "basic-agent",
+                            "model_id": "gpt-4o",
+                            "model_provider": "OpenAI",
+                            "team_id": None,
+                            "workflow_id": None,
+                            "name": "Test ",
+                            "evaluated_component_name": None,
+                            "eval_type": "reliability",
+                            "eval_data": {
+                                "eval_status": "PASSED",
+                                "failed_tool_calls": [],
+                                "passed_tool_calls": ["multiply"],
+                            },
+                            "eval_input": {"expected_tool_calls": ["multiply"]},
+                            "created_at": "2025-08-27T15:41:59Z",
+                            "updated_at": "2025-08-27T15:41:59Z",
+                        }
+                    }
+                },
+            },
+            404: {"description": "Evaluation run not found", "model": NotFoundResponse},
+        },
+    )
     async def get_eval_run(
         eval_run_id: str,
         db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
@@ -88,10 +186,20 @@ def attach_routes(
 
         return EvalSchema.from_dict(eval_run)  # type: ignore
 
-    @router.delete("/eval-runs", status_code=204)
+    @router.delete(
+        "/eval-runs",
+        status_code=204,
+        operation_id="delete_eval_runs",
+        summary="Delete Evaluation Runs",
+        description="Delete multiple evaluation runs by their IDs. This action cannot be undone.",
+        responses={
+            204: {},
+            500: {"description": "Failed to delete evaluation runs", "model": InternalServerErrorResponse},
+        },
+    )
     async def delete_eval_runs(
         request: DeleteEvalRunsRequest,
-        db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
     ) -> None:
         try:
             db = get_db(dbs, db_id)
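
Worth noting in this hunk: bulk deletion is modeled as a DELETE with a JSON body (`DeleteEvalRunsRequest`) rather than per-ID path parameters, and success is an empty 204. A hypothetical client call; the `eval_run_ids` field name is an assumption, since `DeleteEvalRunsRequest` is defined in schemas.py and not shown in this diff:

```python
# Hypothetical bulk-delete call. The body field `eval_run_ids` is assumed;
# check DeleteEvalRunsRequest in agno/os/routers/evals/schemas.py for the
# real shape. httpx.request() is used because DELETE-with-body needs the
# generic method (httpx.delete() takes no `json` argument).
import httpx

resp = httpx.request(
    "DELETE",
    "http://localhost:7777/eval-runs",
    params={"db_id": "primary"},  # optional: omit to use the default database
    json={"eval_run_ids": ["a03fa2f4-900d-482d-afe0-470d4cd8d1f4"]},
)
assert resp.status_code == 204  # success carries no response body
```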
@@ -99,7 +207,44 @@ def attach_routes(
         except Exception as e:
             raise HTTPException(status_code=500, detail=f"Failed to delete eval runs: {e}")
 
-    @router.patch("/eval-runs/{eval_run_id}", response_model=EvalSchema, status_code=200)
+    @router.patch(
+        "/eval-runs/{eval_run_id}",
+        response_model=EvalSchema,
+        status_code=200,
+        operation_id="update_eval_run",
+        summary="Update Evaluation Run",
+        description="Update the name or other properties of an existing evaluation run.",
+        responses={
+            200: {
+                "description": "Evaluation run updated successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "id": "a03fa2f4-900d-482d-afe0-470d4cd8d1f4",
+                            "agent_id": "basic-agent",
+                            "model_id": "gpt-4o",
+                            "model_provider": "OpenAI",
+                            "team_id": None,
+                            "workflow_id": None,
+                            "name": "Test ",
+                            "evaluated_component_name": None,
+                            "eval_type": "reliability",
+                            "eval_data": {
+                                "eval_status": "PASSED",
+                                "failed_tool_calls": [],
+                                "passed_tool_calls": ["multiply"],
+                            },
+                            "eval_input": {"expected_tool_calls": ["multiply"]},
+                            "created_at": "2025-08-27T15:41:59Z",
+                            "updated_at": "2025-08-27T15:41:59Z",
+                        }
+                    }
+                },
+            },
+            404: {"description": "Evaluation run not found", "model": NotFoundResponse},
+            500: {"description": "Failed to update evaluation run", "model": InternalServerErrorResponse},
+        },
+    )
     async def update_eval_run(
         eval_run_id: str,
         request: UpdateEvalRunRequest,
@@ -116,10 +261,49 @@ def attach_routes(
 
         return EvalSchema.from_dict(eval_run)  # type: ignore
 
-    @router.post("/eval-runs", response_model=EvalSchema, status_code=200)
+    @router.post(
+        "/eval-runs",
+        response_model=EvalSchema,
+        status_code=200,
+        operation_id="run_eval",
+        summary="Execute Evaluation",
+        description=(
+            "Run evaluation tests on agents or teams. Supports accuracy, performance, and reliability evaluations. "
+            "Requires either agent_id or team_id, but not both."
+        ),
+        responses={
+            200: {
+                "description": "Evaluation executed successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "id": "f2b2d72f-e9e2-4f0e-8810-0a7e1ff58614",
+                            "agent_id": "basic-agent",
+                            "model_id": "gpt-4o",
+                            "model_provider": "OpenAI",
+                            "team_id": None,
+                            "workflow_id": None,
+                            "name": None,
+                            "evaluated_component_name": None,
+                            "eval_type": "reliability",
+                            "eval_data": {
+                                "eval_status": "PASSED",
+                                "failed_tool_calls": [],
+                                "passed_tool_calls": ["multiply"],
+                            },
+                            "created_at": "2025-08-27T15:41:59Z",
+                            "updated_at": "2025-08-27T15:41:59Z",
+                        }
+                    }
+                },
+            },
+            400: {"description": "Invalid request - provide either agent_id or team_id", "model": BadRequestResponse},
+            404: {"description": "Agent or team not found", "model": NotFoundResponse},
+        },
+    )
     async def run_eval(
         eval_run_input: EvalRunInput,
-        db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for evaluation"),
     ) -> Optional[EvalSchema]:
         db = get_db(dbs, db_id)
 
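
A hypothetical request against the new endpoint. The `EvalRunInput` body fields below are inferred from the example response above and are not confirmed by this diff:

```python
# Hypothetical run_eval request; field names (agent_id, eval_type) inferred
# from the example response, plus an assumed expected_tool_calls input for
# reliability evals. Verify against EvalRunInput before relying on this shape.
import httpx

resp = httpx.post(
    "http://localhost:7777/eval-runs",
    json={
        "agent_id": "basic-agent",  # provide agent_id OR team_id, never both
        "eval_type": "reliability",
        "expected_tool_calls": ["multiply"],
    },
)
print(resp.json()["eval_data"]["eval_status"])  # e.g. "PASSED"
```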
@@ -193,12 +377,17 @@ def attach_routes(
 
 
 def parse_eval_types_filter(
-    eval_types: Optional[str] = Query(default=None, description="Comma-separated eval types"),
+    eval_types: Optional[str] = Query(
+        default=None,
+        description="Comma-separated eval types (accuracy,performance,reliability)",
+        example="accuracy,performance",
+    ),
 ) -> Optional[List[EvalType]]:
-    """Parse a comma-separated string of eval types into a list of EvalType enums"""
+    """Parse comma-separated eval types into EvalType enums for filtering evaluation runs."""
     if not eval_types:
         return None
     try:
         return [EvalType(item.strip()) for item in eval_types.split(",")]
     except ValueError as e:
-        raise HTTPException(status_code=422, detail=f"Invalid eval_type: {e}")
+        valid_types = ", ".join([t.value for t in EvalType])
+        raise HTTPException(status_code=422, detail=f"Invalid eval_type: {e}. Valid types: {valid_types}")
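
The reworked except branch now enumerates the valid values in the 422 detail. A self-contained sketch of the same parsing behavior, assuming `EvalType` carries the three values named in the query description:

```python
# Standalone reproduction of parse_eval_types_filter's parsing logic.
# EvalType's members are assumed from the "(accuracy,performance,reliability)"
# description above; the real enum lives in agno's eval module, and the real
# function raises HTTPException rather than ValueError.
from enum import Enum
from typing import List, Optional


class EvalType(str, Enum):
    ACCURACY = "accuracy"
    PERFORMANCE = "performance"
    RELIABILITY = "reliability"


def parse_eval_types(eval_types: Optional[str]) -> Optional[List[EvalType]]:
    if not eval_types:
        return None
    try:
        # Whitespace around each item is tolerated: "accuracy, performance"
        return [EvalType(item.strip()) for item in eval_types.split(",")]
    except ValueError as e:
        valid_types = ", ".join([t.value for t in EvalType])
        raise ValueError(f"Invalid eval_type: {e}. Valid types: {valid_types}")


print(parse_eval_types("accuracy, performance"))
# [<EvalType.ACCURACY: 'accuracy'>, <EvalType.PERFORMANCE: 'performance'>]
# parse_eval_types("speed") raises:
# ValueError: Invalid eval_type: 'speed' is not a valid EvalType.
#             Valid types: accuracy, performance, reliability
```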