agno-2.2.0-py3-none-any.whl → agno-2.2.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. agno/agent/agent.py +751 -575
  2. agno/culture/manager.py +22 -24
  3. agno/db/async_postgres/__init__.py +1 -1
  4. agno/db/dynamo/dynamo.py +0 -2
  5. agno/db/firestore/firestore.py +0 -2
  6. agno/db/gcs_json/gcs_json_db.py +0 -4
  7. agno/db/gcs_json/utils.py +0 -24
  8. agno/db/in_memory/in_memory_db.py +0 -3
  9. agno/db/json/json_db.py +4 -10
  10. agno/db/json/utils.py +0 -24
  11. agno/db/mongo/mongo.py +0 -2
  12. agno/db/mysql/mysql.py +0 -3
  13. agno/db/postgres/__init__.py +1 -1
  14. agno/db/{async_postgres → postgres}/async_postgres.py +19 -22
  15. agno/db/postgres/postgres.py +7 -10
  16. agno/db/postgres/utils.py +106 -2
  17. agno/db/redis/redis.py +0 -2
  18. agno/db/singlestore/singlestore.py +0 -3
  19. agno/db/sqlite/__init__.py +2 -1
  20. agno/db/sqlite/async_sqlite.py +2269 -0
  21. agno/db/sqlite/sqlite.py +0 -2
  22. agno/db/sqlite/utils.py +96 -0
  23. agno/db/surrealdb/surrealdb.py +0 -6
  24. agno/knowledge/knowledge.py +14 -3
  25. agno/knowledge/reader/pptx_reader.py +101 -0
  26. agno/knowledge/reader/reader_factory.py +30 -0
  27. agno/knowledge/reader/tavily_reader.py +194 -0
  28. agno/knowledge/types.py +1 -0
  29. agno/memory/manager.py +28 -25
  30. agno/models/anthropic/claude.py +63 -6
  31. agno/models/base.py +255 -36
  32. agno/models/response.py +69 -0
  33. agno/os/router.py +7 -5
  34. agno/os/routers/memory/memory.py +2 -1
  35. agno/os/routers/memory/schemas.py +5 -2
  36. agno/os/schema.py +26 -20
  37. agno/os/utils.py +9 -2
  38. agno/run/agent.py +28 -30
  39. agno/run/base.py +17 -1
  40. agno/run/team.py +28 -29
  41. agno/run/workflow.py +32 -17
  42. agno/session/agent.py +3 -0
  43. agno/session/summary.py +4 -1
  44. agno/session/team.py +1 -1
  45. agno/team/team.py +620 -374
  46. agno/tools/dalle.py +2 -4
  47. agno/tools/eleven_labs.py +23 -25
  48. agno/tools/function.py +40 -0
  49. agno/tools/mcp/__init__.py +10 -0
  50. agno/tools/mcp/mcp.py +324 -0
  51. agno/tools/mcp/multi_mcp.py +347 -0
  52. agno/tools/mcp/params.py +24 -0
  53. agno/tools/slack.py +18 -3
  54. agno/tools/tavily.py +146 -0
  55. agno/utils/agent.py +366 -1
  56. agno/utils/mcp.py +92 -2
  57. agno/utils/media.py +166 -1
  58. agno/utils/message.py +60 -0
  59. agno/utils/print_response/workflow.py +17 -1
  60. agno/utils/team.py +89 -1
  61. agno/workflow/step.py +0 -1
  62. agno/workflow/types.py +10 -15
  63. agno/workflow/workflow.py +86 -1
  64. {agno-2.2.0.dist-info → agno-2.2.2.dist-info}/METADATA +31 -25
  65. {agno-2.2.0.dist-info → agno-2.2.2.dist-info}/RECORD +68 -64
  66. agno/db/async_postgres/schemas.py +0 -139
  67. agno/db/async_postgres/utils.py +0 -347
  68. agno/tools/mcp.py +0 -679
  69. {agno-2.2.0.dist-info → agno-2.2.2.dist-info}/WHEEL +0 -0
  70. {agno-2.2.0.dist-info → agno-2.2.2.dist-info}/licenses/LICENSE +0 -0
  71. {agno-2.2.0.dist-info → agno-2.2.2.dist-info}/top_level.txt +0 -0
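Before the per-file diff below, a note on what the moves in this list imply for imports. The sketch that follows is an assumption inferred from the renames above; the re-exported names have not been verified against the 2.2.2 wheel.

# Hypothetical import updates implied by the file moves above.
# All names here are assumptions based on the file list, not
# verified against the released 2.2.2 wheel.

# agno/tools/mcp.py (-679 lines) was split into the agno/tools/mcp/
# package; its new __init__.py (+10 lines) presumably re-exports the
# public tools, so package-level imports should keep working:
from agno.tools.mcp import MCPTools, MultiMCPTools  # assumed re-exports

# async_postgres.py moved from agno/db/async_postgres/ into
# agno/db/postgres/; the one-line changes in both __init__.py files
# suggest the class is now exported from the new package:
from agno.db.postgres import AsyncPostgresDb  # assumed new home

# agno/db/sqlite gains async_sqlite.py (+2269 lines), presumably an
# async SQLite backend exported from agno/db/sqlite/__init__.py:
from agno.db.sqlite import AsyncSqliteDb  # assumed name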
agno/db/async_postgres/utils.py (file removed)
@@ -1,347 +0,0 @@
-"""Utility functions for the AsyncPostgres database class."""
-
-import time
-from datetime import date, datetime, timedelta, timezone
-from typing import Any, Dict, List, Optional
-from uuid import uuid4
-
-from sqlalchemy.ext.asyncio import AsyncEngine
-
-from agno.db.async_postgres.schemas import get_table_schema_definition
-from agno.db.schemas.culture import CulturalKnowledge
-from agno.utils.log import log_debug, log_error, log_warning
-
-try:
-    from sqlalchemy import Table
-    from sqlalchemy.dialects import postgresql
-    from sqlalchemy.ext.asyncio import AsyncSession
-    from sqlalchemy.inspection import inspect
-    from sqlalchemy.sql.expression import text
-except ImportError:
-    raise ImportError("`sqlalchemy` not installed. Please install it using `pip install sqlalchemy`")
-
-
-# -- DB util methods --
-def apply_sorting(stmt, table: Table, sort_by: Optional[str] = None, sort_order: Optional[str] = None):
-    """Apply sorting to the given SQLAlchemy statement.
-
-    Args:
-        stmt: The SQLAlchemy statement to modify
-        table: The table being queried
-        sort_by: The field to sort by
-        sort_order: The sort order ('asc' or 'desc')
-
-    Returns:
-        The modified statement with sorting applied
-    """
-    if sort_by is None:
-        return stmt
-
-    if not hasattr(table.c, sort_by):
-        log_debug(f"Invalid sort field: '{sort_by}'. Will not apply any sorting.")
-        return stmt
-
-    # Apply the given sorting
-    sort_column = getattr(table.c, sort_by)
-    if sort_order and sort_order == "asc":
-        return stmt.order_by(sort_column.asc())
-    else:
-        return stmt.order_by(sort_column.desc())
-
-
-async def create_schema(session: AsyncSession, db_schema: str) -> None:
-    """Create the database schema if it doesn't exist.
-
-    Args:
-        session: The async SQLAlchemy session to use
-        db_schema (str): The definition of the database schema to create
-    """
-    try:
-        log_debug(f"Creating schema if not exists: {db_schema}")
-        await session.execute(text(f"CREATE SCHEMA IF NOT EXISTS {db_schema};"))
-    except Exception as e:
-        log_warning(f"Could not create schema {db_schema}: {e}")
-
-
-async def is_table_available(session: AsyncSession, table_name: str, db_schema: str) -> bool:
-    """
-    Check if a table with the given name exists in the given schema.
-
-    Returns:
-        bool: True if the table exists, False otherwise.
-    """
-    try:
-        exists_query = text(
-            "SELECT 1 FROM information_schema.tables WHERE table_schema = :schema AND table_name = :table"
-        )
-        result = await session.execute(exists_query, {"schema": db_schema, "table": table_name})
-        exists = result.scalar() is not None
-        return exists
-
-    except Exception as e:
-        log_error(f"Error checking if table exists: {e}")
-        return False
-
-
-async def is_valid_table(db_engine: AsyncEngine, table_name: str, table_type: str, db_schema: str) -> bool:
-    """
-    Check if the existing table has the expected column names.
-    Args:
-        db_engine: The async database engine
-        table_name (str): Name of the table to validate
-        table_type (str): Type of the table to get schema for
-        db_schema (str): Database schema name
-    Returns:
-        bool: True if table has all expected columns, False otherwise
-    """
-    try:
-        expected_table_schema = get_table_schema_definition(table_type)
-        expected_columns = {col_name for col_name in expected_table_schema.keys() if not col_name.startswith("_")}
-
-        async with db_engine.connect() as conn:
-
-            def inspect_sync(sync_conn):
-                inspector = inspect(sync_conn)
-                return inspector.get_columns(table_name, schema=db_schema)
-
-            existing_columns_info = await conn.run_sync(inspect_sync)
-            existing_columns = set(col["name"] for col in existing_columns_info)
-
-            missing_columns = expected_columns - existing_columns
-            if missing_columns:
-                log_warning(f"Missing columns {missing_columns} in table {db_schema}.{table_name}")
-                return False
-
-            log_debug(f"Table {db_schema}.{table_name} has all expected columns")
-            return True
-
-    except Exception as e:
-        log_error(f"Error validating table schema for {db_schema}.{table_name}: {e}")
-        return False
-
-
-# -- Metrics util methods --
-async def bulk_upsert_metrics(session: AsyncSession, table: Table, metrics_records: list[dict]) -> list[dict]:
-    """Bulk upsert metrics into the database.
-
-    Args:
-        session: The async session to use
-        table (Table): The table to upsert into.
-        metrics_records (list[dict]): The metrics records to upsert.
-
-    Returns:
-        list[dict]: The upserted metrics records.
-    """
-    if not metrics_records:
-        return []
-
-    results = []
-    stmt = postgresql.insert(table)
-
-    # Columns to update in case of conflict
-    update_columns = {
-        col.name: stmt.excluded[col.name]
-        for col in table.columns
-        if col.name not in ["id", "date", "created_at", "aggregation_period"]
-    }
-
-    stmt = stmt.on_conflict_do_update(index_elements=["date", "aggregation_period"], set_=update_columns).returning(  # type: ignore
-        table
-    )
-    result = await session.execute(stmt, metrics_records)
-    results = [row._mapping for row in result.fetchall()]
-    await session.commit()
-
-    return results  # type: ignore
-
-
-def calculate_date_metrics(date_to_process: date, sessions_data: dict) -> dict:
-    """Calculate metrics for the given single date.
-
-    Args:
-        date_to_process (date): The date to calculate metrics for.
-        sessions_data (dict): The sessions data to calculate metrics for.
-
-    Returns:
-        dict: The calculated metrics.
-    """
-    metrics = {
-        "users_count": 0,
-        "agent_sessions_count": 0,
-        "team_sessions_count": 0,
-        "workflow_sessions_count": 0,
-        "agent_runs_count": 0,
-        "team_runs_count": 0,
-        "workflow_runs_count": 0,
-    }
-    token_metrics = {
-        "input_tokens": 0,
-        "output_tokens": 0,
-        "total_tokens": 0,
-        "audio_total_tokens": 0,
-        "audio_input_tokens": 0,
-        "audio_output_tokens": 0,
-        "cache_read_tokens": 0,
-        "cache_write_tokens": 0,
-        "reasoning_tokens": 0,
-    }
-    model_counts: Dict[str, int] = {}
-
-    session_types = [
-        ("agent", "agent_sessions_count", "agent_runs_count"),
-        ("team", "team_sessions_count", "team_runs_count"),
-        ("workflow", "workflow_sessions_count", "workflow_runs_count"),
-    ]
-    all_user_ids = set()
-
-    for session_type, sessions_count_key, runs_count_key in session_types:
-        sessions = sessions_data.get(session_type, [])
-        metrics[sessions_count_key] = len(sessions)
-
-        for session in sessions:
-            if session.get("user_id"):
-                all_user_ids.add(session["user_id"])
-            metrics[runs_count_key] += len(session.get("runs", []))
-            if runs := session.get("runs", []):
-                for run in runs:
-                    if model_id := run.get("model"):
-                        model_provider = run.get("model_provider", "")
-                        model_counts[f"{model_id}:{model_provider}"] = (
-                            model_counts.get(f"{model_id}:{model_provider}", 0) + 1
-                        )
-
-            session_metrics = session.get("session_data", {}).get("session_metrics", {})
-            for field in token_metrics:
-                token_metrics[field] += session_metrics.get(field, 0)
-
-    model_metrics = []
-    for model, count in model_counts.items():
-        model_id, model_provider = model.split(":")
-        model_metrics.append({"model_id": model_id, "model_provider": model_provider, "count": count})
-
-    metrics["users_count"] = len(all_user_ids)
-    current_time = int(time.time())
-
-    return {
-        "id": str(uuid4()),
-        "date": date_to_process,
-        "completed": date_to_process < datetime.now(timezone.utc).date(),
-        "token_metrics": token_metrics,
-        "model_metrics": model_metrics,
-        "created_at": current_time,
-        "updated_at": current_time,
-        "aggregation_period": "daily",
-        **metrics,
-    }
-
-
-def fetch_all_sessions_data(
-    sessions: List[Dict[str, Any]], dates_to_process: list[date], start_timestamp: int
-) -> Optional[dict]:
-    """Return all session data for the given dates, for all session types.
-
-    Args:
-        sessions: List of session dictionaries
-        dates_to_process (list[date]): The dates to fetch session data for.
-        start_timestamp: Starting timestamp
-
-    Returns:
-        dict: A dictionary with dates as keys and session data as values, for all session types.
-
-    Example:
-        {
-            "2000-01-01": {
-                "agent": [<session1>, <session2>, ...],
-                "team": [...],
-                "workflow": [...],
-            }
-        }
-    """
-    if not dates_to_process:
-        return None
-
-    all_sessions_data: Dict[str, Dict[str, List[Dict[str, Any]]]] = {
-        date_to_process.isoformat(): {"agent": [], "team": [], "workflow": []} for date_to_process in dates_to_process
-    }
-
-    for session in sessions:
-        session_date = (
-            datetime.fromtimestamp(session.get("created_at", start_timestamp), tz=timezone.utc).date().isoformat()
-        )
-        if session_date in all_sessions_data:
-            all_sessions_data[session_date][session["session_type"]].append(session)
-
-    return all_sessions_data
-
-
-def get_dates_to_calculate_metrics_for(starting_date: date) -> list[date]:
-    """Return the list of dates to calculate metrics for.
-
-    Args:
-        starting_date (date): The starting date to calculate metrics for.
-
-    Returns:
-        list[date]: The list of dates to calculate metrics for.
-    """
-    today = datetime.now(timezone.utc).date()
-    days_diff = (today - starting_date).days + 1
-    if days_diff <= 0:
-        return []
-    return [starting_date + timedelta(days=x) for x in range(days_diff)]
-
-
-# -- Cultural Knowledge util methods --
-def serialize_cultural_knowledge(cultural_knowledge: CulturalKnowledge) -> Dict[str, Any]:
-    """Serialize a CulturalKnowledge object for database storage.
-
-    Converts the model's separate content, categories, and notes fields
-    into a single JSON dict for the database content column.
-
-    Args:
-        cultural_knowledge (CulturalKnowledge): The cultural knowledge object to serialize.
-
-    Returns:
-        Dict[str, Any]: A dictionary with the content field as JSON containing content, categories, and notes.
-    """
-    content_dict: Dict[str, Any] = {}
-    if cultural_knowledge.content is not None:
-        content_dict["content"] = cultural_knowledge.content
-    if cultural_knowledge.categories is not None:
-        content_dict["categories"] = cultural_knowledge.categories
-    if cultural_knowledge.notes is not None:
-        content_dict["notes"] = cultural_knowledge.notes
-
-    return content_dict if content_dict else {}
-
-
-def deserialize_cultural_knowledge(db_row: Dict[str, Any]) -> CulturalKnowledge:
-    """Deserialize a database row to a CulturalKnowledge object.
-
-    The database stores content as a JSON dict containing content, categories, and notes.
-    This method extracts those fields and converts them back to the model format.
-
-    Args:
-        db_row (Dict[str, Any]): The database row as a dictionary.
-
-    Returns:
-        CulturalKnowledge: The cultural knowledge object.
-    """
-    # Extract content, categories, and notes from the JSON content field
-    content_json = db_row.get("content", {}) or {}
-
-    return CulturalKnowledge.from_dict(
-        {
-            "id": db_row.get("id"),
-            "name": db_row.get("name"),
-            "summary": db_row.get("summary"),
-            "content": content_json.get("content"),
-            "categories": content_json.get("categories"),
-            "notes": content_json.get("notes"),
-            "metadata": db_row.get("metadata"),
-            "input": db_row.get("input"),
-            "created_at": db_row.get("created_at"),
-            "updated_at": db_row.get("updated_at"),
-            "agent_id": db_row.get("agent_id"),
-            "team_id": db_row.get("team_id"),
-        }
-    )
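
Although this module is deleted, the file list above shows agno/db/postgres/utils.py growing by 106 lines and a new agno/db/sqlite/utils.py (+96), which suggests these helpers were consolidated rather than dropped. For orientation, a minimal sketch of how the metrics helpers above chain together, assuming the same signatures survive in their new location; the session shape is taken from the helper bodies, and the values are made up:

from datetime import datetime, timezone

# One agent session shaped the way fetch_all_sessions_data expects
# (field names from the helpers above; values are illustrative only):
sessions = [
    {
        "session_type": "agent",
        "user_id": "u1",
        "created_at": 1700000000,  # 2023-11-14 UTC
        "runs": [{"model": "gpt-4o", "model_provider": "openai"}],
        "session_data": {"session_metrics": {"input_tokens": 10, "output_tokens": 5}},
    }
]

day = datetime.fromtimestamp(1700000000, tz=timezone.utc).date()
per_day = fetch_all_sessions_data(sessions, [day], start_timestamp=1700000000)
record = calculate_date_metrics(day, per_day[day.isoformat()])
# record now carries agent_sessions_count=1, users_count=1,
# agent_runs_count=1 and the summed token_metrics, ready to be passed
# to bulk_upsert_metrics(session, metrics_table, [record]).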