agno 2.0.8__py3-none-any.whl → 2.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +2 -2
- agno/db/base.py +14 -0
- agno/db/dynamo/dynamo.py +107 -27
- agno/db/firestore/firestore.py +109 -33
- agno/db/gcs_json/gcs_json_db.py +100 -20
- agno/db/in_memory/in_memory_db.py +95 -20
- agno/db/json/json_db.py +101 -21
- agno/db/migrations/v1_to_v2.py +181 -35
- agno/db/mongo/mongo.py +251 -26
- agno/db/mysql/mysql.py +307 -6
- agno/db/postgres/postgres.py +279 -33
- agno/db/redis/redis.py +99 -22
- agno/db/singlestore/singlestore.py +319 -38
- agno/db/sqlite/sqlite.py +339 -23
- agno/models/anthropic/claude.py +0 -20
- agno/models/huggingface/huggingface.py +2 -1
- agno/models/ollama/chat.py +28 -2
- agno/models/openai/chat.py +7 -0
- agno/models/openai/responses.py +8 -8
- agno/os/interfaces/base.py +2 -0
- agno/os/interfaces/slack/router.py +50 -10
- agno/os/interfaces/slack/slack.py +6 -4
- agno/os/interfaces/whatsapp/router.py +7 -4
- agno/os/router.py +18 -0
- agno/os/utils.py +2 -2
- agno/reasoning/azure_ai_foundry.py +2 -2
- agno/reasoning/deepseek.py +2 -2
- agno/reasoning/groq.py +2 -2
- agno/reasoning/ollama.py +2 -2
- agno/reasoning/openai.py +2 -2
- agno/run/base.py +15 -2
- agno/team/team.py +0 -7
- agno/tools/mcp_toolbox.py +284 -0
- agno/tools/scrapegraph.py +58 -31
- agno/tools/whatsapp.py +1 -1
- agno/utils/print_response/agent.py +2 -2
- agno/utils/print_response/team.py +6 -6
- agno/utils/reasoning.py +22 -1
- agno/utils/string.py +9 -0
- agno/workflow/workflow.py +0 -1
- {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/METADATA +4 -1
- {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/RECORD +45 -44
- {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/WHEEL +0 -0
- {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/top_level.txt +0 -0
agno/db/migrations/v1_to_v2.py
CHANGED
@@ -11,7 +11,7 @@ from agno.db.postgres.postgres import PostgresDb
 from agno.db.schemas.memory import UserMemory
 from agno.db.sqlite.sqlite import SqliteDb
 from agno.session import AgentSession, TeamSession, WorkflowSession
-from agno.utils.log import log_error
+from agno.utils.log import log_error, log_info, log_warning
 
 
 def convert_v1_metrics_to_v2(metrics_dict: Dict[str, Any]) -> Dict[str, Any]:
@@ -127,22 +127,45 @@ def safe_get_runs_from_memory(memory_data: Any) -> Any:
     if memory_data is None:
         return None
 
+    runs: Any = []
+
     # If memory_data is a string, try to parse it as JSON
     if isinstance(memory_data, str):
         try:
             memory_dict = json.loads(memory_data)
             if isinstance(memory_dict, dict):
-
+                runs = memory_dict.get("runs")
         except (json.JSONDecodeError, AttributeError):
             # If JSON parsing fails, memory_data might just be a string value
             return None
 
     # If memory_data is already a dict, access runs directly
     elif isinstance(memory_data, dict):
-
-
-
-
+        runs = memory_data.get("runs")
+
+    for run in runs or []:
+        # Adjust fields mapping for Agent sessions
+        if run.get("agent_id") is not None:
+            if run.get("team_id") is not None:
+                run.pop("team_id")
+            if run.get("team_session_id") is not None:
+                run["session_id"] = run.pop("team_session_id")
+            if run.get("event"):
+                run["events"] = [run.pop("event")]
+
+        # Adjust fields mapping for Team sessions
+        if run.get("team_id") is not None:
+            if run.get("agent_id") is not None:
+                run.pop("agent_id")
+            if member_responses := run.get("member_responses"):
+                for response in member_responses:
+                    if response.get("agent_id") is not None and response.get("team_id") is not None:
+                        response.pop("team_id")
+                    if response.get("agent_id") is not None and response.get("team_session_id") is not None:
+                        response["session_id"] = response.pop("team_session_id")
+                run["member_responses"] = member_responses
+
+    return runs
 
 
 def convert_v1_media_to_v2(media_data: Dict[str, Any]) -> Dict[str, Any]:
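In short, the reworked safe_get_runs_from_memory no longer hands back the v1 runs as-is: it renames v1 run fields to their v2 equivalents first. Below is a standalone sketch of the agent-run part of that remapping; remap_agent_run and the sample dict are invented for illustration and are not part of the package.

```python
# Illustrative rendition of the agent-run remapping added in the hunk above.
# The function name and sample data are stand-ins for demonstration only.
def remap_agent_run(run: dict) -> dict:
    if run.get("agent_id") is not None:
        if run.get("team_id") is not None:
            run.pop("team_id")  # agent runs drop a stray team_id
        if run.get("team_session_id") is not None:
            run["session_id"] = run.pop("team_session_id")  # v1 team_session_id -> v2 session_id
        if run.get("event"):
            run["events"] = [run.pop("event")]  # a single v1 event becomes a v2 events list
    return run


v1_run = {"agent_id": "agent_1", "team_session_id": "sess_42", "event": {"type": "RunCompleted"}}
print(remap_agent_run(v1_run))
# {'agent_id': 'agent_1', 'session_id': 'sess_42', 'events': [{'type': 'RunCompleted'}]}
```

Team runs get the mirror-image treatment (stray agent_id dropped, member responses remapped), as shown in the hunk itself.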
@@ -298,6 +321,7 @@ def migrate(
     team_sessions_table_name: Optional[str] = None,
     workflow_sessions_table_name: Optional[str] = None,
     memories_table_name: Optional[str] = None,
+    batch_size: int = 5000,
 ):
     """Given a database connection and table/collection names, parse and migrate the content to corresponding v2 tables/collections.
 
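The migrate() entry point gains a batch_size keyword (default 5000) that is forwarded to the new per-table batching helper shown in the next hunk. A minimal usage sketch follows; the PostgresDb connection string and the v1 table names are assumptions for illustration, not values taken from this diff.

```python
# Hypothetical call showing where the new batch_size parameter fits.
# db_url and the table names are placeholders; only the migrate() keyword
# arguments come from the hunks above.
from agno.db.postgres.postgres import PostgresDb
from agno.db.migrations.v1_to_v2 import migrate

db = PostgresDb(db_url="postgresql+psycopg://user:pass@localhost:5432/app")  # assumed constructor args

migrate(
    db=db,
    v1_db_schema="public",
    agent_sessions_table_name="agent_sessions",   # example v1 table name
    memories_table_name="agent_memories",         # example v1 table name
    batch_size=1000,  # new in 2.0.9: records processed per batch (default 5000)
)
```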
@@ -308,65 +332,171 @@ def migrate(
         team_sessions_table_name: The name of the team sessions table/collection. If not provided, team sessions will not be migrated.
         workflow_sessions_table_name: The name of the workflow sessions table/collection. If not provided, workflow sessions will not be migrated.
         memories_table_name: The name of the memories table/collection. If not provided, memories will not be migrated.
+        batch_size: Number of records to process in each batch (default: 5000)
     """
     if agent_sessions_table_name:
-
+        migrate_table_in_batches(
+            db=db,
             v1_db_schema=v1_db_schema,
             v1_table_name=agent_sessions_table_name,
             v1_table_type="agent_sessions",
+            batch_size=batch_size,
         )
 
     if team_sessions_table_name:
-
+        migrate_table_in_batches(
+            db=db,
             v1_db_schema=v1_db_schema,
             v1_table_name=team_sessions_table_name,
             v1_table_type="team_sessions",
+            batch_size=batch_size,
         )
 
     if workflow_sessions_table_name:
-
+        migrate_table_in_batches(
+            db=db,
             v1_db_schema=v1_db_schema,
             v1_table_name=workflow_sessions_table_name,
             v1_table_type="workflow_sessions",
+            batch_size=batch_size,
        )
 
     if memories_table_name:
-
+        migrate_table_in_batches(
+            db=db,
             v1_db_schema=v1_db_schema,
             v1_table_name=memories_table_name,
             v1_table_type="memories",
+            batch_size=batch_size,
         )
 
 
-def
-
+def migrate_table_in_batches(
+    db: Union[PostgresDb, MySQLDb, SqliteDb, MongoDb],
+    v1_db_schema: str,
+    v1_table_name: str,
+    v1_table_type: str,
+    batch_size: int = 5000,
+):
+    log_info(f"Starting migration of table {v1_table_name} (type: {v1_table_type}) with batch size {batch_size}")
+
+    total_migrated = 0
+    batch_count = 0
+
+    for batch_content in get_table_content_in_batches(db, v1_db_schema, v1_table_name, batch_size):
+        batch_count += 1
+        batch_size_actual = len(batch_content)
+        log_info(f"Processing batch {batch_count} with {batch_size_actual} records from table {v1_table_name}")
+
+        # Parse the content into the new format
+        memories: List[UserMemory] = []
+        sessions: Union[List[AgentSession], List[TeamSession], List[WorkflowSession]] = []
+
+        if v1_table_type == "agent_sessions":
+            sessions = parse_agent_sessions(batch_content)
+        elif v1_table_type == "team_sessions":
+            sessions = parse_team_sessions(batch_content)
+        elif v1_table_type == "workflow_sessions":
+            sessions = parse_workflow_sessions(batch_content)
+        elif v1_table_type == "memories":
+            memories = parse_memories(batch_content)
+        else:
+            raise ValueError(f"Invalid table type: {v1_table_type}")
+
+        # Insert the batch into the new table
+        if v1_table_type in ["agent_sessions", "team_sessions", "workflow_sessions"]:
+            if sessions:
+                # Clear any existing scoped session state for SQL databases to prevent transaction conflicts
+                if hasattr(db, "Session"):
+                    db.Session.remove()  # type: ignore
+
+                db.upsert_sessions(sessions)  # type: ignore
+                total_migrated += len(sessions)
+                log_info(f"Bulk upserted {len(sessions)} sessions in batch {batch_count}")
+
+        elif v1_table_type == "memories":
+            if memories:
+                # Clear any existing scoped session state for SQL databases to prevent transaction conflicts
+                if hasattr(db, "Session"):
+                    db.Session.remove()  # type: ignore
+
+                db.upsert_memories(memories)
+                total_migrated += len(memories)
+                log_info(f"Bulk upserted {len(memories)} memories in batch {batch_count}")
+
+        log_info(f"Completed batch {batch_count}: migrated {batch_size_actual} records")
+
+    log_info(f"✅ Migration completed for table {v1_table_name}: {total_migrated} total records migrated")
+
+
+def get_table_content_in_batches(
+    db: Union[PostgresDb, MySQLDb, SqliteDb, MongoDb], db_schema: str, table_name: str, batch_size: int = 5000
+):
+    """Get table content in batches to avoid memory issues with large tables"""
     try:
-
-
-        # MongoDB implementation
+        if isinstance(db, MongoDb):
+            # MongoDB implementation with cursor and batching
             collection = db.database[table_name]
-
-
-
-            for doc in
+            cursor = collection.find({}).batch_size(batch_size)
+
+            batch = []
+            for doc in cursor:
+                # Convert ObjectId to string for compatibility
                 if "_id" in doc:
                     doc["_id"] = str(doc["_id"])
-
-
-
-
-
-        if db_schema and db_schema.strip():
-            sql_query = f"SELECT * FROM {db_schema}.{table_name}"
-        else:
-            sql_query = f"SELECT * FROM {table_name}"
+                batch.append(doc)
+
+                if len(batch) >= batch_size:
+                    yield batch
+                    batch = []
 
-
-
+            # Yield remaining items
+            if batch:
+                yield batch
+        else:
+            # SQL database implementations (PostgresDb, MySQLDb, SqliteDb)
+            offset = 0
+            while True:
+                # Create a new session for each batch to avoid transaction conflicts
+                with db.Session() as sess:
+                    # Handle empty schema by omitting the schema prefix (needed for SQLite)
+                    if db_schema and db_schema.strip():
+                        sql_query = f"SELECT * FROM {db_schema}.{table_name} LIMIT {batch_size} OFFSET {offset}"
+                    else:
+                        sql_query = f"SELECT * FROM {table_name} LIMIT {batch_size} OFFSET {offset}"
+
+                    result = sess.execute(text(sql_query))
+                    batch = [row._asdict() for row in result]
+
+                if not batch:
+                    break
+
+                yield batch
+                offset += batch_size
+
+                # If batch is smaller than batch_size, we've reached the end
+                if len(batch) < batch_size:
+                    break
 
     except Exception as e:
-        log_error(f"Error getting
-        return
+        log_error(f"Error getting batched content from table/collection {table_name}: {e}")
+        return
+
+
+def get_all_table_content(db, db_schema: str, table_name: str) -> list[dict[str, Any]]:
+    """Get all content from the given table/collection (legacy method kept for backward compatibility)
+
+    WARNING: This method loads all data into memory and should not be used for large tables.
+    Use get_table_content_in_batches() for large datasets.
+    """
+    log_warning(
+        f"Loading entire table {table_name} into memory. Consider using get_table_content_in_batches() for large tables, or if you experience any complication."
+    )
+
+    all_content = []
+    for batch in get_table_content_in_batches(db, db_schema, table_name):
+        all_content.extend(batch)
+    return all_content
 
 
 def parse_agent_sessions(v1_content: List[Dict[str, Any]]) -> List[AgentSession]:
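The new batching helper reads SQL tables page by page with LIMIT/OFFSET and walks MongoDB collections with a server-side cursor (collection.find({}).batch_size(...)), so a large v1 table never has to fit in memory at once. The sketch below reproduces just the LIMIT/OFFSET pattern against an in-memory SQLite database with plain SQLAlchemy; iter_batches and the sample table are illustrative, not the package's helper.

```python
# Minimal, self-contained illustration of the LIMIT/OFFSET paging used for SQL backends.
# Everything here (table, data, iter_batches) is invented for the example.
from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///:memory:")
with engine.begin() as conn:
    conn.execute(text("CREATE TABLE agent_sessions (session_id TEXT, user_id TEXT)"))
    conn.execute(
        text("INSERT INTO agent_sessions VALUES (:s, :u)"),
        [{"s": f"sess_{i}", "u": f"user_{i}"} for i in range(12)],
    )


def iter_batches(table_name: str, batch_size: int = 5):
    offset = 0
    while True:
        # A fresh connection per page, mirroring the new-session-per-batch approach in the diff
        with engine.connect() as conn:
            result = conn.execute(text(f"SELECT * FROM {table_name} LIMIT {batch_size} OFFSET {offset}"))
            batch = [row._asdict() for row in result]
        if not batch:
            break
        yield batch
        offset += batch_size
        if len(batch) < batch_size:  # a short page means we've reached the end
            break


for page in iter_batches("agent_sessions"):
    print(len(page))  # 5, 5, 2
```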
@@ -385,7 +515,13 @@ def parse_agent_sessions(v1_content: List[Dict[str, Any]]) -> List[AgentSession]
             "created_at": item.get("created_at"),
             "updated_at": item.get("updated_at"),
         }
-
+
+        try:
+            agent_session = AgentSession.from_dict(session)
+        except Exception as e:
+            log_error(f"Error parsing agent session: {e}. This is the complete session that failed: {session}")
+            continue
+
         if agent_session is not None:
             sessions_v2.append(agent_session)
 
@@ -408,7 +544,12 @@ def parse_team_sessions(v1_content: List[Dict[str, Any]]) -> List[TeamSession]:
             "created_at": item.get("created_at"),
             "updated_at": item.get("updated_at"),
         }
-
+        try:
+            team_session = TeamSession.from_dict(session)
+        except Exception as e:
+            log_error(f"Error parsing team session: {e}. This is the complete session that failed: {session}")
+            continue
+
         if team_session is not None:
             sessions_v2.append(team_session)
 
@@ -433,7 +574,12 @@ def parse_workflow_sessions(v1_content: List[Dict[str, Any]]) -> List[WorkflowSe
             "workflow_name": item.get("workflow_name"),
             "runs": convert_any_metrics_in_data(item.get("runs")),
         }
-
+        try:
+            workflow_session = WorkflowSession.from_dict(session)
+        except Exception as e:
+            log_error(f"Error parsing workflow session: {e}. This is the complete session that failed: {session}")
+            continue
+
         if workflow_session is not None:
             sessions_v2.append(workflow_session)
 
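All three parse_* helpers now share the same error-isolation pattern: a record that fails in from_dict() is logged with its full payload and skipped, instead of aborting the whole table migration. A generic sketch of that pattern follows; the Record class and sample rows are invented stand-ins for the v2 session/memory classes and v1 rows.

```python
# Generic sketch of the log-and-skip pattern used by the parse_* helpers above.
import logging
from dataclasses import dataclass

log = logging.getLogger("v1_to_v2_sketch")


@dataclass
class Record:
    session_id: str

    @classmethod
    def from_dict(cls, data: dict) -> "Record":
        return cls(session_id=data["session_id"])  # raises KeyError on malformed input


def parse_records(v1_content: list) -> list:
    records = []
    for item in v1_content:
        try:
            record = Record.from_dict(item)
        except Exception as e:
            log.error(f"Error parsing record: {e}. This is the complete record that failed: {item}")
            continue
        records.append(record)
    return records


print(parse_records([{"session_id": "a"}, {"bad": "row"}, {"session_id": "b"}]))
# The malformed middle row is logged and skipped; two Record objects are returned.
```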