agno-2.3.20-py3-none-any.whl → agno-2.3.22-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. agno/agent/agent.py +26 -1
  2. agno/agent/remote.py +233 -72
  3. agno/client/a2a/__init__.py +10 -0
  4. agno/client/a2a/client.py +554 -0
  5. agno/client/a2a/schemas.py +112 -0
  6. agno/client/a2a/utils.py +369 -0
  7. agno/db/migrations/utils.py +19 -0
  8. agno/db/migrations/v1_to_v2.py +54 -16
  9. agno/db/migrations/versions/v2_3_0.py +92 -53
  10. agno/db/postgres/async_postgres.py +162 -40
  11. agno/db/postgres/postgres.py +181 -31
  12. agno/db/postgres/utils.py +6 -2
  13. agno/eval/agent_as_judge.py +24 -14
  14. agno/knowledge/chunking/document.py +3 -2
  15. agno/knowledge/chunking/markdown.py +8 -3
  16. agno/knowledge/chunking/recursive.py +2 -2
  17. agno/knowledge/embedder/mistral.py +1 -1
  18. agno/models/openai/chat.py +1 -1
  19. agno/models/openai/responses.py +14 -7
  20. agno/os/middleware/jwt.py +66 -27
  21. agno/os/routers/agents/router.py +2 -2
  22. agno/os/routers/evals/evals.py +0 -9
  23. agno/os/routers/evals/utils.py +6 -6
  24. agno/os/routers/knowledge/knowledge.py +3 -3
  25. agno/os/routers/teams/router.py +2 -2
  26. agno/os/routers/workflows/router.py +2 -2
  27. agno/reasoning/deepseek.py +11 -1
  28. agno/reasoning/gemini.py +6 -2
  29. agno/reasoning/groq.py +8 -3
  30. agno/reasoning/openai.py +2 -0
  31. agno/remote/base.py +105 -8
  32. agno/run/agent.py +19 -19
  33. agno/run/team.py +19 -19
  34. agno/skills/__init__.py +17 -0
  35. agno/skills/agent_skills.py +370 -0
  36. agno/skills/errors.py +32 -0
  37. agno/skills/loaders/__init__.py +4 -0
  38. agno/skills/loaders/base.py +27 -0
  39. agno/skills/loaders/local.py +216 -0
  40. agno/skills/skill.py +65 -0
  41. agno/skills/utils.py +107 -0
  42. agno/skills/validator.py +277 -0
  43. agno/team/remote.py +219 -59
  44. agno/team/team.py +22 -2
  45. agno/tools/mcp/mcp.py +299 -17
  46. agno/tools/mcp/multi_mcp.py +269 -14
  47. agno/utils/mcp.py +49 -8
  48. agno/utils/string.py +43 -1
  49. agno/workflow/condition.py +4 -2
  50. agno/workflow/loop.py +20 -1
  51. agno/workflow/remote.py +172 -32
  52. agno/workflow/router.py +4 -1
  53. agno/workflow/steps.py +4 -0
  54. {agno-2.3.20.dist-info → agno-2.3.22.dist-info}/METADATA +59 -130
  55. {agno-2.3.20.dist-info → agno-2.3.22.dist-info}/RECORD +58 -44
  56. {agno-2.3.20.dist-info → agno-2.3.22.dist-info}/WHEEL +0 -0
  57. {agno-2.3.20.dist-info → agno-2.3.22.dist-info}/licenses/LICENSE +0 -0
  58. {agno-2.3.20.dist-info → agno-2.3.22.dist-info}/top_level.txt +0 -0
agno/db/postgres/postgres.py CHANGED
@@ -1,6 +1,6 @@
  import time
  from datetime import date, datetime, timedelta, timezone
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union, cast
  from uuid import uuid4

  if TYPE_CHECKING:
@@ -27,7 +27,7 @@ from agno.db.schemas.knowledge import KnowledgeRow
  from agno.db.schemas.memory import UserMemory
  from agno.session import AgentSession, Session, TeamSession, WorkflowSession
  from agno.utils.log import log_debug, log_error, log_info, log_warning
- from agno.utils.string import generate_id
+ from agno.utils.string import generate_id, sanitize_postgres_string, sanitize_postgres_strings

  try:
  from sqlalchemy import ForeignKey, Index, String, UniqueConstraint, and_, case, func, or_, select, update
@@ -91,7 +91,11 @@ class PostgresDb(BaseDb):
  """
  _engine: Optional[Engine] = db_engine
  if _engine is None and db_url is not None:
- _engine = create_engine(db_url)
+ _engine = create_engine(
+ db_url,
+ pool_pre_ping=True,
+ pool_recycle=3600,
+ )
  if _engine is None:
  raise ValueError("One of db_url or db_engine must be provided")
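Note: pool_pre_ping and pool_recycle guard against stale pooled connections that Postgres or a proxy silently closes after an idle timeout. A minimal sketch of these SQLAlchemy create_engine options in isolation (the DSN is a placeholder):

    from sqlalchemy import create_engine, text

    # Placeholder DSN, for illustration only
    engine = create_engine(
        "postgresql+psycopg://user:pass@localhost:5432/agno",
        pool_pre_ping=True,   # test each pooled connection with a lightweight ping before reuse
        pool_recycle=3600,    # drop and replace connections older than one hour
    )

    with engine.connect() as conn:
        conn.execute(text("SELECT 1"))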

@@ -512,6 +516,11 @@ class PostgresDb(BaseDb):

  if user_id is not None:
  stmt = stmt.where(table.c.user_id == user_id)
+
+ # Filter by session_type to ensure we get the correct session type
+ session_type_value = session_type.value if isinstance(session_type, SessionType) else session_type
+ stmt = stmt.where(table.c.session_type == session_type_value)
+
  result = sess.execute(stmt).fetchone()
  if result is None:
  return None
@@ -596,9 +605,7 @@ class PostgresDb(BaseDb):
  stmt = stmt.where(table.c.created_at <= end_timestamp)
  if session_name is not None:
  stmt = stmt.where(
- func.coalesce(func.json_extract_path_text(table.c.session_data, "session_name"), "").ilike(
- f"%{session_name}%"
- )
+ func.coalesce(table.c.session_data["session_name"].astext, "").ilike(f"%{session_name}%")
  )
  if session_type is not None:
  session_type_value = session_type.value if isinstance(session_type, SessionType) else session_type
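Note: the session-name filter now uses SQLAlchemy's native JSON accessor (column["key"].astext, rendered as ->> in SQL) instead of json_extract_path_text, which breaks when session_data is stored as JSONB. A self-contained sketch of the pattern; the table definition below is illustrative, not agno's actual schema:

    from sqlalchemy import Column, MetaData, String, Table, func, select
    from sqlalchemy.dialects.postgresql import JSONB

    metadata = MetaData()
    sessions = Table(
        "agno_sessions",
        metadata,
        Column("session_id", String, primary_key=True),
        Column("session_data", JSONB),
    )

    def filter_by_session_name(session_name: str):
        # session_data["session_name"].astext compiles to session_data ->> 'session_name'
        return select(sessions).where(
            func.coalesce(sessions.c.session_data["session_name"].astext, "").ilike(f"%{session_name}%")
        )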
@@ -663,6 +670,8 @@ class PostgresDb(BaseDb):
  return None

  with self.Session() as sess, sess.begin():
+ # Sanitize session_name to remove null bytes
+ sanitized_session_name = sanitize_postgres_string(session_name)
  stmt = (
  update(table)
  .where(table.c.session_id == session_id)
@@ -672,7 +681,7 @@ class PostgresDb(BaseDb):
  func.jsonb_set(
  func.cast(table.c.session_data, postgresql.JSONB),
  text("'{session_name}'"),
- func.to_jsonb(session_name),
+ func.to_jsonb(sanitized_session_name),
  ),
  postgresql.JSON,
  )
@@ -728,6 +737,21 @@ class PostgresDb(BaseDb):
  return None

  session_dict = session.to_dict()
+ # Sanitize JSON/dict fields to remove null bytes from nested strings
+ if session_dict.get("agent_data"):
+ session_dict["agent_data"] = sanitize_postgres_strings(session_dict["agent_data"])
+ if session_dict.get("team_data"):
+ session_dict["team_data"] = sanitize_postgres_strings(session_dict["team_data"])
+ if session_dict.get("workflow_data"):
+ session_dict["workflow_data"] = sanitize_postgres_strings(session_dict["workflow_data"])
+ if session_dict.get("session_data"):
+ session_dict["session_data"] = sanitize_postgres_strings(session_dict["session_data"])
+ if session_dict.get("summary"):
+ session_dict["summary"] = sanitize_postgres_strings(session_dict["summary"])
+ if session_dict.get("metadata"):
+ session_dict["metadata"] = sanitize_postgres_strings(session_dict["metadata"])
+ if session_dict.get("runs"):
+ session_dict["runs"] = sanitize_postgres_strings(session_dict["runs"])

  if isinstance(session, AgentSession):
  with self.Session() as sess, sess.begin():
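Note: PostgreSQL text and JSON columns reject NUL (\x00) bytes, which is why strings and nested JSON payloads are now routed through sanitize_postgres_string / sanitize_postgres_strings (added to agno/utils/string.py in this release). Their bodies are not part of this diff; the sketch below only shows what such helpers typically look like, with the names reused purely for illustration:

    from typing import Any, Optional

    def sanitize_postgres_string(value: Optional[str]) -> Optional[str]:
        # Strip NUL bytes, which PostgreSQL text columns reject
        if value is None:
            return None
        return value.replace("\x00", "")

    def sanitize_postgres_strings(value: Any) -> Any:
        # Recursively sanitize strings nested in JSON-like dicts and lists
        if isinstance(value, str):
            return value.replace("\x00", "")
        if isinstance(value, dict):
            return {k: sanitize_postgres_strings(v) for k, v in value.items()}
        if isinstance(value, list):
            return [sanitize_postgres_strings(v) for v in value]
        return value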
@@ -881,6 +905,18 @@
  session_records = []
  for agent_session in agent_sessions:
  session_dict = agent_session.to_dict()
+ # Sanitize JSON/dict fields to remove null bytes from nested strings
+ if session_dict.get("agent_data"):
+ session_dict["agent_data"] = sanitize_postgres_strings(session_dict["agent_data"])
+ if session_dict.get("session_data"):
+ session_dict["session_data"] = sanitize_postgres_strings(session_dict["session_data"])
+ if session_dict.get("summary"):
+ session_dict["summary"] = sanitize_postgres_strings(session_dict["summary"])
+ if session_dict.get("metadata"):
+ session_dict["metadata"] = sanitize_postgres_strings(session_dict["metadata"])
+ if session_dict.get("runs"):
+ session_dict["runs"] = sanitize_postgres_strings(session_dict["runs"])
+
  # Use preserved updated_at if flag is set (even if None), otherwise use current time
  updated_at = session_dict.get("updated_at") if preserve_updated_at else int(time.time())
  session_records.append(
@@ -926,6 +962,18 @@
  session_records = []
  for team_session in team_sessions:
  session_dict = team_session.to_dict()
+ # Sanitize JSON/dict fields to remove null bytes from nested strings
+ if session_dict.get("team_data"):
+ session_dict["team_data"] = sanitize_postgres_strings(session_dict["team_data"])
+ if session_dict.get("session_data"):
+ session_dict["session_data"] = sanitize_postgres_strings(session_dict["session_data"])
+ if session_dict.get("summary"):
+ session_dict["summary"] = sanitize_postgres_strings(session_dict["summary"])
+ if session_dict.get("metadata"):
+ session_dict["metadata"] = sanitize_postgres_strings(session_dict["metadata"])
+ if session_dict.get("runs"):
+ session_dict["runs"] = sanitize_postgres_strings(session_dict["runs"])
+
  # Use preserved updated_at if flag is set (even if None), otherwise use current time
  updated_at = session_dict.get("updated_at") if preserve_updated_at else int(time.time())
  session_records.append(
@@ -971,6 +1019,18 @@
  session_records = []
  for workflow_session in workflow_sessions:
  session_dict = workflow_session.to_dict()
+ # Sanitize JSON/dict fields to remove null bytes from nested strings
+ if session_dict.get("workflow_data"):
+ session_dict["workflow_data"] = sanitize_postgres_strings(session_dict["workflow_data"])
+ if session_dict.get("session_data"):
+ session_dict["session_data"] = sanitize_postgres_strings(session_dict["session_data"])
+ if session_dict.get("summary"):
+ session_dict["summary"] = sanitize_postgres_strings(session_dict["summary"])
+ if session_dict.get("metadata"):
+ session_dict["metadata"] = sanitize_postgres_strings(session_dict["metadata"])
+ if session_dict.get("runs"):
+ session_dict["runs"] = sanitize_postgres_strings(session_dict["runs"])
+
  # Use preserved updated_at if flag is set (even if None), otherwise use current time
  updated_at = session_dict.get("updated_at") if preserve_updated_at else int(time.time())
  session_records.append(
@@ -1098,16 +1158,35 @@
  return []

  with self.Session() as sess, sess.begin():
+ # Filter out NULL topics and ensure topics is an array before extracting elements
+ # jsonb_typeof returns 'array' for JSONB arrays
+ conditions = [
+ table.c.topics.is_not(None),
+ func.jsonb_typeof(table.c.topics) == "array",
+ ]
+
  try:
- stmt = select(func.jsonb_array_elements_text(table.c.topics))
+ # jsonb_array_elements_text is a set-returning function that must be used with select_from
+ stmt = select(func.jsonb_array_elements_text(table.c.topics).label("topic"))
+ stmt = stmt.select_from(table)
+ stmt = stmt.where(and_(*conditions))
  result = sess.execute(stmt).fetchall()
  except ProgrammingError:
  # Retrying with json_array_elements_text. This works in older versions,
  # where the topics column was of type JSON instead of JSONB
- stmt = select(func.json_array_elements_text(table.c.topics))
+ # For JSON (not JSONB), we use json_typeof
+ json_conditions = [
+ table.c.topics.is_not(None),
+ func.json_typeof(table.c.topics) == "array",
+ ]
+ stmt = select(func.json_array_elements_text(table.c.topics).label("topic"))
+ stmt = stmt.select_from(table)
+ stmt = stmt.where(and_(*json_conditions))
  result = sess.execute(stmt).fetchall()

- return list(set([record[0] for record in result]))
+ # Extract topics from records - each record is a Row with a 'topic' attribute
+ topics = [record.topic for record in result if record.topic is not None]
+ return list(set(topics))

  except Exception as e:
  log_error(f"Exception reading from memory table: {e}")
@@ -1348,6 +1427,10 @@
  if table is None:
  return None

+ # Sanitize string fields to remove null bytes (PostgreSQL doesn't allow them)
+ sanitized_input = sanitize_postgres_string(memory.input)
+ sanitized_feedback = sanitize_postgres_string(memory.feedback)
+
  with self.Session() as sess, sess.begin():
  if memory.memory_id is None:
  memory.memory_id = str(uuid4())
@@ -1357,24 +1440,26 @@
  stmt = postgresql.insert(table).values(
  memory_id=memory.memory_id,
  memory=memory.memory,
- input=memory.input,
+ input=sanitized_input,
  user_id=memory.user_id,
  agent_id=memory.agent_id,
  team_id=memory.team_id,
  topics=memory.topics,
- feedback=memory.feedback,
+ feedback=sanitized_feedback,
  created_at=memory.created_at,
- updated_at=memory.created_at,
+ updated_at=memory.updated_at
+ if memory.updated_at is not None
+ else (memory.created_at if memory.created_at is not None else current_time),
  )
  stmt = stmt.on_conflict_do_update( # type: ignore
  index_elements=["memory_id"],
  set_=dict(
  memory=memory.memory,
  topics=memory.topics,
- input=memory.input,
+ input=sanitized_input,
  agent_id=memory.agent_id,
  team_id=memory.team_id,
- feedback=memory.feedback,
+ feedback=sanitized_feedback,
  updated_at=current_time,
  # Preserve created_at on update - don't overwrite existing value
  created_at=table.c.created_at,
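Note: the memory write keeps using PostgreSQL's INSERT ... ON CONFLICT DO UPDATE, but now refreshes updated_at on conflict while pinning created_at to the value already stored in the row. A condensed sketch of that upsert idiom (illustrative table, not agno's actual schema):

    import time

    from sqlalchemy import BigInteger, Column, MetaData, String, Table
    from sqlalchemy.dialects import postgresql

    metadata = MetaData()
    memories = Table(
        "agno_memories",
        metadata,
        Column("memory_id", String, primary_key=True),
        Column("memory", String),
        Column("created_at", BigInteger),
        Column("updated_at", BigInteger),
    )

    now = int(time.time())
    stmt = postgresql.insert(memories).values(
        memory_id="m-1", memory="hello", created_at=now, updated_at=now
    )
    stmt = stmt.on_conflict_do_update(
        index_elements=["memory_id"],
        set_=dict(
            memory="hello",
            updated_at=now,
            # Reference the existing column so an update never overwrites the original timestamp
            created_at=memories.c.created_at,
        ),
    )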
@@ -1432,16 +1517,20 @@
  # Use preserved updated_at if flag is set (even if None), otherwise use current time
  updated_at = memory.updated_at if preserve_updated_at else current_time

+ # Sanitize string fields to remove null bytes (PostgreSQL doesn't allow them)
+ sanitized_input = sanitize_postgres_string(memory.input)
+ sanitized_feedback = sanitize_postgres_string(memory.feedback)
+
  memory_records.append(
  {
  "memory_id": memory.memory_id,
  "memory": memory.memory,
- "input": memory.input,
+ "input": sanitized_input,
  "user_id": memory.user_id,
  "agent_id": memory.agent_id,
  "team_id": memory.team_id,
  "topics": memory.topics,
- "feedback": memory.feedback,
+ "feedback": sanitized_feedback,
  "created_at": memory.created_at,
  "updated_at": updated_at,
  }
@@ -1747,8 +1836,7 @@
  stmt = select(table)

  # Apply sorting
- if sort_by is not None:
- stmt = stmt.order_by(getattr(table.c, sort_by) * (1 if sort_order == "asc" else -1))
+ stmt = apply_sorting(stmt, table, sort_by, sort_order)

  # Get total count before applying limit and pagination
  count_stmt = select(func.count()).select_from(stmt.alias())
@@ -1807,10 +1895,19 @@
  }

  # Build insert and update data only for fields that exist in the table
+ # String fields that need sanitization
+ string_fields = {"name", "description", "type", "status", "status_message", "external_id", "linked_to"}
+
  for model_field, table_column in field_mapping.items():
  if table_column in table_columns:
  value = getattr(knowledge_row, model_field, None)
  if value is not None:
+ # Sanitize string fields to remove null bytes
+ if table_column in string_fields and isinstance(value, str):
+ value = sanitize_postgres_string(value)
+ # Sanitize metadata dict if present
+ elif table_column == "metadata" and isinstance(value, dict):
+ value = sanitize_postgres_strings(value)
  insert_data[table_column] = value
  # Don't include ID in update_fields since it's the primary key
  if table_column != "id":
@@ -1865,8 +1962,22 @@

  with self.Session() as sess, sess.begin():
  current_time = int(time.time())
+ eval_data = eval_run.model_dump()
+ # Sanitize string fields in eval_run
+ if eval_data.get("name"):
+ eval_data["name"] = sanitize_postgres_string(eval_data["name"])
+ if eval_data.get("evaluated_component_name"):
+ eval_data["evaluated_component_name"] = sanitize_postgres_string(
+ eval_data["evaluated_component_name"]
+ )
+ # Sanitize nested dicts/JSON fields
+ if eval_data.get("eval_data"):
+ eval_data["eval_data"] = sanitize_postgres_strings(eval_data["eval_data"])
+ if eval_data.get("eval_input"):
+ eval_data["eval_input"] = sanitize_postgres_strings(eval_data["eval_input"])
+
  stmt = postgresql.insert(table).values(
- {"created_at": current_time, "updated_at": current_time, **eval_run.model_dump()}
+ {"created_at": current_time, "updated_at": current_time, **eval_data}
  )
  sess.execute(stmt)

@@ -2080,8 +2191,12 @@
  return None

  with self.Session() as sess, sess.begin():
+ # Sanitize string field to remove null bytes
+ sanitized_name = sanitize_postgres_string(name)
  stmt = (
- table.update().where(table.c.run_id == eval_run_id).values(name=name, updated_at=int(time.time()))
+ table.update()
+ .where(table.c.run_id == eval_run_id)
+ .values(name=sanitized_name, updated_at=int(time.time()))
  )
  sess.execute(stmt)

@@ -2278,15 +2393,25 @@

  # Serialize content, categories, and notes into a JSON dict for DB storage
  content_dict = serialize_cultural_knowledge(cultural_knowledge)
+ # Sanitize content_dict to remove null bytes from nested strings
+ if content_dict:
+ content_dict = cast(Dict[str, Any], sanitize_postgres_strings(content_dict))
+
+ # Sanitize string fields to remove null bytes (PostgreSQL doesn't allow them)
+ sanitized_name = sanitize_postgres_string(cultural_knowledge.name)
+ sanitized_summary = sanitize_postgres_string(cultural_knowledge.summary)
+ sanitized_input = sanitize_postgres_string(cultural_knowledge.input)

  with self.Session() as sess, sess.begin():
  stmt = postgresql.insert(table).values(
  id=cultural_knowledge.id,
- name=cultural_knowledge.name,
- summary=cultural_knowledge.summary,
+ name=sanitized_name,
+ summary=sanitized_summary,
  content=content_dict if content_dict else None,
- metadata=cultural_knowledge.metadata,
- input=cultural_knowledge.input,
+ metadata=sanitize_postgres_strings(cultural_knowledge.metadata)
+ if cultural_knowledge.metadata
+ else None,
+ input=sanitized_input,
  created_at=cultural_knowledge.created_at,
  updated_at=int(time.time()),
  agent_id=cultural_knowledge.agent_id,
@@ -2295,11 +2420,13 @@
  stmt = stmt.on_conflict_do_update( # type: ignore
  index_elements=["id"],
  set_=dict(
- name=cultural_knowledge.name,
- summary=cultural_knowledge.summary,
+ name=sanitized_name,
+ summary=sanitized_summary,
  content=content_dict if content_dict else None,
- metadata=cultural_knowledge.metadata,
- input=cultural_knowledge.input,
+ metadata=sanitize_postgres_strings(cultural_knowledge.metadata)
+ if cultural_knowledge.metadata
+ else None,
+ input=sanitized_input,
  updated_at=int(time.time()),
  agent_id=cultural_knowledge.agent_id,
  team_id=cultural_knowledge.team_id,
@@ -2458,6 +2585,13 @@
  trace_dict = trace.to_dict()
  trace_dict.pop("total_spans", None)
  trace_dict.pop("error_count", None)
+ # Sanitize string fields and nested JSON structures
+ if trace_dict.get("name"):
+ trace_dict["name"] = sanitize_postgres_string(trace_dict["name"])
+ if trace_dict.get("status"):
+ trace_dict["status"] = sanitize_postgres_string(trace_dict["status"])
+ # Sanitize any nested dict/JSON fields
+ trace_dict = cast(Dict[str, Any], sanitize_postgres_strings(trace_dict))

  with self.Session() as sess, sess.begin():
  # Use upsert to handle concurrent inserts atomically
@@ -2781,7 +2915,15 @@
  return

  with self.Session() as sess, sess.begin():
- stmt = postgresql.insert(table).values(span.to_dict())
+ span_dict = span.to_dict()
+ # Sanitize string fields and nested JSON structures
+ if span_dict.get("name"):
+ span_dict["name"] = sanitize_postgres_string(span_dict["name"])
+ if span_dict.get("status_code"):
+ span_dict["status_code"] = sanitize_postgres_string(span_dict["status_code"])
+ # Sanitize any nested dict/JSON fields
+ span_dict = cast(Dict[str, Any], sanitize_postgres_strings(span_dict))
+ stmt = postgresql.insert(table).values(span_dict)
  sess.execute(stmt)

  except Exception as e:
@@ -2803,7 +2945,15 @@

  with self.Session() as sess, sess.begin():
  for span in spans:
- stmt = postgresql.insert(table).values(span.to_dict())
+ span_dict = span.to_dict()
+ # Sanitize string fields and nested JSON structures
+ if span_dict.get("name"):
+ span_dict["name"] = sanitize_postgres_string(span_dict["name"])
+ if span_dict.get("status_code"):
+ span_dict["status_code"] = sanitize_postgres_string(span_dict["status_code"])
+ # Sanitize any nested dict/JSON fields
+ span_dict = sanitize_postgres_strings(span_dict)
+ stmt = postgresql.insert(table).values(span_dict)
  sess.execute(stmt)

  except Exception as e:
agno/db/postgres/utils.py CHANGED
@@ -15,6 +15,7 @@ from agno.utils.log import log_debug, log_error, log_warning
  try:
  from sqlalchemy import Table, func
  from sqlalchemy.dialects import postgresql
+ from sqlalchemy.exc import NoSuchTableError
  from sqlalchemy.inspection import inspect
  from sqlalchemy.orm import Session
  from sqlalchemy.sql.expression import text
@@ -183,6 +184,9 @@ async def ais_valid_table(db_engine: AsyncEngine, table_name: str, table_type: s
  return False

  return True
+ except NoSuchTableError:
+ log_error(f"Table {db_schema}.{table_name} does not exist")
+ return False
  except Exception as e:
  log_error(f"Error validating table schema for {db_schema}.{table_name}: {e}")
  return False
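Note: NoSuchTableError is what SQLAlchemy reflection raises when the table is simply missing, so catching it separately produces a clearer log line than the generic handler. A hedged sketch of the pattern outside agno:

    from sqlalchemy import MetaData, Table, create_engine
    from sqlalchemy.exc import NoSuchTableError

    def table_exists(db_url: str, table_name: str, schema: str = "public") -> bool:
        engine = create_engine(db_url)
        try:
            # Reflection raises NoSuchTableError if the table is not present in the schema
            Table(table_name, MetaData(), autoload_with=engine, schema=schema)
            return True
        except NoSuchTableError:
            return False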
@@ -317,8 +321,8 @@ def calculate_date_metrics(date_to_process: date, sessions_data: dict) -> dict:
  model_counts[f"{model_id}:{model_provider}"] = (
  model_counts.get(f"{model_id}:{model_provider}", 0) + 1
  )
-
- session_metrics = session.get("session_data", {}).get("session_metrics", {})
+ session_data = session.get("session_data", {}) or {}
+ session_metrics = session_data.get("session_metrics", {}) or {}
  for field in token_metrics:
  token_metrics[field] += session_metrics.get(field, 0)
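Note: dict.get("session_data", {}) only falls back when the key is absent; if the key exists with an explicit None, the old chained .get() raised AttributeError. The or {} guard covers both cases:

    session = {"session_data": None}

    # Old form raises AttributeError, because .get() returns the stored None, not {}
    # session.get("session_data", {}).get("session_metrics", {})

    # New form tolerates both a missing key and an explicit None
    session_data = session.get("session_data", {}) or {}
    session_metrics = session_data.get("session_metrics", {}) or {}
    print(session_metrics.get("input_tokens", 0))  # -> 0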

agno/eval/agent_as_judge.py CHANGED
@@ -518,8 +518,11 @@ class AgentAsJudgeEval(BaseEval):
  if self.print_summary or print_summary:
  result.print_summary(console)

+ # evaluator model info
+ model_id = self.model.id if self.model is not None else None
+ model_provider = self.model.provider if self.model is not None else None
  # Log to DB
- self._log_eval_to_db(run_id=run_id, result=result)
+ self._log_eval_to_db(run_id=run_id, result=result, model_id=model_id, model_provider=model_provider)

  if self.telemetry:
  from agno.api.evals import EvalRunCreate, create_eval_run_telemetry
@@ -613,8 +616,11 @@ class AgentAsJudgeEval(BaseEval):
  if self.print_summary or print_summary:
  result.print_summary(console)

+ # evaluator model info
+ model_id = self.model.id if self.model is not None else None
+ model_provider = self.model.provider if self.model is not None else None
  # Log to DB
- await self._async_log_eval_to_db(run_id=run_id, result=result)
+ await self._async_log_eval_to_db(run_id=run_id, result=result, model_id=model_id, model_provider=model_provider)

  if self.telemetry:
  from agno.api.evals import EvalRunCreate, async_create_eval_run_telemetry
@@ -681,8 +687,11 @@ class AgentAsJudgeEval(BaseEval):
  if self.print_summary or print_summary:
  result.print_summary(console)

+ # evaluator model info
+ model_id = self.model.id if self.model is not None else None
+ model_provider = self.model.provider if self.model is not None else None
  # Log to DB
- self._log_eval_to_db(run_id=run_id, result=result)
+ self._log_eval_to_db(run_id=run_id, result=result, model_id=model_id, model_provider=model_provider)

  if self.telemetry:
  from agno.api.evals import EvalRunCreate, create_eval_run_telemetry
@@ -748,8 +757,11 @@ class AgentAsJudgeEval(BaseEval):
  if self.print_summary or print_summary:
  result.print_summary(console)

+ # evaluator model info
+ model_id = self.model.id if self.model is not None else None
+ model_provider = self.model.provider if self.model is not None else None
  # Log to DB
- await self._async_log_eval_to_db(run_id=run_id, result=result)
+ await self._async_log_eval_to_db(run_id=run_id, result=result, model_id=model_id, model_provider=model_provider)

  if self.telemetry:
  from agno.api.evals import EvalRunCreate, async_create_eval_run_telemetry
@@ -801,15 +813,14 @@ class AgentAsJudgeEval(BaseEval):
  if isinstance(run_output, RunOutput):
  agent_id = run_output.agent_id
  team_id = None
- model_id = run_output.model
- model_provider = run_output.model_provider
  elif isinstance(run_output, TeamRunOutput):
  agent_id = None
  team_id = run_output.team_id
- model_id = run_output.model
- model_provider = run_output.model_provider

- # Log to DB if we have a valid result (use run_id from result)
+ # evaluator model info
+ model_id = self.model.id if self.model is not None else None
+ model_provider = self.model.provider if self.model is not None else None
+ # Log to DB if we have a valid result
  if result:
  self._log_eval_to_db(
  run_id=result.run_id,
@@ -841,15 +852,14 @@ class AgentAsJudgeEval(BaseEval):
  if isinstance(run_output, RunOutput):
  agent_id = run_output.agent_id
  team_id = None
- model_id = run_output.model
- model_provider = run_output.model_provider
  elif isinstance(run_output, TeamRunOutput):
  agent_id = None
  team_id = run_output.team_id
- model_id = run_output.model
- model_provider = run_output.model_provider

- # Log to DB if we have a valid result (use run_id from result)
+ # evaluator model info
+ model_id = self.model.id if self.model is not None else None
+ model_provider = self.model.provider if self.model is not None else None
+ # Log to DB if we have a valid result
  if result:
  await self._async_log_eval_to_db(
  run_id=result.run_id,
agno/knowledge/chunking/document.py CHANGED
@@ -16,8 +16,9 @@ class DocumentChunking(ChunkingStrategy):
  if len(document.content) <= self.chunk_size:
  return [document]

- # Split on double newlines first (paragraphs)
- paragraphs = self.clean_text(document.content).split("\n\n")
+ # Split on double newlines first (paragraphs), then clean each paragraph
+ raw_paragraphs = document.content.split("\n\n")
+ paragraphs = [self.clean_text(para) for para in raw_paragraphs]
  chunks: List[Document] = []
  current_chunk = []
  current_size = 0
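Note: cleaning each paragraph after splitting matters when clean_text collapses whitespace, since cleaning the whole document first would erase the "\n\n" boundaries the splitter relies on. A toy illustration of the ordering, with a stand-in clean_text (not agno's implementation):

    def clean_text(text: str) -> str:
        # Stand-in cleaner that collapses all whitespace, including newlines
        return " ".join(text.split())

    content = "First paragraph,\nwrapped once.\n\nSecond paragraph."

    # Cleaning first erases the paragraph boundary: everything ends up in one chunk
    assert len(clean_text(content).split("\n\n")) == 1

    # Splitting first, then cleaning each piece, keeps both paragraphs
    paragraphs = [clean_text(p) for p in content.split("\n\n")]
    assert paragraphs == ["First paragraph, wrapped once.", "Second paragraph."]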
agno/knowledge/chunking/markdown.py CHANGED
@@ -35,7 +35,8 @@ class MarkdownChunking(ChunkingStrategy):
  elements = partition_md(filename=temp_file_path)

  if not elements:
- return self.clean_text(content).split("\n\n")
+ raw_paragraphs = content.split("\n\n")
+ return [self.clean_text(para) for para in raw_paragraphs]

  # Chunk by title with some default values
  chunked_elements = chunk_by_title(
@@ -57,7 +58,10 @@ class MarkdownChunking(ChunkingStrategy):
  if chunk_text.strip():
  text_chunks.append(chunk_text.strip())

- return text_chunks if text_chunks else self.clean_text(content).split("\n\n")
+ if text_chunks:
+ return text_chunks
+ raw_paragraphs = content.split("\n\n")
+ return [self.clean_text(para) for para in raw_paragraphs]

  # Always clean up the temporary file
  finally:
@@ -65,7 +69,8 @@ class MarkdownChunking(ChunkingStrategy):

  # Fallback to simple paragraph splitting if the markdown chunking fails
  except Exception:
- return self.clean_text(content).split("\n\n")
+ raw_paragraphs = content.split("\n\n")
+ return [self.clean_text(para) for para in raw_paragraphs]

  def chunk(self, document: Document) -> List[Document]:
  """Split markdown document into chunks based on markdown structure"""
agno/knowledge/chunking/recursive.py CHANGED
@@ -31,7 +31,7 @@ class RecursiveChunking(ChunkingStrategy):
  start = 0
  chunk_meta_data = document.meta_data
  chunk_number = 1
- content = self.clean_text(document.content)
+ content = document.content

  while start < len(content):
  end = min(start + self.chunk_size, len(content))
@@ -43,7 +43,7 @@ class RecursiveChunking(ChunkingStrategy):
  end = start + last_sep + 1
  break

- chunk = content[start:end]
+ chunk = self.clean_text(content[start:end])
  meta_data = chunk_meta_data.copy()
  meta_data["chunk"] = chunk_number
  chunk_id = None
agno/knowledge/embedder/mistral.py CHANGED
@@ -37,7 +37,7 @@ class MistralEmbedder(Embedder):
  "api_key": self.api_key,
  "endpoint": self.endpoint,
  "max_retries": self.max_retries,
- "timeout": self.timeout,
+ "timeout_ms": self.timeout * 1000 if self.timeout else None,
  }
  _client_params = {k: v for k, v in _client_params.items() if v is not None}
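Note: the embedder stores its timeout in seconds, while the client parameter changed to timeout_ms (milliseconds). A small sketch of just the dict-building step shown above, with the conversion applied before empty values are dropped:

    from typing import Any, Dict, Optional

    def build_client_params(api_key: str, timeout: Optional[int] = None) -> Dict[str, Any]:
        params: Dict[str, Any] = {
            "api_key": api_key,
            # The client expects milliseconds; the embedder stores seconds
            "timeout_ms": timeout * 1000 if timeout else None,
        }
        # Drop unset values so the client library falls back to its own defaults
        return {k: v for k, v in params.items() if v is not None}

    print(build_client_params("key", timeout=30))  # {'api_key': 'key', 'timeout_ms': 30000}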

agno/models/openai/chat.py CHANGED
@@ -248,7 +248,7 @@ class OpenAIChat(Model):
  # Add tools
  if tools is not None and len(tools) > 0:
  # Remove unsupported fields for OpenAILike models
- if self.provider in ["AIMLAPI", "Fireworks", "Nvidia"]:
+ if self.provider in ["AIMLAPI", "Fireworks", "Nvidia", "VLLM"]:
  for tool in tools:
  if tool.get("type") == "function":
  if tool["function"].get("requires_confirmation") is not None: