agno 0.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (723)
  1. agno/__init__.py +8 -0
  2. agno/agent/__init__.py +44 -5
  3. agno/agent/agent.py +10531 -2975
  4. agno/api/agent.py +14 -53
  5. agno/api/api.py +7 -46
  6. agno/api/evals.py +22 -0
  7. agno/api/os.py +17 -0
  8. agno/api/routes.py +6 -25
  9. agno/api/schemas/__init__.py +9 -0
  10. agno/api/schemas/agent.py +6 -9
  11. agno/api/schemas/evals.py +16 -0
  12. agno/api/schemas/os.py +14 -0
  13. agno/api/schemas/team.py +10 -10
  14. agno/api/schemas/utils.py +21 -0
  15. agno/api/schemas/workflows.py +16 -0
  16. agno/api/settings.py +53 -0
  17. agno/api/team.py +22 -26
  18. agno/api/workflow.py +28 -0
  19. agno/cloud/aws/base.py +214 -0
  20. agno/cloud/aws/s3/__init__.py +2 -0
  21. agno/cloud/aws/s3/api_client.py +43 -0
  22. agno/cloud/aws/s3/bucket.py +195 -0
  23. agno/cloud/aws/s3/object.py +57 -0
  24. agno/compression/__init__.py +3 -0
  25. agno/compression/manager.py +247 -0
  26. agno/culture/__init__.py +3 -0
  27. agno/culture/manager.py +956 -0
  28. agno/db/__init__.py +24 -0
  29. agno/db/async_postgres/__init__.py +3 -0
  30. agno/db/base.py +946 -0
  31. agno/db/dynamo/__init__.py +3 -0
  32. agno/db/dynamo/dynamo.py +2781 -0
  33. agno/db/dynamo/schemas.py +442 -0
  34. agno/db/dynamo/utils.py +743 -0
  35. agno/db/firestore/__init__.py +3 -0
  36. agno/db/firestore/firestore.py +2379 -0
  37. agno/db/firestore/schemas.py +181 -0
  38. agno/db/firestore/utils.py +376 -0
  39. agno/db/gcs_json/__init__.py +3 -0
  40. agno/db/gcs_json/gcs_json_db.py +1791 -0
  41. agno/db/gcs_json/utils.py +228 -0
  42. agno/db/in_memory/__init__.py +3 -0
  43. agno/db/in_memory/in_memory_db.py +1312 -0
  44. agno/db/in_memory/utils.py +230 -0
  45. agno/db/json/__init__.py +3 -0
  46. agno/db/json/json_db.py +1777 -0
  47. agno/db/json/utils.py +230 -0
  48. agno/db/migrations/manager.py +199 -0
  49. agno/db/migrations/v1_to_v2.py +635 -0
  50. agno/db/migrations/versions/v2_3_0.py +938 -0
  51. agno/db/mongo/__init__.py +17 -0
  52. agno/db/mongo/async_mongo.py +2760 -0
  53. agno/db/mongo/mongo.py +2597 -0
  54. agno/db/mongo/schemas.py +119 -0
  55. agno/db/mongo/utils.py +276 -0
  56. agno/db/mysql/__init__.py +4 -0
  57. agno/db/mysql/async_mysql.py +2912 -0
  58. agno/db/mysql/mysql.py +2923 -0
  59. agno/db/mysql/schemas.py +186 -0
  60. agno/db/mysql/utils.py +488 -0
  61. agno/db/postgres/__init__.py +4 -0
  62. agno/db/postgres/async_postgres.py +2579 -0
  63. agno/db/postgres/postgres.py +2870 -0
  64. agno/db/postgres/schemas.py +187 -0
  65. agno/db/postgres/utils.py +442 -0
  66. agno/db/redis/__init__.py +3 -0
  67. agno/db/redis/redis.py +2141 -0
  68. agno/db/redis/schemas.py +159 -0
  69. agno/db/redis/utils.py +346 -0
  70. agno/db/schemas/__init__.py +4 -0
  71. agno/db/schemas/culture.py +120 -0
  72. agno/db/schemas/evals.py +34 -0
  73. agno/db/schemas/knowledge.py +40 -0
  74. agno/db/schemas/memory.py +61 -0
  75. agno/db/singlestore/__init__.py +3 -0
  76. agno/db/singlestore/schemas.py +179 -0
  77. agno/db/singlestore/singlestore.py +2877 -0
  78. agno/db/singlestore/utils.py +384 -0
  79. agno/db/sqlite/__init__.py +4 -0
  80. agno/db/sqlite/async_sqlite.py +2911 -0
  81. agno/db/sqlite/schemas.py +181 -0
  82. agno/db/sqlite/sqlite.py +2908 -0
  83. agno/db/sqlite/utils.py +429 -0
  84. agno/db/surrealdb/__init__.py +3 -0
  85. agno/db/surrealdb/metrics.py +292 -0
  86. agno/db/surrealdb/models.py +334 -0
  87. agno/db/surrealdb/queries.py +71 -0
  88. agno/db/surrealdb/surrealdb.py +1908 -0
  89. agno/db/surrealdb/utils.py +147 -0
  90. agno/db/utils.py +118 -0
  91. agno/eval/__init__.py +24 -0
  92. agno/eval/accuracy.py +666 -276
  93. agno/eval/agent_as_judge.py +861 -0
  94. agno/eval/base.py +29 -0
  95. agno/eval/performance.py +779 -0
  96. agno/eval/reliability.py +241 -62
  97. agno/eval/utils.py +120 -0
  98. agno/exceptions.py +143 -1
  99. agno/filters.py +354 -0
  100. agno/guardrails/__init__.py +6 -0
  101. agno/guardrails/base.py +19 -0
  102. agno/guardrails/openai.py +144 -0
  103. agno/guardrails/pii.py +94 -0
  104. agno/guardrails/prompt_injection.py +52 -0
  105. agno/hooks/__init__.py +3 -0
  106. agno/hooks/decorator.py +164 -0
  107. agno/integrations/discord/__init__.py +3 -0
  108. agno/integrations/discord/client.py +203 -0
  109. agno/knowledge/__init__.py +5 -1
  110. agno/{document → knowledge}/chunking/agentic.py +22 -14
  111. agno/{document → knowledge}/chunking/document.py +2 -2
  112. agno/{document → knowledge}/chunking/fixed.py +7 -6
  113. agno/knowledge/chunking/markdown.py +151 -0
  114. agno/{document → knowledge}/chunking/recursive.py +15 -3
  115. agno/knowledge/chunking/row.py +39 -0
  116. agno/knowledge/chunking/semantic.py +91 -0
  117. agno/knowledge/chunking/strategy.py +165 -0
  118. agno/knowledge/content.py +74 -0
  119. agno/knowledge/document/__init__.py +5 -0
  120. agno/{document → knowledge/document}/base.py +12 -2
  121. agno/knowledge/embedder/__init__.py +5 -0
  122. agno/knowledge/embedder/aws_bedrock.py +343 -0
  123. agno/knowledge/embedder/azure_openai.py +210 -0
  124. agno/{embedder → knowledge/embedder}/base.py +8 -0
  125. agno/knowledge/embedder/cohere.py +323 -0
  126. agno/knowledge/embedder/fastembed.py +62 -0
  127. agno/{embedder → knowledge/embedder}/fireworks.py +1 -1
  128. agno/knowledge/embedder/google.py +258 -0
  129. agno/knowledge/embedder/huggingface.py +94 -0
  130. agno/knowledge/embedder/jina.py +182 -0
  131. agno/knowledge/embedder/langdb.py +22 -0
  132. agno/knowledge/embedder/mistral.py +206 -0
  133. agno/knowledge/embedder/nebius.py +13 -0
  134. agno/knowledge/embedder/ollama.py +154 -0
  135. agno/knowledge/embedder/openai.py +195 -0
  136. agno/knowledge/embedder/sentence_transformer.py +63 -0
  137. agno/{embedder → knowledge/embedder}/together.py +1 -1
  138. agno/knowledge/embedder/vllm.py +262 -0
  139. agno/knowledge/embedder/voyageai.py +165 -0
  140. agno/knowledge/knowledge.py +3006 -0
  141. agno/knowledge/reader/__init__.py +7 -0
  142. agno/knowledge/reader/arxiv_reader.py +81 -0
  143. agno/knowledge/reader/base.py +95 -0
  144. agno/knowledge/reader/csv_reader.py +164 -0
  145. agno/knowledge/reader/docx_reader.py +82 -0
  146. agno/knowledge/reader/field_labeled_csv_reader.py +290 -0
  147. agno/knowledge/reader/firecrawl_reader.py +201 -0
  148. agno/knowledge/reader/json_reader.py +88 -0
  149. agno/knowledge/reader/markdown_reader.py +137 -0
  150. agno/knowledge/reader/pdf_reader.py +431 -0
  151. agno/knowledge/reader/pptx_reader.py +101 -0
  152. agno/knowledge/reader/reader_factory.py +313 -0
  153. agno/knowledge/reader/s3_reader.py +89 -0
  154. agno/knowledge/reader/tavily_reader.py +193 -0
  155. agno/knowledge/reader/text_reader.py +127 -0
  156. agno/knowledge/reader/web_search_reader.py +325 -0
  157. agno/knowledge/reader/website_reader.py +455 -0
  158. agno/knowledge/reader/wikipedia_reader.py +91 -0
  159. agno/knowledge/reader/youtube_reader.py +78 -0
  160. agno/knowledge/remote_content/remote_content.py +88 -0
  161. agno/knowledge/reranker/__init__.py +3 -0
  162. agno/{reranker → knowledge/reranker}/base.py +1 -1
  163. agno/{reranker → knowledge/reranker}/cohere.py +2 -2
  164. agno/knowledge/reranker/infinity.py +195 -0
  165. agno/knowledge/reranker/sentence_transformer.py +54 -0
  166. agno/knowledge/types.py +39 -0
  167. agno/knowledge/utils.py +234 -0
  168. agno/media.py +439 -95
  169. agno/memory/__init__.py +16 -3
  170. agno/memory/manager.py +1474 -123
  171. agno/memory/strategies/__init__.py +15 -0
  172. agno/memory/strategies/base.py +66 -0
  173. agno/memory/strategies/summarize.py +196 -0
  174. agno/memory/strategies/types.py +37 -0
  175. agno/models/aimlapi/__init__.py +5 -0
  176. agno/models/aimlapi/aimlapi.py +62 -0
  177. agno/models/anthropic/__init__.py +4 -0
  178. agno/models/anthropic/claude.py +960 -496
  179. agno/models/aws/__init__.py +15 -0
  180. agno/models/aws/bedrock.py +686 -451
  181. agno/models/aws/claude.py +190 -183
  182. agno/models/azure/__init__.py +18 -1
  183. agno/models/azure/ai_foundry.py +489 -0
  184. agno/models/azure/openai_chat.py +89 -40
  185. agno/models/base.py +2477 -550
  186. agno/models/cerebras/__init__.py +12 -0
  187. agno/models/cerebras/cerebras.py +565 -0
  188. agno/models/cerebras/cerebras_openai.py +131 -0
  189. agno/models/cohere/__init__.py +4 -0
  190. agno/models/cohere/chat.py +306 -492
  191. agno/models/cometapi/__init__.py +5 -0
  192. agno/models/cometapi/cometapi.py +74 -0
  193. agno/models/dashscope/__init__.py +5 -0
  194. agno/models/dashscope/dashscope.py +90 -0
  195. agno/models/deepinfra/__init__.py +5 -0
  196. agno/models/deepinfra/deepinfra.py +45 -0
  197. agno/models/deepseek/__init__.py +4 -0
  198. agno/models/deepseek/deepseek.py +110 -9
  199. agno/models/fireworks/__init__.py +4 -0
  200. agno/models/fireworks/fireworks.py +19 -22
  201. agno/models/google/__init__.py +3 -7
  202. agno/models/google/gemini.py +1717 -662
  203. agno/models/google/utils.py +22 -0
  204. agno/models/groq/__init__.py +4 -0
  205. agno/models/groq/groq.py +391 -666
  206. agno/models/huggingface/__init__.py +4 -0
  207. agno/models/huggingface/huggingface.py +266 -538
  208. agno/models/ibm/__init__.py +5 -0
  209. agno/models/ibm/watsonx.py +432 -0
  210. agno/models/internlm/__init__.py +3 -0
  211. agno/models/internlm/internlm.py +20 -3
  212. agno/models/langdb/__init__.py +1 -0
  213. agno/models/langdb/langdb.py +60 -0
  214. agno/models/litellm/__init__.py +14 -0
  215. agno/models/litellm/chat.py +503 -0
  216. agno/models/litellm/litellm_openai.py +42 -0
  217. agno/models/llama_cpp/__init__.py +5 -0
  218. agno/models/llama_cpp/llama_cpp.py +22 -0
  219. agno/models/lmstudio/__init__.py +5 -0
  220. agno/models/lmstudio/lmstudio.py +25 -0
  221. agno/models/message.py +361 -39
  222. agno/models/meta/__init__.py +12 -0
  223. agno/models/meta/llama.py +502 -0
  224. agno/models/meta/llama_openai.py +79 -0
  225. agno/models/metrics.py +120 -0
  226. agno/models/mistral/__init__.py +4 -0
  227. agno/models/mistral/mistral.py +293 -393
  228. agno/models/nebius/__init__.py +3 -0
  229. agno/models/nebius/nebius.py +53 -0
  230. agno/models/nexus/__init__.py +3 -0
  231. agno/models/nexus/nexus.py +22 -0
  232. agno/models/nvidia/__init__.py +4 -0
  233. agno/models/nvidia/nvidia.py +22 -3
  234. agno/models/ollama/__init__.py +4 -2
  235. agno/models/ollama/chat.py +257 -492
  236. agno/models/openai/__init__.py +7 -0
  237. agno/models/openai/chat.py +725 -770
  238. agno/models/openai/like.py +16 -2
  239. agno/models/openai/responses.py +1121 -0
  240. agno/models/openrouter/__init__.py +4 -0
  241. agno/models/openrouter/openrouter.py +62 -5
  242. agno/models/perplexity/__init__.py +5 -0
  243. agno/models/perplexity/perplexity.py +203 -0
  244. agno/models/portkey/__init__.py +3 -0
  245. agno/models/portkey/portkey.py +82 -0
  246. agno/models/requesty/__init__.py +5 -0
  247. agno/models/requesty/requesty.py +69 -0
  248. agno/models/response.py +177 -7
  249. agno/models/sambanova/__init__.py +4 -0
  250. agno/models/sambanova/sambanova.py +23 -4
  251. agno/models/siliconflow/__init__.py +5 -0
  252. agno/models/siliconflow/siliconflow.py +42 -0
  253. agno/models/together/__init__.py +4 -0
  254. agno/models/together/together.py +21 -164
  255. agno/models/utils.py +266 -0
  256. agno/models/vercel/__init__.py +3 -0
  257. agno/models/vercel/v0.py +43 -0
  258. agno/models/vertexai/__init__.py +0 -1
  259. agno/models/vertexai/claude.py +190 -0
  260. agno/models/vllm/__init__.py +3 -0
  261. agno/models/vllm/vllm.py +83 -0
  262. agno/models/xai/__init__.py +2 -0
  263. agno/models/xai/xai.py +111 -7
  264. agno/os/__init__.py +3 -0
  265. agno/os/app.py +1027 -0
  266. agno/os/auth.py +244 -0
  267. agno/os/config.py +126 -0
  268. agno/os/interfaces/__init__.py +1 -0
  269. agno/os/interfaces/a2a/__init__.py +3 -0
  270. agno/os/interfaces/a2a/a2a.py +42 -0
  271. agno/os/interfaces/a2a/router.py +249 -0
  272. agno/os/interfaces/a2a/utils.py +924 -0
  273. agno/os/interfaces/agui/__init__.py +3 -0
  274. agno/os/interfaces/agui/agui.py +47 -0
  275. agno/os/interfaces/agui/router.py +147 -0
  276. agno/os/interfaces/agui/utils.py +574 -0
  277. agno/os/interfaces/base.py +25 -0
  278. agno/os/interfaces/slack/__init__.py +3 -0
  279. agno/os/interfaces/slack/router.py +148 -0
  280. agno/os/interfaces/slack/security.py +30 -0
  281. agno/os/interfaces/slack/slack.py +47 -0
  282. agno/os/interfaces/whatsapp/__init__.py +3 -0
  283. agno/os/interfaces/whatsapp/router.py +210 -0
  284. agno/os/interfaces/whatsapp/security.py +55 -0
  285. agno/os/interfaces/whatsapp/whatsapp.py +36 -0
  286. agno/os/mcp.py +293 -0
  287. agno/os/middleware/__init__.py +9 -0
  288. agno/os/middleware/jwt.py +797 -0
  289. agno/os/router.py +258 -0
  290. agno/os/routers/__init__.py +3 -0
  291. agno/os/routers/agents/__init__.py +3 -0
  292. agno/os/routers/agents/router.py +599 -0
  293. agno/os/routers/agents/schema.py +261 -0
  294. agno/os/routers/evals/__init__.py +3 -0
  295. agno/os/routers/evals/evals.py +450 -0
  296. agno/os/routers/evals/schemas.py +174 -0
  297. agno/os/routers/evals/utils.py +231 -0
  298. agno/os/routers/health.py +31 -0
  299. agno/os/routers/home.py +52 -0
  300. agno/os/routers/knowledge/__init__.py +3 -0
  301. agno/os/routers/knowledge/knowledge.py +1008 -0
  302. agno/os/routers/knowledge/schemas.py +178 -0
  303. agno/os/routers/memory/__init__.py +3 -0
  304. agno/os/routers/memory/memory.py +661 -0
  305. agno/os/routers/memory/schemas.py +88 -0
  306. agno/os/routers/metrics/__init__.py +3 -0
  307. agno/os/routers/metrics/metrics.py +190 -0
  308. agno/os/routers/metrics/schemas.py +47 -0
  309. agno/os/routers/session/__init__.py +3 -0
  310. agno/os/routers/session/session.py +997 -0
  311. agno/os/routers/teams/__init__.py +3 -0
  312. agno/os/routers/teams/router.py +512 -0
  313. agno/os/routers/teams/schema.py +257 -0
  314. agno/os/routers/traces/__init__.py +3 -0
  315. agno/os/routers/traces/schemas.py +414 -0
  316. agno/os/routers/traces/traces.py +499 -0
  317. agno/os/routers/workflows/__init__.py +3 -0
  318. agno/os/routers/workflows/router.py +624 -0
  319. agno/os/routers/workflows/schema.py +75 -0
  320. agno/os/schema.py +534 -0
  321. agno/os/scopes.py +469 -0
  322. agno/{playground → os}/settings.py +7 -15
  323. agno/os/utils.py +973 -0
  324. agno/reasoning/anthropic.py +80 -0
  325. agno/reasoning/azure_ai_foundry.py +67 -0
  326. agno/reasoning/deepseek.py +63 -0
  327. agno/reasoning/default.py +97 -0
  328. agno/reasoning/gemini.py +73 -0
  329. agno/reasoning/groq.py +71 -0
  330. agno/reasoning/helpers.py +24 -1
  331. agno/reasoning/ollama.py +67 -0
  332. agno/reasoning/openai.py +86 -0
  333. agno/reasoning/step.py +2 -1
  334. agno/reasoning/vertexai.py +76 -0
  335. agno/run/__init__.py +6 -0
  336. agno/run/agent.py +822 -0
  337. agno/run/base.py +247 -0
  338. agno/run/cancel.py +81 -0
  339. agno/run/requirement.py +181 -0
  340. agno/run/team.py +767 -0
  341. agno/run/workflow.py +708 -0
  342. agno/session/__init__.py +10 -0
  343. agno/session/agent.py +260 -0
  344. agno/session/summary.py +265 -0
  345. agno/session/team.py +342 -0
  346. agno/session/workflow.py +501 -0
  347. agno/table.py +10 -0
  348. agno/team/__init__.py +37 -0
  349. agno/team/team.py +9536 -0
  350. agno/tools/__init__.py +7 -0
  351. agno/tools/agentql.py +120 -0
  352. agno/tools/airflow.py +22 -12
  353. agno/tools/api.py +122 -0
  354. agno/tools/apify.py +276 -83
  355. agno/tools/{arxiv_toolkit.py → arxiv.py} +20 -12
  356. agno/tools/aws_lambda.py +28 -7
  357. agno/tools/aws_ses.py +66 -0
  358. agno/tools/baidusearch.py +11 -4
  359. agno/tools/bitbucket.py +292 -0
  360. agno/tools/brandfetch.py +213 -0
  361. agno/tools/bravesearch.py +106 -0
  362. agno/tools/brightdata.py +367 -0
  363. agno/tools/browserbase.py +209 -0
  364. agno/tools/calcom.py +32 -23
  365. agno/tools/calculator.py +24 -37
  366. agno/tools/cartesia.py +187 -0
  367. agno/tools/{clickup_tool.py → clickup.py} +17 -28
  368. agno/tools/confluence.py +91 -26
  369. agno/tools/crawl4ai.py +139 -43
  370. agno/tools/csv_toolkit.py +28 -22
  371. agno/tools/dalle.py +36 -22
  372. agno/tools/daytona.py +475 -0
  373. agno/tools/decorator.py +169 -14
  374. agno/tools/desi_vocal.py +23 -11
  375. agno/tools/discord.py +32 -29
  376. agno/tools/docker.py +716 -0
  377. agno/tools/duckdb.py +76 -81
  378. agno/tools/duckduckgo.py +43 -40
  379. agno/tools/e2b.py +703 -0
  380. agno/tools/eleven_labs.py +65 -54
  381. agno/tools/email.py +13 -5
  382. agno/tools/evm.py +129 -0
  383. agno/tools/exa.py +324 -42
  384. agno/tools/fal.py +39 -35
  385. agno/tools/file.py +196 -30
  386. agno/tools/file_generation.py +356 -0
  387. agno/tools/financial_datasets.py +288 -0
  388. agno/tools/firecrawl.py +108 -33
  389. agno/tools/function.py +960 -122
  390. agno/tools/giphy.py +34 -12
  391. agno/tools/github.py +1294 -97
  392. agno/tools/gmail.py +922 -0
  393. agno/tools/google_bigquery.py +117 -0
  394. agno/tools/google_drive.py +271 -0
  395. agno/tools/google_maps.py +253 -0
  396. agno/tools/googlecalendar.py +607 -107
  397. agno/tools/googlesheets.py +377 -0
  398. agno/tools/hackernews.py +20 -12
  399. agno/tools/jina.py +24 -14
  400. agno/tools/jira.py +48 -19
  401. agno/tools/knowledge.py +218 -0
  402. agno/tools/linear.py +82 -43
  403. agno/tools/linkup.py +58 -0
  404. agno/tools/local_file_system.py +15 -7
  405. agno/tools/lumalab.py +41 -26
  406. agno/tools/mcp/__init__.py +10 -0
  407. agno/tools/mcp/mcp.py +331 -0
  408. agno/tools/mcp/multi_mcp.py +347 -0
  409. agno/tools/mcp/params.py +24 -0
  410. agno/tools/mcp_toolbox.py +284 -0
  411. agno/tools/mem0.py +193 -0
  412. agno/tools/memory.py +419 -0
  413. agno/tools/mlx_transcribe.py +11 -9
  414. agno/tools/models/azure_openai.py +190 -0
  415. agno/tools/models/gemini.py +203 -0
  416. agno/tools/models/groq.py +158 -0
  417. agno/tools/models/morph.py +186 -0
  418. agno/tools/models/nebius.py +124 -0
  419. agno/tools/models_labs.py +163 -82
  420. agno/tools/moviepy_video.py +18 -13
  421. agno/tools/nano_banana.py +151 -0
  422. agno/tools/neo4j.py +134 -0
  423. agno/tools/newspaper.py +15 -4
  424. agno/tools/newspaper4k.py +19 -6
  425. agno/tools/notion.py +204 -0
  426. agno/tools/openai.py +181 -17
  427. agno/tools/openbb.py +27 -20
  428. agno/tools/opencv.py +321 -0
  429. agno/tools/openweather.py +233 -0
  430. agno/tools/oxylabs.py +385 -0
  431. agno/tools/pandas.py +25 -15
  432. agno/tools/parallel.py +314 -0
  433. agno/tools/postgres.py +238 -185
  434. agno/tools/pubmed.py +125 -13
  435. agno/tools/python.py +48 -35
  436. agno/tools/reasoning.py +283 -0
  437. agno/tools/reddit.py +207 -29
  438. agno/tools/redshift.py +406 -0
  439. agno/tools/replicate.py +69 -26
  440. agno/tools/resend.py +11 -6
  441. agno/tools/scrapegraph.py +179 -19
  442. agno/tools/searxng.py +23 -31
  443. agno/tools/serpapi.py +15 -10
  444. agno/tools/serper.py +255 -0
  445. agno/tools/shell.py +23 -12
  446. agno/tools/shopify.py +1519 -0
  447. agno/tools/slack.py +56 -14
  448. agno/tools/sleep.py +8 -6
  449. agno/tools/spider.py +35 -11
  450. agno/tools/spotify.py +919 -0
  451. agno/tools/sql.py +34 -19
  452. agno/tools/tavily.py +158 -8
  453. agno/tools/telegram.py +18 -8
  454. agno/tools/todoist.py +218 -0
  455. agno/tools/toolkit.py +134 -9
  456. agno/tools/trafilatura.py +388 -0
  457. agno/tools/trello.py +25 -28
  458. agno/tools/twilio.py +18 -9
  459. agno/tools/user_control_flow.py +78 -0
  460. agno/tools/valyu.py +228 -0
  461. agno/tools/visualization.py +467 -0
  462. agno/tools/webbrowser.py +28 -0
  463. agno/tools/webex.py +76 -0
  464. agno/tools/website.py +23 -19
  465. agno/tools/webtools.py +45 -0
  466. agno/tools/whatsapp.py +286 -0
  467. agno/tools/wikipedia.py +28 -19
  468. agno/tools/workflow.py +285 -0
  469. agno/tools/{twitter.py → x.py} +142 -46
  470. agno/tools/yfinance.py +41 -39
  471. agno/tools/youtube.py +34 -17
  472. agno/tools/zendesk.py +15 -5
  473. agno/tools/zep.py +454 -0
  474. agno/tools/zoom.py +86 -37
  475. agno/tracing/__init__.py +12 -0
  476. agno/tracing/exporter.py +157 -0
  477. agno/tracing/schemas.py +276 -0
  478. agno/tracing/setup.py +111 -0
  479. agno/utils/agent.py +938 -0
  480. agno/utils/audio.py +37 -1
  481. agno/utils/certs.py +27 -0
  482. agno/utils/code_execution.py +11 -0
  483. agno/utils/common.py +103 -20
  484. agno/utils/cryptography.py +22 -0
  485. agno/utils/dttm.py +33 -0
  486. agno/utils/events.py +700 -0
  487. agno/utils/functions.py +107 -37
  488. agno/utils/gemini.py +426 -0
  489. agno/utils/hooks.py +171 -0
  490. agno/utils/http.py +185 -0
  491. agno/utils/json_schema.py +159 -37
  492. agno/utils/knowledge.py +36 -0
  493. agno/utils/location.py +19 -0
  494. agno/utils/log.py +221 -8
  495. agno/utils/mcp.py +214 -0
  496. agno/utils/media.py +335 -14
  497. agno/utils/merge_dict.py +22 -1
  498. agno/utils/message.py +77 -2
  499. agno/utils/models/ai_foundry.py +50 -0
  500. agno/utils/models/claude.py +373 -0
  501. agno/utils/models/cohere.py +94 -0
  502. agno/utils/models/llama.py +85 -0
  503. agno/utils/models/mistral.py +100 -0
  504. agno/utils/models/openai_responses.py +140 -0
  505. agno/utils/models/schema_utils.py +153 -0
  506. agno/utils/models/watsonx.py +41 -0
  507. agno/utils/openai.py +257 -0
  508. agno/utils/pickle.py +1 -1
  509. agno/utils/pprint.py +124 -8
  510. agno/utils/print_response/agent.py +930 -0
  511. agno/utils/print_response/team.py +1914 -0
  512. agno/utils/print_response/workflow.py +1668 -0
  513. agno/utils/prompts.py +111 -0
  514. agno/utils/reasoning.py +108 -0
  515. agno/utils/response.py +163 -0
  516. agno/utils/serialize.py +32 -0
  517. agno/utils/shell.py +4 -4
  518. agno/utils/streamlit.py +487 -0
  519. agno/utils/string.py +204 -51
  520. agno/utils/team.py +139 -0
  521. agno/utils/timer.py +9 -2
  522. agno/utils/tokens.py +657 -0
  523. agno/utils/tools.py +19 -1
  524. agno/utils/whatsapp.py +305 -0
  525. agno/utils/yaml_io.py +3 -3
  526. agno/vectordb/__init__.py +2 -0
  527. agno/vectordb/base.py +87 -9
  528. agno/vectordb/cassandra/__init__.py +5 -1
  529. agno/vectordb/cassandra/cassandra.py +383 -27
  530. agno/vectordb/chroma/__init__.py +4 -0
  531. agno/vectordb/chroma/chromadb.py +748 -83
  532. agno/vectordb/clickhouse/__init__.py +7 -1
  533. agno/vectordb/clickhouse/clickhousedb.py +554 -53
  534. agno/vectordb/couchbase/__init__.py +3 -0
  535. agno/vectordb/couchbase/couchbase.py +1446 -0
  536. agno/vectordb/lancedb/__init__.py +5 -0
  537. agno/vectordb/lancedb/lance_db.py +730 -98
  538. agno/vectordb/langchaindb/__init__.py +5 -0
  539. agno/vectordb/langchaindb/langchaindb.py +163 -0
  540. agno/vectordb/lightrag/__init__.py +5 -0
  541. agno/vectordb/lightrag/lightrag.py +388 -0
  542. agno/vectordb/llamaindex/__init__.py +3 -0
  543. agno/vectordb/llamaindex/llamaindexdb.py +166 -0
  544. agno/vectordb/milvus/__init__.py +3 -0
  545. agno/vectordb/milvus/milvus.py +966 -78
  546. agno/vectordb/mongodb/__init__.py +9 -1
  547. agno/vectordb/mongodb/mongodb.py +1175 -172
  548. agno/vectordb/pgvector/__init__.py +8 -0
  549. agno/vectordb/pgvector/pgvector.py +599 -115
  550. agno/vectordb/pineconedb/__init__.py +5 -1
  551. agno/vectordb/pineconedb/pineconedb.py +406 -43
  552. agno/vectordb/qdrant/__init__.py +4 -0
  553. agno/vectordb/qdrant/qdrant.py +914 -61
  554. agno/vectordb/redis/__init__.py +9 -0
  555. agno/vectordb/redis/redisdb.py +682 -0
  556. agno/vectordb/singlestore/__init__.py +8 -1
  557. agno/vectordb/singlestore/singlestore.py +771 -0
  558. agno/vectordb/surrealdb/__init__.py +3 -0
  559. agno/vectordb/surrealdb/surrealdb.py +663 -0
  560. agno/vectordb/upstashdb/__init__.py +5 -0
  561. agno/vectordb/upstashdb/upstashdb.py +718 -0
  562. agno/vectordb/weaviate/__init__.py +8 -0
  563. agno/vectordb/weaviate/index.py +15 -0
  564. agno/vectordb/weaviate/weaviate.py +1009 -0
  565. agno/workflow/__init__.py +23 -1
  566. agno/workflow/agent.py +299 -0
  567. agno/workflow/condition.py +759 -0
  568. agno/workflow/loop.py +756 -0
  569. agno/workflow/parallel.py +853 -0
  570. agno/workflow/router.py +723 -0
  571. agno/workflow/step.py +1564 -0
  572. agno/workflow/steps.py +613 -0
  573. agno/workflow/types.py +556 -0
  574. agno/workflow/workflow.py +4327 -514
  575. agno-2.3.13.dist-info/METADATA +639 -0
  576. agno-2.3.13.dist-info/RECORD +613 -0
  577. {agno-0.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +1 -1
  578. agno-2.3.13.dist-info/licenses/LICENSE +201 -0
  579. agno/api/playground.py +0 -91
  580. agno/api/schemas/playground.py +0 -22
  581. agno/api/schemas/user.py +0 -22
  582. agno/api/schemas/workspace.py +0 -46
  583. agno/api/user.py +0 -160
  584. agno/api/workspace.py +0 -151
  585. agno/cli/auth_server.py +0 -118
  586. agno/cli/config.py +0 -275
  587. agno/cli/console.py +0 -88
  588. agno/cli/credentials.py +0 -23
  589. agno/cli/entrypoint.py +0 -571
  590. agno/cli/operator.py +0 -355
  591. agno/cli/settings.py +0 -85
  592. agno/cli/ws/ws_cli.py +0 -817
  593. agno/constants.py +0 -13
  594. agno/document/__init__.py +0 -1
  595. agno/document/chunking/semantic.py +0 -47
  596. agno/document/chunking/strategy.py +0 -31
  597. agno/document/reader/__init__.py +0 -1
  598. agno/document/reader/arxiv_reader.py +0 -41
  599. agno/document/reader/base.py +0 -22
  600. agno/document/reader/csv_reader.py +0 -84
  601. agno/document/reader/docx_reader.py +0 -46
  602. agno/document/reader/firecrawl_reader.py +0 -99
  603. agno/document/reader/json_reader.py +0 -43
  604. agno/document/reader/pdf_reader.py +0 -219
  605. agno/document/reader/s3/pdf_reader.py +0 -46
  606. agno/document/reader/s3/text_reader.py +0 -51
  607. agno/document/reader/text_reader.py +0 -41
  608. agno/document/reader/website_reader.py +0 -175
  609. agno/document/reader/youtube_reader.py +0 -50
  610. agno/embedder/__init__.py +0 -1
  611. agno/embedder/azure_openai.py +0 -86
  612. agno/embedder/cohere.py +0 -72
  613. agno/embedder/fastembed.py +0 -37
  614. agno/embedder/google.py +0 -73
  615. agno/embedder/huggingface.py +0 -54
  616. agno/embedder/mistral.py +0 -80
  617. agno/embedder/ollama.py +0 -57
  618. agno/embedder/openai.py +0 -74
  619. agno/embedder/sentence_transformer.py +0 -38
  620. agno/embedder/voyageai.py +0 -64
  621. agno/eval/perf.py +0 -201
  622. agno/file/__init__.py +0 -1
  623. agno/file/file.py +0 -16
  624. agno/file/local/csv.py +0 -32
  625. agno/file/local/txt.py +0 -19
  626. agno/infra/app.py +0 -240
  627. agno/infra/base.py +0 -144
  628. agno/infra/context.py +0 -20
  629. agno/infra/db_app.py +0 -52
  630. agno/infra/resource.py +0 -205
  631. agno/infra/resources.py +0 -55
  632. agno/knowledge/agent.py +0 -230
  633. agno/knowledge/arxiv.py +0 -22
  634. agno/knowledge/combined.py +0 -22
  635. agno/knowledge/csv.py +0 -28
  636. agno/knowledge/csv_url.py +0 -19
  637. agno/knowledge/document.py +0 -20
  638. agno/knowledge/docx.py +0 -30
  639. agno/knowledge/json.py +0 -28
  640. agno/knowledge/langchain.py +0 -71
  641. agno/knowledge/llamaindex.py +0 -66
  642. agno/knowledge/pdf.py +0 -28
  643. agno/knowledge/pdf_url.py +0 -26
  644. agno/knowledge/s3/base.py +0 -60
  645. agno/knowledge/s3/pdf.py +0 -21
  646. agno/knowledge/s3/text.py +0 -23
  647. agno/knowledge/text.py +0 -30
  648. agno/knowledge/website.py +0 -88
  649. agno/knowledge/wikipedia.py +0 -31
  650. agno/knowledge/youtube.py +0 -22
  651. agno/memory/agent.py +0 -392
  652. agno/memory/classifier.py +0 -104
  653. agno/memory/db/__init__.py +0 -1
  654. agno/memory/db/base.py +0 -42
  655. agno/memory/db/mongodb.py +0 -189
  656. agno/memory/db/postgres.py +0 -203
  657. agno/memory/db/sqlite.py +0 -193
  658. agno/memory/memory.py +0 -15
  659. agno/memory/row.py +0 -36
  660. agno/memory/summarizer.py +0 -192
  661. agno/memory/summary.py +0 -19
  662. agno/memory/workflow.py +0 -38
  663. agno/models/google/gemini_openai.py +0 -26
  664. agno/models/ollama/hermes.py +0 -221
  665. agno/models/ollama/tools.py +0 -362
  666. agno/models/vertexai/gemini.py +0 -595
  667. agno/playground/__init__.py +0 -3
  668. agno/playground/async_router.py +0 -421
  669. agno/playground/deploy.py +0 -249
  670. agno/playground/operator.py +0 -92
  671. agno/playground/playground.py +0 -91
  672. agno/playground/schemas.py +0 -76
  673. agno/playground/serve.py +0 -55
  674. agno/playground/sync_router.py +0 -405
  675. agno/reasoning/agent.py +0 -68
  676. agno/run/response.py +0 -112
  677. agno/storage/agent/__init__.py +0 -0
  678. agno/storage/agent/base.py +0 -38
  679. agno/storage/agent/dynamodb.py +0 -350
  680. agno/storage/agent/json.py +0 -92
  681. agno/storage/agent/mongodb.py +0 -228
  682. agno/storage/agent/postgres.py +0 -367
  683. agno/storage/agent/session.py +0 -79
  684. agno/storage/agent/singlestore.py +0 -303
  685. agno/storage/agent/sqlite.py +0 -357
  686. agno/storage/agent/yaml.py +0 -93
  687. agno/storage/workflow/__init__.py +0 -0
  688. agno/storage/workflow/base.py +0 -40
  689. agno/storage/workflow/mongodb.py +0 -233
  690. agno/storage/workflow/postgres.py +0 -366
  691. agno/storage/workflow/session.py +0 -60
  692. agno/storage/workflow/sqlite.py +0 -359
  693. agno/tools/googlesearch.py +0 -88
  694. agno/utils/defaults.py +0 -57
  695. agno/utils/filesystem.py +0 -39
  696. agno/utils/git.py +0 -52
  697. agno/utils/json_io.py +0 -30
  698. agno/utils/load_env.py +0 -19
  699. agno/utils/py_io.py +0 -19
  700. agno/utils/pyproject.py +0 -18
  701. agno/utils/resource_filter.py +0 -31
  702. agno/vectordb/singlestore/s2vectordb.py +0 -390
  703. agno/vectordb/singlestore/s2vectordb2.py +0 -355
  704. agno/workspace/__init__.py +0 -0
  705. agno/workspace/config.py +0 -325
  706. agno/workspace/enums.py +0 -6
  707. agno/workspace/helpers.py +0 -48
  708. agno/workspace/operator.py +0 -758
  709. agno/workspace/settings.py +0 -63
  710. agno-0.1.2.dist-info/LICENSE +0 -375
  711. agno-0.1.2.dist-info/METADATA +0 -502
  712. agno-0.1.2.dist-info/RECORD +0 -352
  713. agno-0.1.2.dist-info/entry_points.txt +0 -3
  714. /agno/{cli → db/migrations}/__init__.py +0 -0
  715. /agno/{cli/ws → db/migrations/versions}/__init__.py +0 -0
  716. /agno/{document/chunking/__init__.py → db/schemas/metrics.py} +0 -0
  717. /agno/{document/reader/s3 → integrations}/__init__.py +0 -0
  718. /agno/{file/local → knowledge/chunking}/__init__.py +0 -0
  719. /agno/{infra → knowledge/remote_content}/__init__.py +0 -0
  720. /agno/{knowledge/s3 → tools/models}/__init__.py +0 -0
  721. /agno/{reranker → utils/models}/__init__.py +0 -0
  722. /agno/{storage → utils/print_response}/__init__.py +0 -0
  723. {agno-0.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,2379 @@
1
+ import json
2
+ import time
3
+ from datetime import date, datetime, timedelta, timezone
4
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
5
+ from uuid import uuid4
6
+
7
+ if TYPE_CHECKING:
8
+ from agno.tracing.schemas import Span, Trace
9
+
10
+ from agno.db.base import BaseDb, SessionType
11
+ from agno.db.firestore.utils import (
12
+ apply_pagination,
13
+ apply_pagination_to_records,
14
+ apply_sorting,
15
+ apply_sorting_to_records,
16
+ bulk_upsert_metrics,
17
+ calculate_date_metrics,
18
+ create_collection_indexes,
19
+ deserialize_cultural_knowledge_from_db,
20
+ fetch_all_sessions_data,
21
+ get_dates_to_calculate_metrics_for,
22
+ serialize_cultural_knowledge_for_db,
23
+ )
24
+ from agno.db.schemas.culture import CulturalKnowledge
25
+ from agno.db.schemas.evals import EvalFilterType, EvalRunRecord, EvalType
26
+ from agno.db.schemas.knowledge import KnowledgeRow
27
+ from agno.db.schemas.memory import UserMemory
28
+ from agno.db.utils import deserialize_session_json_fields, serialize_session_json_fields
29
+ from agno.session import AgentSession, Session, TeamSession, WorkflowSession
30
+ from agno.utils.log import log_debug, log_error, log_info
31
+ from agno.utils.string import generate_id
32
+
33
+ try:
34
+ from google.cloud.firestore import Client, FieldFilter # type: ignore[import-untyped]
35
+ except ImportError:
36
+ raise ImportError(
37
+ "`google-cloud-firestore` not installed. Please install it using `pip install google-cloud-firestore`"
38
+ )
39
+
40
+
41
+ class FirestoreDb(BaseDb):
42
+ def __init__(
43
+ self,
44
+ db_client: Optional[Client] = None,
45
+ project_id: Optional[str] = None,
46
+ session_collection: Optional[str] = None,
47
+ memory_collection: Optional[str] = None,
48
+ metrics_collection: Optional[str] = None,
49
+ eval_collection: Optional[str] = None,
50
+ knowledge_collection: Optional[str] = None,
51
+ culture_collection: Optional[str] = None,
52
+ traces_collection: Optional[str] = None,
53
+ spans_collection: Optional[str] = None,
54
+ id: Optional[str] = None,
55
+ ):
56
+ """
57
+ Interface for interacting with a Firestore database.
58
+
59
+ Args:
60
+ db_client (Optional[Client]): The Firestore client to use.
61
+ project_id (Optional[str]): The GCP project ID for Firestore.
62
+ session_collection (Optional[str]): Name of the collection to store sessions.
63
+ memory_collection (Optional[str]): Name of the collection to store memories.
64
+ metrics_collection (Optional[str]): Name of the collection to store metrics.
65
+ eval_collection (Optional[str]): Name of the collection to store evaluation runs.
66
+ knowledge_collection (Optional[str]): Name of the collection to store knowledge documents.
67
+ culture_collection (Optional[str]): Name of the collection to store cultural knowledge.
68
+ traces_collection (Optional[str]): Name of the collection to store traces.
69
+ spans_collection (Optional[str]): Name of the collection to store spans.
70
+ id (Optional[str]): ID of the database.
71
+
72
+ Raises:
73
+ ValueError: If neither project_id nor db_client is provided.
74
+ """
75
+ if id is None:
76
+ seed = project_id or str(db_client)
77
+ id = generate_id(seed)
78
+
79
+ super().__init__(
80
+ id=id,
81
+ session_table=session_collection,
82
+ memory_table=memory_collection,
83
+ metrics_table=metrics_collection,
84
+ eval_table=eval_collection,
85
+ knowledge_table=knowledge_collection,
86
+ culture_table=culture_collection,
87
+ traces_table=traces_collection,
88
+ spans_table=spans_collection,
89
+ )
90
+
91
+ _client: Optional[Client] = db_client
92
+ if _client is None and project_id is not None:
93
+ _client = Client(project=project_id)
94
+ if _client is None:
95
+ raise ValueError("One of project_id or db_client must be provided")
96
+
97
+ self.project_id: Optional[str] = project_id
98
+ self.db_client: Client = _client
99
+
100
+ # -- DB methods --
101
+
102
+ def table_exists(self, table_name: str) -> bool:
103
+ """Check if a collection with the given name exists in the Firestore database.
104
+
105
+ Args:
106
+ table_name: Name of the collection to check
107
+
108
+ Returns:
109
+ bool: True if the collection exists in the database, False otherwise
110
+ """
111
+ return table_name in self.db_client.list_collections()
112
+
113
+ def _get_collection(self, table_type: str, create_collection_if_not_found: Optional[bool] = True):
114
+ """Get or create a collection based on table type.
115
+
116
+ Args:
117
+ table_type (str): The type of table to get or create.
118
+ create_collection_if_not_found (Optional[bool]): Whether to create the collection if it doesn't exist.
119
+
120
+ Returns:
121
+ CollectionReference: The collection reference.
122
+ """
123
+ if table_type == "sessions":
124
+ if not hasattr(self, "session_collection"):
125
+ if self.session_table_name is None:
126
+ raise ValueError("Session collection was not provided on initialization")
127
+ self.session_collection = self._get_or_create_collection(
128
+ collection_name=self.session_table_name,
129
+ collection_type="sessions",
130
+ create_collection_if_not_found=create_collection_if_not_found,
131
+ )
132
+ return self.session_collection
133
+
134
+ if table_type == "memories":
135
+ if not hasattr(self, "memory_collection"):
136
+ if self.memory_table_name is None:
137
+ raise ValueError("Memory collection was not provided on initialization")
138
+ self.memory_collection = self._get_or_create_collection(
139
+ collection_name=self.memory_table_name,
140
+ collection_type="memories",
141
+ create_collection_if_not_found=create_collection_if_not_found,
142
+ )
143
+ return self.memory_collection
144
+
145
+ if table_type == "metrics":
146
+ if not hasattr(self, "metrics_collection"):
147
+ if self.metrics_table_name is None:
148
+ raise ValueError("Metrics collection was not provided on initialization")
149
+ self.metrics_collection = self._get_or_create_collection(
150
+ collection_name=self.metrics_table_name,
151
+ collection_type="metrics",
152
+ create_collection_if_not_found=create_collection_if_not_found,
153
+ )
154
+ return self.metrics_collection
155
+
156
+ if table_type == "evals":
157
+ if not hasattr(self, "eval_collection"):
158
+ if self.eval_table_name is None:
159
+ raise ValueError("Eval collection was not provided on initialization")
160
+ self.eval_collection = self._get_or_create_collection(
161
+ collection_name=self.eval_table_name,
162
+ collection_type="evals",
163
+ create_collection_if_not_found=create_collection_if_not_found,
164
+ )
165
+ return self.eval_collection
166
+
167
+ if table_type == "knowledge":
168
+ if not hasattr(self, "knowledge_collection"):
169
+ if self.knowledge_table_name is None:
170
+ raise ValueError("Knowledge collection was not provided on initialization")
171
+ self.knowledge_collection = self._get_or_create_collection(
172
+ collection_name=self.knowledge_table_name,
173
+ collection_type="knowledge",
174
+ create_collection_if_not_found=create_collection_if_not_found,
175
+ )
176
+ return self.knowledge_collection
177
+
178
+ if table_type == "culture":
179
+ if not hasattr(self, "culture_collection"):
180
+ if self.culture_table_name is None:
181
+ raise ValueError("Culture collection was not provided on initialization")
182
+ self.culture_collection = self._get_or_create_collection(
183
+ collection_name=self.culture_table_name,
184
+ collection_type="culture",
185
+ create_collection_if_not_found=create_collection_if_not_found,
186
+ )
187
+ return self.culture_collection
188
+
189
+ if table_type == "traces":
190
+ if not hasattr(self, "traces_collection"):
191
+ if self.trace_table_name is None:
192
+ raise ValueError("Traces collection was not provided on initialization")
193
+ self.traces_collection = self._get_or_create_collection(
194
+ collection_name=self.trace_table_name,
195
+ collection_type="traces",
196
+ create_collection_if_not_found=create_collection_if_not_found,
197
+ )
198
+ return self.traces_collection
199
+
200
+ if table_type == "spans":
201
+ # Ensure traces collection exists first (spans reference traces)
202
+ self._get_collection("traces", create_collection_if_not_found=create_collection_if_not_found)
203
+ if not hasattr(self, "spans_collection"):
204
+ if self.span_table_name is None:
205
+ raise ValueError("Spans collection was not provided on initialization")
206
+ self.spans_collection = self._get_or_create_collection(
207
+ collection_name=self.span_table_name,
208
+ collection_type="spans",
209
+ create_collection_if_not_found=create_collection_if_not_found,
210
+ )
211
+ return self.spans_collection
212
+
213
+ raise ValueError(f"Unknown table type: {table_type}")
214
+
215
    def _get_or_create_collection(
        self, collection_name: str, collection_type: str, create_collection_if_not_found: Optional[bool] = True
    ):
        """Get or create a collection with proper indexes.

        A per-collection `_<name>_initialized` flag on `self` ensures indexes are
        only created once per FirestoreDb instance.

        Args:
            collection_name (str): The name of the collection to get or create.
            collection_type (str): The type of collection to get or create.
            create_collection_if_not_found (Optional[bool]): Whether to create the collection if it doesn't exist.

        Returns:
            Optional[CollectionReference]: The collection reference, or None when the
            collection is uninitialized and create_collection_if_not_found is falsy.

        Raises:
            Exception: Re-raises any error from the Firestore client or index creation.
        """
        try:
            # Firestore creates collections lazily; this only builds a reference.
            collection_ref = self.db_client.collection(collection_name)

            if not hasattr(self, f"_{collection_name}_initialized"):
                # NOTE(review): returning None here is cached by _get_collection's
                # hasattr check, so later calls with create_collection_if_not_found=True
                # will keep seeing None for this type — confirm this is intended.
                if not create_collection_if_not_found:
                    return None
                create_collection_indexes(self.db_client, collection_name, collection_type)
                setattr(self, f"_{collection_name}_initialized", True)

            return collection_ref

        except Exception as e:
            log_error(f"Error getting collection {collection_name}: {e}")
            raise
242
+
243
+ # -- Session methods --
244
+
245
+ def delete_session(self, session_id: str) -> bool:
246
+ """Delete a session from the database.
247
+
248
+ Args:
249
+ session_id (str): The ID of the session to delete.
250
+ session_type (SessionType): The type of session to delete. Defaults to SessionType.AGENT.
251
+
252
+ Returns:
253
+ bool: True if the session was deleted, False otherwise.
254
+
255
+ Raises:
256
+ Exception: If there is an error deleting the session.
257
+ """
258
+ try:
259
+ collection_ref = self._get_collection(table_type="sessions")
260
+ docs = collection_ref.where(filter=FieldFilter("session_id", "==", session_id)).stream()
261
+
262
+ for doc in docs:
263
+ doc.reference.delete()
264
+ log_debug(f"Successfully deleted session with session_id: {session_id}")
265
+ return True
266
+
267
+ log_debug(f"No session found to delete with session_id: {session_id}")
268
+ return False
269
+
270
+ except Exception as e:
271
+ log_error(f"Error deleting session: {e}")
272
+ raise e
273
+
274
    def get_latest_schema_version(self):
        """Get the latest version of the database schema."""
        # Schema versioning is not implemented for the Firestore backend; intentional no-op.
        pass
277
+
278
    def upsert_schema_version(self, version: str) -> None:
        """Upsert the schema version into the database."""
        # Schema versioning is not implemented for the Firestore backend; intentional no-op.
        pass
281
+
282
+ def delete_sessions(self, session_ids: List[str]) -> None:
283
+ """Delete multiple sessions from the database.
284
+
285
+ Args:
286
+ session_ids (List[str]): The IDs of the sessions to delete.
287
+ """
288
+ try:
289
+ collection_ref = self._get_collection(table_type="sessions")
290
+ batch = self.db_client.batch()
291
+
292
+ deleted_count = 0
293
+ for session_id in session_ids:
294
+ docs = collection_ref.where(filter=FieldFilter("session_id", "==", session_id)).stream()
295
+ for doc in docs:
296
+ batch.delete(doc.reference)
297
+ deleted_count += 1
298
+
299
+ batch.commit()
300
+
301
+ log_debug(f"Successfully deleted {deleted_count} sessions")
302
+
303
+ except Exception as e:
304
+ log_error(f"Error deleting sessions: {e}")
305
+ raise e
306
+
307
+ def get_session(
308
+ self,
309
+ session_id: str,
310
+ session_type: SessionType,
311
+ user_id: Optional[str] = None,
312
+ deserialize: Optional[bool] = True,
313
+ ) -> Optional[Union[Session, Dict[str, Any]]]:
314
+ """Read a session from the database.
315
+
316
+ Args:
317
+ session_id (str): The ID of the session to get.
318
+ session_type (SessionType): The type of session to get.
319
+ user_id (Optional[str]): The ID of the user to get the session for.
320
+ deserialize (Optional[bool]): Whether to serialize the session. Defaults to True.
321
+
322
+ Returns:
323
+ Union[Session, Dict[str, Any], None]:
324
+ - When deserialize=True: Session object
325
+ - When deserialize=False: Session dictionary
326
+
327
+ Raises:
328
+ Exception: If there is an error reading the session.
329
+ """
330
+ try:
331
+ collection_ref = self._get_collection(table_type="sessions")
332
+ query = collection_ref.where(filter=FieldFilter("session_id", "==", session_id))
333
+
334
+ if user_id is not None:
335
+ query = query.where(filter=FieldFilter("user_id", "==", user_id))
336
+
337
+ docs = query.stream()
338
+ result = None
339
+ for doc in docs:
340
+ result = doc.to_dict()
341
+ break
342
+
343
+ if result is None:
344
+ return None
345
+
346
+ session = deserialize_session_json_fields(result)
347
+
348
+ if not deserialize:
349
+ return session
350
+
351
+ if session_type == SessionType.AGENT:
352
+ return AgentSession.from_dict(session)
353
+ elif session_type == SessionType.TEAM:
354
+ return TeamSession.from_dict(session)
355
+ elif session_type == SessionType.WORKFLOW:
356
+ return WorkflowSession.from_dict(session)
357
+ else:
358
+ raise ValueError(f"Invalid session type: {session_type}")
359
+
360
+ except Exception as e:
361
+ log_error(f"Exception reading session: {e}")
362
+ raise e
363
+
364
    def get_sessions(
        self,
        session_type: Optional[SessionType] = None,
        user_id: Optional[str] = None,
        component_id: Optional[str] = None,
        session_name: Optional[str] = None,
        start_timestamp: Optional[int] = None,
        end_timestamp: Optional[int] = None,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
        deserialize: Optional[bool] = True,
    ) -> Union[List[Session], Tuple[List[Dict[str, Any]], int]]:
        """Get all sessions matching the given filters.

        All matching documents are fetched first; the total count and pagination are
        computed in memory after the Firestore query runs.

        Args:
            session_type (Optional[SessionType]): The type of session to get.
            user_id (Optional[str]): The ID of the user to get the session for.
            component_id (Optional[str]): The ID of the component to get the session for.
                Only applied when session_type is also provided (it selects which id
                field — agent_id / team_id / workflow_id — to filter on).
            session_name (Optional[str]): The name of the session to filter by (exact match).
            start_timestamp (Optional[int]): The start timestamp to filter sessions by.
            end_timestamp (Optional[int]): The end timestamp to filter sessions by.
            limit (Optional[int]): The limit of the sessions to get.
            page (Optional[int]): The page number to get (1-based; only used together with limit).
            sort_by (Optional[str]): The field to sort the sessions by.
            sort_order (Optional[str]): The order to sort the sessions by.
            deserialize (Optional[bool]): Whether to deserialize the sessions. Defaults to True.

        Returns:
            Union[List[Session], Tuple[List[Dict[str, Any]], int]]:
                - When deserialize=True: List of Session objects
                - When deserialize=False: List of session dictionaries and the total count
                  (the count is taken before pagination is applied)

        Raises:
            Exception: If there is an error reading the sessions.
        """
        try:
            collection_ref = self._get_collection(table_type="sessions")
            if collection_ref is None:
                return [] if deserialize else ([], 0)

            query = collection_ref

            # Build up equality/range filters; each .where() narrows the query further.
            if user_id is not None:
                query = query.where(filter=FieldFilter("user_id", "==", user_id))
            if session_type is not None:
                query = query.where(filter=FieldFilter("session_type", "==", session_type.value))
            if component_id is not None:
                # NOTE: when session_type is None, component_id is silently ignored —
                # there is no branch for that case.
                if session_type == SessionType.AGENT:
                    query = query.where(filter=FieldFilter("agent_id", "==", component_id))
                elif session_type == SessionType.TEAM:
                    query = query.where(filter=FieldFilter("team_id", "==", component_id))
                elif session_type == SessionType.WORKFLOW:
                    query = query.where(filter=FieldFilter("workflow_id", "==", component_id))
            if start_timestamp is not None:
                query = query.where(filter=FieldFilter("created_at", ">=", start_timestamp))
            if end_timestamp is not None:
                query = query.where(filter=FieldFilter("created_at", "<=", end_timestamp))
            if session_name is not None:
                query = query.where(filter=FieldFilter("session_data.session_name", "==", session_name))

            # Apply sorting
            query = apply_sorting(query, sort_by, sort_order)

            # Get all documents for counting before pagination
            all_docs = query.stream()
            all_records = [doc.to_dict() for doc in all_docs]

            if not all_records:
                return [] if deserialize else ([], 0)

            # JSON-encoded fields (runs, session_data, ...) are stored serialized.
            all_sessions_raw = [deserialize_session_json_fields(record) for record in all_records]

            # Get total count before pagination
            total_count = len(all_sessions_raw)

            # Apply pagination to the results (in memory, after the full fetch)
            if limit is not None and page is not None:
                start_index = (page - 1) * limit
                end_index = start_index + limit
                sessions_raw = all_sessions_raw[start_index:end_index]
            elif limit is not None:
                sessions_raw = all_sessions_raw[:limit]
            else:
                sessions_raw = all_sessions_raw

            if not deserialize:
                return sessions_raw, total_count

            # Rebuild the concrete session type from the stored session_type discriminator.
            # Records whose from_dict returns None are dropped from the result.
            sessions: List[Union[AgentSession, TeamSession, WorkflowSession]] = []
            for session in sessions_raw:
                if session["session_type"] == SessionType.AGENT.value:
                    agent_session = AgentSession.from_dict(session)
                    if agent_session is not None:
                        sessions.append(agent_session)
                elif session["session_type"] == SessionType.TEAM.value:
                    team_session = TeamSession.from_dict(session)
                    if team_session is not None:
                        sessions.append(team_session)
                elif session["session_type"] == SessionType.WORKFLOW.value:
                    workflow_session = WorkflowSession.from_dict(session)
                    if workflow_session is not None:
                        sessions.append(workflow_session)

            if not sessions:
                return [] if deserialize else ([], 0)

            return sessions

        except Exception as e:
            log_error(f"Exception reading sessions: {e}")
            raise e
477
+
478
+ def rename_session(
479
+ self, session_id: str, session_type: SessionType, session_name: str, deserialize: Optional[bool] = True
480
+ ) -> Optional[Union[Session, Dict[str, Any]]]:
481
+ """Rename a session in the database.
482
+
483
+ Args:
484
+ session_id (str): The ID of the session to rename.
485
+ session_type (SessionType): The type of session to rename.
486
+ session_name (str): The new name of the session.
487
+ deserialize (Optional[bool]): Whether to serialize the session. Defaults to True.
488
+
489
+ Returns:
490
+ Optional[Union[Session, Dict[str, Any]]]:
491
+ - When deserialize=True: Session object
492
+ - When deserialize=False: Session dictionary
493
+
494
+ Raises:
495
+ Exception: If there is an error renaming the session.
496
+ """
497
+ try:
498
+ collection_ref = self._get_collection(table_type="sessions")
499
+
500
+ docs = collection_ref.where(filter=FieldFilter("session_id", "==", session_id)).stream()
501
+ doc_ref = next((doc.reference for doc in docs), None)
502
+
503
+ if doc_ref is None:
504
+ return None
505
+
506
+ doc_ref.update({"session_data.session_name": session_name, "updated_at": int(time.time())})
507
+
508
+ updated_doc = doc_ref.get()
509
+ if not updated_doc.exists:
510
+ return None
511
+
512
+ result = updated_doc.to_dict()
513
+ if result is None:
514
+ return None
515
+ deserialized_session = deserialize_session_json_fields(result)
516
+
517
+ log_debug(f"Renamed session with id '{session_id}' to '{session_name}'")
518
+
519
+ if not deserialize:
520
+ return deserialized_session
521
+
522
+ if session_type == SessionType.AGENT:
523
+ return AgentSession.from_dict(deserialized_session)
524
+ elif session_type == SessionType.TEAM:
525
+ return TeamSession.from_dict(deserialized_session)
526
+ else:
527
+ return WorkflowSession.from_dict(deserialized_session)
528
+
529
+ except Exception as e:
530
+ log_error(f"Exception renaming session: {e}")
531
+ raise e
532
+
533
+ def upsert_session(
534
+ self, session: Session, deserialize: Optional[bool] = True
535
+ ) -> Optional[Union[Session, Dict[str, Any]]]:
536
+ """Insert or update a session in the database.
537
+
538
+ Args:
539
+ session (Session): The session to upsert.
540
+
541
+ Returns:
542
+ Optional[Session]: The upserted session.
543
+
544
+ Raises:
545
+ Exception: If there is an error upserting the session.
546
+ """
547
+ try:
548
+ collection_ref = self._get_collection(table_type="sessions", create_collection_if_not_found=True)
549
+ serialized_session_dict = serialize_session_json_fields(session.to_dict())
550
+
551
+ if isinstance(session, AgentSession):
552
+ record = {
553
+ "session_id": serialized_session_dict.get("session_id"),
554
+ "session_type": SessionType.AGENT.value,
555
+ "agent_id": serialized_session_dict.get("agent_id"),
556
+ "user_id": serialized_session_dict.get("user_id"),
557
+ "runs": serialized_session_dict.get("runs"),
558
+ "agent_data": serialized_session_dict.get("agent_data"),
559
+ "session_data": serialized_session_dict.get("session_data"),
560
+ "summary": serialized_session_dict.get("summary"),
561
+ "metadata": serialized_session_dict.get("metadata"),
562
+ "created_at": serialized_session_dict.get("created_at"),
563
+ "updated_at": int(time.time()),
564
+ }
565
+
566
+ elif isinstance(session, TeamSession):
567
+ record = {
568
+ "session_id": serialized_session_dict.get("session_id"),
569
+ "session_type": SessionType.TEAM.value,
570
+ "team_id": serialized_session_dict.get("team_id"),
571
+ "user_id": serialized_session_dict.get("user_id"),
572
+ "runs": serialized_session_dict.get("runs"),
573
+ "team_data": serialized_session_dict.get("team_data"),
574
+ "session_data": serialized_session_dict.get("session_data"),
575
+ "summary": serialized_session_dict.get("summary"),
576
+ "metadata": serialized_session_dict.get("metadata"),
577
+ "created_at": serialized_session_dict.get("created_at"),
578
+ "updated_at": int(time.time()),
579
+ }
580
+
581
+ elif isinstance(session, WorkflowSession):
582
+ record = {
583
+ "session_id": serialized_session_dict.get("session_id"),
584
+ "session_type": SessionType.WORKFLOW.value,
585
+ "workflow_id": serialized_session_dict.get("workflow_id"),
586
+ "user_id": serialized_session_dict.get("user_id"),
587
+ "runs": serialized_session_dict.get("runs"),
588
+ "workflow_data": serialized_session_dict.get("workflow_data"),
589
+ "session_data": serialized_session_dict.get("session_data"),
590
+ "summary": serialized_session_dict.get("summary"),
591
+ "metadata": serialized_session_dict.get("metadata"),
592
+ "created_at": serialized_session_dict.get("created_at"),
593
+ "updated_at": int(time.time()),
594
+ }
595
+
596
+ # Find existing document or create new one
597
+ docs = collection_ref.where(filter=FieldFilter("session_id", "==", record["session_id"])).stream()
598
+ doc_ref = next((doc.reference for doc in docs), None)
599
+
600
+ if doc_ref is None:
601
+ # Create new document
602
+ doc_ref = collection_ref.document()
603
+
604
+ doc_ref.set(record, merge=True)
605
+
606
+ # Get the updated document
607
+ updated_doc = doc_ref.get()
608
+ if not updated_doc.exists:
609
+ return None
610
+
611
+ result = updated_doc.to_dict()
612
+ if result is None:
613
+ return None
614
+ deserialized_session = deserialize_session_json_fields(result)
615
+
616
+ if not deserialize:
617
+ return deserialized_session
618
+
619
+ if isinstance(session, AgentSession):
620
+ return AgentSession.from_dict(deserialized_session)
621
+ elif isinstance(session, TeamSession):
622
+ return TeamSession.from_dict(deserialized_session)
623
+ else:
624
+ return WorkflowSession.from_dict(deserialized_session)
625
+
626
+ except Exception as e:
627
+ log_error(f"Exception upserting session: {e}")
628
+ raise e
629
+
630
+ def upsert_sessions(
631
+ self, sessions: List[Session], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
632
+ ) -> List[Union[Session, Dict[str, Any]]]:
633
+ """
634
+ Bulk upsert multiple sessions for improved performance on large datasets.
635
+
636
+ Args:
637
+ sessions (List[Session]): List of sessions to upsert.
638
+ deserialize (Optional[bool]): Whether to deserialize the sessions. Defaults to True.
639
+
640
+ Returns:
641
+ List[Union[Session, Dict[str, Any]]]: List of upserted sessions.
642
+
643
+ Raises:
644
+ Exception: If an error occurs during bulk upsert.
645
+ """
646
+ if not sessions:
647
+ return []
648
+
649
+ try:
650
+ log_info(
651
+ f"FirestoreDb doesn't support efficient bulk operations, falling back to individual upserts for {len(sessions)} sessions"
652
+ )
653
+
654
+ # Fall back to individual upserts
655
+ results = []
656
+ for session in sessions:
657
+ if session is not None:
658
+ result = self.upsert_session(session, deserialize=deserialize)
659
+ if result is not None:
660
+ results.append(result)
661
+ return results
662
+
663
+ except Exception as e:
664
+ log_error(f"Exception during bulk session upsert: {e}")
665
+ return []
666
+
667
+ # -- Memory methods --
668
+
669
    def delete_user_memory(self, memory_id: str, user_id: Optional[str] = None) -> None:
        """Delete a user memory from the database.

        When user_id is given, only the first document matching memory_id is
        examined; it is deleted only if it belongs to that user. Without a
        user_id, every document matching memory_id is deleted.

        Args:
            memory_id (str): The ID of the memory to delete.
            user_id (Optional[str]): The ID of the user (optional, for filtering).

        Returns:
            None. (Outcome is reported via debug logs only.)

        Raises:
            Exception: If there is an error deleting the memory.
        """
        try:
            collection_ref = self._get_collection(table_type="memories")

            # If user_id is provided, verify the memory belongs to the user before deleting
            if user_id:
                docs = collection_ref.where(filter=FieldFilter("memory_id", "==", memory_id)).stream()
                for doc in docs:
                    data = doc.to_dict()
                    if data.get("user_id") != user_id:
                        log_debug(f"Memory {memory_id} does not belong to user {user_id}")
                        return
                    doc.reference.delete()
                    log_debug(f"Successfully deleted user memory id: {memory_id}")
                    return
            else:
                # No ownership filter: delete all documents carrying this memory_id.
                docs = collection_ref.where(filter=FieldFilter("memory_id", "==", memory_id)).stream()
                deleted_count = 0
                for doc in docs:
                    doc.reference.delete()
                    deleted_count += 1

                success = deleted_count > 0
                if success:
                    log_debug(f"Successfully deleted user memory id: {memory_id}")
                else:
                    log_debug(f"No user memory found with id: {memory_id}")

        except Exception as e:
            log_error(f"Error deleting user memory: {e}")
            raise e
712
+
713
+ def delete_user_memories(self, memory_ids: List[str], user_id: Optional[str] = None) -> None:
714
+ """Delete user memories from the database.
715
+
716
+ Args:
717
+ memory_ids (List[str]): The IDs of the memories to delete.
718
+ user_id (Optional[str]): The ID of the user (optional, for filtering).
719
+
720
+ Raises:
721
+ Exception: If there is an error deleting the memories.
722
+ """
723
+ try:
724
+ collection_ref = self._get_collection(table_type="memories")
725
+ batch = self.db_client.batch()
726
+ deleted_count = 0
727
+
728
+ # If user_id is provided, filter memory_ids to only those belonging to the user
729
+ if user_id:
730
+ for memory_id in memory_ids:
731
+ docs = collection_ref.where(filter=FieldFilter("memory_id", "==", memory_id)).stream()
732
+ for doc in docs:
733
+ data = doc.to_dict()
734
+ if data.get("user_id") == user_id:
735
+ batch.delete(doc.reference)
736
+ deleted_count += 1
737
+ else:
738
+ for memory_id in memory_ids:
739
+ docs = collection_ref.where(filter=FieldFilter("memory_id", "==", memory_id)).stream()
740
+ for doc in docs:
741
+ batch.delete(doc.reference)
742
+ deleted_count += 1
743
+
744
+ batch.commit()
745
+
746
+ if deleted_count == 0:
747
+ log_info(f"No memories found with ids: {memory_ids}")
748
+ else:
749
+ log_info(f"Successfully deleted {deleted_count} memories")
750
+
751
+ except Exception as e:
752
+ log_error(f"Error deleting memories: {e}")
753
+ raise e
754
+
755
+ def get_all_memory_topics(self, create_collection_if_not_found: Optional[bool] = True) -> List[str]:
756
+ """Get all memory topics from the database.
757
+
758
+ Returns:
759
+ List[str]: The topics.
760
+
761
+ Raises:
762
+ Exception: If there is an error getting the topics.
763
+ """
764
+ try:
765
+ collection_ref = self._get_collection(table_type="memories")
766
+ if collection_ref is None:
767
+ return []
768
+
769
+ docs = collection_ref.stream()
770
+
771
+ all_topics = set()
772
+ for doc in docs:
773
+ data = doc.to_dict()
774
+ topics = data.get("topics", [])
775
+ if topics:
776
+ all_topics.update(topics)
777
+
778
+ return [topic for topic in all_topics if topic]
779
+
780
+ except Exception as e:
781
+ log_error(f"Exception getting all memory topics: {e}")
782
+ raise e
783
+
784
+ def get_user_memory(
785
+ self, memory_id: str, deserialize: Optional[bool] = True, user_id: Optional[str] = None
786
+ ) -> Optional[UserMemory]:
787
+ """Get a memory from the database.
788
+
789
+ Args:
790
+ memory_id (str): The ID of the memory to get.
791
+ deserialize (Optional[bool]): Whether to serialize the memory. Defaults to True.
792
+ user_id (Optional[str]): The ID of the user (optional, for filtering).
793
+
794
+ Returns:
795
+ Optional[UserMemory]:
796
+ - When deserialize=True: UserMemory object
797
+ - When deserialize=False: Memory dictionary
798
+
799
+ Raises:
800
+ Exception: If there is an error getting the memory.
801
+ """
802
+ try:
803
+ collection_ref = self._get_collection(table_type="memories")
804
+ docs = collection_ref.where(filter=FieldFilter("memory_id", "==", memory_id)).stream()
805
+
806
+ result = None
807
+ for doc in docs:
808
+ result = doc.to_dict()
809
+ break
810
+
811
+ if result is None:
812
+ return None
813
+
814
+ # Filter by user_id if provided
815
+ if user_id and result.get("user_id") != user_id:
816
+ return None
817
+
818
+ if not deserialize:
819
+ return result
820
+
821
+ return UserMemory.from_dict(result)
822
+
823
+ except Exception as e:
824
+ log_error(f"Exception getting user memory: {e}")
825
+ raise e
826
+
827
+ def get_user_memories(
828
+ self,
829
+ user_id: Optional[str] = None,
830
+ agent_id: Optional[str] = None,
831
+ team_id: Optional[str] = None,
832
+ topics: Optional[List[str]] = None,
833
+ search_content: Optional[str] = None,
834
+ limit: Optional[int] = None,
835
+ page: Optional[int] = None,
836
+ sort_by: Optional[str] = None,
837
+ sort_order: Optional[str] = None,
838
+ deserialize: Optional[bool] = True,
839
+ ) -> Union[List[UserMemory], Tuple[List[Dict[str, Any]], int]]:
840
+ """Get all memories from the database as UserMemory objects.
841
+
842
+ Args:
843
+ user_id (Optional[str]): The ID of the user to get the memories for.
844
+ agent_id (Optional[str]): The ID of the agent to get the memories for.
845
+ team_id (Optional[str]): The ID of the team to get the memories for.
846
+ topics (Optional[List[str]]): The topics to filter the memories by.
847
+ search_content (Optional[str]): The content to filter the memories by.
848
+ limit (Optional[int]): The limit of the memories to get.
849
+ page (Optional[int]): The page number to get.
850
+ sort_by (Optional[str]): The field to sort the memories by.
851
+ sort_order (Optional[str]): The order to sort the memories by.
852
+ deserialize (Optional[bool]): Whether to serialize the memories. Defaults to True.
853
+ create_table_if_not_found: Whether to create the index if it doesn't exist.
854
+
855
+ Returns:
856
+ Tuple[List[Dict[str, Any]], int]: A tuple containing the memories and the total count.
857
+
858
+ Raises:
859
+ Exception: If there is an error getting the memories.
860
+ """
861
+ try:
862
+ collection_ref = self._get_collection(table_type="memories")
863
+ if collection_ref is None:
864
+ return [] if deserialize else ([], 0)
865
+
866
+ query = collection_ref
867
+
868
+ if user_id is not None:
869
+ query = query.where(filter=FieldFilter("user_id", "==", user_id))
870
+ if agent_id is not None:
871
+ query = query.where(filter=FieldFilter("agent_id", "==", agent_id))
872
+ if team_id is not None:
873
+ query = query.where(filter=FieldFilter("team_id", "==", team_id))
874
+ if topics is not None and len(topics) > 0:
875
+ query = query.where(filter=FieldFilter("topics", "array_contains_any", topics))
876
+ if search_content is not None:
877
+ query = query.where(filter=FieldFilter("memory", "==", search_content))
878
+
879
+ # Apply sorting
880
+ query = apply_sorting(query, sort_by, sort_order)
881
+
882
+ # Get all documents
883
+ docs = query.stream()
884
+ all_records = [doc.to_dict() for doc in docs]
885
+
886
+ total_count = len(all_records)
887
+
888
+ # Apply pagination to the filtered results
889
+ if limit is not None and page is not None:
890
+ start_index = (page - 1) * limit
891
+ end_index = start_index + limit
892
+ records = all_records[start_index:end_index]
893
+ elif limit is not None:
894
+ records = all_records[:limit]
895
+ else:
896
+ records = all_records
897
+ if not deserialize:
898
+ return records, total_count
899
+
900
+ return [UserMemory.from_dict(record) for record in records]
901
+
902
+ except Exception as e:
903
+ log_error(f"Exception getting user memories: {e}")
904
+ raise e
905
+
906
+ def get_user_memory_stats(
907
+ self,
908
+ limit: Optional[int] = None,
909
+ page: Optional[int] = None,
910
+ user_id: Optional[str] = None,
911
+ ) -> Tuple[List[Dict[str, Any]], int]:
912
+ """Get user memories stats.
913
+
914
+ Args:
915
+ limit (Optional[int]): The limit of the memories to get.
916
+ page (Optional[int]): The page number to get.
917
+
918
+ Returns:
919
+ Tuple[List[Dict[str, Any]], int]: A tuple containing the memories stats and the total count.
920
+
921
+ Raises:
922
+ Exception: If there is an error getting the memories stats.
923
+ """
924
+ try:
925
+ collection_ref = self._get_collection(table_type="memories")
926
+
927
+ if user_id:
928
+ query = collection_ref.where(filter=FieldFilter("user_id", "==", user_id))
929
+ else:
930
+ query = collection_ref.where(filter=FieldFilter("user_id", "!=", None))
931
+
932
+ docs = query.stream()
933
+
934
+ user_stats = {}
935
+ for doc in docs:
936
+ data = doc.to_dict()
937
+ current_user_id = data.get("user_id")
938
+ if current_user_id:
939
+ if current_user_id not in user_stats:
940
+ user_stats[current_user_id] = {
941
+ "user_id": current_user_id,
942
+ "total_memories": 0,
943
+ "last_memory_updated_at": 0,
944
+ }
945
+ user_stats[current_user_id]["total_memories"] += 1
946
+ updated_at = data.get("updated_at", 0)
947
+ if updated_at > user_stats[current_user_id]["last_memory_updated_at"]:
948
+ user_stats[current_user_id]["last_memory_updated_at"] = updated_at
949
+
950
+ # Convert to list and sort
951
+ formatted_results = list(user_stats.values())
952
+ formatted_results.sort(key=lambda x: x["last_memory_updated_at"], reverse=True)
953
+
954
+ total_count = len(formatted_results)
955
+
956
+ # Apply pagination
957
+ if limit is not None:
958
+ start_idx = 0
959
+ if page is not None:
960
+ start_idx = (page - 1) * limit
961
+ formatted_results = formatted_results[start_idx : start_idx + limit]
962
+
963
+ return formatted_results, total_count
964
+
965
+ except Exception as e:
966
+ log_error(f"Exception getting user memory stats: {e}")
967
+ raise e
968
+
969
+ def upsert_user_memory(
970
+ self, memory: UserMemory, deserialize: Optional[bool] = True
971
+ ) -> Optional[Union[UserMemory, Dict[str, Any]]]:
972
+ """Upsert a user memory in the database.
973
+
974
+ Args:
975
+ memory (UserMemory): The memory to upsert.
976
+ deserialize (Optional[bool]): Whether to serialize the memory. Defaults to True.
977
+
978
+ Returns:
979
+ Optional[Union[UserMemory, Dict[str, Any]]]:
980
+ - When deserialize=True: UserMemory object
981
+ - When deserialize=False: Memory dictionary
982
+
983
+ Raises:
984
+ Exception: If there is an error upserting the memory.
985
+ """
986
+ try:
987
+ collection_ref = self._get_collection(table_type="memories", create_collection_if_not_found=True)
988
+ if collection_ref is None:
989
+ return None
990
+
991
+ if memory.memory_id is None:
992
+ memory.memory_id = str(uuid4())
993
+
994
+ update_doc = memory.to_dict()
995
+ update_doc["updated_at"] = int(time.time())
996
+
997
+ # Find existing document or create new one
998
+ docs = collection_ref.where("memory_id", "==", memory.memory_id).stream()
999
+ doc_ref = next((doc.reference for doc in docs), None)
1000
+
1001
+ if doc_ref is None:
1002
+ doc_ref = collection_ref.document()
1003
+
1004
+ doc_ref.set(update_doc, merge=True)
1005
+
1006
+ if not deserialize:
1007
+ return update_doc
1008
+
1009
+ return UserMemory.from_dict(update_doc)
1010
+
1011
+ except Exception as e:
1012
+ log_error(f"Exception upserting user memory: {e}")
1013
+ raise e
1014
+
1015
+ def upsert_memories(
1016
+ self, memories: List[UserMemory], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
1017
+ ) -> List[Union[UserMemory, Dict[str, Any]]]:
1018
+ """
1019
+ Bulk upsert multiple user memories for improved performance on large datasets.
1020
+
1021
+ Args:
1022
+ memories (List[UserMemory]): List of memories to upsert.
1023
+ deserialize (Optional[bool]): Whether to deserialize the memories. Defaults to True.
1024
+
1025
+ Returns:
1026
+ List[Union[UserMemory, Dict[str, Any]]]: List of upserted memories.
1027
+
1028
+ Raises:
1029
+ Exception: If an error occurs during bulk upsert.
1030
+ """
1031
+ if not memories:
1032
+ return []
1033
+
1034
+ try:
1035
+ log_info(
1036
+ f"FirestoreDb doesn't support efficient bulk operations, falling back to individual upserts for {len(memories)} memories"
1037
+ )
1038
+ # Fall back to individual upserts
1039
+ results = []
1040
+ for memory in memories:
1041
+ if memory is not None:
1042
+ result = self.upsert_user_memory(memory, deserialize=deserialize)
1043
+ if result is not None:
1044
+ results.append(result)
1045
+ return results
1046
+
1047
+ except Exception as e:
1048
+ log_error(f"Exception during bulk memory upsert: {e}")
1049
+ return []
1050
+
1051
+ def clear_memories(self) -> None:
1052
+ """Delete all memories from the database.
1053
+
1054
+ Raises:
1055
+ Exception: If an error occurs during deletion.
1056
+ """
1057
+ try:
1058
+ collection_ref = self._get_collection(table_type="memories")
1059
+
1060
+ # Get all documents in the collection
1061
+ docs = collection_ref.stream()
1062
+
1063
+ # Delete all documents in batches
1064
+ batch = self.db_client.batch()
1065
+ batch_count = 0
1066
+
1067
+ for doc in docs:
1068
+ batch.delete(doc.reference)
1069
+ batch_count += 1
1070
+
1071
+ # Firestore batch has a limit of 500 operations
1072
+ if batch_count >= 500:
1073
+ batch.commit()
1074
+ batch = self.db_client.batch()
1075
+ batch_count = 0
1076
+
1077
+ # Commit remaining operations
1078
+ if batch_count > 0:
1079
+ batch.commit()
1080
+
1081
+ except Exception as e:
1082
+ log_error(f"Exception deleting all memories: {e}")
1083
+ raise e
1084
+
1085
+ # -- Cultural Knowledge methods --
1086
+ def clear_cultural_knowledge(self) -> None:
1087
+ """Delete all cultural knowledge from the database.
1088
+
1089
+ Raises:
1090
+ Exception: If an error occurs during deletion.
1091
+ """
1092
+ try:
1093
+ collection_ref = self._get_collection(table_type="culture")
1094
+
1095
+ # Get all documents in the collection
1096
+ docs = collection_ref.stream()
1097
+
1098
+ # Delete all documents in batches
1099
+ batch = self.db_client.batch()
1100
+ batch_count = 0
1101
+
1102
+ for doc in docs:
1103
+ batch.delete(doc.reference)
1104
+ batch_count += 1
1105
+
1106
+ # Firestore batch has a limit of 500 operations
1107
+ if batch_count >= 500:
1108
+ batch.commit()
1109
+ batch = self.db_client.batch()
1110
+ batch_count = 0
1111
+
1112
+ # Commit remaining operations
1113
+ if batch_count > 0:
1114
+ batch.commit()
1115
+
1116
+ except Exception as e:
1117
+ log_error(f"Exception deleting all cultural knowledge: {e}")
1118
+ raise e
1119
+
1120
+ def delete_cultural_knowledge(self, id: str) -> None:
1121
+ """Delete cultural knowledge by ID.
1122
+
1123
+ Args:
1124
+ id (str): The ID of the cultural knowledge to delete.
1125
+
1126
+ Raises:
1127
+ Exception: If an error occurs during deletion.
1128
+ """
1129
+ try:
1130
+ collection_ref = self._get_collection(table_type="culture")
1131
+ docs = collection_ref.where(filter=FieldFilter("id", "==", id)).stream()
1132
+
1133
+ for doc in docs:
1134
+ doc.reference.delete()
1135
+ log_debug(f"Deleted cultural knowledge with ID: {id}")
1136
+
1137
+ except Exception as e:
1138
+ log_error(f"Error deleting cultural knowledge: {e}")
1139
+ raise e
1140
+
1141
+ def get_cultural_knowledge(
1142
+ self, id: str, deserialize: Optional[bool] = True
1143
+ ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
1144
+ """Get cultural knowledge by ID.
1145
+
1146
+ Args:
1147
+ id (str): The ID of the cultural knowledge to retrieve.
1148
+ deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge object. Defaults to True.
1149
+
1150
+ Returns:
1151
+ Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The cultural knowledge if found, None otherwise.
1152
+
1153
+ Raises:
1154
+ Exception: If an error occurs during retrieval.
1155
+ """
1156
+ try:
1157
+ collection_ref = self._get_collection(table_type="culture")
1158
+ docs = collection_ref.where(filter=FieldFilter("id", "==", id)).limit(1).stream()
1159
+
1160
+ for doc in docs:
1161
+ result = doc.to_dict()
1162
+ if not deserialize:
1163
+ return result
1164
+ return deserialize_cultural_knowledge_from_db(result)
1165
+
1166
+ return None
1167
+
1168
+ except Exception as e:
1169
+ log_error(f"Error getting cultural knowledge: {e}")
1170
+ raise e
1171
+
1172
+ def get_all_cultural_knowledge(
1173
+ self,
1174
+ agent_id: Optional[str] = None,
1175
+ team_id: Optional[str] = None,
1176
+ name: Optional[str] = None,
1177
+ limit: Optional[int] = None,
1178
+ page: Optional[int] = None,
1179
+ sort_by: Optional[str] = None,
1180
+ sort_order: Optional[str] = None,
1181
+ deserialize: Optional[bool] = True,
1182
+ ) -> Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
1183
+ """Get all cultural knowledge with filtering and pagination.
1184
+
1185
+ Args:
1186
+ agent_id (Optional[str]): Filter by agent ID.
1187
+ team_id (Optional[str]): Filter by team ID.
1188
+ name (Optional[str]): Filter by name (case-insensitive partial match).
1189
+ limit (Optional[int]): Maximum number of results to return.
1190
+ page (Optional[int]): Page number for pagination.
1191
+ sort_by (Optional[str]): Field to sort by.
1192
+ sort_order (Optional[str]): Sort order ('asc' or 'desc').
1193
+ deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge objects. Defaults to True.
1194
+
1195
+ Returns:
1196
+ Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
1197
+ - When deserialize=True: List of CulturalKnowledge objects
1198
+ - When deserialize=False: Tuple with list of dictionaries and total count
1199
+
1200
+ Raises:
1201
+ Exception: If an error occurs during retrieval.
1202
+ """
1203
+ try:
1204
+ collection_ref = self._get_collection(table_type="culture")
1205
+
1206
+ # Build query with filters
1207
+ query = collection_ref
1208
+ if agent_id is not None:
1209
+ query = query.where(filter=FieldFilter("agent_id", "==", agent_id))
1210
+ if team_id is not None:
1211
+ query = query.where(filter=FieldFilter("team_id", "==", team_id))
1212
+
1213
+ # Get all matching documents
1214
+ docs = query.stream()
1215
+ results = [doc.to_dict() for doc in docs]
1216
+
1217
+ # Apply name filter (Firestore doesn't support regex in queries)
1218
+ if name is not None:
1219
+ results = [r for r in results if name.lower() in r.get("name", "").lower()]
1220
+
1221
+ total_count = len(results)
1222
+
1223
+ # Apply sorting and pagination to in-memory results
1224
+ sorted_results = apply_sorting_to_records(records=results, sort_by=sort_by, sort_order=sort_order)
1225
+ paginated_results = apply_pagination_to_records(records=sorted_results, limit=limit, page=page)
1226
+
1227
+ if not deserialize:
1228
+ return paginated_results, total_count
1229
+
1230
+ return [deserialize_cultural_knowledge_from_db(item) for item in paginated_results]
1231
+
1232
+ except Exception as e:
1233
+ log_error(f"Error getting all cultural knowledge: {e}")
1234
+ raise e
1235
+
1236
+ def upsert_cultural_knowledge(
1237
+ self, cultural_knowledge: CulturalKnowledge, deserialize: Optional[bool] = True
1238
+ ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
1239
+ """Upsert cultural knowledge in Firestore.
1240
+
1241
+ Args:
1242
+ cultural_knowledge (CulturalKnowledge): The cultural knowledge to upsert.
1243
+ deserialize (Optional[bool]): Whether to deserialize the result. Defaults to True.
1244
+
1245
+ Returns:
1246
+ Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The upserted cultural knowledge.
1247
+
1248
+ Raises:
1249
+ Exception: If an error occurs during upsert.
1250
+ """
1251
+ try:
1252
+ collection_ref = self._get_collection(table_type="culture", create_collection_if_not_found=True)
1253
+
1254
+ # Serialize content, categories, and notes into a dict for DB storage
1255
+ content_dict = serialize_cultural_knowledge_for_db(cultural_knowledge)
1256
+
1257
+ # Create the update document with serialized content
1258
+ update_doc = {
1259
+ "id": cultural_knowledge.id,
1260
+ "name": cultural_knowledge.name,
1261
+ "summary": cultural_knowledge.summary,
1262
+ "content": content_dict if content_dict else None,
1263
+ "metadata": cultural_knowledge.metadata,
1264
+ "input": cultural_knowledge.input,
1265
+ "created_at": cultural_knowledge.created_at,
1266
+ "updated_at": int(time.time()),
1267
+ "agent_id": cultural_knowledge.agent_id,
1268
+ "team_id": cultural_knowledge.team_id,
1269
+ }
1270
+
1271
+ # Find and update or create new document
1272
+ docs = collection_ref.where(filter=FieldFilter("id", "==", cultural_knowledge.id)).limit(1).stream()
1273
+
1274
+ doc_found = False
1275
+ for doc in docs:
1276
+ doc.reference.set(update_doc)
1277
+ doc_found = True
1278
+ break
1279
+
1280
+ if not doc_found:
1281
+ collection_ref.add(update_doc)
1282
+
1283
+ if not deserialize:
1284
+ return update_doc
1285
+
1286
+ return deserialize_cultural_knowledge_from_db(update_doc)
1287
+
1288
+ except Exception as e:
1289
+ log_error(f"Error upserting cultural knowledge: {e}")
1290
+ raise e
1291
+
1292
+ # -- Metrics methods --
1293
+
1294
+ def _get_all_sessions_for_metrics_calculation(
1295
+ self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None
1296
+ ) -> List[Dict[str, Any]]:
1297
+ """Get all sessions of all types for metrics calculation."""
1298
+ try:
1299
+ collection_ref = self._get_collection(table_type="sessions")
1300
+
1301
+ query = collection_ref
1302
+ if start_timestamp is not None:
1303
+ query = query.where(filter=FieldFilter("created_at", ">=", start_timestamp))
1304
+ if end_timestamp is not None:
1305
+ query = query.where(filter=FieldFilter("created_at", "<=", end_timestamp))
1306
+
1307
+ docs = query.stream()
1308
+ results = []
1309
+ for doc in docs:
1310
+ data = doc.to_dict()
1311
+ # Only include required fields for metrics
1312
+ result = {
1313
+ "user_id": data.get("user_id"),
1314
+ "session_data": data.get("session_data"),
1315
+ "runs": data.get("runs"),
1316
+ "created_at": data.get("created_at"),
1317
+ "session_type": data.get("session_type"),
1318
+ }
1319
+ results.append(result)
1320
+
1321
+ return results
1322
+
1323
+ except Exception as e:
1324
+ log_error(f"Exception getting all sessions for metrics calculation: {e}")
1325
+ raise e
1326
+
1327
    def _get_metrics_calculation_starting_date(self, collection_ref) -> Optional[date]:
        """Get the first date for which metrics calculation is needed.

        Looks at the most recent metrics record: if it is marked complete,
        resume the day after it; otherwise recompute that same day. When no
        metrics records exist, falls back to the date of the earliest session.

        Args:
            collection_ref: Firestore collection reference for the metrics table.

        Returns:
            Optional[date]: First date needing metrics, or None when there are
                no sessions at all.

        Raises:
            Exception: If the lookup fails.
        """
        try:
            # Newest metrics record first; "date" values are parsed below with
            # the "%Y-%m-%d" format, so string DESCENDING order is chronological.
            query = collection_ref.order_by("date", direction="DESCENDING").limit(1)
            docs = query.stream()

            for doc in docs:
                data = doc.to_dict()
                result_date = datetime.strptime(data["date"], "%Y-%m-%d").date()
                if data.get("completed"):
                    # Day fully processed -> start from the following day.
                    return result_date + timedelta(days=1)
                else:
                    # Day only partially processed -> redo it.
                    return result_date

            # No metrics records. Return the date of the first recorded session.
            first_session_result = self.get_sessions(sort_by="created_at", sort_order="asc", limit=1, deserialize=False)
            first_session_date = None

            # NOTE(review): the list branch reads created_at as an attribute
            # (deserialized session object) while the tuple branch treats the
            # first element as a list of dicts — presumably matching the two
            # return shapes of get_sessions; confirm against its signature.
            if isinstance(first_session_result, list) and len(first_session_result) > 0:
                first_session_date = first_session_result[0].created_at  # type: ignore
            elif isinstance(first_session_result, tuple) and len(first_session_result[0]) > 0:
                first_session_date = first_session_result[0][0].get("created_at")

            if first_session_date is None:
                return None

            # created_at is an epoch timestamp; interpret it as UTC.
            return datetime.fromtimestamp(first_session_date, tz=timezone.utc).date()

        except Exception as e:
            log_error(f"Exception getting metrics calculation starting date: {e}")
            raise e
1358
+
1359
    def calculate_metrics(self) -> Optional[list[dict]]:
        """Calculate metrics for all dates without complete metrics.

        Determines where the last calculation left off, aggregates the
        sessions created in that window per day, and bulk-upserts one metrics
        record per day with session activity.

        Returns:
            Optional[list[dict]]: The upserted metrics records, or None when
                there is nothing to calculate.

        Raises:
            Exception: If the calculation fails.
        """
        try:
            collection_ref = self._get_collection(table_type="metrics", create_collection_if_not_found=True)

            # Resume from the first date still needing metrics (falls back to
            # the first session's date when no metrics records exist).
            starting_date = self._get_metrics_calculation_starting_date(collection_ref)
            if starting_date is None:
                log_info("No session data found. Won't calculate metrics.")
                return None

            dates_to_process = get_dates_to_calculate_metrics_for(starting_date)
            if not dates_to_process:
                log_info("Metrics already calculated for all relevant dates.")
                return None

            # Window spanning every date to process, inclusive of the last day.
            # NOTE(review): timestamp() on a naive datetime uses the local
            # timezone, while the starting date is derived in UTC — confirm
            # this offset is intended.
            start_timestamp = int(datetime.combine(dates_to_process[0], datetime.min.time()).timestamp())
            end_timestamp = int(
                datetime.combine(dates_to_process[-1] + timedelta(days=1), datetime.min.time()).timestamp()
            )

            sessions = self._get_all_sessions_for_metrics_calculation(
                start_timestamp=start_timestamp, end_timestamp=end_timestamp
            )
            # Group the fetched sessions by ISO date key for per-day aggregation.
            all_sessions_data = fetch_all_sessions_data(
                sessions=sessions, dates_to_process=dates_to_process, start_timestamp=start_timestamp
            )
            if not all_sessions_data:
                log_info("No new session data found. Won't calculate metrics.")
                return None

            results = []
            metrics_records = []

            for date_to_process in dates_to_process:
                date_key = date_to_process.isoformat()
                sessions_for_date = all_sessions_data.get(date_key, {})

                # Skip dates with no sessions
                if not any(len(sessions) > 0 for sessions in sessions_for_date.values()):
                    continue

                metrics_record = calculate_date_metrics(date_to_process, sessions_for_date)
                metrics_records.append(metrics_record)

            if metrics_records:
                results = bulk_upsert_metrics(collection_ref, metrics_records)

            log_debug("Updated metrics calculations")

            return results

        except Exception as e:
            log_error(f"Exception calculating metrics: {e}")
            raise e
1413
+
1414
+ def get_metrics(
1415
+ self,
1416
+ starting_date: Optional[date] = None,
1417
+ ending_date: Optional[date] = None,
1418
+ ) -> Tuple[List[dict], Optional[int]]:
1419
+ """Get all metrics matching the given date range."""
1420
+ try:
1421
+ collection_ref = self._get_collection(table_type="metrics")
1422
+ if collection_ref is None:
1423
+ return [], None
1424
+
1425
+ query = collection_ref
1426
+ if starting_date:
1427
+ query = query.where(filter=FieldFilter("date", ">=", starting_date.isoformat()))
1428
+ if ending_date:
1429
+ query = query.where(filter=FieldFilter("date", "<=", ending_date.isoformat()))
1430
+
1431
+ docs = query.stream()
1432
+ records = []
1433
+ latest_updated_at = 0
1434
+
1435
+ for doc in docs:
1436
+ data = doc.to_dict()
1437
+ records.append(data)
1438
+ updated_at = data.get("updated_at", 0)
1439
+ if updated_at > latest_updated_at:
1440
+ latest_updated_at = updated_at
1441
+
1442
+ if not records:
1443
+ return [], None
1444
+
1445
+ return records, latest_updated_at
1446
+
1447
+ except Exception as e:
1448
+ log_error(f"Exception getting metrics: {e}")
1449
+ raise e
1450
+
1451
+ # -- Knowledge methods --
1452
+
1453
+ def delete_knowledge_content(self, id: str):
1454
+ """Delete a knowledge row from the database.
1455
+
1456
+ Args:
1457
+ id (str): The ID of the knowledge row to delete.
1458
+
1459
+ Raises:
1460
+ Exception: If an error occurs during deletion.
1461
+ """
1462
+ try:
1463
+ collection_ref = self._get_collection(table_type="knowledge")
1464
+ docs = collection_ref.where(filter=FieldFilter("id", "==", id)).stream()
1465
+
1466
+ for doc in docs:
1467
+ doc.reference.delete()
1468
+
1469
+ except Exception as e:
1470
+ log_error(f"Error deleting knowledge content: {e}")
1471
+ raise e
1472
+
1473
+ def get_knowledge_content(self, id: str) -> Optional[KnowledgeRow]:
1474
+ """Get a knowledge row from the database.
1475
+
1476
+ Args:
1477
+ id (str): The ID of the knowledge row to get.
1478
+
1479
+ Returns:
1480
+ Optional[KnowledgeRow]: The knowledge row, or None if it doesn't exist.
1481
+
1482
+ Raises:
1483
+ Exception: If an error occurs during retrieval.
1484
+ """
1485
+ try:
1486
+ collection_ref = self._get_collection(table_type="knowledge")
1487
+ docs = collection_ref.where(filter=FieldFilter("id", "==", id)).stream()
1488
+
1489
+ for doc in docs:
1490
+ data = doc.to_dict()
1491
+ return KnowledgeRow.model_validate(data)
1492
+
1493
+ return None
1494
+
1495
+ except Exception as e:
1496
+ log_error(f"Error getting knowledge content: {e}")
1497
+ raise e
1498
+
1499
+ def get_knowledge_contents(
1500
+ self,
1501
+ limit: Optional[int] = None,
1502
+ page: Optional[int] = None,
1503
+ sort_by: Optional[str] = None,
1504
+ sort_order: Optional[str] = None,
1505
+ ) -> Tuple[List[KnowledgeRow], int]:
1506
+ """Get all knowledge contents from the database.
1507
+
1508
+ Args:
1509
+ limit (Optional[int]): The maximum number of knowledge contents to return.
1510
+ page (Optional[int]): The page number.
1511
+ sort_by (Optional[str]): The column to sort by.
1512
+ sort_order (Optional[str]): The order to sort by.
1513
+ create_table_if_not_found (Optional[bool]): Whether to create the table if it doesn't exist.
1514
+
1515
+ Returns:
1516
+ Tuple[List[KnowledgeRow], int]: The knowledge contents and total count.
1517
+
1518
+ Raises:
1519
+ Exception: If an error occurs during retrieval.
1520
+ """
1521
+ try:
1522
+ collection_ref = self._get_collection(table_type="knowledge")
1523
+ if collection_ref is None:
1524
+ return [], 0
1525
+
1526
+ query = collection_ref
1527
+
1528
+ # Apply sorting
1529
+ query = apply_sorting(query, sort_by, sort_order)
1530
+
1531
+ # Apply pagination
1532
+ query = apply_pagination(query, limit, page)
1533
+
1534
+ docs = query.stream()
1535
+ records = []
1536
+ for doc in docs:
1537
+ records.append(doc.to_dict())
1538
+
1539
+ knowledge_rows = [KnowledgeRow.model_validate(record) for record in records]
1540
+ total_count = len(knowledge_rows) # Simplified count
1541
+
1542
+ return knowledge_rows, total_count
1543
+
1544
+ except Exception as e:
1545
+ log_error(f"Error getting knowledge contents: {e}")
1546
+ raise e
1547
+
1548
+ def upsert_knowledge_content(self, knowledge_row: KnowledgeRow):
1549
+ """Upsert knowledge content in the database.
1550
+
1551
+ Args:
1552
+ knowledge_row (KnowledgeRow): The knowledge row to upsert.
1553
+
1554
+ Returns:
1555
+ Optional[KnowledgeRow]: The upserted knowledge row, or None if the operation fails.
1556
+ """
1557
+ try:
1558
+ collection_ref = self._get_collection(table_type="knowledge", create_collection_if_not_found=True)
1559
+ if collection_ref is None:
1560
+ return None
1561
+
1562
+ update_doc = knowledge_row.model_dump()
1563
+
1564
+ # Find existing document or create new one
1565
+ docs = collection_ref.where(filter=FieldFilter("id", "==", knowledge_row.id)).stream()
1566
+ doc_ref = next((doc.reference for doc in docs), None)
1567
+
1568
+ if doc_ref is None:
1569
+ doc_ref = collection_ref.document()
1570
+
1571
+ doc_ref.set(update_doc, merge=True)
1572
+
1573
+ return knowledge_row
1574
+
1575
+ except Exception as e:
1576
+ log_error(f"Error upserting knowledge content: {e}")
1577
+ raise e
1578
+
1579
+ # -- Eval methods --
1580
+
1581
+ def create_eval_run(self, eval_run: EvalRunRecord) -> Optional[EvalRunRecord]:
1582
+ """Create an EvalRunRecord in the database."""
1583
+ try:
1584
+ collection_ref = self._get_collection(table_type="evals", create_collection_if_not_found=True)
1585
+
1586
+ current_time = int(time.time())
1587
+ eval_dict = eval_run.model_dump()
1588
+ eval_dict["created_at"] = current_time
1589
+ eval_dict["updated_at"] = current_time
1590
+
1591
+ doc_ref = collection_ref.document()
1592
+ doc_ref.set(eval_dict)
1593
+
1594
+ log_debug(f"Created eval run with id '{eval_run.run_id}'")
1595
+
1596
+ return eval_run
1597
+
1598
+ except Exception as e:
1599
+ log_error(f"Error creating eval run: {e}")
1600
+ raise e
1601
+
1602
+ def delete_eval_run(self, eval_run_id: str) -> None:
1603
+ """Delete an eval run from the database."""
1604
+ try:
1605
+ collection_ref = self._get_collection(table_type="evals")
1606
+ docs = collection_ref.where(filter=FieldFilter("run_id", "==", eval_run_id)).stream()
1607
+
1608
+ deleted_count = 0
1609
+ for doc in docs:
1610
+ doc.reference.delete()
1611
+ deleted_count += 1
1612
+
1613
+ if deleted_count == 0:
1614
+ log_info(f"No eval run found with ID: {eval_run_id}")
1615
+ else:
1616
+ log_info(f"Deleted eval run with ID: {eval_run_id}")
1617
+
1618
+ except Exception as e:
1619
+ log_error(f"Error deleting eval run {eval_run_id}: {e}")
1620
+ raise e
1621
+
1622
+ def delete_eval_runs(self, eval_run_ids: List[str]) -> None:
1623
+ """Delete multiple eval runs from the database.
1624
+
1625
+ Args:
1626
+ eval_run_ids (List[str]): The IDs of the eval runs to delete.
1627
+
1628
+ Raises:
1629
+ Exception: If there is an error deleting the eval runs.
1630
+ """
1631
+ try:
1632
+ collection_ref = self._get_collection(table_type="evals")
1633
+ batch = self.db_client.batch()
1634
+ deleted_count = 0
1635
+
1636
+ for eval_run_id in eval_run_ids:
1637
+ docs = collection_ref.where(filter=FieldFilter("run_id", "==", eval_run_id)).stream()
1638
+ for doc in docs:
1639
+ batch.delete(doc.reference)
1640
+ deleted_count += 1
1641
+
1642
+ batch.commit()
1643
+
1644
+ if deleted_count == 0:
1645
+ log_info(f"No eval runs found with IDs: {eval_run_ids}")
1646
+ else:
1647
+ log_info(f"Deleted {deleted_count} eval runs")
1648
+
1649
+ except Exception as e:
1650
+ log_error(f"Error deleting eval runs {eval_run_ids}: {e}")
1651
+ raise e
1652
+
1653
+ def get_eval_run(
1654
+ self, eval_run_id: str, deserialize: Optional[bool] = True
1655
+ ) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
1656
+ """Get an eval run from the database.
1657
+
1658
+ Args:
1659
+ eval_run_id (str): The ID of the eval run to get.
1660
+ deserialize (Optional[bool]): Whether to serialize the eval run. Defaults to True.
1661
+
1662
+ Returns:
1663
+ Optional[Union[EvalRunRecord, Dict[str, Any]]]:
1664
+ - When deserialize=True: EvalRunRecord object
1665
+ - When deserialize=False: EvalRun dictionary
1666
+
1667
+ Raises:
1668
+ Exception: If there is an error getting the eval run.
1669
+ """
1670
+ try:
1671
+ collection_ref = self._get_collection(table_type="evals")
1672
+ if not collection_ref:
1673
+ return None
1674
+
1675
+ docs = collection_ref.where(filter=FieldFilter("run_id", "==", eval_run_id)).stream()
1676
+
1677
+ eval_run_raw = None
1678
+ for doc in docs:
1679
+ eval_run_raw = doc.to_dict()
1680
+ break
1681
+
1682
+ if not eval_run_raw:
1683
+ return None
1684
+
1685
+ if not deserialize:
1686
+ return eval_run_raw
1687
+
1688
+ return EvalRunRecord.model_validate(eval_run_raw)
1689
+
1690
+ except Exception as e:
1691
+ log_error(f"Exception getting eval run {eval_run_id}: {e}")
1692
+ raise e
1693
+
1694
+ def get_eval_runs(
1695
+ self,
1696
+ limit: Optional[int] = None,
1697
+ page: Optional[int] = None,
1698
+ sort_by: Optional[str] = None,
1699
+ sort_order: Optional[str] = None,
1700
+ agent_id: Optional[str] = None,
1701
+ team_id: Optional[str] = None,
1702
+ workflow_id: Optional[str] = None,
1703
+ model_id: Optional[str] = None,
1704
+ filter_type: Optional[EvalFilterType] = None,
1705
+ eval_type: Optional[List[EvalType]] = None,
1706
+ deserialize: Optional[bool] = True,
1707
+ ) -> Union[List[EvalRunRecord], Tuple[List[Dict[str, Any]], int]]:
1708
+ """Get all eval runs from the database.
1709
+
1710
+ Args:
1711
+ limit (Optional[int]): The maximum number of eval runs to return.
1712
+ page (Optional[int]): The page number to return.
1713
+ sort_by (Optional[str]): The field to sort by.
1714
+ sort_order (Optional[str]): The order to sort by.
1715
+ agent_id (Optional[str]): The ID of the agent to filter by.
1716
+ team_id (Optional[str]): The ID of the team to filter by.
1717
+ workflow_id (Optional[str]): The ID of the workflow to filter by.
1718
+ model_id (Optional[str]): The ID of the model to filter by.
1719
+ eval_type (Optional[List[EvalType]]): The type of eval to filter by.
1720
+ filter_type (Optional[EvalFilterType]): The type of filter to apply.
1721
+ deserialize (Optional[bool]): Whether to serialize the eval runs. Defaults to True.
1722
+ create_table_if_not_found (Optional[bool]): Whether to create the table if it doesn't exist.
1723
+
1724
+ Returns:
1725
+ Union[List[EvalRunRecord], Tuple[List[Dict[str, Any]], int]]:
1726
+ - When deserialize=True: List of EvalRunRecord objects
1727
+ - When deserialize=False: List of eval run dictionaries and the total count
1728
+
1729
+ Raises:
1730
+ Exception: If there is an error getting the eval runs.
1731
+ """
1732
+ try:
1733
+ collection_ref = self._get_collection(table_type="evals")
1734
+ if collection_ref is None:
1735
+ return [] if deserialize else ([], 0)
1736
+
1737
+ query = collection_ref
1738
+
1739
+ if agent_id is not None:
1740
+ query = query.where(filter=FieldFilter("agent_id", "==", agent_id))
1741
+ if team_id is not None:
1742
+ query = query.where(filter=FieldFilter("team_id", "==", team_id))
1743
+ if workflow_id is not None:
1744
+ query = query.where(filter=FieldFilter("workflow_id", "==", workflow_id))
1745
+ if model_id is not None:
1746
+ query = query.where(filter=FieldFilter("model_id", "==", model_id))
1747
+ if eval_type is not None and len(eval_type) > 0:
1748
+ eval_values = [et.value for et in eval_type]
1749
+ query = query.where(filter=FieldFilter("eval_type", "in", eval_values))
1750
+ if filter_type is not None:
1751
+ if filter_type == EvalFilterType.AGENT:
1752
+ query = query.where(filter=FieldFilter("agent_id", "!=", None))
1753
+ elif filter_type == EvalFilterType.TEAM:
1754
+ query = query.where(filter=FieldFilter("team_id", "!=", None))
1755
+ elif filter_type == EvalFilterType.WORKFLOW:
1756
+ query = query.where(filter=FieldFilter("workflow_id", "!=", None))
1757
+
1758
+ # Apply default sorting by created_at desc if no sort parameters provided
1759
+ if sort_by is None:
1760
+ from google.cloud.firestore import Query
1761
+
1762
+ query = query.order_by("created_at", direction=Query.DESCENDING)
1763
+ else:
1764
+ query = apply_sorting(query, sort_by, sort_order)
1765
+
1766
+ # Get all documents for counting before pagination
1767
+ all_docs = query.stream()
1768
+ all_records = [doc.to_dict() for doc in all_docs]
1769
+
1770
+ if not all_records:
1771
+ return [] if deserialize else ([], 0)
1772
+
1773
+ # Get total count before pagination
1774
+ total_count = len(all_records)
1775
+
1776
+ # Apply pagination to the results
1777
+ if limit is not None and page is not None:
1778
+ start_index = (page - 1) * limit
1779
+ end_index = start_index + limit
1780
+ records = all_records[start_index:end_index]
1781
+ elif limit is not None:
1782
+ records = all_records[:limit]
1783
+ else:
1784
+ records = all_records
1785
+
1786
+ if not deserialize:
1787
+ return records, total_count
1788
+
1789
+ return [EvalRunRecord.model_validate(row) for row in records]
1790
+
1791
+ except Exception as e:
1792
+ log_error(f"Exception getting eval runs: {e}")
1793
+ raise e
1794
+
1795
+ def rename_eval_run(
1796
+ self, eval_run_id: str, name: str, deserialize: Optional[bool] = True
1797
+ ) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
1798
+ """Update the name of an eval run in the database.
1799
+
1800
+ Args:
1801
+ eval_run_id (str): The ID of the eval run to update.
1802
+ name (str): The new name of the eval run.
1803
+ deserialize (Optional[bool]): Whether to serialize the eval run. Defaults to True.
1804
+
1805
+ Returns:
1806
+ Optional[Union[EvalRunRecord, Dict[str, Any]]]:
1807
+ - When deserialize=True: EvalRunRecord object
1808
+ - When deserialize=False: EvalRun dictionary
1809
+
1810
+ Raises:
1811
+ Exception: If there is an error updating the eval run.
1812
+ """
1813
+ try:
1814
+ collection_ref = self._get_collection(table_type="evals")
1815
+ if not collection_ref:
1816
+ return None
1817
+
1818
+ docs = collection_ref.where(filter=FieldFilter("run_id", "==", eval_run_id)).stream()
1819
+ doc_ref = next((doc.reference for doc in docs), None)
1820
+
1821
+ if doc_ref is None:
1822
+ return None
1823
+
1824
+ doc_ref.update({"name": name, "updated_at": int(time.time())})
1825
+
1826
+ updated_doc = doc_ref.get()
1827
+ if not updated_doc.exists:
1828
+ return None
1829
+
1830
+ result = updated_doc.to_dict()
1831
+
1832
+ log_debug(f"Renamed eval run with id '{eval_run_id}' to '{name}'")
1833
+
1834
+ if not result or not deserialize:
1835
+ return result
1836
+
1837
+ return EvalRunRecord.model_validate(result)
1838
+
1839
+ except Exception as e:
1840
+ log_error(f"Error updating eval run name {eval_run_id}: {e}")
1841
+ raise e
1842
+
1843
+ # --- Traces ---
1844
    def upsert_trace(self, trace: "Trace") -> None:
        """Create or update a single trace record in the database.

        If a document with the same trace_id already exists, its end_time,
        duration and status are refreshed (and context fields merged in);
        otherwise a new document is created with zeroed span counters.

        Args:
            trace: The Trace object to store (one per trace_id).
        """
        try:
            collection_ref = self._get_collection(table_type="traces", create_collection_if_not_found=True)
            if collection_ref is None:
                return

            # Check if trace already exists
            docs = collection_ref.where(filter=FieldFilter("trace_id", "==", trace.trace_id)).limit(1).stream()
            existing_doc = None
            existing_data = None
            for doc in docs:
                existing_doc = doc
                existing_data = doc.to_dict()
                break

            if existing_data and existing_doc is not None:
                # Update existing trace
                def get_component_level(workflow_id, team_id, agent_id, name):
                    """Rank a trace name's precedence: workflow (3) > team (2) > agent (1).

                    Non-root names (those without ".run"/".arun") always rank 0, so a
                    root-level name is never overwritten by a nested component's name.
                    """
                    is_root_name = ".run" in name or ".arun" in name
                    if not is_root_name:
                        return 0
                    elif workflow_id:
                        return 3
                    elif team_id:
                        return 2
                    elif agent_id:
                        return 1
                    else:
                        return 0

                existing_level = get_component_level(
                    existing_data.get("workflow_id"),
                    existing_data.get("team_id"),
                    existing_data.get("agent_id"),
                    existing_data.get("name", ""),
                )
                new_level = get_component_level(trace.workflow_id, trace.team_id, trace.agent_id, trace.name)
                # Only replace the stored name when the new one outranks it.
                should_update_name = new_level > existing_level

                # Parse existing start_time to calculate correct duration
                # (stored as an ISO-8601 string; "Z" is normalized for fromisoformat).
                existing_start_time_str = existing_data.get("start_time")
                if isinstance(existing_start_time_str, str):
                    existing_start_time = datetime.fromisoformat(existing_start_time_str.replace("Z", "+00:00"))
                else:
                    # Fall back to the incoming trace's start_time if the stored value is unusable.
                    existing_start_time = trace.start_time

                # Duration is measured from the ORIGINAL start to the latest end.
                recalculated_duration_ms = int((trace.end_time - existing_start_time).total_seconds() * 1000)

                update_values: Dict[str, Any] = {
                    "end_time": trace.end_time.isoformat(),
                    "duration_ms": recalculated_duration_ms,
                    "status": trace.status,
                }

                if should_update_name:
                    update_values["name"] = trace.name

                # Update context fields only if new value is not None
                if trace.run_id is not None:
                    update_values["run_id"] = trace.run_id
                if trace.session_id is not None:
                    update_values["session_id"] = trace.session_id
                if trace.user_id is not None:
                    update_values["user_id"] = trace.user_id
                if trace.agent_id is not None:
                    update_values["agent_id"] = trace.agent_id
                if trace.team_id is not None:
                    update_values["team_id"] = trace.team_id
                if trace.workflow_id is not None:
                    update_values["workflow_id"] = trace.workflow_id

                existing_doc.reference.update(update_values)
            else:
                # Create new trace with initialized counters
                trace_dict = trace.to_dict()
                trace_dict["total_spans"] = 0
                trace_dict["error_count"] = 0
                collection_ref.add(trace_dict)

        except Exception as e:
            # Best-effort: failures are logged, not raised, so tracing never breaks a run.
            log_error(f"Error creating trace: {e}")
1930
+
1931
+ def get_trace(
1932
+ self,
1933
+ trace_id: Optional[str] = None,
1934
+ run_id: Optional[str] = None,
1935
+ ):
1936
+ """Get a single trace by trace_id or other filters.
1937
+
1938
+ Args:
1939
+ trace_id: The unique trace identifier.
1940
+ run_id: Filter by run ID (returns first match).
1941
+
1942
+ Returns:
1943
+ Optional[Trace]: The trace if found, None otherwise.
1944
+
1945
+ Note:
1946
+ If multiple filters are provided, trace_id takes precedence.
1947
+ For other filters, the most recent trace is returned.
1948
+ """
1949
+ try:
1950
+ from agno.tracing.schemas import Trace
1951
+
1952
+ collection_ref = self._get_collection(table_type="traces")
1953
+ if collection_ref is None:
1954
+ return None
1955
+
1956
+ if trace_id:
1957
+ docs = collection_ref.where(filter=FieldFilter("trace_id", "==", trace_id)).limit(1).stream()
1958
+ elif run_id:
1959
+ from google.cloud.firestore import Query
1960
+
1961
+ docs = (
1962
+ collection_ref.where(filter=FieldFilter("run_id", "==", run_id))
1963
+ .order_by("start_time", direction=Query.DESCENDING)
1964
+ .limit(1)
1965
+ .stream()
1966
+ )
1967
+ else:
1968
+ log_debug("get_trace called without any filter parameters")
1969
+ return None
1970
+
1971
+ for doc in docs:
1972
+ trace_data = doc.to_dict()
1973
+ # Use stored values (default to 0 if not present)
1974
+ trace_data.setdefault("total_spans", 0)
1975
+ trace_data.setdefault("error_count", 0)
1976
+ return Trace.from_dict(trace_data)
1977
+
1978
+ return None
1979
+
1980
+ except Exception as e:
1981
+ log_error(f"Error getting trace: {e}")
1982
+ return None
1983
+
1984
+ def get_traces(
1985
+ self,
1986
+ run_id: Optional[str] = None,
1987
+ session_id: Optional[str] = None,
1988
+ user_id: Optional[str] = None,
1989
+ agent_id: Optional[str] = None,
1990
+ team_id: Optional[str] = None,
1991
+ workflow_id: Optional[str] = None,
1992
+ status: Optional[str] = None,
1993
+ start_time: Optional[datetime] = None,
1994
+ end_time: Optional[datetime] = None,
1995
+ limit: Optional[int] = 20,
1996
+ page: Optional[int] = 1,
1997
+ ) -> tuple[List, int]:
1998
+ """Get traces matching the provided filters.
1999
+
2000
+ Args:
2001
+ run_id: Filter by run ID.
2002
+ session_id: Filter by session ID.
2003
+ user_id: Filter by user ID.
2004
+ agent_id: Filter by agent ID.
2005
+ team_id: Filter by team ID.
2006
+ workflow_id: Filter by workflow ID.
2007
+ status: Filter by status (OK, ERROR, UNSET).
2008
+ start_time: Filter traces starting after this datetime.
2009
+ end_time: Filter traces ending before this datetime.
2010
+ limit: Maximum number of traces to return per page.
2011
+ page: Page number (1-indexed).
2012
+
2013
+ Returns:
2014
+ tuple[List[Trace], int]: Tuple of (list of matching traces, total count).
2015
+ """
2016
+ try:
2017
+ from agno.tracing.schemas import Trace
2018
+
2019
+ collection_ref = self._get_collection(table_type="traces")
2020
+ if collection_ref is None:
2021
+ return [], 0
2022
+
2023
+ query = collection_ref
2024
+
2025
+ # Apply filters
2026
+ if run_id:
2027
+ query = query.where(filter=FieldFilter("run_id", "==", run_id))
2028
+ if session_id:
2029
+ query = query.where(filter=FieldFilter("session_id", "==", session_id))
2030
+ if user_id:
2031
+ query = query.where(filter=FieldFilter("user_id", "==", user_id))
2032
+ if agent_id:
2033
+ query = query.where(filter=FieldFilter("agent_id", "==", agent_id))
2034
+ if team_id:
2035
+ query = query.where(filter=FieldFilter("team_id", "==", team_id))
2036
+ if workflow_id:
2037
+ query = query.where(filter=FieldFilter("workflow_id", "==", workflow_id))
2038
+ if status:
2039
+ query = query.where(filter=FieldFilter("status", "==", status))
2040
+ if start_time:
2041
+ query = query.where(filter=FieldFilter("start_time", ">=", start_time.isoformat()))
2042
+ if end_time:
2043
+ query = query.where(filter=FieldFilter("end_time", "<=", end_time.isoformat()))
2044
+
2045
+ # Get all matching documents
2046
+ docs = query.stream()
2047
+ all_records = [doc.to_dict() for doc in docs]
2048
+
2049
+ # Sort by start_time descending
2050
+ all_records.sort(key=lambda x: x.get("start_time", ""), reverse=True)
2051
+
2052
+ # Get total count
2053
+ total_count = len(all_records)
2054
+
2055
+ # Apply pagination
2056
+ if limit and page:
2057
+ offset = (page - 1) * limit
2058
+ paginated_records = all_records[offset : offset + limit]
2059
+ elif limit:
2060
+ paginated_records = all_records[:limit]
2061
+ else:
2062
+ paginated_records = all_records
2063
+
2064
+ # Convert to Trace objects with stored span counts
2065
+ traces = []
2066
+ for trace_data in paginated_records:
2067
+ trace_data.setdefault("total_spans", 0)
2068
+ trace_data.setdefault("error_count", 0)
2069
+ traces.append(Trace.from_dict(trace_data))
2070
+
2071
+ return traces, total_count
2072
+
2073
+ except Exception as e:
2074
+ log_error(f"Error getting traces: {e}")
2075
+ return [], 0
2076
+
2077
    def get_trace_stats(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        workflow_id: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: Optional[int] = 20,
        page: Optional[int] = 1,
    ) -> tuple[List[Dict[str, Any]], int]:
        """Get trace statistics grouped by session.

        Args:
            user_id: Filter by user ID.
            agent_id: Filter by agent ID.
            team_id: Filter by team ID.
            workflow_id: Filter by workflow ID.
            start_time: Filter sessions with traces created after this datetime.
            end_time: Filter sessions with traces created before this datetime.
            limit: Maximum number of sessions to return per page.
            page: Page number (1-indexed).

        Returns:
            tuple[List[Dict], int]: Tuple of (list of session stats dicts, total count).
            Each dict contains: session_id, user_id, agent_id, team_id, workflow_id, total_traces,
            first_trace_at, last_trace_at.
        """
        try:
            collection_ref = self._get_collection(table_type="traces")
            if collection_ref is None:
                return [], 0

            query = collection_ref

            # Apply filters
            if user_id:
                query = query.where(filter=FieldFilter("user_id", "==", user_id))
            if agent_id:
                query = query.where(filter=FieldFilter("agent_id", "==", agent_id))
            if team_id:
                query = query.where(filter=FieldFilter("team_id", "==", team_id))
            if workflow_id:
                query = query.where(filter=FieldFilter("workflow_id", "==", workflow_id))
            if start_time:
                query = query.where(filter=FieldFilter("created_at", ">=", start_time.isoformat()))
            if end_time:
                query = query.where(filter=FieldFilter("created_at", "<=", end_time.isoformat()))

            # Get all matching documents
            docs = query.stream()

            # Aggregate by session_id
            session_stats: Dict[str, Dict[str, Any]] = {}
            for doc in docs:
                trace_data = doc.to_dict()
                session_id = trace_data.get("session_id")
                # Traces without a session are excluded from per-session stats.
                if not session_id:
                    continue

                if session_id not in session_stats:
                    # First trace seen for this session seeds the context fields
                    # and both timestamp bounds.
                    session_stats[session_id] = {
                        "session_id": session_id,
                        "user_id": trace_data.get("user_id"),
                        "agent_id": trace_data.get("agent_id"),
                        "team_id": trace_data.get("team_id"),
                        "workflow_id": trace_data.get("workflow_id"),
                        "total_traces": 0,
                        "first_trace_at": trace_data.get("created_at"),
                        "last_trace_at": trace_data.get("created_at"),
                    }

                session_stats[session_id]["total_traces"] += 1

                # Widen the [first_trace_at, last_trace_at] window for this session.
                # NOTE(review): the guard requires BOTH existing bounds to be truthy —
                # if the seeding trace had no created_at, later traces never update
                # the window. Presumably created_at is always set; verify upstream.
                created_at = trace_data.get("created_at")
                if (
                    created_at
                    and session_stats[session_id]["first_trace_at"]
                    and session_stats[session_id]["last_trace_at"]
                ):
                    if created_at < session_stats[session_id]["first_trace_at"]:
                        session_stats[session_id]["first_trace_at"] = created_at
                    if created_at > session_stats[session_id]["last_trace_at"]:
                        session_stats[session_id]["last_trace_at"] = created_at

            # Convert to list and sort by last_trace_at descending
            stats_list = list(session_stats.values())
            stats_list.sort(key=lambda x: x.get("last_trace_at", ""), reverse=True)

            # Convert datetime strings to datetime objects
            # ("Z" suffix is normalized so fromisoformat accepts it).
            for stat in stats_list:
                first_trace_at = stat["first_trace_at"]
                last_trace_at = stat["last_trace_at"]
                if isinstance(first_trace_at, str):
                    stat["first_trace_at"] = datetime.fromisoformat(first_trace_at.replace("Z", "+00:00"))
                if isinstance(last_trace_at, str):
                    stat["last_trace_at"] = datetime.fromisoformat(last_trace_at.replace("Z", "+00:00"))

            # Get total count
            total_count = len(stats_list)

            # Apply pagination
            if limit and page:
                offset = (page - 1) * limit
                paginated_stats = stats_list[offset : offset + limit]
            elif limit:
                paginated_stats = stats_list[:limit]
            else:
                paginated_stats = stats_list

            return paginated_stats, total_count

        except Exception as e:
            log_error(f"Error getting trace stats: {e}")
            return [], 0
2192
+
2193
+ # --- Spans ---
2194
+ def create_span(self, span: "Span") -> None:
2195
+ """Create a single span in the database.
2196
+
2197
+ Args:
2198
+ span: The Span object to store.
2199
+ """
2200
+ try:
2201
+ collection_ref = self._get_collection(table_type="spans", create_collection_if_not_found=True)
2202
+ if collection_ref is None:
2203
+ return
2204
+
2205
+ span_dict = span.to_dict()
2206
+ # Serialize attributes as JSON string
2207
+ if "attributes" in span_dict and isinstance(span_dict["attributes"], dict):
2208
+ span_dict["attributes"] = json.dumps(span_dict["attributes"])
2209
+
2210
+ collection_ref.add(span_dict)
2211
+
2212
+ # Increment total_spans and error_count on trace
2213
+ traces_collection = self._get_collection(table_type="traces")
2214
+ if traces_collection:
2215
+ try:
2216
+ docs = (
2217
+ traces_collection.where(filter=FieldFilter("trace_id", "==", span.trace_id)).limit(1).stream()
2218
+ )
2219
+ for doc in docs:
2220
+ trace_data = doc.to_dict()
2221
+ current_total = trace_data.get("total_spans", 0)
2222
+ current_errors = trace_data.get("error_count", 0)
2223
+
2224
+ update_values = {"total_spans": current_total + 1}
2225
+ if span.status_code == "ERROR":
2226
+ update_values["error_count"] = current_errors + 1
2227
+
2228
+ doc.reference.update(update_values)
2229
+ break
2230
+ except Exception as update_error:
2231
+ log_debug(f"Could not update trace span counts: {update_error}")
2232
+
2233
+ except Exception as e:
2234
+ log_error(f"Error creating span: {e}")
2235
+
2236
+ def create_spans(self, spans: List) -> None:
2237
+ """Create multiple spans in the database as a batch.
2238
+
2239
+ Args:
2240
+ spans: List of Span objects to store.
2241
+ """
2242
+ if not spans:
2243
+ return
2244
+
2245
+ try:
2246
+ collection_ref = self._get_collection(table_type="spans", create_collection_if_not_found=True)
2247
+ if collection_ref is None:
2248
+ return
2249
+
2250
+ # Firestore batch has a limit of 500 operations
2251
+ batch = self.db_client.batch()
2252
+ batch_count = 0
2253
+
2254
+ for span in spans:
2255
+ span_dict = span.to_dict()
2256
+ # Serialize attributes as JSON string
2257
+ if "attributes" in span_dict and isinstance(span_dict["attributes"], dict):
2258
+ span_dict["attributes"] = json.dumps(span_dict["attributes"])
2259
+
2260
+ doc_ref = collection_ref.document()
2261
+ batch.set(doc_ref, span_dict)
2262
+ batch_count += 1
2263
+
2264
+ # Commit batch if reaching limit
2265
+ if batch_count >= 500:
2266
+ batch.commit()
2267
+ batch = self.db_client.batch()
2268
+ batch_count = 0
2269
+
2270
+ # Commit remaining operations
2271
+ if batch_count > 0:
2272
+ batch.commit()
2273
+
2274
+ # Update trace with total_spans and error_count
2275
+ trace_id = spans[0].trace_id
2276
+ spans_count = len(spans)
2277
+ error_count = sum(1 for s in spans if s.status_code == "ERROR")
2278
+
2279
+ traces_collection = self._get_collection(table_type="traces")
2280
+ if traces_collection:
2281
+ try:
2282
+ docs = traces_collection.where(filter=FieldFilter("trace_id", "==", trace_id)).limit(1).stream()
2283
+ for doc in docs:
2284
+ trace_data = doc.to_dict()
2285
+ current_total = trace_data.get("total_spans", 0)
2286
+ current_errors = trace_data.get("error_count", 0)
2287
+
2288
+ doc.reference.update(
2289
+ {
2290
+ "total_spans": current_total + spans_count,
2291
+ "error_count": current_errors + error_count,
2292
+ }
2293
+ )
2294
+ break
2295
+ except Exception as update_error:
2296
+ log_debug(f"Could not update trace span counts: {update_error}")
2297
+
2298
+ except Exception as e:
2299
+ log_error(f"Error creating spans batch: {e}")
2300
+
2301
+ def get_span(self, span_id: str):
2302
+ """Get a single span by its span_id.
2303
+
2304
+ Args:
2305
+ span_id: The unique span identifier.
2306
+
2307
+ Returns:
2308
+ Optional[Span]: The span if found, None otherwise.
2309
+ """
2310
+ try:
2311
+ from agno.tracing.schemas import Span
2312
+
2313
+ collection_ref = self._get_collection(table_type="spans")
2314
+ if collection_ref is None:
2315
+ return None
2316
+
2317
+ docs = collection_ref.where(filter=FieldFilter("span_id", "==", span_id)).limit(1).stream()
2318
+
2319
+ for doc in docs:
2320
+ span_data = doc.to_dict()
2321
+ # Deserialize attributes from JSON string
2322
+ if "attributes" in span_data and isinstance(span_data["attributes"], str):
2323
+ span_data["attributes"] = json.loads(span_data["attributes"])
2324
+ return Span.from_dict(span_data)
2325
+
2326
+ return None
2327
+
2328
+ except Exception as e:
2329
+ log_error(f"Error getting span: {e}")
2330
+ return None
2331
+
2332
+ def get_spans(
2333
+ self,
2334
+ trace_id: Optional[str] = None,
2335
+ parent_span_id: Optional[str] = None,
2336
+ limit: Optional[int] = 1000,
2337
+ ) -> List:
2338
+ """Get spans matching the provided filters.
2339
+
2340
+ Args:
2341
+ trace_id: Filter by trace ID.
2342
+ parent_span_id: Filter by parent span ID.
2343
+ limit: Maximum number of spans to return.
2344
+
2345
+ Returns:
2346
+ List[Span]: List of matching spans.
2347
+ """
2348
+ try:
2349
+ from agno.tracing.schemas import Span
2350
+
2351
+ collection_ref = self._get_collection(table_type="spans")
2352
+ if collection_ref is None:
2353
+ return []
2354
+
2355
+ query = collection_ref
2356
+
2357
+ if trace_id:
2358
+ query = query.where(filter=FieldFilter("trace_id", "==", trace_id))
2359
+ if parent_span_id:
2360
+ query = query.where(filter=FieldFilter("parent_span_id", "==", parent_span_id))
2361
+
2362
+ if limit:
2363
+ query = query.limit(limit)
2364
+
2365
+ docs = query.stream()
2366
+
2367
+ spans = []
2368
+ for doc in docs:
2369
+ span_data = doc.to_dict()
2370
+ # Deserialize attributes from JSON string
2371
+ if "attributes" in span_data and isinstance(span_data["attributes"], str):
2372
+ span_data["attributes"] = json.loads(span_data["attributes"])
2373
+ spans.append(Span.from_dict(span_data))
2374
+
2375
+ return spans
2376
+
2377
+ except Exception as e:
2378
+ log_error(f"Error getting spans: {e}")
2379
+ return []