agno 0.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (723)
  1. agno/__init__.py +8 -0
  2. agno/agent/__init__.py +44 -5
  3. agno/agent/agent.py +10531 -2975
  4. agno/api/agent.py +14 -53
  5. agno/api/api.py +7 -46
  6. agno/api/evals.py +22 -0
  7. agno/api/os.py +17 -0
  8. agno/api/routes.py +6 -25
  9. agno/api/schemas/__init__.py +9 -0
  10. agno/api/schemas/agent.py +6 -9
  11. agno/api/schemas/evals.py +16 -0
  12. agno/api/schemas/os.py +14 -0
  13. agno/api/schemas/team.py +10 -10
  14. agno/api/schemas/utils.py +21 -0
  15. agno/api/schemas/workflows.py +16 -0
  16. agno/api/settings.py +53 -0
  17. agno/api/team.py +22 -26
  18. agno/api/workflow.py +28 -0
  19. agno/cloud/aws/base.py +214 -0
  20. agno/cloud/aws/s3/__init__.py +2 -0
  21. agno/cloud/aws/s3/api_client.py +43 -0
  22. agno/cloud/aws/s3/bucket.py +195 -0
  23. agno/cloud/aws/s3/object.py +57 -0
  24. agno/compression/__init__.py +3 -0
  25. agno/compression/manager.py +247 -0
  26. agno/culture/__init__.py +3 -0
  27. agno/culture/manager.py +956 -0
  28. agno/db/__init__.py +24 -0
  29. agno/db/async_postgres/__init__.py +3 -0
  30. agno/db/base.py +946 -0
  31. agno/db/dynamo/__init__.py +3 -0
  32. agno/db/dynamo/dynamo.py +2781 -0
  33. agno/db/dynamo/schemas.py +442 -0
  34. agno/db/dynamo/utils.py +743 -0
  35. agno/db/firestore/__init__.py +3 -0
  36. agno/db/firestore/firestore.py +2379 -0
  37. agno/db/firestore/schemas.py +181 -0
  38. agno/db/firestore/utils.py +376 -0
  39. agno/db/gcs_json/__init__.py +3 -0
  40. agno/db/gcs_json/gcs_json_db.py +1791 -0
  41. agno/db/gcs_json/utils.py +228 -0
  42. agno/db/in_memory/__init__.py +3 -0
  43. agno/db/in_memory/in_memory_db.py +1312 -0
  44. agno/db/in_memory/utils.py +230 -0
  45. agno/db/json/__init__.py +3 -0
  46. agno/db/json/json_db.py +1777 -0
  47. agno/db/json/utils.py +230 -0
  48. agno/db/migrations/manager.py +199 -0
  49. agno/db/migrations/v1_to_v2.py +635 -0
  50. agno/db/migrations/versions/v2_3_0.py +938 -0
  51. agno/db/mongo/__init__.py +17 -0
  52. agno/db/mongo/async_mongo.py +2760 -0
  53. agno/db/mongo/mongo.py +2597 -0
  54. agno/db/mongo/schemas.py +119 -0
  55. agno/db/mongo/utils.py +276 -0
  56. agno/db/mysql/__init__.py +4 -0
  57. agno/db/mysql/async_mysql.py +2912 -0
  58. agno/db/mysql/mysql.py +2923 -0
  59. agno/db/mysql/schemas.py +186 -0
  60. agno/db/mysql/utils.py +488 -0
  61. agno/db/postgres/__init__.py +4 -0
  62. agno/db/postgres/async_postgres.py +2579 -0
  63. agno/db/postgres/postgres.py +2870 -0
  64. agno/db/postgres/schemas.py +187 -0
  65. agno/db/postgres/utils.py +442 -0
  66. agno/db/redis/__init__.py +3 -0
  67. agno/db/redis/redis.py +2141 -0
  68. agno/db/redis/schemas.py +159 -0
  69. agno/db/redis/utils.py +346 -0
  70. agno/db/schemas/__init__.py +4 -0
  71. agno/db/schemas/culture.py +120 -0
  72. agno/db/schemas/evals.py +34 -0
  73. agno/db/schemas/knowledge.py +40 -0
  74. agno/db/schemas/memory.py +61 -0
  75. agno/db/singlestore/__init__.py +3 -0
  76. agno/db/singlestore/schemas.py +179 -0
  77. agno/db/singlestore/singlestore.py +2877 -0
  78. agno/db/singlestore/utils.py +384 -0
  79. agno/db/sqlite/__init__.py +4 -0
  80. agno/db/sqlite/async_sqlite.py +2911 -0
  81. agno/db/sqlite/schemas.py +181 -0
  82. agno/db/sqlite/sqlite.py +2908 -0
  83. agno/db/sqlite/utils.py +429 -0
  84. agno/db/surrealdb/__init__.py +3 -0
  85. agno/db/surrealdb/metrics.py +292 -0
  86. agno/db/surrealdb/models.py +334 -0
  87. agno/db/surrealdb/queries.py +71 -0
  88. agno/db/surrealdb/surrealdb.py +1908 -0
  89. agno/db/surrealdb/utils.py +147 -0
  90. agno/db/utils.py +118 -0
  91. agno/eval/__init__.py +24 -0
  92. agno/eval/accuracy.py +666 -276
  93. agno/eval/agent_as_judge.py +861 -0
  94. agno/eval/base.py +29 -0
  95. agno/eval/performance.py +779 -0
  96. agno/eval/reliability.py +241 -62
  97. agno/eval/utils.py +120 -0
  98. agno/exceptions.py +143 -1
  99. agno/filters.py +354 -0
  100. agno/guardrails/__init__.py +6 -0
  101. agno/guardrails/base.py +19 -0
  102. agno/guardrails/openai.py +144 -0
  103. agno/guardrails/pii.py +94 -0
  104. agno/guardrails/prompt_injection.py +52 -0
  105. agno/hooks/__init__.py +3 -0
  106. agno/hooks/decorator.py +164 -0
  107. agno/integrations/discord/__init__.py +3 -0
  108. agno/integrations/discord/client.py +203 -0
  109. agno/knowledge/__init__.py +5 -1
  110. agno/{document → knowledge}/chunking/agentic.py +22 -14
  111. agno/{document → knowledge}/chunking/document.py +2 -2
  112. agno/{document → knowledge}/chunking/fixed.py +7 -6
  113. agno/knowledge/chunking/markdown.py +151 -0
  114. agno/{document → knowledge}/chunking/recursive.py +15 -3
  115. agno/knowledge/chunking/row.py +39 -0
  116. agno/knowledge/chunking/semantic.py +91 -0
  117. agno/knowledge/chunking/strategy.py +165 -0
  118. agno/knowledge/content.py +74 -0
  119. agno/knowledge/document/__init__.py +5 -0
  120. agno/{document → knowledge/document}/base.py +12 -2
  121. agno/knowledge/embedder/__init__.py +5 -0
  122. agno/knowledge/embedder/aws_bedrock.py +343 -0
  123. agno/knowledge/embedder/azure_openai.py +210 -0
  124. agno/{embedder → knowledge/embedder}/base.py +8 -0
  125. agno/knowledge/embedder/cohere.py +323 -0
  126. agno/knowledge/embedder/fastembed.py +62 -0
  127. agno/{embedder → knowledge/embedder}/fireworks.py +1 -1
  128. agno/knowledge/embedder/google.py +258 -0
  129. agno/knowledge/embedder/huggingface.py +94 -0
  130. agno/knowledge/embedder/jina.py +182 -0
  131. agno/knowledge/embedder/langdb.py +22 -0
  132. agno/knowledge/embedder/mistral.py +206 -0
  133. agno/knowledge/embedder/nebius.py +13 -0
  134. agno/knowledge/embedder/ollama.py +154 -0
  135. agno/knowledge/embedder/openai.py +195 -0
  136. agno/knowledge/embedder/sentence_transformer.py +63 -0
  137. agno/{embedder → knowledge/embedder}/together.py +1 -1
  138. agno/knowledge/embedder/vllm.py +262 -0
  139. agno/knowledge/embedder/voyageai.py +165 -0
  140. agno/knowledge/knowledge.py +3006 -0
  141. agno/knowledge/reader/__init__.py +7 -0
  142. agno/knowledge/reader/arxiv_reader.py +81 -0
  143. agno/knowledge/reader/base.py +95 -0
  144. agno/knowledge/reader/csv_reader.py +164 -0
  145. agno/knowledge/reader/docx_reader.py +82 -0
  146. agno/knowledge/reader/field_labeled_csv_reader.py +290 -0
  147. agno/knowledge/reader/firecrawl_reader.py +201 -0
  148. agno/knowledge/reader/json_reader.py +88 -0
  149. agno/knowledge/reader/markdown_reader.py +137 -0
  150. agno/knowledge/reader/pdf_reader.py +431 -0
  151. agno/knowledge/reader/pptx_reader.py +101 -0
  152. agno/knowledge/reader/reader_factory.py +313 -0
  153. agno/knowledge/reader/s3_reader.py +89 -0
  154. agno/knowledge/reader/tavily_reader.py +193 -0
  155. agno/knowledge/reader/text_reader.py +127 -0
  156. agno/knowledge/reader/web_search_reader.py +325 -0
  157. agno/knowledge/reader/website_reader.py +455 -0
  158. agno/knowledge/reader/wikipedia_reader.py +91 -0
  159. agno/knowledge/reader/youtube_reader.py +78 -0
  160. agno/knowledge/remote_content/remote_content.py +88 -0
  161. agno/knowledge/reranker/__init__.py +3 -0
  162. agno/{reranker → knowledge/reranker}/base.py +1 -1
  163. agno/{reranker → knowledge/reranker}/cohere.py +2 -2
  164. agno/knowledge/reranker/infinity.py +195 -0
  165. agno/knowledge/reranker/sentence_transformer.py +54 -0
  166. agno/knowledge/types.py +39 -0
  167. agno/knowledge/utils.py +234 -0
  168. agno/media.py +439 -95
  169. agno/memory/__init__.py +16 -3
  170. agno/memory/manager.py +1474 -123
  171. agno/memory/strategies/__init__.py +15 -0
  172. agno/memory/strategies/base.py +66 -0
  173. agno/memory/strategies/summarize.py +196 -0
  174. agno/memory/strategies/types.py +37 -0
  175. agno/models/aimlapi/__init__.py +5 -0
  176. agno/models/aimlapi/aimlapi.py +62 -0
  177. agno/models/anthropic/__init__.py +4 -0
  178. agno/models/anthropic/claude.py +960 -496
  179. agno/models/aws/__init__.py +15 -0
  180. agno/models/aws/bedrock.py +686 -451
  181. agno/models/aws/claude.py +190 -183
  182. agno/models/azure/__init__.py +18 -1
  183. agno/models/azure/ai_foundry.py +489 -0
  184. agno/models/azure/openai_chat.py +89 -40
  185. agno/models/base.py +2477 -550
  186. agno/models/cerebras/__init__.py +12 -0
  187. agno/models/cerebras/cerebras.py +565 -0
  188. agno/models/cerebras/cerebras_openai.py +131 -0
  189. agno/models/cohere/__init__.py +4 -0
  190. agno/models/cohere/chat.py +306 -492
  191. agno/models/cometapi/__init__.py +5 -0
  192. agno/models/cometapi/cometapi.py +74 -0
  193. agno/models/dashscope/__init__.py +5 -0
  194. agno/models/dashscope/dashscope.py +90 -0
  195. agno/models/deepinfra/__init__.py +5 -0
  196. agno/models/deepinfra/deepinfra.py +45 -0
  197. agno/models/deepseek/__init__.py +4 -0
  198. agno/models/deepseek/deepseek.py +110 -9
  199. agno/models/fireworks/__init__.py +4 -0
  200. agno/models/fireworks/fireworks.py +19 -22
  201. agno/models/google/__init__.py +3 -7
  202. agno/models/google/gemini.py +1717 -662
  203. agno/models/google/utils.py +22 -0
  204. agno/models/groq/__init__.py +4 -0
  205. agno/models/groq/groq.py +391 -666
  206. agno/models/huggingface/__init__.py +4 -0
  207. agno/models/huggingface/huggingface.py +266 -538
  208. agno/models/ibm/__init__.py +5 -0
  209. agno/models/ibm/watsonx.py +432 -0
  210. agno/models/internlm/__init__.py +3 -0
  211. agno/models/internlm/internlm.py +20 -3
  212. agno/models/langdb/__init__.py +1 -0
  213. agno/models/langdb/langdb.py +60 -0
  214. agno/models/litellm/__init__.py +14 -0
  215. agno/models/litellm/chat.py +503 -0
  216. agno/models/litellm/litellm_openai.py +42 -0
  217. agno/models/llama_cpp/__init__.py +5 -0
  218. agno/models/llama_cpp/llama_cpp.py +22 -0
  219. agno/models/lmstudio/__init__.py +5 -0
  220. agno/models/lmstudio/lmstudio.py +25 -0
  221. agno/models/message.py +361 -39
  222. agno/models/meta/__init__.py +12 -0
  223. agno/models/meta/llama.py +502 -0
  224. agno/models/meta/llama_openai.py +79 -0
  225. agno/models/metrics.py +120 -0
  226. agno/models/mistral/__init__.py +4 -0
  227. agno/models/mistral/mistral.py +293 -393
  228. agno/models/nebius/__init__.py +3 -0
  229. agno/models/nebius/nebius.py +53 -0
  230. agno/models/nexus/__init__.py +3 -0
  231. agno/models/nexus/nexus.py +22 -0
  232. agno/models/nvidia/__init__.py +4 -0
  233. agno/models/nvidia/nvidia.py +22 -3
  234. agno/models/ollama/__init__.py +4 -2
  235. agno/models/ollama/chat.py +257 -492
  236. agno/models/openai/__init__.py +7 -0
  237. agno/models/openai/chat.py +725 -770
  238. agno/models/openai/like.py +16 -2
  239. agno/models/openai/responses.py +1121 -0
  240. agno/models/openrouter/__init__.py +4 -0
  241. agno/models/openrouter/openrouter.py +62 -5
  242. agno/models/perplexity/__init__.py +5 -0
  243. agno/models/perplexity/perplexity.py +203 -0
  244. agno/models/portkey/__init__.py +3 -0
  245. agno/models/portkey/portkey.py +82 -0
  246. agno/models/requesty/__init__.py +5 -0
  247. agno/models/requesty/requesty.py +69 -0
  248. agno/models/response.py +177 -7
  249. agno/models/sambanova/__init__.py +4 -0
  250. agno/models/sambanova/sambanova.py +23 -4
  251. agno/models/siliconflow/__init__.py +5 -0
  252. agno/models/siliconflow/siliconflow.py +42 -0
  253. agno/models/together/__init__.py +4 -0
  254. agno/models/together/together.py +21 -164
  255. agno/models/utils.py +266 -0
  256. agno/models/vercel/__init__.py +3 -0
  257. agno/models/vercel/v0.py +43 -0
  258. agno/models/vertexai/__init__.py +0 -1
  259. agno/models/vertexai/claude.py +190 -0
  260. agno/models/vllm/__init__.py +3 -0
  261. agno/models/vllm/vllm.py +83 -0
  262. agno/models/xai/__init__.py +2 -0
  263. agno/models/xai/xai.py +111 -7
  264. agno/os/__init__.py +3 -0
  265. agno/os/app.py +1027 -0
  266. agno/os/auth.py +244 -0
  267. agno/os/config.py +126 -0
  268. agno/os/interfaces/__init__.py +1 -0
  269. agno/os/interfaces/a2a/__init__.py +3 -0
  270. agno/os/interfaces/a2a/a2a.py +42 -0
  271. agno/os/interfaces/a2a/router.py +249 -0
  272. agno/os/interfaces/a2a/utils.py +924 -0
  273. agno/os/interfaces/agui/__init__.py +3 -0
  274. agno/os/interfaces/agui/agui.py +47 -0
  275. agno/os/interfaces/agui/router.py +147 -0
  276. agno/os/interfaces/agui/utils.py +574 -0
  277. agno/os/interfaces/base.py +25 -0
  278. agno/os/interfaces/slack/__init__.py +3 -0
  279. agno/os/interfaces/slack/router.py +148 -0
  280. agno/os/interfaces/slack/security.py +30 -0
  281. agno/os/interfaces/slack/slack.py +47 -0
  282. agno/os/interfaces/whatsapp/__init__.py +3 -0
  283. agno/os/interfaces/whatsapp/router.py +210 -0
  284. agno/os/interfaces/whatsapp/security.py +55 -0
  285. agno/os/interfaces/whatsapp/whatsapp.py +36 -0
  286. agno/os/mcp.py +293 -0
  287. agno/os/middleware/__init__.py +9 -0
  288. agno/os/middleware/jwt.py +797 -0
  289. agno/os/router.py +258 -0
  290. agno/os/routers/__init__.py +3 -0
  291. agno/os/routers/agents/__init__.py +3 -0
  292. agno/os/routers/agents/router.py +599 -0
  293. agno/os/routers/agents/schema.py +261 -0
  294. agno/os/routers/evals/__init__.py +3 -0
  295. agno/os/routers/evals/evals.py +450 -0
  296. agno/os/routers/evals/schemas.py +174 -0
  297. agno/os/routers/evals/utils.py +231 -0
  298. agno/os/routers/health.py +31 -0
  299. agno/os/routers/home.py +52 -0
  300. agno/os/routers/knowledge/__init__.py +3 -0
  301. agno/os/routers/knowledge/knowledge.py +1008 -0
  302. agno/os/routers/knowledge/schemas.py +178 -0
  303. agno/os/routers/memory/__init__.py +3 -0
  304. agno/os/routers/memory/memory.py +661 -0
  305. agno/os/routers/memory/schemas.py +88 -0
  306. agno/os/routers/metrics/__init__.py +3 -0
  307. agno/os/routers/metrics/metrics.py +190 -0
  308. agno/os/routers/metrics/schemas.py +47 -0
  309. agno/os/routers/session/__init__.py +3 -0
  310. agno/os/routers/session/session.py +997 -0
  311. agno/os/routers/teams/__init__.py +3 -0
  312. agno/os/routers/teams/router.py +512 -0
  313. agno/os/routers/teams/schema.py +257 -0
  314. agno/os/routers/traces/__init__.py +3 -0
  315. agno/os/routers/traces/schemas.py +414 -0
  316. agno/os/routers/traces/traces.py +499 -0
  317. agno/os/routers/workflows/__init__.py +3 -0
  318. agno/os/routers/workflows/router.py +624 -0
  319. agno/os/routers/workflows/schema.py +75 -0
  320. agno/os/schema.py +534 -0
  321. agno/os/scopes.py +469 -0
  322. agno/{playground → os}/settings.py +7 -15
  323. agno/os/utils.py +973 -0
  324. agno/reasoning/anthropic.py +80 -0
  325. agno/reasoning/azure_ai_foundry.py +67 -0
  326. agno/reasoning/deepseek.py +63 -0
  327. agno/reasoning/default.py +97 -0
  328. agno/reasoning/gemini.py +73 -0
  329. agno/reasoning/groq.py +71 -0
  330. agno/reasoning/helpers.py +24 -1
  331. agno/reasoning/ollama.py +67 -0
  332. agno/reasoning/openai.py +86 -0
  333. agno/reasoning/step.py +2 -1
  334. agno/reasoning/vertexai.py +76 -0
  335. agno/run/__init__.py +6 -0
  336. agno/run/agent.py +822 -0
  337. agno/run/base.py +247 -0
  338. agno/run/cancel.py +81 -0
  339. agno/run/requirement.py +181 -0
  340. agno/run/team.py +767 -0
  341. agno/run/workflow.py +708 -0
  342. agno/session/__init__.py +10 -0
  343. agno/session/agent.py +260 -0
  344. agno/session/summary.py +265 -0
  345. agno/session/team.py +342 -0
  346. agno/session/workflow.py +501 -0
  347. agno/table.py +10 -0
  348. agno/team/__init__.py +37 -0
  349. agno/team/team.py +9536 -0
  350. agno/tools/__init__.py +7 -0
  351. agno/tools/agentql.py +120 -0
  352. agno/tools/airflow.py +22 -12
  353. agno/tools/api.py +122 -0
  354. agno/tools/apify.py +276 -83
  355. agno/tools/{arxiv_toolkit.py → arxiv.py} +20 -12
  356. agno/tools/aws_lambda.py +28 -7
  357. agno/tools/aws_ses.py +66 -0
  358. agno/tools/baidusearch.py +11 -4
  359. agno/tools/bitbucket.py +292 -0
  360. agno/tools/brandfetch.py +213 -0
  361. agno/tools/bravesearch.py +106 -0
  362. agno/tools/brightdata.py +367 -0
  363. agno/tools/browserbase.py +209 -0
  364. agno/tools/calcom.py +32 -23
  365. agno/tools/calculator.py +24 -37
  366. agno/tools/cartesia.py +187 -0
  367. agno/tools/{clickup_tool.py → clickup.py} +17 -28
  368. agno/tools/confluence.py +91 -26
  369. agno/tools/crawl4ai.py +139 -43
  370. agno/tools/csv_toolkit.py +28 -22
  371. agno/tools/dalle.py +36 -22
  372. agno/tools/daytona.py +475 -0
  373. agno/tools/decorator.py +169 -14
  374. agno/tools/desi_vocal.py +23 -11
  375. agno/tools/discord.py +32 -29
  376. agno/tools/docker.py +716 -0
  377. agno/tools/duckdb.py +76 -81
  378. agno/tools/duckduckgo.py +43 -40
  379. agno/tools/e2b.py +703 -0
  380. agno/tools/eleven_labs.py +65 -54
  381. agno/tools/email.py +13 -5
  382. agno/tools/evm.py +129 -0
  383. agno/tools/exa.py +324 -42
  384. agno/tools/fal.py +39 -35
  385. agno/tools/file.py +196 -30
  386. agno/tools/file_generation.py +356 -0
  387. agno/tools/financial_datasets.py +288 -0
  388. agno/tools/firecrawl.py +108 -33
  389. agno/tools/function.py +960 -122
  390. agno/tools/giphy.py +34 -12
  391. agno/tools/github.py +1294 -97
  392. agno/tools/gmail.py +922 -0
  393. agno/tools/google_bigquery.py +117 -0
  394. agno/tools/google_drive.py +271 -0
  395. agno/tools/google_maps.py +253 -0
  396. agno/tools/googlecalendar.py +607 -107
  397. agno/tools/googlesheets.py +377 -0
  398. agno/tools/hackernews.py +20 -12
  399. agno/tools/jina.py +24 -14
  400. agno/tools/jira.py +48 -19
  401. agno/tools/knowledge.py +218 -0
  402. agno/tools/linear.py +82 -43
  403. agno/tools/linkup.py +58 -0
  404. agno/tools/local_file_system.py +15 -7
  405. agno/tools/lumalab.py +41 -26
  406. agno/tools/mcp/__init__.py +10 -0
  407. agno/tools/mcp/mcp.py +331 -0
  408. agno/tools/mcp/multi_mcp.py +347 -0
  409. agno/tools/mcp/params.py +24 -0
  410. agno/tools/mcp_toolbox.py +284 -0
  411. agno/tools/mem0.py +193 -0
  412. agno/tools/memory.py +419 -0
  413. agno/tools/mlx_transcribe.py +11 -9
  414. agno/tools/models/azure_openai.py +190 -0
  415. agno/tools/models/gemini.py +203 -0
  416. agno/tools/models/groq.py +158 -0
  417. agno/tools/models/morph.py +186 -0
  418. agno/tools/models/nebius.py +124 -0
  419. agno/tools/models_labs.py +163 -82
  420. agno/tools/moviepy_video.py +18 -13
  421. agno/tools/nano_banana.py +151 -0
  422. agno/tools/neo4j.py +134 -0
  423. agno/tools/newspaper.py +15 -4
  424. agno/tools/newspaper4k.py +19 -6
  425. agno/tools/notion.py +204 -0
  426. agno/tools/openai.py +181 -17
  427. agno/tools/openbb.py +27 -20
  428. agno/tools/opencv.py +321 -0
  429. agno/tools/openweather.py +233 -0
  430. agno/tools/oxylabs.py +385 -0
  431. agno/tools/pandas.py +25 -15
  432. agno/tools/parallel.py +314 -0
  433. agno/tools/postgres.py +238 -185
  434. agno/tools/pubmed.py +125 -13
  435. agno/tools/python.py +48 -35
  436. agno/tools/reasoning.py +283 -0
  437. agno/tools/reddit.py +207 -29
  438. agno/tools/redshift.py +406 -0
  439. agno/tools/replicate.py +69 -26
  440. agno/tools/resend.py +11 -6
  441. agno/tools/scrapegraph.py +179 -19
  442. agno/tools/searxng.py +23 -31
  443. agno/tools/serpapi.py +15 -10
  444. agno/tools/serper.py +255 -0
  445. agno/tools/shell.py +23 -12
  446. agno/tools/shopify.py +1519 -0
  447. agno/tools/slack.py +56 -14
  448. agno/tools/sleep.py +8 -6
  449. agno/tools/spider.py +35 -11
  450. agno/tools/spotify.py +919 -0
  451. agno/tools/sql.py +34 -19
  452. agno/tools/tavily.py +158 -8
  453. agno/tools/telegram.py +18 -8
  454. agno/tools/todoist.py +218 -0
  455. agno/tools/toolkit.py +134 -9
  456. agno/tools/trafilatura.py +388 -0
  457. agno/tools/trello.py +25 -28
  458. agno/tools/twilio.py +18 -9
  459. agno/tools/user_control_flow.py +78 -0
  460. agno/tools/valyu.py +228 -0
  461. agno/tools/visualization.py +467 -0
  462. agno/tools/webbrowser.py +28 -0
  463. agno/tools/webex.py +76 -0
  464. agno/tools/website.py +23 -19
  465. agno/tools/webtools.py +45 -0
  466. agno/tools/whatsapp.py +286 -0
  467. agno/tools/wikipedia.py +28 -19
  468. agno/tools/workflow.py +285 -0
  469. agno/tools/{twitter.py → x.py} +142 -46
  470. agno/tools/yfinance.py +41 -39
  471. agno/tools/youtube.py +34 -17
  472. agno/tools/zendesk.py +15 -5
  473. agno/tools/zep.py +454 -0
  474. agno/tools/zoom.py +86 -37
  475. agno/tracing/__init__.py +12 -0
  476. agno/tracing/exporter.py +157 -0
  477. agno/tracing/schemas.py +276 -0
  478. agno/tracing/setup.py +111 -0
  479. agno/utils/agent.py +938 -0
  480. agno/utils/audio.py +37 -1
  481. agno/utils/certs.py +27 -0
  482. agno/utils/code_execution.py +11 -0
  483. agno/utils/common.py +103 -20
  484. agno/utils/cryptography.py +22 -0
  485. agno/utils/dttm.py +33 -0
  486. agno/utils/events.py +700 -0
  487. agno/utils/functions.py +107 -37
  488. agno/utils/gemini.py +426 -0
  489. agno/utils/hooks.py +171 -0
  490. agno/utils/http.py +185 -0
  491. agno/utils/json_schema.py +159 -37
  492. agno/utils/knowledge.py +36 -0
  493. agno/utils/location.py +19 -0
  494. agno/utils/log.py +221 -8
  495. agno/utils/mcp.py +214 -0
  496. agno/utils/media.py +335 -14
  497. agno/utils/merge_dict.py +22 -1
  498. agno/utils/message.py +77 -2
  499. agno/utils/models/ai_foundry.py +50 -0
  500. agno/utils/models/claude.py +373 -0
  501. agno/utils/models/cohere.py +94 -0
  502. agno/utils/models/llama.py +85 -0
  503. agno/utils/models/mistral.py +100 -0
  504. agno/utils/models/openai_responses.py +140 -0
  505. agno/utils/models/schema_utils.py +153 -0
  506. agno/utils/models/watsonx.py +41 -0
  507. agno/utils/openai.py +257 -0
  508. agno/utils/pickle.py +1 -1
  509. agno/utils/pprint.py +124 -8
  510. agno/utils/print_response/agent.py +930 -0
  511. agno/utils/print_response/team.py +1914 -0
  512. agno/utils/print_response/workflow.py +1668 -0
  513. agno/utils/prompts.py +111 -0
  514. agno/utils/reasoning.py +108 -0
  515. agno/utils/response.py +163 -0
  516. agno/utils/serialize.py +32 -0
  517. agno/utils/shell.py +4 -4
  518. agno/utils/streamlit.py +487 -0
  519. agno/utils/string.py +204 -51
  520. agno/utils/team.py +139 -0
  521. agno/utils/timer.py +9 -2
  522. agno/utils/tokens.py +657 -0
  523. agno/utils/tools.py +19 -1
  524. agno/utils/whatsapp.py +305 -0
  525. agno/utils/yaml_io.py +3 -3
  526. agno/vectordb/__init__.py +2 -0
  527. agno/vectordb/base.py +87 -9
  528. agno/vectordb/cassandra/__init__.py +5 -1
  529. agno/vectordb/cassandra/cassandra.py +383 -27
  530. agno/vectordb/chroma/__init__.py +4 -0
  531. agno/vectordb/chroma/chromadb.py +748 -83
  532. agno/vectordb/clickhouse/__init__.py +7 -1
  533. agno/vectordb/clickhouse/clickhousedb.py +554 -53
  534. agno/vectordb/couchbase/__init__.py +3 -0
  535. agno/vectordb/couchbase/couchbase.py +1446 -0
  536. agno/vectordb/lancedb/__init__.py +5 -0
  537. agno/vectordb/lancedb/lance_db.py +730 -98
  538. agno/vectordb/langchaindb/__init__.py +5 -0
  539. agno/vectordb/langchaindb/langchaindb.py +163 -0
  540. agno/vectordb/lightrag/__init__.py +5 -0
  541. agno/vectordb/lightrag/lightrag.py +388 -0
  542. agno/vectordb/llamaindex/__init__.py +3 -0
  543. agno/vectordb/llamaindex/llamaindexdb.py +166 -0
  544. agno/vectordb/milvus/__init__.py +3 -0
  545. agno/vectordb/milvus/milvus.py +966 -78
  546. agno/vectordb/mongodb/__init__.py +9 -1
  547. agno/vectordb/mongodb/mongodb.py +1175 -172
  548. agno/vectordb/pgvector/__init__.py +8 -0
  549. agno/vectordb/pgvector/pgvector.py +599 -115
  550. agno/vectordb/pineconedb/__init__.py +5 -1
  551. agno/vectordb/pineconedb/pineconedb.py +406 -43
  552. agno/vectordb/qdrant/__init__.py +4 -0
  553. agno/vectordb/qdrant/qdrant.py +914 -61
  554. agno/vectordb/redis/__init__.py +9 -0
  555. agno/vectordb/redis/redisdb.py +682 -0
  556. agno/vectordb/singlestore/__init__.py +8 -1
  557. agno/vectordb/singlestore/singlestore.py +771 -0
  558. agno/vectordb/surrealdb/__init__.py +3 -0
  559. agno/vectordb/surrealdb/surrealdb.py +663 -0
  560. agno/vectordb/upstashdb/__init__.py +5 -0
  561. agno/vectordb/upstashdb/upstashdb.py +718 -0
  562. agno/vectordb/weaviate/__init__.py +8 -0
  563. agno/vectordb/weaviate/index.py +15 -0
  564. agno/vectordb/weaviate/weaviate.py +1009 -0
  565. agno/workflow/__init__.py +23 -1
  566. agno/workflow/agent.py +299 -0
  567. agno/workflow/condition.py +759 -0
  568. agno/workflow/loop.py +756 -0
  569. agno/workflow/parallel.py +853 -0
  570. agno/workflow/router.py +723 -0
  571. agno/workflow/step.py +1564 -0
  572. agno/workflow/steps.py +613 -0
  573. agno/workflow/types.py +556 -0
  574. agno/workflow/workflow.py +4327 -514
  575. agno-2.3.13.dist-info/METADATA +639 -0
  576. agno-2.3.13.dist-info/RECORD +613 -0
  577. {agno-0.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +1 -1
  578. agno-2.3.13.dist-info/licenses/LICENSE +201 -0
  579. agno/api/playground.py +0 -91
  580. agno/api/schemas/playground.py +0 -22
  581. agno/api/schemas/user.py +0 -22
  582. agno/api/schemas/workspace.py +0 -46
  583. agno/api/user.py +0 -160
  584. agno/api/workspace.py +0 -151
  585. agno/cli/auth_server.py +0 -118
  586. agno/cli/config.py +0 -275
  587. agno/cli/console.py +0 -88
  588. agno/cli/credentials.py +0 -23
  589. agno/cli/entrypoint.py +0 -571
  590. agno/cli/operator.py +0 -355
  591. agno/cli/settings.py +0 -85
  592. agno/cli/ws/ws_cli.py +0 -817
  593. agno/constants.py +0 -13
  594. agno/document/__init__.py +0 -1
  595. agno/document/chunking/semantic.py +0 -47
  596. agno/document/chunking/strategy.py +0 -31
  597. agno/document/reader/__init__.py +0 -1
  598. agno/document/reader/arxiv_reader.py +0 -41
  599. agno/document/reader/base.py +0 -22
  600. agno/document/reader/csv_reader.py +0 -84
  601. agno/document/reader/docx_reader.py +0 -46
  602. agno/document/reader/firecrawl_reader.py +0 -99
  603. agno/document/reader/json_reader.py +0 -43
  604. agno/document/reader/pdf_reader.py +0 -219
  605. agno/document/reader/s3/pdf_reader.py +0 -46
  606. agno/document/reader/s3/text_reader.py +0 -51
  607. agno/document/reader/text_reader.py +0 -41
  608. agno/document/reader/website_reader.py +0 -175
  609. agno/document/reader/youtube_reader.py +0 -50
  610. agno/embedder/__init__.py +0 -1
  611. agno/embedder/azure_openai.py +0 -86
  612. agno/embedder/cohere.py +0 -72
  613. agno/embedder/fastembed.py +0 -37
  614. agno/embedder/google.py +0 -73
  615. agno/embedder/huggingface.py +0 -54
  616. agno/embedder/mistral.py +0 -80
  617. agno/embedder/ollama.py +0 -57
  618. agno/embedder/openai.py +0 -74
  619. agno/embedder/sentence_transformer.py +0 -38
  620. agno/embedder/voyageai.py +0 -64
  621. agno/eval/perf.py +0 -201
  622. agno/file/__init__.py +0 -1
  623. agno/file/file.py +0 -16
  624. agno/file/local/csv.py +0 -32
  625. agno/file/local/txt.py +0 -19
  626. agno/infra/app.py +0 -240
  627. agno/infra/base.py +0 -144
  628. agno/infra/context.py +0 -20
  629. agno/infra/db_app.py +0 -52
  630. agno/infra/resource.py +0 -205
  631. agno/infra/resources.py +0 -55
  632. agno/knowledge/agent.py +0 -230
  633. agno/knowledge/arxiv.py +0 -22
  634. agno/knowledge/combined.py +0 -22
  635. agno/knowledge/csv.py +0 -28
  636. agno/knowledge/csv_url.py +0 -19
  637. agno/knowledge/document.py +0 -20
  638. agno/knowledge/docx.py +0 -30
  639. agno/knowledge/json.py +0 -28
  640. agno/knowledge/langchain.py +0 -71
  641. agno/knowledge/llamaindex.py +0 -66
  642. agno/knowledge/pdf.py +0 -28
  643. agno/knowledge/pdf_url.py +0 -26
  644. agno/knowledge/s3/base.py +0 -60
  645. agno/knowledge/s3/pdf.py +0 -21
  646. agno/knowledge/s3/text.py +0 -23
  647. agno/knowledge/text.py +0 -30
  648. agno/knowledge/website.py +0 -88
  649. agno/knowledge/wikipedia.py +0 -31
  650. agno/knowledge/youtube.py +0 -22
  651. agno/memory/agent.py +0 -392
  652. agno/memory/classifier.py +0 -104
  653. agno/memory/db/__init__.py +0 -1
  654. agno/memory/db/base.py +0 -42
  655. agno/memory/db/mongodb.py +0 -189
  656. agno/memory/db/postgres.py +0 -203
  657. agno/memory/db/sqlite.py +0 -193
  658. agno/memory/memory.py +0 -15
  659. agno/memory/row.py +0 -36
  660. agno/memory/summarizer.py +0 -192
  661. agno/memory/summary.py +0 -19
  662. agno/memory/workflow.py +0 -38
  663. agno/models/google/gemini_openai.py +0 -26
  664. agno/models/ollama/hermes.py +0 -221
  665. agno/models/ollama/tools.py +0 -362
  666. agno/models/vertexai/gemini.py +0 -595
  667. agno/playground/__init__.py +0 -3
  668. agno/playground/async_router.py +0 -421
  669. agno/playground/deploy.py +0 -249
  670. agno/playground/operator.py +0 -92
  671. agno/playground/playground.py +0 -91
  672. agno/playground/schemas.py +0 -76
  673. agno/playground/serve.py +0 -55
  674. agno/playground/sync_router.py +0 -405
  675. agno/reasoning/agent.py +0 -68
  676. agno/run/response.py +0 -112
  677. agno/storage/agent/__init__.py +0 -0
  678. agno/storage/agent/base.py +0 -38
  679. agno/storage/agent/dynamodb.py +0 -350
  680. agno/storage/agent/json.py +0 -92
  681. agno/storage/agent/mongodb.py +0 -228
  682. agno/storage/agent/postgres.py +0 -367
  683. agno/storage/agent/session.py +0 -79
  684. agno/storage/agent/singlestore.py +0 -303
  685. agno/storage/agent/sqlite.py +0 -357
  686. agno/storage/agent/yaml.py +0 -93
  687. agno/storage/workflow/__init__.py +0 -0
  688. agno/storage/workflow/base.py +0 -40
  689. agno/storage/workflow/mongodb.py +0 -233
  690. agno/storage/workflow/postgres.py +0 -366
  691. agno/storage/workflow/session.py +0 -60
  692. agno/storage/workflow/sqlite.py +0 -359
  693. agno/tools/googlesearch.py +0 -88
  694. agno/utils/defaults.py +0 -57
  695. agno/utils/filesystem.py +0 -39
  696. agno/utils/git.py +0 -52
  697. agno/utils/json_io.py +0 -30
  698. agno/utils/load_env.py +0 -19
  699. agno/utils/py_io.py +0 -19
  700. agno/utils/pyproject.py +0 -18
  701. agno/utils/resource_filter.py +0 -31
  702. agno/vectordb/singlestore/s2vectordb.py +0 -390
  703. agno/vectordb/singlestore/s2vectordb2.py +0 -355
  704. agno/workspace/__init__.py +0 -0
  705. agno/workspace/config.py +0 -325
  706. agno/workspace/enums.py +0 -6
  707. agno/workspace/helpers.py +0 -48
  708. agno/workspace/operator.py +0 -758
  709. agno/workspace/settings.py +0 -63
  710. agno-0.1.2.dist-info/LICENSE +0 -375
  711. agno-0.1.2.dist-info/METADATA +0 -502
  712. agno-0.1.2.dist-info/RECORD +0 -352
  713. agno-0.1.2.dist-info/entry_points.txt +0 -3
  714. /agno/{cli → db/migrations}/__init__.py +0 -0
  715. /agno/{cli/ws → db/migrations/versions}/__init__.py +0 -0
  716. /agno/{document/chunking/__init__.py → db/schemas/metrics.py} +0 -0
  717. /agno/{document/reader/s3 → integrations}/__init__.py +0 -0
  718. /agno/{file/local → knowledge/chunking}/__init__.py +0 -0
  719. /agno/{infra → knowledge/remote_content}/__init__.py +0 -0
  720. /agno/{knowledge/s3 → tools/models}/__init__.py +0 -0
  721. /agno/{reranker → utils/models}/__init__.py +0 -0
  722. /agno/{storage → utils/print_response}/__init__.py +0 -0
  723. {agno-0.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
agno/db/gcs_json/gcs_json_db.py
@@ -0,0 +1,1791 @@
+ import json
+ import time
+ from datetime import date, datetime, timedelta, timezone
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+ from uuid import uuid4
+
+ if TYPE_CHECKING:
+     from agno.tracing.schemas import Span, Trace
+
+ from agno.db.base import BaseDb, SessionType
+ from agno.db.gcs_json.utils import (
+     apply_sorting,
+     calculate_date_metrics,
+     deserialize_cultural_knowledge_from_db,
+     fetch_all_sessions_data,
+     get_dates_to_calculate_metrics_for,
+     serialize_cultural_knowledge_for_db,
+ )
+ from agno.db.schemas.culture import CulturalKnowledge
+ from agno.db.schemas.evals import EvalFilterType, EvalRunRecord, EvalType
+ from agno.db.schemas.knowledge import KnowledgeRow
+ from agno.db.schemas.memory import UserMemory
+ from agno.session import AgentSession, Session, TeamSession, WorkflowSession
+ from agno.utils.log import log_debug, log_error, log_info, log_warning
+ from agno.utils.string import generate_id
+
+ try:
+     from google.cloud import storage as gcs  # type: ignore
+ except ImportError:
+     raise ImportError("`google-cloud-storage` not installed. Please install it with `pip install google-cloud-storage`")
+
+
+ class GcsJsonDb(BaseDb):
+     def __init__(
+         self,
+         bucket_name: str,
+         prefix: Optional[str] = None,
+         session_table: Optional[str] = None,
+         memory_table: Optional[str] = None,
+         metrics_table: Optional[str] = None,
+         eval_table: Optional[str] = None,
+         knowledge_table: Optional[str] = None,
+         culture_table: Optional[str] = None,
+         traces_table: Optional[str] = None,
+         spans_table: Optional[str] = None,
+         project: Optional[str] = None,
+         credentials: Optional[Any] = None,
+         id: Optional[str] = None,
+     ):
+         """
+         Interface for interacting with JSON files stored in Google Cloud Storage as a database.
+
+         Args:
+             bucket_name (str): Name of the GCS bucket where JSON files will be stored.
+             prefix (Optional[str]): Path prefix for organizing files in the bucket. Defaults to "agno/".
+             session_table (Optional[str]): Name of the JSON file to store sessions (without .json extension).
+             memory_table (Optional[str]): Name of the JSON file to store user memories.
+             metrics_table (Optional[str]): Name of the JSON file to store metrics.
+             eval_table (Optional[str]): Name of the JSON file to store evaluation runs.
+             knowledge_table (Optional[str]): Name of the JSON file to store knowledge content.
+             culture_table (Optional[str]): Name of the JSON file to store cultural knowledge.
+             traces_table (Optional[str]): Name of the JSON file to store traces.
+             spans_table (Optional[str]): Name of the JSON file to store spans.
+             project (Optional[str]): GCP project ID. If None, uses default project.
+             credentials (Optional[Any]): GCP credentials. If None, uses default credentials.
+             id (Optional[str]): ID of the database.
+         """
+         if id is None:
+             prefix_suffix = prefix or "agno/"
+             seed = f"{bucket_name}_{project}#{prefix_suffix}"
+             id = generate_id(seed)
+
+         super().__init__(
+             id=id,
+             session_table=session_table,
+             memory_table=memory_table,
+             metrics_table=metrics_table,
+             eval_table=eval_table,
+             knowledge_table=knowledge_table,
+             culture_table=culture_table,
+             traces_table=traces_table,
+             spans_table=spans_table,
+         )
+
+         self.bucket_name = bucket_name
+         self.prefix = prefix or "agno/"
+         if self.prefix and not self.prefix.endswith("/"):
+             self.prefix += "/"
+
+         # Initialize GCS client and bucket
+         self.client = gcs.Client(project=project, credentials=credentials)
+         self.bucket = self.client.bucket(self.bucket_name)
+
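The constructor above is the entire connection story: one bucket, an optional path prefix, and one JSON blob per logical table. A minimal construction sketch, reused by the later sketches below (bucket and table names are illustrative, and credentials are assumed to be available from the environment):

    from agno.db.gcs_json.gcs_json_db import GcsJsonDb

    db = GcsJsonDb(
        bucket_name="my-bucket",    # assumed to already exist; the class does not create it
        prefix="agno/",             # the default, shown explicitly
        session_table="sessions",   # -> gs://my-bucket/agno/sessions.json
        memory_table="memories",    # -> gs://my-bucket/agno/memories.json
    )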
+     def table_exists(self, table_name: str) -> bool:
+         """JSON implementation, always returns True."""
+         return True
+
+     def _get_blob_name(self, filename: str) -> str:
+         """Get the full blob name including prefix for a given filename."""
+         return f"{self.prefix}{filename}.json"
+
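With the defaults above, each table name maps directly to one object key:

    db._get_blob_name("sessions")   # -> "agno/sessions.json"
    db._get_blob_name("memories")   # -> "agno/memories.json"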
+     def _read_json_file(self, filename: str, create_table_if_not_found: Optional[bool] = False) -> List[Dict[str, Any]]:
+         """Read data from a JSON file in GCS, optionally creating it if it doesn't exist.
+
+         Args:
+             filename (str): The name of the JSON file to read.
+             create_table_if_not_found (Optional[bool]): Whether to create an empty file if it doesn't exist.
+
+         Returns:
+             List[Dict[str, Any]]: The data from the JSON file.
+
+         Raises:
+             json.JSONDecodeError: If the JSON file is not valid.
+         """
+         blob_name = self._get_blob_name(filename)
+         blob = self.bucket.blob(blob_name)
+
+         try:
+             data_str = blob.download_as_bytes().decode("utf-8")
+             return json.loads(data_str)
+
+         except Exception as e:
+             # Check if it's a 404 (file not found) error
+             if "404" in str(e) or "Not Found" in str(e):
+                 if create_table_if_not_found:
+                     log_debug(f"Creating new GCS JSON file: {blob_name}")
+                     blob.upload_from_string("[]", content_type="application/json")
+                 return []
+             else:
+                 log_error(f"Error reading the {blob_name} JSON file from GCS: {e}")
+                 raise json.JSONDecodeError(f"Error reading {blob_name}", "", 0)
+
+     def _write_json_file(self, filename: str, data: List[Dict[str, Any]]) -> None:
+         """Write data to a JSON file in GCS.
+
+         Args:
+             filename (str): The name of the JSON file to write.
+             data (List[Dict[str, Any]]): The data to write to the JSON file.
+
+         Note:
+             Errors while writing are logged, not raised.
+         """
+         blob_name = self._get_blob_name(filename)
+         blob = self.bucket.blob(blob_name)
+
+         try:
+             json_data = json.dumps(data, indent=2, default=str)
+             blob.upload_from_string(json_data, content_type="application/json")
+
+         except Exception as e:
+             log_error(f"Error writing to the {blob_name} JSON file in GCS: {e}")
+             return
+
+     def get_latest_schema_version(self):
+         """Get the latest version of the database schema."""
+         pass
+
+     def upsert_schema_version(self, version: str) -> None:
+         """Upsert the schema version into the database."""
+         pass
+
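Together, `_read_json_file` and `_write_json_file` define the storage protocol: a read downloads the whole array (creating an empty `[]` blob on a 404 when asked), and every mutation re-uploads the whole array. A sketch of that round trip using the internal helpers (filename illustrative):

    rows = db._read_json_file("sessions", create_table_if_not_found=True)  # [] on first use
    rows.append({"session_id": "abc"})     # mutate in memory
    db._write_json_file("sessions", rows)  # rewrites the entire blob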
+     # -- Session methods --
+
+     def delete_session(self, session_id: str) -> bool:
+         """Delete a session from the GCS JSON file.
+
+         Args:
+             session_id (str): The ID of the session to delete.
+
+         Returns:
+             bool: True if the session was deleted, False otherwise.
+
+         Raises:
+             Exception: If an error occurs during deletion.
+         """
+         try:
+             sessions = self._read_json_file(self.session_table_name)
+             original_count = len(sessions)
+             sessions = [s for s in sessions if s.get("session_id") != session_id]
+
+             if len(sessions) < original_count:
+                 self._write_json_file(self.session_table_name, sessions)
+                 log_debug(f"Successfully deleted session with session_id: {session_id}")
+                 return True
+
+             else:
+                 log_debug(f"No session found to delete with session_id: {session_id}")
+                 return False
+
+         except Exception as e:
+             log_warning(f"Error deleting session: {e}")
+             raise e
+
+     def delete_sessions(self, session_ids: List[str]) -> None:
+         """Delete multiple sessions from the GCS JSON file.
+
+         Args:
+             session_ids (List[str]): The IDs of the sessions to delete.
+
+         Raises:
+             Exception: If an error occurs during deletion.
+         """
+         try:
+             sessions = self._read_json_file(self.session_table_name)
+             sessions = [s for s in sessions if s.get("session_id") not in session_ids]
+             self._write_json_file(self.session_table_name, sessions)
+             log_debug(f"Successfully deleted sessions with ids: {session_ids}")
+
+         except Exception as e:
+             log_warning(f"Error deleting sessions: {e}")
+             raise e
+
+     def get_session(
+         self,
+         session_id: str,
+         session_type: SessionType,
+         user_id: Optional[str] = None,
+         deserialize: Optional[bool] = True,
+     ) -> Optional[Union[AgentSession, TeamSession, WorkflowSession, Dict[str, Any]]]:
+         """Read a session from the GCS JSON file.
+
+         Args:
+             session_id (str): The ID of the session to read.
+             session_type (SessionType): The type of the session to read.
+             user_id (Optional[str]): The ID of the user to read the session for.
+             deserialize (Optional[bool]): Whether to deserialize the session.
+
+         Returns:
+             Union[Session, Dict[str, Any], None]:
+                 - When deserialize=True: Session object
+                 - When deserialize=False: Session dictionary
+
+         Raises:
+             Exception: If an error occurs while reading the session.
+         """
+         try:
+             sessions = self._read_json_file(self.session_table_name)
+
+             for session_data in sessions:
+                 if session_data.get("session_id") == session_id:
+                     if user_id is not None and session_data.get("user_id") != user_id:
+                         continue
+
+                     if not deserialize:
+                         return session_data
+
+                     if session_type == SessionType.AGENT:
+                         return AgentSession.from_dict(session_data)
+                     elif session_type == SessionType.TEAM:
+                         return TeamSession.from_dict(session_data)
+                     elif session_type == SessionType.WORKFLOW:
+                         return WorkflowSession.from_dict(session_data)
+                     else:
+                         raise ValueError(f"Invalid session type: {session_type}")
+
+             return None
+
+         except Exception as e:
+             log_warning(f"Exception reading from session file: {e}")
+             raise e
+
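A sketch of fetching one session in either shape (session id illustrative):

    from agno.db.base import SessionType

    session = db.get_session("abc", SessionType.AGENT)                  # AgentSession, or None
    raw = db.get_session("abc", SessionType.AGENT, deserialize=False)   # plain dict, or None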
+     def get_sessions(
+         self,
+         session_type: Optional[SessionType] = None,
+         user_id: Optional[str] = None,
+         component_id: Optional[str] = None,
+         session_name: Optional[str] = None,
+         start_timestamp: Optional[int] = None,
+         end_timestamp: Optional[int] = None,
+         limit: Optional[int] = None,
+         page: Optional[int] = None,
+         sort_by: Optional[str] = None,
+         sort_order: Optional[str] = None,
+         deserialize: Optional[bool] = True,
+     ) -> Union[List[Session], Tuple[List[Dict[str, Any]], int]]:
+         """Get all sessions from the GCS JSON file with filtering and pagination.
+
+         Args:
+             session_type (Optional[SessionType]): The type of the sessions to read.
+             user_id (Optional[str]): The ID of the user to read the sessions for.
+             component_id (Optional[str]): The ID of the component to read the sessions for.
+             session_name (Optional[str]): The name of the session to read.
+             start_timestamp (Optional[int]): The start timestamp of the sessions to read.
+             end_timestamp (Optional[int]): The end timestamp of the sessions to read.
+             limit (Optional[int]): The limit of the sessions to read.
+             page (Optional[int]): The page of the sessions to read.
+             sort_by (Optional[str]): The field to sort the sessions by.
+             sort_order (Optional[str]): The order to sort the sessions by.
+             deserialize (Optional[bool]): Whether to deserialize the sessions.
+
+         Returns:
+             Union[List[AgentSession], List[TeamSession], List[WorkflowSession], Tuple[List[Dict[str, Any]], int]]:
+                 - When deserialize=True: List of sessions
+                 - When deserialize=False: Tuple with list of sessions and total count
+
+         Raises:
+             Exception: If an error occurs while reading the sessions.
+         """
+         try:
+             sessions = self._read_json_file(self.session_table_name)
+
+             # Apply filters
+             filtered_sessions = []
+             for session_data in sessions:
+                 if user_id is not None and session_data.get("user_id") != user_id:
+                     continue
+                 if component_id is not None:
+                     if session_type == SessionType.AGENT and session_data.get("agent_id") != component_id:
+                         continue
+                     elif session_type == SessionType.TEAM and session_data.get("team_id") != component_id:
+                         continue
+                     elif session_type == SessionType.WORKFLOW and session_data.get("workflow_id") != component_id:
+                         continue
+                 if start_timestamp is not None and session_data.get("created_at", 0) < start_timestamp:
+                     continue
+                 if end_timestamp is not None and session_data.get("created_at", 0) > end_timestamp:
+                     continue
+                 if session_name is not None:
+                     stored_name = session_data.get("session_data", {}).get("session_name", "")
+                     if session_name.lower() not in stored_name.lower():
+                         continue
+                 session_type_value = session_type.value if isinstance(session_type, SessionType) else session_type
+                 if session_data.get("session_type") != session_type_value:
+                     continue
+
+                 filtered_sessions.append(session_data)
+
+             total_count = len(filtered_sessions)
+
+             # Apply sorting
+             filtered_sessions = apply_sorting(filtered_sessions, sort_by, sort_order)
+
+             # Apply pagination
+             if limit is not None:
+                 start_idx = 0
+                 if page is not None:
+                     start_idx = (page - 1) * limit
+                 filtered_sessions = filtered_sessions[start_idx : start_idx + limit]
+
+             if not deserialize:
+                 return filtered_sessions, total_count
+
+             if session_type == SessionType.AGENT:
+                 return [AgentSession.from_dict(session) for session in filtered_sessions]  # type: ignore
+             elif session_type == SessionType.TEAM:
+                 return [TeamSession.from_dict(session) for session in filtered_sessions]  # type: ignore
+             elif session_type == SessionType.WORKFLOW:
+                 return [WorkflowSession.from_dict(session) for session in filtered_sessions]  # type: ignore
+             else:
+                 raise ValueError(f"Invalid session type: {session_type}")
+
+         except Exception as e:
+             log_warning(f"Exception reading from session file: {e}")
+             raise e
+
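Pagination here is offset-based: when both `limit` and `page` are set, the slice starts at `(page - 1) * limit`, so `limit=10, page=3` returns items 20 through 29 of the filtered, sorted list. A usage sketch (filter values illustrative):

    rows, total = db.get_sessions(
        session_type=SessionType.AGENT,
        user_id="user-1",
        sort_by="created_at",
        sort_order="desc",
        limit=10,
        page=3,               # items 20..29
        deserialize=False,    # returns (rows, total_count)
    )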
+     def rename_session(
+         self, session_id: str, session_type: SessionType, session_name: str, deserialize: Optional[bool] = True
+     ) -> Optional[Union[Session, Dict[str, Any]]]:
+         """Rename a session in the GCS JSON file."""
+         try:
+             sessions = self._read_json_file(self.session_table_name)
+
+             for i, session_data in enumerate(sessions):
+                 if (
+                     session_data.get("session_id") == session_id
+                     and session_data.get("session_type") == session_type.value
+                 ):
+                     # Update session name in session_data
+                     if "session_data" not in session_data:
+                         session_data["session_data"] = {}
+                     session_data["session_data"]["session_name"] = session_name
+
+                     sessions[i] = session_data
+                     self._write_json_file(self.session_table_name, sessions)
+
+                     if not deserialize:
+                         return session_data
+
+                     if session_type == SessionType.AGENT:
+                         return AgentSession.from_dict(session_data)
+                     elif session_type == SessionType.TEAM:
+                         return TeamSession.from_dict(session_data)
+                     elif session_type == SessionType.WORKFLOW:
+                         return WorkflowSession.from_dict(session_data)
+
+             return None
+         except Exception as e:
+             log_warning(f"Exception renaming session: {e}")
+             raise e
+
+     def upsert_session(
+         self, session: Session, deserialize: Optional[bool] = True
+     ) -> Optional[Union[Session, Dict[str, Any]]]:
+         """Insert or update a session in the GCS JSON file."""
+         try:
+             sessions = self._read_json_file(self.session_table_name, create_table_if_not_found=True)
+             session_dict = session.to_dict()
+
+             # Add session_type based on session instance type
+             if isinstance(session, AgentSession):
+                 session_dict["session_type"] = SessionType.AGENT.value
+             elif isinstance(session, TeamSession):
+                 session_dict["session_type"] = SessionType.TEAM.value
+             elif isinstance(session, WorkflowSession):
+                 session_dict["session_type"] = SessionType.WORKFLOW.value
+
+             # Find existing session to update
+             session_updated = False
+             for i, existing_session in enumerate(sessions):
+                 if existing_session.get("session_id") == session_dict.get("session_id") and self._matches_session_key(
+                     existing_session, session
+                 ):
+                     # Update existing session
+                     session_dict["updated_at"] = int(time.time())
+                     sessions[i] = session_dict
+                     session_updated = True
+                     break
+
+             if not session_updated:
+                 # Add new session
+                 session_dict["created_at"] = session_dict.get("created_at", int(time.time()))
+                 session_dict["updated_at"] = session_dict.get("created_at")
+                 sessions.append(session_dict)
+
+             self._write_json_file(self.session_table_name, sessions)
+
+             if not deserialize:
+                 return session_dict
+
+             return session
+
+         except Exception as e:
+             log_warning(f"Exception upserting session: {e}")
+             raise e
+
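An upsert matches on `session_id` plus the component id for the session's type (see `_matches_session_key` below), so the first write stamps `created_at` and later writes refresh `updated_at`. A sketch, assuming `AgentSession` accepts these fields as keyword arguments (values illustrative):

    session = AgentSession(session_id="abc", agent_id="agent-1")
    db.upsert_session(session)   # inserted: created_at == updated_at
    db.upsert_session(session)   # matched on (session_id, agent_id): updated_at refreshed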
+     def upsert_sessions(
+         self, sessions: List[Session], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
+     ) -> List[Union[Session, Dict[str, Any]]]:
+         """
+         Bulk upsert multiple sessions for improved performance on large datasets.
+
+         Args:
+             sessions (List[Session]): List of sessions to upsert.
+             deserialize (Optional[bool]): Whether to deserialize the sessions. Defaults to True.
+
+         Returns:
+             List[Union[Session, Dict[str, Any]]]: List of upserted sessions. Errors during the bulk
+                 upsert are logged rather than raised, and an empty list is returned on failure.
+         """
+         if not sessions:
+             return []
+
+         try:
+             log_info(
+                 f"GcsJsonDb doesn't support efficient bulk operations, falling back to individual upserts for {len(sessions)} sessions"
+             )
+
+             # Fall back to individual upserts
+             results = []
+             for session in sessions:
+                 if session is not None:
+                     result = self.upsert_session(session, deserialize=deserialize)
+                     if result is not None:
+                         results.append(result)
+             return results
+
+         except Exception as e:
+             log_error(f"Exception during bulk session upsert: {e}")
+             return []
+
+     def _matches_session_key(self, existing_session: Dict[str, Any], session: Session) -> bool:
+         """Check if existing session matches the key for the session type."""
+         if isinstance(session, AgentSession):
+             return existing_session.get("agent_id") == session.agent_id
+         elif isinstance(session, TeamSession):
+             return existing_session.get("team_id") == session.team_id
+         elif isinstance(session, WorkflowSession):
+             return existing_session.get("workflow_id") == session.workflow_id
+         return False
+
+     # -- Memory methods --
+     def delete_user_memory(self, memory_id: str, user_id: Optional[str] = None) -> None:
+         """Delete a user memory from the GCS JSON file.
+
+         Args:
+             memory_id (str): The ID of the memory to delete.
+             user_id (Optional[str]): The ID of the user. If provided, verifies ownership before deletion.
+         """
+         try:
+             memories = self._read_json_file(self.memory_table_name)
+             original_count = len(memories)
+
+             # Filter out the memory, with optional user_id verification
+             memories = [
+                 m
+                 for m in memories
+                 if not (m.get("memory_id") == memory_id and (user_id is None or m.get("user_id") == user_id))
+             ]
+
+             if len(memories) < original_count:
+                 self._write_json_file(self.memory_table_name, memories)
+                 log_debug(f"Successfully deleted user memory id: {memory_id}")
+
+             else:
+                 log_debug(f"No user memory found with id: {memory_id}")
+
+         except Exception as e:
+             log_warning(f"Error deleting user memory: {e}")
+             raise e
+
+     def delete_user_memories(self, memory_ids: List[str], user_id: Optional[str] = None) -> None:
+         """Delete multiple user memories from the GCS JSON file.
+
+         Args:
+             memory_ids (List[str]): The IDs of the memories to delete.
+             user_id (Optional[str]): The ID of the user. If provided, verifies ownership before deletion.
+         """
+         try:
+             memories = self._read_json_file(self.memory_table_name)
+
+             # Filter out memories, with optional user_id verification
+             memories = [
+                 m
+                 for m in memories
+                 if not (m.get("memory_id") in memory_ids and (user_id is None or m.get("user_id") == user_id))
+             ]
+
+             self._write_json_file(self.memory_table_name, memories)
+             log_debug(f"Successfully deleted user memories with ids: {memory_ids}")
+         except Exception as e:
+             log_warning(f"Error deleting user memories: {e}")
+             raise e
+
+     def get_all_memory_topics(self) -> List[str]:
+         """Get all memory topics from the GCS JSON file.
+
+         Returns:
+             List[str]: List of unique memory topics.
+         """
+         try:
+             memories = self._read_json_file(self.memory_table_name)
+             topics = set()
+             for memory in memories:
+                 memory_topics = memory.get("topics", [])
+                 if isinstance(memory_topics, list):
+                     topics.update(memory_topics)
+             return list(topics)
+
+         except Exception as e:
+             log_warning(f"Exception reading from memory file: {e}")
+             raise e
+
+     def get_user_memory(
+         self, memory_id: str, deserialize: Optional[bool] = True, user_id: Optional[str] = None
+     ) -> Optional[Union[UserMemory, Dict[str, Any]]]:
+         """Get a memory from the GCS JSON file.
+
+         Args:
+             memory_id (str): The ID of the memory to retrieve.
+             deserialize (Optional[bool]): Whether to deserialize to UserMemory object. Defaults to True.
+             user_id (Optional[str]): The ID of the user. If provided, verifies ownership before returning.
+
+         Returns:
+             Optional[Union[UserMemory, Dict[str, Any]]]: The memory if found and ownership matches, None otherwise.
+         """
+         try:
+             memories = self._read_json_file(self.memory_table_name)
+
+             for memory_data in memories:
+                 if memory_data.get("memory_id") == memory_id:
+                     # Verify user ownership if user_id is provided
+                     if user_id is not None and memory_data.get("user_id") != user_id:
+                         continue
+
+                     if not deserialize:
+                         return memory_data
+
+                     return UserMemory.from_dict(memory_data)
+
+             return None
+         except Exception as e:
+             log_warning(f"Exception reading from memory file: {e}")
+             raise e
+
+     def get_user_memories(
+         self,
+         user_id: Optional[str] = None,
+         agent_id: Optional[str] = None,
+         team_id: Optional[str] = None,
+         topics: Optional[List[str]] = None,
+         search_content: Optional[str] = None,
+         limit: Optional[int] = None,
+         page: Optional[int] = None,
+         sort_by: Optional[str] = None,
+         sort_order: Optional[str] = None,
+         deserialize: Optional[bool] = True,
+     ) -> Union[List[UserMemory], Tuple[List[Dict[str, Any]], int]]:
+         """Get all memories from the GCS JSON file with filtering and pagination."""
+         try:
+             memories = self._read_json_file(self.memory_table_name)
+
+             # Apply filters
+             filtered_memories = []
+             for memory_data in memories:
+                 if user_id is not None and memory_data.get("user_id") != user_id:
+                     continue
+                 if agent_id is not None and memory_data.get("agent_id") != agent_id:
+                     continue
+                 if team_id is not None and memory_data.get("team_id") != team_id:
+                     continue
+                 if topics is not None:
+                     memory_topics = memory_data.get("topics", [])
+                     if not any(topic in memory_topics for topic in topics):
+                         continue
+                 if search_content is not None:
+                     memory_content = str(memory_data.get("memory", ""))
+                     if search_content.lower() not in memory_content.lower():
+                         continue
+
+                 filtered_memories.append(memory_data)
+
+             total_count = len(filtered_memories)
+
+             # Apply sorting
+             filtered_memories = apply_sorting(filtered_memories, sort_by, sort_order)
+
+             # Apply pagination
+             if limit is not None:
+                 start_idx = 0
+                 if page is not None:
+                     start_idx = (page - 1) * limit
+                 filtered_memories = filtered_memories[start_idx : start_idx + limit]
+
+             if not deserialize:
+                 return filtered_memories, total_count
+
+             return [UserMemory.from_dict(memory) for memory in filtered_memories]
+
+         except Exception as e:
+             log_warning(f"Exception reading from memory file: {e}")
+             raise e
+
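Memory filtering mirrors the session path: topic matching is any-overlap and content search is a case-insensitive substring test on the `memory` field. A usage sketch (filter values illustrative):

    memories = db.get_user_memories(user_id="user-1", topics=["travel"])   # List[UserMemory]
    rows, total = db.get_user_memories(search_content="tokyo", limit=20, page=1, deserialize=False)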
+ def get_user_memory_stats(
647
+ self, limit: Optional[int] = None, page: Optional[int] = None, user_id: Optional[str] = None
648
+ ) -> Tuple[List[Dict[str, Any]], int]:
649
+ """Get user memory statistics.
650
+
651
+ Args:
652
+ limit (Optional[int]): Maximum number of results to return.
653
+ page (Optional[int]): Page number for pagination.
654
+ user_id (Optional[str]): User ID for filtering.
655
+
656
+ Returns:
657
+ Tuple[List[Dict[str, Any]], int]: List of user memory statistics and total count.
658
+ """
659
+ try:
660
+ memories = self._read_json_file(self.memory_table_name)
661
+ user_stats = {}
662
+
663
+ for memory in memories:
664
+ memory_user_id = memory.get("user_id")
665
+ # filter by user_id if provided
666
+ if user_id is not None and memory_user_id != user_id:
667
+ continue
668
+ if memory_user_id:
669
+ if memory_user_id not in user_stats:
670
+ user_stats[memory_user_id] = {
671
+ "user_id": memory_user_id,
672
+ "total_memories": 0,
673
+ "last_memory_updated_at": 0,
674
+ }
675
+ user_stats[memory_user_id]["total_memories"] += 1
676
+ updated_at = memory.get("updated_at", 0)
677
+ if updated_at > user_stats[memory_user_id]["last_memory_updated_at"]:
678
+ user_stats[memory_user_id]["last_memory_updated_at"] = updated_at
679
+
680
+ stats_list = list(user_stats.values())
681
+ stats_list.sort(key=lambda x: x["last_memory_updated_at"], reverse=True)
682
+
683
+ total_count = len(stats_list)
684
+
685
+ # Apply pagination
686
+ if limit is not None:
687
+ start_idx = 0
688
+ if page is not None:
689
+ start_idx = (page - 1) * limit
690
+ stats_list = stats_list[start_idx : start_idx + limit]
691
+
692
+ return stats_list, total_count
693
+
694
+ except Exception as e:
695
+ log_warning(f"Exception getting user memory stats: {e}")
696
+ raise e
697
+
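Each per-user row produced above has a fixed shape; an editorial sketch of one entry (values illustrative):

    {
        "user_id": "user-123",
        "total_memories": 4,
        "last_memory_updated_at": 1735689600,   # unix seconds; the list is sorted by this, descending
    }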
+    def upsert_user_memory(
+        self, memory: UserMemory, deserialize: Optional[bool] = True
+    ) -> Optional[Union[UserMemory, Dict[str, Any]]]:
+        """Upsert a user memory in the GCS JSON file."""
+        try:
+            memories = self._read_json_file(self.memory_table_name, create_table_if_not_found=True)
+
+            if memory.memory_id is None:
+                memory.memory_id = str(uuid4())
+
+            memory_dict = memory.to_dict() if hasattr(memory, "to_dict") else memory.__dict__
+            memory_dict["updated_at"] = int(time.time())
+
+            # Find existing memory to update
+            memory_updated = False
+            for i, existing_memory in enumerate(memories):
+                if existing_memory.get("memory_id") == memory.memory_id:
+                    memories[i] = memory_dict
+                    memory_updated = True
+                    break
+
+            if not memory_updated:
+                memories.append(memory_dict)
+
+            self._write_json_file(self.memory_table_name, memories)
+
+            if not deserialize:
+                return memory_dict
+            return UserMemory.from_dict(memory_dict)
+
+        except Exception as e:
+            log_error(f"Exception upserting user memory: {e}")
+            raise e
+
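A sketch of the upsert contract above (editorial, not part of the diff): the key is `memory_id`, so a missing ID inserts a new record and a matching ID overwrites in place. Assumes a configured GcsJsonDb `db`; the UserMemory field values are illustrative.

    m = db.upsert_user_memory(UserMemory(memory="Prefers aisle seats", user_id="user-123"))
    m.memory = "Prefers window seats"
    db.upsert_user_memory(m)   # same memory_id, so the stored record is replaced and updated_at is bumped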
+    def upsert_memories(
+        self, memories: List[UserMemory], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
+    ) -> List[Union[UserMemory, Dict[str, Any]]]:
+        """
+        Bulk upsert multiple user memories for improved performance on large datasets.
+
+        Args:
+            memories (List[UserMemory]): List of memories to upsert.
+            deserialize (Optional[bool]): Whether to deserialize the memories. Defaults to True.
+            preserve_updated_at (bool): Whether to preserve existing updated_at timestamps. Defaults to False.
+                Not applied in the individual-upsert fallback used here, which always refreshes updated_at.
+
+        Returns:
+            List[Union[UserMemory, Dict[str, Any]]]: List of upserted memories. An empty list is
+                returned if an error occurs during the bulk upsert.
+        """
+        if not memories:
+            return []
+
+        try:
+            log_info(
+                f"GcsJsonDb doesn't support efficient bulk operations, falling back to individual upserts for {len(memories)} memories"
+            )
+            # Fall back to individual upserts
+            results = []
+            for memory in memories:
+                if memory is not None:
+                    result = self.upsert_user_memory(memory, deserialize=deserialize)
+                    if result is not None:
+                        results.append(result)
+            return results
+
+        except Exception as e:
+            log_error(f"Exception during bulk memory upsert: {e}")
+            return []
+
+    def clear_memories(self) -> None:
+        """Delete all memories from the database.
+
+        Raises:
+            Exception: If an error occurs during deletion.
+        """
+        try:
+            # Simply write an empty list to the memory JSON file
+            self._write_json_file(self.memory_table_name, [])
+
+        except Exception as e:
+            log_warning(f"Exception deleting all memories: {e}")
+            raise e
+
+    # -- Metrics methods --
+    def calculate_metrics(self) -> Optional[list[dict]]:
+        """Calculate metrics for all dates without complete metrics."""
+        try:
+            metrics = self._read_json_file(self.metrics_table_name, create_table_if_not_found=True)
+
+            starting_date = self._get_metrics_calculation_starting_date(metrics)
+            if starting_date is None:
+                log_info("No session data found. Won't calculate metrics.")
+                return None
+
+            dates_to_process = get_dates_to_calculate_metrics_for(starting_date)
+            if not dates_to_process:
+                log_info("Metrics already calculated for all relevant dates.")
+                return None
+
+            start_timestamp = int(datetime.combine(dates_to_process[0], datetime.min.time()).timestamp())
+            end_timestamp = int(
+                datetime.combine(dates_to_process[-1] + timedelta(days=1), datetime.min.time()).timestamp()
+            )
+
+            sessions = self._get_all_sessions_for_metrics_calculation(start_timestamp, end_timestamp)
+            all_sessions_data = fetch_all_sessions_data(
+                sessions=sessions, dates_to_process=dates_to_process, start_timestamp=start_timestamp
+            )
+            if not all_sessions_data:
+                log_info("No new session data found. Won't calculate metrics.")
+                return None
+
+            results = []
+
+            for date_to_process in dates_to_process:
+                date_key = date_to_process.isoformat()
+                sessions_for_date = all_sessions_data.get(date_key, {})
+
+                # Skip dates with no sessions
+                if not any(len(sessions) > 0 for sessions in sessions_for_date.values()):
+                    continue
+
+                metrics_record = calculate_date_metrics(date_to_process, sessions_for_date)
+
+                # Upsert metrics record
+                existing_record_idx = None
+                for i, existing_metric in enumerate(metrics):
+                    if (
+                        existing_metric.get("date") == str(date_to_process)
+                        and existing_metric.get("aggregation_period") == "daily"
+                    ):
+                        existing_record_idx = i
+                        break
+
+                if existing_record_idx is not None:
+                    metrics[existing_record_idx] = metrics_record
+                else:
+                    metrics.append(metrics_record)
+
+                results.append(metrics_record)
+
+            if results:
+                self._write_json_file(self.metrics_table_name, metrics)
+
+            return results
+
+        except Exception as e:
+            log_warning(f"Exception refreshing metrics: {e}")
+            raise e
+
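The window above runs from midnight of the first pending date up to, but excluding, midnight after the last pending date. An editorial illustration of that arithmetic with assumed dates:

    from datetime import date, datetime, timedelta

    dates_to_process = [date(2025, 1, 1), date(2025, 1, 2)]
    start_ts = int(datetime.combine(dates_to_process[0], datetime.min.time()).timestamp())
    end_ts = int(datetime.combine(dates_to_process[-1] + timedelta(days=1), datetime.min.time()).timestamp())
    # Sessions with start_ts <= created_at < end_ts fall inside the two pending days.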
+    def _get_metrics_calculation_starting_date(self, metrics: List[Dict[str, Any]]) -> Optional[date]:
+        """Get the first date for which metrics calculation is needed."""
+        if metrics:
+            # Sort by date in descending order
+            sorted_metrics = sorted(metrics, key=lambda x: x.get("date", ""), reverse=True)
+            latest_metric = sorted_metrics[0]
+
+            if latest_metric.get("completed", False):
+                latest_date = datetime.strptime(latest_metric["date"], "%Y-%m-%d").date()
+                return latest_date + timedelta(days=1)
+            else:
+                return datetime.strptime(latest_metric["date"], "%Y-%m-%d").date()
+
+        # No metrics records. Return the date of the first recorded session.
+        # We need to get sessions of all types, so we'll read directly from the file
+        all_sessions = self._read_json_file(self.session_table_name)
+        if all_sessions:
+            # Sort by created_at
+            all_sessions.sort(key=lambda x: x.get("created_at", 0))
+            first_session_date = all_sessions[0]["created_at"]
+            return datetime.fromtimestamp(first_session_date, tz=timezone.utc).date()
+
+        return None
+
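An editorial illustration of the resume rule above (dates assumed): a completed latest record resumes the next day, while an incomplete one is recalculated from its own date.

    from datetime import datetime, timedelta

    latest = {"date": "2025-01-05", "completed": True}
    latest_date = datetime.strptime(latest["date"], "%Y-%m-%d").date()
    resume_from = latest_date + timedelta(days=1) if latest["completed"] else latest_date
    # completed -> resume from 2025-01-06; incomplete -> recalculate 2025-01-05 itself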
+    def _get_all_sessions_for_metrics_calculation(
+        self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None
+    ) -> List[Dict[str, Any]]:
+        """Get all sessions for metrics calculation."""
+        try:
+            sessions = self._read_json_file(self.session_table_name)
+
+            filtered_sessions = []
+            for session in sessions:
+                created_at = session.get("created_at", 0)
+                if start_timestamp is not None and created_at < start_timestamp:
+                    continue
+                if end_timestamp is not None and created_at >= end_timestamp:
+                    continue
+
+                # Only include necessary fields for metrics
+                filtered_session = {
+                    "user_id": session.get("user_id"),
+                    "session_data": session.get("session_data"),
+                    "runs": session.get("runs"),
+                    "created_at": session.get("created_at"),
+                    "session_type": session.get("session_type"),
+                }
+                filtered_sessions.append(filtered_session)
+
+            return filtered_sessions
+
+        except Exception as e:
+            log_warning(f"Exception reading sessions for metrics: {e}")
+            raise e
+
+    def get_metrics(
+        self,
+        starting_date: Optional[date] = None,
+        ending_date: Optional[date] = None,
+    ) -> Tuple[List[dict], Optional[int]]:
+        """Get all metrics matching the given date range."""
+        try:
+            metrics = self._read_json_file(self.metrics_table_name)
+
+            filtered_metrics = []
+            latest_updated_at = None
+
+            for metric in metrics:
+                metric_date = datetime.strptime(metric.get("date", ""), "%Y-%m-%d").date()
+
+                if starting_date and metric_date < starting_date:
+                    continue
+                if ending_date and metric_date > ending_date:
+                    continue
+
+                filtered_metrics.append(metric)
+
+                updated_at = metric.get("updated_at")
+                if updated_at and (latest_updated_at is None or updated_at > latest_updated_at):
+                    latest_updated_at = updated_at
+
+            return filtered_metrics, latest_updated_at
+
+        except Exception as e:
+            log_warning(f"Exception getting metrics: {e}")
+            raise e
+
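A usage sketch for the date-range query above (editorial, not part of the diff), assuming a configured GcsJsonDb `db`:

    from datetime import date, timedelta

    rows, last_updated = db.get_metrics(
        starting_date=date.today() - timedelta(days=7),
        ending_date=date.today(),
    )
    # rows: daily metric dicts in the range; last_updated: the newest updated_at among them, or None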
+    # -- Knowledge methods --
+    def delete_knowledge_content(self, id: str):
+        """Delete knowledge content by ID."""
+        try:
+            knowledge_items = self._read_json_file(self.knowledge_table_name)
+            knowledge_items = [item for item in knowledge_items if item.get("id") != id]
+            self._write_json_file(self.knowledge_table_name, knowledge_items)
+        except Exception as e:
+            log_warning(f"Error deleting knowledge content: {e}")
+            raise e
+
+    def get_knowledge_content(self, id: str) -> Optional[KnowledgeRow]:
+        """Get knowledge content by ID."""
+        try:
+            knowledge_items = self._read_json_file(self.knowledge_table_name)
+
+            for item in knowledge_items:
+                if item.get("id") == id:
+                    return KnowledgeRow.model_validate(item)
+
+            return None
+        except Exception as e:
+            log_warning(f"Error getting knowledge content: {e}")
+            raise e
+
+    def get_knowledge_contents(
+        self,
+        limit: Optional[int] = None,
+        page: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_order: Optional[str] = None,
+    ) -> Tuple[List[KnowledgeRow], int]:
+        """Get all knowledge contents from the GCS JSON file."""
+        try:
+            knowledge_items = self._read_json_file(self.knowledge_table_name)
+
+            total_count = len(knowledge_items)
+
+            # Apply sorting
+            knowledge_items = apply_sorting(knowledge_items, sort_by, sort_order)
+
+            # Apply pagination
+            if limit is not None:
+                start_idx = 0
+                if page is not None:
+                    start_idx = (page - 1) * limit
+                knowledge_items = knowledge_items[start_idx : start_idx + limit]
+
+            return [KnowledgeRow.model_validate(item) for item in knowledge_items], total_count
+
+        except Exception as e:
+            log_warning(f"Error getting knowledge contents: {e}")
+            raise e
+
+    def upsert_knowledge_content(self, knowledge_row: KnowledgeRow):
+        """Upsert knowledge content in the GCS JSON file."""
+        try:
+            knowledge_items = self._read_json_file(self.knowledge_table_name, create_table_if_not_found=True)
+            knowledge_dict = knowledge_row.model_dump()
+
+            # Find existing item to update
+            item_updated = False
+            for i, existing_item in enumerate(knowledge_items):
+                if existing_item.get("id") == knowledge_row.id:
+                    knowledge_items[i] = knowledge_dict
+                    item_updated = True
+                    break
+
+            if not item_updated:
+                knowledge_items.append(knowledge_dict)
+
+            self._write_json_file(self.knowledge_table_name, knowledge_items)
+            return knowledge_row
+
+        except Exception as e:
+            log_warning(f"Error upserting knowledge row: {e}")
+            raise e
+
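A paging sketch for the two read methods above (editorial, not part of the diff). It assumes a configured GcsJsonDb `db`, and that the stored rows carry an `updated_at` field to sort on, which is an assumption about the row schema.

    row = db.get_knowledge_content("content-1")   # a single KnowledgeRow, or None
    rows, total = db.get_knowledge_contents(limit=20, page=1, sort_by="updated_at", sort_order="desc")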
+    # -- Eval methods --
+    def create_eval_run(self, eval_run: EvalRunRecord) -> Optional[EvalRunRecord]:
+        """Create an EvalRunRecord in the GCS JSON file."""
+        try:
+            eval_runs = self._read_json_file(self.eval_table_name, create_table_if_not_found=True)
+
+            current_time = int(time.time())
+            eval_dict = eval_run.model_dump()
+            eval_dict["created_at"] = current_time
+            eval_dict["updated_at"] = current_time
+
+            eval_runs.append(eval_dict)
+            self._write_json_file(self.eval_table_name, eval_runs)
+
+            return eval_run
+        except Exception as e:
+            log_warning(f"Error creating eval run: {e}")
+            raise e
+
+    def delete_eval_run(self, eval_run_id: str) -> None:
+        """Delete an eval run from the GCS JSON file."""
+        try:
+            eval_runs = self._read_json_file(self.eval_table_name)
+            original_count = len(eval_runs)
+            eval_runs = [run for run in eval_runs if run.get("run_id") != eval_run_id]
+
+            if len(eval_runs) < original_count:
+                self._write_json_file(self.eval_table_name, eval_runs)
+                log_debug(f"Deleted eval run with ID: {eval_run_id}")
+            else:
+                log_warning(f"No eval run found with ID: {eval_run_id}")
+        except Exception as e:
+            log_warning(f"Error deleting eval run {eval_run_id}: {e}")
+            raise e
+
+    def delete_eval_runs(self, eval_run_ids: List[str]) -> None:
+        """Delete multiple eval runs from the GCS JSON file."""
+        try:
+            eval_runs = self._read_json_file(self.eval_table_name)
+            original_count = len(eval_runs)
+            eval_runs = [run for run in eval_runs if run.get("run_id") not in eval_run_ids]
+
+            deleted_count = original_count - len(eval_runs)
+            if deleted_count > 0:
+                self._write_json_file(self.eval_table_name, eval_runs)
+                log_debug(f"Deleted {deleted_count} eval runs")
+            else:
+                log_warning(f"No eval runs found with IDs: {eval_run_ids}")
+        except Exception as e:
+            log_warning(f"Error deleting eval runs {eval_run_ids}: {e}")
+            raise e
+
+    def get_eval_run(
+        self, eval_run_id: str, deserialize: Optional[bool] = True
+    ) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
+        """Get an eval run from the GCS JSON file."""
+        try:
+            eval_runs = self._read_json_file(self.eval_table_name)
+
+            for run_data in eval_runs:
+                if run_data.get("run_id") == eval_run_id:
+                    if not deserialize:
+                        return run_data
+                    return EvalRunRecord.model_validate(run_data)
+
+            return None
+        except Exception as e:
+            log_warning(f"Exception getting eval run {eval_run_id}: {e}")
+            raise e
+
+    def get_eval_runs(
+        self,
+        limit: Optional[int] = None,
+        page: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_order: Optional[str] = None,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        workflow_id: Optional[str] = None,
+        model_id: Optional[str] = None,
+        filter_type: Optional[EvalFilterType] = None,
+        eval_type: Optional[List[EvalType]] = None,
+        deserialize: Optional[bool] = True,
+    ) -> Union[List[EvalRunRecord], Tuple[List[Dict[str, Any]], int]]:
+        """Get all eval runs from the GCS JSON file with filtering and pagination."""
+        try:
+            eval_runs = self._read_json_file(self.eval_table_name)
+
+            # Apply filters
+            filtered_runs = []
+            for run_data in eval_runs:
+                if agent_id is not None and run_data.get("agent_id") != agent_id:
+                    continue
+                if team_id is not None and run_data.get("team_id") != team_id:
+                    continue
+                if workflow_id is not None and run_data.get("workflow_id") != workflow_id:
+                    continue
+                if model_id is not None and run_data.get("model_id") != model_id:
+                    continue
+                if eval_type is not None and len(eval_type) > 0:
+                    if run_data.get("eval_type") not in eval_type:
+                        continue
+                if filter_type is not None:
+                    if filter_type == EvalFilterType.AGENT and run_data.get("agent_id") is None:
+                        continue
+                    elif filter_type == EvalFilterType.TEAM and run_data.get("team_id") is None:
+                        continue
+                    elif filter_type == EvalFilterType.WORKFLOW and run_data.get("workflow_id") is None:
+                        continue
+
+                filtered_runs.append(run_data)
+
+            total_count = len(filtered_runs)
+
+            # Apply sorting (default by created_at desc)
+            if sort_by is None:
+                filtered_runs.sort(key=lambda x: x.get("created_at", 0), reverse=True)
+            else:
+                filtered_runs = apply_sorting(filtered_runs, sort_by, sort_order)
+
+            # Apply pagination
+            if limit is not None:
+                start_idx = 0
+                if page is not None:
+                    start_idx = (page - 1) * limit
+                filtered_runs = filtered_runs[start_idx : start_idx + limit]
+
+            if not deserialize:
+                return filtered_runs, total_count
+
+            return [EvalRunRecord.model_validate(run) for run in filtered_runs]
+
+        except Exception as e:
+            log_warning(f"Exception getting eval runs: {e}")
+            raise e
+
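A filtering sketch (editorial, not part of the diff), assuming a configured GcsJsonDb `db`; the agent ID is illustrative:

    runs, total = db.get_eval_runs(agent_id="agent-1", limit=5, deserialize=False)
    # the five newest eval-run dicts for the agent (default sort is created_at descending),
    # plus the total number of matches before pagination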
+    def rename_eval_run(
+        self, eval_run_id: str, name: str, deserialize: Optional[bool] = True
+    ) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
+        """Rename an eval run in the GCS JSON file."""
+        try:
+            eval_runs = self._read_json_file(self.eval_table_name)
+
+            for i, run_data in enumerate(eval_runs):
+                if run_data.get("run_id") == eval_run_id:
+                    run_data["name"] = name
+                    run_data["updated_at"] = int(time.time())
+                    eval_runs[i] = run_data
+                    self._write_json_file(self.eval_table_name, eval_runs)
+
+                    if not deserialize:
+                        return run_data
+                    return EvalRunRecord.model_validate(run_data)
+
+            return None
+        except Exception as e:
+            log_warning(f"Error renaming eval run {eval_run_id}: {e}")
+            raise e
+
+    # -- Cultural Knowledge methods --
+    def clear_cultural_knowledge(self) -> None:
+        """Delete all cultural knowledge from the database.
+
+        Raises:
+            Exception: If an error occurs during deletion.
+        """
+        try:
+            self._write_json_file(self.culture_table_name, [])
+        except Exception as e:
+            log_warning(f"Exception deleting all cultural knowledge: {e}")
+            raise e
+
+    def delete_cultural_knowledge(self, id: str) -> None:
+        """Delete cultural knowledge by ID.
+
+        Args:
+            id (str): The ID of the cultural knowledge to delete.
+
+        Raises:
+            Exception: If an error occurs during deletion.
+        """
+        try:
+            cultural_knowledge = self._read_json_file(self.culture_table_name)
+            cultural_knowledge = [item for item in cultural_knowledge if item.get("id") != id]
+            self._write_json_file(self.culture_table_name, cultural_knowledge)
+            log_debug(f"Deleted cultural knowledge with ID: {id}")
+        except Exception as e:
+            log_warning(f"Error deleting cultural knowledge: {e}")
+            raise e
+
+    def get_cultural_knowledge(
+        self, id: str, deserialize: Optional[bool] = True
+    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
+        """Get cultural knowledge by ID.
+
+        Args:
+            id (str): The ID of the cultural knowledge to retrieve.
+            deserialize (Optional[bool]): Whether to deserialize to a CulturalKnowledge object. Defaults to True.
+
+        Returns:
+            Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The cultural knowledge if found, None otherwise.
+
+        Raises:
+            Exception: If an error occurs during retrieval.
+        """
+        try:
+            cultural_knowledge = self._read_json_file(self.culture_table_name)
+
+            for item in cultural_knowledge:
+                if item.get("id") == id:
+                    if not deserialize:
+                        return item
+                    return deserialize_cultural_knowledge_from_db(item)
+
+            return None
+        except Exception as e:
+            log_warning(f"Error getting cultural knowledge: {e}")
+            raise e
+
+    def get_all_cultural_knowledge(
+        self,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        name: Optional[str] = None,
+        limit: Optional[int] = None,
+        page: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_order: Optional[str] = None,
+        deserialize: Optional[bool] = True,
+    ) -> Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
+        """Get all cultural knowledge with filtering and pagination.
+
+        Args:
+            agent_id (Optional[str]): Filter by agent ID.
+            team_id (Optional[str]): Filter by team ID.
+            name (Optional[str]): Filter by name (case-insensitive partial match).
+            limit (Optional[int]): Maximum number of results to return.
+            page (Optional[int]): Page number for pagination.
+            sort_by (Optional[str]): Field to sort by.
+            sort_order (Optional[str]): Sort order ('asc' or 'desc').
+            deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge objects. Defaults to True.
+
+        Returns:
+            Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
+                - When deserialize=True: List of CulturalKnowledge objects
+                - When deserialize=False: Tuple with list of dictionaries and total count
+
+        Raises:
+            Exception: If an error occurs during retrieval.
+        """
+        try:
+            cultural_knowledge = self._read_json_file(self.culture_table_name)
+
+            # Apply filters
+            filtered_items = []
+            for item in cultural_knowledge:
+                if agent_id is not None and item.get("agent_id") != agent_id:
+                    continue
+                if team_id is not None and item.get("team_id") != team_id:
+                    continue
+                if name is not None and name.lower() not in item.get("name", "").lower():
+                    continue
+
+                filtered_items.append(item)
+
+            total_count = len(filtered_items)
+
+            # Apply sorting
+            filtered_items = apply_sorting(filtered_items, sort_by, sort_order)
+
+            # Apply pagination
+            if limit is not None:
+                start_idx = 0
+                if page is not None:
+                    start_idx = (page - 1) * limit
+                filtered_items = filtered_items[start_idx : start_idx + limit]
+
+            if not deserialize:
+                return filtered_items, total_count
+
+            return [deserialize_cultural_knowledge_from_db(item) for item in filtered_items]
+
+        except Exception as e:
+            log_warning(f"Error getting all cultural knowledge: {e}")
+            raise e
+
+    def upsert_cultural_knowledge(
+        self, cultural_knowledge: CulturalKnowledge, deserialize: Optional[bool] = True
+    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
+        """Upsert cultural knowledge in the GCS JSON file.
+
+        Args:
+            cultural_knowledge (CulturalKnowledge): The cultural knowledge to upsert.
+            deserialize (Optional[bool]): Whether to deserialize the result. Defaults to True.
+
+        Returns:
+            Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The upserted cultural knowledge.
+
+        Raises:
+            Exception: If an error occurs during upsert.
+        """
+        try:
+            cultural_knowledge_list = self._read_json_file(self.culture_table_name, create_table_if_not_found=True)
+
+            # Serialize content, categories, and notes into a dict for DB storage
+            content_dict = serialize_cultural_knowledge_for_db(cultural_knowledge)
+
+            # Create the item dict with serialized content
+            cultural_knowledge_dict = {
+                "id": cultural_knowledge.id,
+                "name": cultural_knowledge.name,
+                "summary": cultural_knowledge.summary,
+                "content": content_dict if content_dict else None,
+                "metadata": cultural_knowledge.metadata,
+                "input": cultural_knowledge.input,
+                "created_at": cultural_knowledge.created_at,
+                "updated_at": int(time.time()),
+                "agent_id": cultural_knowledge.agent_id,
+                "team_id": cultural_knowledge.team_id,
+            }
+
+            # Find existing item to update
+            item_updated = False
+            for i, existing_item in enumerate(cultural_knowledge_list):
+                if existing_item.get("id") == cultural_knowledge.id:
+                    cultural_knowledge_list[i] = cultural_knowledge_dict
+                    item_updated = True
+                    break
+
+            if not item_updated:
+                cultural_knowledge_list.append(cultural_knowledge_dict)
+
+            self._write_json_file(self.culture_table_name, cultural_knowledge_list)
+
+            if not deserialize:
+                return cultural_knowledge_dict
+
+            return deserialize_cultural_knowledge_from_db(cultural_knowledge_dict)
+
+        except Exception as e:
+            log_warning(f"Error upserting cultural knowledge: {e}")
+            raise e
+
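A round-trip sketch (editorial, not part of the diff): content, categories, and notes are packed into a single "content" dict on write and unpacked on read, so an upsert followed by the deserializer restores the object. `ck` is an assumed CulturalKnowledge instance and `db` an assumed configured GcsJsonDb.

    stored = db.upsert_cultural_knowledge(ck, deserialize=False)   # dict as written to GCS
    restored = deserialize_cultural_knowledge_from_db(stored)      # back to a CulturalKnowledge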
+    # --- Traces ---
+    def upsert_trace(self, trace: "Trace") -> None:
+        """Create or update a single trace record in the database.
+
+        Args:
+            trace: The Trace object to store (one per trace_id).
+        """
+        try:
+            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=True)
+
+            # Check if trace exists
+            existing_idx = None
+            for i, existing in enumerate(traces):
+                if existing.get("trace_id") == trace.trace_id:
+                    existing_idx = i
+                    break
+
+            if existing_idx is not None:
+                existing = traces[existing_idx]
+
+                # workflow (level 3) > team (level 2) > agent (level 1) > child/unknown (level 0)
+                def get_component_level(workflow_id: Any, team_id: Any, agent_id: Any, name: str) -> int:
+                    is_root_name = ".run" in name or ".arun" in name
+                    if not is_root_name:
+                        return 0
+                    elif workflow_id:
+                        return 3
+                    elif team_id:
+                        return 2
+                    elif agent_id:
+                        return 1
+                    else:
+                        return 0
+
+                existing_level = get_component_level(
+                    existing.get("workflow_id"),
+                    existing.get("team_id"),
+                    existing.get("agent_id"),
+                    existing.get("name", ""),
+                )
+                new_level = get_component_level(trace.workflow_id, trace.team_id, trace.agent_id, trace.name)
+                should_update_name = new_level > existing_level
+
+                # Parse existing start_time to calculate correct duration
+                existing_start_time_str = existing.get("start_time")
+                if isinstance(existing_start_time_str, str):
+                    existing_start_time = datetime.fromisoformat(existing_start_time_str.replace("Z", "+00:00"))
+                else:
+                    existing_start_time = trace.start_time
+
+                recalculated_duration_ms = int((trace.end_time - existing_start_time).total_seconds() * 1000)
+
+                # Update existing trace
+                existing["end_time"] = trace.end_time.isoformat()
+                existing["duration_ms"] = recalculated_duration_ms
+                existing["status"] = trace.status
+                if should_update_name:
+                    existing["name"] = trace.name
+
+                # Update context fields only if new value is not None
+                if trace.run_id is not None:
+                    existing["run_id"] = trace.run_id
+                if trace.session_id is not None:
+                    existing["session_id"] = trace.session_id
+                if trace.user_id is not None:
+                    existing["user_id"] = trace.user_id
+                if trace.agent_id is not None:
+                    existing["agent_id"] = trace.agent_id
+                if trace.team_id is not None:
+                    existing["team_id"] = trace.team_id
+                if trace.workflow_id is not None:
+                    existing["workflow_id"] = trace.workflow_id
+
+                traces[existing_idx] = existing
+            else:
+                # Add new trace
+                trace_dict = trace.to_dict()
+                trace_dict.pop("total_spans", None)
+                trace_dict.pop("error_count", None)
+                traces.append(trace_dict)
+
+            self._write_json_file(self.trace_table_name, traces)
+
+        except Exception as e:
+            log_error(f"Error creating trace: {e}")
+
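An editorial illustration of the name-precedence rule above: only root spans (names containing ".run" or ".arun") may rename a trace, and workflow context outranks team, which outranks agent. The helper below mirrors the nested get_component_level for standalone experimentation:

    def component_level(workflow_id, team_id, agent_id, name: str) -> int:
        # Mirror of the nested helper above, pulled out for illustration.
        if ".run" not in name and ".arun" not in name:
            return 0
        if workflow_id:
            return 3
        if team_id:
            return 2
        if agent_id:
            return 1
        return 0

    assert component_level(None, None, "agent-1", "Agent.run") == 1
    assert component_level(None, "team-1", "agent-1", "Team.arun") == 2   # team root outranks agent root
    assert component_level(None, "team-1", None, "tool_call") == 0        # non-root names never win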
+    def get_trace(
+        self,
+        trace_id: Optional[str] = None,
+        run_id: Optional[str] = None,
+    ):
+        """Get a single trace by trace_id or other filters.
+
+        Args:
+            trace_id: The unique trace identifier.
+            run_id: Filter by run ID (returns first match).
+
+        Returns:
+            Optional[Trace]: The trace if found, None otherwise.
+
+        Note:
+            If multiple filters are provided, trace_id takes precedence.
+            For other filters, the most recent trace is returned.
+        """
+        try:
+            from agno.tracing.schemas import Trace
+
+            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=False)
+            if not traces:
+                return None
+
+            # Get spans for calculating total_spans and error_count
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)
+
+            # Filter traces
+            filtered = []
+            for t in traces:
+                if trace_id and t.get("trace_id") == trace_id:
+                    filtered.append(t)
+                    break
+                elif run_id and t.get("run_id") == run_id:
+                    filtered.append(t)
+
+            if not filtered:
+                return None
+
+            # Sort by start_time desc and get first
+            filtered.sort(key=lambda x: x.get("start_time", ""), reverse=True)
+            trace_data = filtered[0]
+
+            # Calculate total_spans and error_count
+            trace_spans = [s for s in spans if s.get("trace_id") == trace_data.get("trace_id")]
+            trace_data["total_spans"] = len(trace_spans)
+            trace_data["error_count"] = sum(1 for s in trace_spans if s.get("status_code") == "ERROR")
+
+            return Trace.from_dict(trace_data)
+
+        except Exception as e:
+            log_error(f"Error getting trace: {e}")
+            return None
+
+    def get_traces(
+        self,
+        run_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        user_id: Optional[str] = None,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        workflow_id: Optional[str] = None,
+        status: Optional[str] = None,
+        start_time: Optional[datetime] = None,
+        end_time: Optional[datetime] = None,
+        limit: Optional[int] = 20,
+        page: Optional[int] = 1,
+    ) -> tuple[List, int]:
+        """Get traces matching the provided filters with pagination.
+
+        Args:
+            run_id: Filter by run ID.
+            session_id: Filter by session ID.
+            user_id: Filter by user ID.
+            agent_id: Filter by agent ID.
+            team_id: Filter by team ID.
+            workflow_id: Filter by workflow ID.
+            status: Filter by status (OK, ERROR, UNSET).
+            start_time: Filter traces starting after this datetime.
+            end_time: Filter traces ending before this datetime.
+            limit: Maximum number of traces to return per page.
+            page: Page number (1-indexed).
+
+        Returns:
+            tuple[List[Trace], int]: Tuple of (list of matching traces, total count).
+        """
+        try:
+            from agno.tracing.schemas import Trace
+
+            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=False)
+            if not traces:
+                return [], 0
+
+            # Get spans for calculating total_spans and error_count
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)
+
+            # Apply filters
+            filtered = []
+            for t in traces:
+                if run_id and t.get("run_id") != run_id:
+                    continue
+                if session_id and t.get("session_id") != session_id:
+                    continue
+                if user_id and t.get("user_id") != user_id:
+                    continue
+                if agent_id and t.get("agent_id") != agent_id:
+                    continue
+                if team_id and t.get("team_id") != team_id:
+                    continue
+                if workflow_id and t.get("workflow_id") != workflow_id:
+                    continue
+                if status and t.get("status") != status:
+                    continue
+                if start_time:
+                    trace_start = t.get("start_time", "")
+                    if trace_start < start_time.isoformat():
+                        continue
+                if end_time:
+                    trace_end = t.get("end_time", "")
+                    if trace_end > end_time.isoformat():
+                        continue
+                filtered.append(t)
+
+            total_count = len(filtered)
+
+            # Sort by start_time desc
+            filtered.sort(key=lambda x: x.get("start_time", ""), reverse=True)
+
+            # Apply pagination
+            if limit and page:
+                start_idx = (page - 1) * limit
+                filtered = filtered[start_idx : start_idx + limit]
+
+            # Add total_spans and error_count to each trace
+            result_traces = []
+            for t in filtered:
+                trace_spans = [s for s in spans if s.get("trace_id") == t.get("trace_id")]
+                t["total_spans"] = len(trace_spans)
+                t["error_count"] = sum(1 for s in trace_spans if s.get("status_code") == "ERROR")
+                result_traces.append(Trace.from_dict(t))
+
+            return result_traces, total_count
+
+        except Exception as e:
+            log_error(f"Error getting traces: {e}")
+            return [], 0
+
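A note on the time filters above (editorial): start_time and end_time are compared as ISO-8601 strings, which order lexicographically the same way the underlying datetimes do, provided the stored values share one timestamp format.

    assert "2025-01-02T00:00:00" < "2025-01-10T00:00:00"   # string order matches chronological order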
+    def get_trace_stats(
+        self,
+        user_id: Optional[str] = None,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        workflow_id: Optional[str] = None,
+        start_time: Optional[datetime] = None,
+        end_time: Optional[datetime] = None,
+        limit: Optional[int] = 20,
+        page: Optional[int] = 1,
+    ) -> tuple[List[Dict[str, Any]], int]:
+        """Get trace statistics grouped by session.
+
+        Args:
+            user_id: Filter by user ID.
+            agent_id: Filter by agent ID.
+            team_id: Filter by team ID.
+            workflow_id: Filter by workflow ID.
+            start_time: Filter sessions with traces created after this datetime.
+            end_time: Filter sessions with traces created before this datetime.
+            limit: Maximum number of sessions to return per page.
+            page: Page number (1-indexed).
+
+        Returns:
+            tuple[List[Dict], int]: Tuple of (list of session stats dicts, total count).
+                Each dict contains: session_id, user_id, agent_id, team_id, workflow_id, total_traces,
+                first_trace_at, last_trace_at.
+        """
+        try:
+            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=False)
+            if not traces:
+                return [], 0
+
+            # Group by session_id
+            session_stats: Dict[str, Dict[str, Any]] = {}
+
+            for t in traces:
+                trace_session_id = t.get("session_id")
+                if not trace_session_id:
+                    continue
+
+                # Apply filters
+                if user_id and t.get("user_id") != user_id:
+                    continue
+                if agent_id and t.get("agent_id") != agent_id:
+                    continue
+                if team_id and t.get("team_id") != team_id:
+                    continue
+                if workflow_id and t.get("workflow_id") != workflow_id:
+                    continue
+
+                created_at = t.get("created_at", "")
+                if start_time and created_at < start_time.isoformat():
+                    continue
+                if end_time and created_at > end_time.isoformat():
+                    continue
+
+                if trace_session_id not in session_stats:
+                    session_stats[trace_session_id] = {
+                        "session_id": trace_session_id,
+                        "user_id": t.get("user_id"),
+                        "agent_id": t.get("agent_id"),
+                        "team_id": t.get("team_id"),
+                        "workflow_id": t.get("workflow_id"),
+                        "total_traces": 0,
+                        "first_trace_at": created_at,
+                        "last_trace_at": created_at,
+                    }
+
+                session_stats[trace_session_id]["total_traces"] += 1
+                if created_at and session_stats[trace_session_id]["first_trace_at"]:
+                    if created_at < session_stats[trace_session_id]["first_trace_at"]:
+                        session_stats[trace_session_id]["first_trace_at"] = created_at
+                if created_at and session_stats[trace_session_id]["last_trace_at"]:
+                    if created_at > session_stats[trace_session_id]["last_trace_at"]:
+                        session_stats[trace_session_id]["last_trace_at"] = created_at
+
+            stats_list = list(session_stats.values())
+            total_count = len(stats_list)
+
+            # Sort by last_trace_at desc
+            stats_list.sort(key=lambda x: x.get("last_trace_at", ""), reverse=True)
+
+            # Apply pagination
+            if limit and page:
+                start_idx = (page - 1) * limit
+                stats_list = stats_list[start_idx : start_idx + limit]
+
+            # Convert ISO strings to datetime objects
+            for stat in stats_list:
+                first_at = stat.get("first_trace_at", "")
+                last_at = stat.get("last_trace_at", "")
+                if first_at:
+                    stat["first_trace_at"] = datetime.fromisoformat(first_at.replace("Z", "+00:00"))
+                if last_at:
+                    stat["last_trace_at"] = datetime.fromisoformat(last_at.replace("Z", "+00:00"))
+
+            return stats_list, total_count
+
+        except Exception as e:
+            log_error(f"Error getting trace stats: {e}")
+            return [], 0
+
+    # --- Spans ---
+    def create_span(self, span: "Span") -> None:
+        """Create a single span in the database.
+
+        Args:
+            span: The Span object to store.
+        """
+        try:
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=True)
+            spans.append(span.to_dict())
+            self._write_json_file(self.span_table_name, spans)
+
+        except Exception as e:
+            log_error(f"Error creating span: {e}")
+
+    def create_spans(self, spans: List) -> None:
+        """Create multiple spans in the database as a batch.
+
+        Args:
+            spans: List of Span objects to store.
+        """
+        if not spans:
+            return
+
+        try:
+            existing_spans = self._read_json_file(self.span_table_name, create_table_if_not_found=True)
+            for span in spans:
+                existing_spans.append(span.to_dict())
+            self._write_json_file(self.span_table_name, existing_spans)
+
+        except Exception as e:
+            log_error(f"Error creating spans batch: {e}")
+
+    def get_span(self, span_id: str):
+        """Get a single span by its span_id.
+
+        Args:
+            span_id: The unique span identifier.
+
+        Returns:
+            Optional[Span]: The span if found, None otherwise.
+        """
+        try:
+            from agno.tracing.schemas import Span
+
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)
+
+            for s in spans:
+                if s.get("span_id") == span_id:
+                    return Span.from_dict(s)
+
+            return None
+
+        except Exception as e:
+            log_error(f"Error getting span: {e}")
+            return None
+
+    def get_spans(
+        self,
+        trace_id: Optional[str] = None,
+        parent_span_id: Optional[str] = None,
+        limit: Optional[int] = 1000,
+    ) -> List:
+        """Get spans matching the provided filters.
+
+        Args:
+            trace_id: Filter by trace ID.
+            parent_span_id: Filter by parent span ID.
+            limit: Maximum number of spans to return.
+
+        Returns:
+            List[Span]: List of matching spans.
+        """
+        try:
+            from agno.tracing.schemas import Span
+
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)
+            if not spans:
+                return []
+
+            # Apply filters
+            filtered = []
+            for s in spans:
+                if trace_id and s.get("trace_id") != trace_id:
+                    continue
+                if parent_span_id and s.get("parent_span_id") != parent_span_id:
+                    continue
+                filtered.append(s)
+
+            # Apply limit
+            if limit:
+                filtered = filtered[:limit]
+
+            return [Span.from_dict(s) for s in filtered]
+
+        except Exception as e:
+            log_error(f"Error getting spans: {e}")
+            return []
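A tree-rebuilding sketch (editorial, not part of the diff): group a trace's spans by parent. It assumes a configured GcsJsonDb `db`, an illustrative trace ID, and that Span objects expose span_id/parent_span_id attributes matching the fields used above.

    from collections import defaultdict

    spans = db.get_spans(trace_id="trace-abc")
    children = defaultdict(list)
    for s in spans:
        children[s.parent_span_id].append(s)
    roots = children[None]   # spans with no parent are the tree roots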