agno 2.2.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (575)
  1. agno/__init__.py +8 -0
  2. agno/agent/__init__.py +51 -0
  3. agno/agent/agent.py +10405 -0
  4. agno/api/__init__.py +0 -0
  5. agno/api/agent.py +28 -0
  6. agno/api/api.py +40 -0
  7. agno/api/evals.py +22 -0
  8. agno/api/os.py +17 -0
  9. agno/api/routes.py +13 -0
  10. agno/api/schemas/__init__.py +9 -0
  11. agno/api/schemas/agent.py +16 -0
  12. agno/api/schemas/evals.py +16 -0
  13. agno/api/schemas/os.py +14 -0
  14. agno/api/schemas/response.py +6 -0
  15. agno/api/schemas/team.py +16 -0
  16. agno/api/schemas/utils.py +21 -0
  17. agno/api/schemas/workflows.py +16 -0
  18. agno/api/settings.py +53 -0
  19. agno/api/team.py +30 -0
  20. agno/api/workflow.py +28 -0
  21. agno/cloud/aws/base.py +214 -0
  22. agno/cloud/aws/s3/__init__.py +2 -0
  23. agno/cloud/aws/s3/api_client.py +43 -0
  24. agno/cloud/aws/s3/bucket.py +195 -0
  25. agno/cloud/aws/s3/object.py +57 -0
  26. agno/culture/__init__.py +3 -0
  27. agno/culture/manager.py +956 -0
  28. agno/db/__init__.py +24 -0
  29. agno/db/async_postgres/__init__.py +3 -0
  30. agno/db/base.py +598 -0
  31. agno/db/dynamo/__init__.py +3 -0
  32. agno/db/dynamo/dynamo.py +2042 -0
  33. agno/db/dynamo/schemas.py +314 -0
  34. agno/db/dynamo/utils.py +743 -0
  35. agno/db/firestore/__init__.py +3 -0
  36. agno/db/firestore/firestore.py +1795 -0
  37. agno/db/firestore/schemas.py +140 -0
  38. agno/db/firestore/utils.py +376 -0
  39. agno/db/gcs_json/__init__.py +3 -0
  40. agno/db/gcs_json/gcs_json_db.py +1335 -0
  41. agno/db/gcs_json/utils.py +228 -0
  42. agno/db/in_memory/__init__.py +3 -0
  43. agno/db/in_memory/in_memory_db.py +1160 -0
  44. agno/db/in_memory/utils.py +230 -0
  45. agno/db/json/__init__.py +3 -0
  46. agno/db/json/json_db.py +1328 -0
  47. agno/db/json/utils.py +230 -0
  48. agno/db/migrations/__init__.py +0 -0
  49. agno/db/migrations/v1_to_v2.py +635 -0
  50. agno/db/mongo/__init__.py +17 -0
  51. agno/db/mongo/async_mongo.py +2026 -0
  52. agno/db/mongo/mongo.py +1982 -0
  53. agno/db/mongo/schemas.py +87 -0
  54. agno/db/mongo/utils.py +259 -0
  55. agno/db/mysql/__init__.py +3 -0
  56. agno/db/mysql/mysql.py +2308 -0
  57. agno/db/mysql/schemas.py +138 -0
  58. agno/db/mysql/utils.py +355 -0
  59. agno/db/postgres/__init__.py +4 -0
  60. agno/db/postgres/async_postgres.py +1927 -0
  61. agno/db/postgres/postgres.py +2260 -0
  62. agno/db/postgres/schemas.py +139 -0
  63. agno/db/postgres/utils.py +442 -0
  64. agno/db/redis/__init__.py +3 -0
  65. agno/db/redis/redis.py +1660 -0
  66. agno/db/redis/schemas.py +123 -0
  67. agno/db/redis/utils.py +346 -0
  68. agno/db/schemas/__init__.py +4 -0
  69. agno/db/schemas/culture.py +120 -0
  70. agno/db/schemas/evals.py +33 -0
  71. agno/db/schemas/knowledge.py +40 -0
  72. agno/db/schemas/memory.py +46 -0
  73. agno/db/schemas/metrics.py +0 -0
  74. agno/db/singlestore/__init__.py +3 -0
  75. agno/db/singlestore/schemas.py +130 -0
  76. agno/db/singlestore/singlestore.py +2272 -0
  77. agno/db/singlestore/utils.py +384 -0
  78. agno/db/sqlite/__init__.py +4 -0
  79. agno/db/sqlite/async_sqlite.py +2293 -0
  80. agno/db/sqlite/schemas.py +133 -0
  81. agno/db/sqlite/sqlite.py +2288 -0
  82. agno/db/sqlite/utils.py +431 -0
  83. agno/db/surrealdb/__init__.py +3 -0
  84. agno/db/surrealdb/metrics.py +292 -0
  85. agno/db/surrealdb/models.py +309 -0
  86. agno/db/surrealdb/queries.py +71 -0
  87. agno/db/surrealdb/surrealdb.py +1353 -0
  88. agno/db/surrealdb/utils.py +147 -0
  89. agno/db/utils.py +116 -0
  90. agno/debug.py +18 -0
  91. agno/eval/__init__.py +14 -0
  92. agno/eval/accuracy.py +834 -0
  93. agno/eval/performance.py +773 -0
  94. agno/eval/reliability.py +306 -0
  95. agno/eval/utils.py +119 -0
  96. agno/exceptions.py +161 -0
  97. agno/filters.py +354 -0
  98. agno/guardrails/__init__.py +6 -0
  99. agno/guardrails/base.py +19 -0
  100. agno/guardrails/openai.py +144 -0
  101. agno/guardrails/pii.py +94 -0
  102. agno/guardrails/prompt_injection.py +52 -0
  103. agno/integrations/__init__.py +0 -0
  104. agno/integrations/discord/__init__.py +3 -0
  105. agno/integrations/discord/client.py +203 -0
  106. agno/knowledge/__init__.py +5 -0
  107. agno/knowledge/chunking/__init__.py +0 -0
  108. agno/knowledge/chunking/agentic.py +79 -0
  109. agno/knowledge/chunking/document.py +91 -0
  110. agno/knowledge/chunking/fixed.py +57 -0
  111. agno/knowledge/chunking/markdown.py +151 -0
  112. agno/knowledge/chunking/recursive.py +63 -0
  113. agno/knowledge/chunking/row.py +39 -0
  114. agno/knowledge/chunking/semantic.py +86 -0
  115. agno/knowledge/chunking/strategy.py +165 -0
  116. agno/knowledge/content.py +74 -0
  117. agno/knowledge/document/__init__.py +5 -0
  118. agno/knowledge/document/base.py +58 -0
  119. agno/knowledge/embedder/__init__.py +5 -0
  120. agno/knowledge/embedder/aws_bedrock.py +343 -0
  121. agno/knowledge/embedder/azure_openai.py +210 -0
  122. agno/knowledge/embedder/base.py +23 -0
  123. agno/knowledge/embedder/cohere.py +323 -0
  124. agno/knowledge/embedder/fastembed.py +62 -0
  125. agno/knowledge/embedder/fireworks.py +13 -0
  126. agno/knowledge/embedder/google.py +258 -0
  127. agno/knowledge/embedder/huggingface.py +94 -0
  128. agno/knowledge/embedder/jina.py +182 -0
  129. agno/knowledge/embedder/langdb.py +22 -0
  130. agno/knowledge/embedder/mistral.py +206 -0
  131. agno/knowledge/embedder/nebius.py +13 -0
  132. agno/knowledge/embedder/ollama.py +154 -0
  133. agno/knowledge/embedder/openai.py +195 -0
  134. agno/knowledge/embedder/sentence_transformer.py +63 -0
  135. agno/knowledge/embedder/together.py +13 -0
  136. agno/knowledge/embedder/vllm.py +262 -0
  137. agno/knowledge/embedder/voyageai.py +165 -0
  138. agno/knowledge/knowledge.py +1988 -0
  139. agno/knowledge/reader/__init__.py +7 -0
  140. agno/knowledge/reader/arxiv_reader.py +81 -0
  141. agno/knowledge/reader/base.py +95 -0
  142. agno/knowledge/reader/csv_reader.py +166 -0
  143. agno/knowledge/reader/docx_reader.py +82 -0
  144. agno/knowledge/reader/field_labeled_csv_reader.py +292 -0
  145. agno/knowledge/reader/firecrawl_reader.py +201 -0
  146. agno/knowledge/reader/json_reader.py +87 -0
  147. agno/knowledge/reader/markdown_reader.py +137 -0
  148. agno/knowledge/reader/pdf_reader.py +431 -0
  149. agno/knowledge/reader/pptx_reader.py +101 -0
  150. agno/knowledge/reader/reader_factory.py +313 -0
  151. agno/knowledge/reader/s3_reader.py +89 -0
  152. agno/knowledge/reader/tavily_reader.py +194 -0
  153. agno/knowledge/reader/text_reader.py +115 -0
  154. agno/knowledge/reader/web_search_reader.py +372 -0
  155. agno/knowledge/reader/website_reader.py +455 -0
  156. agno/knowledge/reader/wikipedia_reader.py +59 -0
  157. agno/knowledge/reader/youtube_reader.py +78 -0
  158. agno/knowledge/remote_content/__init__.py +0 -0
  159. agno/knowledge/remote_content/remote_content.py +88 -0
  160. agno/knowledge/reranker/__init__.py +3 -0
  161. agno/knowledge/reranker/base.py +14 -0
  162. agno/knowledge/reranker/cohere.py +64 -0
  163. agno/knowledge/reranker/infinity.py +195 -0
  164. agno/knowledge/reranker/sentence_transformer.py +54 -0
  165. agno/knowledge/types.py +39 -0
  166. agno/knowledge/utils.py +189 -0
  167. agno/media.py +462 -0
  168. agno/memory/__init__.py +3 -0
  169. agno/memory/manager.py +1327 -0
  170. agno/models/__init__.py +0 -0
  171. agno/models/aimlapi/__init__.py +5 -0
  172. agno/models/aimlapi/aimlapi.py +45 -0
  173. agno/models/anthropic/__init__.py +5 -0
  174. agno/models/anthropic/claude.py +757 -0
  175. agno/models/aws/__init__.py +15 -0
  176. agno/models/aws/bedrock.py +701 -0
  177. agno/models/aws/claude.py +378 -0
  178. agno/models/azure/__init__.py +18 -0
  179. agno/models/azure/ai_foundry.py +485 -0
  180. agno/models/azure/openai_chat.py +131 -0
  181. agno/models/base.py +2175 -0
  182. agno/models/cerebras/__init__.py +12 -0
  183. agno/models/cerebras/cerebras.py +501 -0
  184. agno/models/cerebras/cerebras_openai.py +112 -0
  185. agno/models/cohere/__init__.py +5 -0
  186. agno/models/cohere/chat.py +389 -0
  187. agno/models/cometapi/__init__.py +5 -0
  188. agno/models/cometapi/cometapi.py +57 -0
  189. agno/models/dashscope/__init__.py +5 -0
  190. agno/models/dashscope/dashscope.py +91 -0
  191. agno/models/deepinfra/__init__.py +5 -0
  192. agno/models/deepinfra/deepinfra.py +28 -0
  193. agno/models/deepseek/__init__.py +5 -0
  194. agno/models/deepseek/deepseek.py +61 -0
  195. agno/models/defaults.py +1 -0
  196. agno/models/fireworks/__init__.py +5 -0
  197. agno/models/fireworks/fireworks.py +26 -0
  198. agno/models/google/__init__.py +5 -0
  199. agno/models/google/gemini.py +1085 -0
  200. agno/models/groq/__init__.py +5 -0
  201. agno/models/groq/groq.py +556 -0
  202. agno/models/huggingface/__init__.py +5 -0
  203. agno/models/huggingface/huggingface.py +491 -0
  204. agno/models/ibm/__init__.py +5 -0
  205. agno/models/ibm/watsonx.py +422 -0
  206. agno/models/internlm/__init__.py +3 -0
  207. agno/models/internlm/internlm.py +26 -0
  208. agno/models/langdb/__init__.py +1 -0
  209. agno/models/langdb/langdb.py +48 -0
  210. agno/models/litellm/__init__.py +14 -0
  211. agno/models/litellm/chat.py +468 -0
  212. agno/models/litellm/litellm_openai.py +25 -0
  213. agno/models/llama_cpp/__init__.py +5 -0
  214. agno/models/llama_cpp/llama_cpp.py +22 -0
  215. agno/models/lmstudio/__init__.py +5 -0
  216. agno/models/lmstudio/lmstudio.py +25 -0
  217. agno/models/message.py +434 -0
  218. agno/models/meta/__init__.py +12 -0
  219. agno/models/meta/llama.py +475 -0
  220. agno/models/meta/llama_openai.py +78 -0
  221. agno/models/metrics.py +120 -0
  222. agno/models/mistral/__init__.py +5 -0
  223. agno/models/mistral/mistral.py +432 -0
  224. agno/models/nebius/__init__.py +3 -0
  225. agno/models/nebius/nebius.py +54 -0
  226. agno/models/nexus/__init__.py +3 -0
  227. agno/models/nexus/nexus.py +22 -0
  228. agno/models/nvidia/__init__.py +5 -0
  229. agno/models/nvidia/nvidia.py +28 -0
  230. agno/models/ollama/__init__.py +5 -0
  231. agno/models/ollama/chat.py +441 -0
  232. agno/models/openai/__init__.py +9 -0
  233. agno/models/openai/chat.py +883 -0
  234. agno/models/openai/like.py +27 -0
  235. agno/models/openai/responses.py +1050 -0
  236. agno/models/openrouter/__init__.py +5 -0
  237. agno/models/openrouter/openrouter.py +66 -0
  238. agno/models/perplexity/__init__.py +5 -0
  239. agno/models/perplexity/perplexity.py +187 -0
  240. agno/models/portkey/__init__.py +3 -0
  241. agno/models/portkey/portkey.py +81 -0
  242. agno/models/requesty/__init__.py +5 -0
  243. agno/models/requesty/requesty.py +52 -0
  244. agno/models/response.py +199 -0
  245. agno/models/sambanova/__init__.py +5 -0
  246. agno/models/sambanova/sambanova.py +28 -0
  247. agno/models/siliconflow/__init__.py +5 -0
  248. agno/models/siliconflow/siliconflow.py +25 -0
  249. agno/models/together/__init__.py +5 -0
  250. agno/models/together/together.py +25 -0
  251. agno/models/utils.py +266 -0
  252. agno/models/vercel/__init__.py +3 -0
  253. agno/models/vercel/v0.py +26 -0
  254. agno/models/vertexai/__init__.py +0 -0
  255. agno/models/vertexai/claude.py +70 -0
  256. agno/models/vllm/__init__.py +3 -0
  257. agno/models/vllm/vllm.py +78 -0
  258. agno/models/xai/__init__.py +3 -0
  259. agno/models/xai/xai.py +113 -0
  260. agno/os/__init__.py +3 -0
  261. agno/os/app.py +876 -0
  262. agno/os/auth.py +57 -0
  263. agno/os/config.py +104 -0
  264. agno/os/interfaces/__init__.py +1 -0
  265. agno/os/interfaces/a2a/__init__.py +3 -0
  266. agno/os/interfaces/a2a/a2a.py +42 -0
  267. agno/os/interfaces/a2a/router.py +250 -0
  268. agno/os/interfaces/a2a/utils.py +924 -0
  269. agno/os/interfaces/agui/__init__.py +3 -0
  270. agno/os/interfaces/agui/agui.py +47 -0
  271. agno/os/interfaces/agui/router.py +144 -0
  272. agno/os/interfaces/agui/utils.py +534 -0
  273. agno/os/interfaces/base.py +25 -0
  274. agno/os/interfaces/slack/__init__.py +3 -0
  275. agno/os/interfaces/slack/router.py +148 -0
  276. agno/os/interfaces/slack/security.py +30 -0
  277. agno/os/interfaces/slack/slack.py +47 -0
  278. agno/os/interfaces/whatsapp/__init__.py +3 -0
  279. agno/os/interfaces/whatsapp/router.py +211 -0
  280. agno/os/interfaces/whatsapp/security.py +53 -0
  281. agno/os/interfaces/whatsapp/whatsapp.py +36 -0
  282. agno/os/mcp.py +292 -0
  283. agno/os/middleware/__init__.py +7 -0
  284. agno/os/middleware/jwt.py +233 -0
  285. agno/os/router.py +1763 -0
  286. agno/os/routers/__init__.py +3 -0
  287. agno/os/routers/evals/__init__.py +3 -0
  288. agno/os/routers/evals/evals.py +430 -0
  289. agno/os/routers/evals/schemas.py +142 -0
  290. agno/os/routers/evals/utils.py +162 -0
  291. agno/os/routers/health.py +31 -0
  292. agno/os/routers/home.py +52 -0
  293. agno/os/routers/knowledge/__init__.py +3 -0
  294. agno/os/routers/knowledge/knowledge.py +997 -0
  295. agno/os/routers/knowledge/schemas.py +178 -0
  296. agno/os/routers/memory/__init__.py +3 -0
  297. agno/os/routers/memory/memory.py +515 -0
  298. agno/os/routers/memory/schemas.py +62 -0
  299. agno/os/routers/metrics/__init__.py +3 -0
  300. agno/os/routers/metrics/metrics.py +190 -0
  301. agno/os/routers/metrics/schemas.py +47 -0
  302. agno/os/routers/session/__init__.py +3 -0
  303. agno/os/routers/session/session.py +997 -0
  304. agno/os/schema.py +1055 -0
  305. agno/os/settings.py +43 -0
  306. agno/os/utils.py +630 -0
  307. agno/py.typed +0 -0
  308. agno/reasoning/__init__.py +0 -0
  309. agno/reasoning/anthropic.py +80 -0
  310. agno/reasoning/azure_ai_foundry.py +67 -0
  311. agno/reasoning/deepseek.py +63 -0
  312. agno/reasoning/default.py +97 -0
  313. agno/reasoning/gemini.py +73 -0
  314. agno/reasoning/groq.py +71 -0
  315. agno/reasoning/helpers.py +63 -0
  316. agno/reasoning/ollama.py +67 -0
  317. agno/reasoning/openai.py +86 -0
  318. agno/reasoning/step.py +31 -0
  319. agno/reasoning/vertexai.py +76 -0
  320. agno/run/__init__.py +6 -0
  321. agno/run/agent.py +787 -0
  322. agno/run/base.py +229 -0
  323. agno/run/cancel.py +81 -0
  324. agno/run/messages.py +32 -0
  325. agno/run/team.py +753 -0
  326. agno/run/workflow.py +708 -0
  327. agno/session/__init__.py +10 -0
  328. agno/session/agent.py +295 -0
  329. agno/session/summary.py +265 -0
  330. agno/session/team.py +392 -0
  331. agno/session/workflow.py +205 -0
  332. agno/team/__init__.py +37 -0
  333. agno/team/team.py +8793 -0
  334. agno/tools/__init__.py +10 -0
  335. agno/tools/agentql.py +120 -0
  336. agno/tools/airflow.py +69 -0
  337. agno/tools/api.py +122 -0
  338. agno/tools/apify.py +314 -0
  339. agno/tools/arxiv.py +127 -0
  340. agno/tools/aws_lambda.py +53 -0
  341. agno/tools/aws_ses.py +66 -0
  342. agno/tools/baidusearch.py +89 -0
  343. agno/tools/bitbucket.py +292 -0
  344. agno/tools/brandfetch.py +213 -0
  345. agno/tools/bravesearch.py +106 -0
  346. agno/tools/brightdata.py +367 -0
  347. agno/tools/browserbase.py +209 -0
  348. agno/tools/calcom.py +255 -0
  349. agno/tools/calculator.py +151 -0
  350. agno/tools/cartesia.py +187 -0
  351. agno/tools/clickup.py +244 -0
  352. agno/tools/confluence.py +240 -0
  353. agno/tools/crawl4ai.py +158 -0
  354. agno/tools/csv_toolkit.py +185 -0
  355. agno/tools/dalle.py +110 -0
  356. agno/tools/daytona.py +475 -0
  357. agno/tools/decorator.py +262 -0
  358. agno/tools/desi_vocal.py +108 -0
  359. agno/tools/discord.py +161 -0
  360. agno/tools/docker.py +716 -0
  361. agno/tools/duckdb.py +379 -0
  362. agno/tools/duckduckgo.py +91 -0
  363. agno/tools/e2b.py +703 -0
  364. agno/tools/eleven_labs.py +196 -0
  365. agno/tools/email.py +67 -0
  366. agno/tools/evm.py +129 -0
  367. agno/tools/exa.py +396 -0
  368. agno/tools/fal.py +127 -0
  369. agno/tools/file.py +240 -0
  370. agno/tools/file_generation.py +350 -0
  371. agno/tools/financial_datasets.py +288 -0
  372. agno/tools/firecrawl.py +143 -0
  373. agno/tools/function.py +1187 -0
  374. agno/tools/giphy.py +93 -0
  375. agno/tools/github.py +1760 -0
  376. agno/tools/gmail.py +922 -0
  377. agno/tools/google_bigquery.py +117 -0
  378. agno/tools/google_drive.py +270 -0
  379. agno/tools/google_maps.py +253 -0
  380. agno/tools/googlecalendar.py +674 -0
  381. agno/tools/googlesearch.py +98 -0
  382. agno/tools/googlesheets.py +377 -0
  383. agno/tools/hackernews.py +77 -0
  384. agno/tools/jina.py +101 -0
  385. agno/tools/jira.py +170 -0
  386. agno/tools/knowledge.py +218 -0
  387. agno/tools/linear.py +426 -0
  388. agno/tools/linkup.py +58 -0
  389. agno/tools/local_file_system.py +90 -0
  390. agno/tools/lumalab.py +183 -0
  391. agno/tools/mcp/__init__.py +10 -0
  392. agno/tools/mcp/mcp.py +331 -0
  393. agno/tools/mcp/multi_mcp.py +347 -0
  394. agno/tools/mcp/params.py +24 -0
  395. agno/tools/mcp_toolbox.py +284 -0
  396. agno/tools/mem0.py +193 -0
  397. agno/tools/memori.py +339 -0
  398. agno/tools/memory.py +419 -0
  399. agno/tools/mlx_transcribe.py +139 -0
  400. agno/tools/models/__init__.py +0 -0
  401. agno/tools/models/azure_openai.py +190 -0
  402. agno/tools/models/gemini.py +203 -0
  403. agno/tools/models/groq.py +158 -0
  404. agno/tools/models/morph.py +186 -0
  405. agno/tools/models/nebius.py +124 -0
  406. agno/tools/models_labs.py +195 -0
  407. agno/tools/moviepy_video.py +349 -0
  408. agno/tools/neo4j.py +134 -0
  409. agno/tools/newspaper.py +46 -0
  410. agno/tools/newspaper4k.py +93 -0
  411. agno/tools/notion.py +204 -0
  412. agno/tools/openai.py +202 -0
  413. agno/tools/openbb.py +160 -0
  414. agno/tools/opencv.py +321 -0
  415. agno/tools/openweather.py +233 -0
  416. agno/tools/oxylabs.py +385 -0
  417. agno/tools/pandas.py +102 -0
  418. agno/tools/parallel.py +314 -0
  419. agno/tools/postgres.py +257 -0
  420. agno/tools/pubmed.py +188 -0
  421. agno/tools/python.py +205 -0
  422. agno/tools/reasoning.py +283 -0
  423. agno/tools/reddit.py +467 -0
  424. agno/tools/replicate.py +117 -0
  425. agno/tools/resend.py +62 -0
  426. agno/tools/scrapegraph.py +222 -0
  427. agno/tools/searxng.py +152 -0
  428. agno/tools/serpapi.py +116 -0
  429. agno/tools/serper.py +255 -0
  430. agno/tools/shell.py +53 -0
  431. agno/tools/slack.py +136 -0
  432. agno/tools/sleep.py +20 -0
  433. agno/tools/spider.py +116 -0
  434. agno/tools/sql.py +154 -0
  435. agno/tools/streamlit/__init__.py +0 -0
  436. agno/tools/streamlit/components.py +113 -0
  437. agno/tools/tavily.py +254 -0
  438. agno/tools/telegram.py +48 -0
  439. agno/tools/todoist.py +218 -0
  440. agno/tools/tool_registry.py +1 -0
  441. agno/tools/toolkit.py +146 -0
  442. agno/tools/trafilatura.py +388 -0
  443. agno/tools/trello.py +274 -0
  444. agno/tools/twilio.py +186 -0
  445. agno/tools/user_control_flow.py +78 -0
  446. agno/tools/valyu.py +228 -0
  447. agno/tools/visualization.py +467 -0
  448. agno/tools/webbrowser.py +28 -0
  449. agno/tools/webex.py +76 -0
  450. agno/tools/website.py +54 -0
  451. agno/tools/webtools.py +45 -0
  452. agno/tools/whatsapp.py +286 -0
  453. agno/tools/wikipedia.py +63 -0
  454. agno/tools/workflow.py +278 -0
  455. agno/tools/x.py +335 -0
  456. agno/tools/yfinance.py +257 -0
  457. agno/tools/youtube.py +184 -0
  458. agno/tools/zendesk.py +82 -0
  459. agno/tools/zep.py +454 -0
  460. agno/tools/zoom.py +382 -0
  461. agno/utils/__init__.py +0 -0
  462. agno/utils/agent.py +820 -0
  463. agno/utils/audio.py +49 -0
  464. agno/utils/certs.py +27 -0
  465. agno/utils/code_execution.py +11 -0
  466. agno/utils/common.py +132 -0
  467. agno/utils/dttm.py +13 -0
  468. agno/utils/enum.py +22 -0
  469. agno/utils/env.py +11 -0
  470. agno/utils/events.py +696 -0
  471. agno/utils/format_str.py +16 -0
  472. agno/utils/functions.py +166 -0
  473. agno/utils/gemini.py +426 -0
  474. agno/utils/hooks.py +57 -0
  475. agno/utils/http.py +74 -0
  476. agno/utils/json_schema.py +234 -0
  477. agno/utils/knowledge.py +36 -0
  478. agno/utils/location.py +19 -0
  479. agno/utils/log.py +255 -0
  480. agno/utils/mcp.py +214 -0
  481. agno/utils/media.py +352 -0
  482. agno/utils/merge_dict.py +41 -0
  483. agno/utils/message.py +118 -0
  484. agno/utils/models/__init__.py +0 -0
  485. agno/utils/models/ai_foundry.py +43 -0
  486. agno/utils/models/claude.py +358 -0
  487. agno/utils/models/cohere.py +87 -0
  488. agno/utils/models/llama.py +78 -0
  489. agno/utils/models/mistral.py +98 -0
  490. agno/utils/models/openai_responses.py +140 -0
  491. agno/utils/models/schema_utils.py +153 -0
  492. agno/utils/models/watsonx.py +41 -0
  493. agno/utils/openai.py +257 -0
  494. agno/utils/pickle.py +32 -0
  495. agno/utils/pprint.py +178 -0
  496. agno/utils/print_response/__init__.py +0 -0
  497. agno/utils/print_response/agent.py +842 -0
  498. agno/utils/print_response/team.py +1724 -0
  499. agno/utils/print_response/workflow.py +1668 -0
  500. agno/utils/prompts.py +111 -0
  501. agno/utils/reasoning.py +108 -0
  502. agno/utils/response.py +163 -0
  503. agno/utils/response_iterator.py +17 -0
  504. agno/utils/safe_formatter.py +24 -0
  505. agno/utils/serialize.py +32 -0
  506. agno/utils/shell.py +22 -0
  507. agno/utils/streamlit.py +487 -0
  508. agno/utils/string.py +231 -0
  509. agno/utils/team.py +139 -0
  510. agno/utils/timer.py +41 -0
  511. agno/utils/tools.py +102 -0
  512. agno/utils/web.py +23 -0
  513. agno/utils/whatsapp.py +305 -0
  514. agno/utils/yaml_io.py +25 -0
  515. agno/vectordb/__init__.py +3 -0
  516. agno/vectordb/base.py +127 -0
  517. agno/vectordb/cassandra/__init__.py +5 -0
  518. agno/vectordb/cassandra/cassandra.py +501 -0
  519. agno/vectordb/cassandra/extra_param_mixin.py +11 -0
  520. agno/vectordb/cassandra/index.py +13 -0
  521. agno/vectordb/chroma/__init__.py +5 -0
  522. agno/vectordb/chroma/chromadb.py +929 -0
  523. agno/vectordb/clickhouse/__init__.py +9 -0
  524. agno/vectordb/clickhouse/clickhousedb.py +835 -0
  525. agno/vectordb/clickhouse/index.py +9 -0
  526. agno/vectordb/couchbase/__init__.py +3 -0
  527. agno/vectordb/couchbase/couchbase.py +1442 -0
  528. agno/vectordb/distance.py +7 -0
  529. agno/vectordb/lancedb/__init__.py +6 -0
  530. agno/vectordb/lancedb/lance_db.py +995 -0
  531. agno/vectordb/langchaindb/__init__.py +5 -0
  532. agno/vectordb/langchaindb/langchaindb.py +163 -0
  533. agno/vectordb/lightrag/__init__.py +5 -0
  534. agno/vectordb/lightrag/lightrag.py +388 -0
  535. agno/vectordb/llamaindex/__init__.py +3 -0
  536. agno/vectordb/llamaindex/llamaindexdb.py +166 -0
  537. agno/vectordb/milvus/__init__.py +4 -0
  538. agno/vectordb/milvus/milvus.py +1182 -0
  539. agno/vectordb/mongodb/__init__.py +9 -0
  540. agno/vectordb/mongodb/mongodb.py +1417 -0
  541. agno/vectordb/pgvector/__init__.py +12 -0
  542. agno/vectordb/pgvector/index.py +23 -0
  543. agno/vectordb/pgvector/pgvector.py +1462 -0
  544. agno/vectordb/pineconedb/__init__.py +5 -0
  545. agno/vectordb/pineconedb/pineconedb.py +747 -0
  546. agno/vectordb/qdrant/__init__.py +5 -0
  547. agno/vectordb/qdrant/qdrant.py +1134 -0
  548. agno/vectordb/redis/__init__.py +9 -0
  549. agno/vectordb/redis/redisdb.py +694 -0
  550. agno/vectordb/search.py +7 -0
  551. agno/vectordb/singlestore/__init__.py +10 -0
  552. agno/vectordb/singlestore/index.py +41 -0
  553. agno/vectordb/singlestore/singlestore.py +763 -0
  554. agno/vectordb/surrealdb/__init__.py +3 -0
  555. agno/vectordb/surrealdb/surrealdb.py +699 -0
  556. agno/vectordb/upstashdb/__init__.py +5 -0
  557. agno/vectordb/upstashdb/upstashdb.py +718 -0
  558. agno/vectordb/weaviate/__init__.py +8 -0
  559. agno/vectordb/weaviate/index.py +15 -0
  560. agno/vectordb/weaviate/weaviate.py +1005 -0
  561. agno/workflow/__init__.py +23 -0
  562. agno/workflow/agent.py +299 -0
  563. agno/workflow/condition.py +738 -0
  564. agno/workflow/loop.py +735 -0
  565. agno/workflow/parallel.py +824 -0
  566. agno/workflow/router.py +702 -0
  567. agno/workflow/step.py +1432 -0
  568. agno/workflow/steps.py +592 -0
  569. agno/workflow/types.py +520 -0
  570. agno/workflow/workflow.py +4321 -0
  571. agno-2.2.13.dist-info/METADATA +614 -0
  572. agno-2.2.13.dist-info/RECORD +575 -0
  573. agno-2.2.13.dist-info/WHEEL +5 -0
  574. agno-2.2.13.dist-info/licenses/LICENSE +201 -0
  575. agno-2.2.13.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1050 @@
1
+ import time
2
+ from dataclasses import dataclass, field
3
+ from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Tuple, Type, Union
4
+
5
+ import httpx
6
+ from pydantic import BaseModel
7
+ from typing_extensions import Literal
8
+
9
+ from agno.exceptions import ModelProviderError
10
+ from agno.media import File
11
+ from agno.models.base import Model
12
+ from agno.models.message import Citations, Message, UrlCitation
13
+ from agno.models.metrics import Metrics
14
+ from agno.models.response import ModelResponse
15
+ from agno.run.agent import RunOutput
16
+ from agno.utils.log import log_debug, log_error, log_warning
17
+ from agno.utils.models.openai_responses import images_to_message
18
+ from agno.utils.models.schema_utils import get_response_schema_for_provider
19
+
20
+ try:
21
+ from openai import APIConnectionError, APIStatusError, AsyncOpenAI, OpenAI, RateLimitError
22
+ from openai.types.responses import Response, ResponseReasoningItem, ResponseStreamEvent, ResponseUsage
23
+ except ImportError as e:
24
+ raise ImportError("`openai` not installed. Please install using `pip install openai -U`") from e
25
+
26
+
27
@dataclass
class OpenAIResponses(Model):
    """
    A class for interacting with OpenAI models using the Responses API.

    For more information, see: https://platform.openai.com/docs/api-reference/responses
    """

    # Model identity
    id: str = "gpt-4o"
    name: str = "OpenAIResponses"
    provider: str = "OpenAI"
    # The Responses API can enforce a JSON schema natively for structured outputs.
    supports_native_structured_outputs: bool = True

    # Request parameters -- forwarded to the Responses API request when not None
    include: Optional[List[str]] = None
    max_output_tokens: Optional[int] = None
    max_tool_calls: Optional[int] = None
    metadata: Optional[Dict[str, Any]] = None
    parallel_tool_calls: Optional[bool] = None
    # Full `reasoning` payload; merged with reasoning_effort / reasoning_summary below.
    reasoning: Optional[Dict[str, Any]] = None
    verbosity: Optional[Literal["low", "medium", "high"]] = None
    reasoning_effort: Optional[Literal["minimal", "low", "medium", "high"]] = None
    reasoning_summary: Optional[Literal["auto", "concise", "detailed"]] = None
    store: Optional[bool] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    truncation: Optional[Literal["auto", "disabled"]] = None
    user: Optional[str] = None
    service_tier: Optional[Literal["auto", "default", "flex", "priority"]] = None
    strict_output: bool = True  # When True, guarantees schema adherence for structured outputs. When False, attempts to follow schema as a guide but may occasionally deviate
    extra_headers: Optional[Any] = None
    extra_query: Optional[Any] = None
    extra_body: Optional[Any] = None
    # Catch-all for additional request kwargs.
    request_params: Optional[Dict[str, Any]] = None

    # Client parameters -- used when constructing the OpenAI / AsyncOpenAI clients
    api_key: Optional[str] = None  # falls back to the OPENAI_API_KEY env var when unset
    organization: Optional[str] = None
    base_url: Optional[Union[str, httpx.URL]] = None
    timeout: Optional[float] = None
    max_retries: Optional[int] = None
    default_headers: Optional[Dict[str, str]] = None
    default_query: Optional[Dict[str, str]] = None
    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
    # Extra kwargs merged into the client constructor (override the derived values).
    client_params: Optional[Dict[str, Any]] = None

    # Parameters affecting built-in tools
    vector_store_name: str = "knowledge_base"

    # OpenAI clients (created lazily and cached by get_client / get_async_client)
    client: Optional[OpenAI] = None
    async_client: Optional[AsyncOpenAI] = None

    # The role to map the message role to.
    role_map: Dict[str, str] = field(
        default_factory=lambda: {
            "system": "developer",
            "user": "user",
            "assistant": "assistant",
            "tool": "tool",
        }
    )
89
+
90
+ def _using_reasoning_model(self) -> bool:
91
+ """Return True if the contextual used model is a known reasoning model."""
92
+ return self.id.startswith("o3") or self.id.startswith("o4-mini") or self.id.startswith("gpt-5")
93
+
94
+ def _set_reasoning_request_param(self, base_params: Dict[str, Any]) -> Dict[str, Any]:
95
+ """Set the reasoning request parameter."""
96
+ base_params["reasoning"] = self.reasoning or {}
97
+
98
+ if self.reasoning_effort is not None:
99
+ base_params["reasoning"]["effort"] = self.reasoning_effort
100
+
101
+ if self.reasoning_summary is not None:
102
+ base_params["reasoning"]["summary"] = self.reasoning_summary
103
+
104
+ return base_params
105
+
106
+ def _get_client_params(self) -> Dict[str, Any]:
107
+ """
108
+ Get client parameters for API requests.
109
+
110
+ Returns:
111
+ Dict[str, Any]: Client parameters
112
+ """
113
+ from os import getenv
114
+
115
+ # Fetch API key from env if not already set
116
+ if not self.api_key:
117
+ self.api_key = getenv("OPENAI_API_KEY")
118
+ if not self.api_key:
119
+ log_error("OPENAI_API_KEY not set. Please set the OPENAI_API_KEY environment variable.")
120
+
121
+ # Define base client params
122
+ base_params = {
123
+ "api_key": self.api_key,
124
+ "organization": self.organization,
125
+ "base_url": self.base_url,
126
+ "timeout": self.timeout,
127
+ "max_retries": self.max_retries,
128
+ "default_headers": self.default_headers,
129
+ "default_query": self.default_query,
130
+ }
131
+
132
+ # Create client_params dict with non-None values
133
+ client_params = {k: v for k, v in base_params.items() if v is not None}
134
+
135
+ # Add additional client params if provided
136
+ if self.client_params:
137
+ client_params.update(self.client_params)
138
+
139
+ return client_params
140
+
141
def get_client(self) -> OpenAI:
    """
    Return a cached synchronous OpenAI client, creating one on first use.

    Returns:
        OpenAI: An instance of the OpenAI client.
    """
    # Reuse the cached client unless it has been closed.
    if self.client is not None and not self.client.is_closed():
        return self.client

    params: Dict[str, Any] = self._get_client_params()
    # Only a synchronous httpx.Client can back the synchronous OpenAI client.
    if self.http_client is not None:
        if isinstance(self.http_client, httpx.Client):
            params["http_client"] = self.http_client
        else:
            log_debug("http_client is not an instance of httpx.Client.")

    self.client = OpenAI(**params)
    return self.client
160
+
161
def get_async_client(self) -> AsyncOpenAI:
    """
    Return a cached asynchronous OpenAI client, creating one on first use.

    Returns:
        AsyncOpenAI: An instance of the asynchronous OpenAI client.
    """
    # Reuse the cached client unless it has been closed.
    if self.async_client is not None and not self.async_client.is_closed():
        return self.async_client

    params: Dict[str, Any] = self._get_client_params()
    if isinstance(self.http_client, httpx.AsyncClient):
        params["http_client"] = self.http_client
    else:
        if self.http_client:
            log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
        # Create a new async HTTP client with custom limits
        params["http_client"] = httpx.AsyncClient(
            limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
        )

    self.async_client = AsyncOpenAI(**params)
    return self.async_client
184
+
185
+ def get_request_params(
186
+ self,
187
+ messages: Optional[List[Message]] = None,
188
+ response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
189
+ tools: Optional[List[Dict[str, Any]]] = None,
190
+ tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
191
+ ) -> Dict[str, Any]:
192
+ """
193
+ Returns keyword arguments for API requests.
194
+
195
+ Returns:
196
+ Dict[str, Any]: A dictionary of keyword arguments for API requests.
197
+ """
198
+ # Define base request parameters
199
+ base_params: Dict[str, Any] = {
200
+ "include": self.include,
201
+ "max_output_tokens": self.max_output_tokens,
202
+ "max_tool_calls": self.max_tool_calls,
203
+ "metadata": self.metadata,
204
+ "parallel_tool_calls": self.parallel_tool_calls,
205
+ "store": self.store,
206
+ "temperature": self.temperature,
207
+ "top_p": self.top_p,
208
+ "truncation": self.truncation,
209
+ "user": self.user,
210
+ "service_tier": self.service_tier,
211
+ "extra_headers": self.extra_headers,
212
+ "extra_query": self.extra_query,
213
+ "extra_body": self.extra_body,
214
+ }
215
+ # Populate the reasoning parameter
216
+ base_params = self._set_reasoning_request_param(base_params)
217
+
218
+ # Build text parameter
219
+ text_params: Dict[str, Any] = {}
220
+
221
+ # Add verbosity if specified
222
+ if self.verbosity is not None:
223
+ text_params["verbosity"] = self.verbosity
224
+
225
+ # Set the response format
226
+ if response_format is not None:
227
+ if isinstance(response_format, type) and issubclass(response_format, BaseModel):
228
+ schema = get_response_schema_for_provider(response_format, "openai")
229
+ text_params["format"] = {
230
+ "type": "json_schema",
231
+ "name": response_format.__name__,
232
+ "schema": schema,
233
+ "strict": self.strict_output,
234
+ }
235
+ else:
236
+ # JSON mode
237
+ text_params["format"] = {"type": "json_object"}
238
+
239
+ # Add text parameter if there are any text-level params
240
+ if text_params:
241
+ base_params["text"] = text_params
242
+
243
+ # Filter out None values
244
+ request_params: Dict[str, Any] = {k: v for k, v in base_params.items() if v is not None}
245
+
246
+ # Deep research models require web_search_preview tool or MCP tool
247
+ if "deep-research" in self.id:
248
+ if tools is None:
249
+ tools = []
250
+
251
+ # Check if web_search_preview tool is already present
252
+ has_web_search = any(tool.get("type") == "web_search_preview" for tool in tools)
253
+
254
+ # Add web_search_preview if not present - this enables the model to search
255
+ # the web for current information and provide citations
256
+ if not has_web_search:
257
+ web_search_tool = {"type": "web_search_preview"}
258
+ tools.insert(0, web_search_tool)
259
+ log_debug(f"Added web_search_preview tool for deep research model: {self.id}")
260
+
261
+ if tools:
262
+ request_params["tools"] = self._format_tool_params(messages=messages, tools=tools) # type: ignore
263
+
264
+ if tool_choice is not None:
265
+ request_params["tool_choice"] = tool_choice
266
+
267
+ # Handle reasoning tools for o3 and o4-mini models
268
+ if self._using_reasoning_model() and messages is not None:
269
+ if self.store is False:
270
+ request_params["store"] = False
271
+
272
+ # Add encrypted reasoning content to include if not already present
273
+ include_list = request_params.get("include", []) or []
274
+ if "reasoning.encrypted_content" not in include_list:
275
+ include_list.append("reasoning.encrypted_content")
276
+ if request_params.get("include") is None:
277
+ request_params["include"] = include_list
278
+ elif isinstance(request_params["include"], list):
279
+ request_params["include"].extend(include_list)
280
+
281
+ else:
282
+ request_params["store"] = True
283
+
284
+ # Check if the last assistant message has a previous_response_id to continue from
285
+ previous_response_id = None
286
+ for msg in reversed(messages):
287
+ if (
288
+ msg.role == "assistant"
289
+ and hasattr(msg, "provider_data")
290
+ and msg.provider_data
291
+ and "response_id" in msg.provider_data
292
+ ):
293
+ previous_response_id = msg.provider_data["response_id"]
294
+ log_debug(f"Using previous_response_id: {previous_response_id}")
295
+ break
296
+
297
+ if previous_response_id:
298
+ request_params["previous_response_id"] = previous_response_id
299
+
300
+ # Add additional request params if provided
301
+ if self.request_params:
302
+ request_params.update(self.request_params)
303
+
304
+ if request_params:
305
+ log_debug(f"Calling {self.provider} with request parameters: {request_params}", log_level=2)
306
+ return request_params
307
+
308
+ def _upload_file(self, file: File) -> Optional[str]:
309
+ """Upload a file to the OpenAI vector database."""
310
+
311
+ if file.url is not None:
312
+ file_content_tuple = file.file_url_content
313
+ if file_content_tuple is not None:
314
+ file_content = file_content_tuple[0]
315
+ else:
316
+ return None
317
+ file_name = file.url.split("/")[-1]
318
+ file_tuple = (file_name, file_content)
319
+ result = self.get_client().files.create(file=file_tuple, purpose="assistants")
320
+ return result.id
321
+ elif file.filepath is not None:
322
+ import mimetypes
323
+ from pathlib import Path
324
+
325
+ file_path = file.filepath if isinstance(file.filepath, Path) else Path(file.filepath)
326
+ if file_path.exists() and file_path.is_file():
327
+ file_name = file_path.name
328
+ file_content = file_path.read_bytes() # type: ignore
329
+ content_type = mimetypes.guess_type(file_path)[0]
330
+ result = self.get_client().files.create(
331
+ file=(file_name, file_content, content_type),
332
+ purpose="assistants", # type: ignore
333
+ )
334
+ return result.id
335
+ else:
336
+ raise ValueError(f"File not found: {file_path}")
337
+ elif file.content is not None:
338
+ result = self.get_client().files.create(file=file.content, purpose="assistants")
339
+ return result.id
340
+
341
+ return None
342
+
343
+ def _create_vector_store(self, file_ids: List[str]) -> str:
344
+ """Create a vector store for the files."""
345
+ vector_store = self.get_client().vector_stores.create(name=self.vector_store_name)
346
+ for file_id in file_ids:
347
+ self.get_client().vector_stores.files.create(vector_store_id=vector_store.id, file_id=file_id)
348
+ while True:
349
+ uploaded_files = self.get_client().vector_stores.files.list(vector_store_id=vector_store.id)
350
+ all_completed = True
351
+ failed = False
352
+ for file in uploaded_files:
353
+ if file.status == "failed":
354
+ log_error(f"File {file.id} failed to upload.")
355
+ failed = True
356
+ break
357
+ if file.status != "completed":
358
+ all_completed = False
359
+ if all_completed or failed:
360
+ break
361
+ time.sleep(1)
362
+ return vector_store.id
363
+
364
+ def _format_tool_params(
365
+ self, messages: List[Message], tools: Optional[List[Dict[str, Any]]] = None
366
+ ) -> List[Dict[str, Any]]:
367
+ """Format the tool parameters for the OpenAI Responses API."""
368
+ formatted_tools = []
369
+ if tools:
370
+ for _tool in tools:
371
+ if _tool.get("type") == "function":
372
+ _tool_dict = _tool.get("function", {})
373
+ _tool_dict["type"] = "function"
374
+ for prop in _tool_dict.get("parameters", {}).get("properties", {}).values():
375
+ if isinstance(prop.get("type", ""), list):
376
+ prop["type"] = prop["type"][0]
377
+
378
+ formatted_tools.append(_tool_dict)
379
+ else:
380
+ formatted_tools.append(_tool)
381
+
382
+ # Find files to upload to the OpenAI vector database
383
+ file_ids = []
384
+ for message in messages:
385
+ # Upload any attached files to the OpenAI vector database
386
+ if message.files is not None and len(message.files) > 0:
387
+ for file in message.files:
388
+ file_id = self._upload_file(file)
389
+ if file_id is not None:
390
+ file_ids.append(file_id)
391
+
392
+ vector_store_id = self._create_vector_store(file_ids) if file_ids else None
393
+
394
+ # Add the file IDs to the tool parameters
395
+ for _tool in formatted_tools:
396
+ if _tool["type"] == "file_search" and vector_store_id is not None:
397
+ _tool["vector_store_ids"] = [vector_store_id]
398
+
399
+ return formatted_tools
400
+
401
    def _format_messages(self, messages: List[Message]) -> List[Union[Dict[str, Any], ResponseReasoningItem]]:
        """
        Format conversation messages into the input items expected by the OpenAI
        Responses API.

        Args:
            messages (List[Message]): The messages to format.

        Returns:
            List[Union[Dict[str, Any], ResponseReasoningItem]]: The formatted input
            items (role messages, function_call / function_call_output items, and —
            in ZDR mode — reconstructed reasoning items).
        """
        formatted_messages: List[Union[Dict[str, Any], ResponseReasoningItem]] = []

        messages_to_format = messages
        previous_response_id: Optional[str] = None

        if self._using_reasoning_model() and self.store is not False:
            # Detect whether we're chaining via previous_response_id. If so, we should NOT
            # re-send prior function_call items; the Responses API already has the state and
            # expects only the corresponding function_call_output items.

            for msg in reversed(messages):
                if (
                    msg.role == "assistant"
                    and hasattr(msg, "provider_data")
                    and msg.provider_data
                    and "response_id" in msg.provider_data
                ):
                    previous_response_id = msg.provider_data["response_id"]
                    msg_index = messages.index(msg)

                    # Include messages after this assistant message
                    messages_to_format = messages[msg_index + 1 :]

                    break

        # Build a mapping from function_call id (fc_*) → call_id (call_*) from prior assistant tool_calls
        fc_id_to_call_id: Dict[str, str] = {}
        for msg in messages:
            tool_calls = getattr(msg, "tool_calls", None)
            if tool_calls:
                for tc in tool_calls:
                    fc_id = tc.get("id")
                    call_id = tc.get("call_id") or fc_id
                    if isinstance(fc_id, str) and isinstance(call_id, str):
                        fc_id_to_call_id[fc_id] = call_id

        for message in messages_to_format:
            if message.role in ["user", "system"]:
                message_dict: Dict[str, Any] = {
                    "role": self.role_map[message.role],
                    "content": message.content,
                }
                # Drop keys whose value is None so they are not sent to the API.
                message_dict = {k: v for k, v in message_dict.items() if v is not None}

                # Ignore non-string message content
                # because we assume that the images/audio are already added to the message
                if message.images is not None and len(message.images) > 0:
                    # Ignore non-string message content
                    # because we assume that the images/audio are already added to the message
                    if isinstance(message.content, str):
                        message_dict["content"] = [{"type": "input_text", "text": message.content}]
                        if message.images is not None:
                            message_dict["content"].extend(images_to_message(images=message.images))

                if message.audio is not None and len(message.audio) > 0:
                    log_warning("Audio input is currently unsupported.")

                if message.videos is not None and len(message.videos) > 0:
                    log_warning("Video input is currently unsupported.")

                formatted_messages.append(message_dict)

            # Tool call result
            elif message.role == "tool":
                if message.tool_call_id and message.content is not None:
                    function_call_id = message.tool_call_id
                    # Normalize: if a fc_* id was provided, translate to its corresponding call_* id
                    if isinstance(function_call_id, str) and function_call_id in fc_id_to_call_id:
                        call_id_value = fc_id_to_call_id[function_call_id]
                    else:
                        call_id_value = function_call_id
                    formatted_messages.append(
                        {"type": "function_call_output", "call_id": call_id_value, "output": message.content}
                    )
            # Tool Calls
            elif message.tool_calls is not None and len(message.tool_calls) > 0:
                # Only skip re-sending prior function_call items when we have a previous_response_id
                # (reasoning models). For non-reasoning models, we must include the prior function_call
                # so the API can associate the subsequent function_call_output by call_id.
                if self._using_reasoning_model() and previous_response_id is not None:
                    continue

                for tool_call in message.tool_calls:
                    formatted_messages.append(
                        {
                            "type": "function_call",
                            "id": tool_call.get("id"),
                            "call_id": tool_call.get("call_id", tool_call.get("id")),
                            "name": tool_call["function"]["name"],
                            "arguments": tool_call["function"]["arguments"],
                            "status": "completed",
                        }
                    )
            elif message.role == "assistant":
                # Handle null content by converting to empty string
                content = message.content if message.content is not None else ""
                formatted_messages.append({"role": self.role_map[message.role], "content": content})

                # In ZDR mode (store=False) re-send the previously captured reasoning
                # item so the API can continue the reasoning chain without server state.
                if self.store is False and hasattr(message, "provider_data") and message.provider_data is not None:
                    if message.provider_data.get("reasoning_output") is not None:
                        reasoning_output = ResponseReasoningItem.model_validate(
                            message.provider_data["reasoning_output"]
                        )
                        formatted_messages.append(reasoning_output)
        return formatted_messages
516
+
517
    def invoke(
        self,
        messages: List[Message],
        assistant_message: Message,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[RunOutput] = None,
    ) -> ModelResponse:
        """
        Send a request to the OpenAI Responses API.

        Args:
            messages: Conversation messages to send.
            assistant_message: Message whose metrics timer brackets the API call.
            response_format: Optional structured-output spec (Pydantic model or dict).
            tools: Optional tool definitions.
            tool_choice: Optional tool-choice directive.
            run_response: Optional run output; when it carries metrics,
                time-to-first-token is recorded.

        Returns:
            ModelResponse: The parsed provider response.

        Raises:
            ModelProviderError: Wrapping any rate-limit, connection, API-status, or
                unexpected error from the OpenAI client.
        """
        try:
            request_params = self.get_request_params(
                messages=messages, response_format=response_format, tools=tools, tool_choice=tool_choice
            )

            # NOTE(review): TTFT is recorded before the request is issued — confirm
            # this is the intended measurement point for non-streaming calls.
            if run_response and run_response.metrics:
                run_response.metrics.set_time_to_first_token()

            assistant_message.metrics.start_timer()

            provider_response = self.get_client().responses.create(
                model=self.id,
                input=self._format_messages(messages),  # type: ignore
                **request_params,
            )

            assistant_message.metrics.stop_timer()

            model_response = self._parse_provider_response(provider_response, response_format=response_format)

            return model_response

        except RateLimitError as exc:
            log_error(f"Rate limit error from OpenAI API: {exc}")
            # Pull the human-readable message out of the provider's error payload.
            error_message = exc.response.json().get("error", {})
            error_message = (
                error_message.get("message", "Unknown model error")
                if isinstance(error_message, dict)
                else error_message
            )
            raise ModelProviderError(
                message=error_message,
                status_code=exc.response.status_code,
                model_name=self.name,
                model_id=self.id,
            ) from exc
        except APIConnectionError as exc:
            log_error(f"API connection error from OpenAI API: {exc}")
            raise ModelProviderError(message=str(exc), model_name=self.name, model_id=self.id) from exc
        except APIStatusError as exc:
            log_error(f"API status error from OpenAI API: {exc}")
            error_message = exc.response.json().get("error", {})
            error_message = (
                error_message.get("message", "Unknown model error")
                if isinstance(error_message, dict)
                else error_message
            )
            raise ModelProviderError(
                message=error_message,
                status_code=exc.response.status_code,
                model_name=self.name,
                model_id=self.id,
            ) from exc
        except Exception as exc:
            # Catch-all boundary: re-raise as the provider-agnostic error type.
            log_error(f"Error from OpenAI API: {exc}")
            raise ModelProviderError(message=str(exc), model_name=self.name, model_id=self.id) from exc
585
+
586
    async def ainvoke(
        self,
        messages: List[Message],
        assistant_message: Message,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[RunOutput] = None,
    ) -> ModelResponse:
        """
        Send an asynchronous request to the OpenAI Responses API.

        Args:
            messages: Conversation messages to send.
            assistant_message: Message whose metrics timer brackets the API call.
            response_format: Optional structured-output spec (Pydantic model or dict).
            tools: Optional tool definitions.
            tool_choice: Optional tool-choice directive.
            run_response: Optional run output; when it carries metrics,
                time-to-first-token is recorded.

        Returns:
            ModelResponse: The parsed provider response.

        Raises:
            ModelProviderError: Wrapping any rate-limit, connection, API-status, or
                unexpected error from the OpenAI client.
        """
        try:
            request_params = self.get_request_params(
                messages=messages, response_format=response_format, tools=tools, tool_choice=tool_choice
            )

            # NOTE(review): TTFT is recorded before the request is issued — confirm
            # this is the intended measurement point for non-streaming calls.
            if run_response and run_response.metrics:
                run_response.metrics.set_time_to_first_token()

            assistant_message.metrics.start_timer()

            provider_response = await self.get_async_client().responses.create(
                model=self.id,
                input=self._format_messages(messages),  # type: ignore
                **request_params,
            )

            assistant_message.metrics.stop_timer()

            model_response = self._parse_provider_response(provider_response, response_format=response_format)

            return model_response

        except RateLimitError as exc:
            log_error(f"Rate limit error from OpenAI API: {exc}")
            # Pull the human-readable message out of the provider's error payload.
            error_message = exc.response.json().get("error", {})
            error_message = (
                error_message.get("message", "Unknown model error")
                if isinstance(error_message, dict)
                else error_message
            )
            raise ModelProviderError(
                message=error_message,
                status_code=exc.response.status_code,
                model_name=self.name,
                model_id=self.id,
            ) from exc
        except APIConnectionError as exc:
            log_error(f"API connection error from OpenAI API: {exc}")
            raise ModelProviderError(message=str(exc), model_name=self.name, model_id=self.id) from exc
        except APIStatusError as exc:
            log_error(f"API status error from OpenAI API: {exc}")
            error_message = exc.response.json().get("error", {})
            error_message = (
                error_message.get("message", "Unknown model error")
                if isinstance(error_message, dict)
                else error_message
            )
            raise ModelProviderError(
                message=error_message,
                status_code=exc.response.status_code,
                model_name=self.name,
                model_id=self.id,
            ) from exc
        except Exception as exc:
            # Catch-all boundary: re-raise as the provider-agnostic error type.
            log_error(f"Error from OpenAI API: {exc}")
            raise ModelProviderError(message=str(exc), model_name=self.name, model_id=self.id) from exc
654
+
655
    def invoke_stream(
        self,
        messages: List[Message],
        assistant_message: Message,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[RunOutput] = None,
    ) -> Iterator[ModelResponse]:
        """
        Send a streaming request to the OpenAI Responses API.

        Args:
            messages: Conversation messages to send.
            assistant_message: Message whose metrics timer brackets the stream and
                which accumulates tool-call state as chunks arrive.
            response_format: Optional structured-output spec (Pydantic model or dict).
            tools: Optional tool definitions.
            tool_choice: Optional tool-choice directive.
            run_response: Optional run output; when it carries metrics,
                time-to-first-token is recorded.

        Yields:
            ModelResponse: One parsed delta per stream event.

        Raises:
            ModelProviderError: Wrapping any rate-limit, connection, API-status, or
                unexpected error from the OpenAI client.
        """
        try:
            request_params = self.get_request_params(
                messages=messages, response_format=response_format, tools=tools, tool_choice=tool_choice
            )
            # Accumulates the in-flight function call across delta events.
            tool_use: Dict[str, Any] = {}

            if run_response and run_response.metrics:
                run_response.metrics.set_time_to_first_token()

            assistant_message.metrics.start_timer()

            for chunk in self.get_client().responses.create(
                model=self.id,
                input=self._format_messages(messages),  # type: ignore
                stream=True,
                **request_params,
            ):
                model_response, tool_use = self._parse_provider_response_delta(
                    stream_event=chunk,  # type: ignore
                    assistant_message=assistant_message,
                    tool_use=tool_use,  # type: ignore
                )
                yield model_response

            assistant_message.metrics.stop_timer()

        except RateLimitError as exc:
            log_error(f"Rate limit error from OpenAI API: {exc}")
            # Pull the human-readable message out of the provider's error payload.
            error_message = exc.response.json().get("error", {})
            error_message = (
                error_message.get("message", "Unknown model error")
                if isinstance(error_message, dict)
                else error_message
            )
            raise ModelProviderError(
                message=error_message,
                status_code=exc.response.status_code,
                model_name=self.name,
                model_id=self.id,
            ) from exc
        except APIConnectionError as exc:
            log_error(f"API connection error from OpenAI API: {exc}")
            raise ModelProviderError(message=str(exc), model_name=self.name, model_id=self.id) from exc
        except APIStatusError as exc:
            log_error(f"API status error from OpenAI API: {exc}")
            error_message = exc.response.json().get("error", {})
            error_message = (
                error_message.get("message", "Unknown model error")
                if isinstance(error_message, dict)
                else error_message
            )
            raise ModelProviderError(
                message=error_message,
                status_code=exc.response.status_code,
                model_name=self.name,
                model_id=self.id,
            ) from exc
        except Exception as exc:
            # Catch-all boundary: re-raise as the provider-agnostic error type.
            log_error(f"Error from OpenAI API: {exc}")
            raise ModelProviderError(message=str(exc), model_name=self.name, model_id=self.id) from exc
727
+
728
    async def ainvoke_stream(
        self,
        messages: List[Message],
        assistant_message: Message,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[RunOutput] = None,
    ) -> AsyncIterator[ModelResponse]:
        """
        Send an asynchronous streaming request to the OpenAI Responses API.

        Args:
            messages: Conversation messages to send.
            assistant_message: Message whose metrics timer brackets the stream and
                which accumulates tool-call state as chunks arrive.
            response_format: Optional structured-output spec (Pydantic model or dict).
            tools: Optional tool definitions.
            tool_choice: Optional tool-choice directive.
            run_response: Optional run output; when it carries metrics,
                time-to-first-token is recorded.

        Yields:
            ModelResponse: One parsed delta per stream event.

        Raises:
            ModelProviderError: Wrapping any rate-limit, connection, API-status, or
                unexpected error from the OpenAI client.
        """
        try:
            request_params = self.get_request_params(
                messages=messages, response_format=response_format, tools=tools, tool_choice=tool_choice
            )
            # Accumulates the in-flight function call across delta events.
            tool_use: Dict[str, Any] = {}

            if run_response and run_response.metrics:
                run_response.metrics.set_time_to_first_token()

            assistant_message.metrics.start_timer()

            async_stream = await self.get_async_client().responses.create(
                model=self.id,
                input=self._format_messages(messages),  # type: ignore
                stream=True,
                **request_params,
            )
            async for chunk in async_stream:  # type: ignore
                model_response, tool_use = self._parse_provider_response_delta(chunk, assistant_message, tool_use)  # type: ignore
                yield model_response

            assistant_message.metrics.stop_timer()

        except RateLimitError as exc:
            log_error(f"Rate limit error from OpenAI API: {exc}")
            # Pull the human-readable message out of the provider's error payload.
            error_message = exc.response.json().get("error", {})
            error_message = (
                error_message.get("message", "Unknown model error")
                if isinstance(error_message, dict)
                else error_message
            )
            raise ModelProviderError(
                message=error_message,
                status_code=exc.response.status_code,
                model_name=self.name,
                model_id=self.id,
            ) from exc
        except APIConnectionError as exc:
            log_error(f"API connection error from OpenAI API: {exc}")
            raise ModelProviderError(message=str(exc), model_name=self.name, model_id=self.id) from exc
        except APIStatusError as exc:
            log_error(f"API status error from OpenAI API: {exc}")
            error_message = exc.response.json().get("error", {})
            error_message = (
                error_message.get("message", "Unknown model error")
                if isinstance(error_message, dict)
                else error_message
            )
            raise ModelProviderError(
                message=error_message,
                status_code=exc.response.status_code,
                model_name=self.name,
                model_id=self.id,
            ) from exc
        except Exception as exc:
            # Catch-all boundary: re-raise as the provider-agnostic error type.
            log_error(f"Error from OpenAI API: {exc}")
            raise ModelProviderError(message=str(exc), model_name=self.name, model_id=self.id) from exc
797
+
798
+ def format_function_call_results(
799
+ self, messages: List[Message], function_call_results: List[Message], tool_call_ids: List[str]
800
+ ) -> None:
801
+ """
802
+ Handle the results of function calls.
803
+
804
+ Args:
805
+ messages (List[Message]): The list of conversation messages.
806
+ function_call_results (List[Message]): The results of the function calls.
807
+ tool_ids (List[str]): The tool ids.
808
+ """
809
+ if len(function_call_results) > 0:
810
+ for _fc_message_index, _fc_message in enumerate(function_call_results):
811
+ _fc_message.tool_call_id = tool_call_ids[_fc_message_index]
812
+ messages.append(_fc_message)
813
+
814
    def _parse_provider_response(self, response: Response, **kwargs) -> ModelResponse:
        """
        Parse the OpenAI Responses API response into a ModelResponse.

        Args:
            response (Response): Response from the invoke() / ainvoke() call.

        Returns:
            ModelResponse: Parsed response data (content, citations, tool calls,
            reasoning content, and usage metrics).

        Raises:
            ModelProviderError: If the provider reported an error in the response body.
        """
        model_response = ModelResponse()

        if response.error:
            raise ModelProviderError(
                message=response.error.message,
                model_name=self.name,
                model_id=self.id,
            )

        # Store the response ID for continuity
        if response.id:
            if model_response.provider_data is None:
                model_response.provider_data = {}
            model_response.provider_data["response_id"] = response.id

        # Add role
        model_response.role = "assistant"
        reasoning_summary: Optional[str] = None

        for output in response.output:
            # Add content
            if output.type == "message":
                model_response.content = response.output_text

                # Add citations
                citations = Citations()
                for content in output.content:
                    if content.type == "output_text" and content.annotations:
                        citations.raw = [annotation.model_dump() for annotation in content.annotations]
                        for annotation in content.annotations:
                            if annotation.type == "url_citation":
                                if citations.urls is None:
                                    citations.urls = []
                                citations.urls.append(UrlCitation(url=annotation.url, title=annotation.title))
                # Only attach citations when something was actually collected.
                if citations.urls or citations.documents:
                    model_response.citations = citations

            # Add tool calls
            elif output.type == "function_call":
                if model_response.tool_calls is None:
                    model_response.tool_calls = []
                model_response.tool_calls.append(
                    {
                        "id": output.id,
                        # Store additional call_id from OpenAI responses
                        "call_id": output.call_id or output.id,
                        "type": "function",
                        "function": {
                            "name": output.name,
                            "arguments": output.arguments,
                        },
                    }
                )

                model_response.extra = model_response.extra or {}
                model_response.extra.setdefault("tool_call_ids", []).append(output.call_id)

            # Handle reasoning output items
            elif output.type == "reasoning":
                # Save encrypted reasoning content for ZDR mode
                if self.store is False:
                    if model_response.provider_data is None:
                        model_response.provider_data = {}
                    model_response.provider_data["reasoning_output"] = output.model_dump(exclude_none=True)

                # Concatenate all reasoning summary fragments into one string.
                if reasoning_summaries := getattr(output, "summary", None):
                    for summary in reasoning_summaries:
                        if isinstance(summary, dict):
                            summary_text = summary.get("text")
                        else:
                            summary_text = getattr(summary, "text", None)
                        if summary_text:
                            reasoning_summary = (reasoning_summary or "") + summary_text

        # Add reasoning content
        if reasoning_summary is not None:
            model_response.reasoning_content = reasoning_summary
        elif self.reasoning is not None:
            # No summary was returned: fall back to the full output text.
            model_response.reasoning_content = response.output_text

        # Add metrics
        if response.usage is not None:
            model_response.response_usage = self._get_metrics(response.usage)

        return model_response
909
+
910
    def _parse_provider_response_delta(
        self, stream_event: ResponseStreamEvent, assistant_message: Message, tool_use: Dict[str, Any]
    ) -> Tuple[ModelResponse, Dict[str, Any]]:
        """
        Parse one streaming event from the model provider into a ModelResponse delta.

        Args:
            stream_event (ResponseStreamEvent): Raw event from the streaming response.
            assistant_message (Message): Message that accumulates tool calls and
                time-to-first-token as the stream progresses.
            tool_use (Dict[str, Any]): In-flight function-call state carried between
                events; pass back the returned value on the next call.

        Returns:
            Tuple[ModelResponse, Dict[str, Any]]: The parsed delta and the updated
            in-flight tool-call state.
        """
        model_response = ModelResponse()

        # 1. Add response ID
        if stream_event.type == "response.created":
            if stream_event.response.id:
                if model_response.provider_data is None:
                    model_response.provider_data = {}
                model_response.provider_data["response_id"] = stream_event.response.id
            if not assistant_message.metrics.time_to_first_token:
                assistant_message.metrics.set_time_to_first_token()

        # 2. Add citations
        elif stream_event.type == "response.output_text.annotation.added":
            if model_response.citations is None:
                model_response.citations = Citations(raw=[stream_event.annotation])
            else:
                model_response.citations.raw.append(stream_event.annotation)  # type: ignore

            # The annotation may arrive as a plain dict or a typed object.
            if isinstance(stream_event.annotation, dict):
                if stream_event.annotation.get("type") == "url_citation":
                    if model_response.citations.urls is None:
                        model_response.citations.urls = []
                    model_response.citations.urls.append(
                        UrlCitation(url=stream_event.annotation.get("url"), title=stream_event.annotation.get("title"))
                    )
            else:
                if stream_event.annotation.type == "url_citation":  # type: ignore
                    if model_response.citations.urls is None:
                        model_response.citations.urls = []
                    model_response.citations.urls.append(
                        UrlCitation(url=stream_event.annotation.url, title=stream_event.annotation.title)  # type: ignore
                    )

        # 3. Add content
        elif stream_event.type == "response.output_text.delta":
            model_response.content = stream_event.delta

            # Treat the output_text deltas as reasoning content if the reasoning summary is not requested.
            if self.reasoning is not None and self.reasoning_summary is None:
                model_response.reasoning_content = stream_event.delta

        # 4. Add tool calls information

        # 4.1 Add starting tool call
        elif stream_event.type == "response.output_item.added":
            item = stream_event.item
            if item.type == "function_call":
                tool_use = {
                    "id": getattr(item, "id", None),
                    "call_id": getattr(item, "call_id", None) or getattr(item, "id", None),
                    "type": "function",
                    "function": {
                        "name": item.name,
                        "arguments": item.arguments,
                    },
                }

        # 4.2 Add tool call arguments
        elif stream_event.type == "response.function_call_arguments.delta":
            tool_use["function"]["arguments"] += stream_event.delta

        # 4.3 Add tool call completion data
        elif stream_event.type == "response.output_item.done" and tool_use:
            model_response.tool_calls = [tool_use]
            if assistant_message.tool_calls is None:
                assistant_message.tool_calls = []
            assistant_message.tool_calls.append(tool_use)

            model_response.extra = model_response.extra or {}
            model_response.extra.setdefault("tool_call_ids", []).append(tool_use["call_id"])
            # Reset the in-flight state for the next tool call.
            tool_use = {}

        # 5. Add metrics
        elif stream_event.type == "response.completed":
            model_response = ModelResponse()

            # Handle reasoning output items
            if self.reasoning_summary is not None or self.store is False:
                summary_text: str = ""
                for out in getattr(stream_event.response, "output", []) or []:
                    if getattr(out, "type", None) == "reasoning":
                        # In ZDR mode (store=False), store reasoning data for next request
                        if self.store is False and hasattr(out, "encrypted_content"):
                            if model_response.provider_data is None:
                                model_response.provider_data = {}
                            # Store the complete output item
                            model_response.provider_data["reasoning_output"] = out.model_dump(exclude_none=True)
                        if self.reasoning_summary is not None:
                            summaries = getattr(out, "summary", None)
                            if summaries:
                                for s in summaries:
                                    # Summary entries may be dicts or typed objects.
                                    text_val = s.get("text") if isinstance(s, dict) else getattr(s, "text", None)
                                    if text_val:
                                        if summary_text:
                                            summary_text += "\n\n"
                                        summary_text += text_val

                if summary_text:
                    model_response.reasoning_content = summary_text

            # Add metrics
            if stream_event.response.usage is not None:
                model_response.response_usage = self._get_metrics(stream_event.response.usage)

        return model_response, tool_use
1027
+
1028
+ def _get_metrics(self, response_usage: ResponseUsage) -> Metrics:
1029
+ """
1030
+ Parse the given OpenAI-specific usage into an Agno Metrics object.
1031
+
1032
+ Args:
1033
+ response: The response from the provider.
1034
+
1035
+ Returns:
1036
+ Metrics: Parsed metrics data
1037
+ """
1038
+ metrics = Metrics()
1039
+
1040
+ metrics.input_tokens = response_usage.input_tokens or 0
1041
+ metrics.output_tokens = response_usage.output_tokens or 0
1042
+ metrics.total_tokens = response_usage.total_tokens or 0
1043
+
1044
+ if input_tokens_details := response_usage.input_tokens_details:
1045
+ metrics.cache_read_tokens = input_tokens_details.cached_tokens
1046
+
1047
+ if output_tokens_details := response_usage.output_tokens_details:
1048
+ metrics.reasoning_tokens = output_tokens_details.reasoning_tokens
1049
+
1050
+ return metrics