agno-2.2.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (575)
  1. agno/__init__.py +8 -0
  2. agno/agent/__init__.py +51 -0
  3. agno/agent/agent.py +10405 -0
  4. agno/api/__init__.py +0 -0
  5. agno/api/agent.py +28 -0
  6. agno/api/api.py +40 -0
  7. agno/api/evals.py +22 -0
  8. agno/api/os.py +17 -0
  9. agno/api/routes.py +13 -0
  10. agno/api/schemas/__init__.py +9 -0
  11. agno/api/schemas/agent.py +16 -0
  12. agno/api/schemas/evals.py +16 -0
  13. agno/api/schemas/os.py +14 -0
  14. agno/api/schemas/response.py +6 -0
  15. agno/api/schemas/team.py +16 -0
  16. agno/api/schemas/utils.py +21 -0
  17. agno/api/schemas/workflows.py +16 -0
  18. agno/api/settings.py +53 -0
  19. agno/api/team.py +30 -0
  20. agno/api/workflow.py +28 -0
  21. agno/cloud/aws/base.py +214 -0
  22. agno/cloud/aws/s3/__init__.py +2 -0
  23. agno/cloud/aws/s3/api_client.py +43 -0
  24. agno/cloud/aws/s3/bucket.py +195 -0
  25. agno/cloud/aws/s3/object.py +57 -0
  26. agno/culture/__init__.py +3 -0
  27. agno/culture/manager.py +956 -0
  28. agno/db/__init__.py +24 -0
  29. agno/db/async_postgres/__init__.py +3 -0
  30. agno/db/base.py +598 -0
  31. agno/db/dynamo/__init__.py +3 -0
  32. agno/db/dynamo/dynamo.py +2042 -0
  33. agno/db/dynamo/schemas.py +314 -0
  34. agno/db/dynamo/utils.py +743 -0
  35. agno/db/firestore/__init__.py +3 -0
  36. agno/db/firestore/firestore.py +1795 -0
  37. agno/db/firestore/schemas.py +140 -0
  38. agno/db/firestore/utils.py +376 -0
  39. agno/db/gcs_json/__init__.py +3 -0
  40. agno/db/gcs_json/gcs_json_db.py +1335 -0
  41. agno/db/gcs_json/utils.py +228 -0
  42. agno/db/in_memory/__init__.py +3 -0
  43. agno/db/in_memory/in_memory_db.py +1160 -0
  44. agno/db/in_memory/utils.py +230 -0
  45. agno/db/json/__init__.py +3 -0
  46. agno/db/json/json_db.py +1328 -0
  47. agno/db/json/utils.py +230 -0
  48. agno/db/migrations/__init__.py +0 -0
  49. agno/db/migrations/v1_to_v2.py +635 -0
  50. agno/db/mongo/__init__.py +17 -0
  51. agno/db/mongo/async_mongo.py +2026 -0
  52. agno/db/mongo/mongo.py +1982 -0
  53. agno/db/mongo/schemas.py +87 -0
  54. agno/db/mongo/utils.py +259 -0
  55. agno/db/mysql/__init__.py +3 -0
  56. agno/db/mysql/mysql.py +2308 -0
  57. agno/db/mysql/schemas.py +138 -0
  58. agno/db/mysql/utils.py +355 -0
  59. agno/db/postgres/__init__.py +4 -0
  60. agno/db/postgres/async_postgres.py +1927 -0
  61. agno/db/postgres/postgres.py +2260 -0
  62. agno/db/postgres/schemas.py +139 -0
  63. agno/db/postgres/utils.py +442 -0
  64. agno/db/redis/__init__.py +3 -0
  65. agno/db/redis/redis.py +1660 -0
  66. agno/db/redis/schemas.py +123 -0
  67. agno/db/redis/utils.py +346 -0
  68. agno/db/schemas/__init__.py +4 -0
  69. agno/db/schemas/culture.py +120 -0
  70. agno/db/schemas/evals.py +33 -0
  71. agno/db/schemas/knowledge.py +40 -0
  72. agno/db/schemas/memory.py +46 -0
  73. agno/db/schemas/metrics.py +0 -0
  74. agno/db/singlestore/__init__.py +3 -0
  75. agno/db/singlestore/schemas.py +130 -0
  76. agno/db/singlestore/singlestore.py +2272 -0
  77. agno/db/singlestore/utils.py +384 -0
  78. agno/db/sqlite/__init__.py +4 -0
  79. agno/db/sqlite/async_sqlite.py +2293 -0
  80. agno/db/sqlite/schemas.py +133 -0
  81. agno/db/sqlite/sqlite.py +2288 -0
  82. agno/db/sqlite/utils.py +431 -0
  83. agno/db/surrealdb/__init__.py +3 -0
  84. agno/db/surrealdb/metrics.py +292 -0
  85. agno/db/surrealdb/models.py +309 -0
  86. agno/db/surrealdb/queries.py +71 -0
  87. agno/db/surrealdb/surrealdb.py +1353 -0
  88. agno/db/surrealdb/utils.py +147 -0
  89. agno/db/utils.py +116 -0
  90. agno/debug.py +18 -0
  91. agno/eval/__init__.py +14 -0
  92. agno/eval/accuracy.py +834 -0
  93. agno/eval/performance.py +773 -0
  94. agno/eval/reliability.py +306 -0
  95. agno/eval/utils.py +119 -0
  96. agno/exceptions.py +161 -0
  97. agno/filters.py +354 -0
  98. agno/guardrails/__init__.py +6 -0
  99. agno/guardrails/base.py +19 -0
  100. agno/guardrails/openai.py +144 -0
  101. agno/guardrails/pii.py +94 -0
  102. agno/guardrails/prompt_injection.py +52 -0
  103. agno/integrations/__init__.py +0 -0
  104. agno/integrations/discord/__init__.py +3 -0
  105. agno/integrations/discord/client.py +203 -0
  106. agno/knowledge/__init__.py +5 -0
  107. agno/knowledge/chunking/__init__.py +0 -0
  108. agno/knowledge/chunking/agentic.py +79 -0
  109. agno/knowledge/chunking/document.py +91 -0
  110. agno/knowledge/chunking/fixed.py +57 -0
  111. agno/knowledge/chunking/markdown.py +151 -0
  112. agno/knowledge/chunking/recursive.py +63 -0
  113. agno/knowledge/chunking/row.py +39 -0
  114. agno/knowledge/chunking/semantic.py +86 -0
  115. agno/knowledge/chunking/strategy.py +165 -0
  116. agno/knowledge/content.py +74 -0
  117. agno/knowledge/document/__init__.py +5 -0
  118. agno/knowledge/document/base.py +58 -0
  119. agno/knowledge/embedder/__init__.py +5 -0
  120. agno/knowledge/embedder/aws_bedrock.py +343 -0
  121. agno/knowledge/embedder/azure_openai.py +210 -0
  122. agno/knowledge/embedder/base.py +23 -0
  123. agno/knowledge/embedder/cohere.py +323 -0
  124. agno/knowledge/embedder/fastembed.py +62 -0
  125. agno/knowledge/embedder/fireworks.py +13 -0
  126. agno/knowledge/embedder/google.py +258 -0
  127. agno/knowledge/embedder/huggingface.py +94 -0
  128. agno/knowledge/embedder/jina.py +182 -0
  129. agno/knowledge/embedder/langdb.py +22 -0
  130. agno/knowledge/embedder/mistral.py +206 -0
  131. agno/knowledge/embedder/nebius.py +13 -0
  132. agno/knowledge/embedder/ollama.py +154 -0
  133. agno/knowledge/embedder/openai.py +195 -0
  134. agno/knowledge/embedder/sentence_transformer.py +63 -0
  135. agno/knowledge/embedder/together.py +13 -0
  136. agno/knowledge/embedder/vllm.py +262 -0
  137. agno/knowledge/embedder/voyageai.py +165 -0
  138. agno/knowledge/knowledge.py +1988 -0
  139. agno/knowledge/reader/__init__.py +7 -0
  140. agno/knowledge/reader/arxiv_reader.py +81 -0
  141. agno/knowledge/reader/base.py +95 -0
  142. agno/knowledge/reader/csv_reader.py +166 -0
  143. agno/knowledge/reader/docx_reader.py +82 -0
  144. agno/knowledge/reader/field_labeled_csv_reader.py +292 -0
  145. agno/knowledge/reader/firecrawl_reader.py +201 -0
  146. agno/knowledge/reader/json_reader.py +87 -0
  147. agno/knowledge/reader/markdown_reader.py +137 -0
  148. agno/knowledge/reader/pdf_reader.py +431 -0
  149. agno/knowledge/reader/pptx_reader.py +101 -0
  150. agno/knowledge/reader/reader_factory.py +313 -0
  151. agno/knowledge/reader/s3_reader.py +89 -0
  152. agno/knowledge/reader/tavily_reader.py +194 -0
  153. agno/knowledge/reader/text_reader.py +115 -0
  154. agno/knowledge/reader/web_search_reader.py +372 -0
  155. agno/knowledge/reader/website_reader.py +455 -0
  156. agno/knowledge/reader/wikipedia_reader.py +59 -0
  157. agno/knowledge/reader/youtube_reader.py +78 -0
  158. agno/knowledge/remote_content/__init__.py +0 -0
  159. agno/knowledge/remote_content/remote_content.py +88 -0
  160. agno/knowledge/reranker/__init__.py +3 -0
  161. agno/knowledge/reranker/base.py +14 -0
  162. agno/knowledge/reranker/cohere.py +64 -0
  163. agno/knowledge/reranker/infinity.py +195 -0
  164. agno/knowledge/reranker/sentence_transformer.py +54 -0
  165. agno/knowledge/types.py +39 -0
  166. agno/knowledge/utils.py +189 -0
  167. agno/media.py +462 -0
  168. agno/memory/__init__.py +3 -0
  169. agno/memory/manager.py +1327 -0
  170. agno/models/__init__.py +0 -0
  171. agno/models/aimlapi/__init__.py +5 -0
  172. agno/models/aimlapi/aimlapi.py +45 -0
  173. agno/models/anthropic/__init__.py +5 -0
  174. agno/models/anthropic/claude.py +757 -0
  175. agno/models/aws/__init__.py +15 -0
  176. agno/models/aws/bedrock.py +701 -0
  177. agno/models/aws/claude.py +378 -0
  178. agno/models/azure/__init__.py +18 -0
  179. agno/models/azure/ai_foundry.py +485 -0
  180. agno/models/azure/openai_chat.py +131 -0
  181. agno/models/base.py +2175 -0
  182. agno/models/cerebras/__init__.py +12 -0
  183. agno/models/cerebras/cerebras.py +501 -0
  184. agno/models/cerebras/cerebras_openai.py +112 -0
  185. agno/models/cohere/__init__.py +5 -0
  186. agno/models/cohere/chat.py +389 -0
  187. agno/models/cometapi/__init__.py +5 -0
  188. agno/models/cometapi/cometapi.py +57 -0
  189. agno/models/dashscope/__init__.py +5 -0
  190. agno/models/dashscope/dashscope.py +91 -0
  191. agno/models/deepinfra/__init__.py +5 -0
  192. agno/models/deepinfra/deepinfra.py +28 -0
  193. agno/models/deepseek/__init__.py +5 -0
  194. agno/models/deepseek/deepseek.py +61 -0
  195. agno/models/defaults.py +1 -0
  196. agno/models/fireworks/__init__.py +5 -0
  197. agno/models/fireworks/fireworks.py +26 -0
  198. agno/models/google/__init__.py +5 -0
  199. agno/models/google/gemini.py +1085 -0
  200. agno/models/groq/__init__.py +5 -0
  201. agno/models/groq/groq.py +556 -0
  202. agno/models/huggingface/__init__.py +5 -0
  203. agno/models/huggingface/huggingface.py +491 -0
  204. agno/models/ibm/__init__.py +5 -0
  205. agno/models/ibm/watsonx.py +422 -0
  206. agno/models/internlm/__init__.py +3 -0
  207. agno/models/internlm/internlm.py +26 -0
  208. agno/models/langdb/__init__.py +1 -0
  209. agno/models/langdb/langdb.py +48 -0
  210. agno/models/litellm/__init__.py +14 -0
  211. agno/models/litellm/chat.py +468 -0
  212. agno/models/litellm/litellm_openai.py +25 -0
  213. agno/models/llama_cpp/__init__.py +5 -0
  214. agno/models/llama_cpp/llama_cpp.py +22 -0
  215. agno/models/lmstudio/__init__.py +5 -0
  216. agno/models/lmstudio/lmstudio.py +25 -0
  217. agno/models/message.py +434 -0
  218. agno/models/meta/__init__.py +12 -0
  219. agno/models/meta/llama.py +475 -0
  220. agno/models/meta/llama_openai.py +78 -0
  221. agno/models/metrics.py +120 -0
  222. agno/models/mistral/__init__.py +5 -0
  223. agno/models/mistral/mistral.py +432 -0
  224. agno/models/nebius/__init__.py +3 -0
  225. agno/models/nebius/nebius.py +54 -0
  226. agno/models/nexus/__init__.py +3 -0
  227. agno/models/nexus/nexus.py +22 -0
  228. agno/models/nvidia/__init__.py +5 -0
  229. agno/models/nvidia/nvidia.py +28 -0
  230. agno/models/ollama/__init__.py +5 -0
  231. agno/models/ollama/chat.py +441 -0
  232. agno/models/openai/__init__.py +9 -0
  233. agno/models/openai/chat.py +883 -0
  234. agno/models/openai/like.py +27 -0
  235. agno/models/openai/responses.py +1050 -0
  236. agno/models/openrouter/__init__.py +5 -0
  237. agno/models/openrouter/openrouter.py +66 -0
  238. agno/models/perplexity/__init__.py +5 -0
  239. agno/models/perplexity/perplexity.py +187 -0
  240. agno/models/portkey/__init__.py +3 -0
  241. agno/models/portkey/portkey.py +81 -0
  242. agno/models/requesty/__init__.py +5 -0
  243. agno/models/requesty/requesty.py +52 -0
  244. agno/models/response.py +199 -0
  245. agno/models/sambanova/__init__.py +5 -0
  246. agno/models/sambanova/sambanova.py +28 -0
  247. agno/models/siliconflow/__init__.py +5 -0
  248. agno/models/siliconflow/siliconflow.py +25 -0
  249. agno/models/together/__init__.py +5 -0
  250. agno/models/together/together.py +25 -0
  251. agno/models/utils.py +266 -0
  252. agno/models/vercel/__init__.py +3 -0
  253. agno/models/vercel/v0.py +26 -0
  254. agno/models/vertexai/__init__.py +0 -0
  255. agno/models/vertexai/claude.py +70 -0
  256. agno/models/vllm/__init__.py +3 -0
  257. agno/models/vllm/vllm.py +78 -0
  258. agno/models/xai/__init__.py +3 -0
  259. agno/models/xai/xai.py +113 -0
  260. agno/os/__init__.py +3 -0
  261. agno/os/app.py +876 -0
  262. agno/os/auth.py +57 -0
  263. agno/os/config.py +104 -0
  264. agno/os/interfaces/__init__.py +1 -0
  265. agno/os/interfaces/a2a/__init__.py +3 -0
  266. agno/os/interfaces/a2a/a2a.py +42 -0
  267. agno/os/interfaces/a2a/router.py +250 -0
  268. agno/os/interfaces/a2a/utils.py +924 -0
  269. agno/os/interfaces/agui/__init__.py +3 -0
  270. agno/os/interfaces/agui/agui.py +47 -0
  271. agno/os/interfaces/agui/router.py +144 -0
  272. agno/os/interfaces/agui/utils.py +534 -0
  273. agno/os/interfaces/base.py +25 -0
  274. agno/os/interfaces/slack/__init__.py +3 -0
  275. agno/os/interfaces/slack/router.py +148 -0
  276. agno/os/interfaces/slack/security.py +30 -0
  277. agno/os/interfaces/slack/slack.py +47 -0
  278. agno/os/interfaces/whatsapp/__init__.py +3 -0
  279. agno/os/interfaces/whatsapp/router.py +211 -0
  280. agno/os/interfaces/whatsapp/security.py +53 -0
  281. agno/os/interfaces/whatsapp/whatsapp.py +36 -0
  282. agno/os/mcp.py +292 -0
  283. agno/os/middleware/__init__.py +7 -0
  284. agno/os/middleware/jwt.py +233 -0
  285. agno/os/router.py +1763 -0
  286. agno/os/routers/__init__.py +3 -0
  287. agno/os/routers/evals/__init__.py +3 -0
  288. agno/os/routers/evals/evals.py +430 -0
  289. agno/os/routers/evals/schemas.py +142 -0
  290. agno/os/routers/evals/utils.py +162 -0
  291. agno/os/routers/health.py +31 -0
  292. agno/os/routers/home.py +52 -0
  293. agno/os/routers/knowledge/__init__.py +3 -0
  294. agno/os/routers/knowledge/knowledge.py +997 -0
  295. agno/os/routers/knowledge/schemas.py +178 -0
  296. agno/os/routers/memory/__init__.py +3 -0
  297. agno/os/routers/memory/memory.py +515 -0
  298. agno/os/routers/memory/schemas.py +62 -0
  299. agno/os/routers/metrics/__init__.py +3 -0
  300. agno/os/routers/metrics/metrics.py +190 -0
  301. agno/os/routers/metrics/schemas.py +47 -0
  302. agno/os/routers/session/__init__.py +3 -0
  303. agno/os/routers/session/session.py +997 -0
  304. agno/os/schema.py +1055 -0
  305. agno/os/settings.py +43 -0
  306. agno/os/utils.py +630 -0
  307. agno/py.typed +0 -0
  308. agno/reasoning/__init__.py +0 -0
  309. agno/reasoning/anthropic.py +80 -0
  310. agno/reasoning/azure_ai_foundry.py +67 -0
  311. agno/reasoning/deepseek.py +63 -0
  312. agno/reasoning/default.py +97 -0
  313. agno/reasoning/gemini.py +73 -0
  314. agno/reasoning/groq.py +71 -0
  315. agno/reasoning/helpers.py +63 -0
  316. agno/reasoning/ollama.py +67 -0
  317. agno/reasoning/openai.py +86 -0
  318. agno/reasoning/step.py +31 -0
  319. agno/reasoning/vertexai.py +76 -0
  320. agno/run/__init__.py +6 -0
  321. agno/run/agent.py +787 -0
  322. agno/run/base.py +229 -0
  323. agno/run/cancel.py +81 -0
  324. agno/run/messages.py +32 -0
  325. agno/run/team.py +753 -0
  326. agno/run/workflow.py +708 -0
  327. agno/session/__init__.py +10 -0
  328. agno/session/agent.py +295 -0
  329. agno/session/summary.py +265 -0
  330. agno/session/team.py +392 -0
  331. agno/session/workflow.py +205 -0
  332. agno/team/__init__.py +37 -0
  333. agno/team/team.py +8793 -0
  334. agno/tools/__init__.py +10 -0
  335. agno/tools/agentql.py +120 -0
  336. agno/tools/airflow.py +69 -0
  337. agno/tools/api.py +122 -0
  338. agno/tools/apify.py +314 -0
  339. agno/tools/arxiv.py +127 -0
  340. agno/tools/aws_lambda.py +53 -0
  341. agno/tools/aws_ses.py +66 -0
  342. agno/tools/baidusearch.py +89 -0
  343. agno/tools/bitbucket.py +292 -0
  344. agno/tools/brandfetch.py +213 -0
  345. agno/tools/bravesearch.py +106 -0
  346. agno/tools/brightdata.py +367 -0
  347. agno/tools/browserbase.py +209 -0
  348. agno/tools/calcom.py +255 -0
  349. agno/tools/calculator.py +151 -0
  350. agno/tools/cartesia.py +187 -0
  351. agno/tools/clickup.py +244 -0
  352. agno/tools/confluence.py +240 -0
  353. agno/tools/crawl4ai.py +158 -0
  354. agno/tools/csv_toolkit.py +185 -0
  355. agno/tools/dalle.py +110 -0
  356. agno/tools/daytona.py +475 -0
  357. agno/tools/decorator.py +262 -0
  358. agno/tools/desi_vocal.py +108 -0
  359. agno/tools/discord.py +161 -0
  360. agno/tools/docker.py +716 -0
  361. agno/tools/duckdb.py +379 -0
  362. agno/tools/duckduckgo.py +91 -0
  363. agno/tools/e2b.py +703 -0
  364. agno/tools/eleven_labs.py +196 -0
  365. agno/tools/email.py +67 -0
  366. agno/tools/evm.py +129 -0
  367. agno/tools/exa.py +396 -0
  368. agno/tools/fal.py +127 -0
  369. agno/tools/file.py +240 -0
  370. agno/tools/file_generation.py +350 -0
  371. agno/tools/financial_datasets.py +288 -0
  372. agno/tools/firecrawl.py +143 -0
  373. agno/tools/function.py +1187 -0
  374. agno/tools/giphy.py +93 -0
  375. agno/tools/github.py +1760 -0
  376. agno/tools/gmail.py +922 -0
  377. agno/tools/google_bigquery.py +117 -0
  378. agno/tools/google_drive.py +270 -0
  379. agno/tools/google_maps.py +253 -0
  380. agno/tools/googlecalendar.py +674 -0
  381. agno/tools/googlesearch.py +98 -0
  382. agno/tools/googlesheets.py +377 -0
  383. agno/tools/hackernews.py +77 -0
  384. agno/tools/jina.py +101 -0
  385. agno/tools/jira.py +170 -0
  386. agno/tools/knowledge.py +218 -0
  387. agno/tools/linear.py +426 -0
  388. agno/tools/linkup.py +58 -0
  389. agno/tools/local_file_system.py +90 -0
  390. agno/tools/lumalab.py +183 -0
  391. agno/tools/mcp/__init__.py +10 -0
  392. agno/tools/mcp/mcp.py +331 -0
  393. agno/tools/mcp/multi_mcp.py +347 -0
  394. agno/tools/mcp/params.py +24 -0
  395. agno/tools/mcp_toolbox.py +284 -0
  396. agno/tools/mem0.py +193 -0
  397. agno/tools/memori.py +339 -0
  398. agno/tools/memory.py +419 -0
  399. agno/tools/mlx_transcribe.py +139 -0
  400. agno/tools/models/__init__.py +0 -0
  401. agno/tools/models/azure_openai.py +190 -0
  402. agno/tools/models/gemini.py +203 -0
  403. agno/tools/models/groq.py +158 -0
  404. agno/tools/models/morph.py +186 -0
  405. agno/tools/models/nebius.py +124 -0
  406. agno/tools/models_labs.py +195 -0
  407. agno/tools/moviepy_video.py +349 -0
  408. agno/tools/neo4j.py +134 -0
  409. agno/tools/newspaper.py +46 -0
  410. agno/tools/newspaper4k.py +93 -0
  411. agno/tools/notion.py +204 -0
  412. agno/tools/openai.py +202 -0
  413. agno/tools/openbb.py +160 -0
  414. agno/tools/opencv.py +321 -0
  415. agno/tools/openweather.py +233 -0
  416. agno/tools/oxylabs.py +385 -0
  417. agno/tools/pandas.py +102 -0
  418. agno/tools/parallel.py +314 -0
  419. agno/tools/postgres.py +257 -0
  420. agno/tools/pubmed.py +188 -0
  421. agno/tools/python.py +205 -0
  422. agno/tools/reasoning.py +283 -0
  423. agno/tools/reddit.py +467 -0
  424. agno/tools/replicate.py +117 -0
  425. agno/tools/resend.py +62 -0
  426. agno/tools/scrapegraph.py +222 -0
  427. agno/tools/searxng.py +152 -0
  428. agno/tools/serpapi.py +116 -0
  429. agno/tools/serper.py +255 -0
  430. agno/tools/shell.py +53 -0
  431. agno/tools/slack.py +136 -0
  432. agno/tools/sleep.py +20 -0
  433. agno/tools/spider.py +116 -0
  434. agno/tools/sql.py +154 -0
  435. agno/tools/streamlit/__init__.py +0 -0
  436. agno/tools/streamlit/components.py +113 -0
  437. agno/tools/tavily.py +254 -0
  438. agno/tools/telegram.py +48 -0
  439. agno/tools/todoist.py +218 -0
  440. agno/tools/tool_registry.py +1 -0
  441. agno/tools/toolkit.py +146 -0
  442. agno/tools/trafilatura.py +388 -0
  443. agno/tools/trello.py +274 -0
  444. agno/tools/twilio.py +186 -0
  445. agno/tools/user_control_flow.py +78 -0
  446. agno/tools/valyu.py +228 -0
  447. agno/tools/visualization.py +467 -0
  448. agno/tools/webbrowser.py +28 -0
  449. agno/tools/webex.py +76 -0
  450. agno/tools/website.py +54 -0
  451. agno/tools/webtools.py +45 -0
  452. agno/tools/whatsapp.py +286 -0
  453. agno/tools/wikipedia.py +63 -0
  454. agno/tools/workflow.py +278 -0
  455. agno/tools/x.py +335 -0
  456. agno/tools/yfinance.py +257 -0
  457. agno/tools/youtube.py +184 -0
  458. agno/tools/zendesk.py +82 -0
  459. agno/tools/zep.py +454 -0
  460. agno/tools/zoom.py +382 -0
  461. agno/utils/__init__.py +0 -0
  462. agno/utils/agent.py +820 -0
  463. agno/utils/audio.py +49 -0
  464. agno/utils/certs.py +27 -0
  465. agno/utils/code_execution.py +11 -0
  466. agno/utils/common.py +132 -0
  467. agno/utils/dttm.py +13 -0
  468. agno/utils/enum.py +22 -0
  469. agno/utils/env.py +11 -0
  470. agno/utils/events.py +696 -0
  471. agno/utils/format_str.py +16 -0
  472. agno/utils/functions.py +166 -0
  473. agno/utils/gemini.py +426 -0
  474. agno/utils/hooks.py +57 -0
  475. agno/utils/http.py +74 -0
  476. agno/utils/json_schema.py +234 -0
  477. agno/utils/knowledge.py +36 -0
  478. agno/utils/location.py +19 -0
  479. agno/utils/log.py +255 -0
  480. agno/utils/mcp.py +214 -0
  481. agno/utils/media.py +352 -0
  482. agno/utils/merge_dict.py +41 -0
  483. agno/utils/message.py +118 -0
  484. agno/utils/models/__init__.py +0 -0
  485. agno/utils/models/ai_foundry.py +43 -0
  486. agno/utils/models/claude.py +358 -0
  487. agno/utils/models/cohere.py +87 -0
  488. agno/utils/models/llama.py +78 -0
  489. agno/utils/models/mistral.py +98 -0
  490. agno/utils/models/openai_responses.py +140 -0
  491. agno/utils/models/schema_utils.py +153 -0
  492. agno/utils/models/watsonx.py +41 -0
  493. agno/utils/openai.py +257 -0
  494. agno/utils/pickle.py +32 -0
  495. agno/utils/pprint.py +178 -0
  496. agno/utils/print_response/__init__.py +0 -0
  497. agno/utils/print_response/agent.py +842 -0
  498. agno/utils/print_response/team.py +1724 -0
  499. agno/utils/print_response/workflow.py +1668 -0
  500. agno/utils/prompts.py +111 -0
  501. agno/utils/reasoning.py +108 -0
  502. agno/utils/response.py +163 -0
  503. agno/utils/response_iterator.py +17 -0
  504. agno/utils/safe_formatter.py +24 -0
  505. agno/utils/serialize.py +32 -0
  506. agno/utils/shell.py +22 -0
  507. agno/utils/streamlit.py +487 -0
  508. agno/utils/string.py +231 -0
  509. agno/utils/team.py +139 -0
  510. agno/utils/timer.py +41 -0
  511. agno/utils/tools.py +102 -0
  512. agno/utils/web.py +23 -0
  513. agno/utils/whatsapp.py +305 -0
  514. agno/utils/yaml_io.py +25 -0
  515. agno/vectordb/__init__.py +3 -0
  516. agno/vectordb/base.py +127 -0
  517. agno/vectordb/cassandra/__init__.py +5 -0
  518. agno/vectordb/cassandra/cassandra.py +501 -0
  519. agno/vectordb/cassandra/extra_param_mixin.py +11 -0
  520. agno/vectordb/cassandra/index.py +13 -0
  521. agno/vectordb/chroma/__init__.py +5 -0
  522. agno/vectordb/chroma/chromadb.py +929 -0
  523. agno/vectordb/clickhouse/__init__.py +9 -0
  524. agno/vectordb/clickhouse/clickhousedb.py +835 -0
  525. agno/vectordb/clickhouse/index.py +9 -0
  526. agno/vectordb/couchbase/__init__.py +3 -0
  527. agno/vectordb/couchbase/couchbase.py +1442 -0
  528. agno/vectordb/distance.py +7 -0
  529. agno/vectordb/lancedb/__init__.py +6 -0
  530. agno/vectordb/lancedb/lance_db.py +995 -0
  531. agno/vectordb/langchaindb/__init__.py +5 -0
  532. agno/vectordb/langchaindb/langchaindb.py +163 -0
  533. agno/vectordb/lightrag/__init__.py +5 -0
  534. agno/vectordb/lightrag/lightrag.py +388 -0
  535. agno/vectordb/llamaindex/__init__.py +3 -0
  536. agno/vectordb/llamaindex/llamaindexdb.py +166 -0
  537. agno/vectordb/milvus/__init__.py +4 -0
  538. agno/vectordb/milvus/milvus.py +1182 -0
  539. agno/vectordb/mongodb/__init__.py +9 -0
  540. agno/vectordb/mongodb/mongodb.py +1417 -0
  541. agno/vectordb/pgvector/__init__.py +12 -0
  542. agno/vectordb/pgvector/index.py +23 -0
  543. agno/vectordb/pgvector/pgvector.py +1462 -0
  544. agno/vectordb/pineconedb/__init__.py +5 -0
  545. agno/vectordb/pineconedb/pineconedb.py +747 -0
  546. agno/vectordb/qdrant/__init__.py +5 -0
  547. agno/vectordb/qdrant/qdrant.py +1134 -0
  548. agno/vectordb/redis/__init__.py +9 -0
  549. agno/vectordb/redis/redisdb.py +694 -0
  550. agno/vectordb/search.py +7 -0
  551. agno/vectordb/singlestore/__init__.py +10 -0
  552. agno/vectordb/singlestore/index.py +41 -0
  553. agno/vectordb/singlestore/singlestore.py +763 -0
  554. agno/vectordb/surrealdb/__init__.py +3 -0
  555. agno/vectordb/surrealdb/surrealdb.py +699 -0
  556. agno/vectordb/upstashdb/__init__.py +5 -0
  557. agno/vectordb/upstashdb/upstashdb.py +718 -0
  558. agno/vectordb/weaviate/__init__.py +8 -0
  559. agno/vectordb/weaviate/index.py +15 -0
  560. agno/vectordb/weaviate/weaviate.py +1005 -0
  561. agno/workflow/__init__.py +23 -0
  562. agno/workflow/agent.py +299 -0
  563. agno/workflow/condition.py +738 -0
  564. agno/workflow/loop.py +735 -0
  565. agno/workflow/parallel.py +824 -0
  566. agno/workflow/router.py +702 -0
  567. agno/workflow/step.py +1432 -0
  568. agno/workflow/steps.py +592 -0
  569. agno/workflow/types.py +520 -0
  570. agno/workflow/workflow.py +4321 -0
  571. agno-2.2.13.dist-info/METADATA +614 -0
  572. agno-2.2.13.dist-info/RECORD +575 -0
  573. agno-2.2.13.dist-info/WHEEL +5 -0
  574. agno-2.2.13.dist-info/licenses/LICENSE +201 -0
  575. agno-2.2.13.dist-info/top_level.txt +1 -0
agno/workflow/workflow.py
@@ -0,0 +1,4321 @@
+ import asyncio
+ from dataclasses import dataclass
+ from datetime import datetime
+ from os import getenv
+ from typing import (
+     Any,
+     AsyncIterator,
+     Awaitable,
+     Callable,
+     Dict,
+     Iterator,
+     List,
+     Literal,
+     Optional,
+     Tuple,
+     Type,
+     Union,
+     cast,
+     overload,
+ )
+ from uuid import uuid4
+
+ from fastapi import WebSocket
+ from pydantic import BaseModel
+
+ from agno.agent.agent import Agent
+ from agno.db.base import AsyncBaseDb, BaseDb, SessionType
+ from agno.exceptions import InputCheckError, OutputCheckError, RunCancelledException
+ from agno.media import Audio, File, Image, Video
+ from agno.models.message import Message
+ from agno.models.metrics import Metrics
+ from agno.run import RunContext, RunStatus
+ from agno.run.agent import RunContentEvent, RunEvent, RunOutput
+ from agno.run.cancel import (
+     cancel_run as cancel_run_global,
+ )
+ from agno.run.cancel import (
+     cleanup_run,
+     raise_if_cancelled,
+     register_run,
+ )
+ from agno.run.team import RunContentEvent as TeamRunContentEvent
+ from agno.run.team import TeamRunEvent
+ from agno.run.workflow import (
+     StepOutputEvent,
+     WorkflowCancelledEvent,
+     WorkflowCompletedEvent,
+     WorkflowRunEvent,
+     WorkflowRunOutput,
+     WorkflowRunOutputEvent,
+     WorkflowStartedEvent,
+ )
+ from agno.session.workflow import WorkflowSession
+ from agno.team.team import Team
+ from agno.utils.common import is_typed_dict, validate_typed_dict
+ from agno.utils.log import (
+     log_debug,
+     log_error,
+     log_warning,
+     logger,
+     set_log_level_to_debug,
+     set_log_level_to_info,
+     use_workflow_logger,
+ )
+ from agno.utils.print_response.workflow import (
+     aprint_response,
+     aprint_response_stream,
+     print_response,
+     print_response_stream,
+ )
+ from agno.workflow import WorkflowAgent
+ from agno.workflow.condition import Condition
+ from agno.workflow.loop import Loop
+ from agno.workflow.parallel import Parallel
+ from agno.workflow.router import Router
+ from agno.workflow.step import Step
+ from agno.workflow.steps import Steps
+ from agno.workflow.types import (
+     StepInput,
+     StepMetrics,
+     StepOutput,
+     StepType,
+     WebSocketHandler,
+     WorkflowExecutionInput,
+     WorkflowMetrics,
+ )
+
+ STEP_TYPE_MAPPING = {
+     Step: StepType.STEP,
+     Steps: StepType.STEPS,
+     Loop: StepType.LOOP,
+     Parallel: StepType.PARALLEL,
+     Condition: StepType.CONDITION,
+     Router: StepType.ROUTER,
+ }
+
+ WorkflowSteps = Union[
+     Callable[
+         ["Workflow", WorkflowExecutionInput],
+         Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput], Any],
+     ],
+     Steps,
+     List[
+         Union[
+             Callable[
+                 [StepInput], Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput]]
+             ],
+             Step,
+             Steps,
+             Loop,
+             Parallel,
+             Condition,
+             Router,
+         ]
+     ],
+ ]
+
+
+ @dataclass
+ class Workflow:
+     """Pipeline-based workflow execution"""
+
+     # Workflow identification - make name optional with default
+     name: Optional[str] = None
+     # Workflow ID (autogenerated if not set)
+     id: Optional[str] = None
+     # Workflow description
+     description: Optional[str] = None
+
+     # Workflow steps
+     steps: Optional[WorkflowSteps] = None
+
+     # Database to use for this workflow
+     db: Optional[Union[BaseDb, AsyncBaseDb]] = None
+
+     # Agentic Workflow - WorkflowAgent that decides when to run the workflow
+     agent: Optional[WorkflowAgent] = None  # type: ignore
+
+     # Default session_id to use for this workflow (autogenerated if not set)
+     session_id: Optional[str] = None
+     # Default user_id to use for this workflow
+     user_id: Optional[str] = None
+     # Default session state (stored in the database to persist across runs)
+     session_state: Optional[Dict[str, Any]] = None
+     # Set to True to overwrite the stored session_state with the session_state provided in the run
+     overwrite_db_session_state: bool = False
+
+     # If True, the workflow runs in debug mode
+     debug_mode: Optional[bool] = False
+
+     # --- Workflow Streaming ---
+     # Stream the response from the Workflow
+     stream: Optional[bool] = None
+     # Stream the intermediate steps from the Workflow
+     stream_events: bool = False
+     # [Deprecated] Stream the intermediate steps from the Workflow
+     stream_intermediate_steps: bool = False
+     # Stream events from executors (agents/teams/functions) within steps
+     stream_executor_events: bool = True
+
+     # Persist the events on the run response
+     store_events: bool = False
+     # Events to skip when persisting the events on the run response
+     events_to_skip: Optional[List[Union[WorkflowRunEvent, RunEvent, TeamRunEvent]]] = None
+
+     # Control whether to store executor responses (agent/team responses) in flattened runs
+     store_executor_outputs: bool = True
+
+     websocket_handler: Optional[WebSocketHandler] = None
+
+     # Input schema to validate the input to the workflow
+     input_schema: Optional[Type[BaseModel]] = None
+
+     # Metadata stored with this workflow
+     metadata: Optional[Dict[str, Any]] = None
+
+     # --- Telemetry ---
+     # telemetry=True logs minimal telemetry for analytics
+     # This helps us improve the Agent and provide better support
+     telemetry: bool = True
+
+     # Add this flag to control if the workflow should add history to the steps
+     add_workflow_history_to_steps: bool = False
+     # Number of historical runs to include in the messages
+     num_history_runs: int = 3
+
+     def __init__(
+         self,
+         id: Optional[str] = None,
+         name: Optional[str] = None,
+         description: Optional[str] = None,
+         db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
+         steps: Optional[WorkflowSteps] = None,
+         agent: Optional[WorkflowAgent] = None,
+         session_id: Optional[str] = None,
+         session_state: Optional[Dict[str, Any]] = None,
+         overwrite_db_session_state: bool = False,
+         user_id: Optional[str] = None,
+         debug_mode: Optional[bool] = False,
+         stream: Optional[bool] = None,
+         stream_events: bool = False,
+         stream_intermediate_steps: bool = False,
+         stream_executor_events: bool = True,
+         store_events: bool = False,
+         events_to_skip: Optional[List[Union[WorkflowRunEvent, RunEvent, TeamRunEvent]]] = None,
+         store_executor_outputs: bool = True,
+         input_schema: Optional[Type[BaseModel]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         cache_session: bool = False,
+         telemetry: bool = True,
+         add_workflow_history_to_steps: bool = False,
+         num_history_runs: int = 3,
+     ):
+         self.id = id
+         self.name = name
+         self.description = description
+         self.steps = steps
+         self.agent = agent
+         self.session_id = session_id
+         self.session_state = session_state
+         self.overwrite_db_session_state = overwrite_db_session_state
+         self.user_id = user_id
+         self.debug_mode = debug_mode
+         self.store_events = store_events
+         self.events_to_skip = events_to_skip or []
+         self.stream = stream
+         self.stream_events = stream_events
+         self.stream_intermediate_steps = stream_intermediate_steps
+         self.stream_executor_events = stream_executor_events
+         self.store_executor_outputs = store_executor_outputs
+         self.input_schema = input_schema
+         self.metadata = metadata
+         self.cache_session = cache_session
+         self.db = db
+         self.telemetry = telemetry
+         self.add_workflow_history_to_steps = add_workflow_history_to_steps
+         self.num_history_runs = num_history_runs
+         self._workflow_session: Optional[WorkflowSession] = None
+
+         # Warn if workflow history is enabled without a database
+         if self.add_workflow_history_to_steps and self.db is None:
+             log_warning(
+                 "Workflow history is enabled (add_workflow_history_to_steps=True) but no database is configured. "
+                 "History won't be persisted. Add a database to persist runs across executions. "
+             )
+
+     def set_id(self) -> None:
+         if self.id is None:
+             if self.name is not None:
+                 self.id = self.name.lower().replace(" ", "-")
+             else:
+                 self.id = str(uuid4())
+
+     def _has_async_db(self) -> bool:
+         return self.db is not None and isinstance(self.db, AsyncBaseDb)
+
+     def _validate_input(
+         self, input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]]
+     ) -> Optional[Union[str, List, Dict, Message, BaseModel]]:
+         """Parse and validate input against input_schema if provided"""
+         if self.input_schema is None:
+             return input  # Return input unchanged if no schema is set
+
+         if input is None:
+             raise ValueError("Input required when input_schema is set")
+
+         # Handle Message objects - extract content
+         if isinstance(input, Message):
+             input = input.content  # type: ignore
+
+         # If input is a string, convert it to a dict
+         if isinstance(input, str):
+             import json
+
+             try:
+                 input = json.loads(input)
+             except Exception as e:
+                 raise ValueError(f"Failed to parse input. Is it a valid JSON string?: {e}")
+
+         # Case 1: Message is already a BaseModel instance
+         if isinstance(input, BaseModel):
+             if isinstance(input, self.input_schema):
+                 try:
+                     return input
+                 except Exception as e:
+                     raise ValueError(f"BaseModel validation failed: {str(e)}")
+             else:
+                 # Different BaseModel types
+                 raise ValueError(f"Expected {self.input_schema.__name__} but got {type(input).__name__}")
+
+         # Case 2: Message is a dict
+         elif isinstance(input, dict):
+             try:
+                 # Check if the schema is a TypedDict
+                 if is_typed_dict(self.input_schema):
+                     validated_dict = validate_typed_dict(input, self.input_schema)
+                     return validated_dict
+                 else:
+                     validated_model = self.input_schema(**input)
+                     return validated_model
+             except Exception as e:
+                 raise ValueError(f"Failed to parse dict into {self.input_schema.__name__}: {str(e)}")
+
+         # Case 3: Other types not supported for structured input
+         else:
+             raise ValueError(
+                 f"Cannot validate {type(input)} against input_schema. Expected dict or {self.input_schema.__name__} instance."
+             )
+
+     @property
+     def run_parameters(self) -> Dict[str, Any]:
+         """Get the run parameters for the workflow"""
+
+         if self.steps is None:
+             return {}
+
+         parameters = {}
+
+         if self.steps and callable(self.steps):
+             from inspect import Parameter, signature
+
+             sig = signature(self.steps)  # type: ignore
+
+             for param_name, param in sig.parameters.items():
+                 if param_name not in ["workflow", "execution_input", "self"]:
+                     parameters[param_name] = {
+                         "name": param_name,
+                         "default": param.default.default
+                         if hasattr(param.default, "__class__") and param.default.__class__.__name__ == "FieldInfo"
+                         else (param.default if param.default is not Parameter.empty else None),
+                         "annotation": (
+                             param.annotation.__name__
+                             if hasattr(param.annotation, "__name__")
+                             else (
+                                 str(param.annotation).replace("typing.Optional[", "").replace("]", "")
+                                 if "typing.Optional" in str(param.annotation)
+                                 else str(param.annotation)
+                             )
+                         )
+                         if param.annotation is not Parameter.empty
+                         else None,
+                         "required": param.default is Parameter.empty,
+                     }
+         else:
+             parameters = {
+                 "message": {
+                     "name": "message",
+                     "default": None,
+                     "annotation": "str",
+                     "required": True,
+                 },
+             }
+
+         return parameters
+
+     def initialize_workflow(self):
+         if self.id is None:
+             self.set_id()
+             log_debug(f"Generated new workflow_id: {self.id}")
+
+     def _initialize_session(
+         self,
+         session_id: Optional[str] = None,
+         user_id: Optional[str] = None,
+     ) -> Tuple[str, Optional[str]]:
+         """Initialize the session for the workflow."""
+
+         if session_id is None:
+             if self.session_id:
+                 session_id = self.session_id
+             else:
+                 session_id = str(uuid4())
+                 # We make the session_id sticky to the agent instance if no session_id is provided
+                 self.session_id = session_id
+
+         log_debug(f"Session ID: {session_id}", center=True)
+
+         # Use the default user_id when necessary
+         if user_id is None or user_id == "":
+             user_id = self.user_id
+
+         return session_id, user_id
+
+     def _initialize_session_state(
+         self,
+         session_state: Dict[str, Any],
+         user_id: Optional[str] = None,
+         session_id: Optional[str] = None,
+         run_id: Optional[str] = None,
+     ) -> Dict[str, Any]:
+         """Initialize the session state for the workflow."""
+         if user_id:
+             session_state["current_user_id"] = user_id
+         if session_id is not None:
+             session_state["current_session_id"] = session_id
+         if run_id is not None:
+             session_state["current_run_id"] = run_id
+
+         session_state.update(
+             {
+                 "workflow_id": self.id,
+                 "run_id": run_id,
+                 "session_id": session_id,
+             }
+         )
+         if self.name:
+             session_state["workflow_name"] = self.name
+
+         return session_state
+
+     def _generate_workflow_session_name(self) -> str:
+         """Generate a name for the workflow session"""
+
+         if self.session_id is None:
+             return f"Workflow Session - {datetime.now().strftime('%Y-%m-%d %H:%M')}"
+
+         datetime_str = datetime.now().strftime("%Y-%m-%d %H:%M")
+         new_session_name = f"Workflow Session-{datetime_str}"
+
+         if self.description:
+             truncated_desc = self.description[:40] + "-" if len(self.description) > 40 else self.description
+             new_session_name = f"{truncated_desc} - {datetime_str}"
+         return new_session_name
+
+     async def aset_session_name(
+         self, session_id: Optional[str] = None, autogenerate: bool = False, session_name: Optional[str] = None
+     ) -> WorkflowSession:
+         """Set the session name and save to storage, using an async database"""
+         session_id = session_id or self.session_id
+
+         if session_id is None:
+             raise Exception("Session ID is not set")
+
+         # -*- Read from storage
+         session = await self.aget_session(session_id=session_id)  # type: ignore
+
+         if autogenerate:
+             # -*- Generate name for session
+             session_name = self._generate_workflow_session_name()
+             log_debug(f"Generated Workflow Session Name: {session_name}")
+         elif session_name is None:
+             raise Exception("Session name is not set")
+
+         # -*- Rename session
+         session.session_data["session_name"] = session_name  # type: ignore
+
+         # -*- Save to storage
+         await self.asave_session(session=session)  # type: ignore
+
+         return session  # type: ignore
+
+     def set_session_name(
+         self, session_id: Optional[str] = None, autogenerate: bool = False, session_name: Optional[str] = None
+     ) -> WorkflowSession:
+         """Set the session name and save to storage"""
+         session_id = session_id or self.session_id
+
+         if session_id is None:
+             raise Exception("Session ID is not set")
+
+         # -*- Read from storage
+         session = self.get_session(session_id=session_id)  # type: ignore
+
+         if autogenerate:
+             # -*- Generate name for session
+             session_name = self._generate_workflow_session_name()
+             log_debug(f"Generated Workflow Session Name: {session_name}")
+         elif session_name is None:
+             raise Exception("Session name is not set")
+
+         # -*- Rename session
+         session.session_data["session_name"] = session_name  # type: ignore
+
+         # -*- Save to storage
+         self.save_session(session=session)  # type: ignore
+
+         return session  # type: ignore
+
+     async def aget_session_name(self, session_id: Optional[str] = None) -> str:
+         """Get the session name for the given session ID and user ID."""
+         session_id = session_id or self.session_id
+         if session_id is None:
+             raise Exception("Session ID is not set")
+         session = await self.aget_session(session_id=session_id)  # type: ignore
+         if session is None:
+             raise Exception("Session not found")
+         return session.session_data.get("session_name", "") if session.session_data else ""
+
+     def get_session_name(self, session_id: Optional[str] = None) -> str:
+         """Get the session name for the given session ID and user ID."""
+         session_id = session_id or self.session_id
+         if session_id is None:
+             raise Exception("Session ID is not set")
+         session = self.get_session(session_id=session_id)  # type: ignore
+         if session is None:
+             raise Exception("Session not found")
+         return session.session_data.get("session_name", "") if session.session_data else ""
+
+     async def aget_session_state(self, session_id: Optional[str] = None) -> Dict[str, Any]:
+         """Get the session state for the given session ID and user ID."""
+         session_id = session_id or self.session_id
+         if session_id is None:
+             raise Exception("Session ID is not set")
+         session = await self.aget_session(session_id=session_id)  # type: ignore
+         if session is None:
+             raise Exception("Session not found")
+         return session.session_data.get("session_state", {}) if session.session_data else {}
+
+     def get_session_state(self, session_id: Optional[str] = None) -> Dict[str, Any]:
+         """Get the session state for the given session ID and user ID."""
+         session_id = session_id or self.session_id
+         if session_id is None:
+             raise Exception("Session ID is not set")
+         session = self.get_session(session_id=session_id)  # type: ignore
+         if session is None:
+             raise Exception("Session not found")
+         return session.session_data.get("session_state", {}) if session.session_data else {}
+
+     def update_session_state(
+         self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None
+     ) -> Dict[str, Any]:
+         """
+         Update the session state for the given session ID.
+         Args:
+             session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
+             session_id: The session ID to update. If not provided, the current cached session ID is used.
+         Returns:
+             dict: The updated session state.
+         """
+         session_id = session_id or self.session_id
+         if session_id is None:
+             raise Exception("Session ID is not set")
+         session = self.get_session(session_id=session_id)  # type: ignore
+         if session is None:
+             raise Exception("Session not found")
+
+         if session.session_data is not None and "session_state" not in session.session_data:
+             session.session_data["session_state"] = {}
+
+         for key, value in session_state_updates.items():
+             session.session_data["session_state"][key] = value  # type: ignore
+
+         self.save_session(session=session)
+
+         return session.session_data["session_state"]  # type: ignore
+
+     async def aupdate_session_state(
+         self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None
+     ) -> Dict[str, Any]:
+         """
+         Update the session state for the given session ID (async).
+         Args:
+             session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
+             session_id: The session ID to update. If not provided, the current cached session ID is used.
+         Returns:
+             dict: The updated session state.
+         """
+         session_id = session_id or self.session_id
+         if session_id is None:
+             raise Exception("Session ID is not set")
+         session = await self.aget_session(session_id=session_id)  # type: ignore
+         if session is None:
+             raise Exception("Session not found")
+
+         if session.session_data is not None and "session_state" not in session.session_data:
+             session.session_data["session_state"] = {}  # type: ignore
+
+         for key, value in session_state_updates.items():
+             session.session_data["session_state"][key] = value  # type: ignore
+
+         await self.asave_session(session=session)
+
+         return session.session_data["session_state"]  # type: ignore
+
+     async def adelete_session(self, session_id: str):
+         """Delete the current session and save to storage"""
+         if self.db is None:
+             return
+         # -*- Delete session
+         await self.db.delete_session(session_id=session_id)  # type: ignore
+
+     def delete_session(self, session_id: str):
+         """Delete the current session and save to storage"""
+         if self.db is None:
+             return
+         # -*- Delete session
+         self.db.delete_session(session_id=session_id)
+
+     async def aget_run_output(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
+         """Get a RunOutput from the database."""
+         if self._workflow_session is not None:
+             run_response = self._workflow_session.get_run(run_id=run_id)
+             if run_response is not None:
+                 return run_response
+             else:
+                 log_warning(f"RunOutput {run_id} not found in AgentSession {self._workflow_session.session_id}")
+                 return None
+         else:
+             workflow_session = await self.aget_session(session_id=session_id)  # type: ignore
+             if workflow_session is not None:
+                 run_response = workflow_session.get_run(run_id=run_id)
+                 if run_response is not None:
+                     return run_response
+                 else:
+                     log_warning(f"RunOutput {run_id} not found in AgentSession {session_id}")
+         return None
+
+     def get_run_output(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
+         """Get a RunOutput from the database."""
+         if self._workflow_session is not None:
+             run_response = self._workflow_session.get_run(run_id=run_id)
+             if run_response is not None:
+                 return run_response
+             else:
+                 log_warning(f"RunOutput {run_id} not found in AgentSession {self._workflow_session.session_id}")
+                 return None
+         else:
+             workflow_session = self.get_session(session_id=session_id)
+             if workflow_session is not None:
+                 run_response = workflow_session.get_run(run_id=run_id)
+                 if run_response is not None:
+                     return run_response
+                 else:
+                     log_warning(f"RunOutput {run_id} not found in AgentSession {session_id}")
+         return None
+
+     async def aget_last_run_output(self, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
+         """Get the last run response from the database."""
+         if (
+             self._workflow_session is not None
+             and self._workflow_session.runs is not None
+             and len(self._workflow_session.runs) > 0
+         ):
+             run_response = self._workflow_session.runs[-1]
+             if run_response is not None:
+                 return run_response
+         else:
+             workflow_session = await self.aget_session(session_id=session_id)  # type: ignore
+             if workflow_session is not None and workflow_session.runs is not None and len(workflow_session.runs) > 0:
+                 run_response = workflow_session.runs[-1]
+                 if run_response is not None:
+                     return run_response
+             else:
+                 log_warning(f"No run responses found in WorkflowSession {session_id}")
+         return None
+
+     def get_last_run_output(self, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
+         """Get the last run response from the database."""
+         if (
+             self._workflow_session is not None
+             and self._workflow_session.runs is not None
+             and len(self._workflow_session.runs) > 0
+         ):
+             run_response = self._workflow_session.runs[-1]
+             if run_response is not None:
+                 return run_response
+         else:
+             workflow_session = self.get_session(session_id=session_id)
+             if workflow_session is not None and workflow_session.runs is not None and len(workflow_session.runs) > 0:
+                 run_response = workflow_session.runs[-1]
+                 if run_response is not None:
+                     return run_response
+             else:
+                 log_warning(f"No run responses found in WorkflowSession {session_id}")
+         return None
+
+     def read_or_create_session(
+         self,
+         session_id: str,
+         user_id: Optional[str] = None,
+     ) -> WorkflowSession:
+         from time import time
+
+         # Returning cached session if we have one
+         if self._workflow_session is not None and self._workflow_session.session_id == session_id:
+             return self._workflow_session
+
+         # Try to load from database
+         workflow_session = None
+         if self.db is not None:
+             log_debug(f"Reading WorkflowSession: {session_id}")
+
+             workflow_session = cast(WorkflowSession, self._read_session(session_id=session_id))
+
+         if workflow_session is None:
+             # Creating new session if none found
+             log_debug(f"Creating new WorkflowSession: {session_id}")
+             session_data = {}
+             if self.session_state is not None:
+                 from copy import deepcopy
+
+                 session_data["session_state"] = deepcopy(self.session_state)
+             workflow_session = WorkflowSession(
+                 session_id=session_id,
+                 workflow_id=self.id,
+                 user_id=user_id,
+                 workflow_data=self._get_workflow_data(),
+                 session_data=session_data,
+                 metadata=self.metadata,
+                 created_at=int(time()),
+             )
+
+         # Cache the session if relevant
+         if workflow_session is not None and self.cache_session:
+             self._workflow_session = workflow_session
+
+         return workflow_session
+
+     async def aread_or_create_session(
+         self,
+         session_id: str,
+         user_id: Optional[str] = None,
+     ) -> WorkflowSession:
+         from time import time
+
+         # Returning cached session if we have one
+         if self._workflow_session is not None and self._workflow_session.session_id == session_id:
+             return self._workflow_session
+
+         # Try to load from database
+         workflow_session = None
+         if self.db is not None:
+             log_debug(f"Reading WorkflowSession: {session_id}")
+
+             workflow_session = cast(WorkflowSession, await self._aread_session(session_id=session_id))
+
+         if workflow_session is None:
+             # Creating new session if none found
+             log_debug(f"Creating new WorkflowSession: {session_id}")
+             workflow_session = WorkflowSession(
+                 session_id=session_id,
+                 workflow_id=self.id,
+                 user_id=user_id,
+                 workflow_data=self._get_workflow_data(),
+                 session_data={},
+                 metadata=self.metadata,
+                 created_at=int(time()),
+             )
+
+         # Cache the session if relevant
+         if workflow_session is not None and self.cache_session:
+             self._workflow_session = workflow_session
+
+         return workflow_session
+
+     async def aget_session(
+         self,
+         session_id: Optional[str] = None,
+     ) -> Optional[WorkflowSession]:
+         """Load an WorkflowSession from database.
+
+         Args:
+             session_id: The session_id to load from storage.
+
+         Returns:
+             WorkflowSession: The WorkflowSession loaded from the database or created if it does not exist.
+         """
+         if not session_id and not self.session_id:
+             raise Exception("No session_id provided")
+
+         session_id_to_load = session_id or self.session_id
+
+         # Try to load from database
+         if self.db is not None and session_id_to_load is not None:
+             workflow_session = cast(WorkflowSession, await self._aread_session(session_id=session_id_to_load))
+             return workflow_session
+
+         log_warning(f"WorkflowSession {session_id_to_load} not found in db")
+         return None
+
+     def get_session(
+         self,
+         session_id: Optional[str] = None,
+     ) -> Optional[WorkflowSession]:
+         """Load an WorkflowSession from database.
+
+         Args:
+             session_id: The session_id to load from storage.
+
+         Returns:
+             WorkflowSession: The WorkflowSession loaded from the database or created if it does not exist.
+         """
+         if not session_id and not self.session_id:
+             raise Exception("No session_id provided")
+
+         session_id_to_load = session_id or self.session_id
+
+         # Try to load from database
+         if self.db is not None and session_id_to_load is not None:
+             workflow_session = cast(WorkflowSession, self._read_session(session_id=session_id_to_load))
+             return workflow_session
+
+         log_warning(f"WorkflowSession {session_id_to_load} not found in db")
+         return None
+
+     async def asave_session(self, session: WorkflowSession) -> None:
+         """Save the WorkflowSession to storage, using an async database.
+
+         Returns:
+             Optional[WorkflowSession]: The saved WorkflowSession or None if not saved.
+         """
+         if self.db is not None and session.session_data is not None:
+             if session.session_data.get("session_state") is not None:
+                 session.session_data["session_state"].pop("current_session_id", None)
+                 session.session_data["session_state"].pop("current_user_id", None)
+                 session.session_data["session_state"].pop("current_run_id", None)
+                 session.session_data["session_state"].pop("workflow_id", None)
+                 session.session_data["session_state"].pop("run_id", None)
+                 session.session_data["session_state"].pop("session_id", None)
+                 session.session_data["session_state"].pop("workflow_name", None)
+
+             await self._aupsert_session(session=session)  # type: ignore
+             log_debug(f"Created or updated WorkflowSession record: {session.session_id}")
+
+     def save_session(self, session: WorkflowSession) -> None:
+         """Save the WorkflowSession to storage
+
+         Returns:
+             Optional[WorkflowSession]: The saved WorkflowSession or None if not saved.
+         """
+         if self.db is not None and session.session_data is not None:
+             if session.session_data.get("session_state") is not None:
+                 session.session_data["session_state"].pop("current_session_id", None)
+                 session.session_data["session_state"].pop("current_user_id", None)
+                 session.session_data["session_state"].pop("current_run_id", None)
+                 session.session_data["session_state"].pop("workflow_id", None)
+                 session.session_data["session_state"].pop("run_id", None)
+                 session.session_data["session_state"].pop("session_id", None)
+                 session.session_data["session_state"].pop("workflow_name", None)
+
+             self._upsert_session(session=session)
+             log_debug(f"Created or updated WorkflowSession record: {session.session_id}")
+
+ # -*- Session Database Functions
835
+ async def _aread_session(self, session_id: str) -> Optional[WorkflowSession]:
836
+ """Get a Session from the database."""
837
+ try:
838
+ if not self.db:
839
+ raise ValueError("Db not initialized")
840
+ session = await self.db.get_session(session_id=session_id, session_type=SessionType.WORKFLOW) # type: ignore
841
+ return session if isinstance(session, (WorkflowSession, type(None))) else None
842
+ except Exception as e:
843
+ log_warning(f"Error getting session from db: {e}")
844
+ return None
845
+
846
+ def _read_session(self, session_id: str) -> Optional[WorkflowSession]:
847
+ """Get a Session from the database."""
848
+ try:
849
+ if not self.db:
850
+ raise ValueError("Db not initialized")
851
+ session = self.db.get_session(session_id=session_id, session_type=SessionType.WORKFLOW)
852
+ return session if isinstance(session, (WorkflowSession, type(None))) else None
853
+ except Exception as e:
854
+ log_warning(f"Error getting session from db: {e}")
855
+ return None
856
+
857
+ async def _aupsert_session(self, session: WorkflowSession) -> Optional[WorkflowSession]:
858
+ """Upsert a Session into the database."""
859
+ try:
860
+ if not self.db:
861
+ raise ValueError("Db not initialized")
862
+ result = await self.db.upsert_session(session=session) # type: ignore
863
+ return result if isinstance(result, (WorkflowSession, type(None))) else None
864
+ except Exception as e:
865
+ log_warning(f"Error upserting session into db: {e}")
866
+ return None
867
+
868
+ def _upsert_session(self, session: WorkflowSession) -> Optional[WorkflowSession]:
869
+ """Upsert a Session into the database."""
870
+ try:
871
+ if not self.db:
872
+ raise ValueError("Db not initialized")
873
+ result = self.db.upsert_session(session=session)
874
+ return result if isinstance(result, (WorkflowSession, type(None))) else None
875
+ except Exception as e:
876
+ log_warning(f"Error upserting session into db: {e}")
877
+ return None
878
+
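# Editor's note (not part of the package): sketch of the defensive narrowing used by the
# _read_session/_upsert_session helpers above; any unexpected type returned by the db
# layer is coerced to None rather than propagated to callers.
from typing import Optional, Type, TypeVar

T = TypeVar("T")

def narrow(result: object, expected: Type[T]) -> Optional[T]:
    return result if isinstance(result, (expected, type(None))) else None  # type: ignore[return-value]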
879
+ def _update_metadata(self, session: WorkflowSession):
880
+ """Update the extra_data in the session"""
881
+ from agno.utils.merge_dict import merge_dictionaries
882
+
883
+ # Read metadata from the database
884
+ if session.metadata is not None:
885
+ # If metadata is set in the workflow, update the database metadata with the workflow's metadata
886
+ if self.metadata is not None:
887
+ # Updates workflow's session metadata in place
888
+ merge_dictionaries(session.metadata, self.metadata)
889
+ # Update the current metadata with the metadata from the database which is updated in place
890
+ self.metadata = session.metadata
891
+
892
+ def _load_session_state(self, session: WorkflowSession, session_state: Dict[str, Any]):
893
+ """Load and return the stored session_state from the database, optionally merging it with the given one"""
894
+
895
+ from agno.utils.merge_dict import merge_dictionaries
896
+
897
+ # Get the session_state from the database and merge with proper precedence
898
+ # At this point session_state contains: workflow defaults + run params
899
+ if session.session_data and "session_state" in session.session_data:
900
+ session_state_from_db = session.session_data.get("session_state")
901
+
902
+ if (
903
+ session_state_from_db is not None
904
+ and isinstance(session_state_from_db, dict)
905
+ and len(session_state_from_db) > 0
906
+ and not self.overwrite_db_session_state
907
+ ):
908
+ # This preserves precedence: run_params > db_state > workflow_defaults
909
+ merged_state = session_state_from_db.copy()
910
+ merge_dictionaries(merged_state, session_state)
911
+ session_state.clear()
912
+ session_state.update(merged_state)
913
+
914
+ # Update the session_state in the session
915
+ if session.session_data is None:
916
+ session.session_data = {}
917
+ session.session_data["session_state"] = session_state
918
+
919
+ return session_state
920
+
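# Editor's note (not part of the package): a small sketch of the precedence rule documented
# in _load_session_state() above: run parameters override db state, which overrides defaults.
# Plain dict.update stands in for agno's merge_dictionaries, so nested-merge behaviour is
# simplified here.
defaults_plus_run_params = {"topic": "ai", "limit": 5}   # incoming session_state
state_from_db = {"topic": "sports", "cursor": 42}        # previously stored state
merged = state_from_db.copy()
merged.update(defaults_plus_run_params)                  # incoming values win
print(merged)  # {'topic': 'ai', 'cursor': 42, 'limit': 5}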
921
+ def _get_workflow_data(self) -> Dict[str, Any]:
922
+ workflow_data: Dict[str, Any] = {
923
+ "workflow_id": self.id,
924
+ "name": self.name,
925
+ }
926
+
927
+ if self.steps and not callable(self.steps):
928
+ steps_dict = []
929
+ for step in self.steps: # type: ignore
930
+ if callable(step):
931
+ step_type = StepType.STEP
932
+ elif isinstance(step, Agent) or isinstance(step, Team):
933
+ step_type = StepType.STEP
934
+ else:
935
+ step_type = STEP_TYPE_MAPPING[type(step)]
936
+ step_dict = {
937
+ "name": step.name if hasattr(step, "name") else step.__name__, # type: ignore
938
+ "description": step.description if hasattr(step, "description") else "User-defined callable step",
939
+ "type": step_type.value,
940
+ }
941
+ steps_dict.append(step_dict)
942
+
943
+ workflow_data["steps"] = steps_dict
944
+
945
+ elif callable(self.steps):
946
+ workflow_data["steps"] = [
947
+ {
948
+ "name": "Custom Function",
949
+ "description": "User-defined callable workflow",
950
+ "type": "Callable",
951
+ }
952
+ ]
953
+
954
+ return workflow_data
955
+
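# Editor's note (not part of the package): hypothetical shape of the payload built by
# _get_workflow_data() for a two-step workflow. The "type" strings come from StepType /
# STEP_TYPE_MAPPING enum values; the literals below are placeholders only.
example_workflow_data = {
    "workflow_id": "wf_123",
    "name": "research-workflow",
    "steps": [
        {"name": "search", "description": "User-defined callable step", "type": "Step"},
        {"name": "summarize", "description": "Summarize the findings", "type": "Step"},
    ],
}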
956
+ def _broadcast_to_websocket(
957
+ self,
958
+ event: Any,
959
+ websocket_handler: Optional[WebSocketHandler] = None,
960
+ ) -> None:
961
+ """Broadcast events to WebSocket if available (async context only)"""
962
+ if websocket_handler:
963
+ try:
964
+ loop = asyncio.get_running_loop()
965
+ if loop:
966
+ asyncio.create_task(websocket_handler.handle_event(event))
967
+ except RuntimeError:
968
+ pass
969
+
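# Editor's note (not part of the package): minimal sketch of the "fire and forget only when a
# loop is running" pattern used by _broadcast_to_websocket() above.
import asyncio
from typing import Awaitable, Callable, Optional

def schedule_if_running(coro_factory: Callable[[], Awaitable[None]]) -> Optional[asyncio.Task]:
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        return None  # no running event loop: skip silently, as the method above does
    return asyncio.create_task(coro_factory())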
970
+ def _handle_event(
971
+ self,
972
+ event: "WorkflowRunOutputEvent",
973
+ workflow_run_response: WorkflowRunOutput,
974
+ websocket_handler: Optional[WebSocketHandler] = None,
975
+ ) -> "WorkflowRunOutputEvent":
976
+ """Handle workflow events for storage - similar to Team._handle_event"""
977
+ from agno.run.agent import RunOutput
978
+ from agno.run.base import BaseRunOutputEvent
979
+ from agno.run.team import TeamRunOutput
980
+
981
+ if isinstance(event, (RunOutput, TeamRunOutput)):
982
+ return event
983
+ if self.store_events:
984
+ # Check if this event type should be skipped
985
+ if self.events_to_skip:
986
+ event_type = event.event
987
+ for skip_event in self.events_to_skip:
988
+ if isinstance(skip_event, str):
989
+ if event_type == skip_event:
990
+ return event
991
+ else:
992
+ # It's a WorkflowRunEvent enum
993
+ if event_type == skip_event.value:
994
+ return event
995
+
996
+ # Store the event
997
+ if isinstance(event, BaseRunOutputEvent):
998
+ if workflow_run_response.events is None:
999
+ workflow_run_response.events = []
1000
+ workflow_run_response.events.append(event)
1001
+
1002
+ # Broadcast to WebSocket if available (async context only)
1003
+ self._broadcast_to_websocket(event, websocket_handler)
1004
+
1005
+ return event
1006
+
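# Editor's note (not part of the package): illustrative sketch of the skip-list check in
# _handle_event(): entries may be plain strings or enum members, so both forms are compared
# against the event's string type. DemoEvent is a stand-in, not an agno enum.
from enum import Enum
from typing import List, Union

class DemoEvent(Enum):
    STEP_STARTED = "StepStarted"

def should_skip(event_type: str, events_to_skip: List[Union[str, DemoEvent]]) -> bool:
    for skip in events_to_skip:
        if event_type == (skip if isinstance(skip, str) else skip.value):
            return True
    return False

print(should_skip("StepStarted", ["WorkflowStarted", DemoEvent.STEP_STARTED]))  # True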
1007
+ def _enrich_event_with_workflow_context(
1008
+ self,
1009
+ event: Any,
1010
+ workflow_run_response: WorkflowRunOutput,
1011
+ step_index: Optional[Union[int, tuple]] = None,
1012
+ step: Optional[Any] = None,
1013
+ ) -> Any:
1014
+ """Enrich any event with workflow context information for frontend tracking"""
1015
+
1016
+ step_id = getattr(step, "step_id", None) if step else None
1017
+ step_name = getattr(step, "name", None) if step else None
1018
+
1019
+ if hasattr(event, "workflow_id"):
1020
+ event.workflow_id = workflow_run_response.workflow_id
1021
+ if hasattr(event, "workflow_run_id"):
1022
+ event.workflow_run_id = workflow_run_response.run_id
1023
+ if hasattr(event, "step_id") and step_id:
1024
+ event.step_id = step_id
1025
+ if hasattr(event, "step_name") and step_name is not None:
1026
+ if event.step_name is None:
1027
+ event.step_name = step_name
1028
+ # Only set step_index if it's not already set (preserve parallel.py's tuples)
1029
+ if hasattr(event, "step_index") and step_index is not None:
1030
+ if event.step_index is None:
1031
+ event.step_index = step_index
1032
+
1033
+ return event
1034
+
1035
+ def _transform_step_output_to_event(
1036
+ self, step_output: StepOutput, workflow_run_response: WorkflowRunOutput, step_index: Optional[int] = None
1037
+ ) -> StepOutputEvent:
1038
+ """Transform a StepOutput object into a StepOutputEvent for consistent streaming interface"""
1039
+ return StepOutputEvent(
1040
+ step_output=step_output,
1041
+ run_id=workflow_run_response.run_id or "",
1042
+ workflow_name=workflow_run_response.workflow_name,
1043
+ workflow_id=workflow_run_response.workflow_id,
1044
+ session_id=workflow_run_response.session_id,
1045
+ step_name=step_output.step_name,
1046
+ step_index=step_index,
1047
+ )
1048
+
1049
+ def _set_debug(self) -> None:
1050
+ """Set debug mode and configure logging"""
1051
+ if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
1052
+ use_workflow_logger()
1053
+
1054
+ self.debug_mode = True
1055
+ set_log_level_to_debug(source_type="workflow")
1056
+
1057
+ # Propagate to steps - only if steps is iterable (not callable)
1058
+ if self.steps and not callable(self.steps):
1059
+ if isinstance(self.steps, Steps):
1060
+ steps_to_iterate = self.steps.steps
1061
+ else:
1062
+ steps_to_iterate = self.steps
1063
+
1064
+ for step in steps_to_iterate:
1065
+ self._propagate_debug_to_step(step)
1066
+ else:
1067
+ set_log_level_to_info(source_type="workflow")
1068
+
1069
+ def _set_telemetry(self) -> None:
1070
+ """Override telemetry settings based on environment variables."""
1071
+
1072
+ telemetry_env = getenv("AGNO_TELEMETRY")
1073
+ if telemetry_env is not None:
1074
+ self.telemetry = telemetry_env.lower() == "true"
1075
+
1076
+ def _propagate_debug_to_step(self, step):
1077
+ """Recursively propagate debug mode to steps and nested primitives"""
1078
+ # Handle direct Step objects
1079
+ if hasattr(step, "active_executor") and step.active_executor:
1080
+ executor = step.active_executor
1081
+ if hasattr(executor, "debug_mode"):
1082
+ executor.debug_mode = True
1083
+
1084
+ # If it's a team, propagate to all members
1085
+ if hasattr(executor, "members"):
1086
+ for member in executor.members:
1087
+ if hasattr(member, "debug_mode"):
1088
+ member.debug_mode = True
1089
+
1090
+ # Handle nested primitives - check both 'steps' and 'choices' attributes
1091
+ for attr_name in ["steps", "choices"]:
1092
+ if hasattr(step, attr_name):
1093
+ attr_value = getattr(step, attr_name)
1094
+ if attr_value and isinstance(attr_value, list):
1095
+ for nested_step in attr_value:
1096
+ self._propagate_debug_to_step(nested_step)
1097
+
1098
+ def _create_step_input(
1099
+ self,
1100
+ execution_input: WorkflowExecutionInput,
1101
+ previous_step_outputs: Optional[Dict[str, StepOutput]] = None,
1102
+ shared_images: Optional[List[Image]] = None,
1103
+ shared_videos: Optional[List[Video]] = None,
1104
+ shared_audio: Optional[List[Audio]] = None,
1105
+ shared_files: Optional[List[File]] = None,
1106
+ ) -> StepInput:
1107
+ """Helper method to create StepInput with enhanced data flow support"""
1108
+
1109
+ previous_step_content = None
1110
+ if previous_step_outputs:
1111
+ last_output = list(previous_step_outputs.values())[-1]
1112
+ previous_step_content = last_output.content if last_output else None
1113
+ log_debug(f"Using previous step content from: {list(previous_step_outputs.keys())[-1]}")
1114
+
1115
+ return StepInput(
1116
+ input=execution_input.input,
1117
+ previous_step_content=previous_step_content,
1118
+ previous_step_outputs=previous_step_outputs,
1119
+ additional_data=execution_input.additional_data,
1120
+ images=shared_images or [],
1121
+ videos=shared_videos or [],
1122
+ audio=shared_audio or [],
1123
+ files=shared_files or [],
1124
+ )
1125
+
1126
+ def _get_step_count(self) -> int:
1127
+ """Get the number of steps in the workflow"""
1128
+ if self.steps is None:
1129
+ return 0
1130
+ elif callable(self.steps):
1131
+ return 1 # Callable function counts as 1 step
1132
+ else:
1133
+ # Handle Steps wrapper
1134
+ if isinstance(self.steps, Steps):
1135
+ return len(self.steps.steps)
1136
+ else:
1137
+ return len(self.steps)
1138
+
1139
+ def _aggregate_workflow_metrics(
1140
+ self,
1141
+ step_results: List[Union[StepOutput, List[StepOutput]]],
1142
+ current_workflow_metrics: Optional[WorkflowMetrics] = None,
1143
+ ) -> WorkflowMetrics:
1144
+ """Aggregate metrics from all step responses into structured workflow metrics"""
1145
+ steps_dict = {}
1146
+
1147
+ def process_step_output(step_output: StepOutput):
1148
+ """Process a single step output for metrics"""
1149
+
1150
+ # If this step has nested steps, process them recursively
1151
+ if hasattr(step_output, "steps") and step_output.steps:
1152
+ for nested_step in step_output.steps:
1153
+ process_step_output(nested_step)
1154
+
1155
+ # Only collect metrics from steps that actually have metrics (actual agents/teams)
1156
+ if (
1157
+ step_output.step_name and step_output.metrics and step_output.executor_type in ["agent", "team"]
1158
+ ): # Only include actual executors
1159
+ step_metrics = StepMetrics(
1160
+ step_name=step_output.step_name,
1161
+ executor_type=step_output.executor_type or "unknown",
1162
+ executor_name=step_output.executor_name or "unknown",
1163
+ metrics=step_output.metrics,
1164
+ )
1165
+ steps_dict[step_output.step_name] = step_metrics
1166
+
1167
+ # Process all step results
1168
+ for step_result in step_results:
1169
+ process_step_output(cast(StepOutput, step_result))
1170
+
1171
+ duration = None
1172
+ if current_workflow_metrics and current_workflow_metrics.duration is not None:
1173
+ duration = current_workflow_metrics.duration
1174
+
1175
+ return WorkflowMetrics(
1176
+ steps=steps_dict,
1177
+ duration=duration,
1178
+ )
1179
+
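# Editor's note (not part of the package): a stripped-down sketch of the recursive walk in
# _aggregate_workflow_metrics(): nested containers are visited first, and only leaf steps
# that carry metrics and ran on an "agent" or "team" executor are collected.
from typing import Any, Dict

def collect_step_metrics(step_output: Any, out: Dict[str, Any]) -> None:
    for nested in (getattr(step_output, "steps", None) or []):
        collect_step_metrics(nested, out)
    if (
        getattr(step_output, "step_name", None)
        and getattr(step_output, "metrics", None)
        and getattr(step_output, "executor_type", None) in ("agent", "team")
    ):
        out[step_output.step_name] = step_output.metrics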
1180
+ def _call_custom_function(self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any) -> Any:
1181
+ """Call custom function with only the parameters it expects"""
1182
+ from inspect import signature
1183
+
1184
+ sig = signature(func)
1185
+
1186
+ # Build arguments based on what the function actually accepts
1187
+ call_kwargs: Dict[str, Any] = {}
1188
+
1189
+ # Only add workflow and execution_input if the function expects them
1190
+ if "workflow" in sig.parameters: # type: ignore
1191
+ call_kwargs["workflow"] = self
1192
+ if "execution_input" in sig.parameters:
1193
+ call_kwargs["execution_input"] = execution_input # type: ignore
1194
+ if "session_state" in sig.parameters:
1195
+ call_kwargs["session_state"] = self.session_state # type: ignore
1196
+
1197
+ # Add any other kwargs that the function expects
1198
+ for param_name in kwargs:
1199
+ if param_name in sig.parameters: # type: ignore
1200
+ call_kwargs[param_name] = kwargs[param_name]
1201
+
1202
+ # If function has **kwargs parameter, pass all remaining kwargs
1203
+ for param in sig.parameters.values(): # type: ignore
1204
+ if param.kind == param.VAR_KEYWORD:
1205
+ call_kwargs.update(kwargs)
1206
+ break
1207
+
1208
+ try:
1209
+ return func(**call_kwargs)
1210
+ except TypeError as e:
1211
+ # If signature inspection fails, fall back to original method
1212
+ logger.error(f"Function signature inspection failed: {e}. Falling back to original calling convention.")
1213
+ return func(**kwargs)
1214
+
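# Editor's note (not part of the package): hypothetical custom executor illustrating the
# signature-based injection performed by _call_custom_function(): only parameters the
# function declares (workflow, execution_input, session_state, or extra kwargs) are passed.
def my_workflow_fn(execution_input, session_state, **kwargs):
    session_state["last_input"] = getattr(execution_input, "input", None)
    return f"processed: {session_state['last_input']}"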
1215
+ def _accumulate_partial_step_data(
1216
+ self, event: Union[RunContentEvent, TeamRunContentEvent], partial_step_content: str
1217
+ ) -> str:
1218
+ """Accumulate partial step data from streaming events"""
1219
+ if isinstance(event, (RunContentEvent, TeamRunContentEvent)) and event.content:
1220
+ if isinstance(event.content, str):
1221
+ partial_step_content += event.content
1222
+ return partial_step_content
1223
+
1224
+ def _execute(
1225
+ self,
1226
+ session: WorkflowSession,
1227
+ execution_input: WorkflowExecutionInput,
1228
+ workflow_run_response: WorkflowRunOutput,
1229
+ run_context: RunContext,
1230
+ **kwargs: Any,
1231
+ ) -> WorkflowRunOutput:
1232
+ """Execute a specific pipeline by name synchronously"""
1233
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1234
+
1235
+ workflow_run_response.status = RunStatus.running
1236
+ if workflow_run_response.run_id:
1237
+ register_run(workflow_run_response.run_id) # type: ignore
1238
+
1239
+ if callable(self.steps):
1240
+ if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
1241
+ raise ValueError("Cannot use async function with synchronous execution")
1242
+ elif isgeneratorfunction(self.steps):
1243
+ content = ""
1244
+ for chunk in self.steps(self, execution_input, **kwargs):
1245
+ # Check for cancellation while consuming generator
1246
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1247
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1248
+ content += chunk.content
1249
+ else:
1250
+ content += str(chunk)
1251
+ workflow_run_response.content = content
1252
+ else:
1253
+ # Execute the workflow with the custom executor
1254
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1255
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs) # type: ignore[arg-type]
1256
+
1257
+ workflow_run_response.status = RunStatus.completed
1258
+ else:
1259
+ try:
1260
+ # Track outputs from each step for enhanced data flow
1261
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1262
+ previous_step_outputs: Dict[str, StepOutput] = {}
1263
+
1264
+ shared_images: List[Image] = execution_input.images or []
1265
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
1266
+ shared_videos: List[Video] = execution_input.videos or []
1267
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
1268
+ shared_audio: List[Audio] = execution_input.audio or []
1269
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
1270
+ shared_files: List[File] = execution_input.files or []
1271
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1272
+
1273
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1274
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1275
+ step_name = getattr(step, "name", f"step_{i + 1}")
1276
+ log_debug(f"Executing step {i + 1}/{self._get_step_count()}: {step_name}")
1277
+
1278
+ # Create enhanced StepInput
1279
+ step_input = self._create_step_input(
1280
+ execution_input=execution_input,
1281
+ previous_step_outputs=previous_step_outputs,
1282
+ shared_images=shared_images,
1283
+ shared_videos=shared_videos,
1284
+ shared_audio=shared_audio,
1285
+ shared_files=shared_files,
1286
+ )
1287
+
1288
+ # Check for cancellation before executing step
1289
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1290
+
1291
+ step_output = step.execute( # type: ignore[union-attr]
1292
+ step_input,
1293
+ session_id=session.session_id,
1294
+ user_id=self.user_id,
1295
+ workflow_run_response=workflow_run_response,
1296
+ run_context=run_context,
1297
+ store_executor_outputs=self.store_executor_outputs,
1298
+ workflow_session=session,
1299
+ add_workflow_history_to_steps=self.add_workflow_history_to_steps
1300
+ if self.add_workflow_history_to_steps
1301
+ else None,
1302
+ num_history_runs=self.num_history_runs,
1303
+ )
1304
+
1305
+ # Check for cancellation after step execution
1306
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1307
+
1308
+ # Update the workflow-level previous_step_outputs dictionary
1309
+ previous_step_outputs[step_name] = step_output
1310
+ collected_step_outputs.append(step_output)
1311
+
1312
+ # Update shared media for next step
1313
+ shared_images.extend(step_output.images or [])
1314
+ shared_videos.extend(step_output.videos or [])
1315
+ shared_audio.extend(step_output.audio or [])
1316
+ shared_files.extend(step_output.files or [])
1317
+ output_images.extend(step_output.images or [])
1318
+ output_videos.extend(step_output.videos or [])
1319
+ output_audio.extend(step_output.audio or [])
1320
+ output_files.extend(step_output.files or [])
1321
+
1322
+ if step_output.stop:
1323
+ logger.info(f"Early termination requested by step {step_name}")
1324
+ break
1325
+
1326
+ # Update the workflow_run_response with completion data
1327
+ if collected_step_outputs:
1328
+ # Stop the timer for the Run duration
1329
+ if workflow_run_response.metrics:
1330
+ workflow_run_response.metrics.stop_timer()
1331
+
1332
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
1333
+ collected_step_outputs,
1334
+ workflow_run_response.metrics, # type: ignore[arg-type]
1335
+ )
1336
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1337
+
1338
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1339
+ if getattr(last_output, "steps", None):
1340
+ _cur = last_output
1341
+ while getattr(_cur, "steps", None):
1342
+ _steps = _cur.steps or []
1343
+ if not _steps:
1344
+ break
1345
+ _cur = _steps[-1]
1346
+ workflow_run_response.content = _cur.content
1347
+ else:
1348
+ workflow_run_response.content = last_output.content
1349
+ else:
1350
+ workflow_run_response.content = "No steps executed"
1351
+
1352
+ workflow_run_response.step_results = collected_step_outputs
1353
+ workflow_run_response.images = output_images
1354
+ workflow_run_response.videos = output_videos
1355
+ workflow_run_response.audio = output_audio
1356
+ workflow_run_response.status = RunStatus.completed
1357
+
1358
+ except (InputCheckError, OutputCheckError) as e:
1359
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
1360
+ # Store error response
1361
+ workflow_run_response.status = RunStatus.error
1362
+ workflow_run_response.content = f"Validation failed: {str(e)} | Check: {e.check_trigger}"
1363
+
1364
+ raise e
1365
+ except RunCancelledException as e:
1366
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
1367
+ workflow_run_response.status = RunStatus.cancelled
1368
+ workflow_run_response.content = str(e)
1369
+ except Exception as e:
1370
+ import traceback
1371
+
1372
+ traceback.print_exc()
1373
+ logger.error(f"Workflow execution failed: {e}")
1374
+ # Store error response
1375
+ workflow_run_response.status = RunStatus.error
1376
+ workflow_run_response.content = f"Workflow execution failed: {e}"
1377
+ raise e
1378
+
1379
+ finally:
1380
+ # Stop the run timer (the finally block runs on success, error, and cancellation)
1381
+ if workflow_run_response.metrics:
1382
+ workflow_run_response.metrics.stop_timer()
1383
+
1384
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1385
+ session.upsert_run(run=workflow_run_response)
1386
+ self.save_session(session=session)
1387
+ # Always clean up the run tracking
1388
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1389
+
1390
+ # Log Workflow Telemetry
1391
+ if self.telemetry:
1392
+ self._log_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
1393
+
1394
+ return workflow_run_response
1395
+
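# Editor's note (not part of the package): minimal sketch of the "deepest nested content"
# rule used above when the last step result is a container (Steps/Router/Loop): the final
# workflow content is taken from the last leaf of the last nested branch.
from typing import Any, Optional

def deepest_content(output: Any) -> Optional[str]:
    current = output
    while getattr(current, "steps", None):
        current = current.steps[-1]
    return getattr(current, "content", None)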
1396
+ def _execute_stream(
1397
+ self,
1398
+ session: WorkflowSession,
1399
+ execution_input: WorkflowExecutionInput,
1400
+ workflow_run_response: WorkflowRunOutput,
1401
+ run_context: RunContext,
1402
+ stream_events: bool = False,
1403
+ **kwargs: Any,
1404
+ ) -> Iterator[WorkflowRunOutputEvent]:
1405
+ """Execute a specific pipeline by name with event streaming"""
1406
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1407
+
1408
+ workflow_run_response.status = RunStatus.running
1409
+
1410
+ # Register run for cancellation tracking
1411
+ if workflow_run_response.run_id:
1412
+ register_run(workflow_run_response.run_id)
1413
+
1414
+ workflow_started_event = WorkflowStartedEvent(
1415
+ run_id=workflow_run_response.run_id or "",
1416
+ workflow_name=workflow_run_response.workflow_name,
1417
+ workflow_id=workflow_run_response.workflow_id,
1418
+ session_id=workflow_run_response.session_id,
1419
+ )
1420
+ yield self._handle_event(workflow_started_event, workflow_run_response)
1421
+
1422
+ if callable(self.steps):
1423
+ if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
1424
+ raise ValueError("Cannot use async function with synchronous execution")
1425
+ elif isgeneratorfunction(self.steps):
1426
+ content = ""
1427
+ for chunk in self._call_custom_function(self.steps, execution_input, **kwargs): # type: ignore[arg-type]
1428
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1429
+ # Update the run_response with the content from the result
1430
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1431
+ content += chunk.content
1432
+ yield chunk
1433
+ else:
1434
+ content += str(chunk)
1435
+ workflow_run_response.content = content
1436
+ else:
1437
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1438
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs)
1439
+ workflow_run_response.status = RunStatus.completed
1440
+
1441
+ else:
1442
+ try:
1443
+ # Track outputs from each step for enhanced data flow
1444
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1445
+ previous_step_outputs: Dict[str, StepOutput] = {}
1446
+
1447
+ shared_images: List[Image] = execution_input.images or []
1448
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
1449
+ shared_videos: List[Video] = execution_input.videos or []
1450
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
1451
+ shared_audio: List[Audio] = execution_input.audio or []
1452
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
1453
+ shared_files: List[File] = execution_input.files or []
1454
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1455
+
1456
+ early_termination = False
1457
+
1458
+ # Track partial step data in case of cancellation
1459
+ current_step_name = ""
1460
+ current_step = None
1461
+ partial_step_content = ""
1462
+
1463
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1464
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1465
+ step_name = getattr(step, "name", f"step_{i + 1}")
1466
+ log_debug(f"Streaming step {i + 1}/{self._get_step_count()}: {step_name}")
1467
+
1468
+ # Track current step for cancellation handler
1469
+ current_step_name = step_name
1470
+ current_step = step
1471
+ # Reset partial data for this step
1472
+ partial_step_content = ""
1473
+
1474
+ # Create enhanced StepInput
1475
+ step_input = self._create_step_input(
1476
+ execution_input=execution_input,
1477
+ previous_step_outputs=previous_step_outputs,
1478
+ shared_images=shared_images,
1479
+ shared_videos=shared_videos,
1480
+ shared_audio=shared_audio,
1481
+ shared_files=shared_files,
1482
+ )
1483
+
1484
+ # Execute step with streaming and yield all events
1485
+ for event in step.execute_stream( # type: ignore[union-attr]
1486
+ step_input,
1487
+ session_id=session.session_id,
1488
+ user_id=self.user_id,
1489
+ stream_events=stream_events,
1490
+ stream_executor_events=self.stream_executor_events,
1491
+ workflow_run_response=workflow_run_response,
1492
+ run_context=run_context,
1493
+ step_index=i,
1494
+ store_executor_outputs=self.store_executor_outputs,
1495
+ workflow_session=session,
1496
+ add_workflow_history_to_steps=self.add_workflow_history_to_steps
1497
+ if self.add_workflow_history_to_steps
1498
+ else None,
1499
+ num_history_runs=self.num_history_runs,
1500
+ ):
1501
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1502
+
1503
+ # Accumulate partial data from streaming events
1504
+ partial_step_content = self._accumulate_partial_step_data(event, partial_step_content) # type: ignore
1505
+
1506
+ # Handle events
1507
+ if isinstance(event, StepOutput):
1508
+ step_output = event
1509
+ collected_step_outputs.append(step_output)
1510
+
1511
+ # Update the workflow-level previous_step_outputs dictionary
1512
+ previous_step_outputs[step_name] = step_output
1513
+
1514
+ # Transform StepOutput to StepOutputEvent for consistent streaming interface
1515
+ step_output_event = self._transform_step_output_to_event(
1516
+ step_output, workflow_run_response, step_index=i
1517
+ )
1518
+
1519
+ if step_output.stop:
1520
+ logger.info(f"Early termination requested by step {step_name}")
1521
+ # Update shared media for next step
1522
+ shared_images.extend(step_output.images or [])
1523
+ shared_videos.extend(step_output.videos or [])
1524
+ shared_audio.extend(step_output.audio or [])
1525
+ shared_files.extend(step_output.files or [])
1526
+ output_images.extend(step_output.images or [])
1527
+ output_videos.extend(step_output.videos or [])
1528
+ output_audio.extend(step_output.audio or [])
1529
+ output_files.extend(step_output.files or [])
1530
+
1531
+ # Only yield StepOutputEvent for function executors, not for agents/teams
1532
+ if getattr(step, "executor_type", None) == "function":
1533
+ yield step_output_event
1534
+
1535
+ # Break out of the step loop
1536
+ early_termination = True
1537
+ break
1538
+
1539
+ # Update shared media for next step
1540
+ shared_images.extend(step_output.images or [])
1541
+ shared_videos.extend(step_output.videos or [])
1542
+ shared_audio.extend(step_output.audio or [])
1543
+ shared_files.extend(step_output.files or [])
1544
+ output_images.extend(step_output.images or [])
1545
+ output_videos.extend(step_output.videos or [])
1546
+ output_audio.extend(step_output.audio or [])
1547
+ output_files.extend(step_output.files or [])
1548
+
1549
+ # Only yield StepOutputEvent for function executors, not for agents/teams
1550
+ if getattr(step, "executor_type", None) == "function":
1551
+ yield step_output_event
1552
+
1553
+ elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
1554
+ # Enrich event with workflow context before yielding
1555
+ enriched_event = self._enrich_event_with_workflow_context(
1556
+ event, workflow_run_response, step_index=i, step=step
1557
+ )
1558
+ yield self._handle_event(enriched_event, workflow_run_response) # type: ignore
1559
+
1560
+ else:
1561
+ # Enrich other events with workflow context before yielding
1562
+ enriched_event = self._enrich_event_with_workflow_context(
1563
+ event, workflow_run_response, step_index=i, step=step
1564
+ )
1565
+ if self.stream_executor_events:
1566
+ yield self._handle_event(enriched_event, workflow_run_response) # type: ignore
1567
+
1568
+ # Break out of main step loop if early termination was requested
1569
+ if "early_termination" in locals() and early_termination:
1570
+ break
1571
+
1572
+ # Update the workflow_run_response with completion data
1573
+ if collected_step_outputs:
1574
+ # Stop the timer for the Run duration
1575
+ if workflow_run_response.metrics:
1576
+ workflow_run_response.metrics.stop_timer()
1577
+
1578
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
1579
+ collected_step_outputs,
1580
+ workflow_run_response.metrics, # type: ignore[arg-type]
1581
+ )
1582
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1583
+
1584
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1585
+ if getattr(last_output, "steps", None):
1586
+ _cur = last_output
1587
+ while getattr(_cur, "steps", None):
1588
+ _steps = _cur.steps or []
1589
+ if not _steps:
1590
+ break
1591
+ _cur = _steps[-1]
1592
+ workflow_run_response.content = _cur.content
1593
+ else:
1594
+ workflow_run_response.content = last_output.content
1595
+ else:
1596
+ workflow_run_response.content = "No steps executed"
1597
+
1598
+ workflow_run_response.step_results = collected_step_outputs
1599
+ workflow_run_response.images = output_images
1600
+ workflow_run_response.videos = output_videos
1601
+ workflow_run_response.audio = output_audio
1602
+ workflow_run_response.status = RunStatus.completed
1603
+
1604
+ except (InputCheckError, OutputCheckError) as e:
1605
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
1606
+
1607
+ from agno.run.workflow import WorkflowErrorEvent
1608
+
1609
+ error_event = WorkflowErrorEvent(
1610
+ run_id=workflow_run_response.run_id or "",
1611
+ workflow_id=self.id,
1612
+ workflow_name=self.name,
1613
+ session_id=session.session_id,
1614
+ error=str(e),
1615
+ )
1616
+
1617
+ yield error_event
1618
+
1619
+ # Update workflow_run_response with error
1620
+ workflow_run_response.content = error_event.error
1621
+ workflow_run_response.status = RunStatus.error
1622
+ except RunCancelledException as e:
1623
+ # Handle run cancellation during streaming
1624
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
1625
+ workflow_run_response.status = RunStatus.cancelled
1626
+ workflow_run_response.content = str(e)
1627
+
1628
+ # Capture partial progress from the step that was cancelled mid-stream
1629
+ if partial_step_content:
1630
+ logger.info(
1631
+ f"Step with name '{current_step_name}' was cancelled. Setting its partial progress as step output."
1632
+ )
1633
+ partial_step_output = StepOutput(
1634
+ step_name=current_step_name,
1635
+ step_id=getattr(current_step, "step_id", None) if current_step else None,
1636
+ step_type=StepType.STEP,
1637
+ executor_type=getattr(current_step, "executor_type", None) if current_step else None,
1638
+ executor_name=getattr(current_step, "executor_name", None) if current_step else None,
1639
+ content=partial_step_content,
1640
+ success=False,
1641
+ error="Cancelled during execution",
1642
+ )
1643
+ collected_step_outputs.append(partial_step_output)
1644
+
1645
+ # Preserve all progress (completed steps + partial step) before cancellation
1646
+ if collected_step_outputs:
1647
+ workflow_run_response.step_results = collected_step_outputs
1648
+ # Stop the timer for the Run duration
1649
+ if workflow_run_response.metrics:
1650
+ workflow_run_response.metrics.stop_timer()
1651
+
1652
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
1653
+ collected_step_outputs,
1654
+ workflow_run_response.metrics, # type: ignore[arg-type]
1655
+ )
1656
+
1657
+ cancelled_event = WorkflowCancelledEvent(
1658
+ run_id=workflow_run_response.run_id or "",
1659
+ workflow_id=self.id,
1660
+ workflow_name=self.name,
1661
+ session_id=session.session_id,
1662
+ reason=str(e),
1663
+ )
1664
+ yield self._handle_event(cancelled_event, workflow_run_response)
1665
+ except Exception as e:
1666
+ logger.error(f"Workflow execution failed: {e}")
1667
+
1668
+ from agno.run.workflow import WorkflowErrorEvent
1669
+
1670
+ error_event = WorkflowErrorEvent(
1671
+ run_id=workflow_run_response.run_id or "",
1672
+ workflow_id=self.id,
1673
+ workflow_name=self.name,
1674
+ session_id=session.session_id,
1675
+ error=str(e),
1676
+ )
1677
+
1678
+ yield error_event
1679
+
1680
+ # Update workflow_run_response with error
1681
+ workflow_run_response.content = error_event.error
1682
+ workflow_run_response.status = RunStatus.error
1683
+ raise e
1684
+
1685
+ # Yield workflow completed event
1686
+ workflow_completed_event = WorkflowCompletedEvent(
1687
+ run_id=workflow_run_response.run_id or "",
1688
+ content=workflow_run_response.content,
1689
+ workflow_name=workflow_run_response.workflow_name,
1690
+ workflow_id=workflow_run_response.workflow_id,
1691
+ session_id=workflow_run_response.session_id,
1692
+ step_results=workflow_run_response.step_results, # type: ignore
1693
+ metadata=workflow_run_response.metadata,
1694
+ )
1695
+ yield self._handle_event(workflow_completed_event, workflow_run_response)
1696
+
1697
+ # Stop the run timer if it is still running
1698
+ if workflow_run_response.metrics:
1699
+ workflow_run_response.metrics.stop_timer()
1700
+
1701
+ # Store the completed workflow response
1702
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1703
+ session.upsert_run(run=workflow_run_response)
1704
+ self.save_session(session=session)
1705
+
1706
+ # Always clean up the run tracking
1707
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1708
+
1709
+ # Log Workflow Telemetry
1710
+ if self.telemetry:
1711
+ self._log_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
1712
+
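# Editor's note (not part of the package): illustrative sketch of how partial streaming
# content is preserved on cancellation (see the RunCancelledException handler above): the
# accumulated text becomes a failed step result so completed work is not discarded. A plain
# dict stands in for agno's StepOutput here.
def build_partial_result(step_name: str, partial_content: str) -> dict:
    return {
        "step_name": step_name,
        "content": partial_content,
        "success": False,
        "error": "Cancelled during execution",
    }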
1713
+ async def _acall_custom_function(
1714
+ self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any
1715
+ ) -> Any:
1716
+ """Call custom function with only the parameters it expects - handles both async functions and async generators"""
1717
+ from inspect import isasyncgenfunction, signature
1718
+
1719
+ sig = signature(func)
1720
+
1721
+ # Build arguments based on what the function actually accepts
1722
+ call_kwargs: Dict[str, Any] = {}
1723
+
1724
+ # Only add workflow and execution_input if the function expects them
1725
+ if "workflow" in sig.parameters: # type: ignore
1726
+ call_kwargs["workflow"] = self
1727
+ if "execution_input" in sig.parameters:
1728
+ call_kwargs["execution_input"] = execution_input # type: ignore
1729
+ if "session_state" in sig.parameters:
1730
+ call_kwargs["session_state"] = self.session_state # type: ignore
1731
+
1732
+ # Add any other kwargs that the function expects
1733
+ for param_name in kwargs:
1734
+ if param_name in sig.parameters: # type: ignore
1735
+ call_kwargs[param_name] = kwargs[param_name]
1736
+
1737
+ # If function has **kwargs parameter, pass all remaining kwargs
1738
+ for param in sig.parameters.values(): # type: ignore
1739
+ if param.kind == param.VAR_KEYWORD:
1740
+ call_kwargs.update(kwargs)
1741
+ break
1742
+
1743
+ try:
1744
+ # Check if it's an async generator function
1745
+ if isasyncgenfunction(func):
1746
+ # For async generators, call the function and return the async generator directly
1747
+ return func(**call_kwargs) # type: ignore
1748
+ else:
1749
+ # For regular async functions, await the result
1750
+ return await func(**call_kwargs) # type: ignore
1751
+ except TypeError as e:
1752
+ # If signature inspection fails, fall back to original method
1753
+ logger.warning(
1754
+ f"Async function signature inspection failed: {e}. Falling back to original calling convention."
1755
+ )
1756
+ if isasyncgenfunction(func):
1757
+ # For async generators, use the same signature inspection logic in fallback
1758
+ return func(**call_kwargs) # type: ignore
1759
+ else:
1760
+ # For regular async functions, use the same signature inspection logic in fallback
1761
+ return await func(**call_kwargs) # type: ignore
1762
+
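# Editor's note (not part of the package): a self-contained sketch of the dispatch rule in
# _acall_custom_function(): async generator functions are returned un-awaited so the caller
# can iterate them, while plain coroutine functions are awaited.
import asyncio
from inspect import isasyncgenfunction

async def run_custom(func, **kwargs):
    if isasyncgenfunction(func):
        return func(**kwargs)      # async generator: caller iterates with "async for"
    return await func(**kwargs)    # coroutine: await the result

async def demo():
    async def chunks():
        yield "hello"
        yield "world"
    gen = await run_custom(chunks)
    async for chunk in gen:
        print(chunk)

asyncio.run(demo())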
1763
+ async def _aload_or_create_session(
1764
+ self, session_id: str, user_id: Optional[str], session_state: Optional[Dict[str, Any]]
1765
+ ) -> Tuple[WorkflowSession, Dict[str, Any]]:
1766
+ """Load or create session from database, update metadata, and prepare session state.
1767
+
1768
+ Returns:
1769
+ Tuple of (workflow_session, prepared_session_state)
1770
+ """
1771
+ # Read existing session from database
1772
+ if self._has_async_db():
1773
+ workflow_session = await self.aread_or_create_session(session_id=session_id, user_id=user_id)
1774
+ else:
1775
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1776
+ self._update_metadata(session=workflow_session)
1777
+
1778
+ # Update session state from DB
1779
+ _session_state = session_state or {}
1780
+ _session_state = self._load_session_state(session=workflow_session, session_state=_session_state)
1781
+
1782
+ return workflow_session, _session_state
1783
+
1784
+ async def _aexecute(
1785
+ self,
1786
+ session_id: str,
1787
+ user_id: Optional[str],
1788
+ execution_input: WorkflowExecutionInput,
1789
+ workflow_run_response: WorkflowRunOutput,
1790
+ run_context: RunContext,
1791
+ **kwargs: Any,
1792
+ ) -> WorkflowRunOutput:
1793
+ """Execute a specific pipeline by name asynchronously"""
1794
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1795
+
1796
+ # Read existing session from database
1797
+ workflow_session, run_context.session_state = await self._aload_or_create_session(
1798
+ session_id=session_id, user_id=user_id, session_state=run_context.session_state
1799
+ )
1800
+
1801
+ workflow_run_response.status = RunStatus.running
1802
+
1803
+ # Register run for cancellation tracking
1804
+ if workflow_run_response.run_id:
1805
+ register_run(workflow_run_response.run_id) # type: ignore
1806
+
1807
+ if callable(self.steps):
1808
+ # Execute the workflow with the custom executor
1809
+ content = ""
1810
+
1811
+ if iscoroutinefunction(self.steps): # type: ignore
1812
+ workflow_run_response.content = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1813
+ elif isgeneratorfunction(self.steps):
1814
+ for chunk in self.steps(self, execution_input, **kwargs): # type: ignore[arg-type]
1815
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1816
+ content += chunk.content
1817
+ else:
1818
+ content += str(chunk)
1819
+ workflow_run_response.content = content
1820
+ elif isasyncgenfunction(self.steps): # type: ignore
1821
+ async_gen = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1822
+ async for chunk in async_gen:
1823
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1824
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1825
+ content += chunk.content
1826
+ else:
1827
+ content += str(chunk)
1828
+ workflow_run_response.content = content
1829
+ else:
1830
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1831
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs)
1832
+ workflow_run_response.status = RunStatus.completed
1833
+
1834
+ else:
1835
+ try:
1836
+ # Track outputs from each step for enhanced data flow
1837
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1838
+ previous_step_outputs: Dict[str, StepOutput] = {}
1839
+
1840
+ shared_images: List[Image] = execution_input.images or []
1841
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
1842
+ shared_videos: List[Video] = execution_input.videos or []
1843
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
1844
+ shared_audio: List[Audio] = execution_input.audio or []
1845
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
1846
+ shared_files: List[File] = execution_input.files or []
1847
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1848
+
1849
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1850
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1851
+ step_name = getattr(step, "name", f"step_{i + 1}")
1852
+ log_debug(f"Async Executing step {i + 1}/{self._get_step_count()}: {step_name}")
1853
+
1854
+ # Create enhanced StepInput
1855
+ step_input = self._create_step_input(
1856
+ execution_input=execution_input,
1857
+ previous_step_outputs=previous_step_outputs,
1858
+ shared_images=shared_images,
1859
+ shared_videos=shared_videos,
1860
+ shared_audio=shared_audio,
1861
+ shared_files=shared_files,
1862
+ )
1863
+
1864
+ # Check for cancellation before executing step
1865
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1866
+
1867
+ step_output = await step.aexecute( # type: ignore[union-attr]
1868
+ step_input,
1869
+ session_id=session_id,
1870
+ user_id=self.user_id,
1871
+ workflow_run_response=workflow_run_response,
1872
+ run_context=run_context,
1873
+ store_executor_outputs=self.store_executor_outputs,
1874
+ workflow_session=workflow_session,
1875
+ add_workflow_history_to_steps=self.add_workflow_history_to_steps
1876
+ if self.add_workflow_history_to_steps
1877
+ else None,
1878
+ num_history_runs=self.num_history_runs,
1879
+ )
1880
+
1881
+ # Check for cancellation after step execution
1882
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1883
+
1884
+ # Update the workflow-level previous_step_outputs dictionary
1885
+ previous_step_outputs[step_name] = step_output
1886
+ collected_step_outputs.append(step_output)
1887
+
1888
+ # Update shared media for next step
1889
+ shared_images.extend(step_output.images or [])
1890
+ shared_videos.extend(step_output.videos or [])
1891
+ shared_audio.extend(step_output.audio or [])
1892
+ shared_files.extend(step_output.files or [])
1893
+ output_images.extend(step_output.images or [])
1894
+ output_videos.extend(step_output.videos or [])
1895
+ output_audio.extend(step_output.audio or [])
1896
+ output_files.extend(step_output.files or [])
1897
+
1898
+ if step_output.stop:
1899
+ logger.info(f"Early termination requested by step {step_name}")
1900
+ break
1901
+
1902
+ # Update the workflow_run_response with completion data
1903
+ if collected_step_outputs:
1904
+ # Stop the timer for the Run duration
1905
+ if workflow_run_response.metrics:
1906
+ workflow_run_response.metrics.stop_timer()
1907
+
1908
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
1909
+ collected_step_outputs,
1910
+ workflow_run_response.metrics, # type: ignore[arg-type]
1911
+ )
1912
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1913
+
1914
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1915
+ if getattr(last_output, "steps", None):
1916
+ _cur = last_output
1917
+ while getattr(_cur, "steps", None):
1918
+ _steps = _cur.steps or []
1919
+ if not _steps:
1920
+ break
1921
+ _cur = _steps[-1]
1922
+ workflow_run_response.content = _cur.content
1923
+ else:
1924
+ workflow_run_response.content = last_output.content
1925
+ else:
1926
+ workflow_run_response.content = "No steps executed"
1927
+
1928
+ workflow_run_response.step_results = collected_step_outputs
1929
+ workflow_run_response.images = output_images
1930
+ workflow_run_response.videos = output_videos
1931
+ workflow_run_response.audio = output_audio
1932
+ workflow_run_response.status = RunStatus.completed
1933
+
1934
+ except (InputCheckError, OutputCheckError) as e:
1935
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
1936
+ # Store error response
1937
+ workflow_run_response.status = RunStatus.error
1938
+ workflow_run_response.content = f"Validation failed: {str(e)} | Check: {e.check_trigger}"
1939
+
1940
+ raise e
1941
+ except RunCancelledException as e:
1942
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
1943
+ workflow_run_response.status = RunStatus.cancelled
1944
+ workflow_run_response.content = str(e)
1945
+ except Exception as e:
1946
+ logger.error(f"Workflow execution failed: {e}")
1947
+ workflow_run_response.status = RunStatus.error
1948
+ workflow_run_response.content = f"Workflow execution failed: {e}"
1949
+ raise e
1950
+
1951
+ # Stop the run timer if it is still running
1952
+ if workflow_run_response.metrics:
1953
+ workflow_run_response.metrics.stop_timer()
1954
+
1955
+ self._update_session_metrics(session=workflow_session, workflow_run_response=workflow_run_response)
1956
+ workflow_session.upsert_run(run=workflow_run_response)
1957
+ if self._has_async_db():
1958
+ await self.asave_session(session=workflow_session)
1959
+ else:
1960
+ self.save_session(session=workflow_session)
1961
+ # Always clean up the run tracking
1962
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1963
+
1964
+ # Log Workflow Telemetry
1965
+ if self.telemetry:
1966
+ await self._alog_workflow_telemetry(session_id=session_id, run_id=workflow_run_response.run_id)
1967
+
1968
+ return workflow_run_response
1969
+
1970
+ async def _aexecute_stream(
1971
+ self,
1972
+ session_id: str,
1973
+ user_id: Optional[str],
1974
+ execution_input: WorkflowExecutionInput,
1975
+ workflow_run_response: WorkflowRunOutput,
1976
+ run_context: RunContext,
1977
+ stream_events: bool = False,
1978
+ websocket_handler: Optional[WebSocketHandler] = None,
1979
+ **kwargs: Any,
1980
+ ) -> AsyncIterator[WorkflowRunOutputEvent]:
1981
+ """Execute a specific pipeline by name with event streaming"""
1982
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1983
+
1984
+ # Read existing session from database
1985
+ workflow_session, run_context.session_state = await self._aload_or_create_session(
1986
+ session_id=session_id, user_id=user_id, session_state=run_context.session_state
1987
+ )
1988
+
1989
+ workflow_run_response.status = RunStatus.running
1990
+
1991
+ # Register run for cancellation tracking
1992
+ if workflow_run_response.run_id:
1993
+ register_run(workflow_run_response.run_id)
1994
+
1995
+ workflow_started_event = WorkflowStartedEvent(
1996
+ run_id=workflow_run_response.run_id or "",
1997
+ workflow_name=workflow_run_response.workflow_name,
1998
+ workflow_id=workflow_run_response.workflow_id,
1999
+ session_id=workflow_run_response.session_id,
2000
+ )
2001
+ yield self._handle_event(workflow_started_event, workflow_run_response, websocket_handler=websocket_handler)
2002
+
2003
+ if callable(self.steps):
2004
+ if iscoroutinefunction(self.steps): # type: ignore
2005
+ workflow_run_response.content = await self._acall_custom_function(self.steps, execution_input, **kwargs)
2006
+ elif isgeneratorfunction(self.steps):
2007
+ content = ""
2008
+ for chunk in self.steps(self, execution_input, **kwargs): # type: ignore[arg-type]
2009
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
2010
+ content += chunk.content
2011
+ yield chunk
2012
+ else:
2013
+ content += str(chunk)
2014
+ workflow_run_response.content = content
2015
+ elif isasyncgenfunction(self.steps): # type: ignore
2016
+ content = ""
2017
+ async_gen = await self._acall_custom_function(self.steps, execution_input, **kwargs)
2018
+ async for chunk in async_gen:
2019
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
2020
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
2021
+ content += chunk.content
2022
+ yield chunk
2023
+ else:
2024
+ content += str(chunk)
2025
+ workflow_run_response.content = content
2026
+ else:
2027
+ workflow_run_response.content = self.steps(self, execution_input, **kwargs)
2028
+ workflow_run_response.status = RunStatus.completed
2029
+
2030
+ else:
2031
+ try:
2032
+ # Track outputs from each step for enhanced data flow
2033
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
2034
+ previous_step_outputs: Dict[str, StepOutput] = {}
2035
+
2036
+ shared_images: List[Image] = execution_input.images or []
2037
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
2038
+ shared_videos: List[Video] = execution_input.videos or []
2039
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
2040
+ shared_audio: List[Audio] = execution_input.audio or []
2041
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
2042
+ shared_files: List[File] = execution_input.files or []
2043
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
2044
+
2045
+ early_termination = False
2046
+
2047
+ # Track partial step data in case of cancellation
2048
+ current_step_name = ""
2049
+ current_step = None
2050
+ partial_step_content = ""
2051
+
2052
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
2053
+ if workflow_run_response.run_id:
2054
+ raise_if_cancelled(workflow_run_response.run_id)
2055
+ step_name = getattr(step, "name", f"step_{i + 1}")
2056
+ log_debug(f"Async streaming step {i + 1}/{self._get_step_count()}: {step_name}")
2057
+
2058
+ current_step_name = step_name
2059
+ current_step = step
2060
+ # Reset partial data for this step
2061
+ partial_step_content = ""
2062
+
2063
+ # Create enhanced StepInput
2064
+ step_input = self._create_step_input(
2065
+ execution_input=execution_input,
2066
+ previous_step_outputs=previous_step_outputs,
2067
+ shared_images=shared_images,
2068
+ shared_videos=shared_videos,
2069
+ shared_audio=shared_audio,
2070
+ shared_files=shared_files,
2071
+ )
2072
+
2073
+ # Execute step with streaming and yield all events
2074
+ async for event in step.aexecute_stream( # type: ignore[union-attr]
2075
+ step_input,
2076
+ session_id=session_id,
2077
+ user_id=self.user_id,
2078
+ stream_events=stream_events,
2079
+ stream_executor_events=self.stream_executor_events,
2080
+ workflow_run_response=workflow_run_response,
2081
+ run_context=run_context,
2082
+ step_index=i,
2083
+ store_executor_outputs=self.store_executor_outputs,
2084
+ workflow_session=workflow_session,
2085
+ add_workflow_history_to_steps=self.add_workflow_history_to_steps
2086
+ if self.add_workflow_history_to_steps
2087
+ else None,
2088
+ num_history_runs=self.num_history_runs,
2089
+ ):
2090
+ if workflow_run_response.run_id:
2091
+ raise_if_cancelled(workflow_run_response.run_id)
2092
+
2093
+ # Accumulate partial data from streaming events
2094
+ partial_step_content = self._accumulate_partial_step_data(event, partial_step_content) # type: ignore
2095
+
2096
+ if isinstance(event, StepOutput):
2097
+ step_output = event
2098
+ collected_step_outputs.append(step_output)
2099
+
2100
+ # Update the workflow-level previous_step_outputs dictionary
2101
+ previous_step_outputs[step_name] = step_output
2102
+
2103
+ # Transform StepOutput to StepOutputEvent for consistent streaming interface
2104
+ step_output_event = self._transform_step_output_to_event(
2105
+ step_output, workflow_run_response, step_index=i
2106
+ )
2107
+
2108
+ if step_output.stop:
2109
+ logger.info(f"Early termination requested by step {step_name}")
2110
+ # Update shared media for next step
2111
+ shared_images.extend(step_output.images or [])
2112
+ shared_videos.extend(step_output.videos or [])
2113
+ shared_audio.extend(step_output.audio or [])
2114
+ shared_files.extend(step_output.files or [])
2115
+ output_images.extend(step_output.images or [])
2116
+ output_videos.extend(step_output.videos or [])
2117
+ output_audio.extend(step_output.audio or [])
2118
+ output_files.extend(step_output.files or [])
2119
+
2120
+ if getattr(step, "executor_type", None) == "function":
2121
+ yield step_output_event
2122
+
2123
+ # Break out of the step loop
2124
+ early_termination = True
2125
+ break
2126
+
2127
+ # Update shared media for next step
2128
+ shared_images.extend(step_output.images or [])
2129
+ shared_videos.extend(step_output.videos or [])
2130
+ shared_audio.extend(step_output.audio or [])
2131
+ shared_files.extend(step_output.files or [])
2132
+ output_images.extend(step_output.images or [])
2133
+ output_videos.extend(step_output.videos or [])
2134
+ output_audio.extend(step_output.audio or [])
2135
+ output_files.extend(step_output.files or [])
2136
+
2137
+ # Only yield StepOutputEvent for function executors, not for agents/teams
2138
+ if getattr(step, "executor_type", None) == "function":
2139
+ yield step_output_event
2140
+
2141
+ elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
2142
+ # Enrich event with workflow context before yielding
2143
+ enriched_event = self._enrich_event_with_workflow_context(
2144
+ event, workflow_run_response, step_index=i, step=step
2145
+ )
2146
+ yield self._handle_event(
2147
+ enriched_event, workflow_run_response, websocket_handler=websocket_handler
2148
+ ) # type: ignore
2149
+
2150
+ else:
2151
+ # Enrich other events with workflow context before yielding
2152
+ enriched_event = self._enrich_event_with_workflow_context(
2153
+ event, workflow_run_response, step_index=i, step=step
2154
+ )
2155
+ if self.stream_executor_events:
2156
+ yield self._handle_event(
2157
+ enriched_event, workflow_run_response, websocket_handler=websocket_handler
2158
+ ) # type: ignore
2159
+
2160
+ # Break out of main step loop if early termination was requested
2161
+ if "early_termination" in locals() and early_termination:
2162
+ break
2163
+
2164
+ # Update the workflow_run_response with completion data
2165
+ if collected_step_outputs:
2166
+ # Stop the timer for the Run duration
2167
+ if workflow_run_response.metrics:
2168
+ workflow_run_response.metrics.stop_timer()
2169
+
2170
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
2171
+ collected_step_outputs,
2172
+ workflow_run_response.metrics, # type: ignore[arg-type]
2173
+ )
2174
+ last_output = cast(StepOutput, collected_step_outputs[-1])
2175
+
2176
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
2177
+ if getattr(last_output, "steps", None):
2178
+ _cur = last_output
2179
+ while getattr(_cur, "steps", None):
2180
+ _steps = _cur.steps or []
2181
+ if not _steps:
2182
+ break
2183
+ _cur = _steps[-1]
2184
+ workflow_run_response.content = _cur.content
2185
+ else:
2186
+ workflow_run_response.content = last_output.content
2187
+ else:
2188
+ workflow_run_response.content = "No steps executed"
2189
+
2190
+ workflow_run_response.step_results = collected_step_outputs
2191
+ workflow_run_response.images = output_images
2192
+ workflow_run_response.videos = output_videos
2193
+ workflow_run_response.audio = output_audio
2194
+ workflow_run_response.status = RunStatus.completed
2195
+
2196
+ except (InputCheckError, OutputCheckError) as e:
2197
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
2198
+
2199
+ from agno.run.workflow import WorkflowErrorEvent
2200
+
2201
+ error_event = WorkflowErrorEvent(
2202
+ run_id=workflow_run_response.run_id or "",
2203
+ workflow_id=self.id,
2204
+ workflow_name=self.name,
2205
+ session_id=session_id,
2206
+ error=str(e),
2207
+ )
2208
+
2209
+ yield error_event
2210
+
2211
+ # Update workflow_run_response with error
2212
+ workflow_run_response.content = error_event.error
2213
+ workflow_run_response.status = RunStatus.error
2214
+ except RunCancelledException as e:
2215
+ # Handle run cancellation during streaming
2216
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
2217
+ workflow_run_response.status = RunStatus.cancelled
2218
+ workflow_run_response.content = str(e)
2219
+
2220
+ # Capture partial progress from the step that was cancelled mid-stream
2221
+ if partial_step_content:
2222
+ logger.info(
2223
+ f"Step with name '{current_step_name}' was cancelled. Setting its partial progress as step output."
2224
+ )
2225
+ partial_step_output = StepOutput(
2226
+ step_name=current_step_name,
2227
+ step_id=getattr(current_step, "step_id", None) if current_step else None,
2228
+ step_type=StepType.STEP,
2229
+ executor_type=getattr(current_step, "executor_type", None) if current_step else None,
2230
+ executor_name=getattr(current_step, "executor_name", None) if current_step else None,
2231
+ content=partial_step_content,
2232
+ success=False,
2233
+ error="Cancelled during execution",
2234
+ )
2235
+ collected_step_outputs.append(partial_step_output)
2236
+
2237
+ # Preserve all progress (completed steps + partial step) before cancellation
2238
+ if collected_step_outputs:
2239
+ workflow_run_response.step_results = collected_step_outputs
2240
+ # Stop the timer for the Run duration
2241
+ if workflow_run_response.metrics:
2242
+ workflow_run_response.metrics.stop_timer()
2243
+
2244
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
2245
+ collected_step_outputs,
2246
+ workflow_run_response.metrics, # type: ignore[arg-type]
2247
+ )
2248
+
2249
+ cancelled_event = WorkflowCancelledEvent(
2250
+ run_id=workflow_run_response.run_id or "",
2251
+ workflow_id=self.id,
2252
+ workflow_name=self.name,
2253
+ session_id=session_id,
2254
+ reason=str(e),
2255
+ )
2256
+ yield self._handle_event(
2257
+ cancelled_event,
2258
+ workflow_run_response,
2259
+ websocket_handler=websocket_handler,
2260
+ )
2261
+ except Exception as e:
2262
+ logger.error(f"Workflow execution failed: {e}")
2263
+
2264
+ from agno.run.workflow import WorkflowErrorEvent
2265
+
2266
+ error_event = WorkflowErrorEvent(
2267
+ run_id=workflow_run_response.run_id or "",
2268
+ workflow_id=self.id,
2269
+ workflow_name=self.name,
2270
+ session_id=session_id,
2271
+ error=str(e),
2272
+ )
2273
+
2274
+ yield error_event
2275
+
2276
+ # Update workflow_run_response with error
2277
+ workflow_run_response.content = error_event.error
2278
+ workflow_run_response.status = RunStatus.error
2279
+ raise e
2280
+
2281
+ # Yield workflow completed event
2282
+ workflow_completed_event = WorkflowCompletedEvent(
2283
+ run_id=workflow_run_response.run_id or "",
2284
+ content=workflow_run_response.content,
2285
+ workflow_name=workflow_run_response.workflow_name,
2286
+ workflow_id=workflow_run_response.workflow_id,
2287
+ session_id=workflow_run_response.session_id,
2288
+ step_results=workflow_run_response.step_results, # type: ignore[arg-type]
2289
+ metadata=workflow_run_response.metadata,
2290
+ )
2291
+ yield self._handle_event(workflow_completed_event, workflow_run_response, websocket_handler=websocket_handler)
2292
+
2293
+ # Ensure the run timer is stopped (error, cancellation, and no-step paths)
2294
+ if workflow_run_response.metrics:
2295
+ workflow_run_response.metrics.stop_timer()
2296
+
2297
+ # Store the completed workflow response
2298
+ self._update_session_metrics(session=workflow_session, workflow_run_response=workflow_run_response)
2299
+ workflow_session.upsert_run(run=workflow_run_response)
2300
+ if self._has_async_db():
2301
+ await self.asave_session(session=workflow_session)
2302
+ else:
2303
+ self.save_session(session=workflow_session)
2304
+
2305
+ # Log Workflow Telemetry
2306
+ if self.telemetry:
2307
+ await self._alog_workflow_telemetry(session_id=session_id, run_id=workflow_run_response.run_id)
2308
+
2309
+ # Always clean up the run tracking
2310
+ cleanup_run(workflow_run_response.run_id) # type: ignore
2311
+
2312
+ async def _arun_background(
2313
+ self,
2314
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
2315
+ additional_data: Optional[Dict[str, Any]] = None,
2316
+ user_id: Optional[str] = None,
2317
+ session_id: Optional[str] = None,
2318
+ session_state: Optional[Dict[str, Any]] = None,
2319
+ audio: Optional[List[Audio]] = None,
2320
+ images: Optional[List[Image]] = None,
2321
+ videos: Optional[List[Video]] = None,
2322
+ files: Optional[List[File]] = None,
2323
+ **kwargs: Any,
2324
+ ) -> WorkflowRunOutput:
2325
+ """Execute workflow in background using asyncio.create_task()"""
2326
+
2327
+ run_id = str(uuid4())
2328
+
2329
+ self.initialize_workflow()
2330
+
2331
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
2332
+
2333
+ # Read existing session from database
2334
+ workflow_session, session_state = await self._aload_or_create_session(
2335
+ session_id=session_id, user_id=user_id, session_state=session_state
2336
+ )
2337
+
2338
+ run_context = RunContext(
2339
+ run_id=run_id,
2340
+ session_id=session_id,
2341
+ user_id=user_id,
2342
+ session_state=session_state,
2343
+ )
2344
+
2345
+ self._prepare_steps()
2346
+
2347
+ # Create workflow run response with PENDING status
2348
+ workflow_run_response = WorkflowRunOutput(
2349
+ run_id=run_id,
2350
+ input=input,
2351
+ session_id=session_id,
2352
+ workflow_id=self.id,
2353
+ workflow_name=self.name,
2354
+ created_at=int(datetime.now().timestamp()),
2355
+ status=RunStatus.pending,
2356
+ )
2357
+
2358
+ # Start the run metrics timer
2359
+ workflow_run_response.metrics = WorkflowMetrics(steps={})
2360
+ workflow_run_response.metrics.start_timer()
2361
+
2362
+ # Store PENDING response immediately
2363
+ workflow_session.upsert_run(run=workflow_run_response)
2364
+ if self._has_async_db():
2365
+ await self.asave_session(session=workflow_session)
2366
+ else:
2367
+ self.save_session(session=workflow_session)
2368
+
2369
+ # Prepare execution input
2370
+ inputs = WorkflowExecutionInput(
2371
+ input=input,
2372
+ additional_data=additional_data,
2373
+ audio=audio, # type: ignore
2374
+ images=images, # type: ignore
2375
+ videos=videos, # type: ignore
2376
+ files=files, # type: ignore
2377
+ )
2378
+
2379
+ self.update_agents_and_teams_session_info()
2380
+
2381
+ async def execute_workflow_background():
2382
+ """Simple background execution"""
2383
+ try:
2384
+ # Update status to RUNNING and save
2385
+ workflow_run_response.status = RunStatus.running
2386
+ if self._has_async_db():
2387
+ await self.asave_session(session=workflow_session)
2388
+ else:
2389
+ self.save_session(session=workflow_session)
2390
+
2391
+ if self.agent is not None:
2392
+ self._aexecute_workflow_agent(
2393
+ user_input=input, # type: ignore
2394
+ execution_input=inputs,
2395
+ run_context=run_context,
2396
+ stream=False,
2397
+ **kwargs,
2398
+ )
2399
+ else:
2400
+ await self._aexecute(
2401
+ session_id=session_id,
2402
+ user_id=user_id,
2403
+ execution_input=inputs,
2404
+ workflow_run_response=workflow_run_response,
2405
+ run_context=run_context,
2406
+ session_state=session_state,
2407
+ **kwargs,
2408
+ )
2409
+
2410
+ log_debug(f"Background execution completed with status: {workflow_run_response.status}")
2411
+
2412
+ except Exception as e:
2413
+ logger.error(f"Background workflow execution failed: {e}")
2414
+ workflow_run_response.status = RunStatus.error
2415
+ workflow_run_response.content = f"Background execution failed: {str(e)}"
2416
+ if self._has_async_db():
2417
+ await self.asave_session(session=workflow_session)
2418
+ else:
2419
+ self.save_session(session=workflow_session)
2420
+
2421
+ # Create and start asyncio task
2422
+ loop = asyncio.get_running_loop()
2423
+ loop.create_task(execute_workflow_background())
2424
+
2425
+ # Return SAME object that will be updated by background execution
2426
+ return workflow_run_response
2427
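As a usage sketch for the background path above: `arun(background=True)` returns immediately with a pending `WorkflowRunOutput` that the spawned asyncio task keeps updating in place. The import paths, the `SqliteDb` name, and the constructor arguments below are assumptions inferred from this package's layout, not verified API.

```python
import asyncio

# Assumed imports and constructor arguments; adjust to the published agno API.
from agno.db.sqlite import SqliteDb
from agno.workflow import Step, Workflow


def echo(step_input):
    # Callable steps are wrapped by _prepare_steps; the argument shape is assumed here.
    return f"echo: {step_input.input}"


async def main() -> None:
    workflow = Workflow(
        name="background-demo",
        db=SqliteDb(db_file="tmp/agno.db"),  # a db lets the pending run be persisted and polled later
        steps=[Step(name="echo", executor=echo)],
    )

    # Returns right away with status=pending; the background task moves it to running/completed.
    pending = await workflow.arun(input="hello", background=True)
    print(pending.run_id, pending.status)

    await asyncio.sleep(2)  # give the background task time to finish
    print(pending.status)


asyncio.run(main())
```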
+
2428
+ async def _arun_background_stream(
2429
+ self,
2430
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
2431
+ additional_data: Optional[Dict[str, Any]] = None,
2432
+ user_id: Optional[str] = None,
2433
+ session_id: Optional[str] = None,
2434
+ session_state: Optional[Dict[str, Any]] = None,
2435
+ audio: Optional[List[Audio]] = None,
2436
+ images: Optional[List[Image]] = None,
2437
+ videos: Optional[List[Video]] = None,
2438
+ files: Optional[List[File]] = None,
2439
+ stream_events: bool = False,
2440
+ websocket_handler: Optional[WebSocketHandler] = None,
2441
+ **kwargs: Any,
2442
+ ) -> WorkflowRunOutput:
2443
+ """Execute workflow in background with streaming and WebSocket broadcasting"""
2444
+
2445
+ run_id = str(uuid4())
2446
+
2447
+ self.initialize_workflow()
2448
+
2449
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
2450
+
2451
+ # Read existing session from database
2452
+ workflow_session, session_state = await self._aload_or_create_session(
2453
+ session_id=session_id, user_id=user_id, session_state=session_state
2454
+ )
2455
+
2456
+ run_context = RunContext(
2457
+ run_id=run_id,
2458
+ session_id=session_id,
2459
+ user_id=user_id,
2460
+ session_state=session_state,
2461
+ )
2462
+
2463
+ self._prepare_steps()
2464
+
2465
+ # Create workflow run response with PENDING status
2466
+ workflow_run_response = WorkflowRunOutput(
2467
+ run_id=run_id,
2468
+ input=input,
2469
+ session_id=session_id,
2470
+ workflow_id=self.id,
2471
+ workflow_name=self.name,
2472
+ created_at=int(datetime.now().timestamp()),
2473
+ status=RunStatus.pending,
2474
+ )
2475
+
2476
+ # Start the run metrics timer
2477
+ workflow_run_response.metrics = WorkflowMetrics(steps={})
2478
+ workflow_run_response.metrics.start_timer()
2479
+
2480
+ # Prepare execution input
2481
+ inputs = WorkflowExecutionInput(
2482
+ input=input,
2483
+ additional_data=additional_data,
2484
+ audio=audio, # type: ignore
2485
+ images=images, # type: ignore
2486
+ videos=videos, # type: ignore
2487
+ files=files, # type: ignore
2488
+ )
2489
+
2490
+ self.update_agents_and_teams_session_info()
2491
+
2492
+ async def execute_workflow_background_stream():
2493
+ """Background execution with streaming and WebSocket broadcasting"""
2494
+ try:
2495
+ if self.agent is not None:
2496
+ result = self._aexecute_workflow_agent(
2497
+ user_input=input, # type: ignore
2498
+ run_context=run_context,
2499
+ execution_input=inputs,
2500
+ stream=True,
2501
+ websocket_handler=websocket_handler,
2502
+ **kwargs,
2503
+ )
2504
+ # For streaming, result is an async iterator
2505
+ async for event in result: # type: ignore
2506
+ # Events are automatically broadcast by _handle_event in the agent execution
2507
+ # We just consume them here to drive the execution
2508
+ pass
2509
+ log_debug(
2510
+ f"Background streaming execution (workflow agent) completed with status: {workflow_run_response.status}"
2511
+ )
2512
+ else:
2513
+ # Update status to RUNNING and save
2514
+ workflow_run_response.status = RunStatus.running
2515
+ if self._has_async_db():
2516
+ await self.asave_session(session=workflow_session)
2517
+ else:
2518
+ self.save_session(session=workflow_session)
2519
+
2520
+ # Execute with streaming - consume all events (they're auto-broadcast via _handle_event)
2521
+ async for event in self._aexecute_stream(
2522
+ session_id=session_id,
2523
+ user_id=user_id,
2524
+ execution_input=inputs,
2525
+ workflow_run_response=workflow_run_response,
2526
+ stream_events=stream_events,
2527
+ run_context=run_context,
2528
+ websocket_handler=websocket_handler,
2529
+ **kwargs,
2530
+ ):
2531
+ # Events are automatically broadcast by _handle_event
2532
+ # We just consume them here to drive the execution
2533
+ pass
2534
+
2535
+ log_debug(f"Background streaming execution completed with status: {workflow_run_response.status}")
2536
+
2537
+ except Exception as e:
2538
+ logger.error(f"Background streaming workflow execution failed: {e}")
2539
+ workflow_run_response.status = RunStatus.error
2540
+ workflow_run_response.content = f"Background streaming execution failed: {str(e)}"
2541
+ if self._has_async_db():
2542
+ await self.asave_session(session=workflow_session)
2543
+ else:
2544
+ self.save_session(session=workflow_session)
2545
+
2546
+ # Create and start asyncio task for background streaming execution
2547
+ loop = asyncio.get_running_loop()
2548
+ loop.create_task(execute_workflow_background_stream())
2549
+
2550
+ # Return SAME object that will be updated by background execution
2551
+ return workflow_run_response
2552
+
2553
+ async def aget_run(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
2554
+ """Get the status and details of a background workflow run - SIMPLIFIED"""
2555
+ # Use provided session_id or fall back to self.session_id
2556
+ _session_id = session_id if session_id is not None else self.session_id
2557
+
2558
+ if self.db is not None and _session_id is not None:
2559
+ session = await self.db.aget_session(session_id=_session_id, session_type=SessionType.WORKFLOW) # type: ignore
2560
+ if session and isinstance(session, WorkflowSession) and session.runs:
2561
+ # Find the run by ID
2562
+ for run in session.runs:
2563
+ if run.run_id == run_id:
2564
+ return run
2565
+
2566
+ return None
2567
+
2568
+ def get_run(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
2569
+ """Get the status and details of a background workflow run - SIMPLIFIED"""
2570
+ # Use provided session_id or fall back to self.session_id
2571
+ _session_id = session_id if session_id is not None else self.session_id
2572
+
2573
+ if self.db is not None and _session_id is not None:
2574
+ session = self.db.get_session(session_id=_session_id, session_type=SessionType.WORKFLOW)
2575
+ if session and isinstance(session, WorkflowSession) and session.runs:
2576
+ # Find the run by ID
2577
+ for run in session.runs:
2578
+ if run.run_id == run_id:
2579
+ return run
2580
+
2581
+ return None
2582
+
2583
+ def _initialize_workflow_agent(
2584
+ self,
2585
+ session: WorkflowSession,
2586
+ execution_input: WorkflowExecutionInput,
2587
+ run_context: RunContext,
2588
+ stream: bool = False,
2589
+ ) -> None:
2590
+ """Initialize the workflow agent with tools (but NOT context - that's passed per-run)"""
2591
+ from agno.tools.function import Function
2592
+
2593
+ workflow_tool_func = self.agent.create_workflow_tool( # type: ignore
2594
+ workflow=self,
2595
+ session=session,
2596
+ execution_input=execution_input,
2597
+ run_context=run_context,
2598
+ stream=stream,
2599
+ )
2600
+ workflow_tool = Function.from_callable(workflow_tool_func)
2601
+
2602
+ self.agent.tools = [workflow_tool] # type: ignore
2603
+ self.agent._rebuild_tools = True # type: ignore
2604
+
2605
+ log_debug("Workflow agent initialized with run_workflow tool")
2606
+
2607
+ def _get_workflow_agent_dependencies(self, session: WorkflowSession) -> Dict[str, Any]:
2608
+ """Build dependencies dict with workflow context to pass to agent.run()"""
2609
+ # Get configuration from the WorkflowAgent instance
2610
+ add_history = True
2611
+ num_runs = 5
2612
+
2613
+ if self.agent and isinstance(self.agent, WorkflowAgent):
2614
+ add_history = self.agent.add_workflow_history
2615
+ num_runs = self.agent.num_history_runs or 5
2616
+
2617
+ if add_history:
2618
+ history_context = (
2619
+ session.get_workflow_history_context(num_runs=num_runs) or "No previous workflow runs in this session."
2620
+ )
2621
+ else:
2622
+ history_context = "No workflow history available."
2623
+
2624
+ # Build workflow context with description and history
2625
+ workflow_context = ""
2626
+ if self.description:
2627
+ workflow_context += f"Workflow Description: {self.description}\n\n"
2628
+
2629
+ workflow_context += history_context
2630
+
2631
+ return {
2632
+ "workflow_context": workflow_context,
2633
+ }
2634
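Illustratively, the mapping returned above ends up looking roughly like the sketch below; the history text is whatever `WorkflowSession.get_workflow_history_context` renders, shown here only as a placeholder.

```python
# Hypothetical shape of the dependencies passed to agent.run(); values are illustrative.
dependencies = {
    "workflow_context": (
        "Workflow Description: Answers support questions from internal docs\n\n"
        "<output of session.get_workflow_history_context(num_runs=5), "
        "or a 'no previous runs' fallback>"
    )
}
```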
+
2635
+ def _execute_workflow_agent(
2636
+ self,
2637
+ user_input: Union[str, Dict[str, Any], List[Any], BaseModel],
2638
+ session: WorkflowSession,
2639
+ execution_input: WorkflowExecutionInput,
2640
+ run_context: RunContext,
2641
+ stream: bool = False,
2642
+ **kwargs: Any,
2643
+ ) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
2644
+ """
2645
+ Execute the workflow agent in streaming or non-streaming mode.
2646
+
2647
+ The agent decides whether to run the workflow or answer directly from history.
2648
+
2649
+ Args:
2650
+ user_input: The user's input
2651
+ session: The workflow session
2652
+ execution_input: The execution input
2653
+ run_context: The run context
2654
+ stream: Whether to stream the response
2655
+ **kwargs: Additional keyword arguments forwarded to the underlying execution
2656
+
2657
+ Returns:
2658
+ WorkflowRunOutput if stream=False, Iterator[WorkflowRunOutputEvent] if stream=True
2659
+ """
2660
+ if stream:
2661
+ return self._run_workflow_agent_stream(
2662
+ agent_input=user_input,
2663
+ session=session,
2664
+ execution_input=execution_input,
2665
+ run_context=run_context,
2666
+ stream=stream,
2667
+ **kwargs,
2668
+ )
2669
+ else:
2670
+ return self._run_workflow_agent(
2671
+ agent_input=user_input,
2672
+ session=session,
2673
+ execution_input=execution_input,
2674
+ run_context=run_context,
2675
+ stream=stream,
2676
+ )
2677
+
2678
+ def _run_workflow_agent_stream(
2679
+ self,
2680
+ agent_input: Union[str, Dict[str, Any], List[Any], BaseModel],
2681
+ session: WorkflowSession,
2682
+ execution_input: WorkflowExecutionInput,
2683
+ run_context: RunContext,
2684
+ stream: bool = False,
2685
+ **kwargs: Any,
2686
+ ) -> Iterator[WorkflowRunOutputEvent]:
2687
+ """
2688
+ Execute the workflow agent in streaming mode.
2689
+
2690
+ The agent's tool (run_workflow) is a generator that yields workflow events directly.
2691
+ These events bubble up through the agent's streaming and are yielded here.
2692
+ We filter to only yield WorkflowRunOutputEvent to the CLI.
2693
+
2694
+ Yields:
2695
+ WorkflowRunOutputEvent: Events from workflow execution (agent events are filtered)
2696
+ """
2697
+ from typing import get_args
2698
+
2699
+ from agno.run.workflow import WorkflowCompletedEvent, WorkflowRunOutputEvent
2700
+
2701
+ # Initialize agent with stream_intermediate_steps=True so tool yields events
2702
+ self._initialize_workflow_agent(session, execution_input, run_context=run_context, stream=stream)
2703
+
2704
+ # Build dependencies with workflow context
2705
+ run_context.dependencies = self._get_workflow_agent_dependencies(session)
2706
+
2707
+ # Run agent with streaming - workflow events will bubble up from the tool
2708
+ agent_response: Optional[RunOutput] = None
2709
+ workflow_executed = False
2710
+
2711
+ from agno.run.agent import RunContentEvent
2712
+ from agno.run.team import RunContentEvent as TeamRunContentEvent
2713
+ from agno.run.workflow import WorkflowAgentCompletedEvent, WorkflowAgentStartedEvent
2714
+
2715
+ log_debug(f"Executing workflow agent with streaming - input: {agent_input}...")
2716
+
2717
+ # Create a workflow run response upfront for potential direct answer (will be used only if workflow is not executed)
2718
+ run_id = str(uuid4())
2719
+ direct_reply_run_response = WorkflowRunOutput(
2720
+ run_id=run_id,
2721
+ input=execution_input.input,
2722
+ session_id=session.session_id,
2723
+ workflow_id=self.id,
2724
+ workflow_name=self.name,
2725
+ created_at=int(datetime.now().timestamp()),
2726
+ )
2727
+
2728
+ # Yield WorkflowAgentStartedEvent at the beginning (stored in direct_reply_run_response)
2729
+ agent_started_event = WorkflowAgentStartedEvent(
2730
+ workflow_name=self.name,
2731
+ workflow_id=self.id,
2732
+ session_id=session.session_id,
2733
+ )
2734
+ yield agent_started_event
2735
+
2736
+ # Run the agent in streaming mode and yield all events
2737
+ for event in self.agent.run( # type: ignore[union-attr]
2738
+ input=agent_input,
2739
+ stream=True,
2740
+ stream_intermediate_steps=True,
2741
+ yield_run_response=True,
2742
+ session_id=session.session_id,
2743
+ dependencies=run_context.dependencies, # Pass context dynamically per-run
2744
+ session_state=run_context.session_state, # Pass session state dynamically per-run
2745
+ ): # type: ignore
2746
+ if isinstance(event, tuple(get_args(WorkflowRunOutputEvent))):
2747
+ yield event # type: ignore[misc]
2748
+
2749
+ # Track if workflow was executed by checking for WorkflowCompletedEvent
2750
+ if isinstance(event, WorkflowCompletedEvent):
2751
+ workflow_executed = True
2752
+ elif isinstance(event, (RunContentEvent, TeamRunContentEvent)):
2753
+ if event.step_name is None:
2754
+ # This is from the workflow agent itself
2755
+ # Enrich with metadata to mark it as a workflow agent event
2756
+
2757
+ if workflow_executed:
2758
+ continue # Skip if workflow was already executed
2759
+
2760
+ # workflow_agent field is used by consumers of the events to distinguish between workflow agent and regular agent
2761
+ event.workflow_agent = True # type: ignore
2762
+ yield event # type: ignore[misc]
2763
+
2764
+ # Capture the final RunOutput (but don't yield it)
2765
+ if isinstance(event, RunOutput):
2766
+ agent_response = event
2767
+
2768
+ # Handle direct answer case (no workflow execution)
2769
+ if not workflow_executed:
2770
+ # Update the pre-created workflow run response with the direct answer
2771
+ direct_reply_run_response.content = agent_response.content if agent_response else ""
2772
+ direct_reply_run_response.status = RunStatus.completed
2773
+ direct_reply_run_response.workflow_agent_run = agent_response
2774
+
2775
+ workflow_run_response = direct_reply_run_response
2776
+
2777
+ # Store the full agent RunOutput and establish parent-child relationship
2778
+ if agent_response:
2779
+ agent_response.parent_run_id = workflow_run_response.run_id
2780
+ agent_response.workflow_id = workflow_run_response.workflow_id
2781
+
2782
+ log_debug(f"Agent decision: workflow_executed={workflow_executed}")
2783
+
2784
+ # Yield WorkflowAgentCompletedEvent (used internally by print_response_stream)
2785
+ agent_completed_event = WorkflowAgentCompletedEvent(
2786
+ run_id=agent_response.run_id if agent_response else None,
2787
+ workflow_name=self.name,
2788
+ workflow_id=self.id,
2789
+ session_id=session.session_id,
2790
+ content=workflow_run_response.content,
2791
+ )
2792
+ yield agent_completed_event
2793
+
2794
+ # Yield a workflow completed event with the agent's direct response
2795
+ completed_event = WorkflowCompletedEvent(
2796
+ run_id=workflow_run_response.run_id or "",
2797
+ content=workflow_run_response.content,
2798
+ workflow_name=workflow_run_response.workflow_name,
2799
+ workflow_id=workflow_run_response.workflow_id,
2800
+ session_id=workflow_run_response.session_id,
2801
+ step_results=[],
2802
+ metadata={"agent_direct_response": True},
2803
+ )
2804
+ yield completed_event
2805
+
2806
+ # Update the run in session
2807
+ session.upsert_run(run=workflow_run_response)
2808
+ # Save session
2809
+ self.save_session(session=session)
2810
+
2811
+ else:
2812
+ # Workflow was executed by the tool
2813
+ reloaded_session = self.get_session(session_id=session.session_id)
2814
+
2815
+ if reloaded_session and reloaded_session.runs and len(reloaded_session.runs) > 0:
2816
+ # Get the last run (which is the one just created by the tool)
2817
+ last_run = reloaded_session.runs[-1]
2818
+
2819
+ # Yield WorkflowAgentCompletedEvent
2820
+ agent_completed_event = WorkflowAgentCompletedEvent(
2821
+ run_id=agent_response.run_id if agent_response else None,
2822
+ workflow_name=self.name,
2823
+ workflow_id=self.id,
2824
+ session_id=session.session_id,
2825
+ content=agent_response.content if agent_response else None,
2826
+ )
2827
+ yield agent_completed_event
2828
+
2829
+ # Update the last run with workflow_agent_run
2830
+ last_run.workflow_agent_run = agent_response
2831
+
2832
+ # Store the full agent RunOutput and establish parent-child relationship
2833
+ if agent_response:
2834
+ agent_response.parent_run_id = last_run.run_id
2835
+ agent_response.workflow_id = last_run.workflow_id
2836
+
2837
+ # Save the reloaded session (which has the updated run)
2838
+ self.save_session(session=reloaded_session)
2839
+
2840
+ else:
2841
+ log_warning("Could not reload session or no runs found after workflow execution")
2842
+
2843
+ def _run_workflow_agent(
2844
+ self,
2845
+ agent_input: Union[str, Dict[str, Any], List[Any], BaseModel],
2846
+ session: WorkflowSession,
2847
+ execution_input: WorkflowExecutionInput,
2848
+ run_context: RunContext,
2849
+ stream: bool = False,
2850
+ ) -> WorkflowRunOutput:
2851
+ """
2852
+ Execute the workflow agent in non-streaming mode.
2853
+
2854
+ The agent decides whether to run the workflow or answer directly from history.
2855
+
2856
+ Returns:
2857
+ WorkflowRunOutput: The workflow run output with agent response
2858
+ """
2859
+
2860
+ # Initialize the agent
2861
+ self._initialize_workflow_agent(session, execution_input, run_context=run_context, stream=stream)
2862
+
2863
+ # Build dependencies with workflow context
2864
+ run_context.dependencies = self._get_workflow_agent_dependencies(session)
2865
+
2866
+ # Run the agent
2867
+ agent_response: RunOutput = self.agent.run( # type: ignore[union-attr]
2868
+ input=agent_input,
2869
+ session_id=session.session_id,
2870
+ dependencies=run_context.dependencies,
2871
+ session_state=run_context.session_state,
2872
+ stream=stream,
2873
+ ) # type: ignore
2874
+
2875
+ # Check if the agent called the workflow tool
2876
+ workflow_executed = False
2877
+ if agent_response.messages:
2878
+ for message in agent_response.messages:
2879
+ if message.role == "assistant" and message.tool_calls:
2880
+ # Check if the tool call is specifically for run_workflow
2881
+ for tool_call in message.tool_calls:
2882
+ # Handle both dict and object formats
2883
+ if isinstance(tool_call, dict):
2884
+ tool_name = tool_call.get("function", {}).get("name", "")
2885
+ else:
2886
+ tool_name = tool_call.function.name if hasattr(tool_call, "function") else ""
2887
+
2888
+ if tool_name == "run_workflow":
2889
+ workflow_executed = True
2890
+ break
2891
+ if workflow_executed:
2892
+ break
2893
+
2894
+ log_debug(f"Workflow agent execution complete. Workflow executed: {workflow_executed}")
2895
+
2896
+ # Handle direct answer case (no workflow execution)
2897
+ if not workflow_executed:
2898
+ # Create a new workflow run output for the direct answer
2899
+ run_id = str(uuid4())
2900
+ workflow_run_response = WorkflowRunOutput(
2901
+ run_id=run_id,
2902
+ input=execution_input.input,
2903
+ session_id=session.session_id,
2904
+ workflow_id=self.id,
2905
+ workflow_name=self.name,
2906
+ created_at=int(datetime.now().timestamp()),
2907
+ content=agent_response.content,
2908
+ status=RunStatus.completed,
2909
+ workflow_agent_run=agent_response,
2910
+ )
2911
+
2912
+ # Store the full agent RunOutput and establish parent-child relationship
2913
+ if agent_response:
2914
+ agent_response.parent_run_id = workflow_run_response.run_id
2915
+ agent_response.workflow_id = workflow_run_response.workflow_id
2916
+
2917
+ # Update the run in session
2918
+ session.upsert_run(run=workflow_run_response)
2919
+ self.save_session(session=session)
2920
+
2921
+ log_debug(f"Agent decision: workflow_executed={workflow_executed}")
2922
+
2923
+ return workflow_run_response
2924
+ else:
2925
+ # Workflow was executed by the tool
2926
+ reloaded_session = self.get_session(session_id=session.session_id)
2927
+
2928
+ if reloaded_session and reloaded_session.runs and len(reloaded_session.runs) > 0:
2929
+ # Get the last run (which is the one just created by the tool)
2930
+ last_run = reloaded_session.runs[-1]
2931
+
2932
+ # Update the last run directly with workflow_agent_run
2933
+ last_run.workflow_agent_run = agent_response
2934
+
2935
+ # Store the full agent RunOutput and establish parent-child relationship
2936
+ if agent_response:
2937
+ agent_response.parent_run_id = last_run.run_id
2938
+ agent_response.workflow_id = last_run.workflow_id
2939
+
2940
+ # Save the reloaded session (which has the updated run)
2941
+ self.save_session(session=reloaded_session)
2942
+
2943
+ # Return the last run directly (the WorkflowRunOutput produced by the inner workflow)
2944
+ return last_run
2945
+ else:
2946
+ log_warning("Could not reload session or no runs found after workflow execution")
2947
+ # Return a placeholder error response
2948
+ return WorkflowRunOutput(
2949
+ run_id=str(uuid4()),
2950
+ input=execution_input.input,
2951
+ session_id=session.session_id,
2952
+ workflow_id=self.id,
2953
+ workflow_name=self.name,
2954
+ created_at=int(datetime.now().timestamp()),
2955
+ content="Error: Workflow execution failed",
2956
+ status=RunStatus.error,
2957
+ )
2958
+
2959
+ def _async_initialize_workflow_agent(
2960
+ self,
2961
+ session: WorkflowSession,
2962
+ execution_input: WorkflowExecutionInput,
2963
+ run_context: RunContext,
2964
+ websocket_handler: Optional[WebSocketHandler] = None,
2965
+ stream: bool = False,
2966
+ ) -> None:
2967
+ """Initialize the workflow agent with async tools (but NOT context - that's passed per-run)"""
2968
+ from agno.tools.function import Function
2969
+
2970
+ workflow_tool_func = self.agent.async_create_workflow_tool( # type: ignore
2971
+ workflow=self,
2972
+ session=session,
2973
+ execution_input=execution_input,
2974
+ run_context=run_context,
2975
+ stream=stream,
2976
+ websocket_handler=websocket_handler,
2977
+ )
2978
+ workflow_tool = Function.from_callable(workflow_tool_func)
2979
+
2980
+ self.agent.tools = [workflow_tool] # type: ignore
2981
+ self.agent._rebuild_tools = True # type: ignore
2982
+
2983
+ log_debug("Workflow agent initialized with async run_workflow tool")
2984
+
2985
+ async def _aload_session_for_workflow_agent(
2986
+ self,
2987
+ session_id: str,
2988
+ user_id: Optional[str],
2989
+ session_state: Optional[Dict[str, Any]],
2990
+ ) -> Tuple[WorkflowSession, Dict[str, Any]]:
2991
+ """Helper to load or create session for workflow agent execution"""
2992
+ return await self._aload_or_create_session(session_id=session_id, user_id=user_id, session_state=session_state)
2993
+
2994
+ def _aexecute_workflow_agent(
2995
+ self,
2996
+ user_input: Union[str, Dict[str, Any], List[Any], BaseModel],
2997
+ run_context: RunContext,
2998
+ execution_input: WorkflowExecutionInput,
2999
+ stream: bool = False,
3000
+ websocket_handler: Optional[WebSocketHandler] = None,
3001
+ **kwargs: Any,
3002
+ ):
3003
+ """
3004
+ Execute the workflow agent asynchronously in streaming or non-streaming mode.
3005
+
3006
+ The agent decides whether to run the workflow or answer directly from history.
3007
+
3008
+ Args:
3009
+ user_input: The user's input
3010
+ session: The workflow session
3011
+ run_context: The run context
3012
+ execution_input: The execution input
3013
+ stream: Whether to stream the response
3014
+ websocket_handler: The WebSocket handler
3015
+
3016
+ Returns:
3017
+ Coroutine[WorkflowRunOutput] if stream=False, AsyncIterator[WorkflowRunOutputEvent] if stream=True
3018
+ """
3019
+
3020
+ if stream:
3021
+
3022
+ async def _stream():
3023
+ session, session_state_loaded = await self._aload_session_for_workflow_agent(
3024
+ run_context.session_id, run_context.user_id, run_context.session_state
3025
+ )
3026
+ async for event in self._arun_workflow_agent_stream(
3027
+ agent_input=user_input,
3028
+ session=session,
3029
+ execution_input=execution_input,
3030
+ run_context=run_context,
3031
+ stream=stream,
3032
+ websocket_handler=websocket_handler,
3033
+ **kwargs,
3034
+ ):
3035
+ yield event
3036
+
3037
+ return _stream()
3038
+ else:
3039
+
3040
+ async def _execute():
3041
+ session, session_state_loaded = await self._aload_session_for_workflow_agent(
3042
+ run_context.session_id, run_context.user_id, run_context.session_state
3043
+ )
3044
+ return await self._arun_workflow_agent(
3045
+ agent_input=user_input,
3046
+ session=session,
3047
+ execution_input=execution_input,
3048
+ run_context=run_context,
3049
+ stream=stream,
3050
+ )
3051
+
3052
+ return _execute()
3053
+
3054
+ async def _arun_workflow_agent_stream(
3055
+ self,
3056
+ agent_input: Union[str, Dict[str, Any], List[Any], BaseModel],
3057
+ session: WorkflowSession,
3058
+ execution_input: WorkflowExecutionInput,
3059
+ run_context: RunContext,
3060
+ stream: bool = False,
3061
+ websocket_handler: Optional[WebSocketHandler] = None,
3062
+ **kwargs: Any,
3063
+ ) -> AsyncIterator[WorkflowRunOutputEvent]:
3064
+ """
3065
+ Execute the workflow agent asynchronously in streaming mode.
3066
+
3067
+ The agent's tool (run_workflow) is an async generator that yields workflow events directly.
3068
+ These events bubble up through the agent's streaming and are yielded here.
3069
+ We filter to only yield WorkflowRunOutputEvent to the CLI.
3070
+
3071
+ Yields:
3072
+ WorkflowRunOutputEvent: Events from workflow execution (agent events are filtered)
3073
+ """
3074
+ from typing import get_args
3075
+
3076
+ from agno.run.workflow import WorkflowCompletedEvent, WorkflowRunOutputEvent
3077
+
3078
+ logger.info("Workflow agent enabled - async streaming mode")
3079
+ log_debug(f"User input: {agent_input}")
3080
+
3081
+ self._async_initialize_workflow_agent(
3082
+ session,
3083
+ execution_input,
3084
+ run_context=run_context,
3085
+ stream=stream,
3086
+ websocket_handler=websocket_handler,
3087
+ )
3088
+
3089
+ run_context.dependencies = self._get_workflow_agent_dependencies(session)
3090
+
3091
+ agent_response: Optional[RunOutput] = None
3092
+ workflow_executed = False
3093
+
3094
+ from agno.run.agent import RunContentEvent
3095
+ from agno.run.team import RunContentEvent as TeamRunContentEvent
3096
+ from agno.run.workflow import WorkflowAgentCompletedEvent, WorkflowAgentStartedEvent
3097
+
3098
+ log_debug(f"Executing async workflow agent with streaming - input: {agent_input}...")
3099
+
3100
+ # Create a workflow run response upfront for potential direct answer (will be used only if workflow is not executed)
3101
+ run_id = str(uuid4())
3102
+ direct_reply_run_response = WorkflowRunOutput(
3103
+ run_id=run_id,
3104
+ input=execution_input.input,
3105
+ session_id=session.session_id,
3106
+ workflow_id=self.id,
3107
+ workflow_name=self.name,
3108
+ created_at=int(datetime.now().timestamp()),
3109
+ )
3110
+
3111
+ # Yield WorkflowAgentStartedEvent at the beginning (stored in direct_reply_run_response)
3112
+ agent_started_event = WorkflowAgentStartedEvent(
3113
+ workflow_name=self.name,
3114
+ workflow_id=self.id,
3115
+ session_id=session.session_id,
3116
+ )
3117
+ self._broadcast_to_websocket(agent_started_event, websocket_handler)
3118
+ yield agent_started_event
3119
+
3120
+ # Run the agent in streaming mode and yield all events
3121
+ async for event in self.agent.arun( # type: ignore[union-attr]
3122
+ input=agent_input,
3123
+ stream=True,
3124
+ stream_intermediate_steps=True,
3125
+ yield_run_response=True,
3126
+ session_id=session.session_id,
3127
+ dependencies=run_context.dependencies, # Pass context dynamically per-run
3128
+ session_state=run_context.session_state, # Pass session state dynamically per-run
3129
+ ): # type: ignore
3130
+ if isinstance(event, tuple(get_args(WorkflowRunOutputEvent))):
3131
+ yield event # type: ignore[misc]
3132
+
3133
+ if isinstance(event, WorkflowCompletedEvent):
3134
+ workflow_executed = True
3135
+ log_debug("Workflow execution detected via WorkflowCompletedEvent")
3136
+
3137
+ elif isinstance(event, (RunContentEvent, TeamRunContentEvent)):
3138
+ if event.step_name is None:
3139
+ # This is from the workflow agent itself
3140
+ # Enrich with metadata to mark it as a workflow agent event
3141
+
3142
+ if workflow_executed:
3143
+ continue # Skip if workflow was already executed
3144
+
3145
+ # workflow_agent field is used by consumers of the events to distinguish between workflow agent and regular agent
3146
+ event.workflow_agent = True # type: ignore
3147
+
3148
+ # Broadcast to WebSocket if available (async context only)
3149
+ self._broadcast_to_websocket(event, websocket_handler)
3150
+
3151
+ yield event # type: ignore[misc]
3152
+
3153
+ # Capture the final RunOutput (but don't yield it)
3154
+ if isinstance(event, RunOutput):
3155
+ agent_response = event
3156
+ log_debug(
3157
+ f"Agent response: {str(agent_response.content)[:100] if agent_response.content else 'None'}..."
3158
+ )
3159
+
3160
+ # Handle direct answer case (no workflow execution)
3161
+ if not workflow_executed:
3162
+ # Update the pre-created workflow run response with the direct answer
3163
+ direct_reply_run_response.content = agent_response.content if agent_response else ""
3164
+ direct_reply_run_response.status = RunStatus.completed
3165
+ direct_reply_run_response.workflow_agent_run = agent_response
3166
+
3167
+ workflow_run_response = direct_reply_run_response
3168
+
3169
+ # Store the full agent RunOutput and establish parent-child relationship
3170
+ if agent_response:
3171
+ agent_response.parent_run_id = workflow_run_response.run_id
3172
+ agent_response.workflow_id = workflow_run_response.workflow_id
3173
+
3174
+ # Yield WorkflowAgentCompletedEvent
3175
+ agent_completed_event = WorkflowAgentCompletedEvent(
3176
+ workflow_name=self.name,
3177
+ workflow_id=self.id,
3178
+ run_id=agent_response.run_id if agent_response else None,
3179
+ session_id=session.session_id,
3180
+ content=workflow_run_response.content,
3181
+ )
3182
+ self._broadcast_to_websocket(agent_completed_event, websocket_handler)
3183
+ yield agent_completed_event
3184
+
3185
+ # Yield a workflow completed event with the agent's direct response (used internally by aprint_response_stream)
3186
+ completed_event = WorkflowCompletedEvent(
3187
+ run_id=workflow_run_response.run_id or "",
3188
+ content=workflow_run_response.content,
3189
+ workflow_name=workflow_run_response.workflow_name,
3190
+ workflow_id=workflow_run_response.workflow_id,
3191
+ session_id=workflow_run_response.session_id,
3192
+ step_results=[],
3193
+ metadata={"agent_direct_response": True},
3194
+ )
3195
+ yield completed_event
3196
+
3197
+ # Update the run in session
3198
+ session.upsert_run(run=workflow_run_response)
3199
+ # Save session
3200
+ if self._has_async_db():
3201
+ await self.asave_session(session=session)
3202
+ else:
3203
+ self.save_session(session=session)
3204
+
3205
+ else:
3206
+ # Workflow was executed by the tool
3207
+ if self._has_async_db():
3208
+ reloaded_session = await self.aget_session(session_id=session.session_id)
3209
+ else:
3210
+ reloaded_session = self.get_session(session_id=session.session_id)
3211
+
3212
+ if reloaded_session and reloaded_session.runs and len(reloaded_session.runs) > 0:
3213
+ # Get the last run (which is the one just created by the tool)
3214
+ last_run = reloaded_session.runs[-1]
3215
+
3216
+ # Yield WorkflowAgentCompletedEvent
3217
+ agent_completed_event = WorkflowAgentCompletedEvent(
3218
+ run_id=agent_response.run_id if agent_response else None,
3219
+ workflow_name=self.name,
3220
+ workflow_id=self.id,
3221
+ session_id=session.session_id,
3222
+ content=agent_response.content if agent_response else None,
3223
+ )
3224
+
3225
+ self._broadcast_to_websocket(agent_completed_event, websocket_handler)
3226
+
3227
+ yield agent_completed_event
3228
+
3229
+ # Update the last run with workflow_agent_run
3230
+ last_run.workflow_agent_run = agent_response
3231
+
3232
+ # Store the full agent RunOutput and establish parent-child relationship
3233
+ if agent_response:
3234
+ agent_response.parent_run_id = last_run.run_id
3235
+ agent_response.workflow_id = last_run.workflow_id
3236
+
3237
+ # Save the reloaded session (which has the updated run)
3238
+ if self._has_async_db():
3239
+ await self.asave_session(session=reloaded_session)
3240
+ else:
3241
+ self.save_session(session=reloaded_session)
3242
+
3243
+ else:
3244
+ log_warning("Could not reload session or no runs found after workflow execution")
3245
+
3246
+ async def _arun_workflow_agent(
3247
+ self,
3248
+ agent_input: Union[str, Dict[str, Any], List[Any], BaseModel],
3249
+ session: WorkflowSession,
3250
+ execution_input: WorkflowExecutionInput,
3251
+ run_context: RunContext,
3252
+ stream: bool = False,
3253
+ ) -> WorkflowRunOutput:
3254
+ """
3255
+ Execute the workflow agent asynchronously in non-streaming mode.
3256
+
3257
+ The agent decides whether to run the workflow or answer directly from history.
3258
+
3259
+ Returns:
3260
+ WorkflowRunOutput: The workflow run output with agent response
3261
+ """
3262
+ # Initialize the agent
3263
+ self._async_initialize_workflow_agent(session, execution_input, run_context=run_context, stream=stream)
3264
+
3265
+ # Build dependencies with workflow context
3266
+ run_context.dependencies = self._get_workflow_agent_dependencies(session)
3267
+
3268
+ # Run the agent
3269
+ agent_response: RunOutput = await self.agent.arun( # type: ignore[union-attr]
3270
+ input=agent_input,
3271
+ session_id=session.session_id,
3272
+ dependencies=run_context.dependencies,
3273
+ session_state=run_context.session_state,
3274
+ stream=stream,
3275
+ ) # type: ignore
3276
+
3277
+ # Check if the agent called the workflow tool
3278
+ workflow_executed = False
3279
+ if agent_response.messages:
3280
+ for message in agent_response.messages:
3281
+ if message.role == "assistant" and message.tool_calls:
3282
+ # Check if the tool call is specifically for run_workflow
3283
+ for tool_call in message.tool_calls:
3284
+ # Handle both dict and object formats
3285
+ if isinstance(tool_call, dict):
3286
+ tool_name = tool_call.get("function", {}).get("name", "")
3287
+ else:
3288
+ tool_name = tool_call.function.name if hasattr(tool_call, "function") else ""
3289
+
3290
+ if tool_name == "run_workflow":
3291
+ workflow_executed = True
3292
+ break
3293
+ if workflow_executed:
3294
+ break
3295
+
3296
+ # Handle direct answer case (no workflow execution)
3297
+ if not workflow_executed:
3298
+ # Create a new workflow run output for the direct answer
3299
+ run_id = str(uuid4())
3300
+ workflow_run_response = WorkflowRunOutput(
3301
+ run_id=run_id,
3302
+ input=execution_input.input,
3303
+ session_id=session.session_id,
3304
+ workflow_id=self.id,
3305
+ workflow_name=self.name,
3306
+ created_at=int(datetime.now().timestamp()),
3307
+ content=agent_response.content,
3308
+ status=RunStatus.completed,
3309
+ workflow_agent_run=agent_response,
3310
+ )
3311
+
3312
+ # Store the full agent RunOutput and establish parent-child relationship
3313
+ if agent_response:
3314
+ agent_response.parent_run_id = workflow_run_response.run_id
3315
+ agent_response.workflow_id = workflow_run_response.workflow_id
3316
+
3317
+ # Update the run in session
3318
+ session.upsert_run(run=workflow_run_response)
3319
+ if self._has_async_db():
3320
+ await self.asave_session(session=session)
3321
+ else:
3322
+ self.save_session(session=session)
3323
+
3324
+ log_debug(f"Agent decision: workflow_executed={workflow_executed}")
3325
+
3326
+ return workflow_run_response
3327
+ else:
3328
+ # Workflow was executed by the tool
3329
+ logger.info("=" * 80)
3330
+ logger.info("WORKFLOW AGENT: Called run_workflow tool (async)")
3331
+ logger.info(" ➜ Workflow was executed, retrieving results...")
3332
+ logger.info("=" * 80)
3333
+
3334
+ log_debug("Reloading session from database to get the latest workflow run...")
3335
+ if self._has_async_db():
3336
+ reloaded_session = await self.aget_session(session_id=session.session_id)
3337
+ else:
3338
+ reloaded_session = self.get_session(session_id=session.session_id)
3339
+
3340
+ if reloaded_session and reloaded_session.runs and len(reloaded_session.runs) > 0:
3341
+ # Get the last run (which is the one just created by the tool)
3342
+ last_run = reloaded_session.runs[-1]
3343
+ log_debug(f"Retrieved latest workflow run: {last_run.run_id}")
3344
+ log_debug(f"Total workflow runs in session: {len(reloaded_session.runs)}")
3345
+
3346
+ # Update the last run with workflow_agent_run
3347
+ last_run.workflow_agent_run = agent_response
3348
+
3349
+ # Store the full agent RunOutput and establish parent-child relationship
3350
+ if agent_response:
3351
+ agent_response.parent_run_id = last_run.run_id
3352
+ agent_response.workflow_id = last_run.workflow_id
3353
+
3354
+ # Save the reloaded session (which has the updated run)
3355
+ if self._has_async_db():
3356
+ await self.asave_session(session=reloaded_session)
3357
+ else:
3358
+ self.save_session(session=reloaded_session)
3359
+
3360
+ log_debug(f"Agent decision: workflow_executed={workflow_executed}")
3361
+
3362
+ # Return the last run directly (the WorkflowRunOutput produced by the inner workflow)
3363
+ return last_run
3364
+ else:
3365
+ log_warning("Could not reload session or no runs found after workflow execution")
3366
+ # Return a placeholder error response
3367
+ return WorkflowRunOutput(
3368
+ run_id=str(uuid4()),
3369
+ input=execution_input.input,
3370
+ session_id=session.session_id,
3371
+ workflow_id=self.id,
3372
+ workflow_name=self.name,
3373
+ created_at=int(datetime.now().timestamp()),
3374
+ content="Error: Workflow execution failed",
3375
+ status=RunStatus.error,
3376
+ )
3377
+
3378
+ def cancel_run(self, run_id: str) -> bool:
3379
+ """Cancel a running workflow execution.
3380
+
3381
+ Args:
3382
+ run_id (str): The run_id to cancel.
3383
+
3384
+ Returns:
3385
+ bool: True if the run was found and marked for cancellation, False otherwise.
3386
+ """
3387
+ return cancel_run_global(run_id)
3388
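A hedged sketch of driving cancellation from the consuming side, assuming the yielded events expose `run_id` (as the workflow-level events constructed in this module do); `should_stop` is a hypothetical caller-supplied predicate, not part of agno.

```python
# Cancel a streamed workflow run once some external condition is met.
for event in workflow.run(input="generate the quarterly report", stream=True):
    if should_stop(event):  # hypothetical predicate supplied by the caller
        workflow.cancel_run(event.run_id)
        # keep iterating so the cancellation path can emit its final events
```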
+
3389
+ @overload
3390
+ def run(
3391
+ self,
3392
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
3393
+ additional_data: Optional[Dict[str, Any]] = None,
3394
+ user_id: Optional[str] = None,
3395
+ session_id: Optional[str] = None,
3396
+ session_state: Optional[Dict[str, Any]] = None,
3397
+ audio: Optional[List[Audio]] = None,
3398
+ images: Optional[List[Image]] = None,
3399
+ videos: Optional[List[Video]] = None,
3400
+ files: Optional[List[File]] = None,
3401
+ stream: Literal[False] = False,
3402
+ stream_events: Optional[bool] = None,
3403
+ stream_intermediate_steps: Optional[bool] = None,
3404
+ background: Optional[bool] = False,
3405
+ ) -> WorkflowRunOutput: ...
3406
+
3407
+ @overload
3408
+ def run(
3409
+ self,
3410
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
3411
+ additional_data: Optional[Dict[str, Any]] = None,
3412
+ user_id: Optional[str] = None,
3413
+ session_id: Optional[str] = None,
3414
+ session_state: Optional[Dict[str, Any]] = None,
3415
+ audio: Optional[List[Audio]] = None,
3416
+ images: Optional[List[Image]] = None,
3417
+ videos: Optional[List[Video]] = None,
3418
+ files: Optional[List[File]] = None,
3419
+ stream: Literal[True] = True,
3420
+ stream_events: Optional[bool] = None,
3421
+ stream_intermediate_steps: Optional[bool] = None,
3422
+ background: Optional[bool] = False,
3423
+ ) -> Iterator[WorkflowRunOutputEvent]: ...
3424
+
3425
+ def run(
3426
+ self,
3427
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
3428
+ additional_data: Optional[Dict[str, Any]] = None,
3429
+ user_id: Optional[str] = None,
3430
+ session_id: Optional[str] = None,
3431
+ session_state: Optional[Dict[str, Any]] = None,
3432
+ audio: Optional[List[Audio]] = None,
3433
+ images: Optional[List[Image]] = None,
3434
+ videos: Optional[List[Video]] = None,
3435
+ files: Optional[List[File]] = None,
3436
+ stream: bool = False,
3437
+ stream_events: Optional[bool] = None,
3438
+ stream_intermediate_steps: Optional[bool] = None,
3439
+ background: Optional[bool] = False,
3440
+ **kwargs: Any,
3441
+ ) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
3442
+ """Execute the workflow synchronously with optional streaming"""
3443
+ if self._has_async_db():
3444
+ raise Exception("`run()` is not supported with an async DB. Please use `arun()`.")
3445
+
3446
+ input = self._validate_input(input)
3447
+ if background:
3448
+ raise RuntimeError("Background execution is not supported for sync run()")
3449
+
3450
+ self._set_debug()
3451
+
3452
+ run_id = str(uuid4())
3453
+
3454
+ self.initialize_workflow()
3455
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
3456
+
3457
+ # Read existing session from database
3458
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
3459
+ self._update_metadata(session=workflow_session)
3460
+
3461
+ # Initialize session state
3462
+ session_state = self._initialize_session_state(
3463
+ session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_id
3464
+ )
3465
+ # Update session state from DB
3466
+ session_state = self._load_session_state(session=workflow_session, session_state=session_state)
3467
+
3468
+ log_debug(f"Workflow Run Start: {self.name}", center=True)
3469
+
3470
+ # Use simple defaults
3471
+ stream = stream or self.stream or False
3472
+ stream_events = (stream_events or stream_intermediate_steps) or (
3473
+ self.stream_events or self.stream_intermediate_steps
3474
+ )
3475
+
3476
+ # Can't stream events if streaming is disabled
3477
+ if stream is False:
3478
+ stream_events = False
3479
+
3480
+ log_debug(f"Stream: {stream}")
3481
+ log_debug(f"Total steps: {self._get_step_count()}")
3482
+
3483
+ # Prepare steps
3484
+ self._prepare_steps()
3485
+
3486
+ inputs = WorkflowExecutionInput(
3487
+ input=input,
3488
+ additional_data=additional_data,
3489
+ audio=audio, # type: ignore
3490
+ images=images, # type: ignore
3491
+ videos=videos, # type: ignore
3492
+ files=files, # type: ignore
3493
+ )
3494
+ log_debug(
3495
+ f"Created pipeline input with session state keys: {list(session_state.keys()) if session_state else 'None'}"
3496
+ )
3497
+
3498
+ self.update_agents_and_teams_session_info()
3499
+
3500
+ # Initialize run context
3501
+ run_context = RunContext(
3502
+ run_id=run_id,
3503
+ session_id=session_id,
3504
+ user_id=user_id,
3505
+ session_state=session_state,
3506
+ )
3507
+
3508
+ # Execute workflow agent if configured
3509
+ if self.agent is not None:
3510
+ return self._execute_workflow_agent(
3511
+ user_input=input, # type: ignore
3512
+ session=workflow_session,
3513
+ execution_input=inputs,
3514
+ run_context=run_context,
3515
+ stream=stream,
3516
+ **kwargs,
3517
+ )
3518
+
3519
+ # Create workflow run response for regular workflow execution
3520
+ workflow_run_response = WorkflowRunOutput(
3521
+ run_id=run_id,
3522
+ input=input,
3523
+ session_id=session_id,
3524
+ workflow_id=self.id,
3525
+ workflow_name=self.name,
3526
+ created_at=int(datetime.now().timestamp()),
3527
+ )
3528
+
3529
+ # Start the run metrics timer
3530
+ workflow_run_response.metrics = WorkflowMetrics(steps={})
3531
+ workflow_run_response.metrics.start_timer()
3532
+
3533
+ if stream:
3534
+ return self._execute_stream(
3535
+ session=workflow_session,
3536
+ execution_input=inputs, # type: ignore[arg-type]
3537
+ workflow_run_response=workflow_run_response,
3538
+ stream_events=stream_events,
3539
+ run_context=run_context,
3540
+ **kwargs,
3541
+ )
3542
+ else:
3543
+ return self._execute(
3544
+ session=workflow_session,
3545
+ execution_input=inputs, # type: ignore[arg-type]
3546
+ workflow_run_response=workflow_run_response,
3547
+ run_context=run_context,
3548
+ **kwargs,
3549
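For orientation, a minimal sketch of both calling modes of `run` (a synchronous db is required, per the guard above): `stream=False` returns a single `WorkflowRunOutput`, while `stream=True` returns an iterator of workflow events.

```python
# Non-streaming: one WorkflowRunOutput with aggregated content and metrics.
output = workflow.run(input="summarize this quarter's metrics")
print(output.status, output.content)

# Streaming: consume events as the steps execute.
for event in workflow.run(input="summarize this quarter's metrics", stream=True, stream_events=True):
    print(type(event).__name__)
```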
+ )
3550
+
3551
+ @overload
3552
+ async def arun(
3553
+ self,
3554
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
3555
+ additional_data: Optional[Dict[str, Any]] = None,
3556
+ user_id: Optional[str] = None,
3557
+ session_id: Optional[str] = None,
3558
+ session_state: Optional[Dict[str, Any]] = None,
3559
+ audio: Optional[List[Audio]] = None,
3560
+ images: Optional[List[Image]] = None,
3561
+ videos: Optional[List[Video]] = None,
3562
+ files: Optional[List[File]] = None,
3563
+ stream: Literal[False] = False,
3564
+ stream_events: Optional[bool] = None,
3565
+ stream_intermediate_steps: Optional[bool] = None,
3566
+ background: Optional[bool] = False,
3567
+ websocket: Optional[WebSocket] = None,
3568
+ ) -> WorkflowRunOutput: ...
3569
+
3570
+ @overload
3571
+ def arun(
3572
+ self,
3573
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
3574
+ additional_data: Optional[Dict[str, Any]] = None,
3575
+ user_id: Optional[str] = None,
3576
+ session_id: Optional[str] = None,
3577
+ session_state: Optional[Dict[str, Any]] = None,
3578
+ audio: Optional[List[Audio]] = None,
3579
+ images: Optional[List[Image]] = None,
3580
+ videos: Optional[List[Video]] = None,
3581
+ files: Optional[List[File]] = None,
3582
+ stream: Literal[True] = True,
3583
+ stream_events: Optional[bool] = None,
3584
+ stream_intermediate_steps: Optional[bool] = None,
3585
+ background: Optional[bool] = False,
3586
+ websocket: Optional[WebSocket] = None,
3587
+ ) -> AsyncIterator[WorkflowRunOutputEvent]: ...
3588
+
3589
+ def arun( # type: ignore
3590
+ self,
3591
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
3592
+ additional_data: Optional[Dict[str, Any]] = None,
3593
+ user_id: Optional[str] = None,
3594
+ session_id: Optional[str] = None,
3595
+ session_state: Optional[Dict[str, Any]] = None,
3596
+ audio: Optional[List[Audio]] = None,
3597
+ images: Optional[List[Image]] = None,
3598
+ videos: Optional[List[Video]] = None,
3599
+ files: Optional[List[File]] = None,
3600
+ stream: bool = False,
3601
+ stream_events: Optional[bool] = None,
3602
+ stream_intermediate_steps: Optional[bool] = False,
3603
+ background: Optional[bool] = False,
3604
+ websocket: Optional[WebSocket] = None,
3605
+ **kwargs: Any,
3606
+ ) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
3607
+ """Execute the workflow synchronously with optional streaming"""
3608
+
3609
+ input = self._validate_input(input)
3610
+
3611
+ websocket_handler = None
3612
+ if websocket:
3613
+ from agno.workflow.types import WebSocketHandler
3614
+
3615
+ websocket_handler = WebSocketHandler(websocket=websocket)
3616
+
3617
+ if background:
3618
+ if stream and websocket:
3619
+ # Consider both stream_events and stream_intermediate_steps (deprecated)
3620
+ stream_events = stream_events or stream_intermediate_steps or False
3621
+
3622
+ # Background + Streaming + WebSocket = Real-time events
3623
+ return self._arun_background_stream( # type: ignore
3624
+ input=input,
3625
+ additional_data=additional_data,
3626
+ user_id=user_id,
3627
+ session_id=session_id,
3628
+ session_state=session_state,
3629
+ audio=audio,
3630
+ images=images,
3631
+ videos=videos,
3632
+ files=files,
3633
+ stream_events=stream_events,
3634
+ websocket_handler=websocket_handler,
3635
+ **kwargs,
3636
+ )
3637
+ elif stream and not websocket:
3638
+ # Background + Streaming but no WebSocket = Not supported
3639
+ raise ValueError("Background streaming execution requires a WebSocket for real-time events")
3640
+ else:
3641
+ # Background + Non-streaming = Polling (existing)
3642
+ return self._arun_background( # type: ignore
3643
+ input=input,
3644
+ additional_data=additional_data,
3645
+ user_id=user_id,
3646
+ session_id=session_id,
3647
+ session_state=session_state,
3648
+ audio=audio,
3649
+ images=images,
3650
+ videos=videos,
3651
+ files=files,
3652
+ **kwargs,
3653
+ )
3654
+
3655
+ self._set_debug()
3656
+
3657
+ run_id = str(uuid4())
3658
+
3659
+ self.initialize_workflow()
3660
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
3661
+
3662
+ # Initialize run context
3663
+ run_context = RunContext(
3664
+ run_id=run_id,
3665
+ session_id=session_id,
3666
+ user_id=user_id,
3667
+ session_state=session_state,
3668
+ )
3669
+
3670
+ log_debug(f"Async Workflow Run Start: {self.name}", center=True)
3671
+
3672
+ # Use simple defaults
3673
+ stream = stream or self.stream or False
3674
+ stream_events = (stream_events or stream_intermediate_steps) or (
3675
+ self.stream_events or self.stream_intermediate_steps
3676
+ )
3677
+
3678
+ # Can't stream events if streaming is disabled
3679
+ if stream is False:
3680
+ stream_events = False
3681
+
3682
+ log_debug(f"Stream: {stream}")
3683
+
3684
+ # Prepare steps
3685
+ self._prepare_steps()
3686
+
3687
+ inputs = WorkflowExecutionInput(
3688
+ input=input,
3689
+ additional_data=additional_data,
3690
+ audio=audio, # type: ignore
3691
+ images=images, # type: ignore
3692
+ videos=videos, # type: ignore
3693
+ files=files,
3694
+ )
3695
+ log_debug(
3696
+ f"Created async pipeline input with session state keys: {list(session_state.keys()) if session_state else 'None'}"
3697
+ )
3698
+
3699
+ self.update_agents_and_teams_session_info()
3700
+
3701
+ if self.agent is not None:
3702
+ return self._aexecute_workflow_agent( # type: ignore
3703
+ user_input=input, # type: ignore
3704
+ execution_input=inputs,
3705
+ run_context=run_context,
3706
+ stream=stream,
3707
+ **kwargs,
3708
+ )
3709
+
3710
+ # Create workflow run response for regular workflow execution
3711
+ workflow_run_response = WorkflowRunOutput(
3712
+ run_id=run_id,
3713
+ input=input,
3714
+ session_id=session_id,
3715
+ workflow_id=self.id,
3716
+ workflow_name=self.name,
3717
+ created_at=int(datetime.now().timestamp()),
3718
+ )
3719
+
3720
+ # Start the run metrics timer
3721
+ workflow_run_response.metrics = WorkflowMetrics(steps={})
3722
+ workflow_run_response.metrics.start_timer()
3723
+
3724
+ if stream:
3725
+ return self._aexecute_stream( # type: ignore
3726
+ execution_input=inputs,
3727
+ workflow_run_response=workflow_run_response,
3728
+ session_id=session_id,
3729
+ user_id=user_id,
3730
+ stream_events=stream_events,
3731
+ websocket=websocket,
3732
+ files=files,
3733
+ session_state=session_state,
3734
+ run_context=run_context,
3735
+ **kwargs,
3736
+ )
3737
+ else:
3738
+ return self._aexecute( # type: ignore
3739
+ execution_input=inputs,
3740
+ workflow_run_response=workflow_run_response,
3741
+ session_id=session_id,
3742
+ user_id=user_id,
3743
+ websocket=websocket,
3744
+ files=files,
3745
+ session_state=session_state,
3746
+ run_context=run_context,
3747
+ **kwargs,
3748
+ )
3749
+
3750
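A minimal usage sketch for the async run method above (its name is cut off in this excerpt and assumed here to be Workflow.arun; `wf` is an assumed, already-constructed Workflow instance):

    import asyncio

    async def demo(wf):
        # Foreground, non-streaming: resolves to a WorkflowRunOutput (call form assumed).
        output = await wf.arun(input="Summarize the latest report", user_id="user-1")
        print(output)

        # Background + streaming must carry a WebSocket; without one the method raises
        # ValueError("Background streaming execution requires a WebSocket for real-time events").
        try:
            await wf.arun(input="Long job", background=True, stream=True)
        except ValueError as exc:
            print(exc)

    # asyncio.run(demo(wf))  # wf construction is outside this excerpt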
+ def _prepare_steps(self):
3751
+ """Prepare the steps for execution"""
3752
+ if not callable(self.steps) and self.steps is not None:
3753
+ prepared_steps: List[Union[Step, Steps, Loop, Parallel, Condition, Router]] = []
3754
+ for i, step in enumerate(self.steps): # type: ignore
3755
+ if callable(step) and hasattr(step, "__name__"):
3756
+ step_name = step.__name__
3757
+ log_debug(f"Step {i + 1}: Wrapping callable function '{step_name}'")
3758
+ prepared_steps.append(Step(name=step_name, description="User-defined callable step", executor=step)) # type: ignore
3759
+ elif isinstance(step, Agent):
3760
+ step_name = step.name or f"step_{i + 1}"
3761
+ log_debug(f"Step {i + 1}: Agent '{step_name}'")
3762
+ prepared_steps.append(Step(name=step_name, description=step.description, agent=step))
3763
+ elif isinstance(step, Team):
3764
+ step_name = step.name or f"step_{i + 1}"
3765
+ log_debug(f"Step {i + 1}: Team '{step_name}' with {len(step.members)} members")
3766
+ prepared_steps.append(Step(name=step_name, description=step.description, team=step))
3767
+ elif isinstance(step, Step) and step.add_workflow_history is True and self.db is None:
3768
+ log_warning(
3769
+ f"Step '{step.name or f'step_{i + 1}'}' has add_workflow_history=True "
3770
+ "but no database is configured in the Workflow. "
3771
+ "History won't be persisted. Add a database to persist runs across executions."
3772
+ )
+ prepared_steps.append(step)
3773
+ elif isinstance(step, (Step, Steps, Loop, Parallel, Condition, Router)):
3774
+ step_type = type(step).__name__
3775
+ step_name = getattr(step, "name", f"unnamed_{step_type.lower()}")
3776
+ log_debug(f"Step {i + 1}: {step_type} '{step_name}'")
3777
+ prepared_steps.append(step)
3778
+ else:
3779
+ raise ValueError(f"Invalid step type: {type(step).__name__}")
3780
+
3781
+ self.steps = prepared_steps # type: ignore
3782
+ log_debug("Step preparation completed")
3783
+
3784
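_prepare_steps normalizes a heterogeneous steps list; a sketch of what that allows when composing a workflow (research_agent, review_team and the steps wiring are illustrative assumptions, and the executor signature for bare callables is not shown in this excerpt):

    def postprocess(step_input):
        # bare callable: wrapped as Step(name="postprocess", executor=postprocess)
        return step_input

    steps = [
        research_agent,   # Agent -> wrapped as Step(agent=research_agent)
        review_team,      # Team  -> wrapped as Step(team=review_team)
        postprocess,      # callable -> wrapped using its __name__
    ]
    # Step/Steps/Loop/Parallel/Condition/Router instances pass through unchanged;
    # any other type raises ValueError("Invalid step type: ...").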
+ def print_response(
3785
+ self,
3786
+ input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
3787
+ additional_data: Optional[Dict[str, Any]] = None,
3788
+ user_id: Optional[str] = None,
3789
+ session_id: Optional[str] = None,
3790
+ audio: Optional[List[Audio]] = None,
3791
+ images: Optional[List[Image]] = None,
3792
+ videos: Optional[List[Video]] = None,
3793
+ files: Optional[List[File]] = None,
3794
+ stream: Optional[bool] = None,
3795
+ stream_events: Optional[bool] = None,
3796
+ stream_intermediate_steps: Optional[bool] = None,
3797
+ markdown: bool = True,
3798
+ show_time: bool = True,
3799
+ show_step_details: bool = True,
3800
+ console: Optional[Any] = None,
3801
+ **kwargs: Any,
3802
+ ) -> None:
3803
+ """Print workflow execution with rich formatting and optional streaming
3804
+
3805
+ Args:
3806
+ input: The main query/input for the workflow
3807
+ additional_data: Additional data attached to the input
3808
+ user_id: User ID
3809
+ session_id: Session ID
3810
+ audio: Audio input
3811
+ images: Image input
3812
+ videos: Video input
3813
+ files: File input
3814
+ stream: Whether to stream the response content
3815
+ stream_events: Whether to stream workflow events
3816
+ markdown: Whether to render content as markdown
3817
+ show_time: Whether to show execution time
3818
+ show_step_details: Whether to show individual step outputs
3819
+ console: Rich console instance (optional)
3820
+ (deprecated) stream_intermediate_steps: Whether to stream intermediate step outputs. If None, uses workflow default.
3821
+ """
3822
+ if self._has_async_db():
3823
+ raise Exception("`print_response()` is not supported with an async DB. Please use `aprint_response()`.")
3824
+
3825
+ if stream is None:
3826
+ stream = self.stream or False
3827
+
3828
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
3829
+ stream_events = stream_events or stream_intermediate_steps
3830
+
3831
+ # Can't stream events if streaming is disabled
3832
+ if stream is False:
3833
+ stream_events = False
3834
+
3835
+ if stream_events is None:
3836
+ stream_events = (
3837
+ False
3838
+ if (self.stream_events is None and self.stream_intermediate_steps is None)
3839
+ else (self.stream_intermediate_steps or self.stream_events)
3840
+ )
3841
+
3842
+ if stream:
3843
+ print_response_stream(
3844
+ workflow=self,
3845
+ input=input,
3846
+ user_id=user_id,
3847
+ session_id=session_id,
3848
+ additional_data=additional_data,
3849
+ audio=audio,
3850
+ images=images,
3851
+ videos=videos,
3852
+ files=files,
3853
+ stream_events=stream_events,
3854
+ markdown=markdown,
3855
+ show_time=show_time,
3856
+ show_step_details=show_step_details,
3857
+ console=console,
3858
+ **kwargs,
3859
+ )
3860
+ else:
3861
+ print_response(
3862
+ workflow=self,
3863
+ input=input,
3864
+ user_id=user_id,
3865
+ session_id=session_id,
3866
+ additional_data=additional_data,
3867
+ audio=audio,
3868
+ images=images,
3869
+ videos=videos,
3870
+ files=files,
3871
+ markdown=markdown,
3872
+ show_time=show_time,
3873
+ show_step_details=show_step_details,
3874
+ console=console,
3875
+ **kwargs,
3876
+ )
3877
+
3878
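A usage sketch for print_response, with parameter names taken from the signature above (`wf` is an assumed Workflow instance backed by a synchronous DB, since an async DB raises here):

    wf.print_response(
        input="Draft a weekly status update",
        stream=True,             # stream content as it is produced
        stream_events=True,      # also surface step-level events
        markdown=True,
        show_time=True,
        show_step_details=True,
        session_id="session-123",
        user_id="user-1",
    )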
+ async def aprint_response(
3879
+ self,
3880
+ input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
3881
+ additional_data: Optional[Dict[str, Any]] = None,
3882
+ user_id: Optional[str] = None,
3883
+ session_id: Optional[str] = None,
3884
+ audio: Optional[List[Audio]] = None,
3885
+ images: Optional[List[Image]] = None,
3886
+ videos: Optional[List[Video]] = None,
3887
+ files: Optional[List[File]] = None,
3888
+ stream: Optional[bool] = None,
3889
+ stream_events: Optional[bool] = None,
3890
+ stream_intermediate_steps: Optional[bool] = None,
3891
+ markdown: bool = True,
3892
+ show_time: bool = True,
3893
+ show_step_details: bool = True,
3894
+ console: Optional[Any] = None,
3895
+ **kwargs: Any,
3896
+ ) -> None:
3897
+ """Print workflow execution with rich formatting and optional streaming
3898
+
3899
+ Args:
3900
+ input: The main message/input for the workflow
3901
+ additional_data: Additional data attached to the input
3902
+ user_id: User ID
3903
+ session_id: Session ID
3904
+ audio: Audio input
3905
+ images: Image input
3906
+ videos: Video input
3907
+ files: Files input
3908
+ stream: Whether to stream the response content
3909
+ stream_events: Whether to stream workflow events
3910
+ markdown: Whether to render content as markdown
3911
+ show_time: Whether to show execution time
3912
+ show_step_details: Whether to show individual step outputs
3913
+ console: Rich console instance (optional)
3914
+ (deprecated) stream_intermediate_steps: Whether to stream intermediate step outputs. If None, uses workflow default.
3915
+ """
3916
+ if stream is None:
3917
+ stream = self.stream or False
3918
+
3919
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
3920
+ stream_events = stream_events or stream_intermediate_steps
3921
+
3922
+ # Can't stream events if streaming is disabled
3923
+ if stream is False:
3924
+ stream_events = False
3925
+
3926
+ if stream_events is None:
3927
+ stream_events = (
3928
+ False
3929
+ if (self.stream_events is None and self.stream_intermediate_steps is None)
3930
+ else (self.stream_intermediate_steps or self.stream_events)
3931
+ )
3932
+
3933
+ if stream:
3934
+ await aprint_response_stream(
3935
+ workflow=self,
3936
+ input=input,
3937
+ additional_data=additional_data,
3938
+ user_id=user_id,
3939
+ session_id=session_id,
3940
+ audio=audio,
3941
+ images=images,
3942
+ videos=videos,
3943
+ files=files,
3944
+ stream_events=stream_events,
3945
+ markdown=markdown,
3946
+ show_time=show_time,
3947
+ show_step_details=show_step_details,
3948
+ console=console,
3949
+ **kwargs,
3950
+ )
3951
+ else:
3952
+ await aprint_response(
3953
+ workflow=self,
3954
+ input=input,
3955
+ additional_data=additional_data,
3956
+ user_id=user_id,
3957
+ session_id=session_id,
3958
+ audio=audio,
3959
+ images=images,
3960
+ videos=videos,
3961
+ files=files,
3962
+ markdown=markdown,
3963
+ show_time=show_time,
3964
+ show_step_details=show_step_details,
3965
+ console=console,
3966
+ **kwargs,
3967
+ )
3968
+
3969
+ def to_dict(self) -> Dict[str, Any]:
3970
+ """Convert workflow to dictionary representation"""
3971
+
3972
+ def serialize_step(step):
3973
+ # Handle callable functions (not wrapped in Step objects)
3974
+ if callable(step) and hasattr(step, "__name__"):
3975
+ step_dict = {
3976
+ "name": step.__name__,
3977
+ "description": "User-defined callable step",
3978
+ "type": StepType.STEP.value,
3979
+ }
3980
+ return step_dict
3981
+
3982
+ # Handle Agent and Team objects directly
3983
+ if isinstance(step, Agent):
3984
+ step_dict = {
3985
+ "name": step.name or "unnamed_agent",
3986
+ "description": step.description or "Agent step",
3987
+ "type": StepType.STEP.value,
3988
+ "agent": step,
3989
+ }
3990
+ return step_dict
3991
+
3992
+ if isinstance(step, Team):
3993
+ step_dict = {
3994
+ "name": step.name or "unnamed_team",
3995
+ "description": step.description or "Team step",
3996
+ "type": StepType.STEP.value,
3997
+ "team": step,
3998
+ }
3999
+ return step_dict
4000
+
4001
+ step_dict = {
4002
+ "name": step.name if hasattr(step, "name") else f"unnamed_{type(step).__name__.lower()}",
4003
+ "description": step.description if hasattr(step, "description") else "User-defined callable step",
4004
+ "type": STEP_TYPE_MAPPING[type(step)].value, # type: ignore
4005
+ }
4006
+
4007
+ # Handle agent/team/tools
4008
+ if hasattr(step, "agent"):
4009
+ step_dict["agent"] = step.agent if hasattr(step, "agent") else None # type: ignore
4010
+ if hasattr(step, "team"):
4011
+ step_dict["team"] = step.team if hasattr(step, "team") else None # type: ignore
4012
+
4013
+ # Handle nested steps for Router/Loop
4014
+ if isinstance(step, Router):
4015
+ step_dict["steps"] = (
4016
+ [serialize_step(step) for step in step.choices] if hasattr(step, "choices") else None
4017
+ )
4018
+
4019
+ elif isinstance(step, (Loop, Condition, Steps, Parallel)):
4020
+ step_dict["steps"] = [serialize_step(step) for step in step.steps] if hasattr(step, "steps") else None
4021
+
4022
+ return step_dict
4023
+
4024
+ if self.steps is None or callable(self.steps):
4025
+ steps_list = []
4026
+ elif isinstance(self.steps, Steps):
4027
+ steps_list = self.steps.steps
4028
+ else:
4029
+ steps_list = self.steps
4030
+
4031
+ return {
4032
+ "name": self.name,
4033
+ "workflow_id": self.id,
4034
+ "description": self.description,
4035
+ "steps": [serialize_step(s) for s in steps_list],
4036
+ "session_id": self.session_id,
4037
+ }
4038
+
4039
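From the return statement above, to_dict() produces a plain dict; a sketch of the expected top-level shape:

    snapshot = wf.to_dict()
    # {"name": ..., "workflow_id": ..., "description": ...,
    #  "steps": [{"name": ..., "description": ..., "type": ..., ...}, ...],
    #  "session_id": ...}
    assert set(snapshot) == {"name", "workflow_id", "description", "steps", "session_id"}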
+ def _calculate_session_metrics_from_workflow_metrics(self, workflow_metrics: WorkflowMetrics) -> Metrics:
4040
+ """Calculate session metrics by aggregating all step metrics from workflow metrics"""
4041
+ session_metrics = Metrics()
4042
+
4043
+ # Aggregate metrics from all steps
4044
+ for step_name, step_metrics in workflow_metrics.steps.items():
4045
+ if step_metrics.metrics:
4046
+ session_metrics += step_metrics.metrics
4047
+
4048
+ session_metrics.time_to_first_token = None
4049
+
4050
+ return session_metrics
4051
+
4052
+ def _get_session_metrics(self, session: WorkflowSession) -> Metrics:
4053
+ """Get existing session metrics from the database"""
4054
+ if session.session_data and "session_metrics" in session.session_data:
4055
+ session_metrics_from_db = session.session_data.get("session_metrics")
4056
+ if session_metrics_from_db is not None:
4057
+ if isinstance(session_metrics_from_db, dict):
4058
+ return Metrics(**session_metrics_from_db)
4059
+ elif isinstance(session_metrics_from_db, Metrics):
4060
+ return session_metrics_from_db
4061
+ return Metrics()
4062
+
4063
+ def _update_session_metrics(self, session: WorkflowSession, workflow_run_response: WorkflowRunOutput):
4064
+ """Calculate and update session metrics"""
4065
+ # Get existing session metrics
4066
+ session_metrics = self._get_session_metrics(session=session)
4067
+
4068
+ # If workflow has metrics, convert and add them to session metrics
4069
+ if workflow_run_response.metrics:
4070
+ run_session_metrics = self._calculate_session_metrics_from_workflow_metrics(workflow_run_response.metrics) # type: ignore[arg-type]
4071
+
4072
+ session_metrics += run_session_metrics
4073
+
4074
+ session_metrics.time_to_first_token = None
4075
+
4076
+ # Store updated session metrics (converted to a dict for JSON serialization)
4077
+ if not session.session_data:
4078
+ session.session_data = {}
4079
+ session.session_data["session_metrics"] = session_metrics.to_dict()
4080
+
4081
+ async def aget_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
4082
+ """Get the session metrics for the given session ID and user ID."""
4083
+ session_id = session_id or self.session_id
4084
+ if session_id is None:
4085
+ raise Exception("Session ID is required")
4086
+
4087
+ session = await self.aget_session(session_id=session_id) # type: ignore
4088
+ if session is None:
4089
+ raise Exception("Session not found")
4090
+
4091
+ return self._get_session_metrics(session=session)
4092
+
4093
+ def get_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
4094
+ """Get the session metrics for the given session ID and user ID."""
4095
+ session_id = session_id or self.session_id
4096
+ if session_id is None:
4097
+ raise Exception("Session ID is required")
4098
+
4099
+ session = self.get_session(session_id=session_id)
4100
+ if session is None:
4101
+ raise Exception("Session not found")
4102
+
4103
+ return self._get_session_metrics(session=session)
4104
+
4105
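A sketch of reading aggregated session metrics (both accessors raise a plain Exception when the session id is missing or the session is not found, per the bodies above):

    try:
        metrics = wf.get_session_metrics(session_id="session-123")
        # async variant: metrics = await wf.aget_session_metrics(session_id="session-123")
        print(metrics.to_dict() if metrics else None)
    except Exception as exc:
        print(f"Could not load session metrics: {exc}")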
+ def update_agents_and_teams_session_info(self):
4106
+ """Update agents and teams with workflow session information"""
4107
+ log_debug("Updating agents and teams with session information")
4108
+ # Initialize steps - only if steps is iterable (not callable)
4109
+ if self.steps and not callable(self.steps):
4110
+ steps_list = self.steps.steps if isinstance(self.steps, Steps) else self.steps
4111
+ for step in steps_list:
4112
+ # TODO: Properly handle steps nested inside other primitives
4113
+ if isinstance(step, Step):
4114
+ active_executor = step.active_executor
4115
+
4116
+ if hasattr(active_executor, "workflow_id"):
4117
+ active_executor.workflow_id = self.id
4118
+
4119
+ # If it's a team, update all members
4120
+ if hasattr(active_executor, "members"):
4121
+ for member in active_executor.members: # type: ignore
4122
+ if hasattr(member, "workflow_id"):
4123
+ member.workflow_id = self.id
4124
+
4125
+ ###########################################################################
4126
+ # Telemetry functions
4127
+ ###########################################################################
4128
+
4129
+ def _get_telemetry_data(self) -> Dict[str, Any]:
4130
+ """Get the telemetry data for the workflow"""
4131
+ return {
4132
+ "workflow_id": self.id,
4133
+ "db_type": self.db.__class__.__name__ if self.db else None,
4134
+ "has_input_schema": self.input_schema is not None,
4135
+ }
4136
+
4137
+ def _log_workflow_telemetry(self, session_id: str, run_id: Optional[str] = None) -> None:
4138
+ """Send a telemetry event to the API for a created Workflow run"""
4139
+
4140
+ self._set_telemetry()
4141
+ if not self.telemetry:
4142
+ return
4143
+
4144
+ from agno.api.workflow import WorkflowRunCreate, create_workflow_run
4145
+
4146
+ try:
4147
+ create_workflow_run(
4148
+ workflow=WorkflowRunCreate(session_id=session_id, run_id=run_id, data=self._get_telemetry_data()),
4149
+ )
4150
+ except Exception as e:
4151
+ log_debug(f"Could not create Workflow run telemetry event: {e}")
4152
+
4153
+ async def _alog_workflow_telemetry(self, session_id: str, run_id: Optional[str] = None) -> None:
4154
+ """Send a telemetry event to the API for a created Workflow async run"""
4155
+
4156
+ self._set_telemetry()
4157
+ if not self.telemetry:
4158
+ return
4159
+
4160
+ from agno.api.workflow import WorkflowRunCreate, acreate_workflow_run
4161
+
4162
+ try:
4163
+ await acreate_workflow_run(
4164
+ workflow=WorkflowRunCreate(session_id=session_id, run_id=run_id, data=self._get_telemetry_data())
4165
+ )
4166
+ except Exception as e:
4167
+ log_debug(f"Could not create Workflow run telemetry event: {e}")
4168
+
4169
+ def cli_app(
4170
+ self,
4171
+ input: Optional[str] = None,
4172
+ session_id: Optional[str] = None,
4173
+ user_id: Optional[str] = None,
4174
+ user: str = "User",
4175
+ emoji: str = ":technologist:",
4176
+ stream: Optional[bool] = None,
4177
+ stream_events: Optional[bool] = None,
4178
+ stream_intermediate_steps: Optional[bool] = None,
4179
+ markdown: bool = True,
4180
+ show_time: bool = True,
4181
+ show_step_details: bool = True,
4182
+ exit_on: Optional[List[str]] = None,
4183
+ **kwargs: Any,
4184
+ ) -> None:
4185
+ """
4186
+ Run an interactive command-line interface to interact with the workflow.
4187
+
4188
+ This method creates a CLI interface that allows users to interact with the workflow
4189
+ either by providing a single input or through continuous interactive prompts.
4190
+
4191
+ Args:
4192
+ input: Optional initial input to process before starting interactive mode.
4193
+ session_id: Optional session identifier for maintaining conversation context.
4194
+ user_id: Optional user identifier for tracking user-specific data.
4195
+ user: Display name for the user in the CLI prompt. Defaults to "User".
4196
+ emoji: Emoji to display next to the user name in prompts. Defaults to ":technologist:".
4197
+ stream: Whether to stream the workflow response. If None, uses workflow default.
4198
+ stream_events: Whether to stream workflow events. If None, uses workflow default.
4199
+ markdown: Whether to render output as markdown. Defaults to True.
4200
+ show_time: Whether to display timestamps in the output. Defaults to True.
4201
+ show_step_details: Whether to show detailed step information. Defaults to True.
4202
+ exit_on: List of commands that will exit the CLI. Defaults to ["exit", "quit", "bye", "stop"].
4203
+ (deprecated) stream_intermediate_steps: Whether to stream intermediate step outputs. If None, uses workflow default.
4204
+ **kwargs: Additional keyword arguments passed to the workflow's print_response method.
4205
+
4206
+ Returns:
4207
+ None: This method runs interactively and does not return a value.
4208
+ """
4209
+
4210
+ from rich.prompt import Prompt
4211
+
4212
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
4213
+ stream_events = stream_events or stream_intermediate_steps or False
4214
+
4215
+ if input:
4216
+ self.print_response(
4217
+ input=input,
4218
+ stream=stream,
4219
+ stream_events=stream_events,
4220
+ markdown=markdown,
4221
+ show_time=show_time,
4222
+ show_step_details=show_step_details,
4223
+ user_id=user_id,
4224
+ session_id=session_id,
4225
+ **kwargs,
4226
+ )
4227
+
4228
+ _exit_on = exit_on or ["exit", "quit", "bye", "stop"]
4229
+ while True:
4230
+ message = Prompt.ask(f"[bold] {emoji} {user} [/bold]")
4231
+ if message in _exit_on:
4232
+ break
4233
+
4234
+ self.print_response(
4235
+ input=message,
4236
+ stream=stream,
4237
+ stream_events=stream_events,
4238
+ markdown=markdown,
4239
+ show_time=show_time,
4240
+ show_step_details=show_step_details,
4241
+ user_id=user_id,
4242
+ session_id=session_id,
4243
+ **kwargs,
4244
+ )
4245
+
4246
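A sketch of launching the interactive CLI (arguments from the signature above; the loop blocks until one of the exit_on commands is entered):

    wf.cli_app(
        input="Hello",                       # optional first message before the prompt loop
        user="Analyst",
        stream=True,
        markdown=True,
        exit_on=["exit", "quit", "bye", "stop"],
    )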
+ async def acli_app(
4247
+ self,
4248
+ input: Optional[str] = None,
4249
+ session_id: Optional[str] = None,
4250
+ user_id: Optional[str] = None,
4251
+ user: str = "User",
4252
+ emoji: str = ":technologist:",
4253
+ stream: Optional[bool] = None,
4254
+ stream_events: Optional[bool] = None,
4255
+ stream_intermediate_steps: Optional[bool] = None,
4256
+ markdown: bool = True,
4257
+ show_time: bool = True,
4258
+ show_step_details: bool = True,
4259
+ exit_on: Optional[List[str]] = None,
4260
+ **kwargs: Any,
4261
+ ) -> None:
4262
+ """
4263
+ Run an interactive command-line interface to interact with the workflow.
4264
+
4265
+ This method creates a CLI interface that allows users to interact with the workflow
4266
+ either by providing a single input or through continuous interactive prompts.
4267
+
4268
+ Args:
4269
+ input: Optional initial input to process before starting interactive mode.
4270
+ session_id: Optional session identifier for maintaining conversation context.
4271
+ user_id: Optional user identifier for tracking user-specific data.
4272
+ user: Display name for the user in the CLI prompt. Defaults to "User".
4273
+ emoji: Emoji to display next to the user name in prompts. Defaults to ":technologist:".
4274
+ stream: Whether to stream the workflow response. If None, uses workflow default.
4275
+ stream_events: Whether to stream events from the workflow. If None, uses workflow default.
4276
+ markdown: Whether to render output as markdown. Defaults to True.
4277
+ show_time: Whether to display timestamps in the output. Defaults to True.
4278
+ show_step_details: Whether to show detailed step information. Defaults to True.
4279
+ exit_on: List of commands that will exit the CLI. Defaults to ["exit", "quit", "bye", "stop"].
4280
+ (deprecated) stream_intermediate_steps: Whether to stream intermediate step outputs. If None, uses workflow default.
4281
+ **kwargs: Additional keyword arguments passed to the workflow's aprint_response method.
4282
+
4283
+ Returns:
4284
+ None: This method runs interactively and does not return a value.
4285
+ """
4286
+
4287
+ from rich.prompt import Prompt
4288
+
4289
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
4290
+ stream_events = stream_events or stream_intermediate_steps or False
4291
+
4292
+ if input:
4293
+ await self.aprint_response(
4294
+ input=input,
4295
+ stream=stream,
4296
+ stream_events=stream_events,
4297
+ markdown=markdown,
4298
+ show_time=show_time,
4299
+ show_step_details=show_step_details,
4300
+ user_id=user_id,
4301
+ session_id=session_id,
4302
+ **kwargs,
4303
+ )
4304
+
4305
+ _exit_on = exit_on or ["exit", "quit", "bye", "stop"]
4306
+ while True:
4307
+ message = Prompt.ask(f"[bold] {emoji} {user} [/bold]")
4308
+ if message in _exit_on:
4309
+ break
4310
+
4311
+ await self.aprint_response(
4312
+ input=message,
4313
+ stream=stream,
4314
+ stream_events=stream_events,
4315
+ markdown=markdown,
4316
+ show_time=show_time,
4317
+ show_step_details=show_step_details,
4318
+ user_id=user_id,
4319
+ session_id=session_id,
4320
+ **kwargs,
4321
+ )
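acli_app is a coroutine, so it needs an event loop; a minimal sketch (assuming `wf` as above):

    import asyncio

    asyncio.run(wf.acli_app(stream=True, stream_events=True, markdown=True))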