agno 1.8.1__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in that registry.
Files changed (590)
  1. agno/__init__.py +8 -0
  2. agno/agent/__init__.py +19 -27
  3. agno/agent/agent.py +3143 -4170
  4. agno/api/agent.py +11 -67
  5. agno/api/api.py +5 -46
  6. agno/api/evals.py +8 -19
  7. agno/api/os.py +17 -0
  8. agno/api/routes.py +6 -41
  9. agno/api/schemas/__init__.py +9 -0
  10. agno/api/schemas/agent.py +5 -21
  11. agno/api/schemas/evals.py +7 -16
  12. agno/api/schemas/os.py +14 -0
  13. agno/api/schemas/team.py +5 -21
  14. agno/api/schemas/utils.py +21 -0
  15. agno/api/schemas/workflows.py +11 -7
  16. agno/api/settings.py +53 -0
  17. agno/api/team.py +11 -66
  18. agno/api/workflow.py +28 -0
  19. agno/cloud/aws/base.py +214 -0
  20. agno/cloud/aws/s3/__init__.py +2 -0
  21. agno/cloud/aws/s3/api_client.py +43 -0
  22. agno/cloud/aws/s3/bucket.py +195 -0
  23. agno/cloud/aws/s3/object.py +57 -0
  24. agno/db/__init__.py +24 -0
  25. agno/db/base.py +245 -0
  26. agno/db/dynamo/__init__.py +3 -0
  27. agno/db/dynamo/dynamo.py +1743 -0
  28. agno/db/dynamo/schemas.py +278 -0
  29. agno/db/dynamo/utils.py +684 -0
  30. agno/db/firestore/__init__.py +3 -0
  31. agno/db/firestore/firestore.py +1432 -0
  32. agno/db/firestore/schemas.py +130 -0
  33. agno/db/firestore/utils.py +278 -0
  34. agno/db/gcs_json/__init__.py +3 -0
  35. agno/db/gcs_json/gcs_json_db.py +1001 -0
  36. agno/db/gcs_json/utils.py +194 -0
  37. agno/db/in_memory/__init__.py +3 -0
  38. agno/db/in_memory/in_memory_db.py +882 -0
  39. agno/db/in_memory/utils.py +172 -0
  40. agno/db/json/__init__.py +3 -0
  41. agno/db/json/json_db.py +1045 -0
  42. agno/db/json/utils.py +196 -0
  43. agno/db/migrations/v1_to_v2.py +162 -0
  44. agno/db/mongo/__init__.py +3 -0
  45. agno/db/mongo/mongo.py +1416 -0
  46. agno/db/mongo/schemas.py +77 -0
  47. agno/db/mongo/utils.py +204 -0
  48. agno/db/mysql/__init__.py +3 -0
  49. agno/db/mysql/mysql.py +1719 -0
  50. agno/db/mysql/schemas.py +124 -0
  51. agno/db/mysql/utils.py +297 -0
  52. agno/db/postgres/__init__.py +3 -0
  53. agno/db/postgres/postgres.py +1710 -0
  54. agno/db/postgres/schemas.py +124 -0
  55. agno/db/postgres/utils.py +280 -0
  56. agno/db/redis/__init__.py +3 -0
  57. agno/db/redis/redis.py +1367 -0
  58. agno/db/redis/schemas.py +109 -0
  59. agno/db/redis/utils.py +288 -0
  60. agno/db/schemas/__init__.py +3 -0
  61. agno/db/schemas/evals.py +33 -0
  62. agno/db/schemas/knowledge.py +40 -0
  63. agno/db/schemas/memory.py +46 -0
  64. agno/db/singlestore/__init__.py +3 -0
  65. agno/db/singlestore/schemas.py +116 -0
  66. agno/db/singlestore/singlestore.py +1712 -0
  67. agno/db/singlestore/utils.py +326 -0
  68. agno/db/sqlite/__init__.py +3 -0
  69. agno/db/sqlite/schemas.py +119 -0
  70. agno/db/sqlite/sqlite.py +1676 -0
  71. agno/db/sqlite/utils.py +268 -0
  72. agno/db/utils.py +88 -0
  73. agno/eval/__init__.py +14 -0
  74. agno/eval/accuracy.py +154 -48
  75. agno/eval/performance.py +88 -23
  76. agno/eval/reliability.py +73 -20
  77. agno/eval/utils.py +23 -13
  78. agno/integrations/discord/__init__.py +3 -0
  79. agno/{app → integrations}/discord/client.py +15 -11
  80. agno/knowledge/__init__.py +2 -2
  81. agno/{document → knowledge}/chunking/agentic.py +2 -2
  82. agno/{document → knowledge}/chunking/document.py +2 -2
  83. agno/{document → knowledge}/chunking/fixed.py +3 -3
  84. agno/{document → knowledge}/chunking/markdown.py +2 -2
  85. agno/{document → knowledge}/chunking/recursive.py +2 -2
  86. agno/{document → knowledge}/chunking/row.py +2 -2
  87. agno/knowledge/chunking/semantic.py +59 -0
  88. agno/knowledge/chunking/strategy.py +121 -0
  89. agno/knowledge/content.py +74 -0
  90. agno/knowledge/document/__init__.py +5 -0
  91. agno/{document → knowledge/document}/base.py +12 -2
  92. agno/knowledge/embedder/__init__.py +5 -0
  93. agno/{embedder → knowledge/embedder}/aws_bedrock.py +127 -1
  94. agno/{embedder → knowledge/embedder}/azure_openai.py +65 -1
  95. agno/{embedder → knowledge/embedder}/base.py +6 -0
  96. agno/{embedder → knowledge/embedder}/cohere.py +72 -1
  97. agno/{embedder → knowledge/embedder}/fastembed.py +17 -1
  98. agno/{embedder → knowledge/embedder}/fireworks.py +1 -1
  99. agno/{embedder → knowledge/embedder}/google.py +74 -1
  100. agno/{embedder → knowledge/embedder}/huggingface.py +36 -2
  101. agno/{embedder → knowledge/embedder}/jina.py +48 -2
  102. agno/knowledge/embedder/langdb.py +22 -0
  103. agno/knowledge/embedder/mistral.py +139 -0
  104. agno/{embedder → knowledge/embedder}/nebius.py +1 -1
  105. agno/{embedder → knowledge/embedder}/ollama.py +54 -3
  106. agno/knowledge/embedder/openai.py +223 -0
  107. agno/{embedder → knowledge/embedder}/sentence_transformer.py +16 -1
  108. agno/{embedder → knowledge/embedder}/together.py +1 -1
  109. agno/{embedder → knowledge/embedder}/voyageai.py +49 -1
  110. agno/knowledge/knowledge.py +1551 -0
  111. agno/knowledge/reader/__init__.py +7 -0
  112. agno/{document → knowledge}/reader/arxiv_reader.py +32 -4
  113. agno/knowledge/reader/base.py +88 -0
  114. agno/{document → knowledge}/reader/csv_reader.py +47 -65
  115. agno/knowledge/reader/docx_reader.py +83 -0
  116. agno/{document → knowledge}/reader/firecrawl_reader.py +42 -21
  117. agno/{document → knowledge}/reader/json_reader.py +30 -9
  118. agno/{document → knowledge}/reader/markdown_reader.py +58 -9
  119. agno/{document → knowledge}/reader/pdf_reader.py +71 -126
  120. agno/knowledge/reader/reader_factory.py +268 -0
  121. agno/knowledge/reader/s3_reader.py +101 -0
  122. agno/{document → knowledge}/reader/text_reader.py +31 -10
  123. agno/knowledge/reader/url_reader.py +128 -0
  124. agno/knowledge/reader/web_search_reader.py +366 -0
  125. agno/{document → knowledge}/reader/website_reader.py +37 -10
  126. agno/knowledge/reader/wikipedia_reader.py +59 -0
  127. agno/knowledge/reader/youtube_reader.py +78 -0
  128. agno/knowledge/remote_content/remote_content.py +88 -0
  129. agno/{reranker → knowledge/reranker}/base.py +1 -1
  130. agno/{reranker → knowledge/reranker}/cohere.py +2 -2
  131. agno/{reranker → knowledge/reranker}/infinity.py +2 -2
  132. agno/{reranker → knowledge/reranker}/sentence_transformer.py +2 -2
  133. agno/knowledge/types.py +30 -0
  134. agno/knowledge/utils.py +169 -0
  135. agno/media.py +269 -268
  136. agno/memory/__init__.py +2 -10
  137. agno/memory/manager.py +1003 -148
  138. agno/models/aimlapi/__init__.py +2 -2
  139. agno/models/aimlapi/aimlapi.py +6 -6
  140. agno/models/anthropic/claude.py +131 -131
  141. agno/models/aws/bedrock.py +110 -182
  142. agno/models/aws/claude.py +64 -18
  143. agno/models/azure/ai_foundry.py +73 -23
  144. agno/models/base.py +346 -290
  145. agno/models/cerebras/cerebras.py +84 -27
  146. agno/models/cohere/chat.py +106 -98
  147. agno/models/google/gemini.py +105 -46
  148. agno/models/groq/groq.py +97 -35
  149. agno/models/huggingface/huggingface.py +92 -27
  150. agno/models/ibm/watsonx.py +72 -13
  151. agno/models/litellm/chat.py +85 -13
  152. agno/models/message.py +46 -151
  153. agno/models/meta/llama.py +85 -49
  154. agno/models/metrics.py +120 -0
  155. agno/models/mistral/mistral.py +90 -21
  156. agno/models/ollama/__init__.py +0 -2
  157. agno/models/ollama/chat.py +85 -47
  158. agno/models/openai/chat.py +154 -37
  159. agno/models/openai/responses.py +178 -105
  160. agno/models/perplexity/perplexity.py +26 -2
  161. agno/models/portkey/portkey.py +0 -7
  162. agno/models/response.py +15 -9
  163. agno/models/utils.py +20 -0
  164. agno/models/vercel/__init__.py +2 -2
  165. agno/models/vercel/v0.py +1 -1
  166. agno/models/vllm/__init__.py +2 -2
  167. agno/models/vllm/vllm.py +3 -3
  168. agno/models/xai/xai.py +10 -10
  169. agno/os/__init__.py +3 -0
  170. agno/os/app.py +497 -0
  171. agno/os/auth.py +47 -0
  172. agno/os/config.py +103 -0
  173. agno/os/interfaces/agui/__init__.py +3 -0
  174. agno/os/interfaces/agui/agui.py +31 -0
  175. agno/{app/agui/async_router.py → os/interfaces/agui/router.py} +16 -16
  176. agno/{app → os/interfaces}/agui/utils.py +77 -33
  177. agno/os/interfaces/base.py +21 -0
  178. agno/os/interfaces/slack/__init__.py +3 -0
  179. agno/{app/slack/async_router.py → os/interfaces/slack/router.py} +3 -5
  180. agno/os/interfaces/slack/slack.py +32 -0
  181. agno/os/interfaces/whatsapp/__init__.py +3 -0
  182. agno/{app/whatsapp/async_router.py → os/interfaces/whatsapp/router.py} +4 -7
  183. agno/os/interfaces/whatsapp/whatsapp.py +29 -0
  184. agno/os/mcp.py +235 -0
  185. agno/os/router.py +1400 -0
  186. agno/os/routers/__init__.py +3 -0
  187. agno/os/routers/evals/__init__.py +3 -0
  188. agno/os/routers/evals/evals.py +393 -0
  189. agno/os/routers/evals/schemas.py +142 -0
  190. agno/os/routers/evals/utils.py +161 -0
  191. agno/os/routers/knowledge/__init__.py +3 -0
  192. agno/os/routers/knowledge/knowledge.py +850 -0
  193. agno/os/routers/knowledge/schemas.py +118 -0
  194. agno/os/routers/memory/__init__.py +3 -0
  195. agno/os/routers/memory/memory.py +410 -0
  196. agno/os/routers/memory/schemas.py +58 -0
  197. agno/os/routers/metrics/__init__.py +3 -0
  198. agno/os/routers/metrics/metrics.py +178 -0
  199. agno/os/routers/metrics/schemas.py +47 -0
  200. agno/os/routers/session/__init__.py +3 -0
  201. agno/os/routers/session/session.py +536 -0
  202. agno/os/schema.py +945 -0
  203. agno/{app/playground → os}/settings.py +7 -15
  204. agno/os/utils.py +270 -0
  205. agno/reasoning/azure_ai_foundry.py +4 -4
  206. agno/reasoning/deepseek.py +4 -4
  207. agno/reasoning/default.py +6 -11
  208. agno/reasoning/groq.py +4 -4
  209. agno/reasoning/helpers.py +4 -6
  210. agno/reasoning/ollama.py +4 -4
  211. agno/reasoning/openai.py +4 -4
  212. agno/run/agent.py +633 -0
  213. agno/run/base.py +53 -77
  214. agno/run/cancel.py +81 -0
  215. agno/run/team.py +243 -96
  216. agno/run/workflow.py +550 -12
  217. agno/session/__init__.py +10 -0
  218. agno/session/agent.py +244 -0
  219. agno/session/summary.py +225 -0
  220. agno/session/team.py +262 -0
  221. agno/{storage/session/v2 → session}/workflow.py +47 -24
  222. agno/team/__init__.py +15 -16
  223. agno/team/team.py +3260 -4824
  224. agno/tools/agentql.py +14 -5
  225. agno/tools/airflow.py +9 -4
  226. agno/tools/api.py +7 -3
  227. agno/tools/apify.py +2 -46
  228. agno/tools/arxiv.py +8 -3
  229. agno/tools/aws_lambda.py +7 -5
  230. agno/tools/aws_ses.py +7 -1
  231. agno/tools/baidusearch.py +4 -1
  232. agno/tools/bitbucket.py +4 -4
  233. agno/tools/brandfetch.py +14 -11
  234. agno/tools/bravesearch.py +4 -1
  235. agno/tools/brightdata.py +43 -23
  236. agno/tools/browserbase.py +13 -4
  237. agno/tools/calcom.py +12 -10
  238. agno/tools/calculator.py +10 -27
  239. agno/tools/cartesia.py +20 -17
  240. agno/tools/{clickup_tool.py → clickup.py} +12 -25
  241. agno/tools/confluence.py +8 -8
  242. agno/tools/crawl4ai.py +7 -1
  243. agno/tools/csv_toolkit.py +9 -8
  244. agno/tools/dalle.py +22 -12
  245. agno/tools/daytona.py +13 -16
  246. agno/tools/decorator.py +6 -3
  247. agno/tools/desi_vocal.py +17 -8
  248. agno/tools/discord.py +11 -8
  249. agno/tools/docker.py +30 -42
  250. agno/tools/duckdb.py +34 -53
  251. agno/tools/duckduckgo.py +8 -7
  252. agno/tools/e2b.py +62 -62
  253. agno/tools/eleven_labs.py +36 -29
  254. agno/tools/email.py +4 -1
  255. agno/tools/evm.py +7 -1
  256. agno/tools/exa.py +19 -14
  257. agno/tools/fal.py +30 -30
  258. agno/tools/file.py +9 -8
  259. agno/tools/financial_datasets.py +25 -44
  260. agno/tools/firecrawl.py +22 -22
  261. agno/tools/function.py +127 -18
  262. agno/tools/giphy.py +23 -11
  263. agno/tools/github.py +48 -126
  264. agno/tools/gmail.py +45 -61
  265. agno/tools/google_bigquery.py +7 -6
  266. agno/tools/google_maps.py +11 -26
  267. agno/tools/googlesearch.py +7 -2
  268. agno/tools/googlesheets.py +21 -17
  269. agno/tools/hackernews.py +9 -5
  270. agno/tools/jina.py +5 -4
  271. agno/tools/jira.py +18 -9
  272. agno/tools/knowledge.py +31 -32
  273. agno/tools/linear.py +19 -34
  274. agno/tools/linkup.py +5 -1
  275. agno/tools/local_file_system.py +8 -5
  276. agno/tools/lumalab.py +32 -20
  277. agno/tools/mcp.py +1 -2
  278. agno/tools/mem0.py +18 -12
  279. agno/tools/memori.py +14 -10
  280. agno/tools/mlx_transcribe.py +3 -2
  281. agno/tools/models/azure_openai.py +33 -15
  282. agno/tools/models/gemini.py +59 -32
  283. agno/tools/models/groq.py +30 -23
  284. agno/tools/models/nebius.py +28 -12
  285. agno/tools/models_labs.py +40 -16
  286. agno/tools/moviepy_video.py +7 -6
  287. agno/tools/neo4j.py +10 -8
  288. agno/tools/newspaper.py +7 -2
  289. agno/tools/newspaper4k.py +8 -3
  290. agno/tools/openai.py +58 -32
  291. agno/tools/openbb.py +12 -11
  292. agno/tools/opencv.py +63 -47
  293. agno/tools/openweather.py +14 -12
  294. agno/tools/pandas.py +11 -3
  295. agno/tools/postgres.py +4 -12
  296. agno/tools/pubmed.py +4 -1
  297. agno/tools/python.py +9 -22
  298. agno/tools/reasoning.py +35 -27
  299. agno/tools/reddit.py +11 -26
  300. agno/tools/replicate.py +55 -42
  301. agno/tools/resend.py +4 -1
  302. agno/tools/scrapegraph.py +15 -14
  303. agno/tools/searxng.py +10 -23
  304. agno/tools/serpapi.py +6 -3
  305. agno/tools/serper.py +13 -4
  306. agno/tools/shell.py +9 -2
  307. agno/tools/slack.py +12 -11
  308. agno/tools/sleep.py +3 -2
  309. agno/tools/spider.py +24 -4
  310. agno/tools/sql.py +7 -6
  311. agno/tools/tavily.py +6 -4
  312. agno/tools/telegram.py +12 -4
  313. agno/tools/todoist.py +11 -31
  314. agno/tools/toolkit.py +1 -1
  315. agno/tools/trafilatura.py +22 -6
  316. agno/tools/trello.py +9 -22
  317. agno/tools/twilio.py +10 -3
  318. agno/tools/user_control_flow.py +6 -1
  319. agno/tools/valyu.py +34 -5
  320. agno/tools/visualization.py +19 -28
  321. agno/tools/webbrowser.py +4 -3
  322. agno/tools/webex.py +11 -7
  323. agno/tools/website.py +15 -46
  324. agno/tools/webtools.py +12 -4
  325. agno/tools/whatsapp.py +5 -9
  326. agno/tools/wikipedia.py +20 -13
  327. agno/tools/x.py +14 -13
  328. agno/tools/yfinance.py +13 -40
  329. agno/tools/youtube.py +26 -20
  330. agno/tools/zendesk.py +7 -2
  331. agno/tools/zep.py +10 -7
  332. agno/tools/zoom.py +10 -9
  333. agno/utils/common.py +1 -19
  334. agno/utils/events.py +100 -123
  335. agno/utils/gemini.py +32 -2
  336. agno/utils/knowledge.py +29 -0
  337. agno/utils/log.py +54 -4
  338. agno/utils/mcp.py +68 -10
  339. agno/utils/media.py +39 -0
  340. agno/utils/message.py +12 -1
  341. agno/utils/models/aws_claude.py +1 -1
  342. agno/utils/models/claude.py +47 -4
  343. agno/utils/models/cohere.py +1 -1
  344. agno/utils/models/mistral.py +8 -7
  345. agno/utils/models/schema_utils.py +3 -3
  346. agno/utils/models/watsonx.py +1 -1
  347. agno/utils/openai.py +1 -1
  348. agno/utils/pprint.py +33 -32
  349. agno/utils/print_response/agent.py +779 -0
  350. agno/utils/print_response/team.py +1669 -0
  351. agno/utils/print_response/workflow.py +1451 -0
  352. agno/utils/prompts.py +14 -14
  353. agno/utils/reasoning.py +87 -0
  354. agno/utils/response.py +42 -42
  355. agno/utils/streamlit.py +481 -0
  356. agno/utils/string.py +8 -22
  357. agno/utils/team.py +50 -0
  358. agno/utils/timer.py +2 -2
  359. agno/vectordb/base.py +33 -21
  360. agno/vectordb/cassandra/cassandra.py +287 -23
  361. agno/vectordb/chroma/chromadb.py +482 -59
  362. agno/vectordb/clickhouse/clickhousedb.py +270 -63
  363. agno/vectordb/couchbase/couchbase.py +309 -29
  364. agno/vectordb/lancedb/lance_db.py +360 -21
  365. agno/vectordb/langchaindb/__init__.py +5 -0
  366. agno/vectordb/langchaindb/langchaindb.py +145 -0
  367. agno/vectordb/lightrag/__init__.py +5 -0
  368. agno/vectordb/lightrag/lightrag.py +374 -0
  369. agno/vectordb/llamaindex/llamaindexdb.py +127 -0
  370. agno/vectordb/milvus/milvus.py +242 -32
  371. agno/vectordb/mongodb/mongodb.py +200 -24
  372. agno/vectordb/pgvector/pgvector.py +319 -37
  373. agno/vectordb/pineconedb/pineconedb.py +221 -27
  374. agno/vectordb/qdrant/qdrant.py +334 -14
  375. agno/vectordb/singlestore/singlestore.py +286 -29
  376. agno/vectordb/surrealdb/surrealdb.py +187 -7
  377. agno/vectordb/upstashdb/upstashdb.py +342 -26
  378. agno/vectordb/weaviate/weaviate.py +227 -165
  379. agno/workflow/__init__.py +17 -13
  380. agno/workflow/{v2/condition.py → condition.py} +135 -32
  381. agno/workflow/{v2/loop.py → loop.py} +115 -28
  382. agno/workflow/{v2/parallel.py → parallel.py} +138 -108
  383. agno/workflow/{v2/router.py → router.py} +133 -32
  384. agno/workflow/{v2/step.py → step.py} +207 -49
  385. agno/workflow/{v2/steps.py → steps.py} +147 -66
  386. agno/workflow/types.py +482 -0
  387. agno/workflow/workflow.py +2410 -696
  388. agno-2.0.0.dist-info/METADATA +494 -0
  389. agno-2.0.0.dist-info/RECORD +515 -0
  390. agno-2.0.0.dist-info/licenses/LICENSE +201 -0
  391. agno/agent/metrics.py +0 -107
  392. agno/api/app.py +0 -35
  393. agno/api/playground.py +0 -92
  394. agno/api/schemas/app.py +0 -12
  395. agno/api/schemas/playground.py +0 -22
  396. agno/api/schemas/user.py +0 -35
  397. agno/api/schemas/workspace.py +0 -46
  398. agno/api/user.py +0 -160
  399. agno/api/workflows.py +0 -33
  400. agno/api/workspace.py +0 -175
  401. agno/app/agui/__init__.py +0 -3
  402. agno/app/agui/app.py +0 -17
  403. agno/app/agui/sync_router.py +0 -120
  404. agno/app/base.py +0 -186
  405. agno/app/discord/__init__.py +0 -3
  406. agno/app/fastapi/__init__.py +0 -3
  407. agno/app/fastapi/app.py +0 -107
  408. agno/app/fastapi/async_router.py +0 -457
  409. agno/app/fastapi/sync_router.py +0 -448
  410. agno/app/playground/app.py +0 -228
  411. agno/app/playground/async_router.py +0 -1050
  412. agno/app/playground/deploy.py +0 -249
  413. agno/app/playground/operator.py +0 -183
  414. agno/app/playground/schemas.py +0 -220
  415. agno/app/playground/serve.py +0 -55
  416. agno/app/playground/sync_router.py +0 -1042
  417. agno/app/playground/utils.py +0 -46
  418. agno/app/settings.py +0 -15
  419. agno/app/slack/__init__.py +0 -3
  420. agno/app/slack/app.py +0 -19
  421. agno/app/slack/sync_router.py +0 -92
  422. agno/app/utils.py +0 -54
  423. agno/app/whatsapp/__init__.py +0 -3
  424. agno/app/whatsapp/app.py +0 -15
  425. agno/app/whatsapp/sync_router.py +0 -197
  426. agno/cli/auth_server.py +0 -249
  427. agno/cli/config.py +0 -274
  428. agno/cli/console.py +0 -88
  429. agno/cli/credentials.py +0 -23
  430. agno/cli/entrypoint.py +0 -571
  431. agno/cli/operator.py +0 -357
  432. agno/cli/settings.py +0 -96
  433. agno/cli/ws/ws_cli.py +0 -817
  434. agno/constants.py +0 -13
  435. agno/document/__init__.py +0 -5
  436. agno/document/chunking/semantic.py +0 -45
  437. agno/document/chunking/strategy.py +0 -31
  438. agno/document/reader/__init__.py +0 -5
  439. agno/document/reader/base.py +0 -47
  440. agno/document/reader/docx_reader.py +0 -60
  441. agno/document/reader/gcs/pdf_reader.py +0 -44
  442. agno/document/reader/s3/pdf_reader.py +0 -59
  443. agno/document/reader/s3/text_reader.py +0 -63
  444. agno/document/reader/url_reader.py +0 -59
  445. agno/document/reader/youtube_reader.py +0 -58
  446. agno/embedder/__init__.py +0 -5
  447. agno/embedder/langdb.py +0 -80
  448. agno/embedder/mistral.py +0 -82
  449. agno/embedder/openai.py +0 -78
  450. agno/file/__init__.py +0 -5
  451. agno/file/file.py +0 -16
  452. agno/file/local/csv.py +0 -32
  453. agno/file/local/txt.py +0 -19
  454. agno/infra/app.py +0 -240
  455. agno/infra/base.py +0 -144
  456. agno/infra/context.py +0 -20
  457. agno/infra/db_app.py +0 -52
  458. agno/infra/resource.py +0 -205
  459. agno/infra/resources.py +0 -55
  460. agno/knowledge/agent.py +0 -702
  461. agno/knowledge/arxiv.py +0 -33
  462. agno/knowledge/combined.py +0 -36
  463. agno/knowledge/csv.py +0 -144
  464. agno/knowledge/csv_url.py +0 -124
  465. agno/knowledge/document.py +0 -223
  466. agno/knowledge/docx.py +0 -137
  467. agno/knowledge/firecrawl.py +0 -34
  468. agno/knowledge/gcs/__init__.py +0 -0
  469. agno/knowledge/gcs/base.py +0 -39
  470. agno/knowledge/gcs/pdf.py +0 -125
  471. agno/knowledge/json.py +0 -137
  472. agno/knowledge/langchain.py +0 -71
  473. agno/knowledge/light_rag.py +0 -273
  474. agno/knowledge/llamaindex.py +0 -66
  475. agno/knowledge/markdown.py +0 -154
  476. agno/knowledge/pdf.py +0 -164
  477. agno/knowledge/pdf_bytes.py +0 -42
  478. agno/knowledge/pdf_url.py +0 -148
  479. agno/knowledge/s3/__init__.py +0 -0
  480. agno/knowledge/s3/base.py +0 -64
  481. agno/knowledge/s3/pdf.py +0 -33
  482. agno/knowledge/s3/text.py +0 -34
  483. agno/knowledge/text.py +0 -141
  484. agno/knowledge/url.py +0 -46
  485. agno/knowledge/website.py +0 -179
  486. agno/knowledge/wikipedia.py +0 -32
  487. agno/knowledge/youtube.py +0 -35
  488. agno/memory/agent.py +0 -423
  489. agno/memory/classifier.py +0 -104
  490. agno/memory/db/__init__.py +0 -5
  491. agno/memory/db/base.py +0 -42
  492. agno/memory/db/mongodb.py +0 -189
  493. agno/memory/db/postgres.py +0 -203
  494. agno/memory/db/sqlite.py +0 -193
  495. agno/memory/memory.py +0 -22
  496. agno/memory/row.py +0 -36
  497. agno/memory/summarizer.py +0 -201
  498. agno/memory/summary.py +0 -19
  499. agno/memory/team.py +0 -415
  500. agno/memory/v2/__init__.py +0 -2
  501. agno/memory/v2/db/__init__.py +0 -1
  502. agno/memory/v2/db/base.py +0 -42
  503. agno/memory/v2/db/firestore.py +0 -339
  504. agno/memory/v2/db/mongodb.py +0 -196
  505. agno/memory/v2/db/postgres.py +0 -214
  506. agno/memory/v2/db/redis.py +0 -187
  507. agno/memory/v2/db/schema.py +0 -54
  508. agno/memory/v2/db/sqlite.py +0 -209
  509. agno/memory/v2/manager.py +0 -437
  510. agno/memory/v2/memory.py +0 -1097
  511. agno/memory/v2/schema.py +0 -55
  512. agno/memory/v2/summarizer.py +0 -215
  513. agno/memory/workflow.py +0 -38
  514. agno/models/ollama/tools.py +0 -430
  515. agno/models/qwen/__init__.py +0 -5
  516. agno/playground/__init__.py +0 -10
  517. agno/playground/deploy.py +0 -3
  518. agno/playground/playground.py +0 -3
  519. agno/playground/serve.py +0 -3
  520. agno/playground/settings.py +0 -3
  521. agno/reranker/__init__.py +0 -0
  522. agno/run/response.py +0 -467
  523. agno/run/v2/__init__.py +0 -0
  524. agno/run/v2/workflow.py +0 -567
  525. agno/storage/__init__.py +0 -0
  526. agno/storage/agent/__init__.py +0 -0
  527. agno/storage/agent/dynamodb.py +0 -1
  528. agno/storage/agent/json.py +0 -1
  529. agno/storage/agent/mongodb.py +0 -1
  530. agno/storage/agent/postgres.py +0 -1
  531. agno/storage/agent/singlestore.py +0 -1
  532. agno/storage/agent/sqlite.py +0 -1
  533. agno/storage/agent/yaml.py +0 -1
  534. agno/storage/base.py +0 -60
  535. agno/storage/dynamodb.py +0 -673
  536. agno/storage/firestore.py +0 -297
  537. agno/storage/gcs_json.py +0 -261
  538. agno/storage/in_memory.py +0 -234
  539. agno/storage/json.py +0 -237
  540. agno/storage/mongodb.py +0 -328
  541. agno/storage/mysql.py +0 -685
  542. agno/storage/postgres.py +0 -682
  543. agno/storage/redis.py +0 -336
  544. agno/storage/session/__init__.py +0 -16
  545. agno/storage/session/agent.py +0 -64
  546. agno/storage/session/team.py +0 -63
  547. agno/storage/session/v2/__init__.py +0 -5
  548. agno/storage/session/workflow.py +0 -61
  549. agno/storage/singlestore.py +0 -606
  550. agno/storage/sqlite.py +0 -646
  551. agno/storage/workflow/__init__.py +0 -0
  552. agno/storage/workflow/mongodb.py +0 -1
  553. agno/storage/workflow/postgres.py +0 -1
  554. agno/storage/workflow/sqlite.py +0 -1
  555. agno/storage/yaml.py +0 -241
  556. agno/tools/thinking.py +0 -73
  557. agno/utils/defaults.py +0 -57
  558. agno/utils/filesystem.py +0 -39
  559. agno/utils/git.py +0 -52
  560. agno/utils/json_io.py +0 -30
  561. agno/utils/load_env.py +0 -19
  562. agno/utils/py_io.py +0 -19
  563. agno/utils/pyproject.py +0 -18
  564. agno/utils/resource_filter.py +0 -31
  565. agno/workflow/v2/__init__.py +0 -21
  566. agno/workflow/v2/types.py +0 -357
  567. agno/workflow/v2/workflow.py +0 -3312
  568. agno/workspace/__init__.py +0 -0
  569. agno/workspace/config.py +0 -325
  570. agno/workspace/enums.py +0 -6
  571. agno/workspace/helpers.py +0 -52
  572. agno/workspace/operator.py +0 -757
  573. agno/workspace/settings.py +0 -158
  574. agno-1.8.1.dist-info/METADATA +0 -982
  575. agno-1.8.1.dist-info/RECORD +0 -566
  576. agno-1.8.1.dist-info/entry_points.txt +0 -3
  577. agno-1.8.1.dist-info/licenses/LICENSE +0 -375
  578. /agno/{app → db/migrations}/__init__.py +0 -0
  579. /agno/{app/playground/__init__.py → db/schemas/metrics.py} +0 -0
  580. /agno/{cli → integrations}/__init__.py +0 -0
  581. /agno/{cli/ws → knowledge/chunking}/__init__.py +0 -0
  582. /agno/{document/chunking → knowledge/remote_content}/__init__.py +0 -0
  583. /agno/{document/reader/gcs → knowledge/reranker}/__init__.py +0 -0
  584. /agno/{document/reader/s3 → os/interfaces}/__init__.py +0 -0
  585. /agno/{app → os/interfaces}/slack/security.py +0 -0
  586. /agno/{app → os/interfaces}/whatsapp/security.py +0 -0
  587. /agno/{file/local → utils/print_response}/__init__.py +0 -0
  588. /agno/{infra → vectordb/llamaindex}/__init__.py +0 -0
  589. {agno-1.8.1.dist-info → agno-2.0.0.dist-info}/WHEEL +0 -0
  590. {agno-1.8.1.dist-info → agno-2.0.0.dist-info}/top_level.txt +0 -0
agno/workflow/workflow.py CHANGED
@@ -1,807 +1,2521 @@
1
- from __future__ import annotations
2
-
3
- import collections.abc
4
- import inspect
5
- from dataclasses import dataclass, field, fields
1
+ import asyncio
2
+ from dataclasses import dataclass
3
+ from datetime import datetime
6
4
  from os import getenv
7
- from types import GeneratorType
8
- from typing import Any, AsyncIterator, Callable, Dict, List, Optional, Union, cast, get_args
5
+ from typing import (
6
+ Any,
7
+ AsyncIterator,
8
+ Awaitable,
9
+ Callable,
10
+ Dict,
11
+ Iterator,
12
+ List,
13
+ Literal,
14
+ Optional,
15
+ Tuple,
16
+ Type,
17
+ Union,
18
+ cast,
19
+ overload,
20
+ )
9
21
  from uuid import uuid4
10
22
 
23
+ from fastapi import WebSocket
11
24
  from pydantic import BaseModel
12
25
 
13
- from agno.agent import Agent
14
- from agno.media import AudioArtifact, ImageArtifact, VideoArtifact
15
- from agno.memory.v2.memory import Memory
16
- from agno.memory.workflow import WorkflowMemory, WorkflowRun
17
- from agno.run.response import RunResponse, RunResponseEvent
18
- from agno.run.team import TeamRunResponseEvent
19
- from agno.run.workflow import WorkflowRunResponseEvent
20
- from agno.storage.base import Storage
21
- from agno.storage.session.workflow import WorkflowSession
22
- from agno.utils.common import nested_model_dump
23
- from agno.utils.log import log_debug, log_warning, logger, set_log_level_to_debug, set_log_level_to_info
24
- from agno.utils.merge_dict import merge_dictionaries
25
-
26
-
27
- @dataclass(init=False)
26
+ from agno.agent.agent import Agent
27
+ from agno.db.base import BaseDb, SessionType
28
+ from agno.exceptions import RunCancelledException
29
+ from agno.media import Audio, File, Image, Video
30
+ from agno.models.message import Message
31
+ from agno.models.metrics import Metrics
32
+ from agno.run.agent import RunEvent
33
+ from agno.run.base import RunStatus
34
+ from agno.run.cancel import (
35
+ cancel_run as cancel_run_global,
36
+ )
37
+ from agno.run.cancel import (
38
+ cleanup_run,
39
+ raise_if_cancelled,
40
+ register_run,
41
+ )
42
+ from agno.run.team import TeamRunEvent
43
+ from agno.run.workflow import (
44
+ StepOutputEvent,
45
+ WorkflowCancelledEvent,
46
+ WorkflowCompletedEvent,
47
+ WorkflowRunEvent,
48
+ WorkflowRunOutput,
49
+ WorkflowRunOutputEvent,
50
+ WorkflowStartedEvent,
51
+ )
52
+ from agno.session.workflow import WorkflowSession
53
+ from agno.team.team import Team
54
+ from agno.utils.log import (
55
+ log_debug,
56
+ log_warning,
57
+ logger,
58
+ set_log_level_to_debug,
59
+ set_log_level_to_info,
60
+ use_workflow_logger,
61
+ )
62
+ from agno.utils.print_response.workflow import (
63
+ aprint_response,
64
+ aprint_response_stream,
65
+ print_response,
66
+ print_response_stream,
67
+ )
68
+ from agno.workflow.condition import Condition
69
+ from agno.workflow.loop import Loop
70
+ from agno.workflow.parallel import Parallel
71
+ from agno.workflow.router import Router
72
+ from agno.workflow.step import Step
73
+ from agno.workflow.steps import Steps
74
+ from agno.workflow.types import (
75
+ StepInput,
76
+ StepMetrics,
77
+ StepOutput,
78
+ StepType,
79
+ WebSocketHandler,
80
+ WorkflowExecutionInput,
81
+ WorkflowMetrics,
82
+ )
83
+
84
+ STEP_TYPE_MAPPING = {
85
+ Step: StepType.STEP,
86
+ Steps: StepType.STEPS,
87
+ Loop: StepType.LOOP,
88
+ Parallel: StepType.PARALLEL,
89
+ Condition: StepType.CONDITION,
90
+ Router: StepType.ROUTER,
91
+ }
92
+
93
+ WorkflowSteps = Union[
94
+ Callable[
95
+ ["Workflow", WorkflowExecutionInput],
96
+ Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput], Any],
97
+ ],
98
+ Steps,
99
+ List[
100
+ Union[
101
+ Callable[
102
+ [StepInput], Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput]]
103
+ ],
104
+ Step,
105
+ Steps,
106
+ Loop,
107
+ Parallel,
108
+ Condition,
109
+ Router,
110
+ ]
111
+ ],
112
+ ]
113
+
114
+
115
+ @dataclass
28
116
  class Workflow:
29
- # --- Workflow settings ---
30
- # Workflow name
117
+ """Pipeline-based workflow execution"""
118
+
119
+ # Workflow identification - make name optional with default
31
120
  name: Optional[str] = None
32
- # Workflow UUID (autogenerated if not set)
33
- workflow_id: Optional[str] = None
34
- # Workflow app_id (autogenerated if not set)
35
- app_id: Optional[str] = None
36
- # Workflow description (only shown in the UI)
121
+ # Workflow ID (autogenerated if not set)
122
+ id: Optional[str] = None
123
+ # Workflow description
37
124
  description: Optional[str] = None
38
125
 
39
- # --- User settings ---
40
- # ID of the user interacting with this workflow
41
- user_id: Optional[str] = None
126
+ # Workflow steps
127
+ steps: Optional[WorkflowSteps] = None
42
128
 
43
- # -*- Session settings
44
- # Session UUID (autogenerated if not set)
129
+ # Database to use for this workflow
130
+ db: Optional[BaseDb] = None
131
+
132
+ # Default session_id to use for this workflow (autogenerated if not set)
45
133
  session_id: Optional[str] = None
46
- # Session name
47
- session_name: Optional[str] = None
48
- # Session state stored in the database
49
- session_state: Dict[str, Any] = field(default_factory=dict)
50
-
51
- # --- Workflow Memory ---
52
- memory: Optional[Union[WorkflowMemory, Memory]] = None
53
-
54
- # --- Workflow Storage ---
55
- storage: Optional[Storage] = None
56
- # Extra data stored with this workflow
57
- extra_data: Optional[Dict[str, Any]] = None
58
-
59
- # --- Debug & Monitoring ---
60
- # Enable debug logs
61
- debug_mode: bool = False
62
- # monitoring=True logs Workflow information to agno.com for monitoring
63
- monitoring: bool = field(default_factory=lambda: getenv("AGNO_MONITOR", "false").lower() == "true")
134
+ # Default user_id to use for this workflow
135
+ user_id: Optional[str] = None
136
+ # Default session state (stored in the database to persist across runs)
137
+ session_state: Optional[Dict[str, Any]] = None
138
+
139
+ # If True, the workflow runs in debug mode
140
+ debug_mode: Optional[bool] = False
141
+
142
+ # --- Workflow Streaming ---
143
+ # Stream the response from the Workflow
144
+ stream: Optional[bool] = None
145
+ # Stream the intermediate steps from the Workflow
146
+ stream_intermediate_steps: bool = False
147
+
148
+ # Persist the events on the run response
149
+ store_events: bool = False
150
+ # Events to skip when persisting the events on the run response
151
+ events_to_skip: Optional[List[Union[WorkflowRunEvent, RunEvent, TeamRunEvent]]] = None
152
+
153
+ # Control whether to store executor responses (agent/team responses) in flattened runs
154
+ store_executor_outputs: bool = True
155
+
156
+ websocket_handler: Optional[WebSocketHandler] = None
157
+
158
+ # Input schema to validate the input to the workflow
159
+ input_schema: Optional[Type[BaseModel]] = None
160
+
161
+ # Metadata stored with this workflow
162
+ metadata: Optional[Dict[str, Any]] = None
163
+
164
+ # --- Telemetry ---
64
165
  # telemetry=True logs minimal telemetry for analytics
65
- # This helps us improve the Workflow and provide better support
66
- telemetry: bool = field(default_factory=lambda: getenv("AGNO_TELEMETRY", "true").lower() == "true")
67
-
68
- # --- Run Info: DO NOT SET ---
69
- run_id: Optional[str] = None
70
- run_input: Optional[Dict[str, Any]] = None
71
- run_response: Optional[RunResponse] = None
72
- # Images generated during this session
73
- images: Optional[List[ImageArtifact]] = None
74
- # Videos generated during this session
75
- videos: Optional[List[VideoArtifact]] = None
76
- # Audio generated during this session
77
- audio: Optional[List[AudioArtifact]] = None
166
+ # This helps us improve the Agent and provide better support
167
+ telemetry: bool = True
78
168
 
79
169
  def __init__(
80
170
  self,
81
- *,
171
+ id: Optional[str] = None,
82
172
  name: Optional[str] = None,
83
- workflow_id: Optional[str] = None,
84
173
  description: Optional[str] = None,
85
- user_id: Optional[str] = None,
174
+ db: Optional[BaseDb] = None,
175
+ steps: Optional[WorkflowSteps] = None,
86
176
  session_id: Optional[str] = None,
87
- session_name: Optional[str] = None,
88
177
  session_state: Optional[Dict[str, Any]] = None,
89
- memory: Optional[Union[WorkflowMemory, Memory]] = None,
90
- storage: Optional[Storage] = None,
91
- extra_data: Optional[Dict[str, Any]] = None,
92
- debug_mode: bool = False,
93
- monitoring: bool = False,
178
+ user_id: Optional[str] = None,
179
+ debug_mode: Optional[bool] = False,
180
+ stream: Optional[bool] = None,
181
+ stream_intermediate_steps: bool = False,
182
+ store_events: bool = False,
183
+ events_to_skip: Optional[List[Union[WorkflowRunEvent, RunEvent, TeamRunEvent]]] = None,
184
+ store_executor_outputs: bool = True,
185
+ input_schema: Optional[Type[BaseModel]] = None,
186
+ metadata: Optional[Dict[str, Any]] = None,
187
+ cache_session: bool = False,
94
188
  telemetry: bool = True,
95
- app_id: Optional[str] = None,
96
189
  ):
97
- self.name = name or self.__class__.__name__
98
- self.workflow_id = workflow_id
99
- self.description = description or self.__class__.description
100
- self.app_id = app_id
101
-
190
+ self.id = id
191
+ self.name = name
192
+ self.description = description
193
+ self.steps = steps
194
+ self.session_id = session_id
195
+ self.session_state = session_state
102
196
  self.user_id = user_id
197
+ self.debug_mode = debug_mode
198
+ self.store_events = store_events
199
+ self.events_to_skip = events_to_skip or []
200
+ self.stream = stream
201
+ self.stream_intermediate_steps = stream_intermediate_steps
202
+ self.store_executor_outputs = store_executor_outputs
203
+ self.input_schema = input_schema
204
+ self.metadata = metadata
205
+ self.cache_session = cache_session
206
+ self.db = db
207
+ self.telemetry = telemetry
103
208
 
104
- self.session_id = session_id
105
- self.session_name = session_name
106
- self.session_state: Dict[str, Any] = session_state or {}
209
+ self._workflow_session: Optional[WorkflowSession] = None
107
210
 
108
- self.memory = memory
109
- self.storage = storage
110
- self.extra_data = extra_data
211
+ def set_id(self) -> None:
212
+ if self.id is None:
213
+ if self.name is not None:
214
+ self.id = self.name.lower().replace(" ", "-")
215
+ else:
216
+ self.id = str(uuid4())
111
217
 
112
- self.debug_mode = debug_mode
113
- self.monitoring = monitoring
114
- self.telemetry = telemetry
218
+ def _validate_input(
219
+ self, input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]]
220
+ ) -> Optional[BaseModel]:
221
+ """Parse and validate input against input_schema if provided"""
222
+ if self.input_schema is None:
223
+ return None
115
224
 
116
- self.run_id = None
117
- self.run_input = None
118
- self.run_response = None
119
- self.images = None
120
- self.videos = None
121
- self.audio = None
225
+ if input is None:
226
+ raise ValueError("Input required when input_schema is set")
122
227
 
123
- self.workflow_session: Optional[WorkflowSession] = None
228
+ # If input is a string, convert it to a dict
229
+ if isinstance(input, str):
230
+ import json
124
231
 
125
- # Private attributes to store the run method and its parameters
126
- # The run function provided by the subclass
127
- self._subclass_run: Optional[Callable] = None
128
- self._subclass_arun: Optional[Callable] = None
232
+ try:
233
+ input = json.loads(input)
234
+ except Exception as e:
235
+ raise ValueError(f"Failed to parse input. Is it a valid JSON string?: {e}")
129
236
 
130
- # Parameters of the run function
131
- self._run_parameters: Optional[Dict[str, Any]] = None
132
- # Return type of the run function
133
- self._run_return_type: Optional[str] = None
237
+ # Case 1: Message is already a BaseModel instance
238
+ if isinstance(input, BaseModel):
239
+ if isinstance(input, self.input_schema):
240
+ try:
241
+ # Re-validate to catch any field validation errors
242
+ input.model_validate(input.model_dump())
243
+ return input
244
+ except Exception as e:
245
+ raise ValueError(f"BaseModel validation failed: {str(e)}")
246
+ else:
247
+ # Different BaseModel types
248
+ raise ValueError(f"Expected {self.input_schema.__name__} but got {type(input).__name__}")
134
249
 
135
- self.update_run_method()
250
+ # Case 2: Message is a dict
251
+ elif isinstance(input, dict):
252
+ try:
253
+ validated_model = self.input_schema(**input)
254
+ return validated_model
255
+ except Exception as e:
256
+ raise ValueError(f"Failed to parse dict into {self.input_schema.__name__}: {str(e)}")
136
257
 
137
- self.__post_init__()
258
+ # Case 3: Other types not supported for structured input
259
+ else:
260
+ raise ValueError(
261
+ f"Cannot validate {type(input)} against input_schema. Expected dict or {self.input_schema.__name__} instance."
262
+ )
138
263
 
139
- def __post_init__(self):
140
- for field_name, value in self.__class__.__dict__.items():
141
- if isinstance(value, Agent):
142
- value.session_id = self.session_id
264
+ @property
265
+ def run_parameters(self) -> Dict[str, Any]:
266
+ """Get the run parameters for the workflow"""
267
+
268
+ if self.steps is None:
269
+ return {}
270
+
271
+ parameters = {}
272
+
273
+ if self.steps and callable(self.steps):
274
+ from inspect import Parameter, signature
275
+
276
+ sig = signature(self.steps) # type: ignore
277
+
278
+ for param_name, param in sig.parameters.items():
279
+ if param_name not in ["workflow", "execution_input", "self"]:
280
+ parameters[param_name] = {
281
+ "name": param_name,
282
+ "default": param.default.default
283
+ if hasattr(param.default, "__class__") and param.default.__class__.__name__ == "FieldInfo"
284
+ else (param.default if param.default is not Parameter.empty else None),
285
+ "annotation": (
286
+ param.annotation.__name__
287
+ if hasattr(param.annotation, "__name__")
288
+ else (
289
+ str(param.annotation).replace("typing.Optional[", "").replace("]", "")
290
+ if "typing.Optional" in str(param.annotation)
291
+ else str(param.annotation)
292
+ )
293
+ )
294
+ if param.annotation is not Parameter.empty
295
+ else None,
296
+ "required": param.default is Parameter.empty,
297
+ }
298
+ else:
299
+ parameters = {
300
+ "message": {
301
+ "name": "message",
302
+ "default": None,
303
+ "annotation": "str",
304
+ "required": True,
305
+ },
306
+ }
143
307
 
144
- def run(self, **kwargs: Any):
145
- logger.error(f"{self.__class__.__name__}.run() method not implemented.")
146
- return
308
+ return parameters
147
309
 
148
- def run_workflow(self, **kwargs: Any):
149
- """Run the Workflow"""
310
+ def initialize_workflow(self):
311
+ if self.id is None:
312
+ self.set_id()
313
+ log_debug(f"Generated new workflow_id: {self.id}")
150
314
 
151
- # Set mode, debug, workflow_id, session_id, initialize memory
152
- self.set_storage_mode()
153
- self.set_debug()
154
- self.set_monitoring()
155
- self.set_workflow_id() # Ensure workflow_id is set
156
- self.set_session_id()
157
- self.initialize_memory()
315
+ def _initialize_session(
316
+ self,
317
+ session_id: Optional[str] = None,
318
+ user_id: Optional[str] = None,
319
+ session_state: Optional[Dict[str, Any]] = None,
320
+ run_id: Optional[str] = None,
321
+ ) -> Tuple[str, Optional[str], Dict[str, Any]]:
322
+ """Initialize the session for the agent."""
158
323
 
159
- # Create a run_id
160
- self.run_id = str(uuid4())
324
+ if session_id is None:
325
+ if self.session_id:
326
+ session_id = self.session_id
327
+ else:
328
+ session_id = str(uuid4())
329
+ # We make the session_id sticky to the agent instance if no session_id is provided
330
+ self.session_id = session_id
331
+
332
+ log_debug(f"Session ID: {session_id}", center=True)
333
+
334
+ # Use the default user_id when necessary
335
+ if user_id is None:
336
+ user_id = self.user_id
337
+
338
+ # Determine the session_state
339
+ if session_state is None:
340
+ session_state = self.session_state or {}
341
+
342
+ if user_id is not None:
343
+ session_state["current_user_id"] = user_id
344
+ if session_id is not None:
345
+ session_state["current_session_id"] = session_id
346
+
347
+ session_state.update(
348
+ {
349
+ "workflow_id": self.id,
350
+ "run_id": run_id,
351
+ "session_id": session_id,
352
+ }
353
+ )
354
+ if self.name:
355
+ session_state["workflow_name"] = self.name
161
356
 
162
- # Set run_input, run_response
163
- self.run_input = kwargs
164
- self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, workflow_id=self.workflow_id)
357
+ return session_id, user_id, session_state # type: ignore
165
358
 
166
- # Read existing session from storage
167
- self.read_from_storage()
359
+ def _generate_workflow_session_name(self) -> str:
360
+ """Generate a name for the workflow session"""
168
361
 
169
- # Update the session_id for all Agent instances
170
- self.update_agent_session_ids()
362
+ if self.session_id is None:
363
+ return f"Workflow Session - {datetime.now().strftime('%Y-%m-%d %H:%M')}"
171
364
 
172
- log_debug(f"Workflow Run Start: {self.run_id}", center=True)
173
- try:
174
- self._subclass_run = cast(Callable, self._subclass_run)
175
- result = self._subclass_run(**kwargs)
176
- except Exception as e:
177
- logger.error(f"Workflow.run() failed: {e}")
178
- raise e
179
-
180
- # The run_workflow() method handles both Iterator[RunResponse] and RunResponse
181
- # Case 1: The run method returns an Iterator[RunResponse]
182
- if isinstance(result, (GeneratorType, collections.abc.Iterator)):
183
- # Initialize the run_response content
184
- self.run_response.content = ""
185
-
186
- def result_generator():
187
- self.run_response = cast(RunResponse, self.run_response)
188
- if isinstance(self.memory, WorkflowMemory):
189
- self.memory = cast(WorkflowMemory, self.memory)
190
- elif isinstance(self.memory, Memory):
191
- self.memory = cast(Memory, self.memory)
192
-
193
- for item in result:
194
- if (
195
- isinstance(item, tuple(get_args(RunResponseEvent)))
196
- or isinstance(item, tuple(get_args(TeamRunResponseEvent)))
197
- or isinstance(item, tuple(get_args(WorkflowRunResponseEvent)))
198
- or isinstance(item, RunResponse)
199
- ):
200
- # Update the run_id, session_id and workflow_id of the RunResponseEvent
201
- item.run_id = self.run_id
202
- item.session_id = self.session_id
203
- item.workflow_id = self.workflow_id
204
-
205
- # Update the run_response with the content from the result
206
- if hasattr(item, "content") and item.content is not None and isinstance(item.content, str):
207
- self.run_response.content += item.content
208
- else:
209
- logger.warning(f"Workflow.run() should only yield RunResponseEvent objects, got: {type(item)}")
210
- yield item
211
-
212
- # Add the run to the memory
213
- if isinstance(self.memory, WorkflowMemory):
214
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
215
- elif isinstance(self.memory, Memory):
216
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
217
- # Write this run to the database
218
- self.write_to_storage()
219
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
220
-
221
- return result_generator()
222
- # Case 2: The run method returns a RunResponse
223
- elif isinstance(result, RunResponse):
224
- # Update the result with the run_id, session_id and workflow_id of the workflow run
225
- result.run_id = self.run_id
226
- result.session_id = self.session_id
227
- result.workflow_id = self.workflow_id
228
-
229
- # Update the run_response with the content from the result
230
- if result.content is not None and isinstance(result.content, str):
231
- self.run_response.content = result.content
232
-
233
- # Add the run to the memory
234
- if isinstance(self.memory, WorkflowMemory):
235
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
236
- elif isinstance(self.memory, Memory):
237
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
238
- # Write this run to the database
239
- self.write_to_storage()
240
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
241
- return result
365
+ datetime_str = datetime.now().strftime("%Y-%m-%d %H:%M")
366
+ new_session_name = f"Workflow Session-{datetime_str}"
367
+
368
+ if self.description:
369
+ truncated_desc = self.description[:40] + "-" if len(self.description) > 40 else self.description
370
+ new_session_name = f"{truncated_desc} - {datetime_str}"
371
+ return new_session_name
372
+
373
+ def set_session_name(
374
+ self, session_id: Optional[str] = None, autogenerate: bool = False, session_name: Optional[str] = None
375
+ ) -> WorkflowSession:
376
+ """Set the session name and save to storage"""
377
+ session_id = session_id or self.session_id
378
+
379
+ if session_id is None:
380
+ raise Exception("Session ID is not set")
381
+
382
+ # -*- Read from storage
383
+ session = self.get_session(session_id=session_id) # type: ignore
384
+
385
+ if autogenerate:
386
+ # -*- Generate name for session
387
+ session_name = self._generate_workflow_session_name()
388
+ log_debug(f"Generated Workflow Session Name: {session_name}")
389
+ elif session_name is None:
390
+ raise Exception("Session name is not set")
391
+
392
+ # -*- Rename session
393
+ session.session_data["session_name"] = session_name # type: ignore
394
+
395
+ # -*- Save to storage
396
+ self.save_session(session=session) # type: ignore
397
+
398
+ return session # type: ignore
399
+
400
+ def get_session_name(self, session_id: Optional[str] = None) -> str:
401
+ """Get the session name for the given session ID and user ID."""
402
+ session_id = session_id or self.session_id
403
+ if session_id is None:
404
+ raise Exception("Session ID is not set")
405
+ session = self.get_session(session_id=session_id) # type: ignore
406
+ if session is None:
407
+ raise Exception("Session not found")
408
+ return session.session_data.get("session_name", "") if session.session_data else ""
409
+
410
+ def get_session_state(self, session_id: Optional[str] = None) -> Dict[str, Any]:
411
+ """Get the session state for the given session ID and user ID."""
412
+ session_id = session_id or self.session_id
413
+ if session_id is None:
414
+ raise Exception("Session ID is not set")
415
+ session = self.get_session(session_id=session_id) # type: ignore
416
+ if session is None:
417
+ raise Exception("Session not found")
418
+ return session.session_data.get("session_state", {}) if session.session_data else {}
419
+
420
+ def delete_session(self, session_id: str):
421
+ """Delete the current session and save to storage"""
422
+ if self.db is None:
423
+ return
424
+ # -*- Delete session
425
+ self.db.delete_session(session_id=session_id)
426
+
427
+ def get_run_output(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
428
+ """Get a RunOutput from the database."""
429
+ if self._workflow_session is not None:
430
+ run_response = self._workflow_session.get_run(run_id=run_id)
431
+ if run_response is not None:
432
+ return run_response
433
+ else:
434
+ log_warning(f"RunOutput {run_id} not found in AgentSession {self._workflow_session.session_id}")
435
+ return None
242
436
  else:
243
- logger.warning(f"Workflow.run() should only return RunResponse objects, got: {type(result)}")
244
- return None
437
+ workflow_session = self.get_session(session_id=session_id)
438
+ if workflow_session is not None:
439
+ run_response = workflow_session.get_run(run_id=run_id)
440
+ if run_response is not None:
441
+ return run_response
442
+ else:
443
+ log_warning(f"RunOutput {run_id} not found in AgentSession {session_id}")
444
+ return None
445
+
446
+ def get_last_run_output(self, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
447
+ """Get the last run response from the database."""
448
+ if (
449
+ self._workflow_session is not None
450
+ and self._workflow_session.runs is not None
451
+ and len(self._workflow_session.runs) > 0
452
+ ):
453
+ run_response = self._workflow_session.runs[-1]
454
+ if run_response is not None:
455
+ return run_response
456
+ else:
457
+ workflow_session = self.get_session(session_id=session_id)
458
+ if workflow_session is not None and workflow_session.runs is not None and len(workflow_session.runs) > 0:
459
+ run_response = workflow_session.runs[-1]
460
+ if run_response is not None:
461
+ return run_response
462
+ else:
463
+ log_warning(f"No run responses found in WorkflowSession {session_id}")
464
+ return None
245
465
 
246
- # Add to workflow.py after the run_workflow method
247
- async def arun_workflow(self, **kwargs: Any):
248
- """Run the Workflow asynchronously"""
466
+ def read_or_create_session(
467
+ self,
468
+ session_id: str,
469
+ user_id: Optional[str] = None,
470
+ ) -> WorkflowSession:
471
+ from time import time
472
+
473
+ # Returning cached session if we have one
474
+ if self._workflow_session is not None and self._workflow_session.session_id == session_id:
475
+ return self._workflow_session
476
+
477
+ # Try to load from database
478
+ workflow_session = None
479
+ if self.db is not None:
480
+ log_debug(f"Reading WorkflowSession: {session_id}")
481
+
482
+ workflow_session = cast(WorkflowSession, self._read_session(session_id=session_id))
483
+
484
+ if workflow_session is None:
485
+ # Creating new session if none found
486
+ log_debug(f"Creating new WorkflowSession: {session_id}")
487
+ workflow_session = WorkflowSession(
488
+ session_id=session_id,
489
+ workflow_id=self.id,
490
+ user_id=user_id,
491
+ workflow_data=self._get_workflow_data(),
492
+ session_data={},
493
+ metadata=self.metadata,
494
+ created_at=int(time()),
495
+ )
496
+
497
+ # Cache the session if relevant
498
+ if workflow_session is not None and self.cache_session:
499
+ self._workflow_session = workflow_session
500
+
501
+ return workflow_session
502
+
503
+ def get_session(
504
+ self,
505
+ session_id: Optional[str] = None,
506
+ ) -> Optional[WorkflowSession]:
507
+ """Load an WorkflowSession from database.
249
508
 
250
- # Set mode, debug, workflow_id, session_id, initialize memory
251
- self.set_storage_mode()
252
- self.set_debug()
253
- self.set_monitoring()
254
- self.set_workflow_id() # Ensure workflow_id is set
255
- self.set_session_id()
256
- self.initialize_memory()
509
+ Args:
510
+ session_id: The session_id to load from storage.
257
511
 
258
- # Create a run_id
259
- self.run_id = str(uuid4())
512
+ Returns:
513
+ WorkflowSession: The WorkflowSession loaded from the database or created if it does not exist.
514
+ """
515
+ if not session_id and not self.session_id:
516
+ raise Exception("No session_id provided")
260
517
 
261
- # Set run_input, run_response
262
- self.run_input = kwargs
263
- self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, workflow_id=self.workflow_id)
518
+ session_id_to_load = session_id or self.session_id
264
519
 
265
- # Read existing session from storage
266
- self.read_from_storage()
520
+ # Try to load from database
521
+ if self.db is not None and session_id_to_load is not None:
522
+ workflow_session = cast(WorkflowSession, self._read_session(session_id=session_id_to_load))
523
+ return workflow_session
267
524
 
268
- # Update the session_id for all Agent instances
269
- self.update_agent_session_ids()
525
+ log_warning(f"WorkflowSession {session_id_to_load} not found in db")
526
+ return None
270
527
 
271
- log_debug(f"Workflow Run Start: {self.run_id}", center=True)
528
+ def save_session(self, session: WorkflowSession) -> None:
529
+ """Save the WorkflowSession to storage
530
+
531
+ Returns:
532
+ Optional[WorkflowSession]: The saved WorkflowSession or None if not saved.
533
+ """
534
+ if self.db is not None and session.session_data is not None:
535
+ if session.session_data.get("session_state") is not None:
536
+ session.session_data["session_state"].pop("current_session_id", None)
537
+ session.session_data["session_state"].pop("current_user_id", None)
538
+ session.session_data["session_state"].pop("current_run_id", None)
539
+ session.session_data["session_state"].pop("workflow_id", None)
540
+ session.session_data["session_state"].pop("run_id", None)
541
+ session.session_data["session_state"].pop("session_id", None)
542
+ session.session_data["session_state"].pop("workflow_name", None)
543
+
544
+ self._upsert_session(session=session)
545
+ log_debug(f"Created or updated WorkflowSession record: {session.session_id}")
546
+
547
+ # -*- Session Database Functions
548
+ def _read_session(self, session_id: str) -> Optional[WorkflowSession]:
549
+ """Get a Session from the database."""
272
550
  try:
273
- self._subclass_arun = cast(Callable, self._subclass_arun)
274
- result = await self._subclass_arun(**kwargs)
551
+ if not self.db:
552
+ raise ValueError("Db not initialized")
553
+ session = self.db.get_session(session_id=session_id, session_type=SessionType.WORKFLOW)
554
+ return session if isinstance(session, (WorkflowSession, type(None))) else None
275
555
  except Exception as e:
276
- logger.error(f"Workflow.arun() failed: {e}")
277
- raise e
278
-
279
- # Handle single RunResponse result
280
- if isinstance(result, RunResponse):
281
- # Update the result with the run_id, session_id and workflow_id of the workflow run
282
- result.run_id = self.run_id
283
- result.session_id = self.session_id
284
- result.workflow_id = self.workflow_id
285
-
286
- # Update the run_response with the content from the result
287
- if result.content is not None and isinstance(result.content, str):
288
- self.run_response.content = result.content
289
-
290
- # Add the run to the memory
291
- if isinstance(self.memory, WorkflowMemory):
292
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
293
- elif isinstance(self.memory, Memory):
294
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
295
- # Write this run to the database
296
- self.write_to_storage()
297
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
298
- return result
299
- else:
300
- logger.warning(f"Workflow.arun() should only return RunResponse objects, got: {type(result)}")
556
+ log_warning(f"Error getting session from db: {e}")
301
557
  return None
302
558
 
303
- async def arun_workflow_generator(self, **kwargs: Any) -> AsyncIterator[RunResponse]:
304
- """Run the Workflow asynchronously for async generators"""
559
+ def _upsert_session(self, session: WorkflowSession) -> Optional[WorkflowSession]:
560
+ """Upsert a Session into the database."""
305
561
 
306
- # Set mode, debug, workflow_id, session_id, initialize memory
307
- self.set_storage_mode()
308
- self.set_debug()
309
- self.set_monitoring()
310
- self.set_workflow_id() # Ensure workflow_id is set
311
- self.set_session_id()
312
- self.initialize_memory()
562
+ try:
563
+ if not self.db:
564
+ raise ValueError("Db not initialized")
565
+ result = self.db.upsert_session(session=session)
566
+ return result if isinstance(result, (WorkflowSession, type(None))) else None
567
+ except Exception as e:
568
+ log_warning(f"Error upserting session into db: {e}")
569
+ return None
313
570
 
314
- # Create a run_id
315
- self.run_id = str(uuid4())
571
+ def _update_metadata(self, session: WorkflowSession):
572
+ """Update the extra_data in the session"""
573
+ from agno.utils.merge_dict import merge_dictionaries
316
574
 
317
- # Set run_input, run_response
318
- self.run_input = kwargs
319
- self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, workflow_id=self.workflow_id)
575
+ # Read metadata from the database
576
+ if session.metadata is not None:
577
+ # If metadata is set in the workflow, update the database metadata with the workflow's metadata
578
+ if self.metadata is not None:
579
+ # Updates workflow's session metadata in place
580
+ merge_dictionaries(session.metadata, self.metadata)
581
+ # Adopt the merged metadata (the session's metadata dict was updated in place above)
582
+ self.metadata = session.metadata
320
583
 
321
- # Read existing session from storage
322
- self.read_from_storage()
584
+ def _update_session_state(self, session: WorkflowSession, session_state: Dict[str, Any]):
585
+ """Load the existing Workflow from a WorkflowSession (from the database)"""
586
+
587
+ from agno.utils.merge_dict import merge_dictionaries
588
+
589
+ # Get the session_state from the database and update the current session_state
590
+ if session.session_data and "session_state" in session.session_data:
591
+ session_state_from_db = session.session_data.get("session_state")
592
+
593
+ if (
594
+ session_state_from_db is not None
595
+ and isinstance(session_state_from_db, dict)
596
+ and len(session_state_from_db) > 0
597
+ ):
598
+ # This updates session_state_from_db
599
+ # If there are conflicting keys, values from provided session_state will take precedence
600
+ merge_dictionaries(session_state_from_db, session_state)
601
+ session_state = session_state_from_db
602
+
603
+ # Update the session_state in the session
604
+ if session.session_data is None:
605
+ session.session_data = {}
606
+ session.session_data["session_state"] = session_state
607
+
608
+ return session_state
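The merge gives precedence to the session_state passed in by the caller; values from the database only fill in missing keys. For example, using the same merge_dictionaries helper imported above:

from agno.utils.merge_dict import merge_dictionaries

state_from_db = {"counter": 1, "theme": "dark"}
provided_state = {"counter": 5}
merge_dictionaries(state_from_db, provided_state)  # updates state_from_db in place
# state_from_db == {"counter": 5, "theme": "dark"}  -> provided values win on conflicts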
609
+
610
+ def _get_workflow_data(self) -> Dict[str, Any]:
611
+ workflow_data = {}
612
+
613
+ if self.steps and not callable(self.steps):
614
+ steps_dict = []
615
+ for step in self.steps: # type: ignore
616
+ if callable(step):
617
+ step_type = StepType.STEP
618
+ elif isinstance(step, Agent) or isinstance(step, Team):
619
+ step_type = StepType.STEP
620
+ else:
621
+ step_type = STEP_TYPE_MAPPING[type(step)]
622
+ step_dict = {
623
+ "name": step.name if hasattr(step, "name") else step.__name__,
624
+ "description": step.description if hasattr(step, "description") else "User-defined callable step",
625
+ "type": step_type.value,
626
+ }
627
+ steps_dict.append(step_dict)
323
628
 
324
- # Update the session_id for all Agent instances
325
- self.update_agent_session_ids()
629
+ workflow_data["steps"] = steps_dict
326
630
 
327
- log_debug(f"Workflow Run Start: {self.run_id}", center=True)
328
- # Initialize the run_response content
329
- self.run_response.content = ""
330
- try:
331
- self._subclass_arun = cast(Callable, self._subclass_arun)
332
- async for item in self._subclass_arun(**kwargs):
333
- if (
334
- isinstance(item, tuple(get_args(RunResponseEvent)))
335
- or isinstance(item, tuple(get_args(TeamRunResponseEvent)))
336
- or isinstance(item, tuple(get_args(WorkflowRunResponseEvent)))
337
- or isinstance(item, RunResponse)
338
- ):
339
- # Update the run_id, session_id and workflow_id of the RunResponseEvent
340
- item.run_id = self.run_id
341
- item.session_id = self.session_id
342
- item.workflow_id = self.workflow_id
631
+ elif callable(self.steps):
632
+ workflow_data["steps"] = [
633
+ {
634
+ "name": "Custom Function",
635
+ "description": "User-defined callable workflow",
636
+ "type": "Callable",
637
+ }
638
+ ]
343
639
 
344
- # Update the run_response with the content from the result
345
- if hasattr(item, "content") and item.content is not None and isinstance(item.content, str):
346
- self.run_response.content += item.content
347
- else:
348
- logger.warning(f"Workflow.run() should only yield RunResponseEvent objects, got: {type(item)}")
349
- yield item
350
-
351
- # Add the run to the memory
352
- if isinstance(self.memory, WorkflowMemory):
353
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
354
- elif isinstance(self.memory, Memory):
355
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
356
- # Write this run to the database
357
- self.write_to_storage()
358
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
359
- except Exception as e:
360
- logger.error(f"Workflow.arun() failed: {e}")
361
- raise e
640
+ return workflow_data
362
641
 
363
- async def arun(self, **kwargs: Any):
364
- """Async version of run() that calls arun_workflow()"""
365
- logger.error(f"{self.__class__.__name__}.arun() method not implemented.")
366
- return
642
+ def _handle_event(
643
+ self,
644
+ event: "WorkflowRunOutputEvent",
645
+ workflow_run_response: WorkflowRunOutput,
646
+ websocket_handler: Optional[WebSocketHandler] = None,
647
+ ) -> "WorkflowRunOutputEvent":
648
+ """Handle workflow events for storage - similar to Team._handle_event"""
649
+ if self.store_events:
650
+ # Check if this event type should be skipped
651
+ if self.events_to_skip:
652
+ event_type = event.event
653
+ for skip_event in self.events_to_skip:
654
+ if isinstance(skip_event, str):
655
+ if event_type == skip_event:
656
+ return event
657
+ else:
658
+ # It's a WorkflowRunEvent enum
659
+ if event_type == skip_event.value:
660
+ return event
367
661
 
368
- def set_storage_mode(self):
369
- if self.storage is not None:
370
- self.storage.mode = "workflow"
662
+ # Store the event
663
+ if workflow_run_response.events is None:
664
+ workflow_run_response.events = []
371
665
 
372
- def initialize_workflow(self):
373
- self.set_storage_mode()
666
+ workflow_run_response.events.append(event)
374
667
 
375
- def set_workflow_id(self) -> str:
376
- if self.workflow_id is None:
377
- self.workflow_id = str(uuid4())
378
- log_debug(f"Workflow ID: {self.workflow_id}", center=True)
379
- return self.workflow_id
668
+ # Broadcast to WebSocket if available (async context only)
669
+ if websocket_handler:
670
+ import asyncio
380
671
 
381
- def set_session_id(self) -> str:
382
- if self.session_id is None:
383
- self.session_id = str(uuid4())
384
- log_debug(f"Session ID: {self.session_id}", center=True)
385
- return self.session_id
672
+ try:
673
+ loop = asyncio.get_running_loop()
674
+ if loop:
675
+ asyncio.create_task(websocket_handler.handle_event(event))
676
+ except RuntimeError:
677
+ pass
678
+
679
+ return event
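Event storage is opt-in: nothing is appended to workflow_run_response.events unless store_events is enabled, and individual event types can be filtered with events_to_skip (plain event-name strings or WorkflowRunEvent enum members, as handled above). A hedged sketch, assuming both are plain attributes on the Workflow instance:

workflow.store_events = True
# String entries are compared against event.event directly; the exact value below is an assumption.
workflow.events_to_skip = ["WorkflowStarted"]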
680
+
681
+ def _transform_step_output_to_event(
682
+ self, step_output: StepOutput, workflow_run_response: WorkflowRunOutput, step_index: Optional[int] = None
683
+ ) -> StepOutputEvent:
684
+ """Transform a StepOutput object into a StepOutputEvent for consistent streaming interface"""
685
+ return StepOutputEvent(
686
+ step_output=step_output,
687
+ run_id=workflow_run_response.run_id or "",
688
+ workflow_name=workflow_run_response.workflow_name,
689
+ workflow_id=workflow_run_response.workflow_id,
690
+ session_id=workflow_run_response.session_id,
691
+ step_name=step_output.step_name,
692
+ step_index=step_index,
693
+ )
386
694
 
387
- def set_debug(self) -> None:
695
+ def _set_debug(self) -> None:
696
+ """Set debug mode and configure logging"""
388
697
  if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
698
+ use_workflow_logger()
699
+
389
700
  self.debug_mode = True
390
- set_log_level_to_debug()
391
- log_debug("Debug logs enabled")
392
- else:
393
- set_log_level_to_info()
701
+ set_log_level_to_debug(source_type="workflow")
702
+
703
+ # Propagate to steps - only if steps is iterable (not callable)
704
+ if self.steps and not callable(self.steps):
705
+ if isinstance(self.steps, Steps):
706
+ steps_to_iterate = self.steps.steps
707
+ else:
708
+ steps_to_iterate = self.steps
394
709
 
395
- def set_monitoring(self) -> None:
396
- """Override monitoring and telemetry settings based on environment variables."""
710
+ for step in steps_to_iterate:
711
+ self._propagate_debug_to_step(step)
712
+ else:
713
+ set_log_level_to_info(source_type="workflow")
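Debug logging can also be switched on globally through the AGNO_DEBUG environment variable checked above, and telemetry through AGNO_TELEMETRY in _set_telemetry below. For example, set them before the workflow initializes logging:

import os

os.environ["AGNO_DEBUG"] = "true"       # _set_debug() enables workflow debug logging
os.environ["AGNO_TELEMETRY"] = "false"  # _set_telemetry() turns telemetry off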
397
714
 
398
- # Only override if the environment variable is set
399
- monitor_env = getenv("AGNO_MONITOR")
400
- if monitor_env is not None:
401
- self.monitoring = monitor_env.lower() == "true"
715
+ def _set_telemetry(self) -> None:
716
+ """Override telemetry settings based on environment variables."""
402
717
 
403
- # Override telemetry if environment variable is set
404
718
  telemetry_env = getenv("AGNO_TELEMETRY")
405
719
  if telemetry_env is not None:
406
720
  self.telemetry = telemetry_env.lower() == "true"
407
721
 
408
- def initialize_memory(self) -> None:
409
- if self.memory is None:
410
- self.memory = Memory()
411
-
412
- def update_run_method(self):
413
- run_type = None
414
- # Update the run() method to call run_workflow() instead of the subclass's run()
415
- # First, check if the subclass has a run method
416
- # If the run() method has been overridden by the subclass,
417
- # then self.__class__.run is not Workflow.run will be True
418
- if self.__class__.run is not Workflow.run:
419
- # Store the original run methods bound to the instance
420
- self._subclass_run = self.__class__.run.__get__(self)
421
- run_type = "sync"
422
- # Get the parameters of the sync run method
423
- sig = inspect.signature(self.__class__.run)
424
-
425
- if self.__class__.arun is not Workflow.arun:
426
- self._subclass_arun = self.__class__.arun.__get__(self)
427
- run_type = "coroutine"
428
-
429
- # Get the parameters of the async run method
430
- sig = inspect.signature(self.__class__.arun)
431
-
432
- # Check if the async method is a coroutine or async generator
433
- from inspect import isasyncgenfunction
434
-
435
- if isasyncgenfunction(self.__class__.arun):
436
- run_type = "async_generator"
437
-
438
- if run_type is not None:
439
- # Convert parameters to a serializable format
440
- self._run_parameters = {
441
- param_name: {
442
- "name": param_name,
443
- "default": param.default.default
444
- if hasattr(param.default, "__class__") and param.default.__class__.__name__ == "FieldInfo"
445
- else (param.default if param.default is not inspect.Parameter.empty else None),
446
- "annotation": (
447
- param.annotation.__name__
448
- if hasattr(param.annotation, "__name__")
449
- else (
450
- str(param.annotation).replace("typing.Optional[", "").replace("]", "")
451
- if "typing.Optional" in str(param.annotation)
452
- else str(param.annotation)
453
- )
722
+ def _propagate_debug_to_step(self, step):
723
+ """Recursively propagate debug mode to steps and nested primitives"""
724
+ # Handle direct Step objects
725
+ if hasattr(step, "active_executor") and step.active_executor:
726
+ executor = step.active_executor
727
+ if hasattr(executor, "debug_mode"):
728
+ executor.debug_mode = True
729
+
730
+ # If it's a team, propagate to all members
731
+ if hasattr(executor, "members"):
732
+ for member in executor.members:
733
+ if hasattr(member, "debug_mode"):
734
+ member.debug_mode = True
735
+
736
+ # Handle nested primitives - check both 'steps' and 'choices' attributes
737
+ for attr_name in ["steps", "choices"]:
738
+ if hasattr(step, attr_name):
739
+ attr_value = getattr(step, attr_name)
740
+ if attr_value and isinstance(attr_value, list):
741
+ for nested_step in attr_value:
742
+ self._propagate_debug_to_step(nested_step)
743
+
744
+ def _create_step_input(
745
+ self,
746
+ execution_input: WorkflowExecutionInput,
747
+ previous_step_outputs: Optional[Dict[str, StepOutput]] = None,
748
+ shared_images: Optional[List[Image]] = None,
749
+ shared_videos: Optional[List[Video]] = None,
750
+ shared_audio: Optional[List[Audio]] = None,
751
+ shared_files: Optional[List[File]] = None,
752
+ ) -> StepInput:
753
+ """Helper method to create StepInput with enhanced data flow support"""
754
+
755
+ previous_step_content = None
756
+ if previous_step_outputs:
757
+ last_output = list(previous_step_outputs.values())[-1]
758
+ previous_step_content = last_output.content if last_output else None
759
+ log_debug(f"Using previous step content from: {list(previous_step_outputs.keys())[-1]}")
760
+
761
+ return StepInput(
762
+ input=execution_input.input,
763
+ previous_step_content=previous_step_content,
764
+ previous_step_outputs=previous_step_outputs,
765
+ additional_data=execution_input.additional_data,
766
+ images=shared_images or [],
767
+ videos=shared_videos or [],
768
+ audio=shared_audio or [],
769
+ files=shared_files or [],
770
+ )
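Each step therefore receives a StepInput carrying the original workflow input, the previous step's content, the full map of previous outputs, and the accumulated media. A hedged sketch of a function-based step consuming it (import paths and the exact callable-step signature are assumptions):

from agno.workflow.step import StepInput, StepOutput  # import path assumed

def summarize(step_input: StepInput) -> StepOutput:
    # Prefer the previous step's content, fall back to the raw workflow input.
    text = step_input.previous_step_content or step_input.input
    return StepOutput(content=f"Summary: {str(text)[:100]}")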
771
+
772
+ def _get_step_count(self) -> int:
773
+ """Get the number of steps in the workflow"""
774
+ if self.steps is None:
775
+ return 0
776
+ elif callable(self.steps):
777
+ return 1 # Callable function counts as 1 step
778
+ else:
779
+ # Handle Steps wrapper
780
+ if isinstance(self.steps, Steps):
781
+ return len(self.steps.steps)
782
+ else:
783
+ return len(self.steps)
784
+
785
+ def _aggregate_workflow_metrics(self, step_results: List[Union[StepOutput, List[StepOutput]]]) -> WorkflowMetrics:
786
+ """Aggregate metrics from all step responses into structured workflow metrics"""
787
+ steps_dict = {}
788
+
789
+ def process_step_output(step_output: StepOutput):
790
+ """Process a single step output for metrics"""
791
+
792
+ # If this step has nested steps, process them recursively
793
+ if hasattr(step_output, "steps") and step_output.steps:
794
+ for nested_step in step_output.steps:
795
+ process_step_output(nested_step)
796
+
797
+ # Only collect metrics from steps that actually have metrics (actual agents/teams)
798
+ if (
799
+ step_output.step_name and step_output.metrics and step_output.executor_type in ["agent", "team"]
800
+ ): # Only include actual executors
801
+ step_metrics = StepMetrics(
802
+ step_name=step_output.step_name,
803
+ executor_type=step_output.executor_type or "unknown",
804
+ executor_name=step_output.executor_name or "unknown",
805
+ metrics=step_output.metrics,
806
+ )
807
+ steps_dict[step_output.step_name] = step_metrics
808
+
809
+ # Process all step results
810
+ for step_result in step_results:
811
+ process_step_output(cast(StepOutput, step_result))
812
+
813
+ return WorkflowMetrics(
814
+ steps=steps_dict,
815
+ )
816
+
817
+ def _call_custom_function(self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any) -> Any:
818
+ """Call custom function with only the parameters it expects"""
819
+ from inspect import signature
820
+
821
+ sig = signature(func)
822
+
823
+ # Build arguments based on what the function actually accepts
824
+ call_kwargs: Dict[str, Any] = {}
825
+
826
+ # Only add workflow and execution_input if the function expects them
827
+ if "workflow" in sig.parameters: # type: ignore
828
+ call_kwargs["workflow"] = self
829
+ if "execution_input" in sig.parameters:
830
+ call_kwargs["execution_input"] = execution_input # type: ignore
831
+ if "session_state" in sig.parameters:
832
+ call_kwargs["session_state"] = self.session_state # type: ignore
833
+
834
+ # Add any other kwargs that the function expects
835
+ for param_name in kwargs:
836
+ if param_name in sig.parameters: # type: ignore
837
+ call_kwargs[param_name] = kwargs[param_name]
838
+
839
+ # If function has **kwargs parameter, pass all remaining kwargs
840
+ for param in sig.parameters.values(): # type: ignore
841
+ if param.kind == param.VAR_KEYWORD:
842
+ call_kwargs.update(kwargs)
843
+ break
844
+
845
+ try:
846
+ return func(**call_kwargs)
847
+ except TypeError as e:
848
+ # If signature inspection fails, fall back to original method
849
+ logger.warning(
850
+ f"Async function signature inspection failed: {e}. Falling back to original calling convention."
851
+ )
852
+ return func(**call_kwargs)
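Because the signature is inspected, a custom steps callable only needs to declare the parameters it actually uses; workflow, execution_input and session_state are injected when present. A minimal sketch (parameter names must match exactly):

def my_pipeline(execution_input, session_state):
    # Only these two parameters are declared, so only they are passed in.
    session_state["runs"] = session_state.get("runs", 0) + 1
    return f"Processed: {execution_input.input}"

workflow.steps = my_pipeline  # a plain callable is executed through _call_custom_function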
853
+
854
+ def _execute(
855
+ self,
856
+ session: WorkflowSession,
857
+ execution_input: WorkflowExecutionInput,
858
+ workflow_run_response: WorkflowRunOutput,
859
+ session_state: Optional[Dict[str, Any]] = None,
860
+ **kwargs: Any,
861
+ ) -> WorkflowRunOutput:
862
+ """Execute a specific pipeline by name synchronously"""
863
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
864
+
865
+ workflow_run_response.status = RunStatus.running
866
+ register_run(workflow_run_response.run_id) # type: ignore
867
+
868
+ if callable(self.steps):
869
+ if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
870
+ raise ValueError("Cannot use async function with synchronous execution")
871
+ elif isgeneratorfunction(self.steps):
872
+ content = ""
873
+ for chunk in self.steps(self, execution_input, **kwargs):
874
+ # Check for cancellation while consuming generator
875
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
876
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
877
+ content += chunk.content
878
+ else:
879
+ content += str(chunk)
880
+ workflow_run_response.content = content
881
+ else:
882
+ # Execute the workflow with the custom executor
883
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
884
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs) # type: ignore[arg-type]
885
+
886
+ workflow_run_response.status = RunStatus.completed
887
+ else:
888
+ try:
889
+ # Track outputs from each step for enhanced data flow
890
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
891
+ previous_step_outputs: Dict[str, StepOutput] = {}
892
+
893
+ shared_images: List[Image] = execution_input.images or []
894
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
895
+ shared_videos: List[Video] = execution_input.videos or []
896
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
897
+ shared_audio: List[Audio] = execution_input.audio or []
898
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
899
+ shared_files: List[File] = execution_input.files or []
900
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
901
+
902
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
903
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
904
+ step_name = getattr(step, "name", f"step_{i + 1}")
905
+ log_debug(f"Executing step {i + 1}/{self._get_step_count()}: {step_name}")
906
+
907
+ # Create enhanced StepInput
908
+ step_input = self._create_step_input(
909
+ execution_input=execution_input,
910
+ previous_step_outputs=previous_step_outputs,
911
+ shared_images=shared_images,
912
+ shared_videos=shared_videos,
913
+ shared_audio=shared_audio,
914
+ shared_files=shared_files,
454
915
  )
455
- if param.annotation is not inspect.Parameter.empty
456
- else None,
457
- "required": param.default is inspect.Parameter.empty,
458
- }
459
- for param_name, param in sig.parameters.items()
460
- if param_name != "self"
461
- }
462
- # Determine the return type of the run method
463
- return_annotation = sig.return_annotation
464
- self._run_return_type = (
465
- return_annotation.__name__
466
- if return_annotation is not inspect.Signature.empty and hasattr(return_annotation, "__name__")
467
- else str(return_annotation)
468
- if return_annotation is not inspect.Signature.empty
469
- else None
916
+
917
+ # Check for cancellation before executing step
918
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
919
+
920
+ step_output = step.execute( # type: ignore[union-attr]
921
+ step_input,
922
+ session_id=session.session_id,
923
+ user_id=self.user_id,
924
+ workflow_run_response=workflow_run_response,
925
+ session_state=session_state,
926
+ store_executor_outputs=self.store_executor_outputs,
927
+ )
928
+
929
+ # Check for cancellation after step execution
930
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
931
+
932
+ # Update the workflow-level previous_step_outputs dictionary
933
+ previous_step_outputs[step_name] = step_output
934
+ if step_output.stop:
935
+ logger.info(f"Early termination requested by step {step_name}")
936
+ break
937
+
938
+ # Update shared media for next step
939
+ shared_images.extend(step_output.images or [])
940
+ shared_videos.extend(step_output.videos or [])
941
+ shared_audio.extend(step_output.audio or [])
942
+ shared_files.extend(step_output.files or [])
943
+ output_images.extend(step_output.images or [])
944
+ output_videos.extend(step_output.videos or [])
945
+ output_audio.extend(step_output.audio or [])
946
+ output_files.extend(step_output.files or [])
947
+
948
+ collected_step_outputs.append(step_output)
949
+
950
+ # Update the workflow_run_response with completion data
951
+ if collected_step_outputs:
952
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
953
+ last_output = cast(StepOutput, collected_step_outputs[-1])
954
+
955
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
956
+ if getattr(last_output, "steps", None):
957
+ _cur = last_output
958
+ while getattr(_cur, "steps", None):
959
+ _steps = _cur.steps or []
960
+ if not _steps:
961
+ break
962
+ _cur = _steps[-1]
963
+ workflow_run_response.content = _cur.content
964
+ else:
965
+ workflow_run_response.content = last_output.content
966
+ else:
967
+ workflow_run_response.content = "No steps executed"
968
+
969
+ workflow_run_response.step_results = collected_step_outputs
970
+ workflow_run_response.images = output_images
971
+ workflow_run_response.videos = output_videos
972
+ workflow_run_response.audio = output_audio
973
+ workflow_run_response.status = RunStatus.completed
974
+
975
+ except RunCancelledException as e:
976
+ # Handle run cancellation
977
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
978
+ workflow_run_response.status = RunStatus.cancelled
979
+ workflow_run_response.content = str(e)
980
+ except Exception as e:
981
+ import traceback
982
+
983
+ traceback.print_exc()
984
+ logger.error(f"Workflow execution failed: {e}")
985
+ # Store error response
986
+ workflow_run_response.status = RunStatus.error
987
+ workflow_run_response.content = f"Workflow execution failed: {e}"
988
+
989
+ finally:
990
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
991
+ session.upsert_run(run=workflow_run_response)
992
+ self.save_session(session=session)
993
+ # Always clean up the run tracking
994
+ cleanup_run(workflow_run_response.run_id) # type: ignore
995
+
996
+ # Log Workflow Telemetry
997
+ if self.telemetry:
998
+ self._log_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
999
+
1000
+ return workflow_run_response
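A hedged end-to-end sketch of this synchronous path; the public entry point that builds the WorkflowExecutionInput and delegates to _execute is assumed to be Workflow.run:

from agno.workflow import Workflow, StepOutput  # import paths assumed

def echo_step(step_input):
    return StepOutput(content=f"echo: {step_input.input}")

workflow = Workflow(name="demo", steps=[echo_step])        # constructor kwargs are assumptions
run_output = workflow.run(input="Write a post about AGI")  # public API name is an assumption
print(run_output.status, run_output.content)
print(run_output.metrics)  # aggregated per-step WorkflowMetrics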
1001
+
1002
+ def _execute_stream(
1003
+ self,
1004
+ session: WorkflowSession,
1005
+ execution_input: WorkflowExecutionInput,
1006
+ workflow_run_response: WorkflowRunOutput,
1007
+ session_state: Optional[Dict[str, Any]] = None,
1008
+ stream_intermediate_steps: bool = False,
1009
+ **kwargs: Any,
1010
+ ) -> Iterator[WorkflowRunOutputEvent]:
1011
+ """Execute a specific pipeline by name with event streaming"""
1012
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1013
+
1014
+ workflow_run_response.status = RunStatus.running
1015
+
1016
+ # Register run for cancellation tracking
1017
+ if workflow_run_response.run_id:
1018
+ register_run(workflow_run_response.run_id)
1019
+
1020
+ workflow_started_event = WorkflowStartedEvent(
1021
+ run_id=workflow_run_response.run_id or "",
1022
+ workflow_name=workflow_run_response.workflow_name,
1023
+ workflow_id=workflow_run_response.workflow_id,
1024
+ session_id=workflow_run_response.session_id,
1025
+ )
1026
+ yield self._handle_event(workflow_started_event, workflow_run_response)
1027
+
1028
+ if callable(self.steps):
1029
+ if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
1030
+ raise ValueError("Cannot use async function with synchronous execution")
1031
+ elif isgeneratorfunction(self.steps):
1032
+ content = ""
1033
+ for chunk in self._call_custom_function(self.steps, execution_input, **kwargs): # type: ignore[arg-type]
1034
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1035
+ # Update the run_response with the content from the result
1036
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1037
+ content += chunk.content
1038
+ yield chunk
1039
+ else:
1040
+ content += str(chunk)
1041
+ workflow_run_response.content = content
1042
+ else:
1043
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1044
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs)
1045
+ workflow_run_response.status = RunStatus.completed
1046
+
1047
+ else:
1048
+ try:
1049
+ # Track outputs from each step for enhanced data flow
1050
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1051
+ previous_step_outputs: Dict[str, StepOutput] = {}
1052
+
1053
+ shared_images: List[Image] = execution_input.images or []
1054
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
1055
+ shared_videos: List[Video] = execution_input.videos or []
1056
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
1057
+ shared_audio: List[Audio] = execution_input.audio or []
1058
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
1059
+ shared_files: List[File] = execution_input.files or []
1060
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1061
+
1062
+ early_termination = False
1063
+
1064
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1065
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1066
+ step_name = getattr(step, "name", f"step_{i + 1}")
1067
+ log_debug(f"Streaming step {i + 1}/{self._get_step_count()}: {step_name}")
1068
+
1069
+ # Create enhanced StepInput
1070
+ step_input = self._create_step_input(
1071
+ execution_input=execution_input,
1072
+ previous_step_outputs=previous_step_outputs,
1073
+ shared_images=shared_images,
1074
+ shared_videos=shared_videos,
1075
+ shared_audio=shared_audio,
1076
+ shared_files=shared_files,
1077
+ )
1078
+
1079
+ # Execute step with streaming and yield all events
1080
+ for event in step.execute_stream( # type: ignore[union-attr]
1081
+ step_input,
1082
+ session_id=session.session_id,
1083
+ user_id=self.user_id,
1084
+ stream_intermediate_steps=stream_intermediate_steps,
1085
+ workflow_run_response=workflow_run_response,
1086
+ session_state=session_state,
1087
+ step_index=i,
1088
+ store_executor_outputs=self.store_executor_outputs,
1089
+ ):
1090
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1091
+ # Handle events
1092
+ if isinstance(event, StepOutput):
1093
+ step_output = event
1094
+ collected_step_outputs.append(step_output)
1095
+
1096
+ # Update the workflow-level previous_step_outputs dictionary
1097
+ previous_step_outputs[step_name] = step_output
1098
+
1099
+ # Transform StepOutput to StepOutputEvent for consistent streaming interface
1100
+ step_output_event = self._transform_step_output_to_event(
1101
+ step_output, workflow_run_response, step_index=i
1102
+ )
1103
+
1104
+ if step_output.stop:
1105
+ logger.info(f"Early termination requested by step {step_name}")
1106
+ # Update shared media for next step
1107
+ shared_images.extend(step_output.images or [])
1108
+ shared_videos.extend(step_output.videos or [])
1109
+ shared_audio.extend(step_output.audio or [])
1110
+ shared_files.extend(step_output.files or [])
1111
+ output_images.extend(step_output.images or [])
1112
+ output_videos.extend(step_output.videos or [])
1113
+ output_audio.extend(step_output.audio or [])
1114
+ output_files.extend(step_output.files or [])
1115
+
1116
+ # Only yield StepOutputEvent for function executors, not for agents/teams
1117
+ if getattr(step, "executor_type", None) == "function":
1118
+ yield step_output_event
1119
+
1120
+ # Break out of the step loop
1121
+ early_termination = True
1122
+ break
1123
+
1124
+ # Update shared media for next step
1125
+ shared_images.extend(step_output.images or [])
1126
+ shared_videos.extend(step_output.videos or [])
1127
+ shared_audio.extend(step_output.audio or [])
1128
+ shared_files.extend(step_output.files or [])
1129
+ output_images.extend(step_output.images or [])
1130
+ output_videos.extend(step_output.videos or [])
1131
+ output_audio.extend(step_output.audio or [])
1132
+ output_files.extend(step_output.files or [])
1133
+
1134
+ # Only yield StepOutputEvent for generator functions, not for agents/teams
1135
+ if getattr(step, "executor_type", None) == "function":
1136
+ yield step_output_event
1137
+
1138
+ elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
1139
+ yield self._handle_event(event, workflow_run_response) # type: ignore
1140
+
1141
+ else:
1142
+ # Yield other internal events
1143
+ yield self._handle_event(event, workflow_run_response) # type: ignore
1144
+
1145
+ # Break out of main step loop if early termination was requested
1146
+ if "early_termination" in locals() and early_termination:
1147
+ break
1148
+
1149
+ # Update the workflow_run_response with completion data
1150
+ if collected_step_outputs:
1151
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1152
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1153
+
1154
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1155
+ if getattr(last_output, "steps", None):
1156
+ _cur = last_output
1157
+ while getattr(_cur, "steps", None):
1158
+ _steps = _cur.steps or []
1159
+ if not _steps:
1160
+ break
1161
+ _cur = _steps[-1]
1162
+ workflow_run_response.content = _cur.content
1163
+ else:
1164
+ workflow_run_response.content = last_output.content
1165
+ else:
1166
+ workflow_run_response.content = "No steps executed"
1167
+
1168
+ workflow_run_response.step_results = collected_step_outputs
1169
+ workflow_run_response.images = output_images
1170
+ workflow_run_response.videos = output_videos
1171
+ workflow_run_response.audio = output_audio
1172
+ workflow_run_response.status = RunStatus.completed
1173
+
1174
+ except RunCancelledException as e:
1175
+ # Handle run cancellation during streaming
1176
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
1177
+ workflow_run_response.status = RunStatus.cancelled
1178
+ workflow_run_response.content = str(e)
1179
+ cancelled_event = WorkflowCancelledEvent(
1180
+ run_id=workflow_run_response.run_id or "",
1181
+ workflow_id=self.id,
1182
+ workflow_name=self.name,
1183
+ session_id=session.session_id,
1184
+ reason=str(e),
1185
+ )
1186
+ yield self._handle_event(cancelled_event, workflow_run_response)
1187
+ except Exception as e:
1188
+ logger.error(f"Workflow execution failed: {e}")
1189
+
1190
+ from agno.run.workflow import WorkflowErrorEvent
1191
+
1192
+ error_event = WorkflowErrorEvent(
1193
+ run_id=workflow_run_response.run_id or "",
1194
+ workflow_id=self.id,
1195
+ workflow_name=self.name,
1196
+ session_id=session.session_id,
1197
+ error=str(e),
1198
+ )
1199
+
1200
+ yield error_event
1201
+
1202
+ # Update workflow_run_response with error
1203
+ workflow_run_response.content = error_event.error
1204
+ workflow_run_response.status = RunStatus.error
1205
+
1206
+ # Yield workflow completed event
1207
+ workflow_completed_event = WorkflowCompletedEvent(
1208
+ run_id=workflow_run_response.run_id or "",
1209
+ content=workflow_run_response.content,
1210
+ workflow_name=workflow_run_response.workflow_name,
1211
+ workflow_id=workflow_run_response.workflow_id,
1212
+ session_id=workflow_run_response.session_id,
1213
+ step_results=workflow_run_response.step_results, # type: ignore
1214
+ metadata=workflow_run_response.metadata,
1215
+ )
1216
+ yield self._handle_event(workflow_completed_event, workflow_run_response)
1217
+
1218
+ # Store the completed workflow response
1219
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1220
+ session.upsert_run(run=workflow_run_response)
1221
+ self.save_session(session=session)
1222
+
1223
+ # Always clean up the run tracking
1224
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1225
+
1226
+ # Log Workflow Telemetry
1227
+ if self.telemetry:
1228
+ self._log_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
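The streaming variant yields typed events (WorkflowStartedEvent, StepOutputEvent for function steps, WorkflowCompletedEvent, plus error/cancellation events) instead of a single output. A hedged consumption sketch, assuming the public run API exposes the stream flags used internally:

for event in workflow.run(input="Summarize this repo", stream=True, stream_intermediate_steps=True):
    # Each event carries run_id / workflow_id / session_id, set before it is yielded.
    print(type(event).__name__, getattr(event, "content", None))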
1229
+
1230
+ async def _acall_custom_function(
1231
+ self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any
1232
+ ) -> Any:
1233
+ """Call custom function with only the parameters it expects - handles both async functions and async generators"""
1234
+ from inspect import isasyncgenfunction, signature
1235
+
1236
+ sig = signature(func)
1237
+
1238
+ # Build arguments based on what the function actually accepts
1239
+ call_kwargs: Dict[str, Any] = {}
1240
+
1241
+ # Only add workflow and execution_input if the function expects them
1242
+ if "workflow" in sig.parameters: # type: ignore
1243
+ call_kwargs["workflow"] = self
1244
+ if "execution_input" in sig.parameters:
1245
+ call_kwargs["execution_input"] = execution_input # type: ignore
1246
+ if "session_state" in sig.parameters:
1247
+ call_kwargs["session_state"] = self.session_state # type: ignore
1248
+
1249
+ # Add any other kwargs that the function expects
1250
+ for param_name in kwargs:
1251
+ if param_name in sig.parameters: # type: ignore
1252
+ call_kwargs[param_name] = kwargs[param_name]
1253
+
1254
+ # If function has **kwargs parameter, pass all remaining kwargs
1255
+ for param in sig.parameters.values(): # type: ignore
1256
+ if param.kind == param.VAR_KEYWORD:
1257
+ call_kwargs.update(kwargs)
1258
+ break
1259
+
1260
+ try:
1261
+ # Check if it's an async generator function
1262
+ if isasyncgenfunction(func):
1263
+ # For async generators, call the function and return the async generator directly
1264
+ return func(**call_kwargs) # type: ignore
1265
+ else:
1266
+ # For regular async functions, await the result
1267
+ return await func(**call_kwargs) # type: ignore
1268
+ except TypeError as e:
1269
+ # If signature inspection fails, fall back to original method
1270
+ logger.warning(
1271
+ f"Async function signature inspection failed: {e}. Falling back to original calling convention."
470
1272
  )
471
- # Important: Replace the instance's run method with run_workflow
472
- # This is so we call run_workflow() instead of the subclass's run()
473
- if run_type == "sync":
474
- object.__setattr__(self, "run", self.run_workflow.__get__(self))
475
- elif run_type == "coroutine":
476
- object.__setattr__(self, "arun", self.arun_workflow.__get__(self))
477
- elif run_type == "async_generator":
478
- object.__setattr__(self, "arun", self.arun_workflow_generator.__get__(self))
1273
+ if isasyncgenfunction(func):
1274
+ # For async generators, retry the call with the already-built kwargs
1275
+ return func(**call_kwargs) # type: ignore
1276
+ else:
1277
+ # For regular async functions, retry and await with the already-built kwargs
1278
+ return await func(**call_kwargs) # type: ignore
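On the async path the helper distinguishes plain coroutines from async generators: coroutines are awaited for their return value, while async generators are returned unawaited so the caller can iterate them. A minimal sketch of a streaming custom steps function under that contract:

async def my_async_pipeline(execution_input):
    # An async generator: _acall_custom_function returns it without awaiting,
    # and the async execution paths iterate it chunk by chunk, concatenating string content.
    yield "step 1 done\n"
    yield f"finished: {execution_input.input}"

# workflow.steps = my_async_pipeline  # then run through the async execution path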
1279
+
1280
+ async def _aexecute(
1281
+ self,
1282
+ session: WorkflowSession,
1283
+ execution_input: WorkflowExecutionInput,
1284
+ workflow_run_response: WorkflowRunOutput,
1285
+ session_state: Optional[Dict[str, Any]] = None,
1286
+ **kwargs: Any,
1287
+ ) -> WorkflowRunOutput:
1288
+ """Execute a specific pipeline by name asynchronously"""
1289
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1290
+
1291
+ workflow_run_response.status = RunStatus.running
1292
+
1293
+ # Register run for cancellation tracking
1294
+ register_run(workflow_run_response.run_id) # type: ignore
1295
+
1296
+ if callable(self.steps):
1297
+ # Execute the workflow with the custom executor
1298
+ content = ""
1299
+
1300
+ if iscoroutinefunction(self.steps): # type: ignore
1301
+ workflow_run_response.content = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1302
+ elif isgeneratorfunction(self.steps):
1303
+ for chunk in self.steps(self, execution_input, **kwargs): # type: ignore[arg-type]
1304
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1305
+ content += chunk.content
1306
+ else:
1307
+ content += str(chunk)
1308
+ workflow_run_response.content = content
1309
+ elif isasyncgenfunction(self.steps): # type: ignore
1310
+ async_gen = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1311
+ async for chunk in async_gen:
1312
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1313
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1314
+ content += chunk.content
1315
+ else:
1316
+ content += str(chunk)
1317
+ workflow_run_response.content = content
1318
+ else:
1319
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1320
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs)
1321
+ workflow_run_response.status = RunStatus.completed
1322
+
479
1323
  else:
480
- # If the subclass does not override the run method,
481
- # the Workflow.run() method will be called and will log an error
482
- self._subclass_run = self.run
483
- self._subclass_arun = self.arun
484
-
485
- self._run_parameters = {}
486
- self._run_return_type = None
487
-
488
- def update_agent_session_ids(self):
489
- # Update the session_id for all Agent instances
490
- # use dataclasses.fields() to iterate through fields
491
- for f in fields(self):
492
- field_type = f.type
493
- if isinstance(field_type, Agent):
494
- field_value = getattr(self, f.name)
495
- field_value.session_id = self.session_id
496
-
497
- def get_workflow_data(self) -> Dict[str, Any]:
498
- workflow_data: Dict[str, Any] = {}
499
- if self.name is not None:
500
- workflow_data["name"] = self.name
501
- if self.workflow_id is not None:
502
- workflow_data["workflow_id"] = self.workflow_id
503
- if self.description is not None:
504
- workflow_data["description"] = self.description
505
- return workflow_data
1324
+ try:
1325
+ # Track outputs from each step for enhanced data flow
1326
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1327
+ previous_step_outputs: Dict[str, StepOutput] = {}
1328
+
1329
+ shared_images: List[Image] = execution_input.images or []
1330
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
1331
+ shared_videos: List[Video] = execution_input.videos or []
1332
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
1333
+ shared_audio: List[Audio] = execution_input.audio or []
1334
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
1335
+ shared_files: List[File] = execution_input.files or []
1336
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1337
+
1338
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1339
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1340
+ step_name = getattr(step, "name", f"step_{i + 1}")
1341
+ log_debug(f"Async Executing step {i + 1}/{self._get_step_count()}: {step_name}")
1342
+
1343
+ # Create enhanced StepInput
1344
+ step_input = self._create_step_input(
1345
+ execution_input=execution_input,
1346
+ previous_step_outputs=previous_step_outputs,
1347
+ shared_images=shared_images,
1348
+ shared_videos=shared_videos,
1349
+ shared_audio=shared_audio,
1350
+ shared_files=shared_files,
1351
+ )
1352
+
1353
+ # Check for cancellation before executing step
1354
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1355
+
1356
+ step_output = await step.aexecute( # type: ignore[union-attr]
1357
+ step_input,
1358
+ session_id=session.session_id,
1359
+ user_id=self.user_id,
1360
+ workflow_run_response=workflow_run_response,
1361
+ session_state=session_state,
1362
+ store_executor_outputs=self.store_executor_outputs,
1363
+ )
1364
+
1365
+ # Check for cancellation after step execution
1366
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1367
+
1368
+ # Update the workflow-level previous_step_outputs dictionary
1369
+ previous_step_outputs[step_name] = step_output
1370
+ if step_output.stop:
1371
+ logger.info(f"Early termination requested by step {step_name}")
1372
+ break
1373
+
1374
+ # Update shared media for next step
1375
+ shared_images.extend(step_output.images or [])
1376
+ shared_videos.extend(step_output.videos or [])
1377
+ shared_audio.extend(step_output.audio or [])
1378
+ shared_files.extend(step_output.files or [])
1379
+ output_images.extend(step_output.images or [])
1380
+ output_videos.extend(step_output.videos or [])
1381
+ output_audio.extend(step_output.audio or [])
1382
+ output_files.extend(step_output.files or [])
1383
+
1384
+ collected_step_outputs.append(step_output)
1385
+
1386
+ # Update the workflow_run_response with completion data
1387
+ if collected_step_outputs:
1388
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1389
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1390
+
1391
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1392
+ if getattr(last_output, "steps", None):
1393
+ _cur = last_output
1394
+ while getattr(_cur, "steps", None):
1395
+ _steps = _cur.steps or []
1396
+ if not _steps:
1397
+ break
1398
+ _cur = _steps[-1]
1399
+ workflow_run_response.content = _cur.content
1400
+ else:
1401
+ workflow_run_response.content = last_output.content
1402
+ else:
1403
+ workflow_run_response.content = "No steps executed"
1404
+
1405
+ workflow_run_response.step_results = collected_step_outputs
1406
+ workflow_run_response.images = output_images
1407
+ workflow_run_response.videos = output_videos
1408
+ workflow_run_response.audio = output_audio
1409
+ workflow_run_response.status = RunStatus.completed
1410
+
1411
+ except RunCancelledException as e:
1412
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
1413
+ workflow_run_response.status = RunStatus.cancelled
1414
+ workflow_run_response.content = str(e)
1415
+ except Exception as e:
1416
+ logger.error(f"Workflow execution failed: {e}")
1417
+ workflow_run_response.status = RunStatus.error
1418
+ workflow_run_response.content = f"Workflow execution failed: {e}"
1419
+
1420
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1421
+ session.upsert_run(run=workflow_run_response)
1422
+ self.save_session(session=session)
1423
+ # Always clean up the run tracking
1424
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1425
+
1426
+ # Log Workflow Telemetry
1427
+ if self.telemetry:
1428
+ await self._alog_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
506
1429
 
507
- def get_session_data(self) -> Dict[str, Any]:
508
- session_data: Dict[str, Any] = {}
509
- if self.session_name is not None:
510
- session_data["session_name"] = self.session_name
511
- if self.session_state and len(self.session_state) > 0:
512
- session_data["session_state"] = nested_model_dump(self.session_state)
513
- if self.images is not None:
514
- session_data["images"] = [img.model_dump() for img in self.images]
515
- if self.videos is not None:
516
- session_data["videos"] = [vid.model_dump() for vid in self.videos]
517
- if self.audio is not None:
518
- session_data["audio"] = [aud.model_dump() for aud in self.audio]
519
- return session_data
520
-
521
- def get_workflow_session(self) -> WorkflowSession:
522
- """Get a WorkflowSession object, which can be saved to the database"""
523
- self.memory = cast(WorkflowMemory, self.memory)
524
- self.session_id = cast(str, self.session_id)
525
- self.workflow_id = cast(str, self.workflow_id)
526
- if self.memory is not None:
527
- if isinstance(self.memory, WorkflowMemory):
528
- self.memory = cast(WorkflowMemory, self.memory)
529
- memory_dict = self.memory.to_dict()
530
- # We only persist the runs for the current session ID (not all runs in memory)
531
- memory_dict["runs"] = [
532
- agent_run.model_dump()
533
- for agent_run in self.memory.runs
534
- if agent_run.response is not None and agent_run.response.session_id == self.session_id
535
- ]
1430
+ return workflow_run_response
1431
+
1432
+ async def _aexecute_stream(
1433
+ self,
1434
+ session: WorkflowSession,
1435
+ execution_input: WorkflowExecutionInput,
1436
+ workflow_run_response: WorkflowRunOutput,
1437
+ session_state: Optional[Dict[str, Any]] = None,
1438
+ stream_intermediate_steps: bool = False,
1439
+ websocket_handler: Optional[WebSocketHandler] = None,
1440
+ **kwargs: Any,
1441
+ ) -> AsyncIterator[WorkflowRunOutputEvent]:
1442
+ """Execute a specific pipeline by name with event streaming"""
1443
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1444
+
1445
+ workflow_run_response.status = RunStatus.running
1446
+ workflow_started_event = WorkflowStartedEvent(
1447
+ run_id=workflow_run_response.run_id or "",
1448
+ workflow_name=workflow_run_response.workflow_name,
1449
+ workflow_id=workflow_run_response.workflow_id,
1450
+ session_id=workflow_run_response.session_id,
1451
+ )
1452
+ yield self._handle_event(workflow_started_event, workflow_run_response, websocket_handler=websocket_handler)
1453
+
1454
+ if callable(self.steps):
1455
+ if iscoroutinefunction(self.steps): # type: ignore
1456
+ workflow_run_response.content = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1457
+ elif isgeneratorfunction(self.steps):
1458
+ content = ""
1459
+ for chunk in self.steps(self, execution_input, **kwargs): # type: ignore[arg-type]
1460
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1461
+ content += chunk.content
1462
+ yield chunk
1463
+ else:
1464
+ content += str(chunk)
1465
+ workflow_run_response.content = content
1466
+ elif isasyncgenfunction(self.steps): # type: ignore
1467
+ content = ""
1468
+ async_gen = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1469
+ async for chunk in async_gen:
1470
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1471
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1472
+ content += chunk.content
1473
+ yield chunk
1474
+ else:
1475
+ content += str(chunk)
1476
+ workflow_run_response.content = content
536
1477
  else:
537
- self.memory = cast(Memory, self.memory)
538
- # We fake the structure on storage, to maintain the interface with the legacy implementation
539
- run_responses = self.memory.runs[self.session_id] # type: ignore
540
- memory_dict = self.memory.to_dict()
541
- memory_dict["runs"] = [rr.to_dict() for rr in run_responses]
1478
+ workflow_run_response.content = self.steps(self, execution_input, **kwargs)
1479
+ workflow_run_response.status = RunStatus.completed
1480
+
542
1481
  else:
543
- memory_dict = None
544
- return WorkflowSession(
545
- session_id=self.session_id,
546
- workflow_id=self.workflow_id,
547
- user_id=self.user_id,
548
- memory=memory_dict,
549
- workflow_data=self.get_workflow_data(),
550
- session_data=self.get_session_data(),
551
- extra_data=self.extra_data,
1482
+ try:
1483
+ # Track outputs from each step for enhanced data flow
1484
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1485
+ previous_step_outputs: Dict[str, StepOutput] = {}
1486
+
1487
+ shared_images: List[Image] = execution_input.images or []
1488
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
1489
+ shared_videos: List[Video] = execution_input.videos or []
1490
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
1491
+ shared_audio: List[Audio] = execution_input.audio or []
1492
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
1493
+ shared_files: List[File] = execution_input.files or []
1494
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1495
+
1496
+ early_termination = False
1497
+
1498
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1499
+ if workflow_run_response.run_id:
1500
+ raise_if_cancelled(workflow_run_response.run_id)
1501
+ step_name = getattr(step, "name", f"step_{i + 1}")
1502
+ log_debug(f"Async streaming step {i + 1}/{self._get_step_count()}: {step_name}")
1503
+
1504
+ # Create enhanced StepInput
1505
+ step_input = self._create_step_input(
1506
+ execution_input=execution_input,
1507
+ previous_step_outputs=previous_step_outputs,
1508
+ shared_images=shared_images,
1509
+ shared_videos=shared_videos,
1510
+ shared_audio=shared_audio,
1511
+ shared_files=shared_files,
1512
+ )
1513
+
1514
+ # Execute step with streaming and yield all events
1515
+ async for event in step.aexecute_stream( # type: ignore[union-attr]
1516
+ step_input,
1517
+ session_id=session.session_id,
1518
+ user_id=self.user_id,
1519
+ stream_intermediate_steps=stream_intermediate_steps,
1520
+ workflow_run_response=workflow_run_response,
1521
+ session_state=session_state,
1522
+ step_index=i,
1523
+ store_executor_outputs=self.store_executor_outputs,
1524
+ ):
1525
+ if workflow_run_response.run_id:
1526
+ raise_if_cancelled(workflow_run_response.run_id)
1527
+ if isinstance(event, StepOutput):
1528
+ step_output = event
1529
+ collected_step_outputs.append(step_output)
1530
+
1531
+ # Update the workflow-level previous_step_outputs dictionary
1532
+ previous_step_outputs[step_name] = step_output
1533
+
1534
+ # Transform StepOutput to StepOutputEvent for consistent streaming interface
1535
+ step_output_event = self._transform_step_output_to_event(
1536
+ step_output, workflow_run_response, step_index=i
1537
+ )
1538
+
1539
+ if step_output.stop:
1540
+ logger.info(f"Early termination requested by step {step_name}")
1541
+ # Update shared media for next step
1542
+ shared_images.extend(step_output.images or [])
1543
+ shared_videos.extend(step_output.videos or [])
1544
+ shared_audio.extend(step_output.audio or [])
1545
+ shared_files.extend(step_output.files or [])
1546
+ output_images.extend(step_output.images or [])
1547
+ output_videos.extend(step_output.videos or [])
1548
+ output_audio.extend(step_output.audio or [])
1549
+ output_files.extend(step_output.files or [])
1550
+
1551
+ if getattr(step, "executor_type", None) == "function":
1552
+ yield step_output_event
1553
+
1554
+ # Break out of the step loop
1555
+ early_termination = True
1556
+ break
1557
+
1558
+ # Update shared media for next step
1559
+ shared_images.extend(step_output.images or [])
1560
+ shared_videos.extend(step_output.videos or [])
1561
+ shared_audio.extend(step_output.audio or [])
1562
+ shared_files.extend(step_output.files or [])
1563
+ output_images.extend(step_output.images or [])
1564
+ output_videos.extend(step_output.videos or [])
1565
+ output_audio.extend(step_output.audio or [])
1566
+ output_files.extend(step_output.files or [])
1567
+
1568
+ # Only yield StepOutputEvent for generator functions, not for agents/teams
1569
+ if getattr(step, "executor_type", None) == "function":
1570
+ yield step_output_event
1571
+
1572
+ elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
1573
+ yield self._handle_event(event, workflow_run_response, websocket_handler=websocket_handler) # type: ignore
1574
+
1575
+ else:
1576
+ # Yield other internal events
1577
+ yield self._handle_event(event, workflow_run_response, websocket_handler=websocket_handler) # type: ignore
1578
+
1579
+ # Break out of main step loop if early termination was requested
1580
+ if "early_termination" in locals() and early_termination:
1581
+ break
1582
+
1583
+ # Update the workflow_run_response with completion data
1584
+ if collected_step_outputs:
1585
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1586
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1587
+
1588
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1589
+ if getattr(last_output, "steps", None):
1590
+ _cur = last_output
1591
+ while getattr(_cur, "steps", None):
1592
+ _steps = _cur.steps or []
1593
+ if not _steps:
1594
+ break
1595
+ _cur = _steps[-1]
1596
+ workflow_run_response.content = _cur.content
1597
+ else:
1598
+ workflow_run_response.content = last_output.content
1599
+ else:
1600
+ workflow_run_response.content = "No steps executed"
1601
+
1602
+ workflow_run_response.step_results = collected_step_outputs
1603
+ workflow_run_response.images = output_images
1604
+ workflow_run_response.videos = output_videos
1605
+ workflow_run_response.audio = output_audio
1606
+ workflow_run_response.status = RunStatus.completed
1607
+
1608
+ except RunCancelledException as e:
1609
+ # Handle run cancellation during streaming
1610
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
1611
+ workflow_run_response.status = RunStatus.cancelled
1612
+ workflow_run_response.content = str(e)
1613
+ cancelled_event = WorkflowCancelledEvent(
1614
+ run_id=workflow_run_response.run_id or "",
1615
+ workflow_id=self.id,
1616
+ workflow_name=self.name,
1617
+ session_id=session.session_id,
1618
+ reason=str(e),
1619
+ )
1620
+ yield self._handle_event(
1621
+ cancelled_event,
1622
+ workflow_run_response,
1623
+ websocket_handler=websocket_handler,
1624
+ )
1625
+ except Exception as e:
1626
+ logger.error(f"Workflow execution failed: {e}")
1627
+
1628
+ from agno.run.workflow import WorkflowErrorEvent
1629
+
1630
+ error_event = WorkflowErrorEvent(
1631
+ run_id=workflow_run_response.run_id or "",
1632
+ workflow_id=self.id,
1633
+ workflow_name=self.name,
1634
+ session_id=session.session_id,
1635
+ error=str(e),
1636
+ )
1637
+
1638
+ yield error_event
1639
+
1640
+ # Update workflow_run_response with error
1641
+ workflow_run_response.content = error_event.error
1642
+ workflow_run_response.status = RunStatus.error
1643
+
1644
+ # Yield workflow completed event
1645
+ workflow_completed_event = WorkflowCompletedEvent(
1646
+ run_id=workflow_run_response.run_id or "",
1647
+ content=workflow_run_response.content,
1648
+ workflow_name=workflow_run_response.workflow_name,
1649
+ workflow_id=workflow_run_response.workflow_id,
1650
+ session_id=workflow_run_response.session_id,
1651
+ step_results=workflow_run_response.step_results, # type: ignore[arg-type]
1652
+ metadata=workflow_run_response.metadata,
552
1653
  )
1654
+ yield self._handle_event(workflow_completed_event, workflow_run_response, websocket_handler=websocket_handler)
553
1655
 
554
- def load_workflow_session(self, session: WorkflowSession):
555
- """Load the existing Workflow from a WorkflowSession (from the database)"""
1656
+ # Store the completed workflow response
1657
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1658
+ session.upsert_run(run=workflow_run_response)
1659
+ self.save_session(session=session)
556
1660
 
557
- # Get the workflow_id, user_id and session_id from the database
558
- if self.workflow_id is None and session.workflow_id is not None:
559
- self.workflow_id = session.workflow_id
560
- if self.user_id is None and session.user_id is not None:
561
- self.user_id = session.user_id
562
- if self.session_id is None and session.session_id is not None:
563
- self.session_id = session.session_id
564
-
565
- # Read workflow_data from the database
566
- if session.workflow_data is not None:
567
- # Get name from database and update the workflow name if not set
568
- if self.name is None and "name" in session.workflow_data:
569
- self.name = session.workflow_data.get("name")
570
-
571
- # Read session_data from the database
572
- if session.session_data is not None:
573
- # Get the session_name from database and update the current session_name if not set
574
- if self.session_name is None and "session_name" in session.session_data:
575
- self.session_name = session.session_data.get("session_name")
576
-
577
- # Get the session_state from database and update the current session_state
578
- if "session_state" in session.session_data:
579
- session_state_from_db = session.session_data.get("session_state")
580
- if (
581
- session_state_from_db is not None
582
- and isinstance(session_state_from_db, dict)
583
- and len(session_state_from_db) > 0
1661
+ # Log Workflow Telemetry
1662
+ if self.telemetry:
1663
+ await self._alog_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
1664
+
1665
+ # Always clean up the run tracking
1666
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1667
+
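A minimal caller-side sketch of consuming the streamed events produced above, assuming `workflow` is an already-configured Workflow instance; the WorkflowErrorEvent import mirrors the one used in this executor.

from agno.run.workflow import WorkflowErrorEvent

for event in workflow.run("Crunch the data", stream=True):
    # error events carry the failure message in `error`, as set by the executor above
    if isinstance(event, WorkflowErrorEvent):
        print(f"Run failed: {event.error}")
        break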
1668
+ async def _arun_background(
1669
+ self,
1670
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
1671
+ additional_data: Optional[Dict[str, Any]] = None,
1672
+ user_id: Optional[str] = None,
1673
+ session_id: Optional[str] = None,
1674
+ session_state: Optional[Dict[str, Any]] = None,
1675
+ audio: Optional[List[Audio]] = None,
1676
+ images: Optional[List[Image]] = None,
1677
+ videos: Optional[List[Video]] = None,
1678
+ files: Optional[List[File]] = None,
1679
+ **kwargs: Any,
1680
+ ) -> WorkflowRunOutput:
1681
+ """Execute workflow in background using asyncio.create_task()"""
1682
+
1683
+ run_id = str(uuid4())
1684
+
1685
+ self.initialize_workflow()
1686
+
1687
+ session_id, user_id, session_state = self._initialize_session(
1688
+ session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
1689
+ )
1690
+
1691
+ # Read existing session from database
1692
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1693
+ self._update_metadata(session=workflow_session)
1694
+
1695
+ # Update session state from DB
1696
+ session_state = self._update_session_state(session=workflow_session, session_state=session_state)
1697
+
1698
+ self._prepare_steps()
1699
+
1700
+ # Create workflow run response with PENDING status
1701
+ workflow_run_response = WorkflowRunOutput(
1702
+ run_id=run_id,
1703
+ session_id=session_id,
1704
+ workflow_id=self.id,
1705
+ workflow_name=self.name,
1706
+ created_at=int(datetime.now().timestamp()),
1707
+ status=RunStatus.pending,
1708
+ )
1709
+
1710
+ # Store PENDING response immediately
1711
+ workflow_session.upsert_run(run=workflow_run_response)
1712
+ self.save_session(session=workflow_session)
1713
+
1714
+ # Prepare execution input
1715
+ inputs = WorkflowExecutionInput(
1716
+ input=input,
1717
+ additional_data=additional_data,
1718
+ audio=audio, # type: ignore
1719
+ images=images, # type: ignore
1720
+ videos=videos, # type: ignore
1721
+ files=files, # type: ignore
1722
+ )
1723
+
1724
+ self.update_agents_and_teams_session_info()
1725
+
1726
+ async def execute_workflow_background():
1727
+ """Simple background execution"""
1728
+ try:
1729
+ # Update status to RUNNING and save
1730
+ workflow_run_response.status = RunStatus.running
1731
+ self.save_session(session=workflow_session)
1732
+
1733
+ await self._aexecute(
1734
+ session=workflow_session,
1735
+ execution_input=inputs,
1736
+ workflow_run_response=workflow_run_response,
1737
+ session_state=session_state,
1738
+ **kwargs,
1739
+ )
1740
+
1741
+ log_debug(f"Background execution completed with status: {workflow_run_response.status}")
1742
+
1743
+ except Exception as e:
1744
+ logger.error(f"Background workflow execution failed: {e}")
1745
+ workflow_run_response.status = RunStatus.error
1746
+ workflow_run_response.content = f"Background execution failed: {str(e)}"
1747
+ self.save_session(session=workflow_session)
1748
+
1749
+ # Create and start asyncio task
1750
+ loop = asyncio.get_running_loop()
1751
+ loop.create_task(execute_workflow_background())
1752
+
1753
+ # Return SAME object that will be updated by background execution
1754
+ return workflow_run_response
1755
+
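A minimal polling sketch for the background path above, assuming `workflow` is a configured Workflow with a database attached; the RunStatus import path is an assumption, adjust it to wherever the enum lives in your install.

import asyncio
from agno.run.base import RunStatus  # assumed import path

async def wait_for_background_run():
    run_output = await workflow.arun("Build the weekly report", background=True)  # returns a pending run immediately
    while True:
        latest = workflow.get_run(run_output.run_id) or run_output
        if latest.status not in (RunStatus.pending, RunStatus.running):
            return latest
        await asyncio.sleep(1)  # poll the stored run until it reaches a terminal status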
1756
+ async def _arun_background_stream(
1757
+ self,
1758
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
1759
+ additional_data: Optional[Dict[str, Any]] = None,
1760
+ user_id: Optional[str] = None,
1761
+ session_id: Optional[str] = None,
1762
+ session_state: Optional[Dict[str, Any]] = None,
1763
+ audio: Optional[List[Audio]] = None,
1764
+ images: Optional[List[Image]] = None,
1765
+ videos: Optional[List[Video]] = None,
1766
+ files: Optional[List[File]] = None,
1767
+ stream_intermediate_steps: bool = False,
1768
+ websocket_handler: Optional[WebSocketHandler] = None,
1769
+ **kwargs: Any,
1770
+ ) -> WorkflowRunOutput:
1771
+ """Execute workflow in background with streaming and WebSocket broadcasting"""
1772
+
1773
+ run_id = str(uuid4())
1774
+
1775
+ self.initialize_workflow()
1776
+
1777
+ session_id, user_id, session_state = self._initialize_session(
1778
+ session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
1779
+ )
1780
+
1781
+ # Read existing session from database
1782
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1783
+ self._update_metadata(session=workflow_session)
1784
+
1785
+ # Update session state from DB
1786
+ session_state = self._update_session_state(session=workflow_session, session_state=session_state)
1787
+
1788
+ self._prepare_steps()
1789
+
1790
+ # Create workflow run response with PENDING status
1791
+ workflow_run_response = WorkflowRunOutput(
1792
+ run_id=run_id,
1793
+ session_id=session_id,
1794
+ workflow_id=self.id,
1795
+ workflow_name=self.name,
1796
+ created_at=int(datetime.now().timestamp()),
1797
+ status=RunStatus.pending,
1798
+ )
1799
+
1800
+ # Store PENDING response immediately
1801
+ workflow_session.upsert_run(run=workflow_run_response)
1802
+ self.save_session(session=workflow_session)
1803
+
1804
+ # Prepare execution input
1805
+ inputs = WorkflowExecutionInput(
1806
+ input=input,
1807
+ additional_data=additional_data,
1808
+ audio=audio, # type: ignore
1809
+ images=images, # type: ignore
1810
+ videos=videos, # type: ignore
1811
+ files=files, # type: ignore
1812
+ )
1813
+
1814
+ self.update_agents_and_teams_session_info()
1815
+
1816
+ async def execute_workflow_background_stream():
1817
+ """Background execution with streaming and WebSocket broadcasting"""
1818
+ try:
1819
+ # Update status to RUNNING and save
1820
+ workflow_run_response.status = RunStatus.running
1821
+ self.save_session(session=workflow_session)
1822
+
1823
+ # Execute with streaming - consume all events (they're auto-broadcast via _handle_event)
1824
+ async for event in self._aexecute_stream(
1825
+ execution_input=inputs,
1826
+ session=workflow_session,
1827
+ workflow_run_response=workflow_run_response,
1828
+ stream_intermediate_steps=stream_intermediate_steps,
1829
+ session_state=session_state,
1830
+ websocket_handler=websocket_handler,
1831
+ **kwargs,
584
1832
  ):
585
- # If the session_state is already set, merge the session_state from the database with the current session_state
586
- if len(self.session_state) > 0:
587
- # This updates session_state_from_db
588
- merge_dictionaries(session_state_from_db, self.session_state)
589
- # Update the current session_state
590
- self.session_state = session_state_from_db
591
-
592
- # Get images, videos, and audios from the database
593
- if "images" in session.session_data:
594
- images_from_db = session.session_data.get("images")
595
- if images_from_db is not None and isinstance(images_from_db, list):
596
- if self.images is None:
597
- self.images = []
598
- self.images.extend([ImageArtifact.model_validate(img) for img in images_from_db])
599
- if "videos" in session.session_data:
600
- videos_from_db = session.session_data.get("videos")
601
- if videos_from_db is not None and isinstance(videos_from_db, list):
602
- if self.videos is None:
603
- self.videos = []
604
- self.videos.extend([VideoArtifact.model_validate(vid) for vid in videos_from_db])
605
- if "audio" in session.session_data:
606
- audio_from_db = session.session_data.get("audio")
607
- if audio_from_db is not None and isinstance(audio_from_db, list):
608
- if self.audio is None:
609
- self.audio = []
610
- self.audio.extend([AudioArtifact.model_validate(aud) for aud in audio_from_db])
611
-
612
- # Read extra_data from the database
613
- if session.extra_data is not None:
614
- # If extra_data is set in the workflow, update the database extra_data with the workflow's extra_data
615
- if self.extra_data is not None:
616
- # Updates workflow_session.extra_data in place
617
- merge_dictionaries(session.extra_data, self.extra_data)
618
- # Update the current extra_data with the extra_data from the database which is updated in place
619
- self.extra_data = session.extra_data
620
-
621
- if session.memory is not None:
622
- if self.memory is None:
623
- self.memory = Memory()
624
-
625
- if isinstance(self.memory, Memory):
626
- try:
627
- if self.memory.runs is None:
628
- self.memory.runs = {}
629
- self.memory.runs[session.session_id] = []
630
- for run in session.memory["runs"]:
631
- run_session_id = run["session_id"]
632
- self.memory.runs[run_session_id].append(RunResponse.from_dict(run))
633
- except Exception as e:
634
- log_warning(f"Failed to load runs from memory: {e}")
635
- else:
636
- try:
637
- if "runs" in session.memory:
638
- try:
639
- self.memory.runs = [WorkflowRun(**m) for m in session.memory["runs"]]
640
- except Exception as e:
641
- logger.warning(f"Failed to load runs from memory: {e}")
642
- except Exception as e:
643
- logger.warning(f"Failed to load WorkflowMemory: {e}")
1833
+ # Events are automatically broadcast by _handle_event
1834
+ # We just consume them here to drive the execution
1835
+ pass
644
1836
 
645
- log_debug(f"-*- WorkflowSession loaded: {session.session_id}")
1837
+ log_debug(f"Background streaming execution completed with status: {workflow_run_response.status}")
646
1838
 
647
- def read_from_storage(self) -> Optional[WorkflowSession]:
648
- """Load the WorkflowSession from storage.
1839
+ except Exception as e:
1840
+ logger.error(f"Background streaming workflow execution failed: {e}")
1841
+ workflow_run_response.status = RunStatus.error
1842
+ workflow_run_response.content = f"Background streaming execution failed: {str(e)}"
1843
+ self.save_session(session=workflow_session)
649
1844
 
650
- Returns:
651
- Optional[WorkflowSession]: The loaded WorkflowSession or None if not found.
652
- """
653
- if self.storage is not None and self.session_id is not None:
654
- self.workflow_session = cast(WorkflowSession, self.storage.read(session_id=self.session_id))
655
- if self.workflow_session is not None:
656
- self.load_workflow_session(session=self.workflow_session)
657
- return self.workflow_session
1845
+ # Create and start asyncio task for background streaming execution
1846
+ loop = asyncio.get_running_loop()
1847
+ loop.create_task(execute_workflow_background_stream())
658
1848
 
659
- def write_to_storage(self) -> Optional[WorkflowSession]:
660
- """Save the WorkflowSession to storage
1849
+ # Return SAME object that will be updated by background execution
1850
+ return workflow_run_response
1851
+
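A rough sketch of the background streaming path, assuming an async WebSocket endpoint (for example FastAPI-style); the surrounding handler code is illustrative only.

async def handle(websocket):
    run_output = await workflow.arun(
        input="Generate the launch plan",
        background=True,
        stream=True,
        websocket=websocket,  # events are broadcast to this socket via WebSocketHandler
    )
    # run_output starts in pending status; progress arrives as WebSocket events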
1852
+ def get_run(self, run_id: str) -> Optional[WorkflowRunOutput]:
1853
+ """Get the status and details of a background workflow run - SIMPLIFIED"""
1854
+ if self.db is not None and self.session_id is not None:
1855
+ session = self.db.get_session(session_id=self.session_id, session_type=SessionType.WORKFLOW)
1856
+ if session and isinstance(session, WorkflowSession) and session.runs:
1857
+ # Find the run by ID
1858
+ for run in session.runs:
1859
+ if run.run_id == run_id:
1860
+ return run
1861
+
1862
+ return None
1863
+
1864
+ def cancel_run(self, run_id: str) -> bool:
1865
+ """Cancel a running workflow execution.
1866
+
1867
+ Args:
1868
+ run_id (str): The run_id to cancel.
661
1869
 
662
1870
  Returns:
663
- Optional[WorkflowSession]: The saved WorkflowSession or None if not saved.
1871
+ bool: True if the run was found and marked for cancellation, False otherwise.
664
1872
  """
665
- if self.storage is not None:
666
- self.workflow_session = cast(WorkflowSession, self.storage.upsert(session=self.get_workflow_session()))
667
- return self.workflow_session
1873
+ return cancel_run_global(run_id)
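A minimal sketch of inspecting and cancelling a background run, assuming `run_id` was captured from an earlier call.

latest = workflow.get_run(run_id)            # persisted WorkflowRunOutput, or None if unknown
if latest is not None:
    cancelled = workflow.cancel_run(run_id)  # True if the run was found and flagged for cancellation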
668
1874
 
669
- def load_session(self, force: bool = False) -> Optional[str]:
670
- """Load an existing session from the database and return the session_id.
671
- If a session does not exist, create a new session.
1875
+ @overload
1876
+ def run(
1877
+ self,
1878
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
1879
+ additional_data: Optional[Dict[str, Any]] = None,
1880
+ user_id: Optional[str] = None,
1881
+ session_id: Optional[str] = None,
1882
+ session_state: Optional[Dict[str, Any]] = None,
1883
+ audio: Optional[List[Audio]] = None,
1884
+ images: Optional[List[Image]] = None,
1885
+ videos: Optional[List[Video]] = None,
1886
+ files: Optional[List[File]] = None,
1887
+ stream: Literal[False] = False,
1888
+ stream_intermediate_steps: Optional[bool] = None,
1889
+ background: Optional[bool] = False,
1890
+ ) -> WorkflowRunOutput: ...
1891
+
1892
+ @overload
1893
+ def run(
1894
+ self,
1895
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
1896
+ additional_data: Optional[Dict[str, Any]] = None,
1897
+ user_id: Optional[str] = None,
1898
+ session_id: Optional[str] = None,
1899
+ session_state: Optional[Dict[str, Any]] = None,
1900
+ audio: Optional[List[Audio]] = None,
1901
+ images: Optional[List[Image]] = None,
1902
+ videos: Optional[List[Video]] = None,
1903
+ files: Optional[List[File]] = None,
1904
+ stream: Literal[True] = True,
1905
+ stream_intermediate_steps: Optional[bool] = None,
1906
+ background: Optional[bool] = False,
1907
+ ) -> Iterator[WorkflowRunOutputEvent]: ...
1908
+
1909
+ def run(
1910
+ self,
1911
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
1912
+ additional_data: Optional[Dict[str, Any]] = None,
1913
+ user_id: Optional[str] = None,
1914
+ session_id: Optional[str] = None,
1915
+ session_state: Optional[Dict[str, Any]] = None,
1916
+ audio: Optional[List[Audio]] = None,
1917
+ images: Optional[List[Image]] = None,
1918
+ videos: Optional[List[Video]] = None,
1919
+ files: Optional[List[File]] = None,
1920
+ stream: bool = False,
1921
+ stream_intermediate_steps: Optional[bool] = None,
1922
+ background: Optional[bool] = False,
1923
+ **kwargs: Any,
1924
+ ) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
1925
+ """Execute the workflow synchronously with optional streaming"""
1926
+
1927
+ validated_input = self._validate_input(input)
1928
+ if validated_input is not None:
1929
+ input = validated_input
1930
+
1931
+ if background:
1932
+ raise RuntimeError("Background execution is not supported for sync run()")
1933
+
1934
+ self._set_debug()
1935
+
1936
+ run_id = str(uuid4())
1937
+
1938
+ self.initialize_workflow()
1939
+ session_id, user_id, session_state = self._initialize_session(
1940
+ session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
1941
+ )
672
1942
 
673
- - If a session exists in the database, load the session.
674
- - If a session does not exist in the database, create a new session.
675
- """
676
- # If a workflow_session is already loaded, return the session_id from the workflow_session
677
- # if the session_id matches the session_id from the workflow_session
678
- if self.workflow_session is not None and not force:
679
- if self.session_id is not None and self.workflow_session.session_id == self.session_id:
680
- return self.workflow_session.session_id
681
-
682
- # Load an existing session or create a new session
683
- if self.storage is not None:
684
- # Load existing session if session_id is provided
685
- log_debug(f"Reading WorkflowSession: {self.session_id}")
686
- self.read_from_storage()
687
-
688
- # Create a new session if it does not exist
689
- if self.workflow_session is None:
690
- log_debug("-*- Creating new WorkflowSession")
691
- # write_to_storage() will create a new WorkflowSession
692
- # and populate self.workflow_session with the new session
693
- self.write_to_storage()
694
- if self.workflow_session is None:
695
- raise Exception("Failed to create new WorkflowSession in storage")
696
- log_debug(f"-*- Created WorkflowSession: {self.workflow_session.session_id}")
697
- self.log_workflow_session()
698
- return self.session_id
699
-
700
- def new_session(self) -> None:
701
- """Create a new Workflow session
702
-
703
- - Clear the workflow_session
704
- - Create a new session_id
705
- - Load the new session
706
- """
707
- self.workflow_session = None
708
- self.session_id = str(uuid4())
709
- self.load_session(force=True)
1943
+ # Read existing session from database
1944
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1945
+ self._update_metadata(session=workflow_session)
710
1946
 
711
- def log_workflow_session(self):
712
- log_debug(f"*********** Logging WorkflowSession: {self.session_id} ***********")
1947
+ # Update session state from DB
1948
+ session_state = self._update_session_state(session=workflow_session, session_state=session_state)
713
1949
 
714
- def rename(self, name: str) -> None:
715
- """Rename the Workflow and save to storage"""
1950
+ log_debug(f"Workflow Run Start: {self.name}", center=True)
716
1951
 
717
- # -*- Read from storage
718
- self.read_from_storage()
719
- # -*- Rename Workflow
720
- self.name = name
721
- # -*- Save to storage
722
- self.write_to_storage()
723
- # -*- Log Workflow session
724
- self.log_workflow_session()
1952
+ # Use simple defaults
1953
+ stream = stream or self.stream or False
1954
+ stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
725
1955
 
726
- def rename_session(self, session_name: str):
727
- """Rename the current session and save to storage"""
1956
+ # Can't have stream_intermediate_steps if stream is False
1957
+ if not stream:
1958
+ stream_intermediate_steps = False
728
1959
 
729
- # -*- Read from storage
730
- self.read_from_storage()
731
- # -*- Rename session
732
- self.session_name = session_name
733
- # -*- Save to storage
734
- self.write_to_storage()
735
- # -*- Log Workflow session
736
- self.log_workflow_session()
1960
+ log_debug(f"Stream: {stream}")
1961
+ log_debug(f"Total steps: {self._get_step_count()}")
737
1962
 
738
- def delete_session(self, session_id: str):
739
- """Delete the current session and save to storage"""
740
- if self.storage is None:
741
- return
742
- # -*- Delete session
743
- self.storage.delete_session(session_id=session_id)
1963
+ # Prepare steps
1964
+ self._prepare_steps()
1965
+
1966
+ # Create workflow run response that will be updated by reference
1967
+ workflow_run_response = WorkflowRunOutput(
1968
+ run_id=run_id,
1969
+ session_id=session_id,
1970
+ workflow_id=self.id,
1971
+ workflow_name=self.name,
1972
+ created_at=int(datetime.now().timestamp()),
1973
+ )
1974
+
1975
+ inputs = WorkflowExecutionInput(
1976
+ input=input,
1977
+ additional_data=additional_data,
1978
+ audio=audio, # type: ignore
1979
+ images=images, # type: ignore
1980
+ videos=videos, # type: ignore
1981
+ files=files, # type: ignore
1982
+ )
1983
+ log_debug(
1984
+ f"Created pipeline input with session state keys: {list(session_state.keys()) if session_state else 'None'}"
1985
+ )
744
1986
 
745
- def deep_copy(self, *, update: Optional[Dict[str, Any]] = None) -> Workflow:
746
- """Create and return a deep copy of this Workflow, optionally updating fields.
1987
+ self.update_agents_and_teams_session_info()
1988
+
1989
+ if stream:
1990
+ return self._execute_stream(
1991
+ session=workflow_session,
1992
+ execution_input=inputs, # type: ignore[arg-type]
1993
+ workflow_run_response=workflow_run_response,
1994
+ stream_intermediate_steps=stream_intermediate_steps,
1995
+ session_state=session_state,
1996
+ **kwargs,
1997
+ )
1998
+ else:
1999
+ return self._execute(
2000
+ session=workflow_session,
2001
+ execution_input=inputs, # type: ignore[arg-type]
2002
+ workflow_run_response=workflow_run_response,
2003
+ session_state=session_state,
2004
+ **kwargs,
2005
+ )
2006
+
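A minimal usage sketch for the synchronous entry point above, assuming `workflow` is already constructed.

# non-streaming: returns a WorkflowRunOutput
response = workflow.run("Summarize the quarterly numbers")
print(response.status, response.content)

# streaming: returns an iterator of WorkflowRunOutputEvent
for event in workflow.run("Summarize the quarterly numbers", stream=True, stream_intermediate_steps=True):
    print(type(event).__name__)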
2007
+ @overload
2008
+ async def arun(
2009
+ self,
2010
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
2011
+ additional_data: Optional[Dict[str, Any]] = None,
2012
+ user_id: Optional[str] = None,
2013
+ session_id: Optional[str] = None,
2014
+ session_state: Optional[Dict[str, Any]] = None,
2015
+ audio: Optional[List[Audio]] = None,
2016
+ images: Optional[List[Image]] = None,
2017
+ videos: Optional[List[Video]] = None,
2018
+ files: Optional[List[File]] = None,
2019
+ stream: Literal[False] = False,
2020
+ stream_intermediate_steps: Optional[bool] = None,
2021
+ background: Optional[bool] = False,
2022
+ websocket: Optional[WebSocket] = None,
2023
+ ) -> WorkflowRunOutput: ...
2024
+
2025
+ @overload
2026
+ async def arun(
2027
+ self,
2028
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
2029
+ additional_data: Optional[Dict[str, Any]] = None,
2030
+ user_id: Optional[str] = None,
2031
+ session_id: Optional[str] = None,
2032
+ session_state: Optional[Dict[str, Any]] = None,
2033
+ audio: Optional[List[Audio]] = None,
2034
+ images: Optional[List[Image]] = None,
2035
+ videos: Optional[List[Video]] = None,
2036
+ files: Optional[List[File]] = None,
2037
+ stream: Literal[True] = True,
2038
+ stream_intermediate_steps: Optional[bool] = None,
2039
+ background: Optional[bool] = False,
2040
+ websocket: Optional[WebSocket] = None,
2041
+ ) -> AsyncIterator[WorkflowRunOutputEvent]: ...
2042
+
2043
+ async def arun(
2044
+ self,
2045
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
2046
+ additional_data: Optional[Dict[str, Any]] = None,
2047
+ user_id: Optional[str] = None,
2048
+ session_id: Optional[str] = None,
2049
+ session_state: Optional[Dict[str, Any]] = None,
2050
+ audio: Optional[List[Audio]] = None,
2051
+ images: Optional[List[Image]] = None,
2052
+ videos: Optional[List[Video]] = None,
2053
+ files: Optional[List[File]] = None,
2054
+ stream: bool = False,
2055
+ stream_intermediate_steps: Optional[bool] = False,
2056
+ background: Optional[bool] = False,
2057
+ websocket: Optional[WebSocket] = None,
2058
+ **kwargs: Any,
2059
+ ) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
2060
+ """Execute the workflow synchronously with optional streaming"""
2061
+
2062
+ validated_input = self._validate_input(input)
2063
+ if validated_input is not None:
2064
+ input = validated_input
2065
+
2066
+ websocket_handler = None
2067
+ if websocket:
2068
+ from agno.workflow.types import WebSocketHandler
2069
+
2070
+ websocket_handler = WebSocketHandler(websocket=websocket)
2071
+
2072
+ if background:
2073
+ if stream and websocket:
2074
+ # Background + Streaming + WebSocket = Real-time events
2075
+ return await self._arun_background_stream(
2076
+ input=input,
2077
+ additional_data=additional_data,
2078
+ user_id=user_id,
2079
+ session_id=session_id,
2080
+ audio=audio,
2081
+ images=images,
2082
+ videos=videos,
2083
+ files=files,
2084
+ stream_intermediate_steps=stream_intermediate_steps or False,
2085
+ websocket_handler=websocket_handler,
2086
+ **kwargs,
2087
+ )
2088
+ elif stream and not websocket:
2089
+ # Background + Streaming but no WebSocket = Not supported
2090
+ raise ValueError("Background streaming execution requires a WebSocket for real-time events")
2091
+ else:
2092
+ # Background + Non-streaming = Polling (existing)
2093
+ return await self._arun_background(
2094
+ input=input,
2095
+ additional_data=additional_data,
2096
+ user_id=user_id,
2097
+ session_id=session_id,
2098
+ audio=audio,
2099
+ images=images,
2100
+ videos=videos,
2101
+ files=files,
2102
+ **kwargs,
2103
+ )
2104
+
2105
+ self._set_debug()
2106
+
2107
+ run_id = str(uuid4())
2108
+
2109
+ self.initialize_workflow()
2110
+ session_id, user_id, session_state = self._initialize_session(
2111
+ session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
2112
+ )
2113
+
2114
+ # Read existing session from database
2115
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
2116
+ self._update_metadata(session=workflow_session)
2117
+
2118
+ # Update session state from DB
2119
+ session_state = self._update_session_state(session=workflow_session, session_state=session_state)
2120
+
2121
+ log_debug(f"Async Workflow Run Start: {self.name}", center=True)
2122
+
2123
+ # Use simple defaults
2124
+ stream = stream or self.stream or False
2125
+ stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
2126
+
2127
+ # Can't have stream_intermediate_steps if stream is False
2128
+ if not stream:
2129
+ stream_intermediate_steps = False
2130
+
2131
+ log_debug(f"Stream: {stream}")
2132
+
2133
+ # Prepare steps
2134
+ self._prepare_steps()
2135
+
2136
+ # Create workflow run response that will be updated by reference
2137
+ workflow_run_response = WorkflowRunOutput(
2138
+ run_id=run_id,
2139
+ session_id=session_id,
2140
+ workflow_id=self.id,
2141
+ workflow_name=self.name,
2142
+ created_at=int(datetime.now().timestamp()),
2143
+ )
2144
+
2145
+ inputs = WorkflowExecutionInput(
2146
+ input=input,
2147
+ additional_data=additional_data,
2148
+ audio=audio, # type: ignore
2149
+ images=images, # type: ignore
2150
+ videos=videos, # type: ignore
2151
+ )
2152
+ log_debug(
2153
+ f"Created async pipeline input with session state keys: {list(session_state.keys()) if session_state else 'None'}"
2154
+ )
2155
+
2156
+ self.update_agents_and_teams_session_info()
2157
+
2158
+ if stream:
2159
+ return self._aexecute_stream(
2160
+ execution_input=inputs,
2161
+ workflow_run_response=workflow_run_response,
2162
+ session=workflow_session,
2163
+ stream_intermediate_steps=stream_intermediate_steps,
2164
+ websocket=websocket,
2165
+ files=files,
2166
+ session_state=session_state,
2167
+ **kwargs,
2168
+ )
2169
+ else:
2170
+ return await self._aexecute(
2171
+ execution_input=inputs,
2172
+ workflow_run_response=workflow_run_response,
2173
+ session=workflow_session,
2174
+ websocket=websocket,
2175
+ files=files,
2176
+ session_state=session_state,
2177
+ **kwargs,
2178
+ )
2179
+
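A minimal async counterpart to the sync example, assuming `workflow` is already constructed; note that with stream=True the awaited call returns an async iterator rather than a final output.

import asyncio

async def main():
    output = await workflow.arun("Draft the changelog")           # WorkflowRunOutput
    print(output.status, output.content)

    events = await workflow.arun("Draft the changelog", stream=True)
    async for event in events:                                    # AsyncIterator[WorkflowRunOutputEvent]
        print(type(event).__name__)

asyncio.run(main())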
2180
+ def _prepare_steps(self):
2181
+ """Prepare the steps for execution"""
2182
+ if not callable(self.steps) and self.steps is not None:
2183
+ prepared_steps: List[Union[Step, Steps, Loop, Parallel, Condition, Router]] = []
2184
+ for i, step in enumerate(self.steps): # type: ignore
2185
+ if callable(step) and hasattr(step, "__name__"):
2186
+ step_name = step.__name__
2187
+ log_debug(f"Step {i + 1}: Wrapping callable function '{step_name}'")
2188
+ prepared_steps.append(Step(name=step_name, description="User-defined callable step", executor=step))
2189
+ elif isinstance(step, Agent):
2190
+ step_name = step.name or f"step_{i + 1}"
2191
+ log_debug(f"Step {i + 1}: Agent '{step_name}'")
2192
+ prepared_steps.append(Step(name=step_name, description=step.description, agent=step))
2193
+ elif isinstance(step, Team):
2194
+ step_name = step.name or f"step_{i + 1}"
2195
+ log_debug(f"Step {i + 1}: Team '{step_name}' with {len(step.members)} members")
2196
+ prepared_steps.append(Step(name=step_name, description=step.description, team=step))
2197
+ elif isinstance(step, (Step, Steps, Loop, Parallel, Condition, Router)):
2198
+ step_type = type(step).__name__
2199
+ step_name = getattr(step, "name", f"unnamed_{step_type.lower()}")
2200
+ log_debug(f"Step {i + 1}: {step_type} '{step_name}'")
2201
+ prepared_steps.append(step)
2202
+ else:
2203
+ raise ValueError(f"Invalid step type: {type(step).__name__}")
2204
+
2205
+ self.steps = prepared_steps # type: ignore
2206
+ log_debug("Step preparation completed")
2207
+
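A minimal sketch of the step normalization performed above, assuming the Workflow constructor accepts name and steps and that `my_agent` is a pre-built Agent; plain callables and Agent/Team instances are wrapped into Step objects by _prepare_steps.

def fetch_data(step_input):           # signature is illustrative; any user callable is wrapped as an executor
    return "raw data"

def publish(step_input):              # illustrative callable used as an explicit Step executor
    return "published"

workflow = Workflow(                  # constructor arguments assumed for illustration
    name="report-pipeline",
    steps=[fetch_data, my_agent, Step(name="publish", description="Publish the result", executor=publish)],
)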
2208
+ def print_response(
2209
+ self,
2210
+ input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
2211
+ additional_data: Optional[Dict[str, Any]] = None,
2212
+ user_id: Optional[str] = None,
2213
+ session_id: Optional[str] = None,
2214
+ audio: Optional[List[Audio]] = None,
2215
+ images: Optional[List[Image]] = None,
2216
+ videos: Optional[List[Video]] = None,
2217
+ files: Optional[List[File]] = None,
2218
+ stream: Optional[bool] = None,
2219
+ stream_intermediate_steps: Optional[bool] = None,
2220
+ markdown: bool = True,
2221
+ show_time: bool = True,
2222
+ show_step_details: bool = True,
2223
+ console: Optional[Any] = None,
2224
+ **kwargs: Any,
2225
+ ) -> None:
2226
+ """Print workflow execution with rich formatting and optional streaming
747
2227
 
748
2228
  Args:
749
- update (Optional[Dict[str, Any]]): Optional dictionary of fields for the new Workflow.
2229
+ input: The main query/input for the workflow
2230
+ additional_data: Attached message data to the input
2231
+ user_id: User ID
2232
+ session_id: Session ID
2233
+ audio: Audio input
2234
+ images: Image input
2235
+ videos: Video input
+ files: File input
2236
+ stream: Whether to stream the response content
2237
+ stream_intermediate_steps: Whether to stream intermediate steps
2238
+ markdown: Whether to render content as markdown
2239
+ show_time: Whether to show execution time
2240
+ show_step_details: Whether to show individual step outputs
2241
+ console: Rich console instance (optional)
2242
+ """
750
2243
 
751
- Returns:
752
- Workflow: A new Workflow instance.
2244
+ if stream is None:
2245
+ stream = self.stream or False
2246
+
2247
+ if stream_intermediate_steps is None:
2248
+ stream_intermediate_steps = self.stream_intermediate_steps or False
2249
+
2250
+ if stream:
2251
+ print_response_stream(
2252
+ workflow=self,
2253
+ input=input,
2254
+ user_id=user_id,
2255
+ session_id=session_id,
2256
+ additional_data=additional_data,
2257
+ audio=audio,
2258
+ images=images,
2259
+ videos=videos,
2260
+ files=files,
2261
+ stream_intermediate_steps=stream_intermediate_steps,
2262
+ markdown=markdown,
2263
+ show_time=show_time,
2264
+ show_step_details=show_step_details,
2265
+ console=console,
2266
+ **kwargs,
2267
+ )
2268
+ else:
2269
+ print_response(
2270
+ workflow=self,
2271
+ input=input,
2272
+ user_id=user_id,
2273
+ session_id=session_id,
2274
+ additional_data=additional_data,
2275
+ audio=audio,
2276
+ images=images,
2277
+ videos=videos,
2278
+ files=files,
2279
+ markdown=markdown,
2280
+ show_time=show_time,
2281
+ show_step_details=show_step_details,
2282
+ console=console,
2283
+ **kwargs,
2284
+ )
2285
+
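A minimal sketch of the convenience printer, assuming `workflow` is already constructed; streaming prints step output as it arrives.

workflow.print_response(
    "Write a launch announcement",
    stream=True,
    stream_intermediate_steps=True,
    markdown=True,
    show_step_details=True,
)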
2286
+ async def aprint_response(
2287
+ self,
2288
+ input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
2289
+ additional_data: Optional[Dict[str, Any]] = None,
2290
+ user_id: Optional[str] = None,
2291
+ session_id: Optional[str] = None,
2292
+ audio: Optional[List[Audio]] = None,
2293
+ images: Optional[List[Image]] = None,
2294
+ videos: Optional[List[Video]] = None,
2295
+ files: Optional[List[File]] = None,
2296
+ stream: Optional[bool] = None,
2297
+ stream_intermediate_steps: Optional[bool] = None,
2298
+ markdown: bool = True,
2299
+ show_time: bool = True,
2300
+ show_step_details: bool = True,
2301
+ console: Optional[Any] = None,
2302
+ **kwargs: Any,
2303
+ ) -> None:
2304
+ """Print workflow execution with rich formatting and optional streaming
2305
+
2306
+ Args:
2307
+ input: The main message/input for the workflow
2308
+ additional_data: Attached message data to the input
2309
+ user_id: User ID
2310
+ session_id: Session ID
2311
+ audio: Audio input
2312
+ images: Image input
2313
+ videos: Video input
+ files: File input
2314
+ stream_intermediate_steps: Whether to stream intermediate steps
2315
+ stream: Whether to stream the response content
2316
+ markdown: Whether to render content as markdown
2317
+ show_time: Whether to show execution time
2318
+ show_step_details: Whether to show individual step outputs
2319
+ console: Rich console instance (optional)
753
2320
  """
754
- # Extract the fields to set for the new Workflow
755
- fields_for_new_workflow: Dict[str, Any] = {}
756
-
757
- for f in fields(self):
758
- field_value = getattr(self, f.name)
759
- if field_value is not None:
760
- if isinstance(field_value, Agent):
761
- fields_for_new_workflow[f.name] = field_value.deep_copy()
762
- else:
763
- fields_for_new_workflow[f.name] = self._deep_copy_field(f.name, field_value)
2321
+ if stream is None:
2322
+ stream = self.stream or False
2323
+
2324
+ if stream_intermediate_steps is None:
2325
+ stream_intermediate_steps = self.stream_intermediate_steps or False
2326
+
2327
+ if stream:
2328
+ await aprint_response_stream(
2329
+ workflow=self,
2330
+ input=input,
2331
+ additional_data=additional_data,
2332
+ user_id=user_id,
2333
+ session_id=session_id,
2334
+ audio=audio,
2335
+ images=images,
2336
+ videos=videos,
2337
+ files=files,
2338
+ stream_intermediate_steps=stream_intermediate_steps,
2339
+ markdown=markdown,
2340
+ show_time=show_time,
2341
+ show_step_details=show_step_details,
2342
+ console=console,
2343
+ **kwargs,
2344
+ )
2345
+ else:
2346
+ await aprint_response(
2347
+ workflow=self,
2348
+ input=input,
2349
+ additional_data=additional_data,
2350
+ user_id=user_id,
2351
+ session_id=session_id,
2352
+ audio=audio,
2353
+ images=images,
2354
+ videos=videos,
2355
+ files=files,
2356
+ markdown=markdown,
2357
+ show_time=show_time,
2358
+ show_step_details=show_step_details,
2359
+ console=console,
2360
+ **kwargs,
2361
+ )
2362
+
2363
+ def to_dict(self) -> Dict[str, Any]:
2364
+ """Convert workflow to dictionary representation"""
764
2365
 
765
- # Update fields if provided
766
- if update:
767
- fields_for_new_workflow.update(update)
2366
+ def serialize_step(step):
2367
+ step_dict = {
2368
+ "name": step.name if hasattr(step, "name") else f"unnamed_{type(step).__name__.lower()}",
2369
+ "description": step.description if hasattr(step, "description") else "User-defined callable step",
2370
+ "type": STEP_TYPE_MAPPING[type(step)].value, # type: ignore
2371
+ }
768
2372
 
769
- # Create a new Workflow
770
- new_workflow = self.__class__(**fields_for_new_workflow)
771
- log_debug(f"Created new {self.__class__.__name__}")
772
- return new_workflow
2373
+ # Handle agent/team/tools
2374
+ if hasattr(step, "agent"):
2375
+ step_dict["agent"] = step.agent if hasattr(step, "agent") else None # type: ignore
2376
+ if hasattr(step, "team"):
2377
+ step_dict["team"] = step.team if hasattr(step, "team") else None # type: ignore
773
2378
 
774
- def _deep_copy_field(self, field_name: str, field_value: Any) -> Any:
775
- """Helper method to deep copy a field based on its type."""
776
- from copy import copy, deepcopy
2379
+ # Handle nested steps for Router/Loop
2380
+ if isinstance(step, (Router)):
2381
+ step_dict["steps"] = (
2382
+ [serialize_step(step) for step in step.choices] if hasattr(step, "choices") else None
2383
+ )
777
2384
 
778
- # For memory, use its deep_copy method
779
- if field_name == "memory":
780
- return field_value.deep_copy()
2385
+ elif isinstance(step, (Loop, Condition, Steps, Parallel)):
2386
+ step_dict["steps"] = [serialize_step(step) for step in step.steps] if hasattr(step, "steps") else None
781
2387
 
782
- # For compound types, attempt a deep copy
783
- if isinstance(field_value, (list, dict, set, Storage)):
784
- try:
785
- return deepcopy(field_value)
786
- except Exception as e:
787
- logger.warning(f"Failed to deepcopy field: {field_name} - {e}")
788
- try:
789
- return copy(field_value)
790
- except Exception as e:
791
- logger.warning(f"Failed to copy field: {field_name} - {e}")
792
- return field_value
2388
+ return step_dict
793
2389
 
794
- # For pydantic models, attempt a model_copy
795
- if isinstance(field_value, BaseModel):
796
- try:
797
- return field_value.model_copy(deep=True)
798
- except Exception as e:
799
- logger.warning(f"Failed to deepcopy field: {field_name} - {e}")
800
- try:
801
- return field_value.model_copy(deep=False)
802
- except Exception as e:
803
- logger.warning(f"Failed to copy field: {field_name} - {e}")
804
- return field_value
2390
+ if self.steps is None or callable(self.steps):
2391
+ steps_list = []
2392
+ elif isinstance(self.steps, Steps):
2393
+ steps_list = self.steps.steps
2394
+ else:
2395
+ steps_list = self.steps
2396
+
2397
+ return {
2398
+ "name": self.name,
2399
+ "workflow_id": self.id,
2400
+ "description": self.description,
2401
+ "steps": [serialize_step(s) for s in steps_list],
2402
+ "session_id": self.session_id,
2403
+ }
2404
+
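A small sketch of the serialized shape produced by to_dict; the concrete values are illustrative.

summary = workflow.to_dict()
# roughly: {"name": "report-pipeline", "workflow_id": "...", "description": None,
#           "steps": [{"name": "fetch_data", "description": "...", "type": ...}, ...],
#           "session_id": "..."}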
2405
+ def _calculate_session_metrics_from_workflow_metrics(self, workflow_metrics: WorkflowMetrics) -> Metrics:
2406
+ """Calculate session metrics by aggregating all step metrics from workflow metrics"""
2407
+ session_metrics = Metrics()
2408
+
2409
+ # Aggregate metrics from all steps
2410
+ for step_name, step_metrics in workflow_metrics.steps.items():
2411
+ if step_metrics.metrics:
2412
+ session_metrics += step_metrics.metrics
2413
+
2414
+ session_metrics.time_to_first_token = None
2415
+
2416
+ return session_metrics
2417
+
2418
+ def _get_session_metrics(self, session: WorkflowSession) -> Metrics:
2419
+ """Get existing session metrics from the database"""
2420
+ if session.session_data and "session_metrics" in session.session_data:
2421
+ session_metrics_from_db = session.session_data.get("session_metrics")
2422
+ if session_metrics_from_db is not None:
2423
+ if isinstance(session_metrics_from_db, dict):
2424
+ return Metrics(**session_metrics_from_db)
2425
+ elif isinstance(session_metrics_from_db, Metrics):
2426
+ return session_metrics_from_db
2427
+ return Metrics()
2428
+
2429
+ def _update_session_metrics(self, session: WorkflowSession, workflow_run_response: WorkflowRunOutput):
2430
+ """Calculate and update session metrics"""
2431
+ # Get existing session metrics
2432
+ session_metrics = self._get_session_metrics(session=session)
2433
+
2434
+ # If workflow has metrics, convert and add them to session metrics
2435
+ if workflow_run_response.metrics:
2436
+ run_session_metrics = self._calculate_session_metrics_from_workflow_metrics(workflow_run_response.metrics)
2437
+
2438
+ session_metrics += run_session_metrics
2439
+
2440
+ session_metrics.time_to_first_token = None
2441
+
2442
+ # Store updated session metrics, converted to a dict for JSON serialization
2443
+ if not session.session_data:
2444
+ session.session_data = {}
2445
+ session.session_data["session_metrics"] = session_metrics.to_dict()
2446
+
2447
+ def get_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
2448
+ """Get the session metrics for the given session ID and user ID."""
2449
+ session_id = session_id or self.session_id
2450
+ if session_id is None:
2451
+ raise Exception("Session ID is required")
2452
+
2453
+ session = self.get_session(session_id=session_id)
2454
+ if session is None:
2455
+ raise Exception("Session not found")
2456
+
2457
+ return self._get_session_metrics(session=session)
2458
+
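A minimal sketch of reading the aggregated metrics for a session, assuming the session id is known; per-run metrics are folded into the stored session metrics by _update_session_metrics above.

metrics = workflow.get_session_metrics(session_id="session-123")  # session id is illustrative
if metrics is not None:
    print(metrics.to_dict())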
2459
+ def update_agents_and_teams_session_info(self):
2460
+ """Update agents and teams with workflow session information"""
2461
+ log_debug("Updating agents and teams with session information")
2462
+ # Initialize steps - only if steps is iterable (not callable)
2463
+ if self.steps and not callable(self.steps):
2464
+ steps_list = self.steps.steps if isinstance(self.steps, Steps) else self.steps
2465
+ for step in steps_list:
2466
+ # TODO: Handle properly steps inside other primitives
2467
+ if isinstance(step, Step):
2468
+ active_executor = step.active_executor
2469
+
2470
+ if hasattr(active_executor, "workflow_id"):
2471
+ active_executor.workflow_id = self.id
2472
+
2473
+ # If it's a team, update all members
2474
+ if hasattr(active_executor, "members"):
2475
+ for member in active_executor.members:
2476
+ if hasattr(member, "workflow_id"):
2477
+ member.workflow_id = self.id
2478
+
2479
+ ###########################################################################
2480
+ # Telemetry functions
2481
+ ###########################################################################
2482
+
2483
+ def _get_telemetry_data(self) -> Dict[str, Any]:
2484
+ """Get the telemetry data for the workflow"""
2485
+ return {
2486
+ "workflow_id": self.id,
2487
+ "db_type": self.db.__class__.__name__ if self.db else None,
2488
+ "has_input_schema": self.input_schema is not None,
2489
+ }
2490
+
2491
+ def _log_workflow_telemetry(self, session_id: str, run_id: Optional[str] = None) -> None:
2492
+ """Send a telemetry event to the API for a created Workflow run"""
2493
+
2494
+ self._set_telemetry()
2495
+ if not self.telemetry:
2496
+ return
2497
+
2498
+ from agno.api.workflow import WorkflowRunCreate, create_workflow_run
805
2499
 
806
- # For other types, return as is
807
- return field_value
2500
+ try:
2501
+ create_workflow_run(
2502
+ workflow=WorkflowRunCreate(session_id=session_id, run_id=run_id, data=self._get_telemetry_data()),
2503
+ )
2504
+ except Exception as e:
2505
+ log_debug(f"Could not create Workflow run telemetry event: {e}")
2506
+
2507
+ async def _alog_workflow_telemetry(self, session_id: str, run_id: Optional[str] = None) -> None:
2508
+ """Send a telemetry event to the API for a created Workflow async run"""
2509
+
2510
+ self._set_telemetry()
2511
+ if not self.telemetry:
2512
+ return
2513
+
2514
+ from agno.api.workflow import WorkflowRunCreate, acreate_workflow_run
2515
+
2516
+ try:
2517
+ await acreate_workflow_run(
2518
+ workflow=WorkflowRunCreate(session_id=session_id, run_id=run_id, data=self._get_telemetry_data())
2519
+ )
2520
+ except Exception as e:
2521
+ log_debug(f"Could not create Workflow run telemetry event: {e}")