agno 1.8.1__py3-none-any.whl → 2.0.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (580)
  1. agno/__init__.py +8 -0
  2. agno/agent/__init__.py +19 -27
  3. agno/agent/agent.py +2778 -4123
  4. agno/api/agent.py +9 -65
  5. agno/api/api.py +5 -46
  6. agno/api/evals.py +6 -17
  7. agno/api/os.py +17 -0
  8. agno/api/routes.py +6 -41
  9. agno/api/schemas/__init__.py +9 -0
  10. agno/api/schemas/agent.py +5 -21
  11. agno/api/schemas/evals.py +7 -16
  12. agno/api/schemas/os.py +14 -0
  13. agno/api/schemas/team.py +5 -21
  14. agno/api/schemas/utils.py +21 -0
  15. agno/api/schemas/workflows.py +11 -7
  16. agno/api/settings.py +53 -0
  17. agno/api/team.py +9 -64
  18. agno/api/workflow.py +28 -0
  19. agno/cloud/aws/base.py +214 -0
  20. agno/cloud/aws/s3/__init__.py +2 -0
  21. agno/cloud/aws/s3/api_client.py +43 -0
  22. agno/cloud/aws/s3/bucket.py +195 -0
  23. agno/cloud/aws/s3/object.py +57 -0
  24. agno/db/__init__.py +24 -0
  25. agno/db/base.py +245 -0
  26. agno/db/dynamo/__init__.py +3 -0
  27. agno/db/dynamo/dynamo.py +1749 -0
  28. agno/db/dynamo/schemas.py +278 -0
  29. agno/db/dynamo/utils.py +684 -0
  30. agno/db/firestore/__init__.py +3 -0
  31. agno/db/firestore/firestore.py +1438 -0
  32. agno/db/firestore/schemas.py +130 -0
  33. agno/db/firestore/utils.py +278 -0
  34. agno/db/gcs_json/__init__.py +3 -0
  35. agno/db/gcs_json/gcs_json_db.py +1001 -0
  36. agno/db/gcs_json/utils.py +194 -0
  37. agno/db/in_memory/__init__.py +3 -0
  38. agno/db/in_memory/in_memory_db.py +888 -0
  39. agno/db/in_memory/utils.py +172 -0
  40. agno/db/json/__init__.py +3 -0
  41. agno/db/json/json_db.py +1051 -0
  42. agno/db/json/utils.py +196 -0
  43. agno/db/migrations/v1_to_v2.py +162 -0
  44. agno/db/mongo/__init__.py +3 -0
  45. agno/db/mongo/mongo.py +1417 -0
  46. agno/db/mongo/schemas.py +77 -0
  47. agno/db/mongo/utils.py +204 -0
  48. agno/db/mysql/__init__.py +3 -0
  49. agno/db/mysql/mysql.py +1719 -0
  50. agno/db/mysql/schemas.py +124 -0
  51. agno/db/mysql/utils.py +298 -0
  52. agno/db/postgres/__init__.py +3 -0
  53. agno/db/postgres/postgres.py +1720 -0
  54. agno/db/postgres/schemas.py +124 -0
  55. agno/db/postgres/utils.py +281 -0
  56. agno/db/redis/__init__.py +3 -0
  57. agno/db/redis/redis.py +1371 -0
  58. agno/db/redis/schemas.py +109 -0
  59. agno/db/redis/utils.py +288 -0
  60. agno/db/schemas/__init__.py +3 -0
  61. agno/db/schemas/evals.py +33 -0
  62. agno/db/schemas/knowledge.py +40 -0
  63. agno/db/schemas/memory.py +46 -0
  64. agno/db/singlestore/__init__.py +3 -0
  65. agno/db/singlestore/schemas.py +116 -0
  66. agno/db/singlestore/singlestore.py +1722 -0
  67. agno/db/singlestore/utils.py +327 -0
  68. agno/db/sqlite/__init__.py +3 -0
  69. agno/db/sqlite/schemas.py +119 -0
  70. agno/db/sqlite/sqlite.py +1680 -0
  71. agno/db/sqlite/utils.py +269 -0
  72. agno/db/utils.py +88 -0
  73. agno/eval/__init__.py +14 -0
  74. agno/eval/accuracy.py +142 -43
  75. agno/eval/performance.py +88 -23
  76. agno/eval/reliability.py +73 -20
  77. agno/eval/utils.py +23 -13
  78. agno/integrations/discord/__init__.py +3 -0
  79. agno/{app → integrations}/discord/client.py +10 -10
  80. agno/knowledge/__init__.py +2 -2
  81. agno/{document → knowledge}/chunking/agentic.py +2 -2
  82. agno/{document → knowledge}/chunking/document.py +2 -2
  83. agno/{document → knowledge}/chunking/fixed.py +3 -3
  84. agno/{document → knowledge}/chunking/markdown.py +2 -2
  85. agno/{document → knowledge}/chunking/recursive.py +2 -2
  86. agno/{document → knowledge}/chunking/row.py +2 -2
  87. agno/knowledge/chunking/semantic.py +59 -0
  88. agno/knowledge/chunking/strategy.py +121 -0
  89. agno/knowledge/content.py +74 -0
  90. agno/knowledge/document/__init__.py +5 -0
  91. agno/{document → knowledge/document}/base.py +12 -2
  92. agno/knowledge/embedder/__init__.py +5 -0
  93. agno/{embedder → knowledge/embedder}/aws_bedrock.py +127 -1
  94. agno/{embedder → knowledge/embedder}/azure_openai.py +65 -1
  95. agno/{embedder → knowledge/embedder}/base.py +6 -0
  96. agno/{embedder → knowledge/embedder}/cohere.py +72 -1
  97. agno/{embedder → knowledge/embedder}/fastembed.py +17 -1
  98. agno/{embedder → knowledge/embedder}/fireworks.py +1 -1
  99. agno/{embedder → knowledge/embedder}/google.py +74 -1
  100. agno/{embedder → knowledge/embedder}/huggingface.py +36 -2
  101. agno/{embedder → knowledge/embedder}/jina.py +48 -2
  102. agno/knowledge/embedder/langdb.py +22 -0
  103. agno/knowledge/embedder/mistral.py +139 -0
  104. agno/{embedder → knowledge/embedder}/nebius.py +1 -1
  105. agno/{embedder → knowledge/embedder}/ollama.py +54 -3
  106. agno/knowledge/embedder/openai.py +223 -0
  107. agno/{embedder → knowledge/embedder}/sentence_transformer.py +16 -1
  108. agno/{embedder → knowledge/embedder}/together.py +1 -1
  109. agno/{embedder → knowledge/embedder}/voyageai.py +49 -1
  110. agno/knowledge/knowledge.py +1515 -0
  111. agno/knowledge/reader/__init__.py +7 -0
  112. agno/{document → knowledge}/reader/arxiv_reader.py +32 -4
  113. agno/knowledge/reader/base.py +88 -0
  114. agno/{document → knowledge}/reader/csv_reader.py +68 -15
  115. agno/knowledge/reader/docx_reader.py +83 -0
  116. agno/{document → knowledge}/reader/firecrawl_reader.py +42 -21
  117. agno/knowledge/reader/gcs_reader.py +67 -0
  118. agno/{document → knowledge}/reader/json_reader.py +30 -9
  119. agno/{document → knowledge}/reader/markdown_reader.py +36 -9
  120. agno/{document → knowledge}/reader/pdf_reader.py +79 -21
  121. agno/knowledge/reader/reader_factory.py +275 -0
  122. agno/knowledge/reader/s3_reader.py +171 -0
  123. agno/{document → knowledge}/reader/text_reader.py +31 -10
  124. agno/knowledge/reader/url_reader.py +84 -0
  125. agno/knowledge/reader/web_search_reader.py +389 -0
  126. agno/{document → knowledge}/reader/website_reader.py +37 -10
  127. agno/knowledge/reader/wikipedia_reader.py +59 -0
  128. agno/knowledge/reader/youtube_reader.py +78 -0
  129. agno/knowledge/remote_content/remote_content.py +88 -0
  130. agno/{reranker → knowledge/reranker}/base.py +1 -1
  131. agno/{reranker → knowledge/reranker}/cohere.py +2 -2
  132. agno/{reranker → knowledge/reranker}/infinity.py +2 -2
  133. agno/{reranker → knowledge/reranker}/sentence_transformer.py +2 -2
  134. agno/knowledge/types.py +30 -0
  135. agno/knowledge/utils.py +169 -0
  136. agno/memory/__init__.py +2 -10
  137. agno/memory/manager.py +1003 -148
  138. agno/models/aimlapi/__init__.py +2 -2
  139. agno/models/aimlapi/aimlapi.py +6 -6
  140. agno/models/anthropic/claude.py +129 -82
  141. agno/models/aws/bedrock.py +107 -175
  142. agno/models/aws/claude.py +64 -18
  143. agno/models/azure/ai_foundry.py +73 -23
  144. agno/models/base.py +347 -287
  145. agno/models/cerebras/cerebras.py +84 -27
  146. agno/models/cohere/chat.py +106 -98
  147. agno/models/google/gemini.py +100 -42
  148. agno/models/groq/groq.py +97 -35
  149. agno/models/huggingface/huggingface.py +92 -27
  150. agno/models/ibm/watsonx.py +72 -13
  151. agno/models/litellm/chat.py +85 -13
  152. agno/models/message.py +38 -144
  153. agno/models/meta/llama.py +85 -49
  154. agno/models/metrics.py +120 -0
  155. agno/models/mistral/mistral.py +90 -21
  156. agno/models/ollama/__init__.py +0 -2
  157. agno/models/ollama/chat.py +84 -46
  158. agno/models/openai/chat.py +121 -23
  159. agno/models/openai/responses.py +178 -105
  160. agno/models/perplexity/perplexity.py +26 -2
  161. agno/models/portkey/portkey.py +0 -7
  162. agno/models/response.py +14 -8
  163. agno/models/utils.py +20 -0
  164. agno/models/vercel/__init__.py +2 -2
  165. agno/models/vercel/v0.py +1 -1
  166. agno/models/vllm/__init__.py +2 -2
  167. agno/models/vllm/vllm.py +3 -3
  168. agno/models/xai/xai.py +10 -10
  169. agno/os/__init__.py +3 -0
  170. agno/os/app.py +393 -0
  171. agno/os/auth.py +47 -0
  172. agno/os/config.py +103 -0
  173. agno/os/interfaces/agui/__init__.py +3 -0
  174. agno/os/interfaces/agui/agui.py +31 -0
  175. agno/{app/agui/async_router.py → os/interfaces/agui/router.py} +16 -16
  176. agno/{app → os/interfaces}/agui/utils.py +65 -28
  177. agno/os/interfaces/base.py +21 -0
  178. agno/os/interfaces/slack/__init__.py +3 -0
  179. agno/{app/slack/async_router.py → os/interfaces/slack/router.py} +3 -5
  180. agno/os/interfaces/slack/slack.py +33 -0
  181. agno/os/interfaces/whatsapp/__init__.py +3 -0
  182. agno/{app/whatsapp/async_router.py → os/interfaces/whatsapp/router.py} +4 -7
  183. agno/os/interfaces/whatsapp/whatsapp.py +30 -0
  184. agno/os/router.py +843 -0
  185. agno/os/routers/__init__.py +3 -0
  186. agno/os/routers/evals/__init__.py +3 -0
  187. agno/os/routers/evals/evals.py +204 -0
  188. agno/os/routers/evals/schemas.py +142 -0
  189. agno/os/routers/evals/utils.py +161 -0
  190. agno/os/routers/knowledge/__init__.py +3 -0
  191. agno/os/routers/knowledge/knowledge.py +413 -0
  192. agno/os/routers/knowledge/schemas.py +118 -0
  193. agno/os/routers/memory/__init__.py +3 -0
  194. agno/os/routers/memory/memory.py +179 -0
  195. agno/os/routers/memory/schemas.py +58 -0
  196. agno/os/routers/metrics/__init__.py +3 -0
  197. agno/os/routers/metrics/metrics.py +58 -0
  198. agno/os/routers/metrics/schemas.py +47 -0
  199. agno/os/routers/session/__init__.py +3 -0
  200. agno/os/routers/session/session.py +163 -0
  201. agno/os/schema.py +892 -0
  202. agno/{app/playground → os}/settings.py +8 -15
  203. agno/os/utils.py +270 -0
  204. agno/reasoning/azure_ai_foundry.py +4 -4
  205. agno/reasoning/deepseek.py +4 -4
  206. agno/reasoning/default.py +6 -11
  207. agno/reasoning/groq.py +4 -4
  208. agno/reasoning/helpers.py +4 -6
  209. agno/reasoning/ollama.py +4 -4
  210. agno/reasoning/openai.py +4 -4
  211. agno/run/{response.py → agent.py} +144 -72
  212. agno/run/base.py +44 -58
  213. agno/run/cancel.py +83 -0
  214. agno/run/team.py +133 -77
  215. agno/run/workflow.py +537 -12
  216. agno/session/__init__.py +10 -0
  217. agno/session/agent.py +244 -0
  218. agno/session/summary.py +225 -0
  219. agno/session/team.py +262 -0
  220. agno/{storage/session/v2 → session}/workflow.py +47 -24
  221. agno/team/__init__.py +15 -16
  222. agno/team/team.py +2961 -4253
  223. agno/tools/agentql.py +14 -5
  224. agno/tools/airflow.py +9 -4
  225. agno/tools/api.py +7 -3
  226. agno/tools/apify.py +2 -46
  227. agno/tools/arxiv.py +8 -3
  228. agno/tools/aws_lambda.py +7 -5
  229. agno/tools/aws_ses.py +7 -1
  230. agno/tools/baidusearch.py +4 -1
  231. agno/tools/bitbucket.py +4 -4
  232. agno/tools/brandfetch.py +14 -11
  233. agno/tools/bravesearch.py +4 -1
  234. agno/tools/brightdata.py +42 -22
  235. agno/tools/browserbase.py +13 -4
  236. agno/tools/calcom.py +12 -10
  237. agno/tools/calculator.py +10 -27
  238. agno/tools/cartesia.py +18 -13
  239. agno/tools/{clickup_tool.py → clickup.py} +12 -25
  240. agno/tools/confluence.py +8 -8
  241. agno/tools/crawl4ai.py +7 -1
  242. agno/tools/csv_toolkit.py +9 -8
  243. agno/tools/dalle.py +18 -11
  244. agno/tools/daytona.py +13 -16
  245. agno/tools/decorator.py +6 -3
  246. agno/tools/desi_vocal.py +16 -7
  247. agno/tools/discord.py +11 -8
  248. agno/tools/docker.py +30 -42
  249. agno/tools/duckdb.py +34 -53
  250. agno/tools/duckduckgo.py +8 -7
  251. agno/tools/e2b.py +61 -61
  252. agno/tools/eleven_labs.py +35 -28
  253. agno/tools/email.py +4 -1
  254. agno/tools/evm.py +7 -1
  255. agno/tools/exa.py +19 -14
  256. agno/tools/fal.py +29 -29
  257. agno/tools/file.py +9 -8
  258. agno/tools/financial_datasets.py +25 -44
  259. agno/tools/firecrawl.py +22 -22
  260. agno/tools/function.py +68 -17
  261. agno/tools/giphy.py +22 -10
  262. agno/tools/github.py +48 -126
  263. agno/tools/gmail.py +45 -61
  264. agno/tools/google_bigquery.py +7 -6
  265. agno/tools/google_maps.py +11 -26
  266. agno/tools/googlesearch.py +7 -2
  267. agno/tools/googlesheets.py +21 -17
  268. agno/tools/hackernews.py +9 -5
  269. agno/tools/jina.py +5 -4
  270. agno/tools/jira.py +18 -9
  271. agno/tools/knowledge.py +31 -32
  272. agno/tools/linear.py +18 -33
  273. agno/tools/linkup.py +5 -1
  274. agno/tools/local_file_system.py +8 -5
  275. agno/tools/lumalab.py +31 -19
  276. agno/tools/mem0.py +18 -12
  277. agno/tools/memori.py +14 -10
  278. agno/tools/mlx_transcribe.py +3 -2
  279. agno/tools/models/azure_openai.py +32 -14
  280. agno/tools/models/gemini.py +58 -31
  281. agno/tools/models/groq.py +29 -20
  282. agno/tools/models/nebius.py +27 -11
  283. agno/tools/models_labs.py +39 -15
  284. agno/tools/moviepy_video.py +7 -6
  285. agno/tools/neo4j.py +10 -8
  286. agno/tools/newspaper.py +7 -2
  287. agno/tools/newspaper4k.py +8 -3
  288. agno/tools/openai.py +57 -26
  289. agno/tools/openbb.py +12 -11
  290. agno/tools/opencv.py +62 -46
  291. agno/tools/openweather.py +14 -12
  292. agno/tools/pandas.py +11 -3
  293. agno/tools/postgres.py +4 -12
  294. agno/tools/pubmed.py +4 -1
  295. agno/tools/python.py +9 -22
  296. agno/tools/reasoning.py +35 -27
  297. agno/tools/reddit.py +11 -26
  298. agno/tools/replicate.py +54 -41
  299. agno/tools/resend.py +4 -1
  300. agno/tools/scrapegraph.py +15 -14
  301. agno/tools/searxng.py +10 -23
  302. agno/tools/serpapi.py +6 -3
  303. agno/tools/serper.py +13 -4
  304. agno/tools/shell.py +9 -2
  305. agno/tools/slack.py +12 -11
  306. agno/tools/sleep.py +3 -2
  307. agno/tools/spider.py +24 -4
  308. agno/tools/sql.py +7 -6
  309. agno/tools/tavily.py +6 -4
  310. agno/tools/telegram.py +12 -4
  311. agno/tools/todoist.py +11 -31
  312. agno/tools/toolkit.py +1 -1
  313. agno/tools/trafilatura.py +22 -6
  314. agno/tools/trello.py +9 -22
  315. agno/tools/twilio.py +10 -3
  316. agno/tools/user_control_flow.py +6 -1
  317. agno/tools/valyu.py +34 -5
  318. agno/tools/visualization.py +19 -28
  319. agno/tools/webbrowser.py +4 -3
  320. agno/tools/webex.py +11 -7
  321. agno/tools/website.py +15 -46
  322. agno/tools/webtools.py +12 -4
  323. agno/tools/whatsapp.py +5 -9
  324. agno/tools/wikipedia.py +20 -13
  325. agno/tools/x.py +14 -13
  326. agno/tools/yfinance.py +13 -40
  327. agno/tools/youtube.py +26 -20
  328. agno/tools/zendesk.py +7 -2
  329. agno/tools/zep.py +10 -7
  330. agno/tools/zoom.py +10 -9
  331. agno/utils/common.py +1 -19
  332. agno/utils/events.py +95 -118
  333. agno/utils/knowledge.py +29 -0
  334. agno/utils/log.py +2 -2
  335. agno/utils/mcp.py +11 -5
  336. agno/utils/media.py +39 -0
  337. agno/utils/message.py +12 -1
  338. agno/utils/models/claude.py +6 -4
  339. agno/utils/models/mistral.py +8 -7
  340. agno/utils/models/schema_utils.py +3 -3
  341. agno/utils/pprint.py +33 -32
  342. agno/utils/print_response/agent.py +779 -0
  343. agno/utils/print_response/team.py +1565 -0
  344. agno/utils/print_response/workflow.py +1451 -0
  345. agno/utils/prompts.py +14 -14
  346. agno/utils/reasoning.py +87 -0
  347. agno/utils/response.py +42 -42
  348. agno/utils/string.py +8 -22
  349. agno/utils/team.py +50 -0
  350. agno/utils/timer.py +2 -2
  351. agno/vectordb/base.py +33 -21
  352. agno/vectordb/cassandra/cassandra.py +287 -23
  353. agno/vectordb/chroma/chromadb.py +482 -59
  354. agno/vectordb/clickhouse/clickhousedb.py +270 -63
  355. agno/vectordb/couchbase/couchbase.py +309 -29
  356. agno/vectordb/lancedb/lance_db.py +360 -21
  357. agno/vectordb/langchaindb/__init__.py +5 -0
  358. agno/vectordb/langchaindb/langchaindb.py +145 -0
  359. agno/vectordb/lightrag/__init__.py +5 -0
  360. agno/vectordb/lightrag/lightrag.py +374 -0
  361. agno/vectordb/llamaindex/llamaindexdb.py +127 -0
  362. agno/vectordb/milvus/milvus.py +242 -32
  363. agno/vectordb/mongodb/mongodb.py +200 -24
  364. agno/vectordb/pgvector/pgvector.py +319 -37
  365. agno/vectordb/pineconedb/pineconedb.py +221 -27
  366. agno/vectordb/qdrant/qdrant.py +334 -14
  367. agno/vectordb/singlestore/singlestore.py +286 -29
  368. agno/vectordb/surrealdb/surrealdb.py +187 -7
  369. agno/vectordb/upstashdb/upstashdb.py +342 -26
  370. agno/vectordb/weaviate/weaviate.py +227 -165
  371. agno/workflow/__init__.py +17 -13
  372. agno/workflow/{v2/condition.py → condition.py} +135 -32
  373. agno/workflow/{v2/loop.py → loop.py} +115 -28
  374. agno/workflow/{v2/parallel.py → parallel.py} +138 -108
  375. agno/workflow/{v2/router.py → router.py} +133 -32
  376. agno/workflow/{v2/step.py → step.py} +200 -42
  377. agno/workflow/{v2/steps.py → steps.py} +147 -66
  378. agno/workflow/types.py +482 -0
  379. agno/workflow/workflow.py +2394 -696
  380. agno-2.0.0a1.dist-info/METADATA +355 -0
  381. agno-2.0.0a1.dist-info/RECORD +514 -0
  382. agno/agent/metrics.py +0 -107
  383. agno/api/app.py +0 -35
  384. agno/api/playground.py +0 -92
  385. agno/api/schemas/app.py +0 -12
  386. agno/api/schemas/playground.py +0 -22
  387. agno/api/schemas/user.py +0 -35
  388. agno/api/schemas/workspace.py +0 -46
  389. agno/api/user.py +0 -160
  390. agno/api/workflows.py +0 -33
  391. agno/api/workspace.py +0 -175
  392. agno/app/agui/__init__.py +0 -3
  393. agno/app/agui/app.py +0 -17
  394. agno/app/agui/sync_router.py +0 -120
  395. agno/app/base.py +0 -186
  396. agno/app/discord/__init__.py +0 -3
  397. agno/app/fastapi/__init__.py +0 -3
  398. agno/app/fastapi/app.py +0 -107
  399. agno/app/fastapi/async_router.py +0 -457
  400. agno/app/fastapi/sync_router.py +0 -448
  401. agno/app/playground/app.py +0 -228
  402. agno/app/playground/async_router.py +0 -1050
  403. agno/app/playground/deploy.py +0 -249
  404. agno/app/playground/operator.py +0 -183
  405. agno/app/playground/schemas.py +0 -220
  406. agno/app/playground/serve.py +0 -55
  407. agno/app/playground/sync_router.py +0 -1042
  408. agno/app/playground/utils.py +0 -46
  409. agno/app/settings.py +0 -15
  410. agno/app/slack/__init__.py +0 -3
  411. agno/app/slack/app.py +0 -19
  412. agno/app/slack/sync_router.py +0 -92
  413. agno/app/utils.py +0 -54
  414. agno/app/whatsapp/__init__.py +0 -3
  415. agno/app/whatsapp/app.py +0 -15
  416. agno/app/whatsapp/sync_router.py +0 -197
  417. agno/cli/auth_server.py +0 -249
  418. agno/cli/config.py +0 -274
  419. agno/cli/console.py +0 -88
  420. agno/cli/credentials.py +0 -23
  421. agno/cli/entrypoint.py +0 -571
  422. agno/cli/operator.py +0 -357
  423. agno/cli/settings.py +0 -96
  424. agno/cli/ws/ws_cli.py +0 -817
  425. agno/constants.py +0 -13
  426. agno/document/__init__.py +0 -5
  427. agno/document/chunking/semantic.py +0 -45
  428. agno/document/chunking/strategy.py +0 -31
  429. agno/document/reader/__init__.py +0 -5
  430. agno/document/reader/base.py +0 -47
  431. agno/document/reader/docx_reader.py +0 -60
  432. agno/document/reader/gcs/pdf_reader.py +0 -44
  433. agno/document/reader/s3/pdf_reader.py +0 -59
  434. agno/document/reader/s3/text_reader.py +0 -63
  435. agno/document/reader/url_reader.py +0 -59
  436. agno/document/reader/youtube_reader.py +0 -58
  437. agno/embedder/__init__.py +0 -5
  438. agno/embedder/langdb.py +0 -80
  439. agno/embedder/mistral.py +0 -82
  440. agno/embedder/openai.py +0 -78
  441. agno/file/__init__.py +0 -5
  442. agno/file/file.py +0 -16
  443. agno/file/local/csv.py +0 -32
  444. agno/file/local/txt.py +0 -19
  445. agno/infra/app.py +0 -240
  446. agno/infra/base.py +0 -144
  447. agno/infra/context.py +0 -20
  448. agno/infra/db_app.py +0 -52
  449. agno/infra/resource.py +0 -205
  450. agno/infra/resources.py +0 -55
  451. agno/knowledge/agent.py +0 -702
  452. agno/knowledge/arxiv.py +0 -33
  453. agno/knowledge/combined.py +0 -36
  454. agno/knowledge/csv.py +0 -144
  455. agno/knowledge/csv_url.py +0 -124
  456. agno/knowledge/document.py +0 -223
  457. agno/knowledge/docx.py +0 -137
  458. agno/knowledge/firecrawl.py +0 -34
  459. agno/knowledge/gcs/__init__.py +0 -0
  460. agno/knowledge/gcs/base.py +0 -39
  461. agno/knowledge/gcs/pdf.py +0 -125
  462. agno/knowledge/json.py +0 -137
  463. agno/knowledge/langchain.py +0 -71
  464. agno/knowledge/light_rag.py +0 -273
  465. agno/knowledge/llamaindex.py +0 -66
  466. agno/knowledge/markdown.py +0 -154
  467. agno/knowledge/pdf.py +0 -164
  468. agno/knowledge/pdf_bytes.py +0 -42
  469. agno/knowledge/pdf_url.py +0 -148
  470. agno/knowledge/s3/__init__.py +0 -0
  471. agno/knowledge/s3/base.py +0 -64
  472. agno/knowledge/s3/pdf.py +0 -33
  473. agno/knowledge/s3/text.py +0 -34
  474. agno/knowledge/text.py +0 -141
  475. agno/knowledge/url.py +0 -46
  476. agno/knowledge/website.py +0 -179
  477. agno/knowledge/wikipedia.py +0 -32
  478. agno/knowledge/youtube.py +0 -35
  479. agno/memory/agent.py +0 -423
  480. agno/memory/classifier.py +0 -104
  481. agno/memory/db/__init__.py +0 -5
  482. agno/memory/db/base.py +0 -42
  483. agno/memory/db/mongodb.py +0 -189
  484. agno/memory/db/postgres.py +0 -203
  485. agno/memory/db/sqlite.py +0 -193
  486. agno/memory/memory.py +0 -22
  487. agno/memory/row.py +0 -36
  488. agno/memory/summarizer.py +0 -201
  489. agno/memory/summary.py +0 -19
  490. agno/memory/team.py +0 -415
  491. agno/memory/v2/__init__.py +0 -2
  492. agno/memory/v2/db/__init__.py +0 -1
  493. agno/memory/v2/db/base.py +0 -42
  494. agno/memory/v2/db/firestore.py +0 -339
  495. agno/memory/v2/db/mongodb.py +0 -196
  496. agno/memory/v2/db/postgres.py +0 -214
  497. agno/memory/v2/db/redis.py +0 -187
  498. agno/memory/v2/db/schema.py +0 -54
  499. agno/memory/v2/db/sqlite.py +0 -209
  500. agno/memory/v2/manager.py +0 -437
  501. agno/memory/v2/memory.py +0 -1097
  502. agno/memory/v2/schema.py +0 -55
  503. agno/memory/v2/summarizer.py +0 -215
  504. agno/memory/workflow.py +0 -38
  505. agno/models/ollama/tools.py +0 -430
  506. agno/models/qwen/__init__.py +0 -5
  507. agno/playground/__init__.py +0 -10
  508. agno/playground/deploy.py +0 -3
  509. agno/playground/playground.py +0 -3
  510. agno/playground/serve.py +0 -3
  511. agno/playground/settings.py +0 -3
  512. agno/reranker/__init__.py +0 -0
  513. agno/run/v2/__init__.py +0 -0
  514. agno/run/v2/workflow.py +0 -567
  515. agno/storage/__init__.py +0 -0
  516. agno/storage/agent/__init__.py +0 -0
  517. agno/storage/agent/dynamodb.py +0 -1
  518. agno/storage/agent/json.py +0 -1
  519. agno/storage/agent/mongodb.py +0 -1
  520. agno/storage/agent/postgres.py +0 -1
  521. agno/storage/agent/singlestore.py +0 -1
  522. agno/storage/agent/sqlite.py +0 -1
  523. agno/storage/agent/yaml.py +0 -1
  524. agno/storage/base.py +0 -60
  525. agno/storage/dynamodb.py +0 -673
  526. agno/storage/firestore.py +0 -297
  527. agno/storage/gcs_json.py +0 -261
  528. agno/storage/in_memory.py +0 -234
  529. agno/storage/json.py +0 -237
  530. agno/storage/mongodb.py +0 -328
  531. agno/storage/mysql.py +0 -685
  532. agno/storage/postgres.py +0 -682
  533. agno/storage/redis.py +0 -336
  534. agno/storage/session/__init__.py +0 -16
  535. agno/storage/session/agent.py +0 -64
  536. agno/storage/session/team.py +0 -63
  537. agno/storage/session/v2/__init__.py +0 -5
  538. agno/storage/session/workflow.py +0 -61
  539. agno/storage/singlestore.py +0 -606
  540. agno/storage/sqlite.py +0 -646
  541. agno/storage/workflow/__init__.py +0 -0
  542. agno/storage/workflow/mongodb.py +0 -1
  543. agno/storage/workflow/postgres.py +0 -1
  544. agno/storage/workflow/sqlite.py +0 -1
  545. agno/storage/yaml.py +0 -241
  546. agno/tools/thinking.py +0 -73
  547. agno/utils/defaults.py +0 -57
  548. agno/utils/filesystem.py +0 -39
  549. agno/utils/git.py +0 -52
  550. agno/utils/json_io.py +0 -30
  551. agno/utils/load_env.py +0 -19
  552. agno/utils/py_io.py +0 -19
  553. agno/utils/pyproject.py +0 -18
  554. agno/utils/resource_filter.py +0 -31
  555. agno/workflow/v2/__init__.py +0 -21
  556. agno/workflow/v2/types.py +0 -357
  557. agno/workflow/v2/workflow.py +0 -3312
  558. agno/workspace/__init__.py +0 -0
  559. agno/workspace/config.py +0 -325
  560. agno/workspace/enums.py +0 -6
  561. agno/workspace/helpers.py +0 -52
  562. agno/workspace/operator.py +0 -757
  563. agno/workspace/settings.py +0 -158
  564. agno-1.8.1.dist-info/METADATA +0 -982
  565. agno-1.8.1.dist-info/RECORD +0 -566
  566. agno-1.8.1.dist-info/entry_points.txt +0 -3
  567. /agno/{app → db/migrations}/__init__.py +0 -0
  568. /agno/{app/playground/__init__.py → db/schemas/metrics.py} +0 -0
  569. /agno/{cli → integrations}/__init__.py +0 -0
  570. /agno/{cli/ws → knowledge/chunking}/__init__.py +0 -0
  571. /agno/{document/chunking → knowledge/remote_content}/__init__.py +0 -0
  572. /agno/{document/reader/gcs → knowledge/reranker}/__init__.py +0 -0
  573. /agno/{document/reader/s3 → os/interfaces}/__init__.py +0 -0
  574. /agno/{app → os/interfaces}/slack/security.py +0 -0
  575. /agno/{app → os/interfaces}/whatsapp/security.py +0 -0
  576. /agno/{file/local → utils/print_response}/__init__.py +0 -0
  577. /agno/{infra → vectordb/llamaindex}/__init__.py +0 -0
  578. {agno-1.8.1.dist-info → agno-2.0.0a1.dist-info}/WHEEL +0 -0
  579. {agno-1.8.1.dist-info → agno-2.0.0a1.dist-info}/licenses/LICENSE +0 -0
  580. {agno-1.8.1.dist-info → agno-2.0.0a1.dist-info}/top_level.txt +0 -0
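The bulk of this release is a package reorganization: readers, chunkers, embedders and rerankers move under agno.knowledge, the agno.storage backends are replaced by the new agno.db drivers, the Playground/CLI/workspace layers give way to agno.os, and the workflow v2 modules are promoted to agno.workflow. A rough sketch of how import paths shift, inferred only from the file moves listed above (the exact public symbols each module exports are not visible in this diff):

# Module paths in agno 1.8.1          ->  agno 2.0.0a1 (inferred from the file list above)
# agno.embedder.openai                ->  agno.knowledge.embedder.openai
# agno.document.reader.pdf_reader     ->  agno.knowledge.reader.pdf_reader
# agno.storage.postgres (removed)     ->  agno.db.postgres.postgres (new)
# agno.run.response                   ->  agno.run.agent
# agno.workflow.v2.step               ->  agno.workflow.step
import agno.knowledge.embedder.openai as openai_embedder    # was agno.embedder.openai
import agno.knowledge.reader.pdf_reader as pdf_reader       # was agno.document.reader.pdf_reader
import agno.db.postgres.postgres as postgres_db             # replaces agno.storage.postgres
import agno.workflow.step as workflow_step                  # was agno.workflow.v2.step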
agno/workflow/workflow.py CHANGED
@@ -1,807 +1,2505 @@
1
- from __future__ import annotations
2
-
3
- import collections.abc
4
- import inspect
5
- from dataclasses import dataclass, field, fields
1
+ import asyncio
2
+ from dataclasses import dataclass
3
+ from datetime import datetime
6
4
  from os import getenv
7
- from types import GeneratorType
8
- from typing import Any, AsyncIterator, Callable, Dict, List, Optional, Union, cast, get_args
5
+ from typing import (
6
+ Any,
7
+ AsyncIterator,
8
+ Awaitable,
9
+ Callable,
10
+ Dict,
11
+ Iterator,
12
+ List,
13
+ Literal,
14
+ Optional,
15
+ Tuple,
16
+ Type,
17
+ Union,
18
+ cast,
19
+ overload,
20
+ )
9
21
  from uuid import uuid4
10
22
 
23
+ from fastapi import WebSocket
11
24
  from pydantic import BaseModel
12
25
 
13
- from agno.agent import Agent
14
- from agno.media import AudioArtifact, ImageArtifact, VideoArtifact
15
- from agno.memory.v2.memory import Memory
16
- from agno.memory.workflow import WorkflowMemory, WorkflowRun
17
- from agno.run.response import RunResponse, RunResponseEvent
18
- from agno.run.team import TeamRunResponseEvent
19
- from agno.run.workflow import WorkflowRunResponseEvent
20
- from agno.storage.base import Storage
21
- from agno.storage.session.workflow import WorkflowSession
22
- from agno.utils.common import nested_model_dump
23
- from agno.utils.log import log_debug, log_warning, logger, set_log_level_to_debug, set_log_level_to_info
24
- from agno.utils.merge_dict import merge_dictionaries
25
-
26
-
27
- @dataclass(init=False)
26
+ from agno.agent.agent import Agent
27
+ from agno.db.base import BaseDb, SessionType
28
+ from agno.exceptions import RunCancelledException
29
+ from agno.media import Audio, AudioArtifact, File, Image, ImageArtifact, Video, VideoArtifact
30
+ from agno.models.message import Message
31
+ from agno.models.metrics import Metrics
32
+ from agno.run.agent import RunEvent
33
+ from agno.run.base import RunStatus
34
+ from agno.run.cancel import (
35
+ cancel_run as cancel_run_global,
36
+ )
37
+ from agno.run.cancel import (
38
+ cleanup_run,
39
+ raise_if_cancelled,
40
+ register_run,
41
+ )
42
+ from agno.run.team import TeamRunEvent
43
+ from agno.run.workflow import (
44
+ StepOutputEvent,
45
+ WorkflowCancelledEvent,
46
+ WorkflowCompletedEvent,
47
+ WorkflowRunEvent,
48
+ WorkflowRunOutput,
49
+ WorkflowRunOutputEvent,
50
+ WorkflowStartedEvent,
51
+ )
52
+ from agno.session.workflow import WorkflowSession
53
+ from agno.team.team import Team
54
+ from agno.utils.log import (
55
+ log_debug,
56
+ log_warning,
57
+ logger,
58
+ set_log_level_to_debug,
59
+ set_log_level_to_info,
60
+ use_workflow_logger,
61
+ )
62
+ from agno.utils.print_response.workflow import (
63
+ aprint_response,
64
+ aprint_response_stream,
65
+ print_response,
66
+ print_response_stream,
67
+ )
68
+ from agno.workflow.condition import Condition
69
+ from agno.workflow.loop import Loop
70
+ from agno.workflow.parallel import Parallel
71
+ from agno.workflow.router import Router
72
+ from agno.workflow.step import Step
73
+ from agno.workflow.steps import Steps
74
+ from agno.workflow.types import (
75
+ StepInput,
76
+ StepMetrics,
77
+ StepOutput,
78
+ StepType,
79
+ WebSocketHandler,
80
+ WorkflowExecutionInput,
81
+ WorkflowMetrics,
82
+ )
83
+
84
+ STEP_TYPE_MAPPING = {
85
+ Step: StepType.STEP,
86
+ Steps: StepType.STEPS,
87
+ Loop: StepType.LOOP,
88
+ Parallel: StepType.PARALLEL,
89
+ Condition: StepType.CONDITION,
90
+ Router: StepType.ROUTER,
91
+ }
92
+
93
+ WorkflowSteps = Union[
94
+ Callable[
95
+ ["Workflow", WorkflowExecutionInput],
96
+ Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput], Any],
97
+ ],
98
+ Steps,
99
+ List[
100
+ Union[
101
+ Callable[
102
+ [StepInput], Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput]]
103
+ ],
104
+ Step,
105
+ Steps,
106
+ Loop,
107
+ Parallel,
108
+ Condition,
109
+ Router,
110
+ ]
111
+ ],
112
+ ]
113
+
114
+
115
+ @dataclass
28
116
  class Workflow:
29
- # --- Workflow settings ---
30
- # Workflow name
117
+ """Pipeline-based workflow execution"""
118
+
119
+ # Workflow identification - make name optional with default
31
120
  name: Optional[str] = None
32
- # Workflow UUID (autogenerated if not set)
33
- workflow_id: Optional[str] = None
34
- # Workflow app_id (autogenerated if not set)
35
- app_id: Optional[str] = None
36
- # Workflow description (only shown in the UI)
121
+ # Workflow ID (autogenerated if not set)
122
+ id: Optional[str] = None
123
+ # Workflow description
37
124
  description: Optional[str] = None
38
125
 
39
- # --- User settings ---
40
- # ID of the user interacting with this workflow
41
- user_id: Optional[str] = None
126
+ # Workflow steps
127
+ steps: Optional[WorkflowSteps] = None
42
128
 
43
- # -*- Session settings
44
- # Session UUID (autogenerated if not set)
129
+ # Database to use for this workflow
130
+ db: Optional[BaseDb] = None
131
+
132
+ # Default session_id to use for this workflow (autogenerated if not set)
45
133
  session_id: Optional[str] = None
46
- # Session name
47
- session_name: Optional[str] = None
48
- # Session state stored in the database
49
- session_state: Dict[str, Any] = field(default_factory=dict)
50
-
51
- # --- Workflow Memory ---
52
- memory: Optional[Union[WorkflowMemory, Memory]] = None
53
-
54
- # --- Workflow Storage ---
55
- storage: Optional[Storage] = None
56
- # Extra data stored with this workflow
57
- extra_data: Optional[Dict[str, Any]] = None
58
-
59
- # --- Debug & Monitoring ---
60
- # Enable debug logs
61
- debug_mode: bool = False
62
- # monitoring=True logs Workflow information to agno.com for monitoring
63
- monitoring: bool = field(default_factory=lambda: getenv("AGNO_MONITOR", "false").lower() == "true")
134
+ # Default user_id to use for this workflow
135
+ user_id: Optional[str] = None
136
+ # Default session state (stored in the database to persist across runs)
137
+ session_state: Optional[Dict[str, Any]] = None
138
+
139
+ # If True, the workflow runs in debug mode
140
+ debug_mode: Optional[bool] = False
141
+
142
+ # --- Workflow Streaming ---
143
+ # Stream the response from the Workflow
144
+ stream: Optional[bool] = None
145
+ # Stream the intermediate steps from the Workflow
146
+ stream_intermediate_steps: bool = False
147
+
148
+ # Persist the events on the run response
149
+ store_events: bool = False
150
+ # Events to skip when persisting the events on the run response
151
+ events_to_skip: Optional[List[Union[WorkflowRunEvent, RunEvent, TeamRunEvent]]] = None
152
+
153
+ # Control whether to store executor responses (agent/team responses) in flattened runs
154
+ store_executor_outputs: bool = True
155
+
156
+ websocket_handler: Optional[WebSocketHandler] = None
157
+
158
+ # Input schema to validate the input to the workflow
159
+ input_schema: Optional[Type[BaseModel]] = None
160
+
161
+ # Metadata stored with this workflow
162
+ metadata: Optional[Dict[str, Any]] = None
163
+
164
+ # --- Telemetry ---
64
165
  # telemetry=True logs minimal telemetry for analytics
65
- # This helps us improve the Workflow and provide better support
66
- telemetry: bool = field(default_factory=lambda: getenv("AGNO_TELEMETRY", "true").lower() == "true")
67
-
68
- # --- Run Info: DO NOT SET ---
69
- run_id: Optional[str] = None
70
- run_input: Optional[Dict[str, Any]] = None
71
- run_response: Optional[RunResponse] = None
72
- # Images generated during this session
73
- images: Optional[List[ImageArtifact]] = None
74
- # Videos generated during this session
75
- videos: Optional[List[VideoArtifact]] = None
76
- # Audio generated during this session
77
- audio: Optional[List[AudioArtifact]] = None
166
+ # This helps us improve the Agent and provide better support
167
+ telemetry: bool = True
78
168
 
79
169
  def __init__(
80
170
  self,
81
- *,
171
+ id: Optional[str] = None,
82
172
  name: Optional[str] = None,
83
- workflow_id: Optional[str] = None,
84
173
  description: Optional[str] = None,
85
- user_id: Optional[str] = None,
174
+ db: Optional[BaseDb] = None,
175
+ steps: Optional[WorkflowSteps] = None,
86
176
  session_id: Optional[str] = None,
87
- session_name: Optional[str] = None,
88
177
  session_state: Optional[Dict[str, Any]] = None,
89
- memory: Optional[Union[WorkflowMemory, Memory]] = None,
90
- storage: Optional[Storage] = None,
91
- extra_data: Optional[Dict[str, Any]] = None,
92
- debug_mode: bool = False,
93
- monitoring: bool = False,
178
+ user_id: Optional[str] = None,
179
+ debug_mode: Optional[bool] = False,
180
+ stream: Optional[bool] = None,
181
+ stream_intermediate_steps: bool = False,
182
+ store_events: bool = False,
183
+ events_to_skip: Optional[List[Union[WorkflowRunEvent, RunEvent, TeamRunEvent]]] = None,
184
+ store_executor_outputs: bool = True,
185
+ input_schema: Optional[Type[BaseModel]] = None,
186
+ metadata: Optional[Dict[str, Any]] = None,
187
+ cache_session: bool = False,
94
188
  telemetry: bool = True,
95
- app_id: Optional[str] = None,
96
189
  ):
97
- self.name = name or self.__class__.__name__
98
- self.workflow_id = workflow_id
99
- self.description = description or self.__class__.description
100
- self.app_id = app_id
101
-
190
+ self.id = id
191
+ self.name = name
192
+ self.description = description
193
+ self.steps = steps
194
+ self.session_id = session_id
195
+ self.session_state = session_state
102
196
  self.user_id = user_id
197
+ self.debug_mode = debug_mode
198
+ self.store_events = store_events
199
+ self.events_to_skip = events_to_skip or []
200
+ self.stream = stream
201
+ self.stream_intermediate_steps = stream_intermediate_steps
202
+ self.store_executor_outputs = store_executor_outputs
203
+ self.input_schema = input_schema
204
+ self.metadata = metadata
205
+ self.cache_session = cache_session
206
+ self.db = db
207
+ self.telemetry = telemetry
103
208
 
104
- self.session_id = session_id
105
- self.session_name = session_name
106
- self.session_state: Dict[str, Any] = session_state or {}
209
+ self._workflow_session: Optional[WorkflowSession] = None
107
210
 
108
- self.memory = memory
109
- self.storage = storage
110
- self.extra_data = extra_data
211
+ def _validate_input(
212
+ self, input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]]
213
+ ) -> Optional[BaseModel]:
214
+ """Parse and validate input against input_schema if provided"""
215
+ if self.input_schema is None:
216
+ return None
111
217
 
112
- self.debug_mode = debug_mode
113
- self.monitoring = monitoring
114
- self.telemetry = telemetry
218
+ if input is None:
219
+ raise ValueError("Input required when input_schema is set")
115
220
 
116
- self.run_id = None
117
- self.run_input = None
118
- self.run_response = None
119
- self.images = None
120
- self.videos = None
121
- self.audio = None
221
+ # Case 1: Message is already a BaseModel instance
222
+ if isinstance(input, BaseModel):
223
+ if isinstance(input, self.input_schema):
224
+ try:
225
+ # Re-validate to catch any field validation errors
226
+ input.model_validate(input.model_dump())
227
+ return input
228
+ except Exception as e:
229
+ raise ValueError(f"BaseModel validation failed: {str(e)}")
230
+ else:
231
+ # Different BaseModel types
232
+ raise ValueError(f"Expected {self.input_schema.__name__} but got {type(input).__name__}")
122
233
 
123
- self.workflow_session: Optional[WorkflowSession] = None
234
+ # Case 2: Message is a dict
235
+ elif isinstance(input, dict):
236
+ try:
237
+ validated_model = self.input_schema(**input)
238
+ return validated_model
239
+ except Exception as e:
240
+ raise ValueError(f"Failed to parse dict into {self.input_schema.__name__}: {str(e)}")
124
241
 
125
- # Private attributes to store the run method and its parameters
126
- # The run function provided by the subclass
127
- self._subclass_run: Optional[Callable] = None
128
- self._subclass_arun: Optional[Callable] = None
242
+ # Case 3: Other types not supported for structured input
243
+ else:
244
+ raise ValueError(
245
+ f"Cannot validate {type(input)} against input_schema. Expected dict or {self.input_schema.__name__} instance."
246
+ )
129
247
 
130
- # Parameters of the run function
131
- self._run_parameters: Optional[Dict[str, Any]] = None
132
- # Return type of the run function
133
- self._run_return_type: Optional[str] = None
248
+ @property
249
+ def run_parameters(self) -> Dict[str, Any]:
250
+ """Get the run parameters for the workflow"""
251
+
252
+ if self.steps is None:
253
+ return {}
254
+
255
+ parameters = {}
256
+
257
+ if self.steps and callable(self.steps):
258
+ from inspect import Parameter, signature
259
+
260
+ sig = signature(self.steps) # type: ignore
261
+
262
+ for param_name, param in sig.parameters.items():
263
+ if param_name not in ["workflow", "execution_input", "self"]:
264
+ parameters[param_name] = {
265
+ "name": param_name,
266
+ "default": param.default.default
267
+ if hasattr(param.default, "__class__") and param.default.__class__.__name__ == "FieldInfo"
268
+ else (param.default if param.default is not Parameter.empty else None),
269
+ "annotation": (
270
+ param.annotation.__name__
271
+ if hasattr(param.annotation, "__name__")
272
+ else (
273
+ str(param.annotation).replace("typing.Optional[", "").replace("]", "")
274
+ if "typing.Optional" in str(param.annotation)
275
+ else str(param.annotation)
276
+ )
277
+ )
278
+ if param.annotation is not Parameter.empty
279
+ else None,
280
+ "required": param.default is Parameter.empty,
281
+ }
282
+ else:
283
+ parameters = {
284
+ "message": {
285
+ "name": "message",
286
+ "default": None,
287
+ "annotation": "str",
288
+ "required": True,
289
+ },
290
+ }
134
291
 
135
- self.update_run_method()
292
+ return parameters
136
293
 
137
- self.__post_init__()
294
+ def initialize_workflow(self):
295
+ if self.id is None:
296
+ self.id = str(uuid4())
297
+ log_debug(f"Generated new workflow_id: {self.id}")
138
298
 
139
- def __post_init__(self):
140
- for field_name, value in self.__class__.__dict__.items():
141
- if isinstance(value, Agent):
142
- value.session_id = self.session_id
299
+ def _initialize_session(
300
+ self,
301
+ session_id: Optional[str] = None,
302
+ user_id: Optional[str] = None,
303
+ session_state: Optional[Dict[str, Any]] = None,
304
+ run_id: Optional[str] = None,
305
+ ) -> Tuple[str, Optional[str], Dict[str, Any]]:
306
+ """Initialize the session for the agent."""
143
307
 
144
- def run(self, **kwargs: Any):
145
- logger.error(f"{self.__class__.__name__}.run() method not implemented.")
146
- return
308
+ if session_id is None:
309
+ if self.session_id:
310
+ session_id = self.session_id
311
+ else:
312
+ session_id = str(uuid4())
313
+ # We make the session_id sticky to the agent instance if no session_id is provided
314
+ self.session_id = session_id
315
+
316
+ log_debug(f"Session ID: {session_id}", center=True)
317
+
318
+ # Use the default user_id when necessary
319
+ if user_id is None:
320
+ user_id = self.user_id
321
+
322
+ # Determine the session_state
323
+ if session_state is None:
324
+ session_state = self.session_state or {}
325
+
326
+ if user_id is not None:
327
+ session_state["current_user_id"] = user_id
328
+ if session_id is not None:
329
+ session_state["current_session_id"] = session_id
330
+
331
+ session_state.update(
332
+ {
333
+ "workflow_id": self.id,
334
+ "run_id": run_id,
335
+ "session_id": session_id,
336
+ }
337
+ )
338
+ if self.name:
339
+ session_state["workflow_name"] = self.name
147
340
 
148
- def run_workflow(self, **kwargs: Any):
149
- """Run the Workflow"""
341
+ return session_id, user_id, session_state # type: ignore
150
342
 
151
- # Set mode, debug, workflow_id, session_id, initialize memory
152
- self.set_storage_mode()
153
- self.set_debug()
154
- self.set_monitoring()
155
- self.set_workflow_id() # Ensure workflow_id is set
156
- self.set_session_id()
157
- self.initialize_memory()
343
+ def _generate_workflow_session_name(self) -> str:
344
+ """Generate a name for the workflow session"""
158
345
 
159
- # Create a run_id
160
- self.run_id = str(uuid4())
346
+ if self.session_id is None:
347
+ return f"Workflow Session - {datetime.now().strftime('%Y-%m-%d %H:%M')}"
161
348
 
162
- # Set run_input, run_response
163
- self.run_input = kwargs
164
- self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, workflow_id=self.workflow_id)
349
+ datetime_str = datetime.now().strftime("%Y-%m-%d %H:%M")
350
+ new_session_name = f"Workflow Session-{datetime_str}"
165
351
 
166
- # Read existing session from storage
167
- self.read_from_storage()
352
+ if self.description:
353
+ truncated_desc = self.description[:40] + "-" if len(self.description) > 40 else self.description
354
+ new_session_name = f"{truncated_desc} - {datetime_str}"
355
+ return new_session_name
168
356
 
169
- # Update the session_id for all Agent instances
170
- self.update_agent_session_ids()
357
+ def set_session_name(
358
+ self, session_id: Optional[str] = None, autogenerate: bool = False, session_name: Optional[str] = None
359
+ ) -> WorkflowSession:
360
+ """Set the session name and save to storage"""
361
+ session_id = session_id or self.session_id
171
362
 
172
- log_debug(f"Workflow Run Start: {self.run_id}", center=True)
173
- try:
174
- self._subclass_run = cast(Callable, self._subclass_run)
175
- result = self._subclass_run(**kwargs)
176
- except Exception as e:
177
- logger.error(f"Workflow.run() failed: {e}")
178
- raise e
179
-
180
- # The run_workflow() method handles both Iterator[RunResponse] and RunResponse
181
- # Case 1: The run method returns an Iterator[RunResponse]
182
- if isinstance(result, (GeneratorType, collections.abc.Iterator)):
183
- # Initialize the run_response content
184
- self.run_response.content = ""
185
-
186
- def result_generator():
187
- self.run_response = cast(RunResponse, self.run_response)
188
- if isinstance(self.memory, WorkflowMemory):
189
- self.memory = cast(WorkflowMemory, self.memory)
190
- elif isinstance(self.memory, Memory):
191
- self.memory = cast(Memory, self.memory)
192
-
193
- for item in result:
194
- if (
195
- isinstance(item, tuple(get_args(RunResponseEvent)))
196
- or isinstance(item, tuple(get_args(TeamRunResponseEvent)))
197
- or isinstance(item, tuple(get_args(WorkflowRunResponseEvent)))
198
- or isinstance(item, RunResponse)
199
- ):
200
- # Update the run_id, session_id and workflow_id of the RunResponseEvent
201
- item.run_id = self.run_id
202
- item.session_id = self.session_id
203
- item.workflow_id = self.workflow_id
204
-
205
- # Update the run_response with the content from the result
206
- if hasattr(item, "content") and item.content is not None and isinstance(item.content, str):
207
- self.run_response.content += item.content
208
- else:
209
- logger.warning(f"Workflow.run() should only yield RunResponseEvent objects, got: {type(item)}")
210
- yield item
211
-
212
- # Add the run to the memory
213
- if isinstance(self.memory, WorkflowMemory):
214
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
215
- elif isinstance(self.memory, Memory):
216
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
217
- # Write this run to the database
218
- self.write_to_storage()
219
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
220
-
221
- return result_generator()
222
- # Case 2: The run method returns a RunResponse
223
- elif isinstance(result, RunResponse):
224
- # Update the result with the run_id, session_id and workflow_id of the workflow run
225
- result.run_id = self.run_id
226
- result.session_id = self.session_id
227
- result.workflow_id = self.workflow_id
228
-
229
- # Update the run_response with the content from the result
230
- if result.content is not None and isinstance(result.content, str):
231
- self.run_response.content = result.content
232
-
233
- # Add the run to the memory
234
- if isinstance(self.memory, WorkflowMemory):
235
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
236
- elif isinstance(self.memory, Memory):
237
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
238
- # Write this run to the database
239
- self.write_to_storage()
240
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
241
- return result
363
+ if session_id is None:
364
+ raise Exception("Session ID is not set")
365
+
366
+ # -*- Read from storage
367
+ session = self.get_session(session_id=session_id) # type: ignore
368
+
369
+ if autogenerate:
370
+ # -*- Generate name for session
371
+ session_name = self._generate_workflow_session_name()
372
+ log_debug(f"Generated Workflow Session Name: {session_name}")
373
+ elif session_name is None:
374
+ raise Exception("Session name is not set")
375
+
376
+ # -*- Rename session
377
+ session.session_data["session_name"] = session_name # type: ignore
378
+
379
+ # -*- Save to storage
380
+ self.save_session(session=session) # type: ignore
381
+
382
+ return session # type: ignore
383
+
384
+ def get_session_name(self, session_id: Optional[str] = None) -> str:
385
+ """Get the session name for the given session ID and user ID."""
386
+ session_id = session_id or self.session_id
387
+ if session_id is None:
388
+ raise Exception("Session ID is not set")
389
+ session = self.get_session(session_id=session_id) # type: ignore
390
+ if session is None:
391
+ raise Exception("Session not found")
392
+ return session.session_data.get("session_name", "") if session.session_data else ""
393
+
394
+ def get_session_state(self, session_id: Optional[str] = None) -> Dict[str, Any]:
395
+ """Get the session state for the given session ID and user ID."""
396
+ session_id = session_id or self.session_id
397
+ if session_id is None:
398
+ raise Exception("Session ID is not set")
399
+ session = self.get_session(session_id=session_id) # type: ignore
400
+ if session is None:
401
+ raise Exception("Session not found")
402
+ return session.session_data.get("session_state", {}) if session.session_data else {}
403
+
404
+ def delete_session(self, session_id: str):
405
+ """Delete the current session and save to storage"""
406
+ if self.db is None:
407
+ return
408
+ # -*- Delete session
409
+ self.db.delete_session(session_id=session_id)
410
+
411
+ def get_run_output(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
412
+ """Get a RunOutput from the database."""
413
+ if self._workflow_session is not None:
414
+ run_response = self._workflow_session.get_run(run_id=run_id)
415
+ if run_response is not None:
416
+ return run_response
417
+ else:
418
+ log_warning(f"RunOutput {run_id} not found in AgentSession {self._workflow_session.session_id}")
419
+ return None
242
420
  else:
243
- logger.warning(f"Workflow.run() should only return RunResponse objects, got: {type(result)}")
244
- return None
421
+ workflow_session = self.get_session(session_id=session_id)
422
+ if workflow_session is not None:
423
+ run_response = workflow_session.get_run(run_id=run_id)
424
+ if run_response is not None:
425
+ return run_response
426
+ else:
427
+ log_warning(f"RunOutput {run_id} not found in AgentSession {session_id}")
428
+ return None
429
+
430
+ def get_last_run_output(self, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
431
+ """Get the last run response from the database."""
432
+ if (
433
+ self._workflow_session is not None
434
+ and self._workflow_session.runs is not None
435
+ and len(self._workflow_session.runs) > 0
436
+ ):
437
+ run_response = self._workflow_session.runs[-1]
438
+ if run_response is not None:
439
+ return run_response
440
+ else:
441
+ workflow_session = self.get_session(session_id=session_id)
442
+ if workflow_session is not None and workflow_session.runs is not None and len(workflow_session.runs) > 0:
443
+ run_response = workflow_session.runs[-1]
444
+ if run_response is not None:
445
+ return run_response
446
+ else:
447
+ log_warning(f"No run responses found in WorkflowSession {session_id}")
448
+ return None
449
+
450
+ def read_or_create_session(
451
+ self,
452
+ session_id: str,
453
+ user_id: Optional[str] = None,
454
+ ) -> WorkflowSession:
455
+ from time import time
456
+
457
+ # Returning cached session if we have one
458
+ if self._workflow_session is not None and self._workflow_session.session_id == session_id:
459
+ return self._workflow_session
460
+
461
+ # Try to load from database
462
+ workflow_session = None
463
+ if self.db is not None:
464
+ log_debug(f"Reading WorkflowSession: {session_id}")
465
+
466
+ workflow_session = cast(WorkflowSession, self._read_session(session_id=session_id))
467
+
468
+ if workflow_session is None:
469
+ # Creating new session if none found
470
+ log_debug(f"Creating new WorkflowSession: {session_id}")
471
+ workflow_session = WorkflowSession(
472
+ session_id=session_id,
473
+ workflow_id=self.id,
474
+ user_id=user_id,
475
+ workflow_data=self._get_workflow_data(),
476
+ session_data={},
477
+ metadata=self.metadata,
478
+ created_at=int(time()),
479
+ )
480
+
481
+ # Cache the session if relevant
482
+ if workflow_session is not None and self.cache_session:
483
+ self._workflow_session = workflow_session
245
484
 
246
- # Add to workflow.py after the run_workflow method
247
- async def arun_workflow(self, **kwargs: Any):
248
- """Run the Workflow asynchronously"""
485
+ return workflow_session
249
486
 
250
- # Set mode, debug, workflow_id, session_id, initialize memory
251
- self.set_storage_mode()
252
- self.set_debug()
253
- self.set_monitoring()
254
- self.set_workflow_id() # Ensure workflow_id is set
255
- self.set_session_id()
256
- self.initialize_memory()
487
+ def get_session(
488
+ self,
489
+ session_id: Optional[str] = None,
490
+ ) -> Optional[WorkflowSession]:
491
+ """Load an WorkflowSession from database.
492
+
493
+ Args:
494
+ session_id: The session_id to load from storage.
495
+
496
+ Returns:
497
+ WorkflowSession: The WorkflowSession loaded from the database or created if it does not exist.
498
+ """
499
+ if not session_id and not self.session_id:
500
+ raise Exception("No session_id provided")
257
501
 
258
- # Create a run_id
259
- self.run_id = str(uuid4())
502
+ session_id_to_load = session_id or self.session_id
260
503
 
261
- # Set run_input, run_response
262
- self.run_input = kwargs
263
- self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, workflow_id=self.workflow_id)
504
+ # Try to load from database
505
+ if self.db is not None and session_id_to_load is not None:
506
+ workflow_session = cast(WorkflowSession, self._read_session(session_id=session_id_to_load))
507
+ return workflow_session
264
508
 
265
- # Read existing session from storage
266
- self.read_from_storage()
509
+ log_warning(f"WorkflowSession {session_id_to_load} not found in db")
510
+ return None
267
511
 
268
- # Update the session_id for all Agent instances
269
- self.update_agent_session_ids()
512
+ def save_session(self, session: WorkflowSession) -> None:
513
+ """Save the WorkflowSession to storage
270
514
 
271
- log_debug(f"Workflow Run Start: {self.run_id}", center=True)
515
+ Returns:
516
+ Optional[WorkflowSession]: The saved WorkflowSession or None if not saved.
517
+ """
518
+ if self.db is not None and session.session_data is not None:
519
+ if session.session_data.get("session_state") is not None:
520
+ session.session_data["session_state"].pop("current_session_id", None)
521
+ session.session_data["session_state"].pop("current_user_id", None)
522
+ session.session_data["session_state"].pop("current_run_id", None)
523
+ session.session_data["session_state"].pop("workflow_id", None)
524
+ session.session_data["session_state"].pop("run_id", None)
525
+ session.session_data["session_state"].pop("session_id", None)
526
+ session.session_data["session_state"].pop("workflow_name", None)
527
+
528
+ self._upsert_session(session=session)
529
+ log_debug(f"Created or updated WorkflowSession record: {session.session_id}")
530
+
531
+ # -*- Session Database Functions
532
+ def _read_session(self, session_id: str) -> Optional[WorkflowSession]:
533
+ """Get a Session from the database."""
272
534
  try:
273
- self._subclass_arun = cast(Callable, self._subclass_arun)
274
- result = await self._subclass_arun(**kwargs)
535
+ if not self.db:
536
+ raise ValueError("Db not initialized")
537
+ session = self.db.get_session(session_id=session_id, session_type=SessionType.WORKFLOW)
538
+ return session if isinstance(session, (WorkflowSession, type(None))) else None
275
539
  except Exception as e:
276
- logger.error(f"Workflow.arun() failed: {e}")
277
- raise e
278
-
279
- # Handle single RunResponse result
280
- if isinstance(result, RunResponse):
281
- # Update the result with the run_id, session_id and workflow_id of the workflow run
282
- result.run_id = self.run_id
283
- result.session_id = self.session_id
284
- result.workflow_id = self.workflow_id
285
-
286
- # Update the run_response with the content from the result
287
- if result.content is not None and isinstance(result.content, str):
288
- self.run_response.content = result.content
289
-
290
- # Add the run to the memory
291
- if isinstance(self.memory, WorkflowMemory):
292
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
293
- elif isinstance(self.memory, Memory):
294
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
295
- # Write this run to the database
296
- self.write_to_storage()
297
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
298
- return result
299
- else:
300
- logger.warning(f"Workflow.arun() should only return RunResponse objects, got: {type(result)}")
540
+ log_warning(f"Error getting session from db: {e}")
301
541
  return None
302
542
 
303
- async def arun_workflow_generator(self, **kwargs: Any) -> AsyncIterator[RunResponse]:
304
- """Run the Workflow asynchronously for async generators"""
543
+ def _upsert_session(self, session: WorkflowSession) -> Optional[WorkflowSession]:
544
+ """Upsert a Session into the database."""
305
545
 
306
- # Set mode, debug, workflow_id, session_id, initialize memory
307
- self.set_storage_mode()
308
- self.set_debug()
309
- self.set_monitoring()
310
- self.set_workflow_id() # Ensure workflow_id is set
311
- self.set_session_id()
312
- self.initialize_memory()
546
+ try:
547
+ if not self.db:
548
+ raise ValueError("Db not initialized")
549
+ result = self.db.upsert_session(session=session)
550
+ return result if isinstance(result, (WorkflowSession, type(None))) else None
551
+ except Exception as e:
552
+ log_warning(f"Error upserting session into db: {e}")
553
+ return None
313
554
 
314
- # Create a run_id
315
- self.run_id = str(uuid4())
555
+ def _update_metadata(self, session: WorkflowSession):
556
+ """Update the extra_data in the session"""
557
+ from agno.utils.merge_dict import merge_dictionaries
316
558
 
317
- # Set run_input, run_response
318
- self.run_input = kwargs
319
- self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, workflow_id=self.workflow_id)
559
+ # Read metadata from the database
560
+ if session.metadata is not None:
561
+ # If metadata is set in the workflow, update the database metadata with the workflow's metadata
562
+ if self.metadata is not None:
563
+ # Updates workflow's session metadata in place
564
+ merge_dictionaries(session.metadata, self.metadata)
565
+ # Update the current metadata with the metadata from the database which is updated in place
566
+ self.metadata = session.metadata
320
567
 
321
- # Read existing session from storage
322
- self.read_from_storage()
568
+ def _update_session_state(self, session: WorkflowSession, session_state: Dict[str, Any]):
569
+ """Load the existing Workflow from a WorkflowSession (from the database)"""
323
570
 
324
- # Update the session_id for all Agent instances
325
- self.update_agent_session_ids()
571
+ from agno.utils.merge_dict import merge_dictionaries
572
+
573
+ # Get the session_state from the database and update the current session_state
574
+ if session.session_data and "session_state" in session.session_data:
575
+ session_state_from_db = session.session_data.get("session_state")
576
+
577
+ if (
578
+ session_state_from_db is not None
579
+ and isinstance(session_state_from_db, dict)
580
+ and len(session_state_from_db) > 0
581
+ ):
582
+ # This updates session_state_from_db
583
+ # If there are conflicting keys, values from provided session_state will take precedence
584
+ merge_dictionaries(session_state_from_db, session_state)
585
+ session_state = session_state_from_db
586
+
587
+ # Update the session_state in the session
588
+ if session.session_data is None:
589
+ session.session_data = {}
590
+ session.session_data["session_state"] = session_state
591
+
592
+ return session_state
593
+
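For context on the merge above: the real work is done by agno.utils.merge_dict.merge_dictionaries; the stand-in below (a hypothetical recursive_merge, not the library function) only illustrates the precedence described in the comments, where keys from the provided session_state override values loaded from the database.

from typing import Any, Dict

def recursive_merge(base: Dict[str, Any], override: Dict[str, Any]) -> None:
    """Hypothetical stand-in for merge_dictionaries: update `base` in place,
    letting values from `override` win on conflicting keys."""
    for key, value in override.items():
        if key in base and isinstance(base[key], dict) and isinstance(value, dict):
            recursive_merge(base[key], value)  # merge nested dicts recursively
        else:
            base[key] = value  # override wins on conflicts

# State loaded from the database vs. state passed into the run
state_from_db = {"cart": {"items": 2}, "theme": "dark"}
provided_state = {"cart": {"items": 3}}

recursive_merge(state_from_db, provided_state)
assert state_from_db == {"cart": {"items": 3}, "theme": "dark"}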
594
+ def _get_workflow_data(self) -> Dict[str, Any]:
595
+ workflow_data = {}
596
+
597
+ if self.steps and not callable(self.steps):
598
+ steps_dict = []
599
+ for step in self.steps: # type: ignore
600
+ if callable(step):
601
+ step_type = StepType.STEP
602
+ elif isinstance(step, Agent) or isinstance(step, Team):
603
+ step_type = StepType.STEP
604
+ else:
605
+ step_type = STEP_TYPE_MAPPING[type(step)]
606
+ step_dict = {
607
+ "name": step.name if hasattr(step, "name") else step.__name__,
608
+ "description": step.description if hasattr(step, "description") else "User-defined callable step",
609
+ "type": step_type.value,
610
+ }
611
+ steps_dict.append(step_dict)
326
612
 
327
- log_debug(f"Workflow Run Start: {self.run_id}", center=True)
328
- # Initialize the run_response content
329
- self.run_response.content = ""
330
- try:
331
- self._subclass_arun = cast(Callable, self._subclass_arun)
332
- async for item in self._subclass_arun(**kwargs):
333
- if (
334
- isinstance(item, tuple(get_args(RunResponseEvent)))
335
- or isinstance(item, tuple(get_args(TeamRunResponseEvent)))
336
- or isinstance(item, tuple(get_args(WorkflowRunResponseEvent)))
337
- or isinstance(item, RunResponse)
338
- ):
339
- # Update the run_id, session_id and workflow_id of the RunResponseEvent
340
- item.run_id = self.run_id
341
- item.session_id = self.session_id
342
- item.workflow_id = self.workflow_id
613
+ workflow_data["steps"] = steps_dict
343
614
 
344
- # Update the run_response with the content from the result
345
- if hasattr(item, "content") and item.content is not None and isinstance(item.content, str):
346
- self.run_response.content += item.content
347
- else:
348
- logger.warning(f"Workflow.run() should only yield RunResponseEvent objects, got: {type(item)}")
349
- yield item
350
-
351
- # Add the run to the memory
352
- if isinstance(self.memory, WorkflowMemory):
353
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
354
- elif isinstance(self.memory, Memory):
355
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
356
- # Write this run to the database
357
- self.write_to_storage()
358
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
359
- except Exception as e:
360
- logger.error(f"Workflow.arun() failed: {e}")
361
- raise e
615
+ elif callable(self.steps):
616
+ workflow_data["steps"] = [
617
+ {
618
+ "name": "Custom Function",
619
+ "description": "User-defined callable workflow",
620
+ "type": "Callable",
621
+ }
622
+ ]
362
623
 
363
- async def arun(self, **kwargs: Any):
364
- """Async version of run() that calls arun_workflow()"""
365
- logger.error(f"{self.__class__.__name__}.arun() method not implemented.")
366
- return
624
+ return workflow_data
367
625
 
368
- def set_storage_mode(self):
369
- if self.storage is not None:
370
- self.storage.mode = "workflow"
626
+ def _handle_event(
627
+ self,
628
+ event: "WorkflowRunOutputEvent",
629
+ workflow_run_response: WorkflowRunOutput,
630
+ websocket_handler: Optional[WebSocketHandler] = None,
631
+ ) -> "WorkflowRunOutputEvent":
632
+ """Handle workflow events for storage - similar to Team._handle_event"""
633
+ if self.store_events:
634
+ # Check if this event type should be skipped
635
+ if self.events_to_skip:
636
+ event_type = event.event
637
+ for skip_event in self.events_to_skip:
638
+ if isinstance(skip_event, str):
639
+ if event_type == skip_event:
640
+ return event
641
+ else:
642
+ # It's a WorkflowRunEvent enum
643
+ if event_type == skip_event.value:
644
+ return event
371
645
 
372
- def initialize_workflow(self):
373
- self.set_storage_mode()
646
+ # Store the event
647
+ if workflow_run_response.events is None:
648
+ workflow_run_response.events = []
374
649
 
375
- def set_workflow_id(self) -> str:
376
- if self.workflow_id is None:
377
- self.workflow_id = str(uuid4())
378
- log_debug(f"Workflow ID: {self.workflow_id}", center=True)
379
- return self.workflow_id
650
+ workflow_run_response.events.append(event)
380
651
 
381
- def set_session_id(self) -> str:
382
- if self.session_id is None:
383
- self.session_id = str(uuid4())
384
- log_debug(f"Session ID: {self.session_id}", center=True)
385
- return self.session_id
652
+ # Broadcast to WebSocket if available (async context only)
653
+ if websocket_handler:
654
+ import asyncio
386
655
 
387
- def set_debug(self) -> None:
656
+ try:
657
+ loop = asyncio.get_running_loop()
658
+ if loop:
659
+ asyncio.create_task(websocket_handler.handle_event(event))
660
+ except RuntimeError:
661
+ pass
662
+
663
+ return event
664
+
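The broadcast branch above schedules the WebSocket send as a fire-and-forget task only when an event loop is already running. A minimal stdlib-only sketch of that pattern, with a hypothetical broadcast coroutine standing in for websocket_handler.handle_event:

import asyncio

async def broadcast(event: str) -> None:
    # Placeholder for websocket_handler.handle_event(event)
    print(f"broadcasting {event}")

def fire_and_forget(event: str) -> None:
    """Schedule the broadcast if a loop is running; otherwise skip silently."""
    try:
        loop = asyncio.get_running_loop()
        loop.create_task(broadcast(event))  # not awaited: fire-and-forget
    except RuntimeError:
        # No running event loop (synchronous caller) -> skip broadcasting
        pass

async def main() -> None:
    fire_and_forget("WorkflowStarted")
    await asyncio.sleep(0)  # yield control so the scheduled task can run

asyncio.run(main())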
665
+ def _transform_step_output_to_event(
666
+ self, step_output: StepOutput, workflow_run_response: WorkflowRunOutput, step_index: Optional[int] = None
667
+ ) -> StepOutputEvent:
668
+ """Transform a StepOutput object into a StepOutputEvent for consistent streaming interface"""
669
+ return StepOutputEvent(
670
+ step_output=step_output,
671
+ run_id=workflow_run_response.run_id or "",
672
+ workflow_name=workflow_run_response.workflow_name,
673
+ workflow_id=workflow_run_response.workflow_id,
674
+ session_id=workflow_run_response.session_id,
675
+ step_name=step_output.step_name,
676
+ step_index=step_index,
677
+ )
678
+
679
+ def _set_debug(self) -> None:
680
+ """Set debug mode and configure logging"""
388
681
  if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
682
+ use_workflow_logger()
683
+
389
684
  self.debug_mode = True
390
- set_log_level_to_debug()
391
- log_debug("Debug logs enabled")
392
- else:
393
- set_log_level_to_info()
685
+ set_log_level_to_debug(source_type="workflow")
394
686
 
395
- def set_monitoring(self) -> None:
396
- """Override monitoring and telemetry settings based on environment variables."""
687
+ # Propagate to steps - only if steps is iterable (not callable)
688
+ if self.steps and not callable(self.steps):
689
+ if isinstance(self.steps, Steps):
690
+ steps_to_iterate = self.steps.steps
691
+ else:
692
+ steps_to_iterate = self.steps
397
693
 
398
- # Only override if the environment variable is set
399
- monitor_env = getenv("AGNO_MONITOR")
400
- if monitor_env is not None:
401
- self.monitoring = monitor_env.lower() == "true"
694
+ for step in steps_to_iterate:
695
+ self._propagate_debug_to_step(step)
696
+ else:
697
+ set_log_level_to_info(source_type="workflow")
698
+
699
+ def _set_telemetry(self) -> None:
700
+ """Override telemetry settings based on environment variables."""
402
701
 
403
- # Override telemetry if environment variable is set
404
702
  telemetry_env = getenv("AGNO_TELEMETRY")
405
703
  if telemetry_env is not None:
406
704
  self.telemetry = telemetry_env.lower() == "true"
407
705
 
408
- def initialize_memory(self) -> None:
409
- if self.memory is None:
410
- self.memory = Memory()
411
-
412
- def update_run_method(self):
413
- run_type = None
414
- # Update the run() method to call run_workflow() instead of the subclass's run()
415
- # First, check if the subclass has a run method
416
- # If the run() method has been overridden by the subclass,
417
- # then self.__class__.run is not Workflow.run will be True
418
- if self.__class__.run is not Workflow.run:
419
- # Store the original run methods bound to the instance
420
- self._subclass_run = self.__class__.run.__get__(self)
421
- run_type = "sync"
422
- # Get the parameters of the sync run method
423
- sig = inspect.signature(self.__class__.run)
424
-
425
- if self.__class__.arun is not Workflow.arun:
426
- self._subclass_arun = self.__class__.arun.__get__(self)
427
- run_type = "coroutine"
428
-
429
- # Get the parameters of the async run method
430
- sig = inspect.signature(self.__class__.arun)
431
-
432
- # Check if the async method is a coroutine or async generator
433
- from inspect import isasyncgenfunction
434
-
435
- if isasyncgenfunction(self.__class__.arun):
436
- run_type = "async_generator"
437
-
438
- if run_type is not None:
439
- # Convert parameters to a serializable format
440
- self._run_parameters = {
441
- param_name: {
442
- "name": param_name,
443
- "default": param.default.default
444
- if hasattr(param.default, "__class__") and param.default.__class__.__name__ == "FieldInfo"
445
- else (param.default if param.default is not inspect.Parameter.empty else None),
446
- "annotation": (
447
- param.annotation.__name__
448
- if hasattr(param.annotation, "__name__")
449
- else (
450
- str(param.annotation).replace("typing.Optional[", "").replace("]", "")
451
- if "typing.Optional" in str(param.annotation)
452
- else str(param.annotation)
453
- )
706
+ def _propagate_debug_to_step(self, step):
707
+ """Recursively propagate debug mode to steps and nested primitives"""
708
+ # Handle direct Step objects
709
+ if hasattr(step, "active_executor") and step.active_executor:
710
+ executor = step.active_executor
711
+ if hasattr(executor, "debug_mode"):
712
+ executor.debug_mode = True
713
+
714
+ # If it's a team, propagate to all members
715
+ if hasattr(executor, "members"):
716
+ for member in executor.members:
717
+ if hasattr(member, "debug_mode"):
718
+ member.debug_mode = True
719
+
720
+ # Handle nested primitives - check both 'steps' and 'choices' attributes
721
+ for attr_name in ["steps", "choices"]:
722
+ if hasattr(step, attr_name):
723
+ attr_value = getattr(step, attr_name)
724
+ if attr_value and isinstance(attr_value, list):
725
+ for nested_step in attr_value:
726
+ self._propagate_debug_to_step(nested_step)
727
+
728
+ def _create_step_input(
729
+ self,
730
+ execution_input: WorkflowExecutionInput,
731
+ previous_step_outputs: Optional[Dict[str, StepOutput]] = None,
732
+ shared_images: Optional[List[ImageArtifact]] = None,
733
+ shared_videos: Optional[List[VideoArtifact]] = None,
734
+ shared_audio: Optional[List[AudioArtifact]] = None,
735
+ shared_files: Optional[List[File]] = None,
736
+ ) -> StepInput:
737
+ """Helper method to create StepInput with enhanced data flow support"""
738
+
739
+ previous_step_content = None
740
+ if previous_step_outputs:
741
+ last_output = list(previous_step_outputs.values())[-1]
742
+ previous_step_content = last_output.content if last_output else None
743
+ log_debug(f"Using previous step content from: {list(previous_step_outputs.keys())[-1]}")
744
+
745
+ return StepInput(
746
+ input=execution_input.input,
747
+ previous_step_content=previous_step_content,
748
+ previous_step_outputs=previous_step_outputs,
749
+ additional_data=execution_input.additional_data,
750
+ images=shared_images or [],
751
+ videos=shared_videos or [],
752
+ audio=shared_audio or [],
753
+ files=shared_files or [],
754
+ )
755
+
756
+ def _get_step_count(self) -> int:
757
+ """Get the number of steps in the workflow"""
758
+ if self.steps is None:
759
+ return 0
760
+ elif callable(self.steps):
761
+ return 1 # Callable function counts as 1 step
762
+ else:
763
+ # Handle Steps wrapper
764
+ if isinstance(self.steps, Steps):
765
+ return len(self.steps.steps)
766
+ else:
767
+ return len(self.steps)
768
+
769
+ def _aggregate_workflow_metrics(self, step_results: List[Union[StepOutput, List[StepOutput]]]) -> WorkflowMetrics:
770
+ """Aggregate metrics from all step responses into structured workflow metrics"""
771
+ steps_dict = {}
772
+
773
+ def process_step_output(step_output: StepOutput):
774
+ """Process a single step output for metrics"""
775
+
776
+ # If this step has nested steps, process them recursively
777
+ if hasattr(step_output, "steps") and step_output.steps:
778
+ for nested_step in step_output.steps:
779
+ process_step_output(nested_step)
780
+
781
+ # Only collect metrics from steps that actually have metrics (actual agents/teams)
782
+ if (
783
+ step_output.step_name and step_output.metrics and step_output.executor_type in ["agent", "team"]
784
+ ): # Only include actual executors
785
+ step_metrics = StepMetrics(
786
+ step_name=step_output.step_name,
787
+ executor_type=step_output.executor_type or "unknown",
788
+ executor_name=step_output.executor_name or "unknown",
789
+ metrics=step_output.metrics,
790
+ )
791
+ steps_dict[step_output.step_name] = step_metrics
792
+
793
+ # Process all step results
794
+ for step_result in step_results:
795
+ process_step_output(cast(StepOutput, step_result))
796
+
797
+ return WorkflowMetrics(
798
+ steps=steps_dict,
799
+ )
800
+
801
+ def _call_custom_function(self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any) -> Any:
802
+ """Call custom function with only the parameters it expects"""
803
+ from inspect import signature
804
+
805
+ sig = signature(func)
806
+
807
+ # Build arguments based on what the function actually accepts
808
+ call_kwargs: Dict[str, Any] = {}
809
+
810
+ # Only add workflow and execution_input if the function expects them
811
+ if "workflow" in sig.parameters: # type: ignore
812
+ call_kwargs["workflow"] = self
813
+ if "execution_input" in sig.parameters:
814
+ call_kwargs["execution_input"] = execution_input # type: ignore
815
+ if "session_state" in sig.parameters:
816
+ call_kwargs["session_state"] = self.session_state # type: ignore
817
+
818
+ # Add any other kwargs that the function expects
819
+ for param_name in kwargs:
820
+ if param_name in sig.parameters: # type: ignore
821
+ call_kwargs[param_name] = kwargs[param_name]
822
+
823
+ # If function has **kwargs parameter, pass all remaining kwargs
824
+ for param in sig.parameters.values(): # type: ignore
825
+ if param.kind == param.VAR_KEYWORD:
826
+ call_kwargs.update(kwargs)
827
+ break
828
+
829
+ try:
830
+ return func(**call_kwargs)
831
+ except TypeError as e:
832
+ # If signature inspection fails, fall back to original method
833
+ logger.warning(
834
+ f"Async function signature inspection failed: {e}. Falling back to original calling convention."
835
+ )
836
+ return func(**call_kwargs)
837
+
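_call_custom_function forwards only the keyword arguments the user-supplied callable declares, and everything when the callable takes **kwargs. A self-contained sketch of that inspection, using only the standard library and a hypothetical user step for demonstration:

from inspect import Parameter, signature
from typing import Any, Callable, Dict

def call_with_supported_kwargs(func: Callable, **available: Any) -> Any:
    """Pass along only the kwargs the callable accepts; pass all if it has **kwargs."""
    params = signature(func).parameters
    accepts_var_kw = any(p.kind == Parameter.VAR_KEYWORD for p in params.values())
    if accepts_var_kw:
        return func(**available)
    filtered: Dict[str, Any] = {name: value for name, value in available.items() if name in params}
    return func(**filtered)

def step(execution_input: str) -> str:  # hypothetical user step: ignores session_state
    return execution_input.upper()

print(call_with_supported_kwargs(step, execution_input="hello", session_state={"k": 1}))
# -> "HELLO"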
838
+ def _execute(
839
+ self,
840
+ session: WorkflowSession,
841
+ execution_input: WorkflowExecutionInput,
842
+ workflow_run_response: WorkflowRunOutput,
843
+ session_state: Optional[Dict[str, Any]] = None,
844
+ **kwargs: Any,
845
+ ) -> WorkflowRunOutput:
846
+ """Execute a specific pipeline by name synchronously"""
847
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
848
+
849
+ workflow_run_response.status = RunStatus.running
850
+ register_run(workflow_run_response.run_id) # type: ignore
851
+
852
+ if callable(self.steps):
853
+ if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
854
+ raise ValueError("Cannot use async function with synchronous execution")
855
+ elif isgeneratorfunction(self.steps):
856
+ content = ""
857
+ for chunk in self.steps(self, execution_input, **kwargs):
858
+ # Check for cancellation while consuming generator
859
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
860
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
861
+ content += chunk.content
862
+ else:
863
+ content += str(chunk)
864
+ workflow_run_response.content = content
865
+ else:
866
+ # Execute the workflow with the custom executor
867
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
868
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs) # type: ignore[arg-type]
869
+
870
+ workflow_run_response.status = RunStatus.completed
871
+ else:
872
+ try:
873
+ # Track outputs from each step for enhanced data flow
874
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
875
+ previous_step_outputs: Dict[str, StepOutput] = {}
876
+
877
+ shared_images: List[ImageArtifact] = execution_input.images or []
878
+ output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
879
+ shared_videos: List[VideoArtifact] = execution_input.videos or []
880
+ output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
881
+ shared_audio: List[AudioArtifact] = execution_input.audio or []
882
+ output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
883
+ shared_files: List[File] = execution_input.files or []
884
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
885
+
886
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
887
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
888
+ step_name = getattr(step, "name", f"step_{i + 1}")
889
+ log_debug(f"Executing step {i + 1}/{self._get_step_count()}: {step_name}")
890
+
891
+ # Create enhanced StepInput
892
+ step_input = self._create_step_input(
893
+ execution_input=execution_input,
894
+ previous_step_outputs=previous_step_outputs,
895
+ shared_images=shared_images,
896
+ shared_videos=shared_videos,
897
+ shared_audio=shared_audio,
898
+ shared_files=shared_files,
454
899
  )
455
- if param.annotation is not inspect.Parameter.empty
456
- else None,
457
- "required": param.default is inspect.Parameter.empty,
458
- }
459
- for param_name, param in sig.parameters.items()
460
- if param_name != "self"
461
- }
462
- # Determine the return type of the run method
463
- return_annotation = sig.return_annotation
464
- self._run_return_type = (
465
- return_annotation.__name__
466
- if return_annotation is not inspect.Signature.empty and hasattr(return_annotation, "__name__")
467
- else str(return_annotation)
468
- if return_annotation is not inspect.Signature.empty
469
- else None
900
+
901
+ # Check for cancellation before executing step
902
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
903
+
904
+ step_output = step.execute( # type: ignore[union-attr]
905
+ step_input,
906
+ session_id=session.session_id,
907
+ user_id=self.user_id,
908
+ workflow_run_response=workflow_run_response,
909
+ session_state=session_state,
910
+ store_executor_outputs=self.store_executor_outputs,
911
+ )
912
+
913
+ # Check for cancellation after step execution
914
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
915
+
916
+ # Update the workflow-level previous_step_outputs dictionary
917
+ previous_step_outputs[step_name] = step_output
918
+ if step_output.stop:
919
+ logger.info(f"Early termination requested by step {step_name}")
920
+ break
921
+
922
+ # Update shared media for next step
923
+ shared_images.extend(step_output.images or [])
924
+ shared_videos.extend(step_output.videos or [])
925
+ shared_audio.extend(step_output.audio or [])
926
+ shared_files.extend(step_output.files or [])
927
+ output_images.extend(step_output.images or [])
928
+ output_videos.extend(step_output.videos or [])
929
+ output_audio.extend(step_output.audio or [])
930
+ output_files.extend(step_output.files or [])
931
+
932
+ collected_step_outputs.append(step_output)
933
+
934
+ # Update the workflow_run_response with completion data
935
+ if collected_step_outputs:
936
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
937
+ last_output = cast(StepOutput, collected_step_outputs[-1])
938
+
939
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
940
+ if getattr(last_output, "steps", None):
941
+ _cur = last_output
942
+ while getattr(_cur, "steps", None):
943
+ _steps = _cur.steps or []
944
+ if not _steps:
945
+ break
946
+ _cur = _steps[-1]
947
+ workflow_run_response.content = _cur.content
948
+ else:
949
+ workflow_run_response.content = last_output.content
950
+ else:
951
+ workflow_run_response.content = "No steps executed"
952
+
953
+ workflow_run_response.step_results = collected_step_outputs
954
+ workflow_run_response.images = output_images
955
+ workflow_run_response.videos = output_videos
956
+ workflow_run_response.audio = output_audio
957
+ workflow_run_response.status = RunStatus.completed
958
+
959
+ except RunCancelledException as e:
960
+ # Handle run cancellation
961
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
962
+ workflow_run_response.status = RunStatus.cancelled
963
+ workflow_run_response.content = str(e)
964
+ except Exception as e:
965
+ import traceback
966
+
967
+ traceback.print_exc()
968
+ logger.error(f"Workflow execution failed: {e}")
969
+ # Store error response
970
+ workflow_run_response.status = RunStatus.error
971
+ workflow_run_response.content = f"Workflow execution failed: {e}"
972
+
973
+ finally:
974
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
975
+ session.upsert_run(run=workflow_run_response)
976
+ self.save_session(session=session)
977
+ # Always clean up the run tracking
978
+ cleanup_run(workflow_run_response.run_id) # type: ignore
979
+
980
+ # Log Workflow Telemetry
981
+ if self.telemetry:
982
+ self._log_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
983
+
984
+ return workflow_run_response
985
+
986
+ def _execute_stream(
987
+ self,
988
+ session: WorkflowSession,
989
+ execution_input: WorkflowExecutionInput,
990
+ workflow_run_response: WorkflowRunOutput,
991
+ session_state: Optional[Dict[str, Any]] = None,
992
+ stream_intermediate_steps: bool = False,
993
+ **kwargs: Any,
994
+ ) -> Iterator[WorkflowRunOutputEvent]:
995
+ """Execute a specific pipeline by name with event streaming"""
996
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
997
+
998
+ workflow_run_response.status = RunStatus.running
999
+
1000
+ # Register run for cancellation tracking
1001
+ if workflow_run_response.run_id:
1002
+ register_run(workflow_run_response.run_id)
1003
+
1004
+ workflow_started_event = WorkflowStartedEvent(
1005
+ run_id=workflow_run_response.run_id or "",
1006
+ workflow_name=workflow_run_response.workflow_name,
1007
+ workflow_id=workflow_run_response.workflow_id,
1008
+ session_id=workflow_run_response.session_id,
1009
+ )
1010
+ yield self._handle_event(workflow_started_event, workflow_run_response)
1011
+
1012
+ if callable(self.steps):
1013
+ if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
1014
+ raise ValueError("Cannot use async function with synchronous execution")
1015
+ elif isgeneratorfunction(self.steps):
1016
+ content = ""
1017
+ for chunk in self._call_custom_function(self.steps, execution_input, **kwargs): # type: ignore[arg-type]
1018
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1019
+ # Update the run_response with the content from the result
1020
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1021
+ content += chunk.content
1022
+ yield chunk
1023
+ else:
1024
+ content += str(chunk)
1025
+ workflow_run_response.content = content
1026
+ else:
1027
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1028
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs)
1029
+ workflow_run_response.status = RunStatus.completed
1030
+
1031
+ else:
1032
+ try:
1033
+ # Track outputs from each step for enhanced data flow
1034
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1035
+ previous_step_outputs: Dict[str, StepOutput] = {}
1036
+
1037
+ shared_images: List[ImageArtifact] = execution_input.images or []
1038
+ output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
1039
+ shared_videos: List[VideoArtifact] = execution_input.videos or []
1040
+ output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
1041
+ shared_audio: List[AudioArtifact] = execution_input.audio or []
1042
+ output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
1043
+ shared_files: List[File] = execution_input.files or []
1044
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1045
+
1046
+ early_termination = False
1047
+
1048
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1049
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1050
+ step_name = getattr(step, "name", f"step_{i + 1}")
1051
+ log_debug(f"Streaming step {i + 1}/{self._get_step_count()}: {step_name}")
1052
+
1053
+ # Create enhanced StepInput
1054
+ step_input = self._create_step_input(
1055
+ execution_input=execution_input,
1056
+ previous_step_outputs=previous_step_outputs,
1057
+ shared_images=shared_images,
1058
+ shared_videos=shared_videos,
1059
+ shared_audio=shared_audio,
1060
+ shared_files=shared_files,
1061
+ )
1062
+
1063
+ # Execute step with streaming and yield all events
1064
+ for event in step.execute_stream( # type: ignore[union-attr]
1065
+ step_input,
1066
+ session_id=session.session_id,
1067
+ user_id=self.user_id,
1068
+ stream_intermediate_steps=stream_intermediate_steps,
1069
+ workflow_run_response=workflow_run_response,
1070
+ session_state=session_state,
1071
+ step_index=i,
1072
+ store_executor_outputs=self.store_executor_outputs,
1073
+ ):
1074
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1075
+ # Handle events
1076
+ if isinstance(event, StepOutput):
1077
+ step_output = event
1078
+ collected_step_outputs.append(step_output)
1079
+
1080
+ # Update the workflow-level previous_step_outputs dictionary
1081
+ previous_step_outputs[step_name] = step_output
1082
+
1083
+ # Transform StepOutput to StepOutputEvent for consistent streaming interface
1084
+ step_output_event = self._transform_step_output_to_event(
1085
+ step_output, workflow_run_response, step_index=i
1086
+ )
1087
+
1088
+ if step_output.stop:
1089
+ logger.info(f"Early termination requested by step {step_name}")
1090
+ # Update shared media for next step
1091
+ shared_images.extend(step_output.images or [])
1092
+ shared_videos.extend(step_output.videos or [])
1093
+ shared_audio.extend(step_output.audio or [])
1094
+ shared_files.extend(step_output.files or [])
1095
+ output_images.extend(step_output.images or [])
1096
+ output_videos.extend(step_output.videos or [])
1097
+ output_audio.extend(step_output.audio or [])
1098
+ output_files.extend(step_output.files or [])
1099
+
1100
+ # Only yield StepOutputEvent for function executors, not for agents/teams
1101
+ if getattr(step, "executor_type", None) == "function":
1102
+ yield step_output_event
1103
+
1104
+ # Break out of the step loop
1105
+ early_termination = True
1106
+ break
1107
+
1108
+ # Update shared media for next step
1109
+ shared_images.extend(step_output.images or [])
1110
+ shared_videos.extend(step_output.videos or [])
1111
+ shared_audio.extend(step_output.audio or [])
1112
+ shared_files.extend(step_output.files or [])
1113
+ output_images.extend(step_output.images or [])
1114
+ output_videos.extend(step_output.videos or [])
1115
+ output_audio.extend(step_output.audio or [])
1116
+ output_files.extend(step_output.files or [])
1117
+
1118
+ # Only yield StepOutputEvent for generator functions, not for agents/teams
1119
+ if getattr(step, "executor_type", None) == "function":
1120
+ yield step_output_event
1121
+
1122
+ elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
1123
+ yield self._handle_event(event, workflow_run_response) # type: ignore
1124
+
1125
+ else:
1126
+ # Yield other internal events
1127
+ yield self._handle_event(event, workflow_run_response) # type: ignore
1128
+
1129
+ # Break out of main step loop if early termination was requested
1130
+ if "early_termination" in locals() and early_termination:
1131
+ break
1132
+
1133
+ # Update the workflow_run_response with completion data
1134
+ if collected_step_outputs:
1135
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1136
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1137
+
1138
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1139
+ if getattr(last_output, "steps", None):
1140
+ _cur = last_output
1141
+ while getattr(_cur, "steps", None):
1142
+ _steps = _cur.steps or []
1143
+ if not _steps:
1144
+ break
1145
+ _cur = _steps[-1]
1146
+ workflow_run_response.content = _cur.content
1147
+ else:
1148
+ workflow_run_response.content = last_output.content
1149
+ else:
1150
+ workflow_run_response.content = "No steps executed"
1151
+
1152
+ workflow_run_response.step_results = collected_step_outputs
1153
+ workflow_run_response.images = output_images
1154
+ workflow_run_response.videos = output_videos
1155
+ workflow_run_response.audio = output_audio
1156
+ workflow_run_response.status = RunStatus.completed
1157
+
1158
+ except RunCancelledException as e:
1159
+ # Handle run cancellation during streaming
1160
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
1161
+ workflow_run_response.status = RunStatus.cancelled
1162
+ workflow_run_response.content = str(e)
1163
+ cancelled_event = WorkflowCancelledEvent(
1164
+ run_id=workflow_run_response.run_id or "",
1165
+ workflow_id=self.id,
1166
+ workflow_name=self.name,
1167
+ session_id=session.session_id,
1168
+ reason=str(e),
1169
+ )
1170
+ yield self._handle_event(cancelled_event, workflow_run_response)
1171
+ except Exception as e:
1172
+ logger.error(f"Workflow execution failed: {e}")
1173
+
1174
+ from agno.run.workflow import WorkflowErrorEvent
1175
+
1176
+ error_event = WorkflowErrorEvent(
1177
+ run_id=workflow_run_response.run_id or "",
1178
+ workflow_id=self.id,
1179
+ workflow_name=self.name,
1180
+ session_id=session.session_id,
1181
+ error=str(e),
1182
+ )
1183
+
1184
+ yield error_event
1185
+
1186
+ # Update workflow_run_response with error
1187
+ workflow_run_response.content = error_event.error
1188
+ workflow_run_response.status = RunStatus.error
1189
+
1190
+ # Yield workflow completed event
1191
+ workflow_completed_event = WorkflowCompletedEvent(
1192
+ run_id=workflow_run_response.run_id or "",
1193
+ content=workflow_run_response.content,
1194
+ workflow_name=workflow_run_response.workflow_name,
1195
+ workflow_id=workflow_run_response.workflow_id,
1196
+ session_id=workflow_run_response.session_id,
1197
+ step_results=workflow_run_response.step_results, # type: ignore
1198
+ metadata=workflow_run_response.metadata,
1199
+ )
1200
+ yield self._handle_event(workflow_completed_event, workflow_run_response)
1201
+
1202
+ # Store the completed workflow response
1203
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1204
+ session.upsert_run(run=workflow_run_response)
1205
+ self.save_session(session=session)
1206
+
1207
+ # Always clean up the run tracking
1208
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1209
+
1210
+ # Log Workflow Telemetry
1211
+ if self.telemetry:
1212
+ self._log_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
1213
+
1214
+ async def _acall_custom_function(
1215
+ self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any
1216
+ ) -> Any:
1217
+ """Call custom function with only the parameters it expects - handles both async functions and async generators"""
1218
+ from inspect import isasyncgenfunction, signature
1219
+
1220
+ sig = signature(func)
1221
+
1222
+ # Build arguments based on what the function actually accepts
1223
+ call_kwargs: Dict[str, Any] = {}
1224
+
1225
+ # Only add workflow and execution_input if the function expects them
1226
+ if "workflow" in sig.parameters: # type: ignore
1227
+ call_kwargs["workflow"] = self
1228
+ if "execution_input" in sig.parameters:
1229
+ call_kwargs["execution_input"] = execution_input # type: ignore
1230
+ if "session_state" in sig.parameters:
1231
+ call_kwargs["session_state"] = self.session_state # type: ignore
1232
+
1233
+ # Add any other kwargs that the function expects
1234
+ for param_name in kwargs:
1235
+ if param_name in sig.parameters: # type: ignore
1236
+ call_kwargs[param_name] = kwargs[param_name]
1237
+
1238
+ # If function has **kwargs parameter, pass all remaining kwargs
1239
+ for param in sig.parameters.values(): # type: ignore
1240
+ if param.kind == param.VAR_KEYWORD:
1241
+ call_kwargs.update(kwargs)
1242
+ break
1243
+
1244
+ try:
1245
+ # Check if it's an async generator function
1246
+ if isasyncgenfunction(func):
1247
+ # For async generators, call the function and return the async generator directly
1248
+ return func(**call_kwargs) # type: ignore
1249
+ else:
1250
+ # For regular async functions, await the result
1251
+ return await func(**call_kwargs) # type: ignore
1252
+ except TypeError as e:
1253
+ # If signature inspection fails, fall back to original method
1254
+ logger.warning(
1255
+ f"Async function signature inspection failed: {e}. Falling back to original calling convention."
470
1256
  )
471
- # Important: Replace the instance's run method with run_workflow
472
- # This is so we call run_workflow() instead of the subclass's run()
473
- if run_type == "sync":
474
- object.__setattr__(self, "run", self.run_workflow.__get__(self))
475
- elif run_type == "coroutine":
476
- object.__setattr__(self, "arun", self.arun_workflow.__get__(self))
477
- elif run_type == "async_generator":
478
- object.__setattr__(self, "arun", self.arun_workflow_generator.__get__(self))
1257
+ if isasyncgenfunction(func):
1258
+ # For async generators, use the same signature inspection logic in fallback
1259
+ return func(**call_kwargs) # type: ignore
1260
+ else:
1261
+ # For regular async functions, use the same signature inspection logic in fallback
1262
+ return await func(**call_kwargs) # type: ignore
1263
+
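_acall_custom_function has to hand async generator functions back un-awaited (so the caller can iterate them) while awaiting plain coroutine functions. A stdlib-only sketch of that dispatch, with hypothetical helpers:

import asyncio
from inspect import isasyncgenfunction, iscoroutinefunction
from typing import Any, Callable

async def call_async_step(func: Callable, **kwargs: Any) -> Any:
    if isasyncgenfunction(func):
        return func(**kwargs)        # async generator: return as-is, iterate later
    if iscoroutinefunction(func):
        return await func(**kwargs)  # coroutine: await the result
    return func(**kwargs)            # plain callable: call directly

async def gen_step(**_: Any):
    yield "chunk-1"
    yield "chunk-2"

async def main() -> None:
    agen = await call_async_step(gen_step)
    async for chunk in agen:
        print(chunk)

asyncio.run(main())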
1264
+ async def _aexecute(
1265
+ self,
1266
+ session: WorkflowSession,
1267
+ execution_input: WorkflowExecutionInput,
1268
+ workflow_run_response: WorkflowRunOutput,
1269
+ session_state: Optional[Dict[str, Any]] = None,
1270
+ **kwargs: Any,
1271
+ ) -> WorkflowRunOutput:
1272
+ """Execute a specific pipeline by name asynchronously"""
1273
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1274
+
1275
+ workflow_run_response.status = RunStatus.running
1276
+
1277
+ # Register run for cancellation tracking
1278
+ register_run(workflow_run_response.run_id) # type: ignore
1279
+
1280
+ if callable(self.steps):
1281
+ # Execute the workflow with the custom executor
1282
+ content = ""
1283
+
1284
+ if iscoroutinefunction(self.steps): # type: ignore
1285
+ workflow_run_response.content = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1286
+ elif isgeneratorfunction(self.steps):
1287
+ for chunk in self.steps(self, execution_input, **kwargs): # type: ignore[arg-type]
1288
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1289
+ content += chunk.content
1290
+ else:
1291
+ content += str(chunk)
1292
+ workflow_run_response.content = content
1293
+ elif isasyncgenfunction(self.steps): # type: ignore
1294
+ async_gen = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1295
+ async for chunk in async_gen:
1296
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1297
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1298
+ content += chunk.content
1299
+ else:
1300
+ content += str(chunk)
1301
+ workflow_run_response.content = content
1302
+ else:
1303
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1304
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs)
1305
+ workflow_run_response.status = RunStatus.completed
1306
+
479
1307
  else:
480
- # If the subclass does not override the run method,
481
- # the Workflow.run() method will be called and will log an error
482
- self._subclass_run = self.run
483
- self._subclass_arun = self.arun
484
-
485
- self._run_parameters = {}
486
- self._run_return_type = None
487
-
488
- def update_agent_session_ids(self):
489
- # Update the session_id for all Agent instances
490
- # use dataclasses.fields() to iterate through fields
491
- for f in fields(self):
492
- field_type = f.type
493
- if isinstance(field_type, Agent):
494
- field_value = getattr(self, f.name)
495
- field_value.session_id = self.session_id
496
-
497
- def get_workflow_data(self) -> Dict[str, Any]:
498
- workflow_data: Dict[str, Any] = {}
499
- if self.name is not None:
500
- workflow_data["name"] = self.name
501
- if self.workflow_id is not None:
502
- workflow_data["workflow_id"] = self.workflow_id
503
- if self.description is not None:
504
- workflow_data["description"] = self.description
505
- return workflow_data
1308
+ try:
1309
+ # Track outputs from each step for enhanced data flow
1310
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1311
+ previous_step_outputs: Dict[str, StepOutput] = {}
1312
+
1313
+ shared_images: List[ImageArtifact] = execution_input.images or []
1314
+ output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
1315
+ shared_videos: List[VideoArtifact] = execution_input.videos or []
1316
+ output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
1317
+ shared_audio: List[AudioArtifact] = execution_input.audio or []
1318
+ output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
1319
+ shared_files: List[File] = execution_input.files or []
1320
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1321
+
1322
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1323
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1324
+ step_name = getattr(step, "name", f"step_{i + 1}")
1325
+ log_debug(f"Async Executing step {i + 1}/{self._get_step_count()}: {step_name}")
1326
+
1327
+ # Create enhanced StepInput
1328
+ step_input = self._create_step_input(
1329
+ execution_input=execution_input,
1330
+ previous_step_outputs=previous_step_outputs,
1331
+ shared_images=shared_images,
1332
+ shared_videos=shared_videos,
1333
+ shared_audio=shared_audio,
1334
+ shared_files=shared_files,
1335
+ )
1336
+
1337
+ # Check for cancellation before executing step
1338
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1339
+
1340
+ step_output = await step.aexecute( # type: ignore[union-attr]
1341
+ step_input,
1342
+ session_id=session.session_id,
1343
+ user_id=self.user_id,
1344
+ workflow_run_response=workflow_run_response,
1345
+ session_state=session_state,
1346
+ store_executor_outputs=self.store_executor_outputs,
1347
+ )
1348
+
1349
+ # Check for cancellation after step execution
1350
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1351
+
1352
+ # Update the workflow-level previous_step_outputs dictionary
1353
+ previous_step_outputs[step_name] = step_output
1354
+ if step_output.stop:
1355
+ logger.info(f"Early termination requested by step {step_name}")
1356
+ break
1357
+
1358
+ # Update shared media for next step
1359
+ shared_images.extend(step_output.images or [])
1360
+ shared_videos.extend(step_output.videos or [])
1361
+ shared_audio.extend(step_output.audio or [])
1362
+ shared_files.extend(step_output.files or [])
1363
+ output_images.extend(step_output.images or [])
1364
+ output_videos.extend(step_output.videos or [])
1365
+ output_audio.extend(step_output.audio or [])
1366
+ output_files.extend(step_output.files or [])
1367
+
1368
+ collected_step_outputs.append(step_output)
1369
+
1370
+ # Update the workflow_run_response with completion data
1371
+ if collected_step_outputs:
1372
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1373
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1374
+
1375
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1376
+ if getattr(last_output, "steps", None):
1377
+ _cur = last_output
1378
+ while getattr(_cur, "steps", None):
1379
+ _steps = _cur.steps or []
1380
+ if not _steps:
1381
+ break
1382
+ _cur = _steps[-1]
1383
+ workflow_run_response.content = _cur.content
1384
+ else:
1385
+ workflow_run_response.content = last_output.content
1386
+ else:
1387
+ workflow_run_response.content = "No steps executed"
1388
+
1389
+ workflow_run_response.step_results = collected_step_outputs
1390
+ workflow_run_response.images = output_images
1391
+ workflow_run_response.videos = output_videos
1392
+ workflow_run_response.audio = output_audio
1393
+ workflow_run_response.status = RunStatus.completed
1394
+
1395
+ except RunCancelledException as e:
1396
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
1397
+ workflow_run_response.status = RunStatus.cancelled
1398
+ workflow_run_response.content = str(e)
1399
+ except Exception as e:
1400
+ logger.error(f"Workflow execution failed: {e}")
1401
+ workflow_run_response.status = RunStatus.error
1402
+ workflow_run_response.content = f"Workflow execution failed: {e}"
1403
+
1404
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1405
+ session.upsert_run(run=workflow_run_response)
1406
+ self.save_session(session=session)
1407
+ # Always clean up the run tracking
1408
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1409
+
1410
+ # Log Workflow Telemetry
1411
+ if self.telemetry:
1412
+ await self._alog_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
506
1413
 
507
- def get_session_data(self) -> Dict[str, Any]:
508
- session_data: Dict[str, Any] = {}
509
- if self.session_name is not None:
510
- session_data["session_name"] = self.session_name
511
- if self.session_state and len(self.session_state) > 0:
512
- session_data["session_state"] = nested_model_dump(self.session_state)
513
- if self.images is not None:
514
- session_data["images"] = [img.model_dump() for img in self.images]
515
- if self.videos is not None:
516
- session_data["videos"] = [vid.model_dump() for vid in self.videos]
517
- if self.audio is not None:
518
- session_data["audio"] = [aud.model_dump() for aud in self.audio]
519
- return session_data
520
-
521
- def get_workflow_session(self) -> WorkflowSession:
522
- """Get a WorkflowSession object, which can be saved to the database"""
523
- self.memory = cast(WorkflowMemory, self.memory)
524
- self.session_id = cast(str, self.session_id)
525
- self.workflow_id = cast(str, self.workflow_id)
526
- if self.memory is not None:
527
- if isinstance(self.memory, WorkflowMemory):
528
- self.memory = cast(WorkflowMemory, self.memory)
529
- memory_dict = self.memory.to_dict()
530
- # We only persist the runs for the current session ID (not all runs in memory)
531
- memory_dict["runs"] = [
532
- agent_run.model_dump()
533
- for agent_run in self.memory.runs
534
- if agent_run.response is not None and agent_run.response.session_id == self.session_id
535
- ]
1414
+ return workflow_run_response
1415
+
1416
+ async def _aexecute_stream(
1417
+ self,
1418
+ session: WorkflowSession,
1419
+ execution_input: WorkflowExecutionInput,
1420
+ workflow_run_response: WorkflowRunOutput,
1421
+ session_state: Optional[Dict[str, Any]] = None,
1422
+ stream_intermediate_steps: bool = False,
1423
+ websocket_handler: Optional[WebSocketHandler] = None,
1424
+ **kwargs: Any,
1425
+ ) -> AsyncIterator[WorkflowRunOutputEvent]:
1426
+ """Execute a specific pipeline by name with event streaming"""
1427
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1428
+
1429
+ workflow_run_response.status = RunStatus.running
1430
+ workflow_started_event = WorkflowStartedEvent(
1431
+ run_id=workflow_run_response.run_id or "",
1432
+ workflow_name=workflow_run_response.workflow_name,
1433
+ workflow_id=workflow_run_response.workflow_id,
1434
+ session_id=workflow_run_response.session_id,
1435
+ )
1436
+ yield self._handle_event(workflow_started_event, workflow_run_response, websocket_handler=websocket_handler)
1437
+
1438
+ if callable(self.steps):
1439
+ if iscoroutinefunction(self.steps): # type: ignore
1440
+ workflow_run_response.content = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1441
+ elif isgeneratorfunction(self.steps):
1442
+ content = ""
1443
+ for chunk in self.steps(self, execution_input, **kwargs): # type: ignore[arg-type]
1444
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1445
+ content += chunk.content
1446
+ yield chunk
1447
+ else:
1448
+ content += str(chunk)
1449
+ workflow_run_response.content = content
1450
+ elif isasyncgenfunction(self.steps): # type: ignore
1451
+ content = ""
1452
+ async_gen = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1453
+ async for chunk in async_gen:
1454
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1455
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1456
+ content += chunk.content
1457
+ yield chunk
1458
+ else:
1459
+ content += str(chunk)
1460
+ workflow_run_response.content = content
536
1461
  else:
537
- self.memory = cast(Memory, self.memory)
538
- # We fake the structure on storage, to maintain the interface with the legacy implementation
539
- run_responses = self.memory.runs[self.session_id] # type: ignore
540
- memory_dict = self.memory.to_dict()
541
- memory_dict["runs"] = [rr.to_dict() for rr in run_responses]
1462
+ workflow_run_response.content = self.steps(self, execution_input, **kwargs)
1463
+ workflow_run_response.status = RunStatus.completed
1464
+
542
1465
  else:
543
- memory_dict = None
544
- return WorkflowSession(
545
- session_id=self.session_id,
546
- workflow_id=self.workflow_id,
547
- user_id=self.user_id,
548
- memory=memory_dict,
549
- workflow_data=self.get_workflow_data(),
550
- session_data=self.get_session_data(),
551
- extra_data=self.extra_data,
1466
+ try:
1467
+ # Track outputs from each step for enhanced data flow
1468
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1469
+ previous_step_outputs: Dict[str, StepOutput] = {}
1470
+
1471
+ shared_images: List[ImageArtifact] = execution_input.images or []
1472
+ output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
1473
+ shared_videos: List[VideoArtifact] = execution_input.videos or []
1474
+ output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
1475
+ shared_audio: List[AudioArtifact] = execution_input.audio or []
1476
+ output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
1477
+ shared_files: List[File] = execution_input.files or []
1478
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1479
+
1480
+ early_termination = False
1481
+
1482
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1483
+ if workflow_run_response.run_id:
1484
+ raise_if_cancelled(workflow_run_response.run_id)
1485
+ step_name = getattr(step, "name", f"step_{i + 1}")
1486
+ log_debug(f"Async streaming step {i + 1}/{self._get_step_count()}: {step_name}")
1487
+
1488
+ # Create enhanced StepInput
1489
+ step_input = self._create_step_input(
1490
+ execution_input=execution_input,
1491
+ previous_step_outputs=previous_step_outputs,
1492
+ shared_images=shared_images,
1493
+ shared_videos=shared_videos,
1494
+ shared_audio=shared_audio,
1495
+ shared_files=shared_files,
1496
+ )
1497
+
1498
+ # Execute step with streaming and yield all events
1499
+ async for event in step.aexecute_stream( # type: ignore[union-attr]
1500
+ step_input,
1501
+ session_id=session.session_id,
1502
+ user_id=self.user_id,
1503
+ stream_intermediate_steps=stream_intermediate_steps,
1504
+ workflow_run_response=workflow_run_response,
1505
+ session_state=session_state,
1506
+ step_index=i,
1507
+ store_executor_outputs=self.store_executor_outputs,
1508
+ ):
1509
+ if workflow_run_response.run_id:
1510
+ raise_if_cancelled(workflow_run_response.run_id)
1511
+ if isinstance(event, StepOutput):
1512
+ step_output = event
1513
+ collected_step_outputs.append(step_output)
1514
+
1515
+ # Update the workflow-level previous_step_outputs dictionary
1516
+ previous_step_outputs[step_name] = step_output
1517
+
1518
+ # Transform StepOutput to StepOutputEvent for consistent streaming interface
1519
+ step_output_event = self._transform_step_output_to_event(
1520
+ step_output, workflow_run_response, step_index=i
1521
+ )
1522
+
1523
+ if step_output.stop:
1524
+ logger.info(f"Early termination requested by step {step_name}")
1525
+ # Update shared media for next step
1526
+ shared_images.extend(step_output.images or [])
1527
+ shared_videos.extend(step_output.videos or [])
1528
+ shared_audio.extend(step_output.audio or [])
1529
+ shared_files.extend(step_output.files or [])
1530
+ output_images.extend(step_output.images or [])
1531
+ output_videos.extend(step_output.videos or [])
1532
+ output_audio.extend(step_output.audio or [])
1533
+ output_files.extend(step_output.files or [])
1534
+
1535
+ if getattr(step, "executor_type", None) == "function":
1536
+ yield step_output_event
1537
+
1538
+ # Break out of the step loop
1539
+ early_termination = True
1540
+ break
1541
+
1542
+ # Update shared media for next step
1543
+ shared_images.extend(step_output.images or [])
1544
+ shared_videos.extend(step_output.videos or [])
1545
+ shared_audio.extend(step_output.audio or [])
1546
+ shared_files.extend(step_output.files or [])
1547
+ output_images.extend(step_output.images or [])
1548
+ output_videos.extend(step_output.videos or [])
1549
+ output_audio.extend(step_output.audio or [])
1550
+ output_files.extend(step_output.files or [])
1551
+
1552
+ # Only yield StepOutputEvent for generator functions, not for agents/teams
1553
+ if getattr(step, "executor_type", None) == "function":
1554
+ yield step_output_event
1555
+
1556
+ elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
1557
+ yield self._handle_event(event, workflow_run_response, websocket_handler=websocket_handler) # type: ignore
1558
+
1559
+ else:
1560
+ # Yield other internal events
1561
+ yield self._handle_event(event, workflow_run_response, websocket_handler=websocket_handler) # type: ignore
1562
+
1563
+ # Break out of main step loop if early termination was requested
1564
+ if "early_termination" in locals() and early_termination:
1565
+ break
1566
+
1567
+ # Update the workflow_run_response with completion data
1568
+ if collected_step_outputs:
1569
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1570
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1571
+
1572
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1573
+ if getattr(last_output, "steps", None):
1574
+ _cur = last_output
1575
+ while getattr(_cur, "steps", None):
1576
+ _steps = _cur.steps or []
1577
+ if not _steps:
1578
+ break
1579
+ _cur = _steps[-1]
1580
+ workflow_run_response.content = _cur.content
1581
+ else:
1582
+ workflow_run_response.content = last_output.content
1583
+ else:
1584
+ workflow_run_response.content = "No steps executed"
1585
+
1586
+ workflow_run_response.step_results = collected_step_outputs
1587
+ workflow_run_response.images = output_images
1588
+ workflow_run_response.videos = output_videos
1589
+ workflow_run_response.audio = output_audio
1590
+ workflow_run_response.status = RunStatus.completed
1591
+
1592
+ except RunCancelledException as e:
1593
+ # Handle run cancellation during streaming
1594
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
1595
+ workflow_run_response.status = RunStatus.cancelled
1596
+ workflow_run_response.content = str(e)
1597
+ cancelled_event = WorkflowCancelledEvent(
1598
+ run_id=workflow_run_response.run_id or "",
1599
+ workflow_id=self.id,
1600
+ workflow_name=self.name,
1601
+ session_id=session.session_id,
1602
+ reason=str(e),
1603
+ )
1604
+ yield self._handle_event(
1605
+ cancelled_event,
1606
+ workflow_run_response,
1607
+ websocket_handler=websocket_handler,
1608
+ )
1609
+ except Exception as e:
1610
+ logger.error(f"Workflow execution failed: {e}")
1611
+
1612
+ from agno.run.workflow import WorkflowErrorEvent
1613
+
1614
+ error_event = WorkflowErrorEvent(
1615
+ run_id=workflow_run_response.run_id or "",
1616
+ workflow_id=self.id,
1617
+ workflow_name=self.name,
1618
+ session_id=session.session_id,
1619
+ error=str(e),
1620
+ )
1621
+
1622
+ yield error_event
1623
+
1624
+ # Update workflow_run_response with error
1625
+ workflow_run_response.content = error_event.error
1626
+ workflow_run_response.status = RunStatus.error
1627
+
1628
+ # Yield workflow completed event
1629
+ workflow_completed_event = WorkflowCompletedEvent(
1630
+ run_id=workflow_run_response.run_id or "",
1631
+ content=workflow_run_response.content,
1632
+ workflow_name=workflow_run_response.workflow_name,
1633
+ workflow_id=workflow_run_response.workflow_id,
1634
+ session_id=workflow_run_response.session_id,
1635
+ step_results=workflow_run_response.step_results, # type: ignore[arg-type]
1636
+ metadata=workflow_run_response.metadata,
552
1637
  )
1638
+ yield self._handle_event(workflow_completed_event, workflow_run_response, websocket_handler=websocket_handler)
553
1639
 
554
- def load_workflow_session(self, session: WorkflowSession):
555
- """Load the existing Workflow from a WorkflowSession (from the database)"""
1640
+ # Store the completed workflow response
1641
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1642
+ session.upsert_run(run=workflow_run_response)
1643
+ self.save_session(session=session)
1644
+
1645
+ # Log Workflow Telemetry
1646
+ if self.telemetry:
1647
+ await self._alog_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
1648
+
1649
+ # Always clean up the run tracking
1650
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1651
+
1652
+ async def _arun_background(
1653
+ self,
1654
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
1655
+ additional_data: Optional[Dict[str, Any]] = None,
1656
+ user_id: Optional[str] = None,
1657
+ session_id: Optional[str] = None,
1658
+ session_state: Optional[Dict[str, Any]] = None,
1659
+ audio: Optional[List[Audio]] = None,
1660
+ images: Optional[List[Image]] = None,
1661
+ videos: Optional[List[Video]] = None,
1662
+ files: Optional[List[File]] = None,
1663
+ **kwargs: Any,
1664
+ ) -> WorkflowRunOutput:
1665
+ """Execute workflow in background using asyncio.create_task()"""
1666
+
1667
+ run_id = str(uuid4())
1668
+
1669
+ self.initialize_workflow()
1670
+
1671
+ session_id, user_id, session_state = self._initialize_session(
1672
+ session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
1673
+ )
1674
+
1675
+ # Read existing session from database
1676
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1677
+ self._update_metadata(session=workflow_session)
1678
+
1679
+ # Update session state from DB
1680
+ session_state = self._update_session_state(session=workflow_session, session_state=session_state)
1681
+
1682
+ self._prepare_steps()
1683
+
1684
+ # Create workflow run response with PENDING status
1685
+ workflow_run_response = WorkflowRunOutput(
1686
+ run_id=run_id,
1687
+ session_id=session_id,
1688
+ workflow_id=self.id,
1689
+ workflow_name=self.name,
1690
+ created_at=int(datetime.now().timestamp()),
1691
+ status=RunStatus.pending,
1692
+ )
1693
+
1694
+ # Store PENDING response immediately
1695
+ workflow_session.upsert_run(run=workflow_run_response)
1696
+ self.save_session(session=workflow_session)
1697
+
1698
+ # Prepare execution input
1699
+ inputs = WorkflowExecutionInput(
1700
+ input=input,
1701
+ additional_data=additional_data,
1702
+ audio=audio, # type: ignore
1703
+ images=images, # type: ignore
1704
+ videos=videos, # type: ignore
1705
+ files=files, # type: ignore
1706
+ )
1707
+
1708
+ self.update_agents_and_teams_session_info()
1709
+
1710
+ async def execute_workflow_background():
1711
+ """Simple background execution"""
1712
+ try:
1713
+ # Update status to RUNNING and save
1714
+ workflow_run_response.status = RunStatus.running
1715
+ self.save_session(session=workflow_session)
1716
+
1717
+ await self._aexecute(
1718
+ session=workflow_session,
1719
+ execution_input=inputs,
1720
+ workflow_run_response=workflow_run_response,
1721
+ session_state=session_state,
1722
+ **kwargs,
1723
+ )
1724
+
1725
+ log_debug(f"Background execution completed with status: {workflow_run_response.status}")
1726
+
1727
+ except Exception as e:
1728
+ logger.error(f"Background workflow execution failed: {e}")
1729
+ workflow_run_response.status = RunStatus.error
1730
+ workflow_run_response.content = f"Background execution failed: {str(e)}"
1731
+ self.save_session(session=workflow_session)
1732
+
1733
+ # Create and start asyncio task
1734
+ loop = asyncio.get_running_loop()
1735
+ loop.create_task(execute_workflow_background())
1736
+
1737
+ # Return the same object; the background task updates it in place
1738
+ return workflow_run_response
1739
+
1740
+ async def _arun_background_stream(
1741
+ self,
1742
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
1743
+ additional_data: Optional[Dict[str, Any]] = None,
1744
+ user_id: Optional[str] = None,
1745
+ session_id: Optional[str] = None,
1746
+ session_state: Optional[Dict[str, Any]] = None,
1747
+ audio: Optional[List[Audio]] = None,
1748
+ images: Optional[List[Image]] = None,
1749
+ videos: Optional[List[Video]] = None,
1750
+ files: Optional[List[File]] = None,
1751
+ stream_intermediate_steps: bool = False,
1752
+ websocket_handler: Optional[WebSocketHandler] = None,
1753
+ **kwargs: Any,
1754
+ ) -> WorkflowRunOutput:
1755
+ """Execute workflow in background with streaming and WebSocket broadcasting"""
1756
+
1757
+ run_id = str(uuid4())
1758
+
1759
+ self.initialize_workflow()
1760
+
1761
+ session_id, user_id, session_state = self._initialize_session(
1762
+ session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
1763
+ )
1764
+
1765
+ # Read existing session from database
1766
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1767
+ self._update_metadata(session=workflow_session)
1768
+
1769
+ # Update session state from DB
1770
+ session_state = self._update_session_state(session=workflow_session, session_state=session_state)
1771
+
1772
+ self._prepare_steps()
1773
+
1774
+ # Create workflow run response with PENDING status
1775
+ workflow_run_response = WorkflowRunOutput(
1776
+ run_id=run_id,
1777
+ session_id=session_id,
1778
+ workflow_id=self.id,
1779
+ workflow_name=self.name,
1780
+ created_at=int(datetime.now().timestamp()),
1781
+ status=RunStatus.pending,
1782
+ )
1783
+
1784
+ # Store PENDING response immediately
1785
+ workflow_session.upsert_run(run=workflow_run_response)
1786
+ self.save_session(session=workflow_session)
1787
+
1788
+ # Prepare execution input
1789
+ inputs = WorkflowExecutionInput(
1790
+ input=input,
1791
+ additional_data=additional_data,
1792
+ audio=audio, # type: ignore
1793
+ images=images, # type: ignore
1794
+ videos=videos, # type: ignore
1795
+ files=files, # type: ignore
1796
+ )
556
1797
 
557
- # Get the workflow_id, user_id and session_id from the database
558
- if self.workflow_id is None and session.workflow_id is not None:
559
- self.workflow_id = session.workflow_id
560
- if self.user_id is None and session.user_id is not None:
561
- self.user_id = session.user_id
562
- if self.session_id is None and session.session_id is not None:
563
- self.session_id = session.session_id
564
-
565
- # Read workflow_data from the database
566
- if session.workflow_data is not None:
567
- # Get name from database and update the workflow name if not set
568
- if self.name is None and "name" in session.workflow_data:
569
- self.name = session.workflow_data.get("name")
570
-
571
- # Read session_data from the database
572
- if session.session_data is not None:
573
- # Get the session_name from database and update the current session_name if not set
574
- if self.session_name is None and "session_name" in session.session_data:
575
- self.session_name = session.session_data.get("session_name")
576
-
577
- # Get the session_state from database and update the current session_state
578
- if "session_state" in session.session_data:
579
- session_state_from_db = session.session_data.get("session_state")
580
- if (
581
- session_state_from_db is not None
582
- and isinstance(session_state_from_db, dict)
583
- and len(session_state_from_db) > 0
1798
+ self.update_agents_and_teams_session_info()
1799
+
1800
+ async def execute_workflow_background_stream():
1801
+ """Background execution with streaming and WebSocket broadcasting"""
1802
+ try:
1803
+ # Update status to RUNNING and save
1804
+ workflow_run_response.status = RunStatus.running
1805
+ self.save_session(session=workflow_session)
1806
+
1807
+ # Execute with streaming - consume all events (they're auto-broadcast via _handle_event)
1808
+ async for event in self._aexecute_stream(
1809
+ execution_input=inputs,
1810
+ session=workflow_session,
1811
+ workflow_run_response=workflow_run_response,
1812
+ stream_intermediate_steps=stream_intermediate_steps,
1813
+ session_state=session_state,
1814
+ websocket_handler=websocket_handler,
1815
+ **kwargs,
584
1816
  ):
585
- # If the session_state is already set, merge the session_state from the database with the current session_state
586
- if len(self.session_state) > 0:
587
- # This updates session_state_from_db
588
- merge_dictionaries(session_state_from_db, self.session_state)
589
- # Update the current session_state
590
- self.session_state = session_state_from_db
591
-
592
- # Get images, videos, and audios from the database
593
- if "images" in session.session_data:
594
- images_from_db = session.session_data.get("images")
595
- if images_from_db is not None and isinstance(images_from_db, list):
596
- if self.images is None:
597
- self.images = []
598
- self.images.extend([ImageArtifact.model_validate(img) for img in images_from_db])
599
- if "videos" in session.session_data:
600
- videos_from_db = session.session_data.get("videos")
601
- if videos_from_db is not None and isinstance(videos_from_db, list):
602
- if self.videos is None:
603
- self.videos = []
604
- self.videos.extend([VideoArtifact.model_validate(vid) for vid in videos_from_db])
605
- if "audio" in session.session_data:
606
- audio_from_db = session.session_data.get("audio")
607
- if audio_from_db is not None and isinstance(audio_from_db, list):
608
- if self.audio is None:
609
- self.audio = []
610
- self.audio.extend([AudioArtifact.model_validate(aud) for aud in audio_from_db])
611
-
612
- # Read extra_data from the database
613
- if session.extra_data is not None:
614
- # If extra_data is set in the workflow, update the database extra_data with the workflow's extra_data
615
- if self.extra_data is not None:
616
- # Updates workflow_session.extra_data in place
617
- merge_dictionaries(session.extra_data, self.extra_data)
618
- # Update the current extra_data with the extra_data from the database which is updated in place
619
- self.extra_data = session.extra_data
620
-
621
- if session.memory is not None:
622
- if self.memory is None:
623
- self.memory = Memory()
624
-
625
- if isinstance(self.memory, Memory):
626
- try:
627
- if self.memory.runs is None:
628
- self.memory.runs = {}
629
- self.memory.runs[session.session_id] = []
630
- for run in session.memory["runs"]:
631
- run_session_id = run["session_id"]
632
- self.memory.runs[run_session_id].append(RunResponse.from_dict(run))
633
- except Exception as e:
634
- log_warning(f"Failed to load runs from memory: {e}")
635
- else:
636
- try:
637
- if "runs" in session.memory:
638
- try:
639
- self.memory.runs = [WorkflowRun(**m) for m in session.memory["runs"]]
640
- except Exception as e:
641
- logger.warning(f"Failed to load runs from memory: {e}")
642
- except Exception as e:
643
- logger.warning(f"Failed to load WorkflowMemory: {e}")
1817
+ # Events are automatically broadcast by _handle_event
1818
+ # We just consume them here to drive the execution
1819
+ pass
644
1820
 
645
- log_debug(f"-*- WorkflowSession loaded: {session.session_id}")
1821
+ log_debug(f"Background streaming execution completed with status: {workflow_run_response.status}")
646
1822
 
647
- def read_from_storage(self) -> Optional[WorkflowSession]:
648
- """Load the WorkflowSession from storage.
1823
+ except Exception as e:
1824
+ logger.error(f"Background streaming workflow execution failed: {e}")
1825
+ workflow_run_response.status = RunStatus.error
1826
+ workflow_run_response.content = f"Background streaming execution failed: {str(e)}"
1827
+ self.save_session(session=workflow_session)
649
1828
 
650
- Returns:
651
- Optional[WorkflowSession]: The loaded WorkflowSession or None if not found.
652
- """
653
- if self.storage is not None and self.session_id is not None:
654
- self.workflow_session = cast(WorkflowSession, self.storage.read(session_id=self.session_id))
655
- if self.workflow_session is not None:
656
- self.load_workflow_session(session=self.workflow_session)
657
- return self.workflow_session
1829
+ # Create and start asyncio task for background streaming execution
1830
+ loop = asyncio.get_running_loop()
1831
+ loop.create_task(execute_workflow_background_stream())
658
1832
 
659
- def write_to_storage(self) -> Optional[WorkflowSession]:
660
- """Save the WorkflowSession to storage
1833
+ # Return the same object; the background task updates it in place
1834
+ return workflow_run_response
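
A rough usage sketch for the background streaming path above. The FastAPI/Starlette `WebSocket` type, the `agno.workflow` import path, and the `Workflow(name=..., steps=...)` constructor arguments are assumptions here and are not shown in this hunk.

    from fastapi import FastAPI, WebSocket          # WebSocket type assumed to be the Starlette one
    from agno.workflow import Workflow              # import path assumed

    app = FastAPI()
    demo_workflow = Workflow(name="demo", steps=[lambda step_input: "done"])  # constructor kwargs assumed

    @app.websocket("/workflow")
    async def run_workflow(ws: WebSocket) -> None:
        await ws.accept()
        # background=True + stream=True requires a websocket; events are pushed through
        # WebSocketHandler while the returned WorkflowRunOutput starts as PENDING and is
        # updated in place by the background task.
        run_output = await demo_workflow.arun(
            input="Summarize the latest report",
            background=True,
            stream=True,
            stream_intermediate_steps=True,
            websocket=ws,
        )
        await ws.send_json({"run_id": run_output.run_id})
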
1835
+
1836
+ def get_run(self, run_id: str) -> Optional[WorkflowRunOutput]:
1837
+ """Get the status and details of a background workflow run - SIMPLIFIED"""
1838
+ if self.db is not None and self.session_id is not None:
1839
+ session = self.db.get_session(session_id=self.session_id, session_type=SessionType.WORKFLOW)
1840
+ if session and isinstance(session, WorkflowSession) and session.runs:
1841
+ # Find the run by ID
1842
+ for run in session.runs:
1843
+ if run.run_id == run_id:
1844
+ return run
1845
+
1846
+ return None
1847
+
1848
+ def cancel_run(self, run_id: str) -> bool:
1849
+ """Cancel a running workflow execution.
1850
+
1851
+ Args:
1852
+ run_id (str): The run_id to cancel.
661
1853
 
662
1854
  Returns:
663
- Optional[WorkflowSession]: The saved WorkflowSession or None if not saved.
1855
+ bool: True if the run was found and marked for cancellation, False otherwise.
664
1856
  """
665
- if self.storage is not None:
666
- self.workflow_session = cast(WorkflowSession, self.storage.upsert(session=self.get_workflow_session()))
667
- return self.workflow_session
1857
+ return cancel_run_global(run_id)
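
A hedged sketch of the non-streaming background flow that get_run() and cancel_run() support. The SqliteDb adapter name, its db_file argument, and the agno.workflow import path are assumptions; get_run() reads the workflow's db and current session_id, so the run is assumed to live in that session.

    import asyncio
    from agno.db.sqlite import SqliteDb   # class name and db_file kwarg assumed
    from agno.workflow import Workflow    # import path assumed

    wf = Workflow(
        name="background-demo",
        db=SqliteDb(db_file="tmp/agno.db"),            # needed so get_run() can read persisted runs
        steps=[lambda step_input: "research summary"],
    )

    async def main() -> None:
        pending = await wf.arun(input="collect sources", background=True, session_id="bg-session")
        for _ in range(30):                            # poll until the stored run has produced content
            current = wf.get_run(pending.run_id)
            if current is not None and current.content is not None:
                print(current.status, current.content)
                return
            await asyncio.sleep(1)
        wf.cancel_run(pending.run_id)                  # give up and flag the run for cancellation

    asyncio.run(main())
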
668
1858
 
669
- def load_session(self, force: bool = False) -> Optional[str]:
670
- """Load an existing session from the database and return the session_id.
671
- If a session does not exist, create a new session.
1859
+ @overload
1860
+ def run(
1861
+ self,
1862
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
1863
+ additional_data: Optional[Dict[str, Any]] = None,
1864
+ user_id: Optional[str] = None,
1865
+ session_id: Optional[str] = None,
1866
+ session_state: Optional[Dict[str, Any]] = None,
1867
+ audio: Optional[List[Audio]] = None,
1868
+ images: Optional[List[Image]] = None,
1869
+ videos: Optional[List[Video]] = None,
1870
+ files: Optional[List[File]] = None,
1871
+ stream: Literal[False] = False,
1872
+ stream_intermediate_steps: Optional[bool] = None,
1873
+ background: Optional[bool] = False,
1874
+ ) -> WorkflowRunOutput: ...
1875
+
1876
+ @overload
1877
+ def run(
1878
+ self,
1879
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
1880
+ additional_data: Optional[Dict[str, Any]] = None,
1881
+ user_id: Optional[str] = None,
1882
+ session_id: Optional[str] = None,
1883
+ session_state: Optional[Dict[str, Any]] = None,
1884
+ audio: Optional[List[Audio]] = None,
1885
+ images: Optional[List[Image]] = None,
1886
+ videos: Optional[List[Video]] = None,
1887
+ files: Optional[List[File]] = None,
1888
+ stream: Literal[True] = True,
1889
+ stream_intermediate_steps: Optional[bool] = None,
1890
+ background: Optional[bool] = False,
1891
+ ) -> Iterator[WorkflowRunOutputEvent]: ...
1892
+
1893
+ def run(
1894
+ self,
1895
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
1896
+ additional_data: Optional[Dict[str, Any]] = None,
1897
+ user_id: Optional[str] = None,
1898
+ session_id: Optional[str] = None,
1899
+ session_state: Optional[Dict[str, Any]] = None,
1900
+ audio: Optional[List[Audio]] = None,
1901
+ images: Optional[List[Image]] = None,
1902
+ videos: Optional[List[Video]] = None,
1903
+ files: Optional[List[File]] = None,
1904
+ stream: bool = False,
1905
+ stream_intermediate_steps: Optional[bool] = None,
1906
+ background: Optional[bool] = False,
1907
+ **kwargs: Any,
1908
+ ) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
1909
+ """Execute the workflow synchronously with optional streaming"""
1910
+
1911
+ validated_input = self._validate_input(input)
1912
+ if validated_input is not None:
1913
+ input = validated_input
1914
+
1915
+ if background:
1916
+ raise RuntimeError("Background execution is not supported for sync run()")
1917
+
1918
+ self._set_debug()
1919
+
1920
+ run_id = str(uuid4())
1921
+
1922
+ self.initialize_workflow()
1923
+ session_id, user_id, session_state = self._initialize_session(
1924
+ session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
1925
+ )
672
1926
 
673
- - If a session exists in the database, load the session.
674
- - If a session does not exist in the database, create a new session.
675
- """
676
- # If a workflow_session is already loaded, return the session_id from the workflow_session
677
- # if the session_id matches the session_id from the workflow_session
678
- if self.workflow_session is not None and not force:
679
- if self.session_id is not None and self.workflow_session.session_id == self.session_id:
680
- return self.workflow_session.session_id
681
-
682
- # Load an existing session or create a new session
683
- if self.storage is not None:
684
- # Load existing session if session_id is provided
685
- log_debug(f"Reading WorkflowSession: {self.session_id}")
686
- self.read_from_storage()
687
-
688
- # Create a new session if it does not exist
689
- if self.workflow_session is None:
690
- log_debug("-*- Creating new WorkflowSession")
691
- # write_to_storage() will create a new WorkflowSession
692
- # and populate self.workflow_session with the new session
693
- self.write_to_storage()
694
- if self.workflow_session is None:
695
- raise Exception("Failed to create new WorkflowSession in storage")
696
- log_debug(f"-*- Created WorkflowSession: {self.workflow_session.session_id}")
697
- self.log_workflow_session()
698
- return self.session_id
699
-
700
- def new_session(self) -> None:
701
- """Create a new Workflow session
702
-
703
- - Clear the workflow_session
704
- - Create a new session_id
705
- - Load the new session
706
- """
707
- self.workflow_session = None
708
- self.session_id = str(uuid4())
709
- self.load_session(force=True)
1927
+ # Read existing session from database
1928
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1929
+ self._update_metadata(session=workflow_session)
710
1930
 
711
- def log_workflow_session(self):
712
- log_debug(f"*********** Logging WorkflowSession: {self.session_id} ***********")
1931
+ # Update session state from DB
1932
+ session_state = self._update_session_state(session=workflow_session, session_state=session_state)
713
1933
 
714
- def rename(self, name: str) -> None:
715
- """Rename the Workflow and save to storage"""
1934
+ log_debug(f"Workflow Run Start: {self.name}", center=True)
716
1935
 
717
- # -*- Read from storage
718
- self.read_from_storage()
719
- # -*- Rename Workflow
720
- self.name = name
721
- # -*- Save to storage
722
- self.write_to_storage()
723
- # -*- Log Workflow session
724
- self.log_workflow_session()
1936
+ # Use simple defaults
1937
+ stream = stream or self.stream or False
1938
+ stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
725
1939
 
726
- def rename_session(self, session_name: str):
727
- """Rename the current session and save to storage"""
1940
+ # Can't have stream_intermediate_steps if stream is False
1941
+ if not stream:
1942
+ stream_intermediate_steps = False
728
1943
 
729
- # -*- Read from storage
730
- self.read_from_storage()
731
- # -*- Rename session
732
- self.session_name = session_name
733
- # -*- Save to storage
734
- self.write_to_storage()
735
- # -*- Log Workflow session
736
- self.log_workflow_session()
1944
+ log_debug(f"Stream: {stream}")
1945
+ log_debug(f"Total steps: {self._get_step_count()}")
737
1946
 
738
- def delete_session(self, session_id: str):
739
- """Delete the current session and save to storage"""
740
- if self.storage is None:
741
- return
742
- # -*- Delete session
743
- self.storage.delete_session(session_id=session_id)
1947
+ # Prepare steps
1948
+ self._prepare_steps()
1949
+
1950
+ # Create workflow run response that will be updated by reference
1951
+ workflow_run_response = WorkflowRunOutput(
1952
+ run_id=run_id,
1953
+ session_id=session_id,
1954
+ workflow_id=self.id,
1955
+ workflow_name=self.name,
1956
+ created_at=int(datetime.now().timestamp()),
1957
+ )
1958
+
1959
+ inputs = WorkflowExecutionInput(
1960
+ input=input,
1961
+ additional_data=additional_data,
1962
+ audio=audio, # type: ignore
1963
+ images=images, # type: ignore
1964
+ videos=videos, # type: ignore
1965
+ files=files, # type: ignore
1966
+ )
1967
+ log_debug(
1968
+ f"Created pipeline input with session state keys: {list(session_state.keys()) if session_state else 'None'}"
1969
+ )
1970
+
1971
+ self.update_agents_and_teams_session_info()
1972
+
1973
+ if stream:
1974
+ return self._execute_stream(
1975
+ session=workflow_session,
1976
+ execution_input=inputs, # type: ignore[arg-type]
1977
+ workflow_run_response=workflow_run_response,
1978
+ stream_intermediate_steps=stream_intermediate_steps,
1979
+ session_state=session_state,
1980
+ **kwargs,
1981
+ )
1982
+ else:
1983
+ return self._execute(
1984
+ session=workflow_session,
1985
+ execution_input=inputs, # type: ignore[arg-type]
1986
+ workflow_run_response=workflow_run_response,
1987
+ session_state=session_state,
1988
+ **kwargs,
1989
+ )
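
The sync entry point above can be exercised roughly like this. The agno.workflow import path, the Workflow/Step constructor arguments, and the executor signature are assumptions, not shown in this hunk.

    from agno.workflow import Step, Workflow   # import paths assumed

    def outline(step_input):
        # Executor contract assumed: receives the step input, returns content for the step
        return "1. intro, 2. body, 3. conclusion"

    wf = Workflow(name="writer", steps=[Step(name="outline", executor=outline)])

    # stream=False (default): a single WorkflowRunOutput
    result = wf.run(input="Write about solar batteries")
    print(result.content)

    # stream=True: an iterator of WorkflowRunOutputEvent objects
    for event in wf.run(input="Write about solar batteries", stream=True, stream_intermediate_steps=True):
        print(type(event).__name__)
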
1990
+
1991
+ @overload
1992
+ async def arun(
1993
+ self,
1994
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
1995
+ additional_data: Optional[Dict[str, Any]] = None,
1996
+ user_id: Optional[str] = None,
1997
+ session_id: Optional[str] = None,
1998
+ session_state: Optional[Dict[str, Any]] = None,
1999
+ audio: Optional[List[Audio]] = None,
2000
+ images: Optional[List[Image]] = None,
2001
+ videos: Optional[List[Video]] = None,
2002
+ files: Optional[List[File]] = None,
2003
+ stream: Literal[False] = False,
2004
+ stream_intermediate_steps: Optional[bool] = None,
2005
+ background: Optional[bool] = False,
2006
+ websocket: Optional[WebSocket] = None,
2007
+ ) -> WorkflowRunOutput: ...
2008
+
2009
+ @overload
2010
+ async def arun(
2011
+ self,
2012
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
2013
+ additional_data: Optional[Dict[str, Any]] = None,
2014
+ user_id: Optional[str] = None,
2015
+ session_id: Optional[str] = None,
2016
+ session_state: Optional[Dict[str, Any]] = None,
2017
+ audio: Optional[List[Audio]] = None,
2018
+ images: Optional[List[Image]] = None,
2019
+ videos: Optional[List[Video]] = None,
2020
+ files: Optional[List[File]] = None,
2021
+ stream: Literal[True] = True,
2022
+ stream_intermediate_steps: Optional[bool] = None,
2023
+ background: Optional[bool] = False,
2024
+ websocket: Optional[WebSocket] = None,
2025
+ ) -> AsyncIterator[WorkflowRunOutputEvent]: ...
2026
+
2027
+ async def arun(
2028
+ self,
2029
+ input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
2030
+ additional_data: Optional[Dict[str, Any]] = None,
2031
+ user_id: Optional[str] = None,
2032
+ session_id: Optional[str] = None,
2033
+ session_state: Optional[Dict[str, Any]] = None,
2034
+ audio: Optional[List[Audio]] = None,
2035
+ images: Optional[List[Image]] = None,
2036
+ videos: Optional[List[Video]] = None,
2037
+ files: Optional[List[File]] = None,
2038
+ stream: bool = False,
2039
+ stream_intermediate_steps: Optional[bool] = None,
2040
+ background: Optional[bool] = False,
2041
+ websocket: Optional[WebSocket] = None,
2042
+ **kwargs: Any,
2043
+ ) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
2044
+ """Execute the workflow synchronously with optional streaming"""
2045
+
2046
+ validated_input = self._validate_input(input)
2047
+ if validated_input is not None:
2048
+ input = validated_input
2049
+
2050
+ websocket_handler = None
2051
+ if websocket:
2052
+ from agno.workflow.types import WebSocketHandler
2053
+
2054
+ websocket_handler = WebSocketHandler(websocket=websocket)
2055
+
2056
+ if background:
2057
+ if stream and websocket:
2058
+ # Background + Streaming + WebSocket = Real-time events
2059
+ return await self._arun_background_stream(
2060
+ input=input,
2061
+ additional_data=additional_data,
2062
+ user_id=user_id,
2063
+ session_id=session_id,
2064
+ audio=audio,
2065
+ images=images,
2066
+ videos=videos,
2067
+ files=files,
2068
+ stream_intermediate_steps=stream_intermediate_steps or False,
2069
+ websocket_handler=websocket_handler,
2070
+ **kwargs,
2071
+ )
2072
+ elif stream and not websocket:
2073
+ # Background + Streaming but no WebSocket = Not supported
2074
+ raise ValueError("Background streaming execution requires a WebSocket for real-time events")
2075
+ else:
2076
+ # Background + Non-streaming = poll for the result with get_run()
2077
+ return await self._arun_background(
2078
+ input=input,
2079
+ additional_data=additional_data,
2080
+ user_id=user_id,
2081
+ session_id=session_id,
2082
+ audio=audio,
2083
+ images=images,
2084
+ videos=videos,
2085
+ files=files,
2086
+ **kwargs,
2087
+ )
2088
+
2089
+ self._set_debug()
2090
+
2091
+ run_id = str(uuid4())
2092
+
2093
+ self.initialize_workflow()
2094
+ session_id, user_id, session_state = self._initialize_session(
2095
+ session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
2096
+ )
2097
+
2098
+ # Read existing session from database
2099
+ workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
2100
+ self._update_metadata(session=workflow_session)
2101
+
2102
+ # Update session state from DB
2103
+ session_state = self._update_session_state(session=workflow_session, session_state=session_state)
2104
+
2105
+ log_debug(f"Async Workflow Run Start: {self.name}", center=True)
2106
+
2107
+ # Use simple defaults
2108
+ stream = stream or self.stream or False
2109
+ stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
2110
+
2111
+ # Can't have stream_intermediate_steps if stream is False
2112
+ if not stream:
2113
+ stream_intermediate_steps = False
744
2114
 
745
- def deep_copy(self, *, update: Optional[Dict[str, Any]] = None) -> Workflow:
746
- """Create and return a deep copy of this Workflow, optionally updating fields.
2115
+ log_debug(f"Stream: {stream}")
2116
+
2117
+ # Prepare steps
2118
+ self._prepare_steps()
2119
+
2120
+ # Create workflow run response that will be updated by reference
2121
+ workflow_run_response = WorkflowRunOutput(
2122
+ run_id=run_id,
2123
+ session_id=session_id,
2124
+ workflow_id=self.id,
2125
+ workflow_name=self.name,
2126
+ created_at=int(datetime.now().timestamp()),
2127
+ )
2128
+
2129
+ inputs = WorkflowExecutionInput(
2130
+ input=input,
2131
+ additional_data=additional_data,
2132
+ audio=audio, # type: ignore
2133
+ images=images, # type: ignore
2134
+ videos=videos, # type: ignore
2135
+ )
2136
+ log_debug(
2137
+ f"Created async pipeline input with session state keys: {list(session_state.keys()) if session_state else 'None'}"
2138
+ )
2139
+
2140
+ self.update_agents_and_teams_session_info()
2141
+
2142
+ if stream:
2143
+ return self._aexecute_stream(
2144
+ execution_input=inputs,
2145
+ workflow_run_response=workflow_run_response,
2146
+ session=workflow_session,
2147
+ stream_intermediate_steps=stream_intermediate_steps,
2148
+ websocket=websocket,
2149
+ files=files,
2150
+ session_state=session_state,
2151
+ **kwargs,
2152
+ )
2153
+ else:
2154
+ return await self._aexecute(
2155
+ execution_input=inputs,
2156
+ workflow_run_response=workflow_run_response,
2157
+ session=workflow_session,
2158
+ websocket=websocket,
2159
+ files=files,
2160
+ session_state=session_state,
2161
+ **kwargs,
2162
+ )
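
The async twin mirrors run(); note that arun() itself is awaited and, with stream=True, the awaited value is an async iterator of events. Import path and constructor arguments are again assumptions.

    import asyncio
    from agno.workflow import Workflow   # import path assumed

    wf = Workflow(name="writer", steps=[lambda step_input: "draft"])  # constructor kwargs assumed

    async def main() -> None:
        result = await wf.arun(input="Write about solar batteries")
        print(result.content)

        events = await wf.arun(input="Write about solar batteries", stream=True, stream_intermediate_steps=True)
        async for event in events:
            print(type(event).__name__)

    asyncio.run(main())
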
2163
+
2164
+ def _prepare_steps(self):
2165
+ """Prepare the steps for execution"""
2166
+ if not callable(self.steps) and self.steps is not None:
2167
+ prepared_steps: List[Union[Step, Steps, Loop, Parallel, Condition, Router]] = []
2168
+ for i, step in enumerate(self.steps): # type: ignore
2169
+ if callable(step) and hasattr(step, "__name__"):
2170
+ step_name = step.__name__
2171
+ log_debug(f"Step {i + 1}: Wrapping callable function '{step_name}'")
2172
+ prepared_steps.append(Step(name=step_name, description="User-defined callable step", executor=step))
2173
+ elif isinstance(step, Agent):
2174
+ step_name = step.name or f"step_{i + 1}"
2175
+ log_debug(f"Step {i + 1}: Agent '{step_name}'")
2176
+ prepared_steps.append(Step(name=step_name, description=step.description, agent=step))
2177
+ elif isinstance(step, Team):
2178
+ step_name = step.name or f"step_{i + 1}"
2179
+ log_debug(f"Step {i + 1}: Team '{step_name}' with {len(step.members)} members")
2180
+ prepared_steps.append(Step(name=step_name, description=step.description, team=step))
2181
+ elif isinstance(step, (Step, Steps, Loop, Parallel, Condition, Router)):
2182
+ step_type = type(step).__name__
2183
+ step_name = getattr(step, "name", f"unnamed_{step_type.lower()}")
2184
+ log_debug(f"Step {i + 1}: {step_type} '{step_name}'")
2185
+ prepared_steps.append(step)
2186
+ else:
2187
+ raise ValueError(f"Invalid step type: {type(step).__name__}")
2188
+
2189
+ self.steps = prepared_steps # type: ignore
2190
+ log_debug("Step preparation completed")
2191
+
2192
+ def print_response(
2193
+ self,
2194
+ input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
2195
+ additional_data: Optional[Dict[str, Any]] = None,
2196
+ user_id: Optional[str] = None,
2197
+ session_id: Optional[str] = None,
2198
+ audio: Optional[List[Audio]] = None,
2199
+ images: Optional[List[Image]] = None,
2200
+ videos: Optional[List[Video]] = None,
2201
+ files: Optional[List[File]] = None,
2202
+ stream: Optional[bool] = None,
2203
+ stream_intermediate_steps: Optional[bool] = None,
2204
+ markdown: bool = True,
2205
+ show_time: bool = True,
2206
+ show_step_details: bool = True,
2207
+ console: Optional[Any] = None,
2208
+ **kwargs: Any,
2209
+ ) -> None:
2210
+ """Print workflow execution with rich formatting and optional streaming
747
2211
 
748
2212
  Args:
749
- update (Optional[Dict[str, Any]]): Optional dictionary of fields for the new Workflow.
2213
+ input: The main query/input for the workflow
2214
+ additional_data: Attached message data to the input
2215
+ user_id: User ID
2216
+ session_id: Session ID
2217
+ audio: Audio input
2218
+ images: Image input
2219
+ videos: Video input
+ files: File input
2220
+ stream: Whether to stream the response content
2221
+ stream_intermediate_steps: Whether to stream intermediate steps
2222
+ markdown: Whether to render content as markdown
2223
+ show_time: Whether to show execution time
2224
+ show_step_details: Whether to show individual step outputs
2225
+ console: Rich console instance (optional)
2226
+ """
750
2227
 
751
- Returns:
752
- Workflow: A new Workflow instance.
2228
+ if stream is None:
2229
+ stream = self.stream or False
2230
+
2231
+ if stream_intermediate_steps is None:
2232
+ stream_intermediate_steps = self.stream_intermediate_steps or False
2233
+
2234
+ if stream:
2235
+ print_response_stream(
2236
+ workflow=self,
2237
+ input=input,
2238
+ user_id=user_id,
2239
+ session_id=session_id,
2240
+ additional_data=additional_data,
2241
+ audio=audio,
2242
+ images=images,
2243
+ videos=videos,
2244
+ files=files,
2245
+ stream_intermediate_steps=stream_intermediate_steps,
2246
+ markdown=markdown,
2247
+ show_time=show_time,
2248
+ show_step_details=show_step_details,
2249
+ console=console,
2250
+ **kwargs,
2251
+ )
2252
+ else:
2253
+ print_response(
2254
+ workflow=self,
2255
+ input=input,
2256
+ user_id=user_id,
2257
+ session_id=session_id,
2258
+ additional_data=additional_data,
2259
+ audio=audio,
2260
+ images=images,
2261
+ videos=videos,
2262
+ files=files,
2263
+ markdown=markdown,
2264
+ show_time=show_time,
2265
+ show_step_details=show_step_details,
2266
+ console=console,
2267
+ **kwargs,
2268
+ )
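
A minimal sketch of the console helper above; stream and stream_intermediate_steps fall back to the workflow-level settings when left as None. Import path and constructor arguments are assumed.

    from agno.workflow import Workflow   # import path assumed

    wf = Workflow(name="print-demo", steps=[lambda step_input: "Hello from the workflow"])
    wf.print_response(
        input="Say hello",
        markdown=True,
        stream=True,
        stream_intermediate_steps=True,  # show per-step panels as they complete
        show_time=True,
    )
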
2269
+
2270
+ async def aprint_response(
2271
+ self,
2272
+ input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
2273
+ additional_data: Optional[Dict[str, Any]] = None,
2274
+ user_id: Optional[str] = None,
2275
+ session_id: Optional[str] = None,
2276
+ audio: Optional[List[Audio]] = None,
2277
+ images: Optional[List[Image]] = None,
2278
+ videos: Optional[List[Video]] = None,
2279
+ files: Optional[List[File]] = None,
2280
+ stream: Optional[bool] = None,
2281
+ stream_intermediate_steps: Optional[bool] = None,
2282
+ markdown: bool = True,
2283
+ show_time: bool = True,
2284
+ show_step_details: bool = True,
2285
+ console: Optional[Any] = None,
2286
+ **kwargs: Any,
2287
+ ) -> None:
2288
+ """Print workflow execution with rich formatting and optional streaming
2289
+
2290
+ Args:
2291
+ input: The main message/input for the workflow
2292
+ additional_data: Attached message data to the input
2293
+ user_id: User ID
2294
+ session_id: Session ID
2295
+ audio: Audio input
2296
+ images: Image input
2297
+ videos: Video input
+ files: File input
2298
+ stream_intermediate_steps: Whether to stream intermediate steps
2299
+ stream: Whether to stream the response content
2300
+ markdown: Whether to render content as markdown
2301
+ show_time: Whether to show execution time
2302
+ show_step_details: Whether to show individual step outputs
2303
+ console: Rich console instance (optional)
753
2304
  """
754
- # Extract the fields to set for the new Workflow
755
- fields_for_new_workflow: Dict[str, Any] = {}
756
-
757
- for f in fields(self):
758
- field_value = getattr(self, f.name)
759
- if field_value is not None:
760
- if isinstance(field_value, Agent):
761
- fields_for_new_workflow[f.name] = field_value.deep_copy()
762
- else:
763
- fields_for_new_workflow[f.name] = self._deep_copy_field(f.name, field_value)
2305
+ if stream is None:
2306
+ stream = self.stream or False
2307
+
2308
+ if stream_intermediate_steps is None:
2309
+ stream_intermediate_steps = self.stream_intermediate_steps or False
2310
+
2311
+ if stream:
2312
+ await aprint_response_stream(
2313
+ workflow=self,
2314
+ input=input,
2315
+ additional_data=additional_data,
2316
+ user_id=user_id,
2317
+ session_id=session_id,
2318
+ audio=audio,
2319
+ images=images,
2320
+ videos=videos,
2321
+ files=files,
2322
+ stream_intermediate_steps=stream_intermediate_steps,
2323
+ markdown=markdown,
2324
+ show_time=show_time,
2325
+ show_step_details=show_step_details,
2326
+ console=console,
2327
+ **kwargs,
2328
+ )
2329
+ else:
2330
+ await aprint_response(
2331
+ workflow=self,
2332
+ input=input,
2333
+ additional_data=additional_data,
2334
+ user_id=user_id,
2335
+ session_id=session_id,
2336
+ audio=audio,
2337
+ images=images,
2338
+ videos=videos,
2339
+ files=files,
2340
+ markdown=markdown,
2341
+ show_time=show_time,
2342
+ show_step_details=show_step_details,
2343
+ console=console,
2344
+ **kwargs,
2345
+ )
764
2346
 
765
- # Update fields if provided
766
- if update:
767
- fields_for_new_workflow.update(update)
2347
+ def to_dict(self) -> Dict[str, Any]:
2348
+ """Convert workflow to dictionary representation"""
768
2349
 
769
- # Create a new Workflow
770
- new_workflow = self.__class__(**fields_for_new_workflow)
771
- log_debug(f"Created new {self.__class__.__name__}")
772
- return new_workflow
2350
+ def serialize_step(step):
2351
+ step_dict = {
2352
+ "name": step.name if hasattr(step, "name") else f"unnamed_{type(step).__name__.lower()}",
2353
+ "description": step.description if hasattr(step, "description") else "User-defined callable step",
2354
+ "type": STEP_TYPE_MAPPING[type(step)].value, # type: ignore
2355
+ }
773
2356
 
774
- def _deep_copy_field(self, field_name: str, field_value: Any) -> Any:
775
- """Helper method to deep copy a field based on its type."""
776
- from copy import copy, deepcopy
2357
+ # Handle agent/team/tools
2358
+ if hasattr(step, "agent"):
2359
+ step_dict["agent"] = step.agent if hasattr(step, "agent") else None # type: ignore
2360
+ if hasattr(step, "team"):
2361
+ step_dict["team"] = step.team if hasattr(step, "team") else None # type: ignore
777
2362
 
778
- # For memory, use its deep_copy method
779
- if field_name == "memory":
780
- return field_value.deep_copy()
2363
+ # Handle nested steps for Router/Loop
2364
+ if isinstance(step, Router):
2365
+ step_dict["steps"] = (
2366
+ [serialize_step(choice) for choice in step.choices] if hasattr(step, "choices") else None
2367
+ )
781
2368
 
782
- # For compound types, attempt a deep copy
783
- if isinstance(field_value, (list, dict, set, Storage)):
784
- try:
785
- return deepcopy(field_value)
786
- except Exception as e:
787
- logger.warning(f"Failed to deepcopy field: {field_name} - {e}")
788
- try:
789
- return copy(field_value)
790
- except Exception as e:
791
- logger.warning(f"Failed to copy field: {field_name} - {e}")
792
- return field_value
2369
+ elif isinstance(step, (Loop, Condition, Steps, Parallel)):
2370
+ step_dict["steps"] = [serialize_step(step) for step in step.steps] if hasattr(step, "steps") else None
793
2371
 
794
- # For pydantic models, attempt a model_copy
795
- if isinstance(field_value, BaseModel):
796
- try:
797
- return field_value.model_copy(deep=True)
798
- except Exception as e:
799
- logger.warning(f"Failed to deepcopy field: {field_name} - {e}")
800
- try:
801
- return field_value.model_copy(deep=False)
802
- except Exception as e:
803
- logger.warning(f"Failed to copy field: {field_name} - {e}")
804
- return field_value
2372
+ return step_dict
2373
+
2374
+ if self.steps is None or callable(self.steps):
2375
+ steps_list = []
2376
+ elif isinstance(self.steps, Steps):
2377
+ steps_list = self.steps.steps
2378
+ else:
2379
+ steps_list = self.steps
2380
+
2381
+ return {
2382
+ "name": self.name,
2383
+ "workflow_id": self.id,
2384
+ "description": self.description,
2385
+ "steps": [serialize_step(s) for s in steps_list],
2386
+ "session_id": self.session_id,
2387
+ }
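
to_dict() produces a plain summary of the workflow; a hedged sketch of the resulting shape (step entries may carry live agent/team references, hence default=str below). Import paths and constructor arguments are assumed.

    import json
    from agno.workflow import Step, Workflow   # import paths assumed

    wf = Workflow(
        name="report-pipeline",
        description="Draft and review a report",
        steps=[Step(name="draft", executor=lambda step_input: "draft")],
    )
    # Roughly: {"name", "workflow_id", "description", "steps": [{"name", "description", "type", ...}], "session_id"}
    print(json.dumps(wf.to_dict(), indent=2, default=str))
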
2388
+
2389
+ def _calculate_session_metrics_from_workflow_metrics(self, workflow_metrics: WorkflowMetrics) -> Metrics:
2390
+ """Calculate session metrics by aggregating all step metrics from workflow metrics"""
2391
+ session_metrics = Metrics()
2392
+
2393
+ # Aggregate metrics from all steps
2394
+ for step_name, step_metrics in workflow_metrics.steps.items():
2395
+ if step_metrics.metrics:
2396
+ session_metrics += step_metrics.metrics
2397
+
2398
+ session_metrics.time_to_first_token = None
2399
+
2400
+ return session_metrics
2401
+
2402
+ def _get_session_metrics(self, session: WorkflowSession) -> Metrics:
2403
+ """Get existing session metrics from the database"""
2404
+ if session.session_data and "session_metrics" in session.session_data:
2405
+ session_metrics_from_db = session.session_data.get("session_metrics")
2406
+ if session_metrics_from_db is not None:
2407
+ if isinstance(session_metrics_from_db, dict):
2408
+ return Metrics(**session_metrics_from_db)
2409
+ elif isinstance(session_metrics_from_db, Metrics):
2410
+ return session_metrics_from_db
2411
+ return Metrics()
2412
+
2413
+ def _update_session_metrics(self, session: WorkflowSession, workflow_run_response: WorkflowRunOutput):
2414
+ """Calculate and update session metrics"""
2415
+ # Get existing session metrics
2416
+ session_metrics = self._get_session_metrics(session=session)
2417
+
2418
+ # If workflow has metrics, convert and add them to session metrics
2419
+ if workflow_run_response.metrics:
2420
+ run_session_metrics = self._calculate_session_metrics_from_workflow_metrics(workflow_run_response.metrics)
2421
+
2422
+ session_metrics += run_session_metrics
2423
+
2424
+ session_metrics.time_to_first_token = None
2425
+
2426
+ # Store updated session metrics, converted to a dict for JSON serialization
2427
+ if not session.session_data:
2428
+ session.session_data = {}
2429
+ session.session_data["session_metrics"] = session_metrics.to_dict()
2430
+
2431
+ def get_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
2432
+ """Get the session metrics for the given session ID and user ID."""
2433
+ session_id = session_id or self.session_id
2434
+ if session_id is None:
2435
+ raise Exception("Session ID is required")
2436
+
2437
+ session = self.get_session(session_id=session_id)
2438
+ if session is None:
2439
+ raise Exception("Session not found")
2440
+
2441
+ return self._get_session_metrics(session=session)
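
Reading the aggregated metrics back after a run might look like this; the SqliteDb adapter name and its db_file argument are assumptions, and the session must already hold at least one persisted run.

    from agno.db.sqlite import SqliteDb   # class name and db_file kwarg assumed
    from agno.workflow import Workflow    # import path assumed

    wf = Workflow(
        name="metrics-demo",
        db=SqliteDb(db_file="tmp/agno.db"),
        steps=[lambda step_input: "ok"],
    )
    wf.run(input="hello", session_id="metrics-session")
    metrics = wf.get_session_metrics(session_id="metrics-session")
    if metrics is not None:
        # Aggregated across step metrics; time_to_first_token is cleared at the session level
        print(metrics.to_dict())
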
2442
+
2443
+ def update_agents_and_teams_session_info(self):
2444
+ """Update agents and teams with workflow session information"""
2445
+ log_debug("Updating agents and teams with session information")
2446
+ # Initialize steps - only if steps is iterable (not callable)
2447
+ if self.steps and not callable(self.steps):
2448
+ steps_list = self.steps.steps if isinstance(self.steps, Steps) else self.steps
2449
+ for step in steps_list:
2450
+ # TODO: Handle properly steps inside other primitives
2451
+ if isinstance(step, Step):
2452
+ active_executor = step.active_executor
2453
+
2454
+ if hasattr(active_executor, "workflow_id"):
2455
+ active_executor.workflow_id = self.id
2456
+
2457
+ # If it's a team, update all members
2458
+ if hasattr(active_executor, "members"):
2459
+ for member in active_executor.members:
2460
+ if hasattr(member, "workflow_id"):
2461
+ member.workflow_id = self.id
2462
+
2463
+ ###########################################################################
2464
+ # Telemetry functions
2465
+ ###########################################################################
2466
+
2467
+ def _get_telemetry_data(self) -> Dict[str, Any]:
2468
+ """Get the telemetry data for the workflow"""
2469
+ return {
2470
+ "workflow_id": self.id,
2471
+ "db_type": self.db.__class__.__name__ if self.db else None,
2472
+ "has_input_schema": self.input_schema is not None,
2473
+ }
2474
+
2475
+ def _log_workflow_telemetry(self, session_id: str, run_id: Optional[str] = None) -> None:
2476
+ """Send a telemetry event to the API for a created Workflow run"""
2477
+
2478
+ self._set_telemetry()
2479
+ if not self.telemetry:
2480
+ return
2481
+
2482
+ from agno.api.workflow import WorkflowRunCreate, create_workflow_run
805
2483
 
806
- # For other types, return as is
807
- return field_value
2484
+ try:
2485
+ create_workflow_run(
2486
+ workflow=WorkflowRunCreate(session_id=session_id, run_id=run_id, data=self._get_telemetry_data()),
2487
+ )
2488
+ except Exception as e:
2489
+ log_debug(f"Could not create Workflow run telemetry event: {e}")
2490
+
2491
+ async def _alog_workflow_telemetry(self, session_id: str, run_id: Optional[str] = None) -> None:
2492
+ """Send a telemetry event to the API for a created Workflow async run"""
2493
+
2494
+ self._set_telemetry()
2495
+ if not self.telemetry:
2496
+ return
2497
+
2498
+ from agno.api.workflow import WorkflowRunCreate, acreate_workflow_run
2499
+
2500
+ try:
2501
+ await acreate_workflow_run(
2502
+ workflow=WorkflowRunCreate(session_id=session_id, run_id=run_id, data=self._get_telemetry_data())
2503
+ )
2504
+ except Exception as e:
2505
+ log_debug(f"Could not create Workflow run telemetry event: {e}")