agno-1.8.1-py3-none-any.whl → agno-2.0.0rc1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (583)
  1. agno/__init__.py +8 -0
  2. agno/agent/__init__.py +19 -27
  3. agno/agent/agent.py +3181 -4169
  4. agno/api/agent.py +11 -67
  5. agno/api/api.py +5 -46
  6. agno/api/evals.py +8 -19
  7. agno/api/os.py +17 -0
  8. agno/api/routes.py +6 -41
  9. agno/api/schemas/__init__.py +9 -0
  10. agno/api/schemas/agent.py +5 -21
  11. agno/api/schemas/evals.py +7 -16
  12. agno/api/schemas/os.py +14 -0
  13. agno/api/schemas/team.py +5 -21
  14. agno/api/schemas/utils.py +21 -0
  15. agno/api/schemas/workflows.py +11 -7
  16. agno/api/settings.py +53 -0
  17. agno/api/team.py +11 -66
  18. agno/api/workflow.py +28 -0
  19. agno/cloud/aws/base.py +214 -0
  20. agno/cloud/aws/s3/__init__.py +2 -0
  21. agno/cloud/aws/s3/api_client.py +43 -0
  22. agno/cloud/aws/s3/bucket.py +195 -0
  23. agno/cloud/aws/s3/object.py +57 -0
  24. agno/db/__init__.py +24 -0
  25. agno/db/base.py +245 -0
  26. agno/db/dynamo/__init__.py +3 -0
  27. agno/db/dynamo/dynamo.py +1743 -0
  28. agno/db/dynamo/schemas.py +278 -0
  29. agno/db/dynamo/utils.py +684 -0
  30. agno/db/firestore/__init__.py +3 -0
  31. agno/db/firestore/firestore.py +1432 -0
  32. agno/db/firestore/schemas.py +130 -0
  33. agno/db/firestore/utils.py +278 -0
  34. agno/db/gcs_json/__init__.py +3 -0
  35. agno/db/gcs_json/gcs_json_db.py +1001 -0
  36. agno/db/gcs_json/utils.py +194 -0
  37. agno/db/in_memory/__init__.py +3 -0
  38. agno/db/in_memory/in_memory_db.py +882 -0
  39. agno/db/in_memory/utils.py +172 -0
  40. agno/db/json/__init__.py +3 -0
  41. agno/db/json/json_db.py +1045 -0
  42. agno/db/json/utils.py +196 -0
  43. agno/db/migrations/v1_to_v2.py +162 -0
  44. agno/db/mongo/__init__.py +3 -0
  45. agno/db/mongo/mongo.py +1411 -0
  46. agno/db/mongo/schemas.py +77 -0
  47. agno/db/mongo/utils.py +204 -0
  48. agno/db/mysql/__init__.py +3 -0
  49. agno/db/mysql/mysql.py +1719 -0
  50. agno/db/mysql/schemas.py +124 -0
  51. agno/db/mysql/utils.py +297 -0
  52. agno/db/postgres/__init__.py +3 -0
  53. agno/db/postgres/postgres.py +1710 -0
  54. agno/db/postgres/schemas.py +124 -0
  55. agno/db/postgres/utils.py +280 -0
  56. agno/db/redis/__init__.py +3 -0
  57. agno/db/redis/redis.py +1367 -0
  58. agno/db/redis/schemas.py +109 -0
  59. agno/db/redis/utils.py +288 -0
  60. agno/db/schemas/__init__.py +3 -0
  61. agno/db/schemas/evals.py +33 -0
  62. agno/db/schemas/knowledge.py +40 -0
  63. agno/db/schemas/memory.py +46 -0
  64. agno/db/singlestore/__init__.py +3 -0
  65. agno/db/singlestore/schemas.py +116 -0
  66. agno/db/singlestore/singlestore.py +1712 -0
  67. agno/db/singlestore/utils.py +326 -0
  68. agno/db/sqlite/__init__.py +3 -0
  69. agno/db/sqlite/schemas.py +119 -0
  70. agno/db/sqlite/sqlite.py +1676 -0
  71. agno/db/sqlite/utils.py +268 -0
  72. agno/db/utils.py +88 -0
  73. agno/eval/__init__.py +14 -0
  74. agno/eval/accuracy.py +142 -43
  75. agno/eval/performance.py +88 -23
  76. agno/eval/reliability.py +73 -20
  77. agno/eval/utils.py +23 -13
  78. agno/integrations/discord/__init__.py +3 -0
  79. agno/{app → integrations}/discord/client.py +15 -11
  80. agno/knowledge/__init__.py +2 -2
  81. agno/{document → knowledge}/chunking/agentic.py +2 -2
  82. agno/{document → knowledge}/chunking/document.py +2 -2
  83. agno/{document → knowledge}/chunking/fixed.py +3 -3
  84. agno/{document → knowledge}/chunking/markdown.py +2 -2
  85. agno/{document → knowledge}/chunking/recursive.py +2 -2
  86. agno/{document → knowledge}/chunking/row.py +2 -2
  87. agno/knowledge/chunking/semantic.py +59 -0
  88. agno/knowledge/chunking/strategy.py +121 -0
  89. agno/knowledge/content.py +74 -0
  90. agno/knowledge/document/__init__.py +5 -0
  91. agno/{document → knowledge/document}/base.py +12 -2
  92. agno/knowledge/embedder/__init__.py +5 -0
  93. agno/{embedder → knowledge/embedder}/aws_bedrock.py +127 -1
  94. agno/{embedder → knowledge/embedder}/azure_openai.py +65 -1
  95. agno/{embedder → knowledge/embedder}/base.py +6 -0
  96. agno/{embedder → knowledge/embedder}/cohere.py +72 -1
  97. agno/{embedder → knowledge/embedder}/fastembed.py +17 -1
  98. agno/{embedder → knowledge/embedder}/fireworks.py +1 -1
  99. agno/{embedder → knowledge/embedder}/google.py +74 -1
  100. agno/{embedder → knowledge/embedder}/huggingface.py +36 -2
  101. agno/{embedder → knowledge/embedder}/jina.py +48 -2
  102. agno/knowledge/embedder/langdb.py +22 -0
  103. agno/knowledge/embedder/mistral.py +139 -0
  104. agno/{embedder → knowledge/embedder}/nebius.py +1 -1
  105. agno/{embedder → knowledge/embedder}/ollama.py +54 -3
  106. agno/knowledge/embedder/openai.py +223 -0
  107. agno/{embedder → knowledge/embedder}/sentence_transformer.py +16 -1
  108. agno/{embedder → knowledge/embedder}/together.py +1 -1
  109. agno/{embedder → knowledge/embedder}/voyageai.py +49 -1
  110. agno/knowledge/knowledge.py +1515 -0
  111. agno/knowledge/reader/__init__.py +7 -0
  112. agno/{document → knowledge}/reader/arxiv_reader.py +32 -4
  113. agno/knowledge/reader/base.py +88 -0
  114. agno/{document → knowledge}/reader/csv_reader.py +68 -15
  115. agno/knowledge/reader/docx_reader.py +83 -0
  116. agno/{document → knowledge}/reader/firecrawl_reader.py +42 -21
  117. agno/knowledge/reader/gcs_reader.py +67 -0
  118. agno/{document → knowledge}/reader/json_reader.py +30 -9
  119. agno/{document → knowledge}/reader/markdown_reader.py +36 -9
  120. agno/{document → knowledge}/reader/pdf_reader.py +79 -21
  121. agno/knowledge/reader/reader_factory.py +275 -0
  122. agno/knowledge/reader/s3_reader.py +171 -0
  123. agno/{document → knowledge}/reader/text_reader.py +31 -10
  124. agno/knowledge/reader/url_reader.py +84 -0
  125. agno/knowledge/reader/web_search_reader.py +389 -0
  126. agno/{document → knowledge}/reader/website_reader.py +37 -10
  127. agno/knowledge/reader/wikipedia_reader.py +59 -0
  128. agno/knowledge/reader/youtube_reader.py +78 -0
  129. agno/knowledge/remote_content/remote_content.py +88 -0
  130. agno/{reranker → knowledge/reranker}/base.py +1 -1
  131. agno/{reranker → knowledge/reranker}/cohere.py +2 -2
  132. agno/{reranker → knowledge/reranker}/infinity.py +2 -2
  133. agno/{reranker → knowledge/reranker}/sentence_transformer.py +2 -2
  134. agno/knowledge/types.py +30 -0
  135. agno/knowledge/utils.py +169 -0
  136. agno/memory/__init__.py +2 -10
  137. agno/memory/manager.py +1003 -148
  138. agno/models/aimlapi/__init__.py +2 -2
  139. agno/models/aimlapi/aimlapi.py +6 -6
  140. agno/models/anthropic/claude.py +131 -131
  141. agno/models/aws/bedrock.py +107 -175
  142. agno/models/aws/claude.py +64 -18
  143. agno/models/azure/ai_foundry.py +73 -23
  144. agno/models/base.py +347 -287
  145. agno/models/cerebras/cerebras.py +84 -27
  146. agno/models/cohere/chat.py +106 -98
  147. agno/models/google/gemini.py +100 -42
  148. agno/models/groq/groq.py +97 -35
  149. agno/models/huggingface/huggingface.py +92 -27
  150. agno/models/ibm/watsonx.py +72 -13
  151. agno/models/litellm/chat.py +85 -13
  152. agno/models/message.py +45 -150
  153. agno/models/meta/llama.py +85 -49
  154. agno/models/metrics.py +120 -0
  155. agno/models/mistral/mistral.py +90 -21
  156. agno/models/ollama/__init__.py +0 -2
  157. agno/models/ollama/chat.py +84 -46
  158. agno/models/openai/chat.py +121 -23
  159. agno/models/openai/responses.py +178 -105
  160. agno/models/perplexity/perplexity.py +26 -2
  161. agno/models/portkey/portkey.py +0 -7
  162. agno/models/response.py +14 -8
  163. agno/models/utils.py +20 -0
  164. agno/models/vercel/__init__.py +2 -2
  165. agno/models/vercel/v0.py +1 -1
  166. agno/models/vllm/__init__.py +2 -2
  167. agno/models/vllm/vllm.py +3 -3
  168. agno/models/xai/xai.py +10 -10
  169. agno/os/__init__.py +3 -0
  170. agno/os/app.py +489 -0
  171. agno/os/auth.py +47 -0
  172. agno/os/config.py +103 -0
  173. agno/os/interfaces/agui/__init__.py +3 -0
  174. agno/os/interfaces/agui/agui.py +31 -0
  175. agno/{app/agui/async_router.py → os/interfaces/agui/router.py} +16 -16
  176. agno/{app → os/interfaces}/agui/utils.py +77 -33
  177. agno/os/interfaces/base.py +21 -0
  178. agno/os/interfaces/slack/__init__.py +3 -0
  179. agno/{app/slack/async_router.py → os/interfaces/slack/router.py} +3 -5
  180. agno/os/interfaces/slack/slack.py +32 -0
  181. agno/os/interfaces/whatsapp/__init__.py +3 -0
  182. agno/{app/whatsapp/async_router.py → os/interfaces/whatsapp/router.py} +4 -7
  183. agno/os/interfaces/whatsapp/whatsapp.py +29 -0
  184. agno/os/mcp.py +255 -0
  185. agno/os/router.py +869 -0
  186. agno/os/routers/__init__.py +3 -0
  187. agno/os/routers/evals/__init__.py +3 -0
  188. agno/os/routers/evals/evals.py +208 -0
  189. agno/os/routers/evals/schemas.py +142 -0
  190. agno/os/routers/evals/utils.py +161 -0
  191. agno/os/routers/knowledge/__init__.py +3 -0
  192. agno/os/routers/knowledge/knowledge.py +436 -0
  193. agno/os/routers/knowledge/schemas.py +118 -0
  194. agno/os/routers/memory/__init__.py +3 -0
  195. agno/os/routers/memory/memory.py +188 -0
  196. agno/os/routers/memory/schemas.py +58 -0
  197. agno/os/routers/metrics/__init__.py +3 -0
  198. agno/os/routers/metrics/metrics.py +60 -0
  199. agno/os/routers/metrics/schemas.py +47 -0
  200. agno/os/routers/session/__init__.py +3 -0
  201. agno/os/routers/session/session.py +168 -0
  202. agno/os/schema.py +892 -0
  203. agno/{app/playground → os}/settings.py +7 -15
  204. agno/os/utils.py +270 -0
  205. agno/reasoning/azure_ai_foundry.py +4 -4
  206. agno/reasoning/deepseek.py +4 -4
  207. agno/reasoning/default.py +6 -11
  208. agno/reasoning/groq.py +4 -4
  209. agno/reasoning/helpers.py +4 -6
  210. agno/reasoning/ollama.py +4 -4
  211. agno/reasoning/openai.py +4 -4
  212. agno/run/{response.py → agent.py} +231 -74
  213. agno/run/base.py +44 -58
  214. agno/run/cancel.py +81 -0
  215. agno/run/team.py +133 -77
  216. agno/run/workflow.py +537 -12
  217. agno/session/__init__.py +10 -0
  218. agno/session/agent.py +244 -0
  219. agno/session/summary.py +225 -0
  220. agno/session/team.py +262 -0
  221. agno/{storage/session/v2 → session}/workflow.py +47 -24
  222. agno/team/__init__.py +15 -16
  223. agno/team/team.py +2960 -4252
  224. agno/tools/agentql.py +14 -5
  225. agno/tools/airflow.py +9 -4
  226. agno/tools/api.py +7 -3
  227. agno/tools/apify.py +2 -46
  228. agno/tools/arxiv.py +8 -3
  229. agno/tools/aws_lambda.py +7 -5
  230. agno/tools/aws_ses.py +7 -1
  231. agno/tools/baidusearch.py +4 -1
  232. agno/tools/bitbucket.py +4 -4
  233. agno/tools/brandfetch.py +14 -11
  234. agno/tools/bravesearch.py +4 -1
  235. agno/tools/brightdata.py +42 -22
  236. agno/tools/browserbase.py +13 -4
  237. agno/tools/calcom.py +12 -10
  238. agno/tools/calculator.py +10 -27
  239. agno/tools/cartesia.py +18 -13
  240. agno/tools/{clickup_tool.py → clickup.py} +12 -25
  241. agno/tools/confluence.py +8 -8
  242. agno/tools/crawl4ai.py +7 -1
  243. agno/tools/csv_toolkit.py +9 -8
  244. agno/tools/dalle.py +18 -11
  245. agno/tools/daytona.py +13 -16
  246. agno/tools/decorator.py +6 -3
  247. agno/tools/desi_vocal.py +16 -7
  248. agno/tools/discord.py +11 -8
  249. agno/tools/docker.py +30 -42
  250. agno/tools/duckdb.py +34 -53
  251. agno/tools/duckduckgo.py +8 -7
  252. agno/tools/e2b.py +61 -61
  253. agno/tools/eleven_labs.py +35 -28
  254. agno/tools/email.py +4 -1
  255. agno/tools/evm.py +7 -1
  256. agno/tools/exa.py +19 -14
  257. agno/tools/fal.py +29 -29
  258. agno/tools/file.py +9 -8
  259. agno/tools/financial_datasets.py +25 -44
  260. agno/tools/firecrawl.py +22 -22
  261. agno/tools/function.py +127 -18
  262. agno/tools/giphy.py +22 -10
  263. agno/tools/github.py +48 -126
  264. agno/tools/gmail.py +45 -61
  265. agno/tools/google_bigquery.py +7 -6
  266. agno/tools/google_maps.py +11 -26
  267. agno/tools/googlesearch.py +7 -2
  268. agno/tools/googlesheets.py +21 -17
  269. agno/tools/hackernews.py +9 -5
  270. agno/tools/jina.py +5 -4
  271. agno/tools/jira.py +18 -9
  272. agno/tools/knowledge.py +31 -32
  273. agno/tools/linear.py +19 -34
  274. agno/tools/linkup.py +5 -1
  275. agno/tools/local_file_system.py +8 -5
  276. agno/tools/lumalab.py +31 -19
  277. agno/tools/mem0.py +18 -12
  278. agno/tools/memori.py +14 -10
  279. agno/tools/mlx_transcribe.py +3 -2
  280. agno/tools/models/azure_openai.py +32 -14
  281. agno/tools/models/gemini.py +58 -31
  282. agno/tools/models/groq.py +29 -20
  283. agno/tools/models/nebius.py +27 -11
  284. agno/tools/models_labs.py +39 -15
  285. agno/tools/moviepy_video.py +7 -6
  286. agno/tools/neo4j.py +10 -8
  287. agno/tools/newspaper.py +7 -2
  288. agno/tools/newspaper4k.py +8 -3
  289. agno/tools/openai.py +57 -26
  290. agno/tools/openbb.py +12 -11
  291. agno/tools/opencv.py +62 -46
  292. agno/tools/openweather.py +14 -12
  293. agno/tools/pandas.py +11 -3
  294. agno/tools/postgres.py +4 -12
  295. agno/tools/pubmed.py +4 -1
  296. agno/tools/python.py +9 -22
  297. agno/tools/reasoning.py +35 -27
  298. agno/tools/reddit.py +11 -26
  299. agno/tools/replicate.py +54 -41
  300. agno/tools/resend.py +4 -1
  301. agno/tools/scrapegraph.py +15 -14
  302. agno/tools/searxng.py +10 -23
  303. agno/tools/serpapi.py +6 -3
  304. agno/tools/serper.py +13 -4
  305. agno/tools/shell.py +9 -2
  306. agno/tools/slack.py +12 -11
  307. agno/tools/sleep.py +3 -2
  308. agno/tools/spider.py +24 -4
  309. agno/tools/sql.py +7 -6
  310. agno/tools/tavily.py +6 -4
  311. agno/tools/telegram.py +12 -4
  312. agno/tools/todoist.py +11 -31
  313. agno/tools/toolkit.py +1 -1
  314. agno/tools/trafilatura.py +22 -6
  315. agno/tools/trello.py +9 -22
  316. agno/tools/twilio.py +10 -3
  317. agno/tools/user_control_flow.py +6 -1
  318. agno/tools/valyu.py +34 -5
  319. agno/tools/visualization.py +19 -28
  320. agno/tools/webbrowser.py +4 -3
  321. agno/tools/webex.py +11 -7
  322. agno/tools/website.py +15 -46
  323. agno/tools/webtools.py +12 -4
  324. agno/tools/whatsapp.py +5 -9
  325. agno/tools/wikipedia.py +20 -13
  326. agno/tools/x.py +14 -13
  327. agno/tools/yfinance.py +13 -40
  328. agno/tools/youtube.py +26 -20
  329. agno/tools/zendesk.py +7 -2
  330. agno/tools/zep.py +10 -7
  331. agno/tools/zoom.py +10 -9
  332. agno/utils/common.py +1 -19
  333. agno/utils/events.py +95 -118
  334. agno/utils/gemini.py +31 -1
  335. agno/utils/knowledge.py +29 -0
  336. agno/utils/log.py +2 -2
  337. agno/utils/mcp.py +11 -5
  338. agno/utils/media.py +39 -0
  339. agno/utils/message.py +12 -1
  340. agno/utils/models/claude.py +55 -4
  341. agno/utils/models/mistral.py +8 -7
  342. agno/utils/models/schema_utils.py +3 -3
  343. agno/utils/pprint.py +33 -32
  344. agno/utils/print_response/agent.py +779 -0
  345. agno/utils/print_response/team.py +1565 -0
  346. agno/utils/print_response/workflow.py +1451 -0
  347. agno/utils/prompts.py +14 -14
  348. agno/utils/reasoning.py +87 -0
  349. agno/utils/response.py +42 -42
  350. agno/utils/streamlit.py +454 -0
  351. agno/utils/string.py +8 -22
  352. agno/utils/team.py +50 -0
  353. agno/utils/timer.py +2 -2
  354. agno/vectordb/base.py +33 -21
  355. agno/vectordb/cassandra/cassandra.py +287 -23
  356. agno/vectordb/chroma/chromadb.py +482 -59
  357. agno/vectordb/clickhouse/clickhousedb.py +270 -63
  358. agno/vectordb/couchbase/couchbase.py +309 -29
  359. agno/vectordb/lancedb/lance_db.py +360 -21
  360. agno/vectordb/langchaindb/__init__.py +5 -0
  361. agno/vectordb/langchaindb/langchaindb.py +145 -0
  362. agno/vectordb/lightrag/__init__.py +5 -0
  363. agno/vectordb/lightrag/lightrag.py +374 -0
  364. agno/vectordb/llamaindex/llamaindexdb.py +127 -0
  365. agno/vectordb/milvus/milvus.py +242 -32
  366. agno/vectordb/mongodb/mongodb.py +200 -24
  367. agno/vectordb/pgvector/pgvector.py +319 -37
  368. agno/vectordb/pineconedb/pineconedb.py +221 -27
  369. agno/vectordb/qdrant/qdrant.py +334 -14
  370. agno/vectordb/singlestore/singlestore.py +286 -29
  371. agno/vectordb/surrealdb/surrealdb.py +187 -7
  372. agno/vectordb/upstashdb/upstashdb.py +342 -26
  373. agno/vectordb/weaviate/weaviate.py +227 -165
  374. agno/workflow/__init__.py +17 -13
  375. agno/workflow/{v2/condition.py → condition.py} +135 -32
  376. agno/workflow/{v2/loop.py → loop.py} +115 -28
  377. agno/workflow/{v2/parallel.py → parallel.py} +138 -108
  378. agno/workflow/{v2/router.py → router.py} +133 -32
  379. agno/workflow/{v2/step.py → step.py} +200 -42
  380. agno/workflow/{v2/steps.py → steps.py} +147 -66
  381. agno/workflow/types.py +482 -0
  382. agno/workflow/workflow.py +2401 -696
  383. agno-2.0.0rc1.dist-info/METADATA +355 -0
  384. agno-2.0.0rc1.dist-info/RECORD +516 -0
  385. agno/agent/metrics.py +0 -107
  386. agno/api/app.py +0 -35
  387. agno/api/playground.py +0 -92
  388. agno/api/schemas/app.py +0 -12
  389. agno/api/schemas/playground.py +0 -22
  390. agno/api/schemas/user.py +0 -35
  391. agno/api/schemas/workspace.py +0 -46
  392. agno/api/user.py +0 -160
  393. agno/api/workflows.py +0 -33
  394. agno/api/workspace.py +0 -175
  395. agno/app/agui/__init__.py +0 -3
  396. agno/app/agui/app.py +0 -17
  397. agno/app/agui/sync_router.py +0 -120
  398. agno/app/base.py +0 -186
  399. agno/app/discord/__init__.py +0 -3
  400. agno/app/fastapi/__init__.py +0 -3
  401. agno/app/fastapi/app.py +0 -107
  402. agno/app/fastapi/async_router.py +0 -457
  403. agno/app/fastapi/sync_router.py +0 -448
  404. agno/app/playground/app.py +0 -228
  405. agno/app/playground/async_router.py +0 -1050
  406. agno/app/playground/deploy.py +0 -249
  407. agno/app/playground/operator.py +0 -183
  408. agno/app/playground/schemas.py +0 -220
  409. agno/app/playground/serve.py +0 -55
  410. agno/app/playground/sync_router.py +0 -1042
  411. agno/app/playground/utils.py +0 -46
  412. agno/app/settings.py +0 -15
  413. agno/app/slack/__init__.py +0 -3
  414. agno/app/slack/app.py +0 -19
  415. agno/app/slack/sync_router.py +0 -92
  416. agno/app/utils.py +0 -54
  417. agno/app/whatsapp/__init__.py +0 -3
  418. agno/app/whatsapp/app.py +0 -15
  419. agno/app/whatsapp/sync_router.py +0 -197
  420. agno/cli/auth_server.py +0 -249
  421. agno/cli/config.py +0 -274
  422. agno/cli/console.py +0 -88
  423. agno/cli/credentials.py +0 -23
  424. agno/cli/entrypoint.py +0 -571
  425. agno/cli/operator.py +0 -357
  426. agno/cli/settings.py +0 -96
  427. agno/cli/ws/ws_cli.py +0 -817
  428. agno/constants.py +0 -13
  429. agno/document/__init__.py +0 -5
  430. agno/document/chunking/semantic.py +0 -45
  431. agno/document/chunking/strategy.py +0 -31
  432. agno/document/reader/__init__.py +0 -5
  433. agno/document/reader/base.py +0 -47
  434. agno/document/reader/docx_reader.py +0 -60
  435. agno/document/reader/gcs/pdf_reader.py +0 -44
  436. agno/document/reader/s3/pdf_reader.py +0 -59
  437. agno/document/reader/s3/text_reader.py +0 -63
  438. agno/document/reader/url_reader.py +0 -59
  439. agno/document/reader/youtube_reader.py +0 -58
  440. agno/embedder/__init__.py +0 -5
  441. agno/embedder/langdb.py +0 -80
  442. agno/embedder/mistral.py +0 -82
  443. agno/embedder/openai.py +0 -78
  444. agno/file/__init__.py +0 -5
  445. agno/file/file.py +0 -16
  446. agno/file/local/csv.py +0 -32
  447. agno/file/local/txt.py +0 -19
  448. agno/infra/app.py +0 -240
  449. agno/infra/base.py +0 -144
  450. agno/infra/context.py +0 -20
  451. agno/infra/db_app.py +0 -52
  452. agno/infra/resource.py +0 -205
  453. agno/infra/resources.py +0 -55
  454. agno/knowledge/agent.py +0 -702
  455. agno/knowledge/arxiv.py +0 -33
  456. agno/knowledge/combined.py +0 -36
  457. agno/knowledge/csv.py +0 -144
  458. agno/knowledge/csv_url.py +0 -124
  459. agno/knowledge/document.py +0 -223
  460. agno/knowledge/docx.py +0 -137
  461. agno/knowledge/firecrawl.py +0 -34
  462. agno/knowledge/gcs/__init__.py +0 -0
  463. agno/knowledge/gcs/base.py +0 -39
  464. agno/knowledge/gcs/pdf.py +0 -125
  465. agno/knowledge/json.py +0 -137
  466. agno/knowledge/langchain.py +0 -71
  467. agno/knowledge/light_rag.py +0 -273
  468. agno/knowledge/llamaindex.py +0 -66
  469. agno/knowledge/markdown.py +0 -154
  470. agno/knowledge/pdf.py +0 -164
  471. agno/knowledge/pdf_bytes.py +0 -42
  472. agno/knowledge/pdf_url.py +0 -148
  473. agno/knowledge/s3/__init__.py +0 -0
  474. agno/knowledge/s3/base.py +0 -64
  475. agno/knowledge/s3/pdf.py +0 -33
  476. agno/knowledge/s3/text.py +0 -34
  477. agno/knowledge/text.py +0 -141
  478. agno/knowledge/url.py +0 -46
  479. agno/knowledge/website.py +0 -179
  480. agno/knowledge/wikipedia.py +0 -32
  481. agno/knowledge/youtube.py +0 -35
  482. agno/memory/agent.py +0 -423
  483. agno/memory/classifier.py +0 -104
  484. agno/memory/db/__init__.py +0 -5
  485. agno/memory/db/base.py +0 -42
  486. agno/memory/db/mongodb.py +0 -189
  487. agno/memory/db/postgres.py +0 -203
  488. agno/memory/db/sqlite.py +0 -193
  489. agno/memory/memory.py +0 -22
  490. agno/memory/row.py +0 -36
  491. agno/memory/summarizer.py +0 -201
  492. agno/memory/summary.py +0 -19
  493. agno/memory/team.py +0 -415
  494. agno/memory/v2/__init__.py +0 -2
  495. agno/memory/v2/db/__init__.py +0 -1
  496. agno/memory/v2/db/base.py +0 -42
  497. agno/memory/v2/db/firestore.py +0 -339
  498. agno/memory/v2/db/mongodb.py +0 -196
  499. agno/memory/v2/db/postgres.py +0 -214
  500. agno/memory/v2/db/redis.py +0 -187
  501. agno/memory/v2/db/schema.py +0 -54
  502. agno/memory/v2/db/sqlite.py +0 -209
  503. agno/memory/v2/manager.py +0 -437
  504. agno/memory/v2/memory.py +0 -1097
  505. agno/memory/v2/schema.py +0 -55
  506. agno/memory/v2/summarizer.py +0 -215
  507. agno/memory/workflow.py +0 -38
  508. agno/models/ollama/tools.py +0 -430
  509. agno/models/qwen/__init__.py +0 -5
  510. agno/playground/__init__.py +0 -10
  511. agno/playground/deploy.py +0 -3
  512. agno/playground/playground.py +0 -3
  513. agno/playground/serve.py +0 -3
  514. agno/playground/settings.py +0 -3
  515. agno/reranker/__init__.py +0 -0
  516. agno/run/v2/__init__.py +0 -0
  517. agno/run/v2/workflow.py +0 -567
  518. agno/storage/__init__.py +0 -0
  519. agno/storage/agent/__init__.py +0 -0
  520. agno/storage/agent/dynamodb.py +0 -1
  521. agno/storage/agent/json.py +0 -1
  522. agno/storage/agent/mongodb.py +0 -1
  523. agno/storage/agent/postgres.py +0 -1
  524. agno/storage/agent/singlestore.py +0 -1
  525. agno/storage/agent/sqlite.py +0 -1
  526. agno/storage/agent/yaml.py +0 -1
  527. agno/storage/base.py +0 -60
  528. agno/storage/dynamodb.py +0 -673
  529. agno/storage/firestore.py +0 -297
  530. agno/storage/gcs_json.py +0 -261
  531. agno/storage/in_memory.py +0 -234
  532. agno/storage/json.py +0 -237
  533. agno/storage/mongodb.py +0 -328
  534. agno/storage/mysql.py +0 -685
  535. agno/storage/postgres.py +0 -682
  536. agno/storage/redis.py +0 -336
  537. agno/storage/session/__init__.py +0 -16
  538. agno/storage/session/agent.py +0 -64
  539. agno/storage/session/team.py +0 -63
  540. agno/storage/session/v2/__init__.py +0 -5
  541. agno/storage/session/workflow.py +0 -61
  542. agno/storage/singlestore.py +0 -606
  543. agno/storage/sqlite.py +0 -646
  544. agno/storage/workflow/__init__.py +0 -0
  545. agno/storage/workflow/mongodb.py +0 -1
  546. agno/storage/workflow/postgres.py +0 -1
  547. agno/storage/workflow/sqlite.py +0 -1
  548. agno/storage/yaml.py +0 -241
  549. agno/tools/thinking.py +0 -73
  550. agno/utils/defaults.py +0 -57
  551. agno/utils/filesystem.py +0 -39
  552. agno/utils/git.py +0 -52
  553. agno/utils/json_io.py +0 -30
  554. agno/utils/load_env.py +0 -19
  555. agno/utils/py_io.py +0 -19
  556. agno/utils/pyproject.py +0 -18
  557. agno/utils/resource_filter.py +0 -31
  558. agno/workflow/v2/__init__.py +0 -21
  559. agno/workflow/v2/types.py +0 -357
  560. agno/workflow/v2/workflow.py +0 -3312
  561. agno/workspace/__init__.py +0 -0
  562. agno/workspace/config.py +0 -325
  563. agno/workspace/enums.py +0 -6
  564. agno/workspace/helpers.py +0 -52
  565. agno/workspace/operator.py +0 -757
  566. agno/workspace/settings.py +0 -158
  567. agno-1.8.1.dist-info/METADATA +0 -982
  568. agno-1.8.1.dist-info/RECORD +0 -566
  569. agno-1.8.1.dist-info/entry_points.txt +0 -3
  570. /agno/{app → db/migrations}/__init__.py +0 -0
  571. /agno/{app/playground/__init__.py → db/schemas/metrics.py} +0 -0
  572. /agno/{cli → integrations}/__init__.py +0 -0
  573. /agno/{cli/ws → knowledge/chunking}/__init__.py +0 -0
  574. /agno/{document/chunking → knowledge/remote_content}/__init__.py +0 -0
  575. /agno/{document/reader/gcs → knowledge/reranker}/__init__.py +0 -0
  576. /agno/{document/reader/s3 → os/interfaces}/__init__.py +0 -0
  577. /agno/{app → os/interfaces}/slack/security.py +0 -0
  578. /agno/{app → os/interfaces}/whatsapp/security.py +0 -0
  579. /agno/{file/local → utils/print_response}/__init__.py +0 -0
  580. /agno/{infra → vectordb/llamaindex}/__init__.py +0 -0
  581. {agno-1.8.1.dist-info → agno-2.0.0rc1.dist-info}/WHEEL +0 -0
  582. {agno-1.8.1.dist-info → agno-2.0.0rc1.dist-info}/licenses/LICENSE +0 -0
  583. {agno-1.8.1.dist-info → agno-2.0.0rc1.dist-info}/top_level.txt +0 -0
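Most of the 2.0.0rc1 changes above are relocations rather than rewrites: agno/storage/* is superseded by the new agno/db/* backends, agno/document/* and agno/embedder/* move under agno/knowledge/*, the agno/app/* Playground applications are replaced by agno/os/*, and agno/workflow/v2/* is promoted to agno/workflow/*. As a rough sketch of what the new layout implies for imports (module paths are taken from the listing above; the class names are assumptions inferred from the 1.8.x names and should be verified against the released package):

    # Hedged sketch: 2.0 import locations implied by the renames listed above.
    # Class names are assumptions, not shown in this listing.
    from agno.db.postgres import PostgresDb                     # was agno/storage/postgres.py
    from agno.knowledge.embedder.openai import OpenAIEmbedder   # was agno/embedder/openai.py
    from agno.knowledge.reader.pdf_reader import PDFReader      # was agno/document/reader/pdf_reader.py
    from agno.knowledge.knowledge import Knowledge              # replaces agno/knowledge/agent.py
    from agno.workflow.workflow import Workflow                 # was agno/workflow/v2/workflow.py
    from agno.os.app import AgentOS                             # replaces the agno/app/playground application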
agno/workflow/workflow.py CHANGED
@@ -1,807 +1,2512 @@
1
- from __future__ import annotations
2
-
3
- import collections.abc
4
- import inspect
5
- from dataclasses import dataclass, field, fields
1
+ import asyncio
2
+ from dataclasses import dataclass
3
+ from datetime import datetime
6
4
  from os import getenv
7
- from types import GeneratorType
8
- from typing import Any, AsyncIterator, Callable, Dict, List, Optional, Union, cast, get_args
5
+ from typing import (
6
+ Any,
7
+ AsyncIterator,
8
+ Awaitable,
9
+ Callable,
10
+ Dict,
11
+ Iterator,
12
+ List,
13
+ Literal,
14
+ Optional,
15
+ Tuple,
16
+ Type,
17
+ Union,
18
+ cast,
19
+ overload,
20
+ )
9
21
  from uuid import uuid4
10
22
 
23
+ from fastapi import WebSocket
11
24
  from pydantic import BaseModel
12
25
 
13
- from agno.agent import Agent
14
- from agno.media import AudioArtifact, ImageArtifact, VideoArtifact
15
- from agno.memory.v2.memory import Memory
16
- from agno.memory.workflow import WorkflowMemory, WorkflowRun
17
- from agno.run.response import RunResponse, RunResponseEvent
18
- from agno.run.team import TeamRunResponseEvent
19
- from agno.run.workflow import WorkflowRunResponseEvent
20
- from agno.storage.base import Storage
21
- from agno.storage.session.workflow import WorkflowSession
22
- from agno.utils.common import nested_model_dump
23
- from agno.utils.log import log_debug, log_warning, logger, set_log_level_to_debug, set_log_level_to_info
24
- from agno.utils.merge_dict import merge_dictionaries
25
-
26
-
27
- @dataclass(init=False)
26
+ from agno.agent.agent import Agent
27
+ from agno.db.base import BaseDb, SessionType
28
+ from agno.exceptions import RunCancelledException
29
+ from agno.media import Audio, AudioArtifact, File, Image, ImageArtifact, Video, VideoArtifact
30
+ from agno.models.message import Message
31
+ from agno.models.metrics import Metrics
32
+ from agno.run.agent import RunEvent
33
+ from agno.run.base import RunStatus
34
+ from agno.run.cancel import (
35
+ cancel_run as cancel_run_global,
36
+ )
37
+ from agno.run.cancel import (
38
+ cleanup_run,
39
+ raise_if_cancelled,
40
+ register_run,
41
+ )
42
+ from agno.run.team import TeamRunEvent
43
+ from agno.run.workflow import (
44
+ StepOutputEvent,
45
+ WorkflowCancelledEvent,
46
+ WorkflowCompletedEvent,
47
+ WorkflowRunEvent,
48
+ WorkflowRunOutput,
49
+ WorkflowRunOutputEvent,
50
+ WorkflowStartedEvent,
51
+ )
52
+ from agno.session.workflow import WorkflowSession
53
+ from agno.team.team import Team
54
+ from agno.utils.log import (
55
+ log_debug,
56
+ log_warning,
57
+ logger,
58
+ set_log_level_to_debug,
59
+ set_log_level_to_info,
60
+ use_workflow_logger,
61
+ )
62
+ from agno.utils.print_response.workflow import (
63
+ aprint_response,
64
+ aprint_response_stream,
65
+ print_response,
66
+ print_response_stream,
67
+ )
68
+ from agno.workflow.condition import Condition
69
+ from agno.workflow.loop import Loop
70
+ from agno.workflow.parallel import Parallel
71
+ from agno.workflow.router import Router
72
+ from agno.workflow.step import Step
73
+ from agno.workflow.steps import Steps
74
+ from agno.workflow.types import (
75
+ StepInput,
76
+ StepMetrics,
77
+ StepOutput,
78
+ StepType,
79
+ WebSocketHandler,
80
+ WorkflowExecutionInput,
81
+ WorkflowMetrics,
82
+ )
83
+
84
+ STEP_TYPE_MAPPING = {
85
+ Step: StepType.STEP,
86
+ Steps: StepType.STEPS,
87
+ Loop: StepType.LOOP,
88
+ Parallel: StepType.PARALLEL,
89
+ Condition: StepType.CONDITION,
90
+ Router: StepType.ROUTER,
91
+ }
92
+
93
+ WorkflowSteps = Union[
94
+ Callable[
95
+ ["Workflow", WorkflowExecutionInput],
96
+ Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput], Any],
97
+ ],
98
+ Steps,
99
+ List[
100
+ Union[
101
+ Callable[
102
+ [StepInput], Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput]]
103
+ ],
104
+ Step,
105
+ Steps,
106
+ Loop,
107
+ Parallel,
108
+ Condition,
109
+ Router,
110
+ ]
111
+ ],
112
+ ]
113
+
114
+
115
+ @dataclass
28
116
  class Workflow:
29
- # --- Workflow settings ---
30
- # Workflow name
117
+ """Pipeline-based workflow execution"""
118
+
119
+ # Workflow identification - make name optional with default
31
120
  name: Optional[str] = None
32
- # Workflow UUID (autogenerated if not set)
33
- workflow_id: Optional[str] = None
34
- # Workflow app_id (autogenerated if not set)
35
- app_id: Optional[str] = None
36
- # Workflow description (only shown in the UI)
121
+ # Workflow ID (autogenerated if not set)
122
+ id: Optional[str] = None
123
+ # Workflow description
37
124
  description: Optional[str] = None
38
125
 
39
- # --- User settings ---
40
- # ID of the user interacting with this workflow
41
- user_id: Optional[str] = None
126
+ # Workflow steps
127
+ steps: Optional[WorkflowSteps] = None
42
128
 
43
- # -*- Session settings
44
- # Session UUID (autogenerated if not set)
129
+ # Database to use for this workflow
130
+ db: Optional[BaseDb] = None
131
+
132
+ # Default session_id to use for this workflow (autogenerated if not set)
45
133
  session_id: Optional[str] = None
46
- # Session name
47
- session_name: Optional[str] = None
48
- # Session state stored in the database
49
- session_state: Dict[str, Any] = field(default_factory=dict)
50
-
51
- # --- Workflow Memory ---
52
- memory: Optional[Union[WorkflowMemory, Memory]] = None
53
-
54
- # --- Workflow Storage ---
55
- storage: Optional[Storage] = None
56
- # Extra data stored with this workflow
57
- extra_data: Optional[Dict[str, Any]] = None
58
-
59
- # --- Debug & Monitoring ---
60
- # Enable debug logs
61
- debug_mode: bool = False
62
- # monitoring=True logs Workflow information to agno.com for monitoring
63
- monitoring: bool = field(default_factory=lambda: getenv("AGNO_MONITOR", "false").lower() == "true")
134
+ # Default user_id to use for this workflow
135
+ user_id: Optional[str] = None
136
+ # Default session state (stored in the database to persist across runs)
137
+ session_state: Optional[Dict[str, Any]] = None
138
+
139
+ # If True, the workflow runs in debug mode
140
+ debug_mode: Optional[bool] = False
141
+
142
+ # --- Workflow Streaming ---
143
+ # Stream the response from the Workflow
144
+ stream: Optional[bool] = None
145
+ # Stream the intermediate steps from the Workflow
146
+ stream_intermediate_steps: bool = False
147
+
148
+ # Persist the events on the run response
149
+ store_events: bool = False
150
+ # Events to skip when persisting the events on the run response
151
+ events_to_skip: Optional[List[Union[WorkflowRunEvent, RunEvent, TeamRunEvent]]] = None
152
+
153
+ # Control whether to store executor responses (agent/team responses) in flattened runs
154
+ store_executor_outputs: bool = True
155
+
156
+ websocket_handler: Optional[WebSocketHandler] = None
157
+
158
+ # Input schema to validate the input to the workflow
159
+ input_schema: Optional[Type[BaseModel]] = None
160
+
161
+ # Metadata stored with this workflow
162
+ metadata: Optional[Dict[str, Any]] = None
163
+
164
+ # --- Telemetry ---
64
165
  # telemetry=True logs minimal telemetry for analytics
65
- # This helps us improve the Workflow and provide better support
66
- telemetry: bool = field(default_factory=lambda: getenv("AGNO_TELEMETRY", "true").lower() == "true")
67
-
68
- # --- Run Info: DO NOT SET ---
69
- run_id: Optional[str] = None
70
- run_input: Optional[Dict[str, Any]] = None
71
- run_response: Optional[RunResponse] = None
72
- # Images generated during this session
73
- images: Optional[List[ImageArtifact]] = None
74
- # Videos generated during this session
75
- videos: Optional[List[VideoArtifact]] = None
76
- # Audio generated during this session
77
- audio: Optional[List[AudioArtifact]] = None
166
+ # This helps us improve the Agent and provide better support
167
+ telemetry: bool = True
78
168
 
79
169
  def __init__(
80
170
  self,
81
- *,
171
+ id: Optional[str] = None,
82
172
  name: Optional[str] = None,
83
- workflow_id: Optional[str] = None,
84
173
  description: Optional[str] = None,
85
- user_id: Optional[str] = None,
174
+ db: Optional[BaseDb] = None,
175
+ steps: Optional[WorkflowSteps] = None,
86
176
  session_id: Optional[str] = None,
87
- session_name: Optional[str] = None,
88
177
  session_state: Optional[Dict[str, Any]] = None,
89
- memory: Optional[Union[WorkflowMemory, Memory]] = None,
90
- storage: Optional[Storage] = None,
91
- extra_data: Optional[Dict[str, Any]] = None,
92
- debug_mode: bool = False,
93
- monitoring: bool = False,
178
+ user_id: Optional[str] = None,
179
+ debug_mode: Optional[bool] = False,
180
+ stream: Optional[bool] = None,
181
+ stream_intermediate_steps: bool = False,
182
+ store_events: bool = False,
183
+ events_to_skip: Optional[List[Union[WorkflowRunEvent, RunEvent, TeamRunEvent]]] = None,
184
+ store_executor_outputs: bool = True,
185
+ input_schema: Optional[Type[BaseModel]] = None,
186
+ metadata: Optional[Dict[str, Any]] = None,
187
+ cache_session: bool = False,
94
188
  telemetry: bool = True,
95
- app_id: Optional[str] = None,
96
189
  ):
97
- self.name = name or self.__class__.__name__
98
- self.workflow_id = workflow_id
99
- self.description = description or self.__class__.description
100
- self.app_id = app_id
101
-
190
+ self.id = id
191
+ self.name = name
192
+ self.description = description
193
+ self.steps = steps
194
+ self.session_id = session_id
195
+ self.session_state = session_state
102
196
  self.user_id = user_id
197
+ self.debug_mode = debug_mode
198
+ self.store_events = store_events
199
+ self.events_to_skip = events_to_skip or []
200
+ self.stream = stream
201
+ self.stream_intermediate_steps = stream_intermediate_steps
202
+ self.store_executor_outputs = store_executor_outputs
203
+ self.input_schema = input_schema
204
+ self.metadata = metadata
205
+ self.cache_session = cache_session
206
+ self.db = db
207
+ self.telemetry = telemetry
103
208
 
104
- self.session_id = session_id
105
- self.session_name = session_name
106
- self.session_state: Dict[str, Any] = session_state or {}
209
+ self._workflow_session: Optional[WorkflowSession] = None
107
210
 
108
- self.memory = memory
109
- self.storage = storage
110
- self.extra_data = extra_data
211
+ def set_id(self) -> None:
212
+ if self.id is None:
213
+ if self.name is not None:
214
+ self.id = self.name.lower().replace(" ", "-")
215
+ else:
216
+ self.id = str(uuid4())
111
217
 
112
- self.debug_mode = debug_mode
113
- self.monitoring = monitoring
114
- self.telemetry = telemetry
218
+ def _validate_input(
219
+ self, input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]]
220
+ ) -> Optional[BaseModel]:
221
+ """Parse and validate input against input_schema if provided"""
222
+ if self.input_schema is None:
223
+ return None
115
224
 
116
- self.run_id = None
117
- self.run_input = None
118
- self.run_response = None
119
- self.images = None
120
- self.videos = None
121
- self.audio = None
225
+ if input is None:
226
+ raise ValueError("Input required when input_schema is set")
122
227
 
123
- self.workflow_session: Optional[WorkflowSession] = None
228
+ # Case 1: Message is already a BaseModel instance
229
+ if isinstance(input, BaseModel):
230
+ if isinstance(input, self.input_schema):
231
+ try:
232
+ # Re-validate to catch any field validation errors
233
+ input.model_validate(input.model_dump())
234
+ return input
235
+ except Exception as e:
236
+ raise ValueError(f"BaseModel validation failed: {str(e)}")
237
+ else:
238
+ # Different BaseModel types
239
+ raise ValueError(f"Expected {self.input_schema.__name__} but got {type(input).__name__}")
124
240
 
125
- # Private attributes to store the run method and its parameters
126
- # The run function provided by the subclass
127
- self._subclass_run: Optional[Callable] = None
128
- self._subclass_arun: Optional[Callable] = None
241
+ # Case 2: Message is a dict
242
+ elif isinstance(input, dict):
243
+ try:
244
+ validated_model = self.input_schema(**input)
245
+ return validated_model
246
+ except Exception as e:
247
+ raise ValueError(f"Failed to parse dict into {self.input_schema.__name__}: {str(e)}")
129
248
 
130
- # Parameters of the run function
131
- self._run_parameters: Optional[Dict[str, Any]] = None
132
- # Return type of the run function
133
- self._run_return_type: Optional[str] = None
249
+ # Case 3: Other types not supported for structured input
250
+ else:
251
+ raise ValueError(
252
+ f"Cannot validate {type(input)} against input_schema. Expected dict or {self.input_schema.__name__} instance."
253
+ )
134
254
 
135
- self.update_run_method()
255
+ @property
256
+ def run_parameters(self) -> Dict[str, Any]:
257
+ """Get the run parameters for the workflow"""
258
+
259
+ if self.steps is None:
260
+ return {}
261
+
262
+ parameters = {}
263
+
264
+ if self.steps and callable(self.steps):
265
+ from inspect import Parameter, signature
266
+
267
+ sig = signature(self.steps) # type: ignore
268
+
269
+ for param_name, param in sig.parameters.items():
270
+ if param_name not in ["workflow", "execution_input", "self"]:
271
+ parameters[param_name] = {
272
+ "name": param_name,
273
+ "default": param.default.default
274
+ if hasattr(param.default, "__class__") and param.default.__class__.__name__ == "FieldInfo"
275
+ else (param.default if param.default is not Parameter.empty else None),
276
+ "annotation": (
277
+ param.annotation.__name__
278
+ if hasattr(param.annotation, "__name__")
279
+ else (
280
+ str(param.annotation).replace("typing.Optional[", "").replace("]", "")
281
+ if "typing.Optional" in str(param.annotation)
282
+ else str(param.annotation)
283
+ )
284
+ )
285
+ if param.annotation is not Parameter.empty
286
+ else None,
287
+ "required": param.default is Parameter.empty,
288
+ }
289
+ else:
290
+ parameters = {
291
+ "message": {
292
+ "name": "message",
293
+ "default": None,
294
+ "annotation": "str",
295
+ "required": True,
296
+ },
297
+ }
136
298
 
137
- self.__post_init__()
299
+ return parameters
138
300
 
139
- def __post_init__(self):
140
- for field_name, value in self.__class__.__dict__.items():
141
- if isinstance(value, Agent):
142
- value.session_id = self.session_id
301
+ def initialize_workflow(self):
302
+ if self.id is None:
303
+ self.set_id()
304
+ log_debug(f"Generated new workflow_id: {self.id}")
143
305
 
144
- def run(self, **kwargs: Any):
145
- logger.error(f"{self.__class__.__name__}.run() method not implemented.")
146
- return
306
+ def _initialize_session(
307
+ self,
308
+ session_id: Optional[str] = None,
309
+ user_id: Optional[str] = None,
310
+ session_state: Optional[Dict[str, Any]] = None,
311
+ run_id: Optional[str] = None,
312
+ ) -> Tuple[str, Optional[str], Dict[str, Any]]:
313
+ """Initialize the session for the agent."""
147
314
 
148
- def run_workflow(self, **kwargs: Any):
149
- """Run the Workflow"""
315
+ if session_id is None:
316
+ if self.session_id:
317
+ session_id = self.session_id
318
+ else:
319
+ session_id = str(uuid4())
320
+ # We make the session_id sticky to the agent instance if no session_id is provided
321
+ self.session_id = session_id
322
+
323
+ log_debug(f"Session ID: {session_id}", center=True)
324
+
325
+ # Use the default user_id when necessary
326
+ if user_id is None:
327
+ user_id = self.user_id
328
+
329
+ # Determine the session_state
330
+ if session_state is None:
331
+ session_state = self.session_state or {}
332
+
333
+ if user_id is not None:
334
+ session_state["current_user_id"] = user_id
335
+ if session_id is not None:
336
+ session_state["current_session_id"] = session_id
337
+
338
+ session_state.update(
339
+ {
340
+ "workflow_id": self.id,
341
+ "run_id": run_id,
342
+ "session_id": session_id,
343
+ }
344
+ )
345
+ if self.name:
346
+ session_state["workflow_name"] = self.name
150
347
 
151
- # Set mode, debug, workflow_id, session_id, initialize memory
152
- self.set_storage_mode()
153
- self.set_debug()
154
- self.set_monitoring()
155
- self.set_workflow_id() # Ensure workflow_id is set
156
- self.set_session_id()
157
- self.initialize_memory()
348
+ return session_id, user_id, session_state # type: ignore
158
349
 
159
- # Create a run_id
160
- self.run_id = str(uuid4())
350
+ def _generate_workflow_session_name(self) -> str:
351
+ """Generate a name for the workflow session"""
161
352
 
162
- # Set run_input, run_response
163
- self.run_input = kwargs
164
- self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, workflow_id=self.workflow_id)
353
+ if self.session_id is None:
354
+ return f"Workflow Session - {datetime.now().strftime('%Y-%m-%d %H:%M')}"
165
355
 
166
- # Read existing session from storage
167
- self.read_from_storage()
356
+ datetime_str = datetime.now().strftime("%Y-%m-%d %H:%M")
357
+ new_session_name = f"Workflow Session-{datetime_str}"
168
358
 
169
- # Update the session_id for all Agent instances
170
- self.update_agent_session_ids()
359
+ if self.description:
360
+ truncated_desc = self.description[:40] + "-" if len(self.description) > 40 else self.description
361
+ new_session_name = f"{truncated_desc} - {datetime_str}"
362
+ return new_session_name
171
363
 
172
- log_debug(f"Workflow Run Start: {self.run_id}", center=True)
173
- try:
174
- self._subclass_run = cast(Callable, self._subclass_run)
175
- result = self._subclass_run(**kwargs)
176
- except Exception as e:
177
- logger.error(f"Workflow.run() failed: {e}")
178
- raise e
179
-
180
- # The run_workflow() method handles both Iterator[RunResponse] and RunResponse
181
- # Case 1: The run method returns an Iterator[RunResponse]
182
- if isinstance(result, (GeneratorType, collections.abc.Iterator)):
183
- # Initialize the run_response content
184
- self.run_response.content = ""
185
-
186
- def result_generator():
187
- self.run_response = cast(RunResponse, self.run_response)
188
- if isinstance(self.memory, WorkflowMemory):
189
- self.memory = cast(WorkflowMemory, self.memory)
190
- elif isinstance(self.memory, Memory):
191
- self.memory = cast(Memory, self.memory)
192
-
193
- for item in result:
194
- if (
195
- isinstance(item, tuple(get_args(RunResponseEvent)))
196
- or isinstance(item, tuple(get_args(TeamRunResponseEvent)))
197
- or isinstance(item, tuple(get_args(WorkflowRunResponseEvent)))
198
- or isinstance(item, RunResponse)
199
- ):
200
- # Update the run_id, session_id and workflow_id of the RunResponseEvent
201
- item.run_id = self.run_id
202
- item.session_id = self.session_id
203
- item.workflow_id = self.workflow_id
204
-
205
- # Update the run_response with the content from the result
206
- if hasattr(item, "content") and item.content is not None and isinstance(item.content, str):
207
- self.run_response.content += item.content
208
- else:
209
- logger.warning(f"Workflow.run() should only yield RunResponseEvent objects, got: {type(item)}")
210
- yield item
211
-
212
- # Add the run to the memory
213
- if isinstance(self.memory, WorkflowMemory):
214
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
215
- elif isinstance(self.memory, Memory):
216
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
217
- # Write this run to the database
218
- self.write_to_storage()
219
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
220
-
221
- return result_generator()
222
- # Case 2: The run method returns a RunResponse
223
- elif isinstance(result, RunResponse):
224
- # Update the result with the run_id, session_id and workflow_id of the workflow run
225
- result.run_id = self.run_id
226
- result.session_id = self.session_id
227
- result.workflow_id = self.workflow_id
228
-
229
- # Update the run_response with the content from the result
230
- if result.content is not None and isinstance(result.content, str):
231
- self.run_response.content = result.content
232
-
233
- # Add the run to the memory
234
- if isinstance(self.memory, WorkflowMemory):
235
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
236
- elif isinstance(self.memory, Memory):
237
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
238
- # Write this run to the database
239
- self.write_to_storage()
240
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
241
- return result
364
+ def set_session_name(
365
+ self, session_id: Optional[str] = None, autogenerate: bool = False, session_name: Optional[str] = None
366
+ ) -> WorkflowSession:
367
+ """Set the session name and save to storage"""
368
+ session_id = session_id or self.session_id
369
+
370
+ if session_id is None:
371
+ raise Exception("Session ID is not set")
372
+
373
+ # -*- Read from storage
374
+ session = self.get_session(session_id=session_id) # type: ignore
375
+
376
+ if autogenerate:
377
+ # -*- Generate name for session
378
+ session_name = self._generate_workflow_session_name()
379
+ log_debug(f"Generated Workflow Session Name: {session_name}")
380
+ elif session_name is None:
381
+ raise Exception("Session name is not set")
382
+
383
+ # -*- Rename session
384
+ session.session_data["session_name"] = session_name # type: ignore
385
+
386
+ # -*- Save to storage
387
+ self.save_session(session=session) # type: ignore
388
+
389
+ return session # type: ignore
390
+
391
+ def get_session_name(self, session_id: Optional[str] = None) -> str:
392
+ """Get the session name for the given session ID and user ID."""
393
+ session_id = session_id or self.session_id
394
+ if session_id is None:
395
+ raise Exception("Session ID is not set")
396
+ session = self.get_session(session_id=session_id) # type: ignore
397
+ if session is None:
398
+ raise Exception("Session not found")
399
+ return session.session_data.get("session_name", "") if session.session_data else ""
400
+
401
+ def get_session_state(self, session_id: Optional[str] = None) -> Dict[str, Any]:
402
+ """Get the session state for the given session ID and user ID."""
403
+ session_id = session_id or self.session_id
404
+ if session_id is None:
405
+ raise Exception("Session ID is not set")
406
+ session = self.get_session(session_id=session_id) # type: ignore
407
+ if session is None:
408
+ raise Exception("Session not found")
409
+ return session.session_data.get("session_state", {}) if session.session_data else {}
410
+
411
+ def delete_session(self, session_id: str):
412
+ """Delete the current session and save to storage"""
413
+ if self.db is None:
414
+ return
415
+ # -*- Delete session
416
+ self.db.delete_session(session_id=session_id)
417
+
418
+ def get_run_output(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
419
+ """Get a RunOutput from the database."""
420
+ if self._workflow_session is not None:
421
+ run_response = self._workflow_session.get_run(run_id=run_id)
422
+ if run_response is not None:
423
+ return run_response
424
+ else:
425
+ log_warning(f"RunOutput {run_id} not found in AgentSession {self._workflow_session.session_id}")
426
+ return None
242
427
  else:
243
- logger.warning(f"Workflow.run() should only return RunResponse objects, got: {type(result)}")
244
- return None
428
+ workflow_session = self.get_session(session_id=session_id)
429
+ if workflow_session is not None:
430
+ run_response = workflow_session.get_run(run_id=run_id)
431
+ if run_response is not None:
432
+ return run_response
433
+ else:
434
+ log_warning(f"RunOutput {run_id} not found in AgentSession {session_id}")
435
+ return None
436
+
437
+ def get_last_run_output(self, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
438
+ """Get the last run response from the database."""
439
+ if (
440
+ self._workflow_session is not None
441
+ and self._workflow_session.runs is not None
442
+ and len(self._workflow_session.runs) > 0
443
+ ):
444
+ run_response = self._workflow_session.runs[-1]
445
+ if run_response is not None:
446
+ return run_response
447
+ else:
448
+ workflow_session = self.get_session(session_id=session_id)
449
+ if workflow_session is not None and workflow_session.runs is not None and len(workflow_session.runs) > 0:
450
+ run_response = workflow_session.runs[-1]
451
+ if run_response is not None:
452
+ return run_response
453
+ else:
454
+ log_warning(f"No run responses found in WorkflowSession {session_id}")
455
+ return None
456
+
457
+ def read_or_create_session(
458
+ self,
459
+ session_id: str,
460
+ user_id: Optional[str] = None,
461
+ ) -> WorkflowSession:
462
+ from time import time
463
+
464
+ # Returning cached session if we have one
465
+ if self._workflow_session is not None and self._workflow_session.session_id == session_id:
466
+ return self._workflow_session
467
+
468
+ # Try to load from database
469
+ workflow_session = None
470
+ if self.db is not None:
471
+ log_debug(f"Reading WorkflowSession: {session_id}")
472
+
473
+ workflow_session = cast(WorkflowSession, self._read_session(session_id=session_id))
474
+
475
+ if workflow_session is None:
476
+ # Creating new session if none found
477
+ log_debug(f"Creating new WorkflowSession: {session_id}")
478
+ workflow_session = WorkflowSession(
479
+ session_id=session_id,
480
+ workflow_id=self.id,
481
+ user_id=user_id,
482
+ workflow_data=self._get_workflow_data(),
483
+ session_data={},
484
+ metadata=self.metadata,
485
+ created_at=int(time()),
486
+ )
487
+
488
+ # Cache the session if relevant
489
+ if workflow_session is not None and self.cache_session:
490
+ self._workflow_session = workflow_session
491
+
492
+ return workflow_session
493
+
494
+ def get_session(
495
+ self,
496
+ session_id: Optional[str] = None,
497
+ ) -> Optional[WorkflowSession]:
498
+ """Load an WorkflowSession from database.
245
499
 
246
- # Add to workflow.py after the run_workflow method
247
- async def arun_workflow(self, **kwargs: Any):
248
- """Run the Workflow asynchronously"""
500
+ Args:
501
+ session_id: The session_id to load from storage.
249
502
 
250
- # Set mode, debug, workflow_id, session_id, initialize memory
251
- self.set_storage_mode()
252
- self.set_debug()
253
- self.set_monitoring()
254
- self.set_workflow_id() # Ensure workflow_id is set
255
- self.set_session_id()
256
- self.initialize_memory()
503
+ Returns:
504
+ WorkflowSession: The WorkflowSession loaded from the database or created if it does not exist.
505
+ """
506
+ if not session_id and not self.session_id:
507
+ raise Exception("No session_id provided")
257
508
 
258
- # Create a run_id
259
- self.run_id = str(uuid4())
509
+ session_id_to_load = session_id or self.session_id
260
510
 
261
- # Set run_input, run_response
262
- self.run_input = kwargs
263
- self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, workflow_id=self.workflow_id)
511
+ # Try to load from database
512
+ if self.db is not None and session_id_to_load is not None:
513
+ workflow_session = cast(WorkflowSession, self._read_session(session_id=session_id_to_load))
514
+ return workflow_session
264
515
 
265
- # Read existing session from storage
266
- self.read_from_storage()
516
+ log_warning(f"WorkflowSession {session_id_to_load} not found in db")
517
+ return None
267
518
 
268
- # Update the session_id for all Agent instances
269
- self.update_agent_session_ids()
519
+ def save_session(self, session: WorkflowSession) -> None:
520
+ """Save the WorkflowSession to storage
270
521
 
271
- log_debug(f"Workflow Run Start: {self.run_id}", center=True)
522
+ Returns:
523
+ Optional[WorkflowSession]: The saved WorkflowSession or None if not saved.
524
+ """
525
+ if self.db is not None and session.session_data is not None:
526
+ if session.session_data.get("session_state") is not None:
527
+ session.session_data["session_state"].pop("current_session_id", None)
528
+ session.session_data["session_state"].pop("current_user_id", None)
529
+ session.session_data["session_state"].pop("current_run_id", None)
530
+ session.session_data["session_state"].pop("workflow_id", None)
531
+ session.session_data["session_state"].pop("run_id", None)
532
+ session.session_data["session_state"].pop("session_id", None)
533
+ session.session_data["session_state"].pop("workflow_name", None)
534
+
535
+ self._upsert_session(session=session)
536
+ log_debug(f"Created or updated WorkflowSession record: {session.session_id}")
537
+
538
+ # -*- Session Database Functions
539
+ def _read_session(self, session_id: str) -> Optional[WorkflowSession]:
540
+ """Get a Session from the database."""
272
541
  try:
273
- self._subclass_arun = cast(Callable, self._subclass_arun)
274
- result = await self._subclass_arun(**kwargs)
542
+ if not self.db:
543
+ raise ValueError("Db not initialized")
544
+ session = self.db.get_session(session_id=session_id, session_type=SessionType.WORKFLOW)
545
+ return session if isinstance(session, (WorkflowSession, type(None))) else None
275
546
  except Exception as e:
276
- logger.error(f"Workflow.arun() failed: {e}")
277
- raise e
278
-
279
- # Handle single RunResponse result
280
- if isinstance(result, RunResponse):
281
- # Update the result with the run_id, session_id and workflow_id of the workflow run
282
- result.run_id = self.run_id
283
- result.session_id = self.session_id
284
- result.workflow_id = self.workflow_id
285
-
286
- # Update the run_response with the content from the result
287
- if result.content is not None and isinstance(result.content, str):
288
- self.run_response.content = result.content
289
-
290
- # Add the run to the memory
291
- if isinstance(self.memory, WorkflowMemory):
292
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
293
- elif isinstance(self.memory, Memory):
294
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
295
- # Write this run to the database
296
- self.write_to_storage()
297
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
298
- return result
299
- else:
300
- logger.warning(f"Workflow.arun() should only return RunResponse objects, got: {type(result)}")
547
+ log_warning(f"Error getting session from db: {e}")
301
548
  return None
302
549
 
303
- async def arun_workflow_generator(self, **kwargs: Any) -> AsyncIterator[RunResponse]:
304
- """Run the Workflow asynchronously for async generators"""
550
+ def _upsert_session(self, session: WorkflowSession) -> Optional[WorkflowSession]:
551
+ """Upsert a Session into the database."""
305
552
 
306
- # Set mode, debug, workflow_id, session_id, initialize memory
307
- self.set_storage_mode()
308
- self.set_debug()
309
- self.set_monitoring()
310
- self.set_workflow_id() # Ensure workflow_id is set
311
- self.set_session_id()
312
- self.initialize_memory()
553
+ try:
554
+ if not self.db:
555
+ raise ValueError("Db not initialized")
556
+ result = self.db.upsert_session(session=session)
557
+ return result if isinstance(result, (WorkflowSession, type(None))) else None
558
+ except Exception as e:
559
+ log_warning(f"Error upserting session into db: {e}")
560
+ return None
313
561
 
314
- # Create a run_id
315
- self.run_id = str(uuid4())
562
+ def _update_metadata(self, session: WorkflowSession):
563
+ """Update the extra_data in the session"""
564
+ from agno.utils.merge_dict import merge_dictionaries
316
565
 
317
- # Set run_input, run_response
318
- self.run_input = kwargs
319
- self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, workflow_id=self.workflow_id)
566
+ # Read metadata from the database
567
+ if session.metadata is not None:
568
+ # If metadata is set in the workflow, update the database metadata with the workflow's metadata
569
+ if self.metadata is not None:
570
+ # Updates workflow's session metadata in place
571
+ merge_dictionaries(session.metadata, self.metadata)
572
+ # Update the current metadata with the database metadata, which has been merged in place
573
+ self.metadata = session.metadata
320
574
 
321
- # Read existing session from storage
322
- self.read_from_storage()
575
+ def _update_session_state(self, session: WorkflowSession, session_state: Dict[str, Any]):
576
+ """Load the existing Workflow from a WorkflowSession (from the database)"""
323
577
 
324
- # Update the session_id for all Agent instances
325
- self.update_agent_session_ids()
578
+ from agno.utils.merge_dict import merge_dictionaries
579
+
580
+ # Get the session_state from the database and update the current session_state
581
+ if session.session_data and "session_state" in session.session_data:
582
+ session_state_from_db = session.session_data.get("session_state")
583
+
584
+ if (
585
+ session_state_from_db is not None
586
+ and isinstance(session_state_from_db, dict)
587
+ and len(session_state_from_db) > 0
588
+ ):
589
+ # This updates session_state_from_db
590
+ # If there are conflicting keys, values from provided session_state will take precedence
591
+ merge_dictionaries(session_state_from_db, session_state)
592
+ session_state = session_state_from_db
593
+
594
+ # Update the session_state in the session
595
+ if session.session_data is None:
596
+ session.session_data = {}
597
+ session.session_data["session_state"] = session_state
598
+
599
+ return session_state
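For illustration, the merge above gives precedence to the session_state passed into the run while keeping anything already persisted on the session. A minimal standalone sketch of that behaviour (merge_in_place is a hypothetical stand-in for merge_dictionaries, assumed to merge recursively and in place with the second argument winning on conflicts):

```python
from typing import Any, Dict


def merge_in_place(base: Dict[str, Any], overrides: Dict[str, Any]) -> None:
    # Recursive in-place merge; values from `overrides` win on conflicts.
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge_in_place(base[key], value)
        else:
            base[key] = value


state_from_db = {"counter": 3, "user": {"name": "Ada", "plan": "free"}}   # persisted session_state
provided_state = {"user": {"plan": "pro"}, "run_flag": True}              # passed into this run

merge_in_place(state_from_db, provided_state)
print(state_from_db)
# {'counter': 3, 'user': {'name': 'Ada', 'plan': 'pro'}, 'run_flag': True}
```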
600
+
601
+ def _get_workflow_data(self) -> Dict[str, Any]:
602
+ workflow_data = {}
603
+
604
+ if self.steps and not callable(self.steps):
605
+ steps_dict = []
606
+ for step in self.steps: # type: ignore
607
+ if callable(step):
608
+ step_type = StepType.STEP
609
+ elif isinstance(step, Agent) or isinstance(step, Team):
610
+ step_type = StepType.STEP
611
+ else:
612
+ step_type = STEP_TYPE_MAPPING[type(step)]
613
+ step_dict = {
614
+ "name": step.name if hasattr(step, "name") else step.__name__,
615
+ "description": step.description if hasattr(step, "description") else "User-defined callable step",
616
+ "type": step_type.value,
617
+ }
618
+ steps_dict.append(step_dict)
326
619
 
327
- log_debug(f"Workflow Run Start: {self.run_id}", center=True)
328
- # Initialize the run_response content
329
- self.run_response.content = ""
330
- try:
331
- self._subclass_arun = cast(Callable, self._subclass_arun)
332
- async for item in self._subclass_arun(**kwargs):
333
- if (
334
- isinstance(item, tuple(get_args(RunResponseEvent)))
335
- or isinstance(item, tuple(get_args(TeamRunResponseEvent)))
336
- or isinstance(item, tuple(get_args(WorkflowRunResponseEvent)))
337
- or isinstance(item, RunResponse)
338
- ):
339
- # Update the run_id, session_id and workflow_id of the RunResponseEvent
340
- item.run_id = self.run_id
341
- item.session_id = self.session_id
342
- item.workflow_id = self.workflow_id
620
+ workflow_data["steps"] = steps_dict
343
621
 
344
- # Update the run_response with the content from the result
345
- if hasattr(item, "content") and item.content is not None and isinstance(item.content, str):
346
- self.run_response.content += item.content
347
- else:
348
- logger.warning(f"Workflow.run() should only yield RunResponseEvent objects, got: {type(item)}")
349
- yield item
350
-
351
- # Add the run to the memory
352
- if isinstance(self.memory, WorkflowMemory):
353
- self.memory.add_run(WorkflowRun(input=self.run_input, response=self.run_response))
354
- elif isinstance(self.memory, Memory):
355
- self.memory.add_run(session_id=self.session_id, run=self.run_response) # type: ignore
356
- # Write this run to the database
357
- self.write_to_storage()
358
- log_debug(f"Workflow Run End: {self.run_id}", center=True)
359
- except Exception as e:
360
- logger.error(f"Workflow.arun() failed: {e}")
361
- raise e
622
+ elif callable(self.steps):
623
+ workflow_data["steps"] = [
624
+ {
625
+ "name": "Custom Function",
626
+ "description": "User-defined callable workflow",
627
+ "type": "Callable",
628
+ }
629
+ ]
362
630
 
363
- async def arun(self, **kwargs: Any):
364
- """Async version of run() that calls arun_workflow()"""
365
- logger.error(f"{self.__class__.__name__}.arun() method not implemented.")
366
- return
631
+ return workflow_data
632
+
633
+ def _handle_event(
634
+ self,
635
+ event: "WorkflowRunOutputEvent",
636
+ workflow_run_response: WorkflowRunOutput,
637
+ websocket_handler: Optional[WebSocketHandler] = None,
638
+ ) -> "WorkflowRunOutputEvent":
639
+ """Handle workflow events for storage - similar to Team._handle_event"""
640
+ if self.store_events:
641
+ # Check if this event type should be skipped
642
+ if self.events_to_skip:
643
+ event_type = event.event
644
+ for skip_event in self.events_to_skip:
645
+ if isinstance(skip_event, str):
646
+ if event_type == skip_event:
647
+ return event
648
+ else:
649
+ # It's a WorkflowRunEvent enum
650
+ if event_type == skip_event.value:
651
+ return event
367
652
 
368
- def set_storage_mode(self):
369
- if self.storage is not None:
370
- self.storage.mode = "workflow"
653
+ # Store the event
654
+ if workflow_run_response.events is None:
655
+ workflow_run_response.events = []
371
656
 
372
- def initialize_workflow(self):
373
- self.set_storage_mode()
657
+ workflow_run_response.events.append(event)
374
658
 
375
- def set_workflow_id(self) -> str:
376
- if self.workflow_id is None:
377
- self.workflow_id = str(uuid4())
378
- log_debug(f"Workflow ID: {self.workflow_id}", center=True)
379
- return self.workflow_id
659
+ # Broadcast to WebSocket if available (async context only)
660
+ if websocket_handler:
661
+ import asyncio
380
662
 
381
- def set_session_id(self) -> str:
382
- if self.session_id is None:
383
- self.session_id = str(uuid4())
384
- log_debug(f"Session ID: {self.session_id}", center=True)
385
- return self.session_id
663
+ try:
664
+ loop = asyncio.get_running_loop()
665
+ if loop:
666
+ asyncio.create_task(websocket_handler.handle_event(event))
667
+ except RuntimeError:
668
+ pass
669
+
670
+ return event
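For illustration, _handle_event stores an event on the run only when store_events is set and the event's type is not listed in events_to_skip; skip entries may be plain strings or enum members compared by .value. A standalone sketch of that filter (RunEvent and should_store are hypothetical names):

```python
from enum import Enum
from typing import List, Union


class RunEvent(str, Enum):            # hypothetical stand-in for WorkflowRunEvent
    workflow_started = "WorkflowStarted"
    workflow_completed = "WorkflowCompleted"


def should_store(event_type: str, events_to_skip: List[Union[str, RunEvent]]) -> bool:
    """Mirror of the skip check: strings compared directly, enums by .value."""
    for skip_event in events_to_skip:
        skip_value = skip_event if isinstance(skip_event, str) else skip_event.value
        if event_type == skip_value:
            return False
    return True


print(should_store("WorkflowStarted", [RunEvent.workflow_started]))  # False -> skipped
print(should_store("StepCompleted", ["WorkflowStarted"]))            # True  -> stored
```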
671
+
672
+ def _transform_step_output_to_event(
673
+ self, step_output: StepOutput, workflow_run_response: WorkflowRunOutput, step_index: Optional[int] = None
674
+ ) -> StepOutputEvent:
675
+ """Transform a StepOutput object into a StepOutputEvent for consistent streaming interface"""
676
+ return StepOutputEvent(
677
+ step_output=step_output,
678
+ run_id=workflow_run_response.run_id or "",
679
+ workflow_name=workflow_run_response.workflow_name,
680
+ workflow_id=workflow_run_response.workflow_id,
681
+ session_id=workflow_run_response.session_id,
682
+ step_name=step_output.step_name,
683
+ step_index=step_index,
684
+ )
386
685
 
387
- def set_debug(self) -> None:
686
+ def _set_debug(self) -> None:
687
+ """Set debug mode and configure logging"""
388
688
  if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
689
+ use_workflow_logger()
690
+
389
691
  self.debug_mode = True
390
- set_log_level_to_debug()
391
- log_debug("Debug logs enabled")
392
- else:
393
- set_log_level_to_info()
692
+ set_log_level_to_debug(source_type="workflow")
693
+
694
+ # Propagate to steps - only if steps is iterable (not callable)
695
+ if self.steps and not callable(self.steps):
696
+ if isinstance(self.steps, Steps):
697
+ steps_to_iterate = self.steps.steps
698
+ else:
699
+ steps_to_iterate = self.steps
394
700
 
395
- def set_monitoring(self) -> None:
396
- """Override monitoring and telemetry settings based on environment variables."""
701
+ for step in steps_to_iterate:
702
+ self._propagate_debug_to_step(step)
703
+ else:
704
+ set_log_level_to_info(source_type="workflow")
397
705
 
398
- # Only override if the environment variable is set
399
- monitor_env = getenv("AGNO_MONITOR")
400
- if monitor_env is not None:
401
- self.monitoring = monitor_env.lower() == "true"
706
+ def _set_telemetry(self) -> None:
707
+ """Override telemetry settings based on environment variables."""
402
708
 
403
- # Override telemetry if environment variable is set
404
709
  telemetry_env = getenv("AGNO_TELEMETRY")
405
710
  if telemetry_env is not None:
406
711
  self.telemetry = telemetry_env.lower() == "true"
407
712
 
408
- def initialize_memory(self) -> None:
409
- if self.memory is None:
410
- self.memory = Memory()
411
-
412
- def update_run_method(self):
413
- run_type = None
414
- # Update the run() method to call run_workflow() instead of the subclass's run()
415
- # First, check if the subclass has a run method
416
- # If the run() method has been overridden by the subclass,
417
- # then self.__class__.run is not Workflow.run will be True
418
- if self.__class__.run is not Workflow.run:
419
- # Store the original run methods bound to the instance
420
- self._subclass_run = self.__class__.run.__get__(self)
421
- run_type = "sync"
422
- # Get the parameters of the sync run method
423
- sig = inspect.signature(self.__class__.run)
424
-
425
- if self.__class__.arun is not Workflow.arun:
426
- self._subclass_arun = self.__class__.arun.__get__(self)
427
- run_type = "coroutine"
428
-
429
- # Get the parameters of the async run method
430
- sig = inspect.signature(self.__class__.arun)
431
-
432
- # Check if the async method is a coroutine or async generator
433
- from inspect import isasyncgenfunction
434
-
435
- if isasyncgenfunction(self.__class__.arun):
436
- run_type = "async_generator"
437
-
438
- if run_type is not None:
439
- # Convert parameters to a serializable format
440
- self._run_parameters = {
441
- param_name: {
442
- "name": param_name,
443
- "default": param.default.default
444
- if hasattr(param.default, "__class__") and param.default.__class__.__name__ == "FieldInfo"
445
- else (param.default if param.default is not inspect.Parameter.empty else None),
446
- "annotation": (
447
- param.annotation.__name__
448
- if hasattr(param.annotation, "__name__")
449
- else (
450
- str(param.annotation).replace("typing.Optional[", "").replace("]", "")
451
- if "typing.Optional" in str(param.annotation)
452
- else str(param.annotation)
453
- )
713
+ def _propagate_debug_to_step(self, step):
714
+ """Recursively propagate debug mode to steps and nested primitives"""
715
+ # Handle direct Step objects
716
+ if hasattr(step, "active_executor") and step.active_executor:
717
+ executor = step.active_executor
718
+ if hasattr(executor, "debug_mode"):
719
+ executor.debug_mode = True
720
+
721
+ # If it's a team, propagate to all members
722
+ if hasattr(executor, "members"):
723
+ for member in executor.members:
724
+ if hasattr(member, "debug_mode"):
725
+ member.debug_mode = True
726
+
727
+ # Handle nested primitives - check both 'steps' and 'choices' attributes
728
+ for attr_name in ["steps", "choices"]:
729
+ if hasattr(step, attr_name):
730
+ attr_value = getattr(step, attr_name)
731
+ if attr_value and isinstance(attr_value, list):
732
+ for nested_step in attr_value:
733
+ self._propagate_debug_to_step(nested_step)
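For illustration, the traversal above flips debug_mode on every executor it can reach, including team members and steps nested inside container primitives via their steps/choices attributes. A standalone sketch of the same walk over plain stand-in objects:

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class FakeExecutor:                  # stand-in for an agent/team executor
    debug_mode: bool = False
    members: List["FakeExecutor"] = field(default_factory=list)


@dataclass
class FakeStep:                      # stand-in for a step or nested primitive
    active_executor: Optional[FakeExecutor] = None
    steps: List["FakeStep"] = field(default_factory=list)
    choices: List["FakeStep"] = field(default_factory=list)


def propagate_debug(step: FakeStep) -> None:
    if step.active_executor is not None:
        step.active_executor.debug_mode = True
        for member in step.active_executor.members:
            member.debug_mode = True
    for nested in [*step.steps, *step.choices]:
        propagate_debug(nested)


team = FakeExecutor(members=[FakeExecutor(), FakeExecutor()])
root = FakeStep(steps=[FakeStep(active_executor=team)])
propagate_debug(root)
print(team.debug_mode, [m.debug_mode for m in team.members])  # True [True, True]
```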
734
+
735
+ def _create_step_input(
736
+ self,
737
+ execution_input: WorkflowExecutionInput,
738
+ previous_step_outputs: Optional[Dict[str, StepOutput]] = None,
739
+ shared_images: Optional[List[ImageArtifact]] = None,
740
+ shared_videos: Optional[List[VideoArtifact]] = None,
741
+ shared_audio: Optional[List[AudioArtifact]] = None,
742
+ shared_files: Optional[List[File]] = None,
743
+ ) -> StepInput:
744
+ """Helper method to create StepInput with enhanced data flow support"""
745
+
746
+ previous_step_content = None
747
+ if previous_step_outputs:
748
+ last_output = list(previous_step_outputs.values())[-1]
749
+ previous_step_content = last_output.content if last_output else None
750
+ log_debug(f"Using previous step content from: {list(previous_step_outputs.keys())[-1]}")
751
+
752
+ return StepInput(
753
+ input=execution_input.input,
754
+ previous_step_content=previous_step_content,
755
+ previous_step_outputs=previous_step_outputs,
756
+ additional_data=execution_input.additional_data,
757
+ images=shared_images or [],
758
+ videos=shared_videos or [],
759
+ audio=shared_audio or [],
760
+ files=shared_files or [],
761
+ )
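For illustration, the most recently recorded step output supplies previous_step_content for the next step; dict insertion order is what makes "last" well defined here. A standalone sketch with a hypothetical stand-in type:

```python
from dataclasses import dataclass
from typing import Dict, Optional


@dataclass
class FakeStepOutput:                 # hypothetical stand-in for StepOutput
    content: Optional[str] = None


def previous_content(previous_step_outputs: Dict[str, FakeStepOutput]) -> Optional[str]:
    if not previous_step_outputs:
        return None
    last_output = list(previous_step_outputs.values())[-1]
    return last_output.content if last_output else None


outputs = {"research": FakeStepOutput("notes"), "draft": FakeStepOutput("article v1")}
print(previous_content(outputs))      # article v1 -- the most recent step feeds the next one
```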
762
+
763
+ def _get_step_count(self) -> int:
764
+ """Get the number of steps in the workflow"""
765
+ if self.steps is None:
766
+ return 0
767
+ elif callable(self.steps):
768
+ return 1 # Callable function counts as 1 step
769
+ else:
770
+ # Handle Steps wrapper
771
+ if isinstance(self.steps, Steps):
772
+ return len(self.steps.steps)
773
+ else:
774
+ return len(self.steps)
775
+
776
+ def _aggregate_workflow_metrics(self, step_results: List[Union[StepOutput, List[StepOutput]]]) -> WorkflowMetrics:
777
+ """Aggregate metrics from all step responses into structured workflow metrics"""
778
+ steps_dict = {}
779
+
780
+ def process_step_output(step_output: StepOutput):
781
+ """Process a single step output for metrics"""
782
+
783
+ # If this step has nested steps, process them recursively
784
+ if hasattr(step_output, "steps") and step_output.steps:
785
+ for nested_step in step_output.steps:
786
+ process_step_output(nested_step)
787
+
788
+ # Only collect metrics from steps that actually have metrics (actual agents/teams)
789
+ if (
790
+ step_output.step_name and step_output.metrics and step_output.executor_type in ["agent", "team"]
791
+ ): # Only include actual executors
792
+ step_metrics = StepMetrics(
793
+ step_name=step_output.step_name,
794
+ executor_type=step_output.executor_type or "unknown",
795
+ executor_name=step_output.executor_name or "unknown",
796
+ metrics=step_output.metrics,
797
+ )
798
+ steps_dict[step_output.step_name] = step_metrics
799
+
800
+ # Process all step results
801
+ for step_result in step_results:
802
+ process_step_output(cast(StepOutput, step_result))
803
+
804
+ return WorkflowMetrics(
805
+ steps=steps_dict,
806
+ )
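For illustration, the aggregation walks nested step results recursively and keeps metrics only for outputs produced by an agent or team executor. A standalone sketch of that filter with hypothetical stand-in types:

```python
from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class FakeStepOutput:                 # hypothetical stand-in for StepOutput
    step_name: Optional[str] = None
    executor_type: Optional[str] = None     # "agent", "team", "function", ...
    metrics: Optional[dict] = None
    steps: List["FakeStepOutput"] = field(default_factory=list)


def collect_metrics(step_results: List[FakeStepOutput]) -> Dict[str, dict]:
    collected: Dict[str, dict] = {}

    def visit(output: FakeStepOutput) -> None:
        for nested in output.steps:              # recurse into container steps first
            visit(nested)
        if output.step_name and output.metrics and output.executor_type in ("agent", "team"):
            collected[output.step_name] = output.metrics

    for result in step_results:
        visit(result)
    return collected


results = [
    FakeStepOutput("route", "function", None, steps=[FakeStepOutput("draft", "agent", {"tokens": 120})]),
    FakeStepOutput("review", "team", {"tokens": 80}),
]
print(collect_metrics(results))   # {'draft': {'tokens': 120}, 'review': {'tokens': 80}}
```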
807
+
808
+ def _call_custom_function(self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any) -> Any:
809
+ """Call custom function with only the parameters it expects"""
810
+ from inspect import signature
811
+
812
+ sig = signature(func)
813
+
814
+ # Build arguments based on what the function actually accepts
815
+ call_kwargs: Dict[str, Any] = {}
816
+
817
+ # Only add workflow and execution_input if the function expects them
818
+ if "workflow" in sig.parameters: # type: ignore
819
+ call_kwargs["workflow"] = self
820
+ if "execution_input" in sig.parameters:
821
+ call_kwargs["execution_input"] = execution_input # type: ignore
822
+ if "session_state" in sig.parameters:
823
+ call_kwargs["session_state"] = self.session_state # type: ignore
824
+
825
+ # Add any other kwargs that the function expects
826
+ for param_name in kwargs:
827
+ if param_name in sig.parameters: # type: ignore
828
+ call_kwargs[param_name] = kwargs[param_name]
829
+
830
+ # If function has **kwargs parameter, pass all remaining kwargs
831
+ for param in sig.parameters.values(): # type: ignore
832
+ if param.kind == param.VAR_KEYWORD:
833
+ call_kwargs.update(kwargs)
834
+ break
835
+
836
+ try:
837
+ return func(**call_kwargs)
838
+ except TypeError as e:
839
+ # If signature inspection fails, fall back to original method
840
+ logger.warning(
841
+ f"Async function signature inspection failed: {e}. Falling back to original calling convention."
842
+ )
843
+ return func(**call_kwargs)
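For illustration, a custom workflow function is called with only the keyword arguments it declares (workflow, execution_input, session_state, or anything passed through **kwargs), so simple functions do not need to accept every parameter. A standalone sketch of that signature-driven filtering:

```python
from inspect import signature
from typing import Any, Callable, Dict


def call_with_expected_args(func: Callable, available: Dict[str, Any]) -> Any:
    sig = signature(func)
    # Keep only the arguments the function declares...
    call_kwargs = {name: value for name, value in available.items() if name in sig.parameters}
    # ...unless it takes **kwargs, in which case pass everything through.
    if any(p.kind == p.VAR_KEYWORD for p in sig.parameters.values()):
        call_kwargs = dict(available)
    return func(**call_kwargs)


def my_step(execution_input, session_state):      # deliberately ignores `workflow`
    return f"input={execution_input!r}, state={session_state!r}"


print(call_with_expected_args(
    my_step,
    {"workflow": object(), "execution_input": "write a post", "session_state": {"topic": "AI"}},
))
```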
844
+
845
+ def _execute(
846
+ self,
847
+ session: WorkflowSession,
848
+ execution_input: WorkflowExecutionInput,
849
+ workflow_run_response: WorkflowRunOutput,
850
+ session_state: Optional[Dict[str, Any]] = None,
851
+ **kwargs: Any,
852
+ ) -> WorkflowRunOutput:
853
+ """Execute a specific pipeline by name synchronously"""
854
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
855
+
856
+ workflow_run_response.status = RunStatus.running
857
+ register_run(workflow_run_response.run_id) # type: ignore
858
+
859
+ if callable(self.steps):
860
+ if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
861
+ raise ValueError("Cannot use async function with synchronous execution")
862
+ elif isgeneratorfunction(self.steps):
863
+ content = ""
864
+ for chunk in self.steps(self, execution_input, **kwargs):
865
+ # Check for cancellation while consuming generator
866
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
867
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
868
+ content += chunk.content
869
+ else:
870
+ content += str(chunk)
871
+ workflow_run_response.content = content
872
+ else:
873
+ # Execute the workflow with the custom executor
874
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
875
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs) # type: ignore[arg-type]
876
+
877
+ workflow_run_response.status = RunStatus.completed
878
+ else:
879
+ try:
880
+ # Track outputs from each step for enhanced data flow
881
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
882
+ previous_step_outputs: Dict[str, StepOutput] = {}
883
+
884
+ shared_images: List[ImageArtifact] = execution_input.images or []
885
+ output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
886
+ shared_videos: List[VideoArtifact] = execution_input.videos or []
887
+ output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
888
+ shared_audio: List[AudioArtifact] = execution_input.audio or []
889
+ output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
890
+ shared_files: List[File] = execution_input.files or []
891
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
892
+
893
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
894
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
895
+ step_name = getattr(step, "name", f"step_{i + 1}")
896
+ log_debug(f"Executing step {i + 1}/{self._get_step_count()}: {step_name}")
897
+
898
+ # Create enhanced StepInput
899
+ step_input = self._create_step_input(
900
+ execution_input=execution_input,
901
+ previous_step_outputs=previous_step_outputs,
902
+ shared_images=shared_images,
903
+ shared_videos=shared_videos,
904
+ shared_audio=shared_audio,
905
+ shared_files=shared_files,
454
906
  )
455
- if param.annotation is not inspect.Parameter.empty
456
- else None,
457
- "required": param.default is inspect.Parameter.empty,
458
- }
459
- for param_name, param in sig.parameters.items()
460
- if param_name != "self"
461
- }
462
- # Determine the return type of the run method
463
- return_annotation = sig.return_annotation
464
- self._run_return_type = (
465
- return_annotation.__name__
466
- if return_annotation is not inspect.Signature.empty and hasattr(return_annotation, "__name__")
467
- else str(return_annotation)
468
- if return_annotation is not inspect.Signature.empty
469
- else None
907
+
908
+ # Check for cancellation before executing step
909
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
910
+
911
+ step_output = step.execute( # type: ignore[union-attr]
912
+ step_input,
913
+ session_id=session.session_id,
914
+ user_id=self.user_id,
915
+ workflow_run_response=workflow_run_response,
916
+ session_state=session_state,
917
+ store_executor_outputs=self.store_executor_outputs,
918
+ )
919
+
920
+ # Check for cancellation after step execution
921
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
922
+
923
+ # Update the workflow-level previous_step_outputs dictionary
924
+ previous_step_outputs[step_name] = step_output
925
+ if step_output.stop:
926
+ logger.info(f"Early termination requested by step {step_name}")
927
+ break
928
+
929
+ # Update shared media for next step
930
+ shared_images.extend(step_output.images or [])
931
+ shared_videos.extend(step_output.videos or [])
932
+ shared_audio.extend(step_output.audio or [])
933
+ shared_files.extend(step_output.files or [])
934
+ output_images.extend(step_output.images or [])
935
+ output_videos.extend(step_output.videos or [])
936
+ output_audio.extend(step_output.audio or [])
937
+ output_files.extend(step_output.files or [])
938
+
939
+ collected_step_outputs.append(step_output)
940
+
941
+ # Update the workflow_run_response with completion data
942
+ if collected_step_outputs:
943
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
944
+ last_output = cast(StepOutput, collected_step_outputs[-1])
945
+
946
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
947
+ if getattr(last_output, "steps", None):
948
+ _cur = last_output
949
+ while getattr(_cur, "steps", None):
950
+ _steps = _cur.steps or []
951
+ if not _steps:
952
+ break
953
+ _cur = _steps[-1]
954
+ workflow_run_response.content = _cur.content
955
+ else:
956
+ workflow_run_response.content = last_output.content
957
+ else:
958
+ workflow_run_response.content = "No steps executed"
959
+
960
+ workflow_run_response.step_results = collected_step_outputs
961
+ workflow_run_response.images = output_images
962
+ workflow_run_response.videos = output_videos
963
+ workflow_run_response.audio = output_audio
964
+ workflow_run_response.status = RunStatus.completed
965
+
966
+ except RunCancelledException as e:
967
+ # Handle run cancellation
968
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
969
+ workflow_run_response.status = RunStatus.cancelled
970
+ workflow_run_response.content = str(e)
971
+ except Exception as e:
972
+ import traceback
973
+
974
+ traceback.print_exc()
975
+ logger.error(f"Workflow execution failed: {e}")
976
+ # Store error response
977
+ workflow_run_response.status = RunStatus.error
978
+ workflow_run_response.content = f"Workflow execution failed: {e}"
979
+
980
+ finally:
981
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
982
+ session.upsert_run(run=workflow_run_response)
983
+ self.save_session(session=session)
984
+ # Always clean up the run tracking
985
+ cleanup_run(workflow_run_response.run_id) # type: ignore
986
+
987
+ # Log Workflow Telemetry
988
+ if self.telemetry:
989
+ self._log_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
990
+
991
+ return workflow_run_response
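For orientation, this is the loop a non-streaming run ultimately goes through: build a StepInput per step, execute it, thread media and outputs forward, and persist the run on the session. A hedged usage sketch of a callable workflow (the import path, constructor arguments and run() signature are assumptions inferred from this file, not confirmed API):

```python
# Hypothetical usage sketch -- import path, constructor and run() signature are assumptions.
from agno.workflow import Workflow          # assumed import path


def pipeline(execution_input, session_state):
    # Callable workflow body; only the parameters it declares are passed in
    # (see _call_custom_function above).
    session_state["runs"] = session_state.get("runs", 0) + 1
    return f"Handled: {execution_input.input}"


workflow = Workflow(name="demo", steps=pipeline, session_state={"runs": 0})  # assumed kwargs
response = workflow.run(input="summarize the report")                        # assumed entry point
print(response.content, response.status)
```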
992
+
993
+ def _execute_stream(
994
+ self,
995
+ session: WorkflowSession,
996
+ execution_input: WorkflowExecutionInput,
997
+ workflow_run_response: WorkflowRunOutput,
998
+ session_state: Optional[Dict[str, Any]] = None,
999
+ stream_intermediate_steps: bool = False,
1000
+ **kwargs: Any,
1001
+ ) -> Iterator[WorkflowRunOutputEvent]:
1002
+ """Execute a specific pipeline by name with event streaming"""
1003
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1004
+
1005
+ workflow_run_response.status = RunStatus.running
1006
+
1007
+ # Register run for cancellation tracking
1008
+ if workflow_run_response.run_id:
1009
+ register_run(workflow_run_response.run_id)
1010
+
1011
+ workflow_started_event = WorkflowStartedEvent(
1012
+ run_id=workflow_run_response.run_id or "",
1013
+ workflow_name=workflow_run_response.workflow_name,
1014
+ workflow_id=workflow_run_response.workflow_id,
1015
+ session_id=workflow_run_response.session_id,
1016
+ )
1017
+ yield self._handle_event(workflow_started_event, workflow_run_response)
1018
+
1019
+ if callable(self.steps):
1020
+ if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
1021
+ raise ValueError("Cannot use async function with synchronous execution")
1022
+ elif isgeneratorfunction(self.steps):
1023
+ content = ""
1024
+ for chunk in self._call_custom_function(self.steps, execution_input, **kwargs): # type: ignore[arg-type]
1025
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1026
+ # Update the run_response with the content from the result
1027
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1028
+ content += chunk.content
1029
+ yield chunk
1030
+ else:
1031
+ content += str(chunk)
1032
+ workflow_run_response.content = content
1033
+ else:
1034
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1035
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs)
1036
+ workflow_run_response.status = RunStatus.completed
1037
+
1038
+ else:
1039
+ try:
1040
+ # Track outputs from each step for enhanced data flow
1041
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1042
+ previous_step_outputs: Dict[str, StepOutput] = {}
1043
+
1044
+ shared_images: List[ImageArtifact] = execution_input.images or []
1045
+ output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
1046
+ shared_videos: List[VideoArtifact] = execution_input.videos or []
1047
+ output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
1048
+ shared_audio: List[AudioArtifact] = execution_input.audio or []
1049
+ output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
1050
+ shared_files: List[File] = execution_input.files or []
1051
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1052
+
1053
+ early_termination = False
1054
+
1055
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1056
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1057
+ step_name = getattr(step, "name", f"step_{i + 1}")
1058
+ log_debug(f"Streaming step {i + 1}/{self._get_step_count()}: {step_name}")
1059
+
1060
+ # Create enhanced StepInput
1061
+ step_input = self._create_step_input(
1062
+ execution_input=execution_input,
1063
+ previous_step_outputs=previous_step_outputs,
1064
+ shared_images=shared_images,
1065
+ shared_videos=shared_videos,
1066
+ shared_audio=shared_audio,
1067
+ shared_files=shared_files,
1068
+ )
1069
+
1070
+ # Execute step with streaming and yield all events
1071
+ for event in step.execute_stream( # type: ignore[union-attr]
1072
+ step_input,
1073
+ session_id=session.session_id,
1074
+ user_id=self.user_id,
1075
+ stream_intermediate_steps=stream_intermediate_steps,
1076
+ workflow_run_response=workflow_run_response,
1077
+ session_state=session_state,
1078
+ step_index=i,
1079
+ store_executor_outputs=self.store_executor_outputs,
1080
+ ):
1081
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1082
+ # Handle events
1083
+ if isinstance(event, StepOutput):
1084
+ step_output = event
1085
+ collected_step_outputs.append(step_output)
1086
+
1087
+ # Update the workflow-level previous_step_outputs dictionary
1088
+ previous_step_outputs[step_name] = step_output
1089
+
1090
+ # Transform StepOutput to StepOutputEvent for consistent streaming interface
1091
+ step_output_event = self._transform_step_output_to_event(
1092
+ step_output, workflow_run_response, step_index=i
1093
+ )
1094
+
1095
+ if step_output.stop:
1096
+ logger.info(f"Early termination requested by step {step_name}")
1097
+ # Update shared media for next step
1098
+ shared_images.extend(step_output.images or [])
1099
+ shared_videos.extend(step_output.videos or [])
1100
+ shared_audio.extend(step_output.audio or [])
1101
+ shared_files.extend(step_output.files or [])
1102
+ output_images.extend(step_output.images or [])
1103
+ output_videos.extend(step_output.videos or [])
1104
+ output_audio.extend(step_output.audio or [])
1105
+ output_files.extend(step_output.files or [])
1106
+
1107
+ # Only yield StepOutputEvent for function executors, not for agents/teams
1108
+ if getattr(step, "executor_type", None) == "function":
1109
+ yield step_output_event
1110
+
1111
+ # Break out of the step loop
1112
+ early_termination = True
1113
+ break
1114
+
1115
+ # Update shared media for next step
1116
+ shared_images.extend(step_output.images or [])
1117
+ shared_videos.extend(step_output.videos or [])
1118
+ shared_audio.extend(step_output.audio or [])
1119
+ shared_files.extend(step_output.files or [])
1120
+ output_images.extend(step_output.images or [])
1121
+ output_videos.extend(step_output.videos or [])
1122
+ output_audio.extend(step_output.audio or [])
1123
+ output_files.extend(step_output.files or [])
1124
+
1125
+ # Only yield StepOutputEvent for generator functions, not for agents/teams
1126
+ if getattr(step, "executor_type", None) == "function":
1127
+ yield step_output_event
1128
+
1129
+ elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
1130
+ yield self._handle_event(event, workflow_run_response) # type: ignore
1131
+
1132
+ else:
1133
+ # Yield other internal events
1134
+ yield self._handle_event(event, workflow_run_response) # type: ignore
1135
+
1136
+ # Break out of main step loop if early termination was requested
1137
+ if "early_termination" in locals() and early_termination:
1138
+ break
1139
+
1140
+ # Update the workflow_run_response with completion data
1141
+ if collected_step_outputs:
1142
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1143
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1144
+
1145
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1146
+ if getattr(last_output, "steps", None):
1147
+ _cur = last_output
1148
+ while getattr(_cur, "steps", None):
1149
+ _steps = _cur.steps or []
1150
+ if not _steps:
1151
+ break
1152
+ _cur = _steps[-1]
1153
+ workflow_run_response.content = _cur.content
1154
+ else:
1155
+ workflow_run_response.content = last_output.content
1156
+ else:
1157
+ workflow_run_response.content = "No steps executed"
1158
+
1159
+ workflow_run_response.step_results = collected_step_outputs
1160
+ workflow_run_response.images = output_images
1161
+ workflow_run_response.videos = output_videos
1162
+ workflow_run_response.audio = output_audio
1163
+ workflow_run_response.status = RunStatus.completed
1164
+
1165
+ except RunCancelledException as e:
1166
+ # Handle run cancellation during streaming
1167
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
1168
+ workflow_run_response.status = RunStatus.cancelled
1169
+ workflow_run_response.content = str(e)
1170
+ cancelled_event = WorkflowCancelledEvent(
1171
+ run_id=workflow_run_response.run_id or "",
1172
+ workflow_id=self.id,
1173
+ workflow_name=self.name,
1174
+ session_id=session.session_id,
1175
+ reason=str(e),
1176
+ )
1177
+ yield self._handle_event(cancelled_event, workflow_run_response)
1178
+ except Exception as e:
1179
+ logger.error(f"Workflow execution failed: {e}")
1180
+
1181
+ from agno.run.workflow import WorkflowErrorEvent
1182
+
1183
+ error_event = WorkflowErrorEvent(
1184
+ run_id=workflow_run_response.run_id or "",
1185
+ workflow_id=self.id,
1186
+ workflow_name=self.name,
1187
+ session_id=session.session_id,
1188
+ error=str(e),
1189
+ )
1190
+
1191
+ yield error_event
1192
+
1193
+ # Update workflow_run_response with error
1194
+ workflow_run_response.content = error_event.error
1195
+ workflow_run_response.status = RunStatus.error
1196
+
1197
+ # Yield workflow completed event
1198
+ workflow_completed_event = WorkflowCompletedEvent(
1199
+ run_id=workflow_run_response.run_id or "",
1200
+ content=workflow_run_response.content,
1201
+ workflow_name=workflow_run_response.workflow_name,
1202
+ workflow_id=workflow_run_response.workflow_id,
1203
+ session_id=workflow_run_response.session_id,
1204
+ step_results=workflow_run_response.step_results, # type: ignore
1205
+ metadata=workflow_run_response.metadata,
1206
+ )
1207
+ yield self._handle_event(workflow_completed_event, workflow_run_response)
1208
+
1209
+ # Store the completed workflow response
1210
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1211
+ session.upsert_run(run=workflow_run_response)
1212
+ self.save_session(session=session)
1213
+
1214
+ # Always clean up the run tracking
1215
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1216
+
1217
+ # Log Workflow Telemetry
1218
+ if self.telemetry:
1219
+ self._log_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
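In streaming mode the generator above yields a started event, per-step events (StepOutputEvents only for function executors) and a completed, cancelled or error event at the end. A hedged consumption sketch, reusing the hypothetical workflow object from the earlier sketch and assuming a stream=True entry point that delegates to this generator:

```python
# Hypothetical streaming sketch -- the stream=True entry point is assumed.
for event in workflow.run(input="summarize the report", stream=True):
    # Every item is a WorkflowRunOutputEvent; inspect its type and optional content.
    print(type(event).__name__, getattr(event, "content", None))
```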
1220
+
1221
+ async def _acall_custom_function(
1222
+ self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any
1223
+ ) -> Any:
1224
+ """Call custom function with only the parameters it expects - handles both async functions and async generators"""
1225
+ from inspect import isasyncgenfunction, signature
1226
+
1227
+ sig = signature(func)
1228
+
1229
+ # Build arguments based on what the function actually accepts
1230
+ call_kwargs: Dict[str, Any] = {}
1231
+
1232
+ # Only add workflow and execution_input if the function expects them
1233
+ if "workflow" in sig.parameters: # type: ignore
1234
+ call_kwargs["workflow"] = self
1235
+ if "execution_input" in sig.parameters:
1236
+ call_kwargs["execution_input"] = execution_input # type: ignore
1237
+ if "session_state" in sig.parameters:
1238
+ call_kwargs["session_state"] = self.session_state # type: ignore
1239
+
1240
+ # Add any other kwargs that the function expects
1241
+ for param_name in kwargs:
1242
+ if param_name in sig.parameters: # type: ignore
1243
+ call_kwargs[param_name] = kwargs[param_name]
1244
+
1245
+ # If function has **kwargs parameter, pass all remaining kwargs
1246
+ for param in sig.parameters.values(): # type: ignore
1247
+ if param.kind == param.VAR_KEYWORD:
1248
+ call_kwargs.update(kwargs)
1249
+ break
1250
+
1251
+ try:
1252
+ # Check if it's an async generator function
1253
+ if isasyncgenfunction(func):
1254
+ # For async generators, call the function and return the async generator directly
1255
+ return func(**call_kwargs) # type: ignore
1256
+ else:
1257
+ # For regular async functions, await the result
1258
+ return await func(**call_kwargs) # type: ignore
1259
+ except TypeError as e:
1260
+ # If signature inspection fails, fall back to original method
1261
+ logger.warning(
1262
+ f"Async function signature inspection failed: {e}. Falling back to original calling convention."
470
1263
  )
471
- # Important: Replace the instance's run method with run_workflow
472
- # This is so we call run_workflow() instead of the subclass's run()
473
- if run_type == "sync":
474
- object.__setattr__(self, "run", self.run_workflow.__get__(self))
475
- elif run_type == "coroutine":
476
- object.__setattr__(self, "arun", self.arun_workflow.__get__(self))
477
- elif run_type == "async_generator":
478
- object.__setattr__(self, "arun", self.arun_workflow_generator.__get__(self))
1264
+ if isasyncgenfunction(func):
1265
+ # For async generators, use the same signature inspection logic in fallback
1266
+ return func(**call_kwargs) # type: ignore
1267
+ else:
1268
+ # For regular async functions, use the same signature inspection logic in fallback
1269
+ return await func(**call_kwargs) # type: ignore
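For illustration, the async variant has to distinguish coroutine functions (await the result) from async generator functions (return the generator for the caller to iterate). A standalone sketch of that dispatch:

```python
import asyncio
from inspect import isasyncgenfunction


async def call_custom(func, **kwargs):
    if isasyncgenfunction(func):
        return func(**kwargs)        # async generator: hand back for `async for`
    return await func(**kwargs)      # plain coroutine function: await it


async def coro_step(text):
    return text.upper()


async def gen_step(text):
    for word in text.split():
        yield word


async def main():
    print(await call_custom(coro_step, text="hello world"))                 # HELLO WORLD
    chunks = [w async for w in await call_custom(gen_step, text="hello world")]
    print(chunks)                                                           # ['hello', 'world']


asyncio.run(main())
```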
1270
+
1271
+ async def _aexecute(
1272
+ self,
1273
+ session: WorkflowSession,
1274
+ execution_input: WorkflowExecutionInput,
1275
+ workflow_run_response: WorkflowRunOutput,
1276
+ session_state: Optional[Dict[str, Any]] = None,
1277
+ **kwargs: Any,
1278
+ ) -> WorkflowRunOutput:
1279
+ """Execute a specific pipeline by name asynchronously"""
1280
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1281
+
1282
+ workflow_run_response.status = RunStatus.running
1283
+
1284
+ # Register run for cancellation tracking
1285
+ register_run(workflow_run_response.run_id) # type: ignore
1286
+
1287
+ if callable(self.steps):
1288
+ # Execute the workflow with the custom executor
1289
+ content = ""
1290
+
1291
+ if iscoroutinefunction(self.steps): # type: ignore
1292
+ workflow_run_response.content = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1293
+ elif isgeneratorfunction(self.steps):
1294
+ for chunk in self.steps(self, execution_input, **kwargs): # type: ignore[arg-type]
1295
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1296
+ content += chunk.content
1297
+ else:
1298
+ content += str(chunk)
1299
+ workflow_run_response.content = content
1300
+ elif isasyncgenfunction(self.steps): # type: ignore
1301
+ async_gen = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1302
+ async for chunk in async_gen:
1303
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1304
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1305
+ content += chunk.content
1306
+ else:
1307
+ content += str(chunk)
1308
+ workflow_run_response.content = content
1309
+ else:
1310
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1311
+ workflow_run_response.content = self._call_custom_function(self.steps, execution_input, **kwargs)
1312
+ workflow_run_response.status = RunStatus.completed
1313
+
479
1314
  else:
480
- # If the subclass does not override the run method,
481
- # the Workflow.run() method will be called and will log an error
482
- self._subclass_run = self.run
483
- self._subclass_arun = self.arun
484
-
485
- self._run_parameters = {}
486
- self._run_return_type = None
487
-
488
- def update_agent_session_ids(self):
489
- # Update the session_id for all Agent instances
490
- # use dataclasses.fields() to iterate through fields
491
- for f in fields(self):
492
- field_type = f.type
493
- if isinstance(field_type, Agent):
494
- field_value = getattr(self, f.name)
495
- field_value.session_id = self.session_id
496
-
497
- def get_workflow_data(self) -> Dict[str, Any]:
498
- workflow_data: Dict[str, Any] = {}
499
- if self.name is not None:
500
- workflow_data["name"] = self.name
501
- if self.workflow_id is not None:
502
- workflow_data["workflow_id"] = self.workflow_id
503
- if self.description is not None:
504
- workflow_data["description"] = self.description
505
- return workflow_data
1315
+ try:
1316
+ # Track outputs from each step for enhanced data flow
1317
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1318
+ previous_step_outputs: Dict[str, StepOutput] = {}
1319
+
1320
+ shared_images: List[ImageArtifact] = execution_input.images or []
1321
+ output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
1322
+ shared_videos: List[VideoArtifact] = execution_input.videos or []
1323
+ output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
1324
+ shared_audio: List[AudioArtifact] = execution_input.audio or []
1325
+ output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
1326
+ shared_files: List[File] = execution_input.files or []
1327
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1328
+
1329
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1330
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1331
+ step_name = getattr(step, "name", f"step_{i + 1}")
1332
+ log_debug(f"Async Executing step {i + 1}/{self._get_step_count()}: {step_name}")
1333
+
1334
+ # Create enhanced StepInput
1335
+ step_input = self._create_step_input(
1336
+ execution_input=execution_input,
1337
+ previous_step_outputs=previous_step_outputs,
1338
+ shared_images=shared_images,
1339
+ shared_videos=shared_videos,
1340
+ shared_audio=shared_audio,
1341
+ shared_files=shared_files,
1342
+ )
1343
+
1344
+ # Check for cancellation before executing step
1345
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1346
+
1347
+ step_output = await step.aexecute( # type: ignore[union-attr]
1348
+ step_input,
1349
+ session_id=session.session_id,
1350
+ user_id=self.user_id,
1351
+ workflow_run_response=workflow_run_response,
1352
+ session_state=session_state,
1353
+ store_executor_outputs=self.store_executor_outputs,
1354
+ )
1355
+
1356
+ # Check for cancellation after step execution
1357
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1358
+
1359
+ # Update the workflow-level previous_step_outputs dictionary
1360
+ previous_step_outputs[step_name] = step_output
1361
+ if step_output.stop:
1362
+ logger.info(f"Early termination requested by step {step_name}")
1363
+ break
1364
+
1365
+ # Update shared media for next step
1366
+ shared_images.extend(step_output.images or [])
1367
+ shared_videos.extend(step_output.videos or [])
1368
+ shared_audio.extend(step_output.audio or [])
1369
+ shared_files.extend(step_output.files or [])
1370
+ output_images.extend(step_output.images or [])
1371
+ output_videos.extend(step_output.videos or [])
1372
+ output_audio.extend(step_output.audio or [])
1373
+ output_files.extend(step_output.files or [])
1374
+
1375
+ collected_step_outputs.append(step_output)
1376
+
1377
+ # Update the workflow_run_response with completion data
1378
+ if collected_step_outputs:
1379
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1380
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1381
+
1382
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1383
+ if getattr(last_output, "steps", None):
1384
+ _cur = last_output
1385
+ while getattr(_cur, "steps", None):
1386
+ _steps = _cur.steps or []
1387
+ if not _steps:
1388
+ break
1389
+ _cur = _steps[-1]
1390
+ workflow_run_response.content = _cur.content
1391
+ else:
1392
+ workflow_run_response.content = last_output.content
1393
+ else:
1394
+ workflow_run_response.content = "No steps executed"
1395
+
1396
+ workflow_run_response.step_results = collected_step_outputs
1397
+ workflow_run_response.images = output_images
1398
+ workflow_run_response.videos = output_videos
1399
+ workflow_run_response.audio = output_audio
1400
+ workflow_run_response.status = RunStatus.completed
1401
+
1402
+ except RunCancelledException as e:
1403
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
1404
+ workflow_run_response.status = RunStatus.cancelled
1405
+ workflow_run_response.content = str(e)
1406
+ except Exception as e:
1407
+ logger.error(f"Workflow execution failed: {e}")
1408
+ workflow_run_response.status = RunStatus.error
1409
+ workflow_run_response.content = f"Workflow execution failed: {e}"
1410
+
1411
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1412
+ session.upsert_run(run=workflow_run_response)
1413
+ self.save_session(session=session)
1414
+ # Always clean up the run tracking
1415
+ cleanup_run(workflow_run_response.run_id) # type: ignore
506
1416
 
507
- def get_session_data(self) -> Dict[str, Any]:
508
- session_data: Dict[str, Any] = {}
509
- if self.session_name is not None:
510
- session_data["session_name"] = self.session_name
511
- if self.session_state and len(self.session_state) > 0:
512
- session_data["session_state"] = nested_model_dump(self.session_state)
513
- if self.images is not None:
514
- session_data["images"] = [img.model_dump() for img in self.images]
515
- if self.videos is not None:
516
- session_data["videos"] = [vid.model_dump() for vid in self.videos]
517
- if self.audio is not None:
518
- session_data["audio"] = [aud.model_dump() for aud in self.audio]
519
- return session_data
520
-
521
- def get_workflow_session(self) -> WorkflowSession:
522
- """Get a WorkflowSession object, which can be saved to the database"""
523
- self.memory = cast(WorkflowMemory, self.memory)
524
- self.session_id = cast(str, self.session_id)
525
- self.workflow_id = cast(str, self.workflow_id)
526
- if self.memory is not None:
527
- if isinstance(self.memory, WorkflowMemory):
528
- self.memory = cast(WorkflowMemory, self.memory)
529
- memory_dict = self.memory.to_dict()
530
- # We only persist the runs for the current session ID (not all runs in memory)
531
- memory_dict["runs"] = [
532
- agent_run.model_dump()
533
- for agent_run in self.memory.runs
534
- if agent_run.response is not None and agent_run.response.session_id == self.session_id
535
- ]
1417
+ # Log Workflow Telemetry
1418
+ if self.telemetry:
1419
+ await self._alog_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
1420
+
1421
+ return workflow_run_response
1422
+
1423
+ async def _aexecute_stream(
1424
+ self,
1425
+ session: WorkflowSession,
1426
+ execution_input: WorkflowExecutionInput,
1427
+ workflow_run_response: WorkflowRunOutput,
1428
+ session_state: Optional[Dict[str, Any]] = None,
1429
+ stream_intermediate_steps: bool = False,
1430
+ websocket_handler: Optional[WebSocketHandler] = None,
1431
+ **kwargs: Any,
1432
+ ) -> AsyncIterator[WorkflowRunOutputEvent]:
1433
+ """Execute a specific pipeline by name with event streaming"""
1434
+ from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1435
+
1436
+ workflow_run_response.status = RunStatus.running
1437
+ workflow_started_event = WorkflowStartedEvent(
1438
+ run_id=workflow_run_response.run_id or "",
1439
+ workflow_name=workflow_run_response.workflow_name,
1440
+ workflow_id=workflow_run_response.workflow_id,
1441
+ session_id=workflow_run_response.session_id,
1442
+ )
1443
+ yield self._handle_event(workflow_started_event, workflow_run_response, websocket_handler=websocket_handler)
1444
+
1445
+ if callable(self.steps):
1446
+ if iscoroutinefunction(self.steps): # type: ignore
1447
+ workflow_run_response.content = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1448
+ elif isgeneratorfunction(self.steps):
1449
+ content = ""
1450
+ for chunk in self.steps(self, execution_input, **kwargs): # type: ignore[arg-type]
1451
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1452
+ content += chunk.content
1453
+ yield chunk
1454
+ else:
1455
+ content += str(chunk)
1456
+ workflow_run_response.content = content
1457
+ elif isasyncgenfunction(self.steps): # type: ignore
1458
+ content = ""
1459
+ async_gen = await self._acall_custom_function(self.steps, execution_input, **kwargs)
1460
+ async for chunk in async_gen:
1461
+ raise_if_cancelled(workflow_run_response.run_id) # type: ignore
1462
+ if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
1463
+ content += chunk.content
1464
+ yield chunk
1465
+ else:
1466
+ content += str(chunk)
1467
+ workflow_run_response.content = content
536
1468
  else:
537
- self.memory = cast(Memory, self.memory)
538
- # We fake the structure on storage, to maintain the interface with the legacy implementation
539
- run_responses = self.memory.runs[self.session_id] # type: ignore
540
- memory_dict = self.memory.to_dict()
541
- memory_dict["runs"] = [rr.to_dict() for rr in run_responses]
1469
+ workflow_run_response.content = self.steps(self, execution_input, **kwargs)
1470
+ workflow_run_response.status = RunStatus.completed
1471
+
542
1472
  else:
543
- memory_dict = None
544
- return WorkflowSession(
545
- session_id=self.session_id,
546
- workflow_id=self.workflow_id,
547
- user_id=self.user_id,
548
- memory=memory_dict,
549
- workflow_data=self.get_workflow_data(),
550
- session_data=self.get_session_data(),
551
- extra_data=self.extra_data,
1473
+ try:
1474
+ # Track outputs from each step for enhanced data flow
1475
+ collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1476
+ previous_step_outputs: Dict[str, StepOutput] = {}
1477
+
1478
+ shared_images: List[ImageArtifact] = execution_input.images or []
1479
+ output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
1480
+ shared_videos: List[VideoArtifact] = execution_input.videos or []
1481
+ output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
1482
+ shared_audio: List[AudioArtifact] = execution_input.audio or []
1483
+ output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
1484
+ shared_files: List[File] = execution_input.files or []
1485
+ output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1486
+
1487
+ early_termination = False
1488
+
1489
+ for i, step in enumerate(self.steps): # type: ignore[arg-type]
1490
+ if workflow_run_response.run_id:
1491
+ raise_if_cancelled(workflow_run_response.run_id)
1492
+ step_name = getattr(step, "name", f"step_{i + 1}")
1493
+ log_debug(f"Async streaming step {i + 1}/{self._get_step_count()}: {step_name}")
1494
+
1495
+ # Create enhanced StepInput
1496
+ step_input = self._create_step_input(
1497
+ execution_input=execution_input,
1498
+ previous_step_outputs=previous_step_outputs,
1499
+ shared_images=shared_images,
1500
+ shared_videos=shared_videos,
1501
+ shared_audio=shared_audio,
1502
+ shared_files=shared_files,
1503
+ )
1504
+
1505
+ # Execute step with streaming and yield all events
1506
+ async for event in step.aexecute_stream( # type: ignore[union-attr]
1507
+ step_input,
1508
+ session_id=session.session_id,
1509
+ user_id=self.user_id,
1510
+ stream_intermediate_steps=stream_intermediate_steps,
1511
+ workflow_run_response=workflow_run_response,
1512
+ session_state=session_state,
1513
+ step_index=i,
1514
+ store_executor_outputs=self.store_executor_outputs,
1515
+ ):
1516
+ if workflow_run_response.run_id:
1517
+ raise_if_cancelled(workflow_run_response.run_id)
1518
+ if isinstance(event, StepOutput):
1519
+ step_output = event
1520
+ collected_step_outputs.append(step_output)
1521
+
1522
+ # Update the workflow-level previous_step_outputs dictionary
1523
+ previous_step_outputs[step_name] = step_output
1524
+
1525
+ # Transform StepOutput to StepOutputEvent for consistent streaming interface
1526
+ step_output_event = self._transform_step_output_to_event(
1527
+ step_output, workflow_run_response, step_index=i
1528
+ )
1529
+
1530
+ if step_output.stop:
1531
+ logger.info(f"Early termination requested by step {step_name}")
1532
+ # Update shared media for next step
1533
+ shared_images.extend(step_output.images or [])
1534
+ shared_videos.extend(step_output.videos or [])
1535
+ shared_audio.extend(step_output.audio or [])
1536
+ shared_files.extend(step_output.files or [])
1537
+ output_images.extend(step_output.images or [])
1538
+ output_videos.extend(step_output.videos or [])
1539
+ output_audio.extend(step_output.audio or [])
1540
+ output_files.extend(step_output.files or [])
1541
+
1542
+ if getattr(step, "executor_type", None) == "function":
1543
+ yield step_output_event
1544
+
1545
+ # Break out of the step loop
1546
+ early_termination = True
1547
+ break
1548
+
1549
+ # Update shared media for next step
1550
+ shared_images.extend(step_output.images or [])
1551
+ shared_videos.extend(step_output.videos or [])
1552
+ shared_audio.extend(step_output.audio or [])
1553
+ shared_files.extend(step_output.files or [])
1554
+ output_images.extend(step_output.images or [])
1555
+ output_videos.extend(step_output.videos or [])
1556
+ output_audio.extend(step_output.audio or [])
1557
+ output_files.extend(step_output.files or [])
1558
+
1559
+ # Only yield StepOutputEvent for generator functions, not for agents/teams
1560
+ if getattr(step, "executor_type", None) == "function":
1561
+ yield step_output_event
1562
+
1563
+ elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
1564
+ yield self._handle_event(event, workflow_run_response, websocket_handler=websocket_handler) # type: ignore
1565
+
1566
+ else:
1567
+ # Yield other internal events
1568
+ yield self._handle_event(event, workflow_run_response, websocket_handler=websocket_handler) # type: ignore
1569
+
1570
+ # Break out of main step loop if early termination was requested
1571
+ if "early_termination" in locals() and early_termination:
1572
+ break
1573
+
1574
+ # Update the workflow_run_response with completion data
1575
+ if collected_step_outputs:
1576
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1577
+ last_output = cast(StepOutput, collected_step_outputs[-1])
1578
+
1579
+ # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
1580
+ if getattr(last_output, "steps", None):
1581
+ _cur = last_output
1582
+ while getattr(_cur, "steps", None):
1583
+ _steps = _cur.steps or []
1584
+ if not _steps:
1585
+ break
1586
+ _cur = _steps[-1]
1587
+ workflow_run_response.content = _cur.content
1588
+ else:
1589
+ workflow_run_response.content = last_output.content
1590
+ else:
1591
+ workflow_run_response.content = "No steps executed"
1592
+
1593
+ workflow_run_response.step_results = collected_step_outputs
1594
+ workflow_run_response.images = output_images
1595
+ workflow_run_response.videos = output_videos
1596
+ workflow_run_response.audio = output_audio
1597
+ workflow_run_response.status = RunStatus.completed
1598
+
1599
+ except RunCancelledException as e:
1600
+ # Handle run cancellation during streaming
1601
+ logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
1602
+ workflow_run_response.status = RunStatus.cancelled
1603
+ workflow_run_response.content = str(e)
1604
+ cancelled_event = WorkflowCancelledEvent(
1605
+ run_id=workflow_run_response.run_id or "",
1606
+ workflow_id=self.id,
1607
+ workflow_name=self.name,
1608
+ session_id=session.session_id,
1609
+ reason=str(e),
1610
+ )
1611
+ yield self._handle_event(
1612
+ cancelled_event,
1613
+ workflow_run_response,
1614
+ websocket_handler=websocket_handler,
1615
+ )
1616
+ except Exception as e:
1617
+ logger.error(f"Workflow execution failed: {e}")
1618
+
1619
+ from agno.run.workflow import WorkflowErrorEvent
1620
+
1621
+ error_event = WorkflowErrorEvent(
1622
+ run_id=workflow_run_response.run_id or "",
1623
+ workflow_id=self.id,
1624
+ workflow_name=self.name,
1625
+ session_id=session.session_id,
1626
+ error=str(e),
1627
+ )
1628
+
1629
+ yield error_event
1630
+
1631
+ # Update workflow_run_response with error
1632
+ workflow_run_response.content = error_event.error
1633
+ workflow_run_response.status = RunStatus.error
1634
+
1635
+ # Yield workflow completed event
1636
+ workflow_completed_event = WorkflowCompletedEvent(
1637
+ run_id=workflow_run_response.run_id or "",
1638
+ content=workflow_run_response.content,
1639
+ workflow_name=workflow_run_response.workflow_name,
1640
+ workflow_id=workflow_run_response.workflow_id,
1641
+ session_id=workflow_run_response.session_id,
1642
+ step_results=workflow_run_response.step_results, # type: ignore[arg-type]
1643
+ metadata=workflow_run_response.metadata,
552
1644
  )
1645
+ yield self._handle_event(workflow_completed_event, workflow_run_response, websocket_handler=websocket_handler)
553
1646
 
554
- def load_workflow_session(self, session: WorkflowSession):
555
- """Load the existing Workflow from a WorkflowSession (from the database)"""
1647
+ # Store the completed workflow response
1648
+ self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1649
+ session.upsert_run(run=workflow_run_response)
1650
+ self.save_session(session=session)
1651
+
1652
+ # Log Workflow Telemetry
1653
+ if self.telemetry:
1654
+ await self._alog_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
1655
+
1656
+ # Always clean up the run tracking
1657
+ cleanup_run(workflow_run_response.run_id) # type: ignore
1658
+
1659
+    async def _arun_background(
+        self,
+        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        session_state: Optional[Dict[str, Any]] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        **kwargs: Any,
+    ) -> WorkflowRunOutput:
+        """Execute workflow in background using asyncio.create_task()"""
+
+        run_id = str(uuid4())
+
+        self.initialize_workflow()
+
+        session_id, user_id, session_state = self._initialize_session(
+            session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
+        )
+
+        # Read existing session from database
+        workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
+        self._update_metadata(session=workflow_session)
+
+        # Update session state from DB
+        session_state = self._update_session_state(session=workflow_session, session_state=session_state)
+
+        self._prepare_steps()
+
+        # Create workflow run response with PENDING status
+        workflow_run_response = WorkflowRunOutput(
+            run_id=run_id,
+            session_id=session_id,
+            workflow_id=self.id,
+            workflow_name=self.name,
+            created_at=int(datetime.now().timestamp()),
+            status=RunStatus.pending,
+        )
+
+        # Store PENDING response immediately
+        workflow_session.upsert_run(run=workflow_run_response)
+        self.save_session(session=workflow_session)
+
+        # Prepare execution input
+        inputs = WorkflowExecutionInput(
+            input=input,
+            additional_data=additional_data,
+            audio=audio,  # type: ignore
+            images=images,  # type: ignore
+            videos=videos,  # type: ignore
+            files=files,  # type: ignore
+        )
+
+        self.update_agents_and_teams_session_info()
+
+        async def execute_workflow_background():
+            """Simple background execution"""
+            try:
+                # Update status to RUNNING and save
+                workflow_run_response.status = RunStatus.running
+                self.save_session(session=workflow_session)
+
+                await self._aexecute(
+                    session=workflow_session,
+                    execution_input=inputs,
+                    workflow_run_response=workflow_run_response,
+                    session_state=session_state,
+                    **kwargs,
+                )
+
+                log_debug(f"Background execution completed with status: {workflow_run_response.status}")
+
+            except Exception as e:
+                logger.error(f"Background workflow execution failed: {e}")
+                workflow_run_response.status = RunStatus.error
+                workflow_run_response.content = f"Background execution failed: {str(e)}"
+                self.save_session(session=workflow_session)
+
+        # Create and start asyncio task
+        loop = asyncio.get_running_loop()
+        loop.create_task(execute_workflow_background())
+
+        # Return SAME object that will be updated by background execution
+        return workflow_run_response
+
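This enables a fire-and-poll pattern: `arun(background=True)` returns a PENDING WorkflowRunOutput immediately, and the asyncio task updates that same object and the stored session. A minimal polling sketch, assuming an existing `workflow` with a db configured; the RunStatus import path is an assumption:

```python
import asyncio

from agno.run.base import RunStatus  # import path assumed


async def wait_for_run() -> None:
    # Returns immediately with status=RunStatus.pending.
    response = await workflow.arun(input="Generate the weekly report", background=True)

    # Poll the persisted run until it leaves the pending/running states.
    while True:
        run = workflow.get_run(response.run_id)
        if run is not None and run.status not in (RunStatus.pending, RunStatus.running):
            print(run.status, run.content)
            break
        await asyncio.sleep(1)


asyncio.run(wait_for_run())
```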
+    async def _arun_background_stream(
+        self,
+        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        session_state: Optional[Dict[str, Any]] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        stream_intermediate_steps: bool = False,
+        websocket_handler: Optional[WebSocketHandler] = None,
+        **kwargs: Any,
+    ) -> WorkflowRunOutput:
+        """Execute workflow in background with streaming and WebSocket broadcasting"""
+
+        run_id = str(uuid4())
+
+        self.initialize_workflow()
+
+        session_id, user_id, session_state = self._initialize_session(
+            session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
+        )
+
+        # Read existing session from database
+        workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
+        self._update_metadata(session=workflow_session)
+
+        # Update session state from DB
+        session_state = self._update_session_state(session=workflow_session, session_state=session_state)
+
+        self._prepare_steps()

-        # Get the workflow_id, user_id and session_id from the database
-        if self.workflow_id is None and session.workflow_id is not None:
-            self.workflow_id = session.workflow_id
-        if self.user_id is None and session.user_id is not None:
-            self.user_id = session.user_id
-        if self.session_id is None and session.session_id is not None:
-            self.session_id = session.session_id
-
-        # Read workflow_data from the database
-        if session.workflow_data is not None:
-            # Get name from database and update the workflow name if not set
-            if self.name is None and "name" in session.workflow_data:
-                self.name = session.workflow_data.get("name")
-
-        # Read session_data from the database
-        if session.session_data is not None:
-            # Get the session_name from database and update the current session_name if not set
-            if self.session_name is None and "session_name" in session.session_data:
-                self.session_name = session.session_data.get("session_name")
-
-            # Get the session_state from database and update the current session_state
-            if "session_state" in session.session_data:
-                session_state_from_db = session.session_data.get("session_state")
-                if (
-                    session_state_from_db is not None
-                    and isinstance(session_state_from_db, dict)
-                    and len(session_state_from_db) > 0
+        # Create workflow run response with PENDING status
+        workflow_run_response = WorkflowRunOutput(
+            run_id=run_id,
+            session_id=session_id,
+            workflow_id=self.id,
+            workflow_name=self.name,
+            created_at=int(datetime.now().timestamp()),
+            status=RunStatus.pending,
+        )
+
+        # Store PENDING response immediately
+        workflow_session.upsert_run(run=workflow_run_response)
+        self.save_session(session=workflow_session)
+
+        # Prepare execution input
+        inputs = WorkflowExecutionInput(
+            input=input,
+            additional_data=additional_data,
+            audio=audio,  # type: ignore
+            images=images,  # type: ignore
+            videos=videos,  # type: ignore
+            files=files,  # type: ignore
+        )
+
+        self.update_agents_and_teams_session_info()
+
+        async def execute_workflow_background_stream():
+            """Background execution with streaming and WebSocket broadcasting"""
+            try:
+                # Update status to RUNNING and save
+                workflow_run_response.status = RunStatus.running
+                self.save_session(session=workflow_session)
+
+                # Execute with streaming - consume all events (they're auto-broadcast via _handle_event)
+                async for event in self._aexecute_stream(
+                    execution_input=inputs,
+                    session=workflow_session,
+                    workflow_run_response=workflow_run_response,
+                    stream_intermediate_steps=stream_intermediate_steps,
+                    session_state=session_state,
+                    websocket_handler=websocket_handler,
+                    **kwargs,
                 ):
-                    # If the session_state is already set, merge the session_state from the database with the current session_state
-                    if len(self.session_state) > 0:
-                        # This updates session_state_from_db
-                        merge_dictionaries(session_state_from_db, self.session_state)
-                    # Update the current session_state
-                    self.session_state = session_state_from_db
-
-            # Get images, videos, and audios from the database
-            if "images" in session.session_data:
-                images_from_db = session.session_data.get("images")
-                if images_from_db is not None and isinstance(images_from_db, list):
-                    if self.images is None:
-                        self.images = []
-                    self.images.extend([ImageArtifact.model_validate(img) for img in images_from_db])
-            if "videos" in session.session_data:
-                videos_from_db = session.session_data.get("videos")
-                if videos_from_db is not None and isinstance(videos_from_db, list):
-                    if self.videos is None:
-                        self.videos = []
-                    self.videos.extend([VideoArtifact.model_validate(vid) for vid in videos_from_db])
-            if "audio" in session.session_data:
-                audio_from_db = session.session_data.get("audio")
-                if audio_from_db is not None and isinstance(audio_from_db, list):
-                    if self.audio is None:
-                        self.audio = []
-                    self.audio.extend([AudioArtifact.model_validate(aud) for aud in audio_from_db])
-
-        # Read extra_data from the database
-        if session.extra_data is not None:
-            # If extra_data is set in the workflow, update the database extra_data with the workflow's extra_data
-            if self.extra_data is not None:
-                # Updates workflow_session.extra_data in place
-                merge_dictionaries(session.extra_data, self.extra_data)
-            # Update the current extra_data with the extra_data from the database which is updated in place
-            self.extra_data = session.extra_data
-
-        if session.memory is not None:
-            if self.memory is None:
-                self.memory = Memory()
-
-            if isinstance(self.memory, Memory):
-                try:
-                    if self.memory.runs is None:
-                        self.memory.runs = {}
-                    self.memory.runs[session.session_id] = []
-                    for run in session.memory["runs"]:
-                        run_session_id = run["session_id"]
-                        self.memory.runs[run_session_id].append(RunResponse.from_dict(run))
-                except Exception as e:
-                    log_warning(f"Failed to load runs from memory: {e}")
-            else:
-                try:
-                    if "runs" in session.memory:
-                        try:
-                            self.memory.runs = [WorkflowRun(**m) for m in session.memory["runs"]]
-                        except Exception as e:
-                            logger.warning(f"Failed to load runs from memory: {e}")
-                except Exception as e:
-                    logger.warning(f"Failed to load WorkflowMemory: {e}")
+                    # Events are automatically broadcast by _handle_event
+                    # We just consume them here to drive the execution
+                    pass

-        log_debug(f"-*- WorkflowSession loaded: {session.session_id}")
+                log_debug(f"Background streaming execution completed with status: {workflow_run_response.status}")

-    def read_from_storage(self) -> Optional[WorkflowSession]:
-        """Load the WorkflowSession from storage.
+            except Exception as e:
+                logger.error(f"Background streaming workflow execution failed: {e}")
+                workflow_run_response.status = RunStatus.error
+                workflow_run_response.content = f"Background streaming execution failed: {str(e)}"
+                self.save_session(session=workflow_session)

-        Returns:
-            Optional[WorkflowSession]: The loaded WorkflowSession or None if not found.
-        """
-        if self.storage is not None and self.session_id is not None:
-            self.workflow_session = cast(WorkflowSession, self.storage.read(session_id=self.session_id))
-            if self.workflow_session is not None:
-                self.load_workflow_session(session=self.workflow_session)
-        return self.workflow_session
+        # Create and start asyncio task for background streaming execution
+        loop = asyncio.get_running_loop()
+        loop.create_task(execute_workflow_background_stream())

-    def write_to_storage(self) -> Optional[WorkflowSession]:
-        """Save the WorkflowSession to storage
+        # Return SAME object that will be updated by background execution
+        return workflow_run_response
+
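Background streaming pairs with a websocket: events produced by `_aexecute_stream` are pushed through the WebSocketHandler while the caller only gets the PENDING response back. A hedged sketch of the wiring, assuming a FastAPI/Starlette WebSocket (which is what the `WebSocket` type hints in this module suggest) and an existing `workflow`:

```python
from fastapi import FastAPI, WebSocket

app = FastAPI()


@app.websocket("/ws/workflow")
async def workflow_ws(websocket: WebSocket) -> None:
    await websocket.accept()
    # Returns a PENDING WorkflowRunOutput; the background task broadcasts
    # workflow events over the websocket as they happen.
    response = await workflow.arun(
        input="Research topic X",
        background=True,
        stream=True,
        stream_intermediate_steps=True,
        websocket=websocket,
    )
    await websocket.send_json({"run_id": response.run_id, "status": str(response.status)})
```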
+    def get_run(self, run_id: str) -> Optional[WorkflowRunOutput]:
+        """Get the status and details of a background workflow run - SIMPLIFIED"""
+        if self.db is not None and self.session_id is not None:
+            session = self.db.get_session(session_id=self.session_id, session_type=SessionType.WORKFLOW)
+            if session and isinstance(session, WorkflowSession) and session.runs:
+                # Find the run by ID
+                for run in session.runs:
+                    if run.run_id == run_id:
+                        return run
+
+        return None
+
+    def cancel_run(self, run_id: str) -> bool:
+        """Cancel a running workflow execution.
+
+        Args:
+            run_id (str): The run_id to cancel.

         Returns:
-            Optional[WorkflowSession]: The saved WorkflowSession or None if not saved.
+            bool: True if the run was found and marked for cancellation, False otherwise.
         """
-        if self.storage is not None:
-            self.workflow_session = cast(WorkflowSession, self.storage.upsert(session=self.get_workflow_session()))
-        return self.workflow_session
+        return cancel_run_global(run_id)

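`cancel_run` only flags the run in the shared run registry (`cancel_run_global`); the executing loop then raises RunCancelledException and emits the cancelled event shown earlier. A small sketch, assuming an existing `workflow` backed by a db:

```python
import asyncio


async def cancel_example() -> None:
    response = await workflow.arun(input="Crawl the entire site", background=True)

    # Request cancellation; returns False if the run_id is not currently tracked.
    requested = workflow.cancel_run(response.run_id)
    print("cancellation requested:", requested)

    await asyncio.sleep(1)
    run = workflow.get_run(response.run_id)
    if run is not None:
        print("status:", run.status)


asyncio.run(cancel_example())
```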
-    def load_session(self, force: bool = False) -> Optional[str]:
-        """Load an existing session from the database and return the session_id.
-        If a session does not exist, create a new session.
+    @overload
+    def run(
+        self,
+        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        session_state: Optional[Dict[str, Any]] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        stream: Literal[False] = False,
+        stream_intermediate_steps: Optional[bool] = None,
+        background: Optional[bool] = False,
+    ) -> WorkflowRunOutput: ...
+
+    @overload
+    def run(
+        self,
+        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        session_state: Optional[Dict[str, Any]] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        stream: Literal[True] = True,
+        stream_intermediate_steps: Optional[bool] = None,
+        background: Optional[bool] = False,
+    ) -> Iterator[WorkflowRunOutputEvent]: ...
+
+    def run(
+        self,
+        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        session_state: Optional[Dict[str, Any]] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        stream: bool = False,
+        stream_intermediate_steps: Optional[bool] = None,
+        background: Optional[bool] = False,
+        **kwargs: Any,
+    ) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
+        """Execute the workflow synchronously with optional streaming"""
+
+        validated_input = self._validate_input(input)
+        if validated_input is not None:
+            input = validated_input
+
+        if background:
+            raise RuntimeError("Background execution is not supported for sync run()")
+
+        self._set_debug()
+
+        run_id = str(uuid4())
+
+        self.initialize_workflow()
+        session_id, user_id, session_state = self._initialize_session(
+            session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
+        )

-        - If a session exists in the database, load the session.
-        - If a session does not exist in the database, create a new session.
-        """
-        # If a workflow_session is already loaded, return the session_id from the workflow_session
-        # if the session_id matches the session_id from the workflow_session
-        if self.workflow_session is not None and not force:
-            if self.session_id is not None and self.workflow_session.session_id == self.session_id:
-                return self.workflow_session.session_id
-
-        # Load an existing session or create a new session
-        if self.storage is not None:
-            # Load existing session if session_id is provided
-            log_debug(f"Reading WorkflowSession: {self.session_id}")
-            self.read_from_storage()
-
-            # Create a new session if it does not exist
-            if self.workflow_session is None:
-                log_debug("-*- Creating new WorkflowSession")
-                # write_to_storage() will create a new WorkflowSession
-                # and populate self.workflow_session with the new session
-                self.write_to_storage()
-                if self.workflow_session is None:
-                    raise Exception("Failed to create new WorkflowSession in storage")
-                log_debug(f"-*- Created WorkflowSession: {self.workflow_session.session_id}")
-                self.log_workflow_session()
-        return self.session_id
-
-    def new_session(self) -> None:
-        """Create a new Workflow session
-
-        - Clear the workflow_session
-        - Create a new session_id
-        - Load the new session
-        """
-        self.workflow_session = None
-        self.session_id = str(uuid4())
-        self.load_session(force=True)
+        # Read existing session from database
+        workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
+        self._update_metadata(session=workflow_session)

-    def log_workflow_session(self):
-        log_debug(f"*********** Logging WorkflowSession: {self.session_id} ***********")
+        # Update session state from DB
+        session_state = self._update_session_state(session=workflow_session, session_state=session_state)

-    def rename(self, name: str) -> None:
-        """Rename the Workflow and save to storage"""
+        log_debug(f"Workflow Run Start: {self.name}", center=True)

-        # -*- Read from storage
-        self.read_from_storage()
-        # -*- Rename Workflow
-        self.name = name
-        # -*- Save to storage
-        self.write_to_storage()
-        # -*- Log Workflow session
-        self.log_workflow_session()
+        # Use simple defaults
+        stream = stream or self.stream or False
+        stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False

-    def rename_session(self, session_name: str):
-        """Rename the current session and save to storage"""
+        # Can't have stream_intermediate_steps if stream is False
+        if not stream:
+            stream_intermediate_steps = False

-        # -*- Read from storage
-        self.read_from_storage()
-        # -*- Rename session
-        self.session_name = session_name
-        # -*- Save to storage
-        self.write_to_storage()
-        # -*- Log Workflow session
-        self.log_workflow_session()
+        log_debug(f"Stream: {stream}")
+        log_debug(f"Total steps: {self._get_step_count()}")

-    def delete_session(self, session_id: str):
-        """Delete the current session and save to storage"""
-        if self.storage is None:
-            return
-        # -*- Delete session
-        self.storage.delete_session(session_id=session_id)
+        # Prepare steps
+        self._prepare_steps()
+
+        # Create workflow run response that will be updated by reference
+        workflow_run_response = WorkflowRunOutput(
+            run_id=run_id,
+            session_id=session_id,
+            workflow_id=self.id,
+            workflow_name=self.name,
+            created_at=int(datetime.now().timestamp()),
+        )
+
+        inputs = WorkflowExecutionInput(
+            input=input,
+            additional_data=additional_data,
+            audio=audio,  # type: ignore
+            images=images,  # type: ignore
+            videos=videos,  # type: ignore
+            files=files,  # type: ignore
+        )
+        log_debug(
+            f"Created pipeline input with session state keys: {list(session_state.keys()) if session_state else 'None'}"
+        )
+
+        self.update_agents_and_teams_session_info()
+
+        if stream:
+            return self._execute_stream(
+                session=workflow_session,
+                execution_input=inputs,  # type: ignore[arg-type]
+                workflow_run_response=workflow_run_response,
+                stream_intermediate_steps=stream_intermediate_steps,
+                session_state=session_state,
+                **kwargs,
+            )
+        else:
+            return self._execute(
+                session=workflow_session,
+                execution_input=inputs,  # type: ignore[arg-type]
+                workflow_run_response=workflow_run_response,
+                session_state=session_state,
+                **kwargs,
+            )
+
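Usage of the synchronous entry point is unchanged in shape: without `stream` it returns a WorkflowRunOutput, with `stream=True` it returns an iterator of events. A minimal sketch, assuming an existing `workflow`:

```python
# Blocking call: returns the final WorkflowRunOutput.
result = workflow.run(input="Write a haiku about the ocean")
print(result.status, result.content)

# Streaming call: returns an Iterator[WorkflowRunOutputEvent].
for event in workflow.run(input="Write a haiku about the sea", stream=True, stream_intermediate_steps=True):
    print(type(event).__name__)
```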
+    @overload
+    async def arun(
+        self,
+        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        session_state: Optional[Dict[str, Any]] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        stream: Literal[False] = False,
+        stream_intermediate_steps: Optional[bool] = None,
+        background: Optional[bool] = False,
+        websocket: Optional[WebSocket] = None,
+    ) -> WorkflowRunOutput: ...
+
+    @overload
+    async def arun(
+        self,
+        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        session_state: Optional[Dict[str, Any]] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        stream: Literal[True] = True,
+        stream_intermediate_steps: Optional[bool] = None,
+        background: Optional[bool] = False,
+        websocket: Optional[WebSocket] = None,
+    ) -> AsyncIterator[WorkflowRunOutputEvent]: ...
+
+    async def arun(
+        self,
+        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        session_state: Optional[Dict[str, Any]] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        stream: bool = False,
+        stream_intermediate_steps: Optional[bool] = False,
+        background: Optional[bool] = False,
+        websocket: Optional[WebSocket] = None,
+        **kwargs: Any,
+    ) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
+        """Execute the workflow asynchronously with optional streaming"""
+
+        validated_input = self._validate_input(input)
+        if validated_input is not None:
+            input = validated_input
+
+        websocket_handler = None
+        if websocket:
+            from agno.workflow.types import WebSocketHandler
+
+            websocket_handler = WebSocketHandler(websocket=websocket)
+
+        if background:
+            if stream and websocket:
+                # Background + Streaming + WebSocket = Real-time events
+                return await self._arun_background_stream(
+                    input=input,
+                    additional_data=additional_data,
+                    user_id=user_id,
+                    session_id=session_id,
+                    audio=audio,
+                    images=images,
+                    videos=videos,
+                    files=files,
+                    stream_intermediate_steps=stream_intermediate_steps or False,
+                    websocket_handler=websocket_handler,
+                    **kwargs,
+                )
+            elif stream and not websocket:
+                # Background + Streaming but no WebSocket = Not supported
+                raise ValueError("Background streaming execution requires a WebSocket for real-time events")
+            else:
+                # Background + Non-streaming = Polling (existing)
+                return await self._arun_background(
+                    input=input,
+                    additional_data=additional_data,
+                    user_id=user_id,
+                    session_id=session_id,
+                    audio=audio,
+                    images=images,
+                    videos=videos,
+                    files=files,
+                    **kwargs,
+                )
+
+        self._set_debug()
+
+        run_id = str(uuid4())
+
+        self.initialize_workflow()
+        session_id, user_id, session_state = self._initialize_session(
+            session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
+        )
+
+        # Read existing session from database
+        workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
+        self._update_metadata(session=workflow_session)
+
+        # Update session state from DB
+        session_state = self._update_session_state(session=workflow_session, session_state=session_state)
+
+        log_debug(f"Async Workflow Run Start: {self.name}", center=True)
+
+        # Use simple defaults
+        stream = stream or self.stream or False
+        stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
+
+        # Can't have stream_intermediate_steps if stream is False
+        if not stream:
+            stream_intermediate_steps = False
+
+        log_debug(f"Stream: {stream}")
+
+        # Prepare steps
+        self._prepare_steps()
+
+        # Create workflow run response that will be updated by reference
+        workflow_run_response = WorkflowRunOutput(
+            run_id=run_id,
+            session_id=session_id,
+            workflow_id=self.id,
+            workflow_name=self.name,
+            created_at=int(datetime.now().timestamp()),
+        )
+
+        inputs = WorkflowExecutionInput(
+            input=input,
+            additional_data=additional_data,
+            audio=audio,  # type: ignore
+            images=images,  # type: ignore
+            videos=videos,  # type: ignore
+        )
+        log_debug(
+            f"Created async pipeline input with session state keys: {list(session_state.keys()) if session_state else 'None'}"
+        )
+
+        self.update_agents_and_teams_session_info()
+
+        if stream:
+            return self._aexecute_stream(
+                execution_input=inputs,
+                workflow_run_response=workflow_run_response,
+                session=workflow_session,
+                stream_intermediate_steps=stream_intermediate_steps,
+                websocket=websocket,
+                files=files,
+                session_state=session_state,
+                **kwargs,
+            )
+        else:
+            return await self._aexecute(
+                execution_input=inputs,
+                workflow_run_response=workflow_run_response,
+                session=workflow_session,
+                websocket=websocket,
+                files=files,
+                session_state=session_state,
+                **kwargs,
+            )
+
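Because `arun` is itself a coroutine, the streaming variant has to be awaited first to obtain the async iterator it returns. A minimal sketch, assuming an existing `workflow`:

```python
import asyncio


async def main() -> None:
    # Non-streaming: await the final WorkflowRunOutput.
    result = await workflow.arun(input="Summarise this quarter's metrics")
    print(result.content)

    # Streaming: awaiting arun(stream=True) yields an AsyncIterator of events.
    event_stream = await workflow.arun(input="Summarise this quarter's metrics", stream=True)
    async for event in event_stream:
        print(type(event).__name__)


asyncio.run(main())
```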
+    def _prepare_steps(self):
+        """Prepare the steps for execution"""
+        if not callable(self.steps) and self.steps is not None:
+            prepared_steps: List[Union[Step, Steps, Loop, Parallel, Condition, Router]] = []
+            for i, step in enumerate(self.steps):  # type: ignore
+                if callable(step) and hasattr(step, "__name__"):
+                    step_name = step.__name__
+                    log_debug(f"Step {i + 1}: Wrapping callable function '{step_name}'")
+                    prepared_steps.append(Step(name=step_name, description="User-defined callable step", executor=step))
+                elif isinstance(step, Agent):
+                    step_name = step.name or f"step_{i + 1}"
+                    log_debug(f"Step {i + 1}: Agent '{step_name}'")
+                    prepared_steps.append(Step(name=step_name, description=step.description, agent=step))
+                elif isinstance(step, Team):
+                    step_name = step.name or f"step_{i + 1}"
+                    log_debug(f"Step {i + 1}: Team '{step_name}' with {len(step.members)} members")
+                    prepared_steps.append(Step(name=step_name, description=step.description, team=step))
+                elif isinstance(step, (Step, Steps, Loop, Parallel, Condition, Router)):
+                    step_type = type(step).__name__
+                    step_name = getattr(step, "name", f"unnamed_{step_type.lower()}")
+                    log_debug(f"Step {i + 1}: {step_type} '{step_name}'")
+                    prepared_steps.append(step)
+                else:
+                    raise ValueError(f"Invalid step type: {type(step).__name__}")

-    def deep_copy(self, *, update: Optional[Dict[str, Any]] = None) -> Workflow:
-        """Create and return a deep copy of this Workflow, optionally updating fields.
+            self.steps = prepared_steps  # type: ignore
+        log_debug("Step preparation completed")
+
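In practice this means a workflow's `steps` list can freely mix plain functions, Agents, Teams and the step primitives; everything is normalised to Step objects before execution. A hedged sketch (the agent/team variables and the executor signature are assumptions; `_prepare_steps` is normally invoked internally by `run()`/`arun()`):

```python
def summarize(step_input):
    # Plain callables are wrapped as Step(name="summarize", executor=summarize).
    return f"summary of: {step_input}"


workflow.steps = [
    summarize,        # callable -> Step with executor
    research_agent,   # existing Agent -> Step(agent=...)
    review_team,      # existing Team -> Step(team=...)
]
workflow._prepare_steps()
```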
+    def print_response(
+        self,
+        input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        stream: Optional[bool] = None,
+        stream_intermediate_steps: Optional[bool] = None,
+        markdown: bool = True,
+        show_time: bool = True,
+        show_step_details: bool = True,
+        console: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Print workflow execution with rich formatting and optional streaming

         Args:
-            update (Optional[Dict[str, Any]]): Optional dictionary of fields for the new Workflow.
+            input: The main query/input for the workflow
+            additional_data: Attached message data to the input
+            user_id: User ID
+            session_id: Session ID
+            audio: Audio input
+            images: Image input
+            videos: Video input
+            stream: Whether to stream the response content
+            stream_intermediate_steps: Whether to stream intermediate steps
+            markdown: Whether to render content as markdown
+            show_time: Whether to show execution time
+            show_step_details: Whether to show individual step outputs
+            console: Rich console instance (optional)
+        """

-        Returns:
-            Workflow: A new Workflow instance.
+        if stream is None:
+            stream = self.stream or False
+
+        if stream_intermediate_steps is None:
+            stream_intermediate_steps = self.stream_intermediate_steps or False
+
+        if stream:
+            print_response_stream(
+                workflow=self,
+                input=input,
+                user_id=user_id,
+                session_id=session_id,
+                additional_data=additional_data,
+                audio=audio,
+                images=images,
+                videos=videos,
+                files=files,
+                stream_intermediate_steps=stream_intermediate_steps,
+                markdown=markdown,
+                show_time=show_time,
+                show_step_details=show_step_details,
+                console=console,
+                **kwargs,
+            )
+        else:
+            print_response(
+                workflow=self,
+                input=input,
+                user_id=user_id,
+                session_id=session_id,
+                additional_data=additional_data,
+                audio=audio,
+                images=images,
+                videos=videos,
+                files=files,
+                markdown=markdown,
+                show_time=show_time,
+                show_step_details=show_step_details,
+                console=console,
+                **kwargs,
+            )
+
+    async def aprint_response(
+        self,
+        input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
+        additional_data: Optional[Dict[str, Any]] = None,
+        user_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        audio: Optional[List[Audio]] = None,
+        images: Optional[List[Image]] = None,
+        videos: Optional[List[Video]] = None,
+        files: Optional[List[File]] = None,
+        stream: Optional[bool] = None,
+        stream_intermediate_steps: Optional[bool] = None,
+        markdown: bool = True,
+        show_time: bool = True,
+        show_step_details: bool = True,
+        console: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Print workflow execution with rich formatting and optional streaming
+
+        Args:
+            input: The main message/input for the workflow
+            additional_data: Attached message data to the input
+            user_id: User ID
+            session_id: Session ID
+            audio: Audio input
+            images: Image input
+            videos: Video input
+            stream_intermediate_steps: Whether to stream intermediate steps
+            stream: Whether to stream the response content
+            markdown: Whether to render content as markdown
+            show_time: Whether to show execution time
+            show_step_details: Whether to show individual step outputs
+            console: Rich console instance (optional)
         """
-        # Extract the fields to set for the new Workflow
-        fields_for_new_workflow: Dict[str, Any] = {}
-
-        for f in fields(self):
-            field_value = getattr(self, f.name)
-            if field_value is not None:
-                if isinstance(field_value, Agent):
-                    fields_for_new_workflow[f.name] = field_value.deep_copy()
-                else:
-                    fields_for_new_workflow[f.name] = self._deep_copy_field(f.name, field_value)
+        if stream is None:
+            stream = self.stream or False
+
+        if stream_intermediate_steps is None:
+            stream_intermediate_steps = self.stream_intermediate_steps or False
+
+        if stream:
+            await aprint_response_stream(
+                workflow=self,
+                input=input,
+                additional_data=additional_data,
+                user_id=user_id,
+                session_id=session_id,
+                audio=audio,
+                images=images,
+                videos=videos,
+                files=files,
+                stream_intermediate_steps=stream_intermediate_steps,
+                markdown=markdown,
+                show_time=show_time,
+                show_step_details=show_step_details,
+                console=console,
+                **kwargs,
+            )
+        else:
+            await aprint_response(
+                workflow=self,
+                input=input,
+                additional_data=additional_data,
+                user_id=user_id,
+                session_id=session_id,
+                audio=audio,
+                images=images,
+                videos=videos,
+                files=files,
+                markdown=markdown,
+                show_time=show_time,
+                show_step_details=show_step_details,
+                console=console,
+                **kwargs,
+            )
+
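A short usage sketch for the console helpers, assuming an existing `workflow`; all keyword arguments shown are the ones defined above:

```python
workflow.print_response(
    input="Draft a release announcement",
    stream=True,
    stream_intermediate_steps=True,
    markdown=True,
    show_time=True,
    show_step_details=True,
)
```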
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert workflow to dictionary representation"""

-        # Update fields if provided
-        if update:
-            fields_for_new_workflow.update(update)
+        def serialize_step(step):
+            step_dict = {
+                "name": step.name if hasattr(step, "name") else f"unnamed_{type(step).__name__.lower()}",
+                "description": step.description if hasattr(step, "description") else "User-defined callable step",
+                "type": STEP_TYPE_MAPPING[type(step)].value,  # type: ignore
+            }

-        # Create a new Workflow
-        new_workflow = self.__class__(**fields_for_new_workflow)
-        log_debug(f"Created new {self.__class__.__name__}")
-        return new_workflow
+            # Handle agent/team/tools
+            if hasattr(step, "agent"):
+                step_dict["agent"] = step.agent if hasattr(step, "agent") else None  # type: ignore
+            if hasattr(step, "team"):
+                step_dict["team"] = step.team if hasattr(step, "team") else None  # type: ignore

-    def _deep_copy_field(self, field_name: str, field_value: Any) -> Any:
-        """Helper method to deep copy a field based on its type."""
-        from copy import copy, deepcopy
+            # Handle nested steps for Router/Loop
+            if isinstance(step, (Router)):
+                step_dict["steps"] = (
+                    [serialize_step(step) for step in step.choices] if hasattr(step, "choices") else None
+                )

-        # For memory, use its deep_copy method
-        if field_name == "memory":
-            return field_value.deep_copy()
+            elif isinstance(step, (Loop, Condition, Steps, Parallel)):
+                step_dict["steps"] = [serialize_step(step) for step in step.steps] if hasattr(step, "steps") else None

-        # For compound types, attempt a deep copy
-        if isinstance(field_value, (list, dict, set, Storage)):
-            try:
-                return deepcopy(field_value)
-            except Exception as e:
-                logger.warning(f"Failed to deepcopy field: {field_name} - {e}")
-                try:
-                    return copy(field_value)
-                except Exception as e:
-                    logger.warning(f"Failed to copy field: {field_name} - {e}")
-                    return field_value
+            return step_dict

-        # For pydantic models, attempt a model_copy
-        if isinstance(field_value, BaseModel):
-            try:
-                return field_value.model_copy(deep=True)
-            except Exception as e:
-                logger.warning(f"Failed to deepcopy field: {field_name} - {e}")
-                try:
-                    return field_value.model_copy(deep=False)
-                except Exception as e:
-                    logger.warning(f"Failed to copy field: {field_name} - {e}")
-                    return field_value
+        if self.steps is None or callable(self.steps):
+            steps_list = []
+        elif isinstance(self.steps, Steps):
+            steps_list = self.steps.steps
+        else:
+            steps_list = self.steps
+
+        return {
+            "name": self.name,
+            "workflow_id": self.id,
+            "description": self.description,
+            "steps": [serialize_step(s) for s in steps_list],
+            "session_id": self.session_id,
+        }
+
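The serialised form is intentionally shallow; it has roughly the following shape (field values are illustrative, and the `type` string comes from STEP_TYPE_MAPPING):

```python
workflow_dict = workflow.to_dict()
# Illustrative result:
# {
#     "name": "Research Workflow",
#     "workflow_id": "wf_123",
#     "description": "...",
#     "steps": [{"name": "summarize", "description": "...", "type": ...}],
#     "session_id": "session_abc",
# }
```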
+    def _calculate_session_metrics_from_workflow_metrics(self, workflow_metrics: WorkflowMetrics) -> Metrics:
+        """Calculate session metrics by aggregating all step metrics from workflow metrics"""
+        session_metrics = Metrics()
+
+        # Aggregate metrics from all steps
+        for step_name, step_metrics in workflow_metrics.steps.items():
+            if step_metrics.metrics:
+                session_metrics += step_metrics.metrics
+
+        session_metrics.time_to_first_token = None
+
+        return session_metrics
+
+    def _get_session_metrics(self, session: WorkflowSession) -> Metrics:
+        """Get existing session metrics from the database"""
+        if session.session_data and "session_metrics" in session.session_data:
+            session_metrics_from_db = session.session_data.get("session_metrics")
+            if session_metrics_from_db is not None:
+                if isinstance(session_metrics_from_db, dict):
+                    return Metrics(**session_metrics_from_db)
+                elif isinstance(session_metrics_from_db, Metrics):
+                    return session_metrics_from_db
+        return Metrics()
+
+    def _update_session_metrics(self, session: WorkflowSession, workflow_run_response: WorkflowRunOutput):
+        """Calculate and update session metrics"""
+        # Get existing session metrics
+        session_metrics = self._get_session_metrics(session=session)
+
+        # If workflow has metrics, convert and add them to session metrics
+        if workflow_run_response.metrics:
+            run_session_metrics = self._calculate_session_metrics_from_workflow_metrics(workflow_run_response.metrics)
+
+            session_metrics += run_session_metrics
+
+        session_metrics.time_to_first_token = None
+
+        # Store updated session metrics - CONVERT TO DICT FOR JSON SERIALIZATION
+        if not session.session_data:
+            session.session_data = {}
+        session.session_data["session_metrics"] = session_metrics.to_dict()
+
+    def get_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
+        """Get the session metrics for the given session ID."""
+        session_id = session_id or self.session_id
+        if session_id is None:
+            raise Exception("Session ID is required")
+
+        session = self.get_session(session_id=session_id)
+        if session is None:
+            raise Exception("Session not found")
+
+        return self._get_session_metrics(session=session)
+
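Session metrics therefore accumulate across runs and can be read back per session. A minimal sketch, assuming an existing `workflow` and a known session id (the id shown is illustrative):

```python
metrics = workflow.get_session_metrics(session_id="session_abc")
if metrics is not None:
    print(metrics.to_dict())
```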
+    def update_agents_and_teams_session_info(self):
+        """Update agents and teams with workflow session information"""
+        log_debug("Updating agents and teams with session information")
+        # Initialize steps - only if steps is iterable (not callable)
+        if self.steps and not callable(self.steps):
+            steps_list = self.steps.steps if isinstance(self.steps, Steps) else self.steps
+            for step in steps_list:
+                # TODO: Properly handle steps nested inside other primitives
+                if isinstance(step, Step):
+                    active_executor = step.active_executor
+
+                    if hasattr(active_executor, "workflow_id"):
+                        active_executor.workflow_id = self.id
+
+                    # If it's a team, update all members
+                    if hasattr(active_executor, "members"):
+                        for member in active_executor.members:
+                            if hasattr(member, "workflow_id"):
+                                member.workflow_id = self.id
+
+    ###########################################################################
+    # Telemetry functions
+    ###########################################################################
+
+    def _get_telemetry_data(self) -> Dict[str, Any]:
+        """Get the telemetry data for the workflow"""
+        return {
+            "workflow_id": self.id,
+            "db_type": self.db.__class__.__name__ if self.db else None,
+            "has_input_schema": self.input_schema is not None,
+        }
+
+    def _log_workflow_telemetry(self, session_id: str, run_id: Optional[str] = None) -> None:
+        """Send a telemetry event to the API for a created Workflow run"""
+
+        self._set_telemetry()
+        if not self.telemetry:
+            return
+
+        from agno.api.workflow import WorkflowRunCreate, create_workflow_run

-        # For other types, return as is
-        return field_value
+        try:
+            create_workflow_run(
+                workflow=WorkflowRunCreate(session_id=session_id, run_id=run_id, data=self._get_telemetry_data()),
+            )
+        except Exception as e:
+            log_debug(f"Could not create Workflow run telemetry event: {e}")
+
+    async def _alog_workflow_telemetry(self, session_id: str, run_id: Optional[str] = None) -> None:
+        """Send a telemetry event to the API for a created Workflow async run"""
+
+        self._set_telemetry()
+        if not self.telemetry:
+            return
+
+        from agno.api.workflow import WorkflowRunCreate, acreate_workflow_run
+
+        try:
+            await acreate_workflow_run(
+                workflow=WorkflowRunCreate(session_id=session_id, run_id=run_id, data=self._get_telemetry_data())
+            )
+        except Exception as e:
+            log_debug(f"Could not create Workflow run telemetry event: {e}")