agno 2.0.0rc2__py3-none-any.whl → 2.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (331)
  1. agno/agent/agent.py +6009 -2874
  2. agno/api/api.py +2 -0
  3. agno/api/os.py +1 -1
  4. agno/culture/__init__.py +3 -0
  5. agno/culture/manager.py +956 -0
  6. agno/db/async_postgres/__init__.py +3 -0
  7. agno/db/base.py +385 -6
  8. agno/db/dynamo/dynamo.py +388 -81
  9. agno/db/dynamo/schemas.py +47 -10
  10. agno/db/dynamo/utils.py +63 -4
  11. agno/db/firestore/firestore.py +435 -64
  12. agno/db/firestore/schemas.py +11 -0
  13. agno/db/firestore/utils.py +102 -4
  14. agno/db/gcs_json/gcs_json_db.py +384 -42
  15. agno/db/gcs_json/utils.py +60 -26
  16. agno/db/in_memory/in_memory_db.py +351 -66
  17. agno/db/in_memory/utils.py +60 -2
  18. agno/db/json/json_db.py +339 -48
  19. agno/db/json/utils.py +60 -26
  20. agno/db/migrations/manager.py +199 -0
  21. agno/db/migrations/v1_to_v2.py +510 -37
  22. agno/db/migrations/versions/__init__.py +0 -0
  23. agno/db/migrations/versions/v2_3_0.py +938 -0
  24. agno/db/mongo/__init__.py +15 -1
  25. agno/db/mongo/async_mongo.py +2036 -0
  26. agno/db/mongo/mongo.py +653 -76
  27. agno/db/mongo/schemas.py +13 -0
  28. agno/db/mongo/utils.py +80 -8
  29. agno/db/mysql/mysql.py +687 -25
  30. agno/db/mysql/schemas.py +61 -37
  31. agno/db/mysql/utils.py +60 -2
  32. agno/db/postgres/__init__.py +2 -1
  33. agno/db/postgres/async_postgres.py +2001 -0
  34. agno/db/postgres/postgres.py +676 -57
  35. agno/db/postgres/schemas.py +43 -18
  36. agno/db/postgres/utils.py +164 -2
  37. agno/db/redis/redis.py +344 -38
  38. agno/db/redis/schemas.py +18 -0
  39. agno/db/redis/utils.py +60 -2
  40. agno/db/schemas/__init__.py +2 -1
  41. agno/db/schemas/culture.py +120 -0
  42. agno/db/schemas/memory.py +13 -0
  43. agno/db/singlestore/schemas.py +26 -1
  44. agno/db/singlestore/singlestore.py +687 -53
  45. agno/db/singlestore/utils.py +60 -2
  46. agno/db/sqlite/__init__.py +2 -1
  47. agno/db/sqlite/async_sqlite.py +2371 -0
  48. agno/db/sqlite/schemas.py +24 -0
  49. agno/db/sqlite/sqlite.py +774 -85
  50. agno/db/sqlite/utils.py +168 -5
  51. agno/db/surrealdb/__init__.py +3 -0
  52. agno/db/surrealdb/metrics.py +292 -0
  53. agno/db/surrealdb/models.py +309 -0
  54. agno/db/surrealdb/queries.py +71 -0
  55. agno/db/surrealdb/surrealdb.py +1361 -0
  56. agno/db/surrealdb/utils.py +147 -0
  57. agno/db/utils.py +50 -22
  58. agno/eval/accuracy.py +50 -43
  59. agno/eval/performance.py +6 -3
  60. agno/eval/reliability.py +6 -3
  61. agno/eval/utils.py +33 -16
  62. agno/exceptions.py +68 -1
  63. agno/filters.py +354 -0
  64. agno/guardrails/__init__.py +6 -0
  65. agno/guardrails/base.py +19 -0
  66. agno/guardrails/openai.py +144 -0
  67. agno/guardrails/pii.py +94 -0
  68. agno/guardrails/prompt_injection.py +52 -0
  69. agno/integrations/discord/client.py +1 -0
  70. agno/knowledge/chunking/agentic.py +13 -10
  71. agno/knowledge/chunking/fixed.py +1 -1
  72. agno/knowledge/chunking/semantic.py +40 -8
  73. agno/knowledge/chunking/strategy.py +59 -15
  74. agno/knowledge/embedder/aws_bedrock.py +9 -4
  75. agno/knowledge/embedder/azure_openai.py +54 -0
  76. agno/knowledge/embedder/base.py +2 -0
  77. agno/knowledge/embedder/cohere.py +184 -5
  78. agno/knowledge/embedder/fastembed.py +1 -1
  79. agno/knowledge/embedder/google.py +79 -1
  80. agno/knowledge/embedder/huggingface.py +9 -4
  81. agno/knowledge/embedder/jina.py +63 -0
  82. agno/knowledge/embedder/mistral.py +78 -11
  83. agno/knowledge/embedder/nebius.py +1 -1
  84. agno/knowledge/embedder/ollama.py +13 -0
  85. agno/knowledge/embedder/openai.py +37 -65
  86. agno/knowledge/embedder/sentence_transformer.py +8 -4
  87. agno/knowledge/embedder/vllm.py +262 -0
  88. agno/knowledge/embedder/voyageai.py +69 -16
  89. agno/knowledge/knowledge.py +595 -187
  90. agno/knowledge/reader/base.py +9 -2
  91. agno/knowledge/reader/csv_reader.py +8 -10
  92. agno/knowledge/reader/docx_reader.py +5 -6
  93. agno/knowledge/reader/field_labeled_csv_reader.py +290 -0
  94. agno/knowledge/reader/json_reader.py +6 -5
  95. agno/knowledge/reader/markdown_reader.py +13 -13
  96. agno/knowledge/reader/pdf_reader.py +43 -68
  97. agno/knowledge/reader/pptx_reader.py +101 -0
  98. agno/knowledge/reader/reader_factory.py +51 -6
  99. agno/knowledge/reader/s3_reader.py +3 -15
  100. agno/knowledge/reader/tavily_reader.py +194 -0
  101. agno/knowledge/reader/text_reader.py +13 -13
  102. agno/knowledge/reader/web_search_reader.py +2 -43
  103. agno/knowledge/reader/website_reader.py +43 -25
  104. agno/knowledge/reranker/__init__.py +3 -0
  105. agno/knowledge/types.py +9 -0
  106. agno/knowledge/utils.py +20 -0
  107. agno/media.py +339 -266
  108. agno/memory/manager.py +336 -82
  109. agno/models/aimlapi/aimlapi.py +2 -2
  110. agno/models/anthropic/claude.py +183 -37
  111. agno/models/aws/bedrock.py +52 -112
  112. agno/models/aws/claude.py +33 -1
  113. agno/models/azure/ai_foundry.py +33 -15
  114. agno/models/azure/openai_chat.py +25 -8
  115. agno/models/base.py +1011 -566
  116. agno/models/cerebras/cerebras.py +19 -13
  117. agno/models/cerebras/cerebras_openai.py +8 -5
  118. agno/models/cohere/chat.py +27 -1
  119. agno/models/cometapi/__init__.py +5 -0
  120. agno/models/cometapi/cometapi.py +57 -0
  121. agno/models/dashscope/dashscope.py +1 -0
  122. agno/models/deepinfra/deepinfra.py +2 -2
  123. agno/models/deepseek/deepseek.py +2 -2
  124. agno/models/fireworks/fireworks.py +2 -2
  125. agno/models/google/gemini.py +110 -37
  126. agno/models/groq/groq.py +28 -11
  127. agno/models/huggingface/huggingface.py +2 -1
  128. agno/models/internlm/internlm.py +2 -2
  129. agno/models/langdb/langdb.py +4 -4
  130. agno/models/litellm/chat.py +18 -1
  131. agno/models/litellm/litellm_openai.py +2 -2
  132. agno/models/llama_cpp/__init__.py +5 -0
  133. agno/models/llama_cpp/llama_cpp.py +22 -0
  134. agno/models/message.py +143 -4
  135. agno/models/meta/llama.py +27 -10
  136. agno/models/meta/llama_openai.py +5 -17
  137. agno/models/nebius/nebius.py +6 -6
  138. agno/models/nexus/__init__.py +3 -0
  139. agno/models/nexus/nexus.py +22 -0
  140. agno/models/nvidia/nvidia.py +2 -2
  141. agno/models/ollama/chat.py +60 -6
  142. agno/models/openai/chat.py +102 -43
  143. agno/models/openai/responses.py +103 -106
  144. agno/models/openrouter/openrouter.py +41 -3
  145. agno/models/perplexity/perplexity.py +4 -5
  146. agno/models/portkey/portkey.py +3 -3
  147. agno/models/requesty/__init__.py +5 -0
  148. agno/models/requesty/requesty.py +52 -0
  149. agno/models/response.py +81 -5
  150. agno/models/sambanova/sambanova.py +2 -2
  151. agno/models/siliconflow/__init__.py +5 -0
  152. agno/models/siliconflow/siliconflow.py +25 -0
  153. agno/models/together/together.py +2 -2
  154. agno/models/utils.py +254 -8
  155. agno/models/vercel/v0.py +2 -2
  156. agno/models/vertexai/__init__.py +0 -0
  157. agno/models/vertexai/claude.py +96 -0
  158. agno/models/vllm/vllm.py +1 -0
  159. agno/models/xai/xai.py +3 -2
  160. agno/os/app.py +543 -175
  161. agno/os/auth.py +24 -14
  162. agno/os/config.py +1 -0
  163. agno/os/interfaces/__init__.py +1 -0
  164. agno/os/interfaces/a2a/__init__.py +3 -0
  165. agno/os/interfaces/a2a/a2a.py +42 -0
  166. agno/os/interfaces/a2a/router.py +250 -0
  167. agno/os/interfaces/a2a/utils.py +924 -0
  168. agno/os/interfaces/agui/agui.py +23 -7
  169. agno/os/interfaces/agui/router.py +27 -3
  170. agno/os/interfaces/agui/utils.py +242 -142
  171. agno/os/interfaces/base.py +6 -2
  172. agno/os/interfaces/slack/router.py +81 -23
  173. agno/os/interfaces/slack/slack.py +29 -14
  174. agno/os/interfaces/whatsapp/router.py +11 -4
  175. agno/os/interfaces/whatsapp/whatsapp.py +14 -7
  176. agno/os/mcp.py +111 -54
  177. agno/os/middleware/__init__.py +7 -0
  178. agno/os/middleware/jwt.py +233 -0
  179. agno/os/router.py +556 -139
  180. agno/os/routers/evals/evals.py +71 -34
  181. agno/os/routers/evals/schemas.py +31 -31
  182. agno/os/routers/evals/utils.py +6 -5
  183. agno/os/routers/health.py +31 -0
  184. agno/os/routers/home.py +52 -0
  185. agno/os/routers/knowledge/knowledge.py +185 -38
  186. agno/os/routers/knowledge/schemas.py +82 -22
  187. agno/os/routers/memory/memory.py +158 -53
  188. agno/os/routers/memory/schemas.py +20 -16
  189. agno/os/routers/metrics/metrics.py +20 -8
  190. agno/os/routers/metrics/schemas.py +16 -16
  191. agno/os/routers/session/session.py +499 -38
  192. agno/os/schema.py +308 -198
  193. agno/os/utils.py +401 -41
  194. agno/reasoning/anthropic.py +80 -0
  195. agno/reasoning/azure_ai_foundry.py +2 -2
  196. agno/reasoning/deepseek.py +2 -2
  197. agno/reasoning/default.py +3 -1
  198. agno/reasoning/gemini.py +73 -0
  199. agno/reasoning/groq.py +2 -2
  200. agno/reasoning/ollama.py +2 -2
  201. agno/reasoning/openai.py +7 -2
  202. agno/reasoning/vertexai.py +76 -0
  203. agno/run/__init__.py +6 -0
  204. agno/run/agent.py +266 -112
  205. agno/run/base.py +53 -24
  206. agno/run/team.py +252 -111
  207. agno/run/workflow.py +156 -45
  208. agno/session/agent.py +105 -89
  209. agno/session/summary.py +65 -25
  210. agno/session/team.py +176 -96
  211. agno/session/workflow.py +406 -40
  212. agno/team/team.py +3854 -1692
  213. agno/tools/brightdata.py +3 -3
  214. agno/tools/cartesia.py +3 -5
  215. agno/tools/dalle.py +9 -8
  216. agno/tools/decorator.py +4 -2
  217. agno/tools/desi_vocal.py +2 -2
  218. agno/tools/duckduckgo.py +15 -11
  219. agno/tools/e2b.py +20 -13
  220. agno/tools/eleven_labs.py +26 -28
  221. agno/tools/exa.py +21 -16
  222. agno/tools/fal.py +4 -4
  223. agno/tools/file.py +153 -23
  224. agno/tools/file_generation.py +350 -0
  225. agno/tools/firecrawl.py +4 -4
  226. agno/tools/function.py +257 -37
  227. agno/tools/giphy.py +2 -2
  228. agno/tools/gmail.py +238 -14
  229. agno/tools/google_drive.py +270 -0
  230. agno/tools/googlecalendar.py +36 -8
  231. agno/tools/googlesheets.py +20 -5
  232. agno/tools/jira.py +20 -0
  233. agno/tools/knowledge.py +3 -3
  234. agno/tools/lumalab.py +3 -3
  235. agno/tools/mcp/__init__.py +10 -0
  236. agno/tools/mcp/mcp.py +331 -0
  237. agno/tools/mcp/multi_mcp.py +347 -0
  238. agno/tools/mcp/params.py +24 -0
  239. agno/tools/mcp_toolbox.py +284 -0
  240. agno/tools/mem0.py +11 -17
  241. agno/tools/memori.py +1 -53
  242. agno/tools/memory.py +419 -0
  243. agno/tools/models/azure_openai.py +2 -2
  244. agno/tools/models/gemini.py +3 -3
  245. agno/tools/models/groq.py +3 -5
  246. agno/tools/models/nebius.py +7 -7
  247. agno/tools/models_labs.py +25 -15
  248. agno/tools/notion.py +204 -0
  249. agno/tools/openai.py +4 -9
  250. agno/tools/opencv.py +3 -3
  251. agno/tools/parallel.py +314 -0
  252. agno/tools/replicate.py +7 -7
  253. agno/tools/scrapegraph.py +58 -31
  254. agno/tools/searxng.py +2 -2
  255. agno/tools/serper.py +2 -2
  256. agno/tools/slack.py +18 -3
  257. agno/tools/spider.py +2 -2
  258. agno/tools/tavily.py +146 -0
  259. agno/tools/whatsapp.py +1 -1
  260. agno/tools/workflow.py +278 -0
  261. agno/tools/yfinance.py +12 -11
  262. agno/utils/agent.py +820 -0
  263. agno/utils/audio.py +27 -0
  264. agno/utils/common.py +90 -1
  265. agno/utils/events.py +222 -7
  266. agno/utils/gemini.py +181 -23
  267. agno/utils/hooks.py +57 -0
  268. agno/utils/http.py +111 -0
  269. agno/utils/knowledge.py +12 -5
  270. agno/utils/log.py +1 -0
  271. agno/utils/mcp.py +95 -5
  272. agno/utils/media.py +188 -10
  273. agno/utils/merge_dict.py +22 -1
  274. agno/utils/message.py +60 -0
  275. agno/utils/models/claude.py +40 -11
  276. agno/utils/models/cohere.py +1 -1
  277. agno/utils/models/watsonx.py +1 -1
  278. agno/utils/openai.py +1 -1
  279. agno/utils/print_response/agent.py +105 -21
  280. agno/utils/print_response/team.py +103 -38
  281. agno/utils/print_response/workflow.py +251 -34
  282. agno/utils/reasoning.py +22 -1
  283. agno/utils/serialize.py +32 -0
  284. agno/utils/streamlit.py +16 -10
  285. agno/utils/string.py +41 -0
  286. agno/utils/team.py +98 -9
  287. agno/utils/tools.py +1 -1
  288. agno/vectordb/base.py +23 -4
  289. agno/vectordb/cassandra/cassandra.py +65 -9
  290. agno/vectordb/chroma/chromadb.py +182 -38
  291. agno/vectordb/clickhouse/clickhousedb.py +64 -11
  292. agno/vectordb/couchbase/couchbase.py +105 -10
  293. agno/vectordb/lancedb/lance_db.py +183 -135
  294. agno/vectordb/langchaindb/langchaindb.py +25 -7
  295. agno/vectordb/lightrag/lightrag.py +17 -3
  296. agno/vectordb/llamaindex/__init__.py +3 -0
  297. agno/vectordb/llamaindex/llamaindexdb.py +46 -7
  298. agno/vectordb/milvus/milvus.py +126 -9
  299. agno/vectordb/mongodb/__init__.py +7 -1
  300. agno/vectordb/mongodb/mongodb.py +112 -7
  301. agno/vectordb/pgvector/pgvector.py +142 -21
  302. agno/vectordb/pineconedb/pineconedb.py +80 -8
  303. agno/vectordb/qdrant/qdrant.py +125 -39
  304. agno/vectordb/redis/__init__.py +9 -0
  305. agno/vectordb/redis/redisdb.py +694 -0
  306. agno/vectordb/singlestore/singlestore.py +111 -25
  307. agno/vectordb/surrealdb/surrealdb.py +31 -5
  308. agno/vectordb/upstashdb/upstashdb.py +76 -8
  309. agno/vectordb/weaviate/weaviate.py +86 -15
  310. agno/workflow/__init__.py +2 -0
  311. agno/workflow/agent.py +299 -0
  312. agno/workflow/condition.py +112 -18
  313. agno/workflow/loop.py +69 -10
  314. agno/workflow/parallel.py +266 -118
  315. agno/workflow/router.py +110 -17
  316. agno/workflow/step.py +645 -136
  317. agno/workflow/steps.py +65 -6
  318. agno/workflow/types.py +71 -33
  319. agno/workflow/workflow.py +2113 -300
  320. agno-2.3.0.dist-info/METADATA +618 -0
  321. agno-2.3.0.dist-info/RECORD +577 -0
  322. agno-2.3.0.dist-info/licenses/LICENSE +201 -0
  323. agno/knowledge/reader/url_reader.py +0 -128
  324. agno/tools/googlesearch.py +0 -98
  325. agno/tools/mcp.py +0 -610
  326. agno/utils/models/aws_claude.py +0 -170
  327. agno-2.0.0rc2.dist-info/METADATA +0 -355
  328. agno-2.0.0rc2.dist-info/RECORD +0 -515
  329. agno-2.0.0rc2.dist-info/licenses/LICENSE +0 -375
  330. {agno-2.0.0rc2.dist-info → agno-2.3.0.dist-info}/WHEEL +0 -0
  331. {agno-2.0.0rc2.dist-info → agno-2.3.0.dist-info}/top_level.txt +0 -0
agno/workflow/workflow.py CHANGED
@@ -1,4 +1,5 @@
  import asyncio
+ import warnings
  from dataclasses import dataclass
  from datetime import datetime
  from os import getenv
@@ -24,13 +25,13 @@ from fastapi import WebSocket
  from pydantic import BaseModel

  from agno.agent.agent import Agent
- from agno.db.base import BaseDb, SessionType
- from agno.exceptions import RunCancelledException
- from agno.media import Audio, AudioArtifact, File, Image, ImageArtifact, Video, VideoArtifact
+ from agno.db.base import AsyncBaseDb, BaseDb, SessionType
+ from agno.exceptions import InputCheckError, OutputCheckError, RunCancelledException
+ from agno.media import Audio, File, Image, Video
  from agno.models.message import Message
  from agno.models.metrics import Metrics
- from agno.run.agent import RunEvent
- from agno.run.base import RunStatus
+ from agno.run import RunContext, RunStatus
+ from agno.run.agent import RunContentEvent, RunEvent, RunOutput
  from agno.run.cancel import (
  cancel_run as cancel_run_global,
  )
@@ -39,6 +40,7 @@ from agno.run.cancel import (
  raise_if_cancelled,
  register_run,
  )
+ from agno.run.team import RunContentEvent as TeamRunContentEvent
  from agno.run.team import TeamRunEvent
  from agno.run.workflow import (
  StepOutputEvent,
@@ -49,10 +51,12 @@ from agno.run.workflow import (
  WorkflowRunOutputEvent,
  WorkflowStartedEvent,
  )
- from agno.session.workflow import WorkflowSession
+ from agno.session.workflow import WorkflowChatInteraction, WorkflowSession
  from agno.team.team import Team
+ from agno.utils.common import is_typed_dict, validate_typed_dict
  from agno.utils.log import (
  log_debug,
+ log_error,
  log_warning,
  logger,
  set_log_level_to_debug,
@@ -65,6 +69,7 @@ from agno.utils.print_response.workflow import (
  print_response,
  print_response_stream,
  )
+ from agno.workflow import WorkflowAgent
  from agno.workflow.condition import Condition
  from agno.workflow.loop import Loop
  from agno.workflow.parallel import Parallel
@@ -127,7 +132,10 @@ class Workflow:
  steps: Optional[WorkflowSteps] = None

  # Database to use for this workflow
- db: Optional[BaseDb] = None
+ db: Optional[Union[BaseDb, AsyncBaseDb]] = None
+
+ # Agentic Workflow - WorkflowAgent that decides when to run the workflow
+ agent: Optional[WorkflowAgent] = None # type: ignore

  # Default session_id to use for this workflow (autogenerated if not set)
  session_id: Optional[str] = None
@@ -135,6 +143,8 @@ class Workflow:
  user_id: Optional[str] = None
  # Default session state (stored in the database to persist across runs)
  session_state: Optional[Dict[str, Any]] = None
+ # Set to True to overwrite the stored session_state with the session_state provided in the run
+ overwrite_db_session_state: bool = False

  # If True, the workflow runs in debug mode
  debug_mode: Optional[bool] = False
@@ -143,7 +153,9 @@ class Workflow:
  # Stream the response from the Workflow
  stream: Optional[bool] = None
  # Stream the intermediate steps from the Workflow
- stream_intermediate_steps: bool = False
+ stream_events: bool = False
+ # Stream events from executors (agents/teams/functions) within steps
+ stream_executor_events: bool = True

  # Persist the events on the run response
  store_events: bool = False
@@ -166,19 +178,31 @@ class Workflow:
  # This helps us improve the Agent and provide better support
  telemetry: bool = True

+ # Add this flag to control if the workflow should add history to the steps
+ add_workflow_history_to_steps: bool = False
+ # Number of historical runs to include in the messages
+ num_history_runs: int = 3
+
+ # Deprecated. Use stream_events instead.
+ stream_intermediate_steps: bool = False
+
  def __init__(
  self,
  id: Optional[str] = None,
  name: Optional[str] = None,
  description: Optional[str] = None,
- db: Optional[BaseDb] = None,
+ db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
  steps: Optional[WorkflowSteps] = None,
+ agent: Optional[WorkflowAgent] = None,
  session_id: Optional[str] = None,
  session_state: Optional[Dict[str, Any]] = None,
+ overwrite_db_session_state: bool = False,
  user_id: Optional[str] = None,
  debug_mode: Optional[bool] = False,
  stream: Optional[bool] = None,
+ stream_events: bool = False,
  stream_intermediate_steps: bool = False,
+ stream_executor_events: bool = True,
  store_events: bool = False,
  events_to_skip: Optional[List[Union[WorkflowRunEvent, RunEvent, TeamRunEvent]]] = None,
  store_executor_outputs: bool = True,
@@ -186,28 +210,48 @@ class Workflow:
  metadata: Optional[Dict[str, Any]] = None,
  cache_session: bool = False,
  telemetry: bool = True,
+ add_workflow_history_to_steps: bool = False,
+ num_history_runs: int = 3,
  ):
  self.id = id
  self.name = name
  self.description = description
  self.steps = steps
+ self.agent = agent
  self.session_id = session_id
  self.session_state = session_state
+ self.overwrite_db_session_state = overwrite_db_session_state
  self.user_id = user_id
  self.debug_mode = debug_mode
  self.store_events = store_events
  self.events_to_skip = events_to_skip or []
  self.stream = stream
- self.stream_intermediate_steps = stream_intermediate_steps
+ self.stream_executor_events = stream_executor_events
  self.store_executor_outputs = store_executor_outputs
  self.input_schema = input_schema
  self.metadata = metadata
  self.cache_session = cache_session
  self.db = db
  self.telemetry = telemetry
-
+ self.add_workflow_history_to_steps = add_workflow_history_to_steps
+ self.num_history_runs = num_history_runs
  self._workflow_session: Optional[WorkflowSession] = None

+ if stream_intermediate_steps is not None:
+ warnings.warn(
+ "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self.stream_events = stream_events or stream_intermediate_steps
+
+ # Warn if workflow history is enabled without a database
+ if self.add_workflow_history_to_steps and self.db is None:
+ log_warning(
+ "Workflow history is enabled (add_workflow_history_to_steps=True) but no database is configured. "
+ "History won't be persisted. Add a database to persist runs across executions. "
+ )
+
  def set_id(self) -> None:
  if self.id is None:
  if self.name is not None:
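The hunk above deprecates stream_intermediate_steps in favor of stream_events and introduces the workflow-history flags. A minimal construction sketch against the 2.3.0 signature shown above (the step callable and names are illustrative placeholders, not taken from the diff):

    from agno.workflow.workflow import Workflow

    def research_step(step_input):
        # placeholder step; any WorkflowSteps value accepted by the constructor works here
        ...

    workflow = Workflow(
        name="demo-workflow",
        steps=[research_step],
        stream=True,
        stream_events=True,  # replaces the deprecated stream_intermediate_steps
        add_workflow_history_to_steps=True,  # logs a warning unless a db is configured
        num_history_runs=3,
    )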
@@ -215,16 +259,23 @@ class Workflow:
  else:
  self.id = str(uuid4())

+ def _has_async_db(self) -> bool:
+ return self.db is not None and isinstance(self.db, AsyncBaseDb)
+
  def _validate_input(
  self, input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]]
- ) -> Optional[BaseModel]:
+ ) -> Optional[Union[str, List, Dict, Message, BaseModel]]:
  """Parse and validate input against input_schema if provided"""
  if self.input_schema is None:
- return None
+ return input # Return input unchanged if no schema is set

  if input is None:
  raise ValueError("Input required when input_schema is set")

+ # Handle Message objects - extract content
+ if isinstance(input, Message):
+ input = input.content # type: ignore
+
  # If input is a string, convert it to a dict
  if isinstance(input, str):
  import json
@@ -238,8 +289,6 @@ class Workflow:
  if isinstance(input, BaseModel):
  if isinstance(input, self.input_schema):
  try:
- # Re-validate to catch any field validation errors
- input.model_validate(input.model_dump())
  return input
  except Exception as e:
  raise ValueError(f"BaseModel validation failed: {str(e)}")
@@ -250,8 +299,13 @@ class Workflow:
  # Case 2: Message is a dict
  elif isinstance(input, dict):
  try:
- validated_model = self.input_schema(**input)
- return validated_model
+ # Check if the schema is a TypedDict
+ if is_typed_dict(self.input_schema):
+ validated_dict = validate_typed_dict(input, self.input_schema)
+ return validated_dict
+ else:
+ validated_model = self.input_schema(**input)
+ return validated_model
  except Exception as e:
  raise ValueError(f"Failed to parse dict into {self.input_schema.__name__}: {str(e)}")

@@ -316,10 +370,8 @@ class Workflow:
  self,
  session_id: Optional[str] = None,
  user_id: Optional[str] = None,
- session_state: Optional[Dict[str, Any]] = None,
- run_id: Optional[str] = None,
- ) -> Tuple[str, Optional[str], Dict[str, Any]]:
- """Initialize the session for the agent."""
+ ) -> Tuple[str, Optional[str]]:
+ """Initialize the session for the workflow."""

  if session_id is None:
  if self.session_id:
@@ -332,17 +384,25 @@ class Workflow:
  log_debug(f"Session ID: {session_id}", center=True)

  # Use the default user_id when necessary
- if user_id is None:
+ if user_id is None or user_id == "":
  user_id = self.user_id

- # Determine the session_state
- if session_state is None:
- session_state = self.session_state or {}
+ return session_id, user_id

- if user_id is not None:
+ def _initialize_session_state(
+ self,
+ session_state: Dict[str, Any],
+ user_id: Optional[str] = None,
+ session_id: Optional[str] = None,
+ run_id: Optional[str] = None,
+ ) -> Dict[str, Any]:
+ """Initialize the session state for the workflow."""
+ if user_id:
  session_state["current_user_id"] = user_id
  if session_id is not None:
  session_state["current_session_id"] = session_id
+ if run_id is not None:
+ session_state["current_run_id"] = run_id

  session_state.update(
  {
@@ -354,7 +414,7 @@ class Workflow:
  if self.name:
  session_state["workflow_name"] = self.name

- return session_id, user_id, session_state # type: ignore
+ return session_state

  def _generate_workflow_session_name(self) -> str:
  """Generate a name for the workflow session"""
@@ -370,6 +430,33 @@ class Workflow:
  new_session_name = f"{truncated_desc} - {datetime_str}"
  return new_session_name

+ async def aset_session_name(
+ self, session_id: Optional[str] = None, autogenerate: bool = False, session_name: Optional[str] = None
+ ) -> WorkflowSession:
+ """Set the session name and save to storage, using an async database"""
+ session_id = session_id or self.session_id
+
+ if session_id is None:
+ raise Exception("Session ID is not set")
+
+ # -*- Read from storage
+ session = await self.aget_session(session_id=session_id) # type: ignore
+
+ if autogenerate:
+ # -*- Generate name for session
+ session_name = self._generate_workflow_session_name()
+ log_debug(f"Generated Workflow Session Name: {session_name}")
+ elif session_name is None:
+ raise Exception("Session name is not set")
+
+ # -*- Rename session
+ session.session_data["session_name"] = session_name # type: ignore
+
+ # -*- Save to storage
+ await self.asave_session(session=session) # type: ignore
+
+ return session # type: ignore
+
  def set_session_name(
  self, session_id: Optional[str] = None, autogenerate: bool = False, session_name: Optional[str] = None
  ) -> WorkflowSession:
@@ -397,6 +484,16 @@ class Workflow:

  return session # type: ignore

+ async def aget_session_name(self, session_id: Optional[str] = None) -> str:
+ """Get the session name for the given session ID and user ID."""
+ session_id = session_id or self.session_id
+ if session_id is None:
+ raise Exception("Session ID is not set")
+ session = await self.aget_session(session_id=session_id) # type: ignore
+ if session is None:
+ raise Exception("Session not found")
+ return session.session_data.get("session_name", "") if session.session_data else ""
+
  def get_session_name(self, session_id: Optional[str] = None) -> str:
  """Get the session name for the given session ID and user ID."""
  session_id = session_id or self.session_id
@@ -407,6 +504,16 @@ class Workflow:
  raise Exception("Session not found")
  return session.session_data.get("session_name", "") if session.session_data else ""

+ async def aget_session_state(self, session_id: Optional[str] = None) -> Dict[str, Any]:
+ """Get the session state for the given session ID and user ID."""
+ session_id = session_id or self.session_id
+ if session_id is None:
+ raise Exception("Session ID is not set")
+ session = await self.aget_session(session_id=session_id) # type: ignore
+ if session is None:
+ raise Exception("Session not found")
+ return session.session_data.get("session_state", {}) if session.session_data else {}
+
  def get_session_state(self, session_id: Optional[str] = None) -> Dict[str, Any]:
  """Get the session state for the given session ID and user ID."""
  session_id = session_id or self.session_id
@@ -417,6 +524,69 @@ class Workflow:
  raise Exception("Session not found")
  return session.session_data.get("session_state", {}) if session.session_data else {}

+ def update_session_state(
+ self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None
+ ) -> Dict[str, Any]:
+ """
+ Update the session state for the given session ID.
+ Args:
+ session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
+ session_id: The session ID to update. If not provided, the current cached session ID is used.
+ Returns:
+ dict: The updated session state.
+ """
+ session_id = session_id or self.session_id
+ if session_id is None:
+ raise Exception("Session ID is not set")
+ session = self.get_session(session_id=session_id) # type: ignore
+ if session is None:
+ raise Exception("Session not found")
+
+ if session.session_data is not None and "session_state" not in session.session_data:
+ session.session_data["session_state"] = {}
+
+ for key, value in session_state_updates.items():
+ session.session_data["session_state"][key] = value # type: ignore
+
+ self.save_session(session=session)
+
+ return session.session_data["session_state"] # type: ignore
+
+ async def aupdate_session_state(
+ self, session_state_updates: Dict[str, Any], session_id: Optional[str] = None
+ ) -> Dict[str, Any]:
+ """
+ Update the session state for the given session ID (async).
+ Args:
+ session_state_updates: The updates to apply to the session state. Should be a dictionary of key-value pairs.
+ session_id: The session ID to update. If not provided, the current cached session ID is used.
+ Returns:
+ dict: The updated session state.
+ """
+ session_id = session_id or self.session_id
+ if session_id is None:
+ raise Exception("Session ID is not set")
+ session = await self.aget_session(session_id=session_id) # type: ignore
+ if session is None:
+ raise Exception("Session not found")
+
+ if session.session_data is not None and "session_state" not in session.session_data:
+ session.session_data["session_state"] = {} # type: ignore
+
+ for key, value in session_state_updates.items():
+ session.session_data["session_state"][key] = value # type: ignore
+
+ await self.asave_session(session=session)
+
+ return session.session_data["session_state"] # type: ignore
+
+ async def adelete_session(self, session_id: str):
+ """Delete the current session and save to storage"""
+ if self.db is None:
+ return
+ # -*- Delete session
+ await self.db.delete_session(session_id=session_id) # type: ignore
+
  def delete_session(self, session_id: str):
  """Delete the current session and save to storage"""
  if self.db is None:
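update_session_state and aupdate_session_state above merge a dict of updates into the stored session_state and persist it through save_session/asave_session. A small sketch, assuming a workflow constructed with a db and an existing session (identifiers are illustrative):

    # Synchronous databases (BaseDb)
    state = workflow.update_session_state({"cart": ["apples"]}, session_id="session_1")

    # Async databases (AsyncBaseDb), inside an async context
    state = await workflow.aupdate_session_state({"cart": ["apples"]}, session_id="session_1")
    await workflow.adelete_session(session_id="session_1")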
@@ -424,6 +594,25 @@ class Workflow:
  # -*- Delete session
  self.db.delete_session(session_id=session_id)

+ async def aget_run_output(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
+ """Get a RunOutput from the database."""
+ if self._workflow_session is not None:
+ run_response = self._workflow_session.get_run(run_id=run_id)
+ if run_response is not None:
+ return run_response
+ else:
+ log_warning(f"RunOutput {run_id} not found in AgentSession {self._workflow_session.session_id}")
+ return None
+ else:
+ workflow_session = await self.aget_session(session_id=session_id) # type: ignore
+ if workflow_session is not None:
+ run_response = workflow_session.get_run(run_id=run_id)
+ if run_response is not None:
+ return run_response
+ else:
+ log_warning(f"RunOutput {run_id} not found in AgentSession {session_id}")
+ return None
+
  def get_run_output(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
  """Get a RunOutput from the database."""
  if self._workflow_session is not None:
@@ -443,6 +632,26 @@ class Workflow:
  log_warning(f"RunOutput {run_id} not found in AgentSession {session_id}")
  return None

+ async def aget_last_run_output(self, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
+ """Get the last run response from the database."""
+ if (
+ self._workflow_session is not None
+ and self._workflow_session.runs is not None
+ and len(self._workflow_session.runs) > 0
+ ):
+ run_response = self._workflow_session.runs[-1]
+ if run_response is not None:
+ return run_response
+ else:
+ workflow_session = await self.aget_session(session_id=session_id) # type: ignore
+ if workflow_session is not None and workflow_session.runs is not None and len(workflow_session.runs) > 0:
+ run_response = workflow_session.runs[-1]
+ if run_response is not None:
+ return run_response
+ else:
+ log_warning(f"No run responses found in WorkflowSession {session_id}")
+ return None
+
  def get_last_run_output(self, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
  """Get the last run response from the database."""
  if (
@@ -481,6 +690,48 @@ class Workflow:

  workflow_session = cast(WorkflowSession, self._read_session(session_id=session_id))

+ if workflow_session is None:
+ # Creating new session if none found
+ log_debug(f"Creating new WorkflowSession: {session_id}")
+ session_data = {}
+ if self.session_state is not None:
+ from copy import deepcopy
+
+ session_data["session_state"] = deepcopy(self.session_state)
+ workflow_session = WorkflowSession(
+ session_id=session_id,
+ workflow_id=self.id,
+ user_id=user_id,
+ workflow_data=self._get_workflow_data(),
+ session_data=session_data,
+ metadata=self.metadata,
+ created_at=int(time()),
+ )
+
+ # Cache the session if relevant
+ if workflow_session is not None and self.cache_session:
+ self._workflow_session = workflow_session
+
+ return workflow_session
+
+ async def aread_or_create_session(
+ self,
+ session_id: str,
+ user_id: Optional[str] = None,
+ ) -> WorkflowSession:
+ from time import time
+
+ # Returning cached session if we have one
+ if self._workflow_session is not None and self._workflow_session.session_id == session_id:
+ return self._workflow_session
+
+ # Try to load from database
+ workflow_session = None
+ if self.db is not None:
+ log_debug(f"Reading WorkflowSession: {session_id}")
+
+ workflow_session = cast(WorkflowSession, await self._aread_session(session_id=session_id))
+
  if workflow_session is None:
  # Creating new session if none found
  log_debug(f"Creating new WorkflowSession: {session_id}")
@@ -500,6 +751,30 @@ class Workflow:

  return workflow_session

+ async def aget_session(
+ self,
+ session_id: Optional[str] = None,
+ ) -> Optional[WorkflowSession]:
+ """Load an WorkflowSession from database.
+
+ Args:
+ session_id: The session_id to load from storage.
+
+ Returns:
+ WorkflowSession: The WorkflowSession loaded from the database or created if it does not exist.
+ """
+ session_id_to_load = session_id or self.session_id
+ if session_id_to_load is None:
+ raise Exception("No session_id provided")
+
+ # Try to load from database
+ if self.db is not None:
+ workflow_session = cast(WorkflowSession, await self._aread_session(session_id=session_id_to_load))
+ return workflow_session
+
+ log_warning(f"WorkflowSession {session_id_to_load} not found in db")
+ return None
+
  def get_session(
  self,
  session_id: Optional[str] = None,
@@ -525,6 +800,25 @@ class Workflow:
  log_warning(f"WorkflowSession {session_id_to_load} not found in db")
  return None

+ async def asave_session(self, session: WorkflowSession) -> None:
+ """Save the WorkflowSession to storage, using an async database.
+
+ Returns:
+ Optional[WorkflowSession]: The saved WorkflowSession or None if not saved.
+ """
+ if self.db is not None and session.session_data is not None:
+ if session.session_data.get("session_state") is not None:
+ session.session_data["session_state"].pop("current_session_id", None)
+ session.session_data["session_state"].pop("current_user_id", None)
+ session.session_data["session_state"].pop("current_run_id", None)
+ session.session_data["session_state"].pop("workflow_id", None)
+ session.session_data["session_state"].pop("run_id", None)
+ session.session_data["session_state"].pop("session_id", None)
+ session.session_data["session_state"].pop("workflow_name", None)
+
+ await self._aupsert_session(session=session) # type: ignore
+ log_debug(f"Created or updated WorkflowSession record: {session.session_id}")
+
  def save_session(self, session: WorkflowSession) -> None:
  """Save the WorkflowSession to storage

@@ -544,7 +838,66 @@ class Workflow:
  self._upsert_session(session=session)
  log_debug(f"Created or updated WorkflowSession record: {session.session_id}")

+ def get_chat_history(
+ self, session_id: Optional[str] = None, last_n_runs: Optional[int] = None
+ ) -> List[WorkflowChatInteraction]:
+ """Return a list of dictionaries containing the input and output for each run in the session.
+
+ Args:
+ session_id: The session ID to get the chat history for. If not provided, the current cached session ID is used.
+ last_n_runs: Number of recent runs to include. If None, all runs will be considered.
+
+ Returns:
+ A list of WorkflowChatInteraction objects.
+ """
+ session_id = session_id or self.session_id
+ if session_id is None:
+ log_warning("Session ID is not set, cannot get messages for session")
+ return []
+
+ session = self.get_session(
+ session_id=session_id,
+ )
+ if session is None:
+ raise Exception("Session not found")
+
+ return session.get_chat_history(last_n_runs=last_n_runs)
+
+ async def aget_chat_history(
+ self, session_id: Optional[str] = None, last_n_runs: Optional[int] = None
+ ) -> List[WorkflowChatInteraction]:
+ """Return a list of dictionaries containing the input and output for each run in the session.
+
+ Args:
+ session_id: The session ID to get the chat history for. If not provided, the current cached session ID is used.
+ last_n_runs: Number of recent runs to include. If None, all runs will be considered.
+
+ Returns:
+ A list of dictionaries containing the input and output for each run.
+ """
+ session_id = session_id or self.session_id
+ if session_id is None:
+ log_warning("Session ID is not set, cannot get messages for session")
+ return []
+
+ session = await self.aget_session(session_id=session_id)
+ if session is None:
+ raise Exception("Session not found")
+
+ return session.get_chat_history(last_n_runs=last_n_runs)
+
  # -*- Session Database Functions
+ async def _aread_session(self, session_id: str) -> Optional[WorkflowSession]:
+ """Get a Session from the database."""
+ try:
+ if not self.db:
+ raise ValueError("Db not initialized")
+ session = await self.db.get_session(session_id=session_id, session_type=SessionType.WORKFLOW) # type: ignore
+ return session if isinstance(session, (WorkflowSession, type(None))) else None
+ except Exception as e:
+ log_warning(f"Error getting session from db: {e}")
+ return None
+
  def _read_session(self, session_id: str) -> Optional[WorkflowSession]:
  """Get a Session from the database."""
  try:
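The a-prefixed session methods above (aget_session, asave_session, aget_chat_history, _aread_session) give an awaitable persistence path when db is an AsyncBaseDb implementation, such as one built from the new async_postgres/async_sqlite/async_mongo modules in the file table (their constructors are not shown in this diff). A hedged usage sketch, with get_chat_history exposing the per-run input/output pairs that add_workflow_history_to_steps feeds back into steps:

    # async_db: any AsyncBaseDb implementation; "session_1" is illustrative
    workflow = Workflow(name="demo-workflow", steps=[research_step], db=async_db, session_id="session_1")

    session = await workflow.aget_session()          # reads via _aread_session
    await workflow.asave_session(session=session)    # upserts via _aupsert_session
    history = await workflow.aget_chat_history(last_n_runs=3)
    for interaction in history:
        print(interaction)  # one WorkflowChatInteraction per stored run; its fields are defined in agno.session.workflow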
@@ -556,9 +909,19 @@ class Workflow:
  log_warning(f"Error getting session from db: {e}")
  return None

- def _upsert_session(self, session: WorkflowSession) -> Optional[WorkflowSession]:
+ async def _aupsert_session(self, session: WorkflowSession) -> Optional[WorkflowSession]:
  """Upsert a Session into the database."""
+ try:
+ if not self.db:
+ raise ValueError("Db not initialized")
+ result = await self.db.upsert_session(session=session) # type: ignore
+ return result if isinstance(result, (WorkflowSession, type(None))) else None
+ except Exception as e:
+ log_warning(f"Error upserting session into db: {e}")
+ return None

+ def _upsert_session(self, session: WorkflowSession) -> Optional[WorkflowSession]:
+ """Upsert a Session into the database."""

  try:
  if not self.db:
@@ -581,12 +944,13 @@ class Workflow:
  # Update the current metadata with the metadata from the database which is updated in place
  self.metadata = session.metadata

- def _update_session_state(self, session: WorkflowSession, session_state: Dict[str, Any]):
- """Load the existing Workflow from a WorkflowSession (from the database)"""
+ def _load_session_state(self, session: WorkflowSession, session_state: Dict[str, Any]):
+ """Load and return the stored session_state from the database, optionally merging it with the given one"""

  from agno.utils.merge_dict import merge_dictionaries

- # Get the session_state from the database and update the current session_state
+ # Get the session_state from the database and merge with proper precedence
+ # At this point session_state contains: agent_defaults + run_params
  if session.session_data and "session_state" in session.session_data:
  session_state_from_db = session.session_data.get("session_state")

@@ -594,11 +958,13 @@ class Workflow:
  session_state_from_db is not None
  and isinstance(session_state_from_db, dict)
  and len(session_state_from_db) > 0
+ and not self.overwrite_db_session_state
  ):
- # This updates session_state_from_db
- # If there are conflicting keys, values from provided session_state will take precedence
- merge_dictionaries(session_state_from_db, session_state)
- session_state = session_state_from_db
+ # This preserves precedence: run_params > db_state > agent_defaults
+ merged_state = session_state_from_db.copy()
+ merge_dictionaries(merged_state, session_state)
+ session_state.clear()
+ session_state.update(merged_state)

  # Update the session_state in the session
  if session.session_data is None:
@@ -608,7 +974,10 @@ class Workflow:
  return session_state

  def _get_workflow_data(self) -> Dict[str, Any]:
- workflow_data = {}
+ workflow_data: Dict[str, Any] = {
+ "workflow_id": self.id,
+ "name": self.name,
+ }

  if self.steps and not callable(self.steps):
  steps_dict = []
@@ -620,7 +989,7 @@ class Workflow:
  else:
  step_type = STEP_TYPE_MAPPING[type(step)]
  step_dict = {
- "name": step.name if hasattr(step, "name") else step.__name__,
+ "name": step.name if hasattr(step, "name") else step.__name__, # type: ignore
  "description": step.description if hasattr(step, "description") else "User-defined callable step",
  "type": step_type.value,
  }
@@ -639,14 +1008,34 @@ class Workflow:

  return workflow_data

- def _handle_event(
+ def _broadcast_to_websocket(
  self,
- event: "WorkflowRunOutputEvent",
- workflow_run_response: WorkflowRunOutput,
+ event: Any,
  websocket_handler: Optional[WebSocketHandler] = None,
- ) -> "WorkflowRunOutputEvent":
- """Handle workflow events for storage - similar to Team._handle_event"""
- if self.store_events:
+ ) -> None:
+ """Broadcast events to WebSocket if available (async context only)"""
+ if websocket_handler:
+ try:
+ loop = asyncio.get_running_loop()
+ if loop:
+ asyncio.create_task(websocket_handler.handle_event(event))
+ except RuntimeError:
+ pass
+
+ def _handle_event(
+ self,
+ event: "WorkflowRunOutputEvent",
+ workflow_run_response: WorkflowRunOutput,
+ websocket_handler: Optional[WebSocketHandler] = None,
+ ) -> "WorkflowRunOutputEvent":
+ """Handle workflow events for storage - similar to Team._handle_event"""
+ from agno.run.agent import RunOutput
+ from agno.run.base import BaseRunOutputEvent
+ from agno.run.team import TeamRunOutput
+
+ if isinstance(event, (RunOutput, TeamRunOutput)):
+ return event
+ if self.store_events:
  # Check if this event type should be skipped
  if self.events_to_skip:
  event_type = event.event
@@ -660,21 +1049,41 @@ class Workflow:
  return event

  # Store the event
- if workflow_run_response.events is None:
- workflow_run_response.events = []
-
- workflow_run_response.events.append(event)
+ if isinstance(event, BaseRunOutputEvent):
+ if workflow_run_response.events is None:
+ workflow_run_response.events = []
+ workflow_run_response.events.append(event)

  # Broadcast to WebSocket if available (async context only)
- if websocket_handler:
- import asyncio
+ self._broadcast_to_websocket(event, websocket_handler)

- try:
- loop = asyncio.get_running_loop()
- if loop:
- asyncio.create_task(websocket_handler.handle_event(event))
- except RuntimeError:
- pass
+ return event
+
+ def _enrich_event_with_workflow_context(
+ self,
+ event: Any,
+ workflow_run_response: WorkflowRunOutput,
+ step_index: Optional[Union[int, tuple]] = None,
+ step: Optional[Any] = None,
+ ) -> Any:
+ """Enrich any event with workflow context information for frontend tracking"""
+
+ step_id = getattr(step, "step_id", None) if step else None
+ step_name = getattr(step, "name", None) if step else None
+
+ if hasattr(event, "workflow_id"):
+ event.workflow_id = workflow_run_response.workflow_id
+ if hasattr(event, "workflow_run_id"):
+ event.workflow_run_id = workflow_run_response.run_id
+ if hasattr(event, "step_id") and step_id:
+ event.step_id = step_id
+ if hasattr(event, "step_name") and step_name is not None:
+ if event.step_name is None:
+ event.step_name = step_name
+ # Only set step_index if it's not already set (preserve parallel.py's tuples)
+ if hasattr(event, "step_index") and step_index is not None:
+ if event.step_index is None:
+ event.step_index = step_index

  return event
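The reworked _handle_event above only appends BaseRunOutputEvent instances to the run output and checks events_to_skip before storing, while _broadcast_to_websocket now owns the WebSocket fan-out. A sketch of the related constructor knobs (the enum member used for skipping is an assumption, not taken from this diff):

    from agno.run.workflow import WorkflowRunEvent

    workflow = Workflow(
        name="demo-workflow",
        steps=[research_step],
        store_events=True,  # keep events on the WorkflowRunOutput
        events_to_skip=[WorkflowRunEvent.workflow_started],  # assumed member name
    )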

@@ -745,9 +1154,9 @@ class Workflow:
  self,
  execution_input: WorkflowExecutionInput,
  previous_step_outputs: Optional[Dict[str, StepOutput]] = None,
- shared_images: Optional[List[ImageArtifact]] = None,
- shared_videos: Optional[List[VideoArtifact]] = None,
- shared_audio: Optional[List[AudioArtifact]] = None,
+ shared_images: Optional[List[Image]] = None,
+ shared_videos: Optional[List[Video]] = None,
+ shared_audio: Optional[List[Audio]] = None,
  shared_files: Optional[List[File]] = None,
  ) -> StepInput:
  """Helper method to create StepInput with enhanced data flow support"""
@@ -782,7 +1191,11 @@ class Workflow:
  else:
  return len(self.steps)

- def _aggregate_workflow_metrics(self, step_results: List[Union[StepOutput, List[StepOutput]]]) -> WorkflowMetrics:
+ def _aggregate_workflow_metrics(
+ self,
+ step_results: List[Union[StepOutput, List[StepOutput]]],
+ current_workflow_metrics: Optional[WorkflowMetrics] = None,
+ ) -> WorkflowMetrics:
  """Aggregate metrics from all step responses into structured workflow metrics"""
  steps_dict = {}

@@ -810,8 +1223,13 @@ class Workflow:
  for step_result in step_results:
  process_step_output(cast(StepOutput, step_result))

+ duration = None
+ if current_workflow_metrics and current_workflow_metrics.duration is not None:
+ duration = current_workflow_metrics.duration
+
  return WorkflowMetrics(
  steps=steps_dict,
+ duration=duration,
  )

  def _call_custom_function(self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any) -> Any:
@@ -846,24 +1264,32 @@ class Workflow:
  return func(**call_kwargs)
  except TypeError as e:
  # If signature inspection fails, fall back to original method
- logger.warning(
- f"Async function signature inspection failed: {e}. Falling back to original calling convention."
- )
- return func(**call_kwargs)
+ logger.error(f"Function signature inspection failed: {e}. Falling back to original calling convention.")
+ return func(**kwargs)
+
+ def _accumulate_partial_step_data(
+ self, event: Union[RunContentEvent, TeamRunContentEvent], partial_step_content: str
+ ) -> str:
+ """Accumulate partial step data from streaming events"""
+ if isinstance(event, (RunContentEvent, TeamRunContentEvent)) and event.content:
+ if isinstance(event.content, str):
+ partial_step_content += event.content
+ return partial_step_content

  def _execute(
  self,
  session: WorkflowSession,
  execution_input: WorkflowExecutionInput,
  workflow_run_response: WorkflowRunOutput,
- session_state: Optional[Dict[str, Any]] = None,
+ run_context: RunContext,
  **kwargs: Any,
  ) -> WorkflowRunOutput:
  """Execute a specific pipeline by name synchronously"""
  from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction

  workflow_run_response.status = RunStatus.running
- register_run(workflow_run_response.run_id) # type: ignore
+ if workflow_run_response.run_id:
+ register_run(workflow_run_response.run_id) # type: ignore

  if callable(self.steps):
  if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
@@ -890,12 +1316,12 @@ class Workflow:
  collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
  previous_step_outputs: Dict[str, StepOutput] = {}

- shared_images: List[ImageArtifact] = execution_input.images or []
- output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
- shared_videos: List[VideoArtifact] = execution_input.videos or []
- output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
- shared_audio: List[AudioArtifact] = execution_input.audio or []
- output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
+ shared_images: List[Image] = execution_input.images or []
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
+ shared_videos: List[Video] = execution_input.videos or []
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
+ shared_audio: List[Audio] = execution_input.audio or []
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
  shared_files: List[File] = execution_input.files or []
  output_files: List[File] = (execution_input.files or []).copy() # Start with input files

@@ -922,8 +1348,13 @@ class Workflow:
  session_id=session.session_id,
  user_id=self.user_id,
  workflow_run_response=workflow_run_response,
- session_state=session_state,
+ run_context=run_context,
  store_executor_outputs=self.store_executor_outputs,
+ workflow_session=session,
+ add_workflow_history_to_steps=self.add_workflow_history_to_steps
+ if self.add_workflow_history_to_steps
+ else None,
+ num_history_runs=self.num_history_runs,
  )

  # Check for cancellation after step execution
@@ -931,9 +1362,7 @@ class Workflow:
  raise_if_cancelled(workflow_run_response.run_id) # type: ignore
  # Update the workflow-level previous_step_outputs dictionary
  previous_step_outputs[step_name] = step_output
- if step_output.stop:
- logger.info(f"Early termination requested by step {step_name}")
- break
+ collected_step_outputs.append(step_output)

  # Update shared media for next step
  shared_images.extend(step_output.images or [])
@@ -945,11 +1374,20 @@ class Workflow:
  output_audio.extend(step_output.audio or [])
  output_files.extend(step_output.files or [])

- collected_step_outputs.append(step_output)
+ if step_output.stop:
+ logger.info(f"Early termination requested by step {step_name}")
+ break

  # Update the workflow_run_response with completion data
  if collected_step_outputs:
- workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
+ # Stop the timer for the Run duration
+ if workflow_run_response.metrics:
+ workflow_run_response.metrics.stop_timer()
+
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
+ collected_step_outputs,
+ workflow_run_response.metrics, # type: ignore[arg-type]
+ )
  last_output = cast(StepOutput, collected_step_outputs[-1])

  # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
@@ -972,8 +1410,14 @@ class Workflow:
  workflow_run_response.audio = output_audio
  workflow_run_response.status = RunStatus.completed

+ except (InputCheckError, OutputCheckError) as e:
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
+ # Store error response
+ workflow_run_response.status = RunStatus.error
+ workflow_run_response.content = f"Validation failed: {str(e)} | Check: {e.check_trigger}"
+
+ raise e
  except RunCancelledException as e:
- # Handle run cancellation
  logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
  workflow_run_response.status = RunStatus.cancelled
  workflow_run_response.content = str(e)
@@ -985,8 +1429,13 @@ class Workflow:
  # Store error response
  workflow_run_response.status = RunStatus.error
  workflow_run_response.content = f"Workflow execution failed: {e}"
+ raise e

  finally:
+ # Stop timer on error
+ if workflow_run_response.metrics:
+ workflow_run_response.metrics.stop_timer()
+
  self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
  session.upsert_run(run=workflow_run_response)
  self.save_session(session=session)
@@ -1004,8 +1453,8 @@ class Workflow:
  session: WorkflowSession,
  execution_input: WorkflowExecutionInput,
  workflow_run_response: WorkflowRunOutput,
- session_state: Optional[Dict[str, Any]] = None,
- stream_intermediate_steps: bool = False,
+ run_context: RunContext,
+ stream_events: bool = False,
  **kwargs: Any,
  ) -> Iterator[WorkflowRunOutputEvent]:
  """Execute a specific pipeline by name with event streaming"""
@@ -1050,22 +1499,33 @@ class Workflow:
  collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
  previous_step_outputs: Dict[str, StepOutput] = {}

- shared_images: List[ImageArtifact] = execution_input.images or []
- output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
- shared_videos: List[VideoArtifact] = execution_input.videos or []
- output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
- shared_audio: List[AudioArtifact] = execution_input.audio or []
- output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
+ shared_images: List[Image] = execution_input.images or []
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
+ shared_videos: List[Video] = execution_input.videos or []
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
+ shared_audio: List[Audio] = execution_input.audio or []
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
  shared_files: List[File] = execution_input.files or []
  output_files: List[File] = (execution_input.files or []).copy() # Start with input files

  early_termination = False

+ # Track partial step data in case of cancellation
+ current_step_name = ""
+ current_step = None
+ partial_step_content = ""
+
  for i, step in enumerate(self.steps): # type: ignore[arg-type]
  raise_if_cancelled(workflow_run_response.run_id) # type: ignore
  step_name = getattr(step, "name", f"step_{i + 1}")
  log_debug(f"Streaming step {i + 1}/{self._get_step_count()}: {step_name}")

+ # Track current step for cancellation handler
+ current_step_name = step_name
+ current_step = step
+ # Reset partial data for this step
+ partial_step_content = ""
+
  # Create enhanced StepInput
  step_input = self._create_step_input(
  execution_input=execution_input,
@@ -1081,13 +1541,23 @@ class Workflow:
  step_input,
  session_id=session.session_id,
  user_id=self.user_id,
- stream_intermediate_steps=stream_intermediate_steps,
+ stream_events=stream_events,
+ stream_executor_events=self.stream_executor_events,
  workflow_run_response=workflow_run_response,
- session_state=session_state,
+ run_context=run_context,
  step_index=i,
  store_executor_outputs=self.store_executor_outputs,
+ workflow_session=session,
+ add_workflow_history_to_steps=self.add_workflow_history_to_steps
+ if self.add_workflow_history_to_steps
+ else None,
+ num_history_runs=self.num_history_runs,
  ):
  raise_if_cancelled(workflow_run_response.run_id) # type: ignore
+
+ # Accumulate partial data from streaming events
+ partial_step_content = self._accumulate_partial_step_data(event, partial_step_content) # type: ignore
+
  # Handle events
  if isinstance(event, StepOutput):
  step_output = event
@@ -1136,11 +1606,19 @@ class Workflow:
  yield step_output_event

  elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
1139
- yield self._handle_event(event, workflow_run_response) # type: ignore
1609
+ # Enrich event with workflow context before yielding
1610
+ enriched_event = self._enrich_event_with_workflow_context(
1611
+ event, workflow_run_response, step_index=i, step=step
1612
+ )
1613
+ yield self._handle_event(enriched_event, workflow_run_response) # type: ignore
1140
1614
 
1141
1615
  else:
1142
- # Yield other internal events
1143
- yield self._handle_event(event, workflow_run_response) # type: ignore
1616
+ # Enrich other events with workflow context before yielding
1617
+ enriched_event = self._enrich_event_with_workflow_context(
1618
+ event, workflow_run_response, step_index=i, step=step
1619
+ )
1620
+ if self.stream_executor_events:
1621
+ yield self._handle_event(enriched_event, workflow_run_response) # type: ignore
1144
1622
 
1145
1623
  # Break out of main step loop if early termination was requested
1146
1624
  if "early_termination" in locals() and early_termination:
@@ -1148,7 +1626,14 @@ class Workflow:
1148
1626
 
1149
1627
  # Update the workflow_run_response with completion data
1150
1628
  if collected_step_outputs:
1151
- workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1629
+ # Stop the timer for the Run duration
1630
+ if workflow_run_response.metrics:
1631
+ workflow_run_response.metrics.stop_timer()
1632
+
1633
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
1634
+ collected_step_outputs,
1635
+ workflow_run_response.metrics, # type: ignore[arg-type]
1636
+ )
1152
1637
  last_output = cast(StepOutput, collected_step_outputs[-1])
1153
1638
 
1154
1639
  # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
@@ -1171,11 +1656,59 @@ class Workflow:
1171
1656
  workflow_run_response.audio = output_audio
1172
1657
  workflow_run_response.status = RunStatus.completed
1173
1658
 
1659
+ except (InputCheckError, OutputCheckError) as e:
1660
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
1661
+
1662
+ from agno.run.workflow import WorkflowErrorEvent
1663
+
1664
+ error_event = WorkflowErrorEvent(
1665
+ run_id=workflow_run_response.run_id or "",
1666
+ workflow_id=self.id,
1667
+ workflow_name=self.name,
1668
+ session_id=session.session_id,
1669
+ error=str(e),
1670
+ )
1671
+
1672
+ yield error_event
1673
+
1674
+ # Update workflow_run_response with error
1675
+ workflow_run_response.content = error_event.error
1676
+ workflow_run_response.status = RunStatus.error
1174
1677
  except RunCancelledException as e:
1175
1678
  # Handle run cancellation during streaming
1176
1679
  logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
1177
1680
  workflow_run_response.status = RunStatus.cancelled
1178
1681
  workflow_run_response.content = str(e)
1682
+
1683
+ # Capture partial progress from the step that was cancelled mid-stream
1684
+ if partial_step_content:
1685
+ logger.info(
1686
+ f"Step with name '{current_step_name}' was cancelled. Setting its partial progress as step output."
1687
+ )
1688
+ partial_step_output = StepOutput(
1689
+ step_name=current_step_name,
1690
+ step_id=getattr(current_step, "step_id", None) if current_step else None,
1691
+ step_type=StepType.STEP,
1692
+ executor_type=getattr(current_step, "executor_type", None) if current_step else None,
1693
+ executor_name=getattr(current_step, "executor_name", None) if current_step else None,
1694
+ content=partial_step_content,
1695
+ success=False,
1696
+ error="Cancelled during execution",
1697
+ )
1698
+ collected_step_outputs.append(partial_step_output)
1699
+
1700
+ # Preserve all progress (completed steps + partial step) before cancellation
1701
+ if collected_step_outputs:
1702
+ workflow_run_response.step_results = collected_step_outputs
1703
+ # Stop the timer for the Run duration
1704
+ if workflow_run_response.metrics:
1705
+ workflow_run_response.metrics.stop_timer()
1706
+
1707
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
1708
+ collected_step_outputs,
1709
+ workflow_run_response.metrics, # type: ignore[arg-type]
1710
+ )
1711
+
1179
1712
  cancelled_event = WorkflowCancelledEvent(
1180
1713
  run_id=workflow_run_response.run_id or "",
1181
1714
  workflow_id=self.id,
@@ -1202,6 +1735,7 @@ class Workflow:
1202
1735
  # Update workflow_run_response with error
1203
1736
  workflow_run_response.content = error_event.error
1204
1737
  workflow_run_response.status = RunStatus.error
1738
+ raise e
1205
1739
 
1206
1740
  # Yield workflow completed event
1207
1741
  workflow_completed_event = WorkflowCompletedEvent(
@@ -1215,6 +1749,10 @@ class Workflow:
1215
1749
  )
1216
1750
  yield self._handle_event(workflow_completed_event, workflow_run_response)
1217
1751
 
1752
+ # Stop timer on error
1753
+ if workflow_run_response.metrics:
1754
+ workflow_run_response.metrics.stop_timer()
1755
+
1218
1756
  # Store the completed workflow response
1219
1757
  self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1220
1758
  session.upsert_run(run=workflow_run_response)
@@ -1277,21 +1815,49 @@ class Workflow:
1277
1815
  # For regular async functions, use the same signature inspection logic in fallback
1278
1816
  return await func(**call_kwargs) # type: ignore
1279
1817
 
+    async def _aload_or_create_session(
+        self, session_id: str, user_id: Optional[str], session_state: Optional[Dict[str, Any]]
+    ) -> Tuple[WorkflowSession, Dict[str, Any]]:
+        """Load or create session from database, update metadata, and prepare session state.
+
+        Returns:
+            Tuple of (workflow_session, prepared_session_state)
+        """
+        # Read existing session from database
+        if self._has_async_db():
+            workflow_session = await self.aread_or_create_session(session_id=session_id, user_id=user_id)
+        else:
+            workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
+        self._update_metadata(session=workflow_session)
+
+        # Update session state from DB
+        _session_state = session_state or {}
+        _session_state = self._load_session_state(session=workflow_session, session_state=_session_state)
+
+        return workflow_session, _session_state
+
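
This helper concentrates the session bootstrapping shared by the async execution paths below. Its persistence counterpart, repeated throughout the rest of this diff, uses the same async-db check; shown here once for orientation:

```python
# Persistence counterpart used repeatedly later in this diff: choose the async or
# sync save path depending on whether an async-capable db is configured.
if self._has_async_db():
    await self.asave_session(session=workflow_session)
else:
    self.save_session(session=workflow_session)
```
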
  async def _aexecute(
1281
1840
  self,
1282
- session: WorkflowSession,
1841
+ session_id: str,
1842
+ user_id: Optional[str],
1283
1843
  execution_input: WorkflowExecutionInput,
1284
1844
  workflow_run_response: WorkflowRunOutput,
1285
- session_state: Optional[Dict[str, Any]] = None,
1845
+ run_context: RunContext,
1286
1846
  **kwargs: Any,
1287
1847
  ) -> WorkflowRunOutput:
1288
1848
  """Execute a specific pipeline by name asynchronously"""
1289
1849
  from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1290
1850
 
1851
+ # Read existing session from database
1852
+ workflow_session, run_context.session_state = await self._aload_or_create_session(
1853
+ session_id=session_id, user_id=user_id, session_state=run_context.session_state
1854
+ )
1855
+
1291
1856
  workflow_run_response.status = RunStatus.running
1292
1857
 
1293
1858
  # Register run for cancellation tracking
1294
- register_run(workflow_run_response.run_id) # type: ignore
1859
+ if workflow_run_response.run_id:
1860
+ register_run(workflow_run_response.run_id) # type: ignore
1295
1861
 
1296
1862
  if callable(self.steps):
1297
1863
  # Execute the workflow with the custom executor
@@ -1326,12 +1892,12 @@ class Workflow:
1326
1892
  collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1327
1893
  previous_step_outputs: Dict[str, StepOutput] = {}
1328
1894
 
1329
- shared_images: List[ImageArtifact] = execution_input.images or []
1330
- output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
1331
- shared_videos: List[VideoArtifact] = execution_input.videos or []
1332
- output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
1333
- shared_audio: List[AudioArtifact] = execution_input.audio or []
1334
- output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
1895
+ shared_images: List[Image] = execution_input.images or []
1896
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
1897
+ shared_videos: List[Video] = execution_input.videos or []
1898
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
1899
+ shared_audio: List[Audio] = execution_input.audio or []
1900
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
1335
1901
  shared_files: List[File] = execution_input.files or []
1336
1902
  output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1337
1903
 
@@ -1355,11 +1921,16 @@ class Workflow:
1355
1921
 
1356
1922
  step_output = await step.aexecute( # type: ignore[union-attr]
1357
1923
  step_input,
1358
- session_id=session.session_id,
1924
+ session_id=session_id,
1359
1925
  user_id=self.user_id,
1360
1926
  workflow_run_response=workflow_run_response,
1361
- session_state=session_state,
1927
+ run_context=run_context,
1362
1928
  store_executor_outputs=self.store_executor_outputs,
1929
+ workflow_session=workflow_session,
1930
+ add_workflow_history_to_steps=self.add_workflow_history_to_steps
1931
+ if self.add_workflow_history_to_steps
1932
+ else None,
1933
+ num_history_runs=self.num_history_runs,
1363
1934
  )
1364
1935
 
1365
1936
  # Check for cancellation after step execution
@@ -1367,9 +1938,7 @@ class Workflow:
1367
1938
 
1368
1939
  # Update the workflow-level previous_step_outputs dictionary
1369
1940
  previous_step_outputs[step_name] = step_output
1370
- if step_output.stop:
1371
- logger.info(f"Early termination requested by step {step_name}")
1372
- break
1941
+ collected_step_outputs.append(step_output)
1373
1942
 
1374
1943
  # Update shared media for next step
1375
1944
  shared_images.extend(step_output.images or [])
@@ -1381,11 +1950,20 @@ class Workflow:
1381
1950
  output_audio.extend(step_output.audio or [])
1382
1951
  output_files.extend(step_output.files or [])
1383
1952
 
1384
- collected_step_outputs.append(step_output)
1953
+ if step_output.stop:
1954
+ logger.info(f"Early termination requested by step {step_name}")
1955
+ break
1385
1956
 
1386
1957
  # Update the workflow_run_response with completion data
1387
1958
  if collected_step_outputs:
1388
- workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
1959
+ # Stop the timer for the Run duration
1960
+ if workflow_run_response.metrics:
1961
+ workflow_run_response.metrics.stop_timer()
1962
+
1963
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
1964
+ collected_step_outputs,
1965
+ workflow_run_response.metrics, # type: ignore[arg-type]
1966
+ )
1389
1967
  last_output = cast(StepOutput, collected_step_outputs[-1])
1390
1968
 
1391
1969
  # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
@@ -1408,6 +1986,13 @@ class Workflow:
1408
1986
  workflow_run_response.audio = output_audio
1409
1987
  workflow_run_response.status = RunStatus.completed
1410
1988
 
1989
+ except (InputCheckError, OutputCheckError) as e:
1990
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
1991
+ # Store error response
1992
+ workflow_run_response.status = RunStatus.error
1993
+ workflow_run_response.content = f"Validation failed: {str(e)} | Check: {e.check_trigger}"
1994
+
1995
+ raise e
1411
1996
  except RunCancelledException as e:
1412
1997
  logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled")
1413
1998
  workflow_run_response.status = RunStatus.cancelled
@@ -1416,33 +2001,52 @@ class Workflow:
1416
2001
  logger.error(f"Workflow execution failed: {e}")
1417
2002
  workflow_run_response.status = RunStatus.error
1418
2003
  workflow_run_response.content = f"Workflow execution failed: {e}"
2004
+ raise e
1419
2005
 
1420
- self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1421
- session.upsert_run(run=workflow_run_response)
1422
- self.save_session(session=session)
2006
+ # Stop timer on error
2007
+ if workflow_run_response.metrics:
2008
+ workflow_run_response.metrics.stop_timer()
2009
+
2010
+ self._update_session_metrics(session=workflow_session, workflow_run_response=workflow_run_response)
2011
+ workflow_session.upsert_run(run=workflow_run_response)
2012
+ if self._has_async_db():
2013
+ await self.asave_session(session=workflow_session)
2014
+ else:
2015
+ self.save_session(session=workflow_session)
1423
2016
  # Always clean up the run tracking
1424
2017
  cleanup_run(workflow_run_response.run_id) # type: ignore
1425
2018
 
1426
2019
  # Log Workflow Telemetry
1427
2020
  if self.telemetry:
1428
- await self._alog_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
2021
+ await self._alog_workflow_telemetry(session_id=session_id, run_id=workflow_run_response.run_id)
1429
2022
 
1430
2023
  return workflow_run_response
1431
2024
 
1432
2025
  async def _aexecute_stream(
1433
2026
  self,
1434
- session: WorkflowSession,
2027
+ session_id: str,
2028
+ user_id: Optional[str],
1435
2029
  execution_input: WorkflowExecutionInput,
1436
2030
  workflow_run_response: WorkflowRunOutput,
1437
- session_state: Optional[Dict[str, Any]] = None,
1438
- stream_intermediate_steps: bool = False,
2031
+ run_context: RunContext,
2032
+ stream_events: bool = False,
1439
2033
  websocket_handler: Optional[WebSocketHandler] = None,
1440
2034
  **kwargs: Any,
1441
2035
  ) -> AsyncIterator[WorkflowRunOutputEvent]:
1442
2036
  """Execute a specific pipeline by name with event streaming"""
1443
2037
  from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
1444
2038
 
2039
+ # Read existing session from database
2040
+ workflow_session, run_context.session_state = await self._aload_or_create_session(
2041
+ session_id=session_id, user_id=user_id, session_state=run_context.session_state
2042
+ )
2043
+
1445
2044
  workflow_run_response.status = RunStatus.running
2045
+
2046
+ # Register run for cancellation tracking
2047
+ if workflow_run_response.run_id:
2048
+ register_run(workflow_run_response.run_id)
2049
+
1446
2050
  workflow_started_event = WorkflowStartedEvent(
1447
2051
  run_id=workflow_run_response.run_id or "",
1448
2052
  workflow_name=workflow_run_response.workflow_name,
@@ -1484,23 +2088,33 @@ class Workflow:
1484
2088
  collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
1485
2089
  previous_step_outputs: Dict[str, StepOutput] = {}
1486
2090
 
1487
- shared_images: List[ImageArtifact] = execution_input.images or []
1488
- output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
1489
- shared_videos: List[VideoArtifact] = execution_input.videos or []
1490
- output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
1491
- shared_audio: List[AudioArtifact] = execution_input.audio or []
1492
- output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
2091
+ shared_images: List[Image] = execution_input.images or []
2092
+ output_images: List[Image] = (execution_input.images or []).copy() # Start with input images
2093
+ shared_videos: List[Video] = execution_input.videos or []
2094
+ output_videos: List[Video] = (execution_input.videos or []).copy() # Start with input videos
2095
+ shared_audio: List[Audio] = execution_input.audio or []
2096
+ output_audio: List[Audio] = (execution_input.audio or []).copy() # Start with input audio
1493
2097
  shared_files: List[File] = execution_input.files or []
1494
2098
  output_files: List[File] = (execution_input.files or []).copy() # Start with input files
1495
2099
 
1496
2100
  early_termination = False
1497
2101
 
2102
+ # Track partial step data in case of cancellation
2103
+ current_step_name = ""
2104
+ current_step = None
2105
+ partial_step_content = ""
2106
+
1498
2107
  for i, step in enumerate(self.steps): # type: ignore[arg-type]
1499
2108
  if workflow_run_response.run_id:
1500
2109
  raise_if_cancelled(workflow_run_response.run_id)
1501
2110
  step_name = getattr(step, "name", f"step_{i + 1}")
1502
2111
  log_debug(f"Async streaming step {i + 1}/{self._get_step_count()}: {step_name}")
1503
2112
 
2113
+ current_step_name = step_name
2114
+ current_step = step
2115
+ # Reset partial data for this step
2116
+ partial_step_content = ""
2117
+
1504
2118
  # Create enhanced StepInput
1505
2119
  step_input = self._create_step_input(
1506
2120
  execution_input=execution_input,
@@ -1514,16 +2128,26 @@ class Workflow:
1514
2128
  # Execute step with streaming and yield all events
1515
2129
  async for event in step.aexecute_stream( # type: ignore[union-attr]
1516
2130
  step_input,
1517
- session_id=session.session_id,
2131
+ session_id=session_id,
1518
2132
  user_id=self.user_id,
1519
- stream_intermediate_steps=stream_intermediate_steps,
2133
+ stream_events=stream_events,
2134
+ stream_executor_events=self.stream_executor_events,
1520
2135
  workflow_run_response=workflow_run_response,
1521
- session_state=session_state,
2136
+ run_context=run_context,
1522
2137
  step_index=i,
1523
2138
  store_executor_outputs=self.store_executor_outputs,
2139
+ workflow_session=workflow_session,
2140
+ add_workflow_history_to_steps=self.add_workflow_history_to_steps
2141
+ if self.add_workflow_history_to_steps
2142
+ else None,
2143
+ num_history_runs=self.num_history_runs,
1524
2144
  ):
1525
2145
  if workflow_run_response.run_id:
1526
2146
  raise_if_cancelled(workflow_run_response.run_id)
2147
+
2148
+ # Accumulate partial data from streaming events
2149
+ partial_step_content = self._accumulate_partial_step_data(event, partial_step_content) # type: ignore
2150
+
1527
2151
  if isinstance(event, StepOutput):
1528
2152
  step_output = event
1529
2153
  collected_step_outputs.append(step_output)
@@ -1570,11 +2194,23 @@ class Workflow:
1570
2194
  yield step_output_event
1571
2195
 
1572
2196
  elif isinstance(event, WorkflowRunOutputEvent): # type: ignore
1573
- yield self._handle_event(event, workflow_run_response, websocket_handler=websocket_handler) # type: ignore
2197
+ # Enrich event with workflow context before yielding
2198
+ enriched_event = self._enrich_event_with_workflow_context(
2199
+ event, workflow_run_response, step_index=i, step=step
2200
+ )
2201
+ yield self._handle_event(
2202
+ enriched_event, workflow_run_response, websocket_handler=websocket_handler
2203
+ ) # type: ignore
1574
2204
 
1575
2205
  else:
1576
- # Yield other internal events
1577
- yield self._handle_event(event, workflow_run_response, websocket_handler=websocket_handler) # type: ignore
2206
+ # Enrich other events with workflow context before yielding
2207
+ enriched_event = self._enrich_event_with_workflow_context(
2208
+ event, workflow_run_response, step_index=i, step=step
2209
+ )
2210
+ if self.stream_executor_events:
2211
+ yield self._handle_event(
2212
+ enriched_event, workflow_run_response, websocket_handler=websocket_handler
2213
+ ) # type: ignore
1578
2214
 
1579
2215
  # Break out of main step loop if early termination was requested
1580
2216
  if "early_termination" in locals() and early_termination:
@@ -1582,7 +2218,14 @@ class Workflow:
1582
2218
 
1583
2219
  # Update the workflow_run_response with completion data
1584
2220
  if collected_step_outputs:
1585
- workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
2221
+ # Stop the timer for the Run duration
2222
+ if workflow_run_response.metrics:
2223
+ workflow_run_response.metrics.stop_timer()
2224
+
2225
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
2226
+ collected_step_outputs,
2227
+ workflow_run_response.metrics, # type: ignore[arg-type]
2228
+ )
1586
2229
  last_output = cast(StepOutput, collected_step_outputs[-1])
1587
2230
 
1588
2231
  # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
@@ -1605,16 +2248,64 @@ class Workflow:
1605
2248
  workflow_run_response.audio = output_audio
1606
2249
  workflow_run_response.status = RunStatus.completed
1607
2250
 
2251
+ except (InputCheckError, OutputCheckError) as e:
2252
+ log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
2253
+
2254
+ from agno.run.workflow import WorkflowErrorEvent
2255
+
2256
+ error_event = WorkflowErrorEvent(
2257
+ run_id=workflow_run_response.run_id or "",
2258
+ workflow_id=self.id,
2259
+ workflow_name=self.name,
2260
+ session_id=session_id,
2261
+ error=str(e),
2262
+ )
2263
+
2264
+ yield error_event
2265
+
2266
+ # Update workflow_run_response with error
2267
+ workflow_run_response.content = error_event.error
2268
+ workflow_run_response.status = RunStatus.error
1608
2269
  except RunCancelledException as e:
1609
2270
  # Handle run cancellation during streaming
1610
2271
  logger.info(f"Workflow run {workflow_run_response.run_id} was cancelled during streaming")
1611
2272
  workflow_run_response.status = RunStatus.cancelled
1612
2273
  workflow_run_response.content = str(e)
2274
+
2275
+ # Capture partial progress from the step that was cancelled mid-stream
2276
+ if partial_step_content:
2277
+ logger.info(
2278
+ f"Step with name '{current_step_name}' was cancelled. Setting its partial progress as step output."
2279
+ )
2280
+ partial_step_output = StepOutput(
2281
+ step_name=current_step_name,
2282
+ step_id=getattr(current_step, "step_id", None) if current_step else None,
2283
+ step_type=StepType.STEP,
2284
+ executor_type=getattr(current_step, "executor_type", None) if current_step else None,
2285
+ executor_name=getattr(current_step, "executor_name", None) if current_step else None,
2286
+ content=partial_step_content,
2287
+ success=False,
2288
+ error="Cancelled during execution",
2289
+ )
2290
+ collected_step_outputs.append(partial_step_output)
2291
+
2292
+ # Preserve all progress (completed steps + partial step) before cancellation
2293
+ if collected_step_outputs:
2294
+ workflow_run_response.step_results = collected_step_outputs
2295
+ # Stop the timer for the Run duration
2296
+ if workflow_run_response.metrics:
2297
+ workflow_run_response.metrics.stop_timer()
2298
+
2299
+ workflow_run_response.metrics = self._aggregate_workflow_metrics(
2300
+ collected_step_outputs,
2301
+ workflow_run_response.metrics, # type: ignore[arg-type]
2302
+ )
2303
+
1613
2304
  cancelled_event = WorkflowCancelledEvent(
1614
2305
  run_id=workflow_run_response.run_id or "",
1615
2306
  workflow_id=self.id,
1616
2307
  workflow_name=self.name,
1617
- session_id=session.session_id,
2308
+ session_id=session_id,
1618
2309
  reason=str(e),
1619
2310
  )
1620
2311
  yield self._handle_event(
@@ -1631,7 +2322,7 @@ class Workflow:
1631
2322
  run_id=workflow_run_response.run_id or "",
1632
2323
  workflow_id=self.id,
1633
2324
  workflow_name=self.name,
1634
- session_id=session.session_id,
2325
+ session_id=session_id,
1635
2326
  error=str(e),
1636
2327
  )
1637
2328
 
@@ -1640,6 +2331,7 @@ class Workflow:
1640
2331
  # Update workflow_run_response with error
1641
2332
  workflow_run_response.content = error_event.error
1642
2333
  workflow_run_response.status = RunStatus.error
2334
+ raise e
1643
2335
 
1644
2336
  # Yield workflow completed event
1645
2337
  workflow_completed_event = WorkflowCompletedEvent(
@@ -1653,14 +2345,21 @@ class Workflow:
1653
2345
  )
1654
2346
  yield self._handle_event(workflow_completed_event, workflow_run_response, websocket_handler=websocket_handler)
1655
2347
 
2348
+ # Stop timer on error
2349
+ if workflow_run_response.metrics:
2350
+ workflow_run_response.metrics.stop_timer()
2351
+
1656
2352
  # Store the completed workflow response
1657
- self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
1658
- session.upsert_run(run=workflow_run_response)
1659
- self.save_session(session=session)
2353
+ self._update_session_metrics(session=workflow_session, workflow_run_response=workflow_run_response)
2354
+ workflow_session.upsert_run(run=workflow_run_response)
2355
+ if self._has_async_db():
2356
+ await self.asave_session(session=workflow_session)
2357
+ else:
2358
+ self.save_session(session=workflow_session)
1660
2359
 
1661
2360
  # Log Workflow Telemetry
1662
2361
  if self.telemetry:
1663
- await self._alog_workflow_telemetry(session_id=session.session_id, run_id=workflow_run_response.run_id)
2362
+ await self._alog_workflow_telemetry(session_id=session_id, run_id=workflow_run_response.run_id)
1664
2363
 
1665
2364
  # Always clean up the run tracking
1666
2365
  cleanup_run(workflow_run_response.run_id) # type: ignore
@@ -1684,22 +2383,26 @@ class Workflow:
1684
2383
 
1685
2384
  self.initialize_workflow()
1686
2385
 
1687
- session_id, user_id, session_state = self._initialize_session(
1688
- session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
1689
- )
2386
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
1690
2387
 
1691
2388
  # Read existing session from database
1692
- workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1693
- self._update_metadata(session=workflow_session)
2389
+ workflow_session, session_state = await self._aload_or_create_session(
2390
+ session_id=session_id, user_id=user_id, session_state=session_state
2391
+ )
1694
2392
 
1695
- # Update session state from DB
1696
- session_state = self._update_session_state(session=workflow_session, session_state=session_state)
2393
+ run_context = RunContext(
2394
+ run_id=run_id,
2395
+ session_id=session_id,
2396
+ user_id=user_id,
2397
+ session_state=session_state,
2398
+ )
1697
2399
 
1698
2400
  self._prepare_steps()
1699
2401
 
1700
2402
  # Create workflow run response with PENDING status
1701
2403
  workflow_run_response = WorkflowRunOutput(
1702
2404
  run_id=run_id,
2405
+ input=input,
1703
2406
  session_id=session_id,
1704
2407
  workflow_id=self.id,
1705
2408
  workflow_name=self.name,
@@ -1707,9 +2410,16 @@ class Workflow:
1707
2410
  status=RunStatus.pending,
1708
2411
  )
1709
2412
 
2413
+ # Start the run metrics timer
2414
+ workflow_run_response.metrics = WorkflowMetrics(steps={})
2415
+ workflow_run_response.metrics.start_timer()
2416
+
1710
2417
  # Store PENDING response immediately
1711
2418
  workflow_session.upsert_run(run=workflow_run_response)
1712
- self.save_session(session=workflow_session)
2419
+ if self._has_async_db():
2420
+ await self.asave_session(session=workflow_session)
2421
+ else:
2422
+ self.save_session(session=workflow_session)
1713
2423
 
1714
2424
  # Prepare execution input
1715
2425
  inputs = WorkflowExecutionInput(
@@ -1728,15 +2438,29 @@ class Workflow:
1728
2438
  try:
1729
2439
  # Update status to RUNNING and save
1730
2440
  workflow_run_response.status = RunStatus.running
1731
- self.save_session(session=workflow_session)
1732
-
1733
- await self._aexecute(
1734
- session=workflow_session,
1735
- execution_input=inputs,
1736
- workflow_run_response=workflow_run_response,
1737
- session_state=session_state,
1738
- **kwargs,
1739
- )
2441
+ if self._has_async_db():
2442
+ await self.asave_session(session=workflow_session)
2443
+ else:
2444
+ self.save_session(session=workflow_session)
2445
+
2446
+ if self.agent is not None:
2447
+ self._aexecute_workflow_agent(
2448
+ user_input=input, # type: ignore
2449
+ execution_input=inputs,
2450
+ run_context=run_context,
2451
+ stream=False,
2452
+ **kwargs,
2453
+ )
2454
+ else:
2455
+ await self._aexecute(
2456
+ session_id=session_id,
2457
+ user_id=user_id,
2458
+ execution_input=inputs,
2459
+ workflow_run_response=workflow_run_response,
2460
+ run_context=run_context,
2461
+ session_state=session_state,
2462
+ **kwargs,
2463
+ )
1740
2464
 
1741
2465
  log_debug(f"Background execution completed with status: {workflow_run_response.status}")
1742
2466
 
@@ -1744,7 +2468,10 @@ class Workflow:
1744
2468
  logger.error(f"Background workflow execution failed: {e}")
1745
2469
  workflow_run_response.status = RunStatus.error
1746
2470
  workflow_run_response.content = f"Background execution failed: {str(e)}"
1747
- self.save_session(session=workflow_session)
2471
+ if self._has_async_db():
2472
+ await self.asave_session(session=workflow_session)
2473
+ else:
2474
+ self.save_session(session=workflow_session)
1748
2475
 
1749
2476
  # Create and start asyncio task
1750
2477
  loop = asyncio.get_running_loop()
@@ -1764,102 +2491,944 @@ class Workflow:
1764
2491
  images: Optional[List[Image]] = None,
1765
2492
  videos: Optional[List[Video]] = None,
1766
2493
  files: Optional[List[File]] = None,
1767
- stream_intermediate_steps: bool = False,
2494
+ stream_events: bool = False,
1768
2495
  websocket_handler: Optional[WebSocketHandler] = None,
1769
2496
  **kwargs: Any,
1770
2497
  ) -> WorkflowRunOutput:
1771
2498
  """Execute workflow in background with streaming and WebSocket broadcasting"""
1772
2499
 
1773
- run_id = str(uuid4())
2500
+ run_id = str(uuid4())
2501
+
2502
+ self.initialize_workflow()
2503
+
2504
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
2505
+
2506
+ # Read existing session from database
2507
+ workflow_session, session_state = await self._aload_or_create_session(
2508
+ session_id=session_id, user_id=user_id, session_state=session_state
2509
+ )
2510
+
2511
+ run_context = RunContext(
2512
+ run_id=run_id,
2513
+ session_id=session_id,
2514
+ user_id=user_id,
2515
+ session_state=session_state,
2516
+ )
2517
+
2518
+ self._prepare_steps()
2519
+
2520
+ # Create workflow run response with PENDING status
2521
+ workflow_run_response = WorkflowRunOutput(
2522
+ run_id=run_id,
2523
+ input=input,
2524
+ session_id=session_id,
2525
+ workflow_id=self.id,
2526
+ workflow_name=self.name,
2527
+ created_at=int(datetime.now().timestamp()),
2528
+ status=RunStatus.pending,
2529
+ )
2530
+
2531
+ # Start the run metrics timer
2532
+ workflow_run_response.metrics = WorkflowMetrics(steps={})
2533
+ workflow_run_response.metrics.start_timer()
2534
+
2535
+ # Prepare execution input
2536
+ inputs = WorkflowExecutionInput(
2537
+ input=input,
2538
+ additional_data=additional_data,
2539
+ audio=audio, # type: ignore
2540
+ images=images, # type: ignore
2541
+ videos=videos, # type: ignore
2542
+ files=files, # type: ignore
2543
+ )
2544
+
2545
+ self.update_agents_and_teams_session_info()
2546
+
2547
+ async def execute_workflow_background_stream():
2548
+ """Background execution with streaming and WebSocket broadcasting"""
2549
+ try:
2550
+ if self.agent is not None:
2551
+ result = self._aexecute_workflow_agent(
2552
+ user_input=input, # type: ignore
2553
+ run_context=run_context,
2554
+ execution_input=inputs,
2555
+ stream=True,
2556
+ websocket_handler=websocket_handler,
2557
+ **kwargs,
2558
+ )
2559
+ # For streaming, result is an async iterator
2560
+ async for event in result: # type: ignore
2561
+ # Events are automatically broadcast by _handle_event in the agent execution
2562
+ # We just consume them here to drive the execution
2563
+ pass
2564
+ log_debug(
2565
+ f"Background streaming execution (workflow agent) completed with status: {workflow_run_response.status}"
2566
+ )
2567
+ else:
2568
+ # Update status to RUNNING and save
2569
+ workflow_run_response.status = RunStatus.running
2570
+ if self._has_async_db():
2571
+ await self.asave_session(session=workflow_session)
2572
+ else:
2573
+ self.save_session(session=workflow_session)
2574
+
2575
+ # Execute with streaming - consume all events (they're auto-broadcast via _handle_event)
2576
+ async for event in self._aexecute_stream(
2577
+ session_id=session_id,
2578
+ user_id=user_id,
2579
+ execution_input=inputs,
2580
+ workflow_run_response=workflow_run_response,
2581
+ stream_events=stream_events,
2582
+ run_context=run_context,
2583
+ websocket_handler=websocket_handler,
2584
+ **kwargs,
2585
+ ):
2586
+ # Events are automatically broadcast by _handle_event
2587
+ # We just consume them here to drive the execution
2588
+ pass
2589
+
2590
+ log_debug(f"Background streaming execution completed with status: {workflow_run_response.status}")
2591
+
2592
+ except Exception as e:
2593
+ logger.error(f"Background streaming workflow execution failed: {e}")
2594
+ workflow_run_response.status = RunStatus.error
2595
+ workflow_run_response.content = f"Background streaming execution failed: {str(e)}"
2596
+ if self._has_async_db():
2597
+ await self.asave_session(session=workflow_session)
2598
+ else:
2599
+ self.save_session(session=workflow_session)
2600
+
2601
+ # Create and start asyncio task for background streaming execution
2602
+ loop = asyncio.get_running_loop()
2603
+ loop.create_task(execute_workflow_background_stream())
2604
+
2605
+ # Return SAME object that will be updated by background execution
2606
+ return workflow_run_response
2607
+
+    async def aget_run(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
+        """Get the status and details of a background workflow run."""
+        # Use provided session_id or fall back to self.session_id
+        _session_id = session_id if session_id is not None else self.session_id
+
+        if self.db is not None and _session_id is not None:
+            session = await self.db.aget_session(session_id=_session_id, session_type=SessionType.WORKFLOW)  # type: ignore
+            if session and isinstance(session, WorkflowSession) and session.runs:
+                # Find the run by ID
+                for run in session.runs:
+                    if run.run_id == run_id:
+                        return run
+
+        return None
+
+    def get_run(self, run_id: str, session_id: Optional[str] = None) -> Optional[WorkflowRunOutput]:
+        """Get the status and details of a background workflow run."""
+        # Use provided session_id or fall back to self.session_id
+        _session_id = session_id if session_id is not None else self.session_id
+
+        if self.db is not None and _session_id is not None:
+            session = self.db.get_session(session_id=_session_id, session_type=SessionType.WORKFLOW)
+            if session and isinstance(session, WorkflowSession) and session.runs:
+                # Find the run by ID
+                for run in session.runs:
+                    if run.run_id == run_id:
+                        return run
+
+        return None
+
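
`aget_run` / `get_run` look a run up by id inside the stored workflow session. A minimal polling sketch under stated assumptions: a background run was started beforehand so `workflow`, `run_id` and `session_id` already exist, and `RunStatus` is importable from agno (its exact module is not shown in this diff):

```python
import time

# Assumes `workflow` is a Workflow wired to a database and that a background run was
# started earlier, yielding `run_id` and `session_id` (not shown here).
while True:
    run = workflow.get_run(run_id=run_id, session_id=session_id)
    if run is not None and run.status in (RunStatus.completed, RunStatus.error, RunStatus.cancelled):
        print(run.status, run.content)
        break
    time.sleep(1)
```
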
+    def _initialize_workflow_agent(
+        self,
+        session: WorkflowSession,
+        execution_input: WorkflowExecutionInput,
+        run_context: RunContext,
+        stream: bool = False,
+    ) -> None:
+        """Initialize the workflow agent with tools (but NOT context - that's passed per-run)"""
+        from agno.tools.function import Function
+
+        workflow_tool_func = self.agent.create_workflow_tool(  # type: ignore
+            workflow=self,
+            session=session,
+            execution_input=execution_input,
+            run_context=run_context,
+            stream=stream,
+        )
+        workflow_tool = Function.from_callable(workflow_tool_func)
+
+        self.agent.tools = [workflow_tool]  # type: ignore
+        self.agent._rebuild_tools = True  # type: ignore
+
+        log_debug("Workflow agent initialized with run_workflow tool")
+
+    def _get_workflow_agent_dependencies(self, session: WorkflowSession) -> Dict[str, Any]:
+        """Build dependencies dict with workflow context to pass to agent.run()"""
+        # Get configuration from the WorkflowAgent instance
+        add_history = True
+        num_runs = 5
+
+        if self.agent and isinstance(self.agent, WorkflowAgent):
+            add_history = self.agent.add_workflow_history
+            num_runs = self.agent.num_history_runs or 5
+
+        if add_history:
+            history_context = (
+                session.get_workflow_history_context(num_runs=num_runs) or "No previous workflow runs in this session."
+            )
+        else:
+            history_context = "No workflow history available."
+
+        # Build workflow context with description and history
+        workflow_context = ""
+        if self.description:
+            workflow_context += f"Workflow Description: {self.description}\n\n"
+
+        workflow_context += history_context
+
+        return {
+            "workflow_context": workflow_context,
+        }
+
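
The dict built here carries a single `workflow_context` string combining the optional workflow description with recent run history (or a fallback message). A hedged sketch of how it is consumed, mirroring the per-run wiring that appears further down in this diff:

```python
# Sketch only, inside a Workflow method: dependencies are rebuilt for every run and
# handed to the agent together with the session state.
run_context.dependencies = self._get_workflow_agent_dependencies(session)
# e.g. {"workflow_context": "Workflow Description: ...\n\n<recent runs or fallback text>"}
```
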
+ def _execute_workflow_agent(
2691
+ self,
2692
+ user_input: Union[str, Dict[str, Any], List[Any], BaseModel],
2693
+ session: WorkflowSession,
2694
+ execution_input: WorkflowExecutionInput,
2695
+ run_context: RunContext,
2696
+ stream: bool = False,
2697
+ **kwargs: Any,
2698
+ ) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
2699
+ """
2700
+ Execute the workflow agent in streaming or non-streaming mode.
2701
+
2702
+ The agent decides whether to run the workflow or answer directly from history.
2703
+
2704
+ Args:
2705
+ user_input: The user's input
2706
+ session: The workflow session
2707
+ execution_input: The execution input
2708
+ run_context: The run context
2709
+ stream: Whether to stream the response
2710
+ **kwargs: Additional keyword arguments
2711
+
2712
+ Returns:
2713
+ WorkflowRunOutput if stream=False, Iterator[WorkflowRunOutputEvent] if stream=True
2714
+ """
2715
+ if stream:
2716
+ return self._run_workflow_agent_stream(
2717
+ agent_input=user_input,
2718
+ session=session,
2719
+ execution_input=execution_input,
2720
+ run_context=run_context,
2721
+ stream=stream,
2722
+ **kwargs,
2723
+ )
2724
+ else:
2725
+ return self._run_workflow_agent(
2726
+ agent_input=user_input,
2727
+ session=session,
2728
+ execution_input=execution_input,
2729
+ run_context=run_context,
2730
+ stream=stream,
2731
+ )
2732
+
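
`_execute_workflow_agent` is the entry point for the agent-driven mode: the agent either calls the generated `run_workflow` tool or answers directly from history. A hedged configuration sketch; the constructor arguments below are assumptions inferred from the `add_workflow_history` / `num_history_runs` attributes read in `_get_workflow_agent_dependencies`, not a documented signature, and the step objects are placeholders:

```python
# Hypothetical wiring; argument names are assumptions and triage_step / answer_step
# stand in for real workflow steps.
agent = WorkflowAgent(add_workflow_history=True, num_history_runs=5)
workflow = Workflow(name="support-triage", steps=[triage_step, answer_step], agent=agent)
```
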
2733
+ def _run_workflow_agent_stream(
2734
+ self,
2735
+ agent_input: Union[str, Dict[str, Any], List[Any], BaseModel],
2736
+ session: WorkflowSession,
2737
+ execution_input: WorkflowExecutionInput,
2738
+ run_context: RunContext,
2739
+ stream: bool = False,
2740
+ **kwargs: Any,
2741
+ ) -> Iterator[WorkflowRunOutputEvent]:
2742
+ """
2743
+ Execute the workflow agent in streaming mode.
2744
+
2745
+ The agent's tool (run_workflow) is a generator that yields workflow events directly.
2746
+ These events bubble up through the agent's streaming and are yielded here.
2747
+ We filter to only yield WorkflowRunOutputEvent to the CLI.
2748
+
2749
+ Yields:
2750
+ WorkflowRunOutputEvent: Events from workflow execution (agent events are filtered)
2751
+ """
2752
+ from typing import get_args
2753
+
2754
+ from agno.run.workflow import WorkflowCompletedEvent, WorkflowRunOutputEvent
2755
+
2756
+ # Initialize agent with stream_intermediate_steps=True so tool yields events
2757
+ self._initialize_workflow_agent(session, execution_input, run_context=run_context, stream=stream)
2758
+
2759
+ # Build dependencies with workflow context
2760
+ run_context.dependencies = self._get_workflow_agent_dependencies(session)
2761
+
2762
+ # Run agent with streaming - workflow events will bubble up from the tool
2763
+ agent_response: Optional[RunOutput] = None
2764
+ workflow_executed = False
2765
+
2766
+ from agno.run.agent import RunContentEvent
2767
+ from agno.run.team import RunContentEvent as TeamRunContentEvent
2768
+ from agno.run.workflow import WorkflowAgentCompletedEvent, WorkflowAgentStartedEvent
2769
+
2770
+ log_debug(f"Executing workflow agent with streaming - input: {agent_input}...")
2771
+
2772
+ # Create a workflow run response upfront for potential direct answer (will be used only if workflow is not executed)
2773
+ run_id = str(uuid4())
2774
+ direct_reply_run_response = WorkflowRunOutput(
2775
+ run_id=run_id,
2776
+ input=execution_input.input,
2777
+ session_id=session.session_id,
2778
+ workflow_id=self.id,
2779
+ workflow_name=self.name,
2780
+ created_at=int(datetime.now().timestamp()),
2781
+ )
2782
+
2783
+ # Yield WorkflowAgentStartedEvent at the beginning (stored in direct_reply_run_response)
2784
+ agent_started_event = WorkflowAgentStartedEvent(
2785
+ workflow_name=self.name,
2786
+ workflow_id=self.id,
2787
+ session_id=session.session_id,
2788
+ )
2789
+ yield agent_started_event
2790
+
2791
+ # Run the agent in streaming mode and yield all events
2792
+ for event in self.agent.run( # type: ignore[union-attr]
2793
+ input=agent_input,
2794
+ stream=True,
2795
+ stream_intermediate_steps=True,
2796
+ yield_run_response=True,
2797
+ session_id=session.session_id,
2798
+ dependencies=run_context.dependencies, # Pass context dynamically per-run
2799
+ session_state=run_context.session_state, # Pass session state dynamically per-run
2800
+ ): # type: ignore
2801
+ if isinstance(event, tuple(get_args(WorkflowRunOutputEvent))):
2802
+ yield event # type: ignore[misc]
2803
+
2804
+ # Track if workflow was executed by checking for WorkflowCompletedEvent
2805
+ if isinstance(event, WorkflowCompletedEvent):
2806
+ workflow_executed = True
2807
+ elif isinstance(event, (RunContentEvent, TeamRunContentEvent)):
2808
+ if event.step_name is None:
2809
+ # This is from the workflow agent itself
2810
+ # Enrich with metadata to mark it as a workflow agent event
2811
+
2812
+ if workflow_executed:
2813
+ continue # Skip if workflow was already executed
2814
+
2815
+ # workflow_agent field is used by consumers of the events to distinguish between workflow agent and regular agent
2816
+ event.workflow_agent = True # type: ignore
2817
+ yield event # type: ignore[misc]
2818
+
2819
+ # Capture the final RunOutput (but don't yield it)
2820
+ if isinstance(event, RunOutput):
2821
+ agent_response = event
2822
+
2823
+ # Handle direct answer case (no workflow execution)
2824
+ if not workflow_executed:
2825
+ # Update the pre-created workflow run response with the direct answer
2826
+ direct_reply_run_response.content = agent_response.content if agent_response else ""
2827
+ direct_reply_run_response.status = RunStatus.completed
2828
+ direct_reply_run_response.workflow_agent_run = agent_response
2829
+
2830
+ workflow_run_response = direct_reply_run_response
2831
+
2832
+ # Store the full agent RunOutput and establish parent-child relationship
2833
+ if agent_response:
2834
+ agent_response.parent_run_id = workflow_run_response.run_id
2835
+ agent_response.workflow_id = workflow_run_response.workflow_id
2836
+
2837
+ log_debug(f"Agent decision: workflow_executed={workflow_executed}")
2838
+
2839
+ # Yield WorkflowAgentCompletedEvent (used internally by print_response_stream)
2840
+ agent_completed_event = WorkflowAgentCompletedEvent(
2841
+ run_id=agent_response.run_id if agent_response else None,
2842
+ workflow_name=self.name,
2843
+ workflow_id=self.id,
2844
+ session_id=session.session_id,
2845
+ content=workflow_run_response.content,
2846
+ )
2847
+ yield agent_completed_event
2848
+
2849
+ # Yield a workflow completed event with the agent's direct response
2850
+ completed_event = WorkflowCompletedEvent(
2851
+ run_id=workflow_run_response.run_id or "",
2852
+ content=workflow_run_response.content,
2853
+ workflow_name=workflow_run_response.workflow_name,
2854
+ workflow_id=workflow_run_response.workflow_id,
2855
+ session_id=workflow_run_response.session_id,
2856
+ step_results=[],
2857
+ metadata={"agent_direct_response": True},
2858
+ )
2859
+ yield completed_event
2860
+
2861
+ # Update the run in session
2862
+ session.upsert_run(run=workflow_run_response)
2863
+ # Save session
2864
+ self.save_session(session=session)
2865
+
2866
+ else:
2867
+ # Workflow was executed by the tool
2868
+ reloaded_session = self.get_session(session_id=session.session_id)
2869
+
2870
+ if reloaded_session and reloaded_session.runs and len(reloaded_session.runs) > 0:
2871
+ # Get the last run (which is the one just created by the tool)
2872
+ last_run = reloaded_session.runs[-1]
2873
+
2874
+ # Yield WorkflowAgentCompletedEvent
2875
+ agent_completed_event = WorkflowAgentCompletedEvent(
2876
+ run_id=agent_response.run_id if agent_response else None,
2877
+ workflow_name=self.name,
2878
+ workflow_id=self.id,
2879
+ session_id=session.session_id,
2880
+ content=agent_response.content if agent_response else None,
2881
+ )
2882
+ yield agent_completed_event
2883
+
2884
+ # Update the last run with workflow_agent_run
2885
+ last_run.workflow_agent_run = agent_response
2886
+
2887
+ # Store the full agent RunOutput and establish parent-child relationship
2888
+ if agent_response:
2889
+ agent_response.parent_run_id = last_run.run_id
2890
+ agent_response.workflow_id = last_run.workflow_id
2891
+
2892
+ # Save the reloaded session (which has the updated run)
2893
+ self.save_session(session=reloaded_session)
2894
+
2895
+ else:
2896
+ log_warning("Could not reload session or no runs found after workflow execution")
2897
+
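
The streaming handlers above filter events with `isinstance(event, tuple(get_args(WorkflowRunOutputEvent)))`, which relies on the event type being (or behaving like) a `Union` alias. A small self-contained illustration of that idiom, using stand-in classes rather than the real event types:

```python
from typing import Union, get_args


class StepStarted: ...


class StepCompleted: ...


# Stand-in for a Union-style alias such as WorkflowRunOutputEvent.
FakeWorkflowEvent = Union[StepStarted, StepCompleted]


def is_workflow_event(obj: object) -> bool:
    # get_args() unpacks the Union into its concrete classes so isinstance() can check them.
    return isinstance(obj, tuple(get_args(FakeWorkflowEvent)))


assert is_workflow_event(StepStarted())
assert not is_workflow_event("not an event")
```
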
2898
+ def _run_workflow_agent(
2899
+ self,
2900
+ agent_input: Union[str, Dict[str, Any], List[Any], BaseModel],
2901
+ session: WorkflowSession,
2902
+ execution_input: WorkflowExecutionInput,
2903
+ run_context: RunContext,
2904
+ stream: bool = False,
2905
+ ) -> WorkflowRunOutput:
2906
+ """
2907
+ Execute the workflow agent in non-streaming mode.
2908
+
2909
+ The agent decides whether to run the workflow or answer directly from history.
2910
+
2911
+ Returns:
2912
+ WorkflowRunOutput: The workflow run output with agent response
2913
+ """
2914
+
2915
+ # Initialize the agent
2916
+ self._initialize_workflow_agent(session, execution_input, run_context=run_context, stream=stream)
2917
+
2918
+ # Build dependencies with workflow context
2919
+ run_context.dependencies = self._get_workflow_agent_dependencies(session)
2920
+
2921
+ # Run the agent
2922
+ agent_response: RunOutput = self.agent.run( # type: ignore[union-attr]
2923
+ input=agent_input,
2924
+ session_id=session.session_id,
2925
+ dependencies=run_context.dependencies,
2926
+ session_state=run_context.session_state,
2927
+ stream=stream,
2928
+ ) # type: ignore
2929
+
2930
+ # Check if the agent called the workflow tool
2931
+ workflow_executed = False
2932
+ if agent_response.messages:
2933
+ for message in agent_response.messages:
2934
+ if message.role == "assistant" and message.tool_calls:
2935
+ # Check if the tool call is specifically for run_workflow
2936
+ for tool_call in message.tool_calls:
2937
+ # Handle both dict and object formats
2938
+ if isinstance(tool_call, dict):
2939
+ tool_name = tool_call.get("function", {}).get("name", "")
2940
+ else:
2941
+ tool_name = tool_call.function.name if hasattr(tool_call, "function") else ""
2942
+
2943
+ if tool_name == "run_workflow":
2944
+ workflow_executed = True
2945
+ break
2946
+ if workflow_executed:
2947
+ break
2948
+
2949
+ log_debug(f"Workflow agent execution complete. Workflow executed: {workflow_executed}")
2950
+
2951
+ # Handle direct answer case (no workflow execution)
2952
+ if not workflow_executed:
2953
+ # Create a new workflow run output for the direct answer
2954
+ run_id = str(uuid4())
2955
+ workflow_run_response = WorkflowRunOutput(
2956
+ run_id=run_id,
2957
+ input=execution_input.input,
2958
+ session_id=session.session_id,
2959
+ workflow_id=self.id,
2960
+ workflow_name=self.name,
2961
+ created_at=int(datetime.now().timestamp()),
2962
+ content=agent_response.content,
2963
+ status=RunStatus.completed,
2964
+ workflow_agent_run=agent_response,
2965
+ )
2966
+
2967
+ # Store the full agent RunOutput and establish parent-child relationship
2968
+ if agent_response:
2969
+ agent_response.parent_run_id = workflow_run_response.run_id
2970
+ agent_response.workflow_id = workflow_run_response.workflow_id
2971
+
2972
+ # Update the run in session
2973
+ session.upsert_run(run=workflow_run_response)
2974
+ self.save_session(session=session)
2975
+
2976
+ log_debug(f"Agent decision: workflow_executed={workflow_executed}")
2977
+
2978
+ return workflow_run_response
2979
+ else:
2980
+ # Workflow was executed by the tool
2981
+ reloaded_session = self.get_session(session_id=session.session_id)
2982
+
2983
+ if reloaded_session and reloaded_session.runs and len(reloaded_session.runs) > 0:
2984
+ # Get the last run (which is the one just created by the tool)
2985
+ last_run = reloaded_session.runs[-1]
2986
+
2987
+ # Update the last run directly with workflow_agent_run
2988
+ last_run.workflow_agent_run = agent_response
2989
+
2990
+ # Store the full agent RunOutput and establish parent-child relationship
2991
+ if agent_response:
2992
+ agent_response.parent_run_id = last_run.run_id
2993
+ agent_response.workflow_id = last_run.workflow_id
2994
+
2995
+ # Save the reloaded session (which has the updated run)
2996
+ self.save_session(session=reloaded_session)
2997
+
2998
+ # Return the last run directly (the WorkflowRunOutput produced by the inner workflow)
2999
+ return last_run
3000
+ else:
3001
+ log_warning("Could not reload session or no runs found after workflow execution")
3002
+ # Return a placeholder error response
3003
+ return WorkflowRunOutput(
3004
+ run_id=str(uuid4()),
3005
+ input=execution_input.input,
3006
+ session_id=session.session_id,
3007
+ workflow_id=self.id,
3008
+ workflow_name=self.name,
3009
+ created_at=int(datetime.now().timestamp()),
3010
+ content="Error: Workflow execution failed",
3011
+ status=RunStatus.error,
3012
+ )
3013
+
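
The non-streaming path decides whether the workflow actually ran by scanning assistant tool calls for `run_workflow`, handling both dict-shaped and object-shaped tool calls. A self-contained rendering of that check with stand-in data:

```python
from types import SimpleNamespace


def called_run_workflow(tool_calls) -> bool:
    # Mirrors the dict-vs-object handling above: tool calls may arrive as raw dicts or
    # as objects exposing a .function.name attribute.
    for tool_call in tool_calls:
        if isinstance(tool_call, dict):
            name = tool_call.get("function", {}).get("name", "")
        else:
            name = tool_call.function.name if hasattr(tool_call, "function") else ""
        if name == "run_workflow":
            return True
    return False


assert called_run_workflow([{"function": {"name": "run_workflow"}}])
assert not called_run_workflow([SimpleNamespace(function=SimpleNamespace(name="other_tool"))])
```
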
3014
+ def _async_initialize_workflow_agent(
3015
+ self,
3016
+ session: WorkflowSession,
3017
+ execution_input: WorkflowExecutionInput,
3018
+ run_context: RunContext,
3019
+ websocket_handler: Optional[WebSocketHandler] = None,
3020
+ stream: bool = False,
3021
+ ) -> None:
3022
+ """Initialize the workflow agent with async tools (but NOT context - that's passed per-run)"""
3023
+ from agno.tools.function import Function
3024
+
3025
+ workflow_tool_func = self.agent.async_create_workflow_tool( # type: ignore
3026
+ workflow=self,
3027
+ session=session,
3028
+ execution_input=execution_input,
3029
+ run_context=run_context,
3030
+ stream=stream,
3031
+ websocket_handler=websocket_handler,
3032
+ )
3033
+ workflow_tool = Function.from_callable(workflow_tool_func)
3034
+
3035
+ self.agent.tools = [workflow_tool] # type: ignore
3036
+ self.agent._rebuild_tools = True # type: ignore
3037
+
3038
+ log_debug("Workflow agent initialized with async run_workflow tool")
3039
+
+    async def _aload_session_for_workflow_agent(
+        self,
+        session_id: str,
+        user_id: Optional[str],
+        session_state: Optional[Dict[str, Any]],
+    ) -> Tuple[WorkflowSession, Dict[str, Any]]:
+        """Helper to load or create session for workflow agent execution"""
+        return await self._aload_or_create_session(session_id=session_id, user_id=user_id, session_state=session_state)
+
3049
+ def _aexecute_workflow_agent(
3050
+ self,
3051
+ user_input: Union[str, Dict[str, Any], List[Any], BaseModel],
3052
+ run_context: RunContext,
3053
+ execution_input: WorkflowExecutionInput,
3054
+ stream: bool = False,
3055
+ websocket_handler: Optional[WebSocketHandler] = None,
3056
+ **kwargs: Any,
3057
+ ):
3058
+ """
3059
+ Execute the workflow agent asynchronously in streaming or non-streaming mode.
3060
+
3061
+ The agent decides whether to run the workflow or answer directly from history.
3062
+
3063
+ Args:
3064
+ user_input: The user's input
3065
+ session: The workflow session
3066
+ run_context: The run context
3067
+ execution_input: The execution input
3068
+ stream: Whether to stream the response
3069
+ websocket_handler: The WebSocket handler
3070
+
3071
+ Returns:
3072
+ Coroutine[WorkflowRunOutput] if stream=False, AsyncIterator[WorkflowRunOutputEvent] if stream=True
3073
+ """
3074
+
3075
+ if stream:
3076
+
3077
+ async def _stream():
3078
+ session, session_state_loaded = await self._aload_session_for_workflow_agent(
3079
+ run_context.session_id, run_context.user_id, run_context.session_state
3080
+ )
3081
+ async for event in self._arun_workflow_agent_stream(
3082
+ agent_input=user_input,
3083
+ session=session,
3084
+ execution_input=execution_input,
3085
+ run_context=run_context,
3086
+ stream=stream,
3087
+ websocket_handler=websocket_handler,
3088
+ **kwargs,
3089
+ ):
3090
+ yield event
3091
+
3092
+ return _stream()
3093
+ else:
3094
+
3095
+ async def _execute():
3096
+ session, session_state_loaded = await self._aload_session_for_workflow_agent(
3097
+ run_context.session_id, run_context.user_id, run_context.session_state
3098
+ )
3099
+ return await self._arun_workflow_agent(
3100
+ agent_input=user_input,
3101
+ session=session,
3102
+ execution_input=execution_input,
3103
+ run_context=run_context,
3104
+ stream=stream,
3105
+ )
3106
+
3107
+ return _execute()
3108
+
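
Because this dispatcher returns an async generator when `stream=True` and a coroutine when `stream=False`, the two results are consumed differently. A hedged sketch of typical consumption (argument values are placeholders; inside an async Workflow method):

```python
# Sketch only: iterate the streaming shape, await the non-streaming one.
if stream:
    events = self._aexecute_workflow_agent(
        user_input=user_input, run_context=run_context, execution_input=inputs, stream=True
    )
    async for event in events:
        pass  # consuming the events drives execution; handling happens in _handle_event
else:
    run_output = await self._aexecute_workflow_agent(
        user_input=user_input, run_context=run_context, execution_input=inputs, stream=False
    )
```
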
3109
+ async def _arun_workflow_agent_stream(
3110
+ self,
3111
+ agent_input: Union[str, Dict[str, Any], List[Any], BaseModel],
3112
+ session: WorkflowSession,
3113
+ execution_input: WorkflowExecutionInput,
3114
+ run_context: RunContext,
3115
+ stream: bool = False,
3116
+ websocket_handler: Optional[WebSocketHandler] = None,
3117
+ **kwargs: Any,
3118
+ ) -> AsyncIterator[WorkflowRunOutputEvent]:
3119
+ """
3120
+ Execute the workflow agent asynchronously in streaming mode.
3121
+
3122
+ The agent's tool (run_workflow) is an async generator that yields workflow events directly.
3123
+ These events bubble up through the agent's streaming and are yielded here.
3124
+ We filter to only yield WorkflowRunOutputEvent to the CLI.
3125
+
3126
+ Yields:
3127
+ WorkflowRunOutputEvent: Events from workflow execution (agent events are filtered)
3128
+ """
3129
+ from typing import get_args
3130
+
3131
+ from agno.run.workflow import WorkflowCompletedEvent, WorkflowRunOutputEvent
3132
+
3133
+ logger.info("Workflow agent enabled - async streaming mode")
3134
+ log_debug(f"User input: {agent_input}")
3135
+
3136
+ self._async_initialize_workflow_agent(
3137
+ session,
3138
+ execution_input,
3139
+ run_context=run_context,
3140
+ stream=stream,
3141
+ websocket_handler=websocket_handler,
3142
+ )
3143
+
3144
+ run_context.dependencies = self._get_workflow_agent_dependencies(session)
3145
+
3146
+ agent_response: Optional[RunOutput] = None
3147
+ workflow_executed = False
3148
+
3149
+ from agno.run.agent import RunContentEvent
3150
+ from agno.run.team import RunContentEvent as TeamRunContentEvent
3151
+ from agno.run.workflow import WorkflowAgentCompletedEvent, WorkflowAgentStartedEvent
3152
+
3153
+ log_debug(f"Executing async workflow agent with streaming - input: {agent_input}...")
3154
+
3155
+ # Create a workflow run response upfront for potential direct answer (will be used only if workflow is not executed)
3156
+ run_id = str(uuid4())
3157
+ direct_reply_run_response = WorkflowRunOutput(
3158
+ run_id=run_id,
3159
+ input=execution_input.input,
3160
+ session_id=session.session_id,
3161
+ workflow_id=self.id,
3162
+ workflow_name=self.name,
3163
+ created_at=int(datetime.now().timestamp()),
3164
+ )
3165
+
3166
+ # Yield WorkflowAgentStartedEvent at the beginning (stored in direct_reply_run_response)
3167
+ agent_started_event = WorkflowAgentStartedEvent(
3168
+ workflow_name=self.name,
3169
+ workflow_id=self.id,
3170
+ session_id=session.session_id,
3171
+ )
3172
+ self._broadcast_to_websocket(agent_started_event, websocket_handler)
3173
+ yield agent_started_event
3174
+
3175
+ # Run the agent in streaming mode and yield all events
3176
+ async for event in self.agent.arun( # type: ignore[union-attr]
3177
+ input=agent_input,
3178
+ stream=True,
3179
+ stream_intermediate_steps=True,
3180
+ yield_run_response=True,
3181
+ session_id=session.session_id,
3182
+ dependencies=run_context.dependencies, # Pass context dynamically per-run
3183
+ session_state=run_context.session_state, # Pass session state dynamically per-run
3184
+ ): # type: ignore
3185
+ if isinstance(event, tuple(get_args(WorkflowRunOutputEvent))):
3186
+ yield event # type: ignore[misc]
3187
+
3188
+ if isinstance(event, WorkflowCompletedEvent):
3189
+ workflow_executed = True
3190
+ log_debug("Workflow execution detected via WorkflowCompletedEvent")
3191
+
3192
+ elif isinstance(event, (RunContentEvent, TeamRunContentEvent)):
3193
+ if event.step_name is None:
3194
+ # This is from the workflow agent itself
3195
+ # Enrich with metadata to mark it as a workflow agent event
3196
+
3197
+ if workflow_executed:
3198
+ continue # Skip if workflow was already executed
3199
+
3200
+ # workflow_agent field is used by consumers of the events to distinguish between workflow agent and regular agent
3201
+ event.workflow_agent = True # type: ignore
3202
+
3203
+ # Broadcast to WebSocket if available (async context only)
3204
+ self._broadcast_to_websocket(event, websocket_handler)
3205
+
3206
+ yield event # type: ignore[misc]
3207
+
3208
+ # Capture the final RunOutput (but don't yield it)
3209
+ if isinstance(event, RunOutput):
3210
+ agent_response = event
3211
+ log_debug(
3212
+ f"Agent response: {str(agent_response.content)[:100] if agent_response.content else 'None'}..."
3213
+ )
3214
+
3215
+ # Handle direct answer case (no workflow execution)
3216
+ if not workflow_executed:
3217
+ # Update the pre-created workflow run response with the direct answer
3218
+ direct_reply_run_response.content = agent_response.content if agent_response else ""
3219
+ direct_reply_run_response.status = RunStatus.completed
3220
+ direct_reply_run_response.workflow_agent_run = agent_response
3221
+
3222
+ workflow_run_response = direct_reply_run_response
3223
+
3224
+ # Store the full agent RunOutput and establish parent-child relationship
3225
+ if agent_response:
3226
+ agent_response.parent_run_id = workflow_run_response.run_id
3227
+ agent_response.workflow_id = workflow_run_response.workflow_id
3228
+
3229
+ # Yield WorkflowAgentCompletedEvent
3230
+ agent_completed_event = WorkflowAgentCompletedEvent(
3231
+ workflow_name=self.name,
3232
+ workflow_id=self.id,
3233
+ run_id=agent_response.run_id if agent_response else None,
3234
+ session_id=session.session_id,
3235
+ content=workflow_run_response.content,
3236
+ )
3237
+ self._broadcast_to_websocket(agent_completed_event, websocket_handler)
3238
+ yield agent_completed_event
3239
+
3240
+ # Yield a workflow completed event with the agent's direct response (used internally by aprint_response_stream)
3241
+ completed_event = WorkflowCompletedEvent(
3242
+ run_id=workflow_run_response.run_id or "",
3243
+ content=workflow_run_response.content,
3244
+ workflow_name=workflow_run_response.workflow_name,
3245
+ workflow_id=workflow_run_response.workflow_id,
3246
+ session_id=workflow_run_response.session_id,
3247
+ step_results=[],
3248
+ metadata={"agent_direct_response": True},
3249
+ )
3250
+ yield completed_event
3251
+
3252
+ # Update the run in session
3253
+ session.upsert_run(run=workflow_run_response)
3254
+ # Save session
3255
+ if self._has_async_db():
3256
+ await self.asave_session(session=session)
3257
+ else:
3258
+ self.save_session(session=session)
3259
+
3260
+ else:
3261
+ # Workflow was executed by the tool
3262
+ if self._has_async_db():
3263
+ reloaded_session = await self.aget_session(session_id=session.session_id)
3264
+ else:
3265
+ reloaded_session = self.get_session(session_id=session.session_id)
3266
+
3267
+ if reloaded_session and reloaded_session.runs and len(reloaded_session.runs) > 0:
3268
+ # Get the last run (which is the one just created by the tool)
3269
+ last_run = reloaded_session.runs[-1]
3270
+
3271
+ # Yield WorkflowAgentCompletedEvent
3272
+ agent_completed_event = WorkflowAgentCompletedEvent(
3273
+ run_id=agent_response.run_id if agent_response else None,
3274
+ workflow_name=self.name,
3275
+ workflow_id=self.id,
3276
+ session_id=session.session_id,
3277
+ content=agent_response.content if agent_response else None,
3278
+ )
3279
+
3280
+ self._broadcast_to_websocket(agent_completed_event, websocket_handler)
1774
3281
 
1775
- self.initialize_workflow()
3282
+ yield agent_completed_event
1776
3283
 
1777
- session_id, user_id, session_state = self._initialize_session(
1778
- session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
1779
- )
3284
+ # Update the last run with workflow_agent_run
3285
+ last_run.workflow_agent_run = agent_response
1780
3286
 
1781
- # Read existing session from database
1782
- workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1783
- self._update_metadata(session=workflow_session)
3287
+ # Store the full agent RunOutput and establish parent-child relationship
3288
+ if agent_response:
3289
+ agent_response.parent_run_id = last_run.run_id
3290
+ agent_response.workflow_id = last_run.workflow_id
1784
3291
 
1785
- # Update session state from DB
1786
- session_state = self._update_session_state(session=workflow_session, session_state=session_state)
3292
+ # Save the reloaded session (which has the updated run)
3293
+ if self._has_async_db():
3294
+ await self.asave_session(session=reloaded_session)
3295
+ else:
3296
+ self.save_session(session=reloaded_session)
1787
3297
 
1788
- self._prepare_steps()
3298
+ else:
3299
+ log_warning("Could not reload session or no runs found after workflow execution")
1789
3300
 
1790
- # Create workflow run response with PENDING status
1791
- workflow_run_response = WorkflowRunOutput(
1792
- run_id=run_id,
1793
- session_id=session_id,
1794
- workflow_id=self.id,
1795
- workflow_name=self.name,
1796
- created_at=int(datetime.now().timestamp()),
1797
- status=RunStatus.pending,
1798
- )
3301
+ async def _arun_workflow_agent(
3302
+ self,
3303
+ agent_input: Union[str, Dict[str, Any], List[Any], BaseModel],
3304
+ session: WorkflowSession,
3305
+ execution_input: WorkflowExecutionInput,
3306
+ run_context: RunContext,
3307
+ stream: bool = False,
3308
+ ) -> WorkflowRunOutput:
3309
+ """
3310
+ Execute the workflow agent asynchronously in non-streaming mode.
1799
3311
 
1800
- # Store PENDING response immediately
1801
- workflow_session.upsert_run(run=workflow_run_response)
1802
- self.save_session(session=workflow_session)
3312
+ The agent decides whether to run the workflow or answer directly from history.
1803
3313
 
1804
- # Prepare execution input
1805
- inputs = WorkflowExecutionInput(
1806
- input=input,
1807
- additional_data=additional_data,
1808
- audio=audio, # type: ignore
1809
- images=images, # type: ignore
1810
- videos=videos, # type: ignore
1811
- files=files, # type: ignore
1812
- )
3314
+ Returns:
3315
+ WorkflowRunOutput: The workflow run output with agent response
3316
+ """
3317
+ # Initialize the agent
3318
+ self._async_initialize_workflow_agent(session, execution_input, run_context=run_context, stream=stream)
3319
+
3320
+ # Build dependencies with workflow context
3321
+ run_context.dependencies = self._get_workflow_agent_dependencies(session)
3322
+
3323
+ # Run the agent
3324
+ agent_response: RunOutput = await self.agent.arun( # type: ignore[union-attr]
3325
+ input=agent_input,
3326
+ session_id=session.session_id,
3327
+ dependencies=run_context.dependencies,
3328
+ session_state=run_context.session_state,
3329
+ stream=stream,
3330
+ ) # type: ignore
3331
+
3332
+ # Check if the agent called the workflow tool
3333
+ workflow_executed = False
3334
+ if agent_response.messages:
3335
+ for message in agent_response.messages:
3336
+ if message.role == "assistant" and message.tool_calls:
3337
+ # Check if the tool call is specifically for run_workflow
3338
+ for tool_call in message.tool_calls:
3339
+ # Handle both dict and object formats
3340
+ if isinstance(tool_call, dict):
3341
+ tool_name = tool_call.get("function", {}).get("name", "")
3342
+ else:
3343
+ tool_name = tool_call.function.name if hasattr(tool_call, "function") else ""
1813
3344
 
1814
- self.update_agents_and_teams_session_info()
3345
+ if tool_name == "run_workflow":
3346
+ workflow_executed = True
3347
+ break
3348
+ if workflow_executed:
3349
+ break
1815
3350
 
1816
- async def execute_workflow_background_stream():
1817
- """Background execution with streaming and WebSocket broadcasting"""
1818
- try:
1819
- # Update status to RUNNING and save
1820
- workflow_run_response.status = RunStatus.running
1821
- self.save_session(session=workflow_session)
1822
-
1823
- # Execute with streaming - consume all events (they're auto-broadcast via _handle_event)
1824
- async for event in self._aexecute_stream(
1825
- execution_input=inputs,
1826
- session=workflow_session,
1827
- workflow_run_response=workflow_run_response,
1828
- stream_intermediate_steps=stream_intermediate_steps,
1829
- session_state=session_state,
1830
- websocket_handler=websocket_handler,
1831
- **kwargs,
1832
- ):
1833
- # Events are automatically broadcast by _handle_event
1834
- # We just consume them here to drive the execution
1835
- pass
3351
+ # Handle direct answer case (no workflow execution)
3352
+ if not workflow_executed:
3353
+ # Create a new workflow run output for the direct answer
3354
+ run_id = str(uuid4())
3355
+ workflow_run_response = WorkflowRunOutput(
3356
+ run_id=run_id,
3357
+ input=execution_input.input,
3358
+ session_id=session.session_id,
3359
+ workflow_id=self.id,
3360
+ workflow_name=self.name,
3361
+ created_at=int(datetime.now().timestamp()),
3362
+ content=agent_response.content,
3363
+ status=RunStatus.completed,
3364
+ workflow_agent_run=agent_response,
3365
+ )
1836
3366
 
1837
- log_debug(f"Background streaming execution completed with status: {workflow_run_response.status}")
3367
+ # Store the full agent RunOutput and establish parent-child relationship
3368
+ if agent_response:
3369
+ agent_response.parent_run_id = workflow_run_response.run_id
3370
+ agent_response.workflow_id = workflow_run_response.workflow_id
1838
3371
 
1839
- except Exception as e:
1840
- logger.error(f"Background streaming workflow execution failed: {e}")
1841
- workflow_run_response.status = RunStatus.error
1842
- workflow_run_response.content = f"Background streaming execution failed: {str(e)}"
1843
- self.save_session(session=workflow_session)
3372
+ # Update the run in session
3373
+ session.upsert_run(run=workflow_run_response)
3374
+ if self._has_async_db():
3375
+ await self.asave_session(session=session)
3376
+ else:
3377
+ self.save_session(session=session)
1844
3378
 
1845
- # Create and start asyncio task for background streaming execution
1846
- loop = asyncio.get_running_loop()
1847
- loop.create_task(execute_workflow_background_stream())
3379
+ log_debug(f"Agent decision: workflow_executed={workflow_executed}")
1848
3380
 
1849
- # Return SAME object that will be updated by background execution
1850
- return workflow_run_response
3381
+ return workflow_run_response
3382
+ else:
3383
+ # Workflow was executed by the tool
3384
+ logger.info("=" * 80)
3385
+ logger.info("WORKFLOW AGENT: Called run_workflow tool (async)")
3386
+ logger.info(" ➜ Workflow was executed, retrieving results...")
3387
+ logger.info("=" * 80)
3388
+
3389
+ log_debug("Reloading session from database to get the latest workflow run...")
3390
+ if self._has_async_db():
3391
+ reloaded_session = await self.aget_session(session_id=session.session_id)
3392
+ else:
3393
+ reloaded_session = self.get_session(session_id=session.session_id)
1851
3394
 
1852
- def get_run(self, run_id: str) -> Optional[WorkflowRunOutput]:
1853
- """Get the status and details of a background workflow run - SIMPLIFIED"""
1854
- if self.db is not None and self.session_id is not None:
1855
- session = self.db.get_session(session_id=self.session_id, session_type=SessionType.WORKFLOW)
1856
- if session and isinstance(session, WorkflowSession) and session.runs:
1857
- # Find the run by ID
1858
- for run in session.runs:
1859
- if run.run_id == run_id:
1860
- return run
3395
+ if reloaded_session and reloaded_session.runs and len(reloaded_session.runs) > 0:
3396
+ # Get the last run (which is the one just created by the tool)
3397
+ last_run = reloaded_session.runs[-1]
3398
+ log_debug(f"Retrieved latest workflow run: {last_run.run_id}")
3399
+ log_debug(f"Total workflow runs in session: {len(reloaded_session.runs)}")
1861
3400
 
1862
- return None
3401
+ # Update the last run with workflow_agent_run
3402
+ last_run.workflow_agent_run = agent_response
3403
+
3404
+ # Store the full agent RunOutput and establish parent-child relationship
3405
+ if agent_response:
3406
+ agent_response.parent_run_id = last_run.run_id
3407
+ agent_response.workflow_id = last_run.workflow_id
3408
+
3409
+ # Save the reloaded session (which has the updated run)
3410
+ if self._has_async_db():
3411
+ await self.asave_session(session=reloaded_session)
3412
+ else:
3413
+ self.save_session(session=reloaded_session)
3414
+
3415
+ log_debug(f"Agent decision: workflow_executed={workflow_executed}")
3416
+
3417
+ # Return the last run directly (WRO2 from inner workflow)
3418
+ return last_run
3419
+ else:
3420
+ log_warning("Could not reload session or no runs found after workflow execution")
3421
+ # Return a placeholder error response
3422
+ return WorkflowRunOutput(
3423
+ run_id=str(uuid4()),
3424
+ input=execution_input.input,
3425
+ session_id=session.session_id,
3426
+ workflow_id=self.id,
3427
+ workflow_name=self.name,
3428
+ created_at=int(datetime.now().timestamp()),
3429
+ content="Error: Workflow execution failed",
3430
+ status=RunStatus.error,
3431
+ )
1863
3432
 
1864
3433
  def cancel_run(self, run_id: str) -> bool:
1865
3434
  """Cancel a running workflow execution.
@@ -1885,6 +3454,7 @@ class Workflow:
1885
3454
  videos: Optional[List[Video]] = None,
1886
3455
  files: Optional[List[File]] = None,
1887
3456
  stream: Literal[False] = False,
3457
+ stream_events: Optional[bool] = None,
1888
3458
  stream_intermediate_steps: Optional[bool] = None,
1889
3459
  background: Optional[bool] = False,
1890
3460
  ) -> WorkflowRunOutput: ...
@@ -1902,6 +3472,7 @@ class Workflow:
1902
3472
  videos: Optional[List[Video]] = None,
1903
3473
  files: Optional[List[File]] = None,
1904
3474
  stream: Literal[True] = True,
3475
+ stream_events: Optional[bool] = None,
1905
3476
  stream_intermediate_steps: Optional[bool] = None,
1906
3477
  background: Optional[bool] = False,
1907
3478
  ) -> Iterator[WorkflowRunOutputEvent]: ...
@@ -1918,16 +3489,16 @@ class Workflow:
1918
3489
  videos: Optional[List[Video]] = None,
1919
3490
  files: Optional[List[File]] = None,
1920
3491
  stream: bool = False,
3492
+ stream_events: Optional[bool] = None,
1921
3493
  stream_intermediate_steps: Optional[bool] = None,
1922
3494
  background: Optional[bool] = False,
1923
3495
  **kwargs: Any,
1924
3496
  ) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
1925
3497
  """Execute the workflow synchronously with optional streaming"""
3498
+ if self._has_async_db():
3499
+ raise Exception("`run()` is not supported with an async DB. Please use `arun()`.")
1926
3500
 
1927
- validated_input = self._validate_input(input)
1928
- if validated_input is not None:
1929
- input = validated_input
1930
-
3501
+ input = self._validate_input(input)
1931
3502
  if background:
1932
3503
  raise RuntimeError("Background execution is not supported for sync run()")
1933
3504
 
@@ -1936,26 +3507,30 @@ class Workflow:
1936
3507
  run_id = str(uuid4())
1937
3508
 
1938
3509
  self.initialize_workflow()
1939
- session_id, user_id, session_state = self._initialize_session(
1940
- session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
1941
- )
3510
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
1942
3511
 
1943
3512
  # Read existing session from database
1944
3513
  workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
1945
3514
  self._update_metadata(session=workflow_session)
1946
3515
 
3516
+ # Initialize session state
3517
+ session_state = self._initialize_session_state(
3518
+ session_state=session_state or {}, user_id=user_id, session_id=session_id, run_id=run_id
3519
+ )
1947
3520
  # Update session state from DB
1948
- session_state = self._update_session_state(session=workflow_session, session_state=session_state)
3521
+ session_state = self._load_session_state(session=workflow_session, session_state=session_state)
1949
3522
 
1950
3523
  log_debug(f"Workflow Run Start: {self.name}", center=True)
1951
3524
 
1952
3525
  # Use simple defaults
1953
3526
  stream = stream or self.stream or False
1954
- stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
3527
+ stream_events = (stream_events or stream_intermediate_steps) or (
3528
+ self.stream_events or self.stream_intermediate_steps
3529
+ )
1955
3530
 
1956
- # Can't have stream_intermediate_steps if stream is False
1957
- if not stream:
1958
- stream_intermediate_steps = False
3531
+ # Can't stream events if streaming is disabled
3532
+ if stream is False:
3533
+ stream_events = False
1959
3534
 
1960
3535
  log_debug(f"Stream: {stream}")
1961
3536
  log_debug(f"Total steps: {self._get_step_count()}")
@@ -1963,15 +3538,6 @@ class Workflow:
1963
3538
  # Prepare steps
1964
3539
  self._prepare_steps()
1965
3540
 
1966
- # Create workflow run response that will be updated by reference
1967
- workflow_run_response = WorkflowRunOutput(
1968
- run_id=run_id,
1969
- session_id=session_id,
1970
- workflow_id=self.id,
1971
- workflow_name=self.name,
1972
- created_at=int(datetime.now().timestamp()),
1973
- )
1974
-
1975
3541
  inputs = WorkflowExecutionInput(
1976
3542
  input=input,
1977
3543
  additional_data=additional_data,
@@ -1986,13 +3552,46 @@ class Workflow:
1986
3552
 
1987
3553
  self.update_agents_and_teams_session_info()
1988
3554
 
3555
+ # Initialize run context
3556
+ run_context = RunContext(
3557
+ run_id=run_id,
3558
+ session_id=session_id,
3559
+ user_id=user_id,
3560
+ session_state=session_state,
3561
+ )
3562
+
3563
+ # Execute workflow agent if configured
3564
+ if self.agent is not None:
3565
+ return self._execute_workflow_agent(
3566
+ user_input=input, # type: ignore
3567
+ session=workflow_session,
3568
+ execution_input=inputs,
3569
+ run_context=run_context,
3570
+ stream=stream,
3571
+ **kwargs,
3572
+ )
3573
+
3574
+ # Create workflow run response for regular workflow execution
3575
+ workflow_run_response = WorkflowRunOutput(
3576
+ run_id=run_id,
3577
+ input=input,
3578
+ session_id=session_id,
3579
+ workflow_id=self.id,
3580
+ workflow_name=self.name,
3581
+ created_at=int(datetime.now().timestamp()),
3582
+ )
3583
+
3584
+ # Start the run metrics timer
3585
+ workflow_run_response.metrics = WorkflowMetrics(steps={})
3586
+ workflow_run_response.metrics.start_timer()
3587
+
1989
3588
  if stream:
1990
3589
  return self._execute_stream(
1991
3590
  session=workflow_session,
1992
3591
  execution_input=inputs, # type: ignore[arg-type]
1993
3592
  workflow_run_response=workflow_run_response,
1994
- stream_intermediate_steps=stream_intermediate_steps,
1995
- session_state=session_state,
3593
+ stream_events=stream_events,
3594
+ run_context=run_context,
1996
3595
  **kwargs,
1997
3596
  )
1998
3597
  else:
@@ -2000,7 +3599,7 @@ class Workflow:
2000
3599
  session=workflow_session,
2001
3600
  execution_input=inputs, # type: ignore[arg-type]
2002
3601
  workflow_run_response=workflow_run_response,
2003
- session_state=session_state,
3602
+ run_context=run_context,
2004
3603
  **kwargs,
2005
3604
  )
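A minimal usage sketch for the synchronous path above, assuming an existing `workflow` instance backed by a synchronous DB (run() raises with an async DB); illustrative only, not part of the diff.

# Non-streaming: run() returns a WorkflowRunOutput (assumed `workflow` instance)
result = workflow.run(input="Build the weekly digest")
print(result.status, result.content)

# Streaming: run() returns an iterator of WorkflowRunOutputEvent;
# stream_events supersedes the deprecated stream_intermediate_steps flag
for event in workflow.run(input="Build the weekly digest", stream=True, stream_events=True):
    print(type(event).__name__)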
2006
3605
 
@@ -2017,13 +3616,14 @@ class Workflow:
2017
3616
  videos: Optional[List[Video]] = None,
2018
3617
  files: Optional[List[File]] = None,
2019
3618
  stream: Literal[False] = False,
3619
+ stream_events: Optional[bool] = None,
2020
3620
  stream_intermediate_steps: Optional[bool] = None,
2021
3621
  background: Optional[bool] = False,
2022
3622
  websocket: Optional[WebSocket] = None,
2023
3623
  ) -> WorkflowRunOutput: ...
2024
3624
 
2025
3625
  @overload
2026
- async def arun(
3626
+ def arun(
2027
3627
  self,
2028
3628
  input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
2029
3629
  additional_data: Optional[Dict[str, Any]] = None,
@@ -2035,12 +3635,13 @@ class Workflow:
2035
3635
  videos: Optional[List[Video]] = None,
2036
3636
  files: Optional[List[File]] = None,
2037
3637
  stream: Literal[True] = True,
3638
+ stream_events: Optional[bool] = None,
2038
3639
  stream_intermediate_steps: Optional[bool] = None,
2039
3640
  background: Optional[bool] = False,
2040
3641
  websocket: Optional[WebSocket] = None,
2041
3642
  ) -> AsyncIterator[WorkflowRunOutputEvent]: ...
2042
3643
 
2043
- async def arun(
3644
+ def arun( # type: ignore
2044
3645
  self,
2045
3646
  input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
2046
3647
  additional_data: Optional[Dict[str, Any]] = None,
@@ -2052,6 +3653,7 @@ class Workflow:
2052
3653
  videos: Optional[List[Video]] = None,
2053
3654
  files: Optional[List[File]] = None,
2054
3655
  stream: bool = False,
3656
+ stream_events: Optional[bool] = None,
2055
3657
  stream_intermediate_steps: Optional[bool] = False,
2056
3658
  background: Optional[bool] = False,
2057
3659
  websocket: Optional[WebSocket] = None,
@@ -2059,9 +3661,7 @@ class Workflow:
2059
3661
  ) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
2060
3662
  """Execute the workflow synchronously with optional streaming"""
2061
3663
 
2062
- validated_input = self._validate_input(input)
2063
- if validated_input is not None:
2064
- input = validated_input
3664
+ input = self._validate_input(input)
2065
3665
 
2066
3666
  websocket_handler = None
2067
3667
  if websocket:
@@ -2071,17 +3671,27 @@ class Workflow:
2071
3671
 
2072
3672
  if background:
2073
3673
  if stream and websocket:
3674
+ # Consider both stream_events and stream_intermediate_steps (deprecated)
3675
+ if stream_intermediate_steps is not None:
3676
+ warnings.warn(
3677
+ "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
3678
+ DeprecationWarning,
3679
+ stacklevel=2,
3680
+ )
3681
+ stream_events = stream_events or stream_intermediate_steps or False
3682
+
2074
3683
  # Background + Streaming + WebSocket = Real-time events
2075
- return await self._arun_background_stream(
3684
+ return self._arun_background_stream( # type: ignore
2076
3685
  input=input,
2077
3686
  additional_data=additional_data,
2078
3687
  user_id=user_id,
2079
3688
  session_id=session_id,
3689
+ session_state=session_state,
2080
3690
  audio=audio,
2081
3691
  images=images,
2082
3692
  videos=videos,
2083
3693
  files=files,
2084
- stream_intermediate_steps=stream_intermediate_steps or False,
3694
+ stream_events=stream_events,
2085
3695
  websocket_handler=websocket_handler,
2086
3696
  **kwargs,
2087
3697
  )
@@ -2090,11 +3700,12 @@ class Workflow:
2090
3700
  raise ValueError("Background streaming execution requires a WebSocket for real-time events")
2091
3701
  else:
2092
3702
  # Background + Non-streaming = Polling (existing)
2093
- return await self._arun_background(
3703
+ return self._arun_background( # type: ignore
2094
3704
  input=input,
2095
3705
  additional_data=additional_data,
2096
3706
  user_id=user_id,
2097
3707
  session_id=session_id,
3708
+ session_state=session_state,
2098
3709
  audio=audio,
2099
3710
  images=images,
2100
3711
  videos=videos,
@@ -2107,47 +3718,40 @@ class Workflow:
2107
3718
  run_id = str(uuid4())
2108
3719
 
2109
3720
  self.initialize_workflow()
2110
- session_id, user_id, session_state = self._initialize_session(
2111
- session_id=session_id, user_id=user_id, session_state=session_state, run_id=run_id
2112
- )
2113
-
2114
- # Read existing session from database
2115
- workflow_session = self.read_or_create_session(session_id=session_id, user_id=user_id)
2116
- self._update_metadata(session=workflow_session)
3721
+ session_id, user_id = self._initialize_session(session_id=session_id, user_id=user_id)
2117
3722
 
2118
- # Update session state from DB
2119
- session_state = self._update_session_state(session=workflow_session, session_state=session_state)
3723
+ # Initialize run context
3724
+ run_context = RunContext(
3725
+ run_id=run_id,
3726
+ session_id=session_id,
3727
+ user_id=user_id,
3728
+ session_state=session_state,
3729
+ )
2120
3730
 
2121
3731
  log_debug(f"Async Workflow Run Start: {self.name}", center=True)
2122
3732
 
2123
3733
  # Use simple defaults
2124
3734
  stream = stream or self.stream or False
2125
- stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
3735
+ stream_events = (stream_events or stream_intermediate_steps) or (
3736
+ self.stream_events or self.stream_intermediate_steps
3737
+ )
2126
3738
 
2127
- # Can't have stream_intermediate_steps if stream is False
2128
- if not stream:
2129
- stream_intermediate_steps = False
3739
+ # Can't stream events if streaming is disabled
3740
+ if stream is False:
3741
+ stream_events = False
2130
3742
 
2131
3743
  log_debug(f"Stream: {stream}")
2132
3744
 
2133
3745
  # Prepare steps
2134
3746
  self._prepare_steps()
2135
3747
 
2136
- # Create workflow run response that will be updated by reference
2137
- workflow_run_response = WorkflowRunOutput(
2138
- run_id=run_id,
2139
- session_id=session_id,
2140
- workflow_id=self.id,
2141
- workflow_name=self.name,
2142
- created_at=int(datetime.now().timestamp()),
2143
- )
2144
-
2145
3748
  inputs = WorkflowExecutionInput(
2146
3749
  input=input,
2147
3750
  additional_data=additional_data,
2148
3751
  audio=audio, # type: ignore
2149
3752
  images=images, # type: ignore
2150
3753
  videos=videos, # type: ignore
3754
+ files=files,
2151
3755
  )
2152
3756
  log_debug(
2153
3757
  f"Created async pipeline input with session state keys: {list(session_state.keys()) if session_state else 'None'}"
@@ -2155,25 +3759,52 @@ class Workflow:
2155
3759
 
2156
3760
  self.update_agents_and_teams_session_info()
2157
3761
 
3762
+ if self.agent is not None:
3763
+ return self._aexecute_workflow_agent( # type: ignore
3764
+ user_input=input, # type: ignore
3765
+ execution_input=inputs,
3766
+ run_context=run_context,
3767
+ stream=stream,
3768
+ **kwargs,
3769
+ )
3770
+
3771
+ # Create workflow run response for regular workflow execution
3772
+ workflow_run_response = WorkflowRunOutput(
3773
+ run_id=run_id,
3774
+ input=input,
3775
+ session_id=session_id,
3776
+ workflow_id=self.id,
3777
+ workflow_name=self.name,
3778
+ created_at=int(datetime.now().timestamp()),
3779
+ )
3780
+
3781
+ # Start the run metrics timer
3782
+ workflow_run_response.metrics = WorkflowMetrics(steps={})
3783
+ workflow_run_response.metrics.start_timer()
3784
+
2158
3785
  if stream:
2159
- return self._aexecute_stream(
3786
+ return self._aexecute_stream( # type: ignore
2160
3787
  execution_input=inputs,
2161
3788
  workflow_run_response=workflow_run_response,
2162
- session=workflow_session,
2163
- stream_intermediate_steps=stream_intermediate_steps,
3789
+ session_id=session_id,
3790
+ user_id=user_id,
3791
+ stream_events=stream_events,
2164
3792
  websocket=websocket,
2165
3793
  files=files,
2166
3794
  session_state=session_state,
3795
+ run_context=run_context,
2167
3796
  **kwargs,
2168
3797
  )
2169
3798
  else:
2170
- return await self._aexecute(
3799
+ return self._aexecute( # type: ignore
2171
3800
  execution_input=inputs,
2172
3801
  workflow_run_response=workflow_run_response,
2173
- session=workflow_session,
3802
+ session_id=session_id,
3803
+ user_id=user_id,
2174
3804
  websocket=websocket,
2175
3805
  files=files,
2176
3806
  session_state=session_state,
3807
+ run_context=run_context,
2177
3808
  **kwargs,
2178
3809
  )
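A minimal sketch of the background branches handled earlier in arun(), assuming an existing `workflow` instance; a hedged illustration only, not part of the diff.

import asyncio

async def main():
    # Background, non-streaming: returns a pending run immediately; execution continues in a task
    pending = await workflow.arun(input="Nightly report", background=True)  # assumed `workflow` instance
    print(pending.run_id, pending.status)

    # Background plus streaming requires a WebSocket, otherwise arun() raises ValueError:
    # async for event in workflow.arun(input="Nightly report", background=True, stream=True, websocket=ws):
    #     ...

asyncio.run(main())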
2179
3810
 
@@ -2185,7 +3816,7 @@ class Workflow:
2185
3816
  if callable(step) and hasattr(step, "__name__"):
2186
3817
  step_name = step.__name__
2187
3818
  log_debug(f"Step {i + 1}: Wrapping callable function '{step_name}'")
2188
- prepared_steps.append(Step(name=step_name, description="User-defined callable step", executor=step))
3819
+ prepared_steps.append(Step(name=step_name, description="User-defined callable step", executor=step)) # type: ignore
2189
3820
  elif isinstance(step, Agent):
2190
3821
  step_name = step.name or f"step_{i + 1}"
2191
3822
  log_debug(f"Step {i + 1}: Agent '{step_name}'")
@@ -2194,6 +3825,12 @@ class Workflow:
2194
3825
  step_name = step.name or f"step_{i + 1}"
2195
3826
  log_debug(f"Step {i + 1}: Team '{step_name}' with {len(step.members)} members")
2196
3827
  prepared_steps.append(Step(name=step_name, description=step.description, team=step))
3828
+ elif isinstance(step, Step) and step.add_workflow_history is True and self.db is None:
3829
+ log_warning(
3830
+ f"Step '{step.name or f'step_{i + 1}'}' has add_workflow_history=True "
3831
+ "but no database is configured in the Workflow. "
3832
+ "History won't be persisted. Add a database to persist runs across executions."
3833
+ )
2197
3834
  elif isinstance(step, (Step, Steps, Loop, Parallel, Condition, Router)):
2198
3835
  step_type = type(step).__name__
2199
3836
  step_name = getattr(step, "name", f"unnamed_{step_type.lower()}")
@@ -2216,7 +3853,6 @@ class Workflow:
2216
3853
  videos: Optional[List[Video]] = None,
2217
3854
  files: Optional[List[File]] = None,
2218
3855
  stream: Optional[bool] = None,
2219
- stream_intermediate_steps: Optional[bool] = None,
2220
3856
  markdown: bool = True,
2221
3857
  show_time: bool = True,
2222
3858
  show_step_details: bool = True,
@@ -2233,19 +3869,21 @@ class Workflow:
2233
3869
  audio: Audio input
2234
3870
  images: Image input
2235
3871
  videos: Video input
3872
+ files: File input
2236
3873
  stream: Whether to stream the response content
2237
- stream_intermediate_steps: Whether to stream intermediate steps
2238
3874
  markdown: Whether to render content as markdown
2239
3875
  show_time: Whether to show execution time
2240
3876
  show_step_details: Whether to show individual step outputs
2241
3877
  console: Rich console instance (optional)
2242
3878
  """
3879
+ if self._has_async_db():
3880
+ raise Exception("`print_response()` is not supported with an async DB. Please use `aprint_response()`.")
2243
3881
 
2244
3882
  if stream is None:
2245
3883
  stream = self.stream or False
2246
3884
 
2247
- if stream_intermediate_steps is None:
2248
- stream_intermediate_steps = self.stream_intermediate_steps or False
3885
+ if "stream_events" in kwargs:
3886
+ kwargs.pop("stream_events")
2249
3887
 
2250
3888
  if stream:
2251
3889
  print_response_stream(
@@ -2258,7 +3896,7 @@ class Workflow:
2258
3896
  images=images,
2259
3897
  videos=videos,
2260
3898
  files=files,
2261
- stream_intermediate_steps=stream_intermediate_steps,
3899
+ stream_events=True,
2262
3900
  markdown=markdown,
2263
3901
  show_time=show_time,
2264
3902
  show_step_details=show_step_details,
@@ -2294,7 +3932,6 @@ class Workflow:
2294
3932
  videos: Optional[List[Video]] = None,
2295
3933
  files: Optional[List[File]] = None,
2296
3934
  stream: Optional[bool] = None,
2297
- stream_intermediate_steps: Optional[bool] = None,
2298
3935
  markdown: bool = True,
2299
3936
  show_time: bool = True,
2300
3937
  show_step_details: bool = True,
@@ -2311,7 +3948,7 @@ class Workflow:
2311
3948
  audio: Audio input
2312
3949
  images: Image input
2313
3950
  videos: Video input
2314
- stream_intermediate_steps: Whether to stream intermediate steps
3951
+ files: File input
2315
3952
  stream: Whether to stream the response content
2316
3953
  markdown: Whether to render content as markdown
2317
3954
  show_time: Whether to show execution time
@@ -2321,8 +3958,8 @@ class Workflow:
2321
3958
  if stream is None:
2322
3959
  stream = self.stream or False
2323
3960
 
2324
- if stream_intermediate_steps is None:
2325
- stream_intermediate_steps = self.stream_intermediate_steps or False
3961
+ if "stream_events" in kwargs:
3962
+ kwargs.pop("stream_events")
2326
3963
 
2327
3964
  if stream:
2328
3965
  await aprint_response_stream(
@@ -2335,7 +3972,7 @@ class Workflow:
2335
3972
  images=images,
2336
3973
  videos=videos,
2337
3974
  files=files,
2338
- stream_intermediate_steps=stream_intermediate_steps,
3975
+ stream_events=True,
2339
3976
  markdown=markdown,
2340
3977
  show_time=show_time,
2341
3978
  show_step_details=show_step_details,
@@ -2364,6 +4001,34 @@ class Workflow:
2364
4001
  """Convert workflow to dictionary representation"""
2365
4002
 
2366
4003
  def serialize_step(step):
4004
+ # Handle callable functions (not wrapped in Step objects)
4005
+ if callable(step) and hasattr(step, "__name__"):
4006
+ step_dict = {
4007
+ "name": step.__name__,
4008
+ "description": "User-defined callable step",
4009
+ "type": StepType.STEP.value,
4010
+ }
4011
+ return step_dict
4012
+
4013
+ # Handle Agent and Team objects directly
4014
+ if isinstance(step, Agent):
4015
+ step_dict = {
4016
+ "name": step.name or "unnamed_agent",
4017
+ "description": step.description or "Agent step",
4018
+ "type": StepType.STEP.value,
4019
+ "agent": step,
4020
+ }
4021
+ return step_dict
4022
+
4023
+ if isinstance(step, Team):
4024
+ step_dict = {
4025
+ "name": step.name or "unnamed_team",
4026
+ "description": step.description or "Team step",
4027
+ "type": StepType.STEP.value,
4028
+ "team": step,
4029
+ }
4030
+ return step_dict
4031
+
2367
4032
  step_dict = {
2368
4033
  "name": step.name if hasattr(step, "name") else f"unnamed_{type(step).__name__.lower()}",
2369
4034
  "description": step.description if hasattr(step, "description") else "User-defined callable step",
@@ -2377,7 +4042,7 @@ class Workflow:
2377
4042
  step_dict["team"] = step.team if hasattr(step, "team") else None # type: ignore
2378
4043
 
2379
4044
  # Handle nested steps for Router/Loop
2380
- if isinstance(step, (Router)):
4045
+ if isinstance(step, Router):
2381
4046
  step_dict["steps"] = (
2382
4047
  [serialize_step(step) for step in step.choices] if hasattr(step, "choices") else None
2383
4048
  )
@@ -2433,7 +4098,7 @@ class Workflow:
2433
4098
 
2434
4099
  # If workflow has metrics, convert and add them to session metrics
2435
4100
  if workflow_run_response.metrics:
2436
- run_session_metrics = self._calculate_session_metrics_from_workflow_metrics(workflow_run_response.metrics)
4101
+ run_session_metrics = self._calculate_session_metrics_from_workflow_metrics(workflow_run_response.metrics) # type: ignore[arg-type]
2437
4102
 
2438
4103
  session_metrics += run_session_metrics
2439
4104
 
@@ -2444,6 +4109,18 @@ class Workflow:
2444
4109
  session.session_data = {}
2445
4110
  session.session_data["session_metrics"] = session_metrics.to_dict()
2446
4111
 
4112
+ async def aget_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
4113
+ """Get the session metrics for the given session ID and user ID."""
4114
+ session_id = session_id or self.session_id
4115
+ if session_id is None:
4116
+ raise Exception("Session ID is required")
4117
+
4118
+ session = await self.aget_session(session_id=session_id) # type: ignore
4119
+ if session is None:
4120
+ raise Exception("Session not found")
4121
+
4122
+ return self._get_session_metrics(session=session)
4123
+
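A small sketch of the new async metrics helper above, assuming an existing `workflow` instance with an async-capable DB and a placeholder session id; illustrative only.

import asyncio

async def main():
    # "session-123" is a placeholder session id (assumed `workflow` instance)
    metrics = await workflow.aget_session_metrics(session_id="session-123")
    if metrics is not None:
        print(metrics.to_dict())

asyncio.run(main())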
2447
4124
  def get_session_metrics(self, session_id: Optional[str] = None) -> Optional[Metrics]:
2448
4125
  """Get the session metrics for the given session ID and user ID."""
2449
4126
  session_id = session_id or self.session_id
@@ -2472,7 +4149,7 @@ class Workflow:
2472
4149
 
2473
4150
  # If it's a team, update all members
2474
4151
  if hasattr(active_executor, "members"):
2475
- for member in active_executor.members:
4152
+ for member in active_executor.members: # type: ignore
2476
4153
  if hasattr(member, "workflow_id"):
2477
4154
  member.workflow_id = self.id
2478
4155
 
@@ -2519,3 +4196,139 @@ class Workflow:
2519
4196
  )
2520
4197
  except Exception as e:
2521
4198
  log_debug(f"Could not create Workflow run telemetry event: {e}")
4199
+
4200
+ def cli_app(
4201
+ self,
4202
+ input: Optional[str] = None,
4203
+ session_id: Optional[str] = None,
4204
+ user_id: Optional[str] = None,
4205
+ user: str = "User",
4206
+ emoji: str = ":technologist:",
4207
+ stream: Optional[bool] = None,
4208
+ markdown: bool = True,
4209
+ show_time: bool = True,
4210
+ show_step_details: bool = True,
4211
+ exit_on: Optional[List[str]] = None,
4212
+ **kwargs: Any,
4213
+ ) -> None:
4214
+ """
4215
+ Run an interactive command-line interface to interact with the workflow.
4216
+
4217
+ This method creates a CLI interface that allows users to interact with the workflow
4218
+ either by providing a single input or through continuous interactive prompts.
4219
+
4220
+ Arguments:
4221
+ input: Optional initial input to process before starting interactive mode.
4222
+ session_id: Optional session identifier for maintaining conversation context.
4223
+ user_id: Optional user identifier for tracking user-specific data.
4224
+ user: Display name for the user in the CLI prompt. Defaults to "User".
4225
+ emoji: Emoji to display next to the user name in prompts. Defaults to ":technologist:".
4226
+ stream: Whether to stream the workflow response. If None, uses workflow default.
4227
+ markdown: Whether to render output as markdown. Defaults to True.
4228
+ show_time: Whether to display timestamps in the output. Defaults to True.
4229
+ show_step_details: Whether to show detailed step information. Defaults to True.
4230
+ exit_on: List of commands that will exit the CLI. Defaults to ["exit", "quit", "bye", "stop"].
4231
+ **kwargs: Additional keyword arguments passed to the workflow's print_response method.
4232
+
4233
+ Returns:
4234
+ None: This method runs interactively and does not return a value.
4235
+ """
4236
+
4237
+ from rich.prompt import Prompt
4238
+
4239
+ if input:
4240
+ self.print_response(
4241
+ input=input,
4242
+ stream=stream,
4243
+ markdown=markdown,
4244
+ show_time=show_time,
4245
+ show_step_details=show_step_details,
4246
+ user_id=user_id,
4247
+ session_id=session_id,
4248
+ **kwargs,
4249
+ )
4250
+
4251
+ _exit_on = exit_on or ["exit", "quit", "bye", "stop"]
4252
+ while True:
4253
+ message = Prompt.ask(f"[bold] {emoji} {user} [/bold]")
4254
+ if message in _exit_on:
4255
+ break
4256
+
4257
+ self.print_response(
4258
+ input=message,
4259
+ stream=stream,
4260
+ markdown=markdown,
4261
+ show_time=show_time,
4262
+ show_step_details=show_step_details,
4263
+ user_id=user_id,
4264
+ session_id=session_id,
4265
+ **kwargs,
4266
+ )
4267
+
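A minimal sketch of starting the interactive loop defined above, assuming an existing `workflow` instance; illustrative only, not part of the diff.

# Optionally handle a first input, then drop into the prompt loop;
# any value in exit_on (default: exit/quit/bye/stop) leaves the loop
workflow.cli_app(  # assumed `workflow` instance
    input="What can you do?",
    user="Analyst",
    stream=True,
    markdown=True,
)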
4268
+ async def acli_app(
4269
+ self,
4270
+ input: Optional[str] = None,
4271
+ session_id: Optional[str] = None,
4272
+ user_id: Optional[str] = None,
4273
+ user: str = "User",
4274
+ emoji: str = ":technologist:",
4275
+ stream: Optional[bool] = None,
4276
+ markdown: bool = True,
4277
+ show_time: bool = True,
4278
+ show_step_details: bool = True,
4279
+ exit_on: Optional[List[str]] = None,
4280
+ **kwargs: Any,
4281
+ ) -> None:
4282
+ """
4283
+ Run an interactive command-line interface to interact with the workflow.
4284
+
4285
+ This method creates a CLI interface that allows users to interact with the workflow
4286
+ either by providing a single input or through continuous interactive prompts.
4287
+
4288
+ Arguments:
4289
+ input: Optional initial input to process before starting interactive mode.
4290
+ session_id: Optional session identifier for maintaining conversation context.
4291
+ user_id: Optional user identifier for tracking user-specific data.
4292
+ user: Display name for the user in the CLI prompt. Defaults to "User".
4293
+ emoji: Emoji to display next to the user name in prompts. Defaults to ":technologist:".
4294
+ stream: Whether to stream the workflow response. If None, uses workflow default.
4295
+ markdown: Whether to render output as markdown. Defaults to True.
4296
+ show_time: Whether to display timestamps in the output. Defaults to True.
4297
+ show_step_details: Whether to show detailed step information. Defaults to True.
4298
+ exit_on: List of commands that will exit the CLI. Defaults to ["exit", "quit", "bye", "stop"].
4299
+ **kwargs: Additional keyword arguments passed to the workflow's aprint_response method.
4300
+
4301
+ Returns:
4302
+ None: This method runs interactively and does not return a value.
4303
+ """
4304
+
4305
+ from rich.prompt import Prompt
4306
+
4307
+ if input:
4308
+ await self.aprint_response(
4309
+ input=input,
4310
+ stream=stream,
4311
+ markdown=markdown,
4312
+ show_time=show_time,
4313
+ show_step_details=show_step_details,
4314
+ user_id=user_id,
4315
+ session_id=session_id,
4316
+ **kwargs,
4317
+ )
4318
+
4319
+ _exit_on = exit_on or ["exit", "quit", "bye", "stop"]
4320
+ while True:
4321
+ message = Prompt.ask(f"[bold] {emoji} {user} [/bold]")
4322
+ if message in _exit_on:
4323
+ break
4324
+
4325
+ await self.aprint_response(
4326
+ input=message,
4327
+ stream=stream,
4328
+ markdown=markdown,
4329
+ show_time=show_time,
4330
+ show_step_details=show_step_details,
4331
+ user_id=user_id,
4332
+ session_id=session_id,
4333
+ **kwargs,
4334
+ )
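And the async counterpart, driven through asyncio under the same assumption of an existing `workflow` instance; illustrative only.

import asyncio

# Same interactive loop as cli_app, but built on aprint_response
asyncio.run(workflow.acli_app(stream=True, exit_on=["exit", "quit"]))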