agno 2.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (314)
  1. agno/agent/agent.py +5540 -2273
  2. agno/api/api.py +2 -0
  3. agno/api/os.py +1 -1
  4. agno/compression/__init__.py +3 -0
  5. agno/compression/manager.py +247 -0
  6. agno/culture/__init__.py +3 -0
  7. agno/culture/manager.py +956 -0
  8. agno/db/async_postgres/__init__.py +3 -0
  9. agno/db/base.py +689 -6
  10. agno/db/dynamo/dynamo.py +933 -37
  11. agno/db/dynamo/schemas.py +174 -10
  12. agno/db/dynamo/utils.py +63 -4
  13. agno/db/firestore/firestore.py +831 -9
  14. agno/db/firestore/schemas.py +51 -0
  15. agno/db/firestore/utils.py +102 -4
  16. agno/db/gcs_json/gcs_json_db.py +660 -12
  17. agno/db/gcs_json/utils.py +60 -26
  18. agno/db/in_memory/in_memory_db.py +287 -14
  19. agno/db/in_memory/utils.py +60 -2
  20. agno/db/json/json_db.py +590 -14
  21. agno/db/json/utils.py +60 -26
  22. agno/db/migrations/manager.py +199 -0
  23. agno/db/migrations/v1_to_v2.py +43 -13
  24. agno/db/migrations/versions/__init__.py +0 -0
  25. agno/db/migrations/versions/v2_3_0.py +938 -0
  26. agno/db/mongo/__init__.py +15 -1
  27. agno/db/mongo/async_mongo.py +2760 -0
  28. agno/db/mongo/mongo.py +879 -11
  29. agno/db/mongo/schemas.py +42 -0
  30. agno/db/mongo/utils.py +80 -8
  31. agno/db/mysql/__init__.py +2 -1
  32. agno/db/mysql/async_mysql.py +2912 -0
  33. agno/db/mysql/mysql.py +946 -68
  34. agno/db/mysql/schemas.py +72 -10
  35. agno/db/mysql/utils.py +198 -7
  36. agno/db/postgres/__init__.py +2 -1
  37. agno/db/postgres/async_postgres.py +2579 -0
  38. agno/db/postgres/postgres.py +942 -57
  39. agno/db/postgres/schemas.py +81 -18
  40. agno/db/postgres/utils.py +164 -2
  41. agno/db/redis/redis.py +671 -7
  42. agno/db/redis/schemas.py +50 -0
  43. agno/db/redis/utils.py +65 -7
  44. agno/db/schemas/__init__.py +2 -1
  45. agno/db/schemas/culture.py +120 -0
  46. agno/db/schemas/evals.py +1 -0
  47. agno/db/schemas/memory.py +17 -2
  48. agno/db/singlestore/schemas.py +63 -0
  49. agno/db/singlestore/singlestore.py +949 -83
  50. agno/db/singlestore/utils.py +60 -2
  51. agno/db/sqlite/__init__.py +2 -1
  52. agno/db/sqlite/async_sqlite.py +2911 -0
  53. agno/db/sqlite/schemas.py +62 -0
  54. agno/db/sqlite/sqlite.py +965 -46
  55. agno/db/sqlite/utils.py +169 -8
  56. agno/db/surrealdb/__init__.py +3 -0
  57. agno/db/surrealdb/metrics.py +292 -0
  58. agno/db/surrealdb/models.py +334 -0
  59. agno/db/surrealdb/queries.py +71 -0
  60. agno/db/surrealdb/surrealdb.py +1908 -0
  61. agno/db/surrealdb/utils.py +147 -0
  62. agno/db/utils.py +2 -0
  63. agno/eval/__init__.py +10 -0
  64. agno/eval/accuracy.py +75 -55
  65. agno/eval/agent_as_judge.py +861 -0
  66. agno/eval/base.py +29 -0
  67. agno/eval/performance.py +16 -7
  68. agno/eval/reliability.py +28 -16
  69. agno/eval/utils.py +35 -17
  70. agno/exceptions.py +27 -2
  71. agno/filters.py +354 -0
  72. agno/guardrails/prompt_injection.py +1 -0
  73. agno/hooks/__init__.py +3 -0
  74. agno/hooks/decorator.py +164 -0
  75. agno/integrations/discord/client.py +1 -1
  76. agno/knowledge/chunking/agentic.py +13 -10
  77. agno/knowledge/chunking/fixed.py +4 -1
  78. agno/knowledge/chunking/semantic.py +9 -4
  79. agno/knowledge/chunking/strategy.py +59 -15
  80. agno/knowledge/embedder/fastembed.py +1 -1
  81. agno/knowledge/embedder/nebius.py +1 -1
  82. agno/knowledge/embedder/ollama.py +8 -0
  83. agno/knowledge/embedder/openai.py +8 -8
  84. agno/knowledge/embedder/sentence_transformer.py +6 -2
  85. agno/knowledge/embedder/vllm.py +262 -0
  86. agno/knowledge/knowledge.py +1618 -318
  87. agno/knowledge/reader/base.py +6 -2
  88. agno/knowledge/reader/csv_reader.py +8 -10
  89. agno/knowledge/reader/docx_reader.py +5 -6
  90. agno/knowledge/reader/field_labeled_csv_reader.py +16 -20
  91. agno/knowledge/reader/json_reader.py +5 -4
  92. agno/knowledge/reader/markdown_reader.py +8 -8
  93. agno/knowledge/reader/pdf_reader.py +17 -19
  94. agno/knowledge/reader/pptx_reader.py +101 -0
  95. agno/knowledge/reader/reader_factory.py +32 -3
  96. agno/knowledge/reader/s3_reader.py +3 -3
  97. agno/knowledge/reader/tavily_reader.py +193 -0
  98. agno/knowledge/reader/text_reader.py +22 -10
  99. agno/knowledge/reader/web_search_reader.py +1 -48
  100. agno/knowledge/reader/website_reader.py +10 -10
  101. agno/knowledge/reader/wikipedia_reader.py +33 -1
  102. agno/knowledge/types.py +1 -0
  103. agno/knowledge/utils.py +72 -7
  104. agno/media.py +22 -6
  105. agno/memory/__init__.py +14 -1
  106. agno/memory/manager.py +544 -83
  107. agno/memory/strategies/__init__.py +15 -0
  108. agno/memory/strategies/base.py +66 -0
  109. agno/memory/strategies/summarize.py +196 -0
  110. agno/memory/strategies/types.py +37 -0
  111. agno/models/aimlapi/aimlapi.py +17 -0
  112. agno/models/anthropic/claude.py +515 -40
  113. agno/models/aws/bedrock.py +102 -21
  114. agno/models/aws/claude.py +131 -274
  115. agno/models/azure/ai_foundry.py +41 -19
  116. agno/models/azure/openai_chat.py +39 -8
  117. agno/models/base.py +1249 -525
  118. agno/models/cerebras/cerebras.py +91 -21
  119. agno/models/cerebras/cerebras_openai.py +21 -2
  120. agno/models/cohere/chat.py +40 -6
  121. agno/models/cometapi/cometapi.py +18 -1
  122. agno/models/dashscope/dashscope.py +2 -3
  123. agno/models/deepinfra/deepinfra.py +18 -1
  124. agno/models/deepseek/deepseek.py +69 -3
  125. agno/models/fireworks/fireworks.py +18 -1
  126. agno/models/google/gemini.py +877 -80
  127. agno/models/google/utils.py +22 -0
  128. agno/models/groq/groq.py +51 -18
  129. agno/models/huggingface/huggingface.py +17 -6
  130. agno/models/ibm/watsonx.py +16 -6
  131. agno/models/internlm/internlm.py +18 -1
  132. agno/models/langdb/langdb.py +13 -1
  133. agno/models/litellm/chat.py +44 -9
  134. agno/models/litellm/litellm_openai.py +18 -1
  135. agno/models/message.py +28 -5
  136. agno/models/meta/llama.py +47 -14
  137. agno/models/meta/llama_openai.py +22 -17
  138. agno/models/mistral/mistral.py +8 -4
  139. agno/models/nebius/nebius.py +6 -7
  140. agno/models/nvidia/nvidia.py +20 -3
  141. agno/models/ollama/chat.py +24 -8
  142. agno/models/openai/chat.py +104 -29
  143. agno/models/openai/responses.py +101 -81
  144. agno/models/openrouter/openrouter.py +60 -3
  145. agno/models/perplexity/perplexity.py +17 -1
  146. agno/models/portkey/portkey.py +7 -6
  147. agno/models/requesty/requesty.py +24 -4
  148. agno/models/response.py +73 -2
  149. agno/models/sambanova/sambanova.py +20 -3
  150. agno/models/siliconflow/siliconflow.py +19 -2
  151. agno/models/together/together.py +20 -3
  152. agno/models/utils.py +254 -8
  153. agno/models/vercel/v0.py +20 -3
  154. agno/models/vertexai/__init__.py +0 -0
  155. agno/models/vertexai/claude.py +190 -0
  156. agno/models/vllm/vllm.py +19 -14
  157. agno/models/xai/xai.py +19 -2
  158. agno/os/app.py +549 -152
  159. agno/os/auth.py +190 -3
  160. agno/os/config.py +23 -0
  161. agno/os/interfaces/a2a/router.py +8 -11
  162. agno/os/interfaces/a2a/utils.py +1 -1
  163. agno/os/interfaces/agui/router.py +18 -3
  164. agno/os/interfaces/agui/utils.py +152 -39
  165. agno/os/interfaces/slack/router.py +55 -37
  166. agno/os/interfaces/slack/slack.py +9 -1
  167. agno/os/interfaces/whatsapp/router.py +0 -1
  168. agno/os/interfaces/whatsapp/security.py +3 -1
  169. agno/os/mcp.py +110 -52
  170. agno/os/middleware/__init__.py +2 -0
  171. agno/os/middleware/jwt.py +676 -112
  172. agno/os/router.py +40 -1478
  173. agno/os/routers/agents/__init__.py +3 -0
  174. agno/os/routers/agents/router.py +599 -0
  175. agno/os/routers/agents/schema.py +261 -0
  176. agno/os/routers/evals/evals.py +96 -39
  177. agno/os/routers/evals/schemas.py +65 -33
  178. agno/os/routers/evals/utils.py +80 -10
  179. agno/os/routers/health.py +10 -4
  180. agno/os/routers/knowledge/knowledge.py +196 -38
  181. agno/os/routers/knowledge/schemas.py +82 -22
  182. agno/os/routers/memory/memory.py +279 -52
  183. agno/os/routers/memory/schemas.py +46 -17
  184. agno/os/routers/metrics/metrics.py +20 -8
  185. agno/os/routers/metrics/schemas.py +16 -16
  186. agno/os/routers/session/session.py +462 -34
  187. agno/os/routers/teams/__init__.py +3 -0
  188. agno/os/routers/teams/router.py +512 -0
  189. agno/os/routers/teams/schema.py +257 -0
  190. agno/os/routers/traces/__init__.py +3 -0
  191. agno/os/routers/traces/schemas.py +414 -0
  192. agno/os/routers/traces/traces.py +499 -0
  193. agno/os/routers/workflows/__init__.py +3 -0
  194. agno/os/routers/workflows/router.py +624 -0
  195. agno/os/routers/workflows/schema.py +75 -0
  196. agno/os/schema.py +256 -693
  197. agno/os/scopes.py +469 -0
  198. agno/os/utils.py +514 -36
  199. agno/reasoning/anthropic.py +80 -0
  200. agno/reasoning/gemini.py +73 -0
  201. agno/reasoning/openai.py +5 -0
  202. agno/reasoning/vertexai.py +76 -0
  203. agno/run/__init__.py +6 -0
  204. agno/run/agent.py +155 -32
  205. agno/run/base.py +55 -3
  206. agno/run/requirement.py +181 -0
  207. agno/run/team.py +125 -38
  208. agno/run/workflow.py +72 -18
  209. agno/session/agent.py +102 -89
  210. agno/session/summary.py +56 -15
  211. agno/session/team.py +164 -90
  212. agno/session/workflow.py +405 -40
  213. agno/table.py +10 -0
  214. agno/team/team.py +3974 -1903
  215. agno/tools/dalle.py +2 -4
  216. agno/tools/eleven_labs.py +23 -25
  217. agno/tools/exa.py +21 -16
  218. agno/tools/file.py +153 -23
  219. agno/tools/file_generation.py +16 -10
  220. agno/tools/firecrawl.py +15 -7
  221. agno/tools/function.py +193 -38
  222. agno/tools/gmail.py +238 -14
  223. agno/tools/google_drive.py +271 -0
  224. agno/tools/googlecalendar.py +36 -8
  225. agno/tools/googlesheets.py +20 -5
  226. agno/tools/jira.py +20 -0
  227. agno/tools/mcp/__init__.py +10 -0
  228. agno/tools/mcp/mcp.py +331 -0
  229. agno/tools/mcp/multi_mcp.py +347 -0
  230. agno/tools/mcp/params.py +24 -0
  231. agno/tools/mcp_toolbox.py +3 -3
  232. agno/tools/models/nebius.py +5 -5
  233. agno/tools/models_labs.py +20 -10
  234. agno/tools/nano_banana.py +151 -0
  235. agno/tools/notion.py +204 -0
  236. agno/tools/parallel.py +314 -0
  237. agno/tools/postgres.py +76 -36
  238. agno/tools/redshift.py +406 -0
  239. agno/tools/scrapegraph.py +1 -1
  240. agno/tools/shopify.py +1519 -0
  241. agno/tools/slack.py +18 -3
  242. agno/tools/spotify.py +919 -0
  243. agno/tools/tavily.py +146 -0
  244. agno/tools/toolkit.py +25 -0
  245. agno/tools/workflow.py +8 -1
  246. agno/tools/yfinance.py +12 -11
  247. agno/tracing/__init__.py +12 -0
  248. agno/tracing/exporter.py +157 -0
  249. agno/tracing/schemas.py +276 -0
  250. agno/tracing/setup.py +111 -0
  251. agno/utils/agent.py +938 -0
  252. agno/utils/cryptography.py +22 -0
  253. agno/utils/dttm.py +33 -0
  254. agno/utils/events.py +151 -3
  255. agno/utils/gemini.py +15 -5
  256. agno/utils/hooks.py +118 -4
  257. agno/utils/http.py +113 -2
  258. agno/utils/knowledge.py +12 -5
  259. agno/utils/log.py +1 -0
  260. agno/utils/mcp.py +92 -2
  261. agno/utils/media.py +187 -1
  262. agno/utils/merge_dict.py +3 -3
  263. agno/utils/message.py +60 -0
  264. agno/utils/models/ai_foundry.py +9 -2
  265. agno/utils/models/claude.py +49 -14
  266. agno/utils/models/cohere.py +9 -2
  267. agno/utils/models/llama.py +9 -2
  268. agno/utils/models/mistral.py +4 -2
  269. agno/utils/print_response/agent.py +109 -16
  270. agno/utils/print_response/team.py +223 -30
  271. agno/utils/print_response/workflow.py +251 -34
  272. agno/utils/streamlit.py +1 -1
  273. agno/utils/team.py +98 -9
  274. agno/utils/tokens.py +657 -0
  275. agno/vectordb/base.py +39 -7
  276. agno/vectordb/cassandra/cassandra.py +21 -5
  277. agno/vectordb/chroma/chromadb.py +43 -12
  278. agno/vectordb/clickhouse/clickhousedb.py +21 -5
  279. agno/vectordb/couchbase/couchbase.py +29 -5
  280. agno/vectordb/lancedb/lance_db.py +92 -181
  281. agno/vectordb/langchaindb/langchaindb.py +24 -4
  282. agno/vectordb/lightrag/lightrag.py +17 -3
  283. agno/vectordb/llamaindex/llamaindexdb.py +25 -5
  284. agno/vectordb/milvus/milvus.py +50 -37
  285. agno/vectordb/mongodb/__init__.py +7 -1
  286. agno/vectordb/mongodb/mongodb.py +36 -30
  287. agno/vectordb/pgvector/pgvector.py +201 -77
  288. agno/vectordb/pineconedb/pineconedb.py +41 -23
  289. agno/vectordb/qdrant/qdrant.py +67 -54
  290. agno/vectordb/redis/__init__.py +9 -0
  291. agno/vectordb/redis/redisdb.py +682 -0
  292. agno/vectordb/singlestore/singlestore.py +50 -29
  293. agno/vectordb/surrealdb/surrealdb.py +31 -41
  294. agno/vectordb/upstashdb/upstashdb.py +34 -6
  295. agno/vectordb/weaviate/weaviate.py +53 -14
  296. agno/workflow/__init__.py +2 -0
  297. agno/workflow/agent.py +299 -0
  298. agno/workflow/condition.py +120 -18
  299. agno/workflow/loop.py +77 -10
  300. agno/workflow/parallel.py +231 -143
  301. agno/workflow/router.py +118 -17
  302. agno/workflow/step.py +609 -170
  303. agno/workflow/steps.py +73 -6
  304. agno/workflow/types.py +96 -21
  305. agno/workflow/workflow.py +2039 -262
  306. {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/METADATA +201 -66
  307. agno-2.3.13.dist-info/RECORD +613 -0
  308. agno/tools/googlesearch.py +0 -98
  309. agno/tools/mcp.py +0 -679
  310. agno/tools/memori.py +0 -339
  311. agno-2.1.2.dist-info/RECORD +0 -543
  312. {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +0 -0
  313. {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/licenses/LICENSE +0 -0
  314. {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
agno/workflow/parallel.py CHANGED
@@ -1,5 +1,7 @@
1
1
  import asyncio
2
+ import warnings
2
3
  from concurrent.futures import ThreadPoolExecutor, as_completed
4
+ from contextvars import copy_context
3
5
  from copy import deepcopy
4
6
  from dataclasses import dataclass
5
7
  from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List, Optional, Union
@@ -7,6 +9,7 @@ from uuid import uuid4
7
9
 
8
10
  from agno.models.metrics import Metrics
9
11
  from agno.run.agent import RunOutputEvent
12
+ from agno.run.base import RunContext
10
13
  from agno.run.team import TeamRunOutputEvent
11
14
  from agno.run.workflow import (
12
15
  ParallelExecutionCompletedEvent,
@@ -14,8 +17,9 @@ from agno.run.workflow import (
14
17
  WorkflowRunOutput,
15
18
  WorkflowRunOutputEvent,
16
19
  )
17
- from agno.utils.merge_dict import merge_parallel_session_states
20
+ from agno.session.workflow import WorkflowSession
18
21
  from agno.utils.log import log_debug, logger
22
+ from agno.utils.merge_dict import merge_parallel_session_states
19
23
  from agno.workflow.condition import Condition
20
24
  from agno.workflow.step import Step
21
25
  from agno.workflow.types import StepInput, StepOutput, StepType
@@ -98,7 +102,7 @@ class Parallel:
98
102
  step_name=self.name or "Parallel",
99
103
  step_id=str(uuid4()),
100
104
  step_type=StepType.PARALLEL,
101
- content=f"Parallel {self.name or 'execution'} completed with 1 result",
105
+ content=self._build_aggregated_content(step_outputs),
102
106
  executor_name=self.name or "Parallel",
103
107
  images=single_result.images,
104
108
  videos=single_result.videos,
@@ -112,8 +116,8 @@ class Parallel:
112
116
 
113
117
  early_termination_requested = any(output.stop for output in step_outputs if hasattr(output, "stop"))
114
118
 
115
- # Multiple results - aggregate them
116
- aggregated_content = f"Parallel {self.name or 'execution'} completed with {len(step_outputs)} results"
119
+ # Multiple results - aggregate them with actual content from all steps
120
+ aggregated_content = self._build_aggregated_content(step_outputs)
117
121
 
118
122
  # Combine all media from parallel steps
119
123
  all_images = []
@@ -199,7 +203,12 @@ class Parallel:
199
203
  user_id: Optional[str] = None,
200
204
  workflow_run_response: Optional[WorkflowRunOutput] = None,
201
205
  store_executor_outputs: bool = True,
206
+ run_context: Optional[RunContext] = None,
202
207
  session_state: Optional[Dict[str, Any]] = None,
208
+ workflow_session: Optional[WorkflowSession] = None,
209
+ add_workflow_history_to_steps: Optional[bool] = False,
210
+ num_history_runs: int = 3,
211
+ background_tasks: Optional[Any] = None,
203
212
  ) -> StepOutput:
204
213
  """Execute all steps in parallel and return aggregated result"""
205
214
  # Use workflow logger for parallel orchestration
@@ -210,17 +219,21 @@ class Parallel:
210
219
  # Create individual session_state copies for each step to prevent race conditions
211
220
  session_state_copies = []
212
221
  for _ in range(len(self.steps)):
213
- if session_state is not None:
214
- session_state_copies.append(deepcopy(session_state))
222
+ # If using run context, no need to deepcopy the state. We want the direct reference.
223
+ if run_context is not None and run_context.session_state is not None:
224
+ session_state_copies.append(run_context.session_state)
215
225
  else:
216
- session_state_copies.append({})
226
+ if session_state is not None:
227
+ session_state_copies.append(deepcopy(session_state))
228
+ else:
229
+ session_state_copies.append({})
217
230
 
218
231
  def execute_step_with_index(step_with_index):
219
232
  """Execute a single step and preserve its original index"""
220
233
  idx, step = step_with_index
221
234
  # Use the individual session_state copy for this step
222
235
  step_session_state = session_state_copies[idx]
223
-
236
+
224
237
  try:
225
238
  step_result = step.execute(
226
239
  step_input,
@@ -228,7 +241,12 @@ class Parallel:
228
241
  user_id=user_id,
229
242
  workflow_run_response=workflow_run_response,
230
243
  store_executor_outputs=store_executor_outputs,
244
+ workflow_session=workflow_session,
245
+ add_workflow_history_to_steps=add_workflow_history_to_steps,
246
+ num_history_runs=num_history_runs,
247
+ run_context=run_context,
231
248
  session_state=step_session_state,
249
+ background_tasks=background_tasks,
232
250
  ) # type: ignore[union-attr]
233
251
  return idx, step_result, step_session_state
234
252
  except Exception as exc:
@@ -250,8 +268,9 @@ class Parallel:
250
268
 
251
269
  with ThreadPoolExecutor(max_workers=len(self.steps)) as executor:
252
270
  # Submit all tasks with their original indices
271
+ # Use copy_context().run to propagate context variables to child threads
253
272
  future_to_index = {
254
- executor.submit(execute_step_with_index, indexed_step): indexed_step[0]
273
+ executor.submit(copy_context().run, execute_step_with_index, indexed_step): indexed_step[0]
255
274
  for indexed_step in indexed_steps
256
275
  }
257
276
 
@@ -281,7 +300,7 @@ class Parallel:
281
300
  )
282
301
  )
283
302
 
284
- if session_state is not None:
303
+ if run_context is None and session_state is not None:
285
304
  merge_parallel_session_states(session_state, modified_session_states)
286
305
 
287
306
  # Sort by original index to preserve order
@@ -309,12 +328,19 @@ class Parallel:
309
328
  step_input: StepInput,
310
329
  session_id: Optional[str] = None,
311
330
  user_id: Optional[str] = None,
331
+ stream_events: bool = False,
312
332
  stream_intermediate_steps: bool = False,
333
+ stream_executor_events: bool = True,
313
334
  workflow_run_response: Optional[WorkflowRunOutput] = None,
314
335
  step_index: Optional[Union[int, tuple]] = None,
315
336
  store_executor_outputs: bool = True,
337
+ run_context: Optional[RunContext] = None,
316
338
  session_state: Optional[Dict[str, Any]] = None,
317
339
  parent_step_id: Optional[str] = None,
340
+ workflow_session: Optional[WorkflowSession] = None,
341
+ add_workflow_history_to_steps: Optional[bool] = False,
342
+ num_history_runs: int = 3,
343
+ background_tasks: Optional[Any] = None,
318
344
  ) -> Iterator[Union[WorkflowRunOutputEvent, StepOutput]]:
319
345
  """Execute all steps in parallel with streaming support"""
320
346
  log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
@@ -326,12 +352,25 @@ class Parallel:
326
352
  # Create individual session_state copies for each step to prevent race conditions
327
353
  session_state_copies = []
328
354
  for _ in range(len(self.steps)):
329
- if session_state is not None:
330
- session_state_copies.append(deepcopy(session_state))
355
+ # If using run context, no need to deepcopy the state. We want the direct reference.
356
+ if run_context is not None and run_context.session_state is not None:
357
+ session_state_copies.append(run_context.session_state)
331
358
  else:
332
- session_state_copies.append({})
359
+ if session_state is not None:
360
+ session_state_copies.append(deepcopy(session_state))
361
+ else:
362
+ session_state_copies.append({})
363
+
364
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
365
+ if stream_intermediate_steps is not None:
366
+ warnings.warn(
367
+ "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
368
+ DeprecationWarning,
369
+ stacklevel=2,
370
+ )
371
+ stream_events = stream_events or stream_intermediate_steps
333
372
 
334
- if stream_intermediate_steps and workflow_run_response:
373
+ if stream_events and workflow_run_response:
335
374
  # Yield parallel step started event
336
375
  yield ParallelExecutionStartedEvent(
337
376
  run_id=workflow_run_response.run_id or "",
@@ -345,14 +384,20 @@ class Parallel:
345
384
  parent_step_id=parent_step_id,
346
385
  )
347
386
 
387
+ import queue
388
+
389
+ event_queue = queue.Queue() # type: ignore
390
+ step_results = []
391
+ modified_session_states = []
392
+
348
393
  def execute_step_stream_with_index(step_with_index):
349
- """Execute a single step with streaming and preserve its original index"""
394
+ """Execute a single step with streaming and put events in queue immediately"""
350
395
  idx, step = step_with_index
351
396
  # Use the individual session_state copy for this step
352
397
  step_session_state = session_state_copies[idx]
353
-
398
+
354
399
  try:
355
- step_events = []
400
+ step_outputs = []
356
401
 
357
402
  # If step_index is None or integer (main step): create (step_index, sub_index)
358
403
  # If step_index is tuple (child step): all parallel sub-steps get same index
@@ -368,85 +413,94 @@ class Parallel:
368
413
  step_input,
369
414
  session_id=session_id,
370
415
  user_id=user_id,
371
- stream_intermediate_steps=stream_intermediate_steps,
416
+ stream_events=stream_events,
417
+ stream_executor_events=stream_executor_events,
372
418
  workflow_run_response=workflow_run_response,
373
419
  step_index=sub_step_index,
374
420
  store_executor_outputs=store_executor_outputs,
375
421
  session_state=step_session_state,
422
+ run_context=run_context,
376
423
  parent_step_id=parallel_step_id,
424
+ workflow_session=workflow_session,
425
+ add_workflow_history_to_steps=add_workflow_history_to_steps,
426
+ num_history_runs=num_history_runs,
427
+ background_tasks=background_tasks,
377
428
  ):
378
- step_events.append(event)
379
- return idx, step_events, step_session_state
429
+ # Put event immediately in queue
430
+ event_queue.put(("event", idx, event))
431
+ if isinstance(event, StepOutput):
432
+ step_outputs.append(event)
433
+
434
+ # Signal completion for this step
435
+ event_queue.put(("complete", idx, step_outputs, step_session_state))
436
+ return idx, step_outputs, step_session_state
380
437
  except Exception as exc:
381
438
  parallel_step_name = getattr(step, "name", f"step_{idx}")
382
439
  logger.error(f"Parallel step {parallel_step_name} streaming failed: {exc}")
383
- return (
384
- idx,
385
- [
386
- StepOutput(
387
- step_name=parallel_step_name,
388
- content=f"Step {parallel_step_name} failed: {str(exc)}",
389
- success=False,
390
- error=str(exc),
391
- )
392
- ],
393
- step_session_state,
440
+ error_event = StepOutput(
441
+ step_name=parallel_step_name,
442
+ content=f"Step {parallel_step_name} failed: {str(exc)}",
443
+ success=False,
444
+ error=str(exc),
394
445
  )
446
+ event_queue.put(("event", idx, error_event))
447
+ event_queue.put(("complete", idx, [error_event], step_session_state))
448
+ return idx, [error_event], step_session_state
395
449
 
396
- # Use index to preserve order
450
+ # Submit all parallel tasks
397
451
  indexed_steps = list(enumerate(self.steps))
398
- all_events_with_indices = []
399
- step_results = []
400
- modified_session_states = []
401
452
 
402
453
  with ThreadPoolExecutor(max_workers=len(self.steps)) as executor:
403
- # Submit all tasks with their original indices
404
- future_to_index = {
405
- executor.submit(execute_step_stream_with_index, indexed_step): indexed_step[0]
454
+ # Submit all tasks
455
+ # Use copy_context().run to propagate context variables to child threads
456
+ futures = [
457
+ executor.submit(copy_context().run, execute_step_stream_with_index, indexed_step)
406
458
  for indexed_step in indexed_steps
407
- }
459
+ ]
408
460
 
409
- # Collect results and modified session_state copies
410
- for future in as_completed(future_to_index):
461
+ # Process events from queue as they arrive
462
+ completed_steps = 0
463
+ total_steps = len(self.steps)
464
+
465
+ while completed_steps < total_steps:
411
466
  try:
412
- index, events, modified_session_state = future.result()
413
- all_events_with_indices.append((index, events))
414
- modified_session_states.append(modified_session_state)
467
+ message_type, step_idx, *data = event_queue.get(timeout=1.0)
468
+
469
+ if message_type == "event":
470
+ event = data[0]
471
+ # Yield events immediately as they arrive (except StepOutputs)
472
+ if not isinstance(event, StepOutput):
473
+ yield event
415
474
 
416
- # Extract StepOutput from events for the final result
417
- step_outputs = [event for event in events if isinstance(event, StepOutput)]
418
- if step_outputs:
475
+ elif message_type == "complete":
476
+ step_outputs, step_session_state = data
419
477
  step_results.extend(step_outputs)
478
+ modified_session_states.append(step_session_state)
479
+ completed_steps += 1
480
+
481
+ step_name = getattr(self.steps[step_idx], "name", f"step_{step_idx}")
482
+ log_debug(f"Parallel step {step_name} streaming completed")
483
+
484
+ except queue.Empty:
485
+ for i, future in enumerate(futures):
486
+ if future.done() and future.exception():
487
+ logger.error(f"Parallel step {i} failed: {future.exception()}")
488
+ if completed_steps < total_steps:
489
+ completed_steps += 1
490
+ except Exception as e:
491
+ logger.error(f"Error processing parallel step events: {e}")
492
+ completed_steps += 1
420
493
 
421
- step_name = getattr(self.steps[index], "name", f"step_{index}")
422
- log_debug(f"Parallel step {step_name} streaming completed")
494
+ for future in futures:
495
+ try:
496
+ future.result()
423
497
  except Exception as e:
424
- index = future_to_index[future]
425
- step_name = getattr(self.steps[index], "name", f"step_{index}")
426
- logger.error(f"Parallel step {step_name} streaming failed: {e}")
427
- error_event = StepOutput(
428
- step_name=step_name,
429
- content=f"Step {step_name} failed: {str(e)}",
430
- success=False,
431
- error=str(e),
432
- )
433
- all_events_with_indices.append((index, [error_event]))
434
- step_results.append(error_event)
498
+ logger.error(f"Future completion error: {e}")
435
499
 
436
500
  # Merge all session_state changes back into the original session_state
437
- if session_state is not None:
501
+ if run_context is None and session_state is not None:
438
502
  merge_parallel_session_states(session_state, modified_session_states)
439
503
 
440
- # Sort events by original index to preserve order
441
- all_events_with_indices.sort(key=lambda x: x[0])
442
-
443
- # Yield all collected streaming events in order (but not final StepOutputs)
444
- for _, events in all_events_with_indices:
445
- for event in events:
446
- # Only yield non-StepOutput events during streaming to avoid duplication
447
- if not isinstance(event, StepOutput):
448
- yield event
449
-
450
504
  # Flatten step_results - handle steps that return List[StepOutput] (like Condition/Loop)
451
505
  flattened_step_results: List[StepOutput] = []
452
506
  for result in step_results:
@@ -463,7 +517,7 @@ class Parallel:
463
517
 
464
518
  log_debug(f"Parallel End: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
465
519
 
466
- if stream_intermediate_steps and workflow_run_response:
520
+ if stream_events and workflow_run_response:
467
521
  # Yield parallel step completed event
468
522
  yield ParallelExecutionCompletedEvent(
469
523
  run_id=workflow_run_response.run_id or "",
@@ -473,7 +527,7 @@ class Parallel:
473
527
  step_name=self.name,
474
528
  step_index=step_index,
475
529
  parallel_step_count=len(self.steps),
476
- step_results=[aggregated_result], # Now single aggregated result
530
+ step_results=flattened_step_results,
477
531
  step_id=parallel_step_id,
478
532
  parent_step_id=parent_step_id,
479
533
  )
@@ -485,7 +539,12 @@ class Parallel:
485
539
  user_id: Optional[str] = None,
486
540
  workflow_run_response: Optional[WorkflowRunOutput] = None,
487
541
  store_executor_outputs: bool = True,
542
+ run_context: Optional[RunContext] = None,
488
543
  session_state: Optional[Dict[str, Any]] = None,
544
+ workflow_session: Optional[WorkflowSession] = None,
545
+ add_workflow_history_to_steps: Optional[bool] = False,
546
+ num_history_runs: int = 3,
547
+ background_tasks: Optional[Any] = None,
489
548
  ) -> StepOutput:
490
549
  """Execute all steps in parallel using asyncio and return aggregated result"""
491
550
  # Use workflow logger for async parallel orchestration
@@ -496,17 +555,21 @@ class Parallel:
496
555
  # Create individual session_state copies for each step to prevent race conditions
497
556
  session_state_copies = []
498
557
  for _ in range(len(self.steps)):
499
- if session_state is not None:
500
- session_state_copies.append(deepcopy(session_state))
558
+ # If using run context, no need to deepcopy the state. We want the direct reference.
559
+ if run_context is not None and run_context.session_state is not None:
560
+ session_state_copies.append(run_context.session_state)
501
561
  else:
502
- session_state_copies.append({})
562
+ if session_state is not None:
563
+ session_state_copies.append(deepcopy(session_state))
564
+ else:
565
+ session_state_copies.append({})
503
566
 
504
567
  async def execute_step_async_with_index(step_with_index):
505
568
  """Execute a single step asynchronously and preserve its original index"""
506
569
  idx, step = step_with_index
507
570
  # Use the individual session_state copy for this step
508
571
  step_session_state = session_state_copies[idx]
509
-
572
+
510
573
  try:
511
574
  inner_step_result = await step.aexecute(
512
575
  step_input,
@@ -514,7 +577,12 @@ class Parallel:
514
577
  user_id=user_id,
515
578
  workflow_run_response=workflow_run_response,
516
579
  store_executor_outputs=store_executor_outputs,
580
+ workflow_session=workflow_session,
581
+ add_workflow_history_to_steps=add_workflow_history_to_steps,
582
+ num_history_runs=num_history_runs,
517
583
  session_state=step_session_state,
584
+ run_context=run_context,
585
+ background_tasks=background_tasks,
518
586
  ) # type: ignore[union-attr]
519
587
  return idx, inner_step_result, step_session_state
520
588
  except Exception as exc:
@@ -568,7 +636,7 @@ class Parallel:
568
636
  log_debug(f"Parallel step {step_name} completed")
569
637
 
570
638
  # Smart merge all session_state changes back into the original session_state
571
- if session_state is not None:
639
+ if run_context is None and session_state is not None:
572
640
  merge_parallel_session_states(session_state, modified_session_states)
573
641
 
574
642
  # Sort by original index to preserve order
@@ -596,12 +664,19 @@ class Parallel:
596
664
  step_input: StepInput,
597
665
  session_id: Optional[str] = None,
598
666
  user_id: Optional[str] = None,
667
+ stream_events: bool = False,
599
668
  stream_intermediate_steps: bool = False,
669
+ stream_executor_events: bool = True,
600
670
  workflow_run_response: Optional[WorkflowRunOutput] = None,
601
671
  step_index: Optional[Union[int, tuple]] = None,
602
672
  store_executor_outputs: bool = True,
673
+ run_context: Optional[RunContext] = None,
603
674
  session_state: Optional[Dict[str, Any]] = None,
604
675
  parent_step_id: Optional[str] = None,
676
+ workflow_session: Optional[WorkflowSession] = None,
677
+ add_workflow_history_to_steps: Optional[bool] = False,
678
+ num_history_runs: int = 3,
679
+ background_tasks: Optional[Any] = None,
605
680
  ) -> AsyncIterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent, StepOutput]]:
606
681
  """Execute all steps in parallel with async streaming support"""
607
682
  log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
@@ -613,12 +688,25 @@ class Parallel:
613
688
  # Create individual session_state copies for each step to prevent race conditions
614
689
  session_state_copies = []
615
690
  for _ in range(len(self.steps)):
616
- if session_state is not None:
617
- session_state_copies.append(deepcopy(session_state))
691
+ # If using run context, no need to deepcopy the state. We want the direct reference.
692
+ if run_context is not None and run_context.session_state is not None:
693
+ session_state_copies.append(run_context.session_state)
618
694
  else:
619
- session_state_copies.append({})
695
+ if session_state is not None:
696
+ session_state_copies.append(deepcopy(session_state))
697
+ else:
698
+ session_state_copies.append({})
699
+
700
+ # Considering both stream_events and stream_intermediate_steps (deprecated)
701
+ if stream_intermediate_steps is not None:
702
+ warnings.warn(
703
+ "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
704
+ DeprecationWarning,
705
+ stacklevel=2,
706
+ )
707
+ stream_events = stream_events or stream_intermediate_steps
620
708
 
621
- if stream_intermediate_steps and workflow_run_response:
709
+ if stream_events and workflow_run_response:
622
710
  # Yield parallel step started event
623
711
  yield ParallelExecutionStartedEvent(
624
712
  run_id=workflow_run_response.run_id or "",
@@ -632,14 +720,20 @@ class Parallel:
632
720
  parent_step_id=parent_step_id,
633
721
  )
634
722
 
723
+ import asyncio
724
+
725
+ event_queue = asyncio.Queue() # type: ignore
726
+ step_results = []
727
+ modified_session_states = []
728
+
635
729
  async def execute_step_stream_async_with_index(step_with_index):
636
- """Execute a single step with async streaming and preserve its original index"""
730
+ """Execute a single step with async streaming and yield events immediately"""
637
731
  idx, step = step_with_index
638
732
  # Use the individual session_state copy for this step
639
733
  step_session_state = session_state_copies[idx]
640
-
734
+
641
735
  try:
642
- step_events = []
736
+ step_outputs = []
643
737
 
644
738
  # If step_index is None or integer (main step): create (step_index, sub_index)
645
739
  # If step_index is tuple (child step): all parallel sub-steps get same index
@@ -655,83 +749,77 @@ class Parallel:
655
749
  step_input,
656
750
  session_id=session_id,
657
751
  user_id=user_id,
658
- stream_intermediate_steps=stream_intermediate_steps,
752
+ stream_events=stream_events,
753
+ stream_executor_events=stream_executor_events,
659
754
  workflow_run_response=workflow_run_response,
660
755
  step_index=sub_step_index,
661
756
  store_executor_outputs=store_executor_outputs,
662
757
  session_state=step_session_state,
758
+ run_context=run_context,
663
759
  parent_step_id=parallel_step_id,
760
+ workflow_session=workflow_session,
761
+ add_workflow_history_to_steps=add_workflow_history_to_steps,
762
+ num_history_runs=num_history_runs,
763
+ background_tasks=background_tasks,
664
764
  ): # type: ignore[union-attr]
665
- step_events.append(event)
666
- return idx, step_events, step_session_state
765
+ # Yield events immediately to the queue
766
+ await event_queue.put(("event", idx, event))
767
+ if isinstance(event, StepOutput):
768
+ step_outputs.append(event)
769
+
770
+ # Signal completion for this step
771
+ await event_queue.put(("complete", idx, step_outputs, step_session_state))
772
+ return idx, step_outputs, step_session_state
667
773
  except Exception as e:
668
774
  parallel_step_name = getattr(step, "name", f"step_{idx}")
669
775
  logger.error(f"Parallel step {parallel_step_name} async streaming failed: {e}")
670
- return (
671
- idx,
672
- [
673
- StepOutput(
674
- step_name=parallel_step_name,
675
- content=f"Step {parallel_step_name} failed: {str(e)}",
676
- success=False,
677
- error=str(e),
678
- )
679
- ],
680
- step_session_state,
776
+ error_event = StepOutput(
777
+ step_name=parallel_step_name,
778
+ content=f"Step {parallel_step_name} failed: {str(e)}",
779
+ success=False,
780
+ error=str(e),
681
781
  )
782
+ await event_queue.put(("event", idx, error_event))
783
+ await event_queue.put(("complete", idx, [error_event], step_session_state))
784
+ return idx, [error_event], step_session_state
682
785
 
683
- # Use index to preserve order
786
+ # Start all parallel tasks
684
787
  indexed_steps = list(enumerate(self.steps))
685
- all_events_with_indices = []
686
- step_results = []
687
- modified_session_states = []
788
+ tasks = [
789
+ asyncio.create_task(execute_step_stream_async_with_index(indexed_step)) for indexed_step in indexed_steps
790
+ ]
688
791
 
689
- # Create tasks for all steps with their indices
690
- tasks = [execute_step_stream_async_with_index(indexed_step) for indexed_step in indexed_steps]
792
+ # Process events as they arrive and track completion
793
+ completed_steps = 0
794
+ total_steps = len(self.steps)
691
795
 
692
- # Execute all tasks concurrently
693
- results_with_indices = await asyncio.gather(*tasks, return_exceptions=True)
796
+ while completed_steps < total_steps:
797
+ try:
798
+ message_type, step_idx, *data = await event_queue.get()
694
799
 
695
- # Process results and handle exceptions, preserving order
696
- for i, result in enumerate(results_with_indices):
697
- if isinstance(result, Exception):
698
- step_name = getattr(self.steps[i], "name", f"step_{i}")
699
- logger.error(f"Parallel step {step_name} async streaming failed: {result}")
700
- error_event = StepOutput(
701
- step_name=step_name,
702
- content=f"Step {step_name} failed: {str(result)}",
703
- success=False,
704
- error=str(result),
705
- )
706
- all_events_with_indices.append((i, [error_event]))
707
- step_results.append(error_event)
708
- modified_session_states.append(session_state_copies[i])
709
- else:
710
- index, events, modified_session_state = result # type: ignore[misc]
711
- all_events_with_indices.append((index, events))
712
- modified_session_states.append(modified_session_state)
800
+ if message_type == "event":
801
+ event = data[0]
802
+ if not isinstance(event, StepOutput):
803
+ yield event
713
804
 
714
- # Extract StepOutput from events for the final result
715
- step_outputs = [event for event in events if isinstance(event, StepOutput)]
716
- if step_outputs:
805
+ elif message_type == "complete":
806
+ step_outputs, step_session_state = data
717
807
  step_results.extend(step_outputs)
808
+ modified_session_states.append(step_session_state)
809
+ completed_steps += 1
718
810
 
719
- step_name = getattr(self.steps[index], "name", f"step_{index}")
720
- log_debug(f"Parallel step {step_name} async streaming completed")
811
+ step_name = getattr(self.steps[step_idx], "name", f"step_{step_idx}")
812
+ log_debug(f"Parallel step {step_name} async streaming completed")
721
813
 
722
- # Merge all session_state changes back into the original session_state
723
- if session_state is not None:
724
- merge_parallel_session_states(session_state, modified_session_states)
814
+ except Exception as e:
815
+ logger.error(f"Error processing parallel step events: {e}")
816
+ completed_steps += 1
725
817
 
726
- # Sort events by original index to preserve order
727
- all_events_with_indices.sort(key=lambda x: x[0])
818
+ await asyncio.gather(*tasks, return_exceptions=True)
728
819
 
729
- # Yield all collected streaming events in order (but not final StepOutputs)
730
- for _, events in all_events_with_indices:
731
- for event in events:
732
- # Only yield non-StepOutput events during streaming to avoid duplication
733
- if not isinstance(event, StepOutput):
734
- yield event
820
+ # Merge all session_state changes back into the original session_state
821
+ if run_context is None and session_state is not None:
822
+ merge_parallel_session_states(session_state, modified_session_states)
735
823
 
736
824
  # Flatten step_results - handle steps that return List[StepOutput] (like Condition/Loop)
737
825
  flattened_step_results: List[StepOutput] = []
@@ -749,7 +837,7 @@ class Parallel:
749
837
 
750
838
  log_debug(f"Parallel End: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
751
839
 
752
- if stream_intermediate_steps and workflow_run_response:
840
+ if stream_events and workflow_run_response:
753
841
  # Yield parallel step completed event
754
842
  yield ParallelExecutionCompletedEvent(
755
843
  run_id=workflow_run_response.run_id or "",
@@ -759,7 +847,7 @@ class Parallel:
759
847
  step_name=self.name,
760
848
  step_index=step_index,
761
849
  parallel_step_count=len(self.steps),
762
- step_results=[aggregated_result], # Now single aggregated result
850
+ step_results=flattened_step_results,
763
851
  step_id=parallel_step_id,
764
852
  parent_step_id=parent_step_id,
765
853
  )