agno 2.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +5540 -2273
- agno/api/api.py +2 -0
- agno/api/os.py +1 -1
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +247 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +689 -6
- agno/db/dynamo/dynamo.py +933 -37
- agno/db/dynamo/schemas.py +174 -10
- agno/db/dynamo/utils.py +63 -4
- agno/db/firestore/firestore.py +831 -9
- agno/db/firestore/schemas.py +51 -0
- agno/db/firestore/utils.py +102 -4
- agno/db/gcs_json/gcs_json_db.py +660 -12
- agno/db/gcs_json/utils.py +60 -26
- agno/db/in_memory/in_memory_db.py +287 -14
- agno/db/in_memory/utils.py +60 -2
- agno/db/json/json_db.py +590 -14
- agno/db/json/utils.py +60 -26
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +43 -13
- agno/db/migrations/versions/__init__.py +0 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +15 -1
- agno/db/mongo/async_mongo.py +2760 -0
- agno/db/mongo/mongo.py +879 -11
- agno/db/mongo/schemas.py +42 -0
- agno/db/mongo/utils.py +80 -8
- agno/db/mysql/__init__.py +2 -1
- agno/db/mysql/async_mysql.py +2912 -0
- agno/db/mysql/mysql.py +946 -68
- agno/db/mysql/schemas.py +72 -10
- agno/db/mysql/utils.py +198 -7
- agno/db/postgres/__init__.py +2 -1
- agno/db/postgres/async_postgres.py +2579 -0
- agno/db/postgres/postgres.py +942 -57
- agno/db/postgres/schemas.py +81 -18
- agno/db/postgres/utils.py +164 -2
- agno/db/redis/redis.py +671 -7
- agno/db/redis/schemas.py +50 -0
- agno/db/redis/utils.py +65 -7
- agno/db/schemas/__init__.py +2 -1
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +1 -0
- agno/db/schemas/memory.py +17 -2
- agno/db/singlestore/schemas.py +63 -0
- agno/db/singlestore/singlestore.py +949 -83
- agno/db/singlestore/utils.py +60 -2
- agno/db/sqlite/__init__.py +2 -1
- agno/db/sqlite/async_sqlite.py +2911 -0
- agno/db/sqlite/schemas.py +62 -0
- agno/db/sqlite/sqlite.py +965 -46
- agno/db/sqlite/utils.py +169 -8
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +334 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1908 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +2 -0
- agno/eval/__init__.py +10 -0
- agno/eval/accuracy.py +75 -55
- agno/eval/agent_as_judge.py +861 -0
- agno/eval/base.py +29 -0
- agno/eval/performance.py +16 -7
- agno/eval/reliability.py +28 -16
- agno/eval/utils.py +35 -17
- agno/exceptions.py +27 -2
- agno/filters.py +354 -0
- agno/guardrails/prompt_injection.py +1 -0
- agno/hooks/__init__.py +3 -0
- agno/hooks/decorator.py +164 -0
- agno/integrations/discord/client.py +1 -1
- agno/knowledge/chunking/agentic.py +13 -10
- agno/knowledge/chunking/fixed.py +4 -1
- agno/knowledge/chunking/semantic.py +9 -4
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/embedder/nebius.py +1 -1
- agno/knowledge/embedder/ollama.py +8 -0
- agno/knowledge/embedder/openai.py +8 -8
- agno/knowledge/embedder/sentence_transformer.py +6 -2
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/knowledge.py +1618 -318
- agno/knowledge/reader/base.py +6 -2
- agno/knowledge/reader/csv_reader.py +8 -10
- agno/knowledge/reader/docx_reader.py +5 -6
- agno/knowledge/reader/field_labeled_csv_reader.py +16 -20
- agno/knowledge/reader/json_reader.py +5 -4
- agno/knowledge/reader/markdown_reader.py +8 -8
- agno/knowledge/reader/pdf_reader.py +17 -19
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +32 -3
- agno/knowledge/reader/s3_reader.py +3 -3
- agno/knowledge/reader/tavily_reader.py +193 -0
- agno/knowledge/reader/text_reader.py +22 -10
- agno/knowledge/reader/web_search_reader.py +1 -48
- agno/knowledge/reader/website_reader.py +10 -10
- agno/knowledge/reader/wikipedia_reader.py +33 -1
- agno/knowledge/types.py +1 -0
- agno/knowledge/utils.py +72 -7
- agno/media.py +22 -6
- agno/memory/__init__.py +14 -1
- agno/memory/manager.py +544 -83
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +66 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/aimlapi/aimlapi.py +17 -0
- agno/models/anthropic/claude.py +515 -40
- agno/models/aws/bedrock.py +102 -21
- agno/models/aws/claude.py +131 -274
- agno/models/azure/ai_foundry.py +41 -19
- agno/models/azure/openai_chat.py +39 -8
- agno/models/base.py +1249 -525
- agno/models/cerebras/cerebras.py +91 -21
- agno/models/cerebras/cerebras_openai.py +21 -2
- agno/models/cohere/chat.py +40 -6
- agno/models/cometapi/cometapi.py +18 -1
- agno/models/dashscope/dashscope.py +2 -3
- agno/models/deepinfra/deepinfra.py +18 -1
- agno/models/deepseek/deepseek.py +69 -3
- agno/models/fireworks/fireworks.py +18 -1
- agno/models/google/gemini.py +877 -80
- agno/models/google/utils.py +22 -0
- agno/models/groq/groq.py +51 -18
- agno/models/huggingface/huggingface.py +17 -6
- agno/models/ibm/watsonx.py +16 -6
- agno/models/internlm/internlm.py +18 -1
- agno/models/langdb/langdb.py +13 -1
- agno/models/litellm/chat.py +44 -9
- agno/models/litellm/litellm_openai.py +18 -1
- agno/models/message.py +28 -5
- agno/models/meta/llama.py +47 -14
- agno/models/meta/llama_openai.py +22 -17
- agno/models/mistral/mistral.py +8 -4
- agno/models/nebius/nebius.py +6 -7
- agno/models/nvidia/nvidia.py +20 -3
- agno/models/ollama/chat.py +24 -8
- agno/models/openai/chat.py +104 -29
- agno/models/openai/responses.py +101 -81
- agno/models/openrouter/openrouter.py +60 -3
- agno/models/perplexity/perplexity.py +17 -1
- agno/models/portkey/portkey.py +7 -6
- agno/models/requesty/requesty.py +24 -4
- agno/models/response.py +73 -2
- agno/models/sambanova/sambanova.py +20 -3
- agno/models/siliconflow/siliconflow.py +19 -2
- agno/models/together/together.py +20 -3
- agno/models/utils.py +254 -8
- agno/models/vercel/v0.py +20 -3
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +190 -0
- agno/models/vllm/vllm.py +19 -14
- agno/models/xai/xai.py +19 -2
- agno/os/app.py +549 -152
- agno/os/auth.py +190 -3
- agno/os/config.py +23 -0
- agno/os/interfaces/a2a/router.py +8 -11
- agno/os/interfaces/a2a/utils.py +1 -1
- agno/os/interfaces/agui/router.py +18 -3
- agno/os/interfaces/agui/utils.py +152 -39
- agno/os/interfaces/slack/router.py +55 -37
- agno/os/interfaces/slack/slack.py +9 -1
- agno/os/interfaces/whatsapp/router.py +0 -1
- agno/os/interfaces/whatsapp/security.py +3 -1
- agno/os/mcp.py +110 -52
- agno/os/middleware/__init__.py +2 -0
- agno/os/middleware/jwt.py +676 -112
- agno/os/router.py +40 -1478
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +599 -0
- agno/os/routers/agents/schema.py +261 -0
- agno/os/routers/evals/evals.py +96 -39
- agno/os/routers/evals/schemas.py +65 -33
- agno/os/routers/evals/utils.py +80 -10
- agno/os/routers/health.py +10 -4
- agno/os/routers/knowledge/knowledge.py +196 -38
- agno/os/routers/knowledge/schemas.py +82 -22
- agno/os/routers/memory/memory.py +279 -52
- agno/os/routers/memory/schemas.py +46 -17
- agno/os/routers/metrics/metrics.py +20 -8
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +462 -34
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +512 -0
- agno/os/routers/teams/schema.py +257 -0
- agno/os/routers/traces/__init__.py +3 -0
- agno/os/routers/traces/schemas.py +414 -0
- agno/os/routers/traces/traces.py +499 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +624 -0
- agno/os/routers/workflows/schema.py +75 -0
- agno/os/schema.py +256 -693
- agno/os/scopes.py +469 -0
- agno/os/utils.py +514 -36
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/openai.py +5 -0
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +155 -32
- agno/run/base.py +55 -3
- agno/run/requirement.py +181 -0
- agno/run/team.py +125 -38
- agno/run/workflow.py +72 -18
- agno/session/agent.py +102 -89
- agno/session/summary.py +56 -15
- agno/session/team.py +164 -90
- agno/session/workflow.py +405 -40
- agno/table.py +10 -0
- agno/team/team.py +3974 -1903
- agno/tools/dalle.py +2 -4
- agno/tools/eleven_labs.py +23 -25
- agno/tools/exa.py +21 -16
- agno/tools/file.py +153 -23
- agno/tools/file_generation.py +16 -10
- agno/tools/firecrawl.py +15 -7
- agno/tools/function.py +193 -38
- agno/tools/gmail.py +238 -14
- agno/tools/google_drive.py +271 -0
- agno/tools/googlecalendar.py +36 -8
- agno/tools/googlesheets.py +20 -5
- agno/tools/jira.py +20 -0
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +3 -3
- agno/tools/models/nebius.py +5 -5
- agno/tools/models_labs.py +20 -10
- agno/tools/nano_banana.py +151 -0
- agno/tools/notion.py +204 -0
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +76 -36
- agno/tools/redshift.py +406 -0
- agno/tools/scrapegraph.py +1 -1
- agno/tools/shopify.py +1519 -0
- agno/tools/slack.py +18 -3
- agno/tools/spotify.py +919 -0
- agno/tools/tavily.py +146 -0
- agno/tools/toolkit.py +25 -0
- agno/tools/workflow.py +8 -1
- agno/tools/yfinance.py +12 -11
- agno/tracing/__init__.py +12 -0
- agno/tracing/exporter.py +157 -0
- agno/tracing/schemas.py +276 -0
- agno/tracing/setup.py +111 -0
- agno/utils/agent.py +938 -0
- agno/utils/cryptography.py +22 -0
- agno/utils/dttm.py +33 -0
- agno/utils/events.py +151 -3
- agno/utils/gemini.py +15 -5
- agno/utils/hooks.py +118 -4
- agno/utils/http.py +113 -2
- agno/utils/knowledge.py +12 -5
- agno/utils/log.py +1 -0
- agno/utils/mcp.py +92 -2
- agno/utils/media.py +187 -1
- agno/utils/merge_dict.py +3 -3
- agno/utils/message.py +60 -0
- agno/utils/models/ai_foundry.py +9 -2
- agno/utils/models/claude.py +49 -14
- agno/utils/models/cohere.py +9 -2
- agno/utils/models/llama.py +9 -2
- agno/utils/models/mistral.py +4 -2
- agno/utils/print_response/agent.py +109 -16
- agno/utils/print_response/team.py +223 -30
- agno/utils/print_response/workflow.py +251 -34
- agno/utils/streamlit.py +1 -1
- agno/utils/team.py +98 -9
- agno/utils/tokens.py +657 -0
- agno/vectordb/base.py +39 -7
- agno/vectordb/cassandra/cassandra.py +21 -5
- agno/vectordb/chroma/chromadb.py +43 -12
- agno/vectordb/clickhouse/clickhousedb.py +21 -5
- agno/vectordb/couchbase/couchbase.py +29 -5
- agno/vectordb/lancedb/lance_db.py +92 -181
- agno/vectordb/langchaindb/langchaindb.py +24 -4
- agno/vectordb/lightrag/lightrag.py +17 -3
- agno/vectordb/llamaindex/llamaindexdb.py +25 -5
- agno/vectordb/milvus/milvus.py +50 -37
- agno/vectordb/mongodb/__init__.py +7 -1
- agno/vectordb/mongodb/mongodb.py +36 -30
- agno/vectordb/pgvector/pgvector.py +201 -77
- agno/vectordb/pineconedb/pineconedb.py +41 -23
- agno/vectordb/qdrant/qdrant.py +67 -54
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +682 -0
- agno/vectordb/singlestore/singlestore.py +50 -29
- agno/vectordb/surrealdb/surrealdb.py +31 -41
- agno/vectordb/upstashdb/upstashdb.py +34 -6
- agno/vectordb/weaviate/weaviate.py +53 -14
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +120 -18
- agno/workflow/loop.py +77 -10
- agno/workflow/parallel.py +231 -143
- agno/workflow/router.py +118 -17
- agno/workflow/step.py +609 -170
- agno/workflow/steps.py +73 -6
- agno/workflow/types.py +96 -21
- agno/workflow/workflow.py +2039 -262
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/METADATA +201 -66
- agno-2.3.13.dist-info/RECORD +613 -0
- agno/tools/googlesearch.py +0 -98
- agno/tools/mcp.py +0 -679
- agno/tools/memori.py +0 -339
- agno-2.1.2.dist-info/RECORD +0 -543
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
agno/workflow/parallel.py
CHANGED
@@ -1,5 +1,7 @@
 import asyncio
+import warnings
 from concurrent.futures import ThreadPoolExecutor, as_completed
+from contextvars import copy_context
 from copy import deepcopy
 from dataclasses import dataclass
 from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List, Optional, Union
@@ -7,6 +9,7 @@ from uuid import uuid4
 
 from agno.models.metrics import Metrics
 from agno.run.agent import RunOutputEvent
+from agno.run.base import RunContext
 from agno.run.team import TeamRunOutputEvent
 from agno.run.workflow import (
     ParallelExecutionCompletedEvent,
@@ -14,8 +17,9 @@ from agno.run.workflow import (
     WorkflowRunOutput,
     WorkflowRunOutputEvent,
 )
-from agno.
+from agno.session.workflow import WorkflowSession
 from agno.utils.log import log_debug, logger
+from agno.utils.merge_dict import merge_parallel_session_states
 from agno.workflow.condition import Condition
 from agno.workflow.step import Step
 from agno.workflow.types import StepInput, StepOutput, StepType
@@ -98,7 +102,7 @@ class Parallel:
                 step_name=self.name or "Parallel",
                 step_id=str(uuid4()),
                 step_type=StepType.PARALLEL,
-                content=
+                content=self._build_aggregated_content(step_outputs),
                 executor_name=self.name or "Parallel",
                 images=single_result.images,
                 videos=single_result.videos,
@@ -112,8 +116,8 @@ class Parallel:
 
         early_termination_requested = any(output.stop for output in step_outputs if hasattr(output, "stop"))
 
-        # Multiple results - aggregate them
-        aggregated_content =
+        # Multiple results - aggregate them with actual content from all steps
+        aggregated_content = self._build_aggregated_content(step_outputs)
 
         # Combine all media from parallel steps
         all_images = []
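
Both hunks above route aggregation through a new `_build_aggregated_content` helper whose body is not among the hunks shown here. As a purely hypothetical sketch of what such an aggregator could look like (illustrative only, not agno's actual implementation):

    # Hypothetical aggregator: joins each parallel step's content under its name.
    def build_aggregated_content(step_outputs):
        parts = []
        for output in step_outputs:
            name = getattr(output, "step_name", None) or "step"
            if getattr(output, "content", None):
                parts.append(f"## {name}\n\n{output.content}")
        return "\n\n".join(parts)
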
@@ -199,7 +203,12 @@ class Parallel:
         user_id: Optional[str] = None,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         store_executor_outputs: bool = True,
+        run_context: Optional[RunContext] = None,
         session_state: Optional[Dict[str, Any]] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
+        background_tasks: Optional[Any] = None,
     ) -> StepOutput:
         """Execute all steps in parallel and return aggregated result"""
         # Use workflow logger for parallel orchestration
@@ -210,17 +219,21 @@ class Parallel:
         # Create individual session_state copies for each step to prevent race conditions
         session_state_copies = []
         for _ in range(len(self.steps)):
-            if session_state is not None:
-                session_state_copies.append(deepcopy(session_state))
+            # If using run context, no need to deepcopy the state. We want the direct reference.
+            if run_context is not None and run_context.session_state is not None:
+                session_state_copies.append(run_context.session_state)
             else:
-                session_state_copies.append({})
+                if session_state is not None:
+                    session_state_copies.append(deepcopy(session_state))
+                else:
+                    session_state_copies.append({})
 
         def execute_step_with_index(step_with_index):
             """Execute a single step and preserve its original index"""
             idx, step = step_with_index
             # Use the individual session_state copy for this step
             step_session_state = session_state_copies[idx]
-
+
             try:
                 step_result = step.execute(
                     step_input,
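
The state-copy change above has a clear contract: with a `run_context`, every parallel branch shares one mutable dict by direct reference; without one, each branch gets an isolated `deepcopy` that is merged back after the join (see the `merge_parallel_session_states` calls later in the diff). A minimal sketch of the two modes, with illustrative names:

    from copy import deepcopy

    def make_branch_states(n, session_state=None, run_context_state=None):
        # Shared-reference mode: all branches mutate the same dict.
        if run_context_state is not None:
            return [run_context_state] * n
        # Isolated mode: each branch works on its own deep copy.
        return [deepcopy(session_state or {}) for _ in range(n)]

    shared = {"counter": 0}
    branches = make_branch_states(3, run_context_state=shared)
    branches[0]["counter"] += 1
    assert shared["counter"] == 1  # every branch sees the update

The diff applies the same split in all four execution paths (sync, async, and both streaming variants).
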
@@ -228,7 +241,12 @@ class Parallel:
                     user_id=user_id,
                     workflow_run_response=workflow_run_response,
                     store_executor_outputs=store_executor_outputs,
+                    workflow_session=workflow_session,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    num_history_runs=num_history_runs,
+                    run_context=run_context,
                     session_state=step_session_state,
+                    background_tasks=background_tasks,
                 )  # type: ignore[union-attr]
                 return idx, step_result, step_session_state
             except Exception as exc:
@@ -250,8 +268,9 @@ class Parallel:
 
         with ThreadPoolExecutor(max_workers=len(self.steps)) as executor:
             # Submit all tasks with their original indices
+            # Use copy_context().run to propagate context variables to child threads
             future_to_index = {
-                executor.submit(execute_step_with_index, indexed_step): indexed_step[0]
+                executor.submit(copy_context().run, execute_step_with_index, indexed_step): indexed_step[0]
                 for indexed_step in indexed_steps
             }
 
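
`copy_context().run` is used above because `ThreadPoolExecutor` workers do not see the submitting thread's `contextvars` by default; each worker thread starts from its own context. A minimal, self-contained sketch of the pattern (the context variable here is illustrative, not an agno API):

    from concurrent.futures import ThreadPoolExecutor
    from contextvars import ContextVar, copy_context

    # Illustrative context variable standing in for agno's run-scoped state.
    current_run_id: ContextVar[str] = ContextVar("current_run_id", default="unset")

    def worker() -> str:
        # Returns whatever value is visible in the executing thread's context.
        return current_run_id.get()

    current_run_id.set("run-123")
    with ThreadPoolExecutor(max_workers=1) as pool:
        # Without copy_context the worker thread sees the default value.
        plain = pool.submit(worker).result()
        # copy_context() snapshots the caller's context; .run executes the
        # callable inside that snapshot, so the value travels with the task.
        propagated = pool.submit(copy_context().run, worker).result()

    print(plain, propagated)  # unset run-123
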
@@ -281,7 +300,7 @@ class Parallel:
                     )
                 )
 
-        if session_state is not None:
+        if run_context is None and session_state is not None:
             merge_parallel_session_states(session_state, modified_session_states)
 
         # Sort by original index to preserve order
@@ -309,12 +328,19 @@ class Parallel:
         step_input: StepInput,
         session_id: Optional[str] = None,
         user_id: Optional[str] = None,
+        stream_events: bool = False,
         stream_intermediate_steps: bool = False,
+        stream_executor_events: bool = True,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         step_index: Optional[Union[int, tuple]] = None,
         store_executor_outputs: bool = True,
+        run_context: Optional[RunContext] = None,
         session_state: Optional[Dict[str, Any]] = None,
         parent_step_id: Optional[str] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
+        background_tasks: Optional[Any] = None,
     ) -> Iterator[Union[WorkflowRunOutputEvent, StepOutput]]:
         """Execute all steps in parallel with streaming support"""
         log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
@@ -326,12 +352,25 @@ class Parallel:
         # Create individual session_state copies for each step to prevent race conditions
         session_state_copies = []
         for _ in range(len(self.steps)):
-            if session_state is not None:
-                session_state_copies.append(deepcopy(session_state))
+            # If using run context, no need to deepcopy the state. We want the direct reference.
+            if run_context is not None and run_context.session_state is not None:
+                session_state_copies.append(run_context.session_state)
             else:
-                session_state_copies.append({})
+                if session_state is not None:
+                    session_state_copies.append(deepcopy(session_state))
+                else:
+                    session_state_copies.append({})
+
+        # Considering both stream_events and stream_intermediate_steps (deprecated)
+        if stream_intermediate_steps is not None:
+            warnings.warn(
+                "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            stream_events = stream_events or stream_intermediate_steps
 
-        if
+        if stream_events and workflow_run_response:
             # Yield parallel step started event
             yield ParallelExecutionStartedEvent(
                 run_id=workflow_run_response.run_id or "",
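
The streaming entry points now take `stream_events` and keep `stream_intermediate_steps` only as a deprecated alias that is folded in under a `DeprecationWarning`. The shim in isolation, as a sketch (the function name is illustrative):

    import warnings
    from typing import Optional

    def resolve_stream_events(stream_events: bool = False,
                              stream_intermediate_steps: Optional[bool] = None) -> bool:
        # Fold the deprecated flag into the new one, warning the caller once.
        if stream_intermediate_steps is not None:
            warnings.warn(
                "'stream_intermediate_steps' is deprecated; use 'stream_events' instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            stream_events = stream_events or stream_intermediate_steps
        return stream_events

    assert resolve_stream_events(stream_intermediate_steps=True) is True

Keeping the old keyword accepted means existing callers keep working while the warning steers them toward the new name.
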
@@ -345,14 +384,20 @@ class Parallel:
                 parent_step_id=parent_step_id,
             )
 
+        import queue
+
+        event_queue = queue.Queue()  # type: ignore
+        step_results = []
+        modified_session_states = []
+
         def execute_step_stream_with_index(step_with_index):
-            """Execute a single step with streaming and
+            """Execute a single step with streaming and put events in queue immediately"""
             idx, step = step_with_index
             # Use the individual session_state copy for this step
             step_session_state = session_state_copies[idx]
-
+
             try:
-
+                step_outputs = []
 
                 # If step_index is None or integer (main step): create (step_index, sub_index)
                 # If step_index is tuple (child step): all parallel sub-steps get same index
@@ -368,85 +413,94 @@ class Parallel:
                     step_input,
                     session_id=session_id,
                     user_id=user_id,
-
+                    stream_events=stream_events,
+                    stream_executor_events=stream_executor_events,
                     workflow_run_response=workflow_run_response,
                     step_index=sub_step_index,
                     store_executor_outputs=store_executor_outputs,
                     session_state=step_session_state,
+                    run_context=run_context,
                     parent_step_id=parallel_step_id,
+                    workflow_session=workflow_session,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    num_history_runs=num_history_runs,
+                    background_tasks=background_tasks,
                 ):
-
-
+                    # Put event immediately in queue
+                    event_queue.put(("event", idx, event))
+                    if isinstance(event, StepOutput):
+                        step_outputs.append(event)
+
+                # Signal completion for this step
+                event_queue.put(("complete", idx, step_outputs, step_session_state))
+                return idx, step_outputs, step_session_state
             except Exception as exc:
                 parallel_step_name = getattr(step, "name", f"step_{idx}")
                 logger.error(f"Parallel step {parallel_step_name} streaming failed: {exc}")
-
-
-
-
-
-                        content=f"Step {parallel_step_name} failed: {str(exc)}",
-                        success=False,
-                        error=str(exc),
-                    )
-                ],
-                step_session_state,
+                error_event = StepOutput(
+                    step_name=parallel_step_name,
+                    content=f"Step {parallel_step_name} failed: {str(exc)}",
+                    success=False,
+                    error=str(exc),
                 )
+                event_queue.put(("event", idx, error_event))
+                event_queue.put(("complete", idx, [error_event], step_session_state))
+                return idx, [error_event], step_session_state
 
-        #
+        # Submit all parallel tasks
         indexed_steps = list(enumerate(self.steps))
-        all_events_with_indices = []
-        step_results = []
-        modified_session_states = []
 
         with ThreadPoolExecutor(max_workers=len(self.steps)) as executor:
-            # Submit all tasks
-
-
+            # Submit all tasks
+            # Use copy_context().run to propagate context variables to child threads
+            futures = [
+                executor.submit(copy_context().run, execute_step_stream_with_index, indexed_step)
                 for indexed_step in indexed_steps
-
+            ]
 
-            #
-
+            # Process events from queue as they arrive
+            completed_steps = 0
+            total_steps = len(self.steps)
+
+            while completed_steps < total_steps:
                 try:
-
-
-
+                    message_type, step_idx, *data = event_queue.get(timeout=1.0)
+
+                    if message_type == "event":
+                        event = data[0]
+                        # Yield events immediately as they arrive (except StepOutputs)
+                        if not isinstance(event, StepOutput):
+                            yield event
 
-
-
-                    if step_outputs:
+                    elif message_type == "complete":
+                        step_outputs, step_session_state = data
                         step_results.extend(step_outputs)
+                        modified_session_states.append(step_session_state)
+                        completed_steps += 1
+
+                        step_name = getattr(self.steps[step_idx], "name", f"step_{step_idx}")
+                        log_debug(f"Parallel step {step_name} streaming completed")
+
+                except queue.Empty:
+                    for i, future in enumerate(futures):
+                        if future.done() and future.exception():
+                            logger.error(f"Parallel step {i} failed: {future.exception()}")
+                    if completed_steps < total_steps:
+                        completed_steps += 1
+                except Exception as e:
+                    logger.error(f"Error processing parallel step events: {e}")
+                    completed_steps += 1
 
-
-
+            for future in futures:
+                try:
+                    future.result()
                 except Exception as e:
-
-                    step_name = getattr(self.steps[index], "name", f"step_{index}")
-                    logger.error(f"Parallel step {step_name} streaming failed: {e}")
-                    error_event = StepOutput(
-                        step_name=step_name,
-                        content=f"Step {step_name} failed: {str(e)}",
-                        success=False,
-                        error=str(e),
-                    )
-                    all_events_with_indices.append((index, [error_event]))
-                    step_results.append(error_event)
+                    logger.error(f"Future completion error: {e}")
 
         # Merge all session_state changes back into the original session_state
-        if session_state is not None:
+        if run_context is None and session_state is not None:
             merge_parallel_session_states(session_state, modified_session_states)
 
-        # Sort events by original index to preserve order
-        all_events_with_indices.sort(key=lambda x: x[0])
-
-        # Yield all collected streaming events in order (but not final StepOutputs)
-        for _, events in all_events_with_indices:
-            for event in events:
-                # Only yield non-StepOutput events during streaming to avoid duplication
-                if not isinstance(event, StepOutput):
-                    yield event
-
         # Flatten step_results - handle steps that return List[StepOutput] (like Condition/Loop)
         flattened_step_results: List[StepOutput] = []
         for result in step_results:
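
The sync streaming path above replaces collect-sort-then-yield with a producer/consumer queue, so events from all branches are surfaced as they happen rather than after every branch finishes. A minimal sketch of that fan-in pattern under the same "event"/"complete" message protocol (all names illustrative, not agno APIs):

    import queue
    from concurrent.futures import ThreadPoolExecutor

    def stream_parallel(producers):
        # N producers push ("event", ...) tuples onto one queue; the consumer
        # yields them as they arrive and counts ("complete", ...) sentinels to
        # know when every branch is done.
        q: "queue.Queue[tuple]" = queue.Queue()

        def run(idx, produce):
            for item in produce():
                q.put(("event", idx, item))
            q.put(("complete", idx))

        with ThreadPoolExecutor(max_workers=len(producers)) as pool:
            for i, p in enumerate(producers):
                pool.submit(run, i, p)
            done = 0
            while done < len(producers):
                kind, idx, *payload = q.get()
                if kind == "event":
                    yield idx, payload[0]
                else:
                    done += 1

    def letters():
        yield "a"
        yield "b"

    def numbers():
        yield 1

    for idx, event in stream_parallel([letters, numbers]):
        print(idx, event)
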
@@ -463,7 +517,7 @@ class Parallel:
 
         log_debug(f"Parallel End: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
 
-        if
+        if stream_events and workflow_run_response:
             # Yield parallel step completed event
             yield ParallelExecutionCompletedEvent(
                 run_id=workflow_run_response.run_id or "",
@@ -473,7 +527,7 @@ class Parallel:
                 step_name=self.name,
                 step_index=step_index,
                 parallel_step_count=len(self.steps),
-                step_results=
+                step_results=flattened_step_results,
                 step_id=parallel_step_id,
                 parent_step_id=parent_step_id,
             )
@@ -485,7 +539,12 @@ class Parallel:
         user_id: Optional[str] = None,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         store_executor_outputs: bool = True,
+        run_context: Optional[RunContext] = None,
         session_state: Optional[Dict[str, Any]] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
+        background_tasks: Optional[Any] = None,
     ) -> StepOutput:
         """Execute all steps in parallel using asyncio and return aggregated result"""
         # Use workflow logger for async parallel orchestration
@@ -496,17 +555,21 @@ class Parallel:
         # Create individual session_state copies for each step to prevent race conditions
         session_state_copies = []
         for _ in range(len(self.steps)):
-            if session_state is not None:
-                session_state_copies.append(deepcopy(session_state))
+            # If using run context, no need to deepcopy the state. We want the direct reference.
+            if run_context is not None and run_context.session_state is not None:
+                session_state_copies.append(run_context.session_state)
             else:
-                session_state_copies.append({})
+                if session_state is not None:
+                    session_state_copies.append(deepcopy(session_state))
+                else:
+                    session_state_copies.append({})
 
         async def execute_step_async_with_index(step_with_index):
             """Execute a single step asynchronously and preserve its original index"""
             idx, step = step_with_index
             # Use the individual session_state copy for this step
             step_session_state = session_state_copies[idx]
-
+
             try:
                 inner_step_result = await step.aexecute(
                     step_input,
@@ -514,7 +577,12 @@ class Parallel:
                     user_id=user_id,
                     workflow_run_response=workflow_run_response,
                     store_executor_outputs=store_executor_outputs,
+                    workflow_session=workflow_session,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    num_history_runs=num_history_runs,
                     session_state=step_session_state,
+                    run_context=run_context,
+                    background_tasks=background_tasks,
                 )  # type: ignore[union-attr]
                 return idx, inner_step_result, step_session_state
             except Exception as exc:
@@ -568,7 +636,7 @@ class Parallel:
             log_debug(f"Parallel step {step_name} completed")
 
         # Smart merge all session_state changes back into the original session_state
-        if session_state is not None:
+        if run_context is None and session_state is not None:
             merge_parallel_session_states(session_state, modified_session_states)
 
         # Sort by original index to preserve order
@@ -596,12 +664,19 @@ class Parallel:
         step_input: StepInput,
         session_id: Optional[str] = None,
         user_id: Optional[str] = None,
+        stream_events: bool = False,
         stream_intermediate_steps: bool = False,
+        stream_executor_events: bool = True,
         workflow_run_response: Optional[WorkflowRunOutput] = None,
         step_index: Optional[Union[int, tuple]] = None,
         store_executor_outputs: bool = True,
+        run_context: Optional[RunContext] = None,
         session_state: Optional[Dict[str, Any]] = None,
         parent_step_id: Optional[str] = None,
+        workflow_session: Optional[WorkflowSession] = None,
+        add_workflow_history_to_steps: Optional[bool] = False,
+        num_history_runs: int = 3,
+        background_tasks: Optional[Any] = None,
     ) -> AsyncIterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent, StepOutput]]:
         """Execute all steps in parallel with async streaming support"""
         log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
@@ -613,12 +688,25 @@ class Parallel:
         # Create individual session_state copies for each step to prevent race conditions
         session_state_copies = []
         for _ in range(len(self.steps)):
-            if session_state is not None:
-                session_state_copies.append(deepcopy(session_state))
+            # If using run context, no need to deepcopy the state. We want the direct reference.
+            if run_context is not None and run_context.session_state is not None:
+                session_state_copies.append(run_context.session_state)
             else:
-                session_state_copies.append({})
+                if session_state is not None:
+                    session_state_copies.append(deepcopy(session_state))
+                else:
+                    session_state_copies.append({})
+
+        # Considering both stream_events and stream_intermediate_steps (deprecated)
+        if stream_intermediate_steps is not None:
+            warnings.warn(
+                "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            stream_events = stream_events or stream_intermediate_steps
 
-        if
+        if stream_events and workflow_run_response:
             # Yield parallel step started event
             yield ParallelExecutionStartedEvent(
                 run_id=workflow_run_response.run_id or "",
@@ -632,14 +720,20 @@ class Parallel:
                 parent_step_id=parent_step_id,
             )
 
+        import asyncio
+
+        event_queue = asyncio.Queue()  # type: ignore
+        step_results = []
+        modified_session_states = []
+
         async def execute_step_stream_async_with_index(step_with_index):
-            """Execute a single step with async streaming and
+            """Execute a single step with async streaming and yield events immediately"""
             idx, step = step_with_index
             # Use the individual session_state copy for this step
             step_session_state = session_state_copies[idx]
-
+
             try:
-
+                step_outputs = []
 
                 # If step_index is None or integer (main step): create (step_index, sub_index)
                 # If step_index is tuple (child step): all parallel sub-steps get same index
@@ -655,83 +749,77 @@ class Parallel:
                     step_input,
                     session_id=session_id,
                     user_id=user_id,
-
+                    stream_events=stream_events,
+                    stream_executor_events=stream_executor_events,
                    workflow_run_response=workflow_run_response,
                     step_index=sub_step_index,
                     store_executor_outputs=store_executor_outputs,
                     session_state=step_session_state,
+                    run_context=run_context,
                     parent_step_id=parallel_step_id,
+                    workflow_session=workflow_session,
+                    add_workflow_history_to_steps=add_workflow_history_to_steps,
+                    num_history_runs=num_history_runs,
+                    background_tasks=background_tasks,
                 ):  # type: ignore[union-attr]
-
-
+                    # Yield events immediately to the queue
+                    await event_queue.put(("event", idx, event))
+                    if isinstance(event, StepOutput):
+                        step_outputs.append(event)
+
+                # Signal completion for this step
+                await event_queue.put(("complete", idx, step_outputs, step_session_state))
+                return idx, step_outputs, step_session_state
             except Exception as e:
                 parallel_step_name = getattr(step, "name", f"step_{idx}")
                 logger.error(f"Parallel step {parallel_step_name} async streaming failed: {e}")
-
-
-
-
-
-                        content=f"Step {parallel_step_name} failed: {str(e)}",
-                        success=False,
-                        error=str(e),
-                    )
-                ],
-                step_session_state,
+                error_event = StepOutput(
+                    step_name=parallel_step_name,
+                    content=f"Step {parallel_step_name} failed: {str(e)}",
+                    success=False,
+                    error=str(e),
                 )
+                await event_queue.put(("event", idx, error_event))
+                await event_queue.put(("complete", idx, [error_event], step_session_state))
+                return idx, [error_event], step_session_state
 
-        #
+        # Start all parallel tasks
         indexed_steps = list(enumerate(self.steps))
-
-
-
+        tasks = [
+            asyncio.create_task(execute_step_stream_async_with_index(indexed_step)) for indexed_step in indexed_steps
+        ]
 
-        #
-
+        # Process events as they arrive and track completion
+        completed_steps = 0
+        total_steps = len(self.steps)
 
-
-
+        while completed_steps < total_steps:
+            try:
+                message_type, step_idx, *data = await event_queue.get()
 
-
-
-
-
-                    logger.error(f"Parallel step {step_name} async streaming failed: {result}")
-                    error_event = StepOutput(
-                        step_name=step_name,
-                        content=f"Step {step_name} failed: {str(result)}",
-                        success=False,
-                        error=str(result),
-                    )
-                    all_events_with_indices.append((i, [error_event]))
-                    step_results.append(error_event)
-                    modified_session_states.append(session_state_copies[i])
-                else:
-                    index, events, modified_session_state = result  # type: ignore[misc]
-                    all_events_with_indices.append((index, events))
-                    modified_session_states.append(modified_session_state)
+                if message_type == "event":
+                    event = data[0]
+                    if not isinstance(event, StepOutput):
+                        yield event
 
-
-
-                    if step_outputs:
+                elif message_type == "complete":
+                    step_outputs, step_session_state = data
                     step_results.extend(step_outputs)
+                    modified_session_states.append(step_session_state)
+                    completed_steps += 1
 
-
-
+                    step_name = getattr(self.steps[step_idx], "name", f"step_{step_idx}")
+                    log_debug(f"Parallel step {step_name} async streaming completed")
 
-
-
-
+            except Exception as e:
+                logger.error(f"Error processing parallel step events: {e}")
+                completed_steps += 1
 
-
-        all_events_with_indices.sort(key=lambda x: x[0])
+        await asyncio.gather(*tasks, return_exceptions=True)
 
-        #
-
-
-                # Only yield non-StepOutput events during streaming to avoid duplication
-                if not isinstance(event, StepOutput):
-                    yield event
+        # Merge all session_state changes back into the original session_state
+        if run_context is None and session_state is not None:
+            merge_parallel_session_states(session_state, modified_session_states)
 
         # Flatten step_results - handle steps that return List[StepOutput] (like Condition/Loop)
         flattened_step_results: List[StepOutput] = []
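
The async streaming path mirrors the threaded one with `asyncio.Queue` and `asyncio.create_task`, then reaps the tasks with `asyncio.gather(..., return_exceptions=True)` once every branch has signalled completion; unlike the threaded consumer, its `event_queue.get()` has no timeout. A minimal async counterpart of the earlier fan-in sketch (names illustrative, not agno APIs; the real diff additionally pushes an error `StepOutput` when a branch raises):

    import asyncio

    async def astream_parallel(agen_factories):
        # Tasks push events onto one asyncio.Queue; the consumer drains it
        # until every task has signalled completion.
        q: "asyncio.Queue[tuple]" = asyncio.Queue()

        async def run(idx, factory):
            async for item in factory():
                await q.put(("event", idx, item))
            await q.put(("complete", idx))

        tasks = [asyncio.create_task(run(i, f)) for i, f in enumerate(agen_factories)]
        done = 0
        while done < len(agen_factories):
            kind, idx, *payload = await q.get()
            if kind == "event":
                yield idx, payload[0]
            else:
                done += 1
        # Reap the tasks once every branch has signalled completion.
        await asyncio.gather(*tasks, return_exceptions=True)

    async def letters():
        yield "a"
        await asyncio.sleep(0)
        yield "b"

    async def main():
        async for idx, event in astream_parallel([letters]):
            print(idx, event)

    asyncio.run(main())
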
@@ -749,7 +837,7 @@ class Parallel:
 
         log_debug(f"Parallel End: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
 
-        if
+        if stream_events and workflow_run_response:
             # Yield parallel step completed event
             yield ParallelExecutionCompletedEvent(
                 run_id=workflow_run_response.run_id or "",
@@ -759,7 +847,7 @@ class Parallel:
                 step_name=self.name,
                 step_index=step_index,
                 parallel_step_count=len(self.steps),
-                step_results=
+                step_results=flattened_step_results,
                 step_id=parallel_step_id,
                 parent_step_id=parent_step_id,
             )