agno 0.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/__init__.py +8 -0
- agno/agent/__init__.py +44 -5
- agno/agent/agent.py +10531 -2975
- agno/api/agent.py +14 -53
- agno/api/api.py +7 -46
- agno/api/evals.py +22 -0
- agno/api/os.py +17 -0
- agno/api/routes.py +6 -25
- agno/api/schemas/__init__.py +9 -0
- agno/api/schemas/agent.py +6 -9
- agno/api/schemas/evals.py +16 -0
- agno/api/schemas/os.py +14 -0
- agno/api/schemas/team.py +10 -10
- agno/api/schemas/utils.py +21 -0
- agno/api/schemas/workflows.py +16 -0
- agno/api/settings.py +53 -0
- agno/api/team.py +22 -26
- agno/api/workflow.py +28 -0
- agno/cloud/aws/base.py +214 -0
- agno/cloud/aws/s3/__init__.py +2 -0
- agno/cloud/aws/s3/api_client.py +43 -0
- agno/cloud/aws/s3/bucket.py +195 -0
- agno/cloud/aws/s3/object.py +57 -0
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +247 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/__init__.py +24 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +946 -0
- agno/db/dynamo/__init__.py +3 -0
- agno/db/dynamo/dynamo.py +2781 -0
- agno/db/dynamo/schemas.py +442 -0
- agno/db/dynamo/utils.py +743 -0
- agno/db/firestore/__init__.py +3 -0
- agno/db/firestore/firestore.py +2379 -0
- agno/db/firestore/schemas.py +181 -0
- agno/db/firestore/utils.py +376 -0
- agno/db/gcs_json/__init__.py +3 -0
- agno/db/gcs_json/gcs_json_db.py +1791 -0
- agno/db/gcs_json/utils.py +228 -0
- agno/db/in_memory/__init__.py +3 -0
- agno/db/in_memory/in_memory_db.py +1312 -0
- agno/db/in_memory/utils.py +230 -0
- agno/db/json/__init__.py +3 -0
- agno/db/json/json_db.py +1777 -0
- agno/db/json/utils.py +230 -0
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +635 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +17 -0
- agno/db/mongo/async_mongo.py +2760 -0
- agno/db/mongo/mongo.py +2597 -0
- agno/db/mongo/schemas.py +119 -0
- agno/db/mongo/utils.py +276 -0
- agno/db/mysql/__init__.py +4 -0
- agno/db/mysql/async_mysql.py +2912 -0
- agno/db/mysql/mysql.py +2923 -0
- agno/db/mysql/schemas.py +186 -0
- agno/db/mysql/utils.py +488 -0
- agno/db/postgres/__init__.py +4 -0
- agno/db/postgres/async_postgres.py +2579 -0
- agno/db/postgres/postgres.py +2870 -0
- agno/db/postgres/schemas.py +187 -0
- agno/db/postgres/utils.py +442 -0
- agno/db/redis/__init__.py +3 -0
- agno/db/redis/redis.py +2141 -0
- agno/db/redis/schemas.py +159 -0
- agno/db/redis/utils.py +346 -0
- agno/db/schemas/__init__.py +4 -0
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +34 -0
- agno/db/schemas/knowledge.py +40 -0
- agno/db/schemas/memory.py +61 -0
- agno/db/singlestore/__init__.py +3 -0
- agno/db/singlestore/schemas.py +179 -0
- agno/db/singlestore/singlestore.py +2877 -0
- agno/db/singlestore/utils.py +384 -0
- agno/db/sqlite/__init__.py +4 -0
- agno/db/sqlite/async_sqlite.py +2911 -0
- agno/db/sqlite/schemas.py +181 -0
- agno/db/sqlite/sqlite.py +2908 -0
- agno/db/sqlite/utils.py +429 -0
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +334 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1908 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +118 -0
- agno/eval/__init__.py +24 -0
- agno/eval/accuracy.py +666 -276
- agno/eval/agent_as_judge.py +861 -0
- agno/eval/base.py +29 -0
- agno/eval/performance.py +779 -0
- agno/eval/reliability.py +241 -62
- agno/eval/utils.py +120 -0
- agno/exceptions.py +143 -1
- agno/filters.py +354 -0
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +52 -0
- agno/hooks/__init__.py +3 -0
- agno/hooks/decorator.py +164 -0
- agno/integrations/discord/__init__.py +3 -0
- agno/integrations/discord/client.py +203 -0
- agno/knowledge/__init__.py +5 -1
- agno/{document → knowledge}/chunking/agentic.py +22 -14
- agno/{document → knowledge}/chunking/document.py +2 -2
- agno/{document → knowledge}/chunking/fixed.py +7 -6
- agno/knowledge/chunking/markdown.py +151 -0
- agno/{document → knowledge}/chunking/recursive.py +15 -3
- agno/knowledge/chunking/row.py +39 -0
- agno/knowledge/chunking/semantic.py +91 -0
- agno/knowledge/chunking/strategy.py +165 -0
- agno/knowledge/content.py +74 -0
- agno/knowledge/document/__init__.py +5 -0
- agno/{document → knowledge/document}/base.py +12 -2
- agno/knowledge/embedder/__init__.py +5 -0
- agno/knowledge/embedder/aws_bedrock.py +343 -0
- agno/knowledge/embedder/azure_openai.py +210 -0
- agno/{embedder → knowledge/embedder}/base.py +8 -0
- agno/knowledge/embedder/cohere.py +323 -0
- agno/knowledge/embedder/fastembed.py +62 -0
- agno/{embedder → knowledge/embedder}/fireworks.py +1 -1
- agno/knowledge/embedder/google.py +258 -0
- agno/knowledge/embedder/huggingface.py +94 -0
- agno/knowledge/embedder/jina.py +182 -0
- agno/knowledge/embedder/langdb.py +22 -0
- agno/knowledge/embedder/mistral.py +206 -0
- agno/knowledge/embedder/nebius.py +13 -0
- agno/knowledge/embedder/ollama.py +154 -0
- agno/knowledge/embedder/openai.py +195 -0
- agno/knowledge/embedder/sentence_transformer.py +63 -0
- agno/{embedder → knowledge/embedder}/together.py +1 -1
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/embedder/voyageai.py +165 -0
- agno/knowledge/knowledge.py +3006 -0
- agno/knowledge/reader/__init__.py +7 -0
- agno/knowledge/reader/arxiv_reader.py +81 -0
- agno/knowledge/reader/base.py +95 -0
- agno/knowledge/reader/csv_reader.py +164 -0
- agno/knowledge/reader/docx_reader.py +82 -0
- agno/knowledge/reader/field_labeled_csv_reader.py +290 -0
- agno/knowledge/reader/firecrawl_reader.py +201 -0
- agno/knowledge/reader/json_reader.py +88 -0
- agno/knowledge/reader/markdown_reader.py +137 -0
- agno/knowledge/reader/pdf_reader.py +431 -0
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +313 -0
- agno/knowledge/reader/s3_reader.py +89 -0
- agno/knowledge/reader/tavily_reader.py +193 -0
- agno/knowledge/reader/text_reader.py +127 -0
- agno/knowledge/reader/web_search_reader.py +325 -0
- agno/knowledge/reader/website_reader.py +455 -0
- agno/knowledge/reader/wikipedia_reader.py +91 -0
- agno/knowledge/reader/youtube_reader.py +78 -0
- agno/knowledge/remote_content/remote_content.py +88 -0
- agno/knowledge/reranker/__init__.py +3 -0
- agno/{reranker → knowledge/reranker}/base.py +1 -1
- agno/{reranker → knowledge/reranker}/cohere.py +2 -2
- agno/knowledge/reranker/infinity.py +195 -0
- agno/knowledge/reranker/sentence_transformer.py +54 -0
- agno/knowledge/types.py +39 -0
- agno/knowledge/utils.py +234 -0
- agno/media.py +439 -95
- agno/memory/__init__.py +16 -3
- agno/memory/manager.py +1474 -123
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +66 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/aimlapi/__init__.py +5 -0
- agno/models/aimlapi/aimlapi.py +62 -0
- agno/models/anthropic/__init__.py +4 -0
- agno/models/anthropic/claude.py +960 -496
- agno/models/aws/__init__.py +15 -0
- agno/models/aws/bedrock.py +686 -451
- agno/models/aws/claude.py +190 -183
- agno/models/azure/__init__.py +18 -1
- agno/models/azure/ai_foundry.py +489 -0
- agno/models/azure/openai_chat.py +89 -40
- agno/models/base.py +2477 -550
- agno/models/cerebras/__init__.py +12 -0
- agno/models/cerebras/cerebras.py +565 -0
- agno/models/cerebras/cerebras_openai.py +131 -0
- agno/models/cohere/__init__.py +4 -0
- agno/models/cohere/chat.py +306 -492
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +74 -0
- agno/models/dashscope/__init__.py +5 -0
- agno/models/dashscope/dashscope.py +90 -0
- agno/models/deepinfra/__init__.py +5 -0
- agno/models/deepinfra/deepinfra.py +45 -0
- agno/models/deepseek/__init__.py +4 -0
- agno/models/deepseek/deepseek.py +110 -9
- agno/models/fireworks/__init__.py +4 -0
- agno/models/fireworks/fireworks.py +19 -22
- agno/models/google/__init__.py +3 -7
- agno/models/google/gemini.py +1717 -662
- agno/models/google/utils.py +22 -0
- agno/models/groq/__init__.py +4 -0
- agno/models/groq/groq.py +391 -666
- agno/models/huggingface/__init__.py +4 -0
- agno/models/huggingface/huggingface.py +266 -538
- agno/models/ibm/__init__.py +5 -0
- agno/models/ibm/watsonx.py +432 -0
- agno/models/internlm/__init__.py +3 -0
- agno/models/internlm/internlm.py +20 -3
- agno/models/langdb/__init__.py +1 -0
- agno/models/langdb/langdb.py +60 -0
- agno/models/litellm/__init__.py +14 -0
- agno/models/litellm/chat.py +503 -0
- agno/models/litellm/litellm_openai.py +42 -0
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/lmstudio/__init__.py +5 -0
- agno/models/lmstudio/lmstudio.py +25 -0
- agno/models/message.py +361 -39
- agno/models/meta/__init__.py +12 -0
- agno/models/meta/llama.py +502 -0
- agno/models/meta/llama_openai.py +79 -0
- agno/models/metrics.py +120 -0
- agno/models/mistral/__init__.py +4 -0
- agno/models/mistral/mistral.py +293 -393
- agno/models/nebius/__init__.py +3 -0
- agno/models/nebius/nebius.py +53 -0
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/__init__.py +4 -0
- agno/models/nvidia/nvidia.py +22 -3
- agno/models/ollama/__init__.py +4 -2
- agno/models/ollama/chat.py +257 -492
- agno/models/openai/__init__.py +7 -0
- agno/models/openai/chat.py +725 -770
- agno/models/openai/like.py +16 -2
- agno/models/openai/responses.py +1121 -0
- agno/models/openrouter/__init__.py +4 -0
- agno/models/openrouter/openrouter.py +62 -5
- agno/models/perplexity/__init__.py +5 -0
- agno/models/perplexity/perplexity.py +203 -0
- agno/models/portkey/__init__.py +3 -0
- agno/models/portkey/portkey.py +82 -0
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +69 -0
- agno/models/response.py +177 -7
- agno/models/sambanova/__init__.py +4 -0
- agno/models/sambanova/sambanova.py +23 -4
- agno/models/siliconflow/__init__.py +5 -0
- agno/models/siliconflow/siliconflow.py +42 -0
- agno/models/together/__init__.py +4 -0
- agno/models/together/together.py +21 -164
- agno/models/utils.py +266 -0
- agno/models/vercel/__init__.py +3 -0
- agno/models/vercel/v0.py +43 -0
- agno/models/vertexai/__init__.py +0 -1
- agno/models/vertexai/claude.py +190 -0
- agno/models/vllm/__init__.py +3 -0
- agno/models/vllm/vllm.py +83 -0
- agno/models/xai/__init__.py +2 -0
- agno/models/xai/xai.py +111 -7
- agno/os/__init__.py +3 -0
- agno/os/app.py +1027 -0
- agno/os/auth.py +244 -0
- agno/os/config.py +126 -0
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +249 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/__init__.py +3 -0
- agno/os/interfaces/agui/agui.py +47 -0
- agno/os/interfaces/agui/router.py +147 -0
- agno/os/interfaces/agui/utils.py +574 -0
- agno/os/interfaces/base.py +25 -0
- agno/os/interfaces/slack/__init__.py +3 -0
- agno/os/interfaces/slack/router.py +148 -0
- agno/os/interfaces/slack/security.py +30 -0
- agno/os/interfaces/slack/slack.py +47 -0
- agno/os/interfaces/whatsapp/__init__.py +3 -0
- agno/os/interfaces/whatsapp/router.py +210 -0
- agno/os/interfaces/whatsapp/security.py +55 -0
- agno/os/interfaces/whatsapp/whatsapp.py +36 -0
- agno/os/mcp.py +293 -0
- agno/os/middleware/__init__.py +9 -0
- agno/os/middleware/jwt.py +797 -0
- agno/os/router.py +258 -0
- agno/os/routers/__init__.py +3 -0
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +599 -0
- agno/os/routers/agents/schema.py +261 -0
- agno/os/routers/evals/__init__.py +3 -0
- agno/os/routers/evals/evals.py +450 -0
- agno/os/routers/evals/schemas.py +174 -0
- agno/os/routers/evals/utils.py +231 -0
- agno/os/routers/health.py +31 -0
- agno/os/routers/home.py +52 -0
- agno/os/routers/knowledge/__init__.py +3 -0
- agno/os/routers/knowledge/knowledge.py +1008 -0
- agno/os/routers/knowledge/schemas.py +178 -0
- agno/os/routers/memory/__init__.py +3 -0
- agno/os/routers/memory/memory.py +661 -0
- agno/os/routers/memory/schemas.py +88 -0
- agno/os/routers/metrics/__init__.py +3 -0
- agno/os/routers/metrics/metrics.py +190 -0
- agno/os/routers/metrics/schemas.py +47 -0
- agno/os/routers/session/__init__.py +3 -0
- agno/os/routers/session/session.py +997 -0
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +512 -0
- agno/os/routers/teams/schema.py +257 -0
- agno/os/routers/traces/__init__.py +3 -0
- agno/os/routers/traces/schemas.py +414 -0
- agno/os/routers/traces/traces.py +499 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +624 -0
- agno/os/routers/workflows/schema.py +75 -0
- agno/os/schema.py +534 -0
- agno/os/scopes.py +469 -0
- agno/{playground → os}/settings.py +7 -15
- agno/os/utils.py +973 -0
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/azure_ai_foundry.py +67 -0
- agno/reasoning/deepseek.py +63 -0
- agno/reasoning/default.py +97 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/groq.py +71 -0
- agno/reasoning/helpers.py +24 -1
- agno/reasoning/ollama.py +67 -0
- agno/reasoning/openai.py +86 -0
- agno/reasoning/step.py +2 -1
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +822 -0
- agno/run/base.py +247 -0
- agno/run/cancel.py +81 -0
- agno/run/requirement.py +181 -0
- agno/run/team.py +767 -0
- agno/run/workflow.py +708 -0
- agno/session/__init__.py +10 -0
- agno/session/agent.py +260 -0
- agno/session/summary.py +265 -0
- agno/session/team.py +342 -0
- agno/session/workflow.py +501 -0
- agno/table.py +10 -0
- agno/team/__init__.py +37 -0
- agno/team/team.py +9536 -0
- agno/tools/__init__.py +7 -0
- agno/tools/agentql.py +120 -0
- agno/tools/airflow.py +22 -12
- agno/tools/api.py +122 -0
- agno/tools/apify.py +276 -83
- agno/tools/{arxiv_toolkit.py → arxiv.py} +20 -12
- agno/tools/aws_lambda.py +28 -7
- agno/tools/aws_ses.py +66 -0
- agno/tools/baidusearch.py +11 -4
- agno/tools/bitbucket.py +292 -0
- agno/tools/brandfetch.py +213 -0
- agno/tools/bravesearch.py +106 -0
- agno/tools/brightdata.py +367 -0
- agno/tools/browserbase.py +209 -0
- agno/tools/calcom.py +32 -23
- agno/tools/calculator.py +24 -37
- agno/tools/cartesia.py +187 -0
- agno/tools/{clickup_tool.py → clickup.py} +17 -28
- agno/tools/confluence.py +91 -26
- agno/tools/crawl4ai.py +139 -43
- agno/tools/csv_toolkit.py +28 -22
- agno/tools/dalle.py +36 -22
- agno/tools/daytona.py +475 -0
- agno/tools/decorator.py +169 -14
- agno/tools/desi_vocal.py +23 -11
- agno/tools/discord.py +32 -29
- agno/tools/docker.py +716 -0
- agno/tools/duckdb.py +76 -81
- agno/tools/duckduckgo.py +43 -40
- agno/tools/e2b.py +703 -0
- agno/tools/eleven_labs.py +65 -54
- agno/tools/email.py +13 -5
- agno/tools/evm.py +129 -0
- agno/tools/exa.py +324 -42
- agno/tools/fal.py +39 -35
- agno/tools/file.py +196 -30
- agno/tools/file_generation.py +356 -0
- agno/tools/financial_datasets.py +288 -0
- agno/tools/firecrawl.py +108 -33
- agno/tools/function.py +960 -122
- agno/tools/giphy.py +34 -12
- agno/tools/github.py +1294 -97
- agno/tools/gmail.py +922 -0
- agno/tools/google_bigquery.py +117 -0
- agno/tools/google_drive.py +271 -0
- agno/tools/google_maps.py +253 -0
- agno/tools/googlecalendar.py +607 -107
- agno/tools/googlesheets.py +377 -0
- agno/tools/hackernews.py +20 -12
- agno/tools/jina.py +24 -14
- agno/tools/jira.py +48 -19
- agno/tools/knowledge.py +218 -0
- agno/tools/linear.py +82 -43
- agno/tools/linkup.py +58 -0
- agno/tools/local_file_system.py +15 -7
- agno/tools/lumalab.py +41 -26
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +284 -0
- agno/tools/mem0.py +193 -0
- agno/tools/memory.py +419 -0
- agno/tools/mlx_transcribe.py +11 -9
- agno/tools/models/azure_openai.py +190 -0
- agno/tools/models/gemini.py +203 -0
- agno/tools/models/groq.py +158 -0
- agno/tools/models/morph.py +186 -0
- agno/tools/models/nebius.py +124 -0
- agno/tools/models_labs.py +163 -82
- agno/tools/moviepy_video.py +18 -13
- agno/tools/nano_banana.py +151 -0
- agno/tools/neo4j.py +134 -0
- agno/tools/newspaper.py +15 -4
- agno/tools/newspaper4k.py +19 -6
- agno/tools/notion.py +204 -0
- agno/tools/openai.py +181 -17
- agno/tools/openbb.py +27 -20
- agno/tools/opencv.py +321 -0
- agno/tools/openweather.py +233 -0
- agno/tools/oxylabs.py +385 -0
- agno/tools/pandas.py +25 -15
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +238 -185
- agno/tools/pubmed.py +125 -13
- agno/tools/python.py +48 -35
- agno/tools/reasoning.py +283 -0
- agno/tools/reddit.py +207 -29
- agno/tools/redshift.py +406 -0
- agno/tools/replicate.py +69 -26
- agno/tools/resend.py +11 -6
- agno/tools/scrapegraph.py +179 -19
- agno/tools/searxng.py +23 -31
- agno/tools/serpapi.py +15 -10
- agno/tools/serper.py +255 -0
- agno/tools/shell.py +23 -12
- agno/tools/shopify.py +1519 -0
- agno/tools/slack.py +56 -14
- agno/tools/sleep.py +8 -6
- agno/tools/spider.py +35 -11
- agno/tools/spotify.py +919 -0
- agno/tools/sql.py +34 -19
- agno/tools/tavily.py +158 -8
- agno/tools/telegram.py +18 -8
- agno/tools/todoist.py +218 -0
- agno/tools/toolkit.py +134 -9
- agno/tools/trafilatura.py +388 -0
- agno/tools/trello.py +25 -28
- agno/tools/twilio.py +18 -9
- agno/tools/user_control_flow.py +78 -0
- agno/tools/valyu.py +228 -0
- agno/tools/visualization.py +467 -0
- agno/tools/webbrowser.py +28 -0
- agno/tools/webex.py +76 -0
- agno/tools/website.py +23 -19
- agno/tools/webtools.py +45 -0
- agno/tools/whatsapp.py +286 -0
- agno/tools/wikipedia.py +28 -19
- agno/tools/workflow.py +285 -0
- agno/tools/{twitter.py → x.py} +142 -46
- agno/tools/yfinance.py +41 -39
- agno/tools/youtube.py +34 -17
- agno/tools/zendesk.py +15 -5
- agno/tools/zep.py +454 -0
- agno/tools/zoom.py +86 -37
- agno/tracing/__init__.py +12 -0
- agno/tracing/exporter.py +157 -0
- agno/tracing/schemas.py +276 -0
- agno/tracing/setup.py +111 -0
- agno/utils/agent.py +938 -0
- agno/utils/audio.py +37 -1
- agno/utils/certs.py +27 -0
- agno/utils/code_execution.py +11 -0
- agno/utils/common.py +103 -20
- agno/utils/cryptography.py +22 -0
- agno/utils/dttm.py +33 -0
- agno/utils/events.py +700 -0
- agno/utils/functions.py +107 -37
- agno/utils/gemini.py +426 -0
- agno/utils/hooks.py +171 -0
- agno/utils/http.py +185 -0
- agno/utils/json_schema.py +159 -37
- agno/utils/knowledge.py +36 -0
- agno/utils/location.py +19 -0
- agno/utils/log.py +221 -8
- agno/utils/mcp.py +214 -0
- agno/utils/media.py +335 -14
- agno/utils/merge_dict.py +22 -1
- agno/utils/message.py +77 -2
- agno/utils/models/ai_foundry.py +50 -0
- agno/utils/models/claude.py +373 -0
- agno/utils/models/cohere.py +94 -0
- agno/utils/models/llama.py +85 -0
- agno/utils/models/mistral.py +100 -0
- agno/utils/models/openai_responses.py +140 -0
- agno/utils/models/schema_utils.py +153 -0
- agno/utils/models/watsonx.py +41 -0
- agno/utils/openai.py +257 -0
- agno/utils/pickle.py +1 -1
- agno/utils/pprint.py +124 -8
- agno/utils/print_response/agent.py +930 -0
- agno/utils/print_response/team.py +1914 -0
- agno/utils/print_response/workflow.py +1668 -0
- agno/utils/prompts.py +111 -0
- agno/utils/reasoning.py +108 -0
- agno/utils/response.py +163 -0
- agno/utils/serialize.py +32 -0
- agno/utils/shell.py +4 -4
- agno/utils/streamlit.py +487 -0
- agno/utils/string.py +204 -51
- agno/utils/team.py +139 -0
- agno/utils/timer.py +9 -2
- agno/utils/tokens.py +657 -0
- agno/utils/tools.py +19 -1
- agno/utils/whatsapp.py +305 -0
- agno/utils/yaml_io.py +3 -3
- agno/vectordb/__init__.py +2 -0
- agno/vectordb/base.py +87 -9
- agno/vectordb/cassandra/__init__.py +5 -1
- agno/vectordb/cassandra/cassandra.py +383 -27
- agno/vectordb/chroma/__init__.py +4 -0
- agno/vectordb/chroma/chromadb.py +748 -83
- agno/vectordb/clickhouse/__init__.py +7 -1
- agno/vectordb/clickhouse/clickhousedb.py +554 -53
- agno/vectordb/couchbase/__init__.py +3 -0
- agno/vectordb/couchbase/couchbase.py +1446 -0
- agno/vectordb/lancedb/__init__.py +5 -0
- agno/vectordb/lancedb/lance_db.py +730 -98
- agno/vectordb/langchaindb/__init__.py +5 -0
- agno/vectordb/langchaindb/langchaindb.py +163 -0
- agno/vectordb/lightrag/__init__.py +5 -0
- agno/vectordb/lightrag/lightrag.py +388 -0
- agno/vectordb/llamaindex/__init__.py +3 -0
- agno/vectordb/llamaindex/llamaindexdb.py +166 -0
- agno/vectordb/milvus/__init__.py +3 -0
- agno/vectordb/milvus/milvus.py +966 -78
- agno/vectordb/mongodb/__init__.py +9 -1
- agno/vectordb/mongodb/mongodb.py +1175 -172
- agno/vectordb/pgvector/__init__.py +8 -0
- agno/vectordb/pgvector/pgvector.py +599 -115
- agno/vectordb/pineconedb/__init__.py +5 -1
- agno/vectordb/pineconedb/pineconedb.py +406 -43
- agno/vectordb/qdrant/__init__.py +4 -0
- agno/vectordb/qdrant/qdrant.py +914 -61
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +682 -0
- agno/vectordb/singlestore/__init__.py +8 -1
- agno/vectordb/singlestore/singlestore.py +771 -0
- agno/vectordb/surrealdb/__init__.py +3 -0
- agno/vectordb/surrealdb/surrealdb.py +663 -0
- agno/vectordb/upstashdb/__init__.py +5 -0
- agno/vectordb/upstashdb/upstashdb.py +718 -0
- agno/vectordb/weaviate/__init__.py +8 -0
- agno/vectordb/weaviate/index.py +15 -0
- agno/vectordb/weaviate/weaviate.py +1009 -0
- agno/workflow/__init__.py +23 -1
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +759 -0
- agno/workflow/loop.py +756 -0
- agno/workflow/parallel.py +853 -0
- agno/workflow/router.py +723 -0
- agno/workflow/step.py +1564 -0
- agno/workflow/steps.py +613 -0
- agno/workflow/types.py +556 -0
- agno/workflow/workflow.py +4327 -514
- agno-2.3.13.dist-info/METADATA +639 -0
- agno-2.3.13.dist-info/RECORD +613 -0
- {agno-0.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +1 -1
- agno-2.3.13.dist-info/licenses/LICENSE +201 -0
- agno/api/playground.py +0 -91
- agno/api/schemas/playground.py +0 -22
- agno/api/schemas/user.py +0 -22
- agno/api/schemas/workspace.py +0 -46
- agno/api/user.py +0 -160
- agno/api/workspace.py +0 -151
- agno/cli/auth_server.py +0 -118
- agno/cli/config.py +0 -275
- agno/cli/console.py +0 -88
- agno/cli/credentials.py +0 -23
- agno/cli/entrypoint.py +0 -571
- agno/cli/operator.py +0 -355
- agno/cli/settings.py +0 -85
- agno/cli/ws/ws_cli.py +0 -817
- agno/constants.py +0 -13
- agno/document/__init__.py +0 -1
- agno/document/chunking/semantic.py +0 -47
- agno/document/chunking/strategy.py +0 -31
- agno/document/reader/__init__.py +0 -1
- agno/document/reader/arxiv_reader.py +0 -41
- agno/document/reader/base.py +0 -22
- agno/document/reader/csv_reader.py +0 -84
- agno/document/reader/docx_reader.py +0 -46
- agno/document/reader/firecrawl_reader.py +0 -99
- agno/document/reader/json_reader.py +0 -43
- agno/document/reader/pdf_reader.py +0 -219
- agno/document/reader/s3/pdf_reader.py +0 -46
- agno/document/reader/s3/text_reader.py +0 -51
- agno/document/reader/text_reader.py +0 -41
- agno/document/reader/website_reader.py +0 -175
- agno/document/reader/youtube_reader.py +0 -50
- agno/embedder/__init__.py +0 -1
- agno/embedder/azure_openai.py +0 -86
- agno/embedder/cohere.py +0 -72
- agno/embedder/fastembed.py +0 -37
- agno/embedder/google.py +0 -73
- agno/embedder/huggingface.py +0 -54
- agno/embedder/mistral.py +0 -80
- agno/embedder/ollama.py +0 -57
- agno/embedder/openai.py +0 -74
- agno/embedder/sentence_transformer.py +0 -38
- agno/embedder/voyageai.py +0 -64
- agno/eval/perf.py +0 -201
- agno/file/__init__.py +0 -1
- agno/file/file.py +0 -16
- agno/file/local/csv.py +0 -32
- agno/file/local/txt.py +0 -19
- agno/infra/app.py +0 -240
- agno/infra/base.py +0 -144
- agno/infra/context.py +0 -20
- agno/infra/db_app.py +0 -52
- agno/infra/resource.py +0 -205
- agno/infra/resources.py +0 -55
- agno/knowledge/agent.py +0 -230
- agno/knowledge/arxiv.py +0 -22
- agno/knowledge/combined.py +0 -22
- agno/knowledge/csv.py +0 -28
- agno/knowledge/csv_url.py +0 -19
- agno/knowledge/document.py +0 -20
- agno/knowledge/docx.py +0 -30
- agno/knowledge/json.py +0 -28
- agno/knowledge/langchain.py +0 -71
- agno/knowledge/llamaindex.py +0 -66
- agno/knowledge/pdf.py +0 -28
- agno/knowledge/pdf_url.py +0 -26
- agno/knowledge/s3/base.py +0 -60
- agno/knowledge/s3/pdf.py +0 -21
- agno/knowledge/s3/text.py +0 -23
- agno/knowledge/text.py +0 -30
- agno/knowledge/website.py +0 -88
- agno/knowledge/wikipedia.py +0 -31
- agno/knowledge/youtube.py +0 -22
- agno/memory/agent.py +0 -392
- agno/memory/classifier.py +0 -104
- agno/memory/db/__init__.py +0 -1
- agno/memory/db/base.py +0 -42
- agno/memory/db/mongodb.py +0 -189
- agno/memory/db/postgres.py +0 -203
- agno/memory/db/sqlite.py +0 -193
- agno/memory/memory.py +0 -15
- agno/memory/row.py +0 -36
- agno/memory/summarizer.py +0 -192
- agno/memory/summary.py +0 -19
- agno/memory/workflow.py +0 -38
- agno/models/google/gemini_openai.py +0 -26
- agno/models/ollama/hermes.py +0 -221
- agno/models/ollama/tools.py +0 -362
- agno/models/vertexai/gemini.py +0 -595
- agno/playground/__init__.py +0 -3
- agno/playground/async_router.py +0 -421
- agno/playground/deploy.py +0 -249
- agno/playground/operator.py +0 -92
- agno/playground/playground.py +0 -91
- agno/playground/schemas.py +0 -76
- agno/playground/serve.py +0 -55
- agno/playground/sync_router.py +0 -405
- agno/reasoning/agent.py +0 -68
- agno/run/response.py +0 -112
- agno/storage/agent/__init__.py +0 -0
- agno/storage/agent/base.py +0 -38
- agno/storage/agent/dynamodb.py +0 -350
- agno/storage/agent/json.py +0 -92
- agno/storage/agent/mongodb.py +0 -228
- agno/storage/agent/postgres.py +0 -367
- agno/storage/agent/session.py +0 -79
- agno/storage/agent/singlestore.py +0 -303
- agno/storage/agent/sqlite.py +0 -357
- agno/storage/agent/yaml.py +0 -93
- agno/storage/workflow/__init__.py +0 -0
- agno/storage/workflow/base.py +0 -40
- agno/storage/workflow/mongodb.py +0 -233
- agno/storage/workflow/postgres.py +0 -366
- agno/storage/workflow/session.py +0 -60
- agno/storage/workflow/sqlite.py +0 -359
- agno/tools/googlesearch.py +0 -88
- agno/utils/defaults.py +0 -57
- agno/utils/filesystem.py +0 -39
- agno/utils/git.py +0 -52
- agno/utils/json_io.py +0 -30
- agno/utils/load_env.py +0 -19
- agno/utils/py_io.py +0 -19
- agno/utils/pyproject.py +0 -18
- agno/utils/resource_filter.py +0 -31
- agno/vectordb/singlestore/s2vectordb.py +0 -390
- agno/vectordb/singlestore/s2vectordb2.py +0 -355
- agno/workspace/__init__.py +0 -0
- agno/workspace/config.py +0 -325
- agno/workspace/enums.py +0 -6
- agno/workspace/helpers.py +0 -48
- agno/workspace/operator.py +0 -758
- agno/workspace/settings.py +0 -63
- agno-0.1.2.dist-info/LICENSE +0 -375
- agno-0.1.2.dist-info/METADATA +0 -502
- agno-0.1.2.dist-info/RECORD +0 -352
- agno-0.1.2.dist-info/entry_points.txt +0 -3
- /agno/{cli → db/migrations}/__init__.py +0 -0
- /agno/{cli/ws → db/migrations/versions}/__init__.py +0 -0
- /agno/{document/chunking/__init__.py → db/schemas/metrics.py} +0 -0
- /agno/{document/reader/s3 → integrations}/__init__.py +0 -0
- /agno/{file/local → knowledge/chunking}/__init__.py +0 -0
- /agno/{infra → knowledge/remote_content}/__init__.py +0 -0
- /agno/{knowledge/s3 → tools/models}/__init__.py +0 -0
- /agno/{reranker → utils/models}/__init__.py +0 -0
- /agno/{storage → utils/print_response}/__init__.py +0 -0
- {agno-0.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,853 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import warnings
|
|
3
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
4
|
+
from contextvars import copy_context
|
|
5
|
+
from copy import deepcopy
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List, Optional, Union
|
|
8
|
+
from uuid import uuid4
|
|
9
|
+
|
|
10
|
+
from agno.models.metrics import Metrics
|
|
11
|
+
from agno.run.agent import RunOutputEvent
|
|
12
|
+
from agno.run.base import RunContext
|
|
13
|
+
from agno.run.team import TeamRunOutputEvent
|
|
14
|
+
from agno.run.workflow import (
|
|
15
|
+
ParallelExecutionCompletedEvent,
|
|
16
|
+
ParallelExecutionStartedEvent,
|
|
17
|
+
WorkflowRunOutput,
|
|
18
|
+
WorkflowRunOutputEvent,
|
|
19
|
+
)
|
|
20
|
+
from agno.session.workflow import WorkflowSession
|
|
21
|
+
from agno.utils.log import log_debug, logger
|
|
22
|
+
from agno.utils.merge_dict import merge_parallel_session_states
|
|
23
|
+
from agno.workflow.condition import Condition
|
|
24
|
+
from agno.workflow.step import Step
|
|
25
|
+
from agno.workflow.types import StepInput, StepOutput, StepType
|
|
26
|
+
|
|
27
|
+
# Type alias for the ordered list of steps a workflow construct accepts.
# Each entry may be:
#   * a user-defined callable taking a StepInput and returning a StepOutput
#     (directly, awaitably, or as a sync/async iterator of StepOutputs), or
#   * a concrete Step, or
#   * one of the composite step containers (Steps, Loop, Parallel, Condition,
#     Router), spelled as string forward references — presumably because those
#     modules import this one and a direct import would be circular (TODO confirm).
WorkflowSteps = List[
    Union[
        Callable[
            [StepInput], Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput]]
        ],
        Step,
        "Steps",  # type: ignore # noqa: F821
        "Loop",  # type: ignore # noqa: F821
        "Parallel",  # type: ignore # noqa: F821
        "Condition",  # type: ignore # noqa: F821
        "Router",  # type: ignore # noqa: F821
    ]
]
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@dataclass
|
|
43
|
+
class Parallel:
|
|
44
|
+
"""A list of steps that execute in parallel"""
|
|
45
|
+
|
|
46
|
+
steps: WorkflowSteps
|
|
47
|
+
|
|
48
|
+
name: Optional[str] = None
|
|
49
|
+
description: Optional[str] = None
|
|
50
|
+
|
|
51
|
+
def __init__(self, *steps: WorkflowSteps, name: Optional[str] = None, description: Optional[str] = None):
    """Collect the given steps to be executed concurrently.

    Args:
        *steps: The steps (callables, Steps, Agents, Teams, or composite
            containers) that this group will run in parallel.
        name: Optional display name for this parallel group.
        description: Optional human-readable description of the group.
    """
    self.name = name
    self.description = description
    # Materialize the varargs tuple into a mutable list; _prepare_steps
    # rewrites this list in place later.
    self.steps = list(steps)
|
|
60
|
+
|
|
61
|
+
def _prepare_steps(self) -> None:
    """Normalize every entry of ``self.steps`` into an executable step.

    Mirrors the equivalent preparation logic on Workflow: bare callables,
    Agents, and Teams are wrapped in a Step; already-prepared step types
    pass through unchanged. Rewrites ``self.steps`` in place.

    Raises:
        ValueError: If an entry is not a recognized step type.
    """
    # Local imports — presumably to avoid circular imports between the
    # workflow, agent, and team modules (TODO confirm).
    from agno.agent.agent import Agent
    from agno.team.team import Team
    from agno.workflow.loop import Loop
    from agno.workflow.router import Router
    from agno.workflow.step import Step
    from agno.workflow.steps import Steps

    prepared_steps: WorkflowSteps = []
    for step in self.steps:
        # NOTE: branch order matters — the callable check must run first so
        # that plain functions (which have __name__) are wrapped before the
        # isinstance checks below are consulted.
        if callable(step) and hasattr(step, "__name__"):
            prepared_steps.append(Step(name=step.__name__, description="User-defined callable step", executor=step))
        elif isinstance(step, Agent):
            # Wrap an Agent in a Step, carrying over its name/description.
            prepared_steps.append(Step(name=step.name, description=step.description, agent=step))
        elif isinstance(step, Team):
            # Wrap a Team in a Step, carrying over its name/description.
            prepared_steps.append(Step(name=step.name, description=step.description, team=step))
        elif isinstance(step, (Step, Steps, Loop, Parallel, Condition, Router)):
            # Already a prepared step or composite container — pass through.
            prepared_steps.append(step)
        else:
            raise ValueError(f"Invalid step type: {type(step).__name__}")

    self.steps = prepared_steps
|
|
84
|
+
|
|
85
|
+
def _aggregate_results(self, step_outputs: List[StepOutput]) -> StepOutput:
    """Aggregate multiple step outputs into a single Parallel StepOutput.

    The returned output carries the combined markdown content, the merged
    media lists, summed metrics, and the individual child outputs under
    ``steps``. ``success`` is False if any child failed; ``stop`` is True if
    any child requested early termination.

    Args:
        step_outputs: The (already flattened) outputs of the parallel steps.

    Returns:
        A single StepOutput representing the whole parallel block.
    """
    if not step_outputs:
        # Fix: use the StepType enum and set the executor fields, consistent
        # with the non-empty branches below (previously the bare string
        # "Parallel" was used here and the executor fields were omitted).
        return StepOutput(
            step_name=self.name or "Parallel",
            step_id=str(uuid4()),
            step_type=StepType.PARALLEL,
            executor_type="parallel",
            executor_name=self.name or "Parallel",
            content="No parallel steps executed",
            steps=[],
        )

    # Shared across both the single- and multi-result branches.
    aggregated_metrics = self._extract_metrics_from_response(step_outputs)
    aggregated_content = self._build_aggregated_content(step_outputs)

    if len(step_outputs) == 1:
        # Single result, but still create a Parallel container so callers see
        # a uniform shape regardless of how many steps ran.
        single_result = step_outputs[0]
        return StepOutput(
            step_name=self.name or "Parallel",
            step_id=str(uuid4()),
            step_type=StepType.PARALLEL,
            executor_type="parallel",  # added for consistency with the multi-result branch
            executor_name=self.name or "Parallel",
            content=aggregated_content,
            images=single_result.images,
            videos=single_result.videos,
            audio=single_result.audio,
            metrics=aggregated_metrics,
            success=single_result.success,
            error=single_result.error,
            stop=single_result.stop,
            steps=step_outputs,
        )

    # Any child may request early termination of the workflow.
    early_termination_requested = any(output.stop for output in step_outputs if hasattr(output, "stop"))

    # Combine all media from parallel steps and track failures.
    all_images = []
    all_videos = []
    all_audio = []
    has_any_failure = False
    for result in step_outputs:
        all_images.extend(result.images or [])
        all_videos.extend(result.videos or [])
        all_audio.extend(result.audio or [])
        if result.success is False:
            has_any_failure = True

    return StepOutput(
        step_name=self.name or "Parallel",
        step_id=str(uuid4()),
        step_type=StepType.PARALLEL,
        executor_type="parallel",
        executor_name=self.name or "Parallel",
        content=aggregated_content,
        images=all_images if all_images else None,
        videos=all_videos if all_videos else None,
        audio=all_audio if all_audio else None,
        success=not has_any_failure,
        stop=early_termination_requested,
        metrics=aggregated_metrics,
        steps=step_outputs,
    )
def _extract_metrics_from_response(self, step_outputs: List[StepOutput]) -> Optional[Metrics]:
    """Sum the metrics of all parallel step outputs into one Metrics object.

    Returns ``None`` when there are no outputs, or when the summed metrics
    carry no usage at all (zero tokens and no duration).
    """
    if not step_outputs:
        return None

    combined = Metrics()
    for output in step_outputs:
        if output.metrics:
            combined = combined + output.metrics

    # Treat an all-zero aggregate as "no metrics available".
    no_usage = (
        combined.input_tokens == 0
        and combined.output_tokens == 0
        and combined.total_tokens == 0
        and combined.duration is None
    )
    return None if no_usage else combined
def _build_aggregated_content(self, step_outputs: List[StepOutput]) -> str:
    """Render the outputs of all parallel steps as a single markdown document.

    Each step gets a ``###`` section headed by a success/failure indicator;
    empty content is rendered as ``*(No content)*``.
    """
    sections = ["## Parallel Execution Results\n\n"]

    for position, output in enumerate(step_outputs):
        title = output.step_name or f"Step {position + 1}"
        body = output.content or ""

        # Status indicator per step
        prefix = "❌ FAILURE:" if output.success is False else "✅ SUCCESS:"

        sections.append(f"### {prefix} {title}\n")
        if body and str(body).strip():
            sections.append(f"{body}\n\n")
        else:
            sections.append("*(No content)*\n\n")

    return "".join(sections).strip()
def execute(
    self,
    step_input: StepInput,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    workflow_run_response: Optional[WorkflowRunOutput] = None,
    store_executor_outputs: bool = True,
    run_context: Optional[RunContext] = None,
    session_state: Optional[Dict[str, Any]] = None,
    workflow_session: Optional[WorkflowSession] = None,
    add_workflow_history_to_steps: Optional[bool] = False,
    num_history_runs: int = 3,
    background_tasks: Optional[Any] = None,
) -> StepOutput:
    """Execute all steps in parallel and return aggregated result.

    Each step runs on its own thread (one worker per step). Unless a
    ``run_context`` carries the session state (in which case the direct
    reference is shared), every step receives its own deepcopy of
    ``session_state``; the copies are merged back afterwards via
    ``merge_parallel_session_states``. A step that raises is converted into a
    failed ``StepOutput`` instead of propagating. Results are re-ordered by
    each step's original position, flattened (steps such as Condition/Loop
    return lists), and combined through ``_aggregate_results``.
    """
    # Use workflow logger for parallel orchestration
    log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")

    self._prepare_steps()

    # Create individual session_state copies for each step to prevent race conditions
    session_state_copies = []
    for _ in range(len(self.steps)):
        # If using run context, no need to deepcopy the state. We want the direct reference.
        if run_context is not None and run_context.session_state is not None:
            session_state_copies.append(run_context.session_state)
        else:
            if session_state is not None:
                session_state_copies.append(deepcopy(session_state))
            else:
                session_state_copies.append({})

    def execute_step_with_index(step_with_index):
        """Execute a single step and preserve its original index."""
        idx, step = step_with_index
        # Use the individual session_state copy for this step
        step_session_state = session_state_copies[idx]

        try:
            step_result = step.execute(
                step_input,
                session_id=session_id,
                user_id=user_id,
                workflow_run_response=workflow_run_response,
                store_executor_outputs=store_executor_outputs,
                workflow_session=workflow_session,
                add_workflow_history_to_steps=add_workflow_history_to_steps,
                num_history_runs=num_history_runs,
                run_context=run_context,
                session_state=step_session_state,
                background_tasks=background_tasks,
            )  # type: ignore[union-attr]
            return idx, step_result, step_session_state
        except Exception as exc:
            # Failures become a failed StepOutput so one bad step does not
            # abort the sibling steps.
            parallel_step_name = getattr(step, "name", f"step_{idx}")
            logger.error(f"Parallel step {parallel_step_name} failed: {exc}")
            return (
                idx,
                StepOutput(
                    step_name=parallel_step_name,
                    content=f"Step {parallel_step_name} failed: {str(exc)}",
                    success=False,
                    error=str(exc),
                ),
                step_session_state,
            )

    # Use index to preserve order
    indexed_steps = list(enumerate(self.steps))

    with ThreadPoolExecutor(max_workers=len(self.steps)) as executor:
        # Submit all tasks with their original indices
        # Use copy_context().run to propagate context variables to child threads
        future_to_index = {
            executor.submit(copy_context().run, execute_step_with_index, indexed_step): indexed_step[0]
            for indexed_step in indexed_steps
        }

        # Collect results and modified session_state copies
        results_with_indices = []
        modified_session_states = []
        for future in as_completed(future_to_index):
            try:
                index, result, modified_session_state = future.result()
                results_with_indices.append((index, result))
                modified_session_states.append(modified_session_state)
                step_name = getattr(self.steps[index], "name", f"step_{index}")
                log_debug(f"Parallel step {step_name} completed")
            except Exception as e:
                # Defensive: execute_step_with_index already catches its own
                # exceptions, so this mainly covers executor-level failures.
                # NOTE(review): in this branch the step's session_state copy is
                # not appended to modified_session_states, so its changes are
                # never merged back — confirm this is intended.
                index = future_to_index[future]
                step_name = getattr(self.steps[index], "name", f"step_{index}")
                logger.error(f"Parallel step {step_name} failed: {e}")
                results_with_indices.append(
                    (
                        index,
                        StepOutput(
                            step_name=step_name,
                            content=f"Step {step_name} failed: {str(e)}",
                            success=False,
                            error=str(e),
                        ),
                    )
                )

    # Merge the per-step copies back into the caller's session_state (skipped
    # when run_context supplied a shared reference — nothing to merge then).
    if run_context is None and session_state is not None:
        merge_parallel_session_states(session_state, modified_session_states)

    # Sort by original index to preserve order
    results_with_indices.sort(key=lambda x: x[0])
    results = [result for _, result in results_with_indices]

    # Flatten results - handle steps that return List[StepOutput] (like Condition/Loop)
    flattened_results: List[StepOutput] = []
    for result in results:
        if isinstance(result, list):
            flattened_results.extend(result)
        else:
            flattened_results.append(result)

    # Aggregate all results into a single StepOutput
    aggregated_result = self._aggregate_results(flattened_results)

    # Use workflow logger for parallel completion
    log_debug(f"Parallel End: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")

    return aggregated_result
def execute_stream(
    self,
    step_input: StepInput,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    stream_events: bool = False,
    stream_intermediate_steps: bool = False,
    stream_executor_events: bool = True,
    workflow_run_response: Optional[WorkflowRunOutput] = None,
    step_index: Optional[Union[int, tuple]] = None,
    store_executor_outputs: bool = True,
    run_context: Optional[RunContext] = None,
    session_state: Optional[Dict[str, Any]] = None,
    parent_step_id: Optional[str] = None,
    workflow_session: Optional[WorkflowSession] = None,
    add_workflow_history_to_steps: Optional[bool] = False,
    num_history_runs: int = 3,
    background_tasks: Optional[Any] = None,
) -> Iterator[Union[WorkflowRunOutputEvent, StepOutput]]:
    """Execute all steps in parallel with streaming support.

    Runs each step's ``execute_stream`` on its own thread; worker threads push
    events into a thread-safe queue and this generator yields them as they
    arrive (StepOutputs are collected rather than re-yielded). After all steps
    complete it yields the aggregated ``StepOutput``, and — when streaming is
    on — a ParallelExecutionStartedEvent before and a
    ParallelExecutionCompletedEvent after.
    """
    log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")

    # Shared id so child events can point back at this parallel block.
    parallel_step_id = str(uuid4())

    self._prepare_steps()

    # Create individual session_state copies for each step to prevent race conditions
    session_state_copies = []
    for _ in range(len(self.steps)):
        # If using run context, no need to deepcopy the state. We want the direct reference.
        if run_context is not None and run_context.session_state is not None:
            session_state_copies.append(run_context.session_state)
        else:
            if session_state is not None:
                session_state_copies.append(deepcopy(session_state))
            else:
                session_state_copies.append({})

    # Considering both stream_events and stream_intermediate_steps (deprecated)
    # NOTE(review): stream_intermediate_steps defaults to False (a bool), so
    # "is not None" is always true and this DeprecationWarning fires on every
    # call — confirm whether the check should be on truthiness instead.
    if stream_intermediate_steps is not None:
        warnings.warn(
            "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        stream_events = stream_events or stream_intermediate_steps

    if stream_events and workflow_run_response:
        # Yield parallel step started event
        yield ParallelExecutionStartedEvent(
            run_id=workflow_run_response.run_id or "",
            workflow_name=workflow_run_response.workflow_name or "",
            workflow_id=workflow_run_response.workflow_id or "",
            session_id=workflow_run_response.session_id or "",
            step_name=self.name,
            step_index=step_index,
            parallel_step_count=len(self.steps),
            step_id=parallel_step_id,
            parent_step_id=parent_step_id,
        )

    import queue

    # Thread-safe channel between the worker threads and this generator.
    event_queue = queue.Queue()  # type: ignore
    step_results = []
    modified_session_states = []

    def execute_step_stream_with_index(step_with_index):
        """Execute a single step with streaming and put events in queue immediately."""
        idx, step = step_with_index
        # Use the individual session_state copy for this step
        step_session_state = session_state_copies[idx]

        try:
            step_outputs = []

            # If step_index is None or integer (main step): create (step_index, sub_index)
            # If step_index is tuple (child step): all parallel sub-steps get same index
            if step_index is None or isinstance(step_index, int):
                # Parallel is a main step - sub-steps get sequential numbers: 1.1, 1.2, 1.3
                sub_step_index = (step_index if step_index is not None else 0, idx)
            else:
                # Parallel is a child step - all sub-steps get the same parent number: 1.1, 1.1, 1.1
                sub_step_index = step_index

            # All workflow step types have execute_stream() method
            for event in step.execute_stream(  # type: ignore[union-attr]
                step_input,
                session_id=session_id,
                user_id=user_id,
                stream_events=stream_events,
                stream_executor_events=stream_executor_events,
                workflow_run_response=workflow_run_response,
                step_index=sub_step_index,
                store_executor_outputs=store_executor_outputs,
                session_state=step_session_state,
                run_context=run_context,
                parent_step_id=parallel_step_id,
                workflow_session=workflow_session,
                add_workflow_history_to_steps=add_workflow_history_to_steps,
                num_history_runs=num_history_runs,
                background_tasks=background_tasks,
            ):
                # Put event immediately in queue
                event_queue.put(("event", idx, event))
                if isinstance(event, StepOutput):
                    step_outputs.append(event)

            # Signal completion for this step
            event_queue.put(("complete", idx, step_outputs, step_session_state))
            return idx, step_outputs, step_session_state
        except Exception as exc:
            # Convert failures into a failed StepOutput and still signal
            # completion so the consumer loop below can make progress.
            parallel_step_name = getattr(step, "name", f"step_{idx}")
            logger.error(f"Parallel step {parallel_step_name} streaming failed: {exc}")
            error_event = StepOutput(
                step_name=parallel_step_name,
                content=f"Step {parallel_step_name} failed: {str(exc)}",
                success=False,
                error=str(exc),
            )
            event_queue.put(("event", idx, error_event))
            event_queue.put(("complete", idx, [error_event], step_session_state))
            return idx, [error_event], step_session_state

    # Submit all parallel tasks
    indexed_steps = list(enumerate(self.steps))

    with ThreadPoolExecutor(max_workers=len(self.steps)) as executor:
        # Submit all tasks
        # Use copy_context().run to propagate context variables to child threads
        futures = [
            executor.submit(copy_context().run, execute_step_stream_with_index, indexed_step)
            for indexed_step in indexed_steps
        ]

        # Process events from queue as they arrive
        completed_steps = 0
        total_steps = len(self.steps)

        while completed_steps < total_steps:
            try:
                # Poll with a timeout so a dead worker cannot hang this loop.
                message_type, step_idx, *data = event_queue.get(timeout=1.0)

                if message_type == "event":
                    event = data[0]
                    # Yield events immediately as they arrive (except StepOutputs)
                    if not isinstance(event, StepOutput):
                        yield event

                elif message_type == "complete":
                    step_outputs, step_session_state = data
                    step_results.extend(step_outputs)
                    modified_session_states.append(step_session_state)
                    completed_steps += 1

                    step_name = getattr(self.steps[step_idx], "name", f"step_{step_idx}")
                    log_debug(f"Parallel step {step_name} streaming completed")

            except queue.Empty:
                # No event within the timeout: check for workers that died
                # without queuing a "complete" message.
                # NOTE(review): this increments completed_steps at most once
                # per timeout even if several futures failed, and may count a
                # future whose "complete" message arrives later — confirm.
                for i, future in enumerate(futures):
                    if future.done() and future.exception():
                        logger.error(f"Parallel step {i} failed: {future.exception()}")
                        if completed_steps < total_steps:
                            completed_steps += 1
            except Exception as e:
                logger.error(f"Error processing parallel step events: {e}")
                completed_steps += 1

        # Surface any exceptions the workers raised (execute_step_stream_with_index
        # normally swallows its own, so this is defensive logging only).
        for future in futures:
            try:
                future.result()
            except Exception as e:
                logger.error(f"Future completion error: {e}")

    # Merge all session_state changes back into the original session_state
    if run_context is None and session_state is not None:
        merge_parallel_session_states(session_state, modified_session_states)

    # Flatten step_results - handle steps that return List[StepOutput] (like Condition/Loop)
    flattened_step_results: List[StepOutput] = []
    for result in step_results:
        if isinstance(result, list):
            flattened_step_results.extend(result)
        else:
            flattened_step_results.append(result)

    # Create aggregated result from all step outputs
    aggregated_result = self._aggregate_results(flattened_step_results)

    # Yield the final aggregated StepOutput
    yield aggregated_result

    log_debug(f"Parallel End: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")

    if stream_events and workflow_run_response:
        # Yield parallel step completed event
        yield ParallelExecutionCompletedEvent(
            run_id=workflow_run_response.run_id or "",
            workflow_name=workflow_run_response.workflow_name or "",
            workflow_id=workflow_run_response.workflow_id or "",
            session_id=workflow_run_response.session_id or "",
            step_name=self.name,
            step_index=step_index,
            parallel_step_count=len(self.steps),
            step_results=flattened_step_results,
            step_id=parallel_step_id,
            parent_step_id=parent_step_id,
        )
async def aexecute(
    self,
    step_input: StepInput,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    workflow_run_response: Optional[WorkflowRunOutput] = None,
    store_executor_outputs: bool = True,
    run_context: Optional[RunContext] = None,
    session_state: Optional[Dict[str, Any]] = None,
    workflow_session: Optional[WorkflowSession] = None,
    add_workflow_history_to_steps: Optional[bool] = False,
    num_history_runs: int = 3,
    background_tasks: Optional[Any] = None,
) -> StepOutput:
    """Execute all steps in parallel using asyncio and return aggregated result.

    Async counterpart of ``execute``: steps run concurrently via
    ``asyncio.gather(..., return_exceptions=True)``, each with its own
    deepcopy of ``session_state`` (or the shared ``run_context`` state). Both
    in-step exceptions and exceptions surfaced by gather are converted into
    failed ``StepOutput``s; outputs are re-ordered, flattened, and combined
    through ``_aggregate_results``.
    """
    # Use workflow logger for async parallel orchestration
    log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")

    self._prepare_steps()

    # Create individual session_state copies for each step to prevent race conditions
    session_state_copies = []
    for _ in range(len(self.steps)):
        # If using run context, no need to deepcopy the state. We want the direct reference.
        if run_context is not None and run_context.session_state is not None:
            session_state_copies.append(run_context.session_state)
        else:
            if session_state is not None:
                session_state_copies.append(deepcopy(session_state))
            else:
                session_state_copies.append({})

    async def execute_step_async_with_index(step_with_index):
        """Execute a single step asynchronously and preserve its original index."""
        idx, step = step_with_index
        # Use the individual session_state copy for this step
        step_session_state = session_state_copies[idx]

        try:
            inner_step_result = await step.aexecute(
                step_input,
                session_id=session_id,
                user_id=user_id,
                workflow_run_response=workflow_run_response,
                store_executor_outputs=store_executor_outputs,
                workflow_session=workflow_session,
                add_workflow_history_to_steps=add_workflow_history_to_steps,
                num_history_runs=num_history_runs,
                session_state=step_session_state,
                run_context=run_context,
                background_tasks=background_tasks,
            )  # type: ignore[union-attr]
            return idx, inner_step_result, step_session_state
        except Exception as exc:
            # Failures become a failed StepOutput so one bad step does not
            # abort the sibling steps.
            parallel_step_name = getattr(step, "name", f"step_{idx}")
            logger.error(f"Parallel step {parallel_step_name} failed: {exc}")
            return (
                idx,
                StepOutput(
                    step_name=parallel_step_name,
                    content=f"Step {parallel_step_name} failed: {str(exc)}",
                    success=False,
                    error=str(exc),
                ),
                step_session_state,
            )

    # Use index to preserve order
    indexed_steps = list(enumerate(self.steps))

    # Create tasks for all steps with their indices
    tasks = [execute_step_async_with_index(indexed_step) for indexed_step in indexed_steps]

    # Execute all tasks concurrently
    results_with_indices = await asyncio.gather(*tasks, return_exceptions=True)

    # Process results and handle exceptions, preserving order
    # (gather returns results in submission order, so the enumerate index `i`
    # matches the step's original position when an Exception is returned.)
    processed_results_with_indices = []
    modified_session_states = []
    for i, result in enumerate(results_with_indices):
        if isinstance(result, Exception):
            step_name = getattr(self.steps[i], "name", f"step_{i}")
            logger.error(f"Parallel step {step_name} failed: {result}")
            processed_results_with_indices.append(
                (
                    i,
                    StepOutput(
                        step_name=step_name,
                        content=f"Step {step_name} failed: {str(result)}",
                        success=False,
                        error=str(result),
                    ),
                )
            )
            # Still collect the session state copy for failed steps
            modified_session_states.append(session_state_copies[i])
        else:
            index, step_result, modified_session_state = result  # type: ignore[misc]
            processed_results_with_indices.append((index, step_result))
            modified_session_states.append(modified_session_state)
            step_name = getattr(self.steps[index], "name", f"step_{index}")
            log_debug(f"Parallel step {step_name} completed")

    # Smart merge all session_state changes back into the original session_state
    if run_context is None and session_state is not None:
        merge_parallel_session_states(session_state, modified_session_states)

    # Sort by original index to preserve order
    processed_results_with_indices.sort(key=lambda x: x[0])
    results = [result for _, result in processed_results_with_indices]

    # Flatten results - handle steps that return List[StepOutput] (like Condition/Loop)
    flattened_results: List[StepOutput] = []
    for result in results:
        if isinstance(result, list):
            flattened_results.extend(result)
        else:
            flattened_results.append(result)

    # Aggregate all results into a single StepOutput
    aggregated_result = self._aggregate_results(flattened_results)

    # Use workflow logger for async parallel completion
    log_debug(f"Parallel End: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")

    return aggregated_result
async def aexecute_stream(
|
|
663
|
+
self,
|
|
664
|
+
step_input: StepInput,
|
|
665
|
+
session_id: Optional[str] = None,
|
|
666
|
+
user_id: Optional[str] = None,
|
|
667
|
+
stream_events: bool = False,
|
|
668
|
+
stream_intermediate_steps: bool = False,
|
|
669
|
+
stream_executor_events: bool = True,
|
|
670
|
+
workflow_run_response: Optional[WorkflowRunOutput] = None,
|
|
671
|
+
step_index: Optional[Union[int, tuple]] = None,
|
|
672
|
+
store_executor_outputs: bool = True,
|
|
673
|
+
run_context: Optional[RunContext] = None,
|
|
674
|
+
session_state: Optional[Dict[str, Any]] = None,
|
|
675
|
+
parent_step_id: Optional[str] = None,
|
|
676
|
+
workflow_session: Optional[WorkflowSession] = None,
|
|
677
|
+
add_workflow_history_to_steps: Optional[bool] = False,
|
|
678
|
+
num_history_runs: int = 3,
|
|
679
|
+
background_tasks: Optional[Any] = None,
|
|
680
|
+
) -> AsyncIterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent, StepOutput]]:
|
|
681
|
+
"""Execute all steps in parallel with async streaming support"""
|
|
682
|
+
log_debug(f"Parallel Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
|
|
683
|
+
|
|
684
|
+
parallel_step_id = str(uuid4())
|
|
685
|
+
|
|
686
|
+
self._prepare_steps()
|
|
687
|
+
|
|
688
|
+
# Create individual session_state copies for each step to prevent race conditions
|
|
689
|
+
session_state_copies = []
|
|
690
|
+
for _ in range(len(self.steps)):
|
|
691
|
+
# If using run context, no need to deepcopy the state. We want the direct reference.
|
|
692
|
+
if run_context is not None and run_context.session_state is not None:
|
|
693
|
+
session_state_copies.append(run_context.session_state)
|
|
694
|
+
else:
|
|
695
|
+
if session_state is not None:
|
|
696
|
+
session_state_copies.append(deepcopy(session_state))
|
|
697
|
+
else:
|
|
698
|
+
session_state_copies.append({})
|
|
699
|
+
|
|
700
|
+
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
701
|
+
if stream_intermediate_steps is not None:
|
|
702
|
+
warnings.warn(
|
|
703
|
+
"The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
|
|
704
|
+
DeprecationWarning,
|
|
705
|
+
stacklevel=2,
|
|
706
|
+
)
|
|
707
|
+
stream_events = stream_events or stream_intermediate_steps
|
|
708
|
+
|
|
709
|
+
if stream_events and workflow_run_response:
|
|
710
|
+
# Yield parallel step started event
|
|
711
|
+
yield ParallelExecutionStartedEvent(
|
|
712
|
+
run_id=workflow_run_response.run_id or "",
|
|
713
|
+
workflow_name=workflow_run_response.workflow_name or "",
|
|
714
|
+
workflow_id=workflow_run_response.workflow_id or "",
|
|
715
|
+
session_id=workflow_run_response.session_id or "",
|
|
716
|
+
step_name=self.name,
|
|
717
|
+
step_index=step_index,
|
|
718
|
+
parallel_step_count=len(self.steps),
|
|
719
|
+
step_id=parallel_step_id,
|
|
720
|
+
parent_step_id=parent_step_id,
|
|
721
|
+
)
|
|
722
|
+
|
|
723
|
+
import asyncio
|
|
724
|
+
|
|
725
|
+
event_queue = asyncio.Queue() # type: ignore
|
|
726
|
+
step_results = []
|
|
727
|
+
modified_session_states = []
|
|
728
|
+
|
|
729
|
+
async def execute_step_stream_async_with_index(step_with_index):
    """Run one parallel sub-step's async stream, forwarding events to the shared queue.

    Each event is pushed onto ``event_queue`` as an ``("event", idx, event)`` tuple
    the moment it arrives; a final ``("complete", idx, outputs, state)`` tuple
    signals that this sub-step has finished. The same ``(idx, outputs, state)``
    triple is also returned so gathered task results mirror the queue protocol.
    On any exception a failed ``StepOutput`` is emitted through both channels
    instead of propagating the error.
    """
    idx, step = step_with_index
    # Each sub-step mutates its own session_state copy; copies are merged later.
    step_session_state = session_state_copies[idx]

    try:
        collected_outputs = []

        # Main-step Parallel (index is None or an int): sub-steps are numbered
        # (parent, idx) -> 1.1, 1.2, 1.3. Child-step Parallel (index already a
        # composite): every sub-step shares the parent's index -> 1.1, 1.1, 1.1.
        if step_index is None or isinstance(step_index, int):
            sub_step_index = (0 if step_index is None else step_index, idx)
        else:
            sub_step_index = step_index

        # Every workflow step type exposes aexecute_stream().
        async for event in step.aexecute_stream(
            step_input,
            session_id=session_id,
            user_id=user_id,
            stream_events=stream_events,
            stream_executor_events=stream_executor_events,
            workflow_run_response=workflow_run_response,
            step_index=sub_step_index,
            store_executor_outputs=store_executor_outputs,
            session_state=step_session_state,
            run_context=run_context,
            parent_step_id=parallel_step_id,
            workflow_session=workflow_session,
            add_workflow_history_to_steps=add_workflow_history_to_steps,
            num_history_runs=num_history_runs,
            background_tasks=background_tasks,
        ):  # type: ignore[union-attr]
            # Forward immediately so the consumer can interleave events live.
            await event_queue.put(("event", idx, event))
            if isinstance(event, StepOutput):
                collected_outputs.append(event)

        # Tell the consumer loop this sub-step is done.
        await event_queue.put(("complete", idx, collected_outputs, step_session_state))
        return idx, collected_outputs, step_session_state
    except Exception as exc:
        failed_name = getattr(step, "name", f"step_{idx}")
        logger.error(f"Parallel step {failed_name} async streaming failed: {exc}")
        # Convert the failure into a StepOutput so the aggregate result still
        # accounts for this sub-step instead of the whole Parallel crashing.
        failure = StepOutput(
            step_name=failed_name,
            content=f"Step {failed_name} failed: {str(exc)}",
            success=False,
            error=str(exc),
        )
        await event_queue.put(("event", idx, failure))
        await event_queue.put(("complete", idx, [failure], step_session_state))
        return idx, [failure], step_session_state
|
|
785
|
+
|
|
786
|
+
# Start all parallel tasks
|
|
787
|
+
indexed_steps = list(enumerate(self.steps))
|
|
788
|
+
tasks = [
|
|
789
|
+
asyncio.create_task(execute_step_stream_async_with_index(indexed_step)) for indexed_step in indexed_steps
|
|
790
|
+
]
|
|
791
|
+
|
|
792
|
+
# Process events as they arrive and track completion
|
|
793
|
+
completed_steps = 0
|
|
794
|
+
total_steps = len(self.steps)
|
|
795
|
+
|
|
796
|
+
while completed_steps < total_steps:
|
|
797
|
+
try:
|
|
798
|
+
message_type, step_idx, *data = await event_queue.get()
|
|
799
|
+
|
|
800
|
+
if message_type == "event":
|
|
801
|
+
event = data[0]
|
|
802
|
+
if not isinstance(event, StepOutput):
|
|
803
|
+
yield event
|
|
804
|
+
|
|
805
|
+
elif message_type == "complete":
|
|
806
|
+
step_outputs, step_session_state = data
|
|
807
|
+
step_results.extend(step_outputs)
|
|
808
|
+
modified_session_states.append(step_session_state)
|
|
809
|
+
completed_steps += 1
|
|
810
|
+
|
|
811
|
+
step_name = getattr(self.steps[step_idx], "name", f"step_{step_idx}")
|
|
812
|
+
log_debug(f"Parallel step {step_name} async streaming completed")
|
|
813
|
+
|
|
814
|
+
except Exception as e:
|
|
815
|
+
logger.error(f"Error processing parallel step events: {e}")
|
|
816
|
+
completed_steps += 1
|
|
817
|
+
|
|
818
|
+
await asyncio.gather(*tasks, return_exceptions=True)
|
|
819
|
+
|
|
820
|
+
# Merge all session_state changes back into the original session_state
|
|
821
|
+
if run_context is None and session_state is not None:
|
|
822
|
+
merge_parallel_session_states(session_state, modified_session_states)
|
|
823
|
+
|
|
824
|
+
# Flatten step_results - handle steps that return List[StepOutput] (like Condition/Loop)
|
|
825
|
+
flattened_step_results: List[StepOutput] = []
|
|
826
|
+
for result in step_results:
|
|
827
|
+
if isinstance(result, list):
|
|
828
|
+
flattened_step_results.extend(result)
|
|
829
|
+
else:
|
|
830
|
+
flattened_step_results.append(result)
|
|
831
|
+
|
|
832
|
+
# Create aggregated result from all step outputs
|
|
833
|
+
aggregated_result = self._aggregate_results(flattened_step_results)
|
|
834
|
+
|
|
835
|
+
# Yield the final aggregated StepOutput
|
|
836
|
+
yield aggregated_result
|
|
837
|
+
|
|
838
|
+
log_debug(f"Parallel End: {self.name} ({len(self.steps)} steps)", center=True, symbol="=")
|
|
839
|
+
|
|
840
|
+
if stream_events and workflow_run_response:
|
|
841
|
+
# Yield parallel step completed event
|
|
842
|
+
yield ParallelExecutionCompletedEvent(
|
|
843
|
+
run_id=workflow_run_response.run_id or "",
|
|
844
|
+
workflow_name=workflow_run_response.workflow_name or "",
|
|
845
|
+
workflow_id=workflow_run_response.workflow_id or "",
|
|
846
|
+
session_id=workflow_run_response.session_id or "",
|
|
847
|
+
step_name=self.name,
|
|
848
|
+
step_index=step_index,
|
|
849
|
+
parallel_step_count=len(self.steps),
|
|
850
|
+
step_results=flattened_step_results,
|
|
851
|
+
step_id=parallel_step_id,
|
|
852
|
+
parent_step_id=parent_step_id,
|
|
853
|
+
)
|