agno-2.2.13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/__init__.py +8 -0
- agno/agent/__init__.py +51 -0
- agno/agent/agent.py +10405 -0
- agno/api/__init__.py +0 -0
- agno/api/agent.py +28 -0
- agno/api/api.py +40 -0
- agno/api/evals.py +22 -0
- agno/api/os.py +17 -0
- agno/api/routes.py +13 -0
- agno/api/schemas/__init__.py +9 -0
- agno/api/schemas/agent.py +16 -0
- agno/api/schemas/evals.py +16 -0
- agno/api/schemas/os.py +14 -0
- agno/api/schemas/response.py +6 -0
- agno/api/schemas/team.py +16 -0
- agno/api/schemas/utils.py +21 -0
- agno/api/schemas/workflows.py +16 -0
- agno/api/settings.py +53 -0
- agno/api/team.py +30 -0
- agno/api/workflow.py +28 -0
- agno/cloud/aws/base.py +214 -0
- agno/cloud/aws/s3/__init__.py +2 -0
- agno/cloud/aws/s3/api_client.py +43 -0
- agno/cloud/aws/s3/bucket.py +195 -0
- agno/cloud/aws/s3/object.py +57 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/__init__.py +24 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +598 -0
- agno/db/dynamo/__init__.py +3 -0
- agno/db/dynamo/dynamo.py +2042 -0
- agno/db/dynamo/schemas.py +314 -0
- agno/db/dynamo/utils.py +743 -0
- agno/db/firestore/__init__.py +3 -0
- agno/db/firestore/firestore.py +1795 -0
- agno/db/firestore/schemas.py +140 -0
- agno/db/firestore/utils.py +376 -0
- agno/db/gcs_json/__init__.py +3 -0
- agno/db/gcs_json/gcs_json_db.py +1335 -0
- agno/db/gcs_json/utils.py +228 -0
- agno/db/in_memory/__init__.py +3 -0
- agno/db/in_memory/in_memory_db.py +1160 -0
- agno/db/in_memory/utils.py +230 -0
- agno/db/json/__init__.py +3 -0
- agno/db/json/json_db.py +1328 -0
- agno/db/json/utils.py +230 -0
- agno/db/migrations/__init__.py +0 -0
- agno/db/migrations/v1_to_v2.py +635 -0
- agno/db/mongo/__init__.py +17 -0
- agno/db/mongo/async_mongo.py +2026 -0
- agno/db/mongo/mongo.py +1982 -0
- agno/db/mongo/schemas.py +87 -0
- agno/db/mongo/utils.py +259 -0
- agno/db/mysql/__init__.py +3 -0
- agno/db/mysql/mysql.py +2308 -0
- agno/db/mysql/schemas.py +138 -0
- agno/db/mysql/utils.py +355 -0
- agno/db/postgres/__init__.py +4 -0
- agno/db/postgres/async_postgres.py +1927 -0
- agno/db/postgres/postgres.py +2260 -0
- agno/db/postgres/schemas.py +139 -0
- agno/db/postgres/utils.py +442 -0
- agno/db/redis/__init__.py +3 -0
- agno/db/redis/redis.py +1660 -0
- agno/db/redis/schemas.py +123 -0
- agno/db/redis/utils.py +346 -0
- agno/db/schemas/__init__.py +4 -0
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +33 -0
- agno/db/schemas/knowledge.py +40 -0
- agno/db/schemas/memory.py +46 -0
- agno/db/schemas/metrics.py +0 -0
- agno/db/singlestore/__init__.py +3 -0
- agno/db/singlestore/schemas.py +130 -0
- agno/db/singlestore/singlestore.py +2272 -0
- agno/db/singlestore/utils.py +384 -0
- agno/db/sqlite/__init__.py +4 -0
- agno/db/sqlite/async_sqlite.py +2293 -0
- agno/db/sqlite/schemas.py +133 -0
- agno/db/sqlite/sqlite.py +2288 -0
- agno/db/sqlite/utils.py +431 -0
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +309 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1353 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +116 -0
- agno/debug.py +18 -0
- agno/eval/__init__.py +14 -0
- agno/eval/accuracy.py +834 -0
- agno/eval/performance.py +773 -0
- agno/eval/reliability.py +306 -0
- agno/eval/utils.py +119 -0
- agno/exceptions.py +161 -0
- agno/filters.py +354 -0
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +52 -0
- agno/integrations/__init__.py +0 -0
- agno/integrations/discord/__init__.py +3 -0
- agno/integrations/discord/client.py +203 -0
- agno/knowledge/__init__.py +5 -0
- agno/knowledge/chunking/__init__.py +0 -0
- agno/knowledge/chunking/agentic.py +79 -0
- agno/knowledge/chunking/document.py +91 -0
- agno/knowledge/chunking/fixed.py +57 -0
- agno/knowledge/chunking/markdown.py +151 -0
- agno/knowledge/chunking/recursive.py +63 -0
- agno/knowledge/chunking/row.py +39 -0
- agno/knowledge/chunking/semantic.py +86 -0
- agno/knowledge/chunking/strategy.py +165 -0
- agno/knowledge/content.py +74 -0
- agno/knowledge/document/__init__.py +5 -0
- agno/knowledge/document/base.py +58 -0
- agno/knowledge/embedder/__init__.py +5 -0
- agno/knowledge/embedder/aws_bedrock.py +343 -0
- agno/knowledge/embedder/azure_openai.py +210 -0
- agno/knowledge/embedder/base.py +23 -0
- agno/knowledge/embedder/cohere.py +323 -0
- agno/knowledge/embedder/fastembed.py +62 -0
- agno/knowledge/embedder/fireworks.py +13 -0
- agno/knowledge/embedder/google.py +258 -0
- agno/knowledge/embedder/huggingface.py +94 -0
- agno/knowledge/embedder/jina.py +182 -0
- agno/knowledge/embedder/langdb.py +22 -0
- agno/knowledge/embedder/mistral.py +206 -0
- agno/knowledge/embedder/nebius.py +13 -0
- agno/knowledge/embedder/ollama.py +154 -0
- agno/knowledge/embedder/openai.py +195 -0
- agno/knowledge/embedder/sentence_transformer.py +63 -0
- agno/knowledge/embedder/together.py +13 -0
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/embedder/voyageai.py +165 -0
- agno/knowledge/knowledge.py +1988 -0
- agno/knowledge/reader/__init__.py +7 -0
- agno/knowledge/reader/arxiv_reader.py +81 -0
- agno/knowledge/reader/base.py +95 -0
- agno/knowledge/reader/csv_reader.py +166 -0
- agno/knowledge/reader/docx_reader.py +82 -0
- agno/knowledge/reader/field_labeled_csv_reader.py +292 -0
- agno/knowledge/reader/firecrawl_reader.py +201 -0
- agno/knowledge/reader/json_reader.py +87 -0
- agno/knowledge/reader/markdown_reader.py +137 -0
- agno/knowledge/reader/pdf_reader.py +431 -0
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +313 -0
- agno/knowledge/reader/s3_reader.py +89 -0
- agno/knowledge/reader/tavily_reader.py +194 -0
- agno/knowledge/reader/text_reader.py +115 -0
- agno/knowledge/reader/web_search_reader.py +372 -0
- agno/knowledge/reader/website_reader.py +455 -0
- agno/knowledge/reader/wikipedia_reader.py +59 -0
- agno/knowledge/reader/youtube_reader.py +78 -0
- agno/knowledge/remote_content/__init__.py +0 -0
- agno/knowledge/remote_content/remote_content.py +88 -0
- agno/knowledge/reranker/__init__.py +3 -0
- agno/knowledge/reranker/base.py +14 -0
- agno/knowledge/reranker/cohere.py +64 -0
- agno/knowledge/reranker/infinity.py +195 -0
- agno/knowledge/reranker/sentence_transformer.py +54 -0
- agno/knowledge/types.py +39 -0
- agno/knowledge/utils.py +189 -0
- agno/media.py +462 -0
- agno/memory/__init__.py +3 -0
- agno/memory/manager.py +1327 -0
- agno/models/__init__.py +0 -0
- agno/models/aimlapi/__init__.py +5 -0
- agno/models/aimlapi/aimlapi.py +45 -0
- agno/models/anthropic/__init__.py +5 -0
- agno/models/anthropic/claude.py +757 -0
- agno/models/aws/__init__.py +15 -0
- agno/models/aws/bedrock.py +701 -0
- agno/models/aws/claude.py +378 -0
- agno/models/azure/__init__.py +18 -0
- agno/models/azure/ai_foundry.py +485 -0
- agno/models/azure/openai_chat.py +131 -0
- agno/models/base.py +2175 -0
- agno/models/cerebras/__init__.py +12 -0
- agno/models/cerebras/cerebras.py +501 -0
- agno/models/cerebras/cerebras_openai.py +112 -0
- agno/models/cohere/__init__.py +5 -0
- agno/models/cohere/chat.py +389 -0
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +57 -0
- agno/models/dashscope/__init__.py +5 -0
- agno/models/dashscope/dashscope.py +91 -0
- agno/models/deepinfra/__init__.py +5 -0
- agno/models/deepinfra/deepinfra.py +28 -0
- agno/models/deepseek/__init__.py +5 -0
- agno/models/deepseek/deepseek.py +61 -0
- agno/models/defaults.py +1 -0
- agno/models/fireworks/__init__.py +5 -0
- agno/models/fireworks/fireworks.py +26 -0
- agno/models/google/__init__.py +5 -0
- agno/models/google/gemini.py +1085 -0
- agno/models/groq/__init__.py +5 -0
- agno/models/groq/groq.py +556 -0
- agno/models/huggingface/__init__.py +5 -0
- agno/models/huggingface/huggingface.py +491 -0
- agno/models/ibm/__init__.py +5 -0
- agno/models/ibm/watsonx.py +422 -0
- agno/models/internlm/__init__.py +3 -0
- agno/models/internlm/internlm.py +26 -0
- agno/models/langdb/__init__.py +1 -0
- agno/models/langdb/langdb.py +48 -0
- agno/models/litellm/__init__.py +14 -0
- agno/models/litellm/chat.py +468 -0
- agno/models/litellm/litellm_openai.py +25 -0
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/lmstudio/__init__.py +5 -0
- agno/models/lmstudio/lmstudio.py +25 -0
- agno/models/message.py +434 -0
- agno/models/meta/__init__.py +12 -0
- agno/models/meta/llama.py +475 -0
- agno/models/meta/llama_openai.py +78 -0
- agno/models/metrics.py +120 -0
- agno/models/mistral/__init__.py +5 -0
- agno/models/mistral/mistral.py +432 -0
- agno/models/nebius/__init__.py +3 -0
- agno/models/nebius/nebius.py +54 -0
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/__init__.py +5 -0
- agno/models/nvidia/nvidia.py +28 -0
- agno/models/ollama/__init__.py +5 -0
- agno/models/ollama/chat.py +441 -0
- agno/models/openai/__init__.py +9 -0
- agno/models/openai/chat.py +883 -0
- agno/models/openai/like.py +27 -0
- agno/models/openai/responses.py +1050 -0
- agno/models/openrouter/__init__.py +5 -0
- agno/models/openrouter/openrouter.py +66 -0
- agno/models/perplexity/__init__.py +5 -0
- agno/models/perplexity/perplexity.py +187 -0
- agno/models/portkey/__init__.py +3 -0
- agno/models/portkey/portkey.py +81 -0
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +52 -0
- agno/models/response.py +199 -0
- agno/models/sambanova/__init__.py +5 -0
- agno/models/sambanova/sambanova.py +28 -0
- agno/models/siliconflow/__init__.py +5 -0
- agno/models/siliconflow/siliconflow.py +25 -0
- agno/models/together/__init__.py +5 -0
- agno/models/together/together.py +25 -0
- agno/models/utils.py +266 -0
- agno/models/vercel/__init__.py +3 -0
- agno/models/vercel/v0.py +26 -0
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +70 -0
- agno/models/vllm/__init__.py +3 -0
- agno/models/vllm/vllm.py +78 -0
- agno/models/xai/__init__.py +3 -0
- agno/models/xai/xai.py +113 -0
- agno/os/__init__.py +3 -0
- agno/os/app.py +876 -0
- agno/os/auth.py +57 -0
- agno/os/config.py +104 -0
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +250 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/__init__.py +3 -0
- agno/os/interfaces/agui/agui.py +47 -0
- agno/os/interfaces/agui/router.py +144 -0
- agno/os/interfaces/agui/utils.py +534 -0
- agno/os/interfaces/base.py +25 -0
- agno/os/interfaces/slack/__init__.py +3 -0
- agno/os/interfaces/slack/router.py +148 -0
- agno/os/interfaces/slack/security.py +30 -0
- agno/os/interfaces/slack/slack.py +47 -0
- agno/os/interfaces/whatsapp/__init__.py +3 -0
- agno/os/interfaces/whatsapp/router.py +211 -0
- agno/os/interfaces/whatsapp/security.py +53 -0
- agno/os/interfaces/whatsapp/whatsapp.py +36 -0
- agno/os/mcp.py +292 -0
- agno/os/middleware/__init__.py +7 -0
- agno/os/middleware/jwt.py +233 -0
- agno/os/router.py +1763 -0
- agno/os/routers/__init__.py +3 -0
- agno/os/routers/evals/__init__.py +3 -0
- agno/os/routers/evals/evals.py +430 -0
- agno/os/routers/evals/schemas.py +142 -0
- agno/os/routers/evals/utils.py +162 -0
- agno/os/routers/health.py +31 -0
- agno/os/routers/home.py +52 -0
- agno/os/routers/knowledge/__init__.py +3 -0
- agno/os/routers/knowledge/knowledge.py +997 -0
- agno/os/routers/knowledge/schemas.py +178 -0
- agno/os/routers/memory/__init__.py +3 -0
- agno/os/routers/memory/memory.py +515 -0
- agno/os/routers/memory/schemas.py +62 -0
- agno/os/routers/metrics/__init__.py +3 -0
- agno/os/routers/metrics/metrics.py +190 -0
- agno/os/routers/metrics/schemas.py +47 -0
- agno/os/routers/session/__init__.py +3 -0
- agno/os/routers/session/session.py +997 -0
- agno/os/schema.py +1055 -0
- agno/os/settings.py +43 -0
- agno/os/utils.py +630 -0
- agno/py.typed +0 -0
- agno/reasoning/__init__.py +0 -0
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/azure_ai_foundry.py +67 -0
- agno/reasoning/deepseek.py +63 -0
- agno/reasoning/default.py +97 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/groq.py +71 -0
- agno/reasoning/helpers.py +63 -0
- agno/reasoning/ollama.py +67 -0
- agno/reasoning/openai.py +86 -0
- agno/reasoning/step.py +31 -0
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +787 -0
- agno/run/base.py +229 -0
- agno/run/cancel.py +81 -0
- agno/run/messages.py +32 -0
- agno/run/team.py +753 -0
- agno/run/workflow.py +708 -0
- agno/session/__init__.py +10 -0
- agno/session/agent.py +295 -0
- agno/session/summary.py +265 -0
- agno/session/team.py +392 -0
- agno/session/workflow.py +205 -0
- agno/team/__init__.py +37 -0
- agno/team/team.py +8793 -0
- agno/tools/__init__.py +10 -0
- agno/tools/agentql.py +120 -0
- agno/tools/airflow.py +69 -0
- agno/tools/api.py +122 -0
- agno/tools/apify.py +314 -0
- agno/tools/arxiv.py +127 -0
- agno/tools/aws_lambda.py +53 -0
- agno/tools/aws_ses.py +66 -0
- agno/tools/baidusearch.py +89 -0
- agno/tools/bitbucket.py +292 -0
- agno/tools/brandfetch.py +213 -0
- agno/tools/bravesearch.py +106 -0
- agno/tools/brightdata.py +367 -0
- agno/tools/browserbase.py +209 -0
- agno/tools/calcom.py +255 -0
- agno/tools/calculator.py +151 -0
- agno/tools/cartesia.py +187 -0
- agno/tools/clickup.py +244 -0
- agno/tools/confluence.py +240 -0
- agno/tools/crawl4ai.py +158 -0
- agno/tools/csv_toolkit.py +185 -0
- agno/tools/dalle.py +110 -0
- agno/tools/daytona.py +475 -0
- agno/tools/decorator.py +262 -0
- agno/tools/desi_vocal.py +108 -0
- agno/tools/discord.py +161 -0
- agno/tools/docker.py +716 -0
- agno/tools/duckdb.py +379 -0
- agno/tools/duckduckgo.py +91 -0
- agno/tools/e2b.py +703 -0
- agno/tools/eleven_labs.py +196 -0
- agno/tools/email.py +67 -0
- agno/tools/evm.py +129 -0
- agno/tools/exa.py +396 -0
- agno/tools/fal.py +127 -0
- agno/tools/file.py +240 -0
- agno/tools/file_generation.py +350 -0
- agno/tools/financial_datasets.py +288 -0
- agno/tools/firecrawl.py +143 -0
- agno/tools/function.py +1187 -0
- agno/tools/giphy.py +93 -0
- agno/tools/github.py +1760 -0
- agno/tools/gmail.py +922 -0
- agno/tools/google_bigquery.py +117 -0
- agno/tools/google_drive.py +270 -0
- agno/tools/google_maps.py +253 -0
- agno/tools/googlecalendar.py +674 -0
- agno/tools/googlesearch.py +98 -0
- agno/tools/googlesheets.py +377 -0
- agno/tools/hackernews.py +77 -0
- agno/tools/jina.py +101 -0
- agno/tools/jira.py +170 -0
- agno/tools/knowledge.py +218 -0
- agno/tools/linear.py +426 -0
- agno/tools/linkup.py +58 -0
- agno/tools/local_file_system.py +90 -0
- agno/tools/lumalab.py +183 -0
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +284 -0
- agno/tools/mem0.py +193 -0
- agno/tools/memori.py +339 -0
- agno/tools/memory.py +419 -0
- agno/tools/mlx_transcribe.py +139 -0
- agno/tools/models/__init__.py +0 -0
- agno/tools/models/azure_openai.py +190 -0
- agno/tools/models/gemini.py +203 -0
- agno/tools/models/groq.py +158 -0
- agno/tools/models/morph.py +186 -0
- agno/tools/models/nebius.py +124 -0
- agno/tools/models_labs.py +195 -0
- agno/tools/moviepy_video.py +349 -0
- agno/tools/neo4j.py +134 -0
- agno/tools/newspaper.py +46 -0
- agno/tools/newspaper4k.py +93 -0
- agno/tools/notion.py +204 -0
- agno/tools/openai.py +202 -0
- agno/tools/openbb.py +160 -0
- agno/tools/opencv.py +321 -0
- agno/tools/openweather.py +233 -0
- agno/tools/oxylabs.py +385 -0
- agno/tools/pandas.py +102 -0
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +257 -0
- agno/tools/pubmed.py +188 -0
- agno/tools/python.py +205 -0
- agno/tools/reasoning.py +283 -0
- agno/tools/reddit.py +467 -0
- agno/tools/replicate.py +117 -0
- agno/tools/resend.py +62 -0
- agno/tools/scrapegraph.py +222 -0
- agno/tools/searxng.py +152 -0
- agno/tools/serpapi.py +116 -0
- agno/tools/serper.py +255 -0
- agno/tools/shell.py +53 -0
- agno/tools/slack.py +136 -0
- agno/tools/sleep.py +20 -0
- agno/tools/spider.py +116 -0
- agno/tools/sql.py +154 -0
- agno/tools/streamlit/__init__.py +0 -0
- agno/tools/streamlit/components.py +113 -0
- agno/tools/tavily.py +254 -0
- agno/tools/telegram.py +48 -0
- agno/tools/todoist.py +218 -0
- agno/tools/tool_registry.py +1 -0
- agno/tools/toolkit.py +146 -0
- agno/tools/trafilatura.py +388 -0
- agno/tools/trello.py +274 -0
- agno/tools/twilio.py +186 -0
- agno/tools/user_control_flow.py +78 -0
- agno/tools/valyu.py +228 -0
- agno/tools/visualization.py +467 -0
- agno/tools/webbrowser.py +28 -0
- agno/tools/webex.py +76 -0
- agno/tools/website.py +54 -0
- agno/tools/webtools.py +45 -0
- agno/tools/whatsapp.py +286 -0
- agno/tools/wikipedia.py +63 -0
- agno/tools/workflow.py +278 -0
- agno/tools/x.py +335 -0
- agno/tools/yfinance.py +257 -0
- agno/tools/youtube.py +184 -0
- agno/tools/zendesk.py +82 -0
- agno/tools/zep.py +454 -0
- agno/tools/zoom.py +382 -0
- agno/utils/__init__.py +0 -0
- agno/utils/agent.py +820 -0
- agno/utils/audio.py +49 -0
- agno/utils/certs.py +27 -0
- agno/utils/code_execution.py +11 -0
- agno/utils/common.py +132 -0
- agno/utils/dttm.py +13 -0
- agno/utils/enum.py +22 -0
- agno/utils/env.py +11 -0
- agno/utils/events.py +696 -0
- agno/utils/format_str.py +16 -0
- agno/utils/functions.py +166 -0
- agno/utils/gemini.py +426 -0
- agno/utils/hooks.py +57 -0
- agno/utils/http.py +74 -0
- agno/utils/json_schema.py +234 -0
- agno/utils/knowledge.py +36 -0
- agno/utils/location.py +19 -0
- agno/utils/log.py +255 -0
- agno/utils/mcp.py +214 -0
- agno/utils/media.py +352 -0
- agno/utils/merge_dict.py +41 -0
- agno/utils/message.py +118 -0
- agno/utils/models/__init__.py +0 -0
- agno/utils/models/ai_foundry.py +43 -0
- agno/utils/models/claude.py +358 -0
- agno/utils/models/cohere.py +87 -0
- agno/utils/models/llama.py +78 -0
- agno/utils/models/mistral.py +98 -0
- agno/utils/models/openai_responses.py +140 -0
- agno/utils/models/schema_utils.py +153 -0
- agno/utils/models/watsonx.py +41 -0
- agno/utils/openai.py +257 -0
- agno/utils/pickle.py +32 -0
- agno/utils/pprint.py +178 -0
- agno/utils/print_response/__init__.py +0 -0
- agno/utils/print_response/agent.py +842 -0
- agno/utils/print_response/team.py +1724 -0
- agno/utils/print_response/workflow.py +1668 -0
- agno/utils/prompts.py +111 -0
- agno/utils/reasoning.py +108 -0
- agno/utils/response.py +163 -0
- agno/utils/response_iterator.py +17 -0
- agno/utils/safe_formatter.py +24 -0
- agno/utils/serialize.py +32 -0
- agno/utils/shell.py +22 -0
- agno/utils/streamlit.py +487 -0
- agno/utils/string.py +231 -0
- agno/utils/team.py +139 -0
- agno/utils/timer.py +41 -0
- agno/utils/tools.py +102 -0
- agno/utils/web.py +23 -0
- agno/utils/whatsapp.py +305 -0
- agno/utils/yaml_io.py +25 -0
- agno/vectordb/__init__.py +3 -0
- agno/vectordb/base.py +127 -0
- agno/vectordb/cassandra/__init__.py +5 -0
- agno/vectordb/cassandra/cassandra.py +501 -0
- agno/vectordb/cassandra/extra_param_mixin.py +11 -0
- agno/vectordb/cassandra/index.py +13 -0
- agno/vectordb/chroma/__init__.py +5 -0
- agno/vectordb/chroma/chromadb.py +929 -0
- agno/vectordb/clickhouse/__init__.py +9 -0
- agno/vectordb/clickhouse/clickhousedb.py +835 -0
- agno/vectordb/clickhouse/index.py +9 -0
- agno/vectordb/couchbase/__init__.py +3 -0
- agno/vectordb/couchbase/couchbase.py +1442 -0
- agno/vectordb/distance.py +7 -0
- agno/vectordb/lancedb/__init__.py +6 -0
- agno/vectordb/lancedb/lance_db.py +995 -0
- agno/vectordb/langchaindb/__init__.py +5 -0
- agno/vectordb/langchaindb/langchaindb.py +163 -0
- agno/vectordb/lightrag/__init__.py +5 -0
- agno/vectordb/lightrag/lightrag.py +388 -0
- agno/vectordb/llamaindex/__init__.py +3 -0
- agno/vectordb/llamaindex/llamaindexdb.py +166 -0
- agno/vectordb/milvus/__init__.py +4 -0
- agno/vectordb/milvus/milvus.py +1182 -0
- agno/vectordb/mongodb/__init__.py +9 -0
- agno/vectordb/mongodb/mongodb.py +1417 -0
- agno/vectordb/pgvector/__init__.py +12 -0
- agno/vectordb/pgvector/index.py +23 -0
- agno/vectordb/pgvector/pgvector.py +1462 -0
- agno/vectordb/pineconedb/__init__.py +5 -0
- agno/vectordb/pineconedb/pineconedb.py +747 -0
- agno/vectordb/qdrant/__init__.py +5 -0
- agno/vectordb/qdrant/qdrant.py +1134 -0
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +694 -0
- agno/vectordb/search.py +7 -0
- agno/vectordb/singlestore/__init__.py +10 -0
- agno/vectordb/singlestore/index.py +41 -0
- agno/vectordb/singlestore/singlestore.py +763 -0
- agno/vectordb/surrealdb/__init__.py +3 -0
- agno/vectordb/surrealdb/surrealdb.py +699 -0
- agno/vectordb/upstashdb/__init__.py +5 -0
- agno/vectordb/upstashdb/upstashdb.py +718 -0
- agno/vectordb/weaviate/__init__.py +8 -0
- agno/vectordb/weaviate/index.py +15 -0
- agno/vectordb/weaviate/weaviate.py +1005 -0
- agno/workflow/__init__.py +23 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +738 -0
- agno/workflow/loop.py +735 -0
- agno/workflow/parallel.py +824 -0
- agno/workflow/router.py +702 -0
- agno/workflow/step.py +1432 -0
- agno/workflow/steps.py +592 -0
- agno/workflow/types.py +520 -0
- agno/workflow/workflow.py +4321 -0
- agno-2.2.13.dist-info/METADATA +614 -0
- agno-2.2.13.dist-info/RECORD +575 -0
- agno-2.2.13.dist-info/WHEEL +5 -0
- agno-2.2.13.dist-info/licenses/LICENSE +201 -0
- agno-2.2.13.dist-info/top_level.txt +1 -0
agno/workflow/step.py
ADDED
@@ -0,0 +1,1432 @@
import inspect
from copy import copy
from dataclasses import dataclass
from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List, Optional, Union
from uuid import uuid4

from pydantic import BaseModel
from typing_extensions import TypeGuard

from agno.agent import Agent
from agno.media import Audio, Image, Video
from agno.models.metrics import Metrics
from agno.run import RunContext
from agno.run.agent import RunContentEvent, RunOutput
from agno.run.base import BaseRunOutputEvent
from agno.run.team import RunContentEvent as TeamRunContentEvent
from agno.run.team import TeamRunOutput
from agno.run.workflow import (
    StepCompletedEvent,
    StepStartedEvent,
    WorkflowRunOutput,
    WorkflowRunOutputEvent,
)
from agno.session.workflow import WorkflowSession
from agno.team import Team
from agno.utils.log import log_debug, logger, use_agent_logger, use_team_logger, use_workflow_logger
from agno.utils.merge_dict import merge_dictionaries
from agno.workflow.types import StepInput, StepOutput, StepType

StepExecutor = Callable[
    [StepInput],
    Union[
        StepOutput,
        Iterator[StepOutput],
        Iterator[Any],
        Awaitable[StepOutput],
        Awaitable[Any],
        AsyncIterator[StepOutput],
        AsyncIterator[Any],
    ],
]


@dataclass
class Step:
    """A single unit of work in a workflow pipeline"""

    name: Optional[str] = None

    # Executor options - only one should be provided
    agent: Optional[Agent] = None
    team: Optional[Team] = None
    executor: Optional[StepExecutor] = None

    step_id: Optional[str] = None
    description: Optional[str] = None

    # Step configuration
    max_retries: int = 3
    timeout_seconds: Optional[int] = None

    skip_on_failure: bool = False

    # Input validation mode
    # If False, only warn about missing inputs
    strict_input_validation: bool = False

    add_workflow_history: Optional[bool] = None
    num_history_runs: int = 3

    _retry_count: int = 0

    def __init__(
        self,
        name: Optional[str] = None,
        agent: Optional[Agent] = None,
        team: Optional[Team] = None,
        executor: Optional[StepExecutor] = None,
        step_id: Optional[str] = None,
        description: Optional[str] = None,
        max_retries: int = 3,
        timeout_seconds: Optional[int] = None,
        skip_on_failure: bool = False,
        strict_input_validation: bool = False,
        add_workflow_history: Optional[bool] = None,
        num_history_runs: int = 3,
    ):
        # Auto-detect name for function executors if not provided
        if name is None and executor is not None:
            name = getattr(executor, "__name__", None)

        self.name = name
        self.agent = agent
        self.team = team
        self.executor = executor

        # Validate executor configuration
        self._validate_executor_config()

        self.step_id = step_id
        self.description = description
        self.max_retries = max_retries
        self.timeout_seconds = timeout_seconds
        self.skip_on_failure = skip_on_failure
        self.strict_input_validation = strict_input_validation
        self.add_workflow_history = add_workflow_history
        self.num_history_runs = num_history_runs
        self.step_id = step_id

        if step_id is None:
            self.step_id = str(uuid4())

        # Set the active executor
        self._set_active_executor()

    @property
    def executor_name(self) -> str:
        """Get the name of the current executor"""
        if hasattr(self.active_executor, "name"):
            return self.active_executor.name or "unnamed_executor"
        elif self._executor_type == "function":
            return getattr(self.active_executor, "__name__", "anonymous_function")
        else:
            return f"{self._executor_type}_executor"

    @property
    def executor_type(self) -> str:
        """Get the type of the current executor"""
        return self._executor_type

    def _validate_executor_config(self):
        """Validate that only one executor type is provided"""
        executor_count = sum(
            [
                self.agent is not None,
                self.team is not None,
                self.executor is not None,
            ]
        )

        if executor_count == 0:
            raise ValueError(f"Step '{self.name}' must have one executor: agent=, team=, or executor=")

        if executor_count > 1:
            provided_executors = []
            if self.agent is not None:
                provided_executors.append("agent")
            if self.team is not None:
                provided_executors.append("team")
            if self.executor is not None:
                provided_executors.append("executor")

            raise ValueError(
                f"Step '{self.name}' can only have one executor type. "
                f"Provided: {', '.join(provided_executors)}. "
                f"Please use only one of: agent=, team=, or executor="
            )

    def _set_active_executor(self) -> None:
        """Set the active executor based on what was provided"""
        if self.agent is not None:
            self.active_executor = self.agent  # type: ignore[assignment]
            self._executor_type = "agent"
        elif self.team is not None:
            self.active_executor = self.team  # type: ignore[assignment]
            self._executor_type = "team"
        elif self.executor is not None:
            self.active_executor = self.executor  # type: ignore[assignment]
            self._executor_type = "function"
        else:
            raise ValueError("No executor configured")

    def _extract_metrics_from_response(self, response: Union[RunOutput, TeamRunOutput]) -> Optional[Metrics]:
        """Extract metrics from agent or team response"""
        if hasattr(response, "metrics") and response.metrics:
            return response.metrics
        return None

    def _call_custom_function(
        self,
        func: Callable,
        step_input: StepInput,
        session_state: Optional[Dict[str, Any]] = None,
        run_context: Optional[RunContext] = None,
    ) -> Any:
        """Call custom function with session_state support if the function accepts it"""

        kwargs: Dict[str, Any] = {}
        if run_context is not None and self._function_has_run_context_param():
            kwargs["run_context"] = run_context
        if session_state is not None and self._function_has_session_state_param():
            kwargs["session_state"] = session_state

        return func(step_input, **kwargs)

    async def _acall_custom_function(
        self,
        func: Callable,
        step_input: StepInput,
        session_state: Optional[Dict[str, Any]] = None,
        run_context: Optional[RunContext] = None,
    ) -> Any:
        """Call custom async function with session_state support if the function accepts it"""

        kwargs: Dict[str, Any] = {}
        if run_context is not None and self._function_has_run_context_param():
            kwargs["run_context"] = run_context
        if session_state is not None and self._function_has_session_state_param():
            kwargs["session_state"] = session_state

        if _is_async_generator_function(func):
            return func(step_input, **kwargs)
        else:
            return await func(step_input, **kwargs)

    def execute(
        self,
        step_input: StepInput,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        workflow_run_response: Optional["WorkflowRunOutput"] = None,
        run_context: Optional[RunContext] = None,
        session_state: Optional[Dict[str, Any]] = None,
        store_executor_outputs: bool = True,
        workflow_session: Optional[WorkflowSession] = None,
        add_workflow_history_to_steps: Optional[bool] = False,
        num_history_runs: int = 3,
    ) -> StepOutput:
        """Execute the step with StepInput, returning final StepOutput (non-streaming)"""
        log_debug(f"Executing step: {self.name}")

        if step_input.previous_step_outputs:
            step_input.previous_step_content = step_input.get_last_step_content()

        if workflow_session:
            step_input.workflow_session = workflow_session

        # Create session_state copy once to avoid duplication.
        # Consider both run_context.session_state and session_state.
        if run_context is not None and run_context.session_state is not None:
            session_state_copy = run_context.session_state
        else:
            session_state_copy = copy(session_state) if session_state is not None else {}

        # Execute with retries
        for attempt in range(self.max_retries + 1):
            try:
                response: Union[RunOutput, TeamRunOutput, StepOutput]
                if self._executor_type == "function":
                    if _is_async_callable(self.active_executor) or _is_async_generator_function(self.active_executor):
                        raise ValueError("Cannot use async function with synchronous execution")
                    if _is_generator_function(self.active_executor):
                        content = ""
                        final_response = None
                        try:
                            for chunk in self._call_custom_function(
                                self.active_executor,
                                step_input,
                                session_state_copy,  # type: ignore[arg-type]
                                run_context,
                            ):  # type: ignore
                                if isinstance(chunk, (BaseRunOutputEvent)):
                                    if (
                                        isinstance(chunk, (RunContentEvent, TeamRunContentEvent))
                                        and chunk.content is not None
                                    ):
                                        # Its a regular chunk of content
                                        if isinstance(chunk.content, str):
                                            content += chunk.content
                                        # Its the BaseModel object, set it as the content. Replace any previous content.
                                        # There should be no previous str content at this point
                                        elif isinstance(chunk.content, BaseModel):
                                            content = chunk.content  # type: ignore[assignment]
                                        else:
                                            # Safeguard but should never happen
                                            content += str(chunk.content)
                                elif isinstance(chunk, (RunOutput, TeamRunOutput)):
                                    # This is the final response from the agent/team
                                    content = chunk.content  # type: ignore[assignment]
                                else:
                                    # Non Agent/Team data structure that was yielded
                                    content += str(chunk)
                                # If the chunk is a StepOutput, use it as the final response
                                if isinstance(chunk, StepOutput):
                                    final_response = chunk

                        except StopIteration as e:
                            if hasattr(e, "value") and isinstance(e.value, StepOutput):
                                final_response = e.value

                        # Merge session_state changes back
                        if run_context is None and session_state is not None:
                            merge_dictionaries(session_state, session_state_copy)

                        if final_response is not None:
                            response = final_response
                        else:
                            response = StepOutput(content=content)
                    else:
                        # Execute function with signature inspection for session_state support
                        result = self._call_custom_function(
                            self.active_executor,  # type: ignore[arg-type]
                            step_input,
                            session_state_copy,
                            run_context,
                        )

                        # Merge session_state changes back
                        if run_context is None and session_state is not None:
                            merge_dictionaries(session_state, session_state_copy)

                        # If function returns StepOutput, use it directly
                        if isinstance(result, StepOutput):
                            response = result
                        elif isinstance(result, (RunOutput, TeamRunOutput)):
                            response = StepOutput(content=result.content)
                        else:
                            response = StepOutput(content=str(result))
                else:
                    # For agents and teams, prepare message with context
                    message = self._prepare_message(
                        step_input.input,
                        step_input.previous_step_outputs,
                    )

                    # Execute agent or team with media
                    if self._executor_type in ["agent", "team"]:
                        # Switch to appropriate logger based on executor type
                        if self._executor_type == "agent":
                            use_agent_logger()
                        elif self._executor_type == "team":
                            use_team_logger()

                        images = (
                            self._convert_image_artifacts_to_images(step_input.images) if step_input.images else None
                        )
                        videos = (
                            self._convert_video_artifacts_to_videos(step_input.videos) if step_input.videos else None
                        )
                        audios = self._convert_audio_artifacts_to_audio(step_input.audio) if step_input.audio else None

                        kwargs: Dict[str, Any] = {}
                        if isinstance(self.active_executor, Team):
                            kwargs["store_member_responses"] = True

                        num_history_runs = self.num_history_runs if self.num_history_runs else num_history_runs

                        use_history = (
                            self.add_workflow_history
                            if self.add_workflow_history is not None
                            else add_workflow_history_to_steps
                        )

                        final_message = message
                        if use_history and workflow_session:
                            history_messages = workflow_session.get_workflow_history_context(num_runs=num_history_runs)
                            if history_messages:
                                final_message = f"{history_messages}{message}"

                        response = self.active_executor.run(  # type: ignore
                            input=final_message,  # type: ignore
                            images=images,
                            videos=videos,
                            audio=audios,
                            files=step_input.files,
                            session_id=session_id,
                            user_id=user_id,
                            session_state=session_state_copy,  # Send a copy to the executor
                            run_context=run_context,
                            **kwargs,
                        )

                        # Update workflow session state
                        if run_context is None and session_state is not None:
                            merge_dictionaries(session_state, session_state_copy)

                        if store_executor_outputs and workflow_run_response is not None:
                            self._store_executor_response(workflow_run_response, response)  # type: ignore

                        # Switch back to workflow logger after execution
                        use_workflow_logger()
                    else:
                        raise ValueError(f"Unsupported executor type: {self._executor_type}")

                # Create StepOutput from response
                step_output = self._process_step_output(response)  # type: ignore

                return step_output

            except Exception as e:
                self.retry_count = attempt + 1
                logger.warning(f"Step {self.name} failed (attempt {attempt + 1}): {e}")

                if attempt == self.max_retries:
                    if self.skip_on_failure:
                        log_debug(f"Step {self.name} failed but continuing due to skip_on_failure=True")
                        # Create empty StepOutput for skipped step
                        return StepOutput(content=f"Step {self.name} failed but skipped", success=False, error=str(e))
                    else:
                        raise e

        return StepOutput(content=f"Step {self.name} failed but skipped", success=False)

    def _function_has_run_context_param(self) -> bool:
        """Check if the custom function has a run_context parameter"""
        if self._executor_type != "function":
            return False

        try:
            sig = inspect.signature(self.active_executor)  # type: ignore
            return "run_context" in sig.parameters
        except Exception:
            return False

    def _function_has_session_state_param(self) -> bool:
        """Check if the custom function has a session_state parameter"""
        if self._executor_type != "function":
            return False

        try:
            sig = inspect.signature(self.active_executor)  # type: ignore
            return "session_state" in sig.parameters
        except Exception:
            return False

    def _enrich_event_with_context(
        self,
        event: Any,
        workflow_run_response: Optional["WorkflowRunOutput"] = None,
        step_index: Optional[Union[int, tuple]] = None,
    ) -> Any:
        """Enrich event with step and workflow context information"""
        if workflow_run_response is None:
            return event
        if hasattr(event, "workflow_id"):
            event.workflow_id = workflow_run_response.workflow_id
        if hasattr(event, "workflow_run_id"):
            event.workflow_run_id = workflow_run_response.run_id
        if hasattr(event, "step_id"):
            event.step_id = self.step_id
        if hasattr(event, "step_name") and self.name is not None:
            if getattr(event, "step_name", None) is None:
                event.step_name = self.name
        # Only set step_index if it's not already set (preserve parallel.py's tuples)
        if hasattr(event, "step_index") and step_index is not None:
            if event.step_index is None:
                event.step_index = step_index

        return event

    def execute_stream(
        self,
        step_input: StepInput,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        stream_events: bool = False,
        stream_intermediate_steps: bool = False,
        stream_executor_events: bool = True,
        workflow_run_response: Optional["WorkflowRunOutput"] = None,
        run_context: Optional[RunContext] = None,
        session_state: Optional[Dict[str, Any]] = None,
        step_index: Optional[Union[int, tuple]] = None,
        store_executor_outputs: bool = True,
        parent_step_id: Optional[str] = None,
        workflow_session: Optional["WorkflowSession"] = None,
        add_workflow_history_to_steps: Optional[bool] = False,
        num_history_runs: int = 3,
    ) -> Iterator[Union[WorkflowRunOutputEvent, StepOutput]]:
        """Execute the step with event-driven streaming support"""

        if step_input.previous_step_outputs:
            step_input.previous_step_content = step_input.get_last_step_content()

        if workflow_session:
            step_input.workflow_session = workflow_session

        # Create session_state copy once to avoid duplication.
        # Consider both run_context.session_state and session_state.
        if run_context is not None and run_context.session_state is not None:
            session_state_copy = run_context.session_state
        else:
            session_state_copy = copy(session_state) if session_state is not None else {}

        # Considering both stream_events and stream_intermediate_steps (deprecated)
        stream_events = stream_events or stream_intermediate_steps

        # Emit StepStartedEvent
        if stream_events and workflow_run_response:
            yield StepStartedEvent(
                run_id=workflow_run_response.run_id or "",
                workflow_name=workflow_run_response.workflow_name or "",
                workflow_id=workflow_run_response.workflow_id or "",
                session_id=workflow_run_response.session_id or "",
                step_name=self.name,
                step_index=step_index,
                step_id=self.step_id,
                parent_step_id=parent_step_id,
            )

        # Execute with retries and streaming
        for attempt in range(self.max_retries + 1):
            try:
                log_debug(f"Step {self.name} streaming attempt {attempt + 1}/{self.max_retries + 1}")
                final_response = None

                if self._executor_type == "function":
                    log_debug(f"Executing function executor for step: {self.name}")
                    if _is_async_callable(self.active_executor) or _is_async_generator_function(self.active_executor):
                        raise ValueError("Cannot use async function with synchronous execution")
                    if _is_generator_function(self.active_executor):
                        log_debug("Function returned iterable, streaming events")
                        content = ""
                        try:
                            iterator = self._call_custom_function(
                                self.active_executor,
                                step_input,
                                session_state_copy,
                                run_context,
                            )
                            for event in iterator:  # type: ignore
                                if isinstance(event, (BaseRunOutputEvent)):
                                    if (
                                        isinstance(event, (RunContentEvent, TeamRunContentEvent))
                                        and event.content is not None
                                    ):
                                        if isinstance(event.content, str):
                                            content += event.content
                                        elif isinstance(event.content, BaseModel):
                                            content = event.content  # type: ignore[assignment]
                                        else:
                                            content = str(event.content)
                                    # Only yield executor events if stream_executor_events is True
                                    if stream_executor_events:
                                        enriched_event = self._enrich_event_with_context(
                                            event, workflow_run_response, step_index
                                        )
                                        yield enriched_event  # type: ignore[misc]
                                elif isinstance(event, (RunOutput, TeamRunOutput)):
                                    content = event.content  # type: ignore[assignment]
                                else:
                                    content += str(event)
                                if isinstance(event, StepOutput):
                                    final_response = event
                                    break

                            # Merge session_state changes back
                            if run_context is None and session_state is not None:
                                merge_dictionaries(session_state, session_state_copy)

                            if not final_response:
                                final_response = StepOutput(content=content)
                        except StopIteration as e:
                            if hasattr(e, "value") and isinstance(e.value, StepOutput):
                                final_response = e.value

                    else:
                        result = self._call_custom_function(
                            self.active_executor,  # type: ignore[arg-type]
                            step_input,
                            session_state_copy,
                            run_context,
                        )

                        # Merge session_state changes back
                        if run_context is None and session_state is not None:
                            merge_dictionaries(session_state, session_state_copy)

                        if isinstance(result, StepOutput):
                            final_response = result
                        elif isinstance(result, (RunOutput, TeamRunOutput)):
                            final_response = StepOutput(content=result.content)
                        else:
                            final_response = StepOutput(content=str(result))
                        log_debug("Function returned non-iterable, created StepOutput")
                else:
                    # For agents and teams, prepare message with context
                    message = self._prepare_message(
                        step_input.input,
                        step_input.previous_step_outputs,
                    )

                    if self._executor_type in ["agent", "team"]:
                        # Switch to appropriate logger based on executor type
                        if self._executor_type == "agent":
                            use_agent_logger()
                        elif self._executor_type == "team":
                            use_team_logger()

                        images = (
                            self._convert_image_artifacts_to_images(step_input.images) if step_input.images else None
                        )
                        videos = (
                            self._convert_video_artifacts_to_videos(step_input.videos) if step_input.videos else None
                        )
                        audios = self._convert_audio_artifacts_to_audio(step_input.audio) if step_input.audio else None

                        kwargs: Dict[str, Any] = {}
                        if isinstance(self.active_executor, Team):
                            kwargs["store_member_responses"] = True

                        num_history_runs = self.num_history_runs if self.num_history_runs else num_history_runs

                        use_history = (
                            self.add_workflow_history
                            if self.add_workflow_history is not None
                            else add_workflow_history_to_steps
                        )

                        final_message = message
                        if use_history and workflow_session:
                            history_messages = workflow_session.get_workflow_history_context(num_runs=num_history_runs)
                            if history_messages:
                                final_message = f"{history_messages}{message}"

                        response_stream = self.active_executor.run(  # type: ignore[call-overload, misc]
                            input=final_message,
                            images=images,
                            videos=videos,
                            audio=audios,
                            files=step_input.files,
                            session_id=session_id,
                            user_id=user_id,
                            session_state=session_state_copy,  # Send a copy to the executor
                            stream=True,
                            stream_events=stream_events,
                            yield_run_response=True,
                            run_context=run_context,
                            **kwargs,
                        )

                        active_executor_run_response = None
                        for event in response_stream:
                            if isinstance(event, RunOutput) or isinstance(event, TeamRunOutput):
                                active_executor_run_response = event
                                break
                            # Only yield executor events if stream_executor_events is True
                            if stream_executor_events:
                                enriched_event = self._enrich_event_with_context(
                                    event, workflow_run_response, step_index
                                )
                                yield enriched_event  # type: ignore[misc]

                        # Update workflow session state
                        if run_context is None and session_state is not None:
                            merge_dictionaries(session_state, session_state_copy)

                        if store_executor_outputs and workflow_run_response is not None:
                            self._store_executor_response(workflow_run_response, active_executor_run_response)  # type: ignore

                        final_response = active_executor_run_response  # type: ignore

                    else:
                        raise ValueError(f"Unsupported executor type: {self._executor_type}")

                # If we didn't get a final response, create one
                if final_response is None:
                    final_response = StepOutput(content="")
                    log_debug("Created empty StepOutput as fallback")

                # Switch back to workflow logger after execution
                use_workflow_logger()

                # Yield the step output
                final_response = self._process_step_output(final_response)
                yield final_response

                # Emit StepCompletedEvent
                if stream_events and workflow_run_response:
                    yield StepCompletedEvent(
                        run_id=workflow_run_response.run_id or "",
                        workflow_name=workflow_run_response.workflow_name or "",
                        workflow_id=workflow_run_response.workflow_id or "",
                        session_id=workflow_run_response.session_id or "",
                        step_name=self.name,
                        step_index=step_index,
                        content=final_response.content,
                        step_response=final_response,
                        parent_step_id=parent_step_id,
                    )

                return
            except Exception as e:
                self.retry_count = attempt + 1
                logger.warning(f"Step {self.name} failed (attempt {attempt + 1}): {e}")

                if attempt == self.max_retries:
                    if self.skip_on_failure:
                        log_debug(f"Step {self.name} failed but continuing due to skip_on_failure=True")
                        # Create empty StepOutput for skipped step
                        step_output = StepOutput(
                            content=f"Step {self.name} failed but skipped", success=False, error=str(e)
                        )
                        yield step_output
                        return
                    else:
                        raise e

        return

    async def aexecute(
        self,
        step_input: StepInput,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        workflow_run_response: Optional["WorkflowRunOutput"] = None,
        run_context: Optional[RunContext] = None,
        session_state: Optional[Dict[str, Any]] = None,
        store_executor_outputs: bool = True,
        workflow_session: Optional["WorkflowSession"] = None,
        add_workflow_history_to_steps: Optional[bool] = False,
        num_history_runs: int = 3,
    ) -> StepOutput:
        """Execute the step with StepInput, returning final StepOutput (non-streaming)"""
        logger.info(f"Executing async step (non-streaming): {self.name}")
        log_debug(f"Executor type: {self._executor_type}")

        if step_input.previous_step_outputs:
            step_input.previous_step_content = step_input.get_last_step_content()

        if workflow_session:
            step_input.workflow_session = workflow_session

        # Create session_state copy once to avoid duplication.
        # Consider both run_context.session_state and session_state.
        if run_context is not None and run_context.session_state is not None:
            session_state_copy = run_context.session_state
        else:
            session_state_copy = copy(session_state) if session_state is not None else {}

        # Execute with retries
        for attempt in range(self.max_retries + 1):
            try:
                if self._executor_type == "function":
                    if _is_generator_function(self.active_executor) or _is_async_generator_function(
                        self.active_executor
                    ):
                        content = ""
                        final_response = None
                        try:
                            if _is_generator_function(self.active_executor):
                                iterator = self._call_custom_function(
                                    self.active_executor,
                                    step_input,
                                    session_state_copy,
                                    run_context,
                                )
                                for chunk in iterator:  # type: ignore
                                    if isinstance(chunk, (BaseRunOutputEvent)):
                                        if (
                                            isinstance(chunk, (RunContentEvent, TeamRunContentEvent))
                                            and chunk.content is not None
                                        ):
                                            if isinstance(chunk.content, str):
                                                content += chunk.content
                                            elif isinstance(chunk.content, BaseModel):
                                                content = chunk.content  # type: ignore[assignment]
                                            else:
                                                content = str(chunk.content)
                                    elif isinstance(chunk, (RunOutput, TeamRunOutput)):
                                        content = chunk.content  # type: ignore[assignment]
                                    else:
                                        content += str(chunk)
                                    if isinstance(chunk, StepOutput):
                                        final_response = chunk
                            else:
                                if _is_async_generator_function(self.active_executor):
                                    iterator = await self._acall_custom_function(
                                        self.active_executor,
                                        step_input,
                                        session_state_copy,
                                        run_context,
                                    )
                                    async for chunk in iterator:  # type: ignore
                                        if isinstance(chunk, (BaseRunOutputEvent)):
                                            if (
                                                isinstance(chunk, (RunContentEvent, TeamRunContentEvent))
                                                and chunk.content is not None
                                            ):
                                                if isinstance(chunk.content, str):
                                                    content += chunk.content
                                                elif isinstance(chunk.content, BaseModel):
                                                    content = chunk.content  # type: ignore[assignment]
                                                else:
                                                    content = str(chunk.content)
                                        elif isinstance(chunk, (RunOutput, TeamRunOutput)):
                                            content = chunk.content  # type: ignore[assignment]
                                        else:
                                            content += str(chunk)
                                        if isinstance(chunk, StepOutput):
                                            final_response = chunk

                        except StopIteration as e:
                            if hasattr(e, "value") and isinstance(e.value, StepOutput):
                                final_response = e.value

                        # Merge session_state changes back
                        if run_context is None and session_state is not None:
                            merge_dictionaries(session_state, session_state_copy)

                        if final_response is not None:
                            response = final_response
                        else:
                            response = StepOutput(content=content)
                    else:
                        if _is_async_callable(self.active_executor):
                            result = await self._acall_custom_function(
                                self.active_executor,
                                step_input,
                                session_state_copy,
                                run_context,
                            )
                        else:
                            result = self._call_custom_function(
                                self.active_executor,  # type: ignore[arg-type]
                                step_input,
                                session_state_copy,
                                run_context,
                            )

                        # Merge session_state changes back
                        if run_context is None and session_state is not None:
                            merge_dictionaries(session_state, session_state_copy)

                        # If function returns StepOutput, use it directly
                        if isinstance(result, StepOutput):
                            response = result
                        elif isinstance(result, (RunOutput, TeamRunOutput)):
                            response = StepOutput(content=result.content)
                        else:
                            response = StepOutput(content=str(result))

                else:
                    # For agents and teams, prepare message with context
                    message = self._prepare_message(
                        step_input.input,
                        step_input.previous_step_outputs,
                    )

                    # Execute agent or team with media
                    if self._executor_type in ["agent", "team"]:
                        # Switch to appropriate logger based on executor type
                        if self._executor_type == "agent":
                            use_agent_logger()
                        elif self._executor_type == "team":
                            use_team_logger()

                        images = (
                            self._convert_image_artifacts_to_images(step_input.images) if step_input.images else None
                        )
                        videos = (
                            self._convert_video_artifacts_to_videos(step_input.videos) if step_input.videos else None
                        )
                        audios = self._convert_audio_artifacts_to_audio(step_input.audio) if step_input.audio else None

                        kwargs: Dict[str, Any] = {}
                        if isinstance(self.active_executor, Team):
                            kwargs["store_member_responses"] = True

                        num_history_runs = self.num_history_runs if self.num_history_runs else num_history_runs

                        use_history = (
                            self.add_workflow_history
                            if self.add_workflow_history is not None
                            else add_workflow_history_to_steps
                        )

                        final_message = message
                        if use_history and workflow_session:
                            history_messages = workflow_session.get_workflow_history_context(num_runs=num_history_runs)
                            if history_messages:
                                final_message = f"{history_messages}{message}"

                        response = await self.active_executor.arun(  # type: ignore
                            input=final_message,  # type: ignore
                            images=images,
                            videos=videos,
                            audio=audios,
                            files=step_input.files,
                            session_id=session_id,
                            user_id=user_id,
                            session_state=session_state_copy,
                            run_context=run_context,
                            **kwargs,
                        )

                        # Update workflow session state
                        if run_context is None and session_state is not None:
                            merge_dictionaries(session_state, session_state_copy)

                        if store_executor_outputs and workflow_run_response is not None:
                            self._store_executor_response(workflow_run_response, response)  # type: ignore

                        # Switch back to workflow logger after execution
                        use_workflow_logger()
                    else:
                        raise ValueError(f"Unsupported executor type: {self._executor_type}")

                # Create StepOutput from response
                step_output = self._process_step_output(response)  # type: ignore

                return step_output

            except Exception as e:
                self.retry_count = attempt + 1
                logger.warning(f"Step {self.name} failed (attempt {attempt + 1}): {e}")

                if attempt == self.max_retries:
                    if self.skip_on_failure:
                        log_debug(f"Step {self.name} failed but continuing due to skip_on_failure=True")
                        # Create empty StepOutput for skipped step
                        return StepOutput(content=f"Step {self.name} failed but skipped", success=False, error=str(e))
|
|
912
|
+
else:
|
|
913
|
+
raise e
|
|
914
|
+
|
|
915
|
+
return StepOutput(content=f"Step {self.name} failed but skipped", success=False)
|
|
916
|
+
|
|
917
|
+
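Note on the custom-function branch above: the step dispatches on the shape of the executor. Generator and async-generator functions are iterated, with string chunks accumulated into content and any yielded StepOutput taken as the final response; plain and async callables are invoked once and their return value is wrapped. A minimal sketch of a generator-style executor under those rules follows (the function name and its single-parameter signature are illustrative assumptions; the exact arguments passed by _call_custom_function are not shown in this excerpt, and the import path is assumed):

# Hedged sketch of a generator-style custom step executor (not part of the package).
from typing import Iterator, Union

from agno.workflow import StepInput, StepOutput  # assumed import path


def summarize_step(step_input: StepInput) -> Iterator[Union[str, StepOutput]]:
    text = str(step_input.input or "")
    yield "Summarizing input...\n"          # accumulated into `content` by the code above
    yield StepOutput(content=text[:200])    # a yielded StepOutput becomes final_response

If no StepOutput is yielded, the step falls back to StepOutput(content=content) built from the accumulated chunks.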
    async def aexecute_stream(
        self,
        step_input: StepInput,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        stream_events: bool = False,
        stream_intermediate_steps: bool = False,
        stream_executor_events: bool = True,
        workflow_run_response: Optional["WorkflowRunOutput"] = None,
        run_context: Optional[RunContext] = None,
        session_state: Optional[Dict[str, Any]] = None,
        step_index: Optional[Union[int, tuple]] = None,
        store_executor_outputs: bool = True,
        parent_step_id: Optional[str] = None,
        workflow_session: Optional["WorkflowSession"] = None,
        add_workflow_history_to_steps: Optional[bool] = False,
        num_history_runs: int = 3,
    ) -> AsyncIterator[Union[WorkflowRunOutputEvent, StepOutput]]:
        """Execute the step with event-driven streaming support"""

        if step_input.previous_step_outputs:
            step_input.previous_step_content = step_input.get_last_step_content()

        if workflow_session:
            step_input.workflow_session = workflow_session

        # Create session_state copy once to avoid duplication.
        # Consider both run_context.session_state and session_state.
        if run_context is not None and run_context.session_state is not None:
            session_state_copy = run_context.session_state
        else:
            session_state_copy = copy(session_state) if session_state is not None else {}

        # Considering both stream_events and stream_intermediate_steps (deprecated)
        stream_events = stream_events or stream_intermediate_steps

        if stream_events and workflow_run_response:
            # Emit StepStartedEvent
            yield StepStartedEvent(
                run_id=workflow_run_response.run_id or "",
                workflow_name=workflow_run_response.workflow_name or "",
                workflow_id=workflow_run_response.workflow_id or "",
                session_id=workflow_run_response.session_id or "",
                step_name=self.name,
                step_index=step_index,
                step_id=self.step_id,
                parent_step_id=parent_step_id,
            )

        # Execute with retries and streaming
        for attempt in range(self.max_retries + 1):
            try:
                log_debug(f"Async step {self.name} streaming attempt {attempt + 1}/{self.max_retries + 1}")
                final_response = None

                if self._executor_type == "function":
                    log_debug(f"Executing async function executor for step: {self.name}")

                    # Check if the function is an async generator
                    if _is_async_generator_function(self.active_executor):
                        content = ""
                        # It's an async generator - iterate over it
                        iterator = await self._acall_custom_function(
                            self.active_executor,
                            step_input,
                            session_state_copy,
                            run_context,
                        )
                        async for event in iterator:  # type: ignore
                            if isinstance(event, (BaseRunOutputEvent)):
                                if (
                                    isinstance(event, (RunContentEvent, TeamRunContentEvent))
                                    and event.content is not None
                                ):
                                    if isinstance(event.content, str):
                                        content += event.content
                                    elif isinstance(event.content, BaseModel):
                                        content = event.content  # type: ignore[assignment]
                                    else:
                                        content = str(event.content)

                                # Only yield executor events if stream_executor_events is True
                                if stream_executor_events:
                                    enriched_event = self._enrich_event_with_context(
                                        event, workflow_run_response, step_index
                                    )
                                    yield enriched_event  # type: ignore[misc]
                            elif isinstance(event, (RunOutput, TeamRunOutput)):
                                content = event.content  # type: ignore[assignment]
                            else:
                                content += str(event)
                            if isinstance(event, StepOutput):
                                final_response = event
                                break
                        if not final_response:
                            final_response = StepOutput(content=content)
                    elif _is_async_callable(self.active_executor):
                        # It's a regular async function - await it
                        result = await self._acall_custom_function(
                            self.active_executor,
                            step_input,
                            session_state_copy,
                            run_context,
                        )
                        if isinstance(result, StepOutput):
                            final_response = result
                        elif isinstance(result, (RunOutput, TeamRunOutput)):
                            final_response = StepOutput(content=result.content)
                        else:
                            final_response = StepOutput(content=str(result))
                    elif _is_generator_function(self.active_executor):
                        content = ""
                        # It's a regular generator function - iterate over it
                        iterator = self._call_custom_function(
                            self.active_executor,
                            step_input,
                            session_state_copy,
                            run_context,
                        )
                        for event in iterator:  # type: ignore
                            if isinstance(event, (BaseRunOutputEvent)):
                                if (
                                    isinstance(event, (RunContentEvent, TeamRunContentEvent))
                                    and event.content is not None
                                ):
                                    if isinstance(event.content, str):
                                        content += event.content
                                    elif isinstance(event.content, BaseModel):
                                        content = event.content  # type: ignore[assignment]
                                    else:
                                        content = str(event.content)

                                # Only yield executor events if stream_executor_events is True
                                if stream_executor_events:
                                    enriched_event = self._enrich_event_with_context(
                                        event, workflow_run_response, step_index
                                    )
                                    yield enriched_event  # type: ignore[misc]
                            elif isinstance(event, (RunOutput, TeamRunOutput)):
                                content = event.content  # type: ignore[assignment]
                            else:
                                content += str(event)
                            if isinstance(event, StepOutput):
                                final_response = event
                                break
                        if not final_response:
                            final_response = StepOutput(content=content)
                    else:
                        # It's a regular function - call it directly
                        result = self._call_custom_function(
                            self.active_executor,  # type: ignore[arg-type]
                            step_input,
                            session_state_copy,
                            run_context,
                        )
                        if isinstance(result, StepOutput):
                            final_response = result
                        elif isinstance(result, (RunOutput, TeamRunOutput)):
                            final_response = StepOutput(content=result.content)
                        else:
                            final_response = StepOutput(content=str(result))

                    # Merge session_state changes back
                    if run_context is None and session_state is not None:
                        merge_dictionaries(session_state, session_state_copy)
                else:
                    # For agents and teams, prepare message with context
                    message = self._prepare_message(
                        step_input.input,
                        step_input.previous_step_outputs,
                    )

                    if self._executor_type in ["agent", "team"]:
                        # Switch to appropriate logger based on executor type
                        if self._executor_type == "agent":
                            use_agent_logger()
                        elif self._executor_type == "team":
                            use_team_logger()

                        images = (
                            self._convert_image_artifacts_to_images(step_input.images) if step_input.images else None
                        )
                        videos = (
                            self._convert_video_artifacts_to_videos(step_input.videos) if step_input.videos else None
                        )
                        audios = self._convert_audio_artifacts_to_audio(step_input.audio) if step_input.audio else None

                        kwargs: Dict[str, Any] = {}
                        if isinstance(self.active_executor, Team):
                            kwargs["store_member_responses"] = True

                        num_history_runs = self.num_history_runs if self.num_history_runs else num_history_runs

                        use_history = (
                            self.add_workflow_history
                            if self.add_workflow_history is not None
                            else add_workflow_history_to_steps
                        )

                        final_message = message
                        if use_history and workflow_session:
                            history_messages = workflow_session.get_workflow_history_context(num_runs=num_history_runs)
                            if history_messages:
                                final_message = f"{history_messages}{message}"

                        response_stream = self.active_executor.arun(  # type: ignore
                            input=final_message,
                            images=images,
                            videos=videos,
                            audio=audios,
                            files=step_input.files,
                            session_id=session_id,
                            user_id=user_id,
                            session_state=session_state_copy,
                            stream=True,
                            stream_events=stream_events,
                            run_context=run_context,
                            yield_run_response=True,
                            **kwargs,
                        )

                        active_executor_run_response = None
                        async for event in response_stream:
                            if isinstance(event, RunOutput) or isinstance(event, TeamRunOutput):
                                active_executor_run_response = event
                                break
                            # Only yield executor events if stream_executor_events is True
                            if stream_executor_events:
                                enriched_event = self._enrich_event_with_context(
                                    event, workflow_run_response, step_index
                                )
                                yield enriched_event  # type: ignore[misc]

                        # Update workflow session state
                        if run_context is None and session_state is not None:
                            merge_dictionaries(session_state, session_state_copy)

                        if store_executor_outputs and workflow_run_response is not None:
                            self._store_executor_response(workflow_run_response, active_executor_run_response)  # type: ignore

                        final_response = active_executor_run_response  # type: ignore
                    else:
                        raise ValueError(f"Unsupported executor type: {self._executor_type}")

                # If we didn't get a final response, create one
                if final_response is None:
                    final_response = StepOutput(content="")

                # Switch back to workflow logger after execution
                use_workflow_logger()

                # Yield the final response
                final_response = self._process_step_output(final_response)
                yield final_response

                if stream_events and workflow_run_response:
                    # Emit StepCompletedEvent
                    yield StepCompletedEvent(
                        run_id=workflow_run_response.run_id or "",
                        workflow_name=workflow_run_response.workflow_name or "",
                        workflow_id=workflow_run_response.workflow_id or "",
                        session_id=workflow_run_response.session_id or "",
                        step_name=self.name,
                        step_index=step_index,
                        step_id=self.step_id,
                        content=final_response.content,
                        step_response=final_response,
                        parent_step_id=parent_step_id,
                    )
                return

            except Exception as e:
                self.retry_count = attempt + 1
                logger.warning(f"Step {self.name} failed (attempt {attempt + 1}): {e}")

                if attempt == self.max_retries:
                    if self.skip_on_failure:
                        log_debug(f"Step {self.name} failed but continuing due to skip_on_failure=True")
                        # Create empty StepOutput for skipped step
                        step_output = StepOutput(
                            content=f"Step {self.name} failed but skipped", success=False, error=str(e)
                        )
                        yield step_output
                    else:
                        raise e

        return
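aexecute_stream above yields, in order: a StepStartedEvent (when stream_events is set), enriched executor events (when stream_executor_events is True), the processed StepOutput itself, and finally a StepCompletedEvent. A minimal consumption sketch, assuming `step` and `step_input` have already been constructed elsewhere (their construction is outside this excerpt, and the import path is assumed):

# Hedged sketch: drain a single step's event stream.
import asyncio

from agno.workflow import StepOutput  # assumed import path


async def drain_step_stream(step, step_input) -> None:
    async for item in step.aexecute_stream(step_input, stream_events=True):
        if isinstance(item, StepOutput):
            print("final content:", item.content)   # the processed step output
        else:
            print("event:", type(item).__name__)    # started/completed/executor events


# asyncio.run(drain_step_stream(step, step_input))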
    def _store_executor_response(
        self, workflow_run_response: "WorkflowRunOutput", executor_run_response: Union[RunOutput, TeamRunOutput]
    ) -> None:
        """Store agent/team responses in step_executor_runs if enabled"""
        if self._executor_type in ["agent", "team"]:
            # Propagate the workflow run id as parent run id to the executor response
            executor_run_response.parent_run_id = workflow_run_response.run_id
            executor_run_response.workflow_step_id = self.step_id

            # Scrub the executor response based on the executor's storage flags before storing
            if (
                not self.active_executor.store_media
                or not self.active_executor.store_tool_messages
                or not self.active_executor.store_history_messages
            ):  # type: ignore
                self.active_executor._scrub_run_output_for_storage(executor_run_response)  # type: ignore

            # Get the raw response from the step's active executor
            raw_response = executor_run_response
            if raw_response and isinstance(raw_response, (RunOutput, TeamRunOutput)):
                if workflow_run_response.step_executor_runs is None:
                    workflow_run_response.step_executor_runs = []

                raw_response.workflow_step_id = self.step_id
                # Add the primary executor run
                workflow_run_response.step_executor_runs.append(raw_response)

                # Add direct member agent runs (in case of a team we force store_member_responses=True here)
                if isinstance(raw_response, TeamRunOutput) and getattr(
                    self.active_executor, "store_member_responses", False
                ):
                    for mr in raw_response.member_responses or []:
                        if isinstance(mr, RunOutput):
                            workflow_run_response.step_executor_runs.append(mr)
    def _get_deepest_content_from_step_output(self, step_output: "StepOutput") -> Optional[str]:
        """
        Extract the deepest content from a step output, handling nested structures like Steps, Router, Loop, etc.

        For container steps (Steps, Router, Loop, etc.), this will recursively find the content from the
        last actual step rather than using the generic container message.
        """
        # If this step has nested steps (like Steps, Condition, Router, Loop, etc.)
        if hasattr(step_output, "steps") and step_output.steps and len(step_output.steps) > 0:
            # Recursively get content from the last nested step
            return self._get_deepest_content_from_step_output(step_output.steps[-1])

        # For regular steps, return their content
        return step_output.content  # type: ignore
    def _prepare_message(
        self,
        message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]],
        previous_step_outputs: Optional[Dict[str, StepOutput]] = None,
    ) -> Optional[Union[str, List[Any], Dict[str, Any], BaseModel]]:
        """Prepare the primary input by combining message and previous step outputs"""

        if previous_step_outputs and self._executor_type in ["agent", "team"]:
            last_output = list(previous_step_outputs.values())[-1] if previous_step_outputs else None
            if last_output:
                deepest_content = self._get_deepest_content_from_step_output(last_output)
                if deepest_content:
                    return deepest_content

        # If no previous step outputs, return the original message unchanged
        return message
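_prepare_message forwards the most recent previous step's content to agent and team executors, using _get_deepest_content_from_step_output to drill past container outputs (Steps, Router, Loop, etc.) to the last leaf step; only when there is no previous output does the original message pass through unchanged. A small illustration of the drilling, assuming StepOutput accepts a `steps` list as the hasattr check above implies:

# Hedged sketch of the recursion above (constructing StepOutput with `steps` is assumed).
leaf = StepOutput(content="answer from the last leaf step")
container = StepOutput(content="generic container summary", steps=[leaf])

# step._get_deepest_content_from_step_output(container) recurses into steps[-1]
# and returns "answer from the last leaf step", which _prepare_message then
# hands to the next agent/team executor instead of the container summary.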
    def _process_step_output(self, response: Union[RunOutput, TeamRunOutput, StepOutput]) -> StepOutput:
        """Create StepOutput from execution response"""
        if isinstance(response, StepOutput):
            response.step_name = self.name or "unnamed_step"
            response.step_id = self.step_id
            response.step_type = StepType.STEP
            response.executor_type = self._executor_type
            response.executor_name = self.executor_name
            return response

        # Extract media from response
        images = getattr(response, "images", None)
        videos = getattr(response, "videos", None)
        audio = getattr(response, "audio", None)

        # Extract metrics from response
        metrics = self._extract_metrics_from_response(response)

        return StepOutput(
            step_name=self.name or "unnamed_step",
            step_id=self.step_id,
            step_type=StepType.STEP,
            executor_type=self._executor_type,
            executor_name=self.executor_name,
            content=response.content,
            step_run_id=getattr(response, "run_id", None),
            images=images,
            videos=videos,
            audio=audio,
            metrics=metrics,
        )
    def _convert_function_result_to_response(self, result: Any) -> RunOutput:
        """Convert function execution result to RunOutput"""
        if isinstance(result, RunOutput):
            return result
        elif isinstance(result, str):
            return RunOutput(content=result)
        elif isinstance(result, dict):
            # If it's a dict, try to extract content
            content = result.get("content", str(result))
            return RunOutput(content=content)
        else:
            # Convert any other type to string
            return RunOutput(content=str(result))
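_convert_function_result_to_response normalizes arbitrary function return values into a RunOutput: RunOutput passes through, strings wrap directly, dicts contribute their "content" key (falling back to their string form), and everything else is stringified. Expected mappings, written out for reference (not a test from the package):

# step._convert_function_result_to_response("hi").content             -> "hi"
# step._convert_function_result_to_response({"content": "x"}).content -> "x"
# step._convert_function_result_to_response({"k": 1}).content         -> "{'k': 1}"
# step._convert_function_result_to_response(42).content               -> "42"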
    def _convert_audio_artifacts_to_audio(self, audio_artifacts: List[Audio]) -> List[Audio]:
        """Convert AudioArtifact objects to Audio objects"""
        audios = []
        for audio_artifact in audio_artifacts:
            if audio_artifact.url:
                audios.append(Audio(url=audio_artifact.url))
            elif audio_artifact.content:
                audios.append(Audio(content=audio_artifact.content))
            else:
                logger.warning(f"Skipping AudioArtifact with no URL or content: {audio_artifact}")
                continue
        return audios
    def _convert_image_artifacts_to_images(self, image_artifacts: List[Image]) -> List[Image]:
        """
        Convert ImageArtifact objects to Image objects with proper content handling.

        Args:
            image_artifacts: List of ImageArtifact objects to convert

        Returns:
            List of Image objects ready for agent processing
        """
        import base64

        images = []
        for i, img_artifact in enumerate(image_artifacts):
            # Create Image object with proper data from ImageArtifact
            if img_artifact.url:
                images.append(Image(url=img_artifact.url))

            elif img_artifact.content:
                # Handle the case where content is base64-encoded bytes from OpenAI tools
                try:
                    # Try to decode as base64 first (for images from OpenAI tools)
                    if isinstance(img_artifact.content, bytes):
                        # Decode bytes to string, then decode base64 to get actual image bytes
                        base64_str: str = img_artifact.content.decode("utf-8")
                        actual_image_bytes = base64.b64decode(base64_str)
                    else:
                        # If it's already actual image bytes
                        actual_image_bytes = img_artifact.content

                    # Create Image object with proper format
                    image_kwargs = {"content": actual_image_bytes}
                    if img_artifact.mime_type:
                        # Convert mime_type to format (e.g., "image/png" -> "png")
                        if "/" in img_artifact.mime_type:
                            format_from_mime = img_artifact.mime_type.split("/")[-1]
                            image_kwargs["format"] = format_from_mime  # type: ignore[assignment]

                    images.append(Image(**image_kwargs))

                except Exception as e:
                    logger.error(f"Failed to process image content: {e}")
                    # Skip this image if we can't process it
                    continue

            else:
                # Skip images that have neither URL nor content
                logger.warning(f"Skipping ImageArtifact {i} with no URL or content: {img_artifact}")
                continue

        return images
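The bytes branch above assumes the artifact's `content` is UTF-8 text that is itself base64 (as some tool outputs produce) and therefore decodes twice: bytes to base64 text, then base64 text to real image bytes. A standalone sketch of that round trip using only the standard library, plus the mime-type-to-format split (the placeholder bytes are illustrative):

import base64

raw_png = b"\x89PNG\r\n\x1a\n..."                   # placeholder image bytes
tool_output = base64.b64encode(raw_png)             # what base64-producing tools hand back

base64_str = tool_output.decode("utf-8")            # bytes -> base64 text
actual_image_bytes = base64.b64decode(base64_str)   # base64 text -> real image bytes
assert actual_image_bytes == raw_png

image_format = "image/png".split("/")[-1]           # mime type -> "png", as above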
    def _convert_video_artifacts_to_videos(self, video_artifacts: List[Video]) -> List[Video]:
        """
        Convert VideoArtifact objects to Video objects with proper content handling.

        Args:
            video_artifacts: List of VideoArtifact objects to convert

        Returns:
            List of Video objects ready for agent processing
        """
        videos = []
        for i, video_artifact in enumerate(video_artifacts):
            # Create Video object with proper data from VideoArtifact
            if video_artifact.url:
                videos.append(Video(url=video_artifact.url))

            elif video_artifact.content:
                videos.append(Video(content=video_artifact.content))

            else:
                # Skip videos that have neither URL nor content
                logger.warning(f"Skipping VideoArtifact {i} with no URL or content: {video_artifact}")
                continue

        return videos
def _is_async_callable(obj: Any) -> TypeGuard[Callable[..., Any]]:
    """Checks if obj is an async callable (coroutine function or callable with async __call__)"""
    return inspect.iscoroutinefunction(obj) or (callable(obj) and inspect.iscoroutinefunction(obj.__call__))


def _is_generator_function(obj: Any) -> TypeGuard[Callable[..., Any]]:
    """Checks if obj is a generator function, including callable class instances with generator __call__ methods"""
    if inspect.isgeneratorfunction(obj):
        return True
    # Check if it's a callable class instance with a generator __call__ method
    if callable(obj) and hasattr(obj, "__call__"):
        return inspect.isgeneratorfunction(obj.__call__)
    return False


def _is_async_generator_function(obj: Any) -> TypeGuard[Callable[..., Any]]:
    """Checks if obj is an async generator function, including callable class instances"""
    if inspect.isasyncgenfunction(obj):
        return True
    # Check if it's a callable class instance with an async generator __call__ method
    if callable(obj) and hasattr(obj, "__call__"):
        return inspect.isasyncgenfunction(obj.__call__)
    return False
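These module-level helpers classify a user-supplied executor by shape, and deliberately also inspect __call__ so that callable class instances with generator or async-generator __call__ methods are treated like the corresponding functions. A small sketch of the shapes they distinguish (the example names are illustrative, not part of the package):

# Hedged sketch of the callable shapes distinguished above.
def plain(x):                       # regular function: none of the helpers match
    return x


async def coro(x):                  # _is_async_callable(coro) -> True
    return x


def gen(x):                         # _is_generator_function(gen) -> True
    yield x


async def agen(x):                  # _is_async_generator_function(agen) -> True
    yield x


class StreamingExecutor:
    def __call__(self, x):          # instance with a generator __call__
        yield x


# _is_generator_function(StreamingExecutor()) -> True, because the helper
# falls back to inspecting obj.__call__ on callable class instances.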