agno 1.8.1__py3-none-any.whl → 2.0.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/__init__.py +8 -0
- agno/agent/__init__.py +19 -27
- agno/agent/agent.py +2778 -4123
- agno/api/agent.py +9 -65
- agno/api/api.py +5 -46
- agno/api/evals.py +6 -17
- agno/api/os.py +17 -0
- agno/api/routes.py +6 -41
- agno/api/schemas/__init__.py +9 -0
- agno/api/schemas/agent.py +5 -21
- agno/api/schemas/evals.py +7 -16
- agno/api/schemas/os.py +14 -0
- agno/api/schemas/team.py +5 -21
- agno/api/schemas/utils.py +21 -0
- agno/api/schemas/workflows.py +11 -7
- agno/api/settings.py +53 -0
- agno/api/team.py +9 -64
- agno/api/workflow.py +28 -0
- agno/cloud/aws/base.py +214 -0
- agno/cloud/aws/s3/__init__.py +2 -0
- agno/cloud/aws/s3/api_client.py +43 -0
- agno/cloud/aws/s3/bucket.py +195 -0
- agno/cloud/aws/s3/object.py +57 -0
- agno/db/__init__.py +24 -0
- agno/db/base.py +245 -0
- agno/db/dynamo/__init__.py +3 -0
- agno/db/dynamo/dynamo.py +1749 -0
- agno/db/dynamo/schemas.py +278 -0
- agno/db/dynamo/utils.py +684 -0
- agno/db/firestore/__init__.py +3 -0
- agno/db/firestore/firestore.py +1438 -0
- agno/db/firestore/schemas.py +130 -0
- agno/db/firestore/utils.py +278 -0
- agno/db/gcs_json/__init__.py +3 -0
- agno/db/gcs_json/gcs_json_db.py +1001 -0
- agno/db/gcs_json/utils.py +194 -0
- agno/db/in_memory/__init__.py +3 -0
- agno/db/in_memory/in_memory_db.py +888 -0
- agno/db/in_memory/utils.py +172 -0
- agno/db/json/__init__.py +3 -0
- agno/db/json/json_db.py +1051 -0
- agno/db/json/utils.py +196 -0
- agno/db/migrations/v1_to_v2.py +162 -0
- agno/db/mongo/__init__.py +3 -0
- agno/db/mongo/mongo.py +1417 -0
- agno/db/mongo/schemas.py +77 -0
- agno/db/mongo/utils.py +204 -0
- agno/db/mysql/__init__.py +3 -0
- agno/db/mysql/mysql.py +1719 -0
- agno/db/mysql/schemas.py +124 -0
- agno/db/mysql/utils.py +298 -0
- agno/db/postgres/__init__.py +3 -0
- agno/db/postgres/postgres.py +1720 -0
- agno/db/postgres/schemas.py +124 -0
- agno/db/postgres/utils.py +281 -0
- agno/db/redis/__init__.py +3 -0
- agno/db/redis/redis.py +1371 -0
- agno/db/redis/schemas.py +109 -0
- agno/db/redis/utils.py +288 -0
- agno/db/schemas/__init__.py +3 -0
- agno/db/schemas/evals.py +33 -0
- agno/db/schemas/knowledge.py +40 -0
- agno/db/schemas/memory.py +46 -0
- agno/db/singlestore/__init__.py +3 -0
- agno/db/singlestore/schemas.py +116 -0
- agno/db/singlestore/singlestore.py +1722 -0
- agno/db/singlestore/utils.py +327 -0
- agno/db/sqlite/__init__.py +3 -0
- agno/db/sqlite/schemas.py +119 -0
- agno/db/sqlite/sqlite.py +1680 -0
- agno/db/sqlite/utils.py +269 -0
- agno/db/utils.py +88 -0
- agno/eval/__init__.py +14 -0
- agno/eval/accuracy.py +142 -43
- agno/eval/performance.py +88 -23
- agno/eval/reliability.py +73 -20
- agno/eval/utils.py +23 -13
- agno/integrations/discord/__init__.py +3 -0
- agno/{app → integrations}/discord/client.py +10 -10
- agno/knowledge/__init__.py +2 -2
- agno/{document → knowledge}/chunking/agentic.py +2 -2
- agno/{document → knowledge}/chunking/document.py +2 -2
- agno/{document → knowledge}/chunking/fixed.py +3 -3
- agno/{document → knowledge}/chunking/markdown.py +2 -2
- agno/{document → knowledge}/chunking/recursive.py +2 -2
- agno/{document → knowledge}/chunking/row.py +2 -2
- agno/knowledge/chunking/semantic.py +59 -0
- agno/knowledge/chunking/strategy.py +121 -0
- agno/knowledge/content.py +74 -0
- agno/knowledge/document/__init__.py +5 -0
- agno/{document → knowledge/document}/base.py +12 -2
- agno/knowledge/embedder/__init__.py +5 -0
- agno/{embedder → knowledge/embedder}/aws_bedrock.py +127 -1
- agno/{embedder → knowledge/embedder}/azure_openai.py +65 -1
- agno/{embedder → knowledge/embedder}/base.py +6 -0
- agno/{embedder → knowledge/embedder}/cohere.py +72 -1
- agno/{embedder → knowledge/embedder}/fastembed.py +17 -1
- agno/{embedder → knowledge/embedder}/fireworks.py +1 -1
- agno/{embedder → knowledge/embedder}/google.py +74 -1
- agno/{embedder → knowledge/embedder}/huggingface.py +36 -2
- agno/{embedder → knowledge/embedder}/jina.py +48 -2
- agno/knowledge/embedder/langdb.py +22 -0
- agno/knowledge/embedder/mistral.py +139 -0
- agno/{embedder → knowledge/embedder}/nebius.py +1 -1
- agno/{embedder → knowledge/embedder}/ollama.py +54 -3
- agno/knowledge/embedder/openai.py +223 -0
- agno/{embedder → knowledge/embedder}/sentence_transformer.py +16 -1
- agno/{embedder → knowledge/embedder}/together.py +1 -1
- agno/{embedder → knowledge/embedder}/voyageai.py +49 -1
- agno/knowledge/knowledge.py +1515 -0
- agno/knowledge/reader/__init__.py +7 -0
- agno/{document → knowledge}/reader/arxiv_reader.py +32 -4
- agno/knowledge/reader/base.py +88 -0
- agno/{document → knowledge}/reader/csv_reader.py +68 -15
- agno/knowledge/reader/docx_reader.py +83 -0
- agno/{document → knowledge}/reader/firecrawl_reader.py +42 -21
- agno/knowledge/reader/gcs_reader.py +67 -0
- agno/{document → knowledge}/reader/json_reader.py +30 -9
- agno/{document → knowledge}/reader/markdown_reader.py +36 -9
- agno/{document → knowledge}/reader/pdf_reader.py +79 -21
- agno/knowledge/reader/reader_factory.py +275 -0
- agno/knowledge/reader/s3_reader.py +171 -0
- agno/{document → knowledge}/reader/text_reader.py +31 -10
- agno/knowledge/reader/url_reader.py +84 -0
- agno/knowledge/reader/web_search_reader.py +389 -0
- agno/{document → knowledge}/reader/website_reader.py +37 -10
- agno/knowledge/reader/wikipedia_reader.py +59 -0
- agno/knowledge/reader/youtube_reader.py +78 -0
- agno/knowledge/remote_content/remote_content.py +88 -0
- agno/{reranker → knowledge/reranker}/base.py +1 -1
- agno/{reranker → knowledge/reranker}/cohere.py +2 -2
- agno/{reranker → knowledge/reranker}/infinity.py +2 -2
- agno/{reranker → knowledge/reranker}/sentence_transformer.py +2 -2
- agno/knowledge/types.py +30 -0
- agno/knowledge/utils.py +169 -0
- agno/memory/__init__.py +2 -10
- agno/memory/manager.py +1003 -148
- agno/models/aimlapi/__init__.py +2 -2
- agno/models/aimlapi/aimlapi.py +6 -6
- agno/models/anthropic/claude.py +129 -82
- agno/models/aws/bedrock.py +107 -175
- agno/models/aws/claude.py +64 -18
- agno/models/azure/ai_foundry.py +73 -23
- agno/models/base.py +347 -287
- agno/models/cerebras/cerebras.py +84 -27
- agno/models/cohere/chat.py +106 -98
- agno/models/google/gemini.py +100 -42
- agno/models/groq/groq.py +97 -35
- agno/models/huggingface/huggingface.py +92 -27
- agno/models/ibm/watsonx.py +72 -13
- agno/models/litellm/chat.py +85 -13
- agno/models/message.py +38 -144
- agno/models/meta/llama.py +85 -49
- agno/models/metrics.py +120 -0
- agno/models/mistral/mistral.py +90 -21
- agno/models/ollama/__init__.py +0 -2
- agno/models/ollama/chat.py +84 -46
- agno/models/openai/chat.py +121 -23
- agno/models/openai/responses.py +178 -105
- agno/models/perplexity/perplexity.py +26 -2
- agno/models/portkey/portkey.py +0 -7
- agno/models/response.py +14 -8
- agno/models/utils.py +20 -0
- agno/models/vercel/__init__.py +2 -2
- agno/models/vercel/v0.py +1 -1
- agno/models/vllm/__init__.py +2 -2
- agno/models/vllm/vllm.py +3 -3
- agno/models/xai/xai.py +10 -10
- agno/os/__init__.py +3 -0
- agno/os/app.py +393 -0
- agno/os/auth.py +47 -0
- agno/os/config.py +103 -0
- agno/os/interfaces/agui/__init__.py +3 -0
- agno/os/interfaces/agui/agui.py +31 -0
- agno/{app/agui/async_router.py → os/interfaces/agui/router.py} +16 -16
- agno/{app → os/interfaces}/agui/utils.py +65 -28
- agno/os/interfaces/base.py +21 -0
- agno/os/interfaces/slack/__init__.py +3 -0
- agno/{app/slack/async_router.py → os/interfaces/slack/router.py} +3 -5
- agno/os/interfaces/slack/slack.py +33 -0
- agno/os/interfaces/whatsapp/__init__.py +3 -0
- agno/{app/whatsapp/async_router.py → os/interfaces/whatsapp/router.py} +4 -7
- agno/os/interfaces/whatsapp/whatsapp.py +30 -0
- agno/os/router.py +843 -0
- agno/os/routers/__init__.py +3 -0
- agno/os/routers/evals/__init__.py +3 -0
- agno/os/routers/evals/evals.py +204 -0
- agno/os/routers/evals/schemas.py +142 -0
- agno/os/routers/evals/utils.py +161 -0
- agno/os/routers/knowledge/__init__.py +3 -0
- agno/os/routers/knowledge/knowledge.py +413 -0
- agno/os/routers/knowledge/schemas.py +118 -0
- agno/os/routers/memory/__init__.py +3 -0
- agno/os/routers/memory/memory.py +179 -0
- agno/os/routers/memory/schemas.py +58 -0
- agno/os/routers/metrics/__init__.py +3 -0
- agno/os/routers/metrics/metrics.py +58 -0
- agno/os/routers/metrics/schemas.py +47 -0
- agno/os/routers/session/__init__.py +3 -0
- agno/os/routers/session/session.py +163 -0
- agno/os/schema.py +892 -0
- agno/{app/playground → os}/settings.py +8 -15
- agno/os/utils.py +270 -0
- agno/reasoning/azure_ai_foundry.py +4 -4
- agno/reasoning/deepseek.py +4 -4
- agno/reasoning/default.py +6 -11
- agno/reasoning/groq.py +4 -4
- agno/reasoning/helpers.py +4 -6
- agno/reasoning/ollama.py +4 -4
- agno/reasoning/openai.py +4 -4
- agno/run/{response.py → agent.py} +144 -72
- agno/run/base.py +44 -58
- agno/run/cancel.py +83 -0
- agno/run/team.py +133 -77
- agno/run/workflow.py +537 -12
- agno/session/__init__.py +10 -0
- agno/session/agent.py +244 -0
- agno/session/summary.py +225 -0
- agno/session/team.py +262 -0
- agno/{storage/session/v2 → session}/workflow.py +47 -24
- agno/team/__init__.py +15 -16
- agno/team/team.py +2961 -4253
- agno/tools/agentql.py +14 -5
- agno/tools/airflow.py +9 -4
- agno/tools/api.py +7 -3
- agno/tools/apify.py +2 -46
- agno/tools/arxiv.py +8 -3
- agno/tools/aws_lambda.py +7 -5
- agno/tools/aws_ses.py +7 -1
- agno/tools/baidusearch.py +4 -1
- agno/tools/bitbucket.py +4 -4
- agno/tools/brandfetch.py +14 -11
- agno/tools/bravesearch.py +4 -1
- agno/tools/brightdata.py +42 -22
- agno/tools/browserbase.py +13 -4
- agno/tools/calcom.py +12 -10
- agno/tools/calculator.py +10 -27
- agno/tools/cartesia.py +18 -13
- agno/tools/{clickup_tool.py → clickup.py} +12 -25
- agno/tools/confluence.py +8 -8
- agno/tools/crawl4ai.py +7 -1
- agno/tools/csv_toolkit.py +9 -8
- agno/tools/dalle.py +18 -11
- agno/tools/daytona.py +13 -16
- agno/tools/decorator.py +6 -3
- agno/tools/desi_vocal.py +16 -7
- agno/tools/discord.py +11 -8
- agno/tools/docker.py +30 -42
- agno/tools/duckdb.py +34 -53
- agno/tools/duckduckgo.py +8 -7
- agno/tools/e2b.py +61 -61
- agno/tools/eleven_labs.py +35 -28
- agno/tools/email.py +4 -1
- agno/tools/evm.py +7 -1
- agno/tools/exa.py +19 -14
- agno/tools/fal.py +29 -29
- agno/tools/file.py +9 -8
- agno/tools/financial_datasets.py +25 -44
- agno/tools/firecrawl.py +22 -22
- agno/tools/function.py +68 -17
- agno/tools/giphy.py +22 -10
- agno/tools/github.py +48 -126
- agno/tools/gmail.py +45 -61
- agno/tools/google_bigquery.py +7 -6
- agno/tools/google_maps.py +11 -26
- agno/tools/googlesearch.py +7 -2
- agno/tools/googlesheets.py +21 -17
- agno/tools/hackernews.py +9 -5
- agno/tools/jina.py +5 -4
- agno/tools/jira.py +18 -9
- agno/tools/knowledge.py +31 -32
- agno/tools/linear.py +18 -33
- agno/tools/linkup.py +5 -1
- agno/tools/local_file_system.py +8 -5
- agno/tools/lumalab.py +31 -19
- agno/tools/mem0.py +18 -12
- agno/tools/memori.py +14 -10
- agno/tools/mlx_transcribe.py +3 -2
- agno/tools/models/azure_openai.py +32 -14
- agno/tools/models/gemini.py +58 -31
- agno/tools/models/groq.py +29 -20
- agno/tools/models/nebius.py +27 -11
- agno/tools/models_labs.py +39 -15
- agno/tools/moviepy_video.py +7 -6
- agno/tools/neo4j.py +10 -8
- agno/tools/newspaper.py +7 -2
- agno/tools/newspaper4k.py +8 -3
- agno/tools/openai.py +57 -26
- agno/tools/openbb.py +12 -11
- agno/tools/opencv.py +62 -46
- agno/tools/openweather.py +14 -12
- agno/tools/pandas.py +11 -3
- agno/tools/postgres.py +4 -12
- agno/tools/pubmed.py +4 -1
- agno/tools/python.py +9 -22
- agno/tools/reasoning.py +35 -27
- agno/tools/reddit.py +11 -26
- agno/tools/replicate.py +54 -41
- agno/tools/resend.py +4 -1
- agno/tools/scrapegraph.py +15 -14
- agno/tools/searxng.py +10 -23
- agno/tools/serpapi.py +6 -3
- agno/tools/serper.py +13 -4
- agno/tools/shell.py +9 -2
- agno/tools/slack.py +12 -11
- agno/tools/sleep.py +3 -2
- agno/tools/spider.py +24 -4
- agno/tools/sql.py +7 -6
- agno/tools/tavily.py +6 -4
- agno/tools/telegram.py +12 -4
- agno/tools/todoist.py +11 -31
- agno/tools/toolkit.py +1 -1
- agno/tools/trafilatura.py +22 -6
- agno/tools/trello.py +9 -22
- agno/tools/twilio.py +10 -3
- agno/tools/user_control_flow.py +6 -1
- agno/tools/valyu.py +34 -5
- agno/tools/visualization.py +19 -28
- agno/tools/webbrowser.py +4 -3
- agno/tools/webex.py +11 -7
- agno/tools/website.py +15 -46
- agno/tools/webtools.py +12 -4
- agno/tools/whatsapp.py +5 -9
- agno/tools/wikipedia.py +20 -13
- agno/tools/x.py +14 -13
- agno/tools/yfinance.py +13 -40
- agno/tools/youtube.py +26 -20
- agno/tools/zendesk.py +7 -2
- agno/tools/zep.py +10 -7
- agno/tools/zoom.py +10 -9
- agno/utils/common.py +1 -19
- agno/utils/events.py +95 -118
- agno/utils/knowledge.py +29 -0
- agno/utils/log.py +2 -2
- agno/utils/mcp.py +11 -5
- agno/utils/media.py +39 -0
- agno/utils/message.py +12 -1
- agno/utils/models/claude.py +6 -4
- agno/utils/models/mistral.py +8 -7
- agno/utils/models/schema_utils.py +3 -3
- agno/utils/pprint.py +33 -32
- agno/utils/print_response/agent.py +779 -0
- agno/utils/print_response/team.py +1565 -0
- agno/utils/print_response/workflow.py +1451 -0
- agno/utils/prompts.py +14 -14
- agno/utils/reasoning.py +87 -0
- agno/utils/response.py +42 -42
- agno/utils/string.py +8 -22
- agno/utils/team.py +50 -0
- agno/utils/timer.py +2 -2
- agno/vectordb/base.py +33 -21
- agno/vectordb/cassandra/cassandra.py +287 -23
- agno/vectordb/chroma/chromadb.py +482 -59
- agno/vectordb/clickhouse/clickhousedb.py +270 -63
- agno/vectordb/couchbase/couchbase.py +309 -29
- agno/vectordb/lancedb/lance_db.py +360 -21
- agno/vectordb/langchaindb/__init__.py +5 -0
- agno/vectordb/langchaindb/langchaindb.py +145 -0
- agno/vectordb/lightrag/__init__.py +5 -0
- agno/vectordb/lightrag/lightrag.py +374 -0
- agno/vectordb/llamaindex/llamaindexdb.py +127 -0
- agno/vectordb/milvus/milvus.py +242 -32
- agno/vectordb/mongodb/mongodb.py +200 -24
- agno/vectordb/pgvector/pgvector.py +319 -37
- agno/vectordb/pineconedb/pineconedb.py +221 -27
- agno/vectordb/qdrant/qdrant.py +334 -14
- agno/vectordb/singlestore/singlestore.py +286 -29
- agno/vectordb/surrealdb/surrealdb.py +187 -7
- agno/vectordb/upstashdb/upstashdb.py +342 -26
- agno/vectordb/weaviate/weaviate.py +227 -165
- agno/workflow/__init__.py +17 -13
- agno/workflow/{v2/condition.py → condition.py} +135 -32
- agno/workflow/{v2/loop.py → loop.py} +115 -28
- agno/workflow/{v2/parallel.py → parallel.py} +138 -108
- agno/workflow/{v2/router.py → router.py} +133 -32
- agno/workflow/{v2/step.py → step.py} +200 -42
- agno/workflow/{v2/steps.py → steps.py} +147 -66
- agno/workflow/types.py +482 -0
- agno/workflow/workflow.py +2394 -696
- agno-2.0.0a1.dist-info/METADATA +355 -0
- agno-2.0.0a1.dist-info/RECORD +514 -0
- agno/agent/metrics.py +0 -107
- agno/api/app.py +0 -35
- agno/api/playground.py +0 -92
- agno/api/schemas/app.py +0 -12
- agno/api/schemas/playground.py +0 -22
- agno/api/schemas/user.py +0 -35
- agno/api/schemas/workspace.py +0 -46
- agno/api/user.py +0 -160
- agno/api/workflows.py +0 -33
- agno/api/workspace.py +0 -175
- agno/app/agui/__init__.py +0 -3
- agno/app/agui/app.py +0 -17
- agno/app/agui/sync_router.py +0 -120
- agno/app/base.py +0 -186
- agno/app/discord/__init__.py +0 -3
- agno/app/fastapi/__init__.py +0 -3
- agno/app/fastapi/app.py +0 -107
- agno/app/fastapi/async_router.py +0 -457
- agno/app/fastapi/sync_router.py +0 -448
- agno/app/playground/app.py +0 -228
- agno/app/playground/async_router.py +0 -1050
- agno/app/playground/deploy.py +0 -249
- agno/app/playground/operator.py +0 -183
- agno/app/playground/schemas.py +0 -220
- agno/app/playground/serve.py +0 -55
- agno/app/playground/sync_router.py +0 -1042
- agno/app/playground/utils.py +0 -46
- agno/app/settings.py +0 -15
- agno/app/slack/__init__.py +0 -3
- agno/app/slack/app.py +0 -19
- agno/app/slack/sync_router.py +0 -92
- agno/app/utils.py +0 -54
- agno/app/whatsapp/__init__.py +0 -3
- agno/app/whatsapp/app.py +0 -15
- agno/app/whatsapp/sync_router.py +0 -197
- agno/cli/auth_server.py +0 -249
- agno/cli/config.py +0 -274
- agno/cli/console.py +0 -88
- agno/cli/credentials.py +0 -23
- agno/cli/entrypoint.py +0 -571
- agno/cli/operator.py +0 -357
- agno/cli/settings.py +0 -96
- agno/cli/ws/ws_cli.py +0 -817
- agno/constants.py +0 -13
- agno/document/__init__.py +0 -5
- agno/document/chunking/semantic.py +0 -45
- agno/document/chunking/strategy.py +0 -31
- agno/document/reader/__init__.py +0 -5
- agno/document/reader/base.py +0 -47
- agno/document/reader/docx_reader.py +0 -60
- agno/document/reader/gcs/pdf_reader.py +0 -44
- agno/document/reader/s3/pdf_reader.py +0 -59
- agno/document/reader/s3/text_reader.py +0 -63
- agno/document/reader/url_reader.py +0 -59
- agno/document/reader/youtube_reader.py +0 -58
- agno/embedder/__init__.py +0 -5
- agno/embedder/langdb.py +0 -80
- agno/embedder/mistral.py +0 -82
- agno/embedder/openai.py +0 -78
- agno/file/__init__.py +0 -5
- agno/file/file.py +0 -16
- agno/file/local/csv.py +0 -32
- agno/file/local/txt.py +0 -19
- agno/infra/app.py +0 -240
- agno/infra/base.py +0 -144
- agno/infra/context.py +0 -20
- agno/infra/db_app.py +0 -52
- agno/infra/resource.py +0 -205
- agno/infra/resources.py +0 -55
- agno/knowledge/agent.py +0 -702
- agno/knowledge/arxiv.py +0 -33
- agno/knowledge/combined.py +0 -36
- agno/knowledge/csv.py +0 -144
- agno/knowledge/csv_url.py +0 -124
- agno/knowledge/document.py +0 -223
- agno/knowledge/docx.py +0 -137
- agno/knowledge/firecrawl.py +0 -34
- agno/knowledge/gcs/__init__.py +0 -0
- agno/knowledge/gcs/base.py +0 -39
- agno/knowledge/gcs/pdf.py +0 -125
- agno/knowledge/json.py +0 -137
- agno/knowledge/langchain.py +0 -71
- agno/knowledge/light_rag.py +0 -273
- agno/knowledge/llamaindex.py +0 -66
- agno/knowledge/markdown.py +0 -154
- agno/knowledge/pdf.py +0 -164
- agno/knowledge/pdf_bytes.py +0 -42
- agno/knowledge/pdf_url.py +0 -148
- agno/knowledge/s3/__init__.py +0 -0
- agno/knowledge/s3/base.py +0 -64
- agno/knowledge/s3/pdf.py +0 -33
- agno/knowledge/s3/text.py +0 -34
- agno/knowledge/text.py +0 -141
- agno/knowledge/url.py +0 -46
- agno/knowledge/website.py +0 -179
- agno/knowledge/wikipedia.py +0 -32
- agno/knowledge/youtube.py +0 -35
- agno/memory/agent.py +0 -423
- agno/memory/classifier.py +0 -104
- agno/memory/db/__init__.py +0 -5
- agno/memory/db/base.py +0 -42
- agno/memory/db/mongodb.py +0 -189
- agno/memory/db/postgres.py +0 -203
- agno/memory/db/sqlite.py +0 -193
- agno/memory/memory.py +0 -22
- agno/memory/row.py +0 -36
- agno/memory/summarizer.py +0 -201
- agno/memory/summary.py +0 -19
- agno/memory/team.py +0 -415
- agno/memory/v2/__init__.py +0 -2
- agno/memory/v2/db/__init__.py +0 -1
- agno/memory/v2/db/base.py +0 -42
- agno/memory/v2/db/firestore.py +0 -339
- agno/memory/v2/db/mongodb.py +0 -196
- agno/memory/v2/db/postgres.py +0 -214
- agno/memory/v2/db/redis.py +0 -187
- agno/memory/v2/db/schema.py +0 -54
- agno/memory/v2/db/sqlite.py +0 -209
- agno/memory/v2/manager.py +0 -437
- agno/memory/v2/memory.py +0 -1097
- agno/memory/v2/schema.py +0 -55
- agno/memory/v2/summarizer.py +0 -215
- agno/memory/workflow.py +0 -38
- agno/models/ollama/tools.py +0 -430
- agno/models/qwen/__init__.py +0 -5
- agno/playground/__init__.py +0 -10
- agno/playground/deploy.py +0 -3
- agno/playground/playground.py +0 -3
- agno/playground/serve.py +0 -3
- agno/playground/settings.py +0 -3
- agno/reranker/__init__.py +0 -0
- agno/run/v2/__init__.py +0 -0
- agno/run/v2/workflow.py +0 -567
- agno/storage/__init__.py +0 -0
- agno/storage/agent/__init__.py +0 -0
- agno/storage/agent/dynamodb.py +0 -1
- agno/storage/agent/json.py +0 -1
- agno/storage/agent/mongodb.py +0 -1
- agno/storage/agent/postgres.py +0 -1
- agno/storage/agent/singlestore.py +0 -1
- agno/storage/agent/sqlite.py +0 -1
- agno/storage/agent/yaml.py +0 -1
- agno/storage/base.py +0 -60
- agno/storage/dynamodb.py +0 -673
- agno/storage/firestore.py +0 -297
- agno/storage/gcs_json.py +0 -261
- agno/storage/in_memory.py +0 -234
- agno/storage/json.py +0 -237
- agno/storage/mongodb.py +0 -328
- agno/storage/mysql.py +0 -685
- agno/storage/postgres.py +0 -682
- agno/storage/redis.py +0 -336
- agno/storage/session/__init__.py +0 -16
- agno/storage/session/agent.py +0 -64
- agno/storage/session/team.py +0 -63
- agno/storage/session/v2/__init__.py +0 -5
- agno/storage/session/workflow.py +0 -61
- agno/storage/singlestore.py +0 -606
- agno/storage/sqlite.py +0 -646
- agno/storage/workflow/__init__.py +0 -0
- agno/storage/workflow/mongodb.py +0 -1
- agno/storage/workflow/postgres.py +0 -1
- agno/storage/workflow/sqlite.py +0 -1
- agno/storage/yaml.py +0 -241
- agno/tools/thinking.py +0 -73
- agno/utils/defaults.py +0 -57
- agno/utils/filesystem.py +0 -39
- agno/utils/git.py +0 -52
- agno/utils/json_io.py +0 -30
- agno/utils/load_env.py +0 -19
- agno/utils/py_io.py +0 -19
- agno/utils/pyproject.py +0 -18
- agno/utils/resource_filter.py +0 -31
- agno/workflow/v2/__init__.py +0 -21
- agno/workflow/v2/types.py +0 -357
- agno/workflow/v2/workflow.py +0 -3312
- agno/workspace/__init__.py +0 -0
- agno/workspace/config.py +0 -325
- agno/workspace/enums.py +0 -6
- agno/workspace/helpers.py +0 -52
- agno/workspace/operator.py +0 -757
- agno/workspace/settings.py +0 -158
- agno-1.8.1.dist-info/METADATA +0 -982
- agno-1.8.1.dist-info/RECORD +0 -566
- agno-1.8.1.dist-info/entry_points.txt +0 -3
- /agno/{app → db/migrations}/__init__.py +0 -0
- /agno/{app/playground/__init__.py → db/schemas/metrics.py} +0 -0
- /agno/{cli → integrations}/__init__.py +0 -0
- /agno/{cli/ws → knowledge/chunking}/__init__.py +0 -0
- /agno/{document/chunking → knowledge/remote_content}/__init__.py +0 -0
- /agno/{document/reader/gcs → knowledge/reranker}/__init__.py +0 -0
- /agno/{document/reader/s3 → os/interfaces}/__init__.py +0 -0
- /agno/{app → os/interfaces}/slack/security.py +0 -0
- /agno/{app → os/interfaces}/whatsapp/security.py +0 -0
- /agno/{file/local → utils/print_response}/__init__.py +0 -0
- /agno/{infra → vectordb/llamaindex}/__init__.py +0 -0
- {agno-1.8.1.dist-info → agno-2.0.0a1.dist-info}/WHEEL +0 -0
- {agno-1.8.1.dist-info → agno-2.0.0a1.dist-info}/licenses/LICENSE +0 -0
- {agno-1.8.1.dist-info → agno-2.0.0a1.dist-info}/top_level.txt +0 -0
agno/workflow/v2/workflow.py
DELETED
|
@@ -1,3312 +0,0 @@
|
|
|
1
|
-
import asyncio
|
|
2
|
-
from dataclasses import dataclass
|
|
3
|
-
from datetime import datetime
|
|
4
|
-
from os import getenv
|
|
5
|
-
from typing import (
|
|
6
|
-
Any,
|
|
7
|
-
AsyncIterator,
|
|
8
|
-
Awaitable,
|
|
9
|
-
Callable,
|
|
10
|
-
Dict,
|
|
11
|
-
Iterator,
|
|
12
|
-
List,
|
|
13
|
-
Literal,
|
|
14
|
-
Optional,
|
|
15
|
-
Union,
|
|
16
|
-
overload,
|
|
17
|
-
)
|
|
18
|
-
from uuid import uuid4
|
|
19
|
-
|
|
20
|
-
from pydantic import BaseModel
|
|
21
|
-
|
|
22
|
-
from agno.agent.agent import Agent
|
|
23
|
-
from agno.media import Audio, AudioArtifact, File, Image, ImageArtifact, Video, VideoArtifact
|
|
24
|
-
from agno.run.base import RunStatus
|
|
25
|
-
from agno.run.v2.workflow import (
|
|
26
|
-
ConditionExecutionCompletedEvent,
|
|
27
|
-
ConditionExecutionStartedEvent,
|
|
28
|
-
LoopExecutionCompletedEvent,
|
|
29
|
-
LoopExecutionStartedEvent,
|
|
30
|
-
LoopIterationCompletedEvent,
|
|
31
|
-
LoopIterationStartedEvent,
|
|
32
|
-
ParallelExecutionCompletedEvent,
|
|
33
|
-
ParallelExecutionStartedEvent,
|
|
34
|
-
RouterExecutionCompletedEvent,
|
|
35
|
-
RouterExecutionStartedEvent,
|
|
36
|
-
StepCompletedEvent,
|
|
37
|
-
StepOutputEvent,
|
|
38
|
-
StepsExecutionCompletedEvent,
|
|
39
|
-
StepsExecutionStartedEvent,
|
|
40
|
-
StepStartedEvent,
|
|
41
|
-
WorkflowCompletedEvent,
|
|
42
|
-
WorkflowRunEvent,
|
|
43
|
-
WorkflowRunResponse,
|
|
44
|
-
WorkflowRunResponseEvent,
|
|
45
|
-
WorkflowStartedEvent,
|
|
46
|
-
)
|
|
47
|
-
from agno.storage.base import Storage
|
|
48
|
-
from agno.storage.session.v2.workflow import WorkflowSession as WorkflowSessionV2
|
|
49
|
-
from agno.team.team import Team
|
|
50
|
-
from agno.utils.log import (
|
|
51
|
-
log_debug,
|
|
52
|
-
logger,
|
|
53
|
-
set_log_level_to_debug,
|
|
54
|
-
set_log_level_to_info,
|
|
55
|
-
use_workflow_logger,
|
|
56
|
-
)
|
|
57
|
-
from agno.workflow.v2.condition import Condition
|
|
58
|
-
from agno.workflow.v2.loop import Loop
|
|
59
|
-
from agno.workflow.v2.parallel import Parallel
|
|
60
|
-
from agno.workflow.v2.router import Router
|
|
61
|
-
from agno.workflow.v2.step import Step
|
|
62
|
-
from agno.workflow.v2.steps import Steps
|
|
63
|
-
from agno.workflow.v2.types import (
|
|
64
|
-
StepInput,
|
|
65
|
-
StepMetrics,
|
|
66
|
-
StepOutput,
|
|
67
|
-
WorkflowExecutionInput,
|
|
68
|
-
WorkflowMetrics,
|
|
69
|
-
)
|
|
70
|
-
|
|
71
|
-
WorkflowSteps = Union[
|
|
72
|
-
Callable[
|
|
73
|
-
["Workflow", WorkflowExecutionInput],
|
|
74
|
-
Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput], Any],
|
|
75
|
-
],
|
|
76
|
-
Steps,
|
|
77
|
-
List[
|
|
78
|
-
Union[
|
|
79
|
-
Callable[
|
|
80
|
-
[StepInput], Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput]]
|
|
81
|
-
],
|
|
82
|
-
Step,
|
|
83
|
-
Steps,
|
|
84
|
-
Loop,
|
|
85
|
-
Parallel,
|
|
86
|
-
Condition,
|
|
87
|
-
Router,
|
|
88
|
-
]
|
|
89
|
-
],
|
|
90
|
-
]
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
@dataclass
|
|
94
|
-
class Workflow:
|
|
95
|
-
"""Pipeline-based workflow execution"""
|
|
96
|
-
|
|
97
|
-
# Workflow identification - make name optional with default
|
|
98
|
-
name: Optional[str] = None
|
|
99
|
-
workflow_id: Optional[str] = None
|
|
100
|
-
description: Optional[str] = None
|
|
101
|
-
|
|
102
|
-
# Workflow configuration
|
|
103
|
-
steps: Optional[WorkflowSteps] = None
|
|
104
|
-
|
|
105
|
-
storage: Optional[Storage] = None
|
|
106
|
-
|
|
107
|
-
# Session management
|
|
108
|
-
session_id: Optional[str] = None
|
|
109
|
-
session_name: Optional[str] = None
|
|
110
|
-
user_id: Optional[str] = None
|
|
111
|
-
workflow_session_id: Optional[str] = None
|
|
112
|
-
workflow_session_state: Optional[Dict[str, Any]] = None
|
|
113
|
-
|
|
114
|
-
# Runtime state
|
|
115
|
-
run_id: Optional[str] = None
|
|
116
|
-
run_response: Optional[WorkflowRunResponse] = None
|
|
117
|
-
|
|
118
|
-
# Workflow session for storage
|
|
119
|
-
workflow_session: Optional[WorkflowSessionV2] = None
|
|
120
|
-
debug_mode: Optional[bool] = False
|
|
121
|
-
|
|
122
|
-
# --- Workflow Streaming ---
|
|
123
|
-
# Stream the response from the Workflow
|
|
124
|
-
stream: Optional[bool] = None
|
|
125
|
-
# Stream the intermediate steps from the Workflow
|
|
126
|
-
stream_intermediate_steps: bool = False
|
|
127
|
-
|
|
128
|
-
store_events: bool = False
|
|
129
|
-
events_to_skip: Optional[List[WorkflowRunEvent]] = None
|
|
130
|
-
|
|
131
|
-
def __init__(
|
|
132
|
-
self,
|
|
133
|
-
workflow_id: Optional[str] = None,
|
|
134
|
-
name: Optional[str] = None,
|
|
135
|
-
description: Optional[str] = None,
|
|
136
|
-
storage: Optional[Storage] = None,
|
|
137
|
-
steps: Optional[WorkflowSteps] = None,
|
|
138
|
-
session_id: Optional[str] = None,
|
|
139
|
-
session_name: Optional[str] = None,
|
|
140
|
-
workflow_session_state: Optional[Dict[str, Any]] = None,
|
|
141
|
-
user_id: Optional[str] = None,
|
|
142
|
-
debug_mode: Optional[bool] = False,
|
|
143
|
-
stream: Optional[bool] = None,
|
|
144
|
-
stream_intermediate_steps: bool = False,
|
|
145
|
-
store_events: bool = False,
|
|
146
|
-
events_to_skip: Optional[List[WorkflowRunEvent]] = None,
|
|
147
|
-
):
|
|
148
|
-
self.workflow_id = workflow_id
|
|
149
|
-
self.name = name
|
|
150
|
-
self.description = description
|
|
151
|
-
self.storage = storage
|
|
152
|
-
self.steps = steps
|
|
153
|
-
self.session_id = session_id
|
|
154
|
-
self.session_name = session_name
|
|
155
|
-
self.workflow_session_state = workflow_session_state
|
|
156
|
-
self.user_id = user_id
|
|
157
|
-
self.debug_mode = debug_mode
|
|
158
|
-
self.store_events = store_events
|
|
159
|
-
self.events_to_skip = events_to_skip or []
|
|
160
|
-
self.stream = stream
|
|
161
|
-
self.stream_intermediate_steps = stream_intermediate_steps
|
|
162
|
-
|
|
163
|
-
@property
|
|
164
|
-
def run_parameters(self) -> Dict[str, Any]:
|
|
165
|
-
"""Get the run parameters for the workflow"""
|
|
166
|
-
|
|
167
|
-
if self.steps is None:
|
|
168
|
-
return {}
|
|
169
|
-
|
|
170
|
-
parameters = {}
|
|
171
|
-
|
|
172
|
-
if self.steps and callable(self.steps):
|
|
173
|
-
from inspect import Parameter, signature
|
|
174
|
-
|
|
175
|
-
sig = signature(self.steps) # type: ignore
|
|
176
|
-
|
|
177
|
-
for param_name, param in sig.parameters.items():
|
|
178
|
-
if param_name not in ["workflow", "execution_input", "self"]:
|
|
179
|
-
parameters[param_name] = {
|
|
180
|
-
"name": param_name,
|
|
181
|
-
"default": param.default.default
|
|
182
|
-
if hasattr(param.default, "__class__") and param.default.__class__.__name__ == "FieldInfo"
|
|
183
|
-
else (param.default if param.default is not Parameter.empty else None),
|
|
184
|
-
"annotation": (
|
|
185
|
-
param.annotation.__name__
|
|
186
|
-
if hasattr(param.annotation, "__name__")
|
|
187
|
-
else (
|
|
188
|
-
str(param.annotation).replace("typing.Optional[", "").replace("]", "")
|
|
189
|
-
if "typing.Optional" in str(param.annotation)
|
|
190
|
-
else str(param.annotation)
|
|
191
|
-
)
|
|
192
|
-
)
|
|
193
|
-
if param.annotation is not Parameter.empty
|
|
194
|
-
else None,
|
|
195
|
-
"required": param.default is Parameter.empty,
|
|
196
|
-
}
|
|
197
|
-
else:
|
|
198
|
-
parameters = {
|
|
199
|
-
"message": {
|
|
200
|
-
"name": "message",
|
|
201
|
-
"default": None,
|
|
202
|
-
"annotation": "str",
|
|
203
|
-
"required": True,
|
|
204
|
-
},
|
|
205
|
-
}
|
|
206
|
-
|
|
207
|
-
return parameters
|
|
208
|
-
|
|
209
|
-
def initialize_workflow(self):
|
|
210
|
-
if self.workflow_id is None:
|
|
211
|
-
self.workflow_id = str(uuid4())
|
|
212
|
-
log_debug(f"Generated new workflow_id: {self.workflow_id}")
|
|
213
|
-
|
|
214
|
-
if self.session_id is None:
|
|
215
|
-
self.session_id = str(uuid4())
|
|
216
|
-
log_debug(f"Generated new session_id: {self.session_id}")
|
|
217
|
-
|
|
218
|
-
# Set storage mode to workflow_v2
|
|
219
|
-
if self.storage is not None:
|
|
220
|
-
self.storage.mode = "workflow_v2"
|
|
221
|
-
|
|
222
|
-
self._update_workflow_session_state()
|
|
223
|
-
|
|
224
|
-
def rename_session(self, session_name: str):
|
|
225
|
-
"""Rename the current session and save to storage"""
|
|
226
|
-
|
|
227
|
-
# -*- Read from storage
|
|
228
|
-
self.read_from_storage()
|
|
229
|
-
# -*- Rename session
|
|
230
|
-
self.session_name = session_name
|
|
231
|
-
# -*- Save to storage
|
|
232
|
-
self.write_to_storage()
|
|
233
|
-
|
|
234
|
-
def delete_session(self, session_id: str):
|
|
235
|
-
"""Delete the current session and save to storage"""
|
|
236
|
-
if self.storage is None:
|
|
237
|
-
return
|
|
238
|
-
# -*- Delete session
|
|
239
|
-
self.storage.delete_session(session_id=session_id)
|
|
240
|
-
|
|
241
|
-
def _handle_event(
|
|
242
|
-
self, event: "WorkflowRunResponseEvent", workflow_run_response: WorkflowRunResponse
|
|
243
|
-
) -> "WorkflowRunResponseEvent":
|
|
244
|
-
"""Handle workflow events for storage - similar to Team._handle_event"""
|
|
245
|
-
if self.store_events:
|
|
246
|
-
# Check if this event type should be skipped
|
|
247
|
-
if self.events_to_skip:
|
|
248
|
-
event_type = event.event
|
|
249
|
-
for skip_event in self.events_to_skip:
|
|
250
|
-
if isinstance(skip_event, str):
|
|
251
|
-
if event_type == skip_event:
|
|
252
|
-
return event
|
|
253
|
-
else:
|
|
254
|
-
# It's a WorkflowRunEvent enum
|
|
255
|
-
if event_type == skip_event.value:
|
|
256
|
-
return event
|
|
257
|
-
|
|
258
|
-
# Store the event
|
|
259
|
-
if workflow_run_response.events is None:
|
|
260
|
-
workflow_run_response.events = []
|
|
261
|
-
|
|
262
|
-
workflow_run_response.events.append(event)
|
|
263
|
-
|
|
264
|
-
return event
|
|
265
|
-
|
|
266
|
-
def _transform_step_output_to_event(
    self, step_output: StepOutput, workflow_run_response: WorkflowRunResponse, step_index: Optional[int] = None
) -> StepOutputEvent:
    """Wrap a StepOutput in a StepOutputEvent so all step results share the
    same streaming event interface."""
    event_kwargs = dict(
        step_output=step_output,
        # Identify which run/workflow/session this output belongs to.
        run_id=workflow_run_response.run_id or "",
        workflow_name=workflow_run_response.workflow_name,
        workflow_id=workflow_run_response.workflow_id,
        session_id=workflow_run_response.session_id,
        step_name=step_output.step_name,
        step_index=step_index,
    )
    return StepOutputEvent(**event_kwargs)
|
|
279
|
-
|
|
280
|
-
def _set_debug(self) -> None:
    """Enable or disable debug logging for this workflow.

    Debug mode is turned on either via the instance flag ``debug_mode`` or
    the ``AGNO_DEBUG`` environment variable. When enabled, debug mode is
    also propagated recursively into the workflow's steps (and their
    executors/members); otherwise logging is reset to info level.
    """
    # Env var acts as a global override; any casing of "true" enables it.
    if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
        use_workflow_logger()

        self.debug_mode = True
        set_log_level_to_debug(source_type="workflow")

        # Propagate to steps - only if steps is iterable (not callable);
        # a callable `steps` is a custom executor with nothing to recurse into.
        if self.steps and not callable(self.steps):
            if isinstance(self.steps, Steps):
                # Unwrap the Steps container to reach the actual list.
                steps_to_iterate = self.steps.steps
            else:
                steps_to_iterate = self.steps

            for step in steps_to_iterate:
                self._propagate_debug_to_step(step)
    else:
        set_log_level_to_info(source_type="workflow")
|
|
299
|
-
|
|
300
|
-
def _propagate_debug_to_step(self, step):
|
|
301
|
-
"""Recursively propagate debug mode to steps and nested primitives"""
|
|
302
|
-
# Handle direct Step objects
|
|
303
|
-
if hasattr(step, "active_executor") and step.active_executor:
|
|
304
|
-
executor = step.active_executor
|
|
305
|
-
if hasattr(executor, "debug_mode"):
|
|
306
|
-
executor.debug_mode = True
|
|
307
|
-
|
|
308
|
-
# If it's a team, propagate to all members
|
|
309
|
-
if hasattr(executor, "members"):
|
|
310
|
-
for member in executor.members:
|
|
311
|
-
if hasattr(member, "debug_mode"):
|
|
312
|
-
member.debug_mode = True
|
|
313
|
-
|
|
314
|
-
# Handle nested primitives - check both 'steps' and 'choices' attributes
|
|
315
|
-
for attr_name in ["steps", "choices"]:
|
|
316
|
-
if hasattr(step, attr_name):
|
|
317
|
-
attr_value = getattr(step, attr_name)
|
|
318
|
-
if attr_value and isinstance(attr_value, list):
|
|
319
|
-
for nested_step in attr_value:
|
|
320
|
-
self._propagate_debug_to_step(nested_step)
|
|
321
|
-
|
|
322
|
-
def _create_step_input(
    self,
    execution_input: WorkflowExecutionInput,
    previous_step_outputs: Optional[Dict[str, StepOutput]] = None,
    shared_images: Optional[List[ImageArtifact]] = None,
    shared_videos: Optional[List[VideoArtifact]] = None,
    shared_audio: Optional[List[AudioArtifact]] = None,
    shared_files: Optional[List[File]] = None,
) -> StepInput:
    """Build the StepInput for the next step, carrying forward the previous
    step's content and all shared media accumulated so far."""
    previous_step_content = None
    if previous_step_outputs:
        # Dicts preserve insertion order, so the last key is the most
        # recently executed step.
        *_, last_step_name = previous_step_outputs
        last_output = previous_step_outputs[last_step_name]
        previous_step_content = last_output.content if last_output else None
        log_debug(f"Using previous step content from: {last_step_name}")

    return StepInput(
        message=execution_input.message,
        previous_step_content=previous_step_content,
        previous_step_outputs=previous_step_outputs,
        additional_data=execution_input.additional_data,
        images=shared_images or [],
        videos=shared_videos or [],
        audio=shared_audio or [],
        files=shared_files or [],
    )
|
|
349
|
-
|
|
350
|
-
def _get_step_count(self) -> int:
|
|
351
|
-
"""Get the number of steps in the workflow"""
|
|
352
|
-
if self.steps is None:
|
|
353
|
-
return 0
|
|
354
|
-
elif callable(self.steps):
|
|
355
|
-
return 1 # Callable function counts as 1 step
|
|
356
|
-
else:
|
|
357
|
-
# Handle Steps wrapper
|
|
358
|
-
if isinstance(self.steps, Steps):
|
|
359
|
-
return len(self.steps.steps)
|
|
360
|
-
else:
|
|
361
|
-
return len(self.steps)
|
|
362
|
-
|
|
363
|
-
def _convert_dict_to_step_metrics(self, step_name: str, metrics_dict: Dict[str, Any]) -> StepMetrics:
    """Build a StepMetrics object for *step_name* from a raw metrics dict."""
    payload = {
        "step_name": step_name,
        # Fall back to "unknown" when executor info was not recorded.
        "executor_type": metrics_dict.get("executor_type", "unknown"),
        "executor_name": metrics_dict.get("executor_name", "unknown"),
        "metrics": metrics_dict.get("metrics"),
        "parallel_steps": metrics_dict.get("parallel_steps"),
    }
    return StepMetrics.from_dict(payload)
|
|
374
|
-
|
|
375
|
-
def _aggregate_workflow_metrics(self, step_responses: List[Union[StepOutput, List[StepOutput]]]) -> WorkflowMetrics:
    """Aggregate per-step metrics from all step responses into a single
    WorkflowMetrics object.

    Entries may be single StepOutputs or lists of them (from Loop,
    Condition, etc.); every individual output counts toward the total.
    """
    # Flatten mixed single/list entries into one sequence of outputs.
    flattened = []
    for entry in step_responses:
        if isinstance(entry, list):
            flattened.extend(entry)
        else:
            flattened.append(entry)

    # Keep metrics only for outputs that carry both a name and metrics;
    # later outputs with the same step name overwrite earlier ones.
    metrics_by_step = {}
    for output in flattened:
        if output.step_name and output.metrics:
            metrics_by_step[output.step_name] = self._convert_dict_to_step_metrics(
                output.step_name, output.metrics
            )

    return WorkflowMetrics(
        total_steps=len(flattened),
        steps=metrics_by_step,
    )
|
|
404
|
-
|
|
405
|
-
def _call_custom_function(
|
|
406
|
-
self, func: Callable, workflow: "Workflow", execution_input: WorkflowExecutionInput, **kwargs: Any
|
|
407
|
-
) -> Any:
|
|
408
|
-
"""Call custom function with only the parameters it expects"""
|
|
409
|
-
from inspect import signature
|
|
410
|
-
|
|
411
|
-
sig = signature(func)
|
|
412
|
-
|
|
413
|
-
# Build arguments based on what the function actually accepts
|
|
414
|
-
call_kwargs = {}
|
|
415
|
-
|
|
416
|
-
# Only add workflow and execution_input if the function expects them
|
|
417
|
-
if "workflow" in sig.parameters: # type: ignore
|
|
418
|
-
call_kwargs["workflow"] = self
|
|
419
|
-
if "execution_input" in sig.parameters:
|
|
420
|
-
call_kwargs["execution_input"] = execution_input # type: ignore
|
|
421
|
-
|
|
422
|
-
# Add any other kwargs that the function expects
|
|
423
|
-
for param_name in kwargs:
|
|
424
|
-
if param_name in sig.parameters: # type: ignore
|
|
425
|
-
call_kwargs[param_name] = kwargs[param_name]
|
|
426
|
-
|
|
427
|
-
# If function has **kwargs parameter, pass all remaining kwargs
|
|
428
|
-
for param in sig.parameters.values(): # type: ignore
|
|
429
|
-
if param.kind == param.VAR_KEYWORD:
|
|
430
|
-
call_kwargs.update(kwargs)
|
|
431
|
-
break
|
|
432
|
-
|
|
433
|
-
try:
|
|
434
|
-
return func(**call_kwargs)
|
|
435
|
-
except TypeError as e:
|
|
436
|
-
# If signature inspection fails, fall back to original method
|
|
437
|
-
logger.warning(
|
|
438
|
-
f"Async function signature inspection failed: {e}. Falling back to original calling convention."
|
|
439
|
-
)
|
|
440
|
-
return func(workflow, execution_input, **kwargs)
|
|
441
|
-
|
|
442
|
-
def _execute(
    self, execution_input: WorkflowExecutionInput, workflow_run_response: WorkflowRunResponse, **kwargs: Any
) -> WorkflowRunResponse:
    """Execute the workflow synchronously, mutating and returning *workflow_run_response*.

    Two execution modes:
    - ``self.steps`` is callable: run it as a custom executor (sync
      generators are drained into a single content string; async callables
      are rejected).
    - ``self.steps`` is a step list: run each step in order, threading
      previous outputs and shared media between steps.
    """
    from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction

    workflow_run_response.status = RunStatus.running

    if callable(self.steps):
        if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
            raise ValueError("Cannot use async function with synchronous execution")
        elif isgeneratorfunction(self.steps):
            # Drain the generator, concatenating string content chunks.
            # NOTE(review): this calls self.steps positionally, unlike the
            # streaming variant which uses _call_custom_function — confirm
            # whether generator executors must accept (workflow, execution_input).
            content = ""
            for chunk in self.steps(self, execution_input, **kwargs):
                if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
                    content += chunk.content
                else:
                    content += str(chunk)
            workflow_run_response.content = content
        else:
            # Execute the workflow with the custom executor
            workflow_run_response.content = self._call_custom_function(self.steps, self, execution_input, **kwargs)  # type: ignore[arg-type]

        workflow_run_response.status = RunStatus.completed
    else:
        try:
            # Track outputs from each step for enhanced data flow
            collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
            previous_step_outputs: Dict[str, StepOutput] = {}

            # shared_* feed subsequent steps; output_* accumulate everything
            # (inputs included) for the final run response.
            shared_images: List[ImageArtifact] = execution_input.images or []
            output_images: List[ImageArtifact] = (execution_input.images or []).copy()  # Start with input images
            shared_videos: List[VideoArtifact] = execution_input.videos or []
            output_videos: List[VideoArtifact] = (execution_input.videos or []).copy()  # Start with input videos
            shared_audio: List[AudioArtifact] = execution_input.audio or []
            output_audio: List[AudioArtifact] = (execution_input.audio or []).copy()  # Start with input audio
            shared_files: List[File] = execution_input.files or []
            output_files: List[File] = (execution_input.files or []).copy()  # Start with input files

            for i, step in enumerate(self.steps):  # type: ignore[arg-type]
                step_name = getattr(step, "name", f"step_{i + 1}")
                log_debug(f"Executing step {i + 1}/{self._get_step_count()}: {step_name}")

                # Create enhanced StepInput carrying prior outputs and media
                step_input = self._create_step_input(
                    execution_input=execution_input,
                    previous_step_outputs=previous_step_outputs,
                    shared_images=shared_images,
                    shared_videos=shared_videos,
                    shared_audio=shared_audio,
                    shared_files=shared_files,
                )

                step_output = step.execute(step_input, session_id=self.session_id, user_id=self.user_id)  # type: ignore[union-attr]

                # Update the workflow-level previous_step_outputs dictionary.
                # NOTE: when a step requests early termination, we break before
                # its output is appended to collected_step_outputs or its media
                # merged — the stopping step's output is not part of the result.
                if isinstance(step_output, list):
                    log_debug(f"Step returned {len(step_output)} outputs")
                    # For multiple outputs (from Loop, Condition, etc.), store the last one
                    if step_output:
                        previous_step_outputs[step_name] = step_output[-1]
                        if any(output.stop for output in step_output):
                            logger.info(f"Early termination requested by step {step_name}")
                            break
                else:
                    # Single output
                    previous_step_outputs[step_name] = step_output
                    if step_output.stop:
                        logger.info(f"Early termination requested by step {step_name}")
                        break

                # Update shared media for next step
                if isinstance(step_output, list):
                    for output in step_output:
                        shared_images.extend(output.images or [])
                        shared_videos.extend(output.videos or [])
                        shared_audio.extend(output.audio or [])
                        shared_files.extend(output.files or [])
                        output_images.extend(output.images or [])
                        output_videos.extend(output.videos or [])
                        output_audio.extend(output.audio or [])
                        output_files.extend(output.files or [])
                else:
                    shared_images.extend(step_output.images or [])
                    shared_videos.extend(step_output.videos or [])
                    shared_audio.extend(step_output.audio or [])
                    shared_files.extend(step_output.files or [])
                    output_images.extend(step_output.images or [])
                    output_videos.extend(step_output.videos or [])
                    output_audio.extend(step_output.audio or [])
                    output_files.extend(step_output.files or [])

                collected_step_outputs.append(step_output)

            # Pull session-state mutations made by member agents/teams back up.
            self._collect_workflow_session_state_from_agents_and_teams()

            # Update the workflow_run_response with completion data
            if collected_step_outputs:
                workflow_run_response.workflow_metrics = self._aggregate_workflow_metrics(collected_step_outputs)
                last_output = collected_step_outputs[-1]
                if isinstance(last_output, list) and last_output:
                    # If it's a list (from Condition/Loop/etc.), use the last one
                    workflow_run_response.content = last_output[-1].content
                elif not isinstance(last_output, list):
                    # Single StepOutput
                    workflow_run_response.content = last_output.content
            else:
                workflow_run_response.content = "No steps executed"

            workflow_run_response.step_responses = collected_step_outputs
            workflow_run_response.images = output_images
            workflow_run_response.videos = output_videos
            workflow_run_response.audio = output_audio
            workflow_run_response.status = RunStatus.completed

        except Exception as e:
            import traceback

            traceback.print_exc()
            logger.error(f"Workflow execution failed: {e}")
            # Store error response
            workflow_run_response.status = RunStatus.error
            workflow_run_response.content = f"Workflow execution failed: {e}"

        finally:
            # Persist the run (success or error) for the step-list path.
            self._save_run_to_storage(workflow_run_response)

    return workflow_run_response
|
|
570
|
-
|
|
571
|
-
def _execute_stream(
    self,
    execution_input: WorkflowExecutionInput,
    workflow_run_response: WorkflowRunResponse,
    stream_intermediate_steps: bool = False,
    **kwargs: Any,
) -> Iterator[WorkflowRunResponseEvent]:
    """Execute the workflow synchronously, yielding streaming events.

    Emits a WorkflowStartedEvent first and a WorkflowCompletedEvent last
    (the completed event is emitted even after an error event). Custom
    callable executors stream their chunks directly; step lists stream
    each step's events, converting raw StepOutputs into StepOutputEvents.
    """
    from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction

    workflow_run_response.status = RunStatus.running

    workflow_started_event = WorkflowStartedEvent(
        run_id=workflow_run_response.run_id or "",
        workflow_name=workflow_run_response.workflow_name,
        workflow_id=workflow_run_response.workflow_id,
        session_id=workflow_run_response.session_id,
    )
    yield self._handle_event(workflow_started_event, workflow_run_response)

    if callable(self.steps):
        if iscoroutinefunction(self.steps) or isasyncgenfunction(self.steps):
            raise ValueError("Cannot use async function with synchronous execution")
        elif isgeneratorfunction(self.steps):
            content = ""
            for chunk in self._call_custom_function(self.steps, self, execution_input, **kwargs):  # type: ignore[arg-type]
                # Update the run_response with the content from the result.
                # Only string-content chunks are yielded downstream; other
                # chunks are stringified into the accumulated content.
                if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
                    content += chunk.content
                    yield chunk
                else:
                    content += str(chunk)
            workflow_run_response.content = content
        else:
            workflow_run_response.content = self._call_custom_function(self.steps, self, execution_input, **kwargs)
        workflow_run_response.status = RunStatus.completed

    else:
        try:
            # Track outputs from each step for enhanced data flow
            collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
            previous_step_outputs: Dict[str, StepOutput] = {}

            # shared_* feed subsequent steps; output_* accumulate everything
            # (inputs included) for the final run response.
            shared_images: List[ImageArtifact] = execution_input.images or []
            output_images: List[ImageArtifact] = (execution_input.images or []).copy()  # Start with input images
            shared_videos: List[VideoArtifact] = execution_input.videos or []
            output_videos: List[VideoArtifact] = (execution_input.videos or []).copy()  # Start with input videos
            shared_audio: List[AudioArtifact] = execution_input.audio or []
            output_audio: List[AudioArtifact] = (execution_input.audio or []).copy()  # Start with input audio
            shared_files: List[File] = execution_input.files or []
            output_files: List[File] = (execution_input.files or []).copy()  # Start with input files

            early_termination = False

            for i, step in enumerate(self.steps):  # type: ignore[arg-type]
                step_name = getattr(step, "name", f"step_{i + 1}")
                log_debug(f"Streaming step {i + 1}/{self._get_step_count()}: {step_name}")

                # Create enhanced StepInput
                step_input = self._create_step_input(
                    execution_input=execution_input,
                    previous_step_outputs=previous_step_outputs,
                    shared_images=shared_images,
                    shared_videos=shared_videos,
                    shared_audio=shared_audio,
                    shared_files=shared_files,
                )

                # Execute step with streaming and yield all events
                for event in step.execute_stream(  # type: ignore[union-attr]
                    step_input,
                    session_id=self.session_id,
                    user_id=self.user_id,
                    stream_intermediate_steps=stream_intermediate_steps,
                    workflow_run_response=workflow_run_response,
                    step_index=i,
                ):
                    # Handle events
                    if isinstance(event, StepOutput):
                        step_output = event
                        collected_step_outputs.append(step_output)

                        # Update the workflow-level previous_step_outputs dictionary
                        previous_step_outputs[step_name] = step_output

                        # Transform StepOutput to StepOutputEvent for consistent streaming interface
                        step_output_event = self._transform_step_output_to_event(
                            step_output, workflow_run_response, step_index=i
                        )

                        if step_output.stop:
                            logger.info(f"Early termination requested by step {step_name}")
                            # Update shared media for next step
                            shared_images.extend(step_output.images or [])
                            shared_videos.extend(step_output.videos or [])
                            shared_audio.extend(step_output.audio or [])
                            shared_files.extend(step_output.files or [])
                            output_images.extend(step_output.images or [])
                            output_videos.extend(step_output.videos or [])
                            output_audio.extend(step_output.audio or [])
                            output_files.extend(step_output.files or [])

                            # Only yield StepOutputEvent for function executors, not for agents/teams
                            if getattr(step, "executor_type", None) == "function":
                                yield step_output_event

                            # Break out of the step loop (inner event loop;
                            # the flag below breaks the outer step loop)
                            early_termination = True
                            break

                        # Update shared media for next step
                        shared_images.extend(step_output.images or [])
                        shared_videos.extend(step_output.videos or [])
                        shared_audio.extend(step_output.audio or [])
                        shared_files.extend(step_output.files or [])
                        output_images.extend(step_output.images or [])
                        output_videos.extend(step_output.videos or [])
                        output_audio.extend(step_output.audio or [])
                        output_files.extend(step_output.files or [])

                        # Only yield StepOutputEvent for generator functions, not for agents/teams
                        if getattr(step, "executor_type", None) == "function":
                            yield step_output_event

                    elif isinstance(event, WorkflowRunResponseEvent):  # type: ignore
                        yield self._handle_event(event, workflow_run_response)  # type: ignore

                    else:
                        # Yield other internal events
                        yield event  # type: ignore
                # Break out of main step loop if early termination was requested.
                # NOTE(review): early_termination is always bound above, so the
                # `in locals()` guard is redundant.
                if "early_termination" in locals() and early_termination:
                    break

            # Pull session-state mutations made by member agents/teams back up.
            self._collect_workflow_session_state_from_agents_and_teams()

            # Update the workflow_run_response with completion data
            if collected_step_outputs:
                workflow_run_response.workflow_metrics = self._aggregate_workflow_metrics(collected_step_outputs)
                last_output = collected_step_outputs[-1]
                if isinstance(last_output, list) and last_output:
                    # If it's a list (from Condition/Loop/etc.), use the last one
                    workflow_run_response.content = last_output[-1].content
                elif not isinstance(last_output, list):
                    # Single StepOutput
                    workflow_run_response.content = last_output.content
            else:
                workflow_run_response.content = "No steps executed"

            workflow_run_response.step_responses = collected_step_outputs
            workflow_run_response.images = output_images
            workflow_run_response.videos = output_videos
            workflow_run_response.audio = output_audio
            workflow_run_response.status = RunStatus.completed

        except Exception as e:
            logger.error(f"Workflow execution failed: {e}")

            from agno.run.v2.workflow import WorkflowErrorEvent

            error_event = WorkflowErrorEvent(
                run_id=self.run_id or "",
                workflow_id=self.workflow_id,
                workflow_name=self.name,
                session_id=self.session_id,
                error=str(e),
            )

            yield error_event

            # Update workflow_run_response with error
            workflow_run_response.content = error_event.error
            workflow_run_response.status = RunStatus.error

    # Yield workflow completed event (emitted for both execution modes,
    # and even after an error event).
    workflow_completed_event = WorkflowCompletedEvent(
        run_id=workflow_run_response.run_id or "",
        content=workflow_run_response.content,
        workflow_name=workflow_run_response.workflow_name,
        workflow_id=workflow_run_response.workflow_id,
        session_id=workflow_run_response.session_id,
        step_responses=workflow_run_response.step_responses,  # type: ignore
        extra_data=workflow_run_response.extra_data,
    )
    yield self._handle_event(workflow_completed_event, workflow_run_response)

    # Store the completed workflow response
    self._save_run_to_storage(workflow_run_response)
|
|
759
|
-
|
|
760
|
-
async def _acall_custom_function(
    self, func: Callable, workflow: "Workflow", execution_input: WorkflowExecutionInput, **kwargs: Any
) -> Any:
    """Call a custom async executor, passing only the parameters its
    signature declares.

    Handles both regular coroutine functions (awaited) and async generator
    functions (returned un-awaited so the caller can iterate). The kwargs
    are assembled the same way as in the sync `_call_custom_function`:
    `workflow`/`execution_input` when declared (the `workflow` keyword
    receives ``self``), matching entries of ``kwargs``, and all of
    ``kwargs`` when the function declares a ``**kwargs`` catch-all.
    """
    from inspect import isasyncgenfunction, signature

    sig = signature(func)

    # Build arguments based on what the function actually accepts
    call_kwargs = {}

    # Only add workflow and execution_input if the function expects them
    if "workflow" in sig.parameters:  # type: ignore
        call_kwargs["workflow"] = self
    if "execution_input" in sig.parameters:
        call_kwargs["execution_input"] = execution_input  # type: ignore

    # Add any other kwargs that the function expects
    for param_name in kwargs:
        if param_name in sig.parameters:  # type: ignore
            call_kwargs[param_name] = kwargs[param_name]

    # If function has **kwargs parameter, pass all remaining kwargs
    for param in sig.parameters.values():  # type: ignore
        if param.kind == param.VAR_KEYWORD:
            call_kwargs.update(kwargs)
            break

    try:
        # Check if it's an async generator function
        if isasyncgenfunction(func):
            # For async generators, call the function and return the async generator directly
            return func(**call_kwargs)  # type: ignore
        else:
            # For regular async functions, await the result
            return await func(**call_kwargs)  # type: ignore
    except TypeError as e:
        # If signature inspection fails, fall back to original method.
        # NOTE(review): this fallback repeats the exact same keyword call,
        # so it will raise the same TypeError again — unlike the sync
        # variant, which falls back to positional arguments. Confirm
        # whether that divergence is intentional.
        logger.warning(
            f"Async function signature inspection failed: {e}. Falling back to original calling convention."
        )
        if isasyncgenfunction(func):
            # For async generators, use the same signature inspection logic in fallback
            return func(**call_kwargs)  # type: ignore
        else:
            # For regular async functions, use the same signature inspection logic in fallback
            return await func(**call_kwargs)  # type: ignore
|
|
807
|
-
|
|
808
|
-
async def _aexecute(
    self, execution_input: WorkflowExecutionInput, workflow_run_response: WorkflowRunResponse, **kwargs: Any
) -> WorkflowRunResponse:
    """Execute the workflow asynchronously, mutating and returning
    *workflow_run_response*.

    Mirrors `_execute`: a callable ``self.steps`` is run as a custom
    executor (coroutines awaited, sync/async generators drained into one
    content string, plain callables invoked synchronously); a step list is
    run step-by-step via ``step.aexecute``.
    """
    from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction

    workflow_run_response.status = RunStatus.running

    if callable(self.steps):
        # Execute the workflow with the custom executor
        content = ""

        if iscoroutinefunction(self.steps):  # type: ignore
            workflow_run_response.content = await self._acall_custom_function(
                self.steps, self, execution_input, **kwargs
            )
        elif isgeneratorfunction(self.steps):
            # NOTE(review): sync generators are called positionally here,
            # unlike the coroutine/async-generator branches which go through
            # _acall_custom_function — confirm this asymmetry is intended.
            for chunk in self.steps(self, execution_input, **kwargs):  # type: ignore[arg-type]
                if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
                    content += chunk.content
                else:
                    content += str(chunk)
            workflow_run_response.content = content
        elif isasyncgenfunction(self.steps):  # type: ignore
            async_gen = await self._acall_custom_function(self.steps, self, execution_input, **kwargs)
            async for chunk in async_gen:
                if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
                    content += chunk.content
                else:
                    content += str(chunk)
            workflow_run_response.content = content
        else:
            workflow_run_response.content = self._call_custom_function(self.steps, self, execution_input, **kwargs)
        workflow_run_response.status = RunStatus.completed

    else:
        try:
            # Track outputs from each step for enhanced data flow
            collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
            previous_step_outputs: Dict[str, StepOutput] = {}

            # shared_* feed subsequent steps; output_* accumulate everything
            # (inputs included) for the final run response.
            shared_images: List[ImageArtifact] = execution_input.images or []
            output_images: List[ImageArtifact] = (execution_input.images or []).copy()  # Start with input images
            shared_videos: List[VideoArtifact] = execution_input.videos or []
            output_videos: List[VideoArtifact] = (execution_input.videos or []).copy()  # Start with input videos
            shared_audio: List[AudioArtifact] = execution_input.audio or []
            output_audio: List[AudioArtifact] = (execution_input.audio or []).copy()  # Start with input audio
            shared_files: List[File] = execution_input.files or []
            output_files: List[File] = (execution_input.files or []).copy()  # Start with input files

            for i, step in enumerate(self.steps):  # type: ignore[arg-type]
                step_name = getattr(step, "name", f"step_{i + 1}")
                log_debug(f"Async Executing step {i + 1}/{self._get_step_count()}: {step_name}")

                # Create enhanced StepInput
                step_input = self._create_step_input(
                    execution_input=execution_input,
                    previous_step_outputs=previous_step_outputs,
                    shared_images=shared_images,
                    shared_videos=shared_videos,
                    shared_audio=shared_audio,
                    shared_files=shared_files,
                )

                step_output = await step.aexecute(step_input, session_id=self.session_id, user_id=self.user_id)  # type: ignore[union-attr]

                # Update the workflow-level previous_step_outputs dictionary.
                # NOTE: on early termination we break before this step's output
                # is appended to collected_step_outputs or its media merged.
                if isinstance(step_output, list):
                    # For multiple outputs (from Loop, Condition, etc.), store the last one
                    if step_output:
                        previous_step_outputs[step_name] = step_output[-1]
                        if any(output.stop for output in step_output):
                            logger.info(f"Early termination requested by step {step_name}")
                            break
                else:
                    # Single output
                    previous_step_outputs[step_name] = step_output
                    if step_output.stop:
                        logger.info(f"Early termination requested by step {step_name}")
                        break

                # Update shared media for next step
                if isinstance(step_output, list):
                    for output in step_output:
                        shared_images.extend(output.images or [])
                        shared_videos.extend(output.videos or [])
                        shared_audio.extend(output.audio or [])
                        shared_files.extend(output.files or [])
                        output_images.extend(output.images or [])
                        output_videos.extend(output.videos or [])
                        output_audio.extend(output.audio or [])
                        output_files.extend(output.files or [])
                else:
                    shared_images.extend(step_output.images or [])
                    shared_videos.extend(step_output.videos or [])
                    shared_audio.extend(step_output.audio or [])
                    shared_files.extend(step_output.files or [])
                    output_images.extend(step_output.images or [])
                    output_videos.extend(step_output.videos or [])
                    output_audio.extend(step_output.audio or [])
                    output_files.extend(step_output.files or [])

                collected_step_outputs.append(step_output)

            # Pull session-state mutations made by member agents/teams back up.
            self._collect_workflow_session_state_from_agents_and_teams()

            # Update the workflow_run_response with completion data
            if collected_step_outputs:
                workflow_run_response.workflow_metrics = self._aggregate_workflow_metrics(collected_step_outputs)
                last_output = collected_step_outputs[-1]
                if isinstance(last_output, list) and last_output:
                    # If it's a list (from Condition/Loop/etc.), use the last one
                    workflow_run_response.content = last_output[-1].content
                elif not isinstance(last_output, list):
                    # Single StepOutput
                    workflow_run_response.content = last_output.content
            else:
                workflow_run_response.content = "No steps executed"

            workflow_run_response.step_responses = collected_step_outputs
            workflow_run_response.images = output_images
            workflow_run_response.videos = output_videos
            workflow_run_response.audio = output_audio
            workflow_run_response.status = RunStatus.completed

        except Exception as e:
            logger.error(f"Workflow execution failed: {e}")
            workflow_run_response.status = RunStatus.error
            workflow_run_response.content = f"Workflow execution failed: {e}"

        # Store error response
        self._save_run_to_storage(workflow_run_response)

    return workflow_run_response
|
|
942
|
-
|
|
943
|
-
async def _aexecute_stream(
|
|
944
|
-
self,
|
|
945
|
-
execution_input: WorkflowExecutionInput,
|
|
946
|
-
workflow_run_response: WorkflowRunResponse,
|
|
947
|
-
stream_intermediate_steps: bool = False,
|
|
948
|
-
**kwargs: Any,
|
|
949
|
-
) -> AsyncIterator[WorkflowRunResponseEvent]:
|
|
950
|
-
"""Execute a specific pipeline by name with event streaming"""
|
|
951
|
-
from inspect import isasyncgenfunction, iscoroutinefunction, isgeneratorfunction
|
|
952
|
-
|
|
953
|
-
workflow_run_response.status = RunStatus.running
|
|
954
|
-
workflow_started_event = WorkflowStartedEvent(
|
|
955
|
-
run_id=workflow_run_response.run_id or "",
|
|
956
|
-
workflow_name=workflow_run_response.workflow_name,
|
|
957
|
-
workflow_id=workflow_run_response.workflow_id,
|
|
958
|
-
session_id=workflow_run_response.session_id,
|
|
959
|
-
)
|
|
960
|
-
yield self._handle_event(workflow_started_event, workflow_run_response)
|
|
961
|
-
|
|
962
|
-
if callable(self.steps):
|
|
963
|
-
if iscoroutinefunction(self.steps): # type: ignore
|
|
964
|
-
workflow_run_response.content = await self._acall_custom_function(
|
|
965
|
-
self.steps, self, execution_input, **kwargs
|
|
966
|
-
)
|
|
967
|
-
elif isgeneratorfunction(self.steps):
|
|
968
|
-
content = ""
|
|
969
|
-
for chunk in self.steps(self, execution_input, **kwargs): # type: ignore[arg-type]
|
|
970
|
-
if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
|
|
971
|
-
content += chunk.content
|
|
972
|
-
yield chunk
|
|
973
|
-
else:
|
|
974
|
-
content += str(chunk)
|
|
975
|
-
workflow_run_response.content = content
|
|
976
|
-
elif isasyncgenfunction(self.steps): # type: ignore
|
|
977
|
-
content = ""
|
|
978
|
-
async_gen = await self._acall_custom_function(self.steps, self, execution_input, **kwargs)
|
|
979
|
-
async for chunk in async_gen:
|
|
980
|
-
if hasattr(chunk, "content") and chunk.content is not None and isinstance(chunk.content, str):
|
|
981
|
-
content += chunk.content
|
|
982
|
-
yield chunk
|
|
983
|
-
else:
|
|
984
|
-
content += str(chunk)
|
|
985
|
-
workflow_run_response.content = content
|
|
986
|
-
else:
|
|
987
|
-
workflow_run_response.content = self.steps(self, execution_input, **kwargs)
|
|
988
|
-
workflow_run_response.status = RunStatus.completed
|
|
989
|
-
|
|
990
|
-
else:
|
|
991
|
-
try:
|
|
992
|
-
# Track outputs from each step for enhanced data flow
|
|
993
|
-
collected_step_outputs: List[Union[StepOutput, List[StepOutput]]] = []
|
|
994
|
-
previous_step_outputs: Dict[str, StepOutput] = {}
|
|
995
|
-
|
|
996
|
-
shared_images: List[ImageArtifact] = execution_input.images or []
|
|
997
|
-
output_images: List[ImageArtifact] = (execution_input.images or []).copy() # Start with input images
|
|
998
|
-
shared_videos: List[VideoArtifact] = execution_input.videos or []
|
|
999
|
-
output_videos: List[VideoArtifact] = (execution_input.videos or []).copy() # Start with input videos
|
|
1000
|
-
shared_audio: List[AudioArtifact] = execution_input.audio or []
|
|
1001
|
-
output_audio: List[AudioArtifact] = (execution_input.audio or []).copy() # Start with input audio
|
|
1002
|
-
shared_files: List[File] = execution_input.files or []
|
|
1003
|
-
output_files: List[File] = (execution_input.files or []).copy() # Start with input files
|
|
1004
|
-
|
|
1005
|
-
early_termination = False
|
|
1006
|
-
|
|
1007
|
-
for i, step in enumerate(self.steps): # type: ignore[arg-type]
|
|
1008
|
-
step_name = getattr(step, "name", f"step_{i + 1}")
|
|
1009
|
-
log_debug(f"Async streaming step {i + 1}/{self._get_step_count()}: {step_name}")
|
|
1010
|
-
|
|
1011
|
-
# Create enhanced StepInput
|
|
1012
|
-
step_input = self._create_step_input(
|
|
1013
|
-
execution_input=execution_input,
|
|
1014
|
-
previous_step_outputs=previous_step_outputs,
|
|
1015
|
-
shared_images=shared_images,
|
|
1016
|
-
shared_videos=shared_videos,
|
|
1017
|
-
shared_audio=shared_audio,
|
|
1018
|
-
shared_files=shared_files,
|
|
1019
|
-
)
|
|
1020
|
-
|
|
1021
|
-
# Execute step with streaming and yield all events
|
|
1022
|
-
async for event in step.aexecute_stream( # type: ignore[union-attr]
|
|
1023
|
-
step_input,
|
|
1024
|
-
session_id=self.session_id,
|
|
1025
|
-
user_id=self.user_id,
|
|
1026
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
1027
|
-
workflow_run_response=workflow_run_response,
|
|
1028
|
-
step_index=i,
|
|
1029
|
-
):
|
|
1030
|
-
if isinstance(event, StepOutput):
|
|
1031
|
-
step_output = event
|
|
1032
|
-
collected_step_outputs.append(step_output)
|
|
1033
|
-
|
|
1034
|
-
# Update the workflow-level previous_step_outputs dictionary
|
|
1035
|
-
previous_step_outputs[step_name] = step_output
|
|
1036
|
-
|
|
1037
|
-
# Transform StepOutput to StepOutputEvent for consistent streaming interface
|
|
1038
|
-
step_output_event = self._transform_step_output_to_event(
|
|
1039
|
-
step_output, workflow_run_response, step_index=i
|
|
1040
|
-
)
|
|
1041
|
-
|
|
1042
|
-
if step_output.stop:
|
|
1043
|
-
logger.info(f"Early termination requested by step {step_name}")
|
|
1044
|
-
# Update shared media for next step
|
|
1045
|
-
shared_images.extend(step_output.images or [])
|
|
1046
|
-
shared_videos.extend(step_output.videos or [])
|
|
1047
|
-
shared_audio.extend(step_output.audio or [])
|
|
1048
|
-
shared_files.extend(step_output.files or [])
|
|
1049
|
-
output_images.extend(step_output.images or [])
|
|
1050
|
-
output_videos.extend(step_output.videos or [])
|
|
1051
|
-
output_audio.extend(step_output.audio or [])
|
|
1052
|
-
output_files.extend(step_output.files or [])
|
|
1053
|
-
|
|
1054
|
-
if getattr(step, "executor_type", None) == "function":
|
|
1055
|
-
yield step_output_event
|
|
1056
|
-
|
|
1057
|
-
# Break out of the step loop
|
|
1058
|
-
early_termination = True
|
|
1059
|
-
break
|
|
1060
|
-
|
|
1061
|
-
# Update shared media for next step
|
|
1062
|
-
shared_images.extend(step_output.images or [])
|
|
1063
|
-
shared_videos.extend(step_output.videos or [])
|
|
1064
|
-
shared_audio.extend(step_output.audio or [])
|
|
1065
|
-
shared_files.extend(step_output.files or [])
|
|
1066
|
-
output_images.extend(step_output.images or [])
|
|
1067
|
-
output_videos.extend(step_output.videos or [])
|
|
1068
|
-
output_audio.extend(step_output.audio or [])
|
|
1069
|
-
output_files.extend(step_output.files or [])
|
|
1070
|
-
|
|
1071
|
-
# Only yield StepOutputEvent for generator functions, not for agents/teams
|
|
1072
|
-
if getattr(step, "executor_type", None) == "function":
|
|
1073
|
-
yield step_output_event
|
|
1074
|
-
|
|
1075
|
-
elif isinstance(event, WorkflowRunResponseEvent): # type: ignore
|
|
1076
|
-
yield self._handle_event(event, workflow_run_response) # type: ignore
|
|
1077
|
-
|
|
1078
|
-
else:
|
|
1079
|
-
# Yield other internal events
|
|
1080
|
-
yield event # type: ignore
|
|
1081
|
-
|
|
1082
|
-
# Break out of main step loop if early termination was requested
|
|
1083
|
-
if "early_termination" in locals() and early_termination:
|
|
1084
|
-
break
|
|
1085
|
-
|
|
1086
|
-
self._collect_workflow_session_state_from_agents_and_teams()
|
|
1087
|
-
|
|
1088
|
-
# Update the workflow_run_response with completion data
|
|
1089
|
-
if collected_step_outputs:
|
|
1090
|
-
workflow_run_response.workflow_metrics = self._aggregate_workflow_metrics(collected_step_outputs)
|
|
1091
|
-
last_output = collected_step_outputs[-1]
|
|
1092
|
-
if isinstance(last_output, list) and last_output:
|
|
1093
|
-
# If it's a list (from Condition/Loop/etc.), use the last one
|
|
1094
|
-
workflow_run_response.content = last_output[-1].content
|
|
1095
|
-
elif not isinstance(last_output, list):
|
|
1096
|
-
# Single StepOutput
|
|
1097
|
-
workflow_run_response.content = last_output.content
|
|
1098
|
-
else:
|
|
1099
|
-
workflow_run_response.content = "No steps executed"
|
|
1100
|
-
|
|
1101
|
-
workflow_run_response.step_responses = collected_step_outputs
|
|
1102
|
-
workflow_run_response.images = output_images
|
|
1103
|
-
workflow_run_response.videos = output_videos
|
|
1104
|
-
workflow_run_response.audio = output_audio
|
|
1105
|
-
workflow_run_response.status = RunStatus.completed
|
|
1106
|
-
|
|
1107
|
-
except Exception as e:
|
|
1108
|
-
logger.error(f"Workflow execution failed: {e}")
|
|
1109
|
-
|
|
1110
|
-
from agno.run.v2.workflow import WorkflowErrorEvent
|
|
1111
|
-
|
|
1112
|
-
error_event = WorkflowErrorEvent(
|
|
1113
|
-
run_id=self.run_id or "",
|
|
1114
|
-
workflow_id=self.workflow_id,
|
|
1115
|
-
workflow_name=self.name,
|
|
1116
|
-
session_id=self.session_id,
|
|
1117
|
-
error=str(e),
|
|
1118
|
-
)
|
|
1119
|
-
|
|
1120
|
-
yield error_event
|
|
1121
|
-
|
|
1122
|
-
# Update workflow_run_response with error
|
|
1123
|
-
workflow_run_response.content = error_event.error
|
|
1124
|
-
workflow_run_response.status = RunStatus.error
|
|
1125
|
-
|
|
1126
|
-
# Yield workflow completed event
|
|
1127
|
-
workflow_completed_event = WorkflowCompletedEvent(
|
|
1128
|
-
run_id=workflow_run_response.run_id or "",
|
|
1129
|
-
content=workflow_run_response.content,
|
|
1130
|
-
workflow_name=workflow_run_response.workflow_name,
|
|
1131
|
-
workflow_id=workflow_run_response.workflow_id,
|
|
1132
|
-
session_id=workflow_run_response.session_id,
|
|
1133
|
-
step_responses=workflow_run_response.step_responses, # type: ignore[arg-type]
|
|
1134
|
-
extra_data=workflow_run_response.extra_data,
|
|
1135
|
-
)
|
|
1136
|
-
yield self._handle_event(workflow_completed_event, workflow_run_response)
|
|
1137
|
-
|
|
1138
|
-
# Store the completed workflow response
|
|
1139
|
-
self._save_run_to_storage(workflow_run_response)
|
|
1140
|
-
|
|
1141
|
-
def _update_workflow_session_state(self):
|
|
1142
|
-
if not self.workflow_session_state:
|
|
1143
|
-
self.workflow_session_state = {}
|
|
1144
|
-
|
|
1145
|
-
self.workflow_session_state.update(
|
|
1146
|
-
{
|
|
1147
|
-
"workflow_id": self.workflow_id,
|
|
1148
|
-
"run_id": self.run_id,
|
|
1149
|
-
"session_id": self.session_id,
|
|
1150
|
-
"session_name": self.session_name,
|
|
1151
|
-
}
|
|
1152
|
-
)
|
|
1153
|
-
if self.name:
|
|
1154
|
-
self.workflow_session_state["workflow_name"] = self.name
|
|
1155
|
-
|
|
1156
|
-
return self.workflow_session_state
|
|
1157
|
-
|
|
1158
|
-
async def _arun_background(
|
|
1159
|
-
self,
|
|
1160
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1161
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1162
|
-
user_id: Optional[str] = None,
|
|
1163
|
-
session_id: Optional[str] = None,
|
|
1164
|
-
audio: Optional[List[Audio]] = None,
|
|
1165
|
-
images: Optional[List[Image]] = None,
|
|
1166
|
-
videos: Optional[List[Video]] = None,
|
|
1167
|
-
**kwargs: Any,
|
|
1168
|
-
) -> WorkflowRunResponse:
|
|
1169
|
-
"""Execute workflow in background using asyncio.create_task()"""
|
|
1170
|
-
|
|
1171
|
-
if user_id is not None:
|
|
1172
|
-
self.user_id = user_id
|
|
1173
|
-
if session_id is not None:
|
|
1174
|
-
self.session_id = session_id
|
|
1175
|
-
|
|
1176
|
-
if self.session_id is None:
|
|
1177
|
-
self.session_id = str(uuid4())
|
|
1178
|
-
|
|
1179
|
-
if self.run_id is None:
|
|
1180
|
-
self.run_id = str(uuid4())
|
|
1181
|
-
|
|
1182
|
-
self.initialize_workflow()
|
|
1183
|
-
self.load_session()
|
|
1184
|
-
self._prepare_steps()
|
|
1185
|
-
|
|
1186
|
-
# Create workflow run response with PENDING status
|
|
1187
|
-
workflow_run_response = WorkflowRunResponse(
|
|
1188
|
-
run_id=self.run_id,
|
|
1189
|
-
session_id=self.session_id,
|
|
1190
|
-
workflow_id=self.workflow_id,
|
|
1191
|
-
workflow_name=self.name,
|
|
1192
|
-
created_at=int(datetime.now().timestamp()),
|
|
1193
|
-
status=RunStatus.pending,
|
|
1194
|
-
)
|
|
1195
|
-
|
|
1196
|
-
# Store PENDING response immediately
|
|
1197
|
-
self._save_run_to_storage(workflow_run_response)
|
|
1198
|
-
|
|
1199
|
-
# Prepare execution input
|
|
1200
|
-
inputs = WorkflowExecutionInput(
|
|
1201
|
-
message=message,
|
|
1202
|
-
additional_data=additional_data,
|
|
1203
|
-
audio=audio, # type: ignore
|
|
1204
|
-
images=images, # type: ignore
|
|
1205
|
-
videos=videos, # type: ignore
|
|
1206
|
-
)
|
|
1207
|
-
|
|
1208
|
-
self.update_agents_and_teams_session_info()
|
|
1209
|
-
|
|
1210
|
-
async def execute_workflow_background():
|
|
1211
|
-
"""Simple background execution"""
|
|
1212
|
-
try:
|
|
1213
|
-
# Update status to RUNNING and save
|
|
1214
|
-
workflow_run_response.status = RunStatus.running
|
|
1215
|
-
self._save_run_to_storage(workflow_run_response)
|
|
1216
|
-
|
|
1217
|
-
await self._aexecute(execution_input=inputs, workflow_run_response=workflow_run_response, **kwargs)
|
|
1218
|
-
|
|
1219
|
-
self._save_run_to_storage(workflow_run_response)
|
|
1220
|
-
|
|
1221
|
-
log_debug(f"Background execution completed with status: {workflow_run_response.status}")
|
|
1222
|
-
|
|
1223
|
-
except Exception as e:
|
|
1224
|
-
logger.error(f"Background workflow execution failed: {e}")
|
|
1225
|
-
workflow_run_response.status = RunStatus.error
|
|
1226
|
-
workflow_run_response.content = f"Background execution failed: {str(e)}"
|
|
1227
|
-
self._save_run_to_storage(workflow_run_response)
|
|
1228
|
-
|
|
1229
|
-
# Create and start asyncio task
|
|
1230
|
-
loop = asyncio.get_running_loop()
|
|
1231
|
-
loop.create_task(execute_workflow_background())
|
|
1232
|
-
|
|
1233
|
-
# Return SAME object that will be updated by background execution
|
|
1234
|
-
return workflow_run_response
|
|
1235
|
-
|
|
1236
|
-
def get_run(self, run_id: str) -> Optional[WorkflowRunResponse]:
|
|
1237
|
-
"""Get the status and details of a background workflow run - SIMPLIFIED"""
|
|
1238
|
-
if self.storage is not None and self.session_id is not None:
|
|
1239
|
-
session = self.storage.read(session_id=self.session_id)
|
|
1240
|
-
if session and isinstance(session, WorkflowSessionV2) and session.runs:
|
|
1241
|
-
# Find the run by ID
|
|
1242
|
-
for run in session.runs:
|
|
1243
|
-
if run.run_id == run_id:
|
|
1244
|
-
return run
|
|
1245
|
-
|
|
1246
|
-
return None
|
|
1247
|
-
|
|
1248
|
-
@overload
|
|
1249
|
-
def run(
|
|
1250
|
-
self,
|
|
1251
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1252
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1253
|
-
user_id: Optional[str] = None,
|
|
1254
|
-
session_id: Optional[str] = None,
|
|
1255
|
-
audio: Optional[List[Audio]] = None,
|
|
1256
|
-
images: Optional[List[Image]] = None,
|
|
1257
|
-
videos: Optional[List[Video]] = None,
|
|
1258
|
-
files: Optional[List[File]] = None,
|
|
1259
|
-
stream: Literal[False] = False,
|
|
1260
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
1261
|
-
background: Optional[bool] = False,
|
|
1262
|
-
) -> WorkflowRunResponse: ...
|
|
1263
|
-
|
|
1264
|
-
@overload
|
|
1265
|
-
def run(
|
|
1266
|
-
self,
|
|
1267
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1268
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1269
|
-
user_id: Optional[str] = None,
|
|
1270
|
-
session_id: Optional[str] = None,
|
|
1271
|
-
audio: Optional[List[Audio]] = None,
|
|
1272
|
-
images: Optional[List[Image]] = None,
|
|
1273
|
-
videos: Optional[List[Video]] = None,
|
|
1274
|
-
files: Optional[List[File]] = None,
|
|
1275
|
-
stream: Literal[True] = True,
|
|
1276
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
1277
|
-
background: Optional[bool] = False,
|
|
1278
|
-
) -> Iterator[WorkflowRunResponseEvent]: ...
|
|
1279
|
-
|
|
1280
|
-
def run(
|
|
1281
|
-
self,
|
|
1282
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1283
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1284
|
-
user_id: Optional[str] = None,
|
|
1285
|
-
session_id: Optional[str] = None,
|
|
1286
|
-
audio: Optional[List[Audio]] = None,
|
|
1287
|
-
images: Optional[List[Image]] = None,
|
|
1288
|
-
videos: Optional[List[Video]] = None,
|
|
1289
|
-
files: Optional[List[File]] = None,
|
|
1290
|
-
stream: bool = False,
|
|
1291
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
1292
|
-
background: Optional[bool] = False,
|
|
1293
|
-
**kwargs: Any,
|
|
1294
|
-
) -> Union[WorkflowRunResponse, Iterator[WorkflowRunResponseEvent]]:
|
|
1295
|
-
"""Execute the workflow synchronously with optional streaming"""
|
|
1296
|
-
|
|
1297
|
-
if background:
|
|
1298
|
-
raise RuntimeError("Background execution is not supported for sync run()")
|
|
1299
|
-
|
|
1300
|
-
self._set_debug()
|
|
1301
|
-
|
|
1302
|
-
log_debug(f"Workflow Run Start: {self.name}", center=True)
|
|
1303
|
-
|
|
1304
|
-
# Use simple defaults
|
|
1305
|
-
stream = stream or self.stream or False
|
|
1306
|
-
stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
|
|
1307
|
-
|
|
1308
|
-
# Can't have stream_intermediate_steps if stream is False
|
|
1309
|
-
if not stream:
|
|
1310
|
-
stream_intermediate_steps = False
|
|
1311
|
-
|
|
1312
|
-
log_debug(f"Stream: {stream}")
|
|
1313
|
-
log_debug(f"Total steps: {self._get_step_count()}")
|
|
1314
|
-
|
|
1315
|
-
if user_id is not None:
|
|
1316
|
-
self.user_id = user_id
|
|
1317
|
-
log_debug(f"User ID: {user_id}")
|
|
1318
|
-
if session_id is not None:
|
|
1319
|
-
self.session_id = session_id
|
|
1320
|
-
log_debug(f"Session ID: {session_id}")
|
|
1321
|
-
|
|
1322
|
-
if self.session_id is None:
|
|
1323
|
-
self.session_id = str(uuid4())
|
|
1324
|
-
|
|
1325
|
-
self.run_id = str(uuid4())
|
|
1326
|
-
|
|
1327
|
-
self.initialize_workflow()
|
|
1328
|
-
|
|
1329
|
-
# Load or create session
|
|
1330
|
-
self.load_session()
|
|
1331
|
-
|
|
1332
|
-
# Prepare steps
|
|
1333
|
-
self._prepare_steps()
|
|
1334
|
-
|
|
1335
|
-
# Create workflow run response that will be updated by reference
|
|
1336
|
-
workflow_run_response = WorkflowRunResponse(
|
|
1337
|
-
run_id=self.run_id,
|
|
1338
|
-
session_id=self.session_id,
|
|
1339
|
-
workflow_id=self.workflow_id,
|
|
1340
|
-
workflow_name=self.name,
|
|
1341
|
-
created_at=int(datetime.now().timestamp()),
|
|
1342
|
-
)
|
|
1343
|
-
self.run_response = workflow_run_response
|
|
1344
|
-
|
|
1345
|
-
inputs = WorkflowExecutionInput(
|
|
1346
|
-
message=message,
|
|
1347
|
-
additional_data=additional_data,
|
|
1348
|
-
audio=audio, # type: ignore
|
|
1349
|
-
images=images, # type: ignore
|
|
1350
|
-
videos=videos, # type: ignore
|
|
1351
|
-
files=files, # type: ignore
|
|
1352
|
-
)
|
|
1353
|
-
log_debug(
|
|
1354
|
-
f"Created pipeline input with session state keys: {list(self.workflow_session_state.keys()) if self.workflow_session_state else 'None'}"
|
|
1355
|
-
)
|
|
1356
|
-
|
|
1357
|
-
self.update_agents_and_teams_session_info()
|
|
1358
|
-
|
|
1359
|
-
if stream:
|
|
1360
|
-
return self._execute_stream(
|
|
1361
|
-
execution_input=inputs, # type: ignore[arg-type]
|
|
1362
|
-
workflow_run_response=workflow_run_response,
|
|
1363
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
1364
|
-
**kwargs,
|
|
1365
|
-
)
|
|
1366
|
-
else:
|
|
1367
|
-
return self._execute(execution_input=inputs, workflow_run_response=workflow_run_response, **kwargs)
|
|
1368
|
-
|
|
1369
|
-
@overload
|
|
1370
|
-
async def arun(
|
|
1371
|
-
self,
|
|
1372
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1373
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1374
|
-
user_id: Optional[str] = None,
|
|
1375
|
-
session_id: Optional[str] = None,
|
|
1376
|
-
audio: Optional[List[Audio]] = None,
|
|
1377
|
-
images: Optional[List[Image]] = None,
|
|
1378
|
-
videos: Optional[List[Video]] = None,
|
|
1379
|
-
files: Optional[List[File]] = None,
|
|
1380
|
-
stream: Literal[False] = False,
|
|
1381
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
1382
|
-
background: Optional[bool] = False,
|
|
1383
|
-
) -> WorkflowRunResponse: ...
|
|
1384
|
-
|
|
1385
|
-
@overload
|
|
1386
|
-
async def arun(
|
|
1387
|
-
self,
|
|
1388
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1389
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1390
|
-
user_id: Optional[str] = None,
|
|
1391
|
-
session_id: Optional[str] = None,
|
|
1392
|
-
audio: Optional[List[Audio]] = None,
|
|
1393
|
-
images: Optional[List[Image]] = None,
|
|
1394
|
-
videos: Optional[List[Video]] = None,
|
|
1395
|
-
files: Optional[List[File]] = None,
|
|
1396
|
-
stream: Literal[True] = True,
|
|
1397
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
1398
|
-
background: Optional[bool] = False,
|
|
1399
|
-
) -> AsyncIterator[WorkflowRunResponseEvent]: ...
|
|
1400
|
-
|
|
1401
|
-
async def arun(
|
|
1402
|
-
self,
|
|
1403
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1404
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1405
|
-
user_id: Optional[str] = None,
|
|
1406
|
-
session_id: Optional[str] = None,
|
|
1407
|
-
audio: Optional[List[Audio]] = None,
|
|
1408
|
-
images: Optional[List[Image]] = None,
|
|
1409
|
-
videos: Optional[List[Video]] = None,
|
|
1410
|
-
files: Optional[List[File]] = None,
|
|
1411
|
-
stream: bool = False,
|
|
1412
|
-
stream_intermediate_steps: Optional[bool] = False,
|
|
1413
|
-
background: Optional[bool] = False,
|
|
1414
|
-
**kwargs: Any,
|
|
1415
|
-
) -> Union[WorkflowRunResponse, AsyncIterator[WorkflowRunResponseEvent]]:
|
|
1416
|
-
"""Execute the workflow synchronously with optional streaming"""
|
|
1417
|
-
if background:
|
|
1418
|
-
return await self._arun_background(
|
|
1419
|
-
message=message,
|
|
1420
|
-
additional_data=additional_data,
|
|
1421
|
-
user_id=user_id,
|
|
1422
|
-
session_id=session_id,
|
|
1423
|
-
audio=audio,
|
|
1424
|
-
images=images,
|
|
1425
|
-
videos=videos,
|
|
1426
|
-
**kwargs,
|
|
1427
|
-
)
|
|
1428
|
-
|
|
1429
|
-
self._set_debug()
|
|
1430
|
-
|
|
1431
|
-
log_debug(f"Async Workflow Run Start: {self.name}", center=True)
|
|
1432
|
-
|
|
1433
|
-
# Use simple defaults
|
|
1434
|
-
stream = stream or self.stream or False
|
|
1435
|
-
stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
|
|
1436
|
-
|
|
1437
|
-
# Can't have stream_intermediate_steps if stream is False
|
|
1438
|
-
if not stream:
|
|
1439
|
-
stream_intermediate_steps = False
|
|
1440
|
-
|
|
1441
|
-
log_debug(f"Stream: {stream}")
|
|
1442
|
-
|
|
1443
|
-
# Set user_id and session_id if provided
|
|
1444
|
-
if user_id is not None:
|
|
1445
|
-
self.user_id = user_id
|
|
1446
|
-
log_debug(f"User ID: {user_id}")
|
|
1447
|
-
if session_id is not None:
|
|
1448
|
-
self.session_id = session_id
|
|
1449
|
-
log_debug(f"Session ID: {session_id}")
|
|
1450
|
-
|
|
1451
|
-
if self.session_id is None:
|
|
1452
|
-
self.session_id = str(uuid4())
|
|
1453
|
-
|
|
1454
|
-
self.run_id = str(uuid4())
|
|
1455
|
-
|
|
1456
|
-
self.initialize_workflow()
|
|
1457
|
-
|
|
1458
|
-
# Load or create session
|
|
1459
|
-
self.load_session()
|
|
1460
|
-
|
|
1461
|
-
# Prepare steps
|
|
1462
|
-
self._prepare_steps()
|
|
1463
|
-
|
|
1464
|
-
# Create workflow run response that will be updated by reference
|
|
1465
|
-
workflow_run_response = WorkflowRunResponse(
|
|
1466
|
-
run_id=self.run_id,
|
|
1467
|
-
session_id=self.session_id,
|
|
1468
|
-
workflow_id=self.workflow_id,
|
|
1469
|
-
workflow_name=self.name,
|
|
1470
|
-
created_at=int(datetime.now().timestamp()),
|
|
1471
|
-
)
|
|
1472
|
-
self.run_response = workflow_run_response
|
|
1473
|
-
|
|
1474
|
-
inputs = WorkflowExecutionInput(
|
|
1475
|
-
message=message,
|
|
1476
|
-
additional_data=additional_data,
|
|
1477
|
-
audio=audio, # type: ignore
|
|
1478
|
-
images=images, # type: ignore
|
|
1479
|
-
videos=videos, # type: ignore
|
|
1480
|
-
files=files, # type: ignore
|
|
1481
|
-
)
|
|
1482
|
-
log_debug(
|
|
1483
|
-
f"Created async pipeline input with session state keys: {list(self.workflow_session_state.keys()) if self.workflow_session_state else 'None'}"
|
|
1484
|
-
)
|
|
1485
|
-
|
|
1486
|
-
self.update_agents_and_teams_session_info()
|
|
1487
|
-
|
|
1488
|
-
if stream:
|
|
1489
|
-
return self._aexecute_stream(
|
|
1490
|
-
execution_input=inputs,
|
|
1491
|
-
workflow_run_response=workflow_run_response,
|
|
1492
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
1493
|
-
**kwargs,
|
|
1494
|
-
)
|
|
1495
|
-
else:
|
|
1496
|
-
return await self._aexecute(execution_input=inputs, workflow_run_response=workflow_run_response, **kwargs)
|
|
1497
|
-
|
|
1498
|
-
def _prepare_steps(self):
|
|
1499
|
-
"""Prepare the steps for execution"""
|
|
1500
|
-
if not callable(self.steps) and self.steps is not None:
|
|
1501
|
-
prepared_steps: List[Union[Step, Steps, Loop, Parallel, Condition, Router]] = []
|
|
1502
|
-
for i, step in enumerate(self.steps): # type: ignore
|
|
1503
|
-
if callable(step) and hasattr(step, "__name__"):
|
|
1504
|
-
step_name = step.__name__
|
|
1505
|
-
log_debug(f"Step {i + 1}: Wrapping callable function '{step_name}'")
|
|
1506
|
-
prepared_steps.append(Step(name=step_name, description="User-defined callable step", executor=step))
|
|
1507
|
-
elif isinstance(step, Agent):
|
|
1508
|
-
step_name = step.name or f"step_{i + 1}"
|
|
1509
|
-
log_debug(f"Step {i + 1}: Agent '{step_name}'")
|
|
1510
|
-
prepared_steps.append(Step(name=step_name, description=step.description, agent=step))
|
|
1511
|
-
elif isinstance(step, Team):
|
|
1512
|
-
step_name = step.name or f"step_{i + 1}"
|
|
1513
|
-
log_debug(f"Step {i + 1}: Team '{step_name}' with {len(step.members)} members")
|
|
1514
|
-
prepared_steps.append(Step(name=step_name, description=step.description, team=step))
|
|
1515
|
-
elif isinstance(step, (Step, Steps, Loop, Parallel, Condition, Router)):
|
|
1516
|
-
step_type = type(step).__name__
|
|
1517
|
-
step_name = getattr(step, "name", f"unnamed_{step_type.lower()}")
|
|
1518
|
-
log_debug(f"Step {i + 1}: {step_type} '{step_name}'")
|
|
1519
|
-
prepared_steps.append(step)
|
|
1520
|
-
else:
|
|
1521
|
-
raise ValueError(f"Invalid step type: {type(step).__name__}")
|
|
1522
|
-
|
|
1523
|
-
self.steps = prepared_steps # type: ignore
|
|
1524
|
-
log_debug("Step preparation completed")
|
|
1525
|
-
|
|
1526
|
-
def get_workflow_session(self) -> WorkflowSessionV2:
|
|
1527
|
-
"""Get a WorkflowSessionV2 object for storage"""
|
|
1528
|
-
workflow_data = {}
|
|
1529
|
-
if self.steps and not callable(self.steps):
|
|
1530
|
-
workflow_data["steps"] = [
|
|
1531
|
-
{
|
|
1532
|
-
"name": step.name if hasattr(step, "name") else step.__name__,
|
|
1533
|
-
"description": step.description if hasattr(step, "description") else "User-defined callable step",
|
|
1534
|
-
}
|
|
1535
|
-
for step in self.steps # type: ignore
|
|
1536
|
-
]
|
|
1537
|
-
elif callable(self.steps):
|
|
1538
|
-
workflow_data["steps"] = [
|
|
1539
|
-
{
|
|
1540
|
-
"name": "Custom Function",
|
|
1541
|
-
"description": "User-defined callable workflow",
|
|
1542
|
-
}
|
|
1543
|
-
]
|
|
1544
|
-
|
|
1545
|
-
if self.session_id is None:
|
|
1546
|
-
raise ValueError("Session ID is required")
|
|
1547
|
-
|
|
1548
|
-
return WorkflowSessionV2(
|
|
1549
|
-
session_id=self.session_id,
|
|
1550
|
-
user_id=self.user_id,
|
|
1551
|
-
workflow_id=self.workflow_id,
|
|
1552
|
-
workflow_name=self.name,
|
|
1553
|
-
runs=self.workflow_session.runs if self.workflow_session else [],
|
|
1554
|
-
workflow_data=workflow_data,
|
|
1555
|
-
session_data={},
|
|
1556
|
-
)
|
|
1557
|
-
|
|
1558
|
-
def load_workflow_session(self, session: WorkflowSessionV2):
|
|
1559
|
-
"""Load workflow session from storage"""
|
|
1560
|
-
if self.workflow_id is None and session.workflow_id is not None:
|
|
1561
|
-
self.workflow_id = session.workflow_id
|
|
1562
|
-
if self.user_id is None and session.user_id is not None:
|
|
1563
|
-
self.user_id = session.user_id
|
|
1564
|
-
if self.session_id is None and session.session_id is not None:
|
|
1565
|
-
self.session_id = session.session_id
|
|
1566
|
-
if self.name is None and session.workflow_name is not None:
|
|
1567
|
-
self.name = session.workflow_name
|
|
1568
|
-
|
|
1569
|
-
self.workflow_session = session
|
|
1570
|
-
log_debug(f"Loaded WorkflowSessionV2: {session.session_id}")
|
|
1571
|
-
|
|
1572
|
-
def read_from_storage(self) -> Optional[WorkflowSessionV2]:
|
|
1573
|
-
"""Load the WorkflowSessionV2 from storage"""
|
|
1574
|
-
if self.storage is not None and self.session_id is not None:
|
|
1575
|
-
session = self.storage.read(session_id=self.session_id)
|
|
1576
|
-
if session and isinstance(session, WorkflowSessionV2):
|
|
1577
|
-
self.load_workflow_session(session)
|
|
1578
|
-
return session
|
|
1579
|
-
return None
|
|
1580
|
-
|
|
1581
|
-
def write_to_storage(self) -> Optional[WorkflowSessionV2]:
|
|
1582
|
-
"""Save the WorkflowSessionV2 to storage"""
|
|
1583
|
-
if self.storage is not None:
|
|
1584
|
-
session_to_save = self.get_workflow_session()
|
|
1585
|
-
saved_session = self.storage.upsert(session=session_to_save)
|
|
1586
|
-
if saved_session and isinstance(saved_session, WorkflowSessionV2):
|
|
1587
|
-
self.workflow_session = saved_session
|
|
1588
|
-
return saved_session
|
|
1589
|
-
return None
|
|
1590
|
-
|
|
1591
|
-
def load_session(self, force: bool = False) -> Optional[str]:
|
|
1592
|
-
"""Load an existing session from storage or create a new one"""
|
|
1593
|
-
if self.workflow_session is not None and not force:
|
|
1594
|
-
if self.session_id is not None and self.workflow_session.session_id == self.session_id:
|
|
1595
|
-
log_debug("Using existing workflow session")
|
|
1596
|
-
return self.workflow_session.session_id
|
|
1597
|
-
|
|
1598
|
-
if self.storage is not None:
|
|
1599
|
-
# Try to load existing session
|
|
1600
|
-
existing_session = self.read_from_storage()
|
|
1601
|
-
|
|
1602
|
-
# Create new session if it doesn't exist
|
|
1603
|
-
if existing_session is None:
|
|
1604
|
-
log_debug("Creating new WorkflowSessionV2")
|
|
1605
|
-
|
|
1606
|
-
# Ensure we have a session_id
|
|
1607
|
-
if self.session_id is None:
|
|
1608
|
-
self.session_id = str(uuid4())
|
|
1609
|
-
|
|
1610
|
-
self.workflow_session = WorkflowSessionV2(
|
|
1611
|
-
session_id=self.session_id,
|
|
1612
|
-
user_id=self.user_id,
|
|
1613
|
-
workflow_id=self.workflow_id,
|
|
1614
|
-
workflow_name=self.name,
|
|
1615
|
-
)
|
|
1616
|
-
saved_session = self.write_to_storage()
|
|
1617
|
-
if saved_session is None:
|
|
1618
|
-
raise Exception("Failed to create new WorkflowSessionV2 in storage")
|
|
1619
|
-
log_debug(f"Created WorkflowSessionV2: {saved_session.session_id}")
|
|
1620
|
-
|
|
1621
|
-
return self.session_id
|
|
1622
|
-
|
|
1623
|
-
def new_session(self) -> None:
|
|
1624
|
-
"""Create a new workflow session"""
|
|
1625
|
-
log_debug("Creating new workflow session")
|
|
1626
|
-
|
|
1627
|
-
self.workflow_session = None
|
|
1628
|
-
self.session_id = str(uuid4())
|
|
1629
|
-
|
|
1630
|
-
log_debug(f"New session ID: {self.session_id}")
|
|
1631
|
-
self.load_session(force=True)
|
|
1632
|
-
|
|
1633
|
-
def _format_step_content_for_display(self, step_output: StepOutput) -> str:
|
|
1634
|
-
"""Format content for display, handling structured outputs. Works for both raw content and StepOutput objects."""
|
|
1635
|
-
# If it's a StepOutput, extract the content
|
|
1636
|
-
if hasattr(step_output, "content"):
|
|
1637
|
-
actual_content = step_output.content
|
|
1638
|
-
else:
|
|
1639
|
-
actual_content = step_output
|
|
1640
|
-
|
|
1641
|
-
if not actual_content:
|
|
1642
|
-
return ""
|
|
1643
|
-
|
|
1644
|
-
# If it's already a string, return as-is
|
|
1645
|
-
if isinstance(actual_content, str):
|
|
1646
|
-
return actual_content
|
|
1647
|
-
|
|
1648
|
-
# If it's a structured output (BaseModel or dict), format it nicely
|
|
1649
|
-
if isinstance(actual_content, BaseModel):
|
|
1650
|
-
return (
|
|
1651
|
-
f"**Structured Output:**\n\n```json\n{actual_content.model_dump_json(indent=2, exclude_none=True)}\n```"
|
|
1652
|
-
)
|
|
1653
|
-
elif isinstance(actual_content, (dict, list)):
|
|
1654
|
-
import json
|
|
1655
|
-
|
|
1656
|
-
return f"**Structured Output:**\n\n```json\n{json.dumps(actual_content, indent=2, default=str)}\n```"
|
|
1657
|
-
else:
|
|
1658
|
-
# Fallback to string conversion
|
|
1659
|
-
return str(actual_content)
|
|
1660
|
-
|
|
1661
|
-
def print_response(
|
|
1662
|
-
self,
|
|
1663
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1664
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1665
|
-
user_id: Optional[str] = None,
|
|
1666
|
-
session_id: Optional[str] = None,
|
|
1667
|
-
audio: Optional[List[Audio]] = None,
|
|
1668
|
-
images: Optional[List[Image]] = None,
|
|
1669
|
-
videos: Optional[List[Video]] = None,
|
|
1670
|
-
files: Optional[List[File]] = None,
|
|
1671
|
-
stream: bool = False,
|
|
1672
|
-
stream_intermediate_steps: bool = False,
|
|
1673
|
-
markdown: bool = False,
|
|
1674
|
-
show_time: bool = True,
|
|
1675
|
-
show_step_details: bool = True,
|
|
1676
|
-
console: Optional[Any] = None,
|
|
1677
|
-
**kwargs: Any,
|
|
1678
|
-
) -> None:
|
|
1679
|
-
"""Print workflow execution with rich formatting and optional streaming
|
|
1680
|
-
|
|
1681
|
-
Args:
|
|
1682
|
-
message: The main query/input for the workflow
|
|
1683
|
-
message_data: Attached message data to the input
|
|
1684
|
-
user_id: User ID
|
|
1685
|
-
session_id: Session ID
|
|
1686
|
-
audio: Audio input
|
|
1687
|
-
images: Image input
|
|
1688
|
-
videos: Video input
|
|
1689
|
-
files: File input
|
|
1690
|
-
stream: Whether to stream the response content
|
|
1691
|
-
stream_intermediate_steps: Whether to stream intermediate steps
|
|
1692
|
-
markdown: Whether to render content as markdown
|
|
1693
|
-
show_time: Whether to show execution time
|
|
1694
|
-
show_step_details: Whether to show individual step outputs
|
|
1695
|
-
console: Rich console instance (optional)
|
|
1696
|
-
"""
|
|
1697
|
-
|
|
1698
|
-
stream_intermediate_steps = stream_intermediate_steps or self.stream_intermediate_steps or False
|
|
1699
|
-
stream = stream or self.stream or False
|
|
1700
|
-
|
|
1701
|
-
if stream:
|
|
1702
|
-
self._print_response_stream(
|
|
1703
|
-
message=message,
|
|
1704
|
-
user_id=user_id,
|
|
1705
|
-
session_id=session_id,
|
|
1706
|
-
additional_data=additional_data,
|
|
1707
|
-
audio=audio,
|
|
1708
|
-
images=images,
|
|
1709
|
-
videos=videos,
|
|
1710
|
-
files=files,
|
|
1711
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
1712
|
-
markdown=markdown,
|
|
1713
|
-
show_time=show_time,
|
|
1714
|
-
show_step_details=show_step_details,
|
|
1715
|
-
console=console,
|
|
1716
|
-
**kwargs,
|
|
1717
|
-
)
|
|
1718
|
-
else:
|
|
1719
|
-
self._print_response(
|
|
1720
|
-
message=message,
|
|
1721
|
-
user_id=user_id,
|
|
1722
|
-
session_id=session_id,
|
|
1723
|
-
additional_data=additional_data,
|
|
1724
|
-
audio=audio,
|
|
1725
|
-
images=images,
|
|
1726
|
-
videos=videos,
|
|
1727
|
-
files=files,
|
|
1728
|
-
markdown=markdown,
|
|
1729
|
-
show_time=show_time,
|
|
1730
|
-
show_step_details=show_step_details,
|
|
1731
|
-
console=console,
|
|
1732
|
-
**kwargs,
|
|
1733
|
-
)
|
|
1734
|
-
|
|
1735
|
-
def _print_response(
|
|
1736
|
-
self,
|
|
1737
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1738
|
-
user_id: Optional[str] = None,
|
|
1739
|
-
session_id: Optional[str] = None,
|
|
1740
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1741
|
-
audio: Optional[List[Audio]] = None,
|
|
1742
|
-
images: Optional[List[Image]] = None,
|
|
1743
|
-
videos: Optional[List[Video]] = None,
|
|
1744
|
-
files: Optional[List[File]] = None,
|
|
1745
|
-
markdown: bool = False,
|
|
1746
|
-
show_time: bool = True,
|
|
1747
|
-
show_step_details: bool = True,
|
|
1748
|
-
console: Optional[Any] = None,
|
|
1749
|
-
**kwargs: Any,
|
|
1750
|
-
) -> None:
|
|
1751
|
-
"""Print workflow execution with rich formatting (non-streaming)"""
|
|
1752
|
-
from rich.live import Live
|
|
1753
|
-
from rich.markdown import Markdown
|
|
1754
|
-
from rich.status import Status
|
|
1755
|
-
from rich.text import Text
|
|
1756
|
-
|
|
1757
|
-
from agno.utils.response import create_panel
|
|
1758
|
-
from agno.utils.timer import Timer
|
|
1759
|
-
|
|
1760
|
-
if console is None:
|
|
1761
|
-
from agno.cli.console import console
|
|
1762
|
-
|
|
1763
|
-
# Show workflow info
|
|
1764
|
-
media_info = []
|
|
1765
|
-
if audio:
|
|
1766
|
-
media_info.append(f"Audio files: {len(audio)}")
|
|
1767
|
-
if images:
|
|
1768
|
-
media_info.append(f"Images: {len(images)}")
|
|
1769
|
-
if videos:
|
|
1770
|
-
media_info.append(f"Videos: {len(videos)}")
|
|
1771
|
-
|
|
1772
|
-
workflow_info = f"""**Workflow:** {self.name}"""
|
|
1773
|
-
if self.description:
|
|
1774
|
-
workflow_info += f"""\n\n**Description:** {self.description}"""
|
|
1775
|
-
workflow_info += f"""\n\n**Steps:** {self._get_step_count()} steps"""
|
|
1776
|
-
if message:
|
|
1777
|
-
if isinstance(message, str):
|
|
1778
|
-
workflow_info += f"""\n\n**Message:** {message}"""
|
|
1779
|
-
else:
|
|
1780
|
-
# Handle structured input message
|
|
1781
|
-
if isinstance(message, BaseModel):
|
|
1782
|
-
data_display = message.model_dump_json(indent=2, exclude_none=True)
|
|
1783
|
-
elif isinstance(message, (dict, list)):
|
|
1784
|
-
import json
|
|
1785
|
-
|
|
1786
|
-
data_display = json.dumps(message, indent=2, default=str)
|
|
1787
|
-
else:
|
|
1788
|
-
data_display = str(message)
|
|
1789
|
-
workflow_info += f"""\n\n**Structured Input:**\n```json\n{data_display}\n```"""
|
|
1790
|
-
if user_id:
|
|
1791
|
-
workflow_info += f"""\n\n**User ID:** {user_id}"""
|
|
1792
|
-
if session_id:
|
|
1793
|
-
workflow_info += f"""\n\n**Session ID:** {session_id}"""
|
|
1794
|
-
workflow_info = workflow_info.strip()
|
|
1795
|
-
|
|
1796
|
-
workflow_panel = create_panel(
|
|
1797
|
-
content=Markdown(workflow_info) if markdown else workflow_info,
|
|
1798
|
-
title="Workflow Information",
|
|
1799
|
-
border_style="cyan",
|
|
1800
|
-
)
|
|
1801
|
-
console.print(workflow_panel) # type: ignore
|
|
1802
|
-
|
|
1803
|
-
# Start timer
|
|
1804
|
-
response_timer = Timer()
|
|
1805
|
-
response_timer.start()
|
|
1806
|
-
|
|
1807
|
-
with Live(console=console) as live_log:
|
|
1808
|
-
status = Status("Starting workflow...", spinner="dots")
|
|
1809
|
-
live_log.update(status)
|
|
1810
|
-
|
|
1811
|
-
try:
|
|
1812
|
-
# Execute workflow and get the response directly
|
|
1813
|
-
workflow_response: WorkflowRunResponse = self.run(
|
|
1814
|
-
message=message,
|
|
1815
|
-
user_id=user_id,
|
|
1816
|
-
session_id=session_id,
|
|
1817
|
-
additional_data=additional_data,
|
|
1818
|
-
audio=audio,
|
|
1819
|
-
images=images,
|
|
1820
|
-
videos=videos,
|
|
1821
|
-
files=files,
|
|
1822
|
-
**kwargs,
|
|
1823
|
-
) # type: ignore
|
|
1824
|
-
|
|
1825
|
-
response_timer.stop()
|
|
1826
|
-
|
|
1827
|
-
if show_step_details and workflow_response.step_responses:
|
|
1828
|
-
for i, step_output in enumerate(workflow_response.step_responses):
|
|
1829
|
-
# Handle both single StepOutput and List[StepOutput] (from loop/parallel steps)
|
|
1830
|
-
if isinstance(step_output, list):
|
|
1831
|
-
# This is a loop or parallel step with multiple outputs
|
|
1832
|
-
for j, sub_step_output in enumerate(step_output):
|
|
1833
|
-
if sub_step_output.content:
|
|
1834
|
-
formatted_content = self._format_step_content_for_display(sub_step_output)
|
|
1835
|
-
step_panel = create_panel(
|
|
1836
|
-
content=Markdown(formatted_content) if markdown else formatted_content,
|
|
1837
|
-
title=f"Step {i + 1}.{j + 1}: {sub_step_output.step_name} (Completed)",
|
|
1838
|
-
border_style="orange3",
|
|
1839
|
-
)
|
|
1840
|
-
console.print(step_panel) # type: ignore
|
|
1841
|
-
else:
|
|
1842
|
-
# This is a regular single step
|
|
1843
|
-
if step_output.content:
|
|
1844
|
-
formatted_content = self._format_step_content_for_display(step_output)
|
|
1845
|
-
step_panel = create_panel(
|
|
1846
|
-
content=Markdown(formatted_content) if markdown else formatted_content,
|
|
1847
|
-
title=f"Step {i + 1}: {step_output.step_name} (Completed)",
|
|
1848
|
-
border_style="orange3",
|
|
1849
|
-
)
|
|
1850
|
-
console.print(step_panel) # type: ignore
|
|
1851
|
-
|
|
1852
|
-
# For callable functions, show the content directly since there are no step_responses
|
|
1853
|
-
elif show_step_details and callable(self.steps) and workflow_response.content:
|
|
1854
|
-
step_panel = create_panel(
|
|
1855
|
-
content=Markdown(workflow_response.content) if markdown else workflow_response.content, # type: ignore
|
|
1856
|
-
title="Custom Function (Completed)",
|
|
1857
|
-
border_style="orange3",
|
|
1858
|
-
)
|
|
1859
|
-
console.print(step_panel) # type: ignore
|
|
1860
|
-
|
|
1861
|
-
# Show final summary
|
|
1862
|
-
if workflow_response.extra_data:
|
|
1863
|
-
status = workflow_response.status.value # type: ignore
|
|
1864
|
-
summary_content = ""
|
|
1865
|
-
summary_content += f"""\n\n**Status:** {status}"""
|
|
1866
|
-
summary_content += f"""\n\n**Steps Completed:** {len(workflow_response.step_responses) if workflow_response.step_responses else 0}"""
|
|
1867
|
-
summary_content = summary_content.strip()
|
|
1868
|
-
|
|
1869
|
-
summary_panel = create_panel(
|
|
1870
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
1871
|
-
title="Execution Summary",
|
|
1872
|
-
border_style="blue",
|
|
1873
|
-
)
|
|
1874
|
-
console.print(summary_panel) # type: ignore
|
|
1875
|
-
|
|
1876
|
-
live_log.update("")
|
|
1877
|
-
|
|
1878
|
-
# Final completion message
|
|
1879
|
-
if show_time:
|
|
1880
|
-
completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
|
|
1881
|
-
console.print(completion_text) # type: ignore
|
|
1882
|
-
|
|
1883
|
-
except Exception as e:
|
|
1884
|
-
import traceback
|
|
1885
|
-
|
|
1886
|
-
traceback.print_exc()
|
|
1887
|
-
response_timer.stop()
|
|
1888
|
-
error_panel = create_panel(
|
|
1889
|
-
content=f"Workflow execution failed: {str(e)}", title="Execution Error", border_style="red"
|
|
1890
|
-
)
|
|
1891
|
-
console.print(error_panel) # type: ignore
|
|
1892
|
-
|
|
1893
|
-
def _print_response_stream(
|
|
1894
|
-
self,
|
|
1895
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
1896
|
-
user_id: Optional[str] = None,
|
|
1897
|
-
session_id: Optional[str] = None,
|
|
1898
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
1899
|
-
audio: Optional[List[Audio]] = None,
|
|
1900
|
-
images: Optional[List[Image]] = None,
|
|
1901
|
-
videos: Optional[List[Video]] = None,
|
|
1902
|
-
files: Optional[List[File]] = None,
|
|
1903
|
-
stream_intermediate_steps: bool = False,
|
|
1904
|
-
markdown: bool = False,
|
|
1905
|
-
show_time: bool = True,
|
|
1906
|
-
show_step_details: bool = True,
|
|
1907
|
-
console: Optional[Any] = None,
|
|
1908
|
-
**kwargs: Any,
|
|
1909
|
-
) -> None:
|
|
1910
|
-
"""Print workflow execution with clean streaming"""
|
|
1911
|
-
from rich.console import Group
|
|
1912
|
-
from rich.live import Live
|
|
1913
|
-
from rich.markdown import Markdown
|
|
1914
|
-
from rich.status import Status
|
|
1915
|
-
from rich.text import Text
|
|
1916
|
-
|
|
1917
|
-
from agno.utils.response import create_panel
|
|
1918
|
-
from agno.utils.timer import Timer
|
|
1919
|
-
|
|
1920
|
-
if console is None:
|
|
1921
|
-
from agno.cli.console import console
|
|
1922
|
-
|
|
1923
|
-
stream_intermediate_steps = True # With streaming print response, we need to stream intermediate steps
|
|
1924
|
-
|
|
1925
|
-
# Show workflow info (same as before)
|
|
1926
|
-
media_info = []
|
|
1927
|
-
if audio:
|
|
1928
|
-
media_info.append(f"Audio files: {len(audio)}")
|
|
1929
|
-
if images:
|
|
1930
|
-
media_info.append(f"Images: {len(images)}")
|
|
1931
|
-
if videos:
|
|
1932
|
-
media_info.append(f"Videos: {len(videos)}")
|
|
1933
|
-
|
|
1934
|
-
workflow_info = f"""**Workflow:** {self.name}"""
|
|
1935
|
-
if self.description:
|
|
1936
|
-
workflow_info += f"""\n\n**Description:** {self.description}"""
|
|
1937
|
-
workflow_info += f"""\n\n**Steps:** {self._get_step_count()} steps"""
|
|
1938
|
-
if message:
|
|
1939
|
-
if isinstance(message, str):
|
|
1940
|
-
workflow_info += f"""\n\n**Message:** {message}"""
|
|
1941
|
-
else:
|
|
1942
|
-
# Handle structured input message
|
|
1943
|
-
if isinstance(message, BaseModel):
|
|
1944
|
-
data_display = message.model_dump_json(indent=2, exclude_none=True)
|
|
1945
|
-
elif isinstance(message, (dict, list)):
|
|
1946
|
-
import json
|
|
1947
|
-
|
|
1948
|
-
data_display = json.dumps(message, indent=2, default=str)
|
|
1949
|
-
else:
|
|
1950
|
-
data_display = str(message)
|
|
1951
|
-
workflow_info += f"""\n\n**Structured Input:**\n```json\n{data_display}\n```"""
|
|
1952
|
-
if user_id:
|
|
1953
|
-
workflow_info += f"""\n\n**User ID:** {user_id}"""
|
|
1954
|
-
if session_id:
|
|
1955
|
-
workflow_info += f"""\n\n**Session ID:** {session_id}"""
|
|
1956
|
-
workflow_info = workflow_info.strip()
|
|
1957
|
-
|
|
1958
|
-
workflow_panel = create_panel(
|
|
1959
|
-
content=Markdown(workflow_info) if markdown else workflow_info,
|
|
1960
|
-
title="Workflow Information",
|
|
1961
|
-
border_style="cyan",
|
|
1962
|
-
)
|
|
1963
|
-
console.print(workflow_panel) # type: ignore
|
|
1964
|
-
|
|
1965
|
-
# Start timer
|
|
1966
|
-
response_timer = Timer()
|
|
1967
|
-
response_timer.start()
|
|
1968
|
-
|
|
1969
|
-
# Streaming execution variables with smart step tracking
|
|
1970
|
-
current_step_content = ""
|
|
1971
|
-
current_step_name = ""
|
|
1972
|
-
current_step_index = 0
|
|
1973
|
-
step_responses = []
|
|
1974
|
-
step_started_printed = False
|
|
1975
|
-
is_callable_function = callable(self.steps)
|
|
1976
|
-
|
|
1977
|
-
# Smart step hierarchy tracking
|
|
1978
|
-
current_primitive_context = None # Current primitive being executed (parallel, loop, etc.)
|
|
1979
|
-
step_display_cache = {} # type: ignore
|
|
1980
|
-
|
|
1981
|
-
def get_step_display_number(step_index: Union[int, tuple], step_name: str = "") -> str:
|
|
1982
|
-
"""Generate clean two-level step numbering: x.y format only"""
|
|
1983
|
-
|
|
1984
|
-
# Handle tuple format for child steps
|
|
1985
|
-
if isinstance(step_index, tuple):
|
|
1986
|
-
if len(step_index) >= 2:
|
|
1987
|
-
parent_idx, sub_idx = step_index[0], step_index[1]
|
|
1988
|
-
|
|
1989
|
-
# Extract base parent index if it's nested
|
|
1990
|
-
if isinstance(parent_idx, tuple):
|
|
1991
|
-
base_idx = parent_idx[0] if len(parent_idx) > 0 else 0
|
|
1992
|
-
while isinstance(base_idx, tuple) and len(base_idx) > 0:
|
|
1993
|
-
base_idx = base_idx[0]
|
|
1994
|
-
else:
|
|
1995
|
-
base_idx = parent_idx
|
|
1996
|
-
|
|
1997
|
-
# Check context for parallel special case
|
|
1998
|
-
if current_primitive_context and current_primitive_context["type"] == "parallel":
|
|
1999
|
-
# For parallel child steps, all get the same number based on their actual step_index
|
|
2000
|
-
return f"Step {base_idx + 1}.{sub_idx + 1}"
|
|
2001
|
-
elif current_primitive_context and current_primitive_context["type"] == "loop":
|
|
2002
|
-
iteration = current_primitive_context.get("current_iteration", 1)
|
|
2003
|
-
return f"Step {base_idx + 1}.{sub_idx + 1} (Iteration {iteration})"
|
|
2004
|
-
else:
|
|
2005
|
-
# Regular child step numbering
|
|
2006
|
-
return f"Step {base_idx + 1}.{sub_idx + 1}" # type: ignore
|
|
2007
|
-
else:
|
|
2008
|
-
# Single element tuple - treat as main step
|
|
2009
|
-
return f"Step {step_index[0] + 1}"
|
|
2010
|
-
|
|
2011
|
-
# Handle integer step_index - main step
|
|
2012
|
-
if not current_primitive_context:
|
|
2013
|
-
# Regular main step
|
|
2014
|
-
return f"Step {step_index + 1}"
|
|
2015
|
-
else:
|
|
2016
|
-
# This shouldn't happen with the new logic, but fallback
|
|
2017
|
-
return f"Step {step_index + 1}"
|
|
2018
|
-
|
|
2019
|
-
with Live(console=console, refresh_per_second=10) as live_log:
|
|
2020
|
-
status = Status("Starting workflow...", spinner="dots")
|
|
2021
|
-
live_log.update(status)
|
|
2022
|
-
|
|
2023
|
-
try:
|
|
2024
|
-
for response in self.run(
|
|
2025
|
-
message=message,
|
|
2026
|
-
user_id=user_id,
|
|
2027
|
-
session_id=session_id,
|
|
2028
|
-
additional_data=additional_data,
|
|
2029
|
-
audio=audio,
|
|
2030
|
-
images=images,
|
|
2031
|
-
videos=videos,
|
|
2032
|
-
files=files,
|
|
2033
|
-
stream=True,
|
|
2034
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2035
|
-
**kwargs,
|
|
2036
|
-
): # type: ignore
|
|
2037
|
-
# Handle the new event types
|
|
2038
|
-
if isinstance(response, WorkflowStartedEvent):
|
|
2039
|
-
status.update("Workflow started...")
|
|
2040
|
-
if is_callable_function:
|
|
2041
|
-
current_step_name = "Custom Function"
|
|
2042
|
-
current_step_index = 0
|
|
2043
|
-
live_log.update(status)
|
|
2044
|
-
|
|
2045
|
-
elif isinstance(response, StepStartedEvent):
|
|
2046
|
-
current_step_name = response.step_name or "Unknown"
|
|
2047
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2048
|
-
current_step_content = ""
|
|
2049
|
-
step_started_printed = False
|
|
2050
|
-
|
|
2051
|
-
# Generate smart step number
|
|
2052
|
-
step_display = get_step_display_number(current_step_index, current_step_name)
|
|
2053
|
-
status.update(f"Starting {step_display}: {current_step_name}...")
|
|
2054
|
-
live_log.update(status)
|
|
2055
|
-
|
|
2056
|
-
elif isinstance(response, StepCompletedEvent):
|
|
2057
|
-
step_name = response.step_name or "Unknown"
|
|
2058
|
-
step_index = response.step_index or 0
|
|
2059
|
-
|
|
2060
|
-
# Generate smart step number for completion (will use cached value)
|
|
2061
|
-
step_display = get_step_display_number(step_index, step_name)
|
|
2062
|
-
status.update(f"Completed {step_display}: {step_name}")
|
|
2063
|
-
|
|
2064
|
-
if response.content:
|
|
2065
|
-
step_responses.append(
|
|
2066
|
-
{
|
|
2067
|
-
"step_name": step_name,
|
|
2068
|
-
"step_index": step_index,
|
|
2069
|
-
"content": response.content,
|
|
2070
|
-
"event": response.event,
|
|
2071
|
-
}
|
|
2072
|
-
)
|
|
2073
|
-
|
|
2074
|
-
# Print the final step result in orange (only once)
|
|
2075
|
-
if show_step_details and current_step_content and not step_started_printed:
|
|
2076
|
-
live_log.update(status, refresh=True)
|
|
2077
|
-
|
|
2078
|
-
final_step_panel = create_panel(
|
|
2079
|
-
content=Markdown(current_step_content) if markdown else current_step_content,
|
|
2080
|
-
title=f"{step_display}: {step_name} (Completed)",
|
|
2081
|
-
border_style="orange3",
|
|
2082
|
-
)
|
|
2083
|
-
console.print(final_step_panel) # type: ignore
|
|
2084
|
-
step_started_printed = True
|
|
2085
|
-
|
|
2086
|
-
elif isinstance(response, LoopExecutionStartedEvent):
|
|
2087
|
-
current_step_name = response.step_name or "Loop"
|
|
2088
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2089
|
-
current_step_content = ""
|
|
2090
|
-
step_started_printed = False
|
|
2091
|
-
|
|
2092
|
-
# Set up loop context
|
|
2093
|
-
current_primitive_context = {
|
|
2094
|
-
"type": "loop",
|
|
2095
|
-
"step_index": current_step_index,
|
|
2096
|
-
"sub_step_counter": 0,
|
|
2097
|
-
"current_iteration": 1,
|
|
2098
|
-
"max_iterations": response.max_iterations,
|
|
2099
|
-
}
|
|
2100
|
-
|
|
2101
|
-
# Clear cache for this primitive's sub-steps
|
|
2102
|
-
step_display_cache.clear()
|
|
2103
|
-
|
|
2104
|
-
status.update(
|
|
2105
|
-
f"Starting loop: {current_step_name} (max {response.max_iterations} iterations)..."
|
|
2106
|
-
)
|
|
2107
|
-
live_log.update(status)
|
|
2108
|
-
|
|
2109
|
-
elif isinstance(response, LoopIterationStartedEvent):
|
|
2110
|
-
if current_primitive_context and current_primitive_context["type"] == "loop":
|
|
2111
|
-
current_primitive_context["current_iteration"] = response.iteration
|
|
2112
|
-
current_primitive_context["sub_step_counter"] = 0 # Reset for new iteration
|
|
2113
|
-
# Clear cache for new iteration
|
|
2114
|
-
step_display_cache.clear()
|
|
2115
|
-
|
|
2116
|
-
status.update(
|
|
2117
|
-
f"Loop iteration {response.iteration}/{response.max_iterations}: {response.step_name}..."
|
|
2118
|
-
)
|
|
2119
|
-
live_log.update(status)
|
|
2120
|
-
|
|
2121
|
-
elif isinstance(response, LoopIterationCompletedEvent):
|
|
2122
|
-
status.update(
|
|
2123
|
-
f"Completed iteration {response.iteration}/{response.max_iterations}: {response.step_name}"
|
|
2124
|
-
)
|
|
2125
|
-
|
|
2126
|
-
elif isinstance(response, LoopExecutionCompletedEvent):
|
|
2127
|
-
step_name = response.step_name or "Loop"
|
|
2128
|
-
step_index = response.step_index or 0
|
|
2129
|
-
|
|
2130
|
-
status.update(f"Completed loop: {step_name} ({response.total_iterations} iterations)")
|
|
2131
|
-
live_log.update(status, refresh=True)
|
|
2132
|
-
|
|
2133
|
-
# Print loop summary
|
|
2134
|
-
if show_step_details:
|
|
2135
|
-
summary_content = "**Loop Summary:**\n\n"
|
|
2136
|
-
summary_content += (
|
|
2137
|
-
f"- Total iterations: {response.total_iterations}/{response.max_iterations}\n"
|
|
2138
|
-
)
|
|
2139
|
-
summary_content += (
|
|
2140
|
-
f"- Total steps executed: {sum(len(iteration) for iteration in response.all_results)}\n"
|
|
2141
|
-
)
|
|
2142
|
-
|
|
2143
|
-
loop_summary_panel = create_panel(
|
|
2144
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
2145
|
-
title=f"Loop {step_name} (Completed)",
|
|
2146
|
-
border_style="yellow",
|
|
2147
|
-
)
|
|
2148
|
-
console.print(loop_summary_panel) # type: ignore
|
|
2149
|
-
|
|
2150
|
-
# Reset context
|
|
2151
|
-
current_primitive_context = None
|
|
2152
|
-
step_display_cache.clear()
|
|
2153
|
-
step_started_printed = True
|
|
2154
|
-
|
|
2155
|
-
elif isinstance(response, ParallelExecutionStartedEvent):
|
|
2156
|
-
current_step_name = response.step_name or "Parallel Steps"
|
|
2157
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2158
|
-
current_step_content = ""
|
|
2159
|
-
step_started_printed = False
|
|
2160
|
-
|
|
2161
|
-
# Set up parallel context
|
|
2162
|
-
current_primitive_context = {
|
|
2163
|
-
"type": "parallel",
|
|
2164
|
-
"step_index": current_step_index,
|
|
2165
|
-
"sub_step_counter": 0,
|
|
2166
|
-
"total_steps": response.parallel_step_count,
|
|
2167
|
-
}
|
|
2168
|
-
|
|
2169
|
-
# Clear cache for this primitive's sub-steps
|
|
2170
|
-
step_display_cache.clear()
|
|
2171
|
-
|
|
2172
|
-
# Print parallel execution summary panel
|
|
2173
|
-
live_log.update(status, refresh=True)
|
|
2174
|
-
parallel_summary = f"**Parallel Steps:** {response.parallel_step_count}"
|
|
2175
|
-
# Use get_step_display_number for consistent numbering
|
|
2176
|
-
step_display = get_step_display_number(current_step_index, current_step_name)
|
|
2177
|
-
parallel_panel = create_panel(
|
|
2178
|
-
content=Markdown(parallel_summary) if markdown else parallel_summary,
|
|
2179
|
-
title=f"{step_display}: {current_step_name}",
|
|
2180
|
-
border_style="cyan",
|
|
2181
|
-
)
|
|
2182
|
-
console.print(parallel_panel) # type: ignore
|
|
2183
|
-
|
|
2184
|
-
status.update(
|
|
2185
|
-
f"Starting parallel execution: {current_step_name} ({response.parallel_step_count} steps)..."
|
|
2186
|
-
)
|
|
2187
|
-
live_log.update(status)
|
|
2188
|
-
|
|
2189
|
-
elif isinstance(response, ParallelExecutionCompletedEvent):
|
|
2190
|
-
step_name = response.step_name or "Parallel Steps"
|
|
2191
|
-
step_index = response.step_index or 0
|
|
2192
|
-
|
|
2193
|
-
status.update(f"Completed parallel execution: {step_name}")
|
|
2194
|
-
|
|
2195
|
-
# Reset context
|
|
2196
|
-
current_primitive_context = None
|
|
2197
|
-
step_display_cache.clear()
|
|
2198
|
-
|
|
2199
|
-
elif isinstance(response, ConditionExecutionStartedEvent):
|
|
2200
|
-
current_step_name = response.step_name or "Condition"
|
|
2201
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2202
|
-
current_step_content = ""
|
|
2203
|
-
step_started_printed = False
|
|
2204
|
-
|
|
2205
|
-
# Set up condition context
|
|
2206
|
-
current_primitive_context = {
|
|
2207
|
-
"type": "condition",
|
|
2208
|
-
"step_index": current_step_index,
|
|
2209
|
-
"sub_step_counter": 0,
|
|
2210
|
-
"condition_result": response.condition_result,
|
|
2211
|
-
}
|
|
2212
|
-
|
|
2213
|
-
# Clear cache for this primitive's sub-steps
|
|
2214
|
-
step_display_cache.clear()
|
|
2215
|
-
|
|
2216
|
-
condition_text = "met" if response.condition_result else "not met"
|
|
2217
|
-
status.update(f"Starting condition: {current_step_name} (condition {condition_text})...")
|
|
2218
|
-
live_log.update(status)
|
|
2219
|
-
|
|
2220
|
-
elif isinstance(response, ConditionExecutionCompletedEvent):
|
|
2221
|
-
step_name = response.step_name or "Condition"
|
|
2222
|
-
step_index = response.step_index or 0
|
|
2223
|
-
|
|
2224
|
-
status.update(f"Completed condition: {step_name}")
|
|
2225
|
-
|
|
2226
|
-
# Reset context
|
|
2227
|
-
current_primitive_context = None
|
|
2228
|
-
step_display_cache.clear()
|
|
2229
|
-
|
|
2230
|
-
elif isinstance(response, RouterExecutionStartedEvent):
|
|
2231
|
-
current_step_name = response.step_name or "Router"
|
|
2232
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2233
|
-
current_step_content = ""
|
|
2234
|
-
step_started_printed = False
|
|
2235
|
-
|
|
2236
|
-
# Set up router context
|
|
2237
|
-
current_primitive_context = {
|
|
2238
|
-
"type": "router",
|
|
2239
|
-
"step_index": current_step_index,
|
|
2240
|
-
"sub_step_counter": 0,
|
|
2241
|
-
"selected_steps": response.selected_steps,
|
|
2242
|
-
}
|
|
2243
|
-
|
|
2244
|
-
# Clear cache for this primitive's sub-steps
|
|
2245
|
-
step_display_cache.clear()
|
|
2246
|
-
|
|
2247
|
-
selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
|
|
2248
|
-
status.update(f"Starting router: {current_step_name} (selected: {selected_steps_text})...")
|
|
2249
|
-
live_log.update(status)
|
|
2250
|
-
|
|
2251
|
-
elif isinstance(response, RouterExecutionCompletedEvent):
|
|
2252
|
-
step_name = response.step_name or "Router"
|
|
2253
|
-
step_index = response.step_index or 0
|
|
2254
|
-
|
|
2255
|
-
status.update(f"Completed router: {step_name}")
|
|
2256
|
-
|
|
2257
|
-
# Print router summary
|
|
2258
|
-
if show_step_details:
|
|
2259
|
-
selected_steps_text = (
|
|
2260
|
-
", ".join(response.selected_steps) if response.selected_steps else "none"
|
|
2261
|
-
)
|
|
2262
|
-
summary_content = "**Router Summary:**\n\n"
|
|
2263
|
-
summary_content += f"- Selected steps: {selected_steps_text}\n"
|
|
2264
|
-
summary_content += f"- Executed steps: {response.executed_steps or 0}\n"
|
|
2265
|
-
|
|
2266
|
-
router_summary_panel = create_panel(
|
|
2267
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
2268
|
-
title=f"Router {step_name} (Completed)",
|
|
2269
|
-
border_style="purple",
|
|
2270
|
-
)
|
|
2271
|
-
console.print(router_summary_panel) # type: ignore
|
|
2272
|
-
|
|
2273
|
-
# Reset context
|
|
2274
|
-
current_primitive_context = None
|
|
2275
|
-
step_display_cache.clear()
|
|
2276
|
-
step_started_printed = True
|
|
2277
|
-
|
|
2278
|
-
elif isinstance(response, StepsExecutionStartedEvent):
|
|
2279
|
-
current_step_name = response.step_name or "Steps"
|
|
2280
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2281
|
-
current_step_content = ""
|
|
2282
|
-
step_started_printed = False
|
|
2283
|
-
status.update(f"Starting steps: {current_step_name} ({response.steps_count} steps)...")
|
|
2284
|
-
live_log.update(status)
|
|
2285
|
-
|
|
2286
|
-
elif isinstance(response, StepsExecutionCompletedEvent):
|
|
2287
|
-
step_name = response.step_name or "Steps"
|
|
2288
|
-
step_index = response.step_index or 0
|
|
2289
|
-
|
|
2290
|
-
status.update(f"Completed steps: {step_name}")
|
|
2291
|
-
|
|
2292
|
-
# Add results from executed steps to step_responses
|
|
2293
|
-
if response.step_results:
|
|
2294
|
-
for i, step_result in enumerate(response.step_results):
|
|
2295
|
-
# Use the same numbering system as other primitives
|
|
2296
|
-
step_display_number = get_step_display_number(step_index, step_result.step_name or "")
|
|
2297
|
-
step_responses.append(
|
|
2298
|
-
{
|
|
2299
|
-
"step_name": f"{step_display_number}: {step_result.step_name}",
|
|
2300
|
-
"step_index": step_index,
|
|
2301
|
-
"content": step_result.content,
|
|
2302
|
-
"event": "StepsStepResult",
|
|
2303
|
-
}
|
|
2304
|
-
)
|
|
2305
|
-
|
|
2306
|
-
# Print steps summary
|
|
2307
|
-
if show_step_details:
|
|
2308
|
-
summary_content = "**Steps Summary:**\n\n"
|
|
2309
|
-
summary_content += f"- Total steps: {response.steps_count or 0}\n"
|
|
2310
|
-
summary_content += f"- Executed steps: {response.executed_steps or 0}\n"
|
|
2311
|
-
|
|
2312
|
-
steps_summary_panel = create_panel(
|
|
2313
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
2314
|
-
title=f"Steps {step_name} (Completed)",
|
|
2315
|
-
border_style="yellow",
|
|
2316
|
-
)
|
|
2317
|
-
console.print(steps_summary_panel) # type: ignore
|
|
2318
|
-
|
|
2319
|
-
step_started_printed = True
|
|
2320
|
-
|
|
2321
|
-
elif isinstance(response, WorkflowCompletedEvent):
|
|
2322
|
-
status.update("Workflow completed!")
|
|
2323
|
-
|
|
2324
|
-
# For callable functions, print the final content block here since there are no step events
|
|
2325
|
-
if (
|
|
2326
|
-
is_callable_function
|
|
2327
|
-
and show_step_details
|
|
2328
|
-
and current_step_content
|
|
2329
|
-
and not step_started_printed
|
|
2330
|
-
):
|
|
2331
|
-
final_step_panel = create_panel(
|
|
2332
|
-
content=Markdown(current_step_content) if markdown else current_step_content,
|
|
2333
|
-
title="Custom Function (Completed)",
|
|
2334
|
-
border_style="orange3",
|
|
2335
|
-
)
|
|
2336
|
-
console.print(final_step_panel) # type: ignore
|
|
2337
|
-
step_started_printed = True
|
|
2338
|
-
|
|
2339
|
-
live_log.update(status, refresh=True)
|
|
2340
|
-
|
|
2341
|
-
# Show final summary
|
|
2342
|
-
if response.extra_data:
|
|
2343
|
-
status = response.status
|
|
2344
|
-
summary_content = ""
|
|
2345
|
-
summary_content += f"""\n\n**Status:** {status}"""
|
|
2346
|
-
summary_content += f"""\n\n**Steps Completed:** {len(response.step_responses) if response.step_responses else 0}"""
|
|
2347
|
-
summary_content = summary_content.strip()
|
|
2348
|
-
|
|
2349
|
-
summary_panel = create_panel(
|
|
2350
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
2351
|
-
title="Execution Summary",
|
|
2352
|
-
border_style="blue",
|
|
2353
|
-
)
|
|
2354
|
-
console.print(summary_panel) # type: ignore
|
|
2355
|
-
|
|
2356
|
-
else:
|
|
2357
|
-
# Handle streaming content
|
|
2358
|
-
if isinstance(response, str):
|
|
2359
|
-
response_str = response
|
|
2360
|
-
elif isinstance(response, StepOutputEvent):
|
|
2361
|
-
response_str = response.content or "" # type: ignore
|
|
2362
|
-
else:
|
|
2363
|
-
from agno.run.response import RunResponseContentEvent
|
|
2364
|
-
from agno.run.team import RunResponseContentEvent as TeamRunResponseContentEvent
|
|
2365
|
-
|
|
2366
|
-
current_step_executor_type = None
|
|
2367
|
-
# Handle both integer and tuple step indices for parallel execution
|
|
2368
|
-
actual_step_index = current_step_index
|
|
2369
|
-
if isinstance(current_step_index, tuple):
|
|
2370
|
-
# For tuple indices, use the first element (parent step index)
|
|
2371
|
-
actual_step_index = current_step_index[0]
|
|
2372
|
-
# If it's nested tuple, keep extracting until we get an integer
|
|
2373
|
-
while isinstance(actual_step_index, tuple) and len(actual_step_index) > 0:
|
|
2374
|
-
actual_step_index = actual_step_index[0]
|
|
2375
|
-
|
|
2376
|
-
if not is_callable_function and self.steps and actual_step_index < len(self.steps): # type: ignore
|
|
2377
|
-
step = self.steps[actual_step_index] # type: ignore
|
|
2378
|
-
if hasattr(step, "executor_type"):
|
|
2379
|
-
current_step_executor_type = step.executor_type
|
|
2380
|
-
|
|
2381
|
-
# Check if this is a streaming content event from agent or team
|
|
2382
|
-
if isinstance(response, (TeamRunResponseContentEvent, WorkflowRunResponseEvent)): # type: ignore
|
|
2383
|
-
# Check if this is a team's final structured output
|
|
2384
|
-
is_structured_output = (
|
|
2385
|
-
isinstance(response, TeamRunResponseContentEvent)
|
|
2386
|
-
and hasattr(response, "content_type")
|
|
2387
|
-
and response.content_type != "str"
|
|
2388
|
-
and response.content_type != ""
|
|
2389
|
-
)
|
|
2390
|
-
response_str = response.content # type: ignore
|
|
2391
|
-
elif isinstance(response, RunResponseContentEvent) and current_step_executor_type != "team":
|
|
2392
|
-
response_str = response.content # type: ignore
|
|
2393
|
-
else:
|
|
2394
|
-
continue
|
|
2395
|
-
|
|
2396
|
-
# Use the unified formatting function for consistency
|
|
2397
|
-
response_str = self._format_step_content_for_display(response_str) # type: ignore
|
|
2398
|
-
|
|
2399
|
-
# Filter out empty responses and add to current step content
|
|
2400
|
-
if response_str and response_str.strip():
|
|
2401
|
-
# If it's a structured output from a team, replace the content instead of appending
|
|
2402
|
-
if "is_structured_output" in locals() and is_structured_output:
|
|
2403
|
-
current_step_content = response_str
|
|
2404
|
-
else:
|
|
2405
|
-
current_step_content += response_str
|
|
2406
|
-
|
|
2407
|
-
# Live update the step panel with streaming content
|
|
2408
|
-
if show_step_details and not step_started_printed:
|
|
2409
|
-
# Generate smart step number for streaming title (will use cached value)
|
|
2410
|
-
step_display = get_step_display_number(current_step_index, current_step_name)
|
|
2411
|
-
title = f"{step_display}: {current_step_name} (Streaming...)"
|
|
2412
|
-
if is_callable_function:
|
|
2413
|
-
title = "Custom Function (Streaming...)"
|
|
2414
|
-
|
|
2415
|
-
# Show the streaming content live in orange panel
|
|
2416
|
-
live_step_panel = create_panel(
|
|
2417
|
-
content=Markdown(current_step_content) if markdown else current_step_content,
|
|
2418
|
-
title=title,
|
|
2419
|
-
border_style="orange3",
|
|
2420
|
-
)
|
|
2421
|
-
|
|
2422
|
-
# Create group with status and current step content
|
|
2423
|
-
group = Group(status, live_step_panel)
|
|
2424
|
-
live_log.update(group)
|
|
2425
|
-
|
|
2426
|
-
response_timer.stop()
|
|
2427
|
-
|
|
2428
|
-
live_log.update("")
|
|
2429
|
-
|
|
2430
|
-
# Final completion message
|
|
2431
|
-
if show_time:
|
|
2432
|
-
completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
|
|
2433
|
-
console.print(completion_text) # type: ignore
|
|
2434
|
-
|
|
2435
|
-
except Exception as e:
|
|
2436
|
-
import traceback
|
|
2437
|
-
|
|
2438
|
-
traceback.print_exc()
|
|
2439
|
-
response_timer.stop()
|
|
2440
|
-
error_panel = create_panel(
|
|
2441
|
-
content=f"Workflow execution failed: {str(e)}", title="Execution Error", border_style="red"
|
|
2442
|
-
)
|
|
2443
|
-
console.print(error_panel) # type: ignore
|
|
2444
|
-
|
|
2445
|
-
async def aprint_response(
|
|
2446
|
-
self,
|
|
2447
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
2448
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
2449
|
-
user_id: Optional[str] = None,
|
|
2450
|
-
session_id: Optional[str] = None,
|
|
2451
|
-
audio: Optional[List[Audio]] = None,
|
|
2452
|
-
images: Optional[List[Image]] = None,
|
|
2453
|
-
videos: Optional[List[Video]] = None,
|
|
2454
|
-
files: Optional[List[File]] = None,
|
|
2455
|
-
stream: bool = False,
|
|
2456
|
-
stream_intermediate_steps: bool = False,
|
|
2457
|
-
markdown: bool = False,
|
|
2458
|
-
show_time: bool = True,
|
|
2459
|
-
show_step_details: bool = True,
|
|
2460
|
-
console: Optional[Any] = None,
|
|
2461
|
-
**kwargs: Any,
|
|
2462
|
-
) -> None:
|
|
2463
|
-
"""Print workflow execution with rich formatting and optional streaming
|
|
2464
|
-
|
|
2465
|
-
Args:
|
|
2466
|
-
message: The main message/input for the workflow
|
|
2467
|
-
message_data: Attached message data to the input
|
|
2468
|
-
user_id: User ID
|
|
2469
|
-
session_id: Session ID
|
|
2470
|
-
audio: Audio input
|
|
2471
|
-
images: Image input
|
|
2472
|
-
videos: Video input
|
|
2473
|
-
stream_intermediate_steps: Whether to stream intermediate steps
|
|
2474
|
-
stream: Whether to stream the response content
|
|
2475
|
-
markdown: Whether to render content as markdown
|
|
2476
|
-
show_time: Whether to show execution time
|
|
2477
|
-
show_step_details: Whether to show individual step outputs
|
|
2478
|
-
console: Rich console instance (optional)
|
|
2479
|
-
"""
|
|
2480
|
-
if stream:
|
|
2481
|
-
await self._aprint_response_stream(
|
|
2482
|
-
message=message,
|
|
2483
|
-
additional_data=additional_data,
|
|
2484
|
-
user_id=user_id,
|
|
2485
|
-
session_id=session_id,
|
|
2486
|
-
audio=audio,
|
|
2487
|
-
images=images,
|
|
2488
|
-
videos=videos,
|
|
2489
|
-
files=files,
|
|
2490
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2491
|
-
markdown=markdown,
|
|
2492
|
-
show_time=show_time,
|
|
2493
|
-
show_step_details=show_step_details,
|
|
2494
|
-
console=console,
|
|
2495
|
-
**kwargs,
|
|
2496
|
-
)
|
|
2497
|
-
else:
|
|
2498
|
-
await self._aprint_response(
|
|
2499
|
-
message=message,
|
|
2500
|
-
additional_data=additional_data,
|
|
2501
|
-
user_id=user_id,
|
|
2502
|
-
session_id=session_id,
|
|
2503
|
-
audio=audio,
|
|
2504
|
-
images=images,
|
|
2505
|
-
videos=videos,
|
|
2506
|
-
files=files,
|
|
2507
|
-
markdown=markdown,
|
|
2508
|
-
show_time=show_time,
|
|
2509
|
-
show_step_details=show_step_details,
|
|
2510
|
-
console=console,
|
|
2511
|
-
**kwargs,
|
|
2512
|
-
)
|
|
2513
|
-
|
|
2514
|
-
async def _aprint_response(
|
|
2515
|
-
self,
|
|
2516
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
2517
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
2518
|
-
user_id: Optional[str] = None,
|
|
2519
|
-
session_id: Optional[str] = None,
|
|
2520
|
-
audio: Optional[List[Audio]] = None,
|
|
2521
|
-
images: Optional[List[Image]] = None,
|
|
2522
|
-
videos: Optional[List[Video]] = None,
|
|
2523
|
-
files: Optional[List[File]] = None,
|
|
2524
|
-
markdown: bool = False,
|
|
2525
|
-
show_time: bool = True,
|
|
2526
|
-
show_step_details: bool = True,
|
|
2527
|
-
console: Optional[Any] = None,
|
|
2528
|
-
**kwargs: Any,
|
|
2529
|
-
) -> None:
|
|
2530
|
-
"""Print workflow execution with rich formatting (non-streaming)"""
|
|
2531
|
-
from rich.live import Live
|
|
2532
|
-
from rich.markdown import Markdown
|
|
2533
|
-
from rich.status import Status
|
|
2534
|
-
from rich.text import Text
|
|
2535
|
-
|
|
2536
|
-
from agno.utils.response import create_panel
|
|
2537
|
-
from agno.utils.timer import Timer
|
|
2538
|
-
|
|
2539
|
-
if console is None:
|
|
2540
|
-
from agno.cli.console import console
|
|
2541
|
-
|
|
2542
|
-
# Show workflow info
|
|
2543
|
-
media_info = []
|
|
2544
|
-
if audio:
|
|
2545
|
-
media_info.append(f"Audio files: {len(audio)}")
|
|
2546
|
-
if images:
|
|
2547
|
-
media_info.append(f"Images: {len(images)}")
|
|
2548
|
-
if videos:
|
|
2549
|
-
media_info.append(f"Videos: {len(videos)}")
|
|
2550
|
-
|
|
2551
|
-
workflow_info = f"""**Workflow:** {self.name}"""
|
|
2552
|
-
if self.description:
|
|
2553
|
-
workflow_info += f"""\n\n**Description:** {self.description}"""
|
|
2554
|
-
workflow_info += f"""\n\n**Steps:** {self._get_step_count()} steps"""
|
|
2555
|
-
if message:
|
|
2556
|
-
if isinstance(message, str):
|
|
2557
|
-
workflow_info += f"""\n\n**Message:** {message}"""
|
|
2558
|
-
else:
|
|
2559
|
-
# Handle structured input message
|
|
2560
|
-
if isinstance(message, BaseModel):
|
|
2561
|
-
data_display = message.model_dump_json(indent=2, exclude_none=True)
|
|
2562
|
-
elif isinstance(message, (dict, list)):
|
|
2563
|
-
import json
|
|
2564
|
-
|
|
2565
|
-
data_display = json.dumps(message, indent=2, default=str)
|
|
2566
|
-
else:
|
|
2567
|
-
data_display = str(message)
|
|
2568
|
-
workflow_info += f"""\n\n**Structured Input:**\n```json\n{data_display}\n```"""
|
|
2569
|
-
if user_id:
|
|
2570
|
-
workflow_info += f"""\n\n**User ID:** {user_id}"""
|
|
2571
|
-
if session_id:
|
|
2572
|
-
workflow_info += f"""\n\n**Session ID:** {session_id}"""
|
|
2573
|
-
workflow_info = workflow_info.strip()
|
|
2574
|
-
|
|
2575
|
-
workflow_panel = create_panel(
|
|
2576
|
-
content=Markdown(workflow_info) if markdown else workflow_info,
|
|
2577
|
-
title="Workflow Information",
|
|
2578
|
-
border_style="cyan",
|
|
2579
|
-
)
|
|
2580
|
-
console.print(workflow_panel) # type: ignore
|
|
2581
|
-
|
|
2582
|
-
# Start timer
|
|
2583
|
-
response_timer = Timer()
|
|
2584
|
-
response_timer.start()
|
|
2585
|
-
|
|
2586
|
-
with Live(console=console) as live_log:
|
|
2587
|
-
status = Status("Starting async workflow...\n", spinner="dots")
|
|
2588
|
-
live_log.update(status)
|
|
2589
|
-
|
|
2590
|
-
try:
|
|
2591
|
-
# Execute workflow and get the response directly
|
|
2592
|
-
workflow_response: WorkflowRunResponse = await self.arun(
|
|
2593
|
-
message=message,
|
|
2594
|
-
additional_data=additional_data,
|
|
2595
|
-
user_id=user_id,
|
|
2596
|
-
session_id=session_id,
|
|
2597
|
-
audio=audio,
|
|
2598
|
-
images=images,
|
|
2599
|
-
videos=videos,
|
|
2600
|
-
files=files,
|
|
2601
|
-
**kwargs,
|
|
2602
|
-
) # type: ignore
|
|
2603
|
-
|
|
2604
|
-
response_timer.stop()
|
|
2605
|
-
|
|
2606
|
-
# Show individual step responses if available
|
|
2607
|
-
if show_step_details and workflow_response.step_responses:
|
|
2608
|
-
for i, step_output in enumerate(workflow_response.step_responses):
|
|
2609
|
-
# Handle both single StepOutput and List[StepOutput] (from loop/parallel steps)
|
|
2610
|
-
if isinstance(step_output, list):
|
|
2611
|
-
# This is a loop or parallel step with multiple outputs
|
|
2612
|
-
for j, sub_step_output in enumerate(step_output):
|
|
2613
|
-
if sub_step_output.content:
|
|
2614
|
-
formatted_content = self._format_step_content_for_display(sub_step_output)
|
|
2615
|
-
step_panel = create_panel(
|
|
2616
|
-
content=Markdown(formatted_content) if markdown else formatted_content,
|
|
2617
|
-
title=f"Step {i + 1}.{j + 1}: {sub_step_output.step_name} (Completed)",
|
|
2618
|
-
border_style="orange3",
|
|
2619
|
-
)
|
|
2620
|
-
console.print(step_panel) # type: ignore
|
|
2621
|
-
else:
|
|
2622
|
-
# This is a regular single step
|
|
2623
|
-
if step_output.content:
|
|
2624
|
-
formatted_content = self._format_step_content_for_display(step_output)
|
|
2625
|
-
step_panel = create_panel(
|
|
2626
|
-
content=Markdown(formatted_content) if markdown else formatted_content,
|
|
2627
|
-
title=f"Step {i + 1}: {step_output.step_name} (Completed)",
|
|
2628
|
-
border_style="orange3",
|
|
2629
|
-
)
|
|
2630
|
-
console.print(step_panel) # type: ignore
|
|
2631
|
-
|
|
2632
|
-
# For callable functions, show the content directly since there are no step_responses
|
|
2633
|
-
elif show_step_details and callable(self.steps) and workflow_response.content:
|
|
2634
|
-
step_panel = create_panel(
|
|
2635
|
-
content=Markdown(workflow_response.content) if markdown else workflow_response.content, # type: ignore
|
|
2636
|
-
title="Custom Function (Completed)",
|
|
2637
|
-
border_style="orange3",
|
|
2638
|
-
)
|
|
2639
|
-
console.print(step_panel) # type: ignore
|
|
2640
|
-
|
|
2641
|
-
# Show final summary
|
|
2642
|
-
if workflow_response.extra_data:
|
|
2643
|
-
status = workflow_response.status.value # type: ignore
|
|
2644
|
-
summary_content = ""
|
|
2645
|
-
summary_content += f"""\n\n**Status:** {status}"""
|
|
2646
|
-
summary_content += f"""\n\n**Steps Completed:** {len(workflow_response.step_responses) if workflow_response.step_responses else 0}"""
|
|
2647
|
-
summary_content = summary_content.strip()
|
|
2648
|
-
|
|
2649
|
-
summary_panel = create_panel(
|
|
2650
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
2651
|
-
title="Execution Summary",
|
|
2652
|
-
border_style="blue",
|
|
2653
|
-
)
|
|
2654
|
-
console.print(summary_panel) # type: ignore
|
|
2655
|
-
|
|
2656
|
-
live_log.update("")
|
|
2657
|
-
|
|
2658
|
-
# Final completion message
|
|
2659
|
-
if show_time:
|
|
2660
|
-
completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
|
|
2661
|
-
console.print(completion_text) # type: ignore
|
|
2662
|
-
|
|
2663
|
-
except Exception as e:
|
|
2664
|
-
import traceback
|
|
2665
|
-
|
|
2666
|
-
traceback.print_exc()
|
|
2667
|
-
response_timer.stop()
|
|
2668
|
-
error_panel = create_panel(
|
|
2669
|
-
content=f"Workflow execution failed: {str(e)}", title="Execution Error", border_style="red"
|
|
2670
|
-
)
|
|
2671
|
-
console.print(error_panel) # type: ignore
|
|
2672
|
-
|
|
2673
|
-
async def _aprint_response_stream(
|
|
2674
|
-
self,
|
|
2675
|
-
message: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None,
|
|
2676
|
-
additional_data: Optional[Dict[str, Any]] = None,
|
|
2677
|
-
user_id: Optional[str] = None,
|
|
2678
|
-
session_id: Optional[str] = None,
|
|
2679
|
-
audio: Optional[List[Audio]] = None,
|
|
2680
|
-
images: Optional[List[Image]] = None,
|
|
2681
|
-
videos: Optional[List[Video]] = None,
|
|
2682
|
-
files: Optional[List[File]] = None,
|
|
2683
|
-
stream_intermediate_steps: bool = False,
|
|
2684
|
-
markdown: bool = False,
|
|
2685
|
-
show_time: bool = True,
|
|
2686
|
-
show_step_details: bool = True,
|
|
2687
|
-
console: Optional[Any] = None,
|
|
2688
|
-
**kwargs: Any,
|
|
2689
|
-
) -> None:
|
|
2690
|
-
"""Print workflow execution with clean streaming - orange step blocks displayed once"""
|
|
2691
|
-
from rich.console import Group
|
|
2692
|
-
from rich.live import Live
|
|
2693
|
-
from rich.markdown import Markdown
|
|
2694
|
-
from rich.status import Status
|
|
2695
|
-
from rich.text import Text
|
|
2696
|
-
|
|
2697
|
-
from agno.utils.response import create_panel
|
|
2698
|
-
from agno.utils.timer import Timer
|
|
2699
|
-
|
|
2700
|
-
if console is None:
|
|
2701
|
-
from agno.cli.console import console
|
|
2702
|
-
|
|
2703
|
-
stream_intermediate_steps = True # With streaming print response, we need to stream intermediate steps
|
|
2704
|
-
|
|
2705
|
-
# Show workflow info (same as before)
|
|
2706
|
-
media_info = []
|
|
2707
|
-
if audio:
|
|
2708
|
-
media_info.append(f"Audio files: {len(audio)}")
|
|
2709
|
-
if images:
|
|
2710
|
-
media_info.append(f"Images: {len(images)}")
|
|
2711
|
-
if videos:
|
|
2712
|
-
media_info.append(f"Videos: {len(videos)}")
|
|
2713
|
-
|
|
2714
|
-
workflow_info = f"""**Workflow:** {self.name}"""
|
|
2715
|
-
if self.description:
|
|
2716
|
-
workflow_info += f"""\n\n**Description:** {self.description}"""
|
|
2717
|
-
workflow_info += f"""\n\n**Steps:** {self._get_step_count()} steps"""
|
|
2718
|
-
if message:
|
|
2719
|
-
if isinstance(message, str):
|
|
2720
|
-
workflow_info += f"""\n\n**Message:** {message}"""
|
|
2721
|
-
else:
|
|
2722
|
-
# Handle structured input message
|
|
2723
|
-
if isinstance(message, BaseModel):
|
|
2724
|
-
data_display = message.model_dump_json(indent=2, exclude_none=True)
|
|
2725
|
-
elif isinstance(message, (dict, list)):
|
|
2726
|
-
import json
|
|
2727
|
-
|
|
2728
|
-
data_display = json.dumps(message, indent=2, default=str)
|
|
2729
|
-
else:
|
|
2730
|
-
data_display = str(message)
|
|
2731
|
-
workflow_info += f"""\n\n**Structured Input:**\n```json\n{data_display}\n```"""
|
|
2732
|
-
if user_id:
|
|
2733
|
-
workflow_info += f"""\n\n**User ID:** {user_id}"""
|
|
2734
|
-
if session_id:
|
|
2735
|
-
workflow_info += f"""\n\n**Session ID:** {session_id}"""
|
|
2736
|
-
workflow_info = workflow_info.strip()
|
|
2737
|
-
|
|
2738
|
-
workflow_panel = create_panel(
|
|
2739
|
-
content=Markdown(workflow_info) if markdown else workflow_info,
|
|
2740
|
-
title="Workflow Information",
|
|
2741
|
-
border_style="cyan",
|
|
2742
|
-
)
|
|
2743
|
-
console.print(workflow_panel) # type: ignore
|
|
2744
|
-
|
|
2745
|
-
# Start timer
|
|
2746
|
-
response_timer = Timer()
|
|
2747
|
-
response_timer.start()
|
|
2748
|
-
|
|
2749
|
-
# Streaming execution variables
|
|
2750
|
-
current_step_content = ""
|
|
2751
|
-
current_step_name = ""
|
|
2752
|
-
current_step_index = 0
|
|
2753
|
-
step_responses = []
|
|
2754
|
-
step_started_printed = False
|
|
2755
|
-
is_callable_function = callable(self.steps)
|
|
2756
|
-
|
|
2757
|
-
# Smart step hierarchy tracking
|
|
2758
|
-
current_primitive_context = None # Current primitive being executed (parallel, loop, etc.)
|
|
2759
|
-
step_display_cache = {} # type: ignore
|
|
2760
|
-
|
|
2761
|
-
def get_step_display_number(step_index: Union[int, tuple], step_name: str = "") -> str:
|
|
2762
|
-
"""Generate clean two-level step numbering: x.y format only"""
|
|
2763
|
-
|
|
2764
|
-
# Handle tuple format for child steps
|
|
2765
|
-
if isinstance(step_index, tuple):
|
|
2766
|
-
if len(step_index) >= 2:
|
|
2767
|
-
parent_idx, sub_idx = step_index[0], step_index[1]
|
|
2768
|
-
|
|
2769
|
-
# Extract base parent index if it's nested
|
|
2770
|
-
if isinstance(parent_idx, tuple):
|
|
2771
|
-
base_idx = parent_idx[0] if len(parent_idx) > 0 else 0
|
|
2772
|
-
while isinstance(base_idx, tuple) and len(base_idx) > 0:
|
|
2773
|
-
base_idx = base_idx[0]
|
|
2774
|
-
else:
|
|
2775
|
-
base_idx = parent_idx
|
|
2776
|
-
|
|
2777
|
-
# Check context for parallel special case
|
|
2778
|
-
if current_primitive_context and current_primitive_context["type"] == "parallel":
|
|
2779
|
-
# For parallel child steps, all get the same number based on their actual step_index
|
|
2780
|
-
return f"Step {base_idx + 1}.{sub_idx + 1}"
|
|
2781
|
-
elif current_primitive_context and current_primitive_context["type"] == "loop":
|
|
2782
|
-
iteration = current_primitive_context.get("current_iteration", 1)
|
|
2783
|
-
return f"Step {base_idx + 1}.{sub_idx + 1} (Iteration {iteration})"
|
|
2784
|
-
else:
|
|
2785
|
-
# Regular child step numbering
|
|
2786
|
-
return f"Step {base_idx + 1}.{sub_idx + 1}" # type: ignore
|
|
2787
|
-
else:
|
|
2788
|
-
# Single element tuple - treat as main step
|
|
2789
|
-
return f"Step {step_index[0] + 1}"
|
|
2790
|
-
|
|
2791
|
-
# Handle integer step_index - main step
|
|
2792
|
-
if not current_primitive_context:
|
|
2793
|
-
# Regular main step
|
|
2794
|
-
return f"Step {step_index + 1}"
|
|
2795
|
-
else:
|
|
2796
|
-
# This shouldn't happen with the new logic, but fallback
|
|
2797
|
-
return f"Step {step_index + 1}"
|
|
2798
|
-
|
|
2799
|
-
with Live(console=console, refresh_per_second=10) as live_log:
|
|
2800
|
-
status = Status("Starting async workflow...", spinner="dots")
|
|
2801
|
-
live_log.update(status)
|
|
2802
|
-
|
|
2803
|
-
try:
|
|
2804
|
-
async for response in await self.arun(
|
|
2805
|
-
message=message,
|
|
2806
|
-
additional_data=additional_data,
|
|
2807
|
-
user_id=user_id,
|
|
2808
|
-
session_id=session_id,
|
|
2809
|
-
audio=audio,
|
|
2810
|
-
images=images,
|
|
2811
|
-
videos=videos,
|
|
2812
|
-
files=files,
|
|
2813
|
-
stream=True,
|
|
2814
|
-
stream_intermediate_steps=stream_intermediate_steps,
|
|
2815
|
-
**kwargs,
|
|
2816
|
-
): # type: ignore
|
|
2817
|
-
# Handle the new event types
|
|
2818
|
-
if isinstance(response, WorkflowStartedEvent):
|
|
2819
|
-
status.update("Workflow started...")
|
|
2820
|
-
if is_callable_function:
|
|
2821
|
-
current_step_name = "Custom Function"
|
|
2822
|
-
current_step_index = 0
|
|
2823
|
-
live_log.update(status)
|
|
2824
|
-
|
|
2825
|
-
elif isinstance(response, StepStartedEvent):
|
|
2826
|
-
current_step_name = response.step_name or "Unknown"
|
|
2827
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2828
|
-
current_step_content = ""
|
|
2829
|
-
step_started_printed = False
|
|
2830
|
-
|
|
2831
|
-
# Generate smart step number
|
|
2832
|
-
step_display = get_step_display_number(current_step_index, current_step_name)
|
|
2833
|
-
status.update(f"Starting {step_display}: {current_step_name}...")
|
|
2834
|
-
live_log.update(status)
|
|
2835
|
-
|
|
2836
|
-
elif isinstance(response, StepCompletedEvent):
|
|
2837
|
-
step_name = response.step_name or "Unknown"
|
|
2838
|
-
step_index = response.step_index or 0
|
|
2839
|
-
|
|
2840
|
-
# Generate smart step number for completion (will use cached value)
|
|
2841
|
-
step_display = get_step_display_number(step_index, step_name)
|
|
2842
|
-
status.update(f"Completed {step_display}: {step_name}")
|
|
2843
|
-
|
|
2844
|
-
if response.content:
|
|
2845
|
-
step_responses.append(
|
|
2846
|
-
{
|
|
2847
|
-
"step_name": step_name,
|
|
2848
|
-
"step_index": step_index,
|
|
2849
|
-
"content": response.content,
|
|
2850
|
-
"event": response.event,
|
|
2851
|
-
}
|
|
2852
|
-
)
|
|
2853
|
-
|
|
2854
|
-
# Print the final step result in orange (only once)
|
|
2855
|
-
if show_step_details and current_step_content and not step_started_printed:
|
|
2856
|
-
live_log.update(status, refresh=True)
|
|
2857
|
-
|
|
2858
|
-
final_step_panel = create_panel(
|
|
2859
|
-
content=Markdown(current_step_content) if markdown else current_step_content,
|
|
2860
|
-
title=f"{step_display}: {step_name} (Completed)",
|
|
2861
|
-
border_style="orange3",
|
|
2862
|
-
)
|
|
2863
|
-
console.print(final_step_panel) # type: ignore
|
|
2864
|
-
step_started_printed = True
|
|
2865
|
-
|
|
2866
|
-
elif isinstance(response, LoopExecutionStartedEvent):
|
|
2867
|
-
current_step_name = response.step_name or "Loop"
|
|
2868
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2869
|
-
current_step_content = ""
|
|
2870
|
-
step_started_printed = False
|
|
2871
|
-
|
|
2872
|
-
# Set up loop context
|
|
2873
|
-
current_primitive_context = {
|
|
2874
|
-
"type": "loop",
|
|
2875
|
-
"step_index": current_step_index,
|
|
2876
|
-
"sub_step_counter": 0,
|
|
2877
|
-
"current_iteration": 1,
|
|
2878
|
-
"max_iterations": response.max_iterations,
|
|
2879
|
-
}
|
|
2880
|
-
|
|
2881
|
-
# Clear cache for this primitive's sub-steps
|
|
2882
|
-
step_display_cache.clear()
|
|
2883
|
-
|
|
2884
|
-
status.update(
|
|
2885
|
-
f"Starting loop: {current_step_name} (max {response.max_iterations} iterations)..."
|
|
2886
|
-
)
|
|
2887
|
-
live_log.update(status)
|
|
2888
|
-
|
|
2889
|
-
elif isinstance(response, LoopIterationStartedEvent):
|
|
2890
|
-
if current_primitive_context and current_primitive_context["type"] == "loop":
|
|
2891
|
-
current_primitive_context["current_iteration"] = response.iteration
|
|
2892
|
-
current_primitive_context["sub_step_counter"] = 0 # Reset for new iteration
|
|
2893
|
-
# Clear cache for new iteration
|
|
2894
|
-
step_display_cache.clear()
|
|
2895
|
-
|
|
2896
|
-
status.update(
|
|
2897
|
-
f"Loop iteration {response.iteration}/{response.max_iterations}: {response.step_name}..."
|
|
2898
|
-
)
|
|
2899
|
-
live_log.update(status)
|
|
2900
|
-
|
|
2901
|
-
elif isinstance(response, LoopIterationCompletedEvent):
|
|
2902
|
-
status.update(
|
|
2903
|
-
f"Completed iteration {response.iteration}/{response.max_iterations}: {response.step_name}"
|
|
2904
|
-
)
|
|
2905
|
-
|
|
2906
|
-
elif isinstance(response, LoopExecutionCompletedEvent):
|
|
2907
|
-
step_name = response.step_name or "Loop"
|
|
2908
|
-
step_index = response.step_index or 0
|
|
2909
|
-
|
|
2910
|
-
status.update(f"Completed loop: {step_name} ({response.total_iterations} iterations)")
|
|
2911
|
-
live_log.update(status, refresh=True)
|
|
2912
|
-
|
|
2913
|
-
# Print loop summary
|
|
2914
|
-
if show_step_details:
|
|
2915
|
-
summary_content = "**Loop Summary:**\n\n"
|
|
2916
|
-
summary_content += (
|
|
2917
|
-
f"- Total iterations: {response.total_iterations}/{response.max_iterations}\n"
|
|
2918
|
-
)
|
|
2919
|
-
summary_content += (
|
|
2920
|
-
f"- Total steps executed: {sum(len(iteration) for iteration in response.all_results)}\n"
|
|
2921
|
-
)
|
|
2922
|
-
|
|
2923
|
-
loop_summary_panel = create_panel(
|
|
2924
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
2925
|
-
title=f"Loop {step_name} (Completed)",
|
|
2926
|
-
border_style="yellow",
|
|
2927
|
-
)
|
|
2928
|
-
console.print(loop_summary_panel) # type: ignore
|
|
2929
|
-
|
|
2930
|
-
# Reset context
|
|
2931
|
-
current_primitive_context = None
|
|
2932
|
-
step_display_cache.clear()
|
|
2933
|
-
step_started_printed = True
|
|
2934
|
-
|
|
2935
|
-
elif isinstance(response, ParallelExecutionStartedEvent):
|
|
2936
|
-
current_step_name = response.step_name or "Parallel Steps"
|
|
2937
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2938
|
-
current_step_content = ""
|
|
2939
|
-
step_started_printed = False
|
|
2940
|
-
|
|
2941
|
-
# Set up parallel context
|
|
2942
|
-
current_primitive_context = {
|
|
2943
|
-
"type": "parallel",
|
|
2944
|
-
"step_index": current_step_index,
|
|
2945
|
-
"sub_step_counter": 0,
|
|
2946
|
-
"total_steps": response.parallel_step_count,
|
|
2947
|
-
}
|
|
2948
|
-
|
|
2949
|
-
# Clear cache for this primitive's sub-steps
|
|
2950
|
-
step_display_cache.clear()
|
|
2951
|
-
|
|
2952
|
-
# Print parallel execution summary panel
|
|
2953
|
-
live_log.update(status, refresh=True)
|
|
2954
|
-
parallel_summary = f"**Parallel Steps:** {response.parallel_step_count}"
|
|
2955
|
-
# Use get_step_display_number for consistent numbering
|
|
2956
|
-
step_display = get_step_display_number(current_step_index, current_step_name)
|
|
2957
|
-
parallel_panel = create_panel(
|
|
2958
|
-
content=Markdown(parallel_summary) if markdown else parallel_summary,
|
|
2959
|
-
title=f"{step_display}: {current_step_name}",
|
|
2960
|
-
border_style="cyan",
|
|
2961
|
-
)
|
|
2962
|
-
console.print(parallel_panel) # type: ignore
|
|
2963
|
-
|
|
2964
|
-
status.update(
|
|
2965
|
-
f"Starting parallel execution: {current_step_name} ({response.parallel_step_count} steps)..."
|
|
2966
|
-
)
|
|
2967
|
-
live_log.update(status)
|
|
2968
|
-
|
|
2969
|
-
elif isinstance(response, ParallelExecutionCompletedEvent):
|
|
2970
|
-
step_name = response.step_name or "Parallel Steps"
|
|
2971
|
-
step_index = response.step_index or 0
|
|
2972
|
-
|
|
2973
|
-
status.update(f"Completed parallel execution: {step_name}")
|
|
2974
|
-
|
|
2975
|
-
# Reset context
|
|
2976
|
-
current_primitive_context = None
|
|
2977
|
-
step_display_cache.clear()
|
|
2978
|
-
|
|
2979
|
-
elif isinstance(response, ConditionExecutionStartedEvent):
|
|
2980
|
-
current_step_name = response.step_name or "Condition"
|
|
2981
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
2982
|
-
current_step_content = ""
|
|
2983
|
-
step_started_printed = False
|
|
2984
|
-
|
|
2985
|
-
# Set up condition context
|
|
2986
|
-
current_primitive_context = {
|
|
2987
|
-
"type": "condition",
|
|
2988
|
-
"step_index": current_step_index,
|
|
2989
|
-
"sub_step_counter": 0,
|
|
2990
|
-
"condition_result": response.condition_result,
|
|
2991
|
-
}
|
|
2992
|
-
|
|
2993
|
-
# Clear cache for this primitive's sub-steps
|
|
2994
|
-
step_display_cache.clear()
|
|
2995
|
-
|
|
2996
|
-
condition_text = "met" if response.condition_result else "not met"
|
|
2997
|
-
status.update(f"Starting condition: {current_step_name} (condition {condition_text})...")
|
|
2998
|
-
live_log.update(status)
|
|
2999
|
-
|
|
3000
|
-
elif isinstance(response, ConditionExecutionCompletedEvent):
|
|
3001
|
-
step_name = response.step_name or "Condition"
|
|
3002
|
-
step_index = response.step_index or 0
|
|
3003
|
-
|
|
3004
|
-
status.update(f"Completed condition: {step_name}")
|
|
3005
|
-
|
|
3006
|
-
# Reset context
|
|
3007
|
-
current_primitive_context = None
|
|
3008
|
-
step_display_cache.clear()
|
|
3009
|
-
|
|
3010
|
-
elif isinstance(response, RouterExecutionStartedEvent):
|
|
3011
|
-
current_step_name = response.step_name or "Router"
|
|
3012
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
3013
|
-
current_step_content = ""
|
|
3014
|
-
step_started_printed = False
|
|
3015
|
-
|
|
3016
|
-
# Set up router context
|
|
3017
|
-
current_primitive_context = {
|
|
3018
|
-
"type": "router",
|
|
3019
|
-
"step_index": current_step_index,
|
|
3020
|
-
"sub_step_counter": 0,
|
|
3021
|
-
"selected_steps": response.selected_steps,
|
|
3022
|
-
}
|
|
3023
|
-
|
|
3024
|
-
# Clear cache for this primitive's sub-steps
|
|
3025
|
-
step_display_cache.clear()
|
|
3026
|
-
|
|
3027
|
-
selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
|
|
3028
|
-
status.update(f"Starting router: {current_step_name} (selected: {selected_steps_text})...")
|
|
3029
|
-
live_log.update(status)
|
|
3030
|
-
|
|
3031
|
-
elif isinstance(response, RouterExecutionCompletedEvent):
|
|
3032
|
-
step_name = response.step_name or "Router"
|
|
3033
|
-
step_index = response.step_index or 0
|
|
3034
|
-
|
|
3035
|
-
status.update(f"Completed router: {step_name}")
|
|
3036
|
-
|
|
3037
|
-
# Print router summary
|
|
3038
|
-
if show_step_details:
|
|
3039
|
-
selected_steps_text = (
|
|
3040
|
-
", ".join(response.selected_steps) if response.selected_steps else "none"
|
|
3041
|
-
)
|
|
3042
|
-
summary_content = "**Router Summary:**\n\n"
|
|
3043
|
-
summary_content += f"- Selected steps: {selected_steps_text}\n"
|
|
3044
|
-
summary_content += f"- Executed steps: {response.executed_steps or 0}\n"
|
|
3045
|
-
|
|
3046
|
-
router_summary_panel = create_panel(
|
|
3047
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
3048
|
-
title=f"Router {step_name} (Completed)",
|
|
3049
|
-
border_style="purple",
|
|
3050
|
-
)
|
|
3051
|
-
console.print(router_summary_panel) # type: ignore
|
|
3052
|
-
|
|
3053
|
-
# Reset context
|
|
3054
|
-
current_primitive_context = None
|
|
3055
|
-
step_display_cache.clear()
|
|
3056
|
-
step_started_printed = True
|
|
3057
|
-
|
|
3058
|
-
elif isinstance(response, StepsExecutionStartedEvent):
|
|
3059
|
-
current_step_name = response.step_name or "Steps"
|
|
3060
|
-
current_step_index = response.step_index or 0 # type: ignore
|
|
3061
|
-
current_step_content = ""
|
|
3062
|
-
step_started_printed = False
|
|
3063
|
-
status.update(f"Starting steps: {current_step_name} ({response.steps_count} steps)...")
|
|
3064
|
-
live_log.update(status)
|
|
3065
|
-
|
|
3066
|
-
elif isinstance(response, StepsExecutionCompletedEvent):
|
|
3067
|
-
step_name = response.step_name or "Steps"
|
|
3068
|
-
step_index = response.step_index or 0
|
|
3069
|
-
|
|
3070
|
-
status.update(f"Completed steps: {step_name}")
|
|
3071
|
-
|
|
3072
|
-
# Add results from executed steps to step_responses
|
|
3073
|
-
if response.step_results:
|
|
3074
|
-
for i, step_result in enumerate(response.step_results):
|
|
3075
|
-
# Use the same numbering system as other primitives
|
|
3076
|
-
step_display_number = get_step_display_number(step_index, step_result.step_name or "")
|
|
3077
|
-
step_responses.append(
|
|
3078
|
-
{
|
|
3079
|
-
"step_name": f"{step_display_number}: {step_result.step_name}",
|
|
3080
|
-
"step_index": step_index,
|
|
3081
|
-
"content": step_result.content,
|
|
3082
|
-
"event": "StepsStepResult",
|
|
3083
|
-
}
|
|
3084
|
-
)
|
|
3085
|
-
|
|
3086
|
-
# Print steps summary
|
|
3087
|
-
if show_step_details:
|
|
3088
|
-
summary_content = "**Steps Summary:**\n\n"
|
|
3089
|
-
summary_content += f"- Total steps: {response.steps_count or 0}\n"
|
|
3090
|
-
summary_content += f"- Executed steps: {response.executed_steps or 0}\n"
|
|
3091
|
-
|
|
3092
|
-
steps_summary_panel = create_panel(
|
|
3093
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
3094
|
-
title=f"Steps {step_name} (Completed)",
|
|
3095
|
-
border_style="yellow",
|
|
3096
|
-
)
|
|
3097
|
-
console.print(steps_summary_panel) # type: ignore
|
|
3098
|
-
|
|
3099
|
-
step_started_printed = True
|
|
3100
|
-
|
|
3101
|
-
elif isinstance(response, WorkflowCompletedEvent):
|
|
3102
|
-
status.update("Workflow completed!")
|
|
3103
|
-
|
|
3104
|
-
# For callable functions, print the final content block here since there are no step events
|
|
3105
|
-
if (
|
|
3106
|
-
is_callable_function
|
|
3107
|
-
and show_step_details
|
|
3108
|
-
and current_step_content
|
|
3109
|
-
and not step_started_printed
|
|
3110
|
-
):
|
|
3111
|
-
final_step_panel = create_panel(
|
|
3112
|
-
content=Markdown(current_step_content) if markdown else current_step_content,
|
|
3113
|
-
title="Custom Function (Completed)",
|
|
3114
|
-
border_style="orange3",
|
|
3115
|
-
)
|
|
3116
|
-
console.print(final_step_panel) # type: ignore
|
|
3117
|
-
step_started_printed = True
|
|
3118
|
-
|
|
3119
|
-
live_log.update(status, refresh=True)
|
|
3120
|
-
|
|
3121
|
-
# Show final summary
|
|
3122
|
-
if response.extra_data:
|
|
3123
|
-
status = response.status
|
|
3124
|
-
summary_content = ""
|
|
3125
|
-
summary_content += f"""\n\n**Status:** {status}"""
|
|
3126
|
-
summary_content += f"""\n\n**Steps Completed:** {len(response.step_responses) if response.step_responses else 0}"""
|
|
3127
|
-
summary_content = summary_content.strip()
|
|
3128
|
-
|
|
3129
|
-
summary_panel = create_panel(
|
|
3130
|
-
content=Markdown(summary_content) if markdown else summary_content,
|
|
3131
|
-
title="Execution Summary",
|
|
3132
|
-
border_style="blue",
|
|
3133
|
-
)
|
|
3134
|
-
console.print(summary_panel) # type: ignore
|
|
3135
|
-
|
|
3136
|
-
else:
|
|
3137
|
-
if isinstance(response, str):
|
|
3138
|
-
response_str = response
|
|
3139
|
-
elif isinstance(response, StepOutputEvent):
|
|
3140
|
-
# Handle StepOutputEvent objects yielded from workflow
|
|
3141
|
-
response_str = response.content or "" # type: ignore
|
|
3142
|
-
else:
|
|
3143
|
-
from agno.run.response import RunResponseContentEvent
|
|
3144
|
-
from agno.run.team import RunResponseContentEvent as TeamRunResponseContentEvent
|
|
3145
|
-
|
|
3146
|
-
current_step_executor_type = None
|
|
3147
|
-
# Handle both integer and tuple step indices for parallel execution
|
|
3148
|
-
actual_step_index = current_step_index
|
|
3149
|
-
if isinstance(current_step_index, tuple):
|
|
3150
|
-
# For tuple indices, use the first element (parent step index)
|
|
3151
|
-
actual_step_index = current_step_index[0]
|
|
3152
|
-
# If it's nested tuple, keep extracting until we get an integer
|
|
3153
|
-
while isinstance(actual_step_index, tuple) and len(actual_step_index) > 0:
|
|
3154
|
-
actual_step_index = actual_step_index[0]
|
|
3155
|
-
|
|
3156
|
-
# Check if this is a streaming content event from agent or team
|
|
3157
|
-
if isinstance(
|
|
3158
|
-
response,
|
|
3159
|
-
(RunResponseContentEvent, TeamRunResponseContentEvent, WorkflowRunResponseEvent), # type: ignore
|
|
3160
|
-
): # type: ignore
|
|
3161
|
-
# Extract the content from the streaming event
|
|
3162
|
-
response_str = response.content # type: ignore
|
|
3163
|
-
|
|
3164
|
-
# Check if this is a team's final structured output
|
|
3165
|
-
is_structured_output = (
|
|
3166
|
-
isinstance(response, TeamRunResponseContentEvent)
|
|
3167
|
-
and hasattr(response, "content_type")
|
|
3168
|
-
and response.content_type != "str"
|
|
3169
|
-
and response.content_type != ""
|
|
3170
|
-
)
|
|
3171
|
-
elif isinstance(response, RunResponseContentEvent) and current_step_executor_type != "team":
|
|
3172
|
-
response_str = response.content # type: ignore
|
|
3173
|
-
else:
|
|
3174
|
-
continue
|
|
3175
|
-
|
|
3176
|
-
# Use the unified formatting function for consistency
|
|
3177
|
-
response_str = self._format_step_content_for_display(response_str) # type: ignore
|
|
3178
|
-
|
|
3179
|
-
# Filter out empty responses and add to current step content
|
|
3180
|
-
if response_str and response_str.strip():
|
|
3181
|
-
# If it's a structured output from a team, replace the content instead of appending
|
|
3182
|
-
if "is_structured_output" in locals() and is_structured_output:
|
|
3183
|
-
current_step_content = response_str
|
|
3184
|
-
else:
|
|
3185
|
-
current_step_content += response_str
|
|
3186
|
-
|
|
3187
|
-
# Live update the step panel with streaming content
|
|
3188
|
-
if show_step_details and not step_started_printed:
|
|
3189
|
-
# Generate smart step number for streaming title (will use cached value)
|
|
3190
|
-
step_display = get_step_display_number(current_step_index, current_step_name)
|
|
3191
|
-
title = f"{step_display}: {current_step_name} (Streaming...)"
|
|
3192
|
-
if is_callable_function:
|
|
3193
|
-
title = "Custom Function (Streaming...)"
|
|
3194
|
-
|
|
3195
|
-
# Show the streaming content live in orange panel
|
|
3196
|
-
live_step_panel = create_panel(
|
|
3197
|
-
content=Markdown(current_step_content) if markdown else current_step_content,
|
|
3198
|
-
title=title,
|
|
3199
|
-
border_style="orange3",
|
|
3200
|
-
)
|
|
3201
|
-
|
|
3202
|
-
# Create group with status and current step content
|
|
3203
|
-
group = Group(status, live_step_panel)
|
|
3204
|
-
live_log.update(group)
|
|
3205
|
-
|
|
3206
|
-
response_timer.stop()
|
|
3207
|
-
|
|
3208
|
-
live_log.update("")
|
|
3209
|
-
|
|
3210
|
-
# Final completion message
|
|
3211
|
-
if show_time:
|
|
3212
|
-
completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
|
|
3213
|
-
console.print(completion_text) # type: ignore
|
|
3214
|
-
|
|
3215
|
-
except Exception as e:
|
|
3216
|
-
import traceback
|
|
3217
|
-
|
|
3218
|
-
traceback.print_exc()
|
|
3219
|
-
response_timer.stop()
|
|
3220
|
-
error_panel = create_panel(
|
|
3221
|
-
content=f"Workflow execution failed: {str(e)}", title="Execution Error", border_style="red"
|
|
3222
|
-
)
|
|
3223
|
-
console.print(error_panel) # type: ignore
|
|
3224
|
-
|
|
3225
|
-
def to_dict(self) -> Dict[str, Any]:
    """Serialize the workflow into a plain dictionary representation.

    Steps are reduced to ``name``/``description`` pairs; a callable (or
    absent) ``steps`` attribute serializes as an empty list.
    """
    # TODO: Handle nested
    # Normalize the steps attribute into a plain list of step objects.
    if self.steps is None or callable(self.steps):
        raw_steps = []
    elif isinstance(self.steps, Steps):
        raw_steps = self.steps.steps
    else:
        raw_steps = self.steps

    serialized_steps = []
    for item in raw_steps:
        serialized_steps.append(
            {
                # Step-like objects expose .name; bare callables fall back to __name__.
                "name": item.name if hasattr(item, "name") else item.__name__,
                "description": item.description if hasattr(item, "description") else "User-defined callable step",
            }
        )

    return {
        "name": self.name,
        "workflow_id": self.workflow_id,
        "description": self.description,
        "steps": serialized_steps,
        "session_id": self.session_id,
    }
|
|
3248
|
-
|
|
3249
|
-
def _collect_workflow_session_state_from_agents_and_teams(self):
|
|
3250
|
-
"""Collect updated workflow_session_state from agents after step execution"""
|
|
3251
|
-
if self.workflow_session_state is None:
|
|
3252
|
-
self.workflow_session_state = {}
|
|
3253
|
-
|
|
3254
|
-
# Collect state from all agents in all steps
|
|
3255
|
-
if self.steps and not callable(self.steps):
|
|
3256
|
-
steps_list = self.steps.steps if isinstance(self.steps, Steps) else self.steps
|
|
3257
|
-
for step in steps_list:
|
|
3258
|
-
if isinstance(step, Step):
|
|
3259
|
-
executor = step.active_executor
|
|
3260
|
-
if hasattr(executor, "workflow_session_state") and executor.workflow_session_state:
|
|
3261
|
-
# Merge the agent's session state back into workflow session state
|
|
3262
|
-
from agno.utils.merge_dict import merge_dictionaries
|
|
3263
|
-
|
|
3264
|
-
merge_dictionaries(self.workflow_session_state, executor.workflow_session_state)
|
|
3265
|
-
|
|
3266
|
-
# If it's a team, collect from all members
|
|
3267
|
-
if hasattr(executor, "members"):
|
|
3268
|
-
for member in executor.members:
|
|
3269
|
-
if hasattr(member, "workflow_session_state") and member.workflow_session_state:
|
|
3270
|
-
merge_dictionaries(self.workflow_session_state, member.workflow_session_state)
|
|
3271
|
-
|
|
3272
|
-
def _update_executor_workflow_session_state(self, executor) -> None:
|
|
3273
|
-
"""Update executor with workflow_session_state"""
|
|
3274
|
-
if self.workflow_session_state is not None:
|
|
3275
|
-
# Update session_state with workflow_session_state
|
|
3276
|
-
executor.workflow_session_state = self.workflow_session_state
|
|
3277
|
-
|
|
3278
|
-
def _save_run_to_storage(self, workflow_run_response: WorkflowRunResponse) -> None:
    """Persist a workflow run response via the active session, if one exists.

    No-op when there is no workflow session to attach the run to.
    """
    if not self.workflow_session:
        return
    self.workflow_session.upsert_run(workflow_run_response)
    self.write_to_storage()
|
|
3283
|
-
|
|
3284
|
-
def update_agents_and_teams_session_info(self):
|
|
3285
|
-
"""Update agents and teams with workflow session information"""
|
|
3286
|
-
log_debug("Updating agents and teams with session information")
|
|
3287
|
-
# Initialize steps - only if steps is iterable (not callable)
|
|
3288
|
-
if self.steps and not callable(self.steps):
|
|
3289
|
-
steps_list = self.steps.steps if isinstance(self.steps, Steps) else self.steps
|
|
3290
|
-
for step in steps_list:
|
|
3291
|
-
# TODO: Handle properly steps inside other primitives
|
|
3292
|
-
if isinstance(step, Step):
|
|
3293
|
-
active_executor = step.active_executor
|
|
3294
|
-
|
|
3295
|
-
if hasattr(active_executor, "workflow_session_id"):
|
|
3296
|
-
active_executor.workflow_session_id = self.session_id
|
|
3297
|
-
if hasattr(active_executor, "workflow_id"):
|
|
3298
|
-
active_executor.workflow_id = self.workflow_id
|
|
3299
|
-
|
|
3300
|
-
# Set workflow_session_state on agents and teams
|
|
3301
|
-
self._update_executor_workflow_session_state(active_executor)
|
|
3302
|
-
|
|
3303
|
-
# If it's a team, update all members
|
|
3304
|
-
if hasattr(active_executor, "members"):
|
|
3305
|
-
for member in active_executor.members:
|
|
3306
|
-
if hasattr(member, "workflow_session_id"):
|
|
3307
|
-
member.workflow_session_id = self.session_id
|
|
3308
|
-
if hasattr(member, "workflow_id"):
|
|
3309
|
-
member.workflow_id = self.workflow_id
|
|
3310
|
-
|
|
3311
|
-
# Set workflow_session_state on team members
|
|
3312
|
-
self._update_executor_workflow_session_state(member)
|