agno 2.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/__init__.py +8 -0
- agno/agent/__init__.py +51 -0
- agno/agent/agent.py +10405 -0
- agno/api/__init__.py +0 -0
- agno/api/agent.py +28 -0
- agno/api/api.py +40 -0
- agno/api/evals.py +22 -0
- agno/api/os.py +17 -0
- agno/api/routes.py +13 -0
- agno/api/schemas/__init__.py +9 -0
- agno/api/schemas/agent.py +16 -0
- agno/api/schemas/evals.py +16 -0
- agno/api/schemas/os.py +14 -0
- agno/api/schemas/response.py +6 -0
- agno/api/schemas/team.py +16 -0
- agno/api/schemas/utils.py +21 -0
- agno/api/schemas/workflows.py +16 -0
- agno/api/settings.py +53 -0
- agno/api/team.py +30 -0
- agno/api/workflow.py +28 -0
- agno/cloud/aws/base.py +214 -0
- agno/cloud/aws/s3/__init__.py +2 -0
- agno/cloud/aws/s3/api_client.py +43 -0
- agno/cloud/aws/s3/bucket.py +195 -0
- agno/cloud/aws/s3/object.py +57 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/__init__.py +24 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +598 -0
- agno/db/dynamo/__init__.py +3 -0
- agno/db/dynamo/dynamo.py +2042 -0
- agno/db/dynamo/schemas.py +314 -0
- agno/db/dynamo/utils.py +743 -0
- agno/db/firestore/__init__.py +3 -0
- agno/db/firestore/firestore.py +1795 -0
- agno/db/firestore/schemas.py +140 -0
- agno/db/firestore/utils.py +376 -0
- agno/db/gcs_json/__init__.py +3 -0
- agno/db/gcs_json/gcs_json_db.py +1335 -0
- agno/db/gcs_json/utils.py +228 -0
- agno/db/in_memory/__init__.py +3 -0
- agno/db/in_memory/in_memory_db.py +1160 -0
- agno/db/in_memory/utils.py +230 -0
- agno/db/json/__init__.py +3 -0
- agno/db/json/json_db.py +1328 -0
- agno/db/json/utils.py +230 -0
- agno/db/migrations/__init__.py +0 -0
- agno/db/migrations/v1_to_v2.py +635 -0
- agno/db/mongo/__init__.py +17 -0
- agno/db/mongo/async_mongo.py +2026 -0
- agno/db/mongo/mongo.py +1982 -0
- agno/db/mongo/schemas.py +87 -0
- agno/db/mongo/utils.py +259 -0
- agno/db/mysql/__init__.py +3 -0
- agno/db/mysql/mysql.py +2308 -0
- agno/db/mysql/schemas.py +138 -0
- agno/db/mysql/utils.py +355 -0
- agno/db/postgres/__init__.py +4 -0
- agno/db/postgres/async_postgres.py +1927 -0
- agno/db/postgres/postgres.py +2260 -0
- agno/db/postgres/schemas.py +139 -0
- agno/db/postgres/utils.py +442 -0
- agno/db/redis/__init__.py +3 -0
- agno/db/redis/redis.py +1660 -0
- agno/db/redis/schemas.py +123 -0
- agno/db/redis/utils.py +346 -0
- agno/db/schemas/__init__.py +4 -0
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +33 -0
- agno/db/schemas/knowledge.py +40 -0
- agno/db/schemas/memory.py +46 -0
- agno/db/schemas/metrics.py +0 -0
- agno/db/singlestore/__init__.py +3 -0
- agno/db/singlestore/schemas.py +130 -0
- agno/db/singlestore/singlestore.py +2272 -0
- agno/db/singlestore/utils.py +384 -0
- agno/db/sqlite/__init__.py +4 -0
- agno/db/sqlite/async_sqlite.py +2293 -0
- agno/db/sqlite/schemas.py +133 -0
- agno/db/sqlite/sqlite.py +2288 -0
- agno/db/sqlite/utils.py +431 -0
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +309 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1353 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +116 -0
- agno/debug.py +18 -0
- agno/eval/__init__.py +14 -0
- agno/eval/accuracy.py +834 -0
- agno/eval/performance.py +773 -0
- agno/eval/reliability.py +306 -0
- agno/eval/utils.py +119 -0
- agno/exceptions.py +161 -0
- agno/filters.py +354 -0
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +52 -0
- agno/integrations/__init__.py +0 -0
- agno/integrations/discord/__init__.py +3 -0
- agno/integrations/discord/client.py +203 -0
- agno/knowledge/__init__.py +5 -0
- agno/knowledge/chunking/__init__.py +0 -0
- agno/knowledge/chunking/agentic.py +79 -0
- agno/knowledge/chunking/document.py +91 -0
- agno/knowledge/chunking/fixed.py +57 -0
- agno/knowledge/chunking/markdown.py +151 -0
- agno/knowledge/chunking/recursive.py +63 -0
- agno/knowledge/chunking/row.py +39 -0
- agno/knowledge/chunking/semantic.py +86 -0
- agno/knowledge/chunking/strategy.py +165 -0
- agno/knowledge/content.py +74 -0
- agno/knowledge/document/__init__.py +5 -0
- agno/knowledge/document/base.py +58 -0
- agno/knowledge/embedder/__init__.py +5 -0
- agno/knowledge/embedder/aws_bedrock.py +343 -0
- agno/knowledge/embedder/azure_openai.py +210 -0
- agno/knowledge/embedder/base.py +23 -0
- agno/knowledge/embedder/cohere.py +323 -0
- agno/knowledge/embedder/fastembed.py +62 -0
- agno/knowledge/embedder/fireworks.py +13 -0
- agno/knowledge/embedder/google.py +258 -0
- agno/knowledge/embedder/huggingface.py +94 -0
- agno/knowledge/embedder/jina.py +182 -0
- agno/knowledge/embedder/langdb.py +22 -0
- agno/knowledge/embedder/mistral.py +206 -0
- agno/knowledge/embedder/nebius.py +13 -0
- agno/knowledge/embedder/ollama.py +154 -0
- agno/knowledge/embedder/openai.py +195 -0
- agno/knowledge/embedder/sentence_transformer.py +63 -0
- agno/knowledge/embedder/together.py +13 -0
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/embedder/voyageai.py +165 -0
- agno/knowledge/knowledge.py +1988 -0
- agno/knowledge/reader/__init__.py +7 -0
- agno/knowledge/reader/arxiv_reader.py +81 -0
- agno/knowledge/reader/base.py +95 -0
- agno/knowledge/reader/csv_reader.py +166 -0
- agno/knowledge/reader/docx_reader.py +82 -0
- agno/knowledge/reader/field_labeled_csv_reader.py +292 -0
- agno/knowledge/reader/firecrawl_reader.py +201 -0
- agno/knowledge/reader/json_reader.py +87 -0
- agno/knowledge/reader/markdown_reader.py +137 -0
- agno/knowledge/reader/pdf_reader.py +431 -0
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +313 -0
- agno/knowledge/reader/s3_reader.py +89 -0
- agno/knowledge/reader/tavily_reader.py +194 -0
- agno/knowledge/reader/text_reader.py +115 -0
- agno/knowledge/reader/web_search_reader.py +372 -0
- agno/knowledge/reader/website_reader.py +455 -0
- agno/knowledge/reader/wikipedia_reader.py +59 -0
- agno/knowledge/reader/youtube_reader.py +78 -0
- agno/knowledge/remote_content/__init__.py +0 -0
- agno/knowledge/remote_content/remote_content.py +88 -0
- agno/knowledge/reranker/__init__.py +3 -0
- agno/knowledge/reranker/base.py +14 -0
- agno/knowledge/reranker/cohere.py +64 -0
- agno/knowledge/reranker/infinity.py +195 -0
- agno/knowledge/reranker/sentence_transformer.py +54 -0
- agno/knowledge/types.py +39 -0
- agno/knowledge/utils.py +189 -0
- agno/media.py +462 -0
- agno/memory/__init__.py +3 -0
- agno/memory/manager.py +1327 -0
- agno/models/__init__.py +0 -0
- agno/models/aimlapi/__init__.py +5 -0
- agno/models/aimlapi/aimlapi.py +45 -0
- agno/models/anthropic/__init__.py +5 -0
- agno/models/anthropic/claude.py +757 -0
- agno/models/aws/__init__.py +15 -0
- agno/models/aws/bedrock.py +701 -0
- agno/models/aws/claude.py +378 -0
- agno/models/azure/__init__.py +18 -0
- agno/models/azure/ai_foundry.py +485 -0
- agno/models/azure/openai_chat.py +131 -0
- agno/models/base.py +2175 -0
- agno/models/cerebras/__init__.py +12 -0
- agno/models/cerebras/cerebras.py +501 -0
- agno/models/cerebras/cerebras_openai.py +112 -0
- agno/models/cohere/__init__.py +5 -0
- agno/models/cohere/chat.py +389 -0
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +57 -0
- agno/models/dashscope/__init__.py +5 -0
- agno/models/dashscope/dashscope.py +91 -0
- agno/models/deepinfra/__init__.py +5 -0
- agno/models/deepinfra/deepinfra.py +28 -0
- agno/models/deepseek/__init__.py +5 -0
- agno/models/deepseek/deepseek.py +61 -0
- agno/models/defaults.py +1 -0
- agno/models/fireworks/__init__.py +5 -0
- agno/models/fireworks/fireworks.py +26 -0
- agno/models/google/__init__.py +5 -0
- agno/models/google/gemini.py +1085 -0
- agno/models/groq/__init__.py +5 -0
- agno/models/groq/groq.py +556 -0
- agno/models/huggingface/__init__.py +5 -0
- agno/models/huggingface/huggingface.py +491 -0
- agno/models/ibm/__init__.py +5 -0
- agno/models/ibm/watsonx.py +422 -0
- agno/models/internlm/__init__.py +3 -0
- agno/models/internlm/internlm.py +26 -0
- agno/models/langdb/__init__.py +1 -0
- agno/models/langdb/langdb.py +48 -0
- agno/models/litellm/__init__.py +14 -0
- agno/models/litellm/chat.py +468 -0
- agno/models/litellm/litellm_openai.py +25 -0
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/lmstudio/__init__.py +5 -0
- agno/models/lmstudio/lmstudio.py +25 -0
- agno/models/message.py +434 -0
- agno/models/meta/__init__.py +12 -0
- agno/models/meta/llama.py +475 -0
- agno/models/meta/llama_openai.py +78 -0
- agno/models/metrics.py +120 -0
- agno/models/mistral/__init__.py +5 -0
- agno/models/mistral/mistral.py +432 -0
- agno/models/nebius/__init__.py +3 -0
- agno/models/nebius/nebius.py +54 -0
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/__init__.py +5 -0
- agno/models/nvidia/nvidia.py +28 -0
- agno/models/ollama/__init__.py +5 -0
- agno/models/ollama/chat.py +441 -0
- agno/models/openai/__init__.py +9 -0
- agno/models/openai/chat.py +883 -0
- agno/models/openai/like.py +27 -0
- agno/models/openai/responses.py +1050 -0
- agno/models/openrouter/__init__.py +5 -0
- agno/models/openrouter/openrouter.py +66 -0
- agno/models/perplexity/__init__.py +5 -0
- agno/models/perplexity/perplexity.py +187 -0
- agno/models/portkey/__init__.py +3 -0
- agno/models/portkey/portkey.py +81 -0
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +52 -0
- agno/models/response.py +199 -0
- agno/models/sambanova/__init__.py +5 -0
- agno/models/sambanova/sambanova.py +28 -0
- agno/models/siliconflow/__init__.py +5 -0
- agno/models/siliconflow/siliconflow.py +25 -0
- agno/models/together/__init__.py +5 -0
- agno/models/together/together.py +25 -0
- agno/models/utils.py +266 -0
- agno/models/vercel/__init__.py +3 -0
- agno/models/vercel/v0.py +26 -0
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +70 -0
- agno/models/vllm/__init__.py +3 -0
- agno/models/vllm/vllm.py +78 -0
- agno/models/xai/__init__.py +3 -0
- agno/models/xai/xai.py +113 -0
- agno/os/__init__.py +3 -0
- agno/os/app.py +876 -0
- agno/os/auth.py +57 -0
- agno/os/config.py +104 -0
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +250 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/__init__.py +3 -0
- agno/os/interfaces/agui/agui.py +47 -0
- agno/os/interfaces/agui/router.py +144 -0
- agno/os/interfaces/agui/utils.py +534 -0
- agno/os/interfaces/base.py +25 -0
- agno/os/interfaces/slack/__init__.py +3 -0
- agno/os/interfaces/slack/router.py +148 -0
- agno/os/interfaces/slack/security.py +30 -0
- agno/os/interfaces/slack/slack.py +47 -0
- agno/os/interfaces/whatsapp/__init__.py +3 -0
- agno/os/interfaces/whatsapp/router.py +211 -0
- agno/os/interfaces/whatsapp/security.py +53 -0
- agno/os/interfaces/whatsapp/whatsapp.py +36 -0
- agno/os/mcp.py +292 -0
- agno/os/middleware/__init__.py +7 -0
- agno/os/middleware/jwt.py +233 -0
- agno/os/router.py +1763 -0
- agno/os/routers/__init__.py +3 -0
- agno/os/routers/evals/__init__.py +3 -0
- agno/os/routers/evals/evals.py +430 -0
- agno/os/routers/evals/schemas.py +142 -0
- agno/os/routers/evals/utils.py +162 -0
- agno/os/routers/health.py +31 -0
- agno/os/routers/home.py +52 -0
- agno/os/routers/knowledge/__init__.py +3 -0
- agno/os/routers/knowledge/knowledge.py +997 -0
- agno/os/routers/knowledge/schemas.py +178 -0
- agno/os/routers/memory/__init__.py +3 -0
- agno/os/routers/memory/memory.py +515 -0
- agno/os/routers/memory/schemas.py +62 -0
- agno/os/routers/metrics/__init__.py +3 -0
- agno/os/routers/metrics/metrics.py +190 -0
- agno/os/routers/metrics/schemas.py +47 -0
- agno/os/routers/session/__init__.py +3 -0
- agno/os/routers/session/session.py +997 -0
- agno/os/schema.py +1055 -0
- agno/os/settings.py +43 -0
- agno/os/utils.py +630 -0
- agno/py.typed +0 -0
- agno/reasoning/__init__.py +0 -0
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/azure_ai_foundry.py +67 -0
- agno/reasoning/deepseek.py +63 -0
- agno/reasoning/default.py +97 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/groq.py +71 -0
- agno/reasoning/helpers.py +63 -0
- agno/reasoning/ollama.py +67 -0
- agno/reasoning/openai.py +86 -0
- agno/reasoning/step.py +31 -0
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +787 -0
- agno/run/base.py +229 -0
- agno/run/cancel.py +81 -0
- agno/run/messages.py +32 -0
- agno/run/team.py +753 -0
- agno/run/workflow.py +708 -0
- agno/session/__init__.py +10 -0
- agno/session/agent.py +295 -0
- agno/session/summary.py +265 -0
- agno/session/team.py +392 -0
- agno/session/workflow.py +205 -0
- agno/team/__init__.py +37 -0
- agno/team/team.py +8793 -0
- agno/tools/__init__.py +10 -0
- agno/tools/agentql.py +120 -0
- agno/tools/airflow.py +69 -0
- agno/tools/api.py +122 -0
- agno/tools/apify.py +314 -0
- agno/tools/arxiv.py +127 -0
- agno/tools/aws_lambda.py +53 -0
- agno/tools/aws_ses.py +66 -0
- agno/tools/baidusearch.py +89 -0
- agno/tools/bitbucket.py +292 -0
- agno/tools/brandfetch.py +213 -0
- agno/tools/bravesearch.py +106 -0
- agno/tools/brightdata.py +367 -0
- agno/tools/browserbase.py +209 -0
- agno/tools/calcom.py +255 -0
- agno/tools/calculator.py +151 -0
- agno/tools/cartesia.py +187 -0
- agno/tools/clickup.py +244 -0
- agno/tools/confluence.py +240 -0
- agno/tools/crawl4ai.py +158 -0
- agno/tools/csv_toolkit.py +185 -0
- agno/tools/dalle.py +110 -0
- agno/tools/daytona.py +475 -0
- agno/tools/decorator.py +262 -0
- agno/tools/desi_vocal.py +108 -0
- agno/tools/discord.py +161 -0
- agno/tools/docker.py +716 -0
- agno/tools/duckdb.py +379 -0
- agno/tools/duckduckgo.py +91 -0
- agno/tools/e2b.py +703 -0
- agno/tools/eleven_labs.py +196 -0
- agno/tools/email.py +67 -0
- agno/tools/evm.py +129 -0
- agno/tools/exa.py +396 -0
- agno/tools/fal.py +127 -0
- agno/tools/file.py +240 -0
- agno/tools/file_generation.py +350 -0
- agno/tools/financial_datasets.py +288 -0
- agno/tools/firecrawl.py +143 -0
- agno/tools/function.py +1187 -0
- agno/tools/giphy.py +93 -0
- agno/tools/github.py +1760 -0
- agno/tools/gmail.py +922 -0
- agno/tools/google_bigquery.py +117 -0
- agno/tools/google_drive.py +270 -0
- agno/tools/google_maps.py +253 -0
- agno/tools/googlecalendar.py +674 -0
- agno/tools/googlesearch.py +98 -0
- agno/tools/googlesheets.py +377 -0
- agno/tools/hackernews.py +77 -0
- agno/tools/jina.py +101 -0
- agno/tools/jira.py +170 -0
- agno/tools/knowledge.py +218 -0
- agno/tools/linear.py +426 -0
- agno/tools/linkup.py +58 -0
- agno/tools/local_file_system.py +90 -0
- agno/tools/lumalab.py +183 -0
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +284 -0
- agno/tools/mem0.py +193 -0
- agno/tools/memori.py +339 -0
- agno/tools/memory.py +419 -0
- agno/tools/mlx_transcribe.py +139 -0
- agno/tools/models/__init__.py +0 -0
- agno/tools/models/azure_openai.py +190 -0
- agno/tools/models/gemini.py +203 -0
- agno/tools/models/groq.py +158 -0
- agno/tools/models/morph.py +186 -0
- agno/tools/models/nebius.py +124 -0
- agno/tools/models_labs.py +195 -0
- agno/tools/moviepy_video.py +349 -0
- agno/tools/neo4j.py +134 -0
- agno/tools/newspaper.py +46 -0
- agno/tools/newspaper4k.py +93 -0
- agno/tools/notion.py +204 -0
- agno/tools/openai.py +202 -0
- agno/tools/openbb.py +160 -0
- agno/tools/opencv.py +321 -0
- agno/tools/openweather.py +233 -0
- agno/tools/oxylabs.py +385 -0
- agno/tools/pandas.py +102 -0
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +257 -0
- agno/tools/pubmed.py +188 -0
- agno/tools/python.py +205 -0
- agno/tools/reasoning.py +283 -0
- agno/tools/reddit.py +467 -0
- agno/tools/replicate.py +117 -0
- agno/tools/resend.py +62 -0
- agno/tools/scrapegraph.py +222 -0
- agno/tools/searxng.py +152 -0
- agno/tools/serpapi.py +116 -0
- agno/tools/serper.py +255 -0
- agno/tools/shell.py +53 -0
- agno/tools/slack.py +136 -0
- agno/tools/sleep.py +20 -0
- agno/tools/spider.py +116 -0
- agno/tools/sql.py +154 -0
- agno/tools/streamlit/__init__.py +0 -0
- agno/tools/streamlit/components.py +113 -0
- agno/tools/tavily.py +254 -0
- agno/tools/telegram.py +48 -0
- agno/tools/todoist.py +218 -0
- agno/tools/tool_registry.py +1 -0
- agno/tools/toolkit.py +146 -0
- agno/tools/trafilatura.py +388 -0
- agno/tools/trello.py +274 -0
- agno/tools/twilio.py +186 -0
- agno/tools/user_control_flow.py +78 -0
- agno/tools/valyu.py +228 -0
- agno/tools/visualization.py +467 -0
- agno/tools/webbrowser.py +28 -0
- agno/tools/webex.py +76 -0
- agno/tools/website.py +54 -0
- agno/tools/webtools.py +45 -0
- agno/tools/whatsapp.py +286 -0
- agno/tools/wikipedia.py +63 -0
- agno/tools/workflow.py +278 -0
- agno/tools/x.py +335 -0
- agno/tools/yfinance.py +257 -0
- agno/tools/youtube.py +184 -0
- agno/tools/zendesk.py +82 -0
- agno/tools/zep.py +454 -0
- agno/tools/zoom.py +382 -0
- agno/utils/__init__.py +0 -0
- agno/utils/agent.py +820 -0
- agno/utils/audio.py +49 -0
- agno/utils/certs.py +27 -0
- agno/utils/code_execution.py +11 -0
- agno/utils/common.py +132 -0
- agno/utils/dttm.py +13 -0
- agno/utils/enum.py +22 -0
- agno/utils/env.py +11 -0
- agno/utils/events.py +696 -0
- agno/utils/format_str.py +16 -0
- agno/utils/functions.py +166 -0
- agno/utils/gemini.py +426 -0
- agno/utils/hooks.py +57 -0
- agno/utils/http.py +74 -0
- agno/utils/json_schema.py +234 -0
- agno/utils/knowledge.py +36 -0
- agno/utils/location.py +19 -0
- agno/utils/log.py +255 -0
- agno/utils/mcp.py +214 -0
- agno/utils/media.py +352 -0
- agno/utils/merge_dict.py +41 -0
- agno/utils/message.py +118 -0
- agno/utils/models/__init__.py +0 -0
- agno/utils/models/ai_foundry.py +43 -0
- agno/utils/models/claude.py +358 -0
- agno/utils/models/cohere.py +87 -0
- agno/utils/models/llama.py +78 -0
- agno/utils/models/mistral.py +98 -0
- agno/utils/models/openai_responses.py +140 -0
- agno/utils/models/schema_utils.py +153 -0
- agno/utils/models/watsonx.py +41 -0
- agno/utils/openai.py +257 -0
- agno/utils/pickle.py +32 -0
- agno/utils/pprint.py +178 -0
- agno/utils/print_response/__init__.py +0 -0
- agno/utils/print_response/agent.py +842 -0
- agno/utils/print_response/team.py +1724 -0
- agno/utils/print_response/workflow.py +1668 -0
- agno/utils/prompts.py +111 -0
- agno/utils/reasoning.py +108 -0
- agno/utils/response.py +163 -0
- agno/utils/response_iterator.py +17 -0
- agno/utils/safe_formatter.py +24 -0
- agno/utils/serialize.py +32 -0
- agno/utils/shell.py +22 -0
- agno/utils/streamlit.py +487 -0
- agno/utils/string.py +231 -0
- agno/utils/team.py +139 -0
- agno/utils/timer.py +41 -0
- agno/utils/tools.py +102 -0
- agno/utils/web.py +23 -0
- agno/utils/whatsapp.py +305 -0
- agno/utils/yaml_io.py +25 -0
- agno/vectordb/__init__.py +3 -0
- agno/vectordb/base.py +127 -0
- agno/vectordb/cassandra/__init__.py +5 -0
- agno/vectordb/cassandra/cassandra.py +501 -0
- agno/vectordb/cassandra/extra_param_mixin.py +11 -0
- agno/vectordb/cassandra/index.py +13 -0
- agno/vectordb/chroma/__init__.py +5 -0
- agno/vectordb/chroma/chromadb.py +929 -0
- agno/vectordb/clickhouse/__init__.py +9 -0
- agno/vectordb/clickhouse/clickhousedb.py +835 -0
- agno/vectordb/clickhouse/index.py +9 -0
- agno/vectordb/couchbase/__init__.py +3 -0
- agno/vectordb/couchbase/couchbase.py +1442 -0
- agno/vectordb/distance.py +7 -0
- agno/vectordb/lancedb/__init__.py +6 -0
- agno/vectordb/lancedb/lance_db.py +995 -0
- agno/vectordb/langchaindb/__init__.py +5 -0
- agno/vectordb/langchaindb/langchaindb.py +163 -0
- agno/vectordb/lightrag/__init__.py +5 -0
- agno/vectordb/lightrag/lightrag.py +388 -0
- agno/vectordb/llamaindex/__init__.py +3 -0
- agno/vectordb/llamaindex/llamaindexdb.py +166 -0
- agno/vectordb/milvus/__init__.py +4 -0
- agno/vectordb/milvus/milvus.py +1182 -0
- agno/vectordb/mongodb/__init__.py +9 -0
- agno/vectordb/mongodb/mongodb.py +1417 -0
- agno/vectordb/pgvector/__init__.py +12 -0
- agno/vectordb/pgvector/index.py +23 -0
- agno/vectordb/pgvector/pgvector.py +1462 -0
- agno/vectordb/pineconedb/__init__.py +5 -0
- agno/vectordb/pineconedb/pineconedb.py +747 -0
- agno/vectordb/qdrant/__init__.py +5 -0
- agno/vectordb/qdrant/qdrant.py +1134 -0
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +694 -0
- agno/vectordb/search.py +7 -0
- agno/vectordb/singlestore/__init__.py +10 -0
- agno/vectordb/singlestore/index.py +41 -0
- agno/vectordb/singlestore/singlestore.py +763 -0
- agno/vectordb/surrealdb/__init__.py +3 -0
- agno/vectordb/surrealdb/surrealdb.py +699 -0
- agno/vectordb/upstashdb/__init__.py +5 -0
- agno/vectordb/upstashdb/upstashdb.py +718 -0
- agno/vectordb/weaviate/__init__.py +8 -0
- agno/vectordb/weaviate/index.py +15 -0
- agno/vectordb/weaviate/weaviate.py +1005 -0
- agno/workflow/__init__.py +23 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +738 -0
- agno/workflow/loop.py +735 -0
- agno/workflow/parallel.py +824 -0
- agno/workflow/router.py +702 -0
- agno/workflow/step.py +1432 -0
- agno/workflow/steps.py +592 -0
- agno/workflow/types.py +520 -0
- agno/workflow/workflow.py +4321 -0
- agno-2.2.13.dist-info/METADATA +614 -0
- agno-2.2.13.dist-info/RECORD +575 -0
- agno-2.2.13.dist-info/WHEEL +5 -0
- agno-2.2.13.dist-info/licenses/LICENSE +201 -0
- agno-2.2.13.dist-info/top_level.txt +1 -0
agno/models/base.py
ADDED
@@ -0,0 +1,2175 @@
import asyncio
import collections.abc
import json
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from hashlib import md5
from pathlib import Path
from time import time
from types import AsyncGeneratorType, GeneratorType
from typing import (
    Any,
    AsyncIterator,
    Dict,
    Iterator,
    List,
    Literal,
    Optional,
    Tuple,
    Type,
    Union,
    get_args,
)
from uuid import uuid4

from pydantic import BaseModel

from agno.exceptions import AgentRunException
from agno.media import Audio, File, Image, Video
from agno.models.message import Citations, Message
from agno.models.metrics import Metrics
from agno.models.response import ModelResponse, ModelResponseEvent, ToolExecution
from agno.run.agent import CustomEvent, RunContentEvent, RunOutput, RunOutputEvent
from agno.run.team import RunContentEvent as TeamRunContentEvent
from agno.run.team import TeamRunOutput, TeamRunOutputEvent
from agno.run.workflow import WorkflowRunOutputEvent
from agno.tools.function import Function, FunctionCall, FunctionExecutionResult, UserInputField
from agno.utils.log import log_debug, log_error, log_info, log_warning
from agno.utils.timer import Timer
from agno.utils.tools import get_function_call_for_tool_call, get_function_call_for_tool_execution

@dataclass
class MessageData:
    response_role: Optional[Literal["system", "user", "assistant", "tool"]] = None
    response_content: Any = ""
    response_reasoning_content: Any = ""
    response_redacted_reasoning_content: Any = ""
    response_citations: Optional[Citations] = None
    response_tool_calls: List[Dict[str, Any]] = field(default_factory=list)

    response_audio: Optional[Audio] = None
    response_image: Optional[Image] = None
    response_video: Optional[Video] = None
    response_file: Optional[File] = None

    response_metrics: Optional[Metrics] = None

    # Data from the provider that we might need on subsequent messages
    response_provider_data: Optional[Dict[str, Any]] = None

    extra: Optional[Dict[str, Any]] = None


def _log_messages(messages: List[Message]) -> None:
    """
    Log messages for debugging.
    """
    for m in messages:
        # Don't log metrics for input messages
        m.log(metrics=False)

def _handle_agent_exception(a_exc: AgentRunException, additional_input: Optional[List[Message]] = None) -> None:
    """Handle AgentRunException and collect additional messages."""
    if additional_input is None:
        additional_input = []
    if a_exc.user_message is not None:
        msg = (
            Message(role="user", content=a_exc.user_message)
            if isinstance(a_exc.user_message, str)
            else a_exc.user_message
        )
        additional_input.append(msg)

    if a_exc.agent_message is not None:
        msg = (
            Message(role="assistant", content=a_exc.agent_message)
            if isinstance(a_exc.agent_message, str)
            else a_exc.agent_message
        )
        additional_input.append(msg)

    if a_exc.messages:
        for m in a_exc.messages:
            if isinstance(m, Message):
                additional_input.append(m)
            elif isinstance(m, dict):
                try:
                    additional_input.append(Message(**m))
                except Exception as e:
                    log_warning(f"Failed to convert dict to Message: {e}")

    if a_exc.stop_execution:
        for m in additional_input:
            m.stop_after_tool_call = True

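A hedged usage sketch of the path above: a tool raises AgentRunException to inject extra messages into the run and optionally halt it. The constructor keywords are an assumption inferred from the attributes this helper reads (user_message, agent_message, messages, stop_execution); check agno.exceptions before relying on them.

from agno.exceptions import AgentRunException

def lookup_order(order_id: str) -> str:
    if not order_id.isdigit():
        # Assumed kwargs, mirroring the attributes consumed by
        # _handle_agent_exception. With stop_execution=True, the injected
        # messages are marked stop_after_tool_call=True and the run halts.
        raise AgentRunException(
            user_message="Please provide a numeric order id.",
            stop_execution=True,
        )
    return f"Order {order_id} is in transit."
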
@dataclass
class Model(ABC):
    # ID of the model to use.
    id: str
    # Name for this Model. This is not sent to the Model API.
    name: Optional[str] = None
    # Provider for this Model. This is not sent to the Model API.
    provider: Optional[str] = None

    # -*- Do not set the following attributes directly -*-
    # -*- Set them on the Agent instead -*-

    # True if the Model supports structured outputs natively (e.g. OpenAI)
    supports_native_structured_outputs: bool = False
    # True if the Model requires a json_schema for structured outputs (e.g. LMStudio)
    supports_json_schema_outputs: bool = False

    # Controls which (if any) function is called by the model.
    # "none" means the model will not call a function and instead generates a message.
    # "auto" means the model can pick between generating a message or calling a function.
    # Specifying a particular function via {"type": "function", "function": {"name": "my_function"}}
    # forces the model to call that function.
    # "none" is the default when no functions are present. "auto" is the default if functions are present.
    _tool_choice: Optional[Union[str, Dict[str, Any]]] = None

    # System prompt from the model added to the Agent.
    system_prompt: Optional[str] = None
    # Instructions from the model added to the Agent.
    instructions: Optional[List[str]] = None

    # The role of the tool message.
    tool_message_role: str = "tool"
    # The role of the assistant message.
    assistant_message_role: str = "assistant"

    # Cache model responses to avoid redundant API calls during development
    cache_response: bool = False
    cache_ttl: Optional[int] = None
    cache_dir: Optional[str] = None

    def __post_init__(self):
        if self.provider is None and self.name is not None:
            self.provider = f"{self.name} ({self.id})"

    def to_dict(self) -> Dict[str, Any]:
        fields = {"name", "id", "provider"}
        _dict = {field: getattr(self, field) for field in fields if getattr(self, field) is not None}
        return _dict

    def get_provider(self) -> str:
        return self.provider or self.name or self.__class__.__name__

    def _get_model_cache_key(self, messages: List[Message], stream: bool, **kwargs: Any) -> str:
        """Generate a cache key based on model messages and core parameters."""
        message_data = []
        for msg in messages:
            msg_dict = {
                "role": msg.role,
                "content": msg.content,
            }
            message_data.append(msg_dict)

        # Include tools parameter in cache key
        has_tools = bool(kwargs.get("tools"))

        cache_data = {
            "model_id": self.id,
            "messages": message_data,
            "has_tools": has_tools,
            "response_format": kwargs.get("response_format"),
            "stream": stream,
        }

        cache_str = json.dumps(cache_data, sort_keys=True)
        return md5(cache_str.encode()).hexdigest()

    def _get_model_cache_file_path(self, cache_key: str) -> Path:
        """Get the file path for a cache key."""
        if self.cache_dir:
            cache_dir = Path(self.cache_dir)
        else:
            cache_dir = Path.home() / ".agno" / "cache" / "model_responses"

        cache_dir.mkdir(parents=True, exist_ok=True)
        return cache_dir / f"{cache_key}.json"

    def _get_cached_model_response(self, cache_key: str) -> Optional[Dict[str, Any]]:
        """Retrieve a cached response if it exists and is not expired."""
        cache_file = self._get_model_cache_file_path(cache_key)

        if not cache_file.exists():
            return None

        try:
            with open(cache_file, "r") as f:
                cached_data = json.load(f)

            # Check TTL if set (None means no expiration)
            if self.cache_ttl is not None:
                if time() - cached_data["timestamp"] > self.cache_ttl:
                    return None

            return cached_data
        except Exception:
            return None

    def _save_model_response_to_cache(self, cache_key: str, result: ModelResponse, is_streaming: bool = False) -> None:
        """Save a model response to cache."""
        try:
            cache_file = self._get_model_cache_file_path(cache_key)

            cache_data = {
                "timestamp": int(time()),
                "is_streaming": is_streaming,
                "result": result.to_dict(),
            }
            with open(cache_file, "w") as f:
                json.dump(cache_data, f)
        except Exception:
            pass

    def _save_streaming_responses_to_cache(self, cache_key: str, responses: List[ModelResponse]) -> None:
        """Save streaming responses to cache."""
        cache_file = self._get_model_cache_file_path(cache_key)

        cache_data = {
            "timestamp": int(time()),
            "is_streaming": True,
            "streaming_responses": [r.to_dict() for r in responses],
        }

        try:
            with open(cache_file, "w") as f:
                json.dump(cache_data, f)
        except Exception:
            pass

    def _model_response_from_cache(self, cached_data: Dict[str, Any]) -> ModelResponse:
        """Reconstruct a ModelResponse from cached data."""
        return ModelResponse.from_dict(cached_data["result"])

    def _streaming_responses_from_cache(self, cached_data: list) -> Iterator[ModelResponse]:
        """Reconstruct streaming responses from cached data."""
        for cached_response in cached_data:
            yield ModelResponse.from_dict(cached_response)

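Taken together, the helpers above implement a small file-backed memo: the key is an MD5 digest of the request shape, entries live as JSON files under ~/.agno/cache/model_responses (or cache_dir), and cache_ttl bounds their age. A standalone sketch of the same key-and-TTL scheme, runnable without agno:

import json
import time
from hashlib import md5
from pathlib import Path
from typing import Any, Optional

def make_cache_key(model_id: str, messages: list, has_tools: bool, response_format: Any, stream: bool) -> str:
    payload = {
        "model_id": model_id,
        "messages": messages,  # [{"role": ..., "content": ...}, ...]
        "has_tools": has_tools,
        "response_format": response_format,  # must be JSON-serializable here
        "stream": stream,
    }
    # sort_keys keeps the digest stable regardless of dict insertion order
    return md5(json.dumps(payload, sort_keys=True).encode()).hexdigest()

def read_cached(cache_dir: Path, key: str, ttl: Optional[int]) -> Optional[dict]:
    path = cache_dir / f"{key}.json"
    if not path.exists():
        return None
    data = json.loads(path.read_text())
    if ttl is not None and time.time() - data["timestamp"] > ttl:
        return None  # expired entry; the caller falls through to a live call
    return data

key = make_cache_key("gpt-4o", [{"role": "user", "content": "hi"}], False, None, False)

On a real model, enabling this layer is just cache_response=True, plus cache_ttl and cache_dir as needed, per the attributes above.
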
    @abstractmethod
    def invoke(self, *args, **kwargs) -> ModelResponse:
        pass

    @abstractmethod
    async def ainvoke(self, *args, **kwargs) -> ModelResponse:
        pass

    @abstractmethod
    def invoke_stream(self, *args, **kwargs) -> Iterator[ModelResponse]:
        pass

    @abstractmethod
    def ainvoke_stream(self, *args, **kwargs) -> AsyncIterator[ModelResponse]:
        pass

    @abstractmethod
    def _parse_provider_response(self, response: Any, **kwargs) -> ModelResponse:
        """
        Parse the raw response from the model provider into a ModelResponse.

        Args:
            response: Raw response from the model provider

        Returns:
            ModelResponse: Parsed response data
        """
        pass

    @abstractmethod
    def _parse_provider_response_delta(self, response: Any) -> ModelResponse:
        """
        Parse the streaming response from the model provider into ModelResponse objects.

        Args:
            response: Raw response chunk from the model provider

        Returns:
            ModelResponse: Parsed response delta
        """
        pass

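These six hooks are the entire provider contract; everything else in this class is shared orchestration. A minimal, hypothetical EchoModel sketch showing their shape. The ModelResponse(content=...) constructor call and the keyword-argument signatures are assumptions (the base declares the hooks as *args/**kwargs), so treat this as an illustration rather than agno's reference pattern:

from dataclasses import dataclass
from typing import Any, AsyncIterator, Iterator, List

@dataclass
class EchoModel(Model):
    """Hypothetical provider that echoes the last message back."""

    def invoke(self, messages: List[Message], **kwargs) -> ModelResponse:
        return self._parse_provider_response(messages[-1].get_content_string())

    async def ainvoke(self, messages: List[Message], **kwargs) -> ModelResponse:
        return self.invoke(messages=messages, **kwargs)

    def invoke_stream(self, messages: List[Message], **kwargs) -> Iterator[ModelResponse]:
        yield self._parse_provider_response_delta(messages[-1].get_content_string())

    async def ainvoke_stream(self, messages: List[Message], **kwargs) -> AsyncIterator[ModelResponse]:
        yield self._parse_provider_response_delta(messages[-1].get_content_string())

    def _parse_provider_response(self, response: Any, **kwargs) -> ModelResponse:
        return ModelResponse(content=response)  # assumed kwarg; see agno.models.response

    def _parse_provider_response_delta(self, response: Any) -> ModelResponse:
        return ModelResponse(content=response)

# e.g. EchoModel(id="echo-1").response(messages=[Message(role="user", content="hi")])
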
    def _format_tools(self, tools: Optional[List[Union[Function, dict]]]) -> List[Dict[str, Any]]:
        _tool_dicts = []
        for tool in tools or []:
            if isinstance(tool, Function):
                _tool_dicts.append({"type": "function", "function": tool.to_dict()})
            else:
                # If a dict is passed, it is a builtin tool
                _tool_dicts.append(tool)
        return _tool_dicts

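In practice, tools arrive mixed: Function objects are wrapped as {"type": "function", ...} dicts, while plain dicts (provider built-in tools) pass through untouched. A small sketch; Function.from_callable is an assumption about agno.tools.function, and the built-in tool dict is hypothetical:

from agno.tools.function import Function

def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

tools = [
    Function.from_callable(add),   # assumed helper that wraps a plain callable
    {"type": "code_interpreter"},  # hypothetical provider built-in, passed through as-is
]
# _format_tools(tools) would yield:
# [{"type": "function", "function": {...add's schema...}}, {"type": "code_interpreter"}]
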
    def response(
        self,
        messages: List[Message],
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Union[Function, dict]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        tool_call_limit: Optional[int] = None,
        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
        send_media_to_model: bool = True,
    ) -> ModelResponse:
        """
        Generate a response from the model.

        Args:
            messages: List of messages to send to the model
            response_format: Response format to use
            tools: List of tools to use. This includes the original Function objects and dicts for built-in tools.
            tool_choice: Tool choice to use
            tool_call_limit: Tool call limit
            run_response: Run response to use
            send_media_to_model: Whether to send media to the model
        """

        # Check cache if enabled
        if self.cache_response:
            cache_key = self._get_model_cache_key(messages, stream=False, response_format=response_format, tools=tools)
            cached_data = self._get_cached_model_response(cache_key)

            if cached_data:
                log_info("Cache hit for model response")
                return self._model_response_from_cache(cached_data)

        log_debug(f"{self.get_provider()} Response Start", center=True, symbol="-")
        log_debug(f"Model: {self.id}", center=True, symbol="-")

        _log_messages(messages)
        model_response = ModelResponse()

        function_call_count = 0

        _tool_dicts = self._format_tools(tools) if tools is not None else []
        _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)} if tools is not None else {}

        while True:
            # Get response from model
            assistant_message = Message(role=self.assistant_message_role)
            self._process_model_response(
                messages=messages,
                assistant_message=assistant_message,
                model_response=model_response,
                response_format=response_format,
                tools=_tool_dicts,
                tool_choice=tool_choice or self._tool_choice,
                run_response=run_response,
            )

            # Add assistant message to messages
            messages.append(assistant_message)

            # Log response and metrics
            assistant_message.log(metrics=True)

            # Handle tool calls if present
            if assistant_message.tool_calls:
                # Prepare function calls
                function_calls_to_run = self._prepare_function_calls(
                    assistant_message=assistant_message,
                    messages=messages,
                    model_response=model_response,
                    functions=_functions,
                )
                function_call_results: List[Message] = []

                # Execute function calls
                for function_call_response in self.run_function_calls(
                    function_calls=function_calls_to_run,
                    function_call_results=function_call_results,
                    current_function_call_count=function_call_count,
                    function_call_limit=tool_call_limit,
                ):
                    if isinstance(function_call_response, ModelResponse):
                        # The session state is updated by the function call
                        if function_call_response.updated_session_state is not None:
                            model_response.updated_session_state = function_call_response.updated_session_state

                        # Media artifacts are generated by the function call
                        if function_call_response.images is not None:
                            if model_response.images is None:
                                model_response.images = []
                            model_response.images.extend(function_call_response.images)

                        if function_call_response.audios is not None:
                            if model_response.audios is None:
                                model_response.audios = []
                            model_response.audios.extend(function_call_response.audios)

                        if function_call_response.videos is not None:
                            if model_response.videos is None:
                                model_response.videos = []
                            model_response.videos.extend(function_call_response.videos)

                        if function_call_response.files is not None:
                            if model_response.files is None:
                                model_response.files = []
                            model_response.files.extend(function_call_response.files)

                        if (
                            function_call_response.event
                            in [
                                ModelResponseEvent.tool_call_completed.value,
                                ModelResponseEvent.tool_call_paused.value,
                            ]
                            and function_call_response.tool_executions is not None
                        ):
                            if model_response.tool_executions is None:
                                model_response.tool_executions = []
                            model_response.tool_executions.extend(function_call_response.tool_executions)

                        elif function_call_response.event not in [
                            ModelResponseEvent.tool_call_started.value,
                            ModelResponseEvent.tool_call_completed.value,
                        ]:
                            if function_call_response.content:
                                model_response.content += function_call_response.content  # type: ignore

                # Add a function call for each successful execution
                function_call_count += len(function_call_results)

                # Format and add results to messages
                self.format_function_call_results(
                    messages=messages, function_call_results=function_call_results, **model_response.extra or {}
                )

                if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
                    # Handle function call media
                    self._handle_function_call_media(
                        messages=messages,
                        function_call_results=function_call_results,
                        send_media_to_model=send_media_to_model,
                    )

                for function_call_result in function_call_results:
                    function_call_result.log(metrics=True)

                # Check if we should stop after tool calls
                if any(m.stop_after_tool_call for m in function_call_results):
                    break

                # If we have any tool calls that require confirmation, break the loop
                if any(tc.requires_confirmation for tc in model_response.tool_executions or []):
                    break

                # If we have any tool calls that require external execution, break the loop
                if any(tc.external_execution_required for tc in model_response.tool_executions or []):
                    break

                # If we have any tool calls that require user input, break the loop
                if any(tc.requires_user_input for tc in model_response.tool_executions or []):
                    break

                # Continue loop to get next response
                continue

            # No tool calls or finished processing them
            break

        log_debug(f"{self.get_provider()} Response End", center=True, symbol="-")

        # Save to cache if enabled
        if self.cache_response:
            self._save_model_response_to_cache(cache_key, model_response, is_streaming=False)

        return model_response

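Condensed, response() is a loop: one provider round-trip, then either a plain answer (break) or tool execution followed by another round-trip, with early breaks when a tool call needs confirmation, external execution, or user input. A standalone control-flow sketch with dict-shaped stand-ins (not agno API):

def run_until_done(invoke_once, run_tools):
    """Minimal mirror of the response() loop above."""
    messages: list = []
    while True:
        assistant = invoke_once(messages)  # one provider round-trip
        messages.append(assistant)
        tool_calls = assistant.get("tool_calls")
        if not tool_calls:
            return messages  # plain answer: the loop ends
        results = run_tools(tool_calls)
        messages.extend(results)
        # The four pause/stop conditions mirrored from above:
        if any(
            r.get("stop_after_tool_call")
            or r.get("requires_confirmation")
            or r.get("external_execution_required")
            or r.get("requires_user_input")
            for r in results
        ):
            return messages
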
    async def aresponse(
        self,
        messages: List[Message],
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Union[Function, dict]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        tool_call_limit: Optional[int] = None,
        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
        send_media_to_model: bool = True,
    ) -> ModelResponse:
        """
        Generate an asynchronous response from the model.
        """

        # Check cache if enabled
        if self.cache_response:
            cache_key = self._get_model_cache_key(messages, stream=False, response_format=response_format, tools=tools)
            cached_data = self._get_cached_model_response(cache_key)

            if cached_data:
                log_info("Cache hit for model response")
                return self._model_response_from_cache(cached_data)

        log_debug(f"{self.get_provider()} Async Response Start", center=True, symbol="-")
        log_debug(f"Model: {self.id}", center=True, symbol="-")
        _log_messages(messages)
        model_response = ModelResponse()

        _tool_dicts = self._format_tools(tools) if tools is not None else []
        _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)} if tools is not None else {}

        function_call_count = 0

        while True:
            # Get response from model
            assistant_message = Message(role=self.assistant_message_role)
            await self._aprocess_model_response(
                messages=messages,
                assistant_message=assistant_message,
                model_response=model_response,
                response_format=response_format,
                tools=_tool_dicts,
                tool_choice=tool_choice or self._tool_choice,
                run_response=run_response,
            )

            # Add assistant message to messages
            messages.append(assistant_message)

            # Log response and metrics
            assistant_message.log(metrics=True)

            # Handle tool calls if present
            if assistant_message.tool_calls:
                # Prepare function calls
                function_calls_to_run = self._prepare_function_calls(
                    assistant_message=assistant_message,
                    messages=messages,
                    model_response=model_response,
                    functions=_functions,
                )
                function_call_results: List[Message] = []

                # Execute function calls
                async for function_call_response in self.arun_function_calls(
                    function_calls=function_calls_to_run,
                    function_call_results=function_call_results,
                    current_function_call_count=function_call_count,
                    function_call_limit=tool_call_limit,
                ):
                    if isinstance(function_call_response, ModelResponse):
                        # The session state is updated by the function call
                        if function_call_response.updated_session_state is not None:
                            model_response.updated_session_state = function_call_response.updated_session_state

                        # Media artifacts are generated by the function call
                        if function_call_response.images is not None:
                            if model_response.images is None:
                                model_response.images = []
                            model_response.images.extend(function_call_response.images)

                        if function_call_response.audios is not None:
                            if model_response.audios is None:
                                model_response.audios = []
                            model_response.audios.extend(function_call_response.audios)

                        if function_call_response.videos is not None:
                            if model_response.videos is None:
                                model_response.videos = []
                            model_response.videos.extend(function_call_response.videos)

                        if function_call_response.files is not None:
                            if model_response.files is None:
                                model_response.files = []
                            model_response.files.extend(function_call_response.files)

                        if (
                            function_call_response.event
                            in [
                                ModelResponseEvent.tool_call_completed.value,
                                ModelResponseEvent.tool_call_paused.value,
                            ]
                            and function_call_response.tool_executions is not None
                        ):
                            if model_response.tool_executions is None:
                                model_response.tool_executions = []
                            model_response.tool_executions.extend(function_call_response.tool_executions)
                        elif function_call_response.event not in [
                            ModelResponseEvent.tool_call_started.value,
                            ModelResponseEvent.tool_call_completed.value,
                        ]:
                            if function_call_response.content:
                                model_response.content += function_call_response.content  # type: ignore

                # Add a function call for each successful execution
                function_call_count += len(function_call_results)

                # Format and add results to messages
                self.format_function_call_results(
                    messages=messages, function_call_results=function_call_results, **model_response.extra or {}
                )

                if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
                    # Handle function call media
                    self._handle_function_call_media(
                        messages=messages,
                        function_call_results=function_call_results,
                        send_media_to_model=send_media_to_model,
                    )

                for function_call_result in function_call_results:
                    function_call_result.log(metrics=True)

                # Check if we should stop after tool calls
                if any(m.stop_after_tool_call for m in function_call_results):
                    break

                # If we have any tool calls that require confirmation, break the loop
                if any(tc.requires_confirmation for tc in model_response.tool_executions or []):
                    break

                # If we have any tool calls that require external execution, break the loop
                if any(tc.external_execution_required for tc in model_response.tool_executions or []):
                    break

                # If we have any tool calls that require user input, break the loop
                if any(tc.requires_user_input for tc in model_response.tool_executions or []):
                    break

                # Continue loop to get next response
                continue

            # No tool calls or finished processing them
            break

        log_debug(f"{self.get_provider()} Async Response End", center=True, symbol="-")

        # Save to cache if enabled
        if self.cache_response:
            self._save_model_response_to_cache(cache_key, model_response, is_streaming=False)

        return model_response

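aresponse() is the awaitable twin of response(), swapping invoke/run_function_calls for ainvoke/arun_function_calls. A usage sketch; OpenAIChat stands in for any concrete subclass, the argument set shown is minimal by assumption, and a valid OPENAI_API_KEY is required:

import asyncio

from agno.models.message import Message
from agno.models.openai import OpenAIChat

async def main() -> None:
    model = OpenAIChat(id="gpt-4o")  # any concrete Model subclass
    output = await model.aresponse(messages=[Message(role="user", content="hello")])
    print(output.content)

asyncio.run(main())
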
    def _process_model_response(
        self,
        messages: List[Message],
        assistant_message: Message,
        model_response: ModelResponse,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
    ) -> None:
        """
        Process a single model response, populating assistant_message and model_response in place.
        """
        # Generate response
        provider_response = self.invoke(
            assistant_message=assistant_message,
            messages=messages,
            response_format=response_format,
            tools=tools,
            tool_choice=tool_choice or self._tool_choice,
            run_response=run_response,
        )

        # Populate the assistant message
        self._populate_assistant_message(assistant_message=assistant_message, provider_response=provider_response)

        # Update model response with assistant message content and audio
        if assistant_message.content is not None:
            if model_response.content is None:
                model_response.content = assistant_message.get_content_string()
            else:
                model_response.content += assistant_message.get_content_string()
        if assistant_message.reasoning_content is not None:
            model_response.reasoning_content = assistant_message.reasoning_content
        if assistant_message.redacted_reasoning_content is not None:
            model_response.redacted_reasoning_content = assistant_message.redacted_reasoning_content
        if assistant_message.citations is not None:
            model_response.citations = assistant_message.citations
        if assistant_message.audio_output is not None:
            if isinstance(assistant_message.audio_output, Audio):
                model_response.audio = assistant_message.audio_output
        if assistant_message.image_output is not None:
            model_response.images = [assistant_message.image_output]
        if assistant_message.video_output is not None:
            model_response.videos = [assistant_message.video_output]
        if provider_response.extra is not None:
            if model_response.extra is None:
                model_response.extra = {}
            model_response.extra.update(provider_response.extra)
        if provider_response.provider_data is not None:
            model_response.provider_data = provider_response.provider_data

    async def _aprocess_model_response(
        self,
        messages: List[Message],
        assistant_message: Message,
        model_response: ModelResponse,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
    ) -> None:
        """
        Process a single async model response, populating assistant_message and model_response in place.
        """
        # Generate response
        provider_response = await self.ainvoke(
            messages=messages,
            response_format=response_format,
            tools=tools,
            tool_choice=tool_choice or self._tool_choice,
            assistant_message=assistant_message,
            run_response=run_response,
        )

        # Populate the assistant message
        self._populate_assistant_message(assistant_message=assistant_message, provider_response=provider_response)

        # Update model response with assistant message content and audio
        if assistant_message.content is not None:
            if model_response.content is None:
                model_response.content = assistant_message.get_content_string()
            else:
                model_response.content += assistant_message.get_content_string()
        if assistant_message.reasoning_content is not None:
            model_response.reasoning_content = assistant_message.reasoning_content
        if assistant_message.redacted_reasoning_content is not None:
            model_response.redacted_reasoning_content = assistant_message.redacted_reasoning_content
        if assistant_message.citations is not None:
            model_response.citations = assistant_message.citations
        if assistant_message.audio_output is not None:
            if isinstance(assistant_message.audio_output, Audio):
                model_response.audio = assistant_message.audio_output
        if assistant_message.image_output is not None:
            model_response.images = [assistant_message.image_output]
        if assistant_message.video_output is not None:
            model_response.videos = [assistant_message.video_output]
        if provider_response.extra is not None:
            if model_response.extra is None:
                model_response.extra = {}
            model_response.extra.update(provider_response.extra)
        if provider_response.provider_data is not None:
            model_response.provider_data = provider_response.provider_data

def _populate_assistant_message(
|
|
754
|
+
self,
|
|
755
|
+
assistant_message: Message,
|
|
756
|
+
provider_response: ModelResponse,
|
|
757
|
+
) -> Message:
|
|
758
|
+
"""
|
|
759
|
+
Populate an assistant message with the provider response data.
|
|
760
|
+
|
|
761
|
+
Args:
|
|
762
|
+
assistant_message: The assistant message to populate
|
|
763
|
+
provider_response: Parsed response from the model provider
|
|
764
|
+
|
|
765
|
+
Returns:
|
|
766
|
+
Message: The populated assistant message
|
|
767
|
+
"""
|
|
768
|
+
if provider_response.role is not None:
|
|
769
|
+
assistant_message.role = provider_response.role
|
|
770
|
+
|
|
771
|
+
# Add content to assistant message
|
|
772
|
+
if provider_response.content is not None:
|
|
773
|
+
assistant_message.content = provider_response.content
|
|
774
|
+
|
|
775
|
+
# Add tool calls to assistant message
|
|
776
|
+
if provider_response.tool_calls is not None and len(provider_response.tool_calls) > 0:
|
|
777
|
+
assistant_message.tool_calls = provider_response.tool_calls
|
|
778
|
+
|
|
779
|
+
# Add audio to assistant message
|
|
780
|
+
if provider_response.audio is not None:
|
|
781
|
+
assistant_message.audio_output = provider_response.audio
|
|
782
|
+
|
|
783
|
+
# Add image to assistant message
|
|
784
|
+
if provider_response.images is not None:
|
|
785
|
+
if provider_response.images:
|
|
786
|
+
assistant_message.image_output = provider_response.images[-1] # Taking last (most recent) image
|
|
787
|
+
|
|
788
|
+
# Add video to assistant message
|
|
789
|
+
if provider_response.videos is not None:
|
|
790
|
+
if provider_response.videos:
|
|
791
|
+
assistant_message.video_output = provider_response.videos[-1] # Taking last (most recent) video
|
|
792
|
+
|
|
793
|
+
if provider_response.files is not None:
|
|
794
|
+
if provider_response.files:
|
|
795
|
+
assistant_message.file_output = provider_response.files[-1] # Taking last (most recent) file
|
|
796
|
+
|
|
797
|
+
if provider_response.audios is not None:
|
|
798
|
+
if provider_response.audios:
|
|
799
|
+
assistant_message.audio_output = provider_response.audios[-1] # Taking last (most recent) audio
|
|
800
|
+
|
|
801
|
+
# Add redacted thinking content to assistant message
|
|
802
|
+
if provider_response.redacted_reasoning_content is not None:
|
|
803
|
+
assistant_message.redacted_reasoning_content = provider_response.redacted_reasoning_content
|
|
804
|
+
|
|
805
|
+
# Add reasoning content to assistant message
|
|
806
|
+
if provider_response.reasoning_content is not None:
|
|
807
|
+
assistant_message.reasoning_content = provider_response.reasoning_content
|
|
808
|
+
|
|
809
|
+
# Add provider data to assistant message
|
|
810
|
+
if provider_response.provider_data is not None:
|
|
811
|
+
assistant_message.provider_data = provider_response.provider_data
|
|
812
|
+
|
|
813
|
+
# Add citations to assistant message
|
|
814
|
+
if provider_response.citations is not None:
|
|
815
|
+
assistant_message.citations = provider_response.citations
|
|
816
|
+
|
|
817
|
+
# Add usage metrics if provided
|
|
818
|
+
if provider_response.response_usage is not None:
|
|
819
|
+
assistant_message.metrics += provider_response.response_usage
|
|
820
|
+
|
|
821
|
+
return assistant_message
|
|
822
|
+
|
|
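The method above is a straight field-by-field copy that treats `None` as "not sent by the provider". For orientation, the same pattern can be sketched generically with stand-in dataclasses (these are illustrative, not the real agno `Message`/`ModelResponse` classes):

```python
from dataclasses import dataclass, fields
from typing import Optional


@dataclass
class ProviderResponse:
    content: Optional[str] = None
    reasoning_content: Optional[str] = None


@dataclass
class AssistantMessage:
    role: str = "assistant"
    content: Optional[str] = None
    reasoning_content: Optional[str] = None


def populate(message: AssistantMessage, response: ProviderResponse) -> AssistantMessage:
    # None means "the provider did not send this field", so leave the target untouched.
    for f in fields(response):
        value = getattr(response, f.name)
        if value is not None:
            setattr(message, f.name, value)
    return message


print(populate(AssistantMessage(), ProviderResponse(content="hi")))
```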
    def process_response_stream(
        self,
        messages: List[Message],
        assistant_message: Message,
        stream_data: MessageData,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
    ) -> Iterator[ModelResponse]:
        """
        Process a streaming response from the model.
        """

        for response_delta in self.invoke_stream(
            messages=messages,
            assistant_message=assistant_message,
            response_format=response_format,
            tools=tools,
            tool_choice=tool_choice or self._tool_choice,
            run_response=run_response,
        ):
            for model_response_delta in self._populate_stream_data(
                stream_data=stream_data,
                model_response_delta=response_delta,
            ):
                yield model_response_delta

        # Populate assistant message from stream data after the stream ends
        self._populate_assistant_message_from_stream_data(assistant_message=assistant_message, stream_data=stream_data)
    def response_stream(
        self,
        messages: List[Message],
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Union[Function, dict]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        tool_call_limit: Optional[int] = None,
        stream_model_response: bool = True,
        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
        send_media_to_model: bool = True,
    ) -> Iterator[Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent]]:
        """
        Generate a streaming response from the model.
        """

        # Check cache if enabled - capture the key BEFORE streaming to avoid a mismatch
        cache_key = None
        if self.cache_response:
            cache_key = self._get_model_cache_key(messages, stream=True, response_format=response_format, tools=tools)
            cached_data = self._get_cached_model_response(cache_key)

            if cached_data:
                log_info("Cache hit for streaming model response")
                # Yield cached responses
                for response in self._streaming_responses_from_cache(cached_data["streaming_responses"]):
                    yield response
                return

            log_info("Cache miss for streaming model response")

        # Track streaming responses for caching
        streaming_responses: List[ModelResponse] = []

        log_debug(f"{self.get_provider()} Response Stream Start", center=True, symbol="-")
        log_debug(f"Model: {self.id}", center=True, symbol="-")
        _log_messages(messages)

        _tool_dicts = self._format_tools(tools) if tools is not None else []
        _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)} if tools is not None else {}

        function_call_count = 0

        while True:
            # Create assistant message and stream data
            assistant_message = Message(role=self.assistant_message_role)
            stream_data = MessageData()
            model_response = ModelResponse()
            if stream_model_response:
                # Generate response
                for response in self.process_response_stream(
                    messages=messages,
                    assistant_message=assistant_message,
                    stream_data=stream_data,
                    response_format=response_format,
                    tools=_tool_dicts,
                    tool_choice=tool_choice or self._tool_choice,
                    run_response=run_response,
                ):
                    if self.cache_response and isinstance(response, ModelResponse):
                        streaming_responses.append(response)
                    yield response

            else:
                self._process_model_response(
                    messages=messages,
                    assistant_message=assistant_message,
                    model_response=model_response,
                    response_format=response_format,
                    tools=_tool_dicts,
                    tool_choice=tool_choice or self._tool_choice,
                )
                if self.cache_response:
                    streaming_responses.append(model_response)
                yield model_response

            # Add assistant message to messages
            messages.append(assistant_message)
            assistant_message.log(metrics=True)

            # Handle tool calls if present
            if assistant_message.tool_calls is not None:
                # Prepare function calls
                function_calls_to_run: List[FunctionCall] = self.get_function_calls_to_run(
                    assistant_message=assistant_message, messages=messages, functions=_functions
                )
                function_call_results: List[Message] = []

                # Execute function calls
                for function_call_response in self.run_function_calls(
                    function_calls=function_calls_to_run,
                    function_call_results=function_call_results,
                    current_function_call_count=function_call_count,
                    function_call_limit=tool_call_limit,
                ):
                    if self.cache_response and isinstance(function_call_response, ModelResponse):
                        streaming_responses.append(function_call_response)
                    yield function_call_response

                # Count each executed function call
                function_call_count += len(function_call_results)

                # Format and add results to messages
                if stream_data and stream_data.extra is not None:
                    self.format_function_call_results(
                        messages=messages, function_call_results=function_call_results, **stream_data.extra
                    )
                elif model_response and model_response.extra is not None:
                    self.format_function_call_results(
                        messages=messages, function_call_results=function_call_results, **model_response.extra
                    )
                else:
                    self.format_function_call_results(messages=messages, function_call_results=function_call_results)

                # Handle function call media
                if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
                    self._handle_function_call_media(
                        messages=messages,
                        function_call_results=function_call_results,
                        send_media_to_model=send_media_to_model,
                    )

                for function_call_result in function_call_results:
                    function_call_result.log(metrics=True)

                # Check if we should stop after tool calls
                if any(m.stop_after_tool_call for m in function_call_results):
                    break

                # If we have any tool calls that require confirmation, break the loop
                if any(fc.function.requires_confirmation for fc in function_calls_to_run):
                    break

                # If we have any tool calls that require external execution, break the loop
                if any(fc.function.external_execution for fc in function_calls_to_run):
                    break

                # If we have any tool calls that require user input, break the loop
                if any(fc.function.requires_user_input for fc in function_calls_to_run):
                    break

                # Continue loop to get next response
                continue

            # No tool calls or finished processing them
            break

        log_debug(f"{self.get_provider()} Response Stream End", center=True, symbol="-")

        # Save streaming responses to cache if enabled
        if self.cache_response and cache_key and streaming_responses:
            self._save_streaming_responses_to_cache(cache_key, streaming_responses)
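A concrete provider subclass supplies `invoke_stream`; the loop above is what callers iterate. A minimal consumption sketch, assuming agno is installed, an `OPENAI_API_KEY` is configured, and the import paths and `OpenAIChat` constructor match agno's published API:

```python
from agno.models.message import Message
from agno.models.openai import OpenAIChat

model = OpenAIChat(id="gpt-4o-mini")  # assumed provider subclass and model id
messages = [Message(role="user", content="Say hello in one word.")]

# response_stream yields ModelResponse deltas and tool-call events as they arrive.
for delta in model.response_stream(messages=messages):
    if getattr(delta, "content", None):
        print(delta.content, end="", flush=True)
print()
```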
    async def aprocess_response_stream(
        self,
        messages: List[Message],
        assistant_message: Message,
        stream_data: MessageData,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
    ) -> AsyncIterator[ModelResponse]:
        """
        Process a streaming response from the model.
        """
        async for response_delta in self.ainvoke_stream(
            messages=messages,
            assistant_message=assistant_message,
            response_format=response_format,
            tools=tools,
            tool_choice=tool_choice or self._tool_choice,
            run_response=run_response,
        ):  # type: ignore
            for model_response_delta in self._populate_stream_data(
                stream_data=stream_data,
                model_response_delta=response_delta,
            ):
                yield model_response_delta

        # Populate assistant message from stream data after the stream ends
        self._populate_assistant_message_from_stream_data(assistant_message=assistant_message, stream_data=stream_data)
    async def aresponse_stream(
        self,
        messages: List[Message],
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Union[Function, dict]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        tool_call_limit: Optional[int] = None,
        stream_model_response: bool = True,
        run_response: Optional[Union[RunOutput, TeamRunOutput]] = None,
        send_media_to_model: bool = True,
    ) -> AsyncIterator[Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent]]:
        """
        Generate an asynchronous streaming response from the model.
        """

        # Check cache if enabled - capture the key BEFORE streaming to avoid a mismatch
        cache_key = None
        if self.cache_response:
            cache_key = self._get_model_cache_key(messages, stream=True, response_format=response_format, tools=tools)
            cached_data = self._get_cached_model_response(cache_key)

            if cached_data:
                log_info("Cache hit for async streaming model response")
                # Yield cached responses
                for response in self._streaming_responses_from_cache(cached_data["streaming_responses"]):
                    yield response
                return

            log_info("Cache miss for async streaming model response")

        # Track streaming responses for caching
        streaming_responses: List[ModelResponse] = []

        log_debug(f"{self.get_provider()} Async Response Stream Start", center=True, symbol="-")
        log_debug(f"Model: {self.id}", center=True, symbol="-")
        _log_messages(messages)

        _tool_dicts = self._format_tools(tools) if tools is not None else []
        _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)} if tools is not None else {}

        function_call_count = 0

        while True:
            # Create assistant message and stream data
            assistant_message = Message(role=self.assistant_message_role)
            stream_data = MessageData()
            model_response = ModelResponse()
            if stream_model_response:
                # Generate response; a distinct loop variable keeps model_response from being shadowed
                async for response in self.aprocess_response_stream(
                    messages=messages,
                    assistant_message=assistant_message,
                    stream_data=stream_data,
                    response_format=response_format,
                    tools=_tool_dicts,
                    tool_choice=tool_choice or self._tool_choice,
                    run_response=run_response,
                ):
                    if self.cache_response and isinstance(response, ModelResponse):
                        streaming_responses.append(response)
                    yield response

            else:
                await self._aprocess_model_response(
                    messages=messages,
                    assistant_message=assistant_message,
                    model_response=model_response,
                    response_format=response_format,
                    tools=_tool_dicts,
                    tool_choice=tool_choice or self._tool_choice,
                    run_response=run_response,
                )
                if self.cache_response:
                    streaming_responses.append(model_response)
                yield model_response

            # Add assistant message to messages
            messages.append(assistant_message)
            assistant_message.log(metrics=True)

            # Handle tool calls if present
            if assistant_message.tool_calls is not None:
                # Prepare function calls
                function_calls_to_run: List[FunctionCall] = self.get_function_calls_to_run(
                    assistant_message=assistant_message, messages=messages, functions=_functions
                )
                function_call_results: List[Message] = []

                # Execute function calls
                async for function_call_response in self.arun_function_calls(
                    function_calls=function_calls_to_run,
                    function_call_results=function_call_results,
                    current_function_call_count=function_call_count,
                    function_call_limit=tool_call_limit,
                ):
                    if self.cache_response and isinstance(function_call_response, ModelResponse):
                        streaming_responses.append(function_call_response)
                    yield function_call_response

                # Count each executed function call
                function_call_count += len(function_call_results)

                # Format and add results to messages
                if stream_data and stream_data.extra is not None:
                    self.format_function_call_results(
                        messages=messages, function_call_results=function_call_results, **stream_data.extra
                    )
                elif model_response and model_response.extra is not None:
                    self.format_function_call_results(
                        messages=messages, function_call_results=function_call_results, **model_response.extra
                    )
                else:
                    self.format_function_call_results(messages=messages, function_call_results=function_call_results)

                # Handle function call media
                if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
                    self._handle_function_call_media(
                        messages=messages,
                        function_call_results=function_call_results,
                        send_media_to_model=send_media_to_model,
                    )

                for function_call_result in function_call_results:
                    function_call_result.log(metrics=True)

                # Check if we should stop after tool calls
                if any(m.stop_after_tool_call for m in function_call_results):
                    break

                # If we have any tool calls that require confirmation, break the loop
                if any(fc.function.requires_confirmation for fc in function_calls_to_run):
                    break

                # If we have any tool calls that require external execution, break the loop
                if any(fc.function.external_execution for fc in function_calls_to_run):
                    break

                # If we have any tool calls that require user input, break the loop
                if any(fc.function.requires_user_input for fc in function_calls_to_run):
                    break

                # Continue loop to get next response
                continue

            # No tool calls or finished processing them
            break

        log_debug(f"{self.get_provider()} Async Response Stream End", center=True, symbol="-")

        # Save streaming responses to cache if enabled
        if self.cache_response and cache_key and streaming_responses:
            self._save_streaming_responses_to_cache(cache_key, streaming_responses)
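The async twin is consumed with `async for`; the same assumptions as the sync sketch apply (installed agno, configured OpenAI key, published import paths):

```python
import asyncio

from agno.models.message import Message
from agno.models.openai import OpenAIChat


async def main() -> None:
    model = OpenAIChat(id="gpt-4o-mini")  # assumed provider subclass and model id
    messages = [Message(role="user", content="Name one prime number.")]
    async for delta in model.aresponse_stream(messages=messages):
        if getattr(delta, "content", None):
            print(delta.content, end="", flush=True)
    print()


asyncio.run(main())
```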
    def _populate_assistant_message_from_stream_data(
        self, assistant_message: Message, stream_data: MessageData
    ) -> None:
        """
        Populate an assistant message with the stream data.
        """
        if stream_data.response_role is not None:
            assistant_message.role = stream_data.response_role
        if stream_data.response_metrics is not None:
            assistant_message.metrics = stream_data.response_metrics
        if stream_data.response_content:
            assistant_message.content = stream_data.response_content
        if stream_data.response_reasoning_content:
            assistant_message.reasoning_content = stream_data.response_reasoning_content
        if stream_data.response_redacted_reasoning_content:
            assistant_message.redacted_reasoning_content = stream_data.response_redacted_reasoning_content
        if stream_data.response_provider_data:
            assistant_message.provider_data = stream_data.response_provider_data
        if stream_data.response_citations:
            assistant_message.citations = stream_data.response_citations
        if stream_data.response_audio:
            assistant_message.audio_output = stream_data.response_audio
        if stream_data.response_image:
            assistant_message.image_output = stream_data.response_image
        if stream_data.response_video:
            assistant_message.video_output = stream_data.response_video
        if stream_data.response_file:
            assistant_message.file_output = stream_data.response_file
        if stream_data.response_tool_calls and len(stream_data.response_tool_calls) > 0:
            assistant_message.tool_calls = self.parse_tool_calls(stream_data.response_tool_calls)
    def _populate_stream_data(
        self, stream_data: MessageData, model_response_delta: ModelResponse
    ) -> Iterator[ModelResponse]:
        """Update the stream data and assistant message with the model response."""

        should_yield = False
        if model_response_delta.role is not None:
            stream_data.response_role = model_response_delta.role  # type: ignore

        if model_response_delta.response_usage is not None:
            if stream_data.response_metrics is None:
                stream_data.response_metrics = Metrics()
            stream_data.response_metrics += model_response_delta.response_usage

        # Update stream_data content
        if model_response_delta.content is not None:
            stream_data.response_content += model_response_delta.content
            should_yield = True

        if model_response_delta.reasoning_content is not None:
            stream_data.response_reasoning_content += model_response_delta.reasoning_content
            should_yield = True

        if model_response_delta.redacted_reasoning_content is not None:
            stream_data.response_redacted_reasoning_content += model_response_delta.redacted_reasoning_content
            should_yield = True

        if model_response_delta.citations is not None:
            stream_data.response_citations = model_response_delta.citations
            should_yield = True

        if model_response_delta.provider_data:
            if stream_data.response_provider_data is None:
                stream_data.response_provider_data = {}
            stream_data.response_provider_data.update(model_response_delta.provider_data)

        # Update stream_data tool calls
        if model_response_delta.tool_calls is not None:
            if stream_data.response_tool_calls is None:
                stream_data.response_tool_calls = []
            stream_data.response_tool_calls.extend(model_response_delta.tool_calls)
            should_yield = True

        if model_response_delta.audio is not None and isinstance(model_response_delta.audio, Audio):
            if stream_data.response_audio is None:
                stream_data.response_audio = Audio(id=str(uuid4()), content="", transcript="")

            from typing import cast

            audio_response = cast(Audio, model_response_delta.audio)

            # Update the stream data with audio information
            if audio_response.id is not None:
                stream_data.response_audio.id = audio_response.id  # type: ignore
            if audio_response.content is not None:
                stream_data.response_audio.content += audio_response.content  # type: ignore
            if audio_response.transcript is not None:
                stream_data.response_audio.transcript += audio_response.transcript  # type: ignore
            if audio_response.expires_at is not None:
                stream_data.response_audio.expires_at = audio_response.expires_at
            if audio_response.mime_type is not None:
                stream_data.response_audio.mime_type = audio_response.mime_type
            stream_data.response_audio.sample_rate = audio_response.sample_rate
            stream_data.response_audio.channels = audio_response.channels

            should_yield = True

        if model_response_delta.images:
            if stream_data.response_image is None:
                stream_data.response_image = model_response_delta.images[-1]
            should_yield = True

        if model_response_delta.videos:
            if stream_data.response_video is None:
                stream_data.response_video = model_response_delta.videos[-1]
            should_yield = True

        if model_response_delta.extra is not None:
            if stream_data.extra is None:
                stream_data.extra = {}
            for key in model_response_delta.extra:
                if isinstance(model_response_delta.extra[key], list):
                    if not stream_data.extra.get(key):
                        stream_data.extra[key] = []
                    stream_data.extra[key].extend(model_response_delta.extra[key])
                else:
                    stream_data.extra[key] = model_response_delta.extra[key]

        if should_yield:
            yield model_response_delta
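The accumulator above follows a common streaming pattern: mutate one shared state object per delta and yield only deltas that carry user-visible data. A stripped-down sketch of that pattern with stand-in types (not the agno classes):

```python
from dataclasses import dataclass, field
from typing import Iterator, List, Optional


@dataclass
class Delta:
    content: Optional[str] = None
    tool_calls: Optional[List[dict]] = None


@dataclass
class StreamState:
    content: str = ""
    tool_calls: List[dict] = field(default_factory=list)


def accumulate(state: StreamState, deltas: Iterator[Delta]) -> Iterator[Delta]:
    for delta in deltas:
        should_yield = False
        if delta.content is not None:
            state.content += delta.content
            should_yield = True
        if delta.tool_calls:
            state.tool_calls.extend(delta.tool_calls)
            should_yield = True
        if should_yield:
            # Callers see deltas in real time; state holds the final message.
            yield delta


state = StreamState()
for d in accumulate(state, iter([Delta("Hel"), Delta("lo"), Delta()])):
    print(d.content, end="")
print("\nfinal:", state.content)
```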
    def parse_tool_calls(self, tool_calls_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Parse the tool calls from the model provider into a list of tool calls.
        """
        return tool_calls_data
    def get_function_call_to_run_from_tool_execution(
        self,
        tool_execution: ToolExecution,
        functions: Optional[Dict[str, Function]] = None,
    ) -> FunctionCall:
        function_call = get_function_call_for_tool_execution(
            tool_execution=tool_execution,
            functions=functions,
        )
        if function_call is None:
            raise ValueError("Function call not found")
        return function_call
    def get_function_calls_to_run(
        self,
        assistant_message: Message,
        messages: List[Message],
        functions: Optional[Dict[str, Function]] = None,
    ) -> List[FunctionCall]:
        """
        Prepare function calls for the assistant message.
        """
        function_calls_to_run: List[FunctionCall] = []
        if assistant_message.tool_calls is not None:
            for tool_call in assistant_message.tool_calls:
                _tool_call_id = tool_call.get("id")
                _function_call = get_function_call_for_tool_call(tool_call, functions)
                if _function_call is None:
                    messages.append(
                        Message(
                            role=self.tool_message_role,
                            tool_call_id=_tool_call_id,
                            content="Error: The requested tool does not exist or is not available.",
                        )
                    )
                    continue
                if _function_call.error is not None:
                    messages.append(
                        Message(role=self.tool_message_role, tool_call_id=_tool_call_id, content=_function_call.error)
                    )
                    continue
                function_calls_to_run.append(_function_call)
        return function_calls_to_run
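Note how an unknown or broken tool is answered with an error message rather than an exception, so the model can self-correct on the next turn. The same resolve-against-a-registry idea can be sketched standalone (illustrative names, not agno's):

```python
from typing import Any, Callable, Dict, List, Tuple


def resolve_tool_calls(
    tool_calls: List[Dict[str, Any]],
    registry: Dict[str, Callable[..., Any]],
) -> Tuple[List[Dict[str, Any]], List[str]]:
    runnable: List[Dict[str, Any]] = []
    errors: List[str] = []
    for call in tool_calls:
        name = call.get("function", {}).get("name")
        fn = registry.get(name)
        if fn is None:
            # Report the bad call back to the model instead of raising.
            errors.append(f"Error: tool {name!r} does not exist or is not available.")
            continue
        runnable.append({"id": call.get("id"), "fn": fn, "args": call["function"].get("arguments", {})})
    return runnable, errors


calls = [
    {"id": "1", "function": {"name": "add", "arguments": {"a": 1, "b": 2}}},
    {"id": "2", "function": {"name": "missing", "arguments": {}}},
]
runnable, errors = resolve_tool_calls(calls, {"add": lambda a, b: a + b})
print([c["fn"](**c["args"]) for c in runnable], errors)
```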
    def create_function_call_result(
        self,
        function_call: FunctionCall,
        success: bool,
        output: Optional[Union[List[Any], str]] = None,
        timer: Optional[Timer] = None,
        function_execution_result: Optional[FunctionExecutionResult] = None,
    ) -> Message:
        """Create a function call result message."""
        kwargs = {}
        if timer is not None:
            kwargs["metrics"] = Metrics(duration=timer.elapsed)

        # Include media artifacts from the function execution result in the tool message
        images = None
        videos = None
        audios = None
        files = None

        if success and function_execution_result:
            # With unified media classes, no conversion is needed - use them directly
            images = function_execution_result.images
            videos = function_execution_result.videos
            audios = function_execution_result.audios
            files = function_execution_result.files

        return Message(
            role=self.tool_message_role,
            content=output if success else function_call.error,
            tool_call_id=function_call.call_id,
            tool_name=function_call.function.name,
            tool_args=function_call.arguments,
            tool_call_error=not success,
            stop_after_tool_call=function_call.function.stop_after_tool_call,
            images=images,
            videos=videos,
            audio=audios,
            files=files,
            **kwargs,  # type: ignore
        )
    def create_tool_call_limit_error_result(self, function_call: FunctionCall) -> Message:
        return Message(
            role=self.tool_message_role,
            content=f"Tool call limit reached. Tool call {function_call.function.name} not executed. Don't try to execute it again.",
            tool_call_id=function_call.call_id,
            tool_name=function_call.function.name,
            tool_args=function_call.arguments,
            tool_call_error=True,
        )
    def run_function_call(
        self,
        function_call: FunctionCall,
        function_call_results: List[Message],
        additional_input: Optional[List[Message]] = None,
    ) -> Iterator[Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent]]:
        # Start function call timer
        function_call_timer = Timer()
        function_call_timer.start()
        # Yield a tool_call_started event
        yield ModelResponse(
            content=function_call.get_call_str(),
            tool_executions=[
                ToolExecution(
                    tool_call_id=function_call.call_id,
                    tool_name=function_call.function.name,
                    tool_args=function_call.arguments,
                )
            ],
            event=ModelResponseEvent.tool_call_started.value,
        )

        # Run function calls sequentially
        function_execution_result: FunctionExecutionResult = FunctionExecutionResult(status="failure")
        try:
            function_execution_result = function_call.execute()
        except AgentRunException as a_exc:
            # Update additional messages from the function call
            _handle_agent_exception(a_exc, additional_input)
            # function_execution_result keeps its failure status, so the call is marked unsuccessful below
        except Exception as e:
            log_error(f"Error executing function {function_call.function.name}: {e}")
            raise e

        function_call_success = function_execution_result.status == "success"

        # Stop function call timer
        function_call_timer.stop()

        # Process function call output
        function_call_output: str = ""

        if isinstance(function_execution_result.result, (GeneratorType, collections.abc.Iterator)):
            for item in function_execution_result.result:
                # This function yields agent/team/workflow run events
                if (
                    isinstance(item, tuple(get_args(RunOutputEvent)))
                    or isinstance(item, tuple(get_args(TeamRunOutputEvent)))
                    or isinstance(item, tuple(get_args(WorkflowRunOutputEvent)))
                ):
                    # We only capture content events for output accumulation
                    if isinstance(item, RunContentEvent) or isinstance(item, TeamRunContentEvent):
                        if item.content is not None and isinstance(item.content, BaseModel):
                            function_call_output += item.content.model_dump_json()
                        else:
                            # Capture output
                            function_call_output += item.content or ""

                            if function_call.function.show_result and item.content is not None:
                                yield ModelResponse(content=item.content)

                    if isinstance(item, CustomEvent):
                        function_call_output += str(item)

                    # For WorkflowCompletedEvent, extract content for the final output
                    from agno.run.workflow import WorkflowCompletedEvent

                    if isinstance(item, WorkflowCompletedEvent):
                        if item.content is not None:
                            if isinstance(item.content, BaseModel):
                                function_call_output += item.content.model_dump_json()
                            else:
                                function_call_output += str(item.content)

                    # Yield the event itself to bubble it up
                    yield item

                else:
                    function_call_output += str(item)
                    if function_call.function.show_result and item is not None:
                        yield ModelResponse(content=str(item))
        else:
            from agno.tools.function import ToolResult

            if isinstance(function_execution_result.result, ToolResult):
                # Extract content and media from the ToolResult
                tool_result = function_execution_result.result
                function_call_output = tool_result.content

                # Transfer media from the ToolResult to the FunctionExecutionResult
                if tool_result.images:
                    function_execution_result.images = tool_result.images
                if tool_result.videos:
                    function_execution_result.videos = tool_result.videos
                if tool_result.audios:
                    function_execution_result.audios = tool_result.audios
                if tool_result.files:
                    function_execution_result.files = tool_result.files
            else:
                function_call_output = str(function_execution_result.result) if function_execution_result.result else ""

            if function_call.function.show_result and function_call_output is not None:
                yield ModelResponse(content=function_call_output)

        # Create and yield the function call result
        function_call_result = self.create_function_call_result(
            function_call,
            success=function_call_success,
            output=function_call_output,
            timer=function_call_timer,
            function_execution_result=function_execution_result,
        )
        yield ModelResponse(
            content=f"{function_call.get_call_str()} completed in {function_call_timer.elapsed:.4f}s. ",
            tool_executions=[
                ToolExecution(
                    tool_call_id=function_call_result.tool_call_id,
                    tool_name=function_call_result.tool_name,
                    tool_args=function_call_result.tool_args,
                    tool_call_error=function_call_result.tool_call_error,
                    result=str(function_call_result.content),
                    stop_after_tool_call=function_call_result.stop_after_tool_call,
                    metrics=function_call_result.metrics,
                )
            ],
            event=ModelResponseEvent.tool_call_completed.value,
            updated_session_state=function_execution_result.updated_session_state,
            # Add media artifacts from the function execution
            images=function_execution_result.images,
            videos=function_execution_result.videos,
            audios=function_execution_result.audios,
            files=function_execution_result.files,
        )

        # Add the function call result to the collected results
        function_call_results.append(function_call_result)
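The started/completed event pair around each execution is what downstream UIs key on. A compact sketch of the same envelope pattern around an arbitrary callable (names are illustrative, not agno's):

```python
import time
from typing import Any, Callable, Dict, Iterator


def run_with_events(name: str, fn: Callable[[], Any]) -> Iterator[Dict[str, Any]]:
    # Emit a started event, run the tool, then emit a completed event with timing.
    yield {"event": "tool_call_started", "tool": name}
    start = time.perf_counter()
    try:
        result, error = fn(), None
    except Exception as exc:  # surface failures inside the completed event
        result, error = None, str(exc)
    yield {
        "event": "tool_call_completed",
        "tool": name,
        "result": result,
        "error": error,
        "elapsed_s": round(time.perf_counter() - start, 4),
    }


for event in run_with_events("add", lambda: 2 + 2):
    print(event)
```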
    def run_function_calls(
        self,
        function_calls: List[FunctionCall],
        function_call_results: List[Message],
        additional_input: Optional[List[Message]] = None,
        current_function_call_count: int = 0,
        function_call_limit: Optional[int] = None,
    ) -> Iterator[Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent]]:
        # Additional messages from function calls that will be added to the function call results
        if additional_input is None:
            additional_input = []

        for fc in function_calls:
            if function_call_limit is not None:
                current_function_call_count += 1
                # We have reached the function call limit, so we add an error result to the function call results
                if current_function_call_count > function_call_limit:
                    function_call_results.append(self.create_tool_call_limit_error_result(fc))
                    continue

            paused_tool_executions = []

            # The function cannot be executed without user confirmation
            if fc.function.requires_confirmation:
                paused_tool_executions.append(
                    ToolExecution(
                        tool_call_id=fc.call_id,
                        tool_name=fc.function.name,
                        tool_args=fc.arguments,
                        requires_confirmation=True,
                    )
                )
            # If the function requires user input, we yield a message to the user
            if fc.function.requires_user_input:
                user_input_schema = fc.function.user_input_schema
                if fc.arguments and user_input_schema:
                    for name, value in fc.arguments.items():
                        for user_input_field in user_input_schema:
                            if user_input_field.name == name:
                                user_input_field.value = value

                paused_tool_executions.append(
                    ToolExecution(
                        tool_call_id=fc.call_id,
                        tool_name=fc.function.name,
                        tool_args=fc.arguments,
                        requires_user_input=True,
                        user_input_schema=user_input_schema,
                    )
                )
            # If the function is from the user control flow tools, we handle it here
            if fc.function.name == "get_user_input" and fc.arguments and fc.arguments.get("user_input_fields"):
                user_input_schema = []
                for input_field in fc.arguments.get("user_input_fields", []):
                    field_type = input_field.get("field_type")
                    try:
                        python_type = eval(field_type) if isinstance(field_type, str) else field_type
                    except (NameError, SyntaxError):
                        python_type = str  # Default to str if the type is invalid
                    user_input_schema.append(
                        UserInputField(
                            name=input_field.get("field_name"),
                            field_type=python_type,
                            description=input_field.get("field_description"),
                        )
                    )

                paused_tool_executions.append(
                    ToolExecution(
                        tool_call_id=fc.call_id,
                        tool_name=fc.function.name,
                        tool_args=fc.arguments,
                        requires_user_input=True,
                        user_input_schema=user_input_schema,
                    )
                )
            # If the function requires external execution, we yield a message to the user
            if fc.function.external_execution:
                paused_tool_executions.append(
                    ToolExecution(
                        tool_call_id=fc.call_id,
                        tool_name=fc.function.name,
                        tool_args=fc.arguments,
                        external_execution_required=True,
                    )
                )

            if paused_tool_executions:
                yield ModelResponse(
                    tool_executions=paused_tool_executions,
                    event=ModelResponseEvent.tool_call_paused.value,
                )
                # We don't execute the function calls here
                continue

            yield from self.run_function_call(
                function_call=fc, function_call_results=function_call_results, additional_input=additional_input
            )

        # Add any additional messages at the end
        if additional_input:
            function_call_results.extend(additional_input)
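Pausing instead of executing is the human-in-the-loop hook: the caller receives a `tool_call_paused` event, collects confirmation or input, then resumes the run. A minimal sketch of that control flow with illustrative stand-ins:

```python
from dataclasses import dataclass
from typing import Callable, Iterator, Union


@dataclass
class Paused:
    tool_name: str  # the caller must confirm before this runs


@dataclass
class Completed:
    tool_name: str
    result: object


def run_or_pause(
    name: str, fn: Callable[[], object], requires_confirmation: bool
) -> Iterator[Union[Paused, Completed]]:
    if requires_confirmation:
        yield Paused(tool_name=name)  # hand control back to the caller
        return
    yield Completed(tool_name=name, result=fn())


events = list(run_or_pause("delete_rows", lambda: "deleted", requires_confirmation=True))
print(events)  # [Paused(tool_name='delete_rows')] and nothing was executed
```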
    async def arun_function_call(
        self,
        function_call: FunctionCall,
    ) -> Tuple[Union[bool, AgentRunException], Timer, FunctionCall, FunctionExecutionResult]:
        """Run a single function call and return its success status, timer, and the FunctionCall object."""
        from inspect import isasyncgenfunction, iscoroutine, iscoroutinefunction

        function_call_timer = Timer()
        function_call_timer.start()
        success: Union[bool, AgentRunException] = False
        result: FunctionExecutionResult = FunctionExecutionResult(status="failure")

        try:
            if (
                iscoroutinefunction(function_call.function.entrypoint)
                or isasyncgenfunction(function_call.function.entrypoint)
                or iscoroutine(function_call.function.entrypoint)
            ):
                result = await function_call.aexecute()
                success = result.status == "success"

            # If any of the hooks are async, we need to run the function call asynchronously
            elif function_call.function.tool_hooks is not None and any(
                iscoroutinefunction(f) for f in function_call.function.tool_hooks
            ):
                result = await function_call.aexecute()
                success = result.status == "success"
            else:
                result = await asyncio.to_thread(function_call.execute)
                success = result.status == "success"
        except AgentRunException as e:
            success = e
        except Exception as e:
            log_error(f"Error executing function {function_call.function.name}: {e}")
            success = False
            raise e

        function_call_timer.stop()
        return success, function_call_timer, function_call, result
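Routing synchronous entrypoints through `asyncio.to_thread` keeps blocking tools off the event loop while native coroutines are awaited directly. A self-contained sketch of that dispatch:

```python
import asyncio
import time
from inspect import iscoroutinefunction


def slow_sync_tool() -> str:
    time.sleep(0.2)  # stands in for blocking I/O
    return "sync done"


async def async_tool() -> str:
    await asyncio.sleep(0.2)
    return "async done"


async def dispatch(fn):
    # Await coroutine functions directly; push plain callables to a worker thread.
    if iscoroutinefunction(fn):
        return await fn()
    return await asyncio.to_thread(fn)


async def main():
    # Both finish in ~0.2s because the sync tool does not block the loop.
    results = await asyncio.gather(dispatch(slow_sync_tool), dispatch(async_tool))
    print(results)


asyncio.run(main())
```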
    async def arun_function_calls(
        self,
        function_calls: List[FunctionCall],
        function_call_results: List[Message],
        additional_input: Optional[List[Message]] = None,
        current_function_call_count: int = 0,
        function_call_limit: Optional[int] = None,
        skip_pause_check: bool = False,
    ) -> AsyncIterator[Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent]]:
        # Additional messages from function calls that will be added to the function call results
        if additional_input is None:
            additional_input = []

        function_calls_to_run = []
        for fc in function_calls:
            if function_call_limit is not None:
                current_function_call_count += 1
                # We have reached the function call limit, so we add an error result to the function call results
                if current_function_call_count > function_call_limit:
                    function_call_results.append(self.create_tool_call_limit_error_result(fc))
                    # Skip this function call
                    continue
            function_calls_to_run.append(fc)

        # Yield tool_call_started events for all function calls or pause them
        for fc in function_calls_to_run:
            paused_tool_executions = []
            # The function cannot be executed without user confirmation
            if fc.function.requires_confirmation and not skip_pause_check:
                paused_tool_executions.append(
                    ToolExecution(
                        tool_call_id=fc.call_id,
                        tool_name=fc.function.name,
                        tool_args=fc.arguments,
                        requires_confirmation=True,
                    )
                )
            # If the function requires user input, we yield a message to the user
            if fc.function.requires_user_input and not skip_pause_check:
                user_input_schema = fc.function.user_input_schema
                if fc.arguments and user_input_schema:
                    for name, value in fc.arguments.items():
                        for user_input_field in user_input_schema:
                            if user_input_field.name == name:
                                user_input_field.value = value

                paused_tool_executions.append(
                    ToolExecution(
                        tool_call_id=fc.call_id,
                        tool_name=fc.function.name,
                        tool_args=fc.arguments,
                        requires_user_input=True,
                        user_input_schema=user_input_schema,
                    )
                )
            # If the function is from the user control flow tools, we handle it here
            if (
                fc.function.name == "get_user_input"
                and fc.arguments
                and fc.arguments.get("user_input_fields")
                and not skip_pause_check
            ):
                fc.function.requires_user_input = True
                user_input_schema = []
                for input_field in fc.arguments.get("user_input_fields", []):
                    field_type = input_field.get("field_type")
                    try:
                        python_type = eval(field_type) if isinstance(field_type, str) else field_type
                    except (NameError, SyntaxError):
                        python_type = str  # Default to str if the type is invalid
                    user_input_schema.append(
                        UserInputField(
                            name=input_field.get("field_name"),
                            field_type=python_type,
                            description=input_field.get("field_description"),
                        )
                    )

                paused_tool_executions.append(
                    ToolExecution(
                        tool_call_id=fc.call_id,
                        tool_name=fc.function.name,
                        tool_args=fc.arguments,
                        requires_user_input=True,
                        user_input_schema=user_input_schema,
                    )
                )
            # If the function requires external execution, we yield a message to the user
            if fc.function.external_execution and not skip_pause_check:
                paused_tool_executions.append(
                    ToolExecution(
                        tool_call_id=fc.call_id,
                        tool_name=fc.function.name,
                        tool_args=fc.arguments,
                        external_execution_required=True,
                    )
                )

            if paused_tool_executions:
                yield ModelResponse(
                    tool_executions=paused_tool_executions,
                    event=ModelResponseEvent.tool_call_paused.value,
                )
                # We don't execute the function calls here
                continue

            yield ModelResponse(
                content=fc.get_call_str(),
                tool_executions=[
                    ToolExecution(
                        tool_call_id=fc.call_id,
                        tool_name=fc.function.name,
                        tool_args=fc.arguments,
                    )
                ],
                event=ModelResponseEvent.tool_call_started.value,
            )

        # Run the remaining function calls in parallel (drop the ones that were paused above)
        if not skip_pause_check:
            function_calls_to_run = [
                fc
                for fc in function_calls_to_run
                if not (
                    fc.function.requires_confirmation
                    or fc.function.external_execution
                    or fc.function.requires_user_input
                )
            ]
        results = await asyncio.gather(
            *(self.arun_function_call(fc) for fc in function_calls_to_run), return_exceptions=True
        )

        # Separate async generators from other results for concurrent processing
        async_generator_results: List[Any] = []
        non_async_generator_results: List[Any] = []

        for result in results:
            if isinstance(result, BaseException):
                non_async_generator_results.append(result)
                continue

            function_call_success, function_call_timer, function_call, function_execution_result = result

            # Check if this result contains an async generator
            if isinstance(function_call.result, (AsyncGeneratorType, AsyncIterator)):
                async_generator_results.append(result)
            else:
                non_async_generator_results.append(result)

        # Process async generators with real-time event streaming using an asyncio.Queue
        async_generator_outputs: Dict[int, Tuple[Any, str, Optional[BaseException]]] = {}
        event_queue: asyncio.Queue = asyncio.Queue()
        active_generators_count: int = len(async_generator_results)

        # Create background tasks for each async generator
        async def process_async_generator(result, generator_id):
            function_call_success, function_call_timer, function_call, function_execution_result = result
            function_call_output = ""

            try:
                async for item in function_call.result:
                    # This function yields agent/team/workflow run events
                    if isinstance(
                        item,
                        tuple(get_args(RunOutputEvent))
                        + tuple(get_args(TeamRunOutputEvent))
                        + tuple(get_args(WorkflowRunOutputEvent)),
                    ):
                        # We only capture content events
                        if isinstance(item, RunContentEvent) or isinstance(item, TeamRunContentEvent):
                            if item.content is not None and isinstance(item.content, BaseModel):
                                function_call_output += item.content.model_dump_json()
                            else:
                                # Capture output
                                function_call_output += item.content or ""

                                if function_call.function.show_result and item.content is not None:
                                    await event_queue.put(ModelResponse(content=item.content))
                                    continue

                        if isinstance(item, CustomEvent):
                            function_call_output += str(item)

                        # For WorkflowCompletedEvent, extract content for the final output
                        from agno.run.workflow import WorkflowCompletedEvent

                        if isinstance(item, WorkflowCompletedEvent):
                            if item.content is not None:
                                if isinstance(item.content, BaseModel):
                                    function_call_output += item.content.model_dump_json()
                                else:
                                    function_call_output += str(item.content)

                        # Put the event into the queue to be yielded
                        await event_queue.put(item)

                    # Yield custom events emitted by the tool
                    else:
                        function_call_output += str(item)
                        if function_call.function.show_result and item is not None:
                            await event_queue.put(ModelResponse(content=str(item)))

                # Store the final output for this generator
                async_generator_outputs[generator_id] = (result, function_call_output, None)

            except Exception as e:
                # Store the exception
                async_generator_outputs[generator_id] = (result, "", e)

            # Signal that this generator is done
            await event_queue.put(("GENERATOR_DONE", generator_id))

        # Start all async generator tasks
        generator_tasks = []
        for i, result in enumerate(async_generator_results):
            task = asyncio.create_task(process_async_generator(result, i))
            generator_tasks.append(task)

        # Stream events from the queue as they arrive
        completed_generators_count = 0
        while completed_generators_count < active_generators_count:
            try:
                event = await event_queue.get()

                # Check if this is a completion signal
                if isinstance(event, tuple) and event[0] == "GENERATOR_DONE":
                    completed_generators_count += 1
                    continue

                # Yield the actual event
                yield event

            except Exception as e:
                log_error(f"Error processing async generator event: {e}")
                break
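The queue-with-sentinel loop above is a standard fan-in: N producer tasks put events onto one queue, and the consumer exits after receiving N done markers. A self-contained sketch of the same mechanism:

```python
import asyncio

DONE = object()  # sentinel, playing the role of the ("GENERATOR_DONE", id) tuple above


async def producer(queue: asyncio.Queue, name: str, n: int) -> None:
    for i in range(n):
        await asyncio.sleep(0.01)
        await queue.put(f"{name}:{i}")
    await queue.put(DONE)


async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    producers = 3
    tasks = [asyncio.create_task(producer(queue, f"gen{p}", 2)) for p in range(producers)]

    finished = 0
    while finished < producers:  # consume until every producer has signalled completion
        item = await queue.get()
        if item is DONE:
            finished += 1
            continue
        print(item)  # events from all producers interleave in real time

    await asyncio.gather(*tasks)


asyncio.run(main())
```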
1932
|
+
# Now process all results (non-async generators and completed async generators)
|
|
1933
|
+
for i, original_result in enumerate(results):
|
|
1934
|
+
# If result is an exception, skip processing it
|
|
1935
|
+
if isinstance(original_result, BaseException):
|
|
1936
|
+
log_error(f"Error during function call: {original_result}")
|
|
1937
|
+
raise original_result
|
|
1938
|
+
|
|
1939
|
+
# Unpack result
|
|
1940
|
+
function_call_success, function_call_timer, function_call, function_execution_result = original_result
|
|
1941
|
+
|
|
1942
|
+
# Check if this was an async generator that was already processed
|
|
1943
|
+
async_function_call_output = None
|
|
1944
|
+
if isinstance(function_call.result, (AsyncGeneratorType, collections.abc.AsyncIterator)):
|
|
1945
|
+
# Find the corresponding processed result
|
|
1946
|
+
async_gen_index = 0
|
|
1947
|
+
for j, result in enumerate(results[: i + 1]):
|
|
1948
|
+
if not isinstance(result, BaseException):
|
|
1949
|
+
_, _, fc, _ = result
|
|
1950
|
+
if isinstance(fc.result, (AsyncGeneratorType, collections.abc.AsyncIterator)):
|
|
1951
|
+
if j == i: # This is our async generator
|
|
1952
|
+
if async_gen_index in async_generator_outputs:
|
|
1953
|
+
_, async_function_call_output, error = async_generator_outputs[async_gen_index]
|
|
1954
|
+
if error:
|
|
1955
|
+
log_error(f"Error in async generator: {error}")
|
|
1956
|
+
raise error
|
|
1957
|
+
break
|
|
1958
|
+
async_gen_index += 1
|
|
1959
|
+
|
|
1960
|
+
updated_session_state = function_execution_result.updated_session_state
|
|
1961
|
+
|
|
1962
|
+
# Handle AgentRunException
|
|
1963
|
+
if isinstance(function_call_success, AgentRunException):
|
|
1964
|
+
a_exc = function_call_success
|
|
1965
|
+
# Update additional messages from function call
|
|
1966
|
+
_handle_agent_exception(a_exc, additional_input)
|
|
1967
|
+
# Set function call success to False if an exception occurred
|
|
1968
|
+
function_call_success = False
|
|
1969
|
+
|
|
1970
|
+
# Process function call output
|
|
1971
|
+
function_call_output: str = ""
|
|
1972
|
+
|
|
1973
|
+
# Check if this was an async generator that was already processed
|
|
1974
|
+
if async_function_call_output is not None:
|
|
1975
|
+
function_call_output = async_function_call_output
|
|
1976
|
+
# Events from async generators were already yielded in real-time above
|
|
1977
|
+
elif isinstance(function_call.result, (GeneratorType, collections.abc.Iterator)):
|
|
1978
|
+
for item in function_call.result:
|
|
1979
|
+
# This function yields agent/team/workflow run events
|
|
1980
|
+
if isinstance(
|
|
1981
|
+
item,
|
|
1982
|
+
tuple(get_args(RunOutputEvent))
|
|
1983
|
+
+ tuple(get_args(TeamRunOutputEvent))
|
|
1984
|
+
+ tuple(get_args(WorkflowRunOutputEvent)),
|
|
1985
|
+
):
|
|
1986
|
+
# We only capture content events
|
|
1987
|
+
if isinstance(item, RunContentEvent) or isinstance(item, TeamRunContentEvent):
|
|
1988
|
+
if item.content is not None and isinstance(item.content, BaseModel):
|
|
1989
|
+
function_call_output += item.content.model_dump_json()
|
|
1990
|
+
else:
|
|
1991
|
+
# Capture output
|
|
1992
|
+
function_call_output += item.content or ""
|
|
1993
|
+
|
|
1994
|
+
if function_call.function.show_result and item.content is not None:
|
|
1995
|
+
yield ModelResponse(content=item.content)
|
|
1996
|
+
continue
|
|
1997
|
+
|
|
1998
|
+
# Yield the event itself to bubble it up
|
|
1999
|
+
yield item
|
|
2000
|
+
else:
|
|
2001
|
+
function_call_output += str(item)
|
|
2002
|
+
if function_call.function.show_result and item is not None:
|
|
2003
|
+
yield ModelResponse(content=str(item))
|
|
2004
|
+
else:
|
|
2005
|
+
from agno.tools.function import ToolResult
|
|
2006
|
+
|
|
2007
|
+
if isinstance(function_execution_result.result, ToolResult):
|
|
2008
|
+
tool_result = function_execution_result.result
|
|
2009
|
+
function_call_output = tool_result.content
|
|
2010
|
+
|
|
2011
|
+
if tool_result.images:
|
|
2012
|
+
function_execution_result.images = tool_result.images
|
|
2013
|
+
if tool_result.videos:
|
|
2014
|
+
function_execution_result.videos = tool_result.videos
|
|
2015
|
+
if tool_result.audios:
|
|
2016
|
+
function_execution_result.audios = tool_result.audios
|
|
2017
|
+
if tool_result.files:
|
|
2018
|
+
function_execution_result.files = tool_result.files
|
|
2019
|
+
else:
|
|
2020
|
+
function_call_output = str(function_call.result)
|
|
2021
|
+
|
|
2022
|
+
if function_call.function.show_result and function_call_output is not None:
|
|
2023
|
+
yield ModelResponse(content=function_call_output)
|
|

            # Create and yield function call result
            function_call_result = self.create_function_call_result(
                function_call,
                success=function_call_success,
                output=function_call_output,
                timer=function_call_timer,
                function_execution_result=function_execution_result,
            )
            yield ModelResponse(
                content=f"{function_call.get_call_str()} completed in {function_call_timer.elapsed:.4f}s. ",
                tool_executions=[
                    ToolExecution(
                        tool_call_id=function_call_result.tool_call_id,
                        tool_name=function_call_result.tool_name,
                        tool_args=function_call_result.tool_args,
                        tool_call_error=function_call_result.tool_call_error,
                        result=str(function_call_result.content),
                        stop_after_tool_call=function_call_result.stop_after_tool_call,
                        metrics=function_call_result.metrics,
                    )
                ],
                event=ModelResponseEvent.tool_call_completed.value,
                updated_session_state=updated_session_state,
                images=function_execution_result.images,
                videos=function_execution_result.videos,
                audios=function_execution_result.audios,
                files=function_execution_result.files,
            )

            # Add function call result to function call results
            function_call_results.append(function_call_result)

        # Add any additional messages at the end
        if additional_input:
            function_call_results.extend(additional_input)

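For context on the ToolResult branch above: a tool that returns a ToolResult has its
content fed to the model while any attached media is moved onto the function execution
result. A minimal sketch of such a tool, assuming ToolResult and Image behave as used
above (the chart_tool name and URL are illustrative, not part of the package):

    from agno.media import Image
    from agno.tools.function import ToolResult

    def chart_tool() -> ToolResult:
        # content becomes function_call_output; images travel as media artifacts
        return ToolResult(
            content="Generated a revenue chart.",
            images=[Image(url="https://example.com/chart.png")],
        )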
    def _prepare_function_calls(
        self,
        assistant_message: Message,
        messages: List[Message],
        model_response: ModelResponse,
        functions: Optional[Dict[str, Function]] = None,
    ) -> List[FunctionCall]:
        """
        Prepare function calls from tool calls in the assistant message.
        """
        if model_response.content is None:
            model_response.content = ""
        if model_response.tool_calls is None:
            model_response.tool_calls = []

        function_calls_to_run: List[FunctionCall] = self.get_function_calls_to_run(
            assistant_message=assistant_message, messages=messages, functions=functions
        )
        return function_calls_to_run

    def format_function_call_results(
        self, messages: List[Message], function_call_results: List[Message], **kwargs
    ) -> None:
        """
        Format function call results.
        """
        if len(function_call_results) > 0:
            messages.extend(function_call_results)

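The base implementation simply appends tool results to the message list; a provider
subclass could override this hook when its API expects a different shape. A minimal
sketch of such an override (MyModel is illustrative, not part of the package):

    class MyModel(Model):
        def format_function_call_results(
            self, messages: List[Message], function_call_results: List[Message], **kwargs
        ) -> None:
            # Example: collapse all tool outputs into a single tool message
            if function_call_results:
                combined = "\n".join(str(m.content) for m in function_call_results)
                messages.append(Message(role="tool", content=combined))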
    def _handle_function_call_media(
        self, messages: List[Message], function_call_results: List[Message], send_media_to_model: bool = True
    ) -> None:
        """
        Handle media artifacts from function calls by adding follow-up user messages for generated media if needed.
        """
        if not function_call_results:
            return

        # Collect all media artifacts from function calls
        all_images: List[Image] = []
        all_videos: List[Video] = []
        all_audio: List[Audio] = []
        all_files: List[File] = []

        for result_message in function_call_results:
            if result_message.images:
                all_images.extend(result_message.images)
                # Remove images from the tool message to avoid errors with some LLMs
                result_message.images = None

            if result_message.videos:
                all_videos.extend(result_message.videos)
                result_message.videos = None

            if result_message.audio:
                all_audio.extend(result_message.audio)
                result_message.audio = None

            if result_message.files:
                all_files.extend(result_message.files)
                result_message.files = None

        # Only add a media message if we should send media to the model
        if send_media_to_model and (all_images or all_videos or all_audio or all_files):
            # Attach the media artifacts to a follow-up "user" message rather than the
            # "tool" message, since tool messages carrying media cause errors for some models
            media_message = Message(
                role="user",
                content="Take note of the following content",
                images=all_images if all_images else None,
                videos=all_videos if all_videos else None,
                audio=all_audio if all_audio else None,
                files=all_files if all_files else None,
            )
            messages.append(media_message)

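The effect of the hand-off above, as a rough illustration (model stands in for any
concrete Model instance; Image as used earlier; not wired to a real provider):

    tool_msg = Message(role="tool", content="done", images=[Image(url="https://example.com/a.png")])
    history: List[Message] = [tool_msg]
    model._handle_function_call_media(messages=history, function_call_results=[tool_msg])
    # tool_msg.images is now None; history ends with a role="user" Message
    # carrying the original Image.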
    def get_system_message_for_model(self, tools: Optional[List[Any]] = None) -> Optional[str]:
        return self.system_prompt

    def get_instructions_for_model(self, tools: Optional[List[Any]] = None) -> Optional[List[str]]:
        return self.instructions

    def __deepcopy__(self, memo):
        """Create a deep copy of the Model instance.

        Args:
            memo (dict): Dictionary of objects already copied during the current copying pass.

        Returns:
            Model: A new Model instance with deeply copied attributes.
        """
        from copy import copy, deepcopy

        # Create a new instance without calling __init__
        cls = self.__class__
        new_model = cls.__new__(cls)
        memo[id(self)] = new_model

        # Deep copy all attributes except client objects
        for k, v in self.__dict__.items():
            if k in {"response_format", "_tools", "_functions"}:
                continue
            # Skip client objects
            if k in {"client", "async_client", "http_client", "mistral_client", "model_client"}:
                setattr(new_model, k, None)
                continue
            try:
                setattr(new_model, k, deepcopy(v, memo))
            except Exception:
                try:
                    setattr(new_model, k, copy(v))
                except Exception:
                    setattr(new_model, k, v)

        return new_model
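A quick illustration of the copy semantics above, assuming OpenAIChat as one concrete
Model implementation (any subclass behaves the same way):

    from copy import deepcopy
    from agno.models.openai import OpenAIChat

    original = OpenAIChat(id="gpt-4o")
    clone = deepcopy(original)
    assert clone.id == original.id
    # Client attributes were set to None in the copy, so the clone
    # builds fresh client objects when it next needs them.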