agno 2.2.13__py3-none-any.whl → 2.4.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/__init__.py +6 -0
- agno/agent/agent.py +5252 -3145
- agno/agent/remote.py +525 -0
- agno/api/api.py +2 -0
- agno/client/__init__.py +3 -0
- agno/client/a2a/__init__.py +10 -0
- agno/client/a2a/client.py +554 -0
- agno/client/a2a/schemas.py +112 -0
- agno/client/a2a/utils.py +369 -0
- agno/client/os.py +2669 -0
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +247 -0
- agno/culture/manager.py +2 -2
- agno/db/base.py +927 -6
- agno/db/dynamo/dynamo.py +788 -2
- agno/db/dynamo/schemas.py +128 -0
- agno/db/dynamo/utils.py +26 -3
- agno/db/firestore/firestore.py +674 -50
- agno/db/firestore/schemas.py +41 -0
- agno/db/firestore/utils.py +25 -10
- agno/db/gcs_json/gcs_json_db.py +506 -3
- agno/db/gcs_json/utils.py +14 -2
- agno/db/in_memory/in_memory_db.py +203 -4
- agno/db/in_memory/utils.py +14 -2
- agno/db/json/json_db.py +498 -2
- agno/db/json/utils.py +14 -2
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/utils.py +19 -0
- agno/db/migrations/v1_to_v2.py +54 -16
- agno/db/migrations/versions/__init__.py +0 -0
- agno/db/migrations/versions/v2_3_0.py +977 -0
- agno/db/mongo/async_mongo.py +1013 -39
- agno/db/mongo/mongo.py +684 -4
- agno/db/mongo/schemas.py +48 -0
- agno/db/mongo/utils.py +17 -0
- agno/db/mysql/__init__.py +2 -1
- agno/db/mysql/async_mysql.py +2958 -0
- agno/db/mysql/mysql.py +722 -53
- agno/db/mysql/schemas.py +77 -11
- agno/db/mysql/utils.py +151 -8
- agno/db/postgres/async_postgres.py +1254 -137
- agno/db/postgres/postgres.py +2316 -93
- agno/db/postgres/schemas.py +153 -21
- agno/db/postgres/utils.py +22 -7
- agno/db/redis/redis.py +531 -3
- agno/db/redis/schemas.py +36 -0
- agno/db/redis/utils.py +31 -15
- agno/db/schemas/evals.py +1 -0
- agno/db/schemas/memory.py +20 -9
- agno/db/singlestore/schemas.py +70 -1
- agno/db/singlestore/singlestore.py +737 -74
- agno/db/singlestore/utils.py +13 -3
- agno/db/sqlite/async_sqlite.py +1069 -89
- agno/db/sqlite/schemas.py +133 -1
- agno/db/sqlite/sqlite.py +2203 -165
- agno/db/sqlite/utils.py +21 -11
- agno/db/surrealdb/models.py +25 -0
- agno/db/surrealdb/surrealdb.py +603 -1
- agno/db/utils.py +60 -0
- agno/eval/__init__.py +26 -3
- agno/eval/accuracy.py +25 -12
- agno/eval/agent_as_judge.py +871 -0
- agno/eval/base.py +29 -0
- agno/eval/performance.py +10 -4
- agno/eval/reliability.py +22 -13
- agno/eval/utils.py +2 -1
- agno/exceptions.py +42 -0
- agno/hooks/__init__.py +3 -0
- agno/hooks/decorator.py +164 -0
- agno/integrations/discord/client.py +13 -2
- agno/knowledge/__init__.py +4 -0
- agno/knowledge/chunking/code.py +90 -0
- agno/knowledge/chunking/document.py +65 -4
- agno/knowledge/chunking/fixed.py +4 -1
- agno/knowledge/chunking/markdown.py +102 -11
- agno/knowledge/chunking/recursive.py +2 -2
- agno/knowledge/chunking/semantic.py +130 -48
- agno/knowledge/chunking/strategy.py +18 -0
- agno/knowledge/embedder/azure_openai.py +0 -1
- agno/knowledge/embedder/google.py +1 -1
- agno/knowledge/embedder/mistral.py +1 -1
- agno/knowledge/embedder/nebius.py +1 -1
- agno/knowledge/embedder/openai.py +16 -12
- agno/knowledge/filesystem.py +412 -0
- agno/knowledge/knowledge.py +4261 -1199
- agno/knowledge/protocol.py +134 -0
- agno/knowledge/reader/arxiv_reader.py +3 -2
- agno/knowledge/reader/base.py +9 -7
- agno/knowledge/reader/csv_reader.py +91 -42
- agno/knowledge/reader/docx_reader.py +9 -10
- agno/knowledge/reader/excel_reader.py +225 -0
- agno/knowledge/reader/field_labeled_csv_reader.py +38 -48
- agno/knowledge/reader/firecrawl_reader.py +3 -2
- agno/knowledge/reader/json_reader.py +16 -22
- agno/knowledge/reader/markdown_reader.py +15 -14
- agno/knowledge/reader/pdf_reader.py +33 -28
- agno/knowledge/reader/pptx_reader.py +9 -10
- agno/knowledge/reader/reader_factory.py +135 -1
- agno/knowledge/reader/s3_reader.py +8 -16
- agno/knowledge/reader/tavily_reader.py +3 -3
- agno/knowledge/reader/text_reader.py +15 -14
- agno/knowledge/reader/utils/__init__.py +17 -0
- agno/knowledge/reader/utils/spreadsheet.py +114 -0
- agno/knowledge/reader/web_search_reader.py +8 -65
- agno/knowledge/reader/website_reader.py +16 -13
- agno/knowledge/reader/wikipedia_reader.py +36 -3
- agno/knowledge/reader/youtube_reader.py +3 -2
- agno/knowledge/remote_content/__init__.py +33 -0
- agno/knowledge/remote_content/config.py +266 -0
- agno/knowledge/remote_content/remote_content.py +105 -17
- agno/knowledge/utils.py +76 -22
- agno/learn/__init__.py +71 -0
- agno/learn/config.py +463 -0
- agno/learn/curate.py +185 -0
- agno/learn/machine.py +725 -0
- agno/learn/schemas.py +1114 -0
- agno/learn/stores/__init__.py +38 -0
- agno/learn/stores/decision_log.py +1156 -0
- agno/learn/stores/entity_memory.py +3275 -0
- agno/learn/stores/learned_knowledge.py +1583 -0
- agno/learn/stores/protocol.py +117 -0
- agno/learn/stores/session_context.py +1217 -0
- agno/learn/stores/user_memory.py +1495 -0
- agno/learn/stores/user_profile.py +1220 -0
- agno/learn/utils.py +209 -0
- agno/media.py +22 -6
- agno/memory/__init__.py +14 -1
- agno/memory/manager.py +223 -8
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +66 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/aimlapi/aimlapi.py +17 -0
- agno/models/anthropic/claude.py +434 -59
- agno/models/aws/bedrock.py +121 -20
- agno/models/aws/claude.py +131 -274
- agno/models/azure/ai_foundry.py +10 -6
- agno/models/azure/openai_chat.py +33 -10
- agno/models/base.py +1162 -561
- agno/models/cerebras/cerebras.py +120 -24
- agno/models/cerebras/cerebras_openai.py +21 -2
- agno/models/cohere/chat.py +65 -6
- agno/models/cometapi/cometapi.py +18 -1
- agno/models/dashscope/dashscope.py +2 -3
- agno/models/deepinfra/deepinfra.py +18 -1
- agno/models/deepseek/deepseek.py +69 -3
- agno/models/fireworks/fireworks.py +18 -1
- agno/models/google/gemini.py +959 -89
- agno/models/google/utils.py +22 -0
- agno/models/groq/groq.py +48 -18
- agno/models/huggingface/huggingface.py +17 -6
- agno/models/ibm/watsonx.py +16 -6
- agno/models/internlm/internlm.py +18 -1
- agno/models/langdb/langdb.py +13 -1
- agno/models/litellm/chat.py +88 -9
- agno/models/litellm/litellm_openai.py +18 -1
- agno/models/message.py +24 -5
- agno/models/meta/llama.py +40 -13
- agno/models/meta/llama_openai.py +22 -21
- agno/models/metrics.py +12 -0
- agno/models/mistral/mistral.py +8 -4
- agno/models/n1n/__init__.py +3 -0
- agno/models/n1n/n1n.py +57 -0
- agno/models/nebius/nebius.py +6 -7
- agno/models/nvidia/nvidia.py +20 -3
- agno/models/ollama/__init__.py +2 -0
- agno/models/ollama/chat.py +17 -6
- agno/models/ollama/responses.py +100 -0
- agno/models/openai/__init__.py +2 -0
- agno/models/openai/chat.py +117 -26
- agno/models/openai/open_responses.py +46 -0
- agno/models/openai/responses.py +110 -32
- agno/models/openrouter/__init__.py +2 -0
- agno/models/openrouter/openrouter.py +67 -2
- agno/models/openrouter/responses.py +146 -0
- agno/models/perplexity/perplexity.py +19 -1
- agno/models/portkey/portkey.py +7 -6
- agno/models/requesty/requesty.py +19 -2
- agno/models/response.py +20 -2
- agno/models/sambanova/sambanova.py +20 -3
- agno/models/siliconflow/siliconflow.py +19 -2
- agno/models/together/together.py +20 -3
- agno/models/vercel/v0.py +20 -3
- agno/models/vertexai/claude.py +124 -4
- agno/models/vllm/vllm.py +19 -14
- agno/models/xai/xai.py +19 -2
- agno/os/app.py +467 -137
- agno/os/auth.py +253 -5
- agno/os/config.py +22 -0
- agno/os/interfaces/a2a/a2a.py +7 -6
- agno/os/interfaces/a2a/router.py +635 -26
- agno/os/interfaces/a2a/utils.py +32 -33
- agno/os/interfaces/agui/agui.py +5 -3
- agno/os/interfaces/agui/router.py +26 -16
- agno/os/interfaces/agui/utils.py +97 -57
- agno/os/interfaces/base.py +7 -7
- agno/os/interfaces/slack/router.py +16 -7
- agno/os/interfaces/slack/slack.py +7 -7
- agno/os/interfaces/whatsapp/router.py +35 -7
- agno/os/interfaces/whatsapp/security.py +3 -1
- agno/os/interfaces/whatsapp/whatsapp.py +11 -8
- agno/os/managers.py +326 -0
- agno/os/mcp.py +652 -79
- agno/os/middleware/__init__.py +4 -0
- agno/os/middleware/jwt.py +718 -115
- agno/os/middleware/trailing_slash.py +27 -0
- agno/os/router.py +105 -1558
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +655 -0
- agno/os/routers/agents/schema.py +288 -0
- agno/os/routers/components/__init__.py +3 -0
- agno/os/routers/components/components.py +475 -0
- agno/os/routers/database.py +155 -0
- agno/os/routers/evals/evals.py +111 -18
- agno/os/routers/evals/schemas.py +38 -5
- agno/os/routers/evals/utils.py +80 -11
- agno/os/routers/health.py +3 -3
- agno/os/routers/knowledge/knowledge.py +284 -35
- agno/os/routers/knowledge/schemas.py +14 -2
- agno/os/routers/memory/memory.py +274 -11
- agno/os/routers/memory/schemas.py +44 -3
- agno/os/routers/metrics/metrics.py +30 -15
- agno/os/routers/metrics/schemas.py +10 -6
- agno/os/routers/registry/__init__.py +3 -0
- agno/os/routers/registry/registry.py +337 -0
- agno/os/routers/session/session.py +143 -14
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +550 -0
- agno/os/routers/teams/schema.py +280 -0
- agno/os/routers/traces/__init__.py +3 -0
- agno/os/routers/traces/schemas.py +414 -0
- agno/os/routers/traces/traces.py +549 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +757 -0
- agno/os/routers/workflows/schema.py +139 -0
- agno/os/schema.py +157 -584
- agno/os/scopes.py +469 -0
- agno/os/settings.py +3 -0
- agno/os/utils.py +574 -185
- agno/reasoning/anthropic.py +85 -1
- agno/reasoning/azure_ai_foundry.py +93 -1
- agno/reasoning/deepseek.py +102 -2
- agno/reasoning/default.py +6 -7
- agno/reasoning/gemini.py +87 -3
- agno/reasoning/groq.py +109 -2
- agno/reasoning/helpers.py +6 -7
- agno/reasoning/manager.py +1238 -0
- agno/reasoning/ollama.py +93 -1
- agno/reasoning/openai.py +115 -1
- agno/reasoning/vertexai.py +85 -1
- agno/registry/__init__.py +3 -0
- agno/registry/registry.py +68 -0
- agno/remote/__init__.py +3 -0
- agno/remote/base.py +581 -0
- agno/run/__init__.py +2 -4
- agno/run/agent.py +134 -19
- agno/run/base.py +49 -1
- agno/run/cancel.py +65 -52
- agno/run/cancellation_management/__init__.py +9 -0
- agno/run/cancellation_management/base.py +78 -0
- agno/run/cancellation_management/in_memory_cancellation_manager.py +100 -0
- agno/run/cancellation_management/redis_cancellation_manager.py +236 -0
- agno/run/requirement.py +181 -0
- agno/run/team.py +111 -19
- agno/run/workflow.py +2 -1
- agno/session/agent.py +57 -92
- agno/session/summary.py +1 -1
- agno/session/team.py +62 -115
- agno/session/workflow.py +353 -57
- agno/skills/__init__.py +17 -0
- agno/skills/agent_skills.py +377 -0
- agno/skills/errors.py +32 -0
- agno/skills/loaders/__init__.py +4 -0
- agno/skills/loaders/base.py +27 -0
- agno/skills/loaders/local.py +216 -0
- agno/skills/skill.py +65 -0
- agno/skills/utils.py +107 -0
- agno/skills/validator.py +277 -0
- agno/table.py +10 -0
- agno/team/__init__.py +5 -1
- agno/team/remote.py +447 -0
- agno/team/team.py +3769 -2202
- agno/tools/brandfetch.py +27 -18
- agno/tools/browserbase.py +225 -16
- agno/tools/crawl4ai.py +3 -0
- agno/tools/duckduckgo.py +25 -71
- agno/tools/exa.py +0 -21
- agno/tools/file.py +14 -13
- agno/tools/file_generation.py +12 -6
- agno/tools/firecrawl.py +15 -7
- agno/tools/function.py +94 -113
- agno/tools/google_bigquery.py +11 -2
- agno/tools/google_drive.py +4 -3
- agno/tools/knowledge.py +9 -4
- agno/tools/mcp/mcp.py +301 -18
- agno/tools/mcp/multi_mcp.py +269 -14
- agno/tools/mem0.py +11 -10
- agno/tools/memory.py +47 -46
- agno/tools/mlx_transcribe.py +10 -7
- agno/tools/models/nebius.py +5 -5
- agno/tools/models_labs.py +20 -10
- agno/tools/nano_banana.py +151 -0
- agno/tools/parallel.py +0 -7
- agno/tools/postgres.py +76 -36
- agno/tools/python.py +14 -6
- agno/tools/reasoning.py +30 -23
- agno/tools/redshift.py +406 -0
- agno/tools/shopify.py +1519 -0
- agno/tools/spotify.py +919 -0
- agno/tools/tavily.py +4 -1
- agno/tools/toolkit.py +253 -18
- agno/tools/websearch.py +93 -0
- agno/tools/website.py +1 -1
- agno/tools/wikipedia.py +1 -1
- agno/tools/workflow.py +56 -48
- agno/tools/yfinance.py +12 -11
- agno/tracing/__init__.py +12 -0
- agno/tracing/exporter.py +161 -0
- agno/tracing/schemas.py +276 -0
- agno/tracing/setup.py +112 -0
- agno/utils/agent.py +251 -10
- agno/utils/cryptography.py +22 -0
- agno/utils/dttm.py +33 -0
- agno/utils/events.py +264 -7
- agno/utils/hooks.py +111 -3
- agno/utils/http.py +161 -2
- agno/utils/mcp.py +49 -8
- agno/utils/media.py +22 -1
- agno/utils/models/ai_foundry.py +9 -2
- agno/utils/models/claude.py +20 -5
- agno/utils/models/cohere.py +9 -2
- agno/utils/models/llama.py +9 -2
- agno/utils/models/mistral.py +4 -2
- agno/utils/os.py +0 -0
- agno/utils/print_response/agent.py +99 -16
- agno/utils/print_response/team.py +223 -24
- agno/utils/print_response/workflow.py +0 -2
- agno/utils/prompts.py +8 -6
- agno/utils/remote.py +23 -0
- agno/utils/response.py +1 -13
- agno/utils/string.py +91 -2
- agno/utils/team.py +62 -12
- agno/utils/tokens.py +657 -0
- agno/vectordb/base.py +15 -2
- agno/vectordb/cassandra/cassandra.py +1 -1
- agno/vectordb/chroma/__init__.py +2 -1
- agno/vectordb/chroma/chromadb.py +468 -23
- agno/vectordb/clickhouse/clickhousedb.py +1 -1
- agno/vectordb/couchbase/couchbase.py +6 -2
- agno/vectordb/lancedb/lance_db.py +7 -38
- agno/vectordb/lightrag/lightrag.py +7 -6
- agno/vectordb/milvus/milvus.py +118 -84
- agno/vectordb/mongodb/__init__.py +2 -1
- agno/vectordb/mongodb/mongodb.py +14 -31
- agno/vectordb/pgvector/pgvector.py +120 -66
- agno/vectordb/pineconedb/pineconedb.py +2 -19
- agno/vectordb/qdrant/__init__.py +2 -1
- agno/vectordb/qdrant/qdrant.py +33 -56
- agno/vectordb/redis/__init__.py +2 -1
- agno/vectordb/redis/redisdb.py +19 -31
- agno/vectordb/singlestore/singlestore.py +17 -9
- agno/vectordb/surrealdb/surrealdb.py +2 -38
- agno/vectordb/weaviate/__init__.py +2 -1
- agno/vectordb/weaviate/weaviate.py +7 -3
- agno/workflow/__init__.py +5 -1
- agno/workflow/agent.py +2 -2
- agno/workflow/condition.py +12 -10
- agno/workflow/loop.py +28 -9
- agno/workflow/parallel.py +21 -13
- agno/workflow/remote.py +362 -0
- agno/workflow/router.py +12 -9
- agno/workflow/step.py +261 -36
- agno/workflow/steps.py +12 -8
- agno/workflow/types.py +40 -77
- agno/workflow/workflow.py +939 -213
- {agno-2.2.13.dist-info → agno-2.4.3.dist-info}/METADATA +134 -181
- agno-2.4.3.dist-info/RECORD +677 -0
- {agno-2.2.13.dist-info → agno-2.4.3.dist-info}/WHEEL +1 -1
- agno/tools/googlesearch.py +0 -98
- agno/tools/memori.py +0 -339
- agno-2.2.13.dist-info/RECORD +0 -575
- {agno-2.2.13.dist-info → agno-2.4.3.dist-info}/licenses/LICENSE +0 -0
- {agno-2.2.13.dist-info → agno-2.4.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1238 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ReasoningManager - Centralized manager for all reasoning operations.
|
|
3
|
+
|
|
4
|
+
This module consolidates reasoning logic from the Agent class into a single,
|
|
5
|
+
maintainable manager that handles:
|
|
6
|
+
- Native reasoning models (DeepSeek, Anthropic, OpenAI, Gemini, etc.)
|
|
7
|
+
- Default Chain-of-Thought reasoning
|
|
8
|
+
- Both streaming and non-streaming modes
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
from dataclasses import dataclass, field
|
|
14
|
+
from enum import Enum
|
|
15
|
+
from typing import (
|
|
16
|
+
TYPE_CHECKING,
|
|
17
|
+
AsyncIterator,
|
|
18
|
+
Callable,
|
|
19
|
+
Dict,
|
|
20
|
+
Iterator,
|
|
21
|
+
List,
|
|
22
|
+
Literal,
|
|
23
|
+
Optional,
|
|
24
|
+
Tuple,
|
|
25
|
+
Union,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
from agno.models.base import Model
|
|
29
|
+
from agno.models.message import Message
|
|
30
|
+
from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
|
|
31
|
+
from agno.run.base import RunContext
|
|
32
|
+
from agno.run.messages import RunMessages
|
|
33
|
+
from agno.tools import Toolkit
|
|
34
|
+
from agno.tools.function import Function
|
|
35
|
+
from agno.utils.log import log_debug, log_error, log_info, log_warning
|
|
36
|
+
|
|
37
|
+
if TYPE_CHECKING:
|
|
38
|
+
from agno.agent import Agent
|
|
39
|
+
from agno.run.agent import RunOutput
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class ReasoningEventType(str, Enum):
    """Kinds of events emitted while a reasoning run is in progress.

    Mixes in ``str`` so that members compare equal to their raw string
    values (e.g. ``ReasoningEventType.started == "reasoning_started"``),
    which keeps serialization and comparisons against wire formats simple.
    """

    started = "reasoning_started"
    content_delta = "reasoning_content_delta"
    step = "reasoning_step"
    completed = "reasoning_completed"
    error = "reasoning_error"
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
@dataclass
class ReasoningEvent:
    """A provider-agnostic reasoning event.

    The ReasoningManager emits these without knowing whether the consumer is
    an Agent or a Team; callers convert them into their own event types.
    Which optional fields are populated depends on ``event_type``.
    """

    event_type: ReasoningEventType
    # Populated for content_delta events.
    reasoning_content: Optional[str] = None
    # Populated for step events.
    reasoning_step: Optional[ReasoningStep] = None
    # Populated for completed events.
    reasoning_steps: List[ReasoningStep] = field(default_factory=list)
    # Populated for error events.
    error: Optional[str] = None
    # Message to append to run_messages (used for native reasoning).
    message: Optional[Message] = None
    # Every reasoning message produced (used to update run_output).
    reasoning_messages: List[Message] = field(default_factory=list)
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
@dataclass
class ReasoningConfig:
    """Settings that control how reasoning runs are executed.

    When ``reasoning_agent`` is provided it takes precedence over building a
    reasoning agent from ``reasoning_model`` and the remaining fields.
    """

    reasoning_model: Optional[Model] = None
    reasoning_agent: Optional["Agent"] = None
    # Bounds on the number of Chain-of-Thought steps.
    min_steps: int = 1
    max_steps: int = 10
    tools: Optional[List[Union[Toolkit, Callable, Function, Dict]]] = None
    tool_call_limit: Optional[int] = None
    use_json_mode: bool = False
    telemetry: bool = True
    debug_mode: bool = False
    debug_level: Literal[1, 2] = 1
    run_context: Optional[RunContext] = None
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
@dataclass
class ReasoningResult:
    """Outcome of a single reasoning operation.

    ``success`` is False when reasoning could not run or produced nothing;
    in that case ``error`` carries a short human-readable explanation.
    """

    # The message to append to the run (None on failure).
    message: Optional[Message] = None
    # Structured reasoning steps extracted from the response.
    steps: List[ReasoningStep] = field(default_factory=list)
    # All raw reasoning messages produced during the operation.
    reasoning_messages: List[Message] = field(default_factory=list)
    success: bool = True
    error: Optional[str] = None
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
class ReasoningManager:
|
|
105
|
+
"""
|
|
106
|
+
Centralized manager for all reasoning operations.
|
|
107
|
+
|
|
108
|
+
Handles both native reasoning models (DeepSeek, Anthropic, OpenAI, etc.)
|
|
109
|
+
and default Chain-of-Thought reasoning with a clean, unified interface.
|
|
110
|
+
"""
|
|
111
|
+
|
|
112
|
+
def __init__(self, config: ReasoningConfig):
    """Create a manager bound to one ReasoningConfig."""
    self.config = config
    # Lazily-populated caches. NOTE(review): these are assigned here but
    # never read or written by the methods visible in this module —
    # confirm whether they are still needed.
    self._reasoning_agent: Optional["Agent"] = None
    self._model_type: Optional[str] = None
|
|
116
|
+
|
|
117
|
+
@property
def reasoning_model(self) -> Optional[Model]:
    """The reasoning model from the active configuration, if any."""
    return self.config.reasoning_model
|
|
120
|
+
|
|
121
|
+
def _detect_model_type(self, model: Model) -> Optional[str]:
    """Classify *model* as one of the supported native-reasoning providers.

    Returns a short provider tag (e.g. ``"deepseek"``, ``"anthropic"``) or
    None when the model is not a recognized native reasoning model.
    Provider modules are imported lazily so only the ones needed are loaded.
    """
    from agno.reasoning.anthropic import is_anthropic_reasoning_model
    from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
    from agno.reasoning.deepseek import is_deepseek_reasoning_model
    from agno.reasoning.gemini import is_gemini_reasoning_model
    from agno.reasoning.groq import is_groq_reasoning_model
    from agno.reasoning.ollama import is_ollama_reasoning_model
    from agno.reasoning.openai import is_openai_reasoning_model
    from agno.reasoning.vertexai import is_vertexai_reasoning_model

    # Order matters: predicates are evaluated in the original priority order,
    # and the first match wins.
    checks = (
        ("deepseek", is_deepseek_reasoning_model),
        ("anthropic", is_anthropic_reasoning_model),
        ("openai", is_openai_reasoning_model),
        ("groq", is_groq_reasoning_model),
        ("ollama", is_ollama_reasoning_model),
        ("ai_foundry", is_ai_foundry_reasoning_model),
        ("gemini", is_gemini_reasoning_model),
        ("vertexai", is_vertexai_reasoning_model),
    )
    for provider_tag, predicate in checks:
        if predicate(model):
            return provider_tag
    return None
|
|
149
|
+
|
|
150
|
+
def _get_reasoning_agent(self, model: Model) -> "Agent":
    """Return the agent that drives native-model reasoning.

    A user-supplied ``reasoning_agent`` on the config always wins; otherwise
    a fresh reasoning agent is constructed around *model* using the
    configured telemetry/debug settings and run context.
    """
    if self.config.reasoning_agent is not None:
        return self.config.reasoning_agent

    from agno.reasoning.helpers import get_reasoning_agent

    cfg = self.config
    return get_reasoning_agent(
        reasoning_model=model,
        telemetry=cfg.telemetry,
        debug_mode=cfg.debug_mode,
        debug_level=cfg.debug_level,
        run_context=cfg.run_context,
    )
|
|
164
|
+
|
|
165
|
+
def _get_default_reasoning_agent(self, model: Model) -> Optional["Agent"]:
    """Return the agent used for default Chain-of-Thought reasoning.

    A user-supplied ``reasoning_agent`` on the config takes precedence;
    otherwise a default CoT agent is built from *model* plus the step
    bounds, tools, and debug settings carried by the config.
    """
    if self.config.reasoning_agent is not None:
        return self.config.reasoning_agent

    from agno.reasoning.default import get_default_reasoning_agent

    cfg = self.config
    return get_default_reasoning_agent(
        reasoning_model=model,
        min_steps=cfg.min_steps,
        max_steps=cfg.max_steps,
        tools=cfg.tools,
        tool_call_limit=cfg.tool_call_limit,
        use_json_mode=cfg.use_json_mode,
        telemetry=cfg.telemetry,
        debug_mode=cfg.debug_mode,
        debug_level=cfg.debug_level,
        run_context=cfg.run_context,
    )
|
|
184
|
+
|
|
185
|
+
def is_native_reasoning_model(self, model: Optional[Model] = None) -> bool:
    """Return True when the model is a native reasoning model.

    Falls back to the configured ``reasoning_model`` when *model* is not
    given (original truthiness-based fallback preserved).
    """
    target = model or self.config.reasoning_model
    return target is not None and self._detect_model_type(target) is not None
|
|
191
|
+
|
|
192
|
+
# =========================================================================
|
|
193
|
+
# Native Model Reasoning (Non-Streaming)
|
|
194
|
+
# =========================================================================
|
|
195
|
+
|
|
196
|
+
def get_native_reasoning(self, model: Model, messages: List[Message]) -> ReasoningResult:
    """Run a native reasoning model synchronously (non-streaming).

    Detects the provider flavor of *model*, invokes the matching provider
    helper with the reasoning agent and *messages*, and wraps the returned
    message in a ReasoningResult.

    Returns:
        A successful ReasoningResult carrying the reasoning message, a single
        ReasoningStep with its content, and the message list — or a failed
        result (``success=False`` with ``error`` set) when the model is not a
        native reasoning model, the provider helper raises, or it returns None.
    """
    model_type = self._detect_model_type(model)
    if model_type is None:
        return ReasoningResult(success=False, error="Not a native reasoning model")

    reasoning_agent = self._get_reasoning_agent(model)
    reasoning_message: Optional[Message] = None

    try:
        # Single dispatch point replaces eight near-identical if/elif branches.
        resolved = self._resolve_sync_reasoning(model_type)
        if resolved is not None:
            reasoning_fn, label = resolved
            log_debug(f"Starting {label} Reasoning", center=True, symbol="=")
            reasoning_message = reasoning_fn(reasoning_agent, messages)
    except Exception as e:
        log_error(f"Reasoning error: {e}")
        return ReasoningResult(success=False, error=str(e))

    if reasoning_message is None:
        return ReasoningResult(
            success=False,
            error="Reasoning response is None",
        )

    return ReasoningResult(
        message=reasoning_message,
        steps=[ReasoningStep(result=reasoning_message.content)],
        reasoning_messages=[reasoning_message],
        success=True,
    )

def _resolve_sync_reasoning(self, model_type: str):
    """Return ``(reasoning_fn, display_label)`` for a provider tag, or None.

    Provider modules are imported lazily so only the one in use is loaded.
    The label is interpolated into the "Starting {label} Reasoning" log line,
    matching the original per-provider messages exactly.
    """
    if model_type == "deepseek":
        from agno.reasoning.deepseek import get_deepseek_reasoning

        return get_deepseek_reasoning, "DeepSeek"
    if model_type == "anthropic":
        from agno.reasoning.anthropic import get_anthropic_reasoning

        return get_anthropic_reasoning, "Anthropic Claude"
    if model_type == "openai":
        from agno.reasoning.openai import get_openai_reasoning

        return get_openai_reasoning, "OpenAI"
    if model_type == "groq":
        from agno.reasoning.groq import get_groq_reasoning

        return get_groq_reasoning, "Groq"
    if model_type == "ollama":
        from agno.reasoning.ollama import get_ollama_reasoning

        return get_ollama_reasoning, "Ollama"
    if model_type == "ai_foundry":
        from agno.reasoning.azure_ai_foundry import get_ai_foundry_reasoning

        return get_ai_foundry_reasoning, "Azure AI Foundry"
    if model_type == "gemini":
        from agno.reasoning.gemini import get_gemini_reasoning

        return get_gemini_reasoning, "Gemini"
    if model_type == "vertexai":
        from agno.reasoning.vertexai import get_vertexai_reasoning

        return get_vertexai_reasoning, "VertexAI"
    return None
|
|
270
|
+
|
|
271
|
+
async def aget_native_reasoning(self, model: Model, messages: List[Message]) -> ReasoningResult:
    """Run a native reasoning model asynchronously (non-streaming).

    Async counterpart of ``get_native_reasoning``: detects the provider
    flavor of *model*, invokes the matching provider helper, and wraps the
    returned message in a ReasoningResult.

    Returns:
        A successful ReasoningResult with the reasoning message, one
        ReasoningStep holding its content, and the message list — or a
        failed result (``success=False`` with ``error`` set) when the model
        is not a native reasoning model, the helper raises, or returns None.
    """
    model_type = self._detect_model_type(model)
    if model_type is None:
        return ReasoningResult(success=False, error="Not a native reasoning model")

    reasoning_agent = self._get_reasoning_agent(model)
    reasoning_message: Optional[Message] = None

    try:
        # Single dispatch point replaces eight near-identical if/elif branches.
        resolved = self._resolve_async_reasoning(model_type)
        if resolved is not None:
            reasoning_fn, label, is_async = resolved
            log_debug(f"Starting {label} Reasoning", center=True, symbol="=")
            if is_async:
                reasoning_message = await reasoning_fn(reasoning_agent, messages)
            else:
                reasoning_message = reasoning_fn(reasoning_agent, messages)
    except Exception as e:
        log_error(f"Reasoning error: {e}")
        return ReasoningResult(success=False, error=str(e))

    if reasoning_message is None:
        return ReasoningResult(
            success=False,
            error="Reasoning response is None",
        )

    return ReasoningResult(
        message=reasoning_message,
        steps=[ReasoningStep(result=reasoning_message.content)],
        reasoning_messages=[reasoning_message],
        success=True,
    )

def _resolve_async_reasoning(self, model_type: str):
    """Return ``(reasoning_fn, display_label, is_async)`` for a provider tag.

    Provider modules are imported lazily. NOTE(review): the ollama and
    ai_foundry entries use the synchronous helpers (no ``aget_`` variant is
    imported here, matching the original async path) and are therefore
    flagged ``is_async=False`` so the caller invokes them without awaiting —
    confirm whether async variants exist upstream. Returns None for unknown
    tags.
    """
    if model_type == "deepseek":
        from agno.reasoning.deepseek import aget_deepseek_reasoning

        return aget_deepseek_reasoning, "DeepSeek", True
    if model_type == "anthropic":
        from agno.reasoning.anthropic import aget_anthropic_reasoning

        return aget_anthropic_reasoning, "Anthropic Claude", True
    if model_type == "openai":
        from agno.reasoning.openai import aget_openai_reasoning

        return aget_openai_reasoning, "OpenAI", True
    if model_type == "groq":
        from agno.reasoning.groq import aget_groq_reasoning

        return aget_groq_reasoning, "Groq", True
    if model_type == "ollama":
        from agno.reasoning.ollama import get_ollama_reasoning

        return get_ollama_reasoning, "Ollama", False
    if model_type == "ai_foundry":
        from agno.reasoning.azure_ai_foundry import get_ai_foundry_reasoning

        return get_ai_foundry_reasoning, "Azure AI Foundry", False
    if model_type == "gemini":
        from agno.reasoning.gemini import aget_gemini_reasoning

        return aget_gemini_reasoning, "Gemini", True
    if model_type == "vertexai":
        from agno.reasoning.vertexai import aget_vertexai_reasoning

        return aget_vertexai_reasoning, "VertexAI", True
    return None
|
|
345
|
+
|
|
346
|
+
# =========================================================================
|
|
347
|
+
# Native Model Reasoning (Streaming)
|
|
348
|
+
# =========================================================================
|
|
349
|
+
|
|
350
|
+
def stream_native_reasoning(
    self, model: Model, messages: List[Message]
) -> Iterator[Tuple[Optional[str], Optional[ReasoningResult]]]:
    """
    Stream reasoning from a native reasoning model.

    Args:
        model: The native reasoning model to run.
        messages: The messages to send to the reasoning model.

    Yields:
        Tuple of (reasoning_content_delta, final_result)
        - During streaming: (reasoning_content_delta, None)
        - At the end: (None, ReasoningResult)
    """
    model_type = self._detect_model_type(model)
    if model_type is None:
        yield (None, ReasoningResult(success=False, error="Not a native reasoning model"))
        return

    reasoning_agent = self._get_reasoning_agent(model)

    # Resolve the provider-specific streaming helper. Imports stay lazy so a
    # provider's module is only loaded when that provider is actually used.
    # Every branch below shares identical consume/finalize logic, so the
    # per-provider part is reduced to (helper function, log label).
    stream_fn = None
    log_label = None
    if model_type == "deepseek":
        from agno.reasoning.deepseek import get_deepseek_reasoning_stream as stream_fn

        log_label = "DeepSeek"
    elif model_type == "anthropic":
        from agno.reasoning.anthropic import get_anthropic_reasoning_stream as stream_fn

        log_label = "Anthropic Claude"
    elif model_type == "gemini":
        from agno.reasoning.gemini import get_gemini_reasoning_stream as stream_fn

        log_label = "Gemini"
    elif model_type == "openai":
        from agno.reasoning.openai import get_openai_reasoning_stream as stream_fn

        log_label = "OpenAI"
    elif model_type == "vertexai":
        from agno.reasoning.vertexai import get_vertexai_reasoning_stream as stream_fn

        log_label = "VertexAI"
    elif model_type == "ai_foundry":
        from agno.reasoning.azure_ai_foundry import get_ai_foundry_reasoning_stream as stream_fn

        log_label = "Azure AI Foundry"
    elif model_type == "groq":
        from agno.reasoning.groq import get_groq_reasoning_stream as stream_fn

        log_label = "Groq"
    elif model_type == "ollama":
        from agno.reasoning.ollama import get_ollama_reasoning_stream as stream_fn

        log_label = "Ollama"

    if stream_fn is None:
        # Fall back to non-streaming for other models
        result = self.get_native_reasoning(model, messages)
        yield (None, result)
        return

    log_debug(f"Starting {log_label} Reasoning (streaming)", center=True, symbol="=")
    final_message: Optional[Message] = None
    for reasoning_delta, message in stream_fn(reasoning_agent, messages):
        if reasoning_delta is not None:
            yield (reasoning_delta, None)
        if message is not None:
            final_message = message

    if final_message:
        yield (
            None,
            ReasoningResult(
                message=final_message,
                steps=[ReasoningStep(result=final_message.content)],
                reasoning_messages=[final_message],
                success=True,
            ),
        )
    else:
        yield (None, ReasoningResult(success=False, error="No reasoning content"))
|
|
565
|
+
|
|
566
|
+
async def astream_native_reasoning(
    self, model: Model, messages: List[Message]
) -> AsyncIterator[Tuple[Optional[str], Optional[ReasoningResult]]]:
    """
    Stream reasoning from a native reasoning model asynchronously.

    Args:
        model: The native reasoning model to run.
        messages: The messages to send to the reasoning model.

    Yields:
        Tuple of (reasoning_content_delta, final_result)
        - During streaming: (reasoning_content_delta, None)
        - At the end: (None, ReasoningResult)
    """
    model_type = self._detect_model_type(model)
    if model_type is None:
        yield (None, ReasoningResult(success=False, error="Not a native reasoning model"))
        return

    reasoning_agent = self._get_reasoning_agent(model)

    # Resolve the provider-specific async streaming helper. Imports stay lazy
    # so a provider's module is only loaded when that provider is actually
    # used. Every branch shares identical consume/finalize logic, so the
    # per-provider part is reduced to (helper function, log label).
    stream_fn = None
    log_label = None
    if model_type == "deepseek":
        from agno.reasoning.deepseek import aget_deepseek_reasoning_stream as stream_fn

        log_label = "DeepSeek"
    elif model_type == "anthropic":
        from agno.reasoning.anthropic import aget_anthropic_reasoning_stream as stream_fn

        log_label = "Anthropic Claude"
    elif model_type == "gemini":
        from agno.reasoning.gemini import aget_gemini_reasoning_stream as stream_fn

        log_label = "Gemini"
    elif model_type == "openai":
        from agno.reasoning.openai import aget_openai_reasoning_stream as stream_fn

        log_label = "OpenAI"
    elif model_type == "vertexai":
        from agno.reasoning.vertexai import aget_vertexai_reasoning_stream as stream_fn

        log_label = "VertexAI"
    elif model_type == "ai_foundry":
        from agno.reasoning.azure_ai_foundry import aget_ai_foundry_reasoning_stream as stream_fn

        log_label = "Azure AI Foundry"
    elif model_type == "groq":
        from agno.reasoning.groq import aget_groq_reasoning_stream as stream_fn

        log_label = "Groq"
    elif model_type == "ollama":
        from agno.reasoning.ollama import aget_ollama_reasoning_stream as stream_fn

        log_label = "Ollama"

    if stream_fn is None:
        # Fall back to non-streaming for other models
        result = await self.aget_native_reasoning(model, messages)
        yield (None, result)
        return

    log_debug(f"Starting {log_label} Reasoning (streaming)", center=True, symbol="=")
    final_message: Optional[Message] = None
    async for reasoning_delta, message in stream_fn(reasoning_agent, messages):
        if reasoning_delta is not None:
            yield (reasoning_delta, None)
        if message is not None:
            final_message = message

    if final_message:
        yield (
            None,
            ReasoningResult(
                message=final_message,
                steps=[ReasoningStep(result=final_message.content)],
                reasoning_messages=[final_message],
                success=True,
            ),
        )
    else:
        yield (None, ReasoningResult(success=False, error="No reasoning content"))
|
|
781
|
+
|
|
782
|
+
# =========================================================================
|
|
783
|
+
# Default Chain-of-Thought Reasoning
|
|
784
|
+
# =========================================================================
|
|
785
|
+
|
|
786
|
+
def run_default_reasoning(
    self, model: Model, run_messages: RunMessages
) -> Iterator[Tuple[Optional[ReasoningStep], Optional[ReasoningResult]]]:
    """
    Run default Chain-of-Thought reasoning.

    Repeatedly invokes the dedicated reasoning agent until it signals a final
    answer, an error occurs, or the configured step budget is exhausted, then
    merges the reasoning conversation back into ``run_messages``.

    Args:
        model: Model used to build the default reasoning agent.
        run_messages: The run's messages; mutated in place at the end via
            ``update_messages_with_reasoning``.

    Yields:
        Tuple of (reasoning_step, final_result)
        - During reasoning: (ReasoningStep, None)
        - At the end: (None, ReasoningResult)
    """
    from agno.reasoning.helpers import get_next_action, update_messages_with_reasoning

    reasoning_agent = self._get_default_reasoning_agent(model)
    if reasoning_agent is None:
        yield (None, ReasoningResult(success=False, error="Reasoning agent is None"))
        return

    # Validate reasoning agent output schema: the loop below relies on the
    # structured ReasoningSteps content, so any other schema is rejected early.
    if (
        reasoning_agent.output_schema is not None
        and isinstance(reasoning_agent.output_schema, type)
        and not issubclass(reasoning_agent.output_schema, ReasoningSteps)
    ):
        yield (
            None,
            ReasoningResult(
                success=False,
                error="Reasoning agent response model should be ReasoningSteps",
            ),
        )
        return

    # NOTE(review): step_count starts at 1 with a strict "<" bound, so at most
    # max_steps - 1 loop iterations run — confirm this is the intended budget.
    step_count = 1
    next_action = NextAction.CONTINUE
    reasoning_messages: List[Message] = []
    all_reasoning_steps: List[ReasoningStep] = []

    log_debug("Starting Reasoning", center=True, symbol="=")

    while next_action == NextAction.CONTINUE and step_count < self.config.max_steps:
        log_debug(f"Step {step_count}", center=True, symbol="=")
        try:
            reasoning_agent_response: RunOutput = reasoning_agent.run(input=run_messages.get_input_messages())

            # Any malformed response ends the loop; whatever steps were
            # gathered so far are still returned in the final ReasoningResult.
            if reasoning_agent_response.content is None or reasoning_agent_response.messages is None:
                log_warning("Reasoning error. Reasoning response is empty")
                break

            if isinstance(reasoning_agent_response.content, str):
                log_warning("Reasoning error. Content is a string, not structured output")
                break

            if (
                reasoning_agent_response.content.reasoning_steps is None
                or len(reasoning_agent_response.content.reasoning_steps) == 0
            ):
                log_warning("Reasoning error. Reasoning steps are empty")
                break

            reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
            all_reasoning_steps.extend(reasoning_steps)

            # Yield each reasoning step
            for step in reasoning_steps:
                yield (step, None)

            # Extract reasoning messages: keep everything from the first
            # assistant message onward (drops system/user preamble).
            first_assistant_index = next(
                (i for i, m in enumerate(reasoning_agent_response.messages) if m.role == "assistant"),
                len(reasoning_agent_response.messages),
            )
            reasoning_messages = reasoning_agent_response.messages[first_assistant_index:]

            # Get the next action from the most recent step
            next_action = get_next_action(reasoning_steps[-1])
            if next_action == NextAction.FINAL_ANSWER:
                break

        except Exception as e:
            log_error(f"Reasoning error: {e}")
            break

        step_count += 1

    log_debug(f"Total Reasoning steps: {len(all_reasoning_steps)}")
    log_debug("Reasoning finished", center=True, symbol="=")

    # Update messages with reasoning (mutates run_messages in place)
    update_messages_with_reasoning(
        run_messages=run_messages,
        reasoning_messages=reasoning_messages,
    )

    # Yield final result. success=True even when the loop broke early: partial
    # reasoning is still reported as a (possibly empty) set of steps.
    yield (
        None,
        ReasoningResult(
            steps=all_reasoning_steps,
            reasoning_messages=reasoning_messages,
            success=True,
        ),
    )
|
|
889
|
+
|
|
890
|
+
async def arun_default_reasoning(
    self, model: Model, run_messages: RunMessages
) -> AsyncIterator[Tuple[Optional[ReasoningStep], Optional[ReasoningResult]]]:
    """
    Run default Chain-of-Thought reasoning asynchronously.

    Repeatedly invokes the dedicated reasoning agent until it signals a final
    answer, an error occurs, or the configured step budget is exhausted, then
    merges the reasoning conversation back into ``run_messages``.

    Args:
        model: Model used to build the default reasoning agent.
        run_messages: The run's messages; mutated in place at the end via
            ``update_messages_with_reasoning``.

    Yields:
        Tuple of (reasoning_step, final_result)
        - During reasoning: (ReasoningStep, None)
        - At the end: (None, ReasoningResult)
    """
    from agno.reasoning.helpers import get_next_action, update_messages_with_reasoning

    reasoning_agent = self._get_default_reasoning_agent(model)
    if reasoning_agent is None:
        yield (None, ReasoningResult(success=False, error="Reasoning agent is None"))
        return

    # Validate reasoning agent output schema: the loop below relies on the
    # structured ReasoningSteps content, so any other schema is rejected early.
    if (
        reasoning_agent.output_schema is not None
        and isinstance(reasoning_agent.output_schema, type)
        and not issubclass(reasoning_agent.output_schema, ReasoningSteps)
    ):
        yield (
            None,
            ReasoningResult(
                success=False,
                error="Reasoning agent response model should be ReasoningSteps",
            ),
        )
        return

    step_count = 1
    next_action = NextAction.CONTINUE
    reasoning_messages: List[Message] = []
    all_reasoning_steps: List[ReasoningStep] = []

    log_debug("Starting Reasoning", center=True, symbol="=")

    while next_action == NextAction.CONTINUE and step_count < self.config.max_steps:
        log_debug(f"Step {step_count}", center=True, symbol="=")
        step_count += 1
        try:
            reasoning_agent_response: RunOutput = await reasoning_agent.arun(
                input=run_messages.get_input_messages()
            )

            if reasoning_agent_response.content is None or reasoning_agent_response.messages is None:
                log_warning("Reasoning error. Reasoning response is empty")
                break

            if isinstance(reasoning_agent_response.content, str):
                log_warning("Reasoning error. Content is a string, not structured output")
                break

            # Guard against a missing AND an empty step list (matching the sync
            # variant): an empty list would otherwise reach reasoning_steps[-1]
            # below and surface as a generic caught exception.
            if (
                reasoning_agent_response.content.reasoning_steps is None
                or len(reasoning_agent_response.content.reasoning_steps) == 0
            ):
                log_warning("Reasoning error. Reasoning steps are empty")
                break

            reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
            all_reasoning_steps.extend(reasoning_steps)

            # Yield each reasoning step
            for step in reasoning_steps:
                yield (step, None)

            # Extract reasoning messages: keep everything from the first
            # assistant message onward (drops system/user preamble).
            first_assistant_index = next(
                (i for i, m in enumerate(reasoning_agent_response.messages) if m.role == "assistant"),
                len(reasoning_agent_response.messages),
            )
            reasoning_messages = reasoning_agent_response.messages[first_assistant_index:]

            # Get the next action from the most recent step
            next_action = get_next_action(reasoning_steps[-1])
            if next_action == NextAction.FINAL_ANSWER:
                break

        except Exception as e:
            log_error(f"Reasoning error: {e}")
            break

    log_debug(f"Total Reasoning steps: {len(all_reasoning_steps)}")
    log_debug("Reasoning finished", center=True, symbol="=")

    # Update messages with reasoning (mutates run_messages in place)
    update_messages_with_reasoning(
        run_messages=run_messages,
        reasoning_messages=reasoning_messages,
    )

    # Yield final result. success=True even when the loop broke early: partial
    # reasoning is still reported as a (possibly empty) set of steps.
    yield (
        None,
        ReasoningResult(
            steps=all_reasoning_steps,
            reasoning_messages=reasoning_messages,
            success=True,
        ),
    )
|
|
991
|
+
|
|
992
|
+
def reason(
    self,
    run_messages: RunMessages,
    stream: bool = False,
) -> Iterator[ReasoningEvent]:
    """
    Run reasoning and yield ReasoningEvent objects.

    Args:
        run_messages: The messages to reason about
        stream: Whether to stream reasoning content

    Yields:
        ReasoningEvent objects for each stage of reasoning
    """
    # Get the reasoning model; without one there is nothing to run.
    # (A separate "model provided" flag was removed: past this guard the
    # model is always non-None, so the flag was a dead condition.)
    reasoning_model: Optional[Model] = self.config.reasoning_model
    if reasoning_model is None:
        yield ReasoningEvent(
            event_type=ReasoningEventType.error,
            error="Reasoning model is None",
        )
        return

    # Yield started event
    yield ReasoningEvent(event_type=ReasoningEventType.started)

    # Check if this is a native reasoning model
    if self.is_native_reasoning_model(reasoning_model):
        # Use streaming for native models when stream is enabled
        if stream:
            yield from self._stream_native_reasoning_events(reasoning_model, run_messages)
        else:
            yield from self._get_native_reasoning_events(reasoning_model, run_messages)
    else:
        # Use default Chain-of-Thought reasoning
        log_info(
            f"Reasoning model: {reasoning_model.__class__.__name__} is not a native reasoning model, "
            "defaulting to manual Chain-of-Thought reasoning"
        )
        yield from self._run_default_reasoning_events(reasoning_model, run_messages)
|
|
1036
|
+
|
|
1037
|
+
async def areason(
    self,
    run_messages: RunMessages,
    stream: bool = False,
) -> AsyncIterator[ReasoningEvent]:
    """
    Unified async reasoning interface that yields ReasoningEvent objects.

    This method handles all reasoning logic and yields events that can be
    converted to Agent or Team specific events by the caller.

    Args:
        run_messages: The messages to reason about
        stream: Whether to stream reasoning content deltas

    Yields:
        ReasoningEvent objects for each stage of reasoning
    """
    # Get the reasoning model; without one there is nothing to run.
    # (A separate "model provided" flag was removed: past this guard the
    # model is always non-None, so the flag was a dead condition.)
    reasoning_model: Optional[Model] = self.config.reasoning_model
    if reasoning_model is None:
        yield ReasoningEvent(
            event_type=ReasoningEventType.error,
            error="Reasoning model is None",
        )
        return

    # Yield started event
    yield ReasoningEvent(event_type=ReasoningEventType.started)

    # Check if this is a native reasoning model
    if self.is_native_reasoning_model(reasoning_model):
        # Use streaming for native models when stream is enabled
        if stream:
            async for event in self._astream_native_reasoning_events(reasoning_model, run_messages):
                yield event
        else:
            async for event in self._aget_native_reasoning_events(reasoning_model, run_messages):
                yield event
    else:
        # Use default Chain-of-Thought reasoning
        log_info(
            f"Reasoning model: {reasoning_model.__class__.__name__} is not a native reasoning model, "
            "defaulting to manual Chain-of-Thought reasoning"
        )
        async for event in self._arun_default_reasoning_events(reasoning_model, run_messages):
            yield event
|
|
1087
|
+
|
|
1088
|
+
def _stream_native_reasoning_events(self, model: Model, run_messages: RunMessages) -> Iterator[ReasoningEvent]:
    """Translate native streaming reasoning output into ReasoningEvent objects."""
    input_messages = run_messages.get_input_messages()

    for delta, final in self.stream_native_reasoning(model, input_messages):
        if delta is not None:
            yield ReasoningEvent(
                event_type=ReasoningEventType.content_delta,
                reasoning_content=delta,
            )
        if final is None:
            continue
        if not final.success:
            yield ReasoningEvent(
                event_type=ReasoningEventType.error,
                error=final.error,
            )
            return
        # Record the reasoning message on the run before signalling completion
        if final.message:
            run_messages.messages.append(final.message)
        yield ReasoningEvent(
            event_type=ReasoningEventType.completed,
            reasoning_steps=final.steps,
            message=final.message,
            reasoning_messages=final.reasoning_messages,
        )
|
|
1113
|
+
|
|
1114
|
+
def _get_native_reasoning_events(self, model: Model, run_messages: RunMessages) -> Iterator[ReasoningEvent]:
    """Run non-streaming native reasoning and emit the corresponding ReasoningEvent objects."""
    outcome = self.get_native_reasoning(model, run_messages.get_input_messages())

    if not outcome.success:
        yield ReasoningEvent(
            event_type=ReasoningEventType.error,
            error=outcome.error,
        )
        return

    # Record the reasoning message on the run before signalling completion
    if outcome.message:
        run_messages.messages.append(outcome.message)
    yield ReasoningEvent(
        event_type=ReasoningEventType.completed,
        reasoning_steps=outcome.steps,
        message=outcome.message,
        reasoning_messages=outcome.reasoning_messages,
    )
|
|
1134
|
+
|
|
1135
|
+
def _run_default_reasoning_events(self, model: Model, run_messages: RunMessages) -> Iterator[ReasoningEvent]:
    """Drive default Chain-of-Thought reasoning and translate its output into ReasoningEvent objects."""
    collected_steps: List[ReasoningStep] = []

    for step, outcome in self.run_default_reasoning(model, run_messages):
        if step is not None:
            collected_steps.append(step)
            yield ReasoningEvent(
                event_type=ReasoningEventType.step,
                reasoning_step=step,
            )
        if outcome is not None and not outcome.success:
            # Abort on failure; no completed event is emitted in that case
            yield ReasoningEvent(
                event_type=ReasoningEventType.error,
                error=outcome.error,
            )
            return

    # Only signal completion when at least one step was produced
    if collected_steps:
        yield ReasoningEvent(
            event_type=ReasoningEventType.completed,
            reasoning_steps=collected_steps,
        )
|
|
1160
|
+
|
|
1161
|
+
async def _astream_native_reasoning_events(
    self, model: Model, run_messages: RunMessages
) -> AsyncIterator[ReasoningEvent]:
    """Translate async native streaming reasoning output into ReasoningEvent objects."""
    input_messages = run_messages.get_input_messages()

    async for delta, final in self.astream_native_reasoning(model, input_messages):
        if delta is not None:
            yield ReasoningEvent(
                event_type=ReasoningEventType.content_delta,
                reasoning_content=delta,
            )
        if final is None:
            continue
        if not final.success:
            yield ReasoningEvent(
                event_type=ReasoningEventType.error,
                error=final.error,
            )
            return
        # Record the reasoning message on the run before signalling completion
        if final.message:
            run_messages.messages.append(final.message)
        yield ReasoningEvent(
            event_type=ReasoningEventType.completed,
            reasoning_steps=final.steps,
            message=final.message,
            reasoning_messages=final.reasoning_messages,
        )
|
|
1188
|
+
|
|
1189
|
+
async def _aget_native_reasoning_events(
    self, model: Model, run_messages: RunMessages
) -> AsyncIterator[ReasoningEvent]:
    """Get native reasoning asynchronously (non-streaming) and emit ReasoningEvent objects.

    Emits exactly one event: ``error`` when the model reports failure,
    otherwise ``completed`` with the final message and reasoning steps.
    """
    outcome = await self.aget_native_reasoning(model, run_messages.get_input_messages())

    if outcome.success:
        if outcome.message:
            # Record the reasoning message on the run so downstream steps see it.
            run_messages.messages.append(outcome.message)
        yield ReasoningEvent(
            event_type=ReasoningEventType.completed,
            reasoning_steps=outcome.steps,
            message=outcome.message,
            reasoning_messages=outcome.reasoning_messages,
        )
    else:
        yield ReasoningEvent(
            event_type=ReasoningEventType.error,
            error=outcome.error,
        )
|
|
1212
|
+
async def _arun_default_reasoning_events(
    self, model: Model, run_messages: RunMessages
) -> AsyncIterator[ReasoningEvent]:
    """Run default chain-of-thought reasoning asynchronously, emitting ReasoningEvent objects.

    Emits a ``step`` event per reasoning step, an ``error`` event (then
    stops) when a result reports failure, and finally a ``completed``
    event summarizing all collected steps.
    """
    collected_steps: List[ReasoningStep] = []

    async for step, outcome in self.arun_default_reasoning(model, run_messages):
        if step is not None:
            collected_steps.append(step)
            yield ReasoningEvent(
                event_type=ReasoningEventType.step,
                reasoning_step=step,
            )
        # A failed outcome ends the event stream immediately.
        if outcome is not None and not outcome.success:
            yield ReasoningEvent(
                event_type=ReasoningEventType.error,
                error=outcome.error,
            )
            return

    # Summarize every step in a single completed event.
    if collected_steps:
        yield ReasoningEvent(
            event_type=ReasoningEventType.completed,
            reasoning_steps=collected_steps,
        )