agno 2.0.1__py3-none-any.whl → 2.3.0__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- agno/agent/agent.py +6015 -2823
- agno/api/api.py +2 -0
- agno/api/os.py +1 -1
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +385 -6
- agno/db/dynamo/dynamo.py +388 -81
- agno/db/dynamo/schemas.py +47 -10
- agno/db/dynamo/utils.py +63 -4
- agno/db/firestore/firestore.py +435 -64
- agno/db/firestore/schemas.py +11 -0
- agno/db/firestore/utils.py +102 -4
- agno/db/gcs_json/gcs_json_db.py +384 -42
- agno/db/gcs_json/utils.py +60 -26
- agno/db/in_memory/in_memory_db.py +351 -66
- agno/db/in_memory/utils.py +60 -2
- agno/db/json/json_db.py +339 -48
- agno/db/json/utils.py +60 -26
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +510 -37
- agno/db/migrations/versions/__init__.py +0 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +15 -1
- agno/db/mongo/async_mongo.py +2036 -0
- agno/db/mongo/mongo.py +653 -76
- agno/db/mongo/schemas.py +13 -0
- agno/db/mongo/utils.py +80 -8
- agno/db/mysql/mysql.py +687 -25
- agno/db/mysql/schemas.py +61 -37
- agno/db/mysql/utils.py +60 -2
- agno/db/postgres/__init__.py +2 -1
- agno/db/postgres/async_postgres.py +2001 -0
- agno/db/postgres/postgres.py +676 -57
- agno/db/postgres/schemas.py +43 -18
- agno/db/postgres/utils.py +164 -2
- agno/db/redis/redis.py +344 -38
- agno/db/redis/schemas.py +18 -0
- agno/db/redis/utils.py +60 -2
- agno/db/schemas/__init__.py +2 -1
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/memory.py +13 -0
- agno/db/singlestore/schemas.py +26 -1
- agno/db/singlestore/singlestore.py +687 -53
- agno/db/singlestore/utils.py +60 -2
- agno/db/sqlite/__init__.py +2 -1
- agno/db/sqlite/async_sqlite.py +2371 -0
- agno/db/sqlite/schemas.py +24 -0
- agno/db/sqlite/sqlite.py +774 -85
- agno/db/sqlite/utils.py +168 -5
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +309 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1361 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +50 -22
- agno/eval/accuracy.py +50 -43
- agno/eval/performance.py +6 -3
- agno/eval/reliability.py +6 -3
- agno/eval/utils.py +33 -16
- agno/exceptions.py +68 -1
- agno/filters.py +354 -0
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +52 -0
- agno/integrations/discord/client.py +1 -0
- agno/knowledge/chunking/agentic.py +13 -10
- agno/knowledge/chunking/fixed.py +1 -1
- agno/knowledge/chunking/semantic.py +40 -8
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/aws_bedrock.py +9 -4
- agno/knowledge/embedder/azure_openai.py +54 -0
- agno/knowledge/embedder/base.py +2 -0
- agno/knowledge/embedder/cohere.py +184 -5
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/embedder/google.py +79 -1
- agno/knowledge/embedder/huggingface.py +9 -4
- agno/knowledge/embedder/jina.py +63 -0
- agno/knowledge/embedder/mistral.py +78 -11
- agno/knowledge/embedder/nebius.py +1 -1
- agno/knowledge/embedder/ollama.py +13 -0
- agno/knowledge/embedder/openai.py +37 -65
- agno/knowledge/embedder/sentence_transformer.py +8 -4
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/embedder/voyageai.py +69 -16
- agno/knowledge/knowledge.py +594 -186
- agno/knowledge/reader/base.py +9 -2
- agno/knowledge/reader/csv_reader.py +8 -10
- agno/knowledge/reader/docx_reader.py +5 -6
- agno/knowledge/reader/field_labeled_csv_reader.py +290 -0
- agno/knowledge/reader/json_reader.py +6 -5
- agno/knowledge/reader/markdown_reader.py +13 -13
- agno/knowledge/reader/pdf_reader.py +43 -68
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +51 -6
- agno/knowledge/reader/s3_reader.py +3 -15
- agno/knowledge/reader/tavily_reader.py +194 -0
- agno/knowledge/reader/text_reader.py +13 -13
- agno/knowledge/reader/web_search_reader.py +2 -43
- agno/knowledge/reader/website_reader.py +43 -25
- agno/knowledge/reranker/__init__.py +2 -8
- agno/knowledge/types.py +9 -0
- agno/knowledge/utils.py +20 -0
- agno/media.py +72 -0
- agno/memory/manager.py +336 -82
- agno/models/aimlapi/aimlapi.py +2 -2
- agno/models/anthropic/claude.py +183 -37
- agno/models/aws/bedrock.py +52 -112
- agno/models/aws/claude.py +33 -1
- agno/models/azure/ai_foundry.py +33 -15
- agno/models/azure/openai_chat.py +25 -8
- agno/models/base.py +999 -519
- agno/models/cerebras/cerebras.py +19 -13
- agno/models/cerebras/cerebras_openai.py +8 -5
- agno/models/cohere/chat.py +27 -1
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +57 -0
- agno/models/dashscope/dashscope.py +1 -0
- agno/models/deepinfra/deepinfra.py +2 -2
- agno/models/deepseek/deepseek.py +2 -2
- agno/models/fireworks/fireworks.py +2 -2
- agno/models/google/gemini.py +103 -31
- agno/models/groq/groq.py +28 -11
- agno/models/huggingface/huggingface.py +2 -1
- agno/models/internlm/internlm.py +2 -2
- agno/models/langdb/langdb.py +4 -4
- agno/models/litellm/chat.py +18 -1
- agno/models/litellm/litellm_openai.py +2 -2
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/message.py +139 -0
- agno/models/meta/llama.py +27 -10
- agno/models/meta/llama_openai.py +5 -17
- agno/models/nebius/nebius.py +6 -6
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/nvidia.py +2 -2
- agno/models/ollama/chat.py +59 -5
- agno/models/openai/chat.py +69 -29
- agno/models/openai/responses.py +103 -106
- agno/models/openrouter/openrouter.py +41 -3
- agno/models/perplexity/perplexity.py +4 -5
- agno/models/portkey/portkey.py +3 -3
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +52 -0
- agno/models/response.py +77 -1
- agno/models/sambanova/sambanova.py +2 -2
- agno/models/siliconflow/__init__.py +5 -0
- agno/models/siliconflow/siliconflow.py +25 -0
- agno/models/together/together.py +2 -2
- agno/models/utils.py +254 -8
- agno/models/vercel/v0.py +2 -2
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +96 -0
- agno/models/vllm/vllm.py +1 -0
- agno/models/xai/xai.py +3 -2
- agno/os/app.py +543 -178
- agno/os/auth.py +24 -14
- agno/os/config.py +1 -0
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +250 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/agui.py +23 -7
- agno/os/interfaces/agui/router.py +27 -3
- agno/os/interfaces/agui/utils.py +242 -142
- agno/os/interfaces/base.py +6 -2
- agno/os/interfaces/slack/router.py +81 -23
- agno/os/interfaces/slack/slack.py +29 -14
- agno/os/interfaces/whatsapp/router.py +11 -4
- agno/os/interfaces/whatsapp/whatsapp.py +14 -7
- agno/os/mcp.py +111 -54
- agno/os/middleware/__init__.py +7 -0
- agno/os/middleware/jwt.py +233 -0
- agno/os/router.py +556 -139
- agno/os/routers/evals/evals.py +71 -34
- agno/os/routers/evals/schemas.py +31 -31
- agno/os/routers/evals/utils.py +6 -5
- agno/os/routers/health.py +31 -0
- agno/os/routers/home.py +52 -0
- agno/os/routers/knowledge/knowledge.py +185 -38
- agno/os/routers/knowledge/schemas.py +82 -22
- agno/os/routers/memory/memory.py +158 -53
- agno/os/routers/memory/schemas.py +20 -16
- agno/os/routers/metrics/metrics.py +20 -8
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +499 -38
- agno/os/schema.py +308 -198
- agno/os/utils.py +401 -41
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/azure_ai_foundry.py +2 -2
- agno/reasoning/deepseek.py +2 -2
- agno/reasoning/default.py +3 -1
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/groq.py +2 -2
- agno/reasoning/ollama.py +2 -2
- agno/reasoning/openai.py +7 -2
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +248 -94
- agno/run/base.py +44 -5
- agno/run/team.py +238 -97
- agno/run/workflow.py +144 -33
- agno/session/agent.py +105 -89
- agno/session/summary.py +65 -25
- agno/session/team.py +176 -96
- agno/session/workflow.py +406 -40
- agno/team/team.py +3854 -1610
- agno/tools/dalle.py +2 -4
- agno/tools/decorator.py +4 -2
- agno/tools/duckduckgo.py +15 -11
- agno/tools/e2b.py +14 -7
- agno/tools/eleven_labs.py +23 -25
- agno/tools/exa.py +21 -16
- agno/tools/file.py +153 -23
- agno/tools/file_generation.py +350 -0
- agno/tools/firecrawl.py +4 -4
- agno/tools/function.py +250 -30
- agno/tools/gmail.py +238 -14
- agno/tools/google_drive.py +270 -0
- agno/tools/googlecalendar.py +36 -8
- agno/tools/googlesheets.py +20 -5
- agno/tools/jira.py +20 -0
- agno/tools/knowledge.py +3 -3
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +284 -0
- agno/tools/mem0.py +11 -17
- agno/tools/memori.py +1 -53
- agno/tools/memory.py +419 -0
- agno/tools/models/nebius.py +5 -5
- agno/tools/models_labs.py +20 -10
- agno/tools/notion.py +204 -0
- agno/tools/parallel.py +314 -0
- agno/tools/scrapegraph.py +58 -31
- agno/tools/searxng.py +2 -2
- agno/tools/serper.py +2 -2
- agno/tools/slack.py +18 -3
- agno/tools/spider.py +2 -2
- agno/tools/tavily.py +146 -0
- agno/tools/whatsapp.py +1 -1
- agno/tools/workflow.py +278 -0
- agno/tools/yfinance.py +12 -11
- agno/utils/agent.py +820 -0
- agno/utils/audio.py +27 -0
- agno/utils/common.py +90 -1
- agno/utils/events.py +217 -2
- agno/utils/gemini.py +180 -22
- agno/utils/hooks.py +57 -0
- agno/utils/http.py +111 -0
- agno/utils/knowledge.py +12 -5
- agno/utils/log.py +1 -0
- agno/utils/mcp.py +92 -2
- agno/utils/media.py +188 -10
- agno/utils/merge_dict.py +22 -1
- agno/utils/message.py +60 -0
- agno/utils/models/claude.py +40 -11
- agno/utils/print_response/agent.py +105 -21
- agno/utils/print_response/team.py +103 -38
- agno/utils/print_response/workflow.py +251 -34
- agno/utils/reasoning.py +22 -1
- agno/utils/serialize.py +32 -0
- agno/utils/streamlit.py +16 -10
- agno/utils/string.py +41 -0
- agno/utils/team.py +98 -9
- agno/utils/tools.py +1 -1
- agno/vectordb/base.py +23 -4
- agno/vectordb/cassandra/cassandra.py +65 -9
- agno/vectordb/chroma/chromadb.py +182 -38
- agno/vectordb/clickhouse/clickhousedb.py +64 -11
- agno/vectordb/couchbase/couchbase.py +105 -10
- agno/vectordb/lancedb/lance_db.py +124 -133
- agno/vectordb/langchaindb/langchaindb.py +25 -7
- agno/vectordb/lightrag/lightrag.py +17 -3
- agno/vectordb/llamaindex/__init__.py +3 -0
- agno/vectordb/llamaindex/llamaindexdb.py +46 -7
- agno/vectordb/milvus/milvus.py +126 -9
- agno/vectordb/mongodb/__init__.py +7 -1
- agno/vectordb/mongodb/mongodb.py +112 -7
- agno/vectordb/pgvector/pgvector.py +142 -21
- agno/vectordb/pineconedb/pineconedb.py +80 -8
- agno/vectordb/qdrant/qdrant.py +125 -39
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +694 -0
- agno/vectordb/singlestore/singlestore.py +111 -25
- agno/vectordb/surrealdb/surrealdb.py +31 -5
- agno/vectordb/upstashdb/upstashdb.py +76 -8
- agno/vectordb/weaviate/weaviate.py +86 -15
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +112 -18
- agno/workflow/loop.py +69 -10
- agno/workflow/parallel.py +266 -118
- agno/workflow/router.py +110 -17
- agno/workflow/step.py +638 -129
- agno/workflow/steps.py +65 -6
- agno/workflow/types.py +61 -23
- agno/workflow/workflow.py +2085 -272
- {agno-2.0.1.dist-info → agno-2.3.0.dist-info}/METADATA +182 -58
- agno-2.3.0.dist-info/RECORD +577 -0
- agno/knowledge/reader/url_reader.py +0 -128
- agno/tools/googlesearch.py +0 -98
- agno/tools/mcp.py +0 -610
- agno/utils/models/aws_claude.py +0 -170
- agno-2.0.1.dist-info/RECORD +0 -515
- {agno-2.0.1.dist-info → agno-2.3.0.dist-info}/WHEEL +0 -0
- {agno-2.0.1.dist-info → agno-2.3.0.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.1.dist-info → agno-2.3.0.dist-info}/top_level.txt +0 -0
@@ -7,7 +7,7 @@ from rich.markdown import Markdown
 from rich.status import Status
 from rich.text import Text
 
-from agno.media import Audio, Image, Video
+from agno.media import Audio, File, Image, Video
 from agno.models.message import Message
 from agno.run.workflow import (
     ConditionExecutionCompletedEvent,
@@ -25,6 +25,8 @@ from agno.run.workflow import (
     StepsExecutionCompletedEvent,
     StepsExecutionStartedEvent,
     StepStartedEvent,
+    WorkflowAgentCompletedEvent,
+    WorkflowAgentStartedEvent,
     WorkflowCompletedEvent,
     WorkflowErrorEvent,
     WorkflowRunOutput,
@@ -48,6 +50,7 @@ def print_response(
     audio: Optional[List[Audio]] = None,
     images: Optional[List[Image]] = None,
     videos: Optional[List[Video]] = None,
+    files: Optional[List[File]] = None,
     markdown: bool = True,
     show_time: bool = True,
     show_step_details: bool = True,
@@ -76,6 +79,8 @@ def print_response(
         media_info.append(f"Images: {len(images)}")
     if videos:
         media_info.append(f"Videos: {len(videos)}")
+    if files:
+        media_info.append(f"Files: {len(files)}")
 
     workflow_info = f"""**Workflow:** {workflow.name}"""
     if workflow.description:
@@ -126,12 +131,22 @@ def print_response(
         audio=audio,
         images=images,
         videos=videos,
+        files=files,
         **kwargs,
     )  # type: ignore
 
     response_timer.stop()
 
-    if
+    # Check if this is a workflow agent direct response
+    if workflow_response.workflow_agent_run is not None and not workflow_response.workflow_agent_run.tools:
+        # Agent answered directly from history without executing workflow
+        agent_response_panel = create_panel(
+            content=Markdown(str(workflow_response.content)) if markdown else str(workflow_response.content),
+            title="Workflow Agent Response",
+            border_style="green",
+        )
+        console.print(agent_response_panel)  # type: ignore
+    elif show_step_details and workflow_response.step_results:
         for i, step_output in enumerate(workflow_response.step_results):
             print_step_output_recursive(step_output, i + 1, markdown, console)  # type: ignore
 
@@ -186,6 +201,8 @@ def print_response_stream(
     audio: Optional[List[Audio]] = None,
     images: Optional[List[Image]] = None,
     videos: Optional[List[Video]] = None,
+    files: Optional[List[File]] = None,
+    stream_events: bool = False,
     stream_intermediate_steps: bool = False,
     markdown: bool = True,
     show_time: bool = True,
@@ -199,7 +216,7 @@ def print_response_stream(
 
     console = Console()
 
-
+    stream_events = True  # With streaming print response, we need to stream intermediate steps
 
     # Show workflow info (same as before)
     media_info = []
@@ -209,6 +226,8 @@ def print_response_stream(
         media_info.append(f"Images: {len(images)}")
     if videos:
         media_info.append(f"Videos: {len(videos)}")
+    if files:
+        media_info.append(f"Files: {len(files)}")
 
     workflow_info = f"""**Workflow:** {workflow.name}"""
     if workflow.description:
@@ -252,11 +271,18 @@ def print_response_stream(
     step_results = []
     step_started_printed = False
     is_callable_function = callable(workflow.steps)
+    workflow_started = False  # Track if workflow has actually started
+    is_workflow_agent_response = False  # Track if this is a workflow agent direct response
 
     # Smart step hierarchy tracking
     current_primitive_context = None  # Current primitive being executed (parallel, loop, etc.)
     step_display_cache = {}  # type: ignore
 
+    # Parallel-aware tracking for simultaneous steps
+    parallel_step_states: Dict[
+        Any, Dict[str, Any]
+    ] = {}  # track state of each parallel step: {step_index: {"name": str, "content": str, "started": bool, "completed": bool}}
+
     def get_step_display_number(step_index: Union[int, tuple], step_name: str = "") -> str:
         """Generate clean two-level step numbering: x.y format only"""
 
@@ -308,21 +334,38 @@ def print_response_stream(
             audio=audio,
             images=images,
             videos=videos,
+            files=files,
             stream=True,
-
+            stream_events=stream_events,
             **kwargs,
         ):  # type: ignore
             # Handle the new event types
             if isinstance(response, WorkflowStartedEvent):
+                workflow_started = True
                 status.update("Workflow started...")
                 if is_callable_function:
                     current_step_name = "Custom Function"
                     current_step_index = 0
                 live_log.update(status)
 
+            elif isinstance(response, WorkflowAgentStartedEvent):
+                # Workflow agent is starting to process
+                status.update("Workflow agent processing...")
+                live_log.update(status)
+                continue
+
+            elif isinstance(response, WorkflowAgentCompletedEvent):
+                # Workflow agent has completed
+                status.update("Workflow agent completed")
+                live_log.update(status)
+                continue
+
             elif isinstance(response, StepStartedEvent):
-
-
+                step_name = response.step_name or "Unknown"
+                step_index = response.step_index or 0  # type: ignore
+
+                current_step_name = step_name
+                current_step_index = step_index  # type: ignore
                 current_step_content = ""
                 step_started_printed = False
 
@@ -335,6 +378,14 @@ def print_response_stream(
                 step_name = response.step_name or "Unknown"
                 step_index = response.step_index or 0
 
+                # Skip parallel sub-step completed events - they're handled in ParallelExecutionCompletedEvent (avoid duplication)
+                if (
+                    current_primitive_context
+                    and current_primitive_context["type"] == "parallel"
+                    and isinstance(step_index, tuple)
+                ):
+                    continue
+
                 # Generate smart step number for completion (will use cached value)
                 step_display = get_step_display_number(step_index, step_name)
                 status.update(f"Completed {step_display}: {step_name}")
@@ -376,7 +427,8 @@ def print_response_stream(
                     "max_iterations": response.max_iterations,
                 }
 
-                #
+                # Initialize parallel step tracking - clear previous states
+                parallel_step_states.clear()
                 step_display_cache.clear()
 
                 status.update(f"Starting loop: {current_step_name} (max {response.max_iterations} iterations)...")
@@ -442,7 +494,8 @@ def print_response_stream(
                     "total_steps": response.parallel_step_count,
                 }
 
-                #
+                # Initialize parallel step tracking - clear previous states
+                parallel_step_states.clear()
                 step_display_cache.clear()
 
                 # Print parallel execution summary panel
@@ -468,8 +521,30 @@ def print_response_stream(
 
                 status.update(f"Completed parallel execution: {step_name}")
 
+                # Display individual parallel step results immediately
+                if show_step_details and response.step_results:
+                    live_log.update(status, refresh=True)
+
+                    # Get the parallel container's display number for consistent numbering
+                    parallel_step_display = get_step_display_number(step_index, step_name)
+
+                    # Show each parallel step with the same number (1.1, 1.1)
+                    for step_result in response.step_results:
+                        if step_result.content:
+                            step_result_name = step_result.step_name or "Parallel Step"
+                            formatted_content = format_step_content_for_display(step_result.content)  # type: ignore
+
+                            # All parallel sub-steps get the same number
+                            parallel_step_panel = create_panel(
+                                content=Markdown(formatted_content) if markdown else formatted_content,
+                                title=f"{parallel_step_display}: {step_result_name} (Completed)",
+                                border_style="orange3",
+                            )
+                            console.print(parallel_step_panel)  # type: ignore
+
                 # Reset context
                 current_primitive_context = None
+                parallel_step_states.clear()
                 step_display_cache.clear()
 
             elif isinstance(response, ConditionExecutionStartedEvent):
@@ -486,7 +561,8 @@ def print_response_stream(
                     "condition_result": response.condition_result,
                 }
 
-                #
+                # Initialize parallel step tracking - clear previous states
+                parallel_step_states.clear()
                 step_display_cache.clear()
 
                 condition_text = "met" if response.condition_result else "not met"
@@ -517,7 +593,8 @@ def print_response_stream(
                     "selected_steps": response.selected_steps,
                 }
 
-                #
+                # Initialize parallel step tracking - clear previous states
+                parallel_step_states.clear()
                 step_display_cache.clear()
 
                 selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
@@ -595,8 +672,23 @@ def print_response_stream(
             elif isinstance(response, WorkflowCompletedEvent):
                 status.update("Workflow completed!")
 
+                # Check if this is an agent direct response
+                if response.metadata and response.metadata.get("agent_direct_response"):
+                    is_workflow_agent_response = True
+                    # Print the agent's direct response from history
+                    if show_step_details:
+                        live_log.update(status, refresh=True)
+                        agent_response_panel = create_panel(
+                            content=Markdown(str(response.content)) if markdown else str(response.content),
+                            title="Workflow Agent Response",
+                            border_style="green",
+                        )
+                        console.print(agent_response_panel)  # type: ignore
+                    step_started_printed = True
                 # For callable functions, print the final content block here since there are no step events
-
+                elif (
+                    is_callable_function and show_step_details and current_step_content and not step_started_printed
+                ):
                     final_step_panel = create_panel(
                         content=Markdown(current_step_content) if markdown else current_step_content,
                         title="Custom Function (Completed)",
@@ -607,8 +699,8 @@ def print_response_stream(
 
                 live_log.update(status, refresh=True)
 
-                # Show final summary
-                if response.metadata:
+                # Show final summary (skip for agent responses)
+                if response.metadata and not is_workflow_agent_response:
                     status = response.status
                     summary_content = ""
                     summary_content += f"""\n\n**Status:** {status}"""
@@ -659,14 +751,30 @@ def print_response_stream(
                     and response.content_type != ""
                 )
                 response_str = response.content  # type: ignore
+
+                if isinstance(response, RunContentEvent) and not workflow_started:
+                    is_workflow_agent_response = True
+                    continue
+
             elif isinstance(response, RunContentEvent) and current_step_executor_type != "team":
                 response_str = response.content  # type: ignore
+                # If we get RunContentEvent BEFORE workflow starts, it's an agent direct response
+                if not workflow_started and not is_workflow_agent_response:
+                    is_workflow_agent_response = True
             else:
                 continue
 
             # Use the unified formatting function for consistency
             response_str = format_step_content_for_display(response_str)  # type: ignore
 
+            # Skip streaming content from parallel sub-steps - they're handled in ParallelExecutionCompletedEvent
+            if (
+                current_primitive_context
+                and current_primitive_context["type"] == "parallel"
+                and isinstance(current_step_index, tuple)
+            ):
+                continue
+
             # Filter out empty responses and add to current step content
             if response_str and response_str.strip():
                 # If it's a structured output from a team, replace the content instead of appending
@@ -675,8 +783,8 @@ def print_response_stream(
                 else:
                     current_step_content += response_str
 
-                # Live update the step panel with streaming content
-                if show_step_details and not step_started_printed:
+                # Live update the step panel with streaming content (skip for workflow agent responses)
+                if show_step_details and not step_started_printed and not is_workflow_agent_response:
                     # Generate smart step number for streaming title (will use cached value)
                     step_display = get_step_display_number(current_step_index, current_step_name)
                     title = f"{step_display}: {current_step_name} (Streaming...)"
@@ -698,8 +806,8 @@ def print_response_stream(
 
         live_log.update("")
 
-    # Final completion message
-    if show_time:
+    # Final completion message (skip for agent responses)
+    if show_time and not is_workflow_agent_response:
        completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
        console.print(completion_text)  # type: ignore
 
@@ -781,6 +889,7 @@ async def aprint_response(
     audio: Optional[List[Audio]] = None,
     images: Optional[List[Image]] = None,
     videos: Optional[List[Video]] = None,
+    files: Optional[List[File]] = None,
     markdown: bool = True,
     show_time: bool = True,
     show_step_details: bool = True,
@@ -809,6 +918,8 @@ async def aprint_response(
         media_info.append(f"Images: {len(images)}")
     if videos:
         media_info.append(f"Videos: {len(videos)}")
+    if files:
+        media_info.append(f"Files: {len(files)}")
 
     workflow_info = f"""**Workflow:** {workflow.name}"""
     if workflow.description:
@@ -859,12 +970,22 @@ async def aprint_response(
         audio=audio,
         images=images,
         videos=videos,
+        files=files,
         **kwargs,
     )  # type: ignore
 
     response_timer.stop()
 
-    if
+    # Check if this is a workflow agent direct response
+    if workflow_response.workflow_agent_run is not None and not workflow_response.workflow_agent_run.tools:
+        # Agent answered directly from history without executing workflow
+        agent_response_panel = create_panel(
+            content=Markdown(str(workflow_response.content)) if markdown else str(workflow_response.content),
+            title="Workflow Agent Response",
+            border_style="green",
+        )
+        console.print(agent_response_panel)  # type: ignore
+    elif show_step_details and workflow_response.step_results:
        for i, step_output in enumerate(workflow_response.step_results):
            print_step_output_recursive(step_output, i + 1, markdown, console)  # type: ignore
 
@@ -919,6 +1040,8 @@ async def aprint_response_stream(
     audio: Optional[List[Audio]] = None,
     images: Optional[List[Image]] = None,
     videos: Optional[List[Video]] = None,
+    files: Optional[List[File]] = None,
+    stream_events: bool = False,
     stream_intermediate_steps: bool = False,
     markdown: bool = True,
     show_time: bool = True,
@@ -932,7 +1055,7 @@ async def aprint_response_stream(
 
     console = Console()
 
-
+    stream_events = True  # With streaming print response, we need to stream intermediate steps
 
     # Show workflow info (same as before)
     media_info = []
@@ -942,6 +1065,8 @@ async def aprint_response_stream(
         media_info.append(f"Images: {len(images)}")
     if videos:
         media_info.append(f"Videos: {len(videos)}")
+    if files:
+        media_info.append(f"Files: {len(files)}")
 
     workflow_info = f"""**Workflow:** {workflow.name}"""
     if workflow.description:
@@ -985,11 +1110,18 @@ async def aprint_response_stream(
     step_results = []
     step_started_printed = False
     is_callable_function = callable(workflow.steps)
+    workflow_started = False  # Track if workflow has actually started
+    is_workflow_agent_response = False  # Track if this is a workflow agent direct response
 
     # Smart step hierarchy tracking
    current_primitive_context = None  # Current primitive being executed (parallel, loop, etc.)
    step_display_cache = {}  # type: ignore
 
+    # Parallel-aware tracking for simultaneous steps
+    parallel_step_states: Dict[
+        Any, Dict[str, Any]
+    ] = {}  # track state of each parallel step: {step_index: {"name": str, "content": str, "started": bool, "completed": bool}}
+
     def get_step_display_number(step_index: Union[int, tuple], step_name: str = "") -> str:
         """Generate clean two-level step numbering: x.y format only"""
 
@@ -1033,7 +1165,7 @@ async def aprint_response_stream(
         live_log.update(status)
 
         try:
-            async for response in
+            async for response in workflow.arun(
                 input=input,
                 additional_data=additional_data,
                 user_id=user_id,
@@ -1041,21 +1173,42 @@ async def aprint_response_stream(
                 audio=audio,
                 images=images,
                 videos=videos,
+                files=files,
                 stream=True,
-
+                stream_events=stream_events,
                 **kwargs,
             ):  # type: ignore
                 # Handle the new event types
                 if isinstance(response, WorkflowStartedEvent):
+                    workflow_started = True
                     status.update("Workflow started...")
                     if is_callable_function:
                         current_step_name = "Custom Function"
                         current_step_index = 0
                     live_log.update(status)
 
+                elif isinstance(response, WorkflowAgentStartedEvent):
+                    # Workflow agent is starting to process
+                    status.update("Workflow agent processing...")
+                    live_log.update(status)
+                    continue
+
+                elif isinstance(response, WorkflowAgentCompletedEvent):
+                    # Workflow agent has completed
+                    status.update("Workflow agent completed")
+                    live_log.update(status)
+                    continue
+
                 elif isinstance(response, StepStartedEvent):
-
-
+                    # Skip step events if workflow hasn't started (agent direct response)
+                    if not workflow_started:
+                        continue
+
+                    step_name = response.step_name or "Unknown"
+                    step_index = response.step_index or 0  # type: ignore
+
+                    current_step_name = step_name
+                    current_step_index = step_index  # type: ignore
                     current_step_content = ""
                     step_started_printed = False
 
@@ -1068,6 +1221,14 @@ async def aprint_response_stream(
                     step_name = response.step_name or "Unknown"
                     step_index = response.step_index or 0
 
+                    # Skip parallel sub-step completed events - they're handled in ParallelExecutionCompletedEvent (avoid duplication)
+                    if (
+                        current_primitive_context
+                        and current_primitive_context["type"] == "parallel"
+                        and isinstance(step_index, tuple)
+                    ):
+                        continue
+
                    # Generate smart step number for completion (will use cached value)
                    step_display = get_step_display_number(step_index, step_name)
                    status.update(f"Completed {step_display}: {step_name}")
@@ -1109,7 +1270,8 @@ async def aprint_response_stream(
                         "max_iterations": response.max_iterations,
                     }
 
-                    #
+                    # Initialize parallel step tracking - clear previous states
+                    parallel_step_states.clear()
                     step_display_cache.clear()
 
                     status.update(f"Starting loop: {current_step_name} (max {response.max_iterations} iterations)...")
@@ -1175,7 +1337,8 @@ async def aprint_response_stream(
                         "total_steps": response.parallel_step_count,
                     }
 
-                    #
+                    # Initialize parallel step tracking - clear previous states
+                    parallel_step_states.clear()
                     step_display_cache.clear()
 
                     # Print parallel execution summary panel
@@ -1201,8 +1364,30 @@ async def aprint_response_stream(
 
                     status.update(f"Completed parallel execution: {step_name}")
 
+                    # Display individual parallel step results immediately
+                    if show_step_details and response.step_results:
+                        live_log.update(status, refresh=True)
+
+                        # Get the parallel container's display number for consistent numbering
+                        parallel_step_display = get_step_display_number(step_index, step_name)
+
+                        # Show each parallel step with the same number (1.1, 1.1)
+                        for step_result in response.step_results:
+                            if step_result.content:
+                                step_result_name = step_result.step_name or "Parallel Step"
+                                formatted_content = format_step_content_for_display(step_result.content)  # type: ignore
+
+                                # All parallel sub-steps get the same number
+                                parallel_step_panel = create_panel(
+                                    content=Markdown(formatted_content) if markdown else formatted_content,
+                                    title=f"{parallel_step_display}: {step_result_name} (Completed)",
+                                    border_style="orange3",
+                                )
+                                console.print(parallel_step_panel)  # type: ignore
+
                     # Reset context
                     current_primitive_context = None
+                    parallel_step_states.clear()
                     step_display_cache.clear()
 
                 elif isinstance(response, ConditionExecutionStartedEvent):
@@ -1219,7 +1404,8 @@ async def aprint_response_stream(
                         "condition_result": response.condition_result,
                     }
 
-                    #
+                    # Initialize parallel step tracking - clear previous states
+                    parallel_step_states.clear()
                     step_display_cache.clear()
 
                     condition_text = "met" if response.condition_result else "not met"
@@ -1250,7 +1436,8 @@ async def aprint_response_stream(
                         "selected_steps": response.selected_steps,
                     }
 
-                    #
+                    # Initialize parallel step tracking - clear previous states
+                    parallel_step_states.clear()
                     step_display_cache.clear()
 
                     selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
@@ -1328,8 +1515,23 @@ async def aprint_response_stream(
                 elif isinstance(response, WorkflowCompletedEvent):
                     status.update("Workflow completed!")
 
+                    # Check if this is an agent direct response
+                    if response.metadata and response.metadata.get("agent_direct_response"):
+                        is_workflow_agent_response = True
+                        # Print the agent's direct response from history
+                        if show_step_details:
+                            live_log.update(status, refresh=True)
+                            agent_response_panel = create_panel(
+                                content=Markdown(str(response.content)) if markdown else str(response.content),
+                                title="Workflow Agent Response",
+                                border_style="green",
+                            )
+                            console.print(agent_response_panel)  # type: ignore
+                        step_started_printed = True
                     # For callable functions, print the final content block here since there are no step events
-
+                    elif (
+                        is_callable_function and show_step_details and current_step_content and not step_started_printed
+                    ):
                         final_step_panel = create_panel(
                             content=Markdown(current_step_content) if markdown else current_step_content,
                             title="Custom Function (Completed)",
@@ -1340,8 +1542,8 @@ async def aprint_response_stream(
 
                     live_log.update(status, refresh=True)
 
-                    # Show final summary
-                    if response.metadata:
+                    # Show final summary (skip for agent responses)
+                    if response.metadata and not is_workflow_agent_response:
                         status = response.status
                         summary_content = ""
                         summary_content += f"""\n\n**Status:** {status}"""
@@ -1389,6 +1591,11 @@ async def aprint_response_stream(
                     # Extract the content from the streaming event
                     response_str = response.content  # type: ignore
 
+                    # If we get RunContentEvent BEFORE workflow starts, it's an agent direct response
+                    if isinstance(response, RunContentEvent) and not workflow_started:
+                        is_workflow_agent_response = True
+                        continue  # Skip ALL agent direct response content
+
                     # Check if this is a team's final structured output
                     is_structured_output = (
                         isinstance(response, TeamRunContentEvent)
@@ -1398,12 +1605,23 @@ async def aprint_response_stream(
                     )
                 elif isinstance(response, RunContentEvent) and current_step_executor_type != "team":
                     response_str = response.content  # type: ignore
+                    # If we get RunContentEvent BEFORE workflow starts, it's an agent direct response
+                    if not workflow_started and not is_workflow_agent_response:
+                        is_workflow_agent_response = True
                 else:
                     continue
 
                 # Use the unified formatting function for consistency
                 response_str = format_step_content_for_display(response_str)  # type: ignore
 
+                # Skip streaming content from parallel sub-steps - they're handled in ParallelExecutionCompletedEvent
+                if (
+                    current_primitive_context
+                    and current_primitive_context["type"] == "parallel"
+                    and isinstance(current_step_index, tuple)
+                ):
+                    continue
+
                 # Filter out empty responses and add to current step content
                 if response_str and response_str.strip():
                     # If it's a structured output from a team, replace the content instead of appending
@@ -1412,8 +1630,8 @@ async def aprint_response_stream(
                     else:
                         current_step_content += response_str
 
-                    # Live update the step panel with streaming content
-                    if show_step_details and not step_started_printed:
+                    # Live update the step panel with streaming content (skip for workflow agent responses)
+                    if show_step_details and not step_started_printed and not is_workflow_agent_response:
                         # Generate smart step number for streaming title (will use cached value)
                         step_display = get_step_display_number(current_step_index, current_step_name)
                         title = f"{step_display}: {current_step_name} (Streaming...)"
@@ -1435,8 +1653,7 @@ async def aprint_response_stream(
 
         live_log.update("")
 
-
-        if show_time:
+    if show_time and not is_workflow_agent_response:
        completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
        console.print(completion_text)  # type: ignore
 
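The hunks above thread a new `files` parameter and a `stream_events` flag through the workflow print helpers, alongside the workflow-agent direct-response handling. A minimal usage sketch follows; `File(filepath=...)` and the pre-existing `my_workflow` instance are assumptions for illustration and are not shown in this diff, while the `agno.media.File` import and the `files=` parameter are.

```python
# Sketch only: exercising the new `files` parameter on the workflow print helper.
from agno.media import File
from agno.utils.print_response.workflow import print_response

# `my_workflow` stands in for an existing agno Workflow instance.
# File(filepath=...) is an assumed constructor argument.
print_response(
    workflow=my_workflow,
    input="Summarize the attached report",
    files=[File(filepath="report.pdf")],
    markdown=True,
)
```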
agno/utils/reasoning.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, List, Union
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
 
 from agno.models.message import Message
 from agno.models.metrics import Metrics
@@ -9,6 +9,27 @@ if TYPE_CHECKING:
     from agno.team.team import TeamRunOutput
 
 
+def extract_thinking_content(content: str) -> Tuple[Optional[str], str]:
+    """Extract thinking content from response text between <think> tags."""
+    if not content or "</think>" not in content:
+        return None, content
+
+    # Find the end of thinking content
+    end_idx = content.find("</think>")
+
+    # Look for opening <think> tag, if not found, assume thinking starts at beginning
+    start_idx = content.find("<think>")
+    if start_idx == -1:
+        reasoning_content = content[:end_idx].strip()
+    else:
+        start_idx = start_idx + len("<think>")
+        reasoning_content = content[start_idx:end_idx].strip()
+
+    output_content = content[end_idx + len("</think>") :].strip()
+
+    return reasoning_content, output_content
+
+
 def append_to_reasoning_content(run_response: Union["RunOutput", "TeamRunOutput"], content: str) -> None:
     """Helper to append content to the reasoning_content field."""
     if not hasattr(run_response, "reasoning_content") or not run_response.reasoning_content:  # type: ignore
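For reference, a small usage sketch of the new `extract_thinking_content` helper, following the behavior implied by the implementation shown above:

```python
from agno.utils.reasoning import extract_thinking_content

# Reasoning wrapped in <think> tags is split from the final answer.
reasoning, answer = extract_thinking_content("<think>2 + 2 = 4</think>The answer is 4.")
assert reasoning == "2 + 2 = 4"
assert answer == "The answer is 4."

# Without a closing </think> tag, the content is returned unchanged.
reasoning, answer = extract_thinking_content("No tags here.")
assert reasoning is None and answer == "No tags here."
```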