agno 2.3.13__py3-none-any.whl → 2.3.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. agno/agent/agent.py +1149 -1392
  2. agno/db/migrations/manager.py +3 -3
  3. agno/eval/__init__.py +21 -8
  4. agno/knowledge/embedder/azure_openai.py +0 -1
  5. agno/knowledge/embedder/google.py +1 -1
  6. agno/models/anthropic/claude.py +9 -4
  7. agno/models/base.py +8 -4
  8. agno/models/metrics.py +12 -0
  9. agno/models/openai/chat.py +2 -0
  10. agno/models/openai/responses.py +2 -2
  11. agno/os/app.py +59 -2
  12. agno/os/auth.py +40 -3
  13. agno/os/interfaces/a2a/router.py +619 -9
  14. agno/os/interfaces/a2a/utils.py +31 -32
  15. agno/os/middleware/jwt.py +5 -5
  16. agno/os/router.py +1 -57
  17. agno/os/routers/agents/schema.py +14 -1
  18. agno/os/routers/database.py +150 -0
  19. agno/os/routers/teams/schema.py +14 -1
  20. agno/os/settings.py +3 -0
  21. agno/os/utils.py +61 -53
  22. agno/reasoning/anthropic.py +85 -1
  23. agno/reasoning/azure_ai_foundry.py +93 -1
  24. agno/reasoning/deepseek.py +91 -1
  25. agno/reasoning/gemini.py +81 -1
  26. agno/reasoning/groq.py +103 -1
  27. agno/reasoning/manager.py +1244 -0
  28. agno/reasoning/ollama.py +93 -1
  29. agno/reasoning/openai.py +113 -1
  30. agno/reasoning/vertexai.py +85 -1
  31. agno/run/agent.py +21 -0
  32. agno/run/base.py +20 -1
  33. agno/run/team.py +21 -0
  34. agno/session/team.py +0 -3
  35. agno/team/team.py +1211 -1445
  36. agno/tools/toolkit.py +119 -8
  37. agno/utils/events.py +99 -4
  38. agno/utils/hooks.py +4 -10
  39. agno/utils/print_response/agent.py +26 -0
  40. agno/utils/print_response/team.py +11 -0
  41. agno/utils/prompts.py +8 -6
  42. agno/utils/string.py +46 -0
  43. agno/utils/team.py +1 -1
  44. agno/vectordb/milvus/milvus.py +32 -3
  45. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/METADATA +3 -2
  46. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/RECORD +49 -47
  47. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/WHEEL +0 -0
  48. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/licenses/LICENSE +0 -0
  49. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1244 @@
1
+ """
2
+ ReasoningManager - Centralized manager for all reasoning operations.
3
+
4
+ This module consolidates reasoning logic from the Agent class into a single,
5
+ maintainable manager that handles:
6
+ - Native reasoning models (DeepSeek, Anthropic, OpenAI, Gemini, etc.)
7
+ - Default Chain-of-Thought reasoning
8
+ - Both streaming and non-streaming modes
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ from dataclasses import dataclass, field
14
+ from enum import Enum
15
+ from typing import (
16
+ TYPE_CHECKING,
17
+ Any,
18
+ AsyncIterator,
19
+ Callable,
20
+ Dict,
21
+ Iterator,
22
+ List,
23
+ Literal,
24
+ Optional,
25
+ Tuple,
26
+ Union,
27
+ )
28
+
29
+ from agno.models.base import Model
30
+ from agno.models.message import Message
31
+ from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
32
+ from agno.run.messages import RunMessages
33
+ from agno.tools import Toolkit
34
+ from agno.tools.function import Function
35
+ from agno.utils.log import log_debug, log_error, log_info, log_warning
36
+
37
+ if TYPE_CHECKING:
38
+ from agno.agent import Agent
39
+ from agno.run.agent import RunOutput
40
+
41
+
42
class ReasoningEventType(str, Enum):
    """Types of reasoning events that can be emitted."""

    started = "reasoning_started"  # reasoning has begun
    content_delta = "reasoning_content_delta"  # incremental reasoning content chunk
    step = "reasoning_step"  # a complete ReasoningStep is available
    completed = "reasoning_completed"  # reasoning finished successfully
    error = "reasoning_error"  # reasoning failed; see the event's error field
50
+
51
+
52
@dataclass
class ReasoningEvent:
    """
    A unified reasoning event that can be converted to Agent or Team specific events.

    This allows the ReasoningManager to emit events without knowing about the
    specific event types used by Agent or Team.
    """

    # Which kind of reasoning event this is (see ReasoningEventType).
    event_type: ReasoningEventType
    # For content_delta events
    reasoning_content: Optional[str] = None
    # For step events
    reasoning_step: Optional[ReasoningStep] = None
    # For completed events
    reasoning_steps: List[ReasoningStep] = field(default_factory=list)
    # For error events
    error: Optional[str] = None
    # The message to append to run_messages (for native reasoning)
    message: Optional[Message] = None
    # All reasoning messages (for updating run_output)
    reasoning_messages: List[Message] = field(default_factory=list)
74
+
75
+
76
@dataclass
class ReasoningConfig:
    """Configuration for reasoning operations."""

    # Model used for reasoning (native reasoning model or CoT backbone).
    reasoning_model: Optional[Model] = None
    # If set, used as-is: takes precedence over any auto-created reasoning agent.
    reasoning_agent: Optional["Agent"] = None
    # Bounds for the default Chain-of-Thought loop; max_steps caps iterations.
    min_steps: int = 1
    max_steps: int = 10
    # Tools made available to the default CoT reasoning agent.
    tools: Optional[List[Union[Toolkit, Callable, Function, Dict]]] = None
    tool_call_limit: Optional[int] = None
    # Passed through to the default CoT reasoning agent (JSON-mode toggle).
    use_json_mode: bool = False
    # The remaining fields are forwarded to the underlying reasoning agent(s).
    telemetry: bool = True
    debug_mode: bool = False
    debug_level: Literal[1, 2] = 1
    session_state: Optional[Dict[str, Any]] = None
    dependencies: Optional[Dict[str, Any]] = None
    metadata: Optional[Dict[str, Any]] = None
93
+
94
+
95
@dataclass
class ReasoningResult:
    """Result from a reasoning operation."""

    # The reasoning message produced (for native reasoning, appended to run_messages).
    message: Optional[Message] = None
    # Individual reasoning steps extracted from the run.
    steps: List[ReasoningStep] = field(default_factory=list)
    # All messages produced while reasoning.
    reasoning_messages: List[Message] = field(default_factory=list)
    # False when reasoning failed; `error` then holds the cause.
    success: bool = True
    error: Optional[str] = None
104
+
105
+
106
+ class ReasoningManager:
107
+ """
108
+ Centralized manager for all reasoning operations.
109
+
110
+ Handles both native reasoning models (DeepSeek, Anthropic, OpenAI, etc.)
111
+ and default Chain-of-Thought reasoning with a clean, unified interface.
112
+ """
113
+
114
def __init__(self, config: ReasoningConfig):
    """Initialize the manager with the given ReasoningConfig."""
    self.config = config
    # Caches initialized here but not read by the code visible in this module
    # chunk — presumably populated/used elsewhere; TODO confirm.
    self._reasoning_agent: Optional["Agent"] = None
    self._model_type: Optional[str] = None
118
+
119
@property
def reasoning_model(self) -> Optional[Model]:
    """The configured reasoning model, if any."""
    return self.config.reasoning_model
122
+
123
def _detect_model_type(self, model: Model) -> Optional[str]:
    """Return the native-reasoning provider key for *model*, or None.

    Providers are probed in a fixed order (DeepSeek first, VertexAI last);
    the first matching checker wins.
    """
    # Lazy imports, matching the module's pattern of deferring agno.reasoning imports.
    from agno.reasoning.anthropic import is_anthropic_reasoning_model
    from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
    from agno.reasoning.deepseek import is_deepseek_reasoning_model
    from agno.reasoning.gemini import is_gemini_reasoning_model
    from agno.reasoning.groq import is_groq_reasoning_model
    from agno.reasoning.ollama import is_ollama_reasoning_model
    from agno.reasoning.openai import is_openai_reasoning_model
    from agno.reasoning.vertexai import is_vertexai_reasoning_model

    # Order matters: identical to the original if/elif probe sequence.
    probes = (
        (is_deepseek_reasoning_model, "deepseek"),
        (is_anthropic_reasoning_model, "anthropic"),
        (is_openai_reasoning_model, "openai"),
        (is_groq_reasoning_model, "groq"),
        (is_ollama_reasoning_model, "ollama"),
        (is_ai_foundry_reasoning_model, "ai_foundry"),
        (is_gemini_reasoning_model, "gemini"),
        (is_vertexai_reasoning_model, "vertexai"),
    )
    return next((key for probe, key in probes if probe(model)), None)
151
+
152
def _get_reasoning_agent(self, model: Model) -> "Agent":
    """Get or create a reasoning agent for the given model.

    A user-supplied ``config.reasoning_agent`` always takes precedence;
    otherwise a fresh agent is built via ``agno.reasoning.helpers.get_reasoning_agent``
    using the manager's configured telemetry/debug/session settings.
    """
    if self.config.reasoning_agent is not None:
        return self.config.reasoning_agent

    # Lazy import, matching the module's pattern of deferring agno.reasoning imports.
    from agno.reasoning.helpers import get_reasoning_agent

    return get_reasoning_agent(
        reasoning_model=model,
        telemetry=self.config.telemetry,
        debug_mode=self.config.debug_mode,
        debug_level=self.config.debug_level,
        session_state=self.config.session_state,
        dependencies=self.config.dependencies,
        metadata=self.config.metadata,
    )
168
+
169
def _get_default_reasoning_agent(self, model: Model) -> Optional["Agent"]:
    """Get or create a default Chain-of-Thought reasoning agent.

    A user-supplied ``config.reasoning_agent`` always takes precedence;
    otherwise the agent is built via ``agno.reasoning.default.get_default_reasoning_agent``
    with the manager's step bounds, tools and debug/telemetry settings.
    """
    if self.config.reasoning_agent is not None:
        return self.config.reasoning_agent

    # Lazy import, matching the module's pattern of deferring agno.reasoning imports.
    from agno.reasoning.default import get_default_reasoning_agent

    return get_default_reasoning_agent(
        reasoning_model=model,
        min_steps=self.config.min_steps,
        max_steps=self.config.max_steps,
        tools=self.config.tools,
        tool_call_limit=self.config.tool_call_limit,
        use_json_mode=self.config.use_json_mode,
        telemetry=self.config.telemetry,
        debug_mode=self.config.debug_mode,
        debug_level=self.config.debug_level,
        session_state=self.config.session_state,
        dependencies=self.config.dependencies,
        metadata=self.config.metadata,
    )
190
+
191
def is_native_reasoning_model(self, model: Optional[Model] = None) -> bool:
    """True when *model* (or, if omitted, the configured reasoning model)
    is recognized as a native reasoning model."""
    target = model or self.config.reasoning_model
    return target is not None and self._detect_model_type(target) is not None
197
+
198
+ # =========================================================================
199
+ # Native Model Reasoning (Non-Streaming)
200
+ # =========================================================================
201
+
202
def get_native_reasoning(self, model: Model, messages: List[Message]) -> ReasoningResult:
    """Get reasoning from a native reasoning model (non-streaming).

    Args:
        model: The native reasoning model to use.
        messages: Conversation messages handed to the provider-specific helper.

    Returns:
        ReasoningResult carrying the reasoning message, a single ReasoningStep
        and the reasoning messages on success; ``success=False`` with an error
        string when the model is not a native reasoning model, the provider
        helper raises, or no reasoning message is produced.
    """
    import importlib

    model_type = self._detect_model_type(model)
    if model_type is None:
        return ReasoningResult(success=False, error="Not a native reasoning model")

    # Provider dispatch table: model_type -> (module, function, log label).
    # Replaces eight duplicated if/elif branches; behavior and log output unchanged.
    providers = {
        "deepseek": ("agno.reasoning.deepseek", "get_deepseek_reasoning", "DeepSeek"),
        "anthropic": ("agno.reasoning.anthropic", "get_anthropic_reasoning", "Anthropic Claude"),
        "openai": ("agno.reasoning.openai", "get_openai_reasoning", "OpenAI"),
        "groq": ("agno.reasoning.groq", "get_groq_reasoning", "Groq"),
        "ollama": ("agno.reasoning.ollama", "get_ollama_reasoning", "Ollama"),
        "ai_foundry": ("agno.reasoning.azure_ai_foundry", "get_ai_foundry_reasoning", "Azure AI Foundry"),
        "gemini": ("agno.reasoning.gemini", "get_gemini_reasoning", "Gemini"),
        "vertexai": ("agno.reasoning.vertexai", "get_vertexai_reasoning", "VertexAI"),
    }

    reasoning_agent = self._get_reasoning_agent(model)
    reasoning_message: Optional[Message] = None

    spec = providers.get(model_type)
    if spec is not None:
        module_name, fn_name, label = spec
        try:
            # Import lazily inside the try so import failures surface as a
            # ReasoningResult error, matching the original behavior.
            reasoning_fn = getattr(importlib.import_module(module_name), fn_name)
            log_debug(f"Starting {label} Reasoning", center=True, symbol="=")
            reasoning_message = reasoning_fn(reasoning_agent, messages)
        except Exception as e:
            log_error(f"Reasoning error: {e}")
            return ReasoningResult(success=False, error=str(e))

    if reasoning_message is None:
        return ReasoningResult(
            success=False,
            error="Reasoning response is None",
        )

    return ReasoningResult(
        message=reasoning_message,
        steps=[ReasoningStep(result=reasoning_message.content)],
        reasoning_messages=[reasoning_message],
        success=True,
    )
276
+
277
async def aget_native_reasoning(self, model: Model, messages: List[Message]) -> ReasoningResult:
    """Get reasoning from a native reasoning model asynchronously (non-streaming).

    Args:
        model: The native reasoning model to use.
        messages: Conversation messages handed to the provider-specific helper.

    Returns:
        ReasoningResult carrying the reasoning message, a single ReasoningStep
        and the reasoning messages on success; ``success=False`` with an error
        string on any failure.
    """
    import importlib

    model_type = self._detect_model_type(model)
    if model_type is None:
        return ReasoningResult(success=False, error="Not a native reasoning model")

    # Provider dispatch table: model_type -> (module, function, log label, is_async).
    # NOTE(review): ollama and ai_foundry intentionally call their sync helpers
    # without await, mirroring the original code — presumably no async variants
    # exist for the non-streaming path; confirm before changing.
    providers = {
        "deepseek": ("agno.reasoning.deepseek", "aget_deepseek_reasoning", "DeepSeek", True),
        "anthropic": ("agno.reasoning.anthropic", "aget_anthropic_reasoning", "Anthropic Claude", True),
        "openai": ("agno.reasoning.openai", "aget_openai_reasoning", "OpenAI", True),
        "groq": ("agno.reasoning.groq", "aget_groq_reasoning", "Groq", True),
        "ollama": ("agno.reasoning.ollama", "get_ollama_reasoning", "Ollama", False),
        "ai_foundry": ("agno.reasoning.azure_ai_foundry", "get_ai_foundry_reasoning", "Azure AI Foundry", False),
        "gemini": ("agno.reasoning.gemini", "aget_gemini_reasoning", "Gemini", True),
        "vertexai": ("agno.reasoning.vertexai", "aget_vertexai_reasoning", "VertexAI", True),
    }

    reasoning_agent = self._get_reasoning_agent(model)
    reasoning_message: Optional[Message] = None

    spec = providers.get(model_type)
    if spec is not None:
        module_name, fn_name, label, is_async = spec
        try:
            # Import lazily inside the try so import failures surface as a
            # ReasoningResult error, matching the original behavior.
            reasoning_fn = getattr(importlib.import_module(module_name), fn_name)
            log_debug(f"Starting {label} Reasoning", center=True, symbol="=")
            outcome = reasoning_fn(reasoning_agent, messages)
            reasoning_message = await outcome if is_async else outcome
        except Exception as e:
            log_error(f"Reasoning error: {e}")
            return ReasoningResult(success=False, error=str(e))

    if reasoning_message is None:
        return ReasoningResult(
            success=False,
            error="Reasoning response is None",
        )

    return ReasoningResult(
        message=reasoning_message,
        steps=[ReasoningStep(result=reasoning_message.content)],
        reasoning_messages=[reasoning_message],
        success=True,
    )
351
+
352
+ # =========================================================================
353
+ # Native Model Reasoning (Streaming)
354
+ # =========================================================================
355
+
356
def stream_native_reasoning(
    self, model: Model, messages: List[Message]
) -> Iterator[Tuple[Optional[str], Optional[ReasoningResult]]]:
    """
    Stream reasoning from a native reasoning model.

    Yields:
        Tuple of (reasoning_content_delta, final_result)
        - During streaming: (reasoning_content_delta, None)
        - At the end: (None, ReasoningResult)
    """
    import importlib

    model_type = self._detect_model_type(model)
    if model_type is None:
        yield (None, ReasoningResult(success=False, error="Not a native reasoning model"))
        return

    reasoning_agent = self._get_reasoning_agent(model)

    # Streaming dispatch table: model_type -> (module, stream function, log label).
    # Collapses eight duplicated branches; yields and log output are unchanged.
    # (The original "only DeepSeek and Anthropic support streaming" comment was
    # stale — all eight providers have streaming helpers.)
    stream_providers = {
        "deepseek": ("agno.reasoning.deepseek", "get_deepseek_reasoning_stream", "DeepSeek"),
        "anthropic": ("agno.reasoning.anthropic", "get_anthropic_reasoning_stream", "Anthropic Claude"),
        "gemini": ("agno.reasoning.gemini", "get_gemini_reasoning_stream", "Gemini"),
        "openai": ("agno.reasoning.openai", "get_openai_reasoning_stream", "OpenAI"),
        "vertexai": ("agno.reasoning.vertexai", "get_vertexai_reasoning_stream", "VertexAI"),
        "ai_foundry": ("agno.reasoning.azure_ai_foundry", "get_ai_foundry_reasoning_stream", "Azure AI Foundry"),
        "groq": ("agno.reasoning.groq", "get_groq_reasoning_stream", "Groq"),
        "ollama": ("agno.reasoning.ollama", "get_ollama_reasoning_stream", "Ollama"),
    }

    spec = stream_providers.get(model_type)
    if spec is None:
        # Fall back to non-streaming for model types without a streaming helper.
        yield (None, self.get_native_reasoning(model, messages))
        return

    module_name, fn_name, label = spec
    stream_fn = getattr(importlib.import_module(module_name), fn_name)
    log_debug(f"Starting {label} Reasoning (streaming)", center=True, symbol="=")

    # Forward content deltas as they arrive; remember the last complete message.
    final_message: Optional[Message] = None
    for reasoning_delta, message in stream_fn(reasoning_agent, messages):
        if reasoning_delta is not None:
            yield (reasoning_delta, None)
        if message is not None:
            final_message = message

    if final_message:
        yield (
            None,
            ReasoningResult(
                message=final_message,
                steps=[ReasoningStep(result=final_message.content)],
                reasoning_messages=[final_message],
                success=True,
            ),
        )
    else:
        yield (None, ReasoningResult(success=False, error="No reasoning content"))
571
+
572
async def astream_native_reasoning(
    self, model: Model, messages: List[Message]
) -> AsyncIterator[Tuple[Optional[str], Optional[ReasoningResult]]]:
    """
    Stream reasoning from a native reasoning model asynchronously.

    Yields:
        Tuple of (reasoning_content_delta, final_result)
        - During streaming: (reasoning_content_delta, None)
        - At the end: (None, ReasoningResult)
    """
    import importlib

    model_type = self._detect_model_type(model)
    if model_type is None:
        yield (None, ReasoningResult(success=False, error="Not a native reasoning model"))
        return

    reasoning_agent = self._get_reasoning_agent(model)

    # Async streaming dispatch table: model_type -> (module, stream function, log label).
    # Collapses eight duplicated branches; yields and log output are unchanged.
    # (The original "only DeepSeek and Anthropic support streaming" comment was
    # stale — all eight providers have async streaming helpers.)
    stream_providers = {
        "deepseek": ("agno.reasoning.deepseek", "aget_deepseek_reasoning_stream", "DeepSeek"),
        "anthropic": ("agno.reasoning.anthropic", "aget_anthropic_reasoning_stream", "Anthropic Claude"),
        "gemini": ("agno.reasoning.gemini", "aget_gemini_reasoning_stream", "Gemini"),
        "openai": ("agno.reasoning.openai", "aget_openai_reasoning_stream", "OpenAI"),
        "vertexai": ("agno.reasoning.vertexai", "aget_vertexai_reasoning_stream", "VertexAI"),
        "ai_foundry": ("agno.reasoning.azure_ai_foundry", "aget_ai_foundry_reasoning_stream", "Azure AI Foundry"),
        "groq": ("agno.reasoning.groq", "aget_groq_reasoning_stream", "Groq"),
        "ollama": ("agno.reasoning.ollama", "aget_ollama_reasoning_stream", "Ollama"),
    }

    spec = stream_providers.get(model_type)
    if spec is None:
        # Fall back to non-streaming for model types without a streaming helper.
        yield (None, await self.aget_native_reasoning(model, messages))
        return

    module_name, fn_name, label = spec
    stream_fn = getattr(importlib.import_module(module_name), fn_name)
    log_debug(f"Starting {label} Reasoning (streaming)", center=True, symbol="=")

    # Forward content deltas as they arrive; remember the last complete message.
    final_message: Optional[Message] = None
    async for reasoning_delta, message in stream_fn(reasoning_agent, messages):
        if reasoning_delta is not None:
            yield (reasoning_delta, None)
        if message is not None:
            final_message = message

    if final_message:
        yield (
            None,
            ReasoningResult(
                message=final_message,
                steps=[ReasoningStep(result=final_message.content)],
                reasoning_messages=[final_message],
                success=True,
            ),
        )
    else:
        yield (None, ReasoningResult(success=False, error="No reasoning content"))
787
+
788
+ # =========================================================================
789
+ # Default Chain-of-Thought Reasoning
790
+ # =========================================================================
791
+
792
+ def run_default_reasoning(
793
+ self, model: Model, run_messages: RunMessages
794
+ ) -> Iterator[Tuple[Optional[ReasoningStep], Optional[ReasoningResult]]]:
795
+ """
796
+ Run default Chain-of-Thought reasoning.
797
+
798
+ Yields:
799
+ Tuple of (reasoning_step, final_result)
800
+ - During reasoning: (ReasoningStep, None)
801
+ - At the end: (None, ReasoningResult)
802
+ """
803
+ from agno.reasoning.helpers import get_next_action, update_messages_with_reasoning
804
+
805
+ reasoning_agent = self._get_default_reasoning_agent(model)
806
+ if reasoning_agent is None:
807
+ yield (None, ReasoningResult(success=False, error="Reasoning agent is None"))
808
+ return
809
+
810
+ # Validate reasoning agent output schema
811
+ if (
812
+ reasoning_agent.output_schema is not None
813
+ and isinstance(reasoning_agent.output_schema, type)
814
+ and not issubclass(reasoning_agent.output_schema, ReasoningSteps)
815
+ ):
816
+ yield (
817
+ None,
818
+ ReasoningResult(
819
+ success=False,
820
+ error="Reasoning agent response model should be ReasoningSteps",
821
+ ),
822
+ )
823
+ return
824
+
825
+ step_count = 1
826
+ next_action = NextAction.CONTINUE
827
+ reasoning_messages: List[Message] = []
828
+ all_reasoning_steps: List[ReasoningStep] = []
829
+
830
+ log_debug("Starting Reasoning", center=True, symbol="=")
831
+
832
+ while next_action == NextAction.CONTINUE and step_count < self.config.max_steps:
833
+ log_debug(f"Step {step_count}", center=True, symbol="=")
834
+ try:
835
+ reasoning_agent_response: RunOutput = reasoning_agent.run(input=run_messages.get_input_messages())
836
+
837
+ if reasoning_agent_response.content is None or reasoning_agent_response.messages is None:
838
+ log_warning("Reasoning error. Reasoning response is empty")
839
+ break
840
+
841
+ if isinstance(reasoning_agent_response.content, str):
842
+ log_warning("Reasoning error. Content is a string, not structured output")
843
+ break
844
+
845
+ if (
846
+ reasoning_agent_response.content.reasoning_steps is None
847
+ or len(reasoning_agent_response.content.reasoning_steps) == 0
848
+ ):
849
+ log_warning("Reasoning error. Reasoning steps are empty")
850
+ break
851
+
852
+ reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
853
+ all_reasoning_steps.extend(reasoning_steps)
854
+
855
+ # Yield each reasoning step
856
+ for step in reasoning_steps:
857
+ yield (step, None)
858
+
859
+ # Extract reasoning messages
860
+ first_assistant_index = next(
861
+ (i for i, m in enumerate(reasoning_agent_response.messages) if m.role == "assistant"),
862
+ len(reasoning_agent_response.messages),
863
+ )
864
+ reasoning_messages = reasoning_agent_response.messages[first_assistant_index:]
865
+
866
+ # Get the next action
867
+ next_action = get_next_action(reasoning_steps[-1])
868
+ if next_action == NextAction.FINAL_ANSWER:
869
+ break
870
+
871
+ except Exception as e:
872
+ log_error(f"Reasoning error: {e}")
873
+ break
874
+
875
+ step_count += 1
876
+
877
+ log_debug(f"Total Reasoning steps: {len(all_reasoning_steps)}")
878
+ log_debug("Reasoning finished", center=True, symbol="=")
879
+
880
+ # Update messages with reasoning
881
+ update_messages_with_reasoning(
882
+ run_messages=run_messages,
883
+ reasoning_messages=reasoning_messages,
884
+ )
885
+
886
+ # Yield final result
887
+ yield (
888
+ None,
889
+ ReasoningResult(
890
+ steps=all_reasoning_steps,
891
+ reasoning_messages=reasoning_messages,
892
+ success=True,
893
+ ),
894
+ )
895
+
896
+ async def arun_default_reasoning(
897
+ self, model: Model, run_messages: RunMessages
898
+ ) -> AsyncIterator[Tuple[Optional[ReasoningStep], Optional[ReasoningResult]]]:
899
+ """
900
+ Run default Chain-of-Thought reasoning asynchronously.
901
+
902
+ Yields:
903
+ Tuple of (reasoning_step, final_result)
904
+ - During reasoning: (ReasoningStep, None)
905
+ - At the end: (None, ReasoningResult)
906
+ """
907
+ from agno.reasoning.helpers import get_next_action, update_messages_with_reasoning
908
+
909
+ reasoning_agent = self._get_default_reasoning_agent(model)
910
+ if reasoning_agent is None:
911
+ yield (None, ReasoningResult(success=False, error="Reasoning agent is None"))
912
+ return
913
+
914
+ # Validate reasoning agent output schema
915
+ if (
916
+ reasoning_agent.output_schema is not None
917
+ and isinstance(reasoning_agent.output_schema, type)
918
+ and not issubclass(reasoning_agent.output_schema, ReasoningSteps)
919
+ ):
920
+ yield (
921
+ None,
922
+ ReasoningResult(
923
+ success=False,
924
+ error="Reasoning agent response model should be ReasoningSteps",
925
+ ),
926
+ )
927
+ return
928
+
929
+ step_count = 1
930
+ next_action = NextAction.CONTINUE
931
+ reasoning_messages: List[Message] = []
932
+ all_reasoning_steps: List[ReasoningStep] = []
933
+
934
+ log_debug("Starting Reasoning", center=True, symbol="=")
935
+
936
+ while next_action == NextAction.CONTINUE and step_count < self.config.max_steps:
937
+ log_debug(f"Step {step_count}", center=True, symbol="=")
938
+ step_count += 1
939
+ try:
940
+ reasoning_agent_response: RunOutput = await reasoning_agent.arun(
941
+ input=run_messages.get_input_messages()
942
+ )
943
+
944
+ if reasoning_agent_response.content is None or reasoning_agent_response.messages is None:
945
+ log_warning("Reasoning error. Reasoning response is empty")
946
+ break
947
+
948
+ if isinstance(reasoning_agent_response.content, str):
949
+ log_warning("Reasoning error. Content is a string, not structured output")
950
+ break
951
+
952
+ if reasoning_agent_response.content.reasoning_steps is None:
953
+ log_warning("Reasoning error. Reasoning steps are empty")
954
+ break
955
+
956
+ reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
957
+ all_reasoning_steps.extend(reasoning_steps)
958
+
959
+ # Yield each reasoning step
960
+ for step in reasoning_steps:
961
+ yield (step, None)
962
+
963
+ # Extract reasoning messages
964
+ first_assistant_index = next(
965
+ (i for i, m in enumerate(reasoning_agent_response.messages) if m.role == "assistant"),
966
+ len(reasoning_agent_response.messages),
967
+ )
968
+ reasoning_messages = reasoning_agent_response.messages[first_assistant_index:]
969
+
970
+ # Get the next action
971
+ next_action = get_next_action(reasoning_steps[-1])
972
+ if next_action == NextAction.FINAL_ANSWER:
973
+ break
974
+
975
+ except Exception as e:
976
+ log_error(f"Reasoning error: {e}")
977
+ break
978
+
979
+ log_debug(f"Total Reasoning steps: {len(all_reasoning_steps)}")
980
+ log_debug("Reasoning finished", center=True, symbol="=")
981
+
982
+ # Update messages with reasoning
983
+ update_messages_with_reasoning(
984
+ run_messages=run_messages,
985
+ reasoning_messages=reasoning_messages,
986
+ )
987
+
988
+ # Yield final result
989
+ yield (
990
+ None,
991
+ ReasoningResult(
992
+ steps=all_reasoning_steps,
993
+ reasoning_messages=reasoning_messages,
994
+ success=True,
995
+ ),
996
+ )
997
+
998
+ def reason(
999
+ self,
1000
+ run_messages: RunMessages,
1001
+ stream: bool = False,
1002
+ ) -> Iterator[ReasoningEvent]:
1003
+ """
1004
+ Run reasoning and yield ReasoningEvent objects.
1005
+
1006
+ Args:
1007
+ run_messages: The messages to reason about
1008
+ stream: Whether to stream reasoning content
1009
+
1010
+ Yields:
1011
+ ReasoningEvent objects for each stage of reasoning
1012
+ """
1013
+ # Get the reasoning model
1014
+ reasoning_model: Optional[Model] = self.config.reasoning_model
1015
+ reasoning_model_provided = reasoning_model is not None
1016
+
1017
+ if reasoning_model is None:
1018
+ yield ReasoningEvent(
1019
+ event_type=ReasoningEventType.error,
1020
+ error="Reasoning model is None",
1021
+ )
1022
+ return
1023
+
1024
+ # Yield started event
1025
+ yield ReasoningEvent(event_type=ReasoningEventType.started)
1026
+
1027
+ # Check if this is a native reasoning model
1028
+ if reasoning_model_provided and self.is_native_reasoning_model(reasoning_model):
1029
+ # Use streaming for native models when stream is enabled
1030
+ if stream:
1031
+ yield from self._stream_native_reasoning_events(reasoning_model, run_messages)
1032
+ else:
1033
+ yield from self._get_native_reasoning_events(reasoning_model, run_messages)
1034
+ else:
1035
+ # Use default Chain-of-Thought reasoning
1036
+ if reasoning_model_provided:
1037
+ log_info(
1038
+ f"Reasoning model: {reasoning_model.__class__.__name__} is not a native reasoning model, "
1039
+ "defaulting to manual Chain-of-Thought reasoning"
1040
+ )
1041
+ yield from self._run_default_reasoning_events(reasoning_model, run_messages)
1042
+
1043
+ async def areason(
1044
+ self,
1045
+ run_messages: RunMessages,
1046
+ stream: bool = False,
1047
+ ) -> AsyncIterator[ReasoningEvent]:
1048
+ """
1049
+ Unified async reasoning interface that yields ReasoningEvent objects.
1050
+
1051
+ This method handles all reasoning logic and yields events that can be
1052
+ converted to Agent or Team specific events by the caller.
1053
+
1054
+ Args:
1055
+ run_messages: The messages to reason about
1056
+ stream: Whether to stream reasoning content deltas
1057
+
1058
+ Yields:
1059
+ ReasoningEvent objects for each stage of reasoning
1060
+ """
1061
+ # Get the reasoning model
1062
+ reasoning_model: Optional[Model] = self.config.reasoning_model
1063
+ reasoning_model_provided = reasoning_model is not None
1064
+
1065
+ if reasoning_model is None:
1066
+ yield ReasoningEvent(
1067
+ event_type=ReasoningEventType.error,
1068
+ error="Reasoning model is None",
1069
+ )
1070
+ return
1071
+
1072
+ # Yield started event
1073
+ yield ReasoningEvent(event_type=ReasoningEventType.started)
1074
+
1075
+ # Check if this is a native reasoning model
1076
+ if reasoning_model_provided and self.is_native_reasoning_model(reasoning_model):
1077
+ # Use streaming for native models when stream is enabled
1078
+ if stream:
1079
+ async for event in self._astream_native_reasoning_events(reasoning_model, run_messages):
1080
+ yield event
1081
+ else:
1082
+ async for event in self._aget_native_reasoning_events(reasoning_model, run_messages):
1083
+ yield event
1084
+ else:
1085
+ # Use default Chain-of-Thought reasoning
1086
+ if reasoning_model_provided:
1087
+ log_info(
1088
+ f"Reasoning model: {reasoning_model.__class__.__name__} is not a native reasoning model, "
1089
+ "defaulting to manual Chain-of-Thought reasoning"
1090
+ )
1091
+ async for event in self._arun_default_reasoning_events(reasoning_model, run_messages):
1092
+ yield event
1093
+
1094
+ def _stream_native_reasoning_events(self, model: Model, run_messages: RunMessages) -> Iterator[ReasoningEvent]:
1095
+ """Stream native reasoning and yield ReasoningEvent objects."""
1096
+ messages = run_messages.get_input_messages()
1097
+
1098
+ for reasoning_delta, result in self.stream_native_reasoning(model, messages):
1099
+ if reasoning_delta is not None:
1100
+ yield ReasoningEvent(
1101
+ event_type=ReasoningEventType.content_delta,
1102
+ reasoning_content=reasoning_delta,
1103
+ )
1104
+ if result is not None:
1105
+ if not result.success:
1106
+ yield ReasoningEvent(
1107
+ event_type=ReasoningEventType.error,
1108
+ error=result.error,
1109
+ )
1110
+ return
1111
+ if result.message:
1112
+ run_messages.messages.append(result.message)
1113
+ yield ReasoningEvent(
1114
+ event_type=ReasoningEventType.completed,
1115
+ reasoning_steps=result.steps,
1116
+ message=result.message,
1117
+ reasoning_messages=result.reasoning_messages,
1118
+ )
1119
+
1120
+ def _get_native_reasoning_events(self, model: Model, run_messages: RunMessages) -> Iterator[ReasoningEvent]:
1121
+ """Get native reasoning (non-streaming) and yield ReasoningEvent objects."""
1122
+ messages = run_messages.get_input_messages()
1123
+ result = self.get_native_reasoning(model, messages)
1124
+
1125
+ if not result.success:
1126
+ yield ReasoningEvent(
1127
+ event_type=ReasoningEventType.error,
1128
+ error=result.error,
1129
+ )
1130
+ return
1131
+
1132
+ if result.message:
1133
+ run_messages.messages.append(result.message)
1134
+ yield ReasoningEvent(
1135
+ event_type=ReasoningEventType.completed,
1136
+ reasoning_steps=result.steps,
1137
+ message=result.message,
1138
+ reasoning_messages=result.reasoning_messages,
1139
+ )
1140
+
1141
+ def _run_default_reasoning_events(self, model: Model, run_messages: RunMessages) -> Iterator[ReasoningEvent]:
1142
+ """Run default CoT reasoning and yield ReasoningEvent objects."""
1143
+ all_reasoning_steps: List[ReasoningStep] = []
1144
+
1145
+ for reasoning_step, result in self.run_default_reasoning(model, run_messages):
1146
+ if reasoning_step is not None:
1147
+ all_reasoning_steps.append(reasoning_step)
1148
+ yield ReasoningEvent(
1149
+ event_type=ReasoningEventType.step,
1150
+ reasoning_step=reasoning_step,
1151
+ )
1152
+ if result is not None:
1153
+ if not result.success:
1154
+ yield ReasoningEvent(
1155
+ event_type=ReasoningEventType.error,
1156
+ error=result.error,
1157
+ )
1158
+ return
1159
+
1160
+ # Yield completed event with all steps
1161
+ if all_reasoning_steps:
1162
+ yield ReasoningEvent(
1163
+ event_type=ReasoningEventType.completed,
1164
+ reasoning_steps=all_reasoning_steps,
1165
+ )
1166
+
1167
+ async def _astream_native_reasoning_events(
1168
+ self, model: Model, run_messages: RunMessages
1169
+ ) -> AsyncIterator[ReasoningEvent]:
1170
+ """Stream native reasoning asynchronously and yield ReasoningEvent objects."""
1171
+ messages = run_messages.get_input_messages()
1172
+
1173
+ async for reasoning_delta, result in self.astream_native_reasoning(model, messages):
1174
+ if reasoning_delta is not None:
1175
+ yield ReasoningEvent(
1176
+ event_type=ReasoningEventType.content_delta,
1177
+ reasoning_content=reasoning_delta,
1178
+ )
1179
+ if result is not None:
1180
+ if not result.success:
1181
+ yield ReasoningEvent(
1182
+ event_type=ReasoningEventType.error,
1183
+ error=result.error,
1184
+ )
1185
+ return
1186
+ if result.message:
1187
+ run_messages.messages.append(result.message)
1188
+ yield ReasoningEvent(
1189
+ event_type=ReasoningEventType.completed,
1190
+ reasoning_steps=result.steps,
1191
+ message=result.message,
1192
+ reasoning_messages=result.reasoning_messages,
1193
+ )
1194
+
1195
+ async def _aget_native_reasoning_events(
1196
+ self, model: Model, run_messages: RunMessages
1197
+ ) -> AsyncIterator[ReasoningEvent]:
1198
+ """Get native reasoning asynchronously (non-streaming) and yield ReasoningEvent objects."""
1199
+ messages = run_messages.get_input_messages()
1200
+ result = await self.aget_native_reasoning(model, messages)
1201
+
1202
+ if not result.success:
1203
+ yield ReasoningEvent(
1204
+ event_type=ReasoningEventType.error,
1205
+ error=result.error,
1206
+ )
1207
+ return
1208
+
1209
+ if result.message:
1210
+ run_messages.messages.append(result.message)
1211
+ yield ReasoningEvent(
1212
+ event_type=ReasoningEventType.completed,
1213
+ reasoning_steps=result.steps,
1214
+ message=result.message,
1215
+ reasoning_messages=result.reasoning_messages,
1216
+ )
1217
+
1218
+ async def _arun_default_reasoning_events(
1219
+ self, model: Model, run_messages: RunMessages
1220
+ ) -> AsyncIterator[ReasoningEvent]:
1221
+ """Run default CoT reasoning asynchronously and yield ReasoningEvent objects."""
1222
+ all_reasoning_steps: List[ReasoningStep] = []
1223
+
1224
+ async for reasoning_step, result in self.arun_default_reasoning(model, run_messages):
1225
+ if reasoning_step is not None:
1226
+ all_reasoning_steps.append(reasoning_step)
1227
+ yield ReasoningEvent(
1228
+ event_type=ReasoningEventType.step,
1229
+ reasoning_step=reasoning_step,
1230
+ )
1231
+ if result is not None:
1232
+ if not result.success:
1233
+ yield ReasoningEvent(
1234
+ event_type=ReasoningEventType.error,
1235
+ error=result.error,
1236
+ )
1237
+ return
1238
+
1239
+ # Yield completed event with all steps
1240
+ if all_reasoning_steps:
1241
+ yield ReasoningEvent(
1242
+ event_type=ReasoningEventType.completed,
1243
+ reasoning_steps=all_reasoning_steps,
1244
+ )