agno 2.3.13__py3-none-any.whl → 2.3.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. agno/agent/agent.py +1149 -1392
  2. agno/db/migrations/manager.py +3 -3
  3. agno/eval/__init__.py +21 -8
  4. agno/knowledge/embedder/azure_openai.py +0 -1
  5. agno/knowledge/embedder/google.py +1 -1
  6. agno/models/anthropic/claude.py +9 -4
  7. agno/models/base.py +8 -4
  8. agno/models/metrics.py +12 -0
  9. agno/models/openai/chat.py +2 -0
  10. agno/models/openai/responses.py +2 -2
  11. agno/os/app.py +59 -2
  12. agno/os/auth.py +40 -3
  13. agno/os/interfaces/a2a/router.py +619 -9
  14. agno/os/interfaces/a2a/utils.py +31 -32
  15. agno/os/middleware/jwt.py +5 -5
  16. agno/os/router.py +1 -57
  17. agno/os/routers/agents/schema.py +14 -1
  18. agno/os/routers/database.py +150 -0
  19. agno/os/routers/teams/schema.py +14 -1
  20. agno/os/settings.py +3 -0
  21. agno/os/utils.py +61 -53
  22. agno/reasoning/anthropic.py +85 -1
  23. agno/reasoning/azure_ai_foundry.py +93 -1
  24. agno/reasoning/deepseek.py +91 -1
  25. agno/reasoning/gemini.py +81 -1
  26. agno/reasoning/groq.py +103 -1
  27. agno/reasoning/manager.py +1244 -0
  28. agno/reasoning/ollama.py +93 -1
  29. agno/reasoning/openai.py +113 -1
  30. agno/reasoning/vertexai.py +85 -1
  31. agno/run/agent.py +21 -0
  32. agno/run/base.py +20 -1
  33. agno/run/team.py +21 -0
  34. agno/session/team.py +0 -3
  35. agno/team/team.py +1211 -1445
  36. agno/tools/toolkit.py +119 -8
  37. agno/utils/events.py +99 -4
  38. agno/utils/hooks.py +4 -10
  39. agno/utils/print_response/agent.py +26 -0
  40. agno/utils/print_response/team.py +11 -0
  41. agno/utils/prompts.py +8 -6
  42. agno/utils/string.py +46 -0
  43. agno/utils/team.py +1 -1
  44. agno/vectordb/milvus/milvus.py +32 -3
  45. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/METADATA +3 -2
  46. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/RECORD +49 -47
  47. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/WHEEL +0 -0
  48. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/licenses/LICENSE +0 -0
  49. {agno-2.3.13.dist-info → agno-2.3.15.dist-info}/top_level.txt +0 -0
agno/reasoning/ollama.py CHANGED
@@ -1,6 +1,6 @@
1
1
  from __future__ import annotations
2
2
 
3
- from typing import List, Optional
3
+ from typing import AsyncIterator, Iterator, List, Optional, Tuple
4
4
 
5
5
  from agno.models.base import Model
6
6
  from agno.models.message import Message
@@ -65,3 +65,95 @@ async def aget_ollama_reasoning(reasoning_agent: "Agent", messages: List[Message
65
65
  return Message(
66
66
  role="assistant", content=f"<thinking>\n{reasoning_content}\n</thinking>", reasoning_content=reasoning_content
67
67
  )
68
+
69
+
70
+ def get_ollama_reasoning_stream(
71
+ reasoning_agent: "Agent", # type: ignore # noqa: F821
72
+ messages: List[Message],
73
+ ) -> Iterator[Tuple[Optional[str], Optional[Message]]]:
74
+ """
75
+ Stream reasoning content from Ollama model.
76
+
77
+ For reasoning models on Ollama (qwq, deepseek-r1, etc.), we use the main content output as reasoning content.
78
+
79
+ Yields:
80
+ Tuple of (reasoning_content_delta, final_message)
81
+ - During streaming: (reasoning_content_delta, None)
82
+ - At the end: (None, final_message)
83
+ """
84
+ from agno.run.agent import RunEvent
85
+
86
+ reasoning_content: str = ""
87
+
88
+ try:
89
+ for event in reasoning_agent.run(input=messages, stream=True, stream_intermediate_steps=True):
90
+ if hasattr(event, "event"):
91
+ if event.event == RunEvent.run_content:
92
+ # Check for reasoning_content attribute first (native reasoning)
93
+ if hasattr(event, "reasoning_content") and event.reasoning_content:
94
+ reasoning_content += event.reasoning_content
95
+ yield (event.reasoning_content, None)
96
+ # Use the main content as reasoning content
97
+ elif hasattr(event, "content") and event.content:
98
+ reasoning_content += event.content
99
+ yield (event.content, None)
100
+ elif event.event == RunEvent.run_completed:
101
+ pass
102
+ except Exception as e:
103
+ logger.warning(f"Reasoning error: {e}")
104
+ return
105
+
106
+ # Yield final message
107
+ if reasoning_content:
108
+ final_message = Message(
109
+ role="assistant",
110
+ content=f"<thinking>\n{reasoning_content}\n</thinking>",
111
+ reasoning_content=reasoning_content,
112
+ )
113
+ yield (None, final_message)
114
+
115
+
116
+ async def aget_ollama_reasoning_stream(
117
+ reasoning_agent: "Agent", # type: ignore # noqa: F821
118
+ messages: List[Message],
119
+ ) -> AsyncIterator[Tuple[Optional[str], Optional[Message]]]:
120
+ """
121
+ Stream reasoning content from Ollama model asynchronously.
122
+
123
+ For reasoning models on Ollama (qwq, deepseek-r1, etc.), we use the main content output as reasoning content.
124
+
125
+ Yields:
126
+ Tuple of (reasoning_content_delta, final_message)
127
+ - During streaming: (reasoning_content_delta, None)
128
+ - At the end: (None, final_message)
129
+ """
130
+ from agno.run.agent import RunEvent
131
+
132
+ reasoning_content: str = ""
133
+
134
+ try:
135
+ async for event in reasoning_agent.arun(input=messages, stream=True, stream_intermediate_steps=True):
136
+ if hasattr(event, "event"):
137
+ if event.event == RunEvent.run_content:
138
+ # Check for reasoning_content attribute first (native reasoning)
139
+ if hasattr(event, "reasoning_content") and event.reasoning_content:
140
+ reasoning_content += event.reasoning_content
141
+ yield (event.reasoning_content, None)
142
+ # Use the main content as reasoning content
143
+ elif hasattr(event, "content") and event.content:
144
+ reasoning_content += event.content
145
+ yield (event.content, None)
146
+ elif event.event == RunEvent.run_completed:
147
+ pass
148
+ except Exception as e:
149
+ logger.warning(f"Reasoning error: {e}")
150
+ return
151
+
152
+ # Yield final message
153
+ if reasoning_content:
154
+ final_message = Message(
155
+ role="assistant",
156
+ content=f"<thinking>\n{reasoning_content}\n</thinking>",
157
+ reasoning_content=reasoning_content,
158
+ )
159
+ yield (None, final_message)
agno/reasoning/openai.py CHANGED
@@ -1,6 +1,6 @@
1
1
  from __future__ import annotations
2
2
 
3
- from typing import List, Optional
3
+ from typing import AsyncIterator, Iterator, List, Optional, Tuple
4
4
 
5
5
  from agno.models.base import Model
6
6
  from agno.models.message import Message
@@ -84,3 +84,115 @@ async def aget_openai_reasoning(reasoning_agent: "Agent", messages: List[Message
84
84
  return Message(
85
85
  role="assistant", content=f"<thinking>\n{reasoning_content}\n</thinking>", reasoning_content=reasoning_content
86
86
  )
87
+
88
+
89
+ def get_openai_reasoning_stream(
90
+ reasoning_agent: "Agent", # type: ignore # noqa: F821
91
+ messages: List[Message],
92
+ ) -> Iterator[Tuple[Optional[str], Optional[Message]]]:
93
+ """
94
+ Stream reasoning content from OpenAI model.
95
+
96
+ For OpenAI reasoning models, we use the main content output as reasoning content.
97
+
98
+ Yields:
99
+ Tuple of (reasoning_content_delta, final_message)
100
+ - During streaming: (reasoning_content_delta, None)
101
+ - At the end: (None, final_message)
102
+ """
103
+ from agno.run.agent import RunEvent
104
+
105
+ # Update system message role to "system"
106
+ for message in messages:
107
+ if message.role == "developer":
108
+ message.role = "system"
109
+
110
+ reasoning_content: str = ""
111
+
112
+ try:
113
+ for event in reasoning_agent.run(input=messages, stream=True, stream_intermediate_steps=True):
114
+ if hasattr(event, "event"):
115
+ if event.event == RunEvent.run_content:
116
+ # Check for reasoning_content attribute first (native reasoning)
117
+ if hasattr(event, "reasoning_content") and event.reasoning_content:
118
+ reasoning_content += event.reasoning_content
119
+ yield (event.reasoning_content, None)
120
+ # Use the main content as reasoning content
121
+ elif hasattr(event, "content") and event.content:
122
+ reasoning_content += event.content
123
+ yield (event.content, None)
124
+ elif event.event == RunEvent.run_completed:
125
+ # Check for reasoning_content at completion (OpenAIResponses with reasoning_summary)
126
+ if hasattr(event, "reasoning_content") and event.reasoning_content:
127
+ # If we haven't accumulated any reasoning content yet, use this
128
+ if not reasoning_content:
129
+ reasoning_content = event.reasoning_content
130
+ yield (event.reasoning_content, None)
131
+ except Exception as e:
132
+ logger.warning(f"Reasoning error: {e}")
133
+ return
134
+
135
+ # Yield final message
136
+ if reasoning_content:
137
+ final_message = Message(
138
+ role="assistant",
139
+ content=f"<thinking>\n{reasoning_content}\n</thinking>",
140
+ reasoning_content=reasoning_content,
141
+ )
142
+ yield (None, final_message)
143
+
144
+
145
+ async def aget_openai_reasoning_stream(
146
+ reasoning_agent: "Agent", # type: ignore # noqa: F821
147
+ messages: List[Message],
148
+ ) -> AsyncIterator[Tuple[Optional[str], Optional[Message]]]:
149
+ """
150
+ Stream reasoning content from OpenAI model asynchronously.
151
+
152
+ For OpenAI reasoning models, we use the main content output as reasoning content.
153
+
154
+ Yields:
155
+ Tuple of (reasoning_content_delta, final_message)
156
+ - During streaming: (reasoning_content_delta, None)
157
+ - At the end: (None, final_message)
158
+ """
159
+ from agno.run.agent import RunEvent
160
+
161
+ # Update system message role to "system"
162
+ for message in messages:
163
+ if message.role == "developer":
164
+ message.role = "system"
165
+
166
+ reasoning_content: str = ""
167
+
168
+ try:
169
+ async for event in reasoning_agent.arun(input=messages, stream=True, stream_intermediate_steps=True):
170
+ if hasattr(event, "event"):
171
+ if event.event == RunEvent.run_content:
172
+ # Check for reasoning_content attribute first (native reasoning)
173
+ if hasattr(event, "reasoning_content") and event.reasoning_content:
174
+ reasoning_content += event.reasoning_content
175
+ yield (event.reasoning_content, None)
176
+ # Use the main content as reasoning content
177
+ elif hasattr(event, "content") and event.content:
178
+ reasoning_content += event.content
179
+ yield (event.content, None)
180
+ elif event.event == RunEvent.run_completed:
181
+ # Check for reasoning_content at completion (OpenAIResponses with reasoning_summary)
182
+ if hasattr(event, "reasoning_content") and event.reasoning_content:
183
+ # If we haven't accumulated any reasoning content yet, use this
184
+ if not reasoning_content:
185
+ reasoning_content = event.reasoning_content
186
+ yield (event.reasoning_content, None)
187
+ except Exception as e:
188
+ logger.warning(f"Reasoning error: {e}")
189
+ return
190
+
191
+ # Yield final message
192
+ if reasoning_content:
193
+ final_message = Message(
194
+ role="assistant",
195
+ content=f"<thinking>\n{reasoning_content}\n</thinking>",
196
+ reasoning_content=reasoning_content,
197
+ )
198
+ yield (None, final_message)
@@ -1,6 +1,6 @@
1
1
  from __future__ import annotations
2
2
 
3
- from typing import List, Optional
3
+ from typing import AsyncIterator, Iterator, List, Optional, Tuple
4
4
 
5
5
  from agno.models.base import Model
6
6
  from agno.models.message import Message
@@ -74,3 +74,87 @@ async def aget_vertexai_reasoning(reasoning_agent: "Agent", messages: List[Messa
74
74
  reasoning_content=reasoning_content,
75
75
  redacted_reasoning_content=redacted_reasoning_content,
76
76
  )
77
+
78
+
79
+ def get_vertexai_reasoning_stream(
80
+ reasoning_agent: "Agent", # type: ignore # noqa: F821
81
+ messages: List[Message],
82
+ ) -> Iterator[Tuple[Optional[str], Optional[Message]]]:
83
+ """
84
+ Stream reasoning content from VertexAI Claude model.
85
+
86
+ Yields:
87
+ Tuple of (reasoning_content_delta, final_message)
88
+ - During streaming: (reasoning_content_delta, None)
89
+ - At the end: (None, final_message)
90
+ """
91
+ from agno.run.agent import RunEvent
92
+
93
+ reasoning_content: str = ""
94
+ redacted_reasoning_content: Optional[str] = None
95
+
96
+ try:
97
+ for event in reasoning_agent.run(input=messages, stream=True, stream_intermediate_steps=True):
98
+ if hasattr(event, "event"):
99
+ if event.event == RunEvent.run_content:
100
+ # Stream reasoning content as it arrives
101
+ if hasattr(event, "reasoning_content") and event.reasoning_content:
102
+ reasoning_content += event.reasoning_content
103
+ yield (event.reasoning_content, None)
104
+ elif event.event == RunEvent.run_completed:
105
+ pass
106
+ except Exception as e:
107
+ logger.warning(f"Reasoning error: {e}")
108
+ return
109
+
110
+ # Yield final message
111
+ if reasoning_content:
112
+ final_message = Message(
113
+ role="assistant",
114
+ content=f"<thinking>\n{reasoning_content}\n</thinking>",
115
+ reasoning_content=reasoning_content,
116
+ redacted_reasoning_content=redacted_reasoning_content,
117
+ )
118
+ yield (None, final_message)
119
+
120
+
121
+ async def aget_vertexai_reasoning_stream(
122
+ reasoning_agent: "Agent", # type: ignore # noqa: F821
123
+ messages: List[Message],
124
+ ) -> AsyncIterator[Tuple[Optional[str], Optional[Message]]]:
125
+ """
126
+ Stream reasoning content from VertexAI Claude model asynchronously.
127
+
128
+ Yields:
129
+ Tuple of (reasoning_content_delta, final_message)
130
+ - During streaming: (reasoning_content_delta, None)
131
+ - At the end: (None, final_message)
132
+ """
133
+ from agno.run.agent import RunEvent
134
+
135
+ reasoning_content: str = ""
136
+ redacted_reasoning_content: Optional[str] = None
137
+
138
+ try:
139
+ async for event in reasoning_agent.arun(input=messages, stream=True, stream_intermediate_steps=True):
140
+ if hasattr(event, "event"):
141
+ if event.event == RunEvent.run_content:
142
+ # Stream reasoning content as it arrives
143
+ if hasattr(event, "reasoning_content") and event.reasoning_content:
144
+ reasoning_content += event.reasoning_content
145
+ yield (event.reasoning_content, None)
146
+ elif event.event == RunEvent.run_completed:
147
+ pass
148
+ except Exception as e:
149
+ logger.warning(f"Reasoning error: {e}")
150
+ return
151
+
152
+ # Yield final message
153
+ if reasoning_content:
154
+ final_message = Message(
155
+ role="assistant",
156
+ content=f"<thinking>\n{reasoning_content}\n</thinking>",
157
+ reasoning_content=reasoning_content,
158
+ redacted_reasoning_content=redacted_reasoning_content,
159
+ )
160
+ yield (None, final_message)
agno/run/agent.py CHANGED
@@ -153,9 +153,11 @@ class RunEvent(str, Enum):
153
153
 
154
154
  tool_call_started = "ToolCallStarted"
155
155
  tool_call_completed = "ToolCallCompleted"
156
+ tool_call_error = "ToolCallError"
156
157
 
157
158
  reasoning_started = "ReasoningStarted"
158
159
  reasoning_step = "ReasoningStep"
160
+ reasoning_content_delta = "ReasoningContentDelta"
159
161
  reasoning_completed = "ReasoningCompleted"
160
162
 
161
163
  memory_update_started = "MemoryUpdateStarted"
@@ -373,6 +375,14 @@ class ReasoningStepEvent(BaseAgentRunEvent):
373
375
  reasoning_content: str = ""
374
376
 
375
377
 
378
+ @dataclass
379
+ class ReasoningContentDeltaEvent(BaseAgentRunEvent):
380
+ """Event for streaming reasoning content chunks as they arrive."""
381
+
382
+ event: str = RunEvent.reasoning_content_delta.value
383
+ reasoning_content: str = "" # The delta/chunk of reasoning content
384
+
385
+
376
386
  @dataclass
377
387
  class ReasoningCompletedEvent(BaseAgentRunEvent):
378
388
  event: str = RunEvent.reasoning_completed.value
@@ -396,6 +406,13 @@ class ToolCallCompletedEvent(BaseAgentRunEvent):
396
406
  audio: Optional[List[Audio]] = None # Audio produced by the tool call
397
407
 
398
408
 
409
+ @dataclass
410
+ class ToolCallErrorEvent(BaseAgentRunEvent):
411
+ event: str = RunEvent.tool_call_error.value
412
+ tool: Optional[ToolExecution] = None
413
+ error: Optional[str] = None
414
+
415
+
399
416
  @dataclass
400
417
  class ParserModelResponseStartedEvent(BaseAgentRunEvent):
401
418
  event: str = RunEvent.parser_model_response_started.value
@@ -442,6 +459,7 @@ RunOutputEvent = Union[
442
459
  PostHookCompletedEvent,
443
460
  ReasoningStartedEvent,
444
461
  ReasoningStepEvent,
462
+ ReasoningContentDeltaEvent,
445
463
  ReasoningCompletedEvent,
446
464
  MemoryUpdateStartedEvent,
447
465
  MemoryUpdateCompletedEvent,
@@ -449,6 +467,7 @@ RunOutputEvent = Union[
449
467
  SessionSummaryCompletedEvent,
450
468
  ToolCallStartedEvent,
451
469
  ToolCallCompletedEvent,
470
+ ToolCallErrorEvent,
452
471
  ParserModelResponseStartedEvent,
453
472
  ParserModelResponseCompletedEvent,
454
473
  OutputModelResponseStartedEvent,
@@ -474,6 +493,7 @@ RUN_EVENT_TYPE_REGISTRY = {
474
493
  RunEvent.post_hook_completed.value: PostHookCompletedEvent,
475
494
  RunEvent.reasoning_started.value: ReasoningStartedEvent,
476
495
  RunEvent.reasoning_step.value: ReasoningStepEvent,
496
+ RunEvent.reasoning_content_delta.value: ReasoningContentDeltaEvent,
477
497
  RunEvent.reasoning_completed.value: ReasoningCompletedEvent,
478
498
  RunEvent.memory_update_started.value: MemoryUpdateStartedEvent,
479
499
  RunEvent.memory_update_completed.value: MemoryUpdateCompletedEvent,
@@ -481,6 +501,7 @@ RUN_EVENT_TYPE_REGISTRY = {
481
501
  RunEvent.session_summary_completed.value: SessionSummaryCompletedEvent,
482
502
  RunEvent.tool_call_started.value: ToolCallStartedEvent,
483
503
  RunEvent.tool_call_completed.value: ToolCallCompletedEvent,
504
+ RunEvent.tool_call_error.value: ToolCallErrorEvent,
484
505
  RunEvent.parser_model_response_started.value: ParserModelResponseStartedEvent,
485
506
  RunEvent.parser_model_response_completed.value: ParserModelResponseCompletedEvent,
486
507
  RunEvent.output_model_response_started.value: OutputModelResponseStartedEvent,
agno/run/base.py CHANGED
@@ -22,7 +22,7 @@ class RunContext:
22
22
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
23
23
  metadata: Optional[Dict[str, Any]] = None
24
24
  session_state: Optional[Dict[str, Any]] = None
25
- output_schema: Optional[Type[BaseModel]] = None
25
+ output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None
26
26
 
27
27
 
28
28
  @dataclass
@@ -51,6 +51,7 @@ class BaseRunOutputEvent:
51
51
  "session_summary",
52
52
  "metrics",
53
53
  "run_input",
54
+ "requirements",
54
55
  ]
55
56
  }
56
57
 
@@ -138,6 +139,9 @@ class BaseRunOutputEvent:
138
139
  if hasattr(self, "run_input") and self.run_input is not None:
139
140
  _dict["run_input"] = self.run_input.to_dict()
140
141
 
142
+ if hasattr(self, "requirements") and self.requirements is not None:
143
+ _dict["requirements"] = [req.to_dict() if hasattr(req, "to_dict") else req for req in self.requirements]
144
+
141
145
  return _dict
142
146
 
143
147
  def to_json(self, separators=(", ", ": "), indent: Optional[int] = 2) -> str:
@@ -219,6 +223,21 @@ class BaseRunOutputEvent:
219
223
 
220
224
  data["run_input"] = RunInput.from_dict(run_input)
221
225
 
226
+ # Handle requirements
227
+
228
+ # Handle requirements
229
+ requirements_data = data.pop("requirements", None)
230
+ if requirements_data is not None:
231
+ from agno.run.requirement import RunRequirement
232
+
233
+ requirements_list: List[RunRequirement] = []
234
+ for item in requirements_data:
235
+ if isinstance(item, RunRequirement):
236
+ requirements_list.append(item)
237
+ elif isinstance(item, dict):
238
+ requirements_list.append(RunRequirement.from_dict(item))
239
+ data["requirements"] = requirements_list if requirements_list else None
240
+
222
241
  # Filter data to only include fields that are actually defined in the target class
223
242
  from dataclasses import fields
224
243
 
agno/run/team.py CHANGED
@@ -146,9 +146,11 @@ class TeamRunEvent(str, Enum):
146
146
 
147
147
  tool_call_started = "TeamToolCallStarted"
148
148
  tool_call_completed = "TeamToolCallCompleted"
149
+ tool_call_error = "TeamToolCallError"
149
150
 
150
151
  reasoning_started = "TeamReasoningStarted"
151
152
  reasoning_step = "TeamReasoningStep"
153
+ reasoning_content_delta = "TeamReasoningContentDelta"
152
154
  reasoning_completed = "TeamReasoningCompleted"
153
155
 
154
156
  memory_update_started = "TeamMemoryUpdateStarted"
@@ -346,6 +348,14 @@ class ReasoningStepEvent(BaseTeamRunEvent):
346
348
  reasoning_content: str = ""
347
349
 
348
350
 
351
+ @dataclass
352
+ class ReasoningContentDeltaEvent(BaseTeamRunEvent):
353
+ """Event for streaming reasoning content chunks as they arrive."""
354
+
355
+ event: str = TeamRunEvent.reasoning_content_delta.value
356
+ reasoning_content: str = "" # The delta/chunk of reasoning content
357
+
358
+
349
359
  @dataclass
350
360
  class ReasoningCompletedEvent(BaseTeamRunEvent):
351
361
  event: str = TeamRunEvent.reasoning_completed.value
@@ -369,6 +379,13 @@ class ToolCallCompletedEvent(BaseTeamRunEvent):
369
379
  audio: Optional[List[Audio]] = None # Audio produced by the tool call
370
380
 
371
381
 
382
+ @dataclass
383
+ class ToolCallErrorEvent(BaseTeamRunEvent):
384
+ event: str = TeamRunEvent.tool_call_error.value
385
+ tool: Optional[ToolExecution] = None
386
+ error: Optional[str] = None
387
+
388
+
372
389
  @dataclass
373
390
  class ParserModelResponseStartedEvent(BaseTeamRunEvent):
374
391
  event: str = TeamRunEvent.parser_model_response_started.value
@@ -411,6 +428,7 @@ TeamRunOutputEvent = Union[
411
428
  PreHookCompletedEvent,
412
429
  ReasoningStartedEvent,
413
430
  ReasoningStepEvent,
431
+ ReasoningContentDeltaEvent,
414
432
  ReasoningCompletedEvent,
415
433
  MemoryUpdateStartedEvent,
416
434
  MemoryUpdateCompletedEvent,
@@ -418,6 +436,7 @@ TeamRunOutputEvent = Union[
418
436
  SessionSummaryCompletedEvent,
419
437
  ToolCallStartedEvent,
420
438
  ToolCallCompletedEvent,
439
+ ToolCallErrorEvent,
421
440
  ParserModelResponseStartedEvent,
422
441
  ParserModelResponseCompletedEvent,
423
442
  OutputModelResponseStartedEvent,
@@ -440,6 +459,7 @@ TEAM_RUN_EVENT_TYPE_REGISTRY = {
440
459
  TeamRunEvent.post_hook_completed.value: PostHookCompletedEvent,
441
460
  TeamRunEvent.reasoning_started.value: ReasoningStartedEvent,
442
461
  TeamRunEvent.reasoning_step.value: ReasoningStepEvent,
462
+ TeamRunEvent.reasoning_content_delta.value: ReasoningContentDeltaEvent,
443
463
  TeamRunEvent.reasoning_completed.value: ReasoningCompletedEvent,
444
464
  TeamRunEvent.memory_update_started.value: MemoryUpdateStartedEvent,
445
465
  TeamRunEvent.memory_update_completed.value: MemoryUpdateCompletedEvent,
@@ -447,6 +467,7 @@ TEAM_RUN_EVENT_TYPE_REGISTRY = {
447
467
  TeamRunEvent.session_summary_completed.value: SessionSummaryCompletedEvent,
448
468
  TeamRunEvent.tool_call_started.value: ToolCallStartedEvent,
449
469
  TeamRunEvent.tool_call_completed.value: ToolCallCompletedEvent,
470
+ TeamRunEvent.tool_call_error.value: ToolCallErrorEvent,
450
471
  TeamRunEvent.parser_model_response_started.value: ParserModelResponseStartedEvent,
451
472
  TeamRunEvent.parser_model_response_completed.value: ParserModelResponseCompletedEvent,
452
473
  TeamRunEvent.output_model_response_started.value: OutputModelResponseStartedEvent,
agno/session/team.py CHANGED
@@ -91,10 +91,7 @@ class TeamSession:
91
91
 
92
92
  def upsert_run(self, run_response: Union[TeamRunOutput, RunOutput]):
93
93
  """Adds a RunOutput, together with some calculated data, to the runs list."""
94
-
95
94
  messages = run_response.messages
96
- if messages is None:
97
- return
98
95
 
99
96
  # Make message duration None
100
97
  for m in messages or []: