agno 1.7.7__py3-none-any.whl → 1.7.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +219 -14
- agno/document/reader/youtube_reader.py +8 -4
- agno/models/anthropic/claude.py +1 -1
- agno/models/base.py +4 -0
- agno/models/message.py +6 -2
- agno/models/openai/chat.py +3 -0
- agno/models/openai/responses.py +6 -5
- agno/run/response.py +31 -0
- agno/run/team.py +17 -0
- agno/storage/gcs_json.py +1 -1
- agno/storage/json.py +2 -1
- agno/storage/redis.py +1 -1
- agno/storage/yaml.py +1 -1
- agno/team/team.py +432 -225
- agno/tools/function.py +21 -11
- agno/tools/googlecalendar.py +567 -121
- agno/tools/googlesheets.py +6 -1
- agno/tools/mcp.py +19 -1
- agno/utils/events.py +50 -0
- agno/utils/response.py +3 -1
- {agno-1.7.7.dist-info → agno-1.7.8.dist-info}/METADATA +1 -1
- {agno-1.7.7.dist-info → agno-1.7.8.dist-info}/RECORD +26 -26
- {agno-1.7.7.dist-info → agno-1.7.8.dist-info}/WHEEL +0 -0
- {agno-1.7.7.dist-info → agno-1.7.8.dist-info}/entry_points.txt +0 -0
- {agno-1.7.7.dist-info → agno-1.7.8.dist-info}/licenses/LICENSE +0 -0
- {agno-1.7.7.dist-info → agno-1.7.8.dist-info}/top_level.txt +0 -0
agno/agent/agent.py
CHANGED

@@ -274,6 +274,10 @@ class Agent:
     parser_model: Optional[Model] = None
     # Provide a prompt for the parser model
     parser_model_prompt: Optional[str] = None
+    # Provide an output model to structure the response from the main model
+    output_model: Optional[Model] = None
+    # Provide a prompt for the output model
+    output_model_prompt: Optional[str] = None
     # If True, the response from the Model is converted into the response_model
     # Otherwise, the response is returned as a JSON string
     parse_response: bool = True

@@ -414,6 +418,8 @@ class Agent:
         parser_model_prompt: Optional[str] = None,
         response_model: Optional[Type[BaseModel]] = None,
         parse_response: bool = True,
+        output_model: Optional[Model] = None,
+        output_model_prompt: Optional[str] = None,
         structured_outputs: Optional[bool] = None,
         use_json_mode: bool = False,
         save_response_to_file: Optional[str] = None,

@@ -517,6 +523,8 @@ class Agent:
         self.parser_model_prompt = parser_model_prompt
         self.response_model = response_model
         self.parse_response = parse_response
+        self.output_model = output_model
+        self.output_model_prompt = output_model_prompt

         self.structured_outputs = structured_outputs

@@ -791,6 +799,8 @@ class Agent:
             tool_call_limit=self.tool_call_limit,
             response_format=response_format,
         )
+        # If an output model is provided, generate output using the output model
+        self._generate_response_with_output_model(model_response, run_messages)

         # If a parser model is provided, structure the response separately
         self._parse_response_with_parser_model(model_response, run_messages)

@@ -874,13 +884,33 @@ class Agent:
         index_of_last_user_message = len(run_messages.messages)

         # 2. Process model response
-        for event in self._handle_model_response_stream(
-            run_response=run_response,
-            run_messages=run_messages,
-            response_format=response_format,
-            stream_intermediate_steps=stream_intermediate_steps,
-        ):
-            yield event
+        if self.output_model is None:
+            for event in self._handle_model_response_stream(
+                run_response=run_response,
+                run_messages=run_messages,
+                response_format=response_format,
+                stream_intermediate_steps=stream_intermediate_steps,
+            ):
+                yield event
+        else:
+            from agno.utils.events import RunResponseContentEvent
+
+            for event in self._handle_model_response_stream(
+                run_response=run_response,
+                run_messages=run_messages,
+                response_format=response_format,
+                stream_intermediate_steps=stream_intermediate_steps,
+            ):
+                if isinstance(event, RunResponseContentEvent):
+                    if stream_intermediate_steps:
+                        yield event
+                else:
+                    yield event
+
+            # If an output model is provided, generate output using the output model
+            yield from self._generate_response_with_output_model_stream(
+                run_response=run_response, run_messages=run_messages
+            )

         # If a parser model is provided, structure the response separately
         yield from self._parse_response_with_parser_model_stream(

@@ -1208,6 +1238,9 @@ class Agent:
             response_format=response_format,
         )

+        # If an output model is provided, generate output using the output model
+        await self._agenerate_response_with_output_model(model_response=model_response, run_messages=run_messages)
+
         # If a parser model is provided, structure the response separately
         await self._aparse_response_with_parser_model(model_response=model_response, run_messages=run_messages)

@@ -1289,13 +1322,36 @@ class Agent:
         index_of_last_user_message = len(run_messages.messages)

         # 2. Generate a response from the Model
-        async for event in self._ahandle_model_response_stream(
-            run_response=run_response,
-            run_messages=run_messages,
-            response_format=response_format,
-            stream_intermediate_steps=stream_intermediate_steps,
-        ):
-            yield event
+        if self.output_model is None:
+            async for event in self._ahandle_model_response_stream(
+                run_response=run_response,
+                run_messages=run_messages,
+                response_format=response_format,
+                stream_intermediate_steps=stream_intermediate_steps,
+            ):
+                yield event
+        else:
+            from agno.utils.events import RunResponseContentEvent
+
+            async for event in self._ahandle_model_response_stream(
+                run_response=run_response,
+                run_messages=run_messages,
+                response_format=response_format,
+                stream_intermediate_steps=stream_intermediate_steps,
+            ):
+                if isinstance(event, RunResponseContentEvent):
+                    if stream_intermediate_steps:
+                        yield event
+                else:
+                    yield event
+
+            # If an output model is provided, generate output using the output model
+            async for event in self._agenerate_response_with_output_model_stream(
+                run_response=run_response,
+                run_messages=run_messages,
+                stream_intermediate_steps=stream_intermediate_steps,
+            ):
+                yield event

         # If a parser model is provided, structure the response separately
         async for event in self._aparse_response_with_parser_model_stream(

@@ -3155,6 +3211,12 @@ class Agent:
             model_response.thinking = (model_response.thinking or "") + model_response_event.thinking
             run_response.thinking = model_response.thinking

+        if model_response_event.reasoning_content is not None:
+            model_response.reasoning_content = (
+                model_response.reasoning_content or ""
+            ) + model_response_event.reasoning_content
+            run_response.reasoning_content = model_response.reasoning_content
+
         if model_response_event.redacted_thinking is not None:
             model_response.redacted_thinking = (
                 model_response.redacted_thinking or ""

@@ -3180,6 +3242,7 @@ class Agent:
         elif (
             model_response_event.content is not None
            or model_response_event.thinking is not None
+            or model_response_event.reasoning_content is not None
             or model_response_event.redacted_thinking is not None
             or model_response_event.citations is not None
         ):

@@ -3188,6 +3251,7 @@ class Agent:
                 from_run_response=run_response,
                 content=model_response_event.content,
                 thinking=model_response_event.thinking,
+                reasoning_content=model_response_event.reasoning_content,
                 redacted_thinking=model_response_event.redacted_thinking,
                 citations=model_response_event.citations,
             ),

@@ -5034,6 +5098,24 @@ class Agent:
             Message(role="user", content=run_response.content),
         ]

+    def get_messages_for_output_model(self, messages: List[Message]) -> List[Message]:
+        """Get the messages for the output model."""
+
+        if self.output_model_prompt is not None:
+            system_message_exists = False
+            for message in messages:
+                if message.role == "system":
+                    system_message_exists = True
+                    message.content = self.output_model_prompt
+                    break
+            if not system_message_exists:
+                messages.insert(0, Message(role="system", content=self.output_model_prompt))
+
+        # Remove the last assistant message from the messages list
+        messages.pop(-1)
+
+        return messages
+
     def get_session_summary(self, session_id: Optional[str] = None, user_id: Optional[str] = None):
         """Get the session summary for the given session ID and user ID."""
         if self.memory is None:

@@ -6354,6 +6436,99 @@ class Agent:
         else:
             log_warning("A response model is required to parse the response with a parser model")

+    def _generate_response_with_output_model(self, model_response: ModelResponse, run_messages: RunMessages) -> None:
+        """Parse the model response using the output model."""
+        if self.output_model is None:
+            return
+
+        messages_for_output_model = self.get_messages_for_output_model(run_messages.messages)
+        output_model_response: ModelResponse = self.output_model.response(messages=messages_for_output_model)
+        model_response.content = output_model_response.content
+
+    def _generate_response_with_output_model_stream(
+        self, run_response: RunResponse, run_messages: RunMessages, stream_intermediate_steps: bool = False
+    ):
+        """Parse the model response using the output model."""
+        from agno.utils.events import (
+            create_output_model_response_completed_event,
+            create_output_model_response_started_event,
+        )
+
+        if self.output_model is None:
+            return
+
+        if stream_intermediate_steps:
+            yield self._handle_event(create_output_model_response_started_event(run_response), run_response)
+
+        messages_for_output_model = self.get_messages_for_output_model(run_messages.messages)
+
+        model_response = ModelResponse(content="")
+
+        for model_response_event in self.output_model.response_stream(messages=messages_for_output_model):
+            yield from self._handle_model_response_chunk(
+                run_response=run_response,
+                model_response=model_response,
+                model_response_event=model_response_event,
+            )
+
+        if stream_intermediate_steps:
+            yield self._handle_event(create_output_model_response_completed_event(run_response), run_response)
+
+        # Build a list of messages that should be added to the RunResponse
+        messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
+        # Update the RunResponse messages
+        run_response.messages = messages_for_run_response
+        # Update the RunResponse metrics
+        run_response.metrics = self.aggregate_metrics_from_messages(messages_for_run_response)
+
+    async def _agenerate_response_with_output_model(self, model_response: ModelResponse, run_messages: RunMessages):
+        """Parse the model response using the output model."""
+        if self.output_model is None:
+            return
+
+        messages_for_output_model = self.get_messages_for_output_model(run_messages.messages)
+        output_model_response: ModelResponse = await self.output_model.aresponse(messages=messages_for_output_model)
+        model_response.content = output_model_response.content
+
+    async def _agenerate_response_with_output_model_stream(
+        self, run_response: RunResponse, run_messages: RunMessages, stream_intermediate_steps: bool = False
+    ):
+        """Parse the model response using the output model."""
+        from agno.utils.events import (
+            create_output_model_response_completed_event,
+            create_output_model_response_started_event,
+        )
+
+        if self.output_model is None:
+            return
+
+        if stream_intermediate_steps:
+            yield self._handle_event(create_output_model_response_started_event(run_response), run_response)
+
+        messages_for_output_model = self.get_messages_for_output_model(run_messages.messages)
+
+        model_response = ModelResponse(content="")
+
+        model_response_stream = self.output_model.aresponse_stream(messages=messages_for_output_model)
+
+        async for model_response_event in model_response_stream:
+            for event in self._handle_model_response_chunk(
+                run_response=run_response,
+                model_response=model_response,
+                model_response_event=model_response_event,
+            ):
+                yield event
+
+        if stream_intermediate_steps:
+            yield self._handle_event(create_output_model_response_completed_event(run_response), run_response)
+
+        # Build a list of messages that should be added to the RunResponse
+        messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
+        # Update the RunResponse messages
+        run_response.messages = messages_for_run_response
+        # Update the RunResponse metrics
+        run_response.metrics = self.aggregate_metrics_from_messages(messages_for_run_response)
+
     def _handle_event(self, event: RunResponseEvent, run_response: RunResponse):
         # We only store events that are not run_response_content events
         events_to_skip = [event.value for event in self.events_to_skip] if self.events_to_skip else []

@@ -6875,6 +7050,7 @@ class Agent:
         if stream:
             _response_content: str = ""
             _response_thinking: str = ""
+            _response_reasoning_content: str = ""
             response_content_batch: Union[str, JSON, Markdown] = ""
             reasoning_steps: List[ReasoningStep] = []

@@ -6941,6 +7117,8 @@ class Agent:
                         log_warning(f"Failed to convert response to JSON: {e}")
                 if hasattr(resp, "thinking") and resp.thinking is not None:
                     _response_thinking += resp.thinking
+                if hasattr(resp, "reasoning_content") and resp.reasoning_content is not None:
+                    _response_reasoning_content += resp.reasoning_content
                 if (
                     hasattr(resp, "extra_data")
                     and resp.extra_data is not None

@@ -7012,6 +7190,18 @@ class Agent:
                     if render:
                         live_log.update(Group(*panels))

+                if len(_response_reasoning_content) > 0:
+                    render = True
+                    # Create panel for reasoning content
+                    reasoning_panel = create_panel(
+                        content=Text(_response_reasoning_content),
+                        title=f"Reasoning ({response_timer.elapsed:.1f}s)",
+                        border_style="green",
+                    )
+                    panels.append(reasoning_panel)
+                    if render:
+                        live_log.update(Group(*panels))
+
                 # Add tool calls panel if available
                 if (
                     self.show_tool_calls

@@ -7324,6 +7514,7 @@ class Agent:
         if stream:
             _response_content: str = ""
             _response_thinking: str = ""
+            _response_reasoning_content: str = ""
             reasoning_steps: List[ReasoningStep] = []
             response_content_batch: Union[str, JSON, Markdown] = ""

@@ -7391,6 +7582,8 @@ class Agent:
                         log_warning(f"Failed to convert response to JSON: {e}")
                 if resp.thinking is not None:
                     _response_thinking += resp.thinking
+                if hasattr(resp, "reasoning_content") and resp.reasoning_content is not None:
+                    _response_reasoning_content += resp.reasoning_content

                 if (
                     hasattr(resp, "extra_data")

@@ -7464,6 +7657,18 @@ class Agent:
                     if render:
                         live_log.update(Group(*panels))

+                if len(_response_reasoning_content) > 0:
+                    render = True
+                    # Create panel for reasoning content
+                    reasoning_panel = create_panel(
+                        content=Text(_response_reasoning_content),
+                        title=f"Reasoning ({response_timer.elapsed:.1f}s)",
+                        border_style="green",
+                    )
+                    panels.append(reasoning_panel)
+                    if render:
+                        live_log.update(Group(*panels))
+
                 # Add tool calls panel if available
                 if (
                     self.show_tool_calls
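Taken together, these hunks add an `output_model` that post-processes the main model's draft before it is returned, mirroring the existing `parser_model` flow. A minimal usage sketch, assuming OpenAI models; the model IDs and prompt below are illustrative, not taken from this diff:

    from agno.agent import Agent
    from agno.models.openai import OpenAIChat

    # Hypothetical setup: a small model runs the agent loop, while output_model
    # rewrites the final draft. Per get_messages_for_output_model above, the
    # output_model_prompt replaces the system message and the last assistant
    # message is dropped before the second pass.
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_model=OpenAIChat(id="gpt-4o"),
        output_model_prompt="Rewrite the draft as a concise, polished answer.",
    )
    agent.print_response("Summarize the latest run results", stream=True)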
agno/document/reader/youtube_reader.py
CHANGED

@@ -3,7 +3,7 @@ from typing import List

 from agno.document.base import Document
 from agno.document.reader.base import Reader
-from agno.utils.log import log_info, logger
+from agno.utils.log import log_debug, log_info, logger

 try:
     from youtube_transcript_api import YouTubeTranscriptApi

@@ -23,12 +23,16 @@ class YouTubeReader(Reader):
         log_info(f"Reading transcript for video: {video_id}")

         # Get transcript
-        transcript_data = YouTubeTranscriptApi.get_transcript(video_id)
+        log_debug(f"Fetching transcript for video: {video_id}")
+        # Create an instance of YouTubeTranscriptApi
+        ytt_api = YouTubeTranscriptApi()
+        transcript_data = ytt_api.fetch(video_id)

         # Combine transcript segments into full text
         transcript_text = ""
-        for segment in transcript_data:
-            transcript_text += f"{segment['text']} "
+
+        for segment in transcript_data:
+            transcript_text += f"{segment.text} "

         documents = [
             Document(
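This change tracks youtube-transcript-api 1.x, which replaced the class-level `get_transcript` call with an instance method and returns snippet objects instead of dicts. A standalone sketch of the new API (the video ID is a placeholder):

    from youtube_transcript_api import YouTubeTranscriptApi

    # 1.x style: instantiate, then fetch; segments expose a .text attribute
    # rather than the {"text": ...} dicts of the 0.x releases.
    ytt_api = YouTubeTranscriptApi()
    transcript_data = ytt_api.fetch("dQw4w9WgXcQ")
    transcript_text = " ".join(segment.text for segment in transcript_data)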
agno/models/anthropic/claude.py
CHANGED

@@ -449,7 +449,7 @@ class Claude(Model):

     def get_system_message_for_model(self, tools: Optional[List[Any]] = None) -> Optional[str]:
         if tools is not None and len(tools) > 0:
-            tool_call_prompt = "Do not reflect on the quality of the returned search results in your response"
+            tool_call_prompt = "Do not reflect on the quality of the returned search results in your response\n\n"
             return tool_call_prompt
         return None

agno/models/base.py
CHANGED

@@ -1025,6 +1025,10 @@ class Model(ABC):
                 stream_data.response_thinking += model_response_delta.thinking
                 should_yield = True

+            if model_response_delta.reasoning_content is not None:
+                stream_data.response_thinking += model_response_delta.reasoning_content
+                should_yield = True
+
             if model_response_delta.redacted_thinking is not None:
                 stream_data.response_redacted_thinking += model_response_delta.redacted_thinking
                 should_yield = True
agno/models/message.py
CHANGED

@@ -338,8 +338,12 @@ class Message(BaseModel):
                        if isinstance(tool_call_arguments, dict)
                        else json.loads(tool_call_arguments)
                    )
-                    arguments = ", ".join(f"{k}: {v}" for k, v in tool_call_args.items())
-                    tool_calls_list.append(f" Arguments: '{arguments}'")
+                    # Ensure tool_call_args is a dictionary before calling .items()
+                    if isinstance(tool_call_args, dict):
+                        arguments = ", ".join(f"{k}: {v}" for k, v in tool_call_args.items())
+                        tool_calls_list.append(f" Arguments: '{arguments}'")
+                    else:
+                        tool_calls_list.append(f" Arguments: '{tool_call_args}'")
                except json.JSONDecodeError:
                    tool_calls_list.append(" Arguments: 'Invalid JSON format'")
                tool_calls_str = "\n".join(tool_calls_list)
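The new `isinstance` guard matters because `json.loads` can legally return any JSON type, not just an object. A small illustration of the failure mode the old code had:

    import json

    json.loads('{"city": "Paris"}')  # dict -> .items() is safe
    json.loads('"Paris"')            # str  -> .items() would raise AttributeError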
agno/models/openai/chat.py
CHANGED

@@ -62,6 +62,7 @@ class OpenAIChat(Model):
     temperature: Optional[float] = None
     user: Optional[str] = None
     top_p: Optional[float] = None
+    service_tier: Optional[str] = None  # "auto" | "default" | "flex" | "priority", defaults to "auto" when not set
     extra_headers: Optional[Any] = None
     extra_query: Optional[Any] = None
     request_params: Optional[Dict[str, Any]] = None

@@ -175,6 +176,7 @@ class OpenAIChat(Model):
             "extra_headers": self.extra_headers,
             "extra_query": self.extra_query,
             "metadata": self.metadata,
+            "service_tier": self.service_tier,
         }

         # Handle response format - always use JSON schema approach

@@ -241,6 +243,7 @@ class OpenAIChat(Model):
                 "user": self.user,
                 "extra_headers": self.extra_headers,
                 "extra_query": self.extra_query,
+                "service_tier": self.service_tier,
             }
         )
         cleaned_dict = {k: v for k, v in model_dict.items() if v is not None}
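A sketch of the new pass-through, assuming an OpenAI model that supports the flex tier; the model ID is a placeholder. When `service_tier` is left unset it is dropped by the `cleaned_dict` filter, so OpenAI's default of "auto" applies:

    from agno.agent import Agent
    from agno.models.openai import OpenAIChat

    # "flex" trades higher latency for lower cost on supported models.
    agent = Agent(model=OpenAIChat(id="o4-mini", service_tier="flex"))
    agent.print_response("Ping")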
agno/models/openai/responses.py
CHANGED

@@ -47,7 +47,7 @@ class OpenAIResponses(Model):
     top_p: Optional[float] = None
     truncation: Optional[Literal["auto", "disabled"]] = None
     user: Optional[str] = None
-
+    service_tier: Optional[Literal["auto", "default", "flex", "priority"]] = None
     request_params: Optional[Dict[str, Any]] = None

     # Client parameters

@@ -178,6 +178,7 @@ class OpenAIResponses(Model):
             "top_p": self.top_p,
             "truncation": self.truncation,
             "user": self.user,
+            "service_tier": self.service_tier,
         }
         # Set the response format
         if response_format is not None:

@@ -310,11 +311,11 @@ class OpenAIResponses(Model):
         formatted_tools = []
         if tools:
             for _tool in tools:
-                if _tool["type"] == "function":
-                    _tool_dict = _tool["function"]
+                if _tool.get("type") == "function":
+                    _tool_dict = _tool.get("function", {})
                     _tool_dict["type"] = "function"
-                    for prop in _tool_dict["parameters"]["properties"].values():
-                        if isinstance(prop["type"], list):
+                    for prop in _tool_dict.get("parameters", {}).get("properties", {}).values():
+                        if isinstance(prop.get("type", ""), list):
                             prop["type"] = prop["type"][0]

                     formatted_tools.append(_tool_dict)
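The rewritten loop converts Chat Completions-style tool definitions into the flatter shape the Responses API expects, now via `.get()` with defaults so an entry missing "function" or "parameters" no longer raises `KeyError`. A standalone rendition of the transform on one definition:

    tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": ["string", "null"]}},
            },
        },
    }

    if tool.get("type") == "function":
        tool_dict = tool.get("function", {})
        tool_dict["type"] = "function"
        # Union property types like ["string", "null"] collapse to "string"
        for prop in tool_dict.get("parameters", {}).get("properties", {}).values():
            if isinstance(prop.get("type", ""), list):
                prop["type"] = prop["type"][0]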
agno/run/response.py
CHANGED

@@ -37,6 +37,9 @@ class RunEvent(str, Enum):
     parser_model_response_started = "ParserModelResponseStarted"
     parser_model_response_completed = "ParserModelResponseCompleted"

+    output_model_response_started = "OutputModelResponseStarted"
+    output_model_response_completed = "OutputModelResponseCompleted"
+

 @dataclass
 class BaseAgentRunResponseEvent(BaseRunResponseEvent):

@@ -47,10 +50,23 @@ class BaseAgentRunResponseEvent(BaseRunResponseEvent):
     run_id: Optional[str] = None
     session_id: Optional[str] = None
     team_session_id: Optional[str] = None
+    tools: Optional[List[ToolExecution]] = None

     # For backwards compatibility
     content: Optional[Any] = None

+    @property
+    def tools_requiring_confirmation(self):
+        return [t for t in self.tools if t.requires_confirmation] if self.tools else []
+
+    @property
+    def tools_requiring_user_input(self):
+        return [t for t in self.tools if t.requires_user_input] if self.tools else []
+
+    @property
+    def tools_awaiting_external_execution(self):
+        return [t for t in self.tools if t.external_execution_required] if self.tools else []
+

 @dataclass
 class RunResponseStartedEvent(BaseAgentRunResponseEvent):

@@ -69,6 +85,7 @@ class RunResponseContentEvent(BaseAgentRunResponseEvent):
     content: Optional[Any] = None
     content_type: str = "str"
     thinking: Optional[str] = None
+    reasoning_content: Optional[str] = None
     citations: Optional[Citations] = None
     response_audio: Optional[AudioResponse] = None  # Model audio response
     image: Optional[ImageArtifact] = None  # Image attached to the response

@@ -177,6 +194,16 @@ class ParserModelResponseCompletedEvent(BaseAgentRunResponseEvent):
     event: str = RunEvent.parser_model_response_completed.value


+@dataclass
+class OutputModelResponseStartedEvent(BaseAgentRunResponseEvent):
+    event: str = RunEvent.output_model_response_started.value
+
+
+@dataclass
+class OutputModelResponseCompletedEvent(BaseAgentRunResponseEvent):
+    event: str = RunEvent.output_model_response_completed.value
+
+
 RunResponseEvent = Union[
     RunResponseStartedEvent,
     RunResponseContentEvent,

@@ -194,6 +221,8 @@ RunResponseEvent = Union[
     ToolCallCompletedEvent,
     ParserModelResponseStartedEvent,
     ParserModelResponseCompletedEvent,
+    OutputModelResponseStartedEvent,
+    OutputModelResponseCompletedEvent,
 ]


@@ -215,6 +244,8 @@ RUN_EVENT_TYPE_REGISTRY = {
     RunEvent.tool_call_completed.value: ToolCallCompletedEvent,
     RunEvent.parser_model_response_started.value: ParserModelResponseStartedEvent,
     RunEvent.parser_model_response_completed.value: ParserModelResponseCompletedEvent,
+    RunEvent.output_model_response_started.value: OutputModelResponseStartedEvent,
+    RunEvent.output_model_response_completed.value: OutputModelResponseCompletedEvent,
 }

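A sketch of consuming the new event classes and the `tools` convenience properties from a streaming run; the agent setup and the `tool_name` attribute on `ToolExecution` are assumptions, not shown in this diff:

    from agno.run.response import (
        OutputModelResponseCompletedEvent,
        OutputModelResponseStartedEvent,
    )

    # `agent` configured with an output_model as in agent.py above (assumed).
    for event in agent.run("Draft a changelog", stream=True, stream_intermediate_steps=True):
        if isinstance(event, OutputModelResponseStartedEvent):
            print("output model pass started")
        elif isinstance(event, OutputModelResponseCompletedEvent):
            print("output model pass completed")
        # New convenience accessors available on every agent event:
        for tool in event.tools_requiring_confirmation:
            print(f"awaiting confirmation: {tool.tool_name}")  # tool_name assumed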
agno/run/team.py
CHANGED

@@ -34,6 +34,9 @@ class TeamRunEvent(str, Enum):
     parser_model_response_started = "TeamParserModelResponseStarted"
     parser_model_response_completed = "TeamParserModelResponseCompleted"

+    output_model_response_started = "TeamOutputModelResponseStarted"
+    output_model_response_completed = "TeamOutputModelResponseCompleted"
+

 @dataclass
 class BaseTeamRunResponseEvent(BaseRunResponseEvent):

@@ -175,6 +178,16 @@ class ParserModelResponseCompletedEvent(BaseTeamRunResponseEvent):
     event: str = TeamRunEvent.parser_model_response_completed.value


+@dataclass
+class OutputModelResponseStartedEvent(BaseTeamRunResponseEvent):
+    event: str = TeamRunEvent.output_model_response_started.value
+
+
+@dataclass
+class OutputModelResponseCompletedEvent(BaseTeamRunResponseEvent):
+    event: str = TeamRunEvent.output_model_response_completed.value
+
+
 TeamRunResponseEvent = Union[
     RunResponseStartedEvent,
     RunResponseContentEvent,

@@ -190,6 +203,8 @@ TeamRunResponseEvent = Union[
     ToolCallCompletedEvent,
     ParserModelResponseStartedEvent,
     ParserModelResponseCompletedEvent,
+    OutputModelResponseStartedEvent,
+    OutputModelResponseCompletedEvent,
 ]

 # Map event string to dataclass for team events

@@ -208,6 +223,8 @@ TEAM_RUN_EVENT_TYPE_REGISTRY = {
     TeamRunEvent.tool_call_completed.value: ToolCallCompletedEvent,
     TeamRunEvent.parser_model_response_started.value: ParserModelResponseStartedEvent,
     TeamRunEvent.parser_model_response_completed.value: ParserModelResponseCompletedEvent,
+    TeamRunEvent.output_model_response_started.value: OutputModelResponseStartedEvent,
+    TeamRunEvent.output_model_response_completed.value: OutputModelResponseCompletedEvent,
 }

agno/storage/gcs_json.py
CHANGED

@@ -222,7 +222,7 @@ class GCSJsonStorage(JsonStorage):
         try:
             data = session.to_dict()
             data["updated_at"] = int(time.time())
-            if "created_at" not in data:
+            if "created_at" not in data or data["created_at"] is None:
                 data["created_at"] = data["updated_at"]
             json_data = self.serialize(data)
             blob.upload_from_string(json_data, content_type="application/json")
agno/storage/json.py
CHANGED

@@ -206,8 +206,9 @@ class JsonStorage(Storage):
             data = session.to_dict()
         else:
             data = asdict(session)
+
         data["updated_at"] = int(time.time())
-        if "created_at" not in data:
+        if "created_at" not in data or data["created_at"] is None:
             data["created_at"] = data["updated_at"]

         with open(self.dir_path / f"{session.session_id}.json", "w", encoding="utf-8") as f:
agno/storage/redis.py
CHANGED

@@ -294,7 +294,7 @@ class RedisStorage(Storage):
         else:
             data = asdict(session)
         data["updated_at"] = int(time.time())
-        if "created_at" not in data:
+        if "created_at" not in data or data["created_at"] is None:
             data["created_at"] = data["updated_at"]

         key = self._get_key(session.session_id)
agno/storage/yaml.py
CHANGED

@@ -213,7 +213,7 @@ class YamlStorage(Storage):
         else:
             data = asdict(session)
         data["updated_at"] = int(time.time())
-        if "created_at" not in data:
+        if "created_at" not in data or data["created_at"] is None:
             data["created_at"] = data["updated_at"]
         with open(self.dir_path / f"{session.session_id}.yaml", "w", encoding="utf-8") as f:
             f.write(self.serialize(data))
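All four storage backends receive the same one-line fix: a session that deserialized with an explicit `created_at` of None previously kept None forever, because the old `"created_at" not in data` check passed. A minimal rendition of the shared upsert logic:

    import time

    def backfill_timestamps(data: dict) -> dict:
        data["updated_at"] = int(time.time())
        # New check also catches an explicit None, not just a missing key.
        if "created_at" not in data or data["created_at"] is None:
            data["created_at"] = data["updated_at"]
        return data

    backfill_timestamps({"session_id": "s1", "created_at": None})
    # -> created_at now equals updated_at instead of remaining None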