agno 2.0.7__py3-none-any.whl → 2.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +81 -49
- agno/db/migrations/v1_to_v2.py +140 -11
- agno/knowledge/embedder/sentence_transformer.py +3 -3
- agno/knowledge/knowledge.py +152 -31
- agno/knowledge/types.py +8 -0
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +57 -0
- agno/models/google/gemini.py +4 -8
- agno/models/ollama/chat.py +24 -1
- agno/models/openai/chat.py +2 -7
- agno/models/openai/responses.py +21 -17
- agno/os/interfaces/agui/agui.py +2 -2
- agno/os/interfaces/agui/utils.py +81 -18
- agno/os/interfaces/slack/slack.py +2 -2
- agno/os/interfaces/whatsapp/whatsapp.py +2 -2
- agno/os/utils.py +8 -0
- agno/reasoning/default.py +3 -1
- agno/session/agent.py +8 -5
- agno/session/team.py +14 -10
- agno/team/team.py +218 -104
- agno/tools/function.py +43 -4
- agno/tools/mcp.py +60 -37
- agno/utils/gemini.py +147 -19
- agno/utils/models/claude.py +9 -0
- agno/utils/print_response/agent.py +16 -0
- agno/utils/print_response/team.py +16 -0
- agno/vectordb/base.py +2 -2
- agno/vectordb/langchaindb/langchaindb.py +5 -7
- agno/vectordb/llamaindex/llamaindexdb.py +25 -6
- agno/workflow/workflow.py +31 -15
- {agno-2.0.7.dist-info → agno-2.0.8.dist-info}/METADATA +1 -1
- {agno-2.0.7.dist-info → agno-2.0.8.dist-info}/RECORD +35 -33
- {agno-2.0.7.dist-info → agno-2.0.8.dist-info}/WHEEL +0 -0
- {agno-2.0.7.dist-info → agno-2.0.8.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.7.dist-info → agno-2.0.8.dist-info}/top_level.txt +0 -0
agno/team/team.py
CHANGED
```diff
@@ -33,6 +33,7 @@ from agno.agent import Agent
 from agno.db.base import BaseDb, SessionType, UserMemory
 from agno.exceptions import ModelProviderError, RunCancelledException
 from agno.knowledge.knowledge import Knowledge
+from agno.knowledge.types import KnowledgeFilter
 from agno.media import Audio, File, Image, Video
 from agno.memory import MemoryManager
 from agno.models.base import Model
@@ -515,6 +516,11 @@ class Team:
         self.num_history_runs = num_history_runs
         self.metadata = metadata

+        if add_history_to_context and not db:
+            log_warning(
+                "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
+            )
+
         self.reasoning = reasoning
         self.reasoning_model = reasoning_model
         self.reasoning_agent = reasoning_agent
@@ -727,9 +733,19 @@ class Team:
         if user_id is None:
             user_id = self.user_id

-        # Determine the session_state
+        # Determine the session_state with proper precedence
         if session_state is None:
             session_state = self.session_state or {}
+        else:
+            # If run session_state is provided, merge agent defaults under it
+            # This ensures run state takes precedence over agent defaults
+            if self.session_state:
+                from agno.utils.merge_dict import merge_dictionaries
+
+                base_state = self.session_state.copy()
+                merge_dictionaries(base_state, session_state)
+                session_state.clear()
+                session_state.update(base_state)

         if user_id is not None:
             session_state["current_user_id"] = user_id
```
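In plain terms, the new `else` branch means a `session_state` passed to a run now wins over the Team's default `session_state`: the team defaults are merged underneath the run-level dict. A minimal standalone sketch of that precedence; `deep_merge` below is a local stand-in for `agno.utils.merge_dict.merge_dictionaries`, assumed to update the first dict in place.

```python
# Standalone sketch of the new precedence rule: run-level state wins over team defaults.
from typing import Any, Dict


def deep_merge(base: Dict[str, Any], overrides: Dict[str, Any]) -> None:
    """Recursively update `base` in place with values from `overrides` (stand-in helper)."""
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_merge(base[key], value)
        else:
            base[key] = value


team_defaults = {"currency": "USD", "theme": "dark"}   # Team(session_state=...)
run_state = {"theme": "light"}                          # session_state passed to run()

base_state = team_defaults.copy()
deep_merge(base_state, run_state)   # run values overwrite the defaults
run_state.clear()
run_state.update(base_state)

print(run_state)  # {'currency': 'USD', 'theme': 'light'}
```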
```diff
@@ -1231,7 +1247,9 @@ class Team:
             workflow_context=workflow_context,
             debug_mode=debug_mode,
             add_history_to_context=add_history,
+            add_session_state_to_context=add_session_state,
             dependencies=run_dependencies,
+            add_dependencies_to_context=add_dependencies,
             metadata=metadata,
         )

@@ -1849,6 +1867,8 @@ class Team:
             workflow_context=workflow_context,
             debug_mode=debug_mode,
             add_history_to_context=add_history_to_context,
+            add_dependencies_to_context=add_dependencies_to_context,
+            add_session_state_to_context=add_session_state_to_context,
             dependencies=dependencies,
             metadata=metadata,
         )
@@ -2240,7 +2260,7 @@ class Team:
         content_type = "str"

         should_yield = False
-        # Process content
+        # Process content
         if model_response_event.content is not None:
             if parse_structured_output:
                 full_model_response.content = model_response_event.content
@@ -2256,7 +2276,31 @@ class Team:
                 full_model_response.content = (full_model_response.content or "") + model_response_event.content
             should_yield = True

-        # Process
+        # Process reasoning content
+        if model_response_event.reasoning_content is not None:
+            full_model_response.reasoning_content = (
+                full_model_response.reasoning_content or ""
+            ) + model_response_event.reasoning_content
+            run_response.reasoning_content = full_model_response.reasoning_content
+            should_yield = True
+
+        if model_response_event.redacted_reasoning_content is not None:
+            if not full_model_response.reasoning_content:
+                full_model_response.reasoning_content = model_response_event.redacted_reasoning_content
+            else:
+                full_model_response.reasoning_content += model_response_event.redacted_reasoning_content
+            run_response.reasoning_content = full_model_response.reasoning_content
+            should_yield = True
+
+        # Handle provider data (one chunk)
+        if model_response_event.provider_data is not None:
+            run_response.model_provider_data = model_response_event.provider_data
+
+        # Handle citations (one chunk)
+        if model_response_event.citations is not None:
+            run_response.citations = model_response_event.citations
+
+        # Process audio
         if model_response_event.audio is not None:
             if full_model_response.audio is None:
                 full_model_response.audio = Audio(id=str(uuid4()), content=b"", transcript="")
```
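The streaming handler now accumulates `reasoning_content` chunks onto the full model response, appends any `redacted_reasoning_content`, and mirrors provider data and citations onto the run response. A simplified, self-contained sketch of just the accumulation logic; the dataclasses below are reduced stand-ins for agno's model-response event types.

```python
# Reduced stand-ins for streamed model-response events; only the accumulation logic
# from the hunk above is mirrored here.
from dataclasses import dataclass
from typing import Optional


@dataclass
class Chunk:
    reasoning_content: Optional[str] = None
    redacted_reasoning_content: Optional[str] = None


@dataclass
class FullResponse:
    reasoning_content: Optional[str] = None


full = FullResponse()
chunks = [
    Chunk(reasoning_content="Step 1: read the request. "),
    Chunk(reasoning_content="Step 2: pick a member."),
    Chunk(redacted_reasoning_content=" [redacted]"),
]

for chunk in chunks:
    if chunk.reasoning_content is not None:
        full.reasoning_content = (full.reasoning_content or "") + chunk.reasoning_content
    if chunk.redacted_reasoning_content is not None:
        # Redacted reasoning is appended to whatever reasoning is already present.
        if not full.reasoning_content:
            full.reasoning_content = chunk.redacted_reasoning_content
        else:
            full.reasoning_content += chunk.redacted_reasoning_content

print(full.reasoning_content)
# Step 1: read the request. Step 2: pick a member. [redacted]
```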
```diff
@@ -2911,11 +2955,6 @@ class Team:
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
-        show_message: bool = True,
-        show_reasoning: bool = True,
-        show_full_reasoning: bool = False,
-        console: Optional[Any] = None,
-        tags_to_include_in_markdown: Optional[Set[str]] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
@@ -2923,9 +2962,16 @@ class Team:
         markdown: Optional[bool] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
         add_history_to_context: Optional[bool] = None,
+        add_dependencies_to_context: Optional[bool] = None,
+        add_session_state_to_context: Optional[bool] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         metadata: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
+        show_message: bool = True,
+        show_reasoning: bool = True,
+        show_full_reasoning: bool = False,
+        console: Optional[Any] = None,
+        tags_to_include_in_markdown: Optional[Set[str]] = None,
         **kwargs: Any,
     ) -> None:
         if not tags_to_include_in_markdown:
@@ -2964,6 +3010,8 @@ class Team:
             knowledge_filters=knowledge_filters,
             add_history_to_context=add_history_to_context,
             dependencies=dependencies,
+            add_dependencies_to_context=add_dependencies_to_context,
+            add_session_state_to_context=add_session_state_to_context,
             metadata=metadata,
             debug_mode=debug_mode,
             **kwargs,
@@ -2988,6 +3036,8 @@ class Team:
             knowledge_filters=knowledge_filters,
             add_history_to_context=add_history_to_context,
             dependencies=dependencies,
+            add_dependencies_to_context=add_dependencies_to_context,
+            add_session_state_to_context=add_session_state_to_context,
             metadata=metadata,
             debug_mode=debug_mode,
             **kwargs,
@@ -3002,11 +3052,6 @@ class Team:
         session_id: Optional[str] = None,
         session_state: Optional[Dict[str, Any]] = None,
         user_id: Optional[str] = None,
-        show_message: bool = True,
-        show_reasoning: bool = True,
-        show_full_reasoning: bool = False,
-        console: Optional[Any] = None,
-        tags_to_include_in_markdown: Optional[Set[str]] = None,
         audio: Optional[Sequence[Audio]] = None,
         images: Optional[Sequence[Image]] = None,
         videos: Optional[Sequence[Video]] = None,
@@ -3015,8 +3060,15 @@ class Team:
         knowledge_filters: Optional[Dict[str, Any]] = None,
         add_history_to_context: Optional[bool] = None,
         dependencies: Optional[Dict[str, Any]] = None,
+        add_dependencies_to_context: Optional[bool] = None,
+        add_session_state_to_context: Optional[bool] = None,
         metadata: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
+        show_message: bool = True,
+        show_reasoning: bool = True,
+        show_full_reasoning: bool = False,
+        console: Optional[Any] = None,
+        tags_to_include_in_markdown: Optional[Set[str]] = None,
         **kwargs: Any,
     ) -> None:
         if not tags_to_include_in_markdown:
@@ -3055,6 +3107,8 @@ class Team:
             knowledge_filters=knowledge_filters,
             add_history_to_context=add_history_to_context,
             dependencies=dependencies,
+            add_dependencies_to_context=add_dependencies_to_context,
+            add_session_state_to_context=add_session_state_to_context,
             metadata=metadata,
             debug_mode=debug_mode,
             **kwargs,
@@ -3079,6 +3133,8 @@ class Team:
             knowledge_filters=knowledge_filters,
             add_history_to_context=add_history_to_context,
             dependencies=dependencies,
+            add_dependencies_to_context=add_dependencies_to_context,
+            add_session_state_to_context=add_session_state_to_context,
             metadata=metadata,
             debug_mode=debug_mode,
             **kwargs,
```
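In both `print_response` and `aprint_response` the display-only options (`show_message`, `show_reasoning`, `show_full_reasoning`, `console`, `tags_to_include_in_markdown`) now come after the run options, and the new `add_dependencies_to_context` / `add_session_state_to_context` flags are forwarded to the underlying run. Call sites that pass these options by keyword are unaffected by the reordering; only calls that passed display flags positionally after `user_id` would need updating. A tiny standalone demonstration of why keyword calls survive a parameter reorder (the two functions below are stand-ins, not agno APIs):

```python
# Stand-in signatures illustrating that keyword arguments are order-independent.
def print_response_old(message, show_message=True, markdown=False, debug_mode=False):
    return (message, show_message, markdown, debug_mode)


def print_response_new(message, markdown=False, debug_mode=False, show_message=True):
    return (message, show_message, markdown, debug_mode)


call = dict(markdown=True, show_message=False)
# Same result from both orderings when arguments are passed by name.
assert print_response_old("hi", **call) == print_response_new("hi", **call)
```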
```diff
@@ -3478,6 +3534,7 @@ class Team:
             reasoning_model=reasoning_model,
             min_steps=self.reasoning_min_steps,
             max_steps=self.reasoning_max_steps,
+            tool_call_limit=self.tool_call_limit,
             telemetry=self.telemetry,
             debug_mode=self.debug_mode,
             debug_level=self.debug_level,
@@ -4005,6 +4062,7 @@ class Team:
         add_history_to_context: Optional[bool] = None,
         dependencies: Optional[Dict[str, Any]] = None,
         add_dependencies_to_context: Optional[bool] = None,
+        add_session_state_to_context: Optional[bool] = None,
         metadata: Optional[Dict[str, Any]] = None,
     ) -> None:
         # Prepare tools
@@ -4052,46 +4110,51 @@ class Team:
         if self.knowledge is not None and self.update_knowledge:
             _tools.append(self.add_to_knowledge)

-
-
-
-
+        if self.members:
+            # Get the user message if we are using the input directly
+            user_message = None
+            if self.determine_input_for_members is False:
+                user_message = self._get_user_message(
+                    run_response=run_response,
+                    session_state=session_state,
+                    input_message=input_message,
+                    user_id=user_id,
+                    audio=audio,
+                    images=images,
+                    videos=videos,
+                    files=files,
+                    dependencies=dependencies,
+                    add_dependencies_to_context=add_dependencies_to_context,
+                    metadata=metadata,
+                )
+
+            delegate_task_func = self._get_delegate_task_function(
                 run_response=run_response,
+                session=session,
                 session_state=session_state,
-
+                team_run_context=team_run_context,
+                input=user_message,
                 user_id=user_id,
-
-
-
-
+                stream=self.stream or False,
+                stream_intermediate_steps=self.stream_intermediate_steps,
+                async_mode=async_mode,
+                images=images,  # type: ignore
+                videos=videos,  # type: ignore
+                audio=audio,  # type: ignore
+                files=files,  # type: ignore
+                knowledge_filters=knowledge_filters,
+                add_history_to_context=add_history_to_context,
+                workflow_context=workflow_context,
                 dependencies=dependencies,
                 add_dependencies_to_context=add_dependencies_to_context,
+                add_session_state_to_context=add_session_state_to_context,
                 metadata=metadata,
+                debug_mode=debug_mode,
             )

-
-
-
-            session_state=session_state,
-            team_run_context=team_run_context,
-            input=user_message,
-            user_id=user_id,
-            stream=self.stream or False,
-            stream_intermediate_steps=self.stream_intermediate_steps,
-            async_mode=async_mode,
-            images=images,  # type: ignore
-            videos=videos,  # type: ignore
-            audio=audio,  # type: ignore
-            files=files,  # type: ignore
-            knowledge_filters=knowledge_filters,
-            workflow_context=workflow_context,
-            debug_mode=debug_mode,
-            add_history_to_context=add_history_to_context,
-        )
-
-        _tools.append(delegate_task_func)
-        if self.get_member_information_tool:
-            _tools.append(self.get_member_information)
+            _tools.append(delegate_task_func)
+            if self.get_member_information_tool:
+                _tools.append(self.get_member_information)

         self._functions_for_model = {}
         self._tools_for_model = []
@@ -4120,6 +4183,7 @@ class Team:
                 if name not in self._functions_for_model:
                     func._team = self
                     func._session_state = session_state
+                    func._dependencies = dependencies
                     func.process_entrypoint(strict=strict)
                     if strict:
                         func.strict = True
@@ -4139,6 +4203,7 @@ class Team:
                 if tool.name not in self._functions_for_model:
                     tool._team = self
                     tool._session_state = session_state
+                    tool._dependencies = dependencies
                     tool.process_entrypoint(strict=strict)
                     if strict and tool.strict is None:
                         tool.strict = True
@@ -4160,6 +4225,7 @@ class Team:
                 func = Function.from_callable(tool, strict=strict)
                 func._team = self
                 func._session_state = session_state
+                func._dependencies = dependencies
                 if strict:
                     func.strict = True
                 if self.tool_hooks:
```
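Each tool `Function` now also carries the run's `dependencies` (`func._dependencies = dependencies`) alongside the existing `_team` and `_session_state` attributes before its entrypoint is processed. A conceptual, standalone sketch of that wiring pattern follows; `SimpleFunction` is a stand-in, not agno's `agno.tools.function.Function`, and injecting by parameter name is an assumption about how the attached attributes are surfaced to entrypoints.

```python
# Conceptual sketch: attach per-run context to a tool wrapper, then inject it into the
# entrypoint only if the entrypoint declares a matching parameter.
import inspect
from typing import Any, Callable, Dict, Optional


class SimpleFunction:  # stand-in for a tool Function wrapper
    def __init__(self, entrypoint: Callable[..., Any]):
        self.entrypoint = entrypoint
        self._session_state: Optional[Dict[str, Any]] = None
        self._dependencies: Optional[Dict[str, Any]] = None

    def call(self, **kwargs: Any) -> Any:
        params = inspect.signature(self.entrypoint).parameters
        if "session_state" in params:
            kwargs["session_state"] = self._session_state
        if "dependencies" in params:
            kwargs["dependencies"] = self._dependencies
        return self.entrypoint(**kwargs)


def lookup_order(order_id: str, dependencies: Dict[str, Any]) -> str:
    # The tool reads a shared resource out of the injected dependencies.
    orders_db = dependencies["orders_db"]
    return orders_db.get(order_id, "unknown")


func = SimpleFunction(lookup_order)
func._dependencies = {"orders_db": {"A-1": "shipped"}}
print(func.call(order_id="A-1"))  # shipped
```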
```diff
@@ -4355,42 +4421,43 @@ class Team:

         # 2 Build the default system message for the Agent.
         system_message_content: str = ""
-
-
+        if self.members is not None and len(self.members) > 0:
+            system_message_content += "You are the leader of a team and sub-teams of AI Agents.\n"
+            system_message_content += "Your task is to coordinate the team to complete the user's request.\n"

-
-
-
-
-
-
+            system_message_content += "\nHere are the members in your team:\n"
+            system_message_content += "<team_members>\n"
+            system_message_content += self.get_members_system_message_content()
+            if self.get_member_information_tool:
+                system_message_content += "If you need to get information about your team members, you can use the `get_member_information` tool at any time.\n"
+            system_message_content += "</team_members>\n"

-
+            system_message_content += "\n<how_to_respond>\n"

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if self.delegate_task_to_all_members:
+                system_message_content += (
+                    "- You can either respond directly or use the `delegate_task_to_members` tool to delegate a task to all members in your team to get a collaborative response.\n"
+                    "- To delegate a task to all members in your team, call `delegate_task_to_members` ONLY once. This will delegate a task to all members in your team.\n"
+                    "- Analyze the responses from all members and evaluate whether the task has been completed.\n"
+                    "- If you feel the task has been completed, you can stop and respond to the user.\n"
+                )
+            else:
+                system_message_content += (
+                    "- Your role is to delegate tasks to members in your team with the highest likelihood of completing the user's request.\n"
+                    "- Carefully analyze the tools available to the members and their roles before delegating tasks.\n"
+                    "- You cannot use a member tool directly. You can only delegate tasks to members.\n"
+                    "- When you delegate a task to another member, make sure to include:\n"
+                    " - member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.\n"
+                    " - task_description (str): A clear description of the task.\n"
+                    " - expected_output (str): The expected output.\n"
+                    "- You can delegate tasks to multiple members at once.\n"
+                    "- You must always analyze the responses from members before responding to the user.\n"
+                    "- After analyzing the responses from the members, if you feel the task has been completed, you can stop and respond to the user.\n"
+                    "- If you are not satisfied with the responses from the members, you should re-assign the task.\n"
+                    "- For simple greetings, thanks, or questions about the team itself, you should respond directly.\n"
+                    "- For all work requests, tasks, or questions requiring expertise, route to appropriate team members.\n"
+                )
+            system_message_content += "</how_to_respond>\n\n"

         # Attached media
         if audio is not None or images is not None or videos is not None or files is not None:
@@ -4506,7 +4573,7 @@ class Team:
             f"<additional_context>\n{self.additional_context.strip()}\n</additional_context>\n\n"
         )

-        if
+        if add_session_state_to_context and session_state is not None:
             system_message_content += self._get_formatted_session_state_for_system_message(session_state)

         # Add the JSON output prompt if output_schema is provided and structured_outputs is False
@@ -4607,9 +4674,16 @@ class Team:
         if add_history_to_context:
             from copy import deepcopy

+            # Only skip messages from history when system_message_role is NOT a standard conversation role.
+            # Standard conversation roles ("user", "assistant", "tool") should never be filtered
+            # to preserve conversation continuity.
+            skip_role = (
+                self.system_message_role if self.system_message_role not in ["user", "assistant", "tool"] else None
+            )
+
             history = session.get_messages_from_last_n_runs(
                 last_n=self.num_history_runs,
-                skip_role=
+                skip_role=skip_role,
                 team_id=self.id,
             )

```
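History retrieval no longer unconditionally filters out messages whose role equals `system_message_role`; the role is skipped only when it is not one of the standard conversation roles, so a team configured with a `system_message_role` of "user", "assistant", or "tool" keeps its full conversation history. A minimal sketch of that rule:

```python
# Minimal sketch of the skip_role rule introduced above.
from typing import Optional

STANDARD_CONVERSATION_ROLES = ["user", "assistant", "tool"]


def resolve_skip_role(system_message_role: str) -> Optional[str]:
    """Return the role to filter out of history, or None if nothing should be filtered."""
    if system_message_role in STANDARD_CONVERSATION_ROLES:
        return None
    return system_message_role


print(resolve_skip_role("system"))     # 'system' -> system messages are skipped from history
print(resolve_skip_role("assistant"))  # None -> assistant messages are never filtered out
```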
```diff
@@ -4693,6 +4767,9 @@ class Team:
         if len(input_message) > 0 and isinstance(input_message[0], dict) and "type" in input_message[0]:
             # This is multimodal content (text + images/audio/video), preserve the structure
             input_content = input_message
+        elif len(input_message) > 0 and isinstance(input_message[0], Message):
+            # This is a list of Message objects, extract text content from them
+            input_content = get_text_from_message(input_message)
         elif all(isinstance(item, str) for item in input_message):
             input_content = "\n".join([str(item) for item in input_message])
         else:
```
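Input given as a list of `Message` objects is now handled explicitly by extracting the text content instead of falling through to the string-joining branch. A reduced sketch of that kind of extraction; `Msg` is a stand-in for agno's `Message` class, and joining the contents with newlines is an assumption about what `get_text_from_message` does for lists.

```python
# Reduced sketch of pulling text out of a list of Message-like objects.
from dataclasses import dataclass
from typing import List


@dataclass
class Msg:  # stand-in for agno's Message
    role: str
    content: str


def text_from_messages(messages: List[Msg]) -> str:
    # Assumption: concatenate the textual content of each message.
    return "\n".join(m.content for m in messages if m.content)


input_message = [Msg("user", "Compare Q3 and Q4 revenue"), Msg("user", "Focus on EMEA")]
print(text_from_messages(input_message))
```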
```diff
@@ -5130,9 +5207,14 @@ class Team:
         member_agent_id = member_agent.id if isinstance(member_agent, Agent) else None
         member_team_id = member_agent.id if isinstance(member_agent, Team) else None

+        # Only skip messages from history when system_message_role is NOT a standard conversation role.
+        # Standard conversation roles ("user", "assistant", "tool") should never be filtered
+        # to preserve conversation continuity.
+        skip_role = self.system_message_role if self.system_message_role not in ["user", "assistant", "tool"] else None
+
         history = session.get_messages_from_last_n_runs(
             last_n=member_agent.num_history_runs or self.num_history_runs,
-            skip_role=
+            skip_role=skip_role,
             agent_id=member_agent_id,
             team_id=member_team_id,
             member_runs=True,
@@ -5206,9 +5288,13 @@ class Team:
         audio: Optional[List[Audio]] = None,
         files: Optional[List[File]] = None,
         knowledge_filters: Optional[Dict[str, Any]] = None,
+        add_history_to_context: Optional[bool] = None,
         workflow_context: Optional[Dict] = None,
+        dependencies: Optional[Dict[str, Any]] = None,
+        add_dependencies_to_context: Optional[bool] = None,
+        add_session_state_to_context: Optional[bool] = None,
+        metadata: Optional[Dict[str, Any]] = None,
         debug_mode: Optional[bool] = None,
-        add_history_to_context: Optional[bool] = None,
     ) -> Function:
         if not images:
             images = []
@@ -5244,9 +5330,9 @@ class Team:
                 task_description, expected_output, team_member_interactions_str
             )

-            # 4. Add history for the member if enabled
+            # 4. Add history for the member if enabled (because we won't load the session for the member, so history won't be loaded automatically)
             history = None
-            if member_agent.add_history_to_context:
+            if member_agent.add_history_to_context or add_history_to_context:
                 history = self._get_history_for_member_agent(session, member_agent)
             if history:
                 if isinstance(member_agent_task, str):
@@ -5358,8 +5444,11 @@ class Team:
                     stream=True,
                     stream_intermediate_steps=stream_intermediate_steps,
                     debug_mode=debug_mode,
-                    add_history_to_context=add_history_to_context,
                     workflow_context=workflow_context,
+                    dependencies=dependencies,
+                    add_dependencies_to_context=add_dependencies_to_context,
+                    metadata=metadata,
+                    add_session_state_to_context=add_session_state_to_context,
                     knowledge_filters=knowledge_filters
                     if not member_agent.knowledge_filters and member_agent.knowledge
                     else None,
@@ -5393,7 +5482,10 @@ class Team:
                     stream=False,
                     debug_mode=debug_mode,
                     workflow_context=workflow_context,
-
+                    dependencies=dependencies,
+                    add_dependencies_to_context=add_dependencies_to_context,
+                    add_session_state_to_context=add_session_state_to_context,
+                    metadata=metadata,
                     knowledge_filters=knowledge_filters
                     if not member_agent.knowledge_filters and member_agent.knowledge
                     else None,
@@ -5477,7 +5569,10 @@ class Team:
                     stream=True,
                     stream_intermediate_steps=stream_intermediate_steps,
                     debug_mode=debug_mode,
-
+                    dependencies=dependencies,
+                    add_dependencies_to_context=add_dependencies_to_context,
+                    add_session_state_to_context=add_session_state_to_context,
+                    metadata=metadata,
                     workflow_context=workflow_context,
                     knowledge_filters=knowledge_filters
                     if not member_agent.knowledge_filters and member_agent.knowledge
@@ -5512,7 +5607,10 @@ class Team:
                     stream=False,
                     debug_mode=debug_mode,
                     workflow_context=workflow_context,
-
+                    dependencies=dependencies,
+                    add_dependencies_to_context=add_dependencies_to_context,
+                    add_session_state_to_context=add_session_state_to_context,
+                    metadata=metadata,
                     knowledge_filters=knowledge_filters
                     if not member_agent.knowledge_filters and member_agent.knowledge
                     else None,
@@ -5590,7 +5688,10 @@ class Team:
                     if not member_agent.knowledge_filters and member_agent.knowledge
                     else None,
                     debug_mode=debug_mode,
-
+                    dependencies=dependencies,
+                    add_dependencies_to_context=add_dependencies_to_context,
+                    add_session_state_to_context=add_session_state_to_context,
+                    metadata=metadata,
                     yield_run_response=True,
                 )
                 member_agent_run_response = None
@@ -5625,7 +5726,10 @@ class Team:
                     if not member_agent.knowledge_filters and member_agent.knowledge
                     else None,
                     debug_mode=debug_mode,
-
+                    dependencies=dependencies,
+                    add_dependencies_to_context=add_dependencies_to_context,
+                    add_session_state_to_context=add_session_state_to_context,
+                    metadata=metadata,
                 )

                 check_if_run_cancelled(member_agent_run_response)  # type: ignore
@@ -5699,7 +5803,10 @@ class Team:
                     knowledge_filters=knowledge_filters
                     if not member_agent.knowledge_filters and member_agent.knowledge
                     else None,
-
+                    dependencies=dependencies,
+                    add_dependencies_to_context=add_dependencies_to_context,
+                    add_session_state_to_context=add_session_state_to_context,
+                    metadata=metadata,
                     yield_run_response=True,
                 )
                 member_agent_run_response = None
@@ -5716,6 +5823,7 @@ class Team:
                 _process_delegate_task_to_member(
                     member_agent_run_response, member_agent, member_agent_task, member_session_state_copy
                 )
+                await queue.put(done_marker)

             # Initialize and launch all members
             tasks: List[asyncio.Task[None]] = []
```
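Each member task now puts the done marker on the shared queue after `_process_delegate_task_to_member` finishes, so the streaming consumer can tell when every member is done. A self-contained sketch of that sentinel pattern; the names below are generic, not the internal agno ones.

```python
# Generic sentinel pattern: each producer puts a "done" marker when it finishes, and the
# consumer drains events until it has seen one marker per producer.
import asyncio

DONE = object()  # sentinel, mirroring the role of done_marker


async def member_worker(name: str, queue: asyncio.Queue) -> None:
    for i in range(2):
        await queue.put(f"{name}: chunk {i}")
    await queue.put(DONE)  # signal completion, as in `await queue.put(done_marker)`


async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    members = ["researcher", "writer"]
    tasks = [asyncio.create_task(member_worker(m, queue)) for m in members]

    finished = 0
    while finished < len(members):
        event = await queue.get()
        if event is DONE:
            finished += 1
        else:
            print(event)

    for t in tasks:
        await t


asyncio.run(main())
```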
```diff
@@ -5743,7 +5851,6 @@ class Team:
                 for t in tasks:
                     with contextlib.suppress(Exception):
                         await t
-
             else:
                 # Non-streaming concurrent run of members; collect results when done
                 tasks = []
@@ -5773,7 +5880,10 @@ class Team:
                     knowledge_filters=knowledge_filters
                     if not member_agent.knowledge_filters and member_agent.knowledge
                     else None,
-
+                    dependencies=dependencies,
+                    add_dependencies_to_context=add_dependencies_to_context,
+                    add_session_state_to_context=add_session_state_to_context,
+                    metadata=metadata,
                 )
                 check_if_run_cancelled(member_agent_run_response)

@@ -6006,7 +6116,8 @@ class Team:

         from agno.utils.merge_dict import merge_dictionaries

-        # Get the session_state from the database and
+        # Get the session_state from the database and merge with proper precedence
+        # At this point session_state contains: agent_defaults + run_params
         if session.session_data is not None and "session_state" in session.session_data:
             session_state_from_db = session.session_data.get("session_state")

@@ -6015,10 +6126,11 @@ class Team:
                 and isinstance(session_state_from_db, dict)
                 and len(session_state_from_db) > 0
             ):
-                # This
-
-                merge_dictionaries(
-                    session_state
+                # This preserves precedence: run_params > db_state > agent_defaults
+                merged_state = session_state_from_db.copy()
+                merge_dictionaries(merged_state, session_state)
+                session_state.clear()
+                session_state.update(merged_state)

             # Update the session_state in the session
             if session.session_data is not None:
```
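When a stored session is loaded, its persisted `session_state` is now merged underneath the in-memory state rather than on top of it, giving the precedence the comment describes: run params over database state over team defaults. A standalone sketch of the combined two-step merge; `merge` stands in for `agno.utils.merge_dict.merge_dictionaries` (in-place update), and the example keys are illustrative only.

```python
# Standalone sketch of the resulting precedence: run_params > db_state > agent_defaults.
from typing import Any, Dict


def merge(base: Dict[str, Any], overrides: Dict[str, Any]) -> None:
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge(base[key], value)
        else:
            base[key] = value


agent_defaults = {"region": "eu"}                    # Team(session_state=...)
run_params = {"theme": "light"}                      # session_state passed to this run
db_state = {"plan": "pro", "theme": "system"}        # persisted from earlier runs

# Step 1 (at run start): team defaults merged underneath the run-level state.
session_state = agent_defaults.copy()
merge(session_state, run_params)

# Step 2 (after loading the session): db state merged underneath the current state.
merged = db_state.copy()
merge(merged, session_state)   # run params still win over the persisted theme
session_state = merged

print(session_state)  # {'plan': 'pro', 'theme': 'light', 'region': 'eu'}
```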
```diff
@@ -6656,17 +6768,18 @@ class Team:
     ) -> Function:
         """Factory function to create a search_knowledge_base function with filters."""

-        def search_knowledge_base(query: str, filters: Optional[
+        def search_knowledge_base(query: str, filters: Optional[List[KnowledgeFilter]] = None) -> str:
             """Use this function to search the knowledge base for information about a query.

             Args:
                 query: The query to search for.
-                filters: The filters to apply to the search. This is a
+                filters (optional): The filters to apply to the search. This is a list of KnowledgeFilter objects.

             Returns:
                 str: A string containing the response from the knowledge base.
             """
-
+            filters_dict = {filt.key: filt.value for filt in filters} if filters else None
+            search_filters = get_agentic_or_user_search_filters(filters_dict, knowledge_filters)

             # Get the relevant documents from the knowledge base, passing filters
             retrieval_timer = Timer()
```
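The team-level `search_knowledge_base` tool (and its async variant in the next hunk) now accepts a typed list of `KnowledgeFilter` objects, imported from `agno.knowledge.types` at the top of the file, and converts it to a plain dict before resolving the effective search filters. A small sketch of that conversion; the local dataclass is a stand-in for the real `KnowledgeFilter`, which may carry extra fields, but the code above only relies on `.key` and `.value`.

```python
# Small sketch of the filters-to-dict conversion done inside search_knowledge_base.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional


@dataclass
class KnowledgeFilterStub:  # stand-in for agno.knowledge.types.KnowledgeFilter
    key: str
    value: Any


def filters_to_dict(filters: Optional[List[KnowledgeFilterStub]]) -> Optional[Dict[str, Any]]:
    return {filt.key: filt.value for filt in filters} if filters else None


agentic_filters = [KnowledgeFilterStub("department", "finance"), KnowledgeFilterStub("year", 2024)]
print(filters_to_dict(agentic_filters))  # {'department': 'finance', 'year': 2024}
print(filters_to_dict(None))             # None
```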
```diff
@@ -6687,17 +6800,18 @@ class Team:
                 return "No documents found"
             return self._convert_documents_to_string(docs_from_knowledge)

-        async def asearch_knowledge_base(query: str, filters: Optional[
+        async def asearch_knowledge_base(query: str, filters: Optional[List[KnowledgeFilter]] = None) -> str:
             """Use this function to search the knowledge base for information about a query asynchronously.

             Args:
                 query: The query to search for.
-                filters: The filters to apply to the search. This is a
+                filters (optional): The filters to apply to the search. This is a list of KnowledgeFilter objects.

             Returns:
                 str: A string containing the response from the knowledge base.
             """
-
+            filters_dict = {filt.key: filt.value for filt in filters} if filters else None
+            search_filters = get_agentic_or_user_search_filters(filters_dict, knowledge_filters)

             retrieval_timer = Timer()
             retrieval_timer.start()
```