agno 2.3.26__py3-none-any.whl → 2.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. agno/agent/__init__.py +4 -0
  2. agno/agent/agent.py +1368 -541
  3. agno/agent/remote.py +13 -0
  4. agno/db/base.py +339 -0
  5. agno/db/postgres/async_postgres.py +116 -12
  6. agno/db/postgres/postgres.py +1229 -25
  7. agno/db/postgres/schemas.py +48 -1
  8. agno/db/sqlite/async_sqlite.py +119 -4
  9. agno/db/sqlite/schemas.py +51 -0
  10. agno/db/sqlite/sqlite.py +1173 -13
  11. agno/db/utils.py +37 -1
  12. agno/knowledge/__init__.py +4 -0
  13. agno/knowledge/chunking/code.py +1 -1
  14. agno/knowledge/chunking/semantic.py +1 -1
  15. agno/knowledge/chunking/strategy.py +4 -0
  16. agno/knowledge/filesystem.py +412 -0
  17. agno/knowledge/knowledge.py +2767 -2254
  18. agno/knowledge/protocol.py +134 -0
  19. agno/knowledge/reader/arxiv_reader.py +2 -2
  20. agno/knowledge/reader/base.py +9 -7
  21. agno/knowledge/reader/csv_reader.py +5 -5
  22. agno/knowledge/reader/docx_reader.py +2 -2
  23. agno/knowledge/reader/field_labeled_csv_reader.py +2 -2
  24. agno/knowledge/reader/firecrawl_reader.py +2 -2
  25. agno/knowledge/reader/json_reader.py +2 -2
  26. agno/knowledge/reader/markdown_reader.py +2 -2
  27. agno/knowledge/reader/pdf_reader.py +5 -4
  28. agno/knowledge/reader/pptx_reader.py +2 -2
  29. agno/knowledge/reader/reader_factory.py +110 -0
  30. agno/knowledge/reader/s3_reader.py +2 -2
  31. agno/knowledge/reader/tavily_reader.py +2 -2
  32. agno/knowledge/reader/text_reader.py +2 -2
  33. agno/knowledge/reader/web_search_reader.py +2 -2
  34. agno/knowledge/reader/website_reader.py +5 -3
  35. agno/knowledge/reader/wikipedia_reader.py +2 -2
  36. agno/knowledge/reader/youtube_reader.py +2 -2
  37. agno/knowledge/utils.py +37 -29
  38. agno/learn/__init__.py +6 -0
  39. agno/learn/machine.py +35 -0
  40. agno/learn/schemas.py +82 -11
  41. agno/learn/stores/__init__.py +3 -0
  42. agno/learn/stores/decision_log.py +1156 -0
  43. agno/learn/stores/learned_knowledge.py +6 -6
  44. agno/models/anthropic/claude.py +24 -0
  45. agno/models/aws/bedrock.py +20 -0
  46. agno/models/base.py +48 -4
  47. agno/models/cohere/chat.py +25 -0
  48. agno/models/google/gemini.py +50 -5
  49. agno/models/litellm/chat.py +38 -0
  50. agno/models/openai/chat.py +7 -0
  51. agno/models/openrouter/openrouter.py +46 -0
  52. agno/models/response.py +16 -0
  53. agno/os/app.py +83 -44
  54. agno/os/middleware/__init__.py +2 -0
  55. agno/os/middleware/trailing_slash.py +27 -0
  56. agno/os/router.py +1 -0
  57. agno/os/routers/agents/router.py +29 -16
  58. agno/os/routers/agents/schema.py +6 -4
  59. agno/os/routers/components/__init__.py +3 -0
  60. agno/os/routers/components/components.py +466 -0
  61. agno/os/routers/evals/schemas.py +4 -3
  62. agno/os/routers/health.py +3 -3
  63. agno/os/routers/knowledge/knowledge.py +3 -3
  64. agno/os/routers/memory/schemas.py +4 -2
  65. agno/os/routers/metrics/metrics.py +9 -11
  66. agno/os/routers/metrics/schemas.py +10 -6
  67. agno/os/routers/registry/__init__.py +3 -0
  68. agno/os/routers/registry/registry.py +337 -0
  69. agno/os/routers/teams/router.py +20 -8
  70. agno/os/routers/teams/schema.py +6 -4
  71. agno/os/routers/traces/traces.py +5 -5
  72. agno/os/routers/workflows/router.py +38 -11
  73. agno/os/routers/workflows/schema.py +1 -1
  74. agno/os/schema.py +92 -26
  75. agno/os/utils.py +84 -19
  76. agno/reasoning/anthropic.py +2 -2
  77. agno/reasoning/azure_ai_foundry.py +2 -2
  78. agno/reasoning/deepseek.py +2 -2
  79. agno/reasoning/default.py +6 -7
  80. agno/reasoning/gemini.py +2 -2
  81. agno/reasoning/helpers.py +6 -7
  82. agno/reasoning/manager.py +4 -10
  83. agno/reasoning/ollama.py +2 -2
  84. agno/reasoning/openai.py +2 -2
  85. agno/reasoning/vertexai.py +2 -2
  86. agno/registry/__init__.py +3 -0
  87. agno/registry/registry.py +68 -0
  88. agno/run/agent.py +57 -0
  89. agno/run/base.py +7 -0
  90. agno/run/team.py +57 -0
  91. agno/skills/agent_skills.py +10 -3
  92. agno/team/__init__.py +3 -1
  93. agno/team/team.py +1145 -326
  94. agno/tools/duckduckgo.py +25 -71
  95. agno/tools/exa.py +0 -21
  96. agno/tools/function.py +35 -83
  97. agno/tools/knowledge.py +9 -4
  98. agno/tools/mem0.py +11 -10
  99. agno/tools/memory.py +47 -46
  100. agno/tools/parallel.py +0 -7
  101. agno/tools/reasoning.py +30 -23
  102. agno/tools/tavily.py +4 -1
  103. agno/tools/websearch.py +93 -0
  104. agno/tools/website.py +1 -1
  105. agno/tools/wikipedia.py +1 -1
  106. agno/tools/workflow.py +48 -47
  107. agno/utils/agent.py +42 -5
  108. agno/utils/events.py +160 -2
  109. agno/utils/print_response/agent.py +0 -31
  110. agno/utils/print_response/team.py +0 -2
  111. agno/utils/print_response/workflow.py +0 -2
  112. agno/utils/team.py +61 -11
  113. agno/vectordb/lancedb/lance_db.py +4 -1
  114. agno/vectordb/mongodb/mongodb.py +1 -1
  115. agno/vectordb/qdrant/qdrant.py +4 -4
  116. agno/workflow/__init__.py +3 -1
  117. agno/workflow/condition.py +0 -21
  118. agno/workflow/loop.py +0 -21
  119. agno/workflow/parallel.py +0 -21
  120. agno/workflow/router.py +0 -21
  121. agno/workflow/step.py +117 -24
  122. agno/workflow/steps.py +0 -21
  123. agno/workflow/workflow.py +427 -63
  124. {agno-2.3.26.dist-info → agno-2.4.0.dist-info}/METADATA +46 -76
  125. {agno-2.3.26.dist-info → agno-2.4.0.dist-info}/RECORD +128 -117
  126. {agno-2.3.26.dist-info → agno-2.4.0.dist-info}/WHEEL +0 -0
  127. {agno-2.3.26.dist-info → agno-2.4.0.dist-info}/licenses/LICENSE +0 -0
  128. {agno-2.3.26.dist-info → agno-2.4.0.dist-info}/top_level.txt +0 -0
agno/team/team.py CHANGED
@@ -4,13 +4,11 @@ import asyncio
4
4
  import contextlib
5
5
  import json
6
6
  import time
7
- import warnings
8
7
  from collections import ChainMap, deque
9
8
  from concurrent.futures import Future
10
9
  from copy import copy
11
10
  from dataclasses import dataclass
12
11
  from os import getenv
13
- from textwrap import dedent
14
12
  from typing import (
15
13
  Any,
16
14
  AsyncIterator,
@@ -35,7 +33,8 @@ from pydantic import BaseModel
35
33
 
36
34
  from agno.agent import Agent
37
35
  from agno.compression.manager import CompressionManager
38
- from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
36
+ from agno.db.base import AsyncBaseDb, BaseDb, ComponentType, SessionType, UserMemory
37
+ from agno.db.utils import db_from_dict
39
38
  from agno.eval.base import BaseEval
40
39
  from agno.exceptions import (
41
40
  InputCheckError,
@@ -44,7 +43,7 @@ from agno.exceptions import (
44
43
  )
45
44
  from agno.filters import FilterExpr
46
45
  from agno.guardrails import BaseGuardrail
47
- from agno.knowledge.knowledge import Knowledge
46
+ from agno.knowledge.protocol import KnowledgeProtocol
48
47
  from agno.knowledge.types import KnowledgeFilter
49
48
  from agno.media import Audio, File, Image, Video
50
49
  from agno.memory import MemoryManager
@@ -54,6 +53,7 @@ from agno.models.metrics import Metrics
54
53
  from agno.models.response import ModelResponse, ModelResponseEvent
55
54
  from agno.models.utils import get_model
56
55
  from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
56
+ from agno.registry.registry import Registry
57
57
  from agno.run import RunContext, RunStatus
58
58
  from agno.run.agent import RunEvent, RunOutput, RunOutputEvent
59
59
  from agno.run.cancel import (
@@ -118,6 +118,10 @@ from agno.utils.agent import (
118
118
  from agno.utils.common import is_typed_dict
119
119
  from agno.utils.events import (
120
120
  add_team_error_event,
121
+ create_team_compression_completed_event,
122
+ create_team_compression_started_event,
123
+ create_team_model_request_completed_event,
124
+ create_team_model_request_started_event,
121
125
  create_team_parser_model_response_completed_event,
122
126
  create_team_parser_model_response_started_event,
123
127
  create_team_post_hook_completed_event,
@@ -266,6 +270,8 @@ class Team:
266
270
  description: Optional[str] = None
267
271
  # List of instructions for the team.
268
272
  instructions: Optional[Union[str, List[str], Callable]] = None
273
+ # If True, wrap instructions in <instructions> tags. Default is False.
274
+ use_instruction_tags: bool = False
269
275
  # Provide the expected output from the Team.
270
276
  expected_output: Optional[str] = None
271
277
  # Additional context added to the end of the system message.
@@ -314,7 +320,7 @@ class Team:
314
320
  add_dependencies_to_context: bool = False
315
321
 
316
322
  # --- Agent Knowledge ---
317
- knowledge: Optional[Knowledge] = None
323
+ knowledge: Optional[KnowledgeProtocol] = None
318
324
  # Add knowledge_filters to the Agent class attributes
319
325
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
320
326
  # Let the agent choose the knowledge filters
@@ -395,7 +401,9 @@ class Team:
395
401
  # Enable the agent to manage memories of the user
396
402
  enable_agentic_memory: bool = False
397
403
  # If True, the agent creates/updates user memories at the end of runs
398
- enable_user_memories: bool = False
404
+ update_memory_on_run: bool = False
405
+ # Soon to be deprecated. Use update_memory_on_run
406
+ enable_user_memories: Optional[bool] = None
399
407
  # If True, the agent adds a reference to the user memories in the response
400
408
  add_memories_to_context: Optional[bool] = None
401
409
  # If True, the agent creates/updates session summaries at the end of runs
@@ -427,6 +435,8 @@ class Team:
427
435
  # --- Team Storage ---
428
436
  # Metadata stored with this team
429
437
  metadata: Optional[Dict[str, Any]] = None
438
+ # Version of the team config (set when loaded from DB)
439
+ version: Optional[int] = None
430
440
 
431
441
  # --- Team Reasoning ---
432
442
  reasoning: bool = False
@@ -440,8 +450,6 @@ class Team:
440
450
  stream: Optional[bool] = None
441
451
  # Stream the intermediate steps from the Agent
442
452
  stream_events: Optional[bool] = None
443
- # [Deprecated] Stream the intermediate steps from the Agent
444
- stream_intermediate_steps: Optional[bool] = None
445
453
  # Stream the member events from the Team
446
454
  stream_member_events: bool = True
447
455
 
@@ -473,9 +481,6 @@ class Team:
473
481
  # This helps us improve the Teams implementation and provide better support
474
482
  telemetry: bool = True
475
483
 
476
- # Deprecated. Use delegate_to_all_members instead.
477
- delegate_task_to_all_members: bool = False
478
-
479
484
  def __init__(
480
485
  self,
481
486
  members: List[Union[Agent, "Team"]],
@@ -485,7 +490,6 @@ class Team:
485
490
  role: Optional[str] = None,
486
491
  respond_directly: bool = False,
487
492
  determine_input_for_members: bool = True,
488
- delegate_task_to_all_members: bool = False,
489
493
  delegate_to_all_members: bool = False,
490
494
  user_id: Optional[str] = None,
491
495
  session_id: Optional[str] = None,
@@ -501,6 +505,7 @@ class Team:
501
505
  num_history_sessions: Optional[int] = None,
502
506
  description: Optional[str] = None,
503
507
  instructions: Optional[Union[str, List[str], Callable]] = None,
508
+ use_instruction_tags: bool = False,
504
509
  expected_output: Optional[str] = None,
505
510
  additional_context: Optional[str] = None,
506
511
  markdown: bool = False,
@@ -515,7 +520,7 @@ class Team:
515
520
  additional_input: Optional[List[Union[str, Dict, BaseModel, Message]]] = None,
516
521
  dependencies: Optional[Dict[str, Any]] = None,
517
522
  add_dependencies_to_context: bool = False,
518
- knowledge: Optional[Knowledge] = None,
523
+ knowledge: Optional[KnowledgeProtocol] = None,
519
524
  knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
520
525
  add_knowledge_to_context: bool = False,
521
526
  enable_agentic_knowledge_filters: Optional[bool] = False,
@@ -550,7 +555,8 @@ class Team:
550
555
  parse_response: bool = True,
551
556
  db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
552
557
  enable_agentic_memory: bool = False,
553
- enable_user_memories: bool = False,
558
+ update_memory_on_run: bool = False,
559
+ enable_user_memories: Optional[bool] = None, # Soon to be deprecated. Use update_memory_on_run
554
560
  add_memories_to_context: Optional[bool] = None,
555
561
  memory_manager: Optional[MemoryManager] = None,
556
562
  enable_session_summaries: bool = False,
@@ -566,7 +572,6 @@ class Team:
566
572
  reasoning_max_steps: int = 10,
567
573
  stream: Optional[bool] = None,
568
574
  stream_events: Optional[bool] = None,
569
- stream_intermediate_steps: Optional[bool] = None,
570
575
  store_events: bool = False,
571
576
  events_to_skip: Optional[List[Union[RunEvent, TeamRunEvent]]] = None,
572
577
  store_member_responses: bool = False,
@@ -579,13 +584,6 @@ class Team:
579
584
  exponential_backoff: bool = False,
580
585
  telemetry: bool = True,
581
586
  ):
582
- if delegate_task_to_all_members:
583
- warnings.warn(
584
- "The 'delegate_task_to_all_members' parameter is deprecated and will be removed in future versions. Use 'delegate_to_all_members' instead.",
585
- DeprecationWarning,
586
- stacklevel=2,
587
- )
588
-
589
587
  self.members = members
590
588
 
591
589
  self.model = model # type: ignore[assignment]
@@ -596,7 +594,7 @@ class Team:
596
594
 
597
595
  self.respond_directly = respond_directly
598
596
  self.determine_input_for_members = determine_input_for_members
599
- self.delegate_to_all_members = delegate_to_all_members or delegate_task_to_all_members
597
+ self.delegate_to_all_members = delegate_to_all_members
600
598
 
601
599
  self.user_id = user_id
602
600
  self.session_id = session_id
@@ -627,6 +625,7 @@ class Team:
627
625
 
628
626
  self.description = description
629
627
  self.instructions = instructions
628
+ self.use_instruction_tags = use_instruction_tags
630
629
  self.expected_output = expected_output
631
630
  self.additional_context = additional_context
632
631
  self.markdown = markdown
@@ -682,7 +681,13 @@ class Team:
682
681
  self.db = db
683
682
 
684
683
  self.enable_agentic_memory = enable_agentic_memory
685
- self.enable_user_memories = enable_user_memories
684
+
685
+ if enable_user_memories is not None:
686
+ self.update_memory_on_run = enable_user_memories
687
+ else:
688
+ self.update_memory_on_run = update_memory_on_run
689
+ self.enable_user_memories = self.update_memory_on_run # Soon to be deprecated. Use update_memory_on_run
690
+
686
691
  self.add_memories_to_context = add_memories_to_context
687
692
  self.memory_manager = memory_manager
688
693
  self.enable_session_summaries = enable_session_summaries
@@ -702,7 +707,7 @@ class Team:
702
707
  self.reasoning_max_steps = reasoning_max_steps
703
708
 
704
709
  self.stream = stream
705
- self.stream_events = stream_events or stream_intermediate_steps
710
+ self.stream_events = stream_events
706
711
  self.store_events = store_events
707
712
  self.store_member_responses = store_member_responses
708
713
 
@@ -874,7 +879,7 @@ class Team:
874
879
 
875
880
  if self.add_memories_to_context is None:
876
881
  self.add_memories_to_context = (
877
- self.enable_user_memories or self.enable_agentic_memory or self.memory_manager is not None
882
+ self.update_memory_on_run or self.enable_agentic_memory or self.memory_manager is not None
878
883
  )
879
884
 
880
885
  def _set_session_summary_manager(self) -> None:
@@ -978,7 +983,7 @@ class Team:
978
983
  self.set_id()
979
984
 
980
985
  # Set the memory manager and session summary manager
981
- if self.enable_user_memories or self.enable_agentic_memory or self.memory_manager is not None:
986
+ if self.update_memory_on_run or self.enable_agentic_memory or self.memory_manager is not None:
982
987
  self._set_memory_manager()
983
988
  if self.enable_session_summaries or self.session_summary_manager is not None:
984
989
  self._set_session_summary_manager()
@@ -1102,9 +1107,6 @@ class Team:
1102
1107
  "team": self,
1103
1108
  "session": session,
1104
1109
  "user_id": user_id,
1105
- "metadata": run_context.metadata,
1106
- "session_state": run_context.session_state,
1107
- "dependencies": run_context.dependencies,
1108
1110
  "debug_mode": debug_mode or self.debug_mode,
1109
1111
  }
1110
1112
 
@@ -1194,9 +1196,6 @@ class Team:
1194
1196
  "team": self,
1195
1197
  "session": session,
1196
1198
  "user_id": user_id,
1197
- "session_state": run_context.session_state,
1198
- "dependencies": run_context.dependencies,
1199
- "metadata": run_context.metadata,
1200
1199
  "debug_mode": debug_mode or self.debug_mode,
1201
1200
  }
1202
1201
 
@@ -1291,9 +1290,6 @@ class Team:
1291
1290
  "team": self,
1292
1291
  "session": session,
1293
1292
  "user_id": user_id,
1294
- "session_state": run_context.session_state,
1295
- "dependencies": run_context.dependencies,
1296
- "metadata": run_context.metadata,
1297
1293
  "debug_mode": debug_mode or self.debug_mode,
1298
1294
  }
1299
1295
 
@@ -1377,9 +1373,6 @@ class Team:
1377
1373
  "team": self,
1378
1374
  "session": session,
1379
1375
  "user_id": user_id,
1380
- "session_state": run_context.session_state,
1381
- "dependencies": run_context.dependencies,
1382
- "metadata": run_context.metadata,
1383
1376
  "debug_mode": debug_mode or self.debug_mode,
1384
1377
  }
1385
1378
 
@@ -1556,7 +1549,9 @@ class Team:
1556
1549
  raise_if_cancelled(run_response.run_id) # type: ignore
1557
1550
 
1558
1551
  # 5. Reason about the task if reasoning is enabled
1559
- self._handle_reasoning(run_response=run_response, run_messages=run_messages)
1552
+ self._handle_reasoning(
1553
+ run_response=run_response, run_messages=run_messages, run_context=run_context
1554
+ )
1560
1555
 
1561
1556
  # Check for cancellation before model call
1562
1557
  raise_if_cancelled(run_response.run_id) # type: ignore
@@ -1829,6 +1824,7 @@ class Team:
1829
1824
  yield from self._handle_reasoning_stream(
1830
1825
  run_response=run_response,
1831
1826
  run_messages=run_messages,
1827
+ run_context=run_context,
1832
1828
  stream_events=stream_events,
1833
1829
  )
1834
1830
 
@@ -1919,6 +1915,7 @@ class Team:
1919
1915
  stream_events=stream_events,
1920
1916
  events_to_skip=self.events_to_skip, # type: ignore
1921
1917
  store_events=self.store_events,
1918
+ get_memories_callback=lambda: self.get_user_memories(user_id=user_id),
1922
1919
  )
1923
1920
 
1924
1921
  raise_if_cancelled(run_response.run_id) # type: ignore
@@ -2061,7 +2058,6 @@ class Team:
2061
2058
  *,
2062
2059
  stream: Literal[False] = False,
2063
2060
  stream_events: Optional[bool] = None,
2064
- stream_intermediate_steps: Optional[bool] = None,
2065
2061
  session_id: Optional[str] = None,
2066
2062
  session_state: Optional[Dict[str, Any]] = None,
2067
2063
  user_id: Optional[str] = None,
@@ -2088,7 +2084,6 @@ class Team:
2088
2084
  *,
2089
2085
  stream: Literal[True] = True,
2090
2086
  stream_events: Optional[bool] = None,
2091
- stream_intermediate_steps: Optional[bool] = None,
2092
2087
  session_id: Optional[str] = None,
2093
2088
  session_state: Optional[Dict[str, Any]] = None,
2094
2089
  run_context: Optional[RunContext] = None,
@@ -2105,7 +2100,6 @@ class Team:
2105
2100
  dependencies: Optional[Dict[str, Any]] = None,
2106
2101
  metadata: Optional[Dict[str, Any]] = None,
2107
2102
  debug_mode: Optional[bool] = None,
2108
- yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
2109
2103
  yield_run_output: bool = False,
2110
2104
  output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
2111
2105
  **kwargs: Any,
@@ -2117,7 +2111,6 @@ class Team:
2117
2111
  *,
2118
2112
  stream: Optional[bool] = None,
2119
2113
  stream_events: Optional[bool] = None,
2120
- stream_intermediate_steps: Optional[bool] = None,
2121
2114
  session_id: Optional[str] = None,
2122
2115
  session_state: Optional[Dict[str, Any]] = None,
2123
2116
  run_context: Optional[RunContext] = None,
@@ -2134,7 +2127,6 @@ class Team:
2134
2127
  dependencies: Optional[Dict[str, Any]] = None,
2135
2128
  metadata: Optional[Dict[str, Any]] = None,
2136
2129
  debug_mode: Optional[bool] = None,
2137
- yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
2138
2130
  yield_run_output: bool = False,
2139
2131
  output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
2140
2132
  **kwargs: Any,
@@ -2154,14 +2146,6 @@ class Team:
2154
2146
  "add_history_to_context is True, but no database has been assigned to the team. History will not be added to the context."
2155
2147
  )
2156
2148
 
2157
- if yield_run_response is not None:
2158
- warnings.warn(
2159
- "The 'yield_run_response' parameter is deprecated and will be removed in future versions. Use 'yield_run_output' instead.",
2160
- DeprecationWarning,
2161
- stacklevel=2,
2162
- )
2163
- yield_run_output = yield_run_output or yield_run_response # For backwards compatibility
2164
-
2165
2149
  # Register run for cancellation tracking
2166
2150
  register_run(run_id) # type: ignore
2167
2151
 
@@ -2253,9 +2237,6 @@ class Team:
2253
2237
  if stream is None:
2254
2238
  stream = False if self.stream is None else self.stream
2255
2239
 
2256
- # Considering both stream_events and stream_intermediate_steps (deprecated)
2257
- stream_events = stream_events or stream_intermediate_steps
2258
-
2259
2240
  # Can't stream events if streaming is disabled
2260
2241
  if stream is False:
2261
2242
  stream_events = False
@@ -2473,7 +2454,9 @@ class Team:
2473
2454
 
2474
2455
  await araise_if_cancelled(run_response.run_id) # type: ignore
2475
2456
  # 7. Reason about the task if reasoning is enabled
2476
- await self._ahandle_reasoning(run_response=run_response, run_messages=run_messages)
2457
+ await self._ahandle_reasoning(
2458
+ run_response=run_response, run_messages=run_messages, run_context=run_context
2459
+ )
2477
2460
 
2478
2461
  # Check for cancellation before model call
2479
2462
  await araise_if_cancelled(run_response.run_id) # type: ignore
@@ -2640,7 +2623,6 @@ class Team:
2640
2623
  user_id: Optional[str] = None,
2641
2624
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
2642
2625
  stream_events: bool = False,
2643
- stream_intermediate_steps: bool = False,
2644
2626
  yield_run_output: bool = False,
2645
2627
  add_dependencies_to_context: Optional[bool] = None,
2646
2628
  add_session_state_to_context: Optional[bool] = None,
@@ -2774,9 +2756,6 @@ class Team:
2774
2756
  existing_task=memory_task,
2775
2757
  )
2776
2758
 
2777
- # Considering both stream_events and stream_intermediate_steps (deprecated)
2778
- stream_events = stream_events or stream_intermediate_steps
2779
-
2780
2759
  # Yield the run started event
2781
2760
  if stream_events:
2782
2761
  yield handle_event( # type: ignore
@@ -2790,6 +2769,7 @@ class Team:
2790
2769
  async for item in self._ahandle_reasoning_stream(
2791
2770
  run_response=run_response,
2792
2771
  run_messages=run_messages,
2772
+ run_context=run_context,
2793
2773
  stream_events=stream_events,
2794
2774
  ):
2795
2775
  await araise_if_cancelled(run_response.run_id) # type: ignore
@@ -2888,6 +2868,7 @@ class Team:
2888
2868
  stream_events=stream_events,
2889
2869
  events_to_skip=self.events_to_skip, # type: ignore
2890
2870
  store_events=self.store_events,
2871
+ get_memories_callback=lambda: self.aget_user_memories(user_id=user_id),
2891
2872
  ):
2892
2873
  yield event
2893
2874
 
@@ -3032,7 +3013,6 @@ class Team:
3032
3013
  *,
3033
3014
  stream: Literal[False] = False,
3034
3015
  stream_events: Optional[bool] = None,
3035
- stream_intermediate_steps: Optional[bool] = None,
3036
3016
  session_id: Optional[str] = None,
3037
3017
  session_state: Optional[Dict[str, Any]] = None,
3038
3018
  run_id: Optional[str] = None,
@@ -3060,7 +3040,6 @@ class Team:
3060
3040
  *,
3061
3041
  stream: Literal[True] = True,
3062
3042
  stream_events: Optional[bool] = None,
3063
- stream_intermediate_steps: Optional[bool] = None,
3064
3043
  session_id: Optional[str] = None,
3065
3044
  session_state: Optional[Dict[str, Any]] = None,
3066
3045
  run_id: Optional[str] = None,
@@ -3077,7 +3056,6 @@ class Team:
3077
3056
  dependencies: Optional[Dict[str, Any]] = None,
3078
3057
  metadata: Optional[Dict[str, Any]] = None,
3079
3058
  debug_mode: Optional[bool] = None,
3080
- yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
3081
3059
  yield_run_output: bool = False,
3082
3060
  output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
3083
3061
  **kwargs: Any,
@@ -3089,7 +3067,6 @@ class Team:
3089
3067
  *,
3090
3068
  stream: Optional[bool] = None,
3091
3069
  stream_events: Optional[bool] = None,
3092
- stream_intermediate_steps: Optional[bool] = None,
3093
3070
  session_id: Optional[str] = None,
3094
3071
  session_state: Optional[Dict[str, Any]] = None,
3095
3072
  run_id: Optional[str] = None,
@@ -3106,7 +3083,6 @@ class Team:
3106
3083
  dependencies: Optional[Dict[str, Any]] = None,
3107
3084
  metadata: Optional[Dict[str, Any]] = None,
3108
3085
  debug_mode: Optional[bool] = None,
3109
- yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
3110
3086
  yield_run_output: bool = False,
3111
3087
  output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
3112
3088
  **kwargs: Any,
@@ -3121,15 +3097,6 @@ class Team:
3121
3097
  "add_history_to_context is True, but no database has been assigned to the team. History will not be added to the context."
3122
3098
  )
3123
3099
 
3124
- if yield_run_response is not None:
3125
- warnings.warn(
3126
- "The 'yield_run_response' parameter is deprecated and will be removed in future versions. Use 'yield_run_output' instead.",
3127
- DeprecationWarning,
3128
- stacklevel=2,
3129
- )
3130
-
3131
- yield_run_output = yield_run_output or yield_run_response # For backwards compatibility
3132
-
3133
3100
  background_tasks = kwargs.pop("background_tasks", None)
3134
3101
  if background_tasks is not None:
3135
3102
  from fastapi import BackgroundTasks
@@ -3181,15 +3148,6 @@ class Team:
3181
3148
  if stream is None:
3182
3149
  stream = False if self.stream is None else self.stream
3183
3150
 
3184
- # Considering both stream_events and stream_intermediate_steps (deprecated)
3185
- if stream_intermediate_steps is not None:
3186
- warnings.warn(
3187
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
3188
- DeprecationWarning,
3189
- stacklevel=2,
3190
- )
3191
- stream_events = stream_events or stream_intermediate_steps
3192
-
3193
3151
  # Can't stream events if streaming is disabled
3194
3152
  if stream is False:
3195
3153
  stream_events = False
@@ -3252,7 +3210,7 @@ class Team:
3252
3210
  run_response.metrics = Metrics()
3253
3211
  run_response.metrics.start_timer()
3254
3212
 
3255
- yield_run_output = bool(yield_run_output or yield_run_response) # For backwards compatibility
3213
+ yield_run_output = yield_run_output
3256
3214
 
3257
3215
  if stream:
3258
3216
  return self._arun_stream( # type: ignore
@@ -3397,6 +3355,70 @@ class Team:
3397
3355
  send_media_to_model=self.send_media_to_model,
3398
3356
  compression_manager=self.compression_manager if self.compress_tool_results else None,
3399
3357
  ):
3358
+ # Handle LLM request events and compression events from ModelResponse
3359
+ if isinstance(model_response_event, ModelResponse):
3360
+ if model_response_event.event == ModelResponseEvent.model_request_started.value:
3361
+ if stream_events:
3362
+ yield handle_event( # type: ignore
3363
+ create_team_model_request_started_event(
3364
+ from_run_response=run_response,
3365
+ model=self.model.id,
3366
+ model_provider=self.model.provider,
3367
+ ),
3368
+ run_response,
3369
+ events_to_skip=self.events_to_skip,
3370
+ store_events=self.store_events,
3371
+ )
3372
+ continue
3373
+
3374
+ if model_response_event.event == ModelResponseEvent.model_request_completed.value:
3375
+ if stream_events:
3376
+ yield handle_event( # type: ignore
3377
+ create_team_model_request_completed_event(
3378
+ from_run_response=run_response,
3379
+ model=self.model.id,
3380
+ model_provider=self.model.provider,
3381
+ input_tokens=model_response_event.input_tokens,
3382
+ output_tokens=model_response_event.output_tokens,
3383
+ total_tokens=model_response_event.total_tokens,
3384
+ time_to_first_token=model_response_event.time_to_first_token,
3385
+ reasoning_tokens=model_response_event.reasoning_tokens,
3386
+ cache_read_tokens=model_response_event.cache_read_tokens,
3387
+ cache_write_tokens=model_response_event.cache_write_tokens,
3388
+ ),
3389
+ run_response,
3390
+ events_to_skip=self.events_to_skip,
3391
+ store_events=self.store_events,
3392
+ )
3393
+ continue
3394
+
3395
+ # Handle compression events
3396
+ if model_response_event.event == ModelResponseEvent.compression_started.value:
3397
+ if stream_events:
3398
+ yield handle_event( # type: ignore
3399
+ create_team_compression_started_event(from_run_response=run_response),
3400
+ run_response,
3401
+ events_to_skip=self.events_to_skip,
3402
+ store_events=self.store_events,
3403
+ )
3404
+ continue
3405
+
3406
+ if model_response_event.event == ModelResponseEvent.compression_completed.value:
3407
+ if stream_events:
3408
+ stats = model_response_event.compression_stats or {}
3409
+ yield handle_event( # type: ignore
3410
+ create_team_compression_completed_event(
3411
+ from_run_response=run_response,
3412
+ tool_results_compressed=stats.get("tool_results_compressed"),
3413
+ original_size=stats.get("original_size"),
3414
+ compressed_size=stats.get("compressed_size"),
3415
+ ),
3416
+ run_response,
3417
+ events_to_skip=self.events_to_skip,
3418
+ store_events=self.store_events,
3419
+ )
3420
+ continue
3421
+
3400
3422
  yield from self._handle_model_response_chunk(
3401
3423
  session=session,
3402
3424
  run_response=run_response,
@@ -3492,6 +3514,70 @@ class Team:
3492
3514
  compression_manager=self.compression_manager if self.compress_tool_results else None,
3493
3515
  ) # type: ignore
3494
3516
  async for model_response_event in model_stream:
3517
+ # Handle LLM request events and compression events from ModelResponse
3518
+ if isinstance(model_response_event, ModelResponse):
3519
+ if model_response_event.event == ModelResponseEvent.model_request_started.value:
3520
+ if stream_events:
3521
+ yield handle_event( # type: ignore
3522
+ create_team_model_request_started_event(
3523
+ from_run_response=run_response,
3524
+ model=self.model.id,
3525
+ model_provider=self.model.provider,
3526
+ ),
3527
+ run_response,
3528
+ events_to_skip=self.events_to_skip,
3529
+ store_events=self.store_events,
3530
+ )
3531
+ continue
3532
+
3533
+ if model_response_event.event == ModelResponseEvent.model_request_completed.value:
3534
+ if stream_events:
3535
+ yield handle_event( # type: ignore
3536
+ create_team_model_request_completed_event(
3537
+ from_run_response=run_response,
3538
+ model=self.model.id,
3539
+ model_provider=self.model.provider,
3540
+ input_tokens=model_response_event.input_tokens,
3541
+ output_tokens=model_response_event.output_tokens,
3542
+ total_tokens=model_response_event.total_tokens,
3543
+ time_to_first_token=model_response_event.time_to_first_token,
3544
+ reasoning_tokens=model_response_event.reasoning_tokens,
3545
+ cache_read_tokens=model_response_event.cache_read_tokens,
3546
+ cache_write_tokens=model_response_event.cache_write_tokens,
3547
+ ),
3548
+ run_response,
3549
+ events_to_skip=self.events_to_skip,
3550
+ store_events=self.store_events,
3551
+ )
3552
+ continue
3553
+
3554
+ # Handle compression events
3555
+ if model_response_event.event == ModelResponseEvent.compression_started.value:
3556
+ if stream_events:
3557
+ yield handle_event( # type: ignore
3558
+ create_team_compression_started_event(from_run_response=run_response),
3559
+ run_response,
3560
+ events_to_skip=self.events_to_skip,
3561
+ store_events=self.store_events,
3562
+ )
3563
+ continue
3564
+
3565
+ if model_response_event.event == ModelResponseEvent.compression_completed.value:
3566
+ if stream_events:
3567
+ stats = model_response_event.compression_stats or {}
3568
+ yield handle_event( # type: ignore
3569
+ create_team_compression_completed_event(
3570
+ from_run_response=run_response,
3571
+ tool_results_compressed=stats.get("tool_results_compressed"),
3572
+ original_size=stats.get("original_size"),
3573
+ compressed_size=stats.get("compressed_size"),
3574
+ ),
3575
+ run_response,
3576
+ events_to_skip=self.events_to_skip,
3577
+ store_events=self.store_events,
3578
+ )
3579
+ continue
3580
+
3495
3581
  for event in self._handle_model_response_chunk(
3496
3582
  session=session,
3497
3583
  run_response=run_response,
@@ -3984,7 +4070,7 @@ class Team:
3984
4070
  user_message_str is not None
3985
4071
  and user_message_str.strip() != ""
3986
4072
  and self.memory_manager is not None
3987
- and self.enable_user_memories
4073
+ and self.update_memory_on_run
3988
4074
  ):
3989
4075
  log_debug("Managing user memories")
3990
4076
  self.memory_manager.create_user_memories(
@@ -4005,7 +4091,7 @@ class Team:
4005
4091
  user_message_str is not None
4006
4092
  and user_message_str.strip() != ""
4007
4093
  and self.memory_manager is not None
4008
- and self.enable_user_memories
4094
+ and self.update_memory_on_run
4009
4095
  ):
4010
4096
  log_debug("Managing user memories")
4011
4097
  await self.memory_manager.acreate_user_memories(
@@ -4042,7 +4128,7 @@ class Team:
4042
4128
  if (
4043
4129
  run_messages.user_message is not None
4044
4130
  and self.memory_manager is not None
4045
- and self.enable_user_memories
4131
+ and self.update_memory_on_run
4046
4132
  and not self.enable_agentic_memory
4047
4133
  ):
4048
4134
  log_debug("Starting memory creation in background task.")
@@ -4074,7 +4160,7 @@ class Team:
4074
4160
  if (
4075
4161
  run_messages.user_message is not None
4076
4162
  and self.memory_manager is not None
4077
- and self.enable_user_memories
4163
+ and self.update_memory_on_run
4078
4164
  and not self.enable_agentic_memory
4079
4165
  ):
4080
4166
  log_debug("Starting memory creation in background thread.")
@@ -4497,8 +4583,6 @@ class Team:
4497
4583
  add_session_state_to_context: Optional[bool] = None,
4498
4584
  dependencies: Optional[Dict[str, Any]] = None,
4499
4585
  metadata: Optional[Dict[str, Any]] = None,
4500
- stream_events: Optional[bool] = None,
4501
- stream_intermediate_steps: Optional[bool] = None,
4502
4586
  debug_mode: Optional[bool] = None,
4503
4587
  show_message: bool = True,
4504
4588
  show_reasoning: bool = True,
@@ -4508,20 +4592,6 @@ class Team:
4508
4592
  tags_to_include_in_markdown: Optional[Set[str]] = None,
4509
4593
  **kwargs: Any,
4510
4594
  ) -> None:
4511
- if stream_events is not None:
4512
- warnings.warn(
4513
- "The 'stream_events' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the print_response function.",
4514
- DeprecationWarning,
4515
- stacklevel=2,
4516
- )
4517
-
4518
- if stream_intermediate_steps is not None:
4519
- warnings.warn(
4520
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the print_response function.",
4521
- DeprecationWarning,
4522
- stacklevel=2,
4523
- )
4524
-
4525
4595
  if self._has_async_db():
4526
4596
  raise Exception(
4527
4597
  "This method is not supported with an async DB. Please use the async version of this method."
@@ -4623,8 +4693,6 @@ class Team:
4623
4693
  add_dependencies_to_context: Optional[bool] = None,
4624
4694
  add_session_state_to_context: Optional[bool] = None,
4625
4695
  metadata: Optional[Dict[str, Any]] = None,
4626
- stream_events: Optional[bool] = None,
4627
- stream_intermediate_steps: Optional[bool] = None,
4628
4696
  debug_mode: Optional[bool] = None,
4629
4697
  show_message: bool = True,
4630
4698
  show_reasoning: bool = True,
@@ -4634,20 +4702,6 @@ class Team:
4634
4702
  tags_to_include_in_markdown: Optional[Set[str]] = None,
4635
4703
  **kwargs: Any,
4636
4704
  ) -> None:
4637
- if stream_events is not None:
4638
- warnings.warn(
4639
- "The 'stream_events' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the aprint_response function.",
4640
- DeprecationWarning,
4641
- stacklevel=2,
4642
- )
4643
-
4644
- if stream_intermediate_steps is not None:
4645
- warnings.warn(
4646
- "The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the aprint_response function.",
4647
- DeprecationWarning,
4648
- stacklevel=2,
4649
- )
4650
-
4651
4705
  if not tags_to_include_in_markdown:
4652
4706
  tags_to_include_in_markdown = {"think", "thinking"}
4653
4707
 
@@ -4861,40 +4915,56 @@ class Team:
4861
4915
  # Helpers
4862
4916
  ###########################################################################
4863
4917
 
4864
- def _handle_reasoning(self, run_response: TeamRunOutput, run_messages: RunMessages):
4918
+ def _handle_reasoning(
4919
+ self, run_response: TeamRunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
4920
+ ) -> None:
4865
4921
  if self.reasoning or self.reasoning_model is not None:
4866
4922
  reasoning_generator = self._reason(
4867
- run_response=run_response, run_messages=run_messages, stream_events=False
4923
+ run_response=run_response, run_messages=run_messages, run_context=run_context, stream_events=False
4868
4924
  )
4869
4925
 
4870
4926
  # Consume the generator without yielding
4871
4927
  deque(reasoning_generator, maxlen=0)
4872
4928
 
4873
4929
  def _handle_reasoning_stream(
4874
- self, run_response: TeamRunOutput, run_messages: RunMessages, stream_events: bool
4930
+ self,
4931
+ run_response: TeamRunOutput,
4932
+ run_messages: RunMessages,
4933
+ run_context: Optional[RunContext] = None,
4934
+ stream_events: bool = False,
4875
4935
  ) -> Iterator[TeamRunOutputEvent]:
4876
4936
  if self.reasoning or self.reasoning_model is not None:
4877
4937
  reasoning_generator = self._reason(
4878
4938
  run_response=run_response,
4879
4939
  run_messages=run_messages,
4940
+ run_context=run_context,
4880
4941
  stream_events=stream_events,
4881
4942
  )
4882
4943
  yield from reasoning_generator
4883
4944
 
4884
- async def _ahandle_reasoning(self, run_response: TeamRunOutput, run_messages: RunMessages) -> None:
4945
+ async def _ahandle_reasoning(
4946
+ self, run_response: TeamRunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
4947
+ ) -> None:
4885
4948
  if self.reasoning or self.reasoning_model is not None:
4886
- reason_generator = self._areason(run_response=run_response, run_messages=run_messages, stream_events=False)
4949
+ reason_generator = self._areason(
4950
+ run_response=run_response, run_messages=run_messages, run_context=run_context, stream_events=False
4951
+ )
4887
4952
  # Consume the generator without yielding
4888
4953
  async for _ in reason_generator:
4889
4954
  pass
4890
4955
 
4891
4956
  async def _ahandle_reasoning_stream(
4892
- self, run_response: TeamRunOutput, run_messages: RunMessages, stream_events: bool
4957
+ self,
4958
+ run_response: TeamRunOutput,
4959
+ run_messages: RunMessages,
4960
+ run_context: Optional[RunContext] = None,
4961
+ stream_events: bool = False,
4893
4962
  ) -> AsyncIterator[TeamRunOutputEvent]:
4894
4963
  if self.reasoning or self.reasoning_model is not None:
4895
4964
  reason_generator = self._areason(
4896
4965
  run_response=run_response,
4897
4966
  run_messages=run_messages,
4967
+ run_context=run_context,
4898
4968
  stream_events=stream_events,
4899
4969
  )
4900
4970
  async for item in reason_generator:
@@ -4938,14 +5008,6 @@ class Team:
4938
5008
  if session.session_data is not None:
4939
5009
  session.session_data["session_metrics"] = session_metrics
4940
5010
 
4941
- def _get_reasoning_agent(self, reasoning_model: Model) -> Optional[Agent]:
4942
- return Agent(
4943
- model=reasoning_model,
4944
- telemetry=self.telemetry,
4945
- debug_mode=self.debug_mode,
4946
- debug_level=self.debug_level,
4947
- )
4948
-
4949
5011
  def _format_reasoning_step_content(self, run_response: TeamRunOutput, reasoning_step: ReasoningStep) -> str:
4950
5012
  """Format content for a reasoning step without changing any existing logic."""
4951
5013
  step_content = ""
@@ -5054,7 +5116,8 @@ class Team:
5054
5116
  self,
5055
5117
  run_response: TeamRunOutput,
5056
5118
  run_messages: RunMessages,
5057
- stream_events: bool,
5119
+ run_context: Optional[RunContext] = None,
5120
+ stream_events: bool = False,
5058
5121
  ) -> Iterator[TeamRunOutputEvent]:
5059
5122
  """
5060
5123
  Run reasoning using the ReasoningManager.
@@ -5084,9 +5147,7 @@ class Team:
5084
5147
  telemetry=self.telemetry,
5085
5148
  debug_mode=self.debug_mode,
5086
5149
  debug_level=self.debug_level,
5087
- session_state=self.session_state,
5088
- dependencies=self.dependencies,
5089
- metadata=self.metadata,
5150
+ run_context=run_context,
5090
5151
  )
5091
5152
  )
5092
5153
 
@@ -5098,7 +5159,8 @@ class Team:
5098
5159
  self,
5099
5160
  run_response: TeamRunOutput,
5100
5161
  run_messages: RunMessages,
5101
- stream_events: bool,
5162
+ run_context: Optional[RunContext] = None,
5163
+ stream_events: bool = False,
5102
5164
  ) -> AsyncIterator[TeamRunOutputEvent]:
5103
5165
  """
5104
5166
  Run reasoning asynchronously using the ReasoningManager.
@@ -5128,9 +5190,7 @@ class Team:
5128
5190
  telemetry=self.telemetry,
5129
5191
  debug_mode=self.debug_mode,
5130
5192
  debug_level=self.debug_level,
5131
- session_state=self.session_state,
5132
- dependencies=self.dependencies,
5133
- metadata=self.metadata,
5193
+ run_context=run_context,
5134
5194
  )
5135
5195
  )
5136
5196
 
@@ -5286,36 +5346,20 @@ class Team:
5286
5346
  )
5287
5347
  )
5288
5348
 
5289
- if self.knowledge is not None or self.knowledge_retriever is not None:
5290
- # Check if knowledge retriever is an async function but used in sync mode
5291
- from inspect import iscoroutinefunction
5292
-
5293
- if self.knowledge_retriever is not None and iscoroutinefunction(self.knowledge_retriever):
5294
- log_warning(
5295
- "Async knowledge retriever function is being used with synchronous agent.run() or agent.print_response(). "
5296
- "It is recommended to use agent.arun() or agent.aprint_response() instead."
5349
+ # Add tools for accessing knowledge
5350
+ if self.knowledge is not None and self.search_knowledge:
5351
+ # Use knowledge protocol's get_tools method
5352
+ get_tools_fn = getattr(self.knowledge, "get_tools", None)
5353
+ if callable(get_tools_fn):
5354
+ knowledge_tools = get_tools_fn(
5355
+ run_response=run_response,
5356
+ run_context=run_context,
5357
+ knowledge_filters=run_context.knowledge_filters,
5358
+ async_mode=async_mode,
5359
+ enable_agentic_filters=self.enable_agentic_knowledge_filters,
5360
+ agent=self,
5297
5361
  )
5298
-
5299
- if self.search_knowledge:
5300
- # Use async or sync search based on async_mode
5301
- if self.enable_agentic_knowledge_filters:
5302
- _tools.append(
5303
- self._get_search_knowledge_base_with_agentic_filters_function(
5304
- run_response=run_response,
5305
- knowledge_filters=run_context.knowledge_filters,
5306
- async_mode=async_mode,
5307
- run_context=run_context,
5308
- )
5309
- )
5310
- else:
5311
- _tools.append(
5312
- self._get_search_knowledge_base_function(
5313
- run_response=run_response,
5314
- knowledge_filters=run_context.knowledge_filters,
5315
- async_mode=async_mode,
5316
- run_context=run_context,
5317
- )
5318
- )
5362
+ _tools.extend(knowledge_tools)
5319
5363
 
5320
5364
  if self.knowledge is not None and self.update_knowledge:
5321
5365
  _tools.append(self.add_to_knowledge)
@@ -5394,8 +5438,10 @@ class Team:
5394
5438
  _func = _func.model_copy(deep=True)
5395
5439
 
5396
5440
  _func._team = self
5397
- _func.process_entrypoint(strict=strict)
5398
- if strict:
5441
+ # Respect the function's explicit strict setting if set
5442
+ effective_strict = strict if _func.strict is None else _func.strict
5443
+ _func.process_entrypoint(strict=effective_strict)
5444
+ if strict and _func.strict is None:
5399
5445
  _func.strict = True
5400
5446
  if self.tool_hooks:
5401
5447
  _func.tool_hooks = self.tool_hooks
@@ -5414,7 +5460,9 @@ class Team:
5414
5460
  _function_names.append(tool.name)
5415
5461
  tool = tool.model_copy(deep=True)
5416
5462
  tool._team = self
5417
- tool.process_entrypoint(strict=strict)
5463
+ # Respect the function's explicit strict setting if set
5464
+ effective_strict = strict if tool.strict is None else tool.strict
5465
+ tool.process_entrypoint(strict=effective_strict)
5418
5466
  if strict and tool.strict is None:
5419
5467
  tool.strict = True
5420
5468
  if self.tool_hooks:
@@ -5466,8 +5514,6 @@ class Team:
5466
5514
  for func in _functions: # type: ignore
5467
5515
  if isinstance(func, Function):
5468
5516
  func._run_context = run_context
5469
- func._session_state = run_context.session_state
5470
- func._dependencies = run_context.dependencies
5471
5517
  func._images = joint_images
5472
5518
  func._files = joint_files
5473
5519
  func._audios = joint_audios
@@ -5515,16 +5561,12 @@ class Team:
5515
5561
  self,
5516
5562
  session: TeamSession,
5517
5563
  run_context: Optional[RunContext] = None,
5518
- user_id: Optional[str] = None,
5519
5564
  audio: Optional[Sequence[Audio]] = None,
5520
5565
  images: Optional[Sequence[Image]] = None,
5521
5566
  videos: Optional[Sequence[Video]] = None,
5522
5567
  files: Optional[Sequence[File]] = None,
5523
5568
  tools: Optional[List[Union[Function, dict]]] = None,
5524
5569
  add_session_state_to_context: Optional[bool] = None,
5525
- session_state: Optional[Dict[str, Any]] = None, # Deprecated
5526
- dependencies: Optional[Dict[str, Any]] = None, # Deprecated
5527
- metadata: Optional[Dict[str, Any]] = None, # Deprecated
5528
5570
  ) -> Optional[Message]:
5529
5571
  """Get the system message for the team.
5530
5572
 
@@ -5533,11 +5575,9 @@ class Team:
5533
5575
  3. Build and return the default system message for the Team.
5534
5576
  """
5535
5577
 
5536
- # Consider both run_context and session_state, dependencies, metadata (deprecated fields)
5537
- if run_context is not None:
5538
- session_state = run_context.session_state or session_state
5539
- dependencies = run_context.dependencies or dependencies
5540
- metadata = run_context.metadata or metadata
5578
+ # Extract values from run_context
5579
+ session_state = run_context.session_state if run_context else None
5580
+ user_id = run_context.user_id if run_context else None
5541
5581
 
5542
5582
  # Get output_schema from run_context
5543
5583
  output_schema = run_context.output_schema if run_context else None
@@ -5565,10 +5605,7 @@ class Team:
5565
5605
  if self.resolve_in_context:
5566
5606
  sys_message_content = self._format_message_with_state_variables(
5567
5607
  sys_message_content,
5568
- user_id=user_id,
5569
- session_state=session_state,
5570
- dependencies=dependencies,
5571
- metadata=metadata,
5608
+ run_context=run_context,
5572
5609
  )
5573
5610
 
5574
5611
  # type: ignore
@@ -5638,26 +5675,15 @@ class Team:
5638
5675
  if self.name is not None and self.add_name_to_context:
5639
5676
  additional_information.append(f"Your name is: {self.name}.")
5640
5677
 
5641
- if self.knowledge is not None and self.enable_agentic_knowledge_filters:
5642
- valid_filters = self.knowledge.get_valid_filters()
5643
- if valid_filters:
5644
- valid_filters_str = ", ".join(valid_filters)
5645
- additional_information.append(
5646
- dedent(f"""
5647
- The knowledge base contains documents with these metadata filters: {valid_filters_str}.
5648
- Always use filters when the user query indicates specific metadata.
5649
- Examples:
5650
- 1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
5651
- 2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
5652
- 4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
5653
- General Guidelines:
5654
- - Always analyze the user query to identify relevant metadata.
5655
- - Use the most specific filter(s) possible to narrow down results.
5656
- - If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
5657
- - Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
5658
- You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
5659
- """)
5678
+ # Add knowledge context using protocol's build_context
5679
+ if self.knowledge is not None:
5680
+ build_context_fn = getattr(self.knowledge, "build_context", None)
5681
+ if callable(build_context_fn):
5682
+ knowledge_context = build_context_fn(
5683
+ enable_agentic_filters=self.enable_agentic_knowledge_filters,
5660
5684
  )
5685
+ if knowledge_context:
5686
+ additional_information.append(knowledge_context)
5661
5687
 
5662
5688
  # 2 Build the default system message for the Agent.
5663
5689
  system_message_content: str = ""
@@ -5768,15 +5794,22 @@ class Team:
5768
5794
  if self.role is not None:
5769
5795
  system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
5770
5796
 
5771
- # 3.3.5 Then add instructions for the Agent
5797
+ # 3.3.5 Then add instructions for the Team
5772
5798
  if len(instructions) > 0:
5773
- system_message_content += "<instructions>"
5774
- if len(instructions) > 1:
5775
- for _upi in instructions:
5776
- system_message_content += f"\n- {_upi}"
5799
+ if self.use_instruction_tags:
5800
+ system_message_content += "<instructions>"
5801
+ if len(instructions) > 1:
5802
+ for _upi in instructions:
5803
+ system_message_content += f"\n- {_upi}"
5804
+ else:
5805
+ system_message_content += "\n" + instructions[0]
5806
+ system_message_content += "\n</instructions>\n\n"
5777
5807
  else:
5778
- system_message_content += "\n" + instructions[0]
5779
- system_message_content += "\n</instructions>\n\n"
5808
+ if len(instructions) > 1:
5809
+ for _upi in instructions:
5810
+ system_message_content += f"- {_upi}\n"
5811
+ else:
5812
+ system_message_content += instructions[0] + "\n\n"
5780
5813
  # 3.3.6 Add additional information
5781
5814
  if len(additional_information) > 0:
5782
5815
  system_message_content += "<additional_information>"
@@ -5792,10 +5825,7 @@ class Team:
5792
5825
  if self.resolve_in_context:
5793
5826
  system_message_content = self._format_message_with_state_variables(
5794
5827
  system_message_content,
5795
- user_id=user_id,
5796
- session_state=session_state,
5797
- dependencies=dependencies,
5798
- metadata=metadata,
5828
+ run_context=run_context,
5799
5829
  )
5800
5830
 
5801
5831
  system_message_from_model = self.model.get_system_message_for_model(tools)
@@ -5832,24 +5862,18 @@ class Team:
5832
5862
  self,
5833
5863
  session: TeamSession,
5834
5864
  run_context: Optional[RunContext] = None,
5835
- user_id: Optional[str] = None,
5836
5865
  audio: Optional[Sequence[Audio]] = None,
5837
5866
  images: Optional[Sequence[Image]] = None,
5838
5867
  videos: Optional[Sequence[Video]] = None,
5839
5868
  files: Optional[Sequence[File]] = None,
5840
5869
  tools: Optional[List[Union[Function, dict]]] = None,
5841
5870
  add_session_state_to_context: Optional[bool] = None,
5842
- session_state: Optional[Dict[str, Any]] = None, # Deprecated
5843
- dependencies: Optional[Dict[str, Any]] = None, # Deprecated
5844
- metadata: Optional[Dict[str, Any]] = None, # Deprecated
5845
5871
  ) -> Optional[Message]:
5846
5872
  """Get the system message for the team."""
5847
5873
 
5848
- # Consider both run_context and session_state, dependencies, metadata (deprecated fields)
5849
- if run_context is not None:
5850
- session_state = run_context.session_state or session_state
5851
- dependencies = run_context.dependencies or dependencies
5852
- metadata = run_context.metadata or metadata
5874
+ # Extract values from run_context
5875
+ session_state = run_context.session_state if run_context else None
5876
+ user_id = run_context.user_id if run_context else None
5853
5877
 
5854
5878
  # Get output_schema from run_context
5855
5879
  output_schema = run_context.output_schema if run_context else None
@@ -5877,10 +5901,7 @@ class Team:
5877
5901
  if self.resolve_in_context:
5878
5902
  sys_message_content = self._format_message_with_state_variables(
5879
5903
  sys_message_content,
5880
- user_id=user_id,
5881
- session_state=session_state,
5882
- dependencies=dependencies,
5883
- metadata=metadata,
5904
+ run_context=run_context,
5884
5905
  )
5885
5906
 
5886
5907
  # type: ignore
@@ -5950,26 +5971,15 @@ class Team:
5950
5971
  if self.name is not None and self.add_name_to_context:
5951
5972
  additional_information.append(f"Your name is: {self.name}.")
5952
5973
 
5953
- if self.knowledge is not None and self.enable_agentic_knowledge_filters:
5954
- valid_filters = await self.knowledge.async_get_valid_filters()
5955
- if valid_filters:
5956
- valid_filters_str = ", ".join(valid_filters)
5957
- additional_information.append(
5958
- dedent(f"""
5959
- The knowledge base contains documents with these metadata filters: {valid_filters_str}.
5960
- Always use filters when the user query indicates specific metadata.
5961
- Examples:
5962
- 1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
5963
- 2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
5964
- 4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
5965
- General Guidelines:
5966
- - Always analyze the user query to identify relevant metadata.
5967
- - Use the most specific filter(s) possible to narrow down results.
5968
- - If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
5969
- - Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
5970
- You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
5971
- """)
5974
+ # Add knowledge context using protocol's build_context
5975
+ if self.knowledge is not None:
5976
+ build_context_fn = getattr(self.knowledge, "build_context", None)
5977
+ if callable(build_context_fn):
5978
+ knowledge_context = build_context_fn(
5979
+ enable_agentic_filters=self.enable_agentic_knowledge_filters,
5972
5980
  )
5981
+ if knowledge_context:
5982
+ additional_information.append(knowledge_context)
5973
5983
 
5974
5984
  # 2 Build the default system message for the Agent.
5975
5985
  system_message_content: str = ""
@@ -6085,15 +6095,22 @@ class Team:
6085
6095
  if self.role is not None:
6086
6096
  system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
6087
6097
 
6088
- # 3.3.5 Then add instructions for the Agent
6098
+ # 3.3.5 Then add instructions for the Team
6089
6099
  if len(instructions) > 0:
6090
- system_message_content += "<instructions>"
6091
- if len(instructions) > 1:
6092
- for _upi in instructions:
6093
- system_message_content += f"\n- {_upi}"
6100
+ if self.use_instruction_tags:
6101
+ system_message_content += "<instructions>"
6102
+ if len(instructions) > 1:
6103
+ for _upi in instructions:
6104
+ system_message_content += f"\n- {_upi}"
6105
+ else:
6106
+ system_message_content += "\n" + instructions[0]
6107
+ system_message_content += "\n</instructions>\n\n"
6094
6108
  else:
6095
- system_message_content += "\n" + instructions[0]
6096
- system_message_content += "\n</instructions>\n\n"
6109
+ if len(instructions) > 1:
6110
+ for _upi in instructions:
6111
+ system_message_content += f"- {_upi}\n"
6112
+ else:
6113
+ system_message_content += instructions[0] + "\n\n"
6097
6114
  # 3.3.6 Add additional information
6098
6115
  if len(additional_information) > 0:
6099
6116
  system_message_content += "<additional_information>"
@@ -6109,10 +6126,7 @@ class Team:
6109
6126
  if self.resolve_in_context:
6110
6127
  system_message_content = self._format_message_with_state_variables(
6111
6128
  system_message_content,
6112
- user_id=user_id,
6113
- session_state=session_state,
6114
- dependencies=dependencies,
6115
- metadata=metadata,
6129
+ run_context=run_context,
6116
6130
  )
6117
6131
 
6118
6132
  system_message_from_model = self.model.get_system_message_for_model(tools)
@@ -6186,7 +6200,6 @@ class Team:
6186
6200
  system_message = self.get_system_message(
6187
6201
  session=session,
6188
6202
  run_context=run_context,
6189
- user_id=user_id,
6190
6203
  images=images,
6191
6204
  audio=audio,
6192
6205
  videos=videos,
@@ -6319,7 +6332,6 @@ class Team:
6319
6332
  system_message = await self.aget_system_message(
6320
6333
  session=session,
6321
6334
  run_context=run_context,
6322
- user_id=user_id,
6323
6335
  images=images,
6324
6336
  audio=audio,
6325
6337
  videos=videos,
@@ -6419,7 +6431,6 @@ class Team:
6419
6431
  run_response: TeamRunOutput,
6420
6432
  run_context: RunContext,
6421
6433
  input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
6422
- user_id: Optional[str] = None,
6423
6434
  audio: Optional[Sequence[Audio]] = None,
6424
6435
  images: Optional[Sequence[Image]] = None,
6425
6436
  videos: Optional[Sequence[Video]] = None,
@@ -6531,10 +6542,7 @@ class Team:
6531
6542
  if self.resolve_in_context:
6532
6543
  user_msg_content = self._format_message_with_state_variables(
6533
6544
  user_msg_content,
6534
- user_id=user_id,
6535
- session_state=run_context.session_state,
6536
- dependencies=run_context.dependencies,
6537
- metadata=run_context.metadata,
6545
+ run_context=run_context,
6538
6546
  )
6539
6547
 
6540
6548
  # Convert to string for concatenation operations
@@ -6577,7 +6585,6 @@ class Team:
6577
6585
  run_response: TeamRunOutput,
6578
6586
  run_context: RunContext,
6579
6587
  input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
6580
- user_id: Optional[str] = None,
6581
6588
  audio: Optional[Sequence[Audio]] = None,
6582
6589
  images: Optional[Sequence[Image]] = None,
6583
6590
  videos: Optional[Sequence[Video]] = None,
@@ -6689,10 +6696,7 @@ class Team:
6689
6696
  if self.resolve_in_context:
6690
6697
  user_msg_content = self._format_message_with_state_variables(
6691
6698
  user_msg_content,
6692
- user_id=user_id,
6693
- session_state=run_context.session_state,
6694
- dependencies=run_context.dependencies,
6695
- metadata=run_context.metadata,
6699
+ run_context=run_context,
6696
6700
  )
6697
6701
 
6698
6702
  # Convert to string for concatenation operations
@@ -6802,17 +6806,21 @@ class Team:
6802
6806
  def _format_message_with_state_variables(
6803
6807
  self,
6804
6808
  message: Any,
6805
- session_state: Optional[Dict[str, Any]] = None,
6806
- dependencies: Optional[Dict[str, Any]] = None,
6807
- metadata: Optional[Dict[str, Any]] = None,
6808
- user_id: Optional[str] = None,
6809
+ run_context: Optional[RunContext] = None,
6809
6810
  ) -> Any:
6810
- """Format a message with the session state variables."""
6811
+ """Format a message with the session state variables from run_context."""
6811
6812
  import re
6812
6813
  import string
6813
6814
 
6814
6815
  if not isinstance(message, str):
6815
6816
  return message
6817
+
6818
+ # Extract values from run_context
6819
+ session_state = run_context.session_state if run_context else None
6820
+ dependencies = run_context.dependencies if run_context else None
6821
+ metadata = run_context.metadata if run_context else None
6822
+ user_id = run_context.user_id if run_context else None
6823
+
6816
6824
  # Should already be resolved and passed from run() method
6817
6825
  format_variables = ChainMap(
6818
6826
  session_state if session_state is not None else {},
@@ -8217,6 +8225,708 @@ class Team:
8217
8225
  # Update the current metadata with the metadata from the database which is updated in place
8218
8226
  self.metadata = session.metadata
8219
8227
 
8228
+ # -*- Serialization Functions
8229
+ def to_dict(self) -> Dict[str, Any]:
8230
+ """
8231
+ Convert the Team to a dictionary.
8232
+
8233
+ Returns:
8234
+ Dict[str, Any]: Dictionary representation of the team configuration
8235
+ """
8236
+ config: Dict[str, Any] = {}
8237
+
8238
+ # --- Team Settings ---
8239
+ if self.id is not None:
8240
+ config["id"] = self.id
8241
+ if self.name is not None:
8242
+ config["name"] = self.name
8243
+ if self.role is not None:
8244
+ config["role"] = self.role
8245
+ if self.description is not None:
8246
+ config["description"] = self.description
8247
+
8248
+ # --- Model ---
8249
+ if self.model is not None:
8250
+ config["model"] = self.model.to_dict() if isinstance(self.model, Model) else str(self.model)
8251
+
8252
+ # --- Members ---
8253
+ if self.members:
8254
+ serialized_members = []
8255
+ for member in self.members:
8256
+ if isinstance(member, Agent):
8257
+ serialized_members.append({"type": "agent", "agent_id": member.id})
8258
+ elif isinstance(member, Team):
8259
+ serialized_members.append({"type": "team", "team_id": member.id})
8260
+ if serialized_members:
8261
+ config["members"] = serialized_members
8262
+
8263
+ # --- Execution settings (only if non-default) ---
8264
+ if self.respond_directly:
8265
+ config["respond_directly"] = self.respond_directly
8266
+ if self.delegate_to_all_members:
8267
+ config["delegate_to_all_members"] = self.delegate_to_all_members
8268
+ if not self.determine_input_for_members: # default is True
8269
+ config["determine_input_for_members"] = self.determine_input_for_members
8270
+
8271
+ # --- User settings ---
8272
+ if self.user_id is not None:
8273
+ config["user_id"] = self.user_id
8274
+
8275
+ # --- Session settings ---
8276
+ if self.session_id is not None:
8277
+ config["session_id"] = self.session_id
8278
+ if self.session_state is not None:
8279
+ config["session_state"] = self.session_state
8280
+ if self.add_session_state_to_context:
8281
+ config["add_session_state_to_context"] = self.add_session_state_to_context
8282
+ if self.enable_agentic_state:
8283
+ config["enable_agentic_state"] = self.enable_agentic_state
8284
+ if self.overwrite_db_session_state:
8285
+ config["overwrite_db_session_state"] = self.overwrite_db_session_state
8286
+ if self.cache_session:
8287
+ config["cache_session"] = self.cache_session
8288
+
8289
+ # --- Team history settings ---
8290
+ if self.add_team_history_to_members:
8291
+ config["add_team_history_to_members"] = self.add_team_history_to_members
8292
+ if self.num_team_history_runs != 3: # default is 3
8293
+ config["num_team_history_runs"] = self.num_team_history_runs
8294
+ if self.share_member_interactions:
8295
+ config["share_member_interactions"] = self.share_member_interactions
8296
+ if self.search_session_history:
8297
+ config["search_session_history"] = self.search_session_history
8298
+ if self.num_history_sessions is not None:
8299
+ config["num_history_sessions"] = self.num_history_sessions
8300
+ if self.read_chat_history:
8301
+ config["read_chat_history"] = self.read_chat_history
8302
+
8303
+ # --- System message settings ---
8304
+ if self.system_message is not None and isinstance(self.system_message, str):
8305
+ config["system_message"] = self.system_message
8306
+ if self.system_message_role != "system": # default is "system"
8307
+ config["system_message_role"] = self.system_message_role
8308
+ if self.introduction is not None:
8309
+ config["introduction"] = self.introduction
8310
+ if self.instructions is not None and not callable(self.instructions):
8311
+ config["instructions"] = self.instructions
8312
+ if self.expected_output is not None:
8313
+ config["expected_output"] = self.expected_output
8314
+ if self.additional_context is not None:
8315
+ config["additional_context"] = self.additional_context
8316
+
8317
+ # --- Context settings ---
8318
+ if self.markdown:
8319
+ config["markdown"] = self.markdown
8320
+ if self.add_datetime_to_context:
8321
+ config["add_datetime_to_context"] = self.add_datetime_to_context
8322
+ if self.add_location_to_context:
8323
+ config["add_location_to_context"] = self.add_location_to_context
8324
+ if self.timezone_identifier is not None:
8325
+ config["timezone_identifier"] = self.timezone_identifier
8326
+ if self.add_name_to_context:
8327
+ config["add_name_to_context"] = self.add_name_to_context
8328
+ if self.add_member_tools_to_context:
8329
+ config["add_member_tools_to_context"] = self.add_member_tools_to_context
8330
+ if not self.resolve_in_context: # default is True
8331
+ config["resolve_in_context"] = self.resolve_in_context
8332
+
8333
+ # --- Database settings ---
8334
+ if self.db is not None and hasattr(self.db, "to_dict"):
8335
+ config["db"] = self.db.to_dict()
8336
+
8337
+ # --- Dependencies ---
8338
+ if self.dependencies is not None:
8339
+ config["dependencies"] = self.dependencies
8340
+ if self.add_dependencies_to_context:
8341
+ config["add_dependencies_to_context"] = self.add_dependencies_to_context
8342
+
8343
+ # --- Knowledge settings ---
8344
+ # TODO: implement knowledge serialization
8345
+ # if self.knowledge is not None:
8346
+ # config["knowledge"] = self.knowledge.to_dict()
8347
+ if self.knowledge_filters is not None:
8348
+ config["knowledge_filters"] = self.knowledge_filters
8349
+ if self.enable_agentic_knowledge_filters:
8350
+ config["enable_agentic_knowledge_filters"] = self.enable_agentic_knowledge_filters
8351
+ if self.update_knowledge:
8352
+ config["update_knowledge"] = self.update_knowledge
8353
+ if self.add_knowledge_to_context:
8354
+ config["add_knowledge_to_context"] = self.add_knowledge_to_context
8355
+ if not self.search_knowledge: # default is True
8356
+ config["search_knowledge"] = self.search_knowledge
8357
+ if self.references_format != "json": # default is "json"
8358
+ config["references_format"] = self.references_format
8359
+
8360
+ # --- Tools ---
8361
+ if self.tools:
8362
+ serialized_tools = []
8363
+ for tool in self.tools:
8364
+ try:
8365
+ if isinstance(tool, Function):
8366
+ serialized_tools.append(tool.to_dict())
8367
+ elif isinstance(tool, Toolkit):
8368
+ for func in tool.functions.values():
8369
+ serialized_tools.append(func.to_dict())
8370
+ elif callable(tool):
8371
+ func = Function.from_callable(tool)
8372
+ serialized_tools.append(func.to_dict())
8373
+ except Exception as e:
8374
+ log_warning(f"Could not serialize tool {tool}: {e}")
8375
+ if serialized_tools:
8376
+ config["tools"] = serialized_tools
8377
+ if self.tool_choice is not None:
8378
+ config["tool_choice"] = self.tool_choice
8379
+ if self.tool_call_limit is not None:
8380
+ config["tool_call_limit"] = self.tool_call_limit
8381
+ if self.get_member_information_tool:
8382
+ config["get_member_information_tool"] = self.get_member_information_tool
8383
+
8384
+ # --- Schema settings ---
8385
+ if self.input_schema is not None:
8386
+ if issubclass(self.input_schema, BaseModel):
8387
+ config["input_schema"] = self.input_schema.__name__
8388
+ elif isinstance(self.input_schema, dict):
8389
+ config["input_schema"] = self.input_schema
8390
+ if self.output_schema is not None:
8391
+ if isinstance(self.output_schema, type) and issubclass(self.output_schema, BaseModel):
8392
+ config["output_schema"] = self.output_schema.__name__
8393
+ elif isinstance(self.output_schema, dict):
8394
+ config["output_schema"] = self.output_schema
8395
+
8396
+ # --- Parser and output settings ---
8397
+ if self.parser_model is not None:
8398
+ if isinstance(self.parser_model, Model):
8399
+ config["parser_model"] = self.parser_model.to_dict()
8400
+ else:
8401
+ config["parser_model"] = str(self.parser_model)
8402
+ if self.parser_model_prompt is not None:
8403
+ config["parser_model_prompt"] = self.parser_model_prompt
8404
+ if self.output_model is not None:
8405
+ if isinstance(self.output_model, Model):
8406
+ config["output_model"] = self.output_model.to_dict()
8407
+ else:
8408
+ config["output_model"] = str(self.output_model)
8409
+ if self.output_model_prompt is not None:
8410
+ config["output_model_prompt"] = self.output_model_prompt
8411
+ if self.use_json_mode:
8412
+ config["use_json_mode"] = self.use_json_mode
8413
+ if not self.parse_response: # default is True
8414
+ config["parse_response"] = self.parse_response
8415
+
8416
+ # --- Memory settings ---
8417
+ # TODO: implement memory manager serialization
8418
+ # if self.memory_manager is not None:
8419
+ # config["memory_manager"] = self.memory_manager.to_dict()
8420
+ if self.enable_agentic_memory:
8421
+ config["enable_agentic_memory"] = self.enable_agentic_memory
8422
+ if self.enable_user_memories:
8423
+ config["enable_user_memories"] = self.enable_user_memories
8424
+ if self.add_memories_to_context is not None:
8425
+ config["add_memories_to_context"] = self.add_memories_to_context
8426
+ if self.enable_session_summaries:
8427
+ config["enable_session_summaries"] = self.enable_session_summaries
8428
+ if self.add_session_summary_to_context is not None:
8429
+ config["add_session_summary_to_context"] = self.add_session_summary_to_context
8430
+ # TODO: implement session summary manager serialization
8431
+ # if self.session_summary_manager is not None:
8432
+ # config["session_summary_manager"] = self.session_summary_manager.to_dict()
8433
+
8434
+ # --- History settings ---
8435
+ if self.add_history_to_context:
8436
+ config["add_history_to_context"] = self.add_history_to_context
8437
+ if self.num_history_runs is not None:
8438
+ config["num_history_runs"] = self.num_history_runs
8439
+ if self.num_history_messages is not None:
8440
+ config["num_history_messages"] = self.num_history_messages
8441
+ if self.max_tool_calls_from_history is not None:
8442
+ config["max_tool_calls_from_history"] = self.max_tool_calls_from_history
8443
+
8444
+ # --- Media/storage settings ---
8445
+ if not self.send_media_to_model: # default is True
8446
+ config["send_media_to_model"] = self.send_media_to_model
8447
+ if not self.store_media: # default is True
8448
+ config["store_media"] = self.store_media
8449
+ if not self.store_tool_messages: # default is True
8450
+ config["store_tool_messages"] = self.store_tool_messages
8451
+ if not self.store_history_messages: # default is True
8452
+ config["store_history_messages"] = self.store_history_messages
8453
+
8454
+ # --- Compression settings ---
8455
+ if self.compress_tool_results:
8456
+ config["compress_tool_results"] = self.compress_tool_results
8457
+ # TODO: implement compression manager serialization
8458
+ # if self.compression_manager is not None:
8459
+ # config["compression_manager"] = self.compression_manager.to_dict()
8460
+
8461
+ # --- Reasoning settings ---
8462
+ if self.reasoning:
8463
+ config["reasoning"] = self.reasoning
8464
+ # TODO: implement reasoning model serialization
8465
+ # if self.reasoning_model is not None:
8466
+ # config["reasoning_model"] = self.reasoning_model.to_dict() if isinstance(self.reasoning_model, Model) else str(self.reasoning_model)
8467
+ if self.reasoning_min_steps != 1: # default is 1
8468
+ config["reasoning_min_steps"] = self.reasoning_min_steps
8469
+ if self.reasoning_max_steps != 10: # default is 10
8470
+ config["reasoning_max_steps"] = self.reasoning_max_steps
8471
+
8472
+ # --- Streaming settings ---
8473
+ if self.stream is not None:
8474
+ config["stream"] = self.stream
8475
+ if self.stream_events is not None:
8476
+ config["stream_events"] = self.stream_events
8477
+ if not self.stream_member_events: # default is True
8478
+ config["stream_member_events"] = self.stream_member_events
8479
+ if self.store_events:
8480
+ config["store_events"] = self.store_events
8481
+ if self.store_member_responses:
8482
+ config["store_member_responses"] = self.store_member_responses
8483
+
8484
+ # --- Retry settings ---
8485
+ if self.retries > 0:
8486
+ config["retries"] = self.retries
8487
+ if self.delay_between_retries != 1: # default is 1
8488
+ config["delay_between_retries"] = self.delay_between_retries
8489
+ if self.exponential_backoff:
8490
+ config["exponential_backoff"] = self.exponential_backoff
8491
+
8492
+ # --- Metadata ---
8493
+ if self.metadata is not None:
8494
+ config["metadata"] = self.metadata
8495
+
8496
+ # --- Debug and telemetry settings ---
8497
+ if self.debug_mode:
8498
+ config["debug_mode"] = self.debug_mode
8499
+ if self.debug_level != 1: # default is 1
8500
+ config["debug_level"] = self.debug_level
8501
+ if self.show_members_responses:
8502
+ config["show_members_responses"] = self.show_members_responses
8503
+ if not self.telemetry: # default is True
8504
+ config["telemetry"] = self.telemetry
8505
+
8506
+ return config
8507
+
8508
+ @classmethod
8509
+ def from_dict(
8510
+ cls,
8511
+ data: Dict[str, Any],
8512
+ db: Optional["BaseDb"] = None,
8513
+ registry: Optional["Registry"] = None,
8514
+ ) -> "Team":
8515
+ """
8516
+ Create a Team from a dictionary.
8517
+
8518
+ Args:
8519
+ data: Dictionary containing team configuration
8520
+ db: Optional database for loading agents in members
8521
+ registry: Optional registry for rehydrating tools
8522
+
8523
+ Returns:
8524
+ Team: Reconstructed team instance
8525
+ """
8526
+ config = data.copy()
8527
+
8528
+ # --- Handle Model reconstruction ---
8529
+ if "model" in config:
8530
+ model_data = config["model"]
8531
+ if isinstance(model_data, dict) and "id" in model_data:
8532
+ config["model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
8533
+ elif isinstance(model_data, str):
8534
+ config["model"] = get_model(model_data)
8535
+
8536
+ # --- Handle Members reconstruction ---
8537
+ members: Optional[List[Union[Agent, "Team"]]] = None
8538
+ from agno.agent import get_agent_by_id
8539
+
8540
+ if "members" in config and config["members"]:
8541
+ members = []
8542
+ for member_data in config["members"]:
8543
+ member_type = member_data.get("type")
8544
+ if member_type == "agent":
8545
+ # TODO: Make sure to pass the correct version to get_agent_by_id. Right now its returning the latest version.
8546
+ if db is None:
8547
+ log_warning(f"Cannot load member agent {member_data['agent_id']}: db is None")
8548
+ continue
8549
+ agent = get_agent_by_id(id=member_data["agent_id"], db=db, registry=registry)
8550
+ if agent:
8551
+ members.append(agent)
8552
+ else:
8553
+ log_warning(f"Agent not found: {member_data['agent_id']}")
8554
+
8555
+ # --- Handle reasoning_model reconstruction ---
8556
+ # TODO: implement reasoning model deserialization
8557
+ # if "reasoning_model" in config:
8558
+ # model_data = config["reasoning_model"]
8559
+ # if isinstance(model_data, dict) and "id" in model_data:
8560
+ # config["reasoning_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
8561
+ # elif isinstance(model_data, str):
8562
+ # config["reasoning_model"] = get_model(model_data)
8563
+
8564
+ # --- Handle parser_model reconstruction ---
8565
+ # TODO: implement parser model deserialization
8566
+ # if "parser_model" in config:
8567
+ # model_data = config["parser_model"]
8568
+ # if isinstance(model_data, dict) and "id" in model_data:
8569
+ # config["parser_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
8570
+ # elif isinstance(model_data, str):
8571
+ # config["parser_model"] = get_model(model_data)
8572
+
8573
+ # --- Handle output_model reconstruction ---
8574
+ # TODO: implement output model deserialization
8575
+ # if "output_model" in config:
8576
+ # model_data = config["output_model"]
8577
+ # if isinstance(model_data, dict) and "id" in model_data:
8578
+ # config["output_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
8579
+ # elif isinstance(model_data, str):
8580
+ # config["output_model"] = get_model(model_data)
8581
+
8582
+ # --- Handle tools reconstruction ---
8583
+ if "tools" in config and config["tools"]:
8584
+ if registry:
8585
+ config["tools"] = [registry.rehydrate_function(t) for t in config["tools"]]
8586
+ else:
8587
+ log_warning("No registry provided, tools will not be rehydrated.")
8588
+ del config["tools"]
8589
+
8590
+ # --- Handle DB reconstruction ---
8591
+ if "db" in config and isinstance(config["db"], dict):
8592
+ db_data = config["db"]
8593
+ db_id = db_data.get("id")
8594
+
8595
+ # First try to get the db from the registry (preferred - reuses existing connection)
8596
+ if registry and db_id:
8597
+ registry_db = registry.get_db(db_id)
8598
+ if registry_db is not None:
8599
+ config["db"] = registry_db
8600
+ else:
8601
+ del config["db"]
8602
+ else:
8603
+ # No registry or no db_id, fall back to creating from dict
8604
+ config["db"] = db_from_dict(db_data)
8605
+ if config["db"] is None:
8606
+ del config["db"]
8607
+
8608
+ # --- Handle Schema reconstruction ---
8609
+ if "input_schema" in config and isinstance(config["input_schema"], str):
8610
+ schema_cls = registry.get_schema(config["input_schema"]) if registry else None
8611
+ if schema_cls:
8612
+ config["input_schema"] = schema_cls
8613
+ else:
8614
+ log_warning(f"Input schema {config['input_schema']} not found in registry, skipping.")
8615
+ del config["input_schema"]
8616
+
8617
+ if "output_schema" in config and isinstance(config["output_schema"], str):
8618
+ schema_cls = registry.get_schema(config["output_schema"]) if registry else None
8619
+ if schema_cls:
8620
+ config["output_schema"] = schema_cls
8621
+ else:
8622
+ log_warning(f"Output schema {config['output_schema']} not found in registry, skipping.")
8623
+ del config["output_schema"]
8624
+
8625
+ # --- Handle MemoryManager reconstruction ---
8626
+ # TODO: implement memory manager deserialization
8627
+ # if "memory_manager" in config and isinstance(config["memory_manager"], dict):
8628
+ # from agno.memory import MemoryManager
8629
+ # config["memory_manager"] = MemoryManager.from_dict(config["memory_manager"])
8630
+
8631
+ # --- Handle SessionSummaryManager reconstruction ---
8632
+ # TODO: implement session summary manager deserialization
8633
+ # if "session_summary_manager" in config and isinstance(config["session_summary_manager"], dict):
8634
+ # from agno.session import SessionSummaryManager
8635
+ # config["session_summary_manager"] = SessionSummaryManager.from_dict(config["session_summary_manager"])
8636
+
8637
+ # --- Handle Knowledge reconstruction ---
8638
+ # TODO: implement knowledge deserialization
8639
+ # if "knowledge" in config and isinstance(config["knowledge"], dict):
8640
+ # from agno.knowledge import Knowledge
8641
+ # config["knowledge"] = Knowledge.from_dict(config["knowledge"])
8642
+
8643
+ # --- Handle CompressionManager reconstruction ---
8644
+ # TODO: implement compression manager deserialization
8645
+ # if "compression_manager" in config and isinstance(config["compression_manager"], dict):
8646
+ # from agno.compression.manager import CompressionManager
8647
+ # config["compression_manager"] = CompressionManager.from_dict(config["compression_manager"])
8648
+
8649
+ return cls(
8650
+ # --- Team settings ---
8651
+ id=config.get("id"),
8652
+ name=config.get("name"),
8653
+ role=config.get("role"),
8654
+ description=config.get("description"),
8655
+ # --- Model ---
8656
+ model=config.get("model"),
8657
+ # --- Members ---
8658
+ members=members or [],
8659
+ # --- Execution settings ---
8660
+ respond_directly=config.get("respond_directly", False),
8661
+ delegate_to_all_members=config.get("delegate_to_all_members", False),
8662
+ determine_input_for_members=config.get("determine_input_for_members", True),
8663
+ # --- User settings ---
8664
+ user_id=config.get("user_id"),
8665
+ # --- Session settings ---
8666
+ session_id=config.get("session_id"),
8667
+ session_state=config.get("session_state"),
8668
+ add_session_state_to_context=config.get("add_session_state_to_context", False),
8669
+ enable_agentic_state=config.get("enable_agentic_state", False),
8670
+ overwrite_db_session_state=config.get("overwrite_db_session_state", False),
8671
+ cache_session=config.get("cache_session", False),
8672
+ add_team_history_to_members=config.get("add_team_history_to_members", False),
8673
+ num_team_history_runs=config.get("num_team_history_runs", 3),
8674
+ share_member_interactions=config.get("share_member_interactions", False),
8675
+ search_session_history=config.get("search_session_history", False),
8676
+ num_history_sessions=config.get("num_history_sessions"),
8677
+ read_chat_history=config.get("read_chat_history", False),
8678
+ # --- System message settings ---
8679
+ system_message=config.get("system_message"),
8680
+ system_message_role=config.get("system_message_role", "system"),
8681
+ introduction=config.get("introduction"),
8682
+ instructions=config.get("instructions"),
8683
+ expected_output=config.get("expected_output"),
8684
+ additional_context=config.get("additional_context"),
8685
+ markdown=config.get("markdown", False),
8686
+ add_datetime_to_context=config.get("add_datetime_to_context", False),
8687
+ add_location_to_context=config.get("add_location_to_context", False),
8688
+ timezone_identifier=config.get("timezone_identifier"),
8689
+ add_name_to_context=config.get("add_name_to_context", False),
8690
+ add_member_tools_to_context=config.get("add_member_tools_to_context", False),
8691
+ resolve_in_context=config.get("resolve_in_context", True),
8692
+ # --- Database settings ---
8693
+ db=config.get("db"),
8694
+ # --- Dependencies ---
8695
+ dependencies=config.get("dependencies"),
8696
+ add_dependencies_to_context=config.get("add_dependencies_to_context", False),
8697
+ # --- Knowledge settings ---
8698
+ # knowledge=config.get("knowledge"), # TODO
8699
+ knowledge_filters=config.get("knowledge_filters"),
8700
+ enable_agentic_knowledge_filters=config.get("enable_agentic_knowledge_filters", False),
8701
+ add_knowledge_to_context=config.get("add_knowledge_to_context", False),
8702
+ update_knowledge=config.get("update_knowledge", False),
8703
+ search_knowledge=config.get("search_knowledge", True),
8704
+ references_format=config.get("references_format", "json"),
8705
+ # --- Tools ---
8706
+ tools=config.get("tools"),
8707
+ tool_call_limit=config.get("tool_call_limit"),
8708
+ tool_choice=config.get("tool_choice"),
8709
+ get_member_information_tool=config.get("get_member_information_tool", False),
8710
+ # --- Schema settings ---
8711
+ # input_schema=config.get("input_schema"), # TODO
8712
+ # output_schema=config.get("output_schema"), # TODO
8713
+ # --- Parser and output settings ---
8714
+ # parser_model=config.get("parser_model"), # TODO
8715
+ parser_model_prompt=config.get("parser_model_prompt"),
8716
+ # output_model=config.get("output_model"), # TODO
8717
+ output_model_prompt=config.get("output_model_prompt"),
8718
+ use_json_mode=config.get("use_json_mode", False),
8719
+ parse_response=config.get("parse_response", True),
8720
+ # --- Memory settings ---
8721
+ # memory_manager=config.get("memory_manager"), # TODO
8722
+ enable_agentic_memory=config.get("enable_agentic_memory", False),
8723
+ enable_user_memories=config.get("enable_user_memories", False),
8724
+ add_memories_to_context=config.get("add_memories_to_context"),
8725
+ enable_session_summaries=config.get("enable_session_summaries", False),
8726
+ add_session_summary_to_context=config.get("add_session_summary_to_context"),
8727
+ # session_summary_manager=config.get("session_summary_manager"), # TODO
8728
+ # --- History settings ---
8729
+ add_history_to_context=config.get("add_history_to_context", False),
8730
+ num_history_runs=config.get("num_history_runs"),
8731
+ num_history_messages=config.get("num_history_messages"),
8732
+ max_tool_calls_from_history=config.get("max_tool_calls_from_history"),
8733
+ # --- Compression settings ---
8734
+ compress_tool_results=config.get("compress_tool_results", False),
8735
+ # compression_manager=config.get("compression_manager"), # TODO
8736
+ # --- Reasoning settings ---
8737
+ reasoning=config.get("reasoning", False),
8738
+ # reasoning_model=config.get("reasoning_model"), # TODO
8739
+ reasoning_min_steps=config.get("reasoning_min_steps", 1),
8740
+ reasoning_max_steps=config.get("reasoning_max_steps", 10),
8741
+ # --- Streaming settings ---
8742
+ stream=config.get("stream"),
8743
+ stream_events=config.get("stream_events"),
8744
+ stream_member_events=config.get("stream_member_events", True),
8745
+ store_events=config.get("store_events", False),
8746
+ store_member_responses=config.get("store_member_responses", False),
8747
+ # --- Media settings ---
8748
+ send_media_to_model=config.get("send_media_to_model", True),
8749
+ store_media=config.get("store_media", True),
8750
+ store_tool_messages=config.get("store_tool_messages", True),
8751
+ store_history_messages=config.get("store_history_messages", True),
8752
+ # --- Retry settings ---
8753
+ retries=config.get("retries", 0),
8754
+ delay_between_retries=config.get("delay_between_retries", 1),
8755
+ exponential_backoff=config.get("exponential_backoff", False),
8756
+ # --- Metadata ---
8757
+ metadata=config.get("metadata"),
8758
+ # --- Debug and telemetry settings ---
8759
+ debug_mode=config.get("debug_mode", False),
8760
+ debug_level=config.get("debug_level", 1),
8761
+ show_members_responses=config.get("show_members_responses", False),
8762
+ telemetry=config.get("telemetry", True),
8763
+ )
8764
+
8765
+ def save(
8766
+ self,
8767
+ *,
8768
+ db: Optional["BaseDb"] = None,
8769
+ stage: str = "published",
8770
+ label: Optional[str] = None,
8771
+ notes: Optional[str] = None,
8772
+ ) -> Optional[int]:
8773
+ """
8774
+ Save the team component and config to the database, including member agents/teams.
8775
+
8776
+ Args:
8777
+ db: The database to save the component and config to.
8778
+ stage: The stage of the component. Defaults to "published".
8779
+ label: The label of the component.
8780
+ notes: The notes of the component.
8781
+
8782
+ Returns:
8783
+ Optional[int]: The version number of the saved config.
8784
+ """
8785
+ from agno.agent.agent import Agent
8786
+
8787
+ db_ = db or self.db
8788
+ if not db_:
8789
+ raise ValueError("Db not initialized or provided")
8790
+ if not isinstance(db_, BaseDb):
8791
+ raise ValueError("Async databases not yet supported for save(). Use a sync database.")
8792
+ if self.id is None:
8793
+ self.id = generate_id_from_name(self.name)
8794
+
8795
+ try:
8796
+ # Collect all links for members
8797
+ all_links: List[Dict[str, Any]] = []
8798
+
8799
+ # Save each member (Agent or nested Team) and collect links
8800
+ for position, member in enumerate(self.members or []):
8801
+ # Save member first - returns version
8802
+ member_version = member.save(db=db_, stage=stage, label=label, notes=notes)
8803
+
8804
+ # Add link
8805
+ all_links.append(
8806
+ {
8807
+ "link_kind": "member",
8808
+ "link_key": f"member_{position}",
8809
+ "child_component_id": member.id,
8810
+ "child_version": member_version,
8811
+ "position": position,
8812
+ "meta": {"type": "agent" if isinstance(member, Agent) else "team"},
8813
+ }
8814
+ )
8815
+
8816
+ # Create or update component
8817
+ db_.upsert_component(
8818
+ component_id=self.id,
8819
+ component_type=ComponentType.TEAM,
8820
+ name=getattr(self, "name", self.id),
8821
+ description=getattr(self, "description", None),
8822
+ metadata=getattr(self, "metadata", None),
8823
+ )
8824
+
8825
+ # Create or update config with links
8826
+ config = db_.upsert_config(
8827
+ component_id=self.id,
8828
+ config=self.to_dict(),
8829
+ links=all_links if all_links else None,
8830
+ label=label,
8831
+ stage=stage,
8832
+ notes=notes,
8833
+ )
8834
+
8835
+ return config["version"]
8836
+
8837
+ except Exception as e:
8838
+ log_error(f"Error saving Team to database: {e}")
8839
+ raise
8840
+
8841
+ @classmethod
8842
+ def load(
8843
+ cls,
8844
+ id: str,
8845
+ *,
8846
+ db: "BaseDb",
8847
+ registry: Optional["Registry"] = None,
8848
+ label: Optional[str] = None,
8849
+ version: Optional[int] = None,
8850
+ ) -> Optional["Team"]:
8851
+ """
8852
+ Load a team by id, with hydrated members.
8853
+
8854
+ Args:
8855
+ id: The id of the team to load.
8856
+ db: The database to load the team from.
8857
+ label: The label of the team to load.
8858
+
8859
+ Returns:
8860
+ The team loaded from the database with hydrated members, or None if not found.
8861
+ """
8862
+ from agno.agent.agent import Agent
8863
+
8864
+ # Use graph to load team + all members
8865
+ graph = db.load_component_graph(id, version=version, label=label)
8866
+ if graph is None:
8867
+ return None
8868
+
8869
+ config = graph["config"].get("config")
8870
+ if config is None:
8871
+ return None
8872
+
8873
+ team = cls.from_dict(config, db=db, registry=registry)
8874
+ team.id = id
8875
+ team.db = db
8876
+
8877
+ # Hydrate members from graph children
8878
+ team.members = []
8879
+ for child in graph.get("children", []):
8880
+ child_graph = child.get("graph")
8881
+ if child_graph is None:
8882
+ continue
8883
+
8884
+ child_config = child_graph["config"].get("config")
8885
+ if child_config is None:
8886
+ continue
8887
+
8888
+ link_meta = child["link"].get("meta", {})
8889
+ member_type = link_meta.get("type")
8890
+
8891
+ if member_type == "agent":
8892
+ agent = Agent.from_dict(child_config)
8893
+ agent.id = child_graph["component"]["component_id"]
8894
+ agent.db = db
8895
+ team.members.append(agent)
8896
+ elif member_type == "team":
8897
+ # Recursive load for nested teams
8898
+ nested_team = cls.load(child_graph["component"]["component_id"], db=db)
8899
+ if nested_team:
8900
+ team.members.append(nested_team)
8901
+
8902
+ return team
8903
+
8904
+ def delete(
8905
+ self,
8906
+ *,
8907
+ db: Optional["BaseDb"] = None,
8908
+ hard_delete: bool = False,
8909
+ ) -> bool:
8910
+ """
8911
+ Delete the team component.
8912
+
8913
+ Args:
8914
+ db: The database to delete the component from.
8915
+ hard_delete: Whether to hard delete the component.
8916
+
8917
+ Returns:
8918
+ True if the component was deleted, False otherwise.
8919
+ """
8920
+ db_ = db or self.db
8921
+ if not db_:
8922
+ raise ValueError("Db not initialized or provided")
8923
+ if not isinstance(db_, BaseDb):
8924
+ raise ValueError("Async databases not yet supported for delete(). Use a sync database.")
8925
+ if self.id is None:
8926
+ raise ValueError("Cannot delete team without an id")
8927
+
8928
+ return db_.delete_component(component_id=self.id, hard_delete=hard_delete)
8929
+
8220
8930
  # -*- Public convenience functions
8221
8931
  def get_run_output(
8222
8932
  self, run_id: str, session_id: Optional[str] = None
@@ -8864,6 +9574,7 @@ class Team:
8864
9574
  title=title,
8865
9575
  reasoning=thought,
8866
9576
  action=action,
9577
+ result=None,
8867
9578
  next_action=NextAction.CONTINUE,
8868
9579
  confidence=confidence,
8869
9580
  )
@@ -8899,6 +9610,7 @@ class Team:
8899
9610
  # Create a reasoning step
8900
9611
  reasoning_step = ReasoningStep(
8901
9612
  title=title,
9613
+ action=None,
8902
9614
  result=result,
8903
9615
  reasoning=analysis,
8904
9616
  next_action=next_action_enum,
@@ -8927,7 +9639,10 @@ class Team:
8927
9639
  thought = tool_args["thought"]
8928
9640
  reasoning_step = ReasoningStep(
8929
9641
  title="Thinking",
9642
+ action=None,
9643
+ result=None,
8930
9644
  reasoning=thought,
9645
+ next_action=None,
8931
9646
  confidence=None,
8932
9647
  )
8933
9648
  formatted_content = f"## Thinking\n{thought}\n\n"
@@ -8955,16 +9670,17 @@ class Team:
8955
9670
  log_warning("Knowledge is not set, cannot add to knowledge")
8956
9671
  return "Knowledge is not set, cannot add to knowledge"
8957
9672
 
8958
- if self.knowledge.vector_db is None:
8959
- log_warning("Knowledge vector database is not set, cannot add to knowledge")
8960
- return "Knowledge vector database is not set, cannot add to knowledge"
9673
+ insert_method = getattr(self.knowledge, "insert", None)
9674
+ if not callable(insert_method):
9675
+ log_warning("Knowledge base does not support adding content")
9676
+ return "Knowledge base does not support adding content"
8961
9677
 
8962
9678
  document_name = query.replace(" ", "_").replace("?", "").replace("!", "").replace(".", "")
8963
9679
  document_content = json.dumps({"query": query, "result": result})
8964
9680
  log_info(f"Adding document to Knowledge: {document_name}: {document_content}")
8965
9681
  from agno.knowledge.reader.text_reader import TextReader
8966
9682
 
8967
- self.knowledge.add_content(name=document_name, text_content=document_content, reader=TextReader())
9683
+ insert_method(name=document_name, text_content=document_content, reader=TextReader())
8968
9684
  return "Successfully added to knowledge base"
8969
9685
 
8970
9686
  def get_relevant_docs_from_knowledge(
@@ -8982,25 +9698,26 @@ class Team:
8982
9698
  dependencies = run_context.dependencies if run_context else None
8983
9699
 
8984
9700
  if num_documents is None and self.knowledge is not None:
8985
- num_documents = self.knowledge.max_results
9701
+ num_documents = getattr(self.knowledge, "max_results", None)
8986
9702
 
8987
9703
  # Validate the filters against known valid filter keys
8988
- if self.knowledge is not None:
8989
- valid_filters, invalid_keys = self.knowledge.validate_filters(filters) # type: ignore
9704
+ if self.knowledge is not None and filters is not None:
9705
+ validate_filters_method = getattr(self.knowledge, "validate_filters", None)
9706
+ if callable(validate_filters_method):
9707
+ valid_filters, invalid_keys = validate_filters_method(filters)
8990
9708
 
8991
- # Warn about invalid filter keys
8992
- if invalid_keys:
8993
- # type: ignore
8994
- log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
9709
+ # Warn about invalid filter keys
9710
+ if invalid_keys:
9711
+ log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
8995
9712
 
8996
- # Only use valid filters
8997
- filters = valid_filters
8998
- if not filters:
8999
- log_warning("No valid filters remain after validation. Search will proceed without filters.")
9713
+ # Only use valid filters
9714
+ filters = valid_filters
9715
+ if not filters:
9716
+ log_warning("No valid filters remain after validation. Search will proceed without filters.")
9000
9717
 
9001
- if invalid_keys == [] and valid_filters == {}:
9002
- log_debug("No valid filters provided. Search will proceed without filters.")
9003
- filters = None
9718
+ if invalid_keys == [] and valid_filters == {}:
9719
+ log_debug("No valid filters provided. Search will proceed without filters.")
9720
+ filters = None
9004
9721
 
9005
9722
  if self.knowledge_retriever is not None and callable(self.knowledge_retriever):
9006
9723
  from inspect import signature
@@ -9022,17 +9739,22 @@ class Team:
9022
9739
  except Exception as e:
9023
9740
  log_warning(f"Knowledge retriever failed: {e}")
9024
9741
  raise e
9742
+ # Use knowledge protocol's retrieve method
9025
9743
  try:
9026
- if self.knowledge is None or self.knowledge.vector_db is None:
9744
+ if self.knowledge is None:
9745
+ return None
9746
+
9747
+ # Use protocol retrieve() method if available
9748
+ retrieve_fn = getattr(self.knowledge, "retrieve", None)
9749
+ if not callable(retrieve_fn):
9750
+ log_debug("Knowledge does not implement retrieve()")
9027
9751
  return None
9028
9752
 
9029
9753
  if num_documents is None:
9030
- num_documents = self.knowledge.max_results
9754
+ num_documents = getattr(self.knowledge, "max_results", 10)
9031
9755
 
9032
- log_debug(f"Searching knowledge base with filters: {filters}")
9033
- relevant_docs: List[Document] = self.knowledge.search(
9034
- query=query, max_results=num_documents, filters=filters
9035
- )
9756
+ log_debug(f"Retrieving from knowledge base with filters: {filters}")
9757
+ relevant_docs: List[Document] = retrieve_fn(query=query, max_results=num_documents, filters=filters)
9036
9758
 
9037
9759
  if not relevant_docs or len(relevant_docs) == 0:
9038
9760
  log_debug("No relevant documents found for query")
@@ -9040,7 +9762,7 @@ class Team:
9040
9762
 
9041
9763
  return [doc.to_dict() for doc in relevant_docs]
9042
9764
  except Exception as e:
9043
- log_warning(f"Error searching knowledge base: {e}")
9765
+ log_warning(f"Error retrieving from knowledge base: {e}")
9044
9766
  raise e
9045
9767
 
9046
9768
  async def aget_relevant_docs_from_knowledge(
@@ -9058,25 +9780,26 @@ class Team:
9058
9780
  dependencies = run_context.dependencies if run_context else None
9059
9781
 
9060
9782
  if num_documents is None and self.knowledge is not None:
9061
- num_documents = self.knowledge.max_results
9783
+ num_documents = getattr(self.knowledge, "max_results", None)
9062
9784
 
9063
9785
  # Validate the filters against known valid filter keys
9064
- if self.knowledge is not None:
9065
- valid_filters, invalid_keys = await self.knowledge.async_validate_filters(filters) # type: ignore
9786
+ if self.knowledge is not None and filters is not None:
9787
+ avalidate_filters_method = getattr(self.knowledge, "avalidate_filters", None)
9788
+ if callable(avalidate_filters_method):
9789
+ valid_filters, invalid_keys = await avalidate_filters_method(filters)
9066
9790
 
9067
- # Warn about invalid filter keys
9068
- if invalid_keys:
9069
- # type: ignore
9070
- log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
9791
+ # Warn about invalid filter keys
9792
+ if invalid_keys:
9793
+ log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
9071
9794
 
9072
- # Only use valid filters
9073
- filters = valid_filters
9074
- if not filters:
9075
- log_warning("No valid filters remain after validation. Search will proceed without filters.")
9795
+ # Only use valid filters
9796
+ filters = valid_filters
9797
+ if not filters:
9798
+ log_warning("No valid filters remain after validation. Search will proceed without filters.")
9076
9799
 
9077
- if invalid_keys == [] and valid_filters == {}:
9078
- log_debug("No valid filters provided. Search will proceed without filters.")
9079
- filters = None
9800
+ if invalid_keys == [] and valid_filters == {}:
9801
+ log_debug("No valid filters provided. Search will proceed without filters.")
9802
+ filters = None
9080
9803
 
9081
9804
  if self.knowledge_retriever is not None and callable(self.knowledge_retriever):
9082
9805
  from inspect import isawaitable, signature
@@ -9105,17 +9828,32 @@ class Team:
9105
9828
  log_warning(f"Knowledge retriever failed: {e}")
9106
9829
  raise e
9107
9830
 
9831
+ # Use knowledge protocol's retrieve method
9108
9832
  try:
9109
- if self.knowledge is None or self.knowledge.vector_db is None:
9833
+ if self.knowledge is None:
9834
+ return None
9835
+
9836
+ # Use protocol aretrieve() or retrieve() method if available
9837
+ aretrieve_fn = getattr(self.knowledge, "aretrieve", None)
9838
+ retrieve_fn = getattr(self.knowledge, "retrieve", None)
9839
+
9840
+ if not callable(aretrieve_fn) and not callable(retrieve_fn):
9841
+ log_debug("Knowledge does not implement retrieve()")
9110
9842
  return None
9111
9843
 
9112
9844
  if num_documents is None:
9113
- num_documents = self.knowledge.max_results
9845
+ num_documents = getattr(self.knowledge, "max_results", 10)
9114
9846
 
9115
- log_debug(f"Searching knowledge base with filters: {filters}")
9116
- relevant_docs: List[Document] = await self.knowledge.async_search(
9117
- query=query, max_results=num_documents, filters=filters
9118
- )
9847
+ log_debug(f"Retrieving from knowledge base with filters: {filters}")
9848
+
9849
+ if callable(aretrieve_fn):
9850
+ relevant_docs: List[Document] = await aretrieve_fn(
9851
+ query=query, max_results=num_documents, filters=filters
9852
+ )
9853
+ elif callable(retrieve_fn):
9854
+ relevant_docs = retrieve_fn(query=query, max_results=num_documents, filters=filters)
9855
+ else:
9856
+ return None
9119
9857
 
9120
9858
  if not relevant_docs or len(relevant_docs) == 0:
9121
9859
  log_debug("No relevant documents found for query")
@@ -9123,7 +9861,7 @@ class Team:
9123
9861
 
9124
9862
  return [doc.to_dict() for doc in relevant_docs]
9125
9863
  except Exception as e:
9126
- log_warning(f"Error searching knowledge base: {e}")
9864
+ log_warning(f"Error retrieving from knowledge base: {e}")
9127
9865
  raise e
9128
9866
 
9129
9867
  def _convert_documents_to_string(self, docs: List[Union[Dict[str, Any], str]]) -> str:
@@ -9523,3 +10261,84 @@ class Team:
9523
10261
  except Exception:
9524
10262
  # If copy fails, return as is
9525
10263
  return field_value
10264
+
10265
+
10266
def get_team_by_id(
    db: "BaseDb",
    id: str,
    version: Optional[int] = None,
    label: Optional[str] = None,
    registry: Optional["Registry"] = None,
) -> Optional["Team"]:
    """
    Get a Team by id from the database.

    Resolution order:
    - if version is provided: load that version
    - elif label is provided: load that labeled version
    - else: load component.current_version

    Args:
        db: Database handle.
        id: Team component_id.
        version: Optional integer config version.
        label: Optional version_label.
        registry: Optional Registry for reconstructing unserializable components.

    Returns:
        Team instance or None.
    """
    try:
        record = db.get_config(component_id=id, version=version, label=label)
        if record is None:
            return None

        config = record.get("config") if isinstance(record, dict) else None
        if config is None:
            raise ValueError(f"Invalid config found for team {id}")

        team = Team.from_dict(config, db=db, registry=registry)
        # The stored config may not carry the component_id; force it here so
        # the returned instance is addressable by the id it was fetched with.
        team.id = id
        return team

    except Exception as e:
        log_error(f"Error loading Team {id} from database: {e}")
        return None
10309
+
10310
+
10311
def get_teams(
    db: "BaseDb",
    registry: Optional["Registry"] = None,
) -> List["Team"]:
    """
    Get all teams from the database.

    Args:
        db: Database to load teams from
        registry: Optional registry for rehydrating tools

    Returns:
        List of Team instances loaded from the database. A component whose
        config cannot be fetched or parsed is skipped (and logged) instead
        of aborting the whole load.
    """
    teams: List[Team] = []

    try:
        components, _ = db.list_components(component_type=ComponentType.TEAM)
    except Exception as e:
        log_error(f"Error loading Teams from database: {e}")
        return []

    for component in components:
        component_id = component["component_id"]
        try:
            config = db.get_config(component_id=component_id)
            if config is None:
                continue
            team_config = config.get("config")
            if team_config is None:
                continue
            if "id" not in team_config:
                team_config["id"] = component_id
            team = Team.from_dict(team_config, db=db, registry=registry)
            # Ensure team.id is set to the component_id
            team.id = component_id
            teams.append(team)
        except Exception as e:
            # Isolate failures per component: one malformed config must not
            # discard every team that already loaded successfully.
            log_error(f"Error loading Team {component_id} from database: {e}")

    return teams