agno 2.3.26__py3-none-any.whl → 2.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/__init__.py +4 -0
- agno/agent/agent.py +1368 -541
- agno/agent/remote.py +13 -0
- agno/db/base.py +339 -0
- agno/db/postgres/async_postgres.py +116 -12
- agno/db/postgres/postgres.py +1242 -25
- agno/db/postgres/schemas.py +48 -1
- agno/db/sqlite/async_sqlite.py +119 -4
- agno/db/sqlite/schemas.py +51 -0
- agno/db/sqlite/sqlite.py +1186 -13
- agno/db/utils.py +37 -1
- agno/integrations/discord/client.py +12 -1
- agno/knowledge/__init__.py +4 -0
- agno/knowledge/chunking/code.py +1 -1
- agno/knowledge/chunking/semantic.py +1 -1
- agno/knowledge/chunking/strategy.py +4 -0
- agno/knowledge/filesystem.py +412 -0
- agno/knowledge/knowledge.py +3722 -2182
- agno/knowledge/protocol.py +134 -0
- agno/knowledge/reader/arxiv_reader.py +2 -2
- agno/knowledge/reader/base.py +9 -7
- agno/knowledge/reader/csv_reader.py +236 -13
- agno/knowledge/reader/docx_reader.py +2 -2
- agno/knowledge/reader/field_labeled_csv_reader.py +169 -5
- agno/knowledge/reader/firecrawl_reader.py +2 -2
- agno/knowledge/reader/json_reader.py +2 -2
- agno/knowledge/reader/markdown_reader.py +2 -2
- agno/knowledge/reader/pdf_reader.py +5 -4
- agno/knowledge/reader/pptx_reader.py +2 -2
- agno/knowledge/reader/reader_factory.py +118 -1
- agno/knowledge/reader/s3_reader.py +2 -2
- agno/knowledge/reader/tavily_reader.py +2 -2
- agno/knowledge/reader/text_reader.py +2 -2
- agno/knowledge/reader/web_search_reader.py +2 -2
- agno/knowledge/reader/website_reader.py +5 -3
- agno/knowledge/reader/wikipedia_reader.py +2 -2
- agno/knowledge/reader/youtube_reader.py +2 -2
- agno/knowledge/remote_content/__init__.py +29 -0
- agno/knowledge/remote_content/config.py +204 -0
- agno/knowledge/remote_content/remote_content.py +74 -17
- agno/knowledge/utils.py +37 -29
- agno/learn/__init__.py +6 -0
- agno/learn/machine.py +35 -0
- agno/learn/schemas.py +82 -11
- agno/learn/stores/__init__.py +3 -0
- agno/learn/stores/decision_log.py +1156 -0
- agno/learn/stores/learned_knowledge.py +6 -6
- agno/models/anthropic/claude.py +24 -0
- agno/models/aws/bedrock.py +20 -0
- agno/models/base.py +60 -6
- agno/models/cerebras/cerebras.py +34 -2
- agno/models/cohere/chat.py +25 -0
- agno/models/google/gemini.py +50 -5
- agno/models/litellm/chat.py +38 -0
- agno/models/n1n/__init__.py +3 -0
- agno/models/n1n/n1n.py +57 -0
- agno/models/openai/chat.py +25 -1
- agno/models/openrouter/openrouter.py +46 -0
- agno/models/perplexity/perplexity.py +2 -0
- agno/models/response.py +16 -0
- agno/os/app.py +83 -44
- agno/os/interfaces/slack/router.py +10 -1
- agno/os/interfaces/whatsapp/router.py +6 -0
- agno/os/middleware/__init__.py +2 -0
- agno/os/middleware/trailing_slash.py +27 -0
- agno/os/router.py +1 -0
- agno/os/routers/agents/router.py +29 -16
- agno/os/routers/agents/schema.py +6 -4
- agno/os/routers/components/__init__.py +3 -0
- agno/os/routers/components/components.py +475 -0
- agno/os/routers/evals/schemas.py +4 -3
- agno/os/routers/health.py +3 -3
- agno/os/routers/knowledge/knowledge.py +128 -3
- agno/os/routers/knowledge/schemas.py +12 -0
- agno/os/routers/memory/schemas.py +4 -2
- agno/os/routers/metrics/metrics.py +9 -11
- agno/os/routers/metrics/schemas.py +10 -6
- agno/os/routers/registry/__init__.py +3 -0
- agno/os/routers/registry/registry.py +337 -0
- agno/os/routers/teams/router.py +20 -8
- agno/os/routers/teams/schema.py +6 -4
- agno/os/routers/traces/traces.py +5 -5
- agno/os/routers/workflows/router.py +38 -11
- agno/os/routers/workflows/schema.py +1 -1
- agno/os/schema.py +92 -26
- agno/os/utils.py +84 -19
- agno/reasoning/anthropic.py +2 -2
- agno/reasoning/azure_ai_foundry.py +2 -2
- agno/reasoning/deepseek.py +2 -2
- agno/reasoning/default.py +6 -7
- agno/reasoning/gemini.py +2 -2
- agno/reasoning/helpers.py +6 -7
- agno/reasoning/manager.py +4 -10
- agno/reasoning/ollama.py +2 -2
- agno/reasoning/openai.py +2 -2
- agno/reasoning/vertexai.py +2 -2
- agno/registry/__init__.py +3 -0
- agno/registry/registry.py +68 -0
- agno/run/agent.py +59 -0
- agno/run/base.py +7 -0
- agno/run/team.py +57 -0
- agno/skills/agent_skills.py +10 -3
- agno/team/__init__.py +3 -1
- agno/team/team.py +1165 -330
- agno/tools/duckduckgo.py +25 -71
- agno/tools/exa.py +0 -21
- agno/tools/function.py +35 -83
- agno/tools/knowledge.py +9 -4
- agno/tools/mem0.py +11 -10
- agno/tools/memory.py +47 -46
- agno/tools/parallel.py +0 -7
- agno/tools/reasoning.py +30 -23
- agno/tools/tavily.py +4 -1
- agno/tools/websearch.py +93 -0
- agno/tools/website.py +1 -1
- agno/tools/wikipedia.py +1 -1
- agno/tools/workflow.py +48 -47
- agno/utils/agent.py +42 -5
- agno/utils/events.py +160 -2
- agno/utils/print_response/agent.py +0 -31
- agno/utils/print_response/team.py +0 -2
- agno/utils/print_response/workflow.py +0 -2
- agno/utils/team.py +61 -11
- agno/vectordb/lancedb/lance_db.py +4 -1
- agno/vectordb/mongodb/mongodb.py +1 -1
- agno/vectordb/pgvector/pgvector.py +3 -3
- agno/vectordb/qdrant/qdrant.py +4 -4
- agno/workflow/__init__.py +3 -1
- agno/workflow/condition.py +0 -21
- agno/workflow/loop.py +0 -21
- agno/workflow/parallel.py +0 -21
- agno/workflow/router.py +0 -21
- agno/workflow/step.py +117 -24
- agno/workflow/steps.py +0 -21
- agno/workflow/workflow.py +427 -63
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/METADATA +49 -76
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/RECORD +140 -126
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/WHEEL +1 -1
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/top_level.txt +0 -0
agno/team/team.py
CHANGED
|
@@ -4,13 +4,11 @@ import asyncio
|
|
|
4
4
|
import contextlib
|
|
5
5
|
import json
|
|
6
6
|
import time
|
|
7
|
-
import warnings
|
|
8
7
|
from collections import ChainMap, deque
|
|
9
8
|
from concurrent.futures import Future
|
|
10
9
|
from copy import copy
|
|
11
10
|
from dataclasses import dataclass
|
|
12
11
|
from os import getenv
|
|
13
|
-
from textwrap import dedent
|
|
14
12
|
from typing import (
|
|
15
13
|
Any,
|
|
16
14
|
AsyncIterator,
|
|
@@ -35,7 +33,8 @@ from pydantic import BaseModel
|
|
|
35
33
|
|
|
36
34
|
from agno.agent import Agent
|
|
37
35
|
from agno.compression.manager import CompressionManager
|
|
38
|
-
from agno.db.base import AsyncBaseDb, BaseDb, SessionType, UserMemory
|
|
36
|
+
from agno.db.base import AsyncBaseDb, BaseDb, ComponentType, SessionType, UserMemory
|
|
37
|
+
from agno.db.utils import db_from_dict
|
|
39
38
|
from agno.eval.base import BaseEval
|
|
40
39
|
from agno.exceptions import (
|
|
41
40
|
InputCheckError,
|
|
@@ -44,7 +43,7 @@ from agno.exceptions import (
|
|
|
44
43
|
)
|
|
45
44
|
from agno.filters import FilterExpr
|
|
46
45
|
from agno.guardrails import BaseGuardrail
|
|
47
|
-
from agno.knowledge.
|
|
46
|
+
from agno.knowledge.protocol import KnowledgeProtocol
|
|
48
47
|
from agno.knowledge.types import KnowledgeFilter
|
|
49
48
|
from agno.media import Audio, File, Image, Video
|
|
50
49
|
from agno.memory import MemoryManager
|
|
@@ -54,6 +53,7 @@ from agno.models.metrics import Metrics
|
|
|
54
53
|
from agno.models.response import ModelResponse, ModelResponseEvent
|
|
55
54
|
from agno.models.utils import get_model
|
|
56
55
|
from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
|
|
56
|
+
from agno.registry.registry import Registry
|
|
57
57
|
from agno.run import RunContext, RunStatus
|
|
58
58
|
from agno.run.agent import RunEvent, RunOutput, RunOutputEvent
|
|
59
59
|
from agno.run.cancel import (
|
|
@@ -118,6 +118,10 @@ from agno.utils.agent import (
|
|
|
118
118
|
from agno.utils.common import is_typed_dict
|
|
119
119
|
from agno.utils.events import (
|
|
120
120
|
add_team_error_event,
|
|
121
|
+
create_team_compression_completed_event,
|
|
122
|
+
create_team_compression_started_event,
|
|
123
|
+
create_team_model_request_completed_event,
|
|
124
|
+
create_team_model_request_started_event,
|
|
121
125
|
create_team_parser_model_response_completed_event,
|
|
122
126
|
create_team_parser_model_response_started_event,
|
|
123
127
|
create_team_post_hook_completed_event,
|
|
@@ -266,6 +270,8 @@ class Team:
|
|
|
266
270
|
description: Optional[str] = None
|
|
267
271
|
# List of instructions for the team.
|
|
268
272
|
instructions: Optional[Union[str, List[str], Callable]] = None
|
|
273
|
+
# If True, wrap instructions in <instructions> tags. Default is False.
|
|
274
|
+
use_instruction_tags: bool = False
|
|
269
275
|
# Provide the expected output from the Team.
|
|
270
276
|
expected_output: Optional[str] = None
|
|
271
277
|
# Additional context added to the end of the system message.
|
|
@@ -314,7 +320,7 @@ class Team:
|
|
|
314
320
|
add_dependencies_to_context: bool = False
|
|
315
321
|
|
|
316
322
|
# --- Agent Knowledge ---
|
|
317
|
-
knowledge: Optional[
|
|
323
|
+
knowledge: Optional[KnowledgeProtocol] = None
|
|
318
324
|
# Add knowledge_filters to the Agent class attributes
|
|
319
325
|
knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
|
|
320
326
|
# Let the agent choose the knowledge filters
|
|
@@ -395,7 +401,9 @@ class Team:
|
|
|
395
401
|
# Enable the agent to manage memories of the user
|
|
396
402
|
enable_agentic_memory: bool = False
|
|
397
403
|
# If True, the agent creates/updates user memories at the end of runs
|
|
398
|
-
|
|
404
|
+
update_memory_on_run: bool = False
|
|
405
|
+
# Soon to be deprecated. Use update_memory_on_run
|
|
406
|
+
enable_user_memories: Optional[bool] = None
|
|
399
407
|
# If True, the agent adds a reference to the user memories in the response
|
|
400
408
|
add_memories_to_context: Optional[bool] = None
|
|
401
409
|
# If True, the agent creates/updates session summaries at the end of runs
|
|
@@ -427,6 +435,8 @@ class Team:
|
|
|
427
435
|
# --- Team Storage ---
|
|
428
436
|
# Metadata stored with this team
|
|
429
437
|
metadata: Optional[Dict[str, Any]] = None
|
|
438
|
+
# Version of the team config (set when loaded from DB)
|
|
439
|
+
version: Optional[int] = None
|
|
430
440
|
|
|
431
441
|
# --- Team Reasoning ---
|
|
432
442
|
reasoning: bool = False
|
|
@@ -440,8 +450,6 @@ class Team:
|
|
|
440
450
|
stream: Optional[bool] = None
|
|
441
451
|
# Stream the intermediate steps from the Agent
|
|
442
452
|
stream_events: Optional[bool] = None
|
|
443
|
-
# [Deprecated] Stream the intermediate steps from the Agent
|
|
444
|
-
stream_intermediate_steps: Optional[bool] = None
|
|
445
453
|
# Stream the member events from the Team
|
|
446
454
|
stream_member_events: bool = True
|
|
447
455
|
|
|
@@ -473,9 +481,6 @@ class Team:
|
|
|
473
481
|
# This helps us improve the Teams implementation and provide better support
|
|
474
482
|
telemetry: bool = True
|
|
475
483
|
|
|
476
|
-
# Deprecated. Use delegate_to_all_members instead.
|
|
477
|
-
delegate_task_to_all_members: bool = False
|
|
478
|
-
|
|
479
484
|
def __init__(
|
|
480
485
|
self,
|
|
481
486
|
members: List[Union[Agent, "Team"]],
|
|
@@ -485,7 +490,6 @@ class Team:
|
|
|
485
490
|
role: Optional[str] = None,
|
|
486
491
|
respond_directly: bool = False,
|
|
487
492
|
determine_input_for_members: bool = True,
|
|
488
|
-
delegate_task_to_all_members: bool = False,
|
|
489
493
|
delegate_to_all_members: bool = False,
|
|
490
494
|
user_id: Optional[str] = None,
|
|
491
495
|
session_id: Optional[str] = None,
|
|
@@ -501,6 +505,7 @@ class Team:
|
|
|
501
505
|
num_history_sessions: Optional[int] = None,
|
|
502
506
|
description: Optional[str] = None,
|
|
503
507
|
instructions: Optional[Union[str, List[str], Callable]] = None,
|
|
508
|
+
use_instruction_tags: bool = False,
|
|
504
509
|
expected_output: Optional[str] = None,
|
|
505
510
|
additional_context: Optional[str] = None,
|
|
506
511
|
markdown: bool = False,
|
|
@@ -515,7 +520,7 @@ class Team:
|
|
|
515
520
|
additional_input: Optional[List[Union[str, Dict, BaseModel, Message]]] = None,
|
|
516
521
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
517
522
|
add_dependencies_to_context: bool = False,
|
|
518
|
-
knowledge: Optional[
|
|
523
|
+
knowledge: Optional[KnowledgeProtocol] = None,
|
|
519
524
|
knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
|
|
520
525
|
add_knowledge_to_context: bool = False,
|
|
521
526
|
enable_agentic_knowledge_filters: Optional[bool] = False,
|
|
@@ -550,7 +555,8 @@ class Team:
|
|
|
550
555
|
parse_response: bool = True,
|
|
551
556
|
db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
|
|
552
557
|
enable_agentic_memory: bool = False,
|
|
553
|
-
|
|
558
|
+
update_memory_on_run: bool = False,
|
|
559
|
+
enable_user_memories: Optional[bool] = None, # Soon to be deprecated. Use update_memory_on_run
|
|
554
560
|
add_memories_to_context: Optional[bool] = None,
|
|
555
561
|
memory_manager: Optional[MemoryManager] = None,
|
|
556
562
|
enable_session_summaries: bool = False,
|
|
@@ -566,7 +572,6 @@ class Team:
|
|
|
566
572
|
reasoning_max_steps: int = 10,
|
|
567
573
|
stream: Optional[bool] = None,
|
|
568
574
|
stream_events: Optional[bool] = None,
|
|
569
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
570
575
|
store_events: bool = False,
|
|
571
576
|
events_to_skip: Optional[List[Union[RunEvent, TeamRunEvent]]] = None,
|
|
572
577
|
store_member_responses: bool = False,
|
|
@@ -579,13 +584,6 @@ class Team:
|
|
|
579
584
|
exponential_backoff: bool = False,
|
|
580
585
|
telemetry: bool = True,
|
|
581
586
|
):
|
|
582
|
-
if delegate_task_to_all_members:
|
|
583
|
-
warnings.warn(
|
|
584
|
-
"The 'delegate_task_to_all_members' parameter is deprecated and will be removed in future versions. Use 'delegate_to_all_members' instead.",
|
|
585
|
-
DeprecationWarning,
|
|
586
|
-
stacklevel=2,
|
|
587
|
-
)
|
|
588
|
-
|
|
589
587
|
self.members = members
|
|
590
588
|
|
|
591
589
|
self.model = model # type: ignore[assignment]
|
|
@@ -596,7 +594,7 @@ class Team:
|
|
|
596
594
|
|
|
597
595
|
self.respond_directly = respond_directly
|
|
598
596
|
self.determine_input_for_members = determine_input_for_members
|
|
599
|
-
self.delegate_to_all_members = delegate_to_all_members
|
|
597
|
+
self.delegate_to_all_members = delegate_to_all_members
|
|
600
598
|
|
|
601
599
|
self.user_id = user_id
|
|
602
600
|
self.session_id = session_id
|
|
@@ -627,6 +625,7 @@ class Team:
|
|
|
627
625
|
|
|
628
626
|
self.description = description
|
|
629
627
|
self.instructions = instructions
|
|
628
|
+
self.use_instruction_tags = use_instruction_tags
|
|
630
629
|
self.expected_output = expected_output
|
|
631
630
|
self.additional_context = additional_context
|
|
632
631
|
self.markdown = markdown
|
|
@@ -682,7 +681,13 @@ class Team:
|
|
|
682
681
|
self.db = db
|
|
683
682
|
|
|
684
683
|
self.enable_agentic_memory = enable_agentic_memory
|
|
685
|
-
|
|
684
|
+
|
|
685
|
+
if enable_user_memories is not None:
|
|
686
|
+
self.update_memory_on_run = enable_user_memories
|
|
687
|
+
else:
|
|
688
|
+
self.update_memory_on_run = update_memory_on_run
|
|
689
|
+
self.enable_user_memories = self.update_memory_on_run # Soon to be deprecated. Use update_memory_on_run
|
|
690
|
+
|
|
686
691
|
self.add_memories_to_context = add_memories_to_context
|
|
687
692
|
self.memory_manager = memory_manager
|
|
688
693
|
self.enable_session_summaries = enable_session_summaries
|
|
@@ -702,7 +707,7 @@ class Team:
|
|
|
702
707
|
self.reasoning_max_steps = reasoning_max_steps
|
|
703
708
|
|
|
704
709
|
self.stream = stream
|
|
705
|
-
self.stream_events = stream_events
|
|
710
|
+
self.stream_events = stream_events
|
|
706
711
|
self.store_events = store_events
|
|
707
712
|
self.store_member_responses = store_member_responses
|
|
708
713
|
|
|
@@ -874,7 +879,7 @@ class Team:
|
|
|
874
879
|
|
|
875
880
|
if self.add_memories_to_context is None:
|
|
876
881
|
self.add_memories_to_context = (
|
|
877
|
-
self.
|
|
882
|
+
self.update_memory_on_run or self.enable_agentic_memory or self.memory_manager is not None
|
|
878
883
|
)
|
|
879
884
|
|
|
880
885
|
def _set_session_summary_manager(self) -> None:
|
|
@@ -978,7 +983,7 @@ class Team:
|
|
|
978
983
|
self.set_id()
|
|
979
984
|
|
|
980
985
|
# Set the memory manager and session summary manager
|
|
981
|
-
if self.
|
|
986
|
+
if self.update_memory_on_run or self.enable_agentic_memory or self.memory_manager is not None:
|
|
982
987
|
self._set_memory_manager()
|
|
983
988
|
if self.enable_session_summaries or self.session_summary_manager is not None:
|
|
984
989
|
self._set_session_summary_manager()
|
|
@@ -1102,9 +1107,6 @@ class Team:
|
|
|
1102
1107
|
"team": self,
|
|
1103
1108
|
"session": session,
|
|
1104
1109
|
"user_id": user_id,
|
|
1105
|
-
"metadata": run_context.metadata,
|
|
1106
|
-
"session_state": run_context.session_state,
|
|
1107
|
-
"dependencies": run_context.dependencies,
|
|
1108
1110
|
"debug_mode": debug_mode or self.debug_mode,
|
|
1109
1111
|
}
|
|
1110
1112
|
|
|
@@ -1194,9 +1196,6 @@ class Team:
|
|
|
1194
1196
|
"team": self,
|
|
1195
1197
|
"session": session,
|
|
1196
1198
|
"user_id": user_id,
|
|
1197
|
-
"session_state": run_context.session_state,
|
|
1198
|
-
"dependencies": run_context.dependencies,
|
|
1199
|
-
"metadata": run_context.metadata,
|
|
1200
1199
|
"debug_mode": debug_mode or self.debug_mode,
|
|
1201
1200
|
}
|
|
1202
1201
|
|
|
@@ -1291,9 +1290,6 @@ class Team:
|
|
|
1291
1290
|
"team": self,
|
|
1292
1291
|
"session": session,
|
|
1293
1292
|
"user_id": user_id,
|
|
1294
|
-
"session_state": run_context.session_state,
|
|
1295
|
-
"dependencies": run_context.dependencies,
|
|
1296
|
-
"metadata": run_context.metadata,
|
|
1297
1293
|
"debug_mode": debug_mode or self.debug_mode,
|
|
1298
1294
|
}
|
|
1299
1295
|
|
|
@@ -1377,9 +1373,6 @@ class Team:
|
|
|
1377
1373
|
"team": self,
|
|
1378
1374
|
"session": session,
|
|
1379
1375
|
"user_id": user_id,
|
|
1380
|
-
"session_state": run_context.session_state,
|
|
1381
|
-
"dependencies": run_context.dependencies,
|
|
1382
|
-
"metadata": run_context.metadata,
|
|
1383
1376
|
"debug_mode": debug_mode or self.debug_mode,
|
|
1384
1377
|
}
|
|
1385
1378
|
|
|
@@ -1556,7 +1549,9 @@ class Team:
|
|
|
1556
1549
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
1557
1550
|
|
|
1558
1551
|
# 5. Reason about the task if reasoning is enabled
|
|
1559
|
-
self._handle_reasoning(
|
|
1552
|
+
self._handle_reasoning(
|
|
1553
|
+
run_response=run_response, run_messages=run_messages, run_context=run_context
|
|
1554
|
+
)
|
|
1560
1555
|
|
|
1561
1556
|
# Check for cancellation before model call
|
|
1562
1557
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
@@ -1829,6 +1824,7 @@ class Team:
|
|
|
1829
1824
|
yield from self._handle_reasoning_stream(
|
|
1830
1825
|
run_response=run_response,
|
|
1831
1826
|
run_messages=run_messages,
|
|
1827
|
+
run_context=run_context,
|
|
1832
1828
|
stream_events=stream_events,
|
|
1833
1829
|
)
|
|
1834
1830
|
|
|
@@ -1919,6 +1915,7 @@ class Team:
|
|
|
1919
1915
|
stream_events=stream_events,
|
|
1920
1916
|
events_to_skip=self.events_to_skip, # type: ignore
|
|
1921
1917
|
store_events=self.store_events,
|
|
1918
|
+
get_memories_callback=lambda: self.get_user_memories(user_id=user_id),
|
|
1922
1919
|
)
|
|
1923
1920
|
|
|
1924
1921
|
raise_if_cancelled(run_response.run_id) # type: ignore
|
|
@@ -2061,7 +2058,6 @@ class Team:
|
|
|
2061
2058
|
*,
|
|
2062
2059
|
stream: Literal[False] = False,
|
|
2063
2060
|
stream_events: Optional[bool] = None,
|
|
2064
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
2065
2061
|
session_id: Optional[str] = None,
|
|
2066
2062
|
session_state: Optional[Dict[str, Any]] = None,
|
|
2067
2063
|
user_id: Optional[str] = None,
|
|
@@ -2088,7 +2084,6 @@ class Team:
|
|
|
2088
2084
|
*,
|
|
2089
2085
|
stream: Literal[True] = True,
|
|
2090
2086
|
stream_events: Optional[bool] = None,
|
|
2091
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
2092
2087
|
session_id: Optional[str] = None,
|
|
2093
2088
|
session_state: Optional[Dict[str, Any]] = None,
|
|
2094
2089
|
run_context: Optional[RunContext] = None,
|
|
@@ -2105,7 +2100,6 @@ class Team:
|
|
|
2105
2100
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
2106
2101
|
metadata: Optional[Dict[str, Any]] = None,
|
|
2107
2102
|
debug_mode: Optional[bool] = None,
|
|
2108
|
-
yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
|
|
2109
2103
|
yield_run_output: bool = False,
|
|
2110
2104
|
output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
|
|
2111
2105
|
**kwargs: Any,
|
|
@@ -2117,7 +2111,6 @@ class Team:
|
|
|
2117
2111
|
*,
|
|
2118
2112
|
stream: Optional[bool] = None,
|
|
2119
2113
|
stream_events: Optional[bool] = None,
|
|
2120
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
2121
2114
|
session_id: Optional[str] = None,
|
|
2122
2115
|
session_state: Optional[Dict[str, Any]] = None,
|
|
2123
2116
|
run_context: Optional[RunContext] = None,
|
|
@@ -2134,7 +2127,6 @@ class Team:
|
|
|
2134
2127
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
2135
2128
|
metadata: Optional[Dict[str, Any]] = None,
|
|
2136
2129
|
debug_mode: Optional[bool] = None,
|
|
2137
|
-
yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
|
|
2138
2130
|
yield_run_output: bool = False,
|
|
2139
2131
|
output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
|
|
2140
2132
|
**kwargs: Any,
|
|
@@ -2154,14 +2146,6 @@ class Team:
|
|
|
2154
2146
|
"add_history_to_context is True, but no database has been assigned to the team. History will not be added to the context."
|
|
2155
2147
|
)
|
|
2156
2148
|
|
|
2157
|
-
if yield_run_response is not None:
|
|
2158
|
-
warnings.warn(
|
|
2159
|
-
"The 'yield_run_response' parameter is deprecated and will be removed in future versions. Use 'yield_run_output' instead.",
|
|
2160
|
-
DeprecationWarning,
|
|
2161
|
-
stacklevel=2,
|
|
2162
|
-
)
|
|
2163
|
-
yield_run_output = yield_run_output or yield_run_response # For backwards compatibility
|
|
2164
|
-
|
|
2165
2149
|
# Register run for cancellation tracking
|
|
2166
2150
|
register_run(run_id) # type: ignore
|
|
2167
2151
|
|
|
@@ -2253,9 +2237,6 @@ class Team:
|
|
|
2253
2237
|
if stream is None:
|
|
2254
2238
|
stream = False if self.stream is None else self.stream
|
|
2255
2239
|
|
|
2256
|
-
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
2257
|
-
stream_events = stream_events or stream_intermediate_steps
|
|
2258
|
-
|
|
2259
2240
|
# Can't stream events if streaming is disabled
|
|
2260
2241
|
if stream is False:
|
|
2261
2242
|
stream_events = False
|
|
@@ -2473,7 +2454,9 @@ class Team:
|
|
|
2473
2454
|
|
|
2474
2455
|
await araise_if_cancelled(run_response.run_id) # type: ignore
|
|
2475
2456
|
# 7. Reason about the task if reasoning is enabled
|
|
2476
|
-
await self._ahandle_reasoning(
|
|
2457
|
+
await self._ahandle_reasoning(
|
|
2458
|
+
run_response=run_response, run_messages=run_messages, run_context=run_context
|
|
2459
|
+
)
|
|
2477
2460
|
|
|
2478
2461
|
# Check for cancellation before model call
|
|
2479
2462
|
await araise_if_cancelled(run_response.run_id) # type: ignore
|
|
@@ -2640,7 +2623,6 @@ class Team:
|
|
|
2640
2623
|
user_id: Optional[str] = None,
|
|
2641
2624
|
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
|
|
2642
2625
|
stream_events: bool = False,
|
|
2643
|
-
stream_intermediate_steps: bool = False,
|
|
2644
2626
|
yield_run_output: bool = False,
|
|
2645
2627
|
add_dependencies_to_context: Optional[bool] = None,
|
|
2646
2628
|
add_session_state_to_context: Optional[bool] = None,
|
|
@@ -2774,9 +2756,6 @@ class Team:
|
|
|
2774
2756
|
existing_task=memory_task,
|
|
2775
2757
|
)
|
|
2776
2758
|
|
|
2777
|
-
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
2778
|
-
stream_events = stream_events or stream_intermediate_steps
|
|
2779
|
-
|
|
2780
2759
|
# Yield the run started event
|
|
2781
2760
|
if stream_events:
|
|
2782
2761
|
yield handle_event( # type: ignore
|
|
@@ -2790,6 +2769,7 @@ class Team:
|
|
|
2790
2769
|
async for item in self._ahandle_reasoning_stream(
|
|
2791
2770
|
run_response=run_response,
|
|
2792
2771
|
run_messages=run_messages,
|
|
2772
|
+
run_context=run_context,
|
|
2793
2773
|
stream_events=stream_events,
|
|
2794
2774
|
):
|
|
2795
2775
|
await araise_if_cancelled(run_response.run_id) # type: ignore
|
|
@@ -2888,6 +2868,7 @@ class Team:
|
|
|
2888
2868
|
stream_events=stream_events,
|
|
2889
2869
|
events_to_skip=self.events_to_skip, # type: ignore
|
|
2890
2870
|
store_events=self.store_events,
|
|
2871
|
+
get_memories_callback=lambda: self.aget_user_memories(user_id=user_id),
|
|
2891
2872
|
):
|
|
2892
2873
|
yield event
|
|
2893
2874
|
|
|
@@ -3032,7 +3013,6 @@ class Team:
|
|
|
3032
3013
|
*,
|
|
3033
3014
|
stream: Literal[False] = False,
|
|
3034
3015
|
stream_events: Optional[bool] = None,
|
|
3035
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
3036
3016
|
session_id: Optional[str] = None,
|
|
3037
3017
|
session_state: Optional[Dict[str, Any]] = None,
|
|
3038
3018
|
run_id: Optional[str] = None,
|
|
@@ -3060,7 +3040,6 @@ class Team:
|
|
|
3060
3040
|
*,
|
|
3061
3041
|
stream: Literal[True] = True,
|
|
3062
3042
|
stream_events: Optional[bool] = None,
|
|
3063
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
3064
3043
|
session_id: Optional[str] = None,
|
|
3065
3044
|
session_state: Optional[Dict[str, Any]] = None,
|
|
3066
3045
|
run_id: Optional[str] = None,
|
|
@@ -3077,7 +3056,6 @@ class Team:
|
|
|
3077
3056
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
3078
3057
|
metadata: Optional[Dict[str, Any]] = None,
|
|
3079
3058
|
debug_mode: Optional[bool] = None,
|
|
3080
|
-
yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
|
|
3081
3059
|
yield_run_output: bool = False,
|
|
3082
3060
|
output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
|
|
3083
3061
|
**kwargs: Any,
|
|
@@ -3089,7 +3067,6 @@ class Team:
|
|
|
3089
3067
|
*,
|
|
3090
3068
|
stream: Optional[bool] = None,
|
|
3091
3069
|
stream_events: Optional[bool] = None,
|
|
3092
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
3093
3070
|
session_id: Optional[str] = None,
|
|
3094
3071
|
session_state: Optional[Dict[str, Any]] = None,
|
|
3095
3072
|
run_id: Optional[str] = None,
|
|
@@ -3106,7 +3083,6 @@ class Team:
|
|
|
3106
3083
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
3107
3084
|
metadata: Optional[Dict[str, Any]] = None,
|
|
3108
3085
|
debug_mode: Optional[bool] = None,
|
|
3109
|
-
yield_run_response: Optional[bool] = None, # To be deprecated: use yield_run_output instead
|
|
3110
3086
|
yield_run_output: bool = False,
|
|
3111
3087
|
output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
|
|
3112
3088
|
**kwargs: Any,
|
|
@@ -3121,15 +3097,6 @@ class Team:
|
|
|
3121
3097
|
"add_history_to_context is True, but no database has been assigned to the team. History will not be added to the context."
|
|
3122
3098
|
)
|
|
3123
3099
|
|
|
3124
|
-
if yield_run_response is not None:
|
|
3125
|
-
warnings.warn(
|
|
3126
|
-
"The 'yield_run_response' parameter is deprecated and will be removed in future versions. Use 'yield_run_output' instead.",
|
|
3127
|
-
DeprecationWarning,
|
|
3128
|
-
stacklevel=2,
|
|
3129
|
-
)
|
|
3130
|
-
|
|
3131
|
-
yield_run_output = yield_run_output or yield_run_response # For backwards compatibility
|
|
3132
|
-
|
|
3133
3100
|
background_tasks = kwargs.pop("background_tasks", None)
|
|
3134
3101
|
if background_tasks is not None:
|
|
3135
3102
|
from fastapi import BackgroundTasks
|
|
@@ -3181,15 +3148,6 @@ class Team:
|
|
|
3181
3148
|
if stream is None:
|
|
3182
3149
|
stream = False if self.stream is None else self.stream
|
|
3183
3150
|
|
|
3184
|
-
# Considering both stream_events and stream_intermediate_steps (deprecated)
|
|
3185
|
-
if stream_intermediate_steps is not None:
|
|
3186
|
-
warnings.warn(
|
|
3187
|
-
"The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Use 'stream_events' instead.",
|
|
3188
|
-
DeprecationWarning,
|
|
3189
|
-
stacklevel=2,
|
|
3190
|
-
)
|
|
3191
|
-
stream_events = stream_events or stream_intermediate_steps
|
|
3192
|
-
|
|
3193
3151
|
# Can't stream events if streaming is disabled
|
|
3194
3152
|
if stream is False:
|
|
3195
3153
|
stream_events = False
|
|
@@ -3252,7 +3210,7 @@ class Team:
|
|
|
3252
3210
|
run_response.metrics = Metrics()
|
|
3253
3211
|
run_response.metrics.start_timer()
|
|
3254
3212
|
|
|
3255
|
-
yield_run_output =
|
|
3213
|
+
yield_run_output = yield_run_output
|
|
3256
3214
|
|
|
3257
3215
|
if stream:
|
|
3258
3216
|
return self._arun_stream( # type: ignore
|
|
@@ -3397,6 +3355,70 @@ class Team:
|
|
|
3397
3355
|
send_media_to_model=self.send_media_to_model,
|
|
3398
3356
|
compression_manager=self.compression_manager if self.compress_tool_results else None,
|
|
3399
3357
|
):
|
|
3358
|
+
# Handle LLM request events and compression events from ModelResponse
|
|
3359
|
+
if isinstance(model_response_event, ModelResponse):
|
|
3360
|
+
if model_response_event.event == ModelResponseEvent.model_request_started.value:
|
|
3361
|
+
if stream_events:
|
|
3362
|
+
yield handle_event( # type: ignore
|
|
3363
|
+
create_team_model_request_started_event(
|
|
3364
|
+
from_run_response=run_response,
|
|
3365
|
+
model=self.model.id,
|
|
3366
|
+
model_provider=self.model.provider,
|
|
3367
|
+
),
|
|
3368
|
+
run_response,
|
|
3369
|
+
events_to_skip=self.events_to_skip,
|
|
3370
|
+
store_events=self.store_events,
|
|
3371
|
+
)
|
|
3372
|
+
continue
|
|
3373
|
+
|
|
3374
|
+
if model_response_event.event == ModelResponseEvent.model_request_completed.value:
|
|
3375
|
+
if stream_events:
|
|
3376
|
+
yield handle_event( # type: ignore
|
|
3377
|
+
create_team_model_request_completed_event(
|
|
3378
|
+
from_run_response=run_response,
|
|
3379
|
+
model=self.model.id,
|
|
3380
|
+
model_provider=self.model.provider,
|
|
3381
|
+
input_tokens=model_response_event.input_tokens,
|
|
3382
|
+
output_tokens=model_response_event.output_tokens,
|
|
3383
|
+
total_tokens=model_response_event.total_tokens,
|
|
3384
|
+
time_to_first_token=model_response_event.time_to_first_token,
|
|
3385
|
+
reasoning_tokens=model_response_event.reasoning_tokens,
|
|
3386
|
+
cache_read_tokens=model_response_event.cache_read_tokens,
|
|
3387
|
+
cache_write_tokens=model_response_event.cache_write_tokens,
|
|
3388
|
+
),
|
|
3389
|
+
run_response,
|
|
3390
|
+
events_to_skip=self.events_to_skip,
|
|
3391
|
+
store_events=self.store_events,
|
|
3392
|
+
)
|
|
3393
|
+
continue
|
|
3394
|
+
|
|
3395
|
+
# Handle compression events
|
|
3396
|
+
if model_response_event.event == ModelResponseEvent.compression_started.value:
|
|
3397
|
+
if stream_events:
|
|
3398
|
+
yield handle_event( # type: ignore
|
|
3399
|
+
create_team_compression_started_event(from_run_response=run_response),
|
|
3400
|
+
run_response,
|
|
3401
|
+
events_to_skip=self.events_to_skip,
|
|
3402
|
+
store_events=self.store_events,
|
|
3403
|
+
)
|
|
3404
|
+
continue
|
|
3405
|
+
|
|
3406
|
+
if model_response_event.event == ModelResponseEvent.compression_completed.value:
|
|
3407
|
+
if stream_events:
|
|
3408
|
+
stats = model_response_event.compression_stats or {}
|
|
3409
|
+
yield handle_event( # type: ignore
|
|
3410
|
+
create_team_compression_completed_event(
|
|
3411
|
+
from_run_response=run_response,
|
|
3412
|
+
tool_results_compressed=stats.get("tool_results_compressed"),
|
|
3413
|
+
original_size=stats.get("original_size"),
|
|
3414
|
+
compressed_size=stats.get("compressed_size"),
|
|
3415
|
+
),
|
|
3416
|
+
run_response,
|
|
3417
|
+
events_to_skip=self.events_to_skip,
|
|
3418
|
+
store_events=self.store_events,
|
|
3419
|
+
)
|
|
3420
|
+
continue
|
|
3421
|
+
|
|
3400
3422
|
yield from self._handle_model_response_chunk(
|
|
3401
3423
|
session=session,
|
|
3402
3424
|
run_response=run_response,
|
|
@@ -3492,6 +3514,70 @@ class Team:
|
|
|
3492
3514
|
compression_manager=self.compression_manager if self.compress_tool_results else None,
|
|
3493
3515
|
) # type: ignore
|
|
3494
3516
|
async for model_response_event in model_stream:
|
|
3517
|
+
# Handle LLM request events and compression events from ModelResponse
|
|
3518
|
+
if isinstance(model_response_event, ModelResponse):
|
|
3519
|
+
if model_response_event.event == ModelResponseEvent.model_request_started.value:
|
|
3520
|
+
if stream_events:
|
|
3521
|
+
yield handle_event( # type: ignore
|
|
3522
|
+
create_team_model_request_started_event(
|
|
3523
|
+
from_run_response=run_response,
|
|
3524
|
+
model=self.model.id,
|
|
3525
|
+
model_provider=self.model.provider,
|
|
3526
|
+
),
|
|
3527
|
+
run_response,
|
|
3528
|
+
events_to_skip=self.events_to_skip,
|
|
3529
|
+
store_events=self.store_events,
|
|
3530
|
+
)
|
|
3531
|
+
continue
|
|
3532
|
+
|
|
3533
|
+
if model_response_event.event == ModelResponseEvent.model_request_completed.value:
|
|
3534
|
+
if stream_events:
|
|
3535
|
+
yield handle_event( # type: ignore
|
|
3536
|
+
create_team_model_request_completed_event(
|
|
3537
|
+
from_run_response=run_response,
|
|
3538
|
+
model=self.model.id,
|
|
3539
|
+
model_provider=self.model.provider,
|
|
3540
|
+
input_tokens=model_response_event.input_tokens,
|
|
3541
|
+
output_tokens=model_response_event.output_tokens,
|
|
3542
|
+
total_tokens=model_response_event.total_tokens,
|
|
3543
|
+
time_to_first_token=model_response_event.time_to_first_token,
|
|
3544
|
+
reasoning_tokens=model_response_event.reasoning_tokens,
|
|
3545
|
+
cache_read_tokens=model_response_event.cache_read_tokens,
|
|
3546
|
+
cache_write_tokens=model_response_event.cache_write_tokens,
|
|
3547
|
+
),
|
|
3548
|
+
run_response,
|
|
3549
|
+
events_to_skip=self.events_to_skip,
|
|
3550
|
+
store_events=self.store_events,
|
|
3551
|
+
)
|
|
3552
|
+
continue
|
|
3553
|
+
|
|
3554
|
+
# Handle compression events
|
|
3555
|
+
if model_response_event.event == ModelResponseEvent.compression_started.value:
|
|
3556
|
+
if stream_events:
|
|
3557
|
+
yield handle_event( # type: ignore
|
|
3558
|
+
create_team_compression_started_event(from_run_response=run_response),
|
|
3559
|
+
run_response,
|
|
3560
|
+
events_to_skip=self.events_to_skip,
|
|
3561
|
+
store_events=self.store_events,
|
|
3562
|
+
)
|
|
3563
|
+
continue
|
|
3564
|
+
|
|
3565
|
+
if model_response_event.event == ModelResponseEvent.compression_completed.value:
|
|
3566
|
+
if stream_events:
|
|
3567
|
+
stats = model_response_event.compression_stats or {}
|
|
3568
|
+
yield handle_event( # type: ignore
|
|
3569
|
+
create_team_compression_completed_event(
|
|
3570
|
+
from_run_response=run_response,
|
|
3571
|
+
tool_results_compressed=stats.get("tool_results_compressed"),
|
|
3572
|
+
original_size=stats.get("original_size"),
|
|
3573
|
+
compressed_size=stats.get("compressed_size"),
|
|
3574
|
+
),
|
|
3575
|
+
run_response,
|
|
3576
|
+
events_to_skip=self.events_to_skip,
|
|
3577
|
+
store_events=self.store_events,
|
|
3578
|
+
)
|
|
3579
|
+
continue
|
|
3580
|
+
|
|
3495
3581
|
for event in self._handle_model_response_chunk(
|
|
3496
3582
|
session=session,
|
|
3497
3583
|
run_response=run_response,
|
|
@@ -3984,7 +4070,7 @@ class Team:
|
|
|
3984
4070
|
user_message_str is not None
|
|
3985
4071
|
and user_message_str.strip() != ""
|
|
3986
4072
|
and self.memory_manager is not None
|
|
3987
|
-
and self.
|
|
4073
|
+
and self.update_memory_on_run
|
|
3988
4074
|
):
|
|
3989
4075
|
log_debug("Managing user memories")
|
|
3990
4076
|
self.memory_manager.create_user_memories(
|
|
@@ -4005,7 +4091,7 @@ class Team:
|
|
|
4005
4091
|
user_message_str is not None
|
|
4006
4092
|
and user_message_str.strip() != ""
|
|
4007
4093
|
and self.memory_manager is not None
|
|
4008
|
-
and self.
|
|
4094
|
+
and self.update_memory_on_run
|
|
4009
4095
|
):
|
|
4010
4096
|
log_debug("Managing user memories")
|
|
4011
4097
|
await self.memory_manager.acreate_user_memories(
|
|
@@ -4042,7 +4128,7 @@ class Team:
|
|
|
4042
4128
|
if (
|
|
4043
4129
|
run_messages.user_message is not None
|
|
4044
4130
|
and self.memory_manager is not None
|
|
4045
|
-
and self.
|
|
4131
|
+
and self.update_memory_on_run
|
|
4046
4132
|
and not self.enable_agentic_memory
|
|
4047
4133
|
):
|
|
4048
4134
|
log_debug("Starting memory creation in background task.")
|
|
@@ -4074,7 +4160,7 @@ class Team:
|
|
|
4074
4160
|
if (
|
|
4075
4161
|
run_messages.user_message is not None
|
|
4076
4162
|
and self.memory_manager is not None
|
|
4077
|
-
and self.
|
|
4163
|
+
and self.update_memory_on_run
|
|
4078
4164
|
and not self.enable_agentic_memory
|
|
4079
4165
|
):
|
|
4080
4166
|
log_debug("Starting memory creation in background thread.")
|
|
@@ -4497,8 +4583,6 @@ class Team:
|
|
|
4497
4583
|
add_session_state_to_context: Optional[bool] = None,
|
|
4498
4584
|
dependencies: Optional[Dict[str, Any]] = None,
|
|
4499
4585
|
metadata: Optional[Dict[str, Any]] = None,
|
|
4500
|
-
stream_events: Optional[bool] = None,
|
|
4501
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
4502
4586
|
debug_mode: Optional[bool] = None,
|
|
4503
4587
|
show_message: bool = True,
|
|
4504
4588
|
show_reasoning: bool = True,
|
|
@@ -4508,20 +4592,6 @@ class Team:
|
|
|
4508
4592
|
tags_to_include_in_markdown: Optional[Set[str]] = None,
|
|
4509
4593
|
**kwargs: Any,
|
|
4510
4594
|
) -> None:
|
|
4511
|
-
if stream_events is not None:
|
|
4512
|
-
warnings.warn(
|
|
4513
|
-
"The 'stream_events' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the print_response function.",
|
|
4514
|
-
DeprecationWarning,
|
|
4515
|
-
stacklevel=2,
|
|
4516
|
-
)
|
|
4517
|
-
|
|
4518
|
-
if stream_intermediate_steps is not None:
|
|
4519
|
-
warnings.warn(
|
|
4520
|
-
"The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the print_response function.",
|
|
4521
|
-
DeprecationWarning,
|
|
4522
|
-
stacklevel=2,
|
|
4523
|
-
)
|
|
4524
|
-
|
|
4525
4595
|
if self._has_async_db():
|
|
4526
4596
|
raise Exception(
|
|
4527
4597
|
"This method is not supported with an async DB. Please use the async version of this method."
|
|
@@ -4623,8 +4693,6 @@ class Team:
|
|
|
4623
4693
|
add_dependencies_to_context: Optional[bool] = None,
|
|
4624
4694
|
add_session_state_to_context: Optional[bool] = None,
|
|
4625
4695
|
metadata: Optional[Dict[str, Any]] = None,
|
|
4626
|
-
stream_events: Optional[bool] = None,
|
|
4627
|
-
stream_intermediate_steps: Optional[bool] = None,
|
|
4628
4696
|
debug_mode: Optional[bool] = None,
|
|
4629
4697
|
show_message: bool = True,
|
|
4630
4698
|
show_reasoning: bool = True,
|
|
@@ -4634,20 +4702,6 @@ class Team:
|
|
|
4634
4702
|
tags_to_include_in_markdown: Optional[Set[str]] = None,
|
|
4635
4703
|
**kwargs: Any,
|
|
4636
4704
|
) -> None:
|
|
4637
|
-
if stream_events is not None:
|
|
4638
|
-
warnings.warn(
|
|
4639
|
-
"The 'stream_events' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the aprint_response function.",
|
|
4640
|
-
DeprecationWarning,
|
|
4641
|
-
stacklevel=2,
|
|
4642
|
-
)
|
|
4643
|
-
|
|
4644
|
-
if stream_intermediate_steps is not None:
|
|
4645
|
-
warnings.warn(
|
|
4646
|
-
"The 'stream_intermediate_steps' parameter is deprecated and will be removed in future versions. Event streaming is always enabled using the aprint_response function.",
|
|
4647
|
-
DeprecationWarning,
|
|
4648
|
-
stacklevel=2,
|
|
4649
|
-
)
|
|
4650
|
-
|
|
4651
4705
|
if not tags_to_include_in_markdown:
|
|
4652
4706
|
tags_to_include_in_markdown = {"think", "thinking"}
|
|
4653
4707
|
|
|
@@ -4861,40 +4915,56 @@ class Team:
|
|
|
4861
4915
|
# Helpers
|
|
4862
4916
|
###########################################################################
|
|
4863
4917
|
|
|
4864
|
-
def _handle_reasoning(
|
|
4918
|
+
def _handle_reasoning(
|
|
4919
|
+
self, run_response: TeamRunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
|
|
4920
|
+
) -> None:
|
|
4865
4921
|
if self.reasoning or self.reasoning_model is not None:
|
|
4866
4922
|
reasoning_generator = self._reason(
|
|
4867
|
-
run_response=run_response, run_messages=run_messages, stream_events=False
|
|
4923
|
+
run_response=run_response, run_messages=run_messages, run_context=run_context, stream_events=False
|
|
4868
4924
|
)
|
|
4869
4925
|
|
|
4870
4926
|
# Consume the generator without yielding
|
|
4871
4927
|
deque(reasoning_generator, maxlen=0)
|
|
4872
4928
|
|
|
4873
4929
|
def _handle_reasoning_stream(
|
|
4874
|
-
self,
|
|
4930
|
+
self,
|
|
4931
|
+
run_response: TeamRunOutput,
|
|
4932
|
+
run_messages: RunMessages,
|
|
4933
|
+
run_context: Optional[RunContext] = None,
|
|
4934
|
+
stream_events: bool = False,
|
|
4875
4935
|
) -> Iterator[TeamRunOutputEvent]:
|
|
4876
4936
|
if self.reasoning or self.reasoning_model is not None:
|
|
4877
4937
|
reasoning_generator = self._reason(
|
|
4878
4938
|
run_response=run_response,
|
|
4879
4939
|
run_messages=run_messages,
|
|
4940
|
+
run_context=run_context,
|
|
4880
4941
|
stream_events=stream_events,
|
|
4881
4942
|
)
|
|
4882
4943
|
yield from reasoning_generator
|
|
4883
4944
|
|
|
4884
|
-
async def _ahandle_reasoning(
|
|
4945
|
+
async def _ahandle_reasoning(
|
|
4946
|
+
self, run_response: TeamRunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
|
|
4947
|
+
) -> None:
|
|
4885
4948
|
if self.reasoning or self.reasoning_model is not None:
|
|
4886
|
-
reason_generator = self._areason(
|
|
4949
|
+
reason_generator = self._areason(
|
|
4950
|
+
run_response=run_response, run_messages=run_messages, run_context=run_context, stream_events=False
|
|
4951
|
+
)
|
|
4887
4952
|
# Consume the generator without yielding
|
|
4888
4953
|
async for _ in reason_generator:
|
|
4889
4954
|
pass
|
|
4890
4955
|
|
|
4891
4956
|
async def _ahandle_reasoning_stream(
|
|
4892
|
-
self,
|
|
4957
|
+
self,
|
|
4958
|
+
run_response: TeamRunOutput,
|
|
4959
|
+
run_messages: RunMessages,
|
|
4960
|
+
run_context: Optional[RunContext] = None,
|
|
4961
|
+
stream_events: bool = False,
|
|
4893
4962
|
) -> AsyncIterator[TeamRunOutputEvent]:
|
|
4894
4963
|
if self.reasoning or self.reasoning_model is not None:
|
|
4895
4964
|
reason_generator = self._areason(
|
|
4896
4965
|
run_response=run_response,
|
|
4897
4966
|
run_messages=run_messages,
|
|
4967
|
+
run_context=run_context,
|
|
4898
4968
|
stream_events=stream_events,
|
|
4899
4969
|
)
|
|
4900
4970
|
async for item in reason_generator:
|
|
@@ -4938,14 +5008,6 @@ class Team:
|
|
|
4938
5008
|
if session.session_data is not None:
|
|
4939
5009
|
session.session_data["session_metrics"] = session_metrics
|
|
4940
5010
|
|
|
4941
|
-
def _get_reasoning_agent(self, reasoning_model: Model) -> Optional[Agent]:
|
|
4942
|
-
return Agent(
|
|
4943
|
-
model=reasoning_model,
|
|
4944
|
-
telemetry=self.telemetry,
|
|
4945
|
-
debug_mode=self.debug_mode,
|
|
4946
|
-
debug_level=self.debug_level,
|
|
4947
|
-
)
|
|
4948
|
-
|
|
4949
5011
|
def _format_reasoning_step_content(self, run_response: TeamRunOutput, reasoning_step: ReasoningStep) -> str:
|
|
4950
5012
|
"""Format content for a reasoning step without changing any existing logic."""
|
|
4951
5013
|
step_content = ""
|
|
@@ -5054,7 +5116,8 @@ class Team:
|
|
|
5054
5116
|
self,
|
|
5055
5117
|
run_response: TeamRunOutput,
|
|
5056
5118
|
run_messages: RunMessages,
|
|
5057
|
-
|
|
5119
|
+
run_context: Optional[RunContext] = None,
|
|
5120
|
+
stream_events: bool = False,
|
|
5058
5121
|
) -> Iterator[TeamRunOutputEvent]:
|
|
5059
5122
|
"""
|
|
5060
5123
|
Run reasoning using the ReasoningManager.
|
|
@@ -5084,9 +5147,7 @@ class Team:
|
|
|
5084
5147
|
telemetry=self.telemetry,
|
|
5085
5148
|
debug_mode=self.debug_mode,
|
|
5086
5149
|
debug_level=self.debug_level,
|
|
5087
|
-
|
|
5088
|
-
dependencies=self.dependencies,
|
|
5089
|
-
metadata=self.metadata,
|
|
5150
|
+
run_context=run_context,
|
|
5090
5151
|
)
|
|
5091
5152
|
)
|
|
5092
5153
|
|
|
@@ -5098,7 +5159,8 @@ class Team:
|
|
|
5098
5159
|
self,
|
|
5099
5160
|
run_response: TeamRunOutput,
|
|
5100
5161
|
run_messages: RunMessages,
|
|
5101
|
-
|
|
5162
|
+
run_context: Optional[RunContext] = None,
|
|
5163
|
+
stream_events: bool = False,
|
|
5102
5164
|
) -> AsyncIterator[TeamRunOutputEvent]:
|
|
5103
5165
|
"""
|
|
5104
5166
|
Run reasoning asynchronously using the ReasoningManager.
|
|
@@ -5128,9 +5190,7 @@ class Team:
|
|
|
5128
5190
|
telemetry=self.telemetry,
|
|
5129
5191
|
debug_mode=self.debug_mode,
|
|
5130
5192
|
debug_level=self.debug_level,
|
|
5131
|
-
|
|
5132
|
-
dependencies=self.dependencies,
|
|
5133
|
-
metadata=self.metadata,
|
|
5193
|
+
run_context=run_context,
|
|
5134
5194
|
)
|
|
5135
5195
|
)
|
|
5136
5196
|
|
|
@@ -5286,36 +5346,20 @@ class Team:
|
|
|
5286
5346
|
)
|
|
5287
5347
|
)
|
|
5288
5348
|
|
|
5289
|
-
|
|
5290
|
-
|
|
5291
|
-
|
|
5292
|
-
|
|
5293
|
-
if
|
|
5294
|
-
|
|
5295
|
-
|
|
5296
|
-
|
|
5349
|
+
# Add tools for accessing knowledge
|
|
5350
|
+
if self.knowledge is not None and self.search_knowledge:
|
|
5351
|
+
# Use knowledge protocol's get_tools method
|
|
5352
|
+
get_tools_fn = getattr(self.knowledge, "get_tools", None)
|
|
5353
|
+
if callable(get_tools_fn):
|
|
5354
|
+
knowledge_tools = get_tools_fn(
|
|
5355
|
+
run_response=run_response,
|
|
5356
|
+
run_context=run_context,
|
|
5357
|
+
knowledge_filters=run_context.knowledge_filters,
|
|
5358
|
+
async_mode=async_mode,
|
|
5359
|
+
enable_agentic_filters=self.enable_agentic_knowledge_filters,
|
|
5360
|
+
agent=self,
|
|
5297
5361
|
)
|
|
5298
|
-
|
|
5299
|
-
if self.search_knowledge:
|
|
5300
|
-
# Use async or sync search based on async_mode
|
|
5301
|
-
if self.enable_agentic_knowledge_filters:
|
|
5302
|
-
_tools.append(
|
|
5303
|
-
self._get_search_knowledge_base_with_agentic_filters_function(
|
|
5304
|
-
run_response=run_response,
|
|
5305
|
-
knowledge_filters=run_context.knowledge_filters,
|
|
5306
|
-
async_mode=async_mode,
|
|
5307
|
-
run_context=run_context,
|
|
5308
|
-
)
|
|
5309
|
-
)
|
|
5310
|
-
else:
|
|
5311
|
-
_tools.append(
|
|
5312
|
-
self._get_search_knowledge_base_function(
|
|
5313
|
-
run_response=run_response,
|
|
5314
|
-
knowledge_filters=run_context.knowledge_filters,
|
|
5315
|
-
async_mode=async_mode,
|
|
5316
|
-
run_context=run_context,
|
|
5317
|
-
)
|
|
5318
|
-
)
|
|
5362
|
+
_tools.extend(knowledge_tools)
|
|
5319
5363
|
|
|
5320
5364
|
if self.knowledge is not None and self.update_knowledge:
|
|
5321
5365
|
_tools.append(self.add_to_knowledge)
|
|
@@ -5394,8 +5438,10 @@ class Team:
|
|
|
5394
5438
|
_func = _func.model_copy(deep=True)
|
|
5395
5439
|
|
|
5396
5440
|
_func._team = self
|
|
5397
|
-
|
|
5398
|
-
if strict
|
|
5441
|
+
# Respect the function's explicit strict setting if set
|
|
5442
|
+
effective_strict = strict if _func.strict is None else _func.strict
|
|
5443
|
+
_func.process_entrypoint(strict=effective_strict)
|
|
5444
|
+
if strict and _func.strict is None:
|
|
5399
5445
|
_func.strict = True
|
|
5400
5446
|
if self.tool_hooks:
|
|
5401
5447
|
_func.tool_hooks = self.tool_hooks
|
|
@@ -5414,7 +5460,9 @@ class Team:
|
|
|
5414
5460
|
_function_names.append(tool.name)
|
|
5415
5461
|
tool = tool.model_copy(deep=True)
|
|
5416
5462
|
tool._team = self
|
|
5417
|
-
|
|
5463
|
+
# Respect the function's explicit strict setting if set
|
|
5464
|
+
effective_strict = strict if tool.strict is None else tool.strict
|
|
5465
|
+
tool.process_entrypoint(strict=effective_strict)
|
|
5418
5466
|
if strict and tool.strict is None:
|
|
5419
5467
|
tool.strict = True
|
|
5420
5468
|
if self.tool_hooks:
|
|
@@ -5466,8 +5514,6 @@ class Team:
|
|
|
5466
5514
|
for func in _functions: # type: ignore
|
|
5467
5515
|
if isinstance(func, Function):
|
|
5468
5516
|
func._run_context = run_context
|
|
5469
|
-
func._session_state = run_context.session_state
|
|
5470
|
-
func._dependencies = run_context.dependencies
|
|
5471
5517
|
func._images = joint_images
|
|
5472
5518
|
func._files = joint_files
|
|
5473
5519
|
func._audios = joint_audios
|
|
@@ -5515,16 +5561,12 @@ class Team:
|
|
|
5515
5561
|
self,
|
|
5516
5562
|
session: TeamSession,
|
|
5517
5563
|
run_context: Optional[RunContext] = None,
|
|
5518
|
-
user_id: Optional[str] = None,
|
|
5519
5564
|
audio: Optional[Sequence[Audio]] = None,
|
|
5520
5565
|
images: Optional[Sequence[Image]] = None,
|
|
5521
5566
|
videos: Optional[Sequence[Video]] = None,
|
|
5522
5567
|
files: Optional[Sequence[File]] = None,
|
|
5523
5568
|
tools: Optional[List[Union[Function, dict]]] = None,
|
|
5524
5569
|
add_session_state_to_context: Optional[bool] = None,
|
|
5525
|
-
session_state: Optional[Dict[str, Any]] = None, # Deprecated
|
|
5526
|
-
dependencies: Optional[Dict[str, Any]] = None, # Deprecated
|
|
5527
|
-
metadata: Optional[Dict[str, Any]] = None, # Deprecated
|
|
5528
5570
|
) -> Optional[Message]:
|
|
5529
5571
|
"""Get the system message for the team.
|
|
5530
5572
|
|
|
@@ -5533,11 +5575,9 @@ class Team:
|
|
|
5533
5575
|
3. Build and return the default system message for the Team.
|
|
5534
5576
|
"""
|
|
5535
5577
|
|
|
5536
|
-
#
|
|
5537
|
-
if run_context
|
|
5538
|
-
|
|
5539
|
-
dependencies = run_context.dependencies or dependencies
|
|
5540
|
-
metadata = run_context.metadata or metadata
|
|
5578
|
+
# Extract values from run_context
|
|
5579
|
+
session_state = run_context.session_state if run_context else None
|
|
5580
|
+
user_id = run_context.user_id if run_context else None
|
|
5541
5581
|
|
|
5542
5582
|
# Get output_schema from run_context
|
|
5543
5583
|
output_schema = run_context.output_schema if run_context else None
|
|
@@ -5565,10 +5605,7 @@ class Team:
|
|
|
5565
5605
|
if self.resolve_in_context:
|
|
5566
5606
|
sys_message_content = self._format_message_with_state_variables(
|
|
5567
5607
|
sys_message_content,
|
|
5568
|
-
|
|
5569
|
-
session_state=session_state,
|
|
5570
|
-
dependencies=dependencies,
|
|
5571
|
-
metadata=metadata,
|
|
5608
|
+
run_context=run_context,
|
|
5572
5609
|
)
|
|
5573
5610
|
|
|
5574
5611
|
# type: ignore
|
|
@@ -5638,26 +5675,15 @@ class Team:
|
|
|
5638
5675
|
if self.name is not None and self.add_name_to_context:
|
|
5639
5676
|
additional_information.append(f"Your name is: {self.name}.")
|
|
5640
5677
|
|
|
5641
|
-
|
|
5642
|
-
|
|
5643
|
-
|
|
5644
|
-
|
|
5645
|
-
|
|
5646
|
-
|
|
5647
|
-
The knowledge base contains documents with these metadata filters: {valid_filters_str}.
|
|
5648
|
-
Always use filters when the user query indicates specific metadata.
|
|
5649
|
-
Examples:
|
|
5650
|
-
1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
|
|
5651
|
-
2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
|
|
5652
|
-
4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
|
|
5653
|
-
General Guidelines:
|
|
5654
|
-
- Always analyze the user query to identify relevant metadata.
|
|
5655
|
-
- Use the most specific filter(s) possible to narrow down results.
|
|
5656
|
-
- If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
|
|
5657
|
-
- Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
|
|
5658
|
-
You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
|
|
5659
|
-
""")
|
|
5678
|
+
# Add knowledge context using protocol's build_context
|
|
5679
|
+
if self.knowledge is not None:
|
|
5680
|
+
build_context_fn = getattr(self.knowledge, "build_context", None)
|
|
5681
|
+
if callable(build_context_fn):
|
|
5682
|
+
knowledge_context = build_context_fn(
|
|
5683
|
+
enable_agentic_filters=self.enable_agentic_knowledge_filters,
|
|
5660
5684
|
)
|
|
5685
|
+
if knowledge_context:
|
|
5686
|
+
additional_information.append(knowledge_context)
|
|
5661
5687
|
|
|
5662
5688
|
# 2 Build the default system message for the Agent.
|
|
5663
5689
|
system_message_content: str = ""
|
|
@@ -5768,15 +5794,22 @@ class Team:
|
|
|
5768
5794
|
if self.role is not None:
|
|
5769
5795
|
system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
|
|
5770
5796
|
|
|
5771
|
-
# 3.3.5 Then add instructions for the
|
|
5797
|
+
# 3.3.5 Then add instructions for the Team
|
|
5772
5798
|
if len(instructions) > 0:
|
|
5773
|
-
|
|
5774
|
-
|
|
5775
|
-
|
|
5776
|
-
|
|
5799
|
+
if self.use_instruction_tags:
|
|
5800
|
+
system_message_content += "<instructions>"
|
|
5801
|
+
if len(instructions) > 1:
|
|
5802
|
+
for _upi in instructions:
|
|
5803
|
+
system_message_content += f"\n- {_upi}"
|
|
5804
|
+
else:
|
|
5805
|
+
system_message_content += "\n" + instructions[0]
|
|
5806
|
+
system_message_content += "\n</instructions>\n\n"
|
|
5777
5807
|
else:
|
|
5778
|
-
|
|
5779
|
-
|
|
5808
|
+
if len(instructions) > 1:
|
|
5809
|
+
for _upi in instructions:
|
|
5810
|
+
system_message_content += f"- {_upi}\n"
|
|
5811
|
+
else:
|
|
5812
|
+
system_message_content += instructions[0] + "\n\n"
|
|
5780
5813
|
# 3.3.6 Add additional information
|
|
5781
5814
|
if len(additional_information) > 0:
|
|
5782
5815
|
system_message_content += "<additional_information>"
|
|
@@ -5792,10 +5825,7 @@ class Team:
|
|
|
5792
5825
|
if self.resolve_in_context:
|
|
5793
5826
|
system_message_content = self._format_message_with_state_variables(
|
|
5794
5827
|
system_message_content,
|
|
5795
|
-
|
|
5796
|
-
session_state=session_state,
|
|
5797
|
-
dependencies=dependencies,
|
|
5798
|
-
metadata=metadata,
|
|
5828
|
+
run_context=run_context,
|
|
5799
5829
|
)
|
|
5800
5830
|
|
|
5801
5831
|
system_message_from_model = self.model.get_system_message_for_model(tools)
|
|
@@ -5832,24 +5862,18 @@ class Team:
|
|
|
5832
5862
|
self,
|
|
5833
5863
|
session: TeamSession,
|
|
5834
5864
|
run_context: Optional[RunContext] = None,
|
|
5835
|
-
user_id: Optional[str] = None,
|
|
5836
5865
|
audio: Optional[Sequence[Audio]] = None,
|
|
5837
5866
|
images: Optional[Sequence[Image]] = None,
|
|
5838
5867
|
videos: Optional[Sequence[Video]] = None,
|
|
5839
5868
|
files: Optional[Sequence[File]] = None,
|
|
5840
5869
|
tools: Optional[List[Union[Function, dict]]] = None,
|
|
5841
5870
|
add_session_state_to_context: Optional[bool] = None,
|
|
5842
|
-
session_state: Optional[Dict[str, Any]] = None, # Deprecated
|
|
5843
|
-
dependencies: Optional[Dict[str, Any]] = None, # Deprecated
|
|
5844
|
-
metadata: Optional[Dict[str, Any]] = None, # Deprecated
|
|
5845
5871
|
) -> Optional[Message]:
|
|
5846
5872
|
"""Get the system message for the team."""
|
|
5847
5873
|
|
|
5848
|
-
#
|
|
5849
|
-
if run_context
|
|
5850
|
-
|
|
5851
|
-
dependencies = run_context.dependencies or dependencies
|
|
5852
|
-
metadata = run_context.metadata or metadata
|
|
5874
|
+
# Extract values from run_context
|
|
5875
|
+
session_state = run_context.session_state if run_context else None
|
|
5876
|
+
user_id = run_context.user_id if run_context else None
|
|
5853
5877
|
|
|
5854
5878
|
# Get output_schema from run_context
|
|
5855
5879
|
output_schema = run_context.output_schema if run_context else None
|
|
@@ -5877,10 +5901,7 @@ class Team:
|
|
|
5877
5901
|
if self.resolve_in_context:
|
|
5878
5902
|
sys_message_content = self._format_message_with_state_variables(
|
|
5879
5903
|
sys_message_content,
|
|
5880
|
-
|
|
5881
|
-
session_state=session_state,
|
|
5882
|
-
dependencies=dependencies,
|
|
5883
|
-
metadata=metadata,
|
|
5904
|
+
run_context=run_context,
|
|
5884
5905
|
)
|
|
5885
5906
|
|
|
5886
5907
|
# type: ignore
|
|
@@ -5950,26 +5971,15 @@ class Team:
|
|
|
5950
5971
|
if self.name is not None and self.add_name_to_context:
|
|
5951
5972
|
additional_information.append(f"Your name is: {self.name}.")
|
|
5952
5973
|
|
|
5953
|
-
|
|
5954
|
-
|
|
5955
|
-
|
|
5956
|
-
|
|
5957
|
-
|
|
5958
|
-
|
|
5959
|
-
The knowledge base contains documents with these metadata filters: {valid_filters_str}.
|
|
5960
|
-
Always use filters when the user query indicates specific metadata.
|
|
5961
|
-
Examples:
|
|
5962
|
-
1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}.
|
|
5963
|
-
2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}.
|
|
5964
|
-
4. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
|
|
5965
|
-
General Guidelines:
|
|
5966
|
-
- Always analyze the user query to identify relevant metadata.
|
|
5967
|
-
- Use the most specific filter(s) possible to narrow down results.
|
|
5968
|
-
- If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}).
|
|
5969
|
-
- Ensure the filter keys match the valid metadata filters: {valid_filters_str}.
|
|
5970
|
-
You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
|
|
5971
|
-
""")
|
|
5974
|
+
# Add knowledge context using protocol's build_context
|
|
5975
|
+
if self.knowledge is not None:
|
|
5976
|
+
build_context_fn = getattr(self.knowledge, "build_context", None)
|
|
5977
|
+
if callable(build_context_fn):
|
|
5978
|
+
knowledge_context = build_context_fn(
|
|
5979
|
+
enable_agentic_filters=self.enable_agentic_knowledge_filters,
|
|
5972
5980
|
)
|
|
5981
|
+
if knowledge_context:
|
|
5982
|
+
additional_information.append(knowledge_context)
|
|
5973
5983
|
|
|
5974
5984
|
# 2 Build the default system message for the Agent.
|
|
5975
5985
|
system_message_content: str = ""
|
|
@@ -6085,15 +6095,22 @@ class Team:
|
|
|
6085
6095
|
if self.role is not None:
|
|
6086
6096
|
system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
|
|
6087
6097
|
|
|
6088
|
-
# 3.3.5 Then add instructions for the
|
|
6098
|
+
# 3.3.5 Then add instructions for the Team
|
|
6089
6099
|
if len(instructions) > 0:
|
|
6090
|
-
|
|
6091
|
-
|
|
6092
|
-
|
|
6093
|
-
|
|
6100
|
+
if self.use_instruction_tags:
|
|
6101
|
+
system_message_content += "<instructions>"
|
|
6102
|
+
if len(instructions) > 1:
|
|
6103
|
+
for _upi in instructions:
|
|
6104
|
+
system_message_content += f"\n- {_upi}"
|
|
6105
|
+
else:
|
|
6106
|
+
system_message_content += "\n" + instructions[0]
|
|
6107
|
+
system_message_content += "\n</instructions>\n\n"
|
|
6094
6108
|
else:
|
|
6095
|
-
|
|
6096
|
-
|
|
6109
|
+
if len(instructions) > 1:
|
|
6110
|
+
for _upi in instructions:
|
|
6111
|
+
system_message_content += f"- {_upi}\n"
|
|
6112
|
+
else:
|
|
6113
|
+
system_message_content += instructions[0] + "\n\n"
|
|
6097
6114
|
# 3.3.6 Add additional information
|
|
6098
6115
|
if len(additional_information) > 0:
|
|
6099
6116
|
system_message_content += "<additional_information>"
|
|
@@ -6109,10 +6126,7 @@ class Team:
|
|
|
6109
6126
|
if self.resolve_in_context:
|
|
6110
6127
|
system_message_content = self._format_message_with_state_variables(
|
|
6111
6128
|
system_message_content,
|
|
6112
|
-
|
|
6113
|
-
session_state=session_state,
|
|
6114
|
-
dependencies=dependencies,
|
|
6115
|
-
metadata=metadata,
|
|
6129
|
+
run_context=run_context,
|
|
6116
6130
|
)
|
|
6117
6131
|
|
|
6118
6132
|
system_message_from_model = self.model.get_system_message_for_model(tools)
|
|
@@ -6186,7 +6200,6 @@ class Team:
|
|
|
6186
6200
|
system_message = self.get_system_message(
|
|
6187
6201
|
session=session,
|
|
6188
6202
|
run_context=run_context,
|
|
6189
|
-
user_id=user_id,
|
|
6190
6203
|
images=images,
|
|
6191
6204
|
audio=audio,
|
|
6192
6205
|
videos=videos,
|
|
@@ -6319,7 +6332,6 @@ class Team:
|
|
|
6319
6332
|
system_message = await self.aget_system_message(
|
|
6320
6333
|
session=session,
|
|
6321
6334
|
run_context=run_context,
|
|
6322
|
-
user_id=user_id,
|
|
6323
6335
|
images=images,
|
|
6324
6336
|
audio=audio,
|
|
6325
6337
|
videos=videos,
|
|
@@ -6419,7 +6431,6 @@ class Team:
|
|
|
6419
6431
|
run_response: TeamRunOutput,
|
|
6420
6432
|
run_context: RunContext,
|
|
6421
6433
|
input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
|
|
6422
|
-
user_id: Optional[str] = None,
|
|
6423
6434
|
audio: Optional[Sequence[Audio]] = None,
|
|
6424
6435
|
images: Optional[Sequence[Image]] = None,
|
|
6425
6436
|
videos: Optional[Sequence[Video]] = None,
|
|
@@ -6531,10 +6542,7 @@ class Team:
|
|
|
6531
6542
|
if self.resolve_in_context:
|
|
6532
6543
|
user_msg_content = self._format_message_with_state_variables(
|
|
6533
6544
|
user_msg_content,
|
|
6534
|
-
|
|
6535
|
-
session_state=run_context.session_state,
|
|
6536
|
-
dependencies=run_context.dependencies,
|
|
6537
|
-
metadata=run_context.metadata,
|
|
6545
|
+
run_context=run_context,
|
|
6538
6546
|
)
|
|
6539
6547
|
|
|
6540
6548
|
# Convert to string for concatenation operations
|
|
@@ -6577,7 +6585,6 @@ class Team:
|
|
|
6577
6585
|
run_response: TeamRunOutput,
|
|
6578
6586
|
run_context: RunContext,
|
|
6579
6587
|
input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
|
|
6580
|
-
user_id: Optional[str] = None,
|
|
6581
6588
|
audio: Optional[Sequence[Audio]] = None,
|
|
6582
6589
|
images: Optional[Sequence[Image]] = None,
|
|
6583
6590
|
videos: Optional[Sequence[Video]] = None,
|
|
@@ -6689,10 +6696,7 @@ class Team:
|
|
|
6689
6696
|
if self.resolve_in_context:
|
|
6690
6697
|
user_msg_content = self._format_message_with_state_variables(
|
|
6691
6698
|
user_msg_content,
|
|
6692
|
-
|
|
6693
|
-
session_state=run_context.session_state,
|
|
6694
|
-
dependencies=run_context.dependencies,
|
|
6695
|
-
metadata=run_context.metadata,
|
|
6699
|
+
run_context=run_context,
|
|
6696
6700
|
)
|
|
6697
6701
|
|
|
6698
6702
|
# Convert to string for concatenation operations
|
|
@@ -6802,17 +6806,21 @@ class Team:
|
|
|
6802
6806
|
def _format_message_with_state_variables(
|
|
6803
6807
|
self,
|
|
6804
6808
|
message: Any,
|
|
6805
|
-
|
|
6806
|
-
dependencies: Optional[Dict[str, Any]] = None,
|
|
6807
|
-
metadata: Optional[Dict[str, Any]] = None,
|
|
6808
|
-
user_id: Optional[str] = None,
|
|
6809
|
+
run_context: Optional[RunContext] = None,
|
|
6809
6810
|
) -> Any:
|
|
6810
|
-
"""Format a message with the session state variables."""
|
|
6811
|
+
"""Format a message with the session state variables from run_context."""
|
|
6811
6812
|
import re
|
|
6812
6813
|
import string
|
|
6813
6814
|
|
|
6814
6815
|
if not isinstance(message, str):
|
|
6815
6816
|
return message
|
|
6817
|
+
|
|
6818
|
+
# Extract values from run_context
|
|
6819
|
+
session_state = run_context.session_state if run_context else None
|
|
6820
|
+
dependencies = run_context.dependencies if run_context else None
|
|
6821
|
+
metadata = run_context.metadata if run_context else None
|
|
6822
|
+
user_id = run_context.user_id if run_context else None
|
|
6823
|
+
|
|
6816
6824
|
# Should already be resolved and passed from run() method
|
|
6817
6825
|
format_variables = ChainMap(
|
|
6818
6826
|
session_state if session_state is not None else {},
|
|
@@ -7844,7 +7852,7 @@ class Team:
|
|
|
7844
7852
|
stream_events=stream_events or self.stream_member_events,
|
|
7845
7853
|
debug_mode=debug_mode,
|
|
7846
7854
|
knowledge_filters=run_context.knowledge_filters
|
|
7847
|
-
if not
|
|
7855
|
+
if not agent.knowledge_filters and agent.knowledge
|
|
7848
7856
|
else None,
|
|
7849
7857
|
dependencies=run_context.dependencies,
|
|
7850
7858
|
add_dependencies_to_context=add_dependencies_to_context,
|
|
@@ -7869,7 +7877,7 @@ class Team:
|
|
|
7869
7877
|
finally:
|
|
7870
7878
|
_process_delegate_task_to_member(
|
|
7871
7879
|
member_agent_run_response,
|
|
7872
|
-
|
|
7880
|
+
agent,
|
|
7873
7881
|
member_agent_task, # type: ignore
|
|
7874
7882
|
member_session_state_copy, # type: ignore
|
|
7875
7883
|
)
|
|
@@ -7907,10 +7915,15 @@ class Team:
|
|
|
7907
7915
|
current_agent = member_agent
|
|
7908
7916
|
member_agent_task, history = _setup_delegate_task_to_member(member_agent=current_agent, task=task)
|
|
7909
7917
|
|
|
7910
|
-
async def run_member_agent(
|
|
7918
|
+
async def run_member_agent(
|
|
7919
|
+
member_agent=current_agent,
|
|
7920
|
+
member_agent_task=member_agent_task,
|
|
7921
|
+
history=history,
|
|
7922
|
+
member_agent_index=member_agent_index,
|
|
7923
|
+
) -> str:
|
|
7911
7924
|
member_session_state_copy = copy(run_context.session_state)
|
|
7912
7925
|
|
|
7913
|
-
member_agent_run_response = await
|
|
7926
|
+
member_agent_run_response = await member_agent.arun(
|
|
7914
7927
|
input=member_agent_task if not history else history,
|
|
7915
7928
|
user_id=user_id,
|
|
7916
7929
|
# All members have the same session_id
|
|
@@ -8217,6 +8230,719 @@ class Team:
|
|
|
8217
8230
|
# Update the current metadata with the metadata from the database which is updated in place
|
|
8218
8231
|
self.metadata = session.metadata
|
|
8219
8232
|
|
|
8233
|
+
# -*- Serialization Functions
|
|
8234
|
+
def to_dict(self) -> Dict[str, Any]:
|
|
8235
|
+
"""
|
|
8236
|
+
Convert the Team to a dictionary.
|
|
8237
|
+
|
|
8238
|
+
Returns:
|
|
8239
|
+
Dict[str, Any]: Dictionary representation of the team configuration
|
|
8240
|
+
"""
|
|
8241
|
+
config: Dict[str, Any] = {}
|
|
8242
|
+
|
|
8243
|
+
# --- Team Settings ---
|
|
8244
|
+
if self.id is not None:
|
|
8245
|
+
config["id"] = self.id
|
|
8246
|
+
if self.name is not None:
|
|
8247
|
+
config["name"] = self.name
|
|
8248
|
+
if self.role is not None:
|
|
8249
|
+
config["role"] = self.role
|
|
8250
|
+
if self.description is not None:
|
|
8251
|
+
config["description"] = self.description
|
|
8252
|
+
|
|
8253
|
+
# --- Model ---
|
|
8254
|
+
if self.model is not None:
|
|
8255
|
+
config["model"] = self.model.to_dict() if isinstance(self.model, Model) else str(self.model)
|
|
8256
|
+
|
|
8257
|
+
# --- Members ---
|
|
8258
|
+
if self.members:
|
|
8259
|
+
serialized_members = []
|
|
8260
|
+
for member in self.members:
|
|
8261
|
+
if isinstance(member, Agent):
|
|
8262
|
+
serialized_members.append({"type": "agent", "agent_id": member.id})
|
|
8263
|
+
elif isinstance(member, Team):
|
|
8264
|
+
serialized_members.append({"type": "team", "team_id": member.id})
|
|
8265
|
+
if serialized_members:
|
|
8266
|
+
config["members"] = serialized_members
|
|
8267
|
+
|
|
8268
|
+
# --- Execution settings (only if non-default) ---
|
|
8269
|
+
if self.respond_directly:
|
|
8270
|
+
config["respond_directly"] = self.respond_directly
|
|
8271
|
+
if self.delegate_to_all_members:
|
|
8272
|
+
config["delegate_to_all_members"] = self.delegate_to_all_members
|
|
8273
|
+
if not self.determine_input_for_members: # default is True
|
|
8274
|
+
config["determine_input_for_members"] = self.determine_input_for_members
|
|
8275
|
+
|
|
8276
|
+
# --- User settings ---
|
|
8277
|
+
if self.user_id is not None:
|
|
8278
|
+
config["user_id"] = self.user_id
|
|
8279
|
+
|
|
8280
|
+
# --- Session settings ---
|
|
8281
|
+
if self.session_id is not None:
|
|
8282
|
+
config["session_id"] = self.session_id
|
|
8283
|
+
if self.session_state is not None:
|
|
8284
|
+
config["session_state"] = self.session_state
|
|
8285
|
+
if self.add_session_state_to_context:
|
|
8286
|
+
config["add_session_state_to_context"] = self.add_session_state_to_context
|
|
8287
|
+
if self.enable_agentic_state:
|
|
8288
|
+
config["enable_agentic_state"] = self.enable_agentic_state
|
|
8289
|
+
if self.overwrite_db_session_state:
|
|
8290
|
+
config["overwrite_db_session_state"] = self.overwrite_db_session_state
|
|
8291
|
+
if self.cache_session:
|
|
8292
|
+
config["cache_session"] = self.cache_session
|
|
8293
|
+
|
|
8294
|
+
# --- Team history settings ---
|
|
8295
|
+
if self.add_team_history_to_members:
|
|
8296
|
+
config["add_team_history_to_members"] = self.add_team_history_to_members
|
|
8297
|
+
if self.num_team_history_runs != 3: # default is 3
|
|
8298
|
+
config["num_team_history_runs"] = self.num_team_history_runs
|
|
8299
|
+
if self.share_member_interactions:
|
|
8300
|
+
config["share_member_interactions"] = self.share_member_interactions
|
|
8301
|
+
if self.search_session_history:
|
|
8302
|
+
config["search_session_history"] = self.search_session_history
|
|
8303
|
+
if self.num_history_sessions is not None:
|
|
8304
|
+
config["num_history_sessions"] = self.num_history_sessions
|
|
8305
|
+
if self.read_chat_history:
|
|
8306
|
+
config["read_chat_history"] = self.read_chat_history
|
|
8307
|
+
|
|
8308
|
+
# --- System message settings ---
|
|
8309
|
+
if self.system_message is not None and isinstance(self.system_message, str):
|
|
8310
|
+
config["system_message"] = self.system_message
|
|
8311
|
+
if self.system_message_role != "system": # default is "system"
|
|
8312
|
+
config["system_message_role"] = self.system_message_role
|
|
8313
|
+
if self.introduction is not None:
|
|
8314
|
+
config["introduction"] = self.introduction
|
|
8315
|
+
if self.instructions is not None and not callable(self.instructions):
|
|
8316
|
+
config["instructions"] = self.instructions
|
|
8317
|
+
if self.expected_output is not None:
|
|
8318
|
+
config["expected_output"] = self.expected_output
|
|
8319
|
+
if self.additional_context is not None:
|
|
8320
|
+
config["additional_context"] = self.additional_context
|
|
8321
|
+
|
|
8322
|
+
# --- Context settings ---
|
|
8323
|
+
if self.markdown:
|
|
8324
|
+
config["markdown"] = self.markdown
|
|
8325
|
+
if self.add_datetime_to_context:
|
|
8326
|
+
config["add_datetime_to_context"] = self.add_datetime_to_context
|
|
8327
|
+
if self.add_location_to_context:
|
|
8328
|
+
config["add_location_to_context"] = self.add_location_to_context
|
|
8329
|
+
if self.timezone_identifier is not None:
|
|
8330
|
+
config["timezone_identifier"] = self.timezone_identifier
|
|
8331
|
+
if self.add_name_to_context:
|
|
8332
|
+
config["add_name_to_context"] = self.add_name_to_context
|
|
8333
|
+
if self.add_member_tools_to_context:
|
|
8334
|
+
config["add_member_tools_to_context"] = self.add_member_tools_to_context
|
|
8335
|
+
if not self.resolve_in_context: # default is True
|
|
8336
|
+
config["resolve_in_context"] = self.resolve_in_context
|
|
8337
|
+
|
|
8338
|
+
# --- Database settings ---
|
|
8339
|
+
if self.db is not None and hasattr(self.db, "to_dict"):
|
|
8340
|
+
config["db"] = self.db.to_dict()
|
|
8341
|
+
|
|
8342
|
+
# --- Dependencies ---
|
|
8343
|
+
if self.dependencies is not None:
|
|
8344
|
+
config["dependencies"] = self.dependencies
|
|
8345
|
+
if self.add_dependencies_to_context:
|
|
8346
|
+
config["add_dependencies_to_context"] = self.add_dependencies_to_context
|
|
8347
|
+
|
|
8348
|
+
# --- Knowledge settings ---
|
|
8349
|
+
# TODO: implement knowledge serialization
|
|
8350
|
+
# if self.knowledge is not None:
|
|
8351
|
+
# config["knowledge"] = self.knowledge.to_dict()
|
|
8352
|
+
if self.knowledge_filters is not None:
|
|
8353
|
+
config["knowledge_filters"] = self.knowledge_filters
|
|
8354
|
+
if self.enable_agentic_knowledge_filters:
|
|
8355
|
+
config["enable_agentic_knowledge_filters"] = self.enable_agentic_knowledge_filters
|
|
8356
|
+
if self.update_knowledge:
|
|
8357
|
+
config["update_knowledge"] = self.update_knowledge
|
|
8358
|
+
if self.add_knowledge_to_context:
|
|
8359
|
+
config["add_knowledge_to_context"] = self.add_knowledge_to_context
|
|
8360
|
+
if not self.search_knowledge: # default is True
|
|
8361
|
+
config["search_knowledge"] = self.search_knowledge
|
|
8362
|
+
if self.references_format != "json": # default is "json"
|
|
8363
|
+
config["references_format"] = self.references_format
|
|
8364
|
+
|
|
8365
|
+
# --- Tools ---
|
|
8366
|
+
if self.tools:
|
|
8367
|
+
serialized_tools = []
|
|
8368
|
+
for tool in self.tools:
|
|
8369
|
+
try:
|
|
8370
|
+
if isinstance(tool, Function):
|
|
8371
|
+
serialized_tools.append(tool.to_dict())
|
|
8372
|
+
elif isinstance(tool, Toolkit):
|
|
8373
|
+
for func in tool.functions.values():
|
|
8374
|
+
serialized_tools.append(func.to_dict())
|
|
8375
|
+
elif callable(tool):
|
|
8376
|
+
func = Function.from_callable(tool)
|
|
8377
|
+
serialized_tools.append(func.to_dict())
|
|
8378
|
+
except Exception as e:
|
|
8379
|
+
log_warning(f"Could not serialize tool {tool}: {e}")
|
|
8380
|
+
if serialized_tools:
|
|
8381
|
+
config["tools"] = serialized_tools
|
|
8382
|
+
if self.tool_choice is not None:
|
|
8383
|
+
config["tool_choice"] = self.tool_choice
|
|
8384
|
+
if self.tool_call_limit is not None:
|
|
8385
|
+
config["tool_call_limit"] = self.tool_call_limit
|
|
8386
|
+
if self.get_member_information_tool:
|
|
8387
|
+
config["get_member_information_tool"] = self.get_member_information_tool
|
|
8388
|
+
|
|
8389
|
+
# --- Schema settings ---
|
|
8390
|
+
if self.input_schema is not None:
|
|
8391
|
+
if issubclass(self.input_schema, BaseModel):
|
|
8392
|
+
config["input_schema"] = self.input_schema.__name__
|
|
8393
|
+
elif isinstance(self.input_schema, dict):
|
|
8394
|
+
config["input_schema"] = self.input_schema
|
|
8395
|
+
if self.output_schema is not None:
|
|
8396
|
+
if isinstance(self.output_schema, type) and issubclass(self.output_schema, BaseModel):
|
|
8397
|
+
config["output_schema"] = self.output_schema.__name__
|
|
8398
|
+
elif isinstance(self.output_schema, dict):
|
|
8399
|
+
config["output_schema"] = self.output_schema
|
|
8400
|
+
|
|
8401
|
+
# --- Parser and output settings ---
|
|
8402
|
+
if self.parser_model is not None:
|
|
8403
|
+
if isinstance(self.parser_model, Model):
|
|
8404
|
+
config["parser_model"] = self.parser_model.to_dict()
|
|
8405
|
+
else:
|
|
8406
|
+
config["parser_model"] = str(self.parser_model)
|
|
8407
|
+
if self.parser_model_prompt is not None:
|
|
8408
|
+
config["parser_model_prompt"] = self.parser_model_prompt
|
|
8409
|
+
if self.output_model is not None:
|
|
8410
|
+
if isinstance(self.output_model, Model):
|
|
8411
|
+
config["output_model"] = self.output_model.to_dict()
|
|
8412
|
+
else:
|
|
8413
|
+
config["output_model"] = str(self.output_model)
|
|
8414
|
+
if self.output_model_prompt is not None:
|
|
8415
|
+
config["output_model_prompt"] = self.output_model_prompt
|
|
8416
|
+
if self.use_json_mode:
|
|
8417
|
+
config["use_json_mode"] = self.use_json_mode
|
|
8418
|
+
if not self.parse_response: # default is True
|
|
8419
|
+
config["parse_response"] = self.parse_response
|
|
8420
|
+
|
|
8421
|
+
# --- Memory settings ---
|
|
8422
|
+
# TODO: implement memory manager serialization
|
|
8423
|
+
# if self.memory_manager is not None:
|
|
8424
|
+
# config["memory_manager"] = self.memory_manager.to_dict()
|
|
8425
|
+
if self.enable_agentic_memory:
|
|
8426
|
+
config["enable_agentic_memory"] = self.enable_agentic_memory
|
|
8427
|
+
if self.enable_user_memories:
|
|
8428
|
+
config["enable_user_memories"] = self.enable_user_memories
|
|
8429
|
+
if self.add_memories_to_context is not None:
|
|
8430
|
+
config["add_memories_to_context"] = self.add_memories_to_context
|
|
8431
|
+
if self.enable_session_summaries:
|
|
8432
|
+
config["enable_session_summaries"] = self.enable_session_summaries
|
|
8433
|
+
if self.add_session_summary_to_context is not None:
|
|
8434
|
+
config["add_session_summary_to_context"] = self.add_session_summary_to_context
|
|
8435
|
+
# TODO: implement session summary manager serialization
|
|
8436
|
+
# if self.session_summary_manager is not None:
|
|
8437
|
+
# config["session_summary_manager"] = self.session_summary_manager.to_dict()
|
|
8438
|
+
|
|
8439
|
+
# --- History settings ---
|
|
8440
|
+
if self.add_history_to_context:
|
|
8441
|
+
config["add_history_to_context"] = self.add_history_to_context
|
|
8442
|
+
if self.num_history_runs is not None:
|
|
8443
|
+
config["num_history_runs"] = self.num_history_runs
|
|
8444
|
+
if self.num_history_messages is not None:
|
|
8445
|
+
config["num_history_messages"] = self.num_history_messages
|
|
8446
|
+
if self.max_tool_calls_from_history is not None:
|
|
8447
|
+
config["max_tool_calls_from_history"] = self.max_tool_calls_from_history
|
|
8448
|
+
|
|
8449
|
+
# --- Media/storage settings ---
|
|
8450
|
+
if not self.send_media_to_model: # default is True
|
|
8451
|
+
config["send_media_to_model"] = self.send_media_to_model
|
|
8452
|
+
if not self.store_media: # default is True
|
|
8453
|
+
config["store_media"] = self.store_media
|
|
8454
|
+
if not self.store_tool_messages: # default is True
|
|
8455
|
+
config["store_tool_messages"] = self.store_tool_messages
|
|
8456
|
+
if not self.store_history_messages: # default is True
|
|
8457
|
+
config["store_history_messages"] = self.store_history_messages
|
|
8458
|
+
|
|
8459
|
+
# --- Compression settings ---
|
|
8460
|
+
if self.compress_tool_results:
|
|
8461
|
+
config["compress_tool_results"] = self.compress_tool_results
|
|
8462
|
+
# TODO: implement compression manager serialization
|
|
8463
|
+
# if self.compression_manager is not None:
|
|
8464
|
+
# config["compression_manager"] = self.compression_manager.to_dict()
|
|
8465
|
+
|
|
8466
|
+
# --- Reasoning settings ---
|
|
8467
|
+
if self.reasoning:
|
|
8468
|
+
config["reasoning"] = self.reasoning
|
|
8469
|
+
# TODO: implement reasoning model serialization
|
|
8470
|
+
# if self.reasoning_model is not None:
|
|
8471
|
+
# config["reasoning_model"] = self.reasoning_model.to_dict() if isinstance(self.reasoning_model, Model) else str(self.reasoning_model)
|
|
8472
|
+
if self.reasoning_min_steps != 1: # default is 1
|
|
8473
|
+
config["reasoning_min_steps"] = self.reasoning_min_steps
|
|
8474
|
+
if self.reasoning_max_steps != 10: # default is 10
|
|
8475
|
+
config["reasoning_max_steps"] = self.reasoning_max_steps
|
|
8476
|
+
|
|
8477
|
+
# --- Streaming settings ---
|
|
8478
|
+
if self.stream is not None:
|
|
8479
|
+
config["stream"] = self.stream
|
|
8480
|
+
if self.stream_events is not None:
|
|
8481
|
+
config["stream_events"] = self.stream_events
|
|
8482
|
+
if not self.stream_member_events: # default is True
|
|
8483
|
+
config["stream_member_events"] = self.stream_member_events
|
|
8484
|
+
if self.store_events:
|
|
8485
|
+
config["store_events"] = self.store_events
|
|
8486
|
+
if self.store_member_responses:
|
|
8487
|
+
config["store_member_responses"] = self.store_member_responses
|
|
8488
|
+
|
|
8489
|
+
# --- Retry settings ---
|
|
8490
|
+
if self.retries > 0:
|
|
8491
|
+
config["retries"] = self.retries
|
|
8492
|
+
if self.delay_between_retries != 1: # default is 1
|
|
8493
|
+
config["delay_between_retries"] = self.delay_between_retries
|
|
8494
|
+
if self.exponential_backoff:
|
|
8495
|
+
config["exponential_backoff"] = self.exponential_backoff
|
|
8496
|
+
|
|
8497
|
+
# --- Metadata ---
|
|
8498
|
+
if self.metadata is not None:
|
|
8499
|
+
config["metadata"] = self.metadata
|
|
8500
|
+
|
|
8501
|
+
# --- Debug and telemetry settings ---
|
|
8502
|
+
if self.debug_mode:
|
|
8503
|
+
config["debug_mode"] = self.debug_mode
|
|
8504
|
+
if self.debug_level != 1: # default is 1
|
|
8505
|
+
config["debug_level"] = self.debug_level
|
|
8506
|
+
if self.show_members_responses:
|
|
8507
|
+
config["show_members_responses"] = self.show_members_responses
|
|
8508
|
+
if not self.telemetry: # default is True
|
|
8509
|
+
config["telemetry"] = self.telemetry
|
|
8510
|
+
|
|
8511
|
+
return config
|
|
8512
|
+
|
|
8513
|
+
@classmethod
|
|
8514
|
+
def from_dict(
|
|
8515
|
+
cls,
|
|
8516
|
+
data: Dict[str, Any],
|
|
8517
|
+
db: Optional["BaseDb"] = None,
|
|
8518
|
+
registry: Optional["Registry"] = None,
|
|
8519
|
+
) -> "Team":
|
|
8520
|
+
"""
|
|
8521
|
+
Create a Team from a dictionary.
|
|
8522
|
+
|
|
8523
|
+
Args:
|
|
8524
|
+
data: Dictionary containing team configuration
|
|
8525
|
+
db: Optional database for loading agents in members
|
|
8526
|
+
registry: Optional registry for rehydrating tools
|
|
8527
|
+
|
|
8528
|
+
Returns:
|
|
8529
|
+
Team: Reconstructed team instance
|
|
8530
|
+
"""
|
|
8531
|
+
config = data.copy()
|
|
8532
|
+
|
|
8533
|
+
# --- Handle Model reconstruction ---
|
|
8534
|
+
if "model" in config:
|
|
8535
|
+
model_data = config["model"]
|
|
8536
|
+
if isinstance(model_data, dict) and "id" in model_data:
|
|
8537
|
+
config["model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
|
|
8538
|
+
elif isinstance(model_data, str):
|
|
8539
|
+
config["model"] = get_model(model_data)
|
|
8540
|
+
|
|
8541
|
+
# --- Handle Members reconstruction ---
|
|
8542
|
+
members: Optional[List[Union[Agent, "Team"]]] = None
|
|
8543
|
+
from agno.agent import get_agent_by_id
|
|
8544
|
+
from agno.team import get_team_by_id
|
|
8545
|
+
|
|
8546
|
+
if "members" in config and config["members"]:
|
|
8547
|
+
members = []
|
|
8548
|
+
for member_data in config["members"]:
|
|
8549
|
+
member_type = member_data.get("type")
|
|
8550
|
+
if member_type == "agent":
|
|
8551
|
+
# TODO: Make sure to pass the correct version to get_agent_by_id. Right now its returning the latest version.
|
|
8552
|
+
if db is None:
|
|
8553
|
+
log_warning(f"Cannot load member agent {member_data['agent_id']}: db is None")
|
|
8554
|
+
continue
|
|
8555
|
+
agent = get_agent_by_id(id=member_data["agent_id"], db=db, registry=registry)
|
|
8556
|
+
if agent:
|
|
8557
|
+
members.append(agent)
|
|
8558
|
+
else:
|
|
8559
|
+
log_warning(f"Agent not found: {member_data['agent_id']}")
|
|
8560
|
+
elif member_type == "team":
|
|
8561
|
+
# Handle nested teams as members
|
|
8562
|
+
if db is None:
|
|
8563
|
+
log_warning(f"Cannot load member team {member_data['team_id']}: db is None")
|
|
8564
|
+
continue
|
|
8565
|
+
nested_team = get_team_by_id(id=member_data["team_id"], db=db, registry=registry)
|
|
8566
|
+
if nested_team:
|
|
8567
|
+
members.append(nested_team)
|
|
8568
|
+
else:
|
|
8569
|
+
log_warning(f"Team not found: {member_data['team_id']}")
|
|
8570
|
+
|
|
8571
|
+
# --- Handle reasoning_model reconstruction ---
|
|
8572
|
+
# TODO: implement reasoning model deserialization
|
|
8573
|
+
# if "reasoning_model" in config:
|
|
8574
|
+
# model_data = config["reasoning_model"]
|
|
8575
|
+
# if isinstance(model_data, dict) and "id" in model_data:
|
|
8576
|
+
# config["reasoning_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
|
|
8577
|
+
# elif isinstance(model_data, str):
|
|
8578
|
+
# config["reasoning_model"] = get_model(model_data)
|
|
8579
|
+
|
|
8580
|
+
# --- Handle parser_model reconstruction ---
|
|
8581
|
+
# TODO: implement parser model deserialization
|
|
8582
|
+
# if "parser_model" in config:
|
|
8583
|
+
# model_data = config["parser_model"]
|
|
8584
|
+
# if isinstance(model_data, dict) and "id" in model_data:
|
|
8585
|
+
# config["parser_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
|
|
8586
|
+
# elif isinstance(model_data, str):
|
|
8587
|
+
# config["parser_model"] = get_model(model_data)
|
|
8588
|
+
|
|
8589
|
+
# --- Handle output_model reconstruction ---
|
|
8590
|
+
# TODO: implement output model deserialization
|
|
8591
|
+
# if "output_model" in config:
|
|
8592
|
+
# model_data = config["output_model"]
|
|
8593
|
+
# if isinstance(model_data, dict) and "id" in model_data:
|
|
8594
|
+
# config["output_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
|
|
8595
|
+
# elif isinstance(model_data, str):
|
|
8596
|
+
# config["output_model"] = get_model(model_data)
|
|
8597
|
+
|
|
8598
|
+
# --- Handle tools reconstruction ---
|
|
8599
|
+
if "tools" in config and config["tools"]:
|
|
8600
|
+
if registry:
|
|
8601
|
+
config["tools"] = [registry.rehydrate_function(t) for t in config["tools"]]
|
|
8602
|
+
else:
|
|
8603
|
+
log_warning("No registry provided, tools will not be rehydrated.")
|
|
8604
|
+
del config["tools"]
|
|
8605
|
+
|
|
8606
|
+
# --- Handle DB reconstruction ---
|
|
8607
|
+
if "db" in config and isinstance(config["db"], dict):
|
|
8608
|
+
db_data = config["db"]
|
|
8609
|
+
db_id = db_data.get("id")
|
|
8610
|
+
|
|
8611
|
+
# First try to get the db from the registry (preferred - reuses existing connection)
|
|
8612
|
+
if registry and db_id:
|
|
8613
|
+
registry_db = registry.get_db(db_id)
|
|
8614
|
+
if registry_db is not None:
|
|
8615
|
+
config["db"] = registry_db
|
|
8616
|
+
else:
|
|
8617
|
+
del config["db"]
|
|
8618
|
+
else:
|
|
8619
|
+
# No registry or no db_id, fall back to creating from dict
|
|
8620
|
+
config["db"] = db_from_dict(db_data)
|
|
8621
|
+
if config["db"] is None:
|
|
8622
|
+
del config["db"]
|
|
8623
|
+
|
|
8624
|
+
# --- Handle Schema reconstruction ---
|
|
8625
|
+
if "input_schema" in config and isinstance(config["input_schema"], str):
|
|
8626
|
+
schema_cls = registry.get_schema(config["input_schema"]) if registry else None
|
|
8627
|
+
if schema_cls:
|
|
8628
|
+
config["input_schema"] = schema_cls
|
|
8629
|
+
else:
|
|
8630
|
+
log_warning(f"Input schema {config['input_schema']} not found in registry, skipping.")
|
|
8631
|
+
del config["input_schema"]
|
|
8632
|
+
|
|
8633
|
+
if "output_schema" in config and isinstance(config["output_schema"], str):
|
|
8634
|
+
schema_cls = registry.get_schema(config["output_schema"]) if registry else None
|
|
8635
|
+
if schema_cls:
|
|
8636
|
+
config["output_schema"] = schema_cls
|
|
8637
|
+
else:
|
|
8638
|
+
log_warning(f"Output schema {config['output_schema']} not found in registry, skipping.")
|
|
8639
|
+
del config["output_schema"]
|
|
8640
|
+
|
|
8641
|
+
# --- Handle MemoryManager reconstruction ---
|
|
8642
|
+
# TODO: implement memory manager deserialization
|
|
8643
|
+
# if "memory_manager" in config and isinstance(config["memory_manager"], dict):
|
|
8644
|
+
# from agno.memory import MemoryManager
|
|
8645
|
+
# config["memory_manager"] = MemoryManager.from_dict(config["memory_manager"])
|
|
8646
|
+
|
|
8647
|
+
# --- Handle SessionSummaryManager reconstruction ---
|
|
8648
|
+
# TODO: implement session summary manager deserialization
|
|
8649
|
+
# if "session_summary_manager" in config and isinstance(config["session_summary_manager"], dict):
|
|
8650
|
+
# from agno.session import SessionSummaryManager
|
|
8651
|
+
# config["session_summary_manager"] = SessionSummaryManager.from_dict(config["session_summary_manager"])
|
|
8652
|
+
|
|
8653
|
+
# --- Handle Knowledge reconstruction ---
|
|
8654
|
+
# TODO: implement knowledge deserialization
|
|
8655
|
+
# if "knowledge" in config and isinstance(config["knowledge"], dict):
|
|
8656
|
+
# from agno.knowledge import Knowledge
|
|
8657
|
+
# config["knowledge"] = Knowledge.from_dict(config["knowledge"])
|
|
8658
|
+
|
|
8659
|
+
# --- Handle CompressionManager reconstruction ---
|
|
8660
|
+
# TODO: implement compression manager deserialization
|
|
8661
|
+
# if "compression_manager" in config and isinstance(config["compression_manager"], dict):
|
|
8662
|
+
# from agno.compression.manager import CompressionManager
|
|
8663
|
+
# config["compression_manager"] = CompressionManager.from_dict(config["compression_manager"])
|
|
8664
|
+
|
|
8665
|
+
return cls(
|
|
8666
|
+
# --- Team settings ---
|
|
8667
|
+
id=config.get("id"),
|
|
8668
|
+
name=config.get("name"),
|
|
8669
|
+
role=config.get("role"),
|
|
8670
|
+
description=config.get("description"),
|
|
8671
|
+
# --- Model ---
|
|
8672
|
+
model=config.get("model"),
|
|
8673
|
+
# --- Members ---
|
|
8674
|
+
members=members or [],
|
|
8675
|
+
# --- Execution settings ---
|
|
8676
|
+
respond_directly=config.get("respond_directly", False),
|
|
8677
|
+
delegate_to_all_members=config.get("delegate_to_all_members", False),
|
|
8678
|
+
determine_input_for_members=config.get("determine_input_for_members", True),
|
|
8679
|
+
# --- User settings ---
|
|
8680
|
+
user_id=config.get("user_id"),
|
|
8681
|
+
# --- Session settings ---
|
|
8682
|
+
session_id=config.get("session_id"),
|
|
8683
|
+
session_state=config.get("session_state"),
|
|
8684
|
+
add_session_state_to_context=config.get("add_session_state_to_context", False),
|
|
8685
|
+
enable_agentic_state=config.get("enable_agentic_state", False),
|
|
8686
|
+
overwrite_db_session_state=config.get("overwrite_db_session_state", False),
|
|
8687
|
+
cache_session=config.get("cache_session", False),
|
|
8688
|
+
add_team_history_to_members=config.get("add_team_history_to_members", False),
|
|
8689
|
+
num_team_history_runs=config.get("num_team_history_runs", 3),
|
|
8690
|
+
share_member_interactions=config.get("share_member_interactions", False),
|
|
8691
|
+
search_session_history=config.get("search_session_history", False),
|
|
8692
|
+
num_history_sessions=config.get("num_history_sessions"),
|
|
8693
|
+
read_chat_history=config.get("read_chat_history", False),
|
|
8694
|
+
# --- System message settings ---
|
|
8695
|
+
system_message=config.get("system_message"),
|
|
8696
|
+
system_message_role=config.get("system_message_role", "system"),
|
|
8697
|
+
introduction=config.get("introduction"),
|
|
8698
|
+
instructions=config.get("instructions"),
|
|
8699
|
+
expected_output=config.get("expected_output"),
|
|
8700
|
+
additional_context=config.get("additional_context"),
|
|
8701
|
+
markdown=config.get("markdown", False),
|
|
8702
|
+
add_datetime_to_context=config.get("add_datetime_to_context", False),
|
|
8703
|
+
add_location_to_context=config.get("add_location_to_context", False),
|
|
8704
|
+
timezone_identifier=config.get("timezone_identifier"),
|
|
8705
|
+
add_name_to_context=config.get("add_name_to_context", False),
|
|
8706
|
+
add_member_tools_to_context=config.get("add_member_tools_to_context", False),
|
|
8707
|
+
resolve_in_context=config.get("resolve_in_context", True),
|
|
8708
|
+
# --- Database settings ---
|
|
8709
|
+
db=config.get("db"),
|
|
8710
|
+
# --- Dependencies ---
|
|
8711
|
+
dependencies=config.get("dependencies"),
|
|
8712
|
+
add_dependencies_to_context=config.get("add_dependencies_to_context", False),
|
|
8713
|
+
# --- Knowledge settings ---
|
|
8714
|
+
# knowledge=config.get("knowledge"), # TODO
|
|
8715
|
+
knowledge_filters=config.get("knowledge_filters"),
|
|
8716
|
+
enable_agentic_knowledge_filters=config.get("enable_agentic_knowledge_filters", False),
|
|
8717
|
+
add_knowledge_to_context=config.get("add_knowledge_to_context", False),
|
|
8718
|
+
update_knowledge=config.get("update_knowledge", False),
|
|
8719
|
+
search_knowledge=config.get("search_knowledge", True),
|
|
8720
|
+
references_format=config.get("references_format", "json"),
|
|
8721
|
+
# --- Tools ---
|
|
8722
|
+
tools=config.get("tools"),
|
|
8723
|
+
tool_call_limit=config.get("tool_call_limit"),
|
|
8724
|
+
tool_choice=config.get("tool_choice"),
|
|
8725
|
+
get_member_information_tool=config.get("get_member_information_tool", False),
|
|
8726
|
+
# --- Schema settings ---
|
|
8727
|
+
# input_schema=config.get("input_schema"), # TODO
|
|
8728
|
+
# output_schema=config.get("output_schema"), # TODO
|
|
8729
|
+
# --- Parser and output settings ---
|
|
8730
|
+
# parser_model=config.get("parser_model"), # TODO
|
|
8731
|
+
parser_model_prompt=config.get("parser_model_prompt"),
|
|
8732
|
+
# output_model=config.get("output_model"), # TODO
|
|
8733
|
+
output_model_prompt=config.get("output_model_prompt"),
|
|
8734
|
+
use_json_mode=config.get("use_json_mode", False),
|
|
8735
|
+
parse_response=config.get("parse_response", True),
|
|
8736
|
+
# --- Memory settings ---
|
|
8737
|
+
# memory_manager=config.get("memory_manager"), # TODO
|
|
8738
|
+
enable_agentic_memory=config.get("enable_agentic_memory", False),
|
|
8739
|
+
enable_user_memories=config.get("enable_user_memories", False),
|
|
8740
|
+
add_memories_to_context=config.get("add_memories_to_context"),
|
|
8741
|
+
enable_session_summaries=config.get("enable_session_summaries", False),
|
|
8742
|
+
add_session_summary_to_context=config.get("add_session_summary_to_context"),
|
|
8743
|
+
# session_summary_manager=config.get("session_summary_manager"), # TODO
|
|
8744
|
+
# --- History settings ---
|
|
8745
|
+
add_history_to_context=config.get("add_history_to_context", False),
|
|
8746
|
+
num_history_runs=config.get("num_history_runs"),
|
|
8747
|
+
num_history_messages=config.get("num_history_messages"),
|
|
8748
|
+
max_tool_calls_from_history=config.get("max_tool_calls_from_history"),
|
|
8749
|
+
# --- Compression settings ---
|
|
8750
|
+
compress_tool_results=config.get("compress_tool_results", False),
|
|
8751
|
+
# compression_manager=config.get("compression_manager"), # TODO
|
|
8752
|
+
# --- Reasoning settings ---
|
|
8753
|
+
reasoning=config.get("reasoning", False),
|
|
8754
|
+
# reasoning_model=config.get("reasoning_model"), # TODO
|
|
8755
|
+
reasoning_min_steps=config.get("reasoning_min_steps", 1),
|
|
8756
|
+
reasoning_max_steps=config.get("reasoning_max_steps", 10),
|
|
8757
|
+
# --- Streaming settings ---
|
|
8758
|
+
stream=config.get("stream"),
|
|
8759
|
+
stream_events=config.get("stream_events"),
|
|
8760
|
+
stream_member_events=config.get("stream_member_events", True),
|
|
8761
|
+
store_events=config.get("store_events", False),
|
|
8762
|
+
store_member_responses=config.get("store_member_responses", False),
|
|
8763
|
+
# --- Media settings ---
|
|
8764
|
+
send_media_to_model=config.get("send_media_to_model", True),
|
|
8765
|
+
store_media=config.get("store_media", True),
|
|
8766
|
+
store_tool_messages=config.get("store_tool_messages", True),
|
|
8767
|
+
store_history_messages=config.get("store_history_messages", True),
|
|
8768
|
+
# --- Retry settings ---
|
|
8769
|
+
retries=config.get("retries", 0),
|
|
8770
|
+
delay_between_retries=config.get("delay_between_retries", 1),
|
|
8771
|
+
exponential_backoff=config.get("exponential_backoff", False),
|
|
8772
|
+
# --- Metadata ---
|
|
8773
|
+
metadata=config.get("metadata"),
|
|
8774
|
+
# --- Debug and telemetry settings ---
|
|
8775
|
+
debug_mode=config.get("debug_mode", False),
|
|
8776
|
+
debug_level=config.get("debug_level", 1),
|
|
8777
|
+
show_members_responses=config.get("show_members_responses", False),
|
|
8778
|
+
telemetry=config.get("telemetry", True),
|
|
8779
|
+
)
|
|
8780
|
+
|
|
8781
|
+
def save(
    self,
    *,
    db: Optional["BaseDb"] = None,
    stage: str = "published",
    label: Optional[str] = None,
    notes: Optional[str] = None,
) -> Optional[int]:
    """
    Save the team component and config to the database, including member agents/teams.

    Args:
        db: The database to save the component and config to.
        stage: The stage of the component. Defaults to "published".
        label: The label of the component.
        notes: The notes of the component.

    Returns:
        Optional[int]: The version number of the saved config.

    Raises:
        ValueError: If no database is available, or the database is async.
    """
    # Imported locally; presumably avoids a circular import between Team and Agent.
    from agno.agent.agent import Agent

    db_ = db or self.db
    if not db_:
        raise ValueError("Db not initialized or provided")
    if not isinstance(db_, BaseDb):
        raise ValueError("Async databases not yet supported for save(). Use a sync database.")
    # Derive an id from the team name when none was set.
    if self.id is None:
        self.id = generate_id_from_name(self.name)

    try:
        # Collect all links for members
        all_links: List[Dict[str, Any]] = []

        # Save each member (Agent or nested Team) and collect links.
        # NOTE: stage/label/notes are propagated to every member's saved config.
        for position, member in enumerate(self.members or []):
            # Save member first - returns version
            member_version = member.save(db=db_, stage=stage, label=label, notes=notes)

            # Add link pinning the member's component id and saved version at this position.
            all_links.append(
                {
                    "link_kind": "member",
                    "link_key": f"member_{position}",
                    "child_component_id": member.id,
                    "child_version": member_version,
                    "position": position,
                    "meta": {"type": "agent" if isinstance(member, Agent) else "team"},
                }
            )

        # Create or update component
        db_.upsert_component(
            component_id=self.id,
            component_type=ComponentType.TEAM,
            name=getattr(self, "name", self.id),
            description=getattr(self, "description", None),
            metadata=getattr(self, "metadata", None),
        )

        # Create or update config with links
        config = db_.upsert_config(
            component_id=self.id,
            config=self.to_dict(),
            links=all_links if all_links else None,
            label=label,
            stage=stage,
            notes=notes,
        )

        return config["version"]

    except Exception as e:
        log_error(f"Error saving Team to database: {e}")
        raise
|
|
8856
|
+
|
|
8857
|
+
@classmethod
def load(
    cls,
    id: str,
    *,
    db: "BaseDb",
    registry: Optional["Registry"] = None,
    label: Optional[str] = None,
    version: Optional[int] = None,
) -> Optional["Team"]:
    """
    Load a team by id, with hydrated members.

    Args:
        id: The id of the team to load.
        db: The database to load the team from.
        registry: Optional Registry used to reconstruct unserializable components.
        label: The label of the team to load.
        version: Optional config version to load; defaults to the current version.

    Returns:
        The team loaded from the database with hydrated members, or None if not found.
    """
    # Imported locally; presumably avoids a circular import between Team and Agent.
    from agno.agent.agent import Agent

    # Use graph to load team + all members
    graph = db.load_component_graph(id, version=version, label=label)
    if graph is None:
        return None

    config = graph["config"].get("config")
    if config is None:
        return None

    team = cls.from_dict(config, db=db, registry=registry)
    team.id = id
    team.db = db

    # Hydrate members from graph children
    team.members = []
    for child in graph.get("children", []):
        child_graph = child.get("graph")
        if child_graph is None:
            continue

        child_config = child_graph["config"].get("config")
        if child_config is None:
            continue

        link_meta = child["link"].get("meta", {})
        member_type = link_meta.get("type")

        if member_type == "agent":
            # NOTE(review): Agent.from_dict is called without db/registry here —
            # confirm whether agent members also need registry-based rehydration.
            agent = Agent.from_dict(child_config)
            agent.id = child_graph["component"]["component_id"]
            agent.db = db
            team.members.append(agent)
        elif member_type == "team":
            # Recursive load for nested teams. Propagate the registry so nested
            # teams can reconstruct unserializable components just like the root.
            nested_team = cls.load(child_graph["component"]["component_id"], db=db, registry=registry)
            if nested_team:
                team.members.append(nested_team)

    return team
|
|
8919
|
+
|
|
8920
|
+
def delete(
    self,
    *,
    db: Optional["BaseDb"] = None,
    hard_delete: bool = False,
) -> bool:
    """
    Delete the team component from the database.

    Args:
        db: The database to delete the component from. Falls back to self.db.
        hard_delete: Whether to hard delete the component.

    Returns:
        True if the component was deleted, False otherwise.

    Raises:
        ValueError: If no database is available, the database is async,
            or the team has no id.
    """
    # Prefer the explicitly-provided database, falling back to the instance one.
    target_db = db or self.db
    if not target_db:
        raise ValueError("Db not initialized or provided")
    if not isinstance(target_db, BaseDb):
        raise ValueError("Async databases not yet supported for delete(). Use a sync database.")
    if self.id is None:
        raise ValueError("Cannot delete team without an id")

    return target_db.delete_component(component_id=self.id, hard_delete=hard_delete)
|
|
8945
|
+
|
|
8220
8946
|
# -*- Public convenience functions
|
|
8221
8947
|
def get_run_output(
|
|
8222
8948
|
self, run_id: str, session_id: Optional[str] = None
|
|
@@ -8864,6 +9590,7 @@ class Team:
|
|
|
8864
9590
|
title=title,
|
|
8865
9591
|
reasoning=thought,
|
|
8866
9592
|
action=action,
|
|
9593
|
+
result=None,
|
|
8867
9594
|
next_action=NextAction.CONTINUE,
|
|
8868
9595
|
confidence=confidence,
|
|
8869
9596
|
)
|
|
@@ -8899,6 +9626,7 @@ class Team:
|
|
|
8899
9626
|
# Create a reasoning step
|
|
8900
9627
|
reasoning_step = ReasoningStep(
|
|
8901
9628
|
title=title,
|
|
9629
|
+
action=None,
|
|
8902
9630
|
result=result,
|
|
8903
9631
|
reasoning=analysis,
|
|
8904
9632
|
next_action=next_action_enum,
|
|
@@ -8927,7 +9655,10 @@ class Team:
|
|
|
8927
9655
|
thought = tool_args["thought"]
|
|
8928
9656
|
reasoning_step = ReasoningStep(
|
|
8929
9657
|
title="Thinking",
|
|
9658
|
+
action=None,
|
|
9659
|
+
result=None,
|
|
8930
9660
|
reasoning=thought,
|
|
9661
|
+
next_action=None,
|
|
8931
9662
|
confidence=None,
|
|
8932
9663
|
)
|
|
8933
9664
|
formatted_content = f"## Thinking\n{thought}\n\n"
|
|
@@ -8955,16 +9686,17 @@ class Team:
|
|
|
8955
9686
|
log_warning("Knowledge is not set, cannot add to knowledge")
|
|
8956
9687
|
return "Knowledge is not set, cannot add to knowledge"
|
|
8957
9688
|
|
|
8958
|
-
|
|
8959
|
-
|
|
8960
|
-
|
|
9689
|
+
insert_method = getattr(self.knowledge, "insert", None)
|
|
9690
|
+
if not callable(insert_method):
|
|
9691
|
+
log_warning("Knowledge base does not support adding content")
|
|
9692
|
+
return "Knowledge base does not support adding content"
|
|
8961
9693
|
|
|
8962
9694
|
document_name = query.replace(" ", "_").replace("?", "").replace("!", "").replace(".", "")
|
|
8963
9695
|
document_content = json.dumps({"query": query, "result": result})
|
|
8964
9696
|
log_info(f"Adding document to Knowledge: {document_name}: {document_content}")
|
|
8965
9697
|
from agno.knowledge.reader.text_reader import TextReader
|
|
8966
9698
|
|
|
8967
|
-
|
|
9699
|
+
insert_method(name=document_name, text_content=document_content, reader=TextReader())
|
|
8968
9700
|
return "Successfully added to knowledge base"
|
|
8969
9701
|
|
|
8970
9702
|
def get_relevant_docs_from_knowledge(
|
|
@@ -8982,25 +9714,26 @@ class Team:
|
|
|
8982
9714
|
dependencies = run_context.dependencies if run_context else None
|
|
8983
9715
|
|
|
8984
9716
|
if num_documents is None and self.knowledge is not None:
|
|
8985
|
-
num_documents = self.knowledge
|
|
9717
|
+
num_documents = getattr(self.knowledge, "max_results", None)
|
|
8986
9718
|
|
|
8987
9719
|
# Validate the filters against known valid filter keys
|
|
8988
|
-
if self.knowledge is not None:
|
|
8989
|
-
|
|
9720
|
+
if self.knowledge is not None and filters is not None:
|
|
9721
|
+
validate_filters_method = getattr(self.knowledge, "validate_filters", None)
|
|
9722
|
+
if callable(validate_filters_method):
|
|
9723
|
+
valid_filters, invalid_keys = validate_filters_method(filters)
|
|
8990
9724
|
|
|
8991
|
-
|
|
8992
|
-
|
|
8993
|
-
|
|
8994
|
-
log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
|
|
9725
|
+
# Warn about invalid filter keys
|
|
9726
|
+
if invalid_keys:
|
|
9727
|
+
log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
|
|
8995
9728
|
|
|
8996
|
-
|
|
8997
|
-
|
|
8998
|
-
|
|
8999
|
-
|
|
9729
|
+
# Only use valid filters
|
|
9730
|
+
filters = valid_filters
|
|
9731
|
+
if not filters:
|
|
9732
|
+
log_warning("No valid filters remain after validation. Search will proceed without filters.")
|
|
9000
9733
|
|
|
9001
|
-
|
|
9002
|
-
|
|
9003
|
-
|
|
9734
|
+
if invalid_keys == [] and valid_filters == {}:
|
|
9735
|
+
log_debug("No valid filters provided. Search will proceed without filters.")
|
|
9736
|
+
filters = None
|
|
9004
9737
|
|
|
9005
9738
|
if self.knowledge_retriever is not None and callable(self.knowledge_retriever):
|
|
9006
9739
|
from inspect import signature
|
|
@@ -9022,17 +9755,22 @@ class Team:
|
|
|
9022
9755
|
except Exception as e:
|
|
9023
9756
|
log_warning(f"Knowledge retriever failed: {e}")
|
|
9024
9757
|
raise e
|
|
9758
|
+
# Use knowledge protocol's retrieve method
|
|
9025
9759
|
try:
|
|
9026
|
-
if self.knowledge is None
|
|
9760
|
+
if self.knowledge is None:
|
|
9761
|
+
return None
|
|
9762
|
+
|
|
9763
|
+
# Use protocol retrieve() method if available
|
|
9764
|
+
retrieve_fn = getattr(self.knowledge, "retrieve", None)
|
|
9765
|
+
if not callable(retrieve_fn):
|
|
9766
|
+
log_debug("Knowledge does not implement retrieve()")
|
|
9027
9767
|
return None
|
|
9028
9768
|
|
|
9029
9769
|
if num_documents is None:
|
|
9030
|
-
num_documents = self.knowledge
|
|
9770
|
+
num_documents = getattr(self.knowledge, "max_results", 10)
|
|
9031
9771
|
|
|
9032
|
-
log_debug(f"
|
|
9033
|
-
relevant_docs: List[Document] =
|
|
9034
|
-
query=query, max_results=num_documents, filters=filters
|
|
9035
|
-
)
|
|
9772
|
+
log_debug(f"Retrieving from knowledge base with filters: {filters}")
|
|
9773
|
+
relevant_docs: List[Document] = retrieve_fn(query=query, max_results=num_documents, filters=filters)
|
|
9036
9774
|
|
|
9037
9775
|
if not relevant_docs or len(relevant_docs) == 0:
|
|
9038
9776
|
log_debug("No relevant documents found for query")
|
|
@@ -9040,7 +9778,7 @@ class Team:
|
|
|
9040
9778
|
|
|
9041
9779
|
return [doc.to_dict() for doc in relevant_docs]
|
|
9042
9780
|
except Exception as e:
|
|
9043
|
-
log_warning(f"Error
|
|
9781
|
+
log_warning(f"Error retrieving from knowledge base: {e}")
|
|
9044
9782
|
raise e
|
|
9045
9783
|
|
|
9046
9784
|
async def aget_relevant_docs_from_knowledge(
|
|
@@ -9058,25 +9796,26 @@ class Team:
|
|
|
9058
9796
|
dependencies = run_context.dependencies if run_context else None
|
|
9059
9797
|
|
|
9060
9798
|
if num_documents is None and self.knowledge is not None:
|
|
9061
|
-
num_documents = self.knowledge
|
|
9799
|
+
num_documents = getattr(self.knowledge, "max_results", None)
|
|
9062
9800
|
|
|
9063
9801
|
# Validate the filters against known valid filter keys
|
|
9064
|
-
if self.knowledge is not None:
|
|
9065
|
-
|
|
9802
|
+
if self.knowledge is not None and filters is not None:
|
|
9803
|
+
avalidate_filters_method = getattr(self.knowledge, "avalidate_filters", None)
|
|
9804
|
+
if callable(avalidate_filters_method):
|
|
9805
|
+
valid_filters, invalid_keys = await avalidate_filters_method(filters)
|
|
9066
9806
|
|
|
9067
|
-
|
|
9068
|
-
|
|
9069
|
-
|
|
9070
|
-
log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
|
|
9807
|
+
# Warn about invalid filter keys
|
|
9808
|
+
if invalid_keys:
|
|
9809
|
+
log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
|
|
9071
9810
|
|
|
9072
|
-
|
|
9073
|
-
|
|
9074
|
-
|
|
9075
|
-
|
|
9811
|
+
# Only use valid filters
|
|
9812
|
+
filters = valid_filters
|
|
9813
|
+
if not filters:
|
|
9814
|
+
log_warning("No valid filters remain after validation. Search will proceed without filters.")
|
|
9076
9815
|
|
|
9077
|
-
|
|
9078
|
-
|
|
9079
|
-
|
|
9816
|
+
if invalid_keys == [] and valid_filters == {}:
|
|
9817
|
+
log_debug("No valid filters provided. Search will proceed without filters.")
|
|
9818
|
+
filters = None
|
|
9080
9819
|
|
|
9081
9820
|
if self.knowledge_retriever is not None and callable(self.knowledge_retriever):
|
|
9082
9821
|
from inspect import isawaitable, signature
|
|
@@ -9105,17 +9844,32 @@ class Team:
|
|
|
9105
9844
|
log_warning(f"Knowledge retriever failed: {e}")
|
|
9106
9845
|
raise e
|
|
9107
9846
|
|
|
9847
|
+
# Use knowledge protocol's retrieve method
|
|
9108
9848
|
try:
|
|
9109
|
-
if self.knowledge is None
|
|
9849
|
+
if self.knowledge is None:
|
|
9850
|
+
return None
|
|
9851
|
+
|
|
9852
|
+
# Use protocol aretrieve() or retrieve() method if available
|
|
9853
|
+
aretrieve_fn = getattr(self.knowledge, "aretrieve", None)
|
|
9854
|
+
retrieve_fn = getattr(self.knowledge, "retrieve", None)
|
|
9855
|
+
|
|
9856
|
+
if not callable(aretrieve_fn) and not callable(retrieve_fn):
|
|
9857
|
+
log_debug("Knowledge does not implement retrieve()")
|
|
9110
9858
|
return None
|
|
9111
9859
|
|
|
9112
9860
|
if num_documents is None:
|
|
9113
|
-
num_documents = self.knowledge
|
|
9861
|
+
num_documents = getattr(self.knowledge, "max_results", 10)
|
|
9114
9862
|
|
|
9115
|
-
log_debug(f"
|
|
9116
|
-
|
|
9117
|
-
|
|
9118
|
-
|
|
9863
|
+
log_debug(f"Retrieving from knowledge base with filters: {filters}")
|
|
9864
|
+
|
|
9865
|
+
if callable(aretrieve_fn):
|
|
9866
|
+
relevant_docs: List[Document] = await aretrieve_fn(
|
|
9867
|
+
query=query, max_results=num_documents, filters=filters
|
|
9868
|
+
)
|
|
9869
|
+
elif callable(retrieve_fn):
|
|
9870
|
+
relevant_docs = retrieve_fn(query=query, max_results=num_documents, filters=filters)
|
|
9871
|
+
else:
|
|
9872
|
+
return None
|
|
9119
9873
|
|
|
9120
9874
|
if not relevant_docs or len(relevant_docs) == 0:
|
|
9121
9875
|
log_debug("No relevant documents found for query")
|
|
@@ -9123,7 +9877,7 @@ class Team:
|
|
|
9123
9877
|
|
|
9124
9878
|
return [doc.to_dict() for doc in relevant_docs]
|
|
9125
9879
|
except Exception as e:
|
|
9126
|
-
log_warning(f"Error
|
|
9880
|
+
log_warning(f"Error retrieving from knowledge base: {e}")
|
|
9127
9881
|
raise e
|
|
9128
9882
|
|
|
9129
9883
|
def _convert_documents_to_string(self, docs: List[Union[Dict[str, Any], str]]) -> str:
|
|
@@ -9523,3 +10277,84 @@ class Team:
|
|
|
9523
10277
|
except Exception:
|
|
9524
10278
|
# If copy fails, return as is
|
|
9525
10279
|
return field_value
|
|
10280
|
+
|
|
10281
|
+
|
|
10282
|
+
def get_team_by_id(
    db: "BaseDb",
    id: str,
    version: Optional[int] = None,
    label: Optional[str] = None,
    registry: Optional["Registry"] = None,
) -> Optional["Team"]:
    """
    Get a Team by id from the database.

    Resolution order:
    - if version is provided: load that version
    - elif label is provided: load that labeled version
    - else: load component.current_version

    Args:
        db: Database handle.
        id: Team component_id.
        version: Optional integer config version.
        label: Optional version_label.
        registry: Optional Registry for reconstructing unserializable components.

    Returns:
        Team instance or None.
    """
    try:
        stored = db.get_config(component_id=id, version=version, label=label)
        if stored is None:
            return None

        team_config = stored.get("config") if isinstance(stored, dict) else None
        if team_config is None:
            raise ValueError(f"Invalid config found for team {id}")

        loaded_team = Team.from_dict(team_config, db=db, registry=registry)
        # The stored config may not carry the component_id; pin it explicitly.
        loaded_team.id = id
        return loaded_team

    except Exception as e:
        # Best-effort lookup: log and return None rather than propagating.
        log_error(f"Error loading Team {id} from database: {e}")
        return None
|
|
10325
|
+
|
|
10326
|
+
|
|
10327
|
+
def get_teams(
    db: "BaseDb",
    registry: Optional["Registry"] = None,
) -> List["Team"]:
    """
    Get all teams from the database.

    Args:
        db: Database to load teams from
        registry: Optional registry for rehydrating tools

    Returns:
        List of Team instances loaded from the database
    """
    loaded: List[Team] = []
    try:
        components, _ = db.list_components(component_type=ComponentType.TEAM)
        for component in components:
            component_id = component["component_id"]
            row = db.get_config(component_id=component_id)
            if row is None:
                continue
            team_config = row.get("config")
            if team_config is None:
                continue
            # Make sure the config carries its component_id before hydration.
            team_config.setdefault("id", component_id)
            team = Team.from_dict(team_config, db=db, registry=registry)
            # Ensure team.id is set to the component_id
            team.id = component_id
            loaded.append(team)
        return loaded

    except Exception as e:
        # Best-effort listing: log and return an empty list rather than raising.
        log_error(f"Error loading Teams from database: {e}")
        return []
|