agno 1.7.5__py3-none-any.whl → 1.7.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +5 -24
- agno/app/agui/async_router.py +5 -5
- agno/app/agui/sync_router.py +5 -5
- agno/app/agui/utils.py +84 -14
- agno/app/playground/app.py +3 -2
- agno/document/chunking/row.py +39 -0
- agno/document/reader/base.py +0 -7
- agno/embedder/jina.py +73 -0
- agno/embedder/openai.py +5 -1
- agno/memory/agent.py +2 -2
- agno/memory/team.py +2 -2
- agno/models/anthropic/claude.py +9 -1
- agno/models/aws/bedrock.py +311 -15
- agno/models/google/gemini.py +26 -6
- agno/models/litellm/chat.py +38 -7
- agno/models/message.py +1 -0
- agno/models/openai/chat.py +1 -22
- agno/models/openai/responses.py +5 -5
- agno/models/portkey/__init__.py +3 -0
- agno/models/portkey/portkey.py +88 -0
- agno/models/xai/xai.py +54 -0
- agno/run/v2/workflow.py +4 -0
- agno/storage/mysql.py +2 -0
- agno/storage/postgres.py +5 -3
- agno/storage/session/v2/workflow.py +29 -5
- agno/storage/singlestore.py +4 -1
- agno/storage/sqlite.py +0 -1
- agno/team/team.py +38 -36
- agno/tools/bitbucket.py +292 -0
- agno/tools/daytona.py +411 -63
- agno/tools/evm.py +123 -0
- agno/tools/jina.py +13 -6
- agno/tools/linkup.py +54 -0
- agno/tools/mcp.py +170 -26
- agno/tools/mem0.py +15 -2
- agno/tools/models/morph.py +186 -0
- agno/tools/postgres.py +186 -168
- agno/tools/zep.py +21 -32
- agno/utils/log.py +16 -0
- agno/utils/models/claude.py +1 -0
- agno/utils/string.py +14 -0
- agno/vectordb/pgvector/pgvector.py +4 -5
- agno/workflow/v2/workflow.py +152 -25
- agno/workflow/workflow.py +90 -63
- {agno-1.7.5.dist-info → agno-1.7.7.dist-info}/METADATA +20 -3
- {agno-1.7.5.dist-info → agno-1.7.7.dist-info}/RECORD +50 -42
- {agno-1.7.5.dist-info → agno-1.7.7.dist-info}/WHEEL +0 -0
- {agno-1.7.5.dist-info → agno-1.7.7.dist-info}/entry_points.txt +0 -0
- {agno-1.7.5.dist-info → agno-1.7.7.dist-info}/licenses/LICENSE +0 -0
- {agno-1.7.5.dist-info → agno-1.7.7.dist-info}/top_level.txt +0 -0
agno/models/portkey/portkey.py
ADDED

@@ -0,0 +1,88 @@
+from dataclasses import dataclass
+from os import getenv
+from typing import Any, Dict, Optional, cast
+
+from agno.exceptions import ModelProviderError
+from agno.models.openai.like import OpenAILike
+
+try:
+    from portkey_ai import PORTKEY_GATEWAY_URL, createHeaders
+except ImportError:
+    raise ImportError("`portkey-ai` not installed. Please install using `pip install portkey-ai`")
+
+
+@dataclass
+class Portkey(OpenAILike):
+    """
+    A class for using models through the Portkey AI Gateway.
+
+    Attributes:
+        id (str): The model id. Defaults to "gpt-4o-mini".
+        name (str): The model name. Defaults to "Portkey".
+        provider (str): The provider name. Defaults to "Portkey".
+        portkey_api_key (Optional[str]): The Portkey API key.
+        virtual_key (Optional[str]): The virtual key for model routing.
+        config (Optional[Dict[str, Any]]): Portkey configuration for routing, retries, etc.
+        base_url (str): The Portkey gateway URL.
+    """
+
+    id: str = "gpt-4o-mini"
+    name: str = "Portkey"
+    provider: str = "Portkey"
+
+    portkey_api_key: Optional[str] = getenv("PORTKEY_API_KEY")
+    virtual_key: Optional[str] = getenv("PORTKEY_VIRTUAL_KEY")
+    config: Optional[Dict[str, Any]] = None
+    base_url: str = PORTKEY_GATEWAY_URL
+
+    def _get_client_params(self) -> Dict[str, Any]:
+        # Check for required keys
+        if not self.portkey_api_key:
+            raise ModelProviderError(
+                message="PORTKEY_API_KEY not set. Please set the PORTKEY_API_KEY environment variable.",
+                model_name=self.name,
+                model_id=self.id,
+            )
+
+        if not self.virtual_key:
+            raise ModelProviderError(
+                message="PORTKEY_VIRTUAL_KEY not set. Please set the PORTKEY_VIRTUAL_KEY environment variable.",
+                model_name=self.name,
+                model_id=self.id,
+            )
+
+        # Create headers using Portkey's createHeaders function
+        header_params: Dict[str, Any] = {
+            "api_key": self.portkey_api_key,
+            "virtual_key": self.virtual_key,
+        }
+
+        if self.config is not None:
+            header_params["config"] = self.config
+
+        portkey_headers = cast(Dict[str, Any], createHeaders(**header_params))
+
+        # Merge with any existing default headers
+        default_headers: Dict[str, Any] = {}
+        if self.default_headers and isinstance(self.default_headers, dict):
+            default_headers.update(self.default_headers)
+        default_headers.update(portkey_headers)
+
+        # Define base client params
+        base_params = {
+            "api_key": "not-needed",  # We use virtual keys instead
+            "organization": self.organization,
+            "base_url": self.base_url,
+            "timeout": self.timeout,
+            "max_retries": self.max_retries,
+            "default_headers": default_headers,
+            "default_query": self.default_query,
+        }
+
+        # Create client_params dict with non-None values
+        client_params = {k: v for k, v in base_params.items() if v is not None}
+
+        # Add additional client params if provided
+        if self.client_params:
+            client_params.update(self.client_params)
+        return client_params
agno/models/xai/xai.py
CHANGED
@@ -4,9 +4,17 @@ from typing import Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel
 
+from agno.models.message import Citations, UrlCitation
 from agno.models.openai.like import OpenAILike
+from agno.models.response import ModelResponse
 from agno.utils.log import log_debug
 
+try:
+    from openai.types.chat.chat_completion import ChatCompletion
+    from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+except (ImportError, ModuleNotFoundError):
+    raise ImportError("`openai` not installed. Please install using `pip install openai`")
+
 
 @dataclass
 class xAI(OpenAILike):

@@ -56,3 +64,49 @@ class xAI(OpenAILike):
         log_debug(f"Calling {self.provider} with request parameters: {request_params}", log_level=2)
 
         return request_params
+
+    def parse_provider_response(
+        self,
+        response: ChatCompletion,
+        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+    ) -> ModelResponse:
+        """
+        Parse the xAI response into a ModelResponse.
+        """
+        model_response = super().parse_provider_response(response, response_format)
+
+        if hasattr(response, "citations") and response.citations:
+            citations = Citations()
+            url_citations = []
+            for citation_url in response.citations:
+                url_citations.append(UrlCitation(url=str(citation_url)))
+
+            citations.urls = url_citations
+            citations.raw = response.citations
+            model_response.citations = citations
+
+        return model_response
+
+    def parse_provider_response_delta(self, response_delta: ChatCompletionChunk) -> ModelResponse:
+        """
+        Parse the xAI streaming response.
+
+        Args:
+            response_delta: Raw response chunk
+
+        Returns:
+            ModelResponse: Parsed response data
+        """
+        model_response = super().parse_provider_response_delta(response_delta)
+
+        if hasattr(response_delta, "citations") and response_delta.citations:
+            citations = Citations()
+            url_citations = []
+            for citation_url in response_delta.citations:
+                url_citations.append(UrlCitation(url=str(citation_url)))
+
+            citations.urls = url_citations
+            citations.raw = response_delta.citations
+            model_response.citations = citations
+
+        return model_response
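With these overrides, URL citations returned by xAI are now attached to the model response in both non-streaming and streaming paths. A rough sketch of reading them back (illustrative only; the model id and the assumption that the agent's run response surfaces `citations` are not part of this diff):

    from agno.agent import Agent
    from agno.models.xai import xAI

    agent = Agent(model=xAI(id="grok-3"))  # model id is illustrative
    response = agent.run("Summarize today's AI news with sources.")

    # Assumes citations propagate from the ModelResponse to the agent's run response.
    if response.citations and response.citations.urls:
        for citation in response.citations.urls:
            print(citation.url)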
agno/run/v2/workflow.py
CHANGED
@@ -561,3 +561,7 @@ class WorkflowRunResponse:
             return self.content.model_dump_json(exclude_none=True, **kwargs)
         else:
             return json.dumps(self.content, **kwargs)
+
+    def has_completed(self) -> bool:
+        """Check if the workflow run is completed (either successfully or with error)"""
+        return self.status in [RunStatus.completed, RunStatus.error]
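The new `has_completed()` helper treats both `completed` and `error` as terminal states, which simplifies polling loops. A small sketch (the polling helper is hypothetical, not from the package):

    import time

    def wait_for_terminal_state(run, interval: float = 0.5) -> None:
        """Block until a WorkflowRunResponse reaches a terminal state (sketch)."""
        while not run.has_completed():  # True for RunStatus.completed or RunStatus.error
            time.sleep(interval)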
agno/storage/mysql.py
CHANGED
@@ -131,7 +131,9 @@ class MySQLStorage(Storage):
         elif self.mode == "workflow_v2":
             specific_columns = [
                 Column("workflow_id", String(255), index=True),
+                Column("workflow_name", String(255), index=True),
                 Column("workflow_data", JSON),
+                Column("runs", JSON),
             ]
         # Create table with all columns
         table = Table(
agno/storage/postgres.py
CHANGED
@@ -30,7 +30,7 @@ class PostgresStorage(Storage):
         db_engine: Optional[Engine] = None,
         schema_version: int = 1,
         auto_upgrade_schema: bool = False,
-        mode: Optional[Literal["agent", "team", "workflow"]] = "agent",
+        mode: Optional[Literal["agent", "team", "workflow", "workflow_v2"]] = "agent",
     ):
         """
         This class provides agent storage using a PostgreSQL table.

@@ -131,7 +131,9 @@ class PostgresStorage(Storage):
         elif self.mode == "workflow_v2":
             specific_columns = [
                 Column("workflow_id", String, index=True),
+                Column("workflow_name", String, index=True),
                 Column("workflow_data", postgresql.JSONB),
+                Column("runs", postgresql.JSONB),
             ]
 
         # Create table with all columns

@@ -569,9 +571,9 @@ class PostgresStorage(Storage):
             stmt = postgresql.insert(self.table).values(
                 session_id=session.session_id,
                 workflow_id=session.workflow_id,  # type: ignore
-                workflow_name=session.workflow_name,  # type: ignore
                 user_id=session.user_id,
                 runs=session_dict.get("runs"),
+                workflow_name=session.workflow_name,  # type: ignore
                 workflow_data=session.workflow_data,  # type: ignore
                 session_data=session.session_data,
                 extra_data=session.extra_data,

@@ -582,9 +584,9 @@ class PostgresStorage(Storage):
                 index_elements=["session_id"],
                 set_=dict(
                     workflow_id=session.workflow_id,  # type: ignore
-                    workflow_name=session.workflow_name,  # type: ignore
                     user_id=session.user_id,
                     runs=session_dict.get("runs"),
+                    workflow_name=session.workflow_name,  # type: ignore
                     workflow_data=session.workflow_data,  # type: ignore
                     session_data=session.session_data,
                     extra_data=session.extra_data,
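Together with the new `workflow_name` and `runs` columns, the `mode` literal now accepts "workflow_v2", so v2 workflow sessions can be persisted in Postgres. A minimal sketch (table name and connection URL are placeholders):

    from agno.storage.postgres import PostgresStorage

    storage = PostgresStorage(
        table_name="workflow_v2_sessions",                          # placeholder table name
        db_url="postgresql+psycopg://user:pass@localhost:5432/ai",  # placeholder URL
        mode="workflow_v2",                                         # mode added in this release
    )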
agno/storage/session/v2/workflow.py
CHANGED

@@ -40,21 +40,45 @@ class WorkflowSession:
         if self.runs is None:
             self.runs = []
 
-    def
-        """Add a workflow run
+    def upsert_run(self, run: WorkflowRunResponse) -> None:
+        """Add or update a workflow run (upsert behavior)"""
         if self.runs is None:
             self.runs = []
-
-
+
+        # Find existing run and update it, or append new one
+        for i, existing_run in enumerate(self.runs):
+            if existing_run.run_id == run.run_id:
+                # Update existing run
+                self.runs[i] = run
+                return
+
+        # Run not found, append new one
+        self.runs.append(run)
 
     def to_dict(self) -> Dict[str, Any]:
         """Convert to dictionary for storage, serializing runs to dicts"""
+
+        runs_data = None
+        if self.runs:
+            runs_data = []
+            for run in self.runs:
+                try:
+                    runs_data.append(run.to_dict())
+                except Exception as e:
+                    # If run serialization fails, create a minimal representation
+                    runs_data.append(
+                        {
+                            "run_id": getattr(run, "run_id", "unknown"),
+                            "status": str(getattr(run, "status", "unknown")),
+                            "error": f"Serialization failed: {str(e)}",
+                        }
+                    )
         return {
             "session_id": self.session_id,
             "user_id": self.user_id,
             "workflow_id": self.workflow_id,
             "workflow_name": self.workflow_name,
-            "runs":
+            "runs": runs_data,
             "session_data": self.session_data,
             "workflow_data": self.workflow_data,
             "extra_data": self.extra_data,
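`upsert_run` replaces an existing run with the same `run_id` instead of appending a duplicate. A rough sketch of the semantics (only the fields used here are shown; the actual required constructor arguments may differ):

    from agno.run.v2.workflow import WorkflowRunResponse
    from agno.storage.session.v2.workflow import WorkflowSession

    session = WorkflowSession(session_id="session-1")
    session.upsert_run(WorkflowRunResponse(run_id="run-1"))  # appended: run_id not seen before
    session.upsert_run(WorkflowRunResponse(run_id="run-1"))  # replaces the stored run in place
    assert len(session.runs) == 1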
agno/storage/singlestore.py
CHANGED
@@ -120,6 +120,7 @@ class SingleStoreStorage(Storage):
             specific_columns = [
                 Column("workflow_id", mysql.TEXT),
                 Column("workflow_data", mysql.JSON),
+                Column("runs", mysql.JSON),
             ]
 
         # Create table with all columns

@@ -518,7 +519,9 @@ class SingleStoreStorage(Storage):
                 "workflow_id": session.workflow_id,  # type: ignore
                 "user_id": session.user_id,
                 "workflow_name": session.workflow_name,  # type: ignore
-                "runs": session_dict.get("runs"),
+                "runs": json.dumps(session_dict.get("runs"), ensure_ascii=False)
+                if session_dict.get("runs")
+                else None,
                 "workflow_data": json.dumps(session.workflow_data, ensure_ascii=False)  # type: ignore
                 if session.workflow_data is not None  # type: ignore
                 else None,
agno/storage/sqlite.py
CHANGED
agno/team/team.py
CHANGED
@@ -688,15 +688,6 @@ class Team:
         if self.workflow_session_state is not None:
             self.workflow_session_state["current_user_id"] = user_id
 
-    def _reset_session_state(self) -> None:
-        """Reset the session state for the agent."""
-        if self.team_session_state is not None:
-            self.team_session_state.pop("current_session_id", None)
-            self.team_session_state.pop("current_user_id", None)
-        if self.session_state is not None:
-            self.session_state.pop("current_session_id", None)
-            self.session_state.pop("current_user_id", None)
-
     def _initialize_session(
         self,
         session_id: Optional[str] = None,

@@ -974,8 +965,6 @@ class Team:
                     from_run_response=run_response,
                     session_id=session_id,
                 )
-            finally:
-                self._reset_session_state()
 
         # If we get here, all retries failed
         if last_exception is not None:

@@ -1363,8 +1352,6 @@ class Team:
                     from_run_response=run_response,
                     session_id=session_id,
                 )
-            finally:
-                self._reset_session_state()
 
         # If we get here, all retries failed
        if last_exception is not None:

@@ -5892,7 +5879,7 @@ class Team:
 
         Args:
             task_description (str): The task description to send to the member agents.
-            expected_output (str): The expected output from the member agents.
+            expected_output (str, optional): The expected output from the member agents.
 
         Returns:
             str: The responses from the member agents.

@@ -5906,14 +5893,18 @@ class Team:
                session_id, images, videos, audio
            )
 
-            # 3.
-            member_agent_task = self._format_member_agent_task(
-                task_description, expected_output, team_context_str, team_member_interactions_str
-            )
-
+            # 3. Run members
             for member_agent_index, member_agent in enumerate(self.members):
                 self._initialize_member(member_agent, session_id=session_id)
 
+                # Don't override the expected output of a member agent
+                if member_agent.expected_output is not None:
+                    expected_output = None
+
+                member_agent_task = self._format_member_agent_task(
+                    task_description, expected_output, team_context_str, team_member_interactions_str
+                )
+
                 if stream:
                     member_agent_run_response_stream = member_agent.run(
                         member_agent_task,

@@ -6021,11 +6012,6 @@ class Team:
                session_id, images, videos, audio
            )
 
-            # 3. Create the member agent task
-            member_agent_task = self._format_member_agent_task(
-                task_description, expected_output, team_context_str, team_member_interactions_str
-            )
-
             # Create tasks for all member agents
             tasks = []
             for member_agent_index, member_agent in enumerate(self.members):

@@ -6034,6 +6020,14 @@ class Team:
                 current_index = member_agent_index  # Create a reference to the current index
                 self._initialize_member(current_agent, session_id=session_id)
 
+                # Don't override the expected output of a member agent
+                if current_agent.expected_output is not None:
+                    expected_output = None
+
+                member_agent_task = self._format_member_agent_task(
+                    task_description, expected_output, team_context_str, team_member_interactions_str
+                )
+
                 async def run_member_agent(agent=current_agent, idx=current_index) -> str:
                     response = await agent.arun(
                         member_agent_task,

@@ -6185,7 +6179,7 @@ class Team:
         Args:
             member_id (str): The ID of the member to transfer the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.
             task_description (str): A clear and concise description of the task the member should achieve.
-            expected_output (str): The expected output from the member (optional).
+            expected_output (str, optional): The expected output from the member (optional).
         Returns:
             str: The result of the delegated task.
         """

@@ -6204,6 +6198,9 @@ class Team:
            )
 
            # 3. Create the member agent task
+           # Don't override the expected output of a member agent
+           if member_agent.expected_output is not None:
+               expected_output = None
            member_agent_task = self._format_member_agent_task(
                task_description, expected_output, team_context_str, team_member_interactions_str
            )

@@ -6324,7 +6321,7 @@ class Team:
         Args:
             member_id (str): The ID of the member to transfer the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.
             task_description (str): A clear and concise description of the task the member should achieve.
-            expected_output (str): The expected output from the member (optional).
+            expected_output (str, optional): The expected output from the member (optional).
         Returns:
             str: The result of the delegated task.
         """

@@ -6344,6 +6341,9 @@ class Team:
            )
 
            # 3. Create the member agent task
+           # Don't override the expected output of a member agent
+           if member_agent.expected_output is not None:
+               expected_output = None
            member_agent_task = self._format_member_agent_task(
                task_description, expected_output, team_context_str, team_member_interactions_str
            )

@@ -6555,7 +6555,7 @@ class Team:
         """Use this function to forward the request to the selected team member.
         Args:
             member_id (str): The ID of the member to transfer the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.
-            expected_output (str): The expected output from the member (optional).
+            expected_output (str, optional): The expected output from the member (optional).
         Returns:
             str: The result of the delegated task.
         """

@@ -6584,7 +6584,8 @@ class Team:
            # If found in subteam, include the path in the task description
            member_agent_task = message.get_content_string()
 
-
+           # Don't override the expected output of a member agent
+           if member_agent.expected_output is None and expected_output:
                member_agent_task += f"\n\n<expected_output>\n{expected_output}\n</expected_output>"
 
            # Handle enable_agentic_knowledge_filters

@@ -6694,7 +6695,7 @@ class Team:
 
         Args:
             member_id (str): The ID of the member to transfer the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.
-            expected_output (str): The expected output from the member (optional).
+            expected_output (str, optional): The expected output from the member (optional).
         Returns:
             str: The result of the delegated task.
         """

@@ -6719,7 +6720,8 @@ class Team:
            # If found in subteam, include the path in the task description
            member_agent_task = message.get_content_string()
 
-
+           # Don't override the expected output of a member agent
+           if member_agent.expected_output is None and expected_output:
                member_agent_task += f"\n\n<expected_output>\n{expected_output}\n</expected_output>"
 
            # Handle enable_agentic_knowledge_filters

@@ -7370,6 +7372,9 @@ class Team:
         """Return a list of references from the knowledge base"""
         from agno.document import Document
 
+        if num_documents is None and self.knowledge is not None:
+            num_documents = self.knowledge.num_documents
+
         # Validate the filters against known valid filter keys
         if self.knowledge is not None:
             valid_filters, invalid_keys = self.knowledge.validate_filters(filters)  # type: ignore

@@ -7404,9 +7409,6 @@ class Team:
         if self.knowledge is None or self.knowledge.vector_db is None:
             return None
 
-        if num_documents is None:
-            num_documents = self.knowledge.num_documents
-
         log_debug(f"Searching knowledge base with filters: {filters}")
         relevant_docs: List[Document] = self.knowledge.search(
             query=query, num_documents=num_documents, filters=filters

@@ -7427,6 +7429,9 @@ class Team:
         """Get relevant documents from knowledge base asynchronously."""
         from agno.document import Document
 
+        if num_documents is None and self.knowledge is not None:
+            num_documents = self.knowledge.num_documents
+
         # Validate the filters against known valid filter keys
         if self.knowledge is not None:
             valid_filters, invalid_keys = self.knowledge.validate_filters(filters)  # type: ignore

@@ -7463,9 +7468,6 @@ class Team:
         if self.knowledge is None or self.knowledge.vector_db is None:
             return None
 
-        if num_documents is None:
-            num_documents = self.knowledge.num_documents
-
         log_debug(f"Searching knowledge base with filters: {filters}")
         relevant_docs: List[Document] = await self.knowledge.async_search(
             query=query, num_documents=num_documents, filters=filters