agno 2.2.5__py3-none-any.whl → 2.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +500 -423
- agno/api/os.py +1 -1
- agno/culture/manager.py +12 -8
- agno/guardrails/prompt_injection.py +1 -0
- agno/knowledge/chunking/agentic.py +6 -2
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/knowledge.py +37 -5
- agno/memory/manager.py +9 -4
- agno/models/anthropic/claude.py +1 -2
- agno/models/azure/ai_foundry.py +31 -14
- agno/models/azure/openai_chat.py +12 -4
- agno/models/base.py +106 -65
- agno/models/cerebras/cerebras.py +11 -6
- agno/models/groq/groq.py +7 -4
- agno/models/meta/llama.py +12 -6
- agno/models/meta/llama_openai.py +5 -1
- agno/models/openai/chat.py +26 -17
- agno/models/openai/responses.py +11 -63
- agno/models/requesty/requesty.py +5 -2
- agno/models/utils.py +254 -8
- agno/models/vertexai/claude.py +9 -13
- agno/os/app.py +13 -12
- agno/os/routers/evals/evals.py +8 -8
- agno/os/routers/evals/utils.py +1 -0
- agno/os/schema.py +56 -38
- agno/os/utils.py +27 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +5 -0
- agno/run/base.py +18 -1
- agno/run/team.py +13 -9
- agno/run/workflow.py +39 -0
- agno/session/summary.py +8 -2
- agno/session/workflow.py +4 -3
- agno/team/team.py +302 -369
- agno/tools/exa.py +21 -16
- agno/tools/file.py +153 -25
- agno/tools/function.py +98 -17
- agno/tools/mcp/mcp.py +8 -1
- agno/tools/notion.py +204 -0
- agno/utils/agent.py +78 -0
- agno/utils/events.py +2 -0
- agno/utils/hooks.py +1 -1
- agno/utils/models/claude.py +25 -8
- agno/utils/print_response/workflow.py +115 -16
- agno/vectordb/__init__.py +2 -1
- agno/vectordb/milvus/milvus.py +5 -0
- agno/vectordb/redis/__init__.py +5 -0
- agno/vectordb/redis/redisdb.py +687 -0
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/step.py +13 -2
- agno/workflow/workflow.py +969 -72
- {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/METADATA +10 -3
- {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/RECORD +57 -52
- {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/WHEEL +0 -0
- {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/licenses/LICENSE +0 -0
- {agno-2.2.5.dist-info → agno-2.2.7.dist-info}/top_level.txt +0 -0
agno/tools/exa.py
CHANGED
@@ -27,14 +27,14 @@ class ExaTools(Toolkit):
         all (bool): Enable all tools. Overrides individual flags when True. Default is False.
         text (bool): Retrieve text content from results. Default is True.
         text_length_limit (int): Max length of text content per result. Default is 1000.
-        highlights (bool): Include highlighted snippets.
+        highlights (bool): Include highlighted snippets. Deprecated since it was removed in the Exa API. It will be removed from Agno in a future release.
         api_key (Optional[str]): Exa API key. Retrieved from `EXA_API_KEY` env variable if not provided.
         num_results (Optional[int]): Default number of search results. Overrides individual searches if set.
         start_crawl_date (Optional[str]): Include results crawled on/after this date (`YYYY-MM-DD`).
         end_crawl_date (Optional[str]): Include results crawled on/before this date (`YYYY-MM-DD`).
         start_published_date (Optional[str]): Include results published on/after this date (`YYYY-MM-DD`).
         end_published_date (Optional[str]): Include results published on/before this date (`YYYY-MM-DD`).
-        use_autoprompt (Optional[bool]): Enable autoprompt features in queries.
+        use_autoprompt (Optional[bool]): Enable autoprompt features in queries. Deprecated since it was removed in the Exa API. It will be removed from Agno in a future release.
         type (Optional[str]): Specify content type (e.g., article, blog, video).
         category (Optional[str]): Filter results by category. Options are "company", "research paper", "news", "pdf", "github", "tweet", "personal site", "linkedin profile", "financial report".
         include_domains (Optional[List[str]]): Restrict results to these domains.
@@ -54,7 +54,7 @@ class ExaTools(Toolkit):
         all: bool = False,
         text: bool = True,
         text_length_limit: int = 1000,
-        highlights: bool =
+        highlights: Optional[bool] = None,  # Deprecated
         summary: bool = False,
         api_key: Optional[str] = None,
         num_results: Optional[int] = None,
@@ -84,7 +84,24 @@ class ExaTools(Toolkit):
 
         self.text: bool = text
         self.text_length_limit: int = text_length_limit
-
+
+        if highlights:
+            import warnings
+
+            warnings.warn(
+                "The 'highlights' parameter is deprecated since it was removed in the Exa API. It will be removed from Agno in a future release.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+        if use_autoprompt:
+            import warnings
+
+            warnings.warn(
+                "The 'use_autoprompt' parameter is deprecated since it was removed in the Exa API. It will be removed from Agno in a future release.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
         self.summary: bool = summary
         self.num_results: Optional[int] = num_results
         self.livecrawl: str = livecrawl
@@ -92,7 +109,6 @@ class ExaTools(Toolkit):
         self.end_crawl_date: Optional[str] = end_crawl_date
         self.start_published_date: Optional[str] = start_published_date
         self.end_published_date: Optional[str] = end_published_date
-        self.use_autoprompt: Optional[bool] = use_autoprompt
         self.type: Optional[str] = type
         self.category: Optional[str] = category
         self.include_domains: Optional[List[str]] = include_domains
@@ -140,13 +156,6 @@ class ExaTools(Toolkit):
             if self.text_length_limit:
                 _text = _text[: self.text_length_limit]
             result_dict["text"] = _text
-            if self.highlights:
-                try:
-                    if result.highlights:  # type: ignore
-                        result_dict["highlights"] = result.highlights  # type: ignore
-                except Exception as e:
-                    log_debug(f"Failed to get highlights {e}")
-                    result_dict["highlights"] = f"Failed to get highlights {e}"
             exa_results_parsed.append(result_dict)
         return json.dumps(exa_results_parsed, indent=4, ensure_ascii=False)
 
@@ -168,14 +177,12 @@ class ExaTools(Toolkit):
         log_info(f"Searching exa for: {query}")
         search_kwargs: Dict[str, Any] = {
             "text": self.text,
-            "highlights": self.highlights,
             "summary": self.summary,
             "num_results": self.num_results or num_results,
             "start_crawl_date": self.start_crawl_date,
             "end_crawl_date": self.end_crawl_date,
             "start_published_date": self.start_published_date,
             "end_published_date": self.end_published_date,
-            "use_autoprompt": self.use_autoprompt,
             "type": self.type,
             "category": self.category or category,  # Prefer a user-set category
             "include_domains": self.include_domains,
@@ -212,7 +219,6 @@ class ExaTools(Toolkit):
 
         query_kwargs: Dict[str, Any] = {
             "text": self.text,
-            "highlights": self.highlights,
             "summary": self.summary,
         }
 
@@ -249,7 +255,6 @@ class ExaTools(Toolkit):
 
         query_kwargs: Dict[str, Any] = {
             "text": self.text,
-            "highlights": self.highlights,
             "summary": self.summary,
             "include_domains": self.include_domains,
             "exclude_domains": self.exclude_domains,
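
For context, a minimal sketch of the deprecation from the caller's side in 2.2.7 (assuming the exa-py dependency is installed and EXA_API_KEY is set; the constructor arguments below are illustrative): passing the removed flags now only emits a DeprecationWarning, and they are no longer forwarded to the Exa API.

import warnings

from agno.tools.exa import ExaTools

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Both flags are deprecated: they trigger a DeprecationWarning and are otherwise ignored.
    ExaTools(highlights=True, use_autoprompt=True)

print([str(w.message) for w in caught])

# Preferred usage going forward: omit the deprecated flags entirely.
exa_tools = ExaTools(text=True, summary=True, num_results=5)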
agno/tools/file.py
CHANGED
@@ -1,9 +1,9 @@
 import json
 from pathlib import Path
-from typing import Any, List, Optional
+from typing import Any, List, Optional, Tuple
 
 from agno.tools import Toolkit
-from agno.utils.log import log_debug, log_error
+from agno.utils.log import log_debug, log_error
 
 
 class FileTools(Toolkit):
@@ -12,14 +12,26 @@ class FileTools(Toolkit):
         base_dir: Optional[Path] = None,
         enable_save_file: bool = True,
         enable_read_file: bool = True,
+        enable_delete_file: bool = False,
         enable_list_files: bool = True,
         enable_search_files: bool = True,
+        enable_read_file_chunk: bool = True,
+        enable_replace_file_chunk: bool = True,
+        expose_base_directory: bool = False,
+        max_file_length: int = 10000000,
+        max_file_lines: int = 100000,
+        line_separator: str = "\n",
         all: bool = False,
         **kwargs,
     ):
         self.base_dir: Path = base_dir or Path.cwd()
+        self.base_dir = self.base_dir.resolve()
 
         tools: List[Any] = []
+        self.max_file_length = max_file_length
+        self.max_file_lines = max_file_lines
+        self.line_separator = line_separator
+        self.expose_base_directory = expose_base_directory
         if all or enable_save_file:
             tools.append(self.save_file)
         if all or enable_read_file:
@@ -28,10 +40,16 @@ class FileTools(Toolkit):
             tools.append(self.list_files)
         if all or enable_search_files:
             tools.append(self.search_files)
+        if all or enable_delete_file:
+            tools.append(self.delete_file)
+        if all or enable_read_file_chunk:
+            tools.append(self.read_file_chunk)
+        if all or enable_replace_file_chunk:
+            tools.append(self.replace_file_chunk)
 
         super().__init__(name="file_tools", tools=tools, **kwargs)
 
-    def save_file(self, contents: str, file_name: str, overwrite: bool = True) -> str:
+    def save_file(self, contents: str, file_name: str, overwrite: bool = True, encoding: str = "utf-8") -> str:
         """Saves the contents to a file called `file_name` and returns the file name if successful.
 
         :param contents: The contents to save.
@@ -40,44 +58,146 @@ class FileTools(Toolkit):
         :return: The file name if successful, otherwise returns an error message.
         """
         try:
-            file_path = self.
+            safe, file_path = self.check_escape(file_name)
+            if not (safe):
+                log_error(f"Attempted to save file: {file_name}")
+                return "Error saving file"
             log_debug(f"Saving contents to {file_path}")
             if not file_path.parent.exists():
                 file_path.parent.mkdir(parents=True, exist_ok=True)
             if file_path.exists() and not overwrite:
                 return f"File {file_name} already exists"
-            file_path.write_text(contents)
-
+            file_path.write_text(contents, encoding=encoding)
+            log_debug(f"Saved: {file_path}")
             return str(file_name)
         except Exception as e:
             log_error(f"Error saving to file: {e}")
             return f"Error saving to file: {e}"
 
-    def
+    def read_file_chunk(self, file_name: str, start_line: int, end_line: int, encoding: str = "utf-8") -> str:
+        """Reads the contents of the file `file_name` and returns lines from start_line to end_line.
+
+        :param file_name: The name of the file to read.
+        :param start_line: Number of first line in the returned chunk
+        :param end_line: Number of the last line in the returned chunk
+        :param encoding: Encoding to use, default - utf-8
+
+        :return: The contents of the selected chunk
+        """
+        try:
+            log_debug(f"Reading file: {file_name}")
+            safe, file_path = self.check_escape(file_name)
+            if not (safe):
+                log_error(f"Attempted to read file: {file_name}")
+                return "Error reading file"
+            contents = file_path.read_text(encoding=encoding)
+            lines = contents.split(self.line_separator)
+            return self.line_separator.join(lines[start_line : end_line + 1])
+        except Exception as e:
+            log_error(f"Error reading file: {e}")
+            return f"Error reading file: {e}"
+
+    def replace_file_chunk(
+        self, file_name: str, start_line: int, end_line: int, chunk: str, encoding: str = "utf-8"
+    ) -> str:
+        """Reads the contents of the file, replaces lines
+        between start_line and end_line with chunk and writes the file
+
+        :param file_name: The name of the file to process.
+        :param start_line: Number of first line in the replaced chunk
+        :param end_line: Number of the last line in the replaced chunk
+        :param chunk: String to be inserted instead of lines from start_line to end_line. Can have multiple lines.
+        :param encoding: Encoding to use, default - utf-8
+
+        :return: file name if successfull, error message otherwise
+        """
+        try:
+            log_debug(f"Patching file: {file_name}")
+            safe, file_path = self.check_escape(file_name)
+            if not (safe):
+                log_error(f"Attempted to read file: {file_name}")
+                return "Error reading file"
+            contents = file_path.read_text(encoding=encoding)
+            lines = contents.split(self.line_separator)
+            start = lines[0:start_line]
+            end = lines[end_line + 1 :]
+            return self.save_file(
+                file_name=file_name, contents=self.line_separator.join(start + [chunk] + end), encoding=encoding
+            )
+        except Exception as e:
+            log_error(f"Error patching file: {e}")
+            return f"Error patching file: {e}"
+
+    def read_file(self, file_name: str, encoding: str = "utf-8") -> str:
         """Reads the contents of the file `file_name` and returns the contents if successful.
 
         :param file_name: The name of the file to read.
+        :param encoding: Encoding to use, default - utf-8
         :return: The contents of the file if successful, otherwise returns an error message.
         """
         try:
-
-            file_path = self.
-
+            log_debug(f"Reading file: {file_name}")
+            safe, file_path = self.check_escape(file_name)
+            if not (safe):
+                log_error(f"Attempted to read file: {file_name}")
+                return "Error reading file"
+            contents = file_path.read_text(encoding=encoding)
+            if len(contents) > self.max_file_length:
+                return "Error reading file: file too long. Use read_file_chunk instead"
+            if len(contents.split(self.line_separator)) > self.max_file_lines:
+                return "Error reading file: file too long. Use read_file_chunk instead"
+
             return str(contents)
         except Exception as e:
             log_error(f"Error reading file: {e}")
             return f"Error reading file: {e}"
 
-    def
-        """
+    def delete_file(self, file_name: str) -> str:
+        """Deletes a file
+        :param file_name: Name of the file to delete
+
+        :return: Empty string, if operation succeeded, otherwise returns an error message
+        """
+        safe, path = self.check_escape(file_name)
+        try:
+            if safe:
+                if path.is_dir():
+                    path.rmdir()
+                    return ""
+                path.unlink()
+                return ""
+            else:
+                log_error(f"Attempt to delete file outside {self.base_dir}: {file_name}")
+                return "Incorrect file_name"
+        except Exception as e:
+            log_error(f"Error removing {file_name}: {e}")
+            return f"Error removing file: {e}"
+
+    def check_escape(self, relative_path: str) -> Tuple[bool, Path]:
+        d = self.base_dir.joinpath(Path(relative_path)).resolve()
+        if self.base_dir == d:
+            return True, d
+        try:
+            d.relative_to(self.base_dir)
+        except ValueError:
+            log_error("Attempted to escape base_dir")
+            return False, self.base_dir
+        return True, d
+
+    def list_files(self, **kwargs) -> str:
+        """Returns a list of files in directory
+        :param directory: (Optional) name of directory to list.
 
         :return: The contents of the file if successful, otherwise returns an error message.
         """
+        directory = kwargs.get("directory", ".")
         try:
-
-
-
-
+            log_debug(f"Reading files in : {self.base_dir}/{directory}")
+            safe, d = self.check_escape(directory)
+            if safe:
+                return json.dumps([str(file_path.relative_to(self.base_dir)) for file_path in d.iterdir()], indent=4)
+            else:
+                return "{}"
         except Exception as e:
             log_error(f"Error reading files: {e}")
             return f"Error reading files: {e}"
@@ -94,15 +214,23 @@ class FileTools(Toolkit):
 
             log_debug(f"Searching files in {self.base_dir} with pattern {pattern}")
             matching_files = list(self.base_dir.glob(pattern))
-
-
-
-
-
-
-
-
-
+            result = None
+            if self.expose_base_directory:
+                file_paths = [str(file_path) for file_path in matching_files]
+                result = {
+                    "pattern": pattern,
+                    "matches_found": len(file_paths),
+                    "base_directory": str(self.base_dir),
+                    "files": file_paths,
+                }
+            else:
+                file_paths = [str(file_path.relative_to(self.base_dir)) for file_path in matching_files]
+
+                result = {
+                    "pattern": pattern,
+                    "matches_found": len(file_paths),
+                    "files": file_paths,
+                }
             log_debug(f"Found {len(file_paths)} files matching pattern {pattern}")
             return json.dumps(result, indent=2)
 
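
For orientation, a minimal sketch of the new FileTools surface (the workspace directory and file names below are hypothetical): chunked reads and replacements operate on line ranges, delete_file is opt-in, and check_escape() keeps every operation inside base_dir.

from pathlib import Path

from agno.tools.file import FileTools

# delete_file is disabled unless explicitly enabled (or all=True).
file_tools = FileTools(base_dir=Path("./workspace"), enable_delete_file=True)

file_tools.save_file(contents="line0\nline1\nline2\nline3", file_name="notes.txt")

# Return lines 1..2 (inclusive) of the saved file.
print(file_tools.read_file_chunk(file_name="notes.txt", start_line=1, end_line=2))

# Replace lines 1..2 with a single new line and write the file back.
file_tools.replace_file_chunk(file_name="notes.txt", start_line=1, end_line=2, chunk="replaced")

# Paths that resolve outside base_dir fail check_escape(), so this returns an error string
# instead of touching the file system.
print(file_tools.read_file(file_name="../outside.txt"))

file_tools.delete_file(file_name="notes.txt")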
agno/tools/function.py
CHANGED
@@ -9,6 +9,7 @@ from pydantic import BaseModel, Field, validate_call
 
 from agno.exceptions import AgentRunException
 from agno.media import Audio, File, Image, Video
+from agno.run import RunContext
 from agno.utils.log import log_debug, log_error, log_exception, log_warning
 
 T = TypeVar("T")
@@ -122,6 +123,8 @@ class Function(BaseModel):
     _agent: Optional[Any] = None
     # The team that the function is associated with
     _team: Optional[Any] = None
+    # The run context that the function is associated with
+    _run_context: Optional[RunContext] = None
     # The session state that the function is associated with
     _session_state: Optional[Dict[str, Any]] = None
     # The dependencies that the function is associated with
@@ -196,8 +199,13 @@ class Function(BaseModel):
            del type_hints["agent"]
        if "team" in sig.parameters and "team" in type_hints:
            del type_hints["team"]
+       if "run_context" in sig.parameters and "run_context" in type_hints:
+           del type_hints["run_context"]
        if "session_state" in sig.parameters and "session_state" in type_hints:
            del type_hints["session_state"]
+       if "dependencies" in sig.parameters and "dependencies" in type_hints:
+           del type_hints["dependencies"]
+
        # Remove media parameters from type hints as they are injected automatically
        if "images" in sig.parameters and "images" in type_hints:
            del type_hints["images"]
@@ -207,8 +215,6 @@ class Function(BaseModel):
            del type_hints["audios"]
        if "files" in sig.parameters and "files" in type_hints:
            del type_hints["files"]
-       if "dependencies" in sig.parameters and "dependencies" in type_hints:
-           del type_hints["dependencies"]
        # log_info(f"Type hints for {function_name}: {type_hints}")
 
        # Filter out return type and only process parameters
@@ -217,7 +223,18 @@ class Function(BaseModel):
            for name in sig.parameters
            if name != "return"
            and name
-           not in [
+           not in [
+               "agent",
+               "team",
+               "run_context",
+               "session_state",
+               "dependencies",
+               "self",
+               "images",
+               "videos",
+               "audios",
+               "files",
+           ]
        }
 
        # Parse docstring for parameters
@@ -250,13 +267,14 @@ class Function(BaseModel):
                not in [
                    "agent",
                    "team",
+                   "run_context",
                    "session_state",
+                   "dependencies",
                    "self",
                    "images",
                    "videos",
                    "audios",
                    "files",
-                   "dependencies",
                ]
            ]
        else:
@@ -269,13 +287,14 @@ class Function(BaseModel):
                not in [
                    "agent",
                    "team",
+                   "run_context",
                    "session_state",
+                   "dependencies",
                    "self",
                    "images",
                    "videos",
                    "audios",
                    "files",
-                   "dependencies",
                ]
            ]
 
@@ -325,8 +344,12 @@ class Function(BaseModel):
            del type_hints["agent"]
        if "team" in sig.parameters and "team" in type_hints:
            del type_hints["team"]
+       if "run_context" in sig.parameters and "run_context" in type_hints:
+           del type_hints["run_context"]
        if "session_state" in sig.parameters and "session_state" in type_hints:
            del type_hints["session_state"]
+       if "dependencies" in sig.parameters and "dependencies" in type_hints:
+           del type_hints["dependencies"]
        if "images" in sig.parameters and "images" in type_hints:
            del type_hints["images"]
        if "videos" in sig.parameters and "videos" in type_hints:
@@ -335,8 +358,6 @@ class Function(BaseModel):
            del type_hints["audios"]
        if "files" in sig.parameters and "files" in type_hints:
            del type_hints["files"]
-       if "dependencies" in sig.parameters and "dependencies" in type_hints:
-           del type_hints["dependencies"]
        # log_info(f"Type hints for {self.name}: {type_hints}")
 
        # Filter out return type and only process parameters
@@ -344,13 +365,14 @@ class Function(BaseModel):
            "return",
            "agent",
            "team",
+           "run_context",
            "session_state",
+           "dependencies",
            "self",
            "images",
            "videos",
            "audios",
            "files",
-           "dependencies",
        ]
        if self.requires_user_input and self.user_input_fields:
            if len(self.user_input_fields) == 0:
@@ -440,7 +462,7 @@ class Function(BaseModel):
    @staticmethod
    def _wrap_callable(func: Callable) -> Callable:
        """Wrap a callable with Pydantic's validate_call decorator, if relevant"""
-       from inspect import isasyncgenfunction, iscoroutinefunction
+       from inspect import isasyncgenfunction, iscoroutinefunction, signature
 
        pydantic_version = Version(version("pydantic"))
 
@@ -458,6 +480,10 @@ class Function(BaseModel):
        # Don't wrap callables that are already wrapped with validate_call
        elif getattr(func, "_wrapped_for_validation", False):
            return func
+       # Don't wrap functions with session_state parameter
+       # session_state needs to be passed by reference, not copied by pydantic's validation
+       elif "session_state" in signature(func).parameters:
+           return func
        # Wrap the callable with validate_call
        else:
            wrapped = validate_call(func, config=dict(arbitrary_types_allowed=True))  # type: ignore
@@ -508,7 +534,18 @@ class Function(BaseModel):
            name
            for name in self.parameters["properties"]
            if name
-           not in [
+           not in [
+               "agent",
+               "team",
+               "run_context",
+               "session_state",
+               "dependencies",
+               "images",
+               "videos",
+               "audios",
+               "files",
+               "self",
+           ]
        ]
 
    def _get_cache_key(self, entrypoint_args: Dict[str, Any], call_args: Optional[Dict[str, Any]] = None) -> str:
@@ -522,8 +559,12 @@ class Function(BaseModel):
            del copy_entrypoint_args["agent"]
        if "team" in copy_entrypoint_args:
            del copy_entrypoint_args["team"]
+       if "run_context" in copy_entrypoint_args:
+           del copy_entrypoint_args["run_context"]
        if "session_state" in copy_entrypoint_args:
            del copy_entrypoint_args["session_state"]
+       if "dependencies" in copy_entrypoint_args:
+           del copy_entrypoint_args["dependencies"]
        if "images" in copy_entrypoint_args:
            del copy_entrypoint_args["images"]
        if "videos" in copy_entrypoint_args:
@@ -532,8 +573,6 @@ class Function(BaseModel):
            del copy_entrypoint_args["audios"]
        if "files" in copy_entrypoint_args:
            del copy_entrypoint_args["files"]
-       if "dependencies" in copy_entrypoint_args:
-           del copy_entrypoint_args["dependencies"]
        # Use json.dumps with sort_keys=True to ensure consistent ordering regardless of dict key order
        args_str = json.dumps(copy_entrypoint_args, sort_keys=True, default=str)
 
@@ -659,8 +698,14 @@ class FunctionCall(BaseModel):
            if "team" in signature(self.function.pre_hook).parameters:
                pre_hook_args["team"] = self.function._team
            # Check if the pre-hook has an session_state argument
+           if "run_context" in signature(self.function.pre_hook).parameters:
+               pre_hook_args["run_context"] = self.function._run_context
+           # Check if the pre-hook has an session_state argument
            if "session_state" in signature(self.function.pre_hook).parameters:
                pre_hook_args["session_state"] = self.function._session_state
+           # Check if the pre-hook has an dependencies argument
+           if "dependencies" in signature(self.function.pre_hook).parameters:
+               pre_hook_args["dependencies"] = self.function._dependencies
            # Check if the pre-hook has an fc argument
            if "fc" in signature(self.function.pre_hook).parameters:
                pre_hook_args["fc"] = self
@@ -687,8 +732,14 @@ class FunctionCall(BaseModel):
            if "team" in signature(self.function.post_hook).parameters:
                post_hook_args["team"] = self.function._team
            # Check if the post-hook has an session_state argument
+           if "run_context" in signature(self.function.post_hook).parameters:
+               post_hook_args["run_context"] = self.function._run_context
+           # Check if the post-hook has an session_state argument
            if "session_state" in signature(self.function.post_hook).parameters:
                post_hook_args["session_state"] = self.function._session_state
+           # Check if the post-hook has an dependencies argument
+           if "dependencies" in signature(self.function.post_hook).parameters:
+               post_hook_args["dependencies"] = self.function._dependencies
            # Check if the post-hook has an fc argument
            if "fc" in signature(self.function.post_hook).parameters:
                post_hook_args["fc"] = self
@@ -712,6 +763,9 @@ class FunctionCall(BaseModel):
        # Check if the entrypoint has an team argument
        if "team" in signature(self.function.entrypoint).parameters:  # type: ignore
            entrypoint_args["team"] = self.function._team
+       # Check if the entrypoint has an run_context argument
+       if "run_context" in signature(self.function.entrypoint).parameters:  # type: ignore
+           entrypoint_args["run_context"] = self.function._run_context
        # Check if the entrypoint has an session_state argument
        if "session_state" in signature(self.function.entrypoint).parameters:  # type: ignore
            entrypoint_args["session_state"] = self.function._session_state
@@ -744,13 +798,15 @@ class FunctionCall(BaseModel):
                # Check if the hook has an team argument
                if "team" in signature(hook).parameters:
                    hook_args["team"] = self.function._team
+               # Check if the hook has an run_context argument
+               if "run_context" in signature(hook).parameters:
+                   hook_args["run_context"] = self.function._run_context
                # Check if the hook has an session_state argument
                if "session_state" in signature(hook).parameters:
                    hook_args["session_state"] = self.function._session_state
                # Check if the hook has an dependencies argument
                if "dependencies" in signature(hook).parameters:
                    hook_args["dependencies"] = self.function._dependencies
-
                if "name" in signature(hook).parameters:
                    hook_args["name"] = name
                if "function_name" in signature(hook).parameters:
@@ -853,8 +909,16 @@ class FunctionCall(BaseModel):
            result = self.function.entrypoint(**entrypoint_args, **self.arguments)  # type: ignore
 
            updated_session_state = None
-           if entrypoint_args.get("
-
+           if entrypoint_args.get("run_context") is not None:
+               run_context = entrypoint_args.get("run_context")
+               updated_session_state = (
+                   run_context.session_state
+                   if run_context is not None and run_context.session_state is not None
+                   else None
+               )
+           else:
+               if self.function._session_state is not None:
+                   updated_session_state = self.function._session_state
 
            # Handle generator case
            if isgenerator(result):
@@ -902,9 +966,15 @@ class FunctionCall(BaseModel):
            # Check if the pre-hook has an team argument
            if "team" in signature(self.function.pre_hook).parameters:
                pre_hook_args["team"] = self.function._team
+           # Check if the pre-hook has an run_context argument
+           if "run_context" in signature(self.function.pre_hook).parameters:
+               pre_hook_args["run_context"] = self.function._run_context
            # Check if the pre-hook has an session_state argument
            if "session_state" in signature(self.function.pre_hook).parameters:
                pre_hook_args["session_state"] = self.function._session_state
+           # Check if the pre-hook has an dependencies argument
+           if "dependencies" in signature(self.function.pre_hook).parameters:
+               pre_hook_args["dependencies"] = self.function._dependencies
            # Check if the pre-hook has an fc argument
            if "fc" in signature(self.function.pre_hook).parameters:
                pre_hook_args["fc"] = self
@@ -931,9 +1001,15 @@ class FunctionCall(BaseModel):
            # Check if the post-hook has an team argument
            if "team" in signature(self.function.post_hook).parameters:
                post_hook_args["team"] = self.function._team
+           # Check if the post-hook has an run_context argument
+           if "run_context" in signature(self.function.post_hook).parameters:
+               post_hook_args["run_context"] = self.function._run_context
            # Check if the post-hook has an session_state argument
            if "session_state" in signature(self.function.post_hook).parameters:
                post_hook_args["session_state"] = self.function._session_state
+           # Check if the post-hook has an dependencies argument
+           if "dependencies" in signature(self.function.post_hook).parameters:
+               post_hook_args["dependencies"] = self.function._dependencies
 
            # Check if the post-hook has an fc argument
            if "fc" in signature(self.function.post_hook).parameters:
@@ -1067,8 +1143,13 @@ class FunctionCall(BaseModel):
                self.function._save_to_cache(cache_file, self.result)
 
            updated_session_state = None
-           if entrypoint_args.get("
-
+           if entrypoint_args.get("run_context") is not None:
+               run_context = entrypoint_args.get("run_context")
+               updated_session_state = (
+                   run_context.session_state
+                   if run_context is not None and run_context.session_state is not None
+                   else None
+               )
 
            execution_result = FunctionExecutionResult(
                status="success", result=self.result, updated_session_state=updated_session_state