agno 2.2.10__py3-none-any.whl → 2.2.11__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
- agno/agent/agent.py +13 -1
- agno/db/dynamo/utils.py +1 -1
- agno/db/firestore/utils.py +1 -1
- agno/db/gcs_json/utils.py +1 -1
- agno/db/in_memory/utils.py +1 -1
- agno/db/json/utils.py +1 -1
- agno/db/mongo/utils.py +3 -3
- agno/db/mysql/utils.py +1 -1
- agno/db/postgres/utils.py +1 -1
- agno/db/redis/utils.py +1 -1
- agno/db/singlestore/utils.py +1 -1
- agno/db/sqlite/utils.py +1 -1
- agno/knowledge/chunking/agentic.py +8 -9
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/sentence_transformer.py +6 -2
- agno/knowledge/reader/base.py +6 -2
- agno/knowledge/utils.py +20 -0
- agno/models/anthropic/claude.py +45 -9
- agno/models/base.py +4 -0
- agno/os/app.py +23 -7
- agno/os/routers/health.py +5 -3
- agno/os/routers/knowledge/knowledge.py +43 -17
- agno/os/routers/knowledge/schemas.py +4 -3
- agno/run/agent.py +11 -1
- agno/team/team.py +13 -1
- agno/tools/file_generation.py +4 -4
- agno/tools/gmail.py +179 -0
- agno/tools/parallel.py +314 -0
- agno/utils/models/claude.py +2 -1
- agno/workflow/step.py +3 -2
- agno/workflow/types.py +20 -1
- agno/workflow/workflow.py +92 -9
- {agno-2.2.10.dist-info → agno-2.2.11.dist-info}/METADATA +4 -1
- {agno-2.2.10.dist-info → agno-2.2.11.dist-info}/RECORD +37 -36
- {agno-2.2.10.dist-info → agno-2.2.11.dist-info}/WHEEL +0 -0
- {agno-2.2.10.dist-info → agno-2.2.11.dist-info}/licenses/LICENSE +0 -0
- {agno-2.2.10.dist-info → agno-2.2.11.dist-info}/top_level.txt +0 -0
agno/tools/parallel.py
ADDED
@@ -0,0 +1,314 @@
+import json
+from os import getenv
+from typing import Any, Dict, List, Optional
+
+from agno.tools import Toolkit
+from agno.utils.log import log_error
+
+try:
+    from parallel import Parallel as ParallelClient
+except ImportError:
+    raise ImportError("`parallel-web` not installed. Please install using `pip install parallel-web`")
+
+
+class CustomJSONEncoder(json.JSONEncoder):
+    """Custom JSON encoder that handles non-serializable types by converting them to strings."""
+
+    def default(self, obj):
+        try:
+            return super().default(obj)
+        except TypeError:
+            return str(obj)
+
+
+class ParallelTools(Toolkit):
+    """
+    ParallelTools provides access to Parallel's web search and extraction APIs.
+
+    Parallel offers powerful APIs optimized for AI agents:
+    - Search API: AI-optimized web search that returns relevant excerpts tailored for LLMs
+    - Extract API: Extract content from specific URLs in clean markdown format, handling JavaScript-heavy pages and PDFs
+
+    Args:
+        api_key (Optional[str]): Parallel API key. If not provided, will use PARALLEL_API_KEY environment variable.
+        enable_search (bool): Enable Search API functionality. Default is True.
+        enable_extract (bool): Enable Extract API functionality. Default is True.
+        all (bool): Enable all tools. Overrides individual flags when True. Default is False.
+        max_results (int): Default maximum number of results for search operations. Default is 10.
+        max_chars_per_result (int): Default maximum characters per result for search operations. Default is 10000.
+        beta_version (str): Beta API version header. Default is "search-extract-2025-10-10".
+        mode (Optional[str]): Default search mode. Options: "one-shot" or "agentic". Default is None.
+        include_domains (Optional[List[str]]): Default domains to restrict results to. Default is None.
+        exclude_domains (Optional[List[str]]): Default domains to exclude from results. Default is None.
+        max_age_seconds (Optional[int]): Default cache age threshold (minimum 600). Default is None.
+        timeout_seconds (Optional[float]): Default timeout for content retrieval. Default is None.
+        disable_cache_fallback (Optional[bool]): Default cache fallback behavior. Default is None.
+    """
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        enable_search: bool = True,
+        enable_extract: bool = True,
+        all: bool = False,
+        max_results: int = 10,
+        max_chars_per_result: int = 10000,
+        beta_version: str = "search-extract-2025-10-10",
+        mode: Optional[str] = None,
+        include_domains: Optional[List[str]] = None,
+        exclude_domains: Optional[List[str]] = None,
+        max_age_seconds: Optional[int] = None,
+        timeout_seconds: Optional[float] = None,
+        disable_cache_fallback: Optional[bool] = None,
+        **kwargs,
+    ):
+        self.api_key: Optional[str] = api_key or getenv("PARALLEL_API_KEY")
+        if not self.api_key:
+            log_error("PARALLEL_API_KEY not set. Please set the PARALLEL_API_KEY environment variable.")
+
+        self.max_results = max_results
+        self.max_chars_per_result = max_chars_per_result
+        self.beta_version = beta_version
+        self.mode = mode
+        self.include_domains = include_domains
+        self.exclude_domains = exclude_domains
+        self.max_age_seconds = max_age_seconds
+        self.timeout_seconds = timeout_seconds
+        self.disable_cache_fallback = disable_cache_fallback
+
+        self.parallel_client = ParallelClient(
+            api_key=self.api_key, default_headers={"parallel-beta": self.beta_version}
+        )
+
+        tools: List[Any] = []
+        if all or enable_search:
+            tools.append(self.parallel_search)
+        if all or enable_extract:
+            tools.append(self.parallel_extract)
+
+        super().__init__(name="parallel_tools", tools=tools, **kwargs)
+
+    def parallel_search(
+        self,
+        objective: Optional[str] = None,
+        search_queries: Optional[List[str]] = None,
+        max_results: Optional[int] = None,
+        max_chars_per_result: Optional[int] = None,
+    ) -> str:
+        """Use this function to search the web using Parallel's Search API with a natural language objective.
+        You must provide at least one of objective or search_queries.
+
+        Args:
+            objective (Optional[str]): Natural-language description of what the web search is trying to find.
+            search_queries (Optional[List[str]]): Traditional keyword queries with optional search operators.
+            max_results (Optional[int]): Upper bound on results returned. Overrides constructor default.
+            max_chars_per_result (Optional[int]): Upper bound on total characters per url for excerpts.
+
+        Returns:
+            str: A JSON formatted string containing the search results with URLs, titles, publish dates, and relevant excerpts.
+        """
+        try:
+            if not objective and not search_queries:
+                return json.dumps({"error": "Please provide at least one of: objective or search_queries"}, indent=2)
+
+            # Use instance defaults if not provided
+            final_max_results = max_results if max_results is not None else self.max_results
+
+            search_params: Dict[str, Any] = {
+                "max_results": final_max_results,
+            }
+
+            # Add objective if provided
+            if objective:
+                search_params["objective"] = objective
+
+            # Add search_queries if provided
+            if search_queries:
+                search_params["search_queries"] = search_queries
+
+            # Add mode from constructor default
+            if self.mode:
+                search_params["mode"] = self.mode
+
+            # Add excerpts configuration
+            excerpts_config: Dict[str, Any] = {}
+            final_max_chars = max_chars_per_result if max_chars_per_result is not None else self.max_chars_per_result
+            if final_max_chars is not None:
+                excerpts_config["max_chars_per_result"] = final_max_chars
+
+            if excerpts_config:
+                search_params["excerpts"] = excerpts_config
+
+            # Add source_policy from constructor defaults
+            source_policy: Dict[str, Any] = {}
+            if self.include_domains:
+                source_policy["include_domains"] = self.include_domains
+            if self.exclude_domains:
+                source_policy["exclude_domains"] = self.exclude_domains
+
+            if source_policy:
+                search_params["source_policy"] = source_policy
+
+            # Add fetch_policy from constructor defaults
+            fetch_policy: Dict[str, Any] = {}
+            if self.max_age_seconds is not None:
+                fetch_policy["max_age_seconds"] = self.max_age_seconds
+            if self.timeout_seconds is not None:
+                fetch_policy["timeout_seconds"] = self.timeout_seconds
+            if self.disable_cache_fallback is not None:
+                fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
+
+            if fetch_policy:
+                search_params["fetch_policy"] = fetch_policy
+
+            search_result = self.parallel_client.beta.search(**search_params)
+
+            # Use model_dump() if available, otherwise convert to dict
+            try:
+                if hasattr(search_result, "model_dump"):
+                    return json.dumps(search_result.model_dump(), cls=CustomJSONEncoder)
+            except Exception:
+                pass
+
+            # Manually format the results
+            formatted_results: Dict[str, Any] = {
+                "search_id": getattr(search_result, "search_id", ""),
+                "results": [],
+            }
+
+            if hasattr(search_result, "results") and search_result.results:
+                results_list: List[Dict[str, Any]] = []
+                for result in search_result.results:
+                    formatted_result: Dict[str, Any] = {
+                        "title": getattr(result, "title", ""),
+                        "url": getattr(result, "url", ""),
+                        "publish_date": getattr(result, "publish_date", ""),
+                        "excerpt": getattr(result, "excerpt", ""),
+                    }
+                    results_list.append(formatted_result)
+                formatted_results["results"] = results_list
+
+            if hasattr(search_result, "warnings"):
+                formatted_results["warnings"] = search_result.warnings
+
+            if hasattr(search_result, "usage"):
+                formatted_results["usage"] = search_result.usage
+
+            return json.dumps(formatted_results, cls=CustomJSONEncoder, indent=2)
+
+        except Exception as e:
+            log_error(f"Error searching Parallel for objective '{objective}': {e}")
+            return json.dumps({"error": f"Search failed: {str(e)}"}, indent=2)
+
+    def parallel_extract(
+        self,
+        urls: List[str],
+        objective: Optional[str] = None,
+        search_queries: Optional[List[str]] = None,
+        excerpts: bool = True,
+        max_chars_per_excerpt: Optional[int] = None,
+        full_content: bool = False,
+        max_chars_for_full_content: Optional[int] = None,
+    ) -> str:
+        """Use this function to extract content from specific URLs using Parallel's Extract API.
+
+        Args:
+            urls (List[str]): List of public URLs to extract content from.
+            objective (Optional[str]): Search focus to guide content extraction.
+            search_queries (Optional[List[str]]): Keywords for targeting relevant content.
+            excerpts (bool): Include relevant text snippets.
+            max_chars_per_excerpt (Optional[int]): Upper bound on total characters per url. Only used when excerpts is True.
+            full_content (bool): Include complete page text.
+            max_chars_for_full_content (Optional[int]): Limit on characters per url. Only used when full_content is True.
+
+        Returns:
+            str: A JSON formatted string containing extracted content with titles, publish dates, excerpts and/or full content.
+        """
+        try:
+            if not urls:
+                return json.dumps({"error": "Please provide at least one URL to extract"}, indent=2)
+
+            extract_params: Dict[str, Any] = {
+                "urls": urls,
+            }
+
+            # Add objective if provided
+            if objective:
+                extract_params["objective"] = objective
+
+            # Add search_queries if provided
+            if search_queries:
+                extract_params["search_queries"] = search_queries
+
+            # Add excerpts configuration
+            if excerpts and max_chars_per_excerpt is not None:
+                extract_params["excerpts"] = {"max_chars_per_result": max_chars_per_excerpt}
+            else:
+                extract_params["excerpts"] = excerpts
+
+            # Add full_content configuration
+            if full_content and max_chars_for_full_content is not None:
+                extract_params["full_content"] = {"max_chars_per_result": max_chars_for_full_content}
+            else:
+                extract_params["full_content"] = full_content
+
+            # Add fetch_policy from constructor defaults
+            fetch_policy: Dict[str, Any] = {}
+            if self.max_age_seconds is not None:
+                fetch_policy["max_age_seconds"] = self.max_age_seconds
+            if self.timeout_seconds is not None:
+                fetch_policy["timeout_seconds"] = self.timeout_seconds
+            if self.disable_cache_fallback is not None:
+                fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
+
+            if fetch_policy:
+                extract_params["fetch_policy"] = fetch_policy
+
+            extract_result = self.parallel_client.beta.extract(**extract_params)
+
+            # Use model_dump() if available, otherwise convert to dict
+            try:
+                if hasattr(extract_result, "model_dump"):
+                    return json.dumps(extract_result.model_dump(), cls=CustomJSONEncoder)
+            except Exception:
+                pass
+
+            # Manually format the results
+            formatted_results: Dict[str, Any] = {
+                "extract_id": getattr(extract_result, "extract_id", ""),
+                "results": [],
+                "errors": [],
+            }
+
+            if hasattr(extract_result, "results") and extract_result.results:
+                results_list: List[Dict[str, Any]] = []
+                for result in extract_result.results:
+                    formatted_result: Dict[str, Any] = {
+                        "url": getattr(result, "url", ""),
+                        "title": getattr(result, "title", ""),
+                        "publish_date": getattr(result, "publish_date", ""),
+                    }
+
+                    if excerpts and hasattr(result, "excerpts"):
+                        formatted_result["excerpts"] = result.excerpts
+
+                    if full_content and hasattr(result, "full_content"):
+                        formatted_result["full_content"] = result.full_content
+
+                    results_list.append(formatted_result)
+                formatted_results["results"] = results_list
+
+            if hasattr(extract_result, "errors") and extract_result.errors:
+                formatted_results["errors"] = extract_result.errors
+
+            if hasattr(extract_result, "warnings"):
+                formatted_results["warnings"] = extract_result.warnings
+
+            if hasattr(extract_result, "usage"):
+                formatted_results["usage"] = extract_result.usage
+
+            return json.dumps(formatted_results, cls=CustomJSONEncoder, indent=2)
+
+        except Exception as e:
+            log_error(f"Error extracting from Parallel: {e}")
+            return json.dumps({"error": f"Extract failed: {str(e)}"}, indent=2)
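Usage note: ParallelTools registers like any other agno Toolkit, so the new functions can also be exercised directly, without wiring the toolkit into an agent. A minimal sketch, assuming `parallel-web` is installed and PARALLEL_API_KEY is set in the environment; the domain, URL, and query values are placeholders:

from agno.tools.parallel import ParallelTools

# Instantiate the toolkit directly; constructor arguments become per-call defaults.
toolkit = ParallelTools(
    max_results=5,                    # cap the number of search results
    include_domains=["example.com"],  # placeholder: restrict results to one domain
)

# Search with a natural-language objective; returns a JSON-formatted string.
print(toolkit.parallel_search(objective="Recent work on multi-agent systems"))

# Extract clean content from specific URLs; also returns a JSON-formatted string.
print(toolkit.parallel_extract(urls=["https://example.com/article"], objective="Summarize the page"))

One design choice worth noting from the code above: source_policy and fetch_policy are assembled only from constructor defaults, so per-call overrides exist only for max_results and max_chars_per_result.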
agno/utils/models/claude.py
CHANGED
@@ -68,6 +68,8 @@ def _format_image_for_message(image: Image) -> Optional[Dict[str, Any]]:
         }

     try:
+        img_type = None
+
         # Case 0: Image is an Anthropic uploaded file
         if image.content is not None and hasattr(image.content, "id"):
             content_bytes = image.content
@@ -80,7 +82,6 @@ def _format_image_for_message(image: Image) -> Optional[Dict[str, Any]]:
         import os
         from urllib.parse import urlparse

-        img_type = None
         if image.url:
             parsed_url = urlparse(image.url)
             _, ext = os.path.splitext(parsed_url.path)
agno/workflow/step.py
CHANGED
@@ -11,9 +11,10 @@ from agno.agent import Agent
 from agno.media import Audio, Image, Video
 from agno.models.metrics import Metrics
 from agno.run import RunContext
-from agno.run.agent import RunCompletedEvent,
+from agno.run.agent import RunCompletedEvent, RunContentEvent, RunOutput
 from agno.run.base import BaseRunOutputEvent
-from agno.run.team import RunCompletedEvent as TeamRunCompletedEvent
+from agno.run.team import RunCompletedEvent as TeamRunCompletedEvent
+from agno.run.team import RunContentEvent as TeamRunContentEvent
 from agno.run.team import TeamRunOutput
 from agno.run.workflow import (
     StepCompletedEvent,
agno/workflow/types.py
CHANGED
@@ -17,6 +17,7 @@ from agno.utils.media import (
     reconstruct_videos,
 )
 from agno.utils.serialize import json_serializer
+from agno.utils.timer import Timer


 @dataclass
@@ -405,12 +406,18 @@ class WorkflowMetrics:
     """Complete metrics for a workflow execution"""

     steps: Dict[str, StepMetrics]
+    # Timer utility for tracking execution time
+    timer: Optional[Timer] = None
+    # Total workflow execution time
+    duration: Optional[float] = None

     def to_dict(self) -> Dict[str, Any]:
         """Convert to dictionary"""
-        return {
+        result: Dict[str, Any] = {
             "steps": {name: step.to_dict() for name, step in self.steps.items()},
+            "duration": self.duration,
         }
+        return result

     @classmethod
     def from_dict(cls, data: Dict[str, Any]) -> "WorkflowMetrics":
@@ -419,8 +426,20 @@

         return cls(
             steps=steps,
+            duration=data.get("duration"),
         )

+    def start_timer(self):
+        if self.timer is None:
+            self.timer = Timer()
+        self.timer.start()
+
+    def stop_timer(self, set_duration: bool = True):
+        if self.timer is not None:
+            self.timer.stop()
+            if set_duration:
+                self.duration = self.timer.elapsed
+

 @dataclass
 class WebSocketHandler:
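The timer lifecycle added to WorkflowMetrics can be exercised in isolation. A minimal sketch based on the diff above, assuming the Timer utility exposes start(), stop(), and elapsed (which is what stop_timer() relies on):

from agno.workflow.types import WorkflowMetrics

metrics = WorkflowMetrics(steps={})
metrics.start_timer()   # lazily creates a Timer and starts it
# ... workflow steps would execute here ...
metrics.stop_timer()    # stops the Timer and records Timer.elapsed as duration

print(metrics.duration)                      # total wall-clock execution time
data = metrics.to_dict()                     # now includes a "duration" key
restored = WorkflowMetrics.from_dict(data)   # duration survives the round trip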
agno/workflow/workflow.py
CHANGED
@@ -974,8 +974,8 @@
         websocket_handler: Optional[WebSocketHandler] = None,
     ) -> "WorkflowRunOutputEvent":
         """Handle workflow events for storage - similar to Team._handle_event"""
-        from agno.run.base import BaseRunOutputEvent
         from agno.run.agent import RunOutput
+        from agno.run.base import BaseRunOutputEvent
         from agno.run.team import TeamRunOutput

         if isinstance(event, (RunOutput, TeamRunOutput)):
@@ -1136,7 +1136,11 @@
         else:
             return len(self.steps)

-    def _aggregate_workflow_metrics(
+    def _aggregate_workflow_metrics(
+        self,
+        step_results: List[Union[StepOutput, List[StepOutput]]],
+        current_workflow_metrics: Optional[WorkflowMetrics] = None,
+    ) -> WorkflowMetrics:
         """Aggregate metrics from all step responses into structured workflow metrics"""
         steps_dict = {}
@@ -1164,8 +1168,13 @@
         for step_result in step_results:
             process_step_output(cast(StepOutput, step_result))

+        duration = None
+        if current_workflow_metrics and current_workflow_metrics.duration is not None:
+            duration = current_workflow_metrics.duration
+
         return WorkflowMetrics(
             steps=steps_dict,
+            duration=duration,
         )

     def _call_custom_function(self, func: Callable, execution_input: WorkflowExecutionInput, **kwargs: Any) -> Any:
@@ -1316,7 +1325,14 @@

         # Update the workflow_run_response with completion data
         if collected_step_outputs:
-
+            # Stop the timer for the Run duration
+            if workflow_run_response.metrics:
+                workflow_run_response.metrics.stop_timer()
+
+            workflow_run_response.metrics = self._aggregate_workflow_metrics(
+                collected_step_outputs,
+                workflow_run_response.metrics,  # type: ignore[arg-type]
+            )
             last_output = cast(StepOutput, collected_step_outputs[-1])

             # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
@@ -1361,6 +1377,10 @@
             raise e

         finally:
+            # Stop timer on error
+            if workflow_run_response.metrics:
+                workflow_run_response.metrics.stop_timer()
+
             self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
             session.upsert_run(run=workflow_run_response)
             self.save_session(session=session)
@@ -1551,7 +1571,14 @@

         # Update the workflow_run_response with completion data
         if collected_step_outputs:
-
+            # Stop the timer for the Run duration
+            if workflow_run_response.metrics:
+                workflow_run_response.metrics.stop_timer()
+
+            workflow_run_response.metrics = self._aggregate_workflow_metrics(
+                collected_step_outputs,
+                workflow_run_response.metrics,  # type: ignore[arg-type]
+            )
             last_output = cast(StepOutput, collected_step_outputs[-1])

             # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
@@ -1618,7 +1645,14 @@
             # Preserve all progress (completed steps + partial step) before cancellation
             if collected_step_outputs:
                 workflow_run_response.step_results = collected_step_outputs
-
+                # Stop the timer for the Run duration
+                if workflow_run_response.metrics:
+                    workflow_run_response.metrics.stop_timer()
+
+                workflow_run_response.metrics = self._aggregate_workflow_metrics(
+                    collected_step_outputs,
+                    workflow_run_response.metrics,  # type: ignore[arg-type]
+                )

             cancelled_event = WorkflowCancelledEvent(
                 run_id=workflow_run_response.run_id or "",
@@ -1660,6 +1694,10 @@
             )
             yield self._handle_event(workflow_completed_event, workflow_run_response)

+        # Stop timer on error
+        if workflow_run_response.metrics:
+            workflow_run_response.metrics.stop_timer()
+
         # Store the completed workflow response
         self._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
         session.upsert_run(run=workflow_run_response)
@@ -1863,7 +1901,14 @@

         # Update the workflow_run_response with completion data
         if collected_step_outputs:
-
+            # Stop the timer for the Run duration
+            if workflow_run_response.metrics:
+                workflow_run_response.metrics.stop_timer()
+
+            workflow_run_response.metrics = self._aggregate_workflow_metrics(
+                collected_step_outputs,
+                workflow_run_response.metrics,  # type: ignore[arg-type]
+            )
             last_output = cast(StepOutput, collected_step_outputs[-1])

             # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
@@ -1903,6 +1948,10 @@
                 workflow_run_response.content = f"Workflow execution failed: {e}"
                 raise e

+            # Stop timer on error
+            if workflow_run_response.metrics:
+                workflow_run_response.metrics.stop_timer()
+
             self._update_session_metrics(session=workflow_session, workflow_run_response=workflow_run_response)
             workflow_session.upsert_run(run=workflow_run_response)
             if self._has_async_db():
@@ -2114,7 +2163,14 @@

         # Update the workflow_run_response with completion data
         if collected_step_outputs:
-
+            # Stop the timer for the Run duration
+            if workflow_run_response.metrics:
+                workflow_run_response.metrics.stop_timer()
+
+            workflow_run_response.metrics = self._aggregate_workflow_metrics(
+                collected_step_outputs,
+                workflow_run_response.metrics,  # type: ignore[arg-type]
+            )
             last_output = cast(StepOutput, collected_step_outputs[-1])

             # Use deepest nested content if this is a container (Steps/Router/Loop/etc.)
@@ -2181,7 +2237,14 @@
             # Preserve all progress (completed steps + partial step) before cancellation
             if collected_step_outputs:
                 workflow_run_response.step_results = collected_step_outputs
-
+                # Stop the timer for the Run duration
+                if workflow_run_response.metrics:
+                    workflow_run_response.metrics.stop_timer()
+
+                workflow_run_response.metrics = self._aggregate_workflow_metrics(
+                    collected_step_outputs,
+                    workflow_run_response.metrics,  # type: ignore[arg-type]
+                )

             cancelled_event = WorkflowCancelledEvent(
                 run_id=workflow_run_response.run_id or "",
@@ -2227,6 +2290,10 @@
             )
             yield self._handle_event(workflow_completed_event, workflow_run_response, websocket_handler=websocket_handler)

+        # Stop timer on error
+        if workflow_run_response.metrics:
+            workflow_run_response.metrics.stop_timer()
+
         # Store the completed workflow response
         self._update_session_metrics(session=workflow_session, workflow_run_response=workflow_run_response)
         workflow_session.upsert_run(run=workflow_run_response)
@@ -2288,6 +2355,10 @@
             status=RunStatus.pending,
         )

+        # Start the run metrics timer
+        workflow_run_response.metrics = WorkflowMetrics(steps={})
+        workflow_run_response.metrics.start_timer()
+
         # Store PENDING response immediately
         workflow_session.upsert_run(run=workflow_run_response)
         if self._has_async_db():
@@ -2402,6 +2473,10 @@
             status=RunStatus.pending,
         )

+        # Start the run metrics timer
+        workflow_run_response.metrics = WorkflowMetrics(steps={})
+        workflow_run_response.metrics.start_timer()
+
         # Prepare execution input
         inputs = WorkflowExecutionInput(
             input=input,
@@ -3445,6 +3520,10 @@
             created_at=int(datetime.now().timestamp()),
         )

+        # Start the run metrics timer
+        workflow_run_response.metrics = WorkflowMetrics(steps={})
+        workflow_run_response.metrics.start_timer()
+
         if stream:
             return self._execute_stream(
                 session=workflow_session,
@@ -3632,6 +3711,10 @@
             created_at=int(datetime.now().timestamp()),
         )

+        # Start the run metrics timer
+        workflow_run_response.metrics = WorkflowMetrics(steps={})
+        workflow_run_response.metrics.start_timer()
+
         if stream:
             return self._aexecute_stream(  # type: ignore
                 execution_input=inputs,
@@ -3978,7 +4061,7 @@

         # If workflow has metrics, convert and add them to session metrics
         if workflow_run_response.metrics:
-            run_session_metrics = self._calculate_session_metrics_from_workflow_metrics(workflow_run_response.metrics)
+            run_session_metrics = self._calculate_session_metrics_from_workflow_metrics(workflow_run_response.metrics)  # type: ignore[arg-type]

         session_metrics += run_session_metrics
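Taken together, these hooks start the timer when a run is created, stop it on completion, cancellation, or error, and carry the measured duration into the aggregated metrics. A hypothetical read, assuming `workflow` is an already-configured Workflow instance and the input value is a placeholder:

run_output = workflow.run(input="...")  # placeholder input

# The run output's metrics should now carry the wall-clock duration
if run_output.metrics is not None and run_output.metrics.duration is not None:
    print(f"Workflow run took {run_output.metrics.duration:.2f}s")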
{agno-2.2.10.dist-info → agno-2.2.11.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agno
-Version: 2.2.10
+Version: 2.2.11
 Summary: Agno: a lightweight library for building Multi-Agent Systems
 Author-email: Ashpreet Bedi <ashpreet@agno.com>
 Project-URL: homepage, https://agno.com
@@ -165,6 +165,8 @@ Provides-Extra: notion
 Requires-Dist: notion-client; extra == "notion"
 Provides-Extra: opencv
 Requires-Dist: opencv-python; extra == "opencv"
+Provides-Extra: parallel
+Requires-Dist: parallel-web; extra == "parallel"
 Provides-Extra: psycopg
 Requires-Dist: psycopg-binary; extra == "psycopg"
 Requires-Dist: psycopg; extra == "psycopg"
@@ -316,6 +318,7 @@ Requires-Dist: agno[mcp]; extra == "tools"
 Requires-Dist: agno[browserbase]; extra == "tools"
 Requires-Dist: agno[agentql]; extra == "tools"
 Requires-Dist: agno[opencv]; extra == "tools"
+Requires-Dist: agno[parallel]; extra == "tools"
 Requires-Dist: agno[scrapegraph]; extra == "tools"
 Requires-Dist: agno[valyu]; extra == "tools"
 Requires-Dist: agno[confluence]; extra == "tools"
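With the new extra declared above, the Parallel dependency can be installed as `pip install "agno[parallel]"`, or transitively via the aggregate extra with `pip install "agno[tools]"`, both of which pull in the `parallel-web` requirement.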