agno 2.3.24__py3-none-any.whl → 2.3.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +357 -28
- agno/db/base.py +214 -0
- agno/db/dynamo/dynamo.py +47 -0
- agno/db/firestore/firestore.py +47 -0
- agno/db/gcs_json/gcs_json_db.py +47 -0
- agno/db/in_memory/in_memory_db.py +47 -0
- agno/db/json/json_db.py +47 -0
- agno/db/mongo/async_mongo.py +229 -0
- agno/db/mongo/mongo.py +47 -0
- agno/db/mongo/schemas.py +16 -0
- agno/db/mysql/async_mysql.py +47 -0
- agno/db/mysql/mysql.py +47 -0
- agno/db/postgres/async_postgres.py +231 -0
- agno/db/postgres/postgres.py +239 -0
- agno/db/postgres/schemas.py +19 -0
- agno/db/redis/redis.py +47 -0
- agno/db/singlestore/singlestore.py +47 -0
- agno/db/sqlite/async_sqlite.py +242 -0
- agno/db/sqlite/schemas.py +18 -0
- agno/db/sqlite/sqlite.py +239 -0
- agno/db/surrealdb/surrealdb.py +47 -0
- agno/knowledge/chunking/code.py +90 -0
- agno/knowledge/chunking/document.py +62 -2
- agno/knowledge/chunking/strategy.py +14 -0
- agno/knowledge/knowledge.py +7 -1
- agno/knowledge/reader/arxiv_reader.py +1 -0
- agno/knowledge/reader/csv_reader.py +1 -0
- agno/knowledge/reader/docx_reader.py +1 -0
- agno/knowledge/reader/firecrawl_reader.py +1 -0
- agno/knowledge/reader/json_reader.py +1 -0
- agno/knowledge/reader/markdown_reader.py +1 -0
- agno/knowledge/reader/pdf_reader.py +1 -0
- agno/knowledge/reader/pptx_reader.py +1 -0
- agno/knowledge/reader/s3_reader.py +1 -0
- agno/knowledge/reader/tavily_reader.py +1 -0
- agno/knowledge/reader/text_reader.py +1 -0
- agno/knowledge/reader/web_search_reader.py +1 -0
- agno/knowledge/reader/website_reader.py +1 -0
- agno/knowledge/reader/wikipedia_reader.py +1 -0
- agno/knowledge/reader/youtube_reader.py +1 -0
- agno/knowledge/utils.py +1 -0
- agno/learn/__init__.py +65 -0
- agno/learn/config.py +463 -0
- agno/learn/curate.py +185 -0
- agno/learn/machine.py +690 -0
- agno/learn/schemas.py +1043 -0
- agno/learn/stores/__init__.py +35 -0
- agno/learn/stores/entity_memory.py +3275 -0
- agno/learn/stores/learned_knowledge.py +1583 -0
- agno/learn/stores/protocol.py +117 -0
- agno/learn/stores/session_context.py +1217 -0
- agno/learn/stores/user_memory.py +1495 -0
- agno/learn/stores/user_profile.py +1220 -0
- agno/learn/utils.py +209 -0
- agno/models/base.py +59 -0
- agno/os/routers/agents/router.py +4 -4
- agno/os/routers/knowledge/knowledge.py +7 -0
- agno/os/routers/teams/router.py +3 -3
- agno/os/routers/workflows/router.py +5 -5
- agno/os/utils.py +55 -3
- agno/team/team.py +131 -0
- agno/tools/browserbase.py +78 -6
- agno/tools/google_bigquery.py +11 -2
- agno/utils/agent.py +30 -1
- agno/workflow/workflow.py +198 -0
- {agno-2.3.24.dist-info → agno-2.3.26.dist-info}/METADATA +24 -2
- {agno-2.3.24.dist-info → agno-2.3.26.dist-info}/RECORD +70 -56
- {agno-2.3.24.dist-info → agno-2.3.26.dist-info}/WHEEL +0 -0
- {agno-2.3.24.dist-info → agno-2.3.26.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.24.dist-info → agno-2.3.26.dist-info}/top_level.txt +0 -0
agno/tools/browserbase.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import json
|
|
2
|
+
import re
|
|
2
3
|
from os import getenv
|
|
3
4
|
from typing import Any, Dict, List, Optional
|
|
4
5
|
|
|
@@ -22,6 +23,8 @@ class BrowserbaseTools(Toolkit):
|
|
|
22
23
|
enable_get_page_content: bool = True,
|
|
23
24
|
enable_close_session: bool = True,
|
|
24
25
|
all: bool = False,
|
|
26
|
+
parse_html: bool = True,
|
|
27
|
+
max_content_length: Optional[int] = 100000,
|
|
25
28
|
**kwargs,
|
|
26
29
|
):
|
|
27
30
|
"""Initialize BrowserbaseTools.
|
|
@@ -36,7 +39,14 @@ class BrowserbaseTools(Toolkit):
|
|
|
36
39
|
enable_get_page_content (bool): Enable the get_page_content tool. Defaults to True.
|
|
37
40
|
enable_close_session (bool): Enable the close_session tool. Defaults to True.
|
|
38
41
|
all (bool): Enable all tools. Defaults to False.
|
|
42
|
+
parse_html (bool): If True, extract only visible text content instead of raw HTML. Defaults to True.
|
|
43
|
+
This significantly reduces token usage and is recommended for most use cases.
|
|
44
|
+
max_content_length (int, optional): Maximum character length for page content. Defaults to 100000.
|
|
45
|
+
Content exceeding this limit will be truncated with a notice. Set to None for no limit.
|
|
39
46
|
"""
|
|
47
|
+
self.parse_html = parse_html
|
|
48
|
+
self.max_content_length = max_content_length
|
|
49
|
+
|
|
40
50
|
self.api_key = api_key or getenv("BROWSERBASE_API_KEY")
|
|
41
51
|
if not self.api_key:
|
|
42
52
|
raise ValueError(
|
|
@@ -191,18 +201,70 @@ class BrowserbaseTools(Toolkit):
|
|
|
191
201
|
self._cleanup()
|
|
192
202
|
raise e
|
|
193
203
|
|
|
204
|
+
def _extract_text_content(self, html: str) -> str:
|
|
205
|
+
"""Extract visible text content from HTML, removing scripts, styles, and tags.
|
|
206
|
+
|
|
207
|
+
Args:
|
|
208
|
+
html: Raw HTML content
|
|
209
|
+
|
|
210
|
+
Returns:
|
|
211
|
+
Cleaned text content
|
|
212
|
+
"""
|
|
213
|
+
# Remove script and style elements
|
|
214
|
+
html = re.sub(r"<script[^>]*>.*?</script>", "", html, flags=re.DOTALL | re.IGNORECASE)
|
|
215
|
+
html = re.sub(r"<style[^>]*>.*?</style>", "", html, flags=re.DOTALL | re.IGNORECASE)
|
|
216
|
+
# Remove HTML comments
|
|
217
|
+
html = re.sub(r"<!--.*?-->", "", html, flags=re.DOTALL)
|
|
218
|
+
# Remove all HTML tags
|
|
219
|
+
html = re.sub(r"<[^>]+>", " ", html)
|
|
220
|
+
# Decode common HTML entities
|
|
221
|
+
html = html.replace(" ", " ")
|
|
222
|
+
html = html.replace("&", "&")
|
|
223
|
+
html = html.replace("<", "<")
|
|
224
|
+
html = html.replace(">", ">")
|
|
225
|
+
html = html.replace(""", '"')
|
|
226
|
+
html = html.replace("'", "'")
|
|
227
|
+
# Normalize whitespace
|
|
228
|
+
html = re.sub(r"\s+", " ", html)
|
|
229
|
+
return html.strip()
|
|
230
|
+
|
|
231
|
+
def _truncate_content(self, content: str) -> str:
|
|
232
|
+
"""Truncate content if it exceeds max_content_length.
|
|
233
|
+
|
|
234
|
+
Args:
|
|
235
|
+
content: The content to potentially truncate
|
|
236
|
+
|
|
237
|
+
Returns:
|
|
238
|
+
Original or truncated content with notice
|
|
239
|
+
"""
|
|
240
|
+
if self.max_content_length is None or len(content) <= self.max_content_length:
|
|
241
|
+
return content
|
|
242
|
+
|
|
243
|
+
truncated = content[: self.max_content_length]
|
|
244
|
+
return f"{truncated}\n\n[Content truncated. Original length: {len(content)} characters. Showing first {self.max_content_length} characters.]"
|
|
245
|
+
|
|
194
246
|
def get_page_content(self, connect_url: Optional[str] = None) -> str:
    """Gets the content of the current page.

    Args:
        connect_url (str, optional): The connection URL from an existing session

    Returns:
        The page content (text-only if parse_html=True, otherwise raw HTML)
    """
    try:
        self._initialize_browser(connect_url)
        page = self._page
        if not page:
            return ""

        raw_html = page.content()
        # Optionally strip markup down to visible text to reduce token usage.
        content = self._extract_text_content(raw_html) if self.parse_html else raw_html
        return self._truncate_content(content)
    except Exception as e:
        self._cleanup()
        raise e
|
|
@@ -307,17 +369,27 @@ class BrowserbaseTools(Toolkit):
|
|
|
307
369
|
raise e
|
|
308
370
|
|
|
309
371
|
async def aget_page_content(self, connect_url: Optional[str] = None) -> str:
    """Gets the content of the current page asynchronously.

    Args:
        connect_url (str, optional): The connection URL from an existing session

    Returns:
        The page content (text-only if parse_html=True, otherwise raw HTML)
    """
    try:
        await self._ainitialize_browser(connect_url)
        page = self._async_page
        if not page:
            return ""

        raw_html = await page.content()
        # Optionally strip markup down to visible text to reduce token usage.
        content = self._extract_text_content(raw_html) if self.parse_html else raw_html
        return self._truncate_content(content)
    except Exception as e:
        await self._acleanup()
        raise e
|
agno/tools/google_bigquery.py
CHANGED
|
@@ -11,6 +11,15 @@ except ImportError:
|
|
|
11
11
|
raise ImportError("`bigquery` not installed. Please install using `pip install google-cloud-bigquery`")
|
|
12
12
|
|
|
13
13
|
|
|
14
|
+
def _clean_sql(sql: str) -> str:
|
|
15
|
+
"""Clean SQL query by normalizing whitespace while preserving token boundaries.
|
|
16
|
+
|
|
17
|
+
Replaces newlines with spaces (not empty strings) to prevent line comments
|
|
18
|
+
from swallowing subsequent SQL statements.
|
|
19
|
+
"""
|
|
20
|
+
return sql.replace("\\n", " ").replace("\n", " ")
|
|
21
|
+
|
|
22
|
+
|
|
14
23
|
class GoogleBigQueryTools(Toolkit):
|
|
15
24
|
def __init__(
|
|
16
25
|
self,
|
|
@@ -106,12 +115,12 @@ class GoogleBigQueryTools(Toolkit):
|
|
|
106
115
|
"""
|
|
107
116
|
try:
|
|
108
117
|
log_debug(f"Running Google SQL |\n{sql}")
|
|
109
|
-
cleaned_query = sql
|
|
118
|
+
cleaned_query = _clean_sql(sql)
|
|
110
119
|
job_config = bigquery.QueryJobConfig(default_dataset=f"{self.project}.{self.dataset}")
|
|
111
120
|
query_job = self.client.query(cleaned_query, job_config)
|
|
112
121
|
results = query_job.result()
|
|
113
122
|
results_str = str([dict(row) for row in results])
|
|
114
|
-
return results_str.replace("\n", "")
|
|
123
|
+
return results_str.replace("\n", " ")
|
|
115
124
|
except Exception as e:
|
|
116
125
|
logger.error(f"Error while executing SQL: {e}")
|
|
117
126
|
return ""
|
agno/utils/agent.py
CHANGED
|
@@ -30,6 +30,7 @@ if TYPE_CHECKING:
|
|
|
30
30
|
async def await_for_open_threads(
|
|
31
31
|
memory_task: Optional[Task] = None,
|
|
32
32
|
cultural_knowledge_task: Optional[Task] = None,
|
|
33
|
+
learning_task: Optional[Task] = None,
|
|
33
34
|
) -> None:
|
|
34
35
|
if memory_task is not None:
|
|
35
36
|
try:
|
|
@@ -43,9 +44,17 @@ async def await_for_open_threads(
|
|
|
43
44
|
except Exception as e:
|
|
44
45
|
log_warning(f"Error in cultural knowledge creation: {str(e)}")
|
|
45
46
|
|
|
47
|
+
if learning_task is not None:
|
|
48
|
+
try:
|
|
49
|
+
await learning_task
|
|
50
|
+
except Exception as e:
|
|
51
|
+
log_warning(f"Error in learning extraction: {str(e)}")
|
|
52
|
+
|
|
46
53
|
|
|
47
54
|
def wait_for_open_threads(
|
|
48
|
-
memory_future: Optional[Future] = None,
|
|
55
|
+
memory_future: Optional[Future] = None,
|
|
56
|
+
cultural_knowledge_future: Optional[Future] = None,
|
|
57
|
+
learning_future: Optional[Future] = None,
|
|
49
58
|
) -> None:
|
|
50
59
|
if memory_future is not None:
|
|
51
60
|
try:
|
|
@@ -60,11 +69,18 @@ def wait_for_open_threads(
|
|
|
60
69
|
except Exception as e:
|
|
61
70
|
log_warning(f"Error in cultural knowledge creation: {str(e)}")
|
|
62
71
|
|
|
72
|
+
if learning_future is not None:
|
|
73
|
+
try:
|
|
74
|
+
learning_future.result()
|
|
75
|
+
except Exception as e:
|
|
76
|
+
log_warning(f"Error in learning extraction: {str(e)}")
|
|
77
|
+
|
|
63
78
|
|
|
64
79
|
async def await_for_thread_tasks_stream(
|
|
65
80
|
run_response: Union[RunOutput, TeamRunOutput],
|
|
66
81
|
memory_task: Optional[Task] = None,
|
|
67
82
|
cultural_knowledge_task: Optional[Task] = None,
|
|
83
|
+
learning_task: Optional[Task] = None,
|
|
68
84
|
stream_events: bool = False,
|
|
69
85
|
events_to_skip: Optional[List[RunEvent]] = None,
|
|
70
86
|
store_events: bool = False,
|
|
@@ -111,11 +127,18 @@ async def await_for_thread_tasks_stream(
|
|
|
111
127
|
except Exception as e:
|
|
112
128
|
log_warning(f"Error in cultural knowledge creation: {str(e)}")
|
|
113
129
|
|
|
130
|
+
if learning_task is not None:
|
|
131
|
+
try:
|
|
132
|
+
await learning_task
|
|
133
|
+
except Exception as e:
|
|
134
|
+
log_warning(f"Error in learning extraction: {str(e)}")
|
|
135
|
+
|
|
114
136
|
|
|
115
137
|
def wait_for_thread_tasks_stream(
|
|
116
138
|
run_response: Union[TeamRunOutput, RunOutput],
|
|
117
139
|
memory_future: Optional[Future] = None,
|
|
118
140
|
cultural_knowledge_future: Optional[Future] = None,
|
|
141
|
+
learning_future: Optional[Future] = None,
|
|
119
142
|
stream_events: bool = False,
|
|
120
143
|
events_to_skip: Optional[List[RunEvent]] = None,
|
|
121
144
|
store_events: bool = False,
|
|
@@ -164,6 +187,12 @@ def wait_for_thread_tasks_stream(
|
|
|
164
187
|
except Exception as e:
|
|
165
188
|
log_warning(f"Error in cultural knowledge creation: {str(e)}")
|
|
166
189
|
|
|
190
|
+
if learning_future is not None:
|
|
191
|
+
try:
|
|
192
|
+
learning_future.result()
|
|
193
|
+
except Exception as e:
|
|
194
|
+
log_warning(f"Error in learning extraction: {str(e)}")
|
|
195
|
+
|
|
167
196
|
|
|
168
197
|
def collect_joint_images(
|
|
169
198
|
run_input: Optional[RunInput] = None,
|
agno/workflow/workflow.py
CHANGED
|
@@ -4481,3 +4481,201 @@ class Workflow:
|
|
|
4481
4481
|
session_id=session_id,
|
|
4482
4482
|
**kwargs,
|
|
4483
4483
|
)
|
|
4484
|
+
|
|
4485
|
+
def deep_copy(self, *, update: Optional[Dict[str, Any]] = None) -> "Workflow":
    """Create and return a deep copy of this Workflow, optionally updating fields.

    This creates a fresh Workflow instance with isolated mutable state while sharing
    heavy resources like database connections. Steps containing agents/teams are also
    deep copied to ensure complete isolation.

    Args:
        update: Optional dictionary of fields to override in the new Workflow.

    Returns:
        Workflow: A new Workflow instance with copied state.

    Raises:
        Exception: Re-raises whatever the Workflow constructor raises if the
            copied field set cannot be used to build a new instance.
    """
    from copy import copy, deepcopy
    from dataclasses import fields

    from agno.utils.log import log_debug, log_warning

    # Extract the fields to set for the new Workflow
    fields_for_new_workflow: Dict[str, Any] = {}

    for f in fields(self):
        # Skip private fields (not part of __init__ signature)
        if f.name.startswith("_"):
            continue

        field_value = getattr(self, f.name)
        if field_value is not None:
            # Special handling for steps that may contain agents/teams
            # NOTE(review): the inner `and field_value is not None` checks here and
            # below are redundant — the enclosing `if` already guarantees it.
            if f.name == "steps" and field_value is not None:
                fields_for_new_workflow[f.name] = self._deep_copy_steps(field_value)
            # Special handling for workflow agent
            elif f.name == "agent" and field_value is not None:
                if hasattr(field_value, "deep_copy"):
                    fields_for_new_workflow[f.name] = field_value.deep_copy()
                else:
                    fields_for_new_workflow[f.name] = field_value
            # Share heavy resources - these maintain connections/pools that shouldn't be duplicated
            elif f.name == "db":
                fields_for_new_workflow[f.name] = field_value
            # For compound types, attempt a deep copy
            elif isinstance(field_value, (list, dict, set)):
                try:
                    fields_for_new_workflow[f.name] = deepcopy(field_value)
                except Exception:
                    # Fall back to a shallow copy, then to sharing the original.
                    try:
                        fields_for_new_workflow[f.name] = copy(field_value)
                    except Exception as e:
                        log_warning(f"Failed to copy field: {f.name} - {e}")
                        fields_for_new_workflow[f.name] = field_value
            # For pydantic models, attempt a model_copy
            elif isinstance(field_value, BaseModel):
                try:
                    fields_for_new_workflow[f.name] = field_value.model_copy(deep=True)
                except Exception:
                    try:
                        fields_for_new_workflow[f.name] = field_value.model_copy(deep=False)
                    except Exception:
                        fields_for_new_workflow[f.name] = field_value
            # For other types, attempt a shallow copy
            else:
                try:
                    fields_for_new_workflow[f.name] = copy(field_value)
                except Exception:
                    fields_for_new_workflow[f.name] = field_value

    # Update fields if provided (caller-supplied overrides win over copied values)
    if update:
        fields_for_new_workflow.update(update)

    # Create a new Workflow
    try:
        new_workflow = self.__class__(**fields_for_new_workflow)
        log_debug(f"Created new {self.__class__.__name__}")
        return new_workflow
    except Exception as e:
        from agno.utils.log import log_error

        log_error(f"Failed to create deep copy of {self.__class__.__name__}: {e}")
        raise
|
|
4564
|
+
|
|
4565
|
+
def _deep_copy_steps(self, steps: Any) -> Any:
    """Deep copy workflow steps, handling nested agents and teams."""
    from agno.workflow.steps import Steps

    if steps is None:
        return None

    # A Steps container: rebuild it with each inner step copied individually.
    if isinstance(steps, Steps):
        inner = [self._deep_copy_single_step(s) for s in (steps.steps or [])]
        return Steps(steps=inner)

    # A plain list of steps.
    if isinstance(steps, list):
        return [self._deep_copy_single_step(s) for s in steps]

    # Callables (custom executor functions) are shared, not copied.
    if callable(steps):
        return steps

    # Anything else is treated as a single step.
    return self._deep_copy_single_step(steps)
|
|
4590
|
+
|
|
4591
|
+
def _deep_copy_single_step(self, step: Any) -> Any:
    """Deep copy a single step, handling nested agents and teams.

    Args:
        step: A Step, Agent, Team, Parallel, Loop, Condition, Router, Steps
            container, or any other object used as a workflow step.

    Returns:
        A copied step of the same kind. Objects that cannot be copied are
        returned as-is (shared with the original workflow).
    """
    from copy import copy, deepcopy

    from agno.agent import Agent
    from agno.team import Team
    from agno.workflow.condition import Condition
    from agno.workflow.loop import Loop
    from agno.workflow.parallel import Parallel
    from agno.workflow.router import Router
    from agno.workflow.step import Step
    from agno.workflow.steps import Steps

    # Handle Step with agent or team
    if isinstance(step, Step):
        step_kwargs: Dict[str, Any] = {}
        if step.name:
            step_kwargs["name"] = step.name
        if step.description:
            step_kwargs["description"] = step.description
        if step.executor:
            # Executor callables are shared, not copied.
            step_kwargs["executor"] = step.executor
        if step.agent:
            # Agents/teams know how to deep-copy themselves; fall back to sharing.
            step_kwargs["agent"] = step.agent.deep_copy() if hasattr(step.agent, "deep_copy") else step.agent
        if step.team:
            step_kwargs["team"] = step.team.deep_copy() if hasattr(step.team, "deep_copy") else step.team
        # Copy Step configuration attributes
        for attr in [
            "max_retries",
            "timeout_seconds",
            "skip_on_failure",
            "strict_input_validation",
            "add_workflow_history",
            "num_history_runs",
        ]:
            if hasattr(step, attr):
                value = getattr(step, attr)
                # Only include non-default values to avoid overriding defaults
                if value is not None:
                    step_kwargs[attr] = value
        return Step(**step_kwargs)

    # Handle direct Agent
    if isinstance(step, Agent):
        return step.deep_copy() if hasattr(step, "deep_copy") else step

    # Handle direct Team
    if isinstance(step, Team):
        return step.deep_copy() if hasattr(step, "deep_copy") else step

    # Handle Parallel steps (copied recursively)
    if isinstance(step, Parallel):
        copied_parallel_steps = [self._deep_copy_single_step(s) for s in step.steps] if step.steps else []
        return Parallel(*copied_parallel_steps, name=step.name, description=step.description)

    # Handle Loop steps (end_condition callable is shared, not copied)
    if isinstance(step, Loop):
        copied_loop_steps = [self._deep_copy_single_step(s) for s in step.steps] if step.steps else []
        return Loop(
            steps=copied_loop_steps,
            name=step.name,
            description=step.description,
            max_iterations=step.max_iterations,
            end_condition=step.end_condition,
        )

    # Handle Condition steps (evaluator callable is shared, not copied)
    if isinstance(step, Condition):
        copied_condition_steps = [self._deep_copy_single_step(s) for s in step.steps] if step.steps else []
        return Condition(
            evaluator=step.evaluator, steps=copied_condition_steps, name=step.name, description=step.description
        )

    # Handle Router steps (selector callable is shared, not copied)
    if isinstance(step, Router):
        copied_choices = [self._deep_copy_single_step(s) for s in step.choices] if step.choices else []
        return Router(choices=copied_choices, name=step.name, description=step.description, selector=step.selector)

    # Handle Steps container
    if isinstance(step, Steps):
        copied_steps = [self._deep_copy_single_step(s) for s in step.steps] if step.steps else []
        return Steps(name=step.name, description=step.description, steps=copied_steps)

    # For other types, attempt deep copy, then shallow copy, then share the original
    try:
        return deepcopy(step)
    except Exception:
        try:
            return copy(step)
        except Exception:
            return step
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: agno
|
|
3
|
-
Version: 2.3.24
|
|
3
|
+
Version: 2.3.26
|
|
4
4
|
Summary: Agno: a lightweight library for building Multi-Agent Systems
|
|
5
5
|
Author-email: Ashpreet Bedi <ashpreet@agno.com>
|
|
6
6
|
Project-URL: homepage, https://agno.com
|
|
@@ -264,7 +264,8 @@ Requires-Dist: unstructured; extra == "markdown"
|
|
|
264
264
|
Requires-Dist: markdown; extra == "markdown"
|
|
265
265
|
Requires-Dist: aiofiles; extra == "markdown"
|
|
266
266
|
Provides-Extra: chonkie
|
|
267
|
-
Requires-Dist: chonkie[semantic]; extra == "chonkie"
|
|
267
|
+
Requires-Dist: chonkie[semantic]; extra == "chonkie"
|
|
268
|
+
Requires-Dist: chonkie[code]; extra == "chonkie"
|
|
268
269
|
Requires-Dist: chonkie; extra == "chonkie"
|
|
269
270
|
Provides-Extra: agui
|
|
270
271
|
Requires-Dist: ag-ui-protocol; extra == "agui"
|
|
@@ -400,6 +401,27 @@ Requires-Dist: yfinance; extra == "integration-tests"
|
|
|
400
401
|
Requires-Dist: sqlalchemy; extra == "integration-tests"
|
|
401
402
|
Requires-Dist: Pillow; extra == "integration-tests"
|
|
402
403
|
Requires-Dist: fastmcp; extra == "integration-tests"
|
|
404
|
+
Provides-Extra: demo
|
|
405
|
+
Requires-Dist: anthropic; extra == "demo"
|
|
406
|
+
Requires-Dist: chromadb; extra == "demo"
|
|
407
|
+
Requires-Dist: ddgs; extra == "demo"
|
|
408
|
+
Requires-Dist: fastapi[standard]; extra == "demo"
|
|
409
|
+
Requires-Dist: google-genai; extra == "demo"
|
|
410
|
+
Requires-Dist: mcp; extra == "demo"
|
|
411
|
+
Requires-Dist: nest_asyncio; extra == "demo"
|
|
412
|
+
Requires-Dist: openai; extra == "demo"
|
|
413
|
+
Requires-Dist: openinference-instrumentation-agno; extra == "demo"
|
|
414
|
+
Requires-Dist: opentelemetry-api; extra == "demo"
|
|
415
|
+
Requires-Dist: opentelemetry-sdk; extra == "demo"
|
|
416
|
+
Requires-Dist: pandas; extra == "demo"
|
|
417
|
+
Requires-Dist: parallel-web; extra == "demo"
|
|
418
|
+
Requires-Dist: pgvector; extra == "demo"
|
|
419
|
+
Requires-Dist: pillow; extra == "demo"
|
|
420
|
+
Requires-Dist: psycopg[binary]; extra == "demo"
|
|
421
|
+
Requires-Dist: pypdf; extra == "demo"
|
|
422
|
+
Requires-Dist: sqlalchemy; extra == "demo"
|
|
423
|
+
Requires-Dist: yfinance; extra == "demo"
|
|
424
|
+
Requires-Dist: youtube-transcript-api; extra == "demo"
|
|
403
425
|
Dynamic: license-file
|
|
404
426
|
|
|
405
427
|
<div align="center" id="top">
|