agno 2.4.2__py3-none-any.whl → 2.4.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +13 -0
- agno/db/firestore/firestore.py +53 -30
- agno/db/surrealdb/models.py +5 -5
- agno/db/surrealdb/surrealdb.py +13 -1
- agno/knowledge/chunking/markdown.py +112 -11
- agno/knowledge/embedder/openai.py +8 -4
- agno/knowledge/knowledge.py +59 -6
- agno/knowledge/reader/csv_reader.py +48 -216
- agno/knowledge/reader/excel_reader.py +225 -0
- agno/knowledge/reader/field_labeled_csv_reader.py +13 -179
- agno/knowledge/reader/reader_factory.py +22 -5
- agno/knowledge/reader/utils/__init__.py +17 -0
- agno/knowledge/reader/utils/spreadsheet.py +114 -0
- agno/models/base.py +6 -0
- agno/models/moonshot/__init__.py +3 -0
- agno/models/moonshot/moonshot.py +57 -0
- agno/models/openrouter/responses.py +2 -2
- agno/models/response.py +4 -0
- agno/models/utils.py +5 -0
- agno/os/routers/knowledge/knowledge.py +5 -3
- agno/run/base.py +4 -0
- agno/tools/decorator.py +3 -0
- agno/tools/function.py +3 -0
- agno/tools/unsplash.py +341 -0
- agno/utils/print_response/agent.py +8 -5
- agno/utils/response.py +38 -28
- agno/utils/string.py +2 -1
- agno/vectordb/lancedb/lance_db.py +29 -7
- agno/workflow/workflow.py +16 -6
- {agno-2.4.2.dist-info → agno-2.4.4.dist-info}/METADATA +7 -5
- {agno-2.4.2.dist-info → agno-2.4.4.dist-info}/RECORD +34 -28
- {agno-2.4.2.dist-info → agno-2.4.4.dist-info}/WHEEL +1 -1
- {agno-2.4.2.dist-info → agno-2.4.4.dist-info}/licenses/LICENSE +0 -0
- {agno-2.4.2.dist-info → agno-2.4.4.dist-info}/top_level.txt +0 -0
agno/tools/function.py
CHANGED
|
@@ -121,6 +121,9 @@ class Function(BaseModel):
|
|
|
121
121
|
# If True, the function will be executed outside the agent's control.
|
|
122
122
|
external_execution: Optional[bool] = None
|
|
123
123
|
|
|
124
|
+
# If True (and external_execution=True), the function will not produce verbose paused messages (e.g., "I have tools to execute...")
|
|
125
|
+
external_execution_silent: Optional[bool] = None
|
|
126
|
+
|
|
124
127
|
# Caching configuration
|
|
125
128
|
cache_results: bool = False
|
|
126
129
|
cache_dir: Optional[str] = None
|
agno/tools/unsplash.py
ADDED
|
@@ -0,0 +1,341 @@
|
|
|
1
|
+
"""Unsplash Tools for searching and retrieving high-quality, royalty-free images.
|
|
2
|
+
|
|
3
|
+
This toolkit provides AI agents with the ability to search for and retrieve images
|
|
4
|
+
from Unsplash, a popular platform with over 4.3 million high-quality photos.
|
|
5
|
+
|
|
6
|
+
Get your free API key at: https://unsplash.com/developers
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
from os import getenv
|
|
11
|
+
from typing import Any, Dict, List, Optional
|
|
12
|
+
from urllib.parse import urlencode
|
|
13
|
+
from urllib.request import Request, urlopen
|
|
14
|
+
|
|
15
|
+
from agno.tools import Toolkit
|
|
16
|
+
from agno.utils.log import log_debug, logger
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class UnsplashTools(Toolkit):
    """A toolkit for searching and retrieving images from Unsplash.

    Unsplash provides access to over 4.3 million high-quality, royalty-free images
    that can be used for various purposes. This toolkit enables AI agents to:
    - Search for photos by keywords
    - Get detailed information about specific photos
    - Retrieve random photos with optional filters
    - Track downloads (required by Unsplash API guidelines)

    Example:
        ```python
        from agno.agent import Agent
        from agno.models.openai import OpenAIChat
        from agno.tools.unsplash import UnsplashTools

        agent = Agent(
            model=OpenAIChat(id="gpt-4o"),
            tools=[UnsplashTools()],
        )
        agent.print_response("Find me 3 photos of mountains at sunset")
        ```
    """

    def __init__(
        self,
        access_key: Optional[str] = None,
        enable_search_photos: bool = True,
        enable_get_photo: bool = True,
        enable_get_random_photo: bool = True,
        enable_download_photo: bool = False,
        all: bool = False,  # noqa: A002 - shadows builtin; kept for toolkit API consistency
        request_timeout: float = 30.0,
        **kwargs: Any,
    ):
        """Initialize the Unsplash toolkit.

        Args:
            access_key: Unsplash API access key. If not provided, will look for
                UNSPLASH_ACCESS_KEY environment variable.
            enable_search_photos: Enable the search_photos tool. Default: True.
            enable_get_photo: Enable the get_photo tool. Default: True.
            enable_get_random_photo: Enable the get_random_photo tool. Default: True.
            enable_download_photo: Enable the download_photo tool. Default: False.
            all: Enable all tools. Default: False.
            request_timeout: Socket timeout in seconds for Unsplash API requests.
                Prevents a stalled connection from hanging the agent. Default: 30.0.
            **kwargs: Additional arguments passed to the Toolkit base class.
        """
        self.access_key = access_key or getenv("UNSPLASH_ACCESS_KEY")
        if not self.access_key:
            logger.warning("No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable.")

        self.base_url = "https://api.unsplash.com"
        self.request_timeout = request_timeout

        tools: List[Any] = []
        if all or enable_search_photos:
            tools.append(self.search_photos)
        if all or enable_get_photo:
            tools.append(self.get_photo)
        if all or enable_get_random_photo:
            tools.append(self.get_random_photo)
        if all or enable_download_photo:
            tools.append(self.download_photo)

        super().__init__(name="unsplash_tools", tools=tools, **kwargs)

    def _make_request(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Make an authenticated request to the Unsplash API.

        Args:
            endpoint: API endpoint path (e.g., "/search/photos").
            params: Optional query parameters.

        Returns:
            JSON response as a dictionary.

        Raises:
            Exception: If the API request fails or times out.
        """
        url = f"{self.base_url}{endpoint}"
        if params:
            url = f"{url}?{urlencode(params)}"

        headers = {
            "Authorization": f"Client-ID {self.access_key}",
            "Accept-Version": "v1",
        }

        request = Request(url, headers=headers)
        # Bounded timeout: without it, a stalled connection would block forever.
        with urlopen(request, timeout=self.request_timeout) as response:
            return json.loads(response.read().decode())

    def _format_photo(self, photo: Dict[str, Any]) -> Dict[str, Any]:
        """Format photo data into a clean, consistent structure.

        Args:
            photo: Raw photo data from Unsplash API.

        Returns:
            Formatted photo dictionary with essential fields.
        """
        return {
            "id": photo.get("id"),
            # Fall back to alt_description since many photos lack a description
            "description": photo.get("description") or photo.get("alt_description"),
            "width": photo.get("width"),
            "height": photo.get("height"),
            "color": photo.get("color"),
            "created_at": photo.get("created_at"),
            "urls": {
                "raw": photo.get("urls", {}).get("raw"),
                "full": photo.get("urls", {}).get("full"),
                "regular": photo.get("urls", {}).get("regular"),
                "small": photo.get("urls", {}).get("small"),
                "thumb": photo.get("urls", {}).get("thumb"),
            },
            "author": {
                "name": photo.get("user", {}).get("name"),
                "username": photo.get("user", {}).get("username"),
                "profile_url": photo.get("user", {}).get("links", {}).get("html"),
            },
            "links": {
                "html": photo.get("links", {}).get("html"),
                "download": photo.get("links", {}).get("download"),
            },
            "likes": photo.get("likes"),
            # Cap at 5 tags to keep tool output compact for the model
            "tags": [tag.get("title") for tag in photo.get("tags", [])[:5] if tag.get("title")],
        }

    def search_photos(
        self,
        query: str,
        per_page: int = 10,
        page: int = 1,
        orientation: Optional[str] = None,
        color: Optional[str] = None,
    ) -> str:
        """Search for photos on Unsplash by keyword.

        Args:
            query: The search query string (e.g., "mountain sunset", "office workspace").
            per_page: Number of results per page (1-30). Default: 10.
            page: Page number to retrieve. Default: 1.
            orientation: Filter by orientation: "landscape", "portrait", or "squarish".
            color: Filter by color: "black_and_white", "black", "white", "yellow",
                "orange", "red", "purple", "magenta", "green", "teal", "blue".

        Returns:
            JSON string containing search results with photo details including
            URLs, author information, and metadata.
        """
        if not self.access_key:
            return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."

        if not query:
            return "Error: Please provide a search query."

        log_debug(f"Searching Unsplash for: {query}")

        try:
            params: Dict[str, Any] = {
                "query": query,
                # Clamp to the API's documented 1-30 range
                "per_page": min(max(1, per_page), 30),
                "page": max(1, page),
            }

            if orientation and orientation in ["landscape", "portrait", "squarish"]:
                params["orientation"] = orientation

            if color:
                valid_colors = {
                    "black_and_white",
                    "black",
                    "white",
                    "yellow",
                    "orange",
                    "red",
                    "purple",
                    "magenta",
                    "green",
                    "teal",
                    "blue",
                }
                if color in valid_colors:
                    params["color"] = color

            response = self._make_request("/search/photos", params)

            results = {
                "total": response.get("total", 0),
                "total_pages": response.get("total_pages", 0),
                "photos": [self._format_photo(photo) for photo in response.get("results", [])],
            }

            return json.dumps(results, indent=2)

        except Exception as e:
            return f"Error searching Unsplash: {e}"

    def get_photo(self, photo_id: str) -> str:
        """Get detailed information about a specific photo.

        Args:
            photo_id: The unique identifier of the photo.

        Returns:
            JSON string containing detailed photo information including
            URLs, author, metadata, EXIF data, and location if available.
        """
        if not self.access_key:
            return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."

        if not photo_id:
            return "Error: Please provide a photo ID."

        log_debug(f"Getting Unsplash photo: {photo_id}")

        try:
            photo = self._make_request(f"/photos/{photo_id}")

            result = self._format_photo(photo)

            # Add extra details available for single photo requests
            if photo.get("exif"):
                result["exif"] = {
                    "make": photo["exif"].get("make"),
                    "model": photo["exif"].get("model"),
                    "aperture": photo["exif"].get("aperture"),
                    "exposure_time": photo["exif"].get("exposure_time"),
                    "focal_length": photo["exif"].get("focal_length"),
                    "iso": photo["exif"].get("iso"),
                }

            if photo.get("location"):
                result["location"] = {
                    "name": photo["location"].get("name"),
                    "city": photo["location"].get("city"),
                    "country": photo["location"].get("country"),
                }

            result["views"] = photo.get("views")
            result["downloads"] = photo.get("downloads")

            return json.dumps(result, indent=2)

        except Exception as e:
            return f"Error getting photo: {e}"

    def get_random_photo(
        self,
        query: Optional[str] = None,
        orientation: Optional[str] = None,
        count: int = 1,
    ) -> str:
        """Get random photo(s) from Unsplash.

        Args:
            query: Optional search query to filter random photos.
            orientation: Filter by orientation: "landscape", "portrait", or "squarish".
            count: Number of random photos to return (1-30). Default: 1.

        Returns:
            JSON string containing random photo(s) data.
        """
        if not self.access_key:
            return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."

        log_debug(f"Getting random Unsplash photo (query={query})")

        try:
            params: Dict[str, Any] = {
                "count": min(max(1, count), 30),
            }

            if query:
                params["query"] = query

            if orientation and orientation in ["landscape", "portrait", "squarish"]:
                params["orientation"] = orientation

            response = self._make_request("/photos/random", params)

            # Response is a list when count > 1, single object when count = 1
            if isinstance(response, list):
                photos = [self._format_photo(photo) for photo in response]
            else:
                photos = [self._format_photo(response)]

            return json.dumps({"photos": photos}, indent=2)

        except Exception as e:
            return f"Error getting random photo: {e}"

    def download_photo(self, photo_id: str) -> str:
        """Trigger a download event for a photo.

        This is required by the Unsplash API guidelines when a photo is downloaded
        or used. It helps photographers track the usage of their work.

        Args:
            photo_id: The unique identifier of the photo being downloaded.

        Returns:
            JSON string with the download URL.
        """
        if not self.access_key:
            return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."

        if not photo_id:
            return "Error: Please provide a photo ID."

        log_debug(f"Tracking download for Unsplash photo: {photo_id}")

        try:
            response = self._make_request(f"/photos/{photo_id}/download")

            return json.dumps(
                {
                    "photo_id": photo_id,
                    "download_url": response.get("url"),
                },
                indent=2,
            )

        except Exception as e:
            return f"Error tracking download: {e}"
|
@@ -105,8 +105,9 @@ def print_response_stream(
|
|
|
105
105
|
if response_event.is_paused: # type: ignore
|
|
106
106
|
response_event = cast(RunPausedEvent, response_event) # type: ignore
|
|
107
107
|
response_panel = create_paused_run_output_panel(response_event) # type: ignore
|
|
108
|
-
|
|
109
|
-
|
|
108
|
+
if response_panel is not None:
|
|
109
|
+
panels.append(response_panel)
|
|
110
|
+
live_log.update(Group(*panels))
|
|
110
111
|
return
|
|
111
112
|
|
|
112
113
|
if response_event.event == RunEvent.pre_hook_completed: # type: ignore
|
|
@@ -310,8 +311,9 @@ async def aprint_response_stream(
|
|
|
310
311
|
if isinstance(resp, tuple(get_args(RunOutputEvent))):
|
|
311
312
|
if resp.is_paused:
|
|
312
313
|
response_panel = create_paused_run_output_panel(resp) # type: ignore
|
|
313
|
-
|
|
314
|
-
|
|
314
|
+
if response_panel is not None:
|
|
315
|
+
panels.append(response_panel)
|
|
316
|
+
live_log.update(Group(*panels))
|
|
315
317
|
break
|
|
316
318
|
|
|
317
319
|
if (
|
|
@@ -798,7 +800,8 @@ def build_panels(
|
|
|
798
800
|
|
|
799
801
|
if isinstance(run_response, RunOutput) and run_response.is_paused:
|
|
800
802
|
response_panel = create_paused_run_output_panel(run_response)
|
|
801
|
-
|
|
803
|
+
if response_panel is not None:
|
|
804
|
+
panels.append(response_panel)
|
|
802
805
|
return panels
|
|
803
806
|
|
|
804
807
|
if isinstance(run_response, RunOutput) and run_response.reasoning_steps is not None:
|
agno/utils/response.py
CHANGED
|
@@ -80,35 +80,41 @@ def format_tool_calls(tool_calls: List[ToolExecution]) -> List[str]:
|
|
|
80
80
|
def create_paused_run_output_panel(run_output: Union[RunPausedEvent, RunOutput]):
|
|
81
81
|
from rich.text import Text
|
|
82
82
|
|
|
83
|
+
# Filter out silent tools - they don't produce verbose output
|
|
84
|
+
non_silent_tools = [tc for tc in (run_output.tools or []) if not tc.external_execution_silent]
|
|
85
|
+
|
|
86
|
+
# If all tools are silent, return None to indicate no panel should be shown
|
|
87
|
+
if not non_silent_tools:
|
|
88
|
+
return None
|
|
89
|
+
|
|
83
90
|
tool_calls_content = Text("Run is paused. ")
|
|
84
|
-
if
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
|
|
91
|
+
if any(tc.requires_confirmation for tc in non_silent_tools):
|
|
92
|
+
tool_calls_content.append("The following tool calls require confirmation:\n")
|
|
93
|
+
for tool_call in non_silent_tools:
|
|
94
|
+
if tool_call.requires_confirmation:
|
|
95
|
+
args_str = ""
|
|
96
|
+
for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
|
|
97
|
+
args_str += f"{arg}={value}, "
|
|
98
|
+
args_str = args_str.rstrip(", ")
|
|
99
|
+
tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
|
|
100
|
+
if any(tc.requires_user_input for tc in non_silent_tools):
|
|
101
|
+
tool_calls_content.append("The following tool calls require user input:\n")
|
|
102
|
+
for tool_call in non_silent_tools:
|
|
103
|
+
if tool_call.requires_user_input:
|
|
104
|
+
args_str = ""
|
|
105
|
+
for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
|
|
106
|
+
args_str += f"{arg}={value}, "
|
|
107
|
+
args_str = args_str.rstrip(", ")
|
|
108
|
+
tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
|
|
109
|
+
if any(tc.external_execution_required for tc in non_silent_tools):
|
|
110
|
+
tool_calls_content.append("The following tool calls require external execution:\n")
|
|
111
|
+
for tool_call in non_silent_tools:
|
|
112
|
+
if tool_call.external_execution_required:
|
|
113
|
+
args_str = ""
|
|
114
|
+
for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
|
|
115
|
+
args_str += f"{arg}={value}, "
|
|
116
|
+
args_str = args_str.rstrip(", ")
|
|
117
|
+
tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
|
|
112
118
|
|
|
113
119
|
# Create panel for response
|
|
114
120
|
response_panel = create_panel(
|
|
@@ -122,6 +128,10 @@ def create_paused_run_output_panel(run_output: Union[RunPausedEvent, RunOutput])
|
|
|
122
128
|
def get_paused_content(run_output: RunOutput) -> str:
|
|
123
129
|
paused_content = ""
|
|
124
130
|
for tool in run_output.tools or []:
|
|
131
|
+
# Skip silent tools - they don't produce verbose paused messages
|
|
132
|
+
if tool.external_execution_silent:
|
|
133
|
+
continue
|
|
134
|
+
|
|
125
135
|
# Initialize flags for each tool
|
|
126
136
|
confirmation_required = False
|
|
127
137
|
user_input_required = False
|
agno/utils/string.py
CHANGED
|
@@ -89,7 +89,8 @@ def _clean_json_content(content: str) -> str:
|
|
|
89
89
|
if "```json" in content:
|
|
90
90
|
content = content.split("```json")[-1].strip()
|
|
91
91
|
parts = content.split("```")
|
|
92
|
-
parts
|
|
92
|
+
if len(parts) > 1:
|
|
93
|
+
parts.pop(-1)
|
|
93
94
|
content = "".join(parts)
|
|
94
95
|
elif "```" in content:
|
|
95
96
|
content = content.split("```")[1].strip()
|
|
@@ -282,9 +282,10 @@ class LanceDb(VectorDb):
|
|
|
282
282
|
meta_data.update(filters)
|
|
283
283
|
document.meta_data = meta_data
|
|
284
284
|
|
|
285
|
-
# Only embed if the document doesn't already have
|
|
285
|
+
# Only embed if the document doesn't already have a valid embedding
|
|
286
286
|
# This prevents duplicate embedding when called from async_insert or async_upsert
|
|
287
|
-
|
|
287
|
+
# Check for both None and empty list (async embedding failures return [])
|
|
288
|
+
if document.embedding is None or (isinstance(document.embedding, list) and len(document.embedding) == 0):
|
|
288
289
|
document.embed(embedder=self.embedder)
|
|
289
290
|
cleaned_content = document.content.replace("\x00", "\ufffd")
|
|
290
291
|
# Include content_hash in ID to ensure uniqueness across different content hashes
|
|
@@ -363,12 +364,21 @@ class LanceDb(VectorDb):
|
|
|
363
364
|
else:
|
|
364
365
|
logger.warning(f"Async batch embedding failed, falling back to individual embeddings: {e}")
|
|
365
366
|
embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
|
|
366
|
-
await asyncio.gather(*embed_tasks, return_exceptions=True)
|
|
367
|
+
results = await asyncio.gather(*embed_tasks, return_exceptions=True)
|
|
368
|
+
# Log any embedding failures (they will be re-tried in sync insert)
|
|
369
|
+
for i, result in enumerate(results):
|
|
370
|
+
if isinstance(result, Exception):
|
|
371
|
+
log_warning(f"Async embedding failed for document {i}, will retry in sync insert: {result}")
|
|
367
372
|
else:
|
|
368
373
|
embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
|
|
369
|
-
await asyncio.gather(*embed_tasks, return_exceptions=True)
|
|
374
|
+
results = await asyncio.gather(*embed_tasks, return_exceptions=True)
|
|
375
|
+
# Log any embedding failures (they will be re-tried in sync insert)
|
|
376
|
+
for i, result in enumerate(results):
|
|
377
|
+
if isinstance(result, Exception):
|
|
378
|
+
log_warning(f"Async embedding failed for document {i}, will retry in sync insert: {result}")
|
|
370
379
|
|
|
371
380
|
# Use sync insert to avoid sync/async table synchronization issues
|
|
381
|
+
# Sync insert will re-embed any documents that failed async embedding
|
|
372
382
|
self.insert(content_hash, documents, filters)
|
|
373
383
|
|
|
374
384
|
def upsert_available(self) -> bool:
|
|
@@ -414,13 +424,25 @@ class LanceDb(VectorDb):
|
|
|
414
424
|
if is_rate_limit:
|
|
415
425
|
raise e
|
|
416
426
|
else:
|
|
427
|
+
logger.warning(f"Async batch embedding failed, falling back to individual embeddings: {e}")
|
|
417
428
|
embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
|
|
418
|
-
await asyncio.gather(*embed_tasks, return_exceptions=True)
|
|
429
|
+
results = await asyncio.gather(*embed_tasks, return_exceptions=True)
|
|
430
|
+
# Log any embedding failures (they will be re-tried in sync upsert)
|
|
431
|
+
for i, result in enumerate(results):
|
|
432
|
+
if isinstance(result, Exception):
|
|
433
|
+
log_warning(
|
|
434
|
+
f"Async embedding failed for document {i}, will retry in sync upsert: {result}"
|
|
435
|
+
)
|
|
419
436
|
else:
|
|
420
437
|
embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
|
|
421
|
-
await asyncio.gather(*embed_tasks, return_exceptions=True)
|
|
438
|
+
results = await asyncio.gather(*embed_tasks, return_exceptions=True)
|
|
439
|
+
# Log any embedding failures (they will be re-tried in sync upsert)
|
|
440
|
+
for i, result in enumerate(results):
|
|
441
|
+
if isinstance(result, Exception):
|
|
442
|
+
log_warning(f"Async embedding failed for document {i}, will retry in sync upsert: {result}")
|
|
422
443
|
|
|
423
444
|
# Use sync upsert for reliability
|
|
445
|
+
# Sync upsert (via insert) will re-embed any documents that failed async embedding
|
|
424
446
|
self.upsert(content_hash=content_hash, documents=documents, filters=filters)
|
|
425
447
|
|
|
426
448
|
def search(
|
|
@@ -897,7 +919,7 @@ class LanceDb(VectorDb):
|
|
|
897
919
|
|
|
898
920
|
# Get all documents and filter in Python (LanceDB doesn't support JSON operators)
|
|
899
921
|
total_count = self.table.count_rows()
|
|
900
|
-
results = self.table.search().select(["id", "payload"]).limit(total_count).to_pandas()
|
|
922
|
+
results = self.table.search().select(["id", "payload", "vector"]).limit(total_count).to_pandas()
|
|
901
923
|
|
|
902
924
|
if results.empty:
|
|
903
925
|
logger.debug("No documents found")
|
agno/workflow/workflow.py
CHANGED
|
@@ -3822,6 +3822,7 @@ class Workflow:
|
|
|
3822
3822
|
stream_events: Optional[bool] = None,
|
|
3823
3823
|
background: Optional[bool] = False,
|
|
3824
3824
|
background_tasks: Optional[Any] = None,
|
|
3825
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
3825
3826
|
) -> WorkflowRunOutput: ...
|
|
3826
3827
|
|
|
3827
3828
|
@overload
|
|
@@ -3841,6 +3842,7 @@ class Workflow:
|
|
|
3841
3842
|
stream_events: Optional[bool] = None,
|
|
3842
3843
|
background: Optional[bool] = False,
|
|
3843
3844
|
background_tasks: Optional[Any] = None,
|
|
3845
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
3844
3846
|
) -> Iterator[WorkflowRunOutputEvent]: ...
|
|
3845
3847
|
|
|
3846
3848
|
def run(
|
|
@@ -3855,10 +3857,11 @@ class Workflow:
|
|
|
3855
3857
|
images: Optional[List[Image]] = None,
|
|
3856
3858
|
videos: Optional[List[Video]] = None,
|
|
3857
3859
|
files: Optional[List[File]] = None,
|
|
3858
|
-
stream: bool =
|
|
3860
|
+
stream: Optional[bool] = None,
|
|
3859
3861
|
stream_events: Optional[bool] = None,
|
|
3860
3862
|
background: Optional[bool] = False,
|
|
3861
3863
|
background_tasks: Optional[Any] = None,
|
|
3864
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
3862
3865
|
**kwargs: Any,
|
|
3863
3866
|
) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
|
|
3864
3867
|
"""Execute the workflow synchronously with optional streaming"""
|
|
@@ -3901,8 +3904,9 @@ class Workflow:
|
|
|
3901
3904
|
|
|
3902
3905
|
log_debug(f"Workflow Run Start: {self.name}", center=True)
|
|
3903
3906
|
|
|
3904
|
-
# Use
|
|
3905
|
-
|
|
3907
|
+
# Use stream override value when necessary
|
|
3908
|
+
if stream is None:
|
|
3909
|
+
stream = self.stream or False
|
|
3906
3910
|
stream_events = stream_events or self.stream_events
|
|
3907
3911
|
|
|
3908
3912
|
# Can't stream events if streaming is disabled
|
|
@@ -3937,6 +3941,7 @@ class Workflow:
|
|
|
3937
3941
|
session_state=session_state,
|
|
3938
3942
|
workflow_id=self.id,
|
|
3939
3943
|
workflow_name=self.name,
|
|
3944
|
+
dependencies=dependencies,
|
|
3940
3945
|
)
|
|
3941
3946
|
|
|
3942
3947
|
# Execute workflow agent if configured
|
|
@@ -4004,6 +4009,7 @@ class Workflow:
|
|
|
4004
4009
|
background: Optional[bool] = False,
|
|
4005
4010
|
websocket: Optional[WebSocket] = None,
|
|
4006
4011
|
background_tasks: Optional[Any] = None,
|
|
4012
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
4007
4013
|
) -> WorkflowRunOutput: ...
|
|
4008
4014
|
|
|
4009
4015
|
@overload
|
|
@@ -4024,6 +4030,7 @@ class Workflow:
|
|
|
4024
4030
|
background: Optional[bool] = False,
|
|
4025
4031
|
websocket: Optional[WebSocket] = None,
|
|
4026
4032
|
background_tasks: Optional[Any] = None,
|
|
4033
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
4027
4034
|
) -> AsyncIterator[WorkflowRunOutputEvent]: ...
|
|
4028
4035
|
|
|
4029
4036
|
def arun( # type: ignore
|
|
@@ -4038,11 +4045,12 @@ class Workflow:
|
|
|
4038
4045
|
images: Optional[List[Image]] = None,
|
|
4039
4046
|
videos: Optional[List[Video]] = None,
|
|
4040
4047
|
files: Optional[List[File]] = None,
|
|
4041
|
-
stream: bool =
|
|
4048
|
+
stream: Optional[bool] = None,
|
|
4042
4049
|
stream_events: Optional[bool] = None,
|
|
4043
4050
|
background: Optional[bool] = False,
|
|
4044
4051
|
websocket: Optional[WebSocket] = None,
|
|
4045
4052
|
background_tasks: Optional[Any] = None,
|
|
4053
|
+
dependencies: Optional[Dict[str, Any]] = None,
|
|
4046
4054
|
**kwargs: Any,
|
|
4047
4055
|
) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
|
|
4048
4056
|
"""Execute the workflow synchronously with optional streaming"""
|
|
@@ -4108,12 +4116,14 @@ class Workflow:
|
|
|
4108
4116
|
session_id=session_id,
|
|
4109
4117
|
user_id=user_id,
|
|
4110
4118
|
session_state=session_state,
|
|
4119
|
+
dependencies=dependencies,
|
|
4111
4120
|
)
|
|
4112
4121
|
|
|
4113
4122
|
log_debug(f"Async Workflow Run Start: {self.name}", center=True)
|
|
4114
4123
|
|
|
4115
|
-
# Use
|
|
4116
|
-
|
|
4124
|
+
# Use stream override value when necessary
|
|
4125
|
+
if stream is None:
|
|
4126
|
+
stream = self.stream or False
|
|
4117
4127
|
stream_events = stream_events or self.stream_events
|
|
4118
4128
|
|
|
4119
4129
|
# Can't stream events if streaming is disabled
|