agno 2.4.3__py3-none-any.whl → 2.4.5__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
agno/tools/unsplash.py ADDED
@@ -0,0 +1,341 @@
+ """Unsplash Tools for searching and retrieving high-quality, royalty-free images.
+
+ This toolkit provides AI agents with the ability to search for and retrieve images
+ from Unsplash, a popular platform with over 4.3 million high-quality photos.
+
+ Get your free API key at: https://unsplash.com/developers
+ """
+
+ import json
+ from os import getenv
+ from typing import Any, Dict, List, Optional
+ from urllib.parse import urlencode
+ from urllib.request import Request, urlopen
+
+ from agno.tools import Toolkit
+ from agno.utils.log import log_debug, logger
+
+
+ class UnsplashTools(Toolkit):
+     """A toolkit for searching and retrieving images from Unsplash.
+
+     Unsplash provides access to over 4.3 million high-quality, royalty-free images
+     that can be used for various purposes. This toolkit enables AI agents to:
+     - Search for photos by keywords
+     - Get detailed information about specific photos
+     - Retrieve random photos with optional filters
+     - Track downloads (required by Unsplash API guidelines)
+
+     Example:
+         ```python
+         from agno.agent import Agent
+         from agno.models.openai import OpenAIChat
+         from agno.tools.unsplash import UnsplashTools
+
+         agent = Agent(
+             model=OpenAIChat(id="gpt-4o"),
+             tools=[UnsplashTools()],
+         )
+         agent.print_response("Find me 3 photos of mountains at sunset")
+         ```
+     """
+
+     def __init__(
+         self,
+         access_key: Optional[str] = None,
+         enable_search_photos: bool = True,
+         enable_get_photo: bool = True,
+         enable_get_random_photo: bool = True,
+         enable_download_photo: bool = False,
+         all: bool = False,
+         **kwargs: Any,
+     ):
+         """Initialize the Unsplash toolkit.
+
+         Args:
+             access_key: Unsplash API access key. If not provided, will look for
+                 UNSPLASH_ACCESS_KEY environment variable.
+             enable_search_photos: Enable the search_photos tool. Default: True.
+             enable_get_photo: Enable the get_photo tool. Default: True.
+             enable_get_random_photo: Enable the get_random_photo tool. Default: True.
+             enable_download_photo: Enable the download_photo tool. Default: False.
+             all: Enable all tools. Default: False.
+             **kwargs: Additional arguments passed to the Toolkit base class.
+         """
+         self.access_key = access_key or getenv("UNSPLASH_ACCESS_KEY")
+         if not self.access_key:
+             logger.warning("No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable.")
+
+         self.base_url = "https://api.unsplash.com"
+
+         tools: List[Any] = []
+         if all or enable_search_photos:
+             tools.append(self.search_photos)
+         if all or enable_get_photo:
+             tools.append(self.get_photo)
+         if all or enable_get_random_photo:
+             tools.append(self.get_random_photo)
+         if all or enable_download_photo:
+             tools.append(self.download_photo)
+
+         super().__init__(name="unsplash_tools", tools=tools, **kwargs)
+
+     def _make_request(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+         """Make an authenticated request to the Unsplash API.
+
+         Args:
+             endpoint: API endpoint path (e.g., "/search/photos").
+             params: Optional query parameters.
+
+         Returns:
+             JSON response as a dictionary.
+
+         Raises:
+             Exception: If the API request fails.
+         """
+         url = f"{self.base_url}{endpoint}"
+         if params:
+             url = f"{url}?{urlencode(params)}"
+
+         headers = {
+             "Authorization": f"Client-ID {self.access_key}",
+             "Accept-Version": "v1",
+         }
+
+         request = Request(url, headers=headers)
+         with urlopen(request) as response:
+             return json.loads(response.read().decode())
+
+     def _format_photo(self, photo: Dict[str, Any]) -> Dict[str, Any]:
+         """Format photo data into a clean, consistent structure.
+
+         Args:
+             photo: Raw photo data from Unsplash API.
+
+         Returns:
+             Formatted photo dictionary with essential fields.
+         """
+         return {
+             "id": photo.get("id"),
+             "description": photo.get("description") or photo.get("alt_description"),
+             "width": photo.get("width"),
+             "height": photo.get("height"),
+             "color": photo.get("color"),
+             "created_at": photo.get("created_at"),
+             "urls": {
+                 "raw": photo.get("urls", {}).get("raw"),
+                 "full": photo.get("urls", {}).get("full"),
+                 "regular": photo.get("urls", {}).get("regular"),
+                 "small": photo.get("urls", {}).get("small"),
+                 "thumb": photo.get("urls", {}).get("thumb"),
+             },
+             "author": {
+                 "name": photo.get("user", {}).get("name"),
+                 "username": photo.get("user", {}).get("username"),
+                 "profile_url": photo.get("user", {}).get("links", {}).get("html"),
+             },
+             "links": {
+                 "html": photo.get("links", {}).get("html"),
+                 "download": photo.get("links", {}).get("download"),
+             },
+             "likes": photo.get("likes"),
+             "tags": [tag.get("title") for tag in photo.get("tags", [])[:5] if tag.get("title")],
+         }
+
+     def search_photos(
+         self,
+         query: str,
+         per_page: int = 10,
+         page: int = 1,
+         orientation: Optional[str] = None,
+         color: Optional[str] = None,
+     ) -> str:
+         """Search for photos on Unsplash by keyword.
+
+         Args:
+             query: The search query string (e.g., "mountain sunset", "office workspace").
+             per_page: Number of results per page (1-30). Default: 10.
+             page: Page number to retrieve. Default: 1.
+             orientation: Filter by orientation: "landscape", "portrait", or "squarish".
+             color: Filter by color: "black_and_white", "black", "white", "yellow",
+                 "orange", "red", "purple", "magenta", "green", "teal", "blue".
+
+         Returns:
+             JSON string containing search results with photo details including
+             URLs, author information, and metadata.
+         """
+         if not self.access_key:
+             return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
+
+         if not query:
+             return "Error: Please provide a search query."
+
+         log_debug(f"Searching Unsplash for: {query}")
+
+         try:
+             params: Dict[str, Any] = {
+                 "query": query,
+                 "per_page": min(max(1, per_page), 30),
+                 "page": max(1, page),
+             }
+
+             if orientation and orientation in ["landscape", "portrait", "squarish"]:
+                 params["orientation"] = orientation
+
+             if color:
+                 valid_colors = [
+                     "black_and_white",
+                     "black",
+                     "white",
+                     "yellow",
+                     "orange",
+                     "red",
+                     "purple",
+                     "magenta",
+                     "green",
+                     "teal",
+                     "blue",
+                 ]
+                 if color in valid_colors:
+                     params["color"] = color
+
+             response = self._make_request("/search/photos", params)
+
+             results = {
+                 "total": response.get("total", 0),
+                 "total_pages": response.get("total_pages", 0),
+                 "photos": [self._format_photo(photo) for photo in response.get("results", [])],
+             }
+
+             return json.dumps(results, indent=2)
+
+         except Exception as e:
+             return f"Error searching Unsplash: {e}"
+
+     def get_photo(self, photo_id: str) -> str:
+         """Get detailed information about a specific photo.
+
+         Args:
+             photo_id: The unique identifier of the photo.
+
+         Returns:
+             JSON string containing detailed photo information including
+             URLs, author, metadata, EXIF data, and location if available.
+         """
+         if not self.access_key:
+             return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
+
+         if not photo_id:
+             return "Error: Please provide a photo ID."
+
+         log_debug(f"Getting Unsplash photo: {photo_id}")
+
+         try:
+             photo = self._make_request(f"/photos/{photo_id}")
+
+             result = self._format_photo(photo)
+
+             # Add extra details available for single photo requests
+             if photo.get("exif"):
+                 result["exif"] = {
+                     "make": photo["exif"].get("make"),
+                     "model": photo["exif"].get("model"),
+                     "aperture": photo["exif"].get("aperture"),
+                     "exposure_time": photo["exif"].get("exposure_time"),
+                     "focal_length": photo["exif"].get("focal_length"),
+                     "iso": photo["exif"].get("iso"),
+                 }
+
+             if photo.get("location"):
+                 result["location"] = {
+                     "name": photo["location"].get("name"),
+                     "city": photo["location"].get("city"),
+                     "country": photo["location"].get("country"),
+                 }
+
+             result["views"] = photo.get("views")
+             result["downloads"] = photo.get("downloads")
+
+             return json.dumps(result, indent=2)
+
+         except Exception as e:
+             return f"Error getting photo: {e}"
+
+     def get_random_photo(
+         self,
+         query: Optional[str] = None,
+         orientation: Optional[str] = None,
+         count: int = 1,
+     ) -> str:
+         """Get random photo(s) from Unsplash.
+
+         Args:
+             query: Optional search query to filter random photos.
+             orientation: Filter by orientation: "landscape", "portrait", or "squarish".
+             count: Number of random photos to return (1-30). Default: 1.
+
+         Returns:
+             JSON string containing random photo(s) data.
+         """
+         if not self.access_key:
+             return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
+
+         log_debug(f"Getting random Unsplash photo (query={query})")
+
+         try:
+             params: Dict[str, Any] = {
+                 "count": min(max(1, count), 30),
+             }
+
+             if query:
+                 params["query"] = query
+
+             if orientation and orientation in ["landscape", "portrait", "squarish"]:
+                 params["orientation"] = orientation
+
+             response = self._make_request("/photos/random", params)
+
+             # Response is a list when count > 1, single object when count = 1
+             if isinstance(response, list):
+                 photos = [self._format_photo(photo) for photo in response]
+             else:
+                 photos = [self._format_photo(response)]
+
+             return json.dumps({"photos": photos}, indent=2)
+
+         except Exception as e:
+             return f"Error getting random photo: {e}"
+
+     def download_photo(self, photo_id: str) -> str:
+         """Trigger a download event for a photo.
+
+         This is required by the Unsplash API guidelines when a photo is downloaded
+         or used. It helps photographers track the usage of their work.
+
+         Args:
+             photo_id: The unique identifier of the photo being downloaded.
+
+         Returns:
+             JSON string with the download URL.
+         """
+         if not self.access_key:
+             return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
+
+         if not photo_id:
+             return "Error: Please provide a photo ID."
+
+         log_debug(f"Tracking download for Unsplash photo: {photo_id}")
+
+         try:
+             response = self._make_request(f"/photos/{photo_id}/download")
+
+             return json.dumps(
+                 {
+                     "photo_id": photo_id,
+                     "download_url": response.get("url"),
+                 },
+                 indent=2,
+             )
+
+         except Exception as e:
+             return f"Error tracking download: {e}"
@@ -105,8 +105,9 @@ def print_response_stream(
          if response_event.is_paused: # type: ignore
              response_event = cast(RunPausedEvent, response_event) # type: ignore
              response_panel = create_paused_run_output_panel(response_event) # type: ignore
-             panels.append(response_panel)
-             live_log.update(Group(*panels))
+             if response_panel is not None:
+                 panels.append(response_panel)
+                 live_log.update(Group(*panels))
              return

          if response_event.event == RunEvent.pre_hook_completed: # type: ignore
@@ -310,8 +311,9 @@ async def aprint_response_stream(
          if isinstance(resp, tuple(get_args(RunOutputEvent))):
              if resp.is_paused:
                  response_panel = create_paused_run_output_panel(resp) # type: ignore
-                 panels.append(response_panel)
-                 live_log.update(Group(*panels))
+                 if response_panel is not None:
+                     panels.append(response_panel)
+                     live_log.update(Group(*panels))
                  break

          if (
@@ -798,7 +800,8 @@ def build_panels(

      if isinstance(run_response, RunOutput) and run_response.is_paused:
          response_panel = create_paused_run_output_panel(run_response)
-         panels.append(response_panel)
+         if response_panel is not None:
+             panels.append(response_panel)
          return panels

      if isinstance(run_response, RunOutput) and run_response.reasoning_steps is not None:
agno/utils/response.py CHANGED
@@ -80,35 +80,41 @@ def format_tool_calls(tool_calls: List[ToolExecution]) -> List[str]:
  def create_paused_run_output_panel(run_output: Union[RunPausedEvent, RunOutput]):
      from rich.text import Text

+     # Filter out silent tools - they don't produce verbose output
+     non_silent_tools = [tc for tc in (run_output.tools or []) if not tc.external_execution_silent]
+
+     # If all tools are silent, return None to indicate no panel should be shown
+     if not non_silent_tools:
+         return None
+
      tool_calls_content = Text("Run is paused. ")
-     if run_output.tools is not None:
-         if any(tc.requires_confirmation for tc in run_output.tools):
-             tool_calls_content.append("The following tool calls require confirmation:\n")
-             for tool_call in run_output.tools:
-                 if tool_call.requires_confirmation:
-                     args_str = ""
-                     for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
-                         args_str += f"{arg}={value}, "
-                     args_str = args_str.rstrip(", ")
-                     tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
-         if any(tc.requires_user_input for tc in run_output.tools):
-             tool_calls_content.append("The following tool calls require user input:\n")
-             for tool_call in run_output.tools:
-                 if tool_call.requires_user_input:
-                     args_str = ""
-                     for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
-                         args_str += f"{arg}={value}, "
-                     args_str = args_str.rstrip(", ")
-                     tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
-         if any(tc.external_execution_required for tc in run_output.tools):
-             tool_calls_content.append("The following tool calls require external execution:\n")
-             for tool_call in run_output.tools:
-                 if tool_call.external_execution_required:
-                     args_str = ""
-                     for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
-                         args_str += f"{arg}={value}, "
-                     args_str = args_str.rstrip(", ")
-                     tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
+     if any(tc.requires_confirmation for tc in non_silent_tools):
+         tool_calls_content.append("The following tool calls require confirmation:\n")
+         for tool_call in non_silent_tools:
+             if tool_call.requires_confirmation:
+                 args_str = ""
+                 for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
+                     args_str += f"{arg}={value}, "
+                 args_str = args_str.rstrip(", ")
+                 tool_calls_content.append(f" {tool_call.tool_name}({args_str})\n")
+     if any(tc.requires_user_input for tc in non_silent_tools):
+         tool_calls_content.append("The following tool calls require user input:\n")
+         for tool_call in non_silent_tools:
+             if tool_call.requires_user_input:
+                 args_str = ""
+                 for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
+                     args_str += f"{arg}={value}, "
+                 args_str = args_str.rstrip(", ")
+                 tool_calls_content.append(f" {tool_call.tool_name}({args_str})\n")
+     if any(tc.external_execution_required for tc in non_silent_tools):
+         tool_calls_content.append("The following tool calls require external execution:\n")
+         for tool_call in non_silent_tools:
+             if tool_call.external_execution_required:
+                 args_str = ""
+                 for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
+                     args_str += f"{arg}={value}, "
+                 args_str = args_str.rstrip(", ")
+                 tool_calls_content.append(f" {tool_call.tool_name}({args_str})\n")

      # Create panel for response
      response_panel = create_panel(
@@ -122,6 +128,10 @@ def create_paused_run_output_panel(run_output: Union[RunPausedEvent, RunOutput])
  def get_paused_content(run_output: RunOutput) -> str:
      paused_content = ""
      for tool in run_output.tools or []:
+         # Skip silent tools - they don't produce verbose paused messages
+         if tool.external_execution_silent:
+             continue
+
          # Initialize flags for each tool
          confirmation_required = False
          user_input_required = False
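
The earlier printer changes in this diff exist because create_paused_run_output_panel can now return None: tools flagged with external_execution_silent are filtered out, and if nothing remains there is no panel to render. A minimal stand-in sketch of that contract; the ToolStub class and paused_panel_text function below are hypothetical illustrations, not agno APIs (the real objects are ToolExecution instances and a rich Panel):

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class ToolStub:
    # Hypothetical stand-in for a ToolExecution, carrying only the flag the filter reads
    tool_name: str
    external_execution_silent: bool = False


def paused_panel_text(tools: Optional[List[ToolStub]]) -> Optional[str]:
    # Mirrors the new behaviour: drop silent tools; if none remain, return None
    non_silent = [tc for tc in (tools or []) if not tc.external_execution_silent]
    if not non_silent:
        return None
    return "Run is paused. " + ", ".join(tc.tool_name for tc in non_silent)


panel = paused_panel_text([ToolStub("send_invoice", external_execution_silent=True)])
if panel is not None:  # same guard the printer code now applies before appending the panel
    print(panel)
else:
    print("all paused tools are silent; nothing to render")
```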
@@ -282,9 +282,10 @@ class LanceDb(VectorDb):
                  meta_data.update(filters)
                  document.meta_data = meta_data

-             # Only embed if the document doesn't already have an embedding
+             # Only embed if the document doesn't already have a valid embedding
              # This prevents duplicate embedding when called from async_insert or async_upsert
-             if document.embedding is None:
+             # Check for both None and empty list (async embedding failures return [])
+             if document.embedding is None or (isinstance(document.embedding, list) and len(document.embedding) == 0):
                  document.embed(embedder=self.embedder)
              cleaned_content = document.content.replace("\x00", "\ufffd")
              # Include content_hash in ID to ensure uniqueness across different content hashes
@@ -363,12 +364,21 @@ class LanceDb(VectorDb):
                  else:
                      logger.warning(f"Async batch embedding failed, falling back to individual embeddings: {e}")
                      embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
-                     await asyncio.gather(*embed_tasks, return_exceptions=True)
+                     results = await asyncio.gather(*embed_tasks, return_exceptions=True)
+                     # Log any embedding failures (they will be re-tried in sync insert)
+                     for i, result in enumerate(results):
+                         if isinstance(result, Exception):
+                             log_warning(f"Async embedding failed for document {i}, will retry in sync insert: {result}")
          else:
              embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
-             await asyncio.gather(*embed_tasks, return_exceptions=True)
+             results = await asyncio.gather(*embed_tasks, return_exceptions=True)
+             # Log any embedding failures (they will be re-tried in sync insert)
+             for i, result in enumerate(results):
+                 if isinstance(result, Exception):
+                     log_warning(f"Async embedding failed for document {i}, will retry in sync insert: {result}")

          # Use sync insert to avoid sync/async table synchronization issues
+         # Sync insert will re-embed any documents that failed async embedding
          self.insert(content_hash, documents, filters)

      def upsert_available(self) -> bool:
@@ -414,13 +424,25 @@ class LanceDb(VectorDb):
                  if is_rate_limit:
                      raise e
                  else:
+                     logger.warning(f"Async batch embedding failed, falling back to individual embeddings: {e}")
                      embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
-                     await asyncio.gather(*embed_tasks, return_exceptions=True)
+                     results = await asyncio.gather(*embed_tasks, return_exceptions=True)
+                     # Log any embedding failures (they will be re-tried in sync upsert)
+                     for i, result in enumerate(results):
+                         if isinstance(result, Exception):
+                             log_warning(
+                                 f"Async embedding failed for document {i}, will retry in sync upsert: {result}"
+                             )
          else:
              embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
-             await asyncio.gather(*embed_tasks, return_exceptions=True)
+             results = await asyncio.gather(*embed_tasks, return_exceptions=True)
+             # Log any embedding failures (they will be re-tried in sync upsert)
+             for i, result in enumerate(results):
+                 if isinstance(result, Exception):
+                     log_warning(f"Async embedding failed for document {i}, will retry in sync upsert: {result}")

          # Use sync upsert for reliability
+         # Sync upsert (via insert) will re-embed any documents that failed async embedding
          self.upsert(content_hash=content_hash, documents=documents, filters=filters)

      def search(
@@ -897,7 +919,7 @@ class LanceDb(VectorDb):

          # Get all documents and filter in Python (LanceDB doesn't support JSON operators)
          total_count = self.table.count_rows()
-         results = self.table.search().select(["id", "payload"]).limit(total_count).to_pandas()
+         results = self.table.search().select(["id", "payload", "vector"]).limit(total_count).to_pandas()

          if results.empty:
              logger.debug("No documents found")
agno/workflow/workflow.py CHANGED
@@ -3822,6 +3822,7 @@ class Workflow:
          stream_events: Optional[bool] = None,
          background: Optional[bool] = False,
          background_tasks: Optional[Any] = None,
+         dependencies: Optional[Dict[str, Any]] = None,
      ) -> WorkflowRunOutput: ...

      @overload
@@ -3841,6 +3842,7 @@ class Workflow:
          stream_events: Optional[bool] = None,
          background: Optional[bool] = False,
          background_tasks: Optional[Any] = None,
+         dependencies: Optional[Dict[str, Any]] = None,
      ) -> Iterator[WorkflowRunOutputEvent]: ...

      def run(
@@ -3859,6 +3861,7 @@ class Workflow:
          stream_events: Optional[bool] = None,
          background: Optional[bool] = False,
          background_tasks: Optional[Any] = None,
+         dependencies: Optional[Dict[str, Any]] = None,
          **kwargs: Any,
      ) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
          """Execute the workflow synchronously with optional streaming"""
@@ -3938,6 +3941,7 @@ class Workflow:
              session_state=session_state,
              workflow_id=self.id,
              workflow_name=self.name,
+             dependencies=dependencies,
          )

          # Execute workflow agent if configured
@@ -4005,6 +4009,7 @@ class Workflow:
          background: Optional[bool] = False,
          websocket: Optional[WebSocket] = None,
          background_tasks: Optional[Any] = None,
+         dependencies: Optional[Dict[str, Any]] = None,
      ) -> WorkflowRunOutput: ...

      @overload
@@ -4025,6 +4030,7 @@ class Workflow:
          background: Optional[bool] = False,
          websocket: Optional[WebSocket] = None,
          background_tasks: Optional[Any] = None,
+         dependencies: Optional[Dict[str, Any]] = None,
      ) -> AsyncIterator[WorkflowRunOutputEvent]: ...

      def arun( # type: ignore
@@ -4044,6 +4050,7 @@ class Workflow:
          background: Optional[bool] = False,
          websocket: Optional[WebSocket] = None,
          background_tasks: Optional[Any] = None,
+         dependencies: Optional[Dict[str, Any]] = None,
          **kwargs: Any,
      ) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
          """Execute the workflow synchronously with optional streaming"""
@@ -4109,6 +4116,7 @@ class Workflow:
              session_id=session_id,
              user_id=user_id,
              session_state=session_state,
+             dependencies=dependencies,
          )

          log_debug(f"Async Workflow Run Start: {self.name}", center=True)
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agno
- Version: 2.4.3
+ Version: 2.4.5
  Summary: Agno: a lightweight library for building Multi-Agent Systems
  Author-email: Ashpreet Bedi <ashpreet@agno.com>
  Project-URL: homepage, https://agno.com
@@ -132,6 +132,8 @@ Provides-Extra: evm
  Requires-Dist: web3; extra == "evm"
  Provides-Extra: exa
  Requires-Dist: exa_py; extra == "exa"
+ Provides-Extra: seltz
+ Requires-Dist: seltz; extra == "seltz"
  Provides-Extra: fal
  Requires-Dist: fal_client; extra == "fal"
  Provides-Extra: firecrawl
@@ -308,6 +310,7 @@ Provides-Extra: tools
  Requires-Dist: agno[apify]; extra == "tools"
  Requires-Dist: agno[arxiv]; extra == "tools"
  Requires-Dist: agno[exa]; extra == "tools"
+ Requires-Dist: agno[seltz]; extra == "tools"
  Requires-Dist: agno[cartesia]; extra == "tools"
  Requires-Dist: agno[ddg]; extra == "tools"
  Requires-Dist: agno[duckdb]; extra == "tools"
@@ -531,7 +534,7 @@ https://github.com/user-attachments/assets/feb23db8-15cc-4e88-be7c-01a21a03ebf6

  ## Getting Started

- 1. Follow the [getting started guide](https://github.com/agno-agi/agno/tree/main/cookbook/00_getting_started)
+ 1. Follow the [quickstart guide](https://github.com/agno-agi/agno/tree/main/cookbook/00_quickstart)
  2. Browse the [cookbook](https://github.com/agno-agi/agno/tree/main/cookbook) for real-world examples
  3. Read the [docs](https://docs.agno.com) to go deeper
