agno 2.4.3__py3-none-any.whl → 2.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
1
1
  import json
2
2
  import time
3
3
  from datetime import date, datetime, timedelta, timezone
4
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
4
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast
5
5
  from uuid import uuid4
6
6
 
7
7
  if TYPE_CHECKING:
@@ -25,7 +25,7 @@ from agno.db.schemas.culture import CulturalKnowledge
25
25
  from agno.db.schemas.evals import EvalFilterType, EvalRunRecord, EvalType
26
26
  from agno.db.schemas.knowledge import KnowledgeRow
27
27
  from agno.db.schemas.memory import UserMemory
28
- from agno.db.utils import deserialize_session_json_fields, serialize_session_json_fields
28
+ from agno.db.utils import deserialize_session_json_fields
29
29
  from agno.session import AgentSession, Session, TeamSession, WorkflowSession
30
30
  from agno.utils.log import log_debug, log_error, log_info
31
31
  from agno.utils.string import generate_id
@@ -496,6 +496,29 @@ class FirestoreDb(BaseDb):
496
496
  if doc_ref is None:
497
497
  return None
498
498
 
499
+ # Check if session_data is stored as JSON string (legacy) or native map.
500
+ # Legacy sessions stored session_data as a JSON string, but Firestore's dot notation
501
+ # (e.g., "session_data.session_name") only works with native maps. Using dot notation
502
+ # on a JSON string overwrites the entire field, causing data loss.
503
+ # For legacy sessions, we use read-modify-write which also migrates them to native maps.
504
+ current_doc = doc_ref.get()
505
+ if not current_doc.exists:
506
+ return None
507
+
508
+ current_data = current_doc.to_dict()
509
+ session_data = current_data.get("session_data") if current_data else None
510
+
511
+ if session_data is None or isinstance(session_data, str):
512
+ existing_session = self.get_session(session_id, session_type, deserialize=True)
513
+ if existing_session is None:
514
+ return None
515
+ existing_session = cast(Session, existing_session)
516
+ if existing_session.session_data is None:
517
+ existing_session.session_data = {}
518
+ existing_session.session_data["session_name"] = session_name
519
+ return self.upsert_session(existing_session, deserialize=deserialize)
520
+
521
+ # Native map format - use efficient dot notation update
499
522
  doc_ref.update({"session_data.session_name": session_name, "updated_at": int(time.time())})
500
523
 
501
524
  updated_doc = doc_ref.get()
@@ -539,50 +562,50 @@ class FirestoreDb(BaseDb):
539
562
  """
540
563
  try:
541
564
  collection_ref = self._get_collection(table_type="sessions", create_collection_if_not_found=True)
542
- serialized_session_dict = serialize_session_json_fields(session.to_dict())
565
+ session_dict = session.to_dict()
543
566
 
544
567
  if isinstance(session, AgentSession):
545
568
  record = {
546
- "session_id": serialized_session_dict.get("session_id"),
569
+ "session_id": session_dict.get("session_id"),
547
570
  "session_type": SessionType.AGENT.value,
548
- "agent_id": serialized_session_dict.get("agent_id"),
549
- "user_id": serialized_session_dict.get("user_id"),
550
- "runs": serialized_session_dict.get("runs"),
551
- "agent_data": serialized_session_dict.get("agent_data"),
552
- "session_data": serialized_session_dict.get("session_data"),
553
- "summary": serialized_session_dict.get("summary"),
554
- "metadata": serialized_session_dict.get("metadata"),
555
- "created_at": serialized_session_dict.get("created_at"),
571
+ "agent_id": session_dict.get("agent_id"),
572
+ "user_id": session_dict.get("user_id"),
573
+ "runs": session_dict.get("runs"),
574
+ "agent_data": session_dict.get("agent_data"),
575
+ "session_data": session_dict.get("session_data"),
576
+ "summary": session_dict.get("summary"),
577
+ "metadata": session_dict.get("metadata"),
578
+ "created_at": session_dict.get("created_at"),
556
579
  "updated_at": int(time.time()),
557
580
  }
558
581
 
559
582
  elif isinstance(session, TeamSession):
560
583
  record = {
561
- "session_id": serialized_session_dict.get("session_id"),
584
+ "session_id": session_dict.get("session_id"),
562
585
  "session_type": SessionType.TEAM.value,
563
- "team_id": serialized_session_dict.get("team_id"),
564
- "user_id": serialized_session_dict.get("user_id"),
565
- "runs": serialized_session_dict.get("runs"),
566
- "team_data": serialized_session_dict.get("team_data"),
567
- "session_data": serialized_session_dict.get("session_data"),
568
- "summary": serialized_session_dict.get("summary"),
569
- "metadata": serialized_session_dict.get("metadata"),
570
- "created_at": serialized_session_dict.get("created_at"),
586
+ "team_id": session_dict.get("team_id"),
587
+ "user_id": session_dict.get("user_id"),
588
+ "runs": session_dict.get("runs"),
589
+ "team_data": session_dict.get("team_data"),
590
+ "session_data": session_dict.get("session_data"),
591
+ "summary": session_dict.get("summary"),
592
+ "metadata": session_dict.get("metadata"),
593
+ "created_at": session_dict.get("created_at"),
571
594
  "updated_at": int(time.time()),
572
595
  }
573
596
 
574
597
  elif isinstance(session, WorkflowSession):
575
598
  record = {
576
- "session_id": serialized_session_dict.get("session_id"),
599
+ "session_id": session_dict.get("session_id"),
577
600
  "session_type": SessionType.WORKFLOW.value,
578
- "workflow_id": serialized_session_dict.get("workflow_id"),
579
- "user_id": serialized_session_dict.get("user_id"),
580
- "runs": serialized_session_dict.get("runs"),
581
- "workflow_data": serialized_session_dict.get("workflow_data"),
582
- "session_data": serialized_session_dict.get("session_data"),
583
- "summary": serialized_session_dict.get("summary"),
584
- "metadata": serialized_session_dict.get("metadata"),
585
- "created_at": serialized_session_dict.get("created_at"),
601
+ "workflow_id": session_dict.get("workflow_id"),
602
+ "user_id": session_dict.get("user_id"),
603
+ "runs": session_dict.get("runs"),
604
+ "workflow_data": session_dict.get("workflow_data"),
605
+ "session_data": session_dict.get("session_data"),
606
+ "summary": session_dict.get("summary"),
607
+ "metadata": session_dict.get("metadata"),
608
+ "created_at": session_dict.get("created_at"),
586
609
  "updated_at": int(time.time()),
587
610
  }
588
611
 
@@ -292,25 +292,25 @@ def get_schema(table_type: TableType, table_name: str) -> str:
292
292
  elif table_type == "knowledge":
293
293
  return dedent(f"""
294
294
  {define_table}
295
- DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime VALUE time::now();
295
+ DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime DEFAULT time::now();
296
296
  DEFINE FIELD OVERWRITE updated_at ON {table_name} TYPE datetime VALUE time::now();
297
297
  """)
298
298
  elif table_type == "culture":
299
299
  return dedent(f"""
300
300
  {define_table}
301
- DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime VALUE time::now();
301
+ DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime DEFAULT time::now();
302
302
  DEFINE FIELD OVERWRITE updated_at ON {table_name} TYPE datetime VALUE time::now();
303
303
  """)
304
304
  elif table_type == "sessions":
305
305
  return dedent(f"""
306
306
  {define_table}
307
- DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime VALUE time::now();
307
+ DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime DEFAULT time::now();
308
308
  DEFINE FIELD OVERWRITE updated_at ON {table_name} TYPE datetime VALUE time::now();
309
309
  """)
310
310
  elif table_type == "traces":
311
311
  return dedent(f"""
312
312
  {define_table}
313
- DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime VALUE time::now();
313
+ DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime DEFAULT time::now();
314
314
  DEFINE INDEX idx_trace_id ON {table_name} FIELDS trace_id UNIQUE;
315
315
  DEFINE INDEX idx_run_id ON {table_name} FIELDS run_id;
316
316
  DEFINE INDEX idx_session_id ON {table_name} FIELDS session_id;
@@ -324,7 +324,7 @@ def get_schema(table_type: TableType, table_name: str) -> str:
324
324
  elif table_type == "spans":
325
325
  return dedent(f"""
326
326
  {define_table}
327
- DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime VALUE time::now();
327
+ DEFINE FIELD OVERWRITE created_at ON {table_name} TYPE datetime DEFAULT time::now();
328
328
  DEFINE INDEX idx_span_id ON {table_name} FIELDS span_id UNIQUE;
329
329
  DEFINE INDEX idx_trace_id ON {table_name} FIELDS trace_id;
330
330
  DEFINE INDEX idx_parent_span_id ON {table_name} FIELDS parent_span_id;
@@ -287,6 +287,7 @@ class SurrealDb(BaseDb):
287
287
  where = WhereClause()
288
288
  if user_id is not None:
289
289
  where = where.and_("user_id", user_id)
290
+
290
291
  where_clause, where_vars = where.build()
291
292
  query = dedent(f"""
292
293
  SELECT *
@@ -295,7 +296,18 @@ class SurrealDb(BaseDb):
295
296
  """)
296
297
  vars = {"record": record, **where_vars}
297
298
  raw = self._query_one(query, vars, dict)
298
- if raw is None or not deserialize:
299
+ if raw is None:
300
+ return None
301
+
302
+ # Verify session type matches
303
+ if session_type == SessionType.AGENT and raw.get("agent") is None:
304
+ return None
305
+ elif session_type == SessionType.TEAM and raw.get("team") is None:
306
+ return None
307
+ elif session_type == SessionType.WORKFLOW and raw.get("workflow") is None:
308
+ return None
309
+
310
+ if not deserialize:
299
311
  return raw
300
312
 
301
313
  return deserialize_session(session_type, raw)
@@ -37,6 +37,103 @@ class MarkdownChunking(ChunkingStrategy):
37
37
  if not (1 <= split_on_headings <= 6):
38
38
  raise ValueError("split_on_headings must be between 1 and 6 when using integer value")
39
39
 
40
    def _split_large_section(self, section: str) -> List[str]:
        """
        Split a large section into smaller chunks while preserving the heading context.
        Each sub-chunk will include the original heading for context.

        Args:
            section: The section content to split (may start with a heading)

        Returns:
            List of chunks, each respecting chunk_size
        """
        # Fast path: the whole section already fits in one chunk.
        if len(section) <= self.chunk_size:
            return [section]

        # Extract heading and content from the section.
        # A heading is an ATX-style line: 1-6 '#' followed by whitespace.
        lines = section.split("\n")
        if lines and re.match(r"^#{1,6}\s+", lines[0]):
            heading = lines[0]
            content_lines = lines[1:]
        else:
            heading = ""
            content_lines = lines

        content = "\n".join(content_lines).strip()

        # If just heading and small content, return as-is.
        # NOTE(review): `len(section) <= self.chunk_size` was already checked above,
        # so only the `not content` condition can actually trigger here.
        if not content or len(section) <= self.chunk_size:
            return [section]

        # Split content by paragraphs (one or more blank lines).
        paragraphs = re.split(r"\n\n+", content)

        chunks: List[str] = []
        current_chunk_content: List[str] = []
        # Account for heading size in each chunk, since the heading is
        # re-prepended to every sub-chunk for context.
        heading_size = len(heading) + 2 if heading else 0  # +2 for "\n\n"

        for para in paragraphs:
            para = para.strip()
            if not para:
                continue

            # Size of the accumulated paragraphs plus "\n\n" separators.
            # (Counts one trailing separator extra, so the estimate is conservative.)
            current_size = sum(len(p) for p in current_chunk_content) + len(current_chunk_content) * 2  # \n\n
            para_size = len(para)

            # Check if adding this paragraph would exceed chunk_size
            if current_chunk_content and (heading_size + current_size + para_size + 2) > self.chunk_size:
                # Save current chunk
                chunk_text = (
                    heading + "\n\n" + "\n\n".join(current_chunk_content)
                    if heading
                    else "\n\n".join(current_chunk_content)
                )
                chunks.append(chunk_text.strip())
                current_chunk_content = []

            # If single paragraph exceeds chunk_size, split it further
            if para_size + heading_size > self.chunk_size:
                # Save any accumulated content first
                if current_chunk_content:
                    chunk_text = (
                        heading + "\n\n" + "\n\n".join(current_chunk_content)
                        if heading
                        else "\n\n".join(current_chunk_content)
                    )
                    chunks.append(chunk_text.strip())
                    current_chunk_content = []

                # Split the large paragraph by words so each piece (plus the
                # re-prepended heading) still fits within chunk_size.
                available_size = self.chunk_size - heading_size
                words = para.split()
                current_words: List[str] = []
                current_word_len = 0

                for word in words:
                    # +1 accounts for the joining space before each word.
                    if current_word_len + len(word) + 1 > available_size and current_words:
                        chunk_text = heading + "\n\n" + " ".join(current_words) if heading else " ".join(current_words)
                        chunks.append(chunk_text.strip())
                        current_words = []
                        current_word_len = 0
                    current_words.append(word)
                    current_word_len += len(word) + 1

                # Leftover words are carried back into the paragraph accumulator
                # so they can merge with subsequent small paragraphs.
                if current_words:
                    current_chunk_content.append(" ".join(current_words))
            else:
                current_chunk_content.append(para)

        # add the remaining content
        if current_chunk_content:
            chunk_text = (
                heading + "\n\n" + "\n\n".join(current_chunk_content) if heading else "\n\n".join(current_chunk_content)
            )
            chunks.append(chunk_text.strip())

        # Fall back to the original section if nothing was produced.
        return chunks if chunks else [section]
136
+
40
137
  def _split_by_headings(self, content: str) -> List[str]:
41
138
  """
42
139
  Split markdown content by headings, keeping each heading with its content.
@@ -163,18 +260,22 @@ class MarkdownChunking(ChunkingStrategy):
163
260
  section_size = len(section)
164
261
 
165
262
  # When split_on_headings is True or an int, each section becomes its own chunk
263
+ # But if section exceeds chunk_size, split it further
166
264
  if self.split_on_headings:
167
- meta_data = chunk_meta_data.copy()
168
- meta_data["chunk"] = chunk_number
169
- chunk_id = None
170
- if document.id:
171
- chunk_id = f"{document.id}_{chunk_number}"
172
- elif document.name:
173
- chunk_id = f"{document.name}_{chunk_number}"
174
- meta_data["chunk_size"] = section_size
175
-
176
- chunks.append(Document(id=chunk_id, name=document.name, meta_data=meta_data, content=section))
177
- chunk_number += 1
265
+ # Split large sections to respect chunk_size
266
+ sub_chunks = self._split_large_section(section)
267
+ for sub_chunk in sub_chunks:
268
+ meta_data = chunk_meta_data.copy()
269
+ meta_data["chunk"] = chunk_number
270
+ chunk_id = None
271
+ if document.id:
272
+ chunk_id = f"{document.id}_{chunk_number}"
273
+ elif document.name:
274
+ chunk_id = f"{document.name}_{chunk_number}"
275
+ meta_data["chunk_size"] = len(sub_chunk)
276
+
277
+ chunks.append(Document(id=chunk_id, name=document.name, meta_data=meta_data, content=sub_chunk))
278
+ chunk_number += 1
178
279
  elif current_size + section_size <= self.chunk_size:
179
280
  current_chunk.append(section)
180
281
  current_size += section_size
agno/models/base.py CHANGED
@@ -2109,6 +2109,7 @@ class Model(ABC):
2109
2109
  tool_name=fc.function.name,
2110
2110
  tool_args=fc.arguments,
2111
2111
  requires_confirmation=True,
2112
+ external_execution_silent=fc.function.external_execution_silent,
2112
2113
  )
2113
2114
  )
2114
2115
 
@@ -2128,6 +2129,7 @@ class Model(ABC):
2128
2129
  tool_args=fc.arguments,
2129
2130
  requires_user_input=True,
2130
2131
  user_input_schema=user_input_schema,
2132
+ external_execution_silent=fc.function.external_execution_silent,
2131
2133
  )
2132
2134
  )
2133
2135
 
@@ -2176,6 +2178,7 @@ class Model(ABC):
2176
2178
  tool_name=fc.function.name,
2177
2179
  tool_args=fc.arguments,
2178
2180
  external_execution_required=True,
2181
+ external_execution_silent=fc.function.external_execution_silent,
2179
2182
  )
2180
2183
  )
2181
2184
 
@@ -2270,6 +2273,7 @@ class Model(ABC):
2270
2273
  tool_name=fc.function.name,
2271
2274
  tool_args=fc.arguments,
2272
2275
  requires_confirmation=True,
2276
+ external_execution_silent=fc.function.external_execution_silent,
2273
2277
  )
2274
2278
  )
2275
2279
  # If the function requires user input, we yield a message to the user
@@ -2288,6 +2292,7 @@ class Model(ABC):
2288
2292
  tool_args=fc.arguments,
2289
2293
  requires_user_input=True,
2290
2294
  user_input_schema=user_input_schema,
2295
+ external_execution_silent=fc.function.external_execution_silent,
2291
2296
  )
2292
2297
  )
2293
2298
  # If the function is from the user control flow tools, we handle it here
@@ -2340,6 +2345,7 @@ class Model(ABC):
2340
2345
  tool_name=fc.function.name,
2341
2346
  tool_args=fc.arguments,
2342
2347
  external_execution_required=True,
2348
+ external_execution_silent=fc.function.external_execution_silent,
2343
2349
  )
2344
2350
  )
2345
2351
 
@@ -0,0 +1,3 @@
1
# Public entry point for the Moonshot model provider package.
# Re-exports MoonShot so callers can `from agno.models.moonshot import MoonShot`.
from agno.models.moonshot.moonshot import MoonShot

__all__ = ["MoonShot"]
@@ -0,0 +1,57 @@
1
from dataclasses import dataclass, field
from os import getenv
from typing import Any, Dict, Optional

from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike


@dataclass
class MoonShot(OpenAILike):
    """
    A class for interacting with MoonShot models.

    Attributes:
        id (str): The model id. Defaults to "kimi-k2-thinking".
        name (str): The model name. Defaults to "Moonshot".
        provider (str): The provider name. Defaults to "Moonshot".
        api_key (Optional[str]): The API key. Read from MOONSHOT_API_KEY if unset.
        base_url (str): The base URL. Defaults to "https://api.moonshot.ai/v1".
    """

    id: str = "kimi-k2-thinking"
    name: str = "Moonshot"
    provider: str = "Moonshot"

    api_key: Optional[str] = field(default_factory=lambda: getenv("MOONSHOT_API_KEY"))
    base_url: str = "https://api.moonshot.ai/v1"

    def _get_client_params(self) -> Dict[str, Any]:
        """Build the keyword arguments for the OpenAI-compatible client.

        Returns:
            Dict[str, Any]: Client constructor kwargs with unset options removed.

        Raises:
            ModelAuthenticationError: If no API key is configured.
        """
        # Lazily re-resolve the key from the environment in case it was
        # cleared or never populated at construction time.
        self.api_key = self.api_key or getenv("MOONSHOT_API_KEY")
        if not self.api_key:
            raise ModelAuthenticationError(
                message="MOONSHOT_API_KEY not set. Please set the MOONSHOT_API_KEY environment variable.",
                model_name=self.name,
            )

        # Collect candidate options, then drop the unset ones so the client
        # only receives explicit configuration.
        candidate_params: Dict[str, Any] = {
            "api_key": self.api_key,
            "organization": self.organization,
            "base_url": self.base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "default_headers": self.default_headers,
            "default_query": self.default_query,
        }
        client_params = {key: value for key, value in candidate_params.items() if value is not None}

        # Caller-supplied overrides take precedence over the defaults above.
        if self.client_params:
            client_params.update(self.client_params)
        return client_params
agno/models/response.py CHANGED
@@ -52,6 +52,9 @@ class ToolExecution:
52
52
 
53
53
  external_execution_required: Optional[bool] = None
54
54
 
55
+ # If True (and external_execution_required=True), suppresses verbose paused messages
56
+ external_execution_silent: Optional[bool] = None
57
+
55
58
  @property
56
59
  def is_paused(self) -> bool:
57
60
  return bool(self.requires_confirmation or self.requires_user_input or self.external_execution_required)
@@ -84,6 +87,7 @@ class ToolExecution:
84
87
  if "user_input_schema" in data
85
88
  else None,
86
89
  external_execution_required=data.get("external_execution_required"),
90
+ external_execution_silent=data.get("external_execution_silent"),
87
91
  metrics=Metrics(**(data.get("metrics", {}) or {})),
88
92
  **{"created_at": data["created_at"]} if "created_at" in data else {},
89
93
  )
agno/models/utils.py CHANGED
@@ -139,6 +139,11 @@ def _get_model_class(model_id: str, model_provider: str) -> Model:
139
139
 
140
140
  return MistralChat(id=model_id)
141
141
 
142
+ elif model_provider == "moonshot":
143
+ from agno.models.moonshot import MoonShot
144
+
145
+ return MoonShot(id=model_id)
146
+
142
147
  elif model_provider == "nebius":
143
148
  from agno.models.nebius import Nebius
144
149
 
agno/tools/decorator.py CHANGED
@@ -70,6 +70,7 @@ def tool(
70
70
  requires_user_input: Optional[bool] = None,
71
71
  user_input_fields: Optional[List[str]] = None,
72
72
  external_execution: Optional[bool] = None,
73
+ external_execution_silent: Optional[bool] = None,
73
74
  pre_hook: Optional[Callable] = None,
74
75
  post_hook: Optional[Callable] = None,
75
76
  tool_hooks: Optional[List[Callable]] = None,
@@ -98,6 +99,7 @@ def tool(*args, **kwargs) -> Union[Function, Callable[[F], Function]]:
98
99
  requires_user_input: Optional[bool] - If True, the function will require user input before execution
99
100
  user_input_fields: Optional[List[str]] - List of fields that will be provided to the function as user input
100
101
  external_execution: Optional[bool] - If True, the function will be executed outside of the agent's context
102
+ external_execution_silent: Optional[bool] - If True (and external_execution=True), suppresses verbose paused messages (e.g., "I have tools to execute...")
101
103
  pre_hook: Optional[Callable] - Hook that runs before the function is executed.
102
104
  post_hook: Optional[Callable] - Hook that runs after the function is executed.
103
105
  tool_hooks: Optional[List[Callable]] - List of hooks that run before and after the function is executed.
@@ -135,6 +137,7 @@ def tool(*args, **kwargs) -> Union[Function, Callable[[F], Function]]:
135
137
  "requires_user_input",
136
138
  "user_input_fields",
137
139
  "external_execution",
140
+ "external_execution_silent",
138
141
  "pre_hook",
139
142
  "post_hook",
140
143
  "tool_hooks",
agno/tools/function.py CHANGED
@@ -121,6 +121,9 @@ class Function(BaseModel):
121
121
  # If True, the function will be executed outside the agent's control.
122
122
  external_execution: Optional[bool] = None
123
123
 
124
+ # If True (and external_execution=True), the function will not produce verbose paused messages (e.g., "I have tools to execute...")
125
+ external_execution_silent: Optional[bool] = None
126
+
124
127
  # Caching configuration
125
128
  cache_results: bool = False
126
129
  cache_dir: Optional[str] = None
agno/tools/unsplash.py ADDED
@@ -0,0 +1,341 @@
1
+ """Unsplash Tools for searching and retrieving high-quality, royalty-free images.
2
+
3
+ This toolkit provides AI agents with the ability to search for and retrieve images
4
+ from Unsplash, a popular platform with over 4.3 million high-quality photos.
5
+
6
+ Get your free API key at: https://unsplash.com/developers
7
+ """
8
+
9
+ import json
10
+ from os import getenv
11
+ from typing import Any, Dict, List, Optional
12
+ from urllib.parse import urlencode
13
+ from urllib.request import Request, urlopen
14
+
15
+ from agno.tools import Toolkit
16
+ from agno.utils.log import log_debug, logger
17
+
18
+
19
+ class UnsplashTools(Toolkit):
20
+ """A toolkit for searching and retrieving images from Unsplash.
21
+
22
+ Unsplash provides access to over 4.3 million high-quality, royalty-free images
23
+ that can be used for various purposes. This toolkit enables AI agents to:
24
+ - Search for photos by keywords
25
+ - Get detailed information about specific photos
26
+ - Retrieve random photos with optional filters
27
+ - Track downloads (required by Unsplash API guidelines)
28
+
29
+ Example:
30
+ ```python
31
+ from agno.agent import Agent
32
+ from agno.models.openai import OpenAIChat
33
+ from agno.tools.unsplash import UnsplashTools
34
+
35
+ agent = Agent(
36
+ model=OpenAIChat(id="gpt-4o"),
37
+ tools=[UnsplashTools()],
38
+ )
39
+ agent.print_response("Find me 3 photos of mountains at sunset")
40
+ ```
41
+ """
42
+
43
+ def __init__(
44
+ self,
45
+ access_key: Optional[str] = None,
46
+ enable_search_photos: bool = True,
47
+ enable_get_photo: bool = True,
48
+ enable_get_random_photo: bool = True,
49
+ enable_download_photo: bool = False,
50
+ all: bool = False,
51
+ **kwargs: Any,
52
+ ):
53
+ """Initialize the Unsplash toolkit.
54
+
55
+ Args:
56
+ access_key: Unsplash API access key. If not provided, will look for
57
+ UNSPLASH_ACCESS_KEY environment variable.
58
+ enable_search_photos: Enable the search_photos tool. Default: True.
59
+ enable_get_photo: Enable the get_photo tool. Default: True.
60
+ enable_get_random_photo: Enable the get_random_photo tool. Default: True.
61
+ enable_download_photo: Enable the download_photo tool. Default: False.
62
+ all: Enable all tools. Default: False.
63
+ **kwargs: Additional arguments passed to the Toolkit base class.
64
+ """
65
+ self.access_key = access_key or getenv("UNSPLASH_ACCESS_KEY")
66
+ if not self.access_key:
67
+ logger.warning("No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable.")
68
+
69
+ self.base_url = "https://api.unsplash.com"
70
+
71
+ tools: List[Any] = []
72
+ if all or enable_search_photos:
73
+ tools.append(self.search_photos)
74
+ if all or enable_get_photo:
75
+ tools.append(self.get_photo)
76
+ if all or enable_get_random_photo:
77
+ tools.append(self.get_random_photo)
78
+ if all or enable_download_photo:
79
+ tools.append(self.download_photo)
80
+
81
+ super().__init__(name="unsplash_tools", tools=tools, **kwargs)
82
+
83
+ def _make_request(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
84
+ """Make an authenticated request to the Unsplash API.
85
+
86
+ Args:
87
+ endpoint: API endpoint path (e.g., "/search/photos").
88
+ params: Optional query parameters.
89
+
90
+ Returns:
91
+ JSON response as a dictionary.
92
+
93
+ Raises:
94
+ Exception: If the API request fails.
95
+ """
96
+ url = f"{self.base_url}{endpoint}"
97
+ if params:
98
+ url = f"{url}?{urlencode(params)}"
99
+
100
+ headers = {
101
+ "Authorization": f"Client-ID {self.access_key}",
102
+ "Accept-Version": "v1",
103
+ }
104
+
105
+ request = Request(url, headers=headers)
106
+ with urlopen(request) as response:
107
+ return json.loads(response.read().decode())
108
+
109
+ def _format_photo(self, photo: Dict[str, Any]) -> Dict[str, Any]:
110
+ """Format photo data into a clean, consistent structure.
111
+
112
+ Args:
113
+ photo: Raw photo data from Unsplash API.
114
+
115
+ Returns:
116
+ Formatted photo dictionary with essential fields.
117
+ """
118
+ return {
119
+ "id": photo.get("id"),
120
+ "description": photo.get("description") or photo.get("alt_description"),
121
+ "width": photo.get("width"),
122
+ "height": photo.get("height"),
123
+ "color": photo.get("color"),
124
+ "created_at": photo.get("created_at"),
125
+ "urls": {
126
+ "raw": photo.get("urls", {}).get("raw"),
127
+ "full": photo.get("urls", {}).get("full"),
128
+ "regular": photo.get("urls", {}).get("regular"),
129
+ "small": photo.get("urls", {}).get("small"),
130
+ "thumb": photo.get("urls", {}).get("thumb"),
131
+ },
132
+ "author": {
133
+ "name": photo.get("user", {}).get("name"),
134
+ "username": photo.get("user", {}).get("username"),
135
+ "profile_url": photo.get("user", {}).get("links", {}).get("html"),
136
+ },
137
+ "links": {
138
+ "html": photo.get("links", {}).get("html"),
139
+ "download": photo.get("links", {}).get("download"),
140
+ },
141
+ "likes": photo.get("likes"),
142
+ "tags": [tag.get("title") for tag in photo.get("tags", [])[:5] if tag.get("title")],
143
+ }
144
+
145
+ def search_photos(
146
+ self,
147
+ query: str,
148
+ per_page: int = 10,
149
+ page: int = 1,
150
+ orientation: Optional[str] = None,
151
+ color: Optional[str] = None,
152
+ ) -> str:
153
+ """Search for photos on Unsplash by keyword.
154
+
155
+ Args:
156
+ query: The search query string (e.g., "mountain sunset", "office workspace").
157
+ per_page: Number of results per page (1-30). Default: 10.
158
+ page: Page number to retrieve. Default: 1.
159
+ orientation: Filter by orientation: "landscape", "portrait", or "squarish".
160
+ color: Filter by color: "black_and_white", "black", "white", "yellow",
161
+ "orange", "red", "purple", "magenta", "green", "teal", "blue".
162
+
163
+ Returns:
164
+ JSON string containing search results with photo details including
165
+ URLs, author information, and metadata.
166
+ """
167
+ if not self.access_key:
168
+ return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
169
+
170
+ if not query:
171
+ return "Error: Please provide a search query."
172
+
173
+ log_debug(f"Searching Unsplash for: {query}")
174
+
175
+ try:
176
+ params: Dict[str, Any] = {
177
+ "query": query,
178
+ "per_page": min(max(1, per_page), 30),
179
+ "page": max(1, page),
180
+ }
181
+
182
+ if orientation and orientation in ["landscape", "portrait", "squarish"]:
183
+ params["orientation"] = orientation
184
+
185
+ if color:
186
+ valid_colors = [
187
+ "black_and_white",
188
+ "black",
189
+ "white",
190
+ "yellow",
191
+ "orange",
192
+ "red",
193
+ "purple",
194
+ "magenta",
195
+ "green",
196
+ "teal",
197
+ "blue",
198
+ ]
199
+ if color in valid_colors:
200
+ params["color"] = color
201
+
202
+ response = self._make_request("/search/photos", params)
203
+
204
+ results = {
205
+ "total": response.get("total", 0),
206
+ "total_pages": response.get("total_pages", 0),
207
+ "photos": [self._format_photo(photo) for photo in response.get("results", [])],
208
+ }
209
+
210
+ return json.dumps(results, indent=2)
211
+
212
+ except Exception as e:
213
+ return f"Error searching Unsplash: {e}"
214
+
215
+ def get_photo(self, photo_id: str) -> str:
216
+ """Get detailed information about a specific photo.
217
+
218
+ Args:
219
+ photo_id: The unique identifier of the photo.
220
+
221
+ Returns:
222
+ JSON string containing detailed photo information including
223
+ URLs, author, metadata, EXIF data, and location if available.
224
+ """
225
+ if not self.access_key:
226
+ return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
227
+
228
+ if not photo_id:
229
+ return "Error: Please provide a photo ID."
230
+
231
+ log_debug(f"Getting Unsplash photo: {photo_id}")
232
+
233
+ try:
234
+ photo = self._make_request(f"/photos/{photo_id}")
235
+
236
+ result = self._format_photo(photo)
237
+
238
+ # Add extra details available for single photo requests
239
+ if photo.get("exif"):
240
+ result["exif"] = {
241
+ "make": photo["exif"].get("make"),
242
+ "model": photo["exif"].get("model"),
243
+ "aperture": photo["exif"].get("aperture"),
244
+ "exposure_time": photo["exif"].get("exposure_time"),
245
+ "focal_length": photo["exif"].get("focal_length"),
246
+ "iso": photo["exif"].get("iso"),
247
+ }
248
+
249
+ if photo.get("location"):
250
+ result["location"] = {
251
+ "name": photo["location"].get("name"),
252
+ "city": photo["location"].get("city"),
253
+ "country": photo["location"].get("country"),
254
+ }
255
+
256
+ result["views"] = photo.get("views")
257
+ result["downloads"] = photo.get("downloads")
258
+
259
+ return json.dumps(result, indent=2)
260
+
261
+ except Exception as e:
262
+ return f"Error getting photo: {e}"
263
+
264
+ def get_random_photo(
265
+ self,
266
+ query: Optional[str] = None,
267
+ orientation: Optional[str] = None,
268
+ count: int = 1,
269
+ ) -> str:
270
+ """Get random photo(s) from Unsplash.
271
+
272
+ Args:
273
+ query: Optional search query to filter random photos.
274
+ orientation: Filter by orientation: "landscape", "portrait", or "squarish".
275
+ count: Number of random photos to return (1-30). Default: 1.
276
+
277
+ Returns:
278
+ JSON string containing random photo(s) data.
279
+ """
280
+ if not self.access_key:
281
+ return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
282
+
283
+ log_debug(f"Getting random Unsplash photo (query={query})")
284
+
285
+ try:
286
+ params: Dict[str, Any] = {
287
+ "count": min(max(1, count), 30),
288
+ }
289
+
290
+ if query:
291
+ params["query"] = query
292
+
293
+ if orientation and orientation in ["landscape", "portrait", "squarish"]:
294
+ params["orientation"] = orientation
295
+
296
+ response = self._make_request("/photos/random", params)
297
+
298
+ # Response is a list when count > 1, single object when count = 1
299
+ if isinstance(response, list):
300
+ photos = [self._format_photo(photo) for photo in response]
301
+ else:
302
+ photos = [self._format_photo(response)]
303
+
304
+ return json.dumps({"photos": photos}, indent=2)
305
+
306
+ except Exception as e:
307
+ return f"Error getting random photo: {e}"
308
+
309
+ def download_photo(self, photo_id: str) -> str:
310
+ """Trigger a download event for a photo.
311
+
312
+ This is required by the Unsplash API guidelines when a photo is downloaded
313
+ or used. It helps photographers track the usage of their work.
314
+
315
+ Args:
316
+ photo_id: The unique identifier of the photo being downloaded.
317
+
318
+ Returns:
319
+ JSON string with the download URL.
320
+ """
321
+ if not self.access_key:
322
+ return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
323
+
324
+ if not photo_id:
325
+ return "Error: Please provide a photo ID."
326
+
327
+ log_debug(f"Tracking download for Unsplash photo: {photo_id}")
328
+
329
+ try:
330
+ response = self._make_request(f"/photos/{photo_id}/download")
331
+
332
+ return json.dumps(
333
+ {
334
+ "photo_id": photo_id,
335
+ "download_url": response.get("url"),
336
+ },
337
+ indent=2,
338
+ )
339
+
340
+ except Exception as e:
341
+ return f"Error tracking download: {e}"
@@ -105,8 +105,9 @@ def print_response_stream(
105
105
  if response_event.is_paused: # type: ignore
106
106
  response_event = cast(RunPausedEvent, response_event) # type: ignore
107
107
  response_panel = create_paused_run_output_panel(response_event) # type: ignore
108
- panels.append(response_panel)
109
- live_log.update(Group(*panels))
108
+ if response_panel is not None:
109
+ panels.append(response_panel)
110
+ live_log.update(Group(*panels))
110
111
  return
111
112
 
112
113
  if response_event.event == RunEvent.pre_hook_completed: # type: ignore
@@ -310,8 +311,9 @@ async def aprint_response_stream(
310
311
  if isinstance(resp, tuple(get_args(RunOutputEvent))):
311
312
  if resp.is_paused:
312
313
  response_panel = create_paused_run_output_panel(resp) # type: ignore
313
- panels.append(response_panel)
314
- live_log.update(Group(*panels))
314
+ if response_panel is not None:
315
+ panels.append(response_panel)
316
+ live_log.update(Group(*panels))
315
317
  break
316
318
 
317
319
  if (
@@ -798,7 +800,8 @@ def build_panels(
798
800
 
799
801
  if isinstance(run_response, RunOutput) and run_response.is_paused:
800
802
  response_panel = create_paused_run_output_panel(run_response)
801
- panels.append(response_panel)
803
+ if response_panel is not None:
804
+ panels.append(response_panel)
802
805
  return panels
803
806
 
804
807
  if isinstance(run_response, RunOutput) and run_response.reasoning_steps is not None:
agno/utils/response.py CHANGED
@@ -80,35 +80,41 @@ def format_tool_calls(tool_calls: List[ToolExecution]) -> List[str]:
80
80
  def create_paused_run_output_panel(run_output: Union[RunPausedEvent, RunOutput]):
81
81
  from rich.text import Text
82
82
 
83
+ # Filter out silent tools - they don't produce verbose output
84
+ non_silent_tools = [tc for tc in (run_output.tools or []) if not tc.external_execution_silent]
85
+
86
+ # If all tools are silent, return None to indicate no panel should be shown
87
+ if not non_silent_tools:
88
+ return None
89
+
83
90
  tool_calls_content = Text("Run is paused. ")
84
- if run_output.tools is not None:
85
- if any(tc.requires_confirmation for tc in run_output.tools):
86
- tool_calls_content.append("The following tool calls require confirmation:\n")
87
- for tool_call in run_output.tools:
88
- if tool_call.requires_confirmation:
89
- args_str = ""
90
- for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
91
- args_str += f"{arg}={value}, "
92
- args_str = args_str.rstrip(", ")
93
- tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
94
- if any(tc.requires_user_input for tc in run_output.tools):
95
- tool_calls_content.append("The following tool calls require user input:\n")
96
- for tool_call in run_output.tools:
97
- if tool_call.requires_user_input:
98
- args_str = ""
99
- for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
100
- args_str += f"{arg}={value}, "
101
- args_str = args_str.rstrip(", ")
102
- tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
103
- if any(tc.external_execution_required for tc in run_output.tools):
104
- tool_calls_content.append("The following tool calls require external execution:\n")
105
- for tool_call in run_output.tools:
106
- if tool_call.external_execution_required:
107
- args_str = ""
108
- for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
109
- args_str += f"{arg}={value}, "
110
- args_str = args_str.rstrip(", ")
111
- tool_calls_content.append(f"• {tool_call.tool_name}({args_str})\n")
91
+ if any(tc.requires_confirmation for tc in non_silent_tools):
92
+ tool_calls_content.append("The following tool calls require confirmation:\n")
93
+ for tool_call in non_silent_tools:
94
+ if tool_call.requires_confirmation:
95
+ args_str = ""
96
+ for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
97
+ args_str += f"{arg}={value}, "
98
+ args_str = args_str.rstrip(", ")
99
+ tool_calls_content.append(f" {tool_call.tool_name}({args_str})\n")
100
+ if any(tc.requires_user_input for tc in non_silent_tools):
101
+ tool_calls_content.append("The following tool calls require user input:\n")
102
+ for tool_call in non_silent_tools:
103
+ if tool_call.requires_user_input:
104
+ args_str = ""
105
+ for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
106
+ args_str += f"{arg}={value}, "
107
+ args_str = args_str.rstrip(", ")
108
+ tool_calls_content.append(f" {tool_call.tool_name}({args_str})\n")
109
+ if any(tc.external_execution_required for tc in non_silent_tools):
110
+ tool_calls_content.append("The following tool calls require external execution:\n")
111
+ for tool_call in non_silent_tools:
112
+ if tool_call.external_execution_required:
113
+ args_str = ""
114
+ for arg, value in tool_call.tool_args.items() if tool_call.tool_args else {}:
115
+ args_str += f"{arg}={value}, "
116
+ args_str = args_str.rstrip(", ")
117
+ tool_calls_content.append(f" {tool_call.tool_name}({args_str})\n")
112
118
 
113
119
  # Create panel for response
114
120
  response_panel = create_panel(
@@ -122,6 +128,10 @@ def create_paused_run_output_panel(run_output: Union[RunPausedEvent, RunOutput])
122
128
  def get_paused_content(run_output: RunOutput) -> str:
123
129
  paused_content = ""
124
130
  for tool in run_output.tools or []:
131
+ # Skip silent tools - they don't produce verbose paused messages
132
+ if tool.external_execution_silent:
133
+ continue
134
+
125
135
  # Initialize flags for each tool
126
136
  confirmation_required = False
127
137
  user_input_required = False
@@ -282,9 +282,10 @@ class LanceDb(VectorDb):
282
282
  meta_data.update(filters)
283
283
  document.meta_data = meta_data
284
284
 
285
- # Only embed if the document doesn't already have an embedding
285
+ # Only embed if the document doesn't already have a valid embedding
286
286
  # This prevents duplicate embedding when called from async_insert or async_upsert
287
- if document.embedding is None:
287
+ # Check for both None and empty list (async embedding failures return [])
288
+ if document.embedding is None or (isinstance(document.embedding, list) and len(document.embedding) == 0):
288
289
  document.embed(embedder=self.embedder)
289
290
  cleaned_content = document.content.replace("\x00", "\ufffd")
290
291
  # Include content_hash in ID to ensure uniqueness across different content hashes
@@ -363,12 +364,21 @@ class LanceDb(VectorDb):
363
364
  else:
364
365
  logger.warning(f"Async batch embedding failed, falling back to individual embeddings: {e}")
365
366
  embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
366
- await asyncio.gather(*embed_tasks, return_exceptions=True)
367
+ results = await asyncio.gather(*embed_tasks, return_exceptions=True)
368
+ # Log any embedding failures (they will be re-tried in sync insert)
369
+ for i, result in enumerate(results):
370
+ if isinstance(result, Exception):
371
+ log_warning(f"Async embedding failed for document {i}, will retry in sync insert: {result}")
367
372
  else:
368
373
  embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
369
- await asyncio.gather(*embed_tasks, return_exceptions=True)
374
+ results = await asyncio.gather(*embed_tasks, return_exceptions=True)
375
+ # Log any embedding failures (they will be re-tried in sync insert)
376
+ for i, result in enumerate(results):
377
+ if isinstance(result, Exception):
378
+ log_warning(f"Async embedding failed for document {i}, will retry in sync insert: {result}")
370
379
 
371
380
  # Use sync insert to avoid sync/async table synchronization issues
381
+ # Sync insert will re-embed any documents that failed async embedding
372
382
  self.insert(content_hash, documents, filters)
373
383
 
374
384
  def upsert_available(self) -> bool:
@@ -414,13 +424,25 @@ class LanceDb(VectorDb):
414
424
  if is_rate_limit:
415
425
  raise e
416
426
  else:
427
+ logger.warning(f"Async batch embedding failed, falling back to individual embeddings: {e}")
417
428
  embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
418
- await asyncio.gather(*embed_tasks, return_exceptions=True)
429
+ results = await asyncio.gather(*embed_tasks, return_exceptions=True)
430
+ # Log any embedding failures (they will be re-tried in sync upsert)
431
+ for i, result in enumerate(results):
432
+ if isinstance(result, Exception):
433
+ log_warning(
434
+ f"Async embedding failed for document {i}, will retry in sync upsert: {result}"
435
+ )
419
436
  else:
420
437
  embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
421
- await asyncio.gather(*embed_tasks, return_exceptions=True)
438
+ results = await asyncio.gather(*embed_tasks, return_exceptions=True)
439
+ # Log any embedding failures (they will be re-tried in sync upsert)
440
+ for i, result in enumerate(results):
441
+ if isinstance(result, Exception):
442
+ log_warning(f"Async embedding failed for document {i}, will retry in sync upsert: {result}")
422
443
 
423
444
  # Use sync upsert for reliability
445
+ # Sync upsert (via insert) will re-embed any documents that failed async embedding
424
446
  self.upsert(content_hash=content_hash, documents=documents, filters=filters)
425
447
 
426
448
  def search(
@@ -897,7 +919,7 @@ class LanceDb(VectorDb):
897
919
 
898
920
  # Get all documents and filter in Python (LanceDB doesn't support JSON operators)
899
921
  total_count = self.table.count_rows()
900
- results = self.table.search().select(["id", "payload"]).limit(total_count).to_pandas()
922
+ results = self.table.search().select(["id", "payload", "vector"]).limit(total_count).to_pandas()
901
923
 
902
924
  if results.empty:
903
925
  logger.debug("No documents found")
agno/workflow/workflow.py CHANGED
@@ -3822,6 +3822,7 @@ class Workflow:
3822
3822
  stream_events: Optional[bool] = None,
3823
3823
  background: Optional[bool] = False,
3824
3824
  background_tasks: Optional[Any] = None,
3825
+ dependencies: Optional[Dict[str, Any]] = None,
3825
3826
  ) -> WorkflowRunOutput: ...
3826
3827
 
3827
3828
  @overload
@@ -3841,6 +3842,7 @@ class Workflow:
3841
3842
  stream_events: Optional[bool] = None,
3842
3843
  background: Optional[bool] = False,
3843
3844
  background_tasks: Optional[Any] = None,
3845
+ dependencies: Optional[Dict[str, Any]] = None,
3844
3846
  ) -> Iterator[WorkflowRunOutputEvent]: ...
3845
3847
 
3846
3848
  def run(
@@ -3859,6 +3861,7 @@ class Workflow:
3859
3861
  stream_events: Optional[bool] = None,
3860
3862
  background: Optional[bool] = False,
3861
3863
  background_tasks: Optional[Any] = None,
3864
+ dependencies: Optional[Dict[str, Any]] = None,
3862
3865
  **kwargs: Any,
3863
3866
  ) -> Union[WorkflowRunOutput, Iterator[WorkflowRunOutputEvent]]:
3864
3867
  """Execute the workflow synchronously with optional streaming"""
@@ -3938,6 +3941,7 @@ class Workflow:
3938
3941
  session_state=session_state,
3939
3942
  workflow_id=self.id,
3940
3943
  workflow_name=self.name,
3944
+ dependencies=dependencies,
3941
3945
  )
3942
3946
 
3943
3947
  # Execute workflow agent if configured
@@ -4005,6 +4009,7 @@ class Workflow:
4005
4009
  background: Optional[bool] = False,
4006
4010
  websocket: Optional[WebSocket] = None,
4007
4011
  background_tasks: Optional[Any] = None,
4012
+ dependencies: Optional[Dict[str, Any]] = None,
4008
4013
  ) -> WorkflowRunOutput: ...
4009
4014
 
4010
4015
  @overload
@@ -4025,6 +4030,7 @@ class Workflow:
4025
4030
  background: Optional[bool] = False,
4026
4031
  websocket: Optional[WebSocket] = None,
4027
4032
  background_tasks: Optional[Any] = None,
4033
+ dependencies: Optional[Dict[str, Any]] = None,
4028
4034
  ) -> AsyncIterator[WorkflowRunOutputEvent]: ...
4029
4035
 
4030
4036
  def arun( # type: ignore
@@ -4044,6 +4050,7 @@ class Workflow:
4044
4050
  background: Optional[bool] = False,
4045
4051
  websocket: Optional[WebSocket] = None,
4046
4052
  background_tasks: Optional[Any] = None,
4053
+ dependencies: Optional[Dict[str, Any]] = None,
4047
4054
  **kwargs: Any,
4048
4055
  ) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
4049
4056
  """Execute the workflow synchronously with optional streaming"""
@@ -4109,6 +4116,7 @@ class Workflow:
4109
4116
  session_id=session_id,
4110
4117
  user_id=user_id,
4111
4118
  session_state=session_state,
4119
+ dependencies=dependencies,
4112
4120
  )
4113
4121
 
4114
4122
  log_debug(f"Async Workflow Run Start: {self.name}", center=True)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agno
3
- Version: 2.4.3
3
+ Version: 2.4.4
4
4
  Summary: Agno: a lightweight library for building Multi-Agent Systems
5
5
  Author-email: Ashpreet Bedi <ashpreet@agno.com>
6
6
  Project-URL: homepage, https://agno.com
@@ -531,7 +531,7 @@ https://github.com/user-attachments/assets/feb23db8-15cc-4e88-be7c-01a21a03ebf6
531
531
 
532
532
  ## Getting Started
533
533
 
534
- 1. Follow the [getting started guide](https://github.com/agno-agi/agno/tree/main/cookbook/00_getting_started)
534
+ 1. Follow the [quickstart guide](https://github.com/agno-agi/agno/tree/main/cookbook/00_quickstart)
535
535
  2. Browse the [cookbook](https://github.com/agno-agi/agno/tree/main/cookbook) for real-world examples
536
536
  3. Read the [docs](https://docs.agno.com) to go deeper
537
537
 
@@ -49,7 +49,7 @@ agno/db/dynamo/dynamo.py,sha256=NPPvVCjUVdmSU9EtzkWy80KSfDODfONyfXv8tNeYLpE,1106
49
49
  agno/db/dynamo/schemas.py,sha256=NbVLIEqe2D5QYhS9JAAPqK4RdADN3G6fBJbULzGtPKM,18507
50
50
  agno/db/dynamo/utils.py,sha256=z-s0FAWfiGJPT_k68rlArP5SnaXlW8ycnGi9dh4Nehc,28005
51
51
  agno/db/firestore/__init__.py,sha256=lYAJjUs4jMxJFty1GYZw464K35zeuBlcoFR9uuIQYtI,79
52
- agno/db/firestore/firestore.py,sha256=cK0yoiRSBgbcRG5OhK99JRcX1Cot5wniVJrEASK7_8A,95261
52
+ agno/db/firestore/firestore.py,sha256=ZGua4k6_nh9vVApMNYD8983Ppf8ctkfvnL16sdikHmo,96256
53
53
  agno/db/firestore/schemas.py,sha256=dri9lGHxs-IsqvsgL_noFoeAVn9iZflMPsqHkNdA-DM,6123
54
54
  agno/db/firestore/utils.py,sha256=lshztynSRxYUQxA1LTCPZlugaW4VKOB5TtKjQ1BQ94s,14194
55
55
  agno/db/gcs_json/__init__.py,sha256=aTR4o3aFrzfANHtRw7nX9uc5_GsY52ch0rmoo7uXuc4,76
@@ -103,9 +103,9 @@ agno/db/sqlite/sqlite.py,sha256=0GQR3FS9UlDxFQs1VF_t5OXhw7FBccfL6CO8JZE7CKY,1765
103
103
  agno/db/sqlite/utils.py,sha256=PZp-g4oUf6Iw1kuDAmOpIBtfyg4poKiG_DxXP4EonFI,15721
104
104
  agno/db/surrealdb/__init__.py,sha256=C8qp5-Nx9YnSmgKEtGua-sqG_ntCXONBw1qqnNyKPqI,75
105
105
  agno/db/surrealdb/metrics.py,sha256=oKDRyjRQ6KR3HaO8zDHQLVMG7-0NDkOFOKX5I7mD5FA,10336
106
- agno/db/surrealdb/models.py,sha256=IUIpdd50MxznOB-hMstEcEHHVlWhll2FNQqVhM-9zIU,12889
106
+ agno/db/surrealdb/models.py,sha256=BDs5yk1N3G6XGPDz3OYEwuGDkli5MwZHSRCTaFwbK8w,12899
107
107
  agno/db/surrealdb/queries.py,sha256=s__yJSFIx387IEflcDdti7T5j6H9NX_-zIj13F9CN9s,2051
108
- agno/db/surrealdb/surrealdb.py,sha256=9qFoa7OTnS8XIqCpqPfKGCXK2-dFPy7LuX_bSeQvjrQ,76749
108
+ agno/db/surrealdb/surrealdb.py,sha256=RpjWArgMSzRE5MCclGVh-0d3T2bYmIIRB-1mJwHhgSo,77128
109
109
  agno/db/surrealdb/utils.py,sha256=PcZo_cTy-jI59I-XhzAomRLdV9-m0irtO4C-AYGSghs,5405
110
110
  agno/eval/__init__.py,sha256=RmiGpnwGm1dL9DpPMvzrXzFc1jRr111B361rTICVvXE,1173
111
111
  agno/eval/accuracy.py,sha256=20rxNKjH3_Y6lMPRlrnKiIBlJLfKMsCN8JiJZ9jGofk,33775
@@ -136,7 +136,7 @@ agno/knowledge/chunking/agentic.py,sha256=WeQ5ORe_CxsGYg0udOXjvwBo95hnpPZHVpFj-b
136
136
  agno/knowledge/chunking/code.py,sha256=YLdHL3muYz9MhDqxXPW7MGfqEph4KIZu6_RXPp6KUIc,3569
137
137
  agno/knowledge/chunking/document.py,sha256=uCHbPdZR4ILbm9RkpuVu8zw4dPIH0jd1XIRELOixRlI,6454
138
138
  agno/knowledge/chunking/fixed.py,sha256=Mz0QgxqVNSaOYPtzihLz6yJdz19zCZ8zLRXIREjgTb8,2371
139
- agno/knowledge/chunking/markdown.py,sha256=bVXyly0ywbCWrpaaiHocgcE0vAWCt3e9H8iDqnMXMak,10524
139
+ agno/knowledge/chunking/markdown.py,sha256=5hiHuvuGJq0UBk7gwnFV5Q84MumXD39UNcY4qlzH1fk,14665
140
140
  agno/knowledge/chunking/recursive.py,sha256=PXeq-RF9nJ8alV771q0jEyl1C2QPMVFUTOiJSU0nStw,2357
141
141
  agno/knowledge/chunking/row.py,sha256=yFGKMsHd2Ml0fkJLksw8ULUpWXmbSXIQwnwlKHVPP40,1426
142
142
  agno/knowledge/chunking/semantic.py,sha256=_PVDvmtJhpoamtpn-kHq54rwLgrbScEky5yqUbBc6JQ,7267
@@ -213,12 +213,12 @@ agno/memory/strategies/base.py,sha256=bHtkZ27U9VXKezdaSWLJZELjK97GcpQUBefSa8BYpp
213
213
  agno/memory/strategies/summarize.py,sha256=4M9zWTsooC3EtHpZoC7Z-yFaQgQoebRMNfZPitdsvB0,7307
214
214
  agno/memory/strategies/types.py,sha256=b3N5jOG_dM4AxT7vGagFIc9sqUUjxFtRHSoH4_AhEx8,1225
215
215
  agno/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
216
- agno/models/base.py,sha256=W9NXmhGrqA_q1Wx97WrEUFk6YcxMu9miaZJH_8Z52d4,129282
216
+ agno/models/base.py,sha256=YdyK6vqGEw35H7Q_TV8aRehF942Orkl0GFG_TG5TDo8,129816
217
217
  agno/models/defaults.py,sha256=1_fe4-ZbNriE8BgqxVRVi4KGzEYxYKYsz4hn6CZNEEM,40
218
218
  agno/models/message.py,sha256=5bZOFdZuhsQw06nNppvFJq-JGI4lqQt4sVhdjfEFBZM,19976
219
219
  agno/models/metrics.py,sha256=bQJ5DMoFcrb2EyA2VUm4u9HVGbgTKO5F1o2A_t_7hqI,4913
220
- agno/models/response.py,sha256=CWwWYyKCuUOECcafpUik3JHo1xGqpMFlmiJ0WyuDYT8,7756
221
- agno/models/utils.py,sha256=jxAIIG2y7KBypwFlc87GzFnvogRpGLfd-wwr6KXZIj8,7269
220
+ agno/models/response.py,sha256=xZQ-2L5tciX70hTbniSp2XLt9OTBuasW4_lsWMGGfoM,7976
221
+ agno/models/utils.py,sha256=FKzRbNm_hO_gdICBO-x4KWL6OkWIeipn07PeerUDR1M,7397
222
222
  agno/models/aimlapi/__init__.py,sha256=XQcFRvt4qJ8ol9nCC0XKEkVEDivdNf3nZNoJZMZ5m8M,78
223
223
  agno/models/aimlapi/aimlapi.py,sha256=ELPv8RuEc6qUq4JxWEJVRITsK71rzUxw6_cP3Zd8Vz0,2179
224
224
  agno/models/anthropic/__init__.py,sha256=nbReX3p17JCwfrMDR9hR7-OaEFZm80I7dng93dl-Fhw,77
@@ -269,6 +269,8 @@ agno/models/meta/llama.py,sha256=JvyuXtN7Em55U_uRy9cHEx5iw8FgbnQRPo32_1GsMxg,189
269
269
  agno/models/meta/llama_openai.py,sha256=B9kmyy9QlhZaa13N0b6UixHffxJPz4I4ds4a8ML0lyA,2653
270
270
  agno/models/mistral/__init__.py,sha256=6CP9TDn8oRUjtGBk1McvSQHrjY935vB6msGPlXBhkSw,86
271
271
  agno/models/mistral/mistral.py,sha256=FpyYp9zlnKuXVKBDhHSMNnHZTE-8m2w5h6ohoTlT6AE,16845
272
+ agno/models/moonshot/__init__.py,sha256=epY4CBZyC5fw1ouUwqHZ1Xth1weMhPD3xUtTJuVUaOU,75
273
+ agno/models/moonshot/moonshot.py,sha256=SzLgckZlyqkaXdIPBFnozIdTSLZFhZ-b9DVpX8ceFVM,2054
272
274
  agno/models/n1n/__init__.py,sha256=CymyKzTZWr-klifwaxzGTMDSVaPxBVtKOHQ-4VaPiWg,55
273
275
  agno/models/n1n/n1n.py,sha256=UW7MPHNAU0sfMTaHh7HIYcWVSuc_-I16KrrlsqF1I2U,1977
274
276
  agno/models/nebius/__init__.py,sha256=gW2yvxIfV2gxxOnBtTP8MCpI9AvMbIE6VTw-gY01Uvg,67
@@ -444,7 +446,7 @@ agno/tools/crawl4ai.py,sha256=5N6NJR5pSrNu0_zE__r4uA3cuTZDXUTLpq_wER0A5-k,6648
444
446
  agno/tools/csv_toolkit.py,sha256=1_G0cZJw1LBdXV0DZdHz-gzlQJCRocv9urU4g2WxBhw,7573
445
447
  agno/tools/dalle.py,sha256=WqkAquL3wLIUzLWbPBhdmT_JWVkyQadXJvb6R5zhuHk,4136
446
448
  agno/tools/daytona.py,sha256=I03Ddgvh6SJ5h5shpxw0lnDKq7ltMsUJK0kmnT2b0Mo,18446
447
- agno/tools/decorator.py,sha256=m49Gt-J4tpc-gjhwnMOdw4qvgYu2j4qGCrrZ91QP7ug,9728
449
+ agno/tools/decorator.py,sha256=E6r_g4cOVEK-M0IvEb-RKzzceM6ngcjVwpa4Gc9FS_M,9986
448
450
  agno/tools/desi_vocal.py,sha256=dTeIaD7pabaZxPG9IxtQhaVpC6-A3-4hX4xIt5C7kA4,3661
449
451
  agno/tools/discord.py,sha256=KkqArWVMtwpqOC-fQ7MxHAOAL9-G0CqlXUWgjQNgvOA,5819
450
452
  agno/tools/docker.py,sha256=pk1OXW3Pk318gMoFjV2vXGA0HBc93DLs8eftOM7eRIQ,25837
@@ -460,7 +462,7 @@ agno/tools/file.py,sha256=8aqXQraboSseAj2nDJG-eTORYmTZ-I4uG5uRCGoOaf0,10285
460
462
  agno/tools/file_generation.py,sha256=OxJNeEGpqf_SxemvET54Gi_j6uT1xMWFbjbQOnTSdoY,14006
461
463
  agno/tools/financial_datasets.py,sha256=NiXwyiYIFCawI8rR7JLJNIfwoQlranUeCcABHKhLHfw,9190
462
464
  agno/tools/firecrawl.py,sha256=axrrM7bvXBRkX8cBUtfrJ_G0YIeSQWaCTDAgRuhFsNk,5623
463
- agno/tools/function.py,sha256=vNkdpBNcz5FivVEwv6430B235_E1zGCnyN-CEB_vXY0,49619
465
+ agno/tools/function.py,sha256=Il_UxQ1xHCqr9q24b8UfYjQbtdESE32nJDVSUzsSGyc,49809
464
466
  agno/tools/giphy.py,sha256=_wOCWVnMdFByE9Yoz4Pf2MoKxSjkUTiPJZ928_BNe2M,3070
465
467
  agno/tools/github.py,sha256=wct6P00YzF3zgWoV2c5aHeXX_2dgb9LqRwJAboi6QXw,70286
466
468
  agno/tools/gmail.py,sha256=m_7SY4oz2sP0RSJyNItZ_h5VeyI86J8840_p5Nz_2So,37073
@@ -522,6 +524,7 @@ agno/tools/toolkit.py,sha256=_F5b63jQJqVTspGlaZGqbCZMrfJCNc7buTPr9dL7g_8,17662
522
524
  agno/tools/trafilatura.py,sha256=AK2Q_0jqwOqL8-0neMI6ZjuUt-w0dGvW-w8zE6FrZVs,14792
523
525
  agno/tools/trello.py,sha256=y2fc60ITCIXBOk4TX3w70YjkMvM9SMKsEYpfXOKWtOI,8546
524
526
  agno/tools/twilio.py,sha256=XUbUhFJdLxP3nlNx2UdS9aHva-HSIGHD01cHHuE9Rfg,6752
527
+ agno/tools/unsplash.py,sha256=7nG1nAco2y1oBTnupqx4eD4usDi9LQoHiTI0VDISAFc,12253
525
528
  agno/tools/user_control_flow.py,sha256=uL2p7j4N8DR_t5Hby6p8gBar9Wj1RVgiZ5mKXynv0Oo,4494
526
529
  agno/tools/valyu.py,sha256=HBPYVthZRHK6BWsmkHCUz9iC9P8IgdBMrbAmGsp6nR0,8669
527
530
  agno/tools/visualization.py,sha256=YxAQHzOV0N8yZij-Kq2eIC92oQUeKx9k9pfgOcNDcnI,16683
@@ -586,7 +589,7 @@ agno/utils/pprint.py,sha256=aTg3gfib3x3VK3E-xJrbBeufK7s77ebTVtWoa2VrYgU,7762
586
589
  agno/utils/prompts.py,sha256=wQU85SshxCDvZL1vD4rMWSnLhjQZCLXBL3HpOpiirXo,5811
587
590
  agno/utils/reasoning.py,sha256=pW5y674EjJ-R6IX8Fyxi_vN9xLG5dLkujPpcfSVL1jU,3862
588
591
  agno/utils/remote.py,sha256=MG-Dj2nzTvdOZke3T-wr9UcsCMIJPcTS-BP8_FSv6gc,688
589
- agno/utils/response.py,sha256=6gfo3NdpzQJx0W8Ts4HBbJ69ZqqHuM7pJi1RD4zWF6g,6870
592
+ agno/utils/response.py,sha256=5g14L1P3lQ9G_jh90J9cmxFE5HMZLQbPSGYV1EbfP_E,7159
590
593
  agno/utils/response_iterator.py,sha256=MgtadrOuMcw2vJcVvhJdMKRzpVddhLWUIkGFbBz7ZCQ,379
591
594
  agno/utils/safe_formatter.py,sha256=zLrW6O-nGUZvXoDkZOTgVpjeUFTmMUj8pk3FLvW_XjM,809
592
595
  agno/utils/serialize.py,sha256=XvQA_KSkVd5qI1QuZwdQpCsl1IOKddFu52Jl6WQASqU,904
@@ -610,7 +613,7 @@ agno/utils/models/openai_responses.py,sha256=63f2UgDFCzrr6xQITrGtn42UUHQBcZFUUFM
610
613
  agno/utils/models/schema_utils.py,sha256=L6TkraMClI471H6xYy7V81lhHR4qQloVKCN0bF4Ajw0,5047
611
614
  agno/utils/models/watsonx.py,sha256=fe6jN0hBvOCQurqjS6_9PIwDHt-4kVod9qW236Zs6DU,1496
612
615
  agno/utils/print_response/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
613
- agno/utils/print_response/agent.py,sha256=Ej6l6sn4ZfwpPjF3JbEj6K88_pvJIiMwahLsXHK77RU,40263
616
+ agno/utils/print_response/agent.py,sha256=GCC0i-NRYaJzJ2x8B1igVaZ1-hDv_4E1GbvvgiG9xdc,40424
614
617
  agno/utils/print_response/team.py,sha256=8uu0mfqnkTEDR1Jo6AYNUWHXtQZPh1apkzVh7MQj5Kc,88763
615
618
  agno/utils/print_response/workflow.py,sha256=pZ7DmJaLx0FWNzXJCKs6gplmA0XAzqxZK1nLQNfePW0,78707
616
619
  agno/vectordb/__init__.py,sha256=P0QP9PUC4j2JtWIfYJX7LeC-oiPuh_QsUaOaP1ZY_dI,64
@@ -629,7 +632,7 @@ agno/vectordb/clickhouse/index.py,sha256=_YW-8AuEYy5kzOHi0zIzjngpQPgJOBdSrn9BfEL
629
632
  agno/vectordb/couchbase/__init__.py,sha256=dKZkcQLFN4r2_NIdXby4inzAAn4BDMlb9T2BW_i0_gQ,93
630
633
  agno/vectordb/couchbase/couchbase.py,sha256=SDyNQGq_wD5mkUIQGkYtR7AZCUxf7fIw50YmI0N1T5U,65636
631
634
  agno/vectordb/lancedb/__init__.py,sha256=tb9qvinKyWMTLjJYMwW_lhYHFvrfWTfHODtBfMj-NLE,111
632
- agno/vectordb/lancedb/lance_db.py,sha256=z603icggxlrCa8wNnDPRitVp-pkVGHbfLHOHmfrUhYs,39752
635
+ agno/vectordb/lancedb/lance_db.py,sha256=G1wCwM_oex78i2gUsRiHsL83qzmWacLQvYZ5933BWl8,41587
633
636
  agno/vectordb/langchaindb/__init__.py,sha256=BxGs6tcEKTiydbVJL3P5djlnafS5Bbgql3u1k6vhW2w,108
634
637
  agno/vectordb/langchaindb/langchaindb.py,sha256=AS-Jrh7gXKYkSHFiXKiD0kwL-FUFz10VbYksm8UEBAU,6391
635
638
  agno/vectordb/lightrag/__init__.py,sha256=fgQpA8pZW-jEHI91SZ_xgmROmv14oKdwCQZ8LpyipaE,84
@@ -669,9 +672,9 @@ agno/workflow/router.py,sha256=6rwsdOXNQX2OKcm_p2nOvqyWUnnVdiwRCZNnYwkQ7N8,31320
669
672
  agno/workflow/step.py,sha256=AgSugOOoWidfZGFtR5PMpgWSBpIeUQB45vmaUDnV7E8,77919
670
673
  agno/workflow/steps.py,sha256=p1RdyTZIKDYOPdxU7FbsX_vySWehPWaobge76Q_UDac,26462
671
674
  agno/workflow/types.py,sha256=t4304WCKB19QFdV3ixXZICcU8wtBza4EBCIz5Ve6MSQ,18035
672
- agno/workflow/workflow.py,sha256=7n29-5Xs4WFfbHnNwuTs2U7TW3QdT69mWARf0jbRj6M,218201
673
- agno-2.4.3.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
674
- agno-2.4.3.dist-info/METADATA,sha256=-bF2-AoKVtmOiJiZI1bz36DyXCY7dMJCc3KhPH1YtWQ,22239
675
- agno-2.4.3.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
676
- agno-2.4.3.dist-info/top_level.txt,sha256=MKyeuVesTyOKIXUhc-d_tPa2Hrh0oTA4LM0izowpx70,5
677
- agno-2.4.3.dist-info/RECORD,,
675
+ agno/workflow/workflow.py,sha256=S4Iwx3LtpeE_XZ61slYvnM90BdnGhW6Gfax_BKEUG68,218609
676
+ agno-2.4.4.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
677
+ agno-2.4.4.dist-info/METADATA,sha256=ULxZRXgBJ7i2ZJQ61KZSeaSt8ySYUX7DsY8YHs42ooQ,22229
678
+ agno-2.4.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
679
+ agno-2.4.4.dist-info/top_level.txt,sha256=MKyeuVesTyOKIXUhc-d_tPa2Hrh0oTA4LM0izowpx70,5
680
+ agno-2.4.4.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.10.1)
2
+ Generator: setuptools (80.10.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5