agno-1.7.11-py3-none-any.whl → agno-1.7.12-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/agent/agent.py CHANGED
@@ -5885,6 +5885,9 @@ class Agent:
  telemetry=self.telemetry,
  debug_mode=self.debug_mode,
  debug_level=self.debug_level,
+ session_state=self.session_state,
+ context=self.context,
+ extra_data=self.extra_data,
  )
  is_deepseek = is_deepseek_reasoning_model(reasoning_model)
  is_groq = is_groq_reasoning_model(reasoning_model)
@@ -5974,6 +5977,9 @@ class Agent:
  telemetry=self.telemetry,
  debug_mode=self.debug_mode,
  debug_level=self.debug_level,
+ session_state=self.session_state,
+ context=self.context,
+ extra_data=self.extra_data,
  )

  # Validate reasoning agent
@@ -6108,6 +6114,9 @@ class Agent:
  telemetry=self.telemetry,
  debug_mode=self.debug_mode,
  debug_level=self.debug_level,
+ session_state=self.session_state,
+ context=self.context,
+ extra_data=self.extra_data,
  )
  is_deepseek = is_deepseek_reasoning_model(reasoning_model)
  is_groq = is_groq_reasoning_model(reasoning_model)
@@ -6197,6 +6206,9 @@ class Agent:
  telemetry=self.telemetry,
  debug_mode=self.debug_mode,
  debug_level=self.debug_level,
+ session_state=self.session_state,
+ context=self.context,
+ extra_data=self.extra_data,
  )

  # Validate reasoning agent
@@ -7443,6 +7455,7 @@ class Agent:
  if citation.url # Only include citations with valid URLs
  )
  if md_content: # Only create panel if there are citations
+ md_content = md_content.strip()
  citations_panel = create_panel(
  content=Markdown(md_content),
  title="Citations",
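For context: the hunks above forward the parent agent's session_state, context, and extra_data into the default reasoning agent, so reasoning runs see the same session values as the main run. A minimal sketch of the user-facing effect (the model ids and state keys below are illustrative, not taken from this diff):

from agno.agent import Agent
from agno.models.openai import OpenAIChat

# Hypothetical setup: the internally spawned reasoning agent now inherits
# session_state/context from the parent agent instead of starting empty.
agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    reasoning_model=OpenAIChat(id="gpt-4o-mini"),
    session_state={"user_name": "Ava"},  # illustrative session state
    instructions="Address the user as {user_name}.",
    add_state_in_messages=True,
)
agent.print_response("Plan my week", stream=True)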
agno/embedder/google.py CHANGED
@@ -23,6 +23,10 @@ class GeminiEmbedder(Embedder):
  request_params: Optional[Dict[str, Any]] = None
  client_params: Optional[Dict[str, Any]] = None
  gemini_client: Optional[GeminiClient] = None
+ # Vertex AI parameters
+ vertexai: bool = False
+ project_id: Optional[str] = None
+ location: Optional[str] = None

  @property
  def client(self):
@@ -30,13 +34,21 @@ class GeminiEmbedder(Embedder):
  return self.gemini_client

  _client_params: Dict[str, Any] = {}
+ vertexai = self.vertexai or getenv("GOOGLE_GENAI_USE_VERTEXAI", "false").lower() == "true"

- self.api_key = self.api_key or getenv("GOOGLE_API_KEY")
- if not self.api_key:
- log_error("GOOGLE_API_KEY not set. Please set the GOOGLE_API_KEY environment variable.")
-
- if self.api_key:
+ if not vertexai:
+ self.api_key = self.api_key or getenv("GOOGLE_API_KEY")
+ if not self.api_key:
+ log_error("GOOGLE_API_KEY not set. Please set the GOOGLE_API_KEY environment variable.")
  _client_params["api_key"] = self.api_key
+ else:
+ log_info("Using Vertex AI API for embeddings")
+ _client_params["vertexai"] = True
+ _client_params["project"] = self.project_id or getenv("GOOGLE_CLOUD_PROJECT")
+ _client_params["location"] = self.location or getenv("GOOGLE_CLOUD_LOCATION")
+
+ _client_params = {k: v for k, v in _client_params.items() if v is not None}
+
  if self.client_params:
  _client_params.update(self.client_params)

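The embedder can now target Vertex AI instead of the Gemini API-key flow. A minimal sketch of the new path (project and location are placeholders and can also come from GOOGLE_CLOUD_PROJECT / GOOGLE_CLOUD_LOCATION; Vertex credentials are assumed to be configured via application default credentials):

from agno.embedder.google import GeminiEmbedder

embedder = GeminiEmbedder(
    vertexai=True,
    project_id="my-gcp-project",  # placeholder
    location="us-central1",       # placeholder
)
vector = embedder.get_embedding("The quick brown fox")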
agno/knowledge/gcs/pdf.py CHANGED
@@ -1,8 +1,9 @@
- from typing import AsyncIterator, Iterator, List
+ from typing import AsyncIterator, Iterator, List, Optional, Dict, Any

  from agno.document import Document
  from agno.document.reader.gcs.pdf_reader import GCSPDFReader
  from agno.knowledge.gcs.base import GCSKnowledgeBase
+ from agno.utils.log import log_debug, log_info


  class GCSPDFKnowledgeBase(GCSKnowledgeBase):
@@ -19,3 +20,106 @@ class GCSPDFKnowledgeBase(GCSKnowledgeBase):
  for blob in self.gcs_blobs:
  if blob.name.endswith(".pdf"):
  yield await self.reader.async_read(blob=blob)
+
+ def load(
+ self,
+ recreate: bool = False,
+ upsert: bool = False,
+ skip_existing: bool = True,
+ ) -> None:
+ """Load the knowledge base to the vector db
+ Args:
+ recreate (bool): If True, recreates the collection in the vector db. Defaults to False.
+ upsert (bool): If True, upserts documents to the vector db. Defaults to False.
+ skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
+ """
+ self._load_init(recreate, upsert)
+ if self.vector_db is None:
+ return
+
+ log_info("Loading knowledge base")
+ num_documents = 0
+ for document_list in self.document_lists:
+ documents_to_load = document_list
+
+ # Track metadata for filtering capabilities and collect metadata for filters
+ filters_metadata: Optional[Dict[str, Any]] = None
+ for doc in document_list:
+ if doc.meta_data:
+ self._track_metadata_structure(doc.meta_data)
+ # Use the first non-None metadata for filters
+ if filters_metadata is None:
+ filters_metadata = doc.meta_data
+
+ # Skip processing if no documents in this batch
+ if not documents_to_load:
+ log_debug("Skipping empty document batch")
+ continue
+
+ # Upsert documents if upsert is True and vector db supports upsert
+ if upsert and self.vector_db.upsert_available():
+ self.vector_db.upsert(documents=documents_to_load, filters=filters_metadata)
+ # Insert documents
+ else:
+ # Filter out documents which already exist in the vector db
+ if skip_existing:
+ log_debug("Filtering out existing documents before insertion.")
+ documents_to_load = self.filter_existing_documents(document_list)
+
+ if documents_to_load:
+ self.vector_db.insert(documents=documents_to_load, filters=filters_metadata)
+
+ num_documents += len(documents_to_load)
+ log_info(f"Added {num_documents} documents to knowledge base")
+
+ async def aload(
+ self,
+ recreate: bool = False,
+ upsert: bool = False,
+ skip_existing: bool = True,
+ ) -> None:
+ """Load the knowledge base to the vector db asynchronously
+ Args:
+ recreate (bool): If True, recreates the collection in the vector db. Defaults to False.
+ upsert (bool): If True, upserts documents to the vector db. Defaults to False.
+ skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
+ """
+ await self._aload_init(recreate, upsert)
+ if self.vector_db is None:
+ return
+
+ log_info("Loading knowledge base")
+ num_documents = 0
+ document_iterator = self.async_document_lists
+ async for document_list in document_iterator: # type: ignore
+ documents_to_load = document_list
+
+ # Track metadata for filtering capabilities and collect metadata for filters
+ filters_metadata: Optional[Dict[str, Any]] = None
+ for doc in document_list:
+ if doc.meta_data:
+ self._track_metadata_structure(doc.meta_data)
+ # Use the first non-None metadata for filters
+ if filters_metadata is None:
+ filters_metadata = doc.meta_data
+
+ # Skip processing if no documents in this batch
+ if not documents_to_load:
+ log_debug("Skipping empty document batch")
+ continue
+
+ # Upsert documents if upsert is True and vector db supports upsert
+ if upsert and self.vector_db.upsert_available():
+ await self.vector_db.async_upsert(documents=documents_to_load, filters=filters_metadata)
+ # Insert documents
+ else:
+ # Filter out documents which already exist in the vector db
+ if skip_existing:
+ log_debug("Filtering out existing documents before insertion.")
+ documents_to_load = await self.async_filter_existing_documents(document_list)
+
+ if documents_to_load:
+ await self.vector_db.async_insert(documents=documents_to_load, filters=filters_metadata)
+
+ num_documents += len(documents_to_load)
+ log_info(f"Added {num_documents} documents to knowledge base")
agno/models/google/gemini.py CHANGED
@@ -32,6 +32,7 @@ try:
  Part,
  ThinkingConfig,
  Tool,
+ UrlContext,
  )
  from google.genai.types import (
  File as GeminiFile,
@@ -68,6 +69,7 @@ class Gemini(Model):
  search: bool = False
  grounding: bool = False
  grounding_dynamic_threshold: Optional[float] = None
+ url_context: bool = False

  temperature: Optional[float] = None
  top_p: Optional[float] = None
@@ -115,7 +117,6 @@ class Gemini(Model):
  """
  if self.client:
  return self.client
-
  client_params: Dict[str, Any] = {}
  vertexai = self.vertexai or getenv("GOOGLE_GENAI_USE_VERTEXAI", "false").lower() == "true"

@@ -199,26 +200,34 @@ class Gemini(Model):
  if thinking_config_params:
  config["thinking_config"] = ThinkingConfig(**thinking_config_params)

- if self.grounding and self.search:
- log_info("Both grounding and search are enabled. Grounding will take precedence.")
- self.search = False
+ # Build tools array based on enabled built-in tools
+ builtin_tools = []

  if self.grounding:
- log_info("Grounding enabled. External tools will be disabled.")
- config["tools"] = [
+ log_info("Grounding enabled. This is a legacy tool. For Gemini 2.0+ Please use enable `search` flag instead.")
+ builtin_tools.append(
  Tool(
  google_search=GoogleSearchRetrieval(
  dynamic_retrieval_config=DynamicRetrievalConfig(
  dynamic_threshold=self.grounding_dynamic_threshold
  )
  )
- ),
- ]
+ )
+ )

- elif self.search:
- log_info("Search enabled. External tools will be disabled.")
- config["tools"] = [Tool(google_search=GoogleSearch())]
+ if self.search:
+ log_info("Google Search enabled.")
+ builtin_tools.append(Tool(google_search=GoogleSearch()))

+ if self.url_context:
+ log_info("URL context enabled.")
+ builtin_tools.append(Tool(url_context=UrlContext()))
+
+ # Set tools in config
+ if builtin_tools:
+ if tools:
+ log_info("Built-in tools enabled. External tools will be disabled.")
+ config["tools"] = builtin_tools
  elif tools:
  config["tools"] = [format_function_definitions(tools)]

@@ -388,7 +397,10 @@ class Gemini(Model):
  message_parts: List[Any] = []

  # Function calls
- if (not content or role == "model") and message.tool_calls is not None and len(message.tool_calls) > 0:
+ if role == "model" and message.tool_calls is not None and len(message.tool_calls) > 0:
+ if content is not None:
+ content_str = content if isinstance(content, str) else str(content)
+ message_parts.append(Part.from_text(text=content_str))
  for tool_call in message.tool_calls:
  message_parts.append(
  Part.from_function_call(
@@ -396,7 +408,7 @@ class Gemini(Model):
  args=json.loads(tool_call["function"]["arguments"]),
  )
  )
- # Function results
+ # Function call results
  elif message.tool_calls is not None and len(message.tool_calls) > 0:
  for tool_call in message.tool_calls:
  message_parts.append(
@@ -758,13 +770,15 @@ class Gemini(Model):

  model_response.tool_calls.append(tool_call)

+ citations = Citations()
+ citations_raw = {}
+ citations_urls = []
+
  if response.candidates and response.candidates[0].grounding_metadata is not None:
- citations = Citations()
  grounding_metadata = response.candidates[0].grounding_metadata.model_dump()
- citations.raw = grounding_metadata
+ citations_raw["grounding_metadata"] = grounding_metadata

- # Extract url and title
- chunks = grounding_metadata.pop("grounding_chunks", None) or []
+ chunks = grounding_metadata.get("grounding_chunks", [])
  citation_pairs = [
  (chunk.get("web", {}).get("uri"), chunk.get("web", {}).get("title"))
  for chunk in chunks
@@ -772,8 +786,31 @@ class Gemini(Model):
  ]

  # Create citation objects from filtered pairs
- citations.urls = [UrlCitation(url=url, title=title) for url, title in citation_pairs]
-
+ grounding_urls = [UrlCitation(url=url, title=title) for url, title in citation_pairs]
+ citations_urls.extend(grounding_urls)
+
+ # Handle URLs from URL context tool
+ if (
+ response.candidates
+ and hasattr(response.candidates[0], "url_context_metadata")
+ and response.candidates[0].url_context_metadata is not None
+ ):
+ url_context_metadata = response.candidates[0].url_context_metadata.model_dump()
+ citations_raw["url_context_metadata"] = url_context_metadata
+
+ url_metadata_list = url_context_metadata.get("url_metadata", [])
+ for url_meta in url_metadata_list:
+ retrieved_url = url_meta.get("retrieved_url")
+ status = url_meta.get("url_retrieval_status", "UNKNOWN")
+ if retrieved_url and status == "URL_RETRIEVAL_STATUS_SUCCESS":
+ # Avoid duplicate URLs
+ existing_urls = [citation.url for citation in citations_urls]
+ if retrieved_url not in existing_urls:
+ citations_urls.append(UrlCitation(url=retrieved_url, title=retrieved_url))
+
+ if citations_raw or citations_urls:
+ citations.raw = citations_raw if citations_raw else None
+ citations.urls = citations_urls if citations_urls else None
  model_response.citations = citations

  # Extract usage metadata if present
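The rework above collects Gemini's built-in tools into a single list, so search and the new url_context tool can be enabled together (grounding stays available as a legacy option), and URL-context citations are merged with grounding citations without duplicates. A sketch, with the model id as a placeholder:

from agno.agent import Agent
from agno.models.google import Gemini

agent = Agent(
    model=Gemini(
        id="gemini-2.0-flash",  # placeholder id
        search=True,            # built-in Google Search tool
        url_context=True,       # new: retrieve and cite user-provided URLs
    ),
)
agent.print_response("Summarize https://example.com/post", stream=True)
# Successful URL retrievals appear in the response citations alongside grounding URLs.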
agno/models/openai/chat.py CHANGED
@@ -1,7 +1,7 @@
  from collections.abc import AsyncIterator
  from dataclasses import dataclass
  from os import getenv
- from typing import Any, Dict, Iterator, List, Optional, Type, Union
+ from typing import Any, Dict, Iterator, List, Literal, Optional, Type, Union

  import httpx
  from pydantic import BaseModel
@@ -45,6 +45,7 @@ class OpenAIChat(Model):
  # Request parameters
  store: Optional[bool] = None
  reasoning_effort: Optional[str] = None
+ verbosity: Optional[Literal["low", "medium", "high"]] = None
  metadata: Optional[Dict[str, Any]] = None
  frequency_penalty: Optional[float] = None
  logit_bias: Optional[Any] = None
@@ -159,6 +160,7 @@ class OpenAIChat(Model):
  base_params = {
  "store": self.store,
  "reasoning_effort": self.reasoning_effort,
+ "verbosity": self.verbosity,
  "frequency_penalty": self.frequency_penalty,
  "logit_bias": self.logit_bias,
  "logprobs": self.logprobs,
@@ -227,6 +229,8 @@ class OpenAIChat(Model):
  model_dict.update(
  {
  "store": self.store,
+ "reasoning_effort": self.reasoning_effort,
+ "verbosity": self.verbosity,
  "frequency_penalty": self.frequency_penalty,
  "logit_bias": self.logit_bias,
  "logprobs": self.logprobs,
agno/models/openai/responses.py CHANGED
@@ -42,6 +42,8 @@ class OpenAIResponses(Model):
  metadata: Optional[Dict[str, Any]] = None
  parallel_tool_calls: Optional[bool] = None
  reasoning: Optional[Dict[str, Any]] = None
+ verbosity: Optional[Literal["low", "medium", "high"]] = None
+ reasoning_effort: Optional[Literal["minimal", "medium", "high"]] = None
  store: Optional[bool] = None
  temperature: Optional[float] = None
  top_p: Optional[float] = None
@@ -176,7 +178,6 @@ class OpenAIResponses(Model):
  "max_tool_calls": self.max_tool_calls,
  "metadata": self.metadata,
  "parallel_tool_calls": self.parallel_tool_calls,
- "reasoning": self.reasoning,
  "store": self.store,
  "temperature": self.temperature,
  "top_p": self.top_p,
@@ -184,21 +185,37 @@ class OpenAIResponses(Model):
  "user": self.user,
  "service_tier": self.service_tier,
  }
+
+ # Handle reasoning parameter - convert reasoning_effort to reasoning format
+ if self.reasoning is not None:
+ base_params["reasoning"] = self.reasoning
+ elif self.reasoning_effort is not None:
+ base_params["reasoning"] = {"effort": self.reasoning_effort}
+
+ # Build text parameter
+ text_params: Dict[str, Any] = {}
+
+ # Add verbosity if specified
+ if self.verbosity is not None:
+ text_params["verbosity"] = self.verbosity
+
  # Set the response format
  if response_format is not None:
  if isinstance(response_format, type) and issubclass(response_format, BaseModel):
  schema = get_response_schema_for_provider(response_format, "openai")
- base_params["text"] = {
- "format": {
- "type": "json_schema",
- "name": response_format.__name__,
- "schema": schema,
- "strict": True,
- }
+ text_params["format"] = {
+ "type": "json_schema",
+ "name": response_format.__name__,
+ "schema": schema,
+ "strict": True,
  }
  else:
  # JSON mode
- base_params["text"] = {"format": {"type": "json_object"}}
+ text_params["format"] = {"type": "json_object"}
+
+ # Add text parameter if there are any text-level params
+ if text_params:
+ base_params["text"] = text_params

  # Filter out None values
  request_params: Dict[str, Any] = {k: v for k, v in base_params.items() if v is not None}
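On the Responses API, reasoning_effort is folded into the reasoning object and verbosity into the text object, next to any structured-output format. A sketch:

from agno.models.openai import OpenAIResponses

model = OpenAIResponses(
    id="gpt-5",                  # placeholder id
    reasoning_effort="minimal",  # sent as reasoning={"effort": "minimal"}
    verbosity="low",             # sent as text={"verbosity": "low"}
)
# An explicit reasoning={...} dict still takes precedence over reasoning_effort.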
agno/reasoning/default.py CHANGED
@@ -1,7 +1,7 @@
  from __future__ import annotations

  from textwrap import dedent
- from typing import Callable, Dict, List, Literal, Optional, Union
+ from typing import Any, Callable, Dict, List, Literal, Optional, Union

  from agno.models.base import Model
  from agno.reasoning.step import ReasoningSteps
@@ -19,6 +19,9 @@ def get_default_reasoning_agent(
  telemetry: bool = True,
  debug_mode: bool = False,
  debug_level: Literal[1, 2] = 1,
+ session_state: Optional[Dict[str, Any]] = None,
+ context: Optional[Dict[str, Any]] = None,
+ extra_data: Optional[Dict[str, Any]] = None,
  ) -> Optional["Agent"]: # type: ignore # noqa: F821
  from agno.agent import Agent

@@ -87,6 +90,9 @@ def get_default_reasoning_agent(
  telemetry=telemetry,
  debug_mode=debug_mode,
  debug_level=debug_level,
+ session_state=session_state,
+ context=context,
+ extra_data=extra_data,
  )

  agent.model.show_tool_calls = False # type: ignore
agno/reasoning/helpers.py CHANGED
@@ -1,4 +1,4 @@
- from typing import List, Literal
+ from typing import Any, Dict, List, Literal, Optional

  from agno.models.base import Model
  from agno.models.message import Message
@@ -13,6 +13,9 @@ def get_reasoning_agent(
  telemetry: bool = False,
  debug_mode: bool = False,
  debug_level: Literal[1, 2] = 1,
+ session_state: Optional[Dict[str, Any]] = None,
+ context: Optional[Dict[str, Any]] = None,
+ extra_data: Optional[Dict[str, Any]] = None,
  ) -> "Agent": # type: ignore # noqa: F821
  from agno.agent import Agent

@@ -22,6 +25,9 @@ def get_reasoning_agent(
  telemetry=telemetry,
  debug_mode=debug_mode,
  debug_level=debug_level,
+ session_state=session_state,
+ context=context,
+ extra_data=extra_data,
  )

agno/storage/dynamodb.py CHANGED
@@ -30,19 +30,23 @@ class DynamoDbStorage(Storage):
  endpoint_url: Optional[str] = None,
  create_table_if_not_exists: bool = True,
  mode: Optional[Literal["agent", "team", "workflow", "workflow_v2"]] = "agent",
+ create_table_read_capacity_units: int = 5,
+ create_table_write_capacity_units: int = 5,
  ):
  """
  Initialize the DynamoDbStorage.

  Args:
  table_name (str): The name of the DynamoDB table.
- region_name (Optional[str]): AWS region name.
  profile_name (Optional[str]): AWS profile name to use for credentials.
+ region_name (Optional[str]): AWS region name.
  aws_access_key_id (Optional[str]): AWS access key ID.
  aws_secret_access_key (Optional[str]): AWS secret access key.
  endpoint_url (Optional[str]): The complete URL to use for the constructed client.
  create_table_if_not_exists (bool): Whether to create the table if it does not exist.
  mode (Optional[Literal["agent", "team", "workflow", "workflow_v2"]]): The mode of the storage.
+ create_table_read_capacity_units Optional[int]: Read capacity units for created table (default: 5).
+ create_table_write_capacity_units Optional[int]: Write capacity units for created table (default: 5).
  """
  super().__init__(mode)
  self.table_name = table_name
@@ -52,6 +56,8 @@ class DynamoDbStorage(Storage):
  self.aws_access_key_id = aws_access_key_id
  self.aws_secret_access_key = aws_secret_access_key
  self.create_table_if_not_exists = create_table_if_not_exists
+ self.create_table_read_capacity_units = create_table_read_capacity_units
+ self.create_table_write_capacity_units = create_table_write_capacity_units

  # Create session using profile name if provided
  if self.profile_name:
@@ -96,6 +102,11 @@ class DynamoDbStorage(Storage):
  """
  Create the DynamoDB table if it does not exist.
  """
+ provisioned_throughput = {
+ "ReadCapacityUnits": self.create_table_read_capacity_units,
+ "WriteCapacityUnits": self.create_table_write_capacity_units,
+ }
+
  try:
  # Check if table exists
  self.dynamodb.meta.client.describe_table(TableName=self.table_name)
@@ -141,10 +152,7 @@ class DynamoDbStorage(Storage):
  {"AttributeName": "created_at", "KeyType": "RANGE"},
  ],
  "Projection": {"ProjectionType": "ALL"},
- "ProvisionedThroughput": {
- "ReadCapacityUnits": 5,
- "WriteCapacityUnits": 5,
- },
+ "ProvisionedThroughput": provisioned_throughput,
  }
  ]
  if self.mode == "agent":
@@ -156,10 +164,7 @@ class DynamoDbStorage(Storage):
  {"AttributeName": "created_at", "KeyType": "RANGE"},
  ],
  "Projection": {"ProjectionType": "ALL"},
- "ProvisionedThroughput": {
- "ReadCapacityUnits": 5,
- "WriteCapacityUnits": 5,
- },
+ "ProvisionedThroughput": provisioned_throughput,
  }
  )
  elif self.mode == "team":
@@ -171,10 +176,7 @@ class DynamoDbStorage(Storage):
  {"AttributeName": "created_at", "KeyType": "RANGE"},
  ],
  "Projection": {"ProjectionType": "ALL"},
- "ProvisionedThroughput": {
- "ReadCapacityUnits": 5,
- "WriteCapacityUnits": 5,
- },
+ "ProvisionedThroughput": provisioned_throughput,
  }
  )
  elif self.mode == "workflow":
@@ -186,10 +188,7 @@ class DynamoDbStorage(Storage):
  {"AttributeName": "created_at", "KeyType": "RANGE"},
  ],
  "Projection": {"ProjectionType": "ALL"},
- "ProvisionedThroughput": {
- "ReadCapacityUnits": 5,
- "WriteCapacityUnits": 5,
- },
+ "ProvisionedThroughput": provisioned_throughput,
  }
  )
  elif self.mode == "workflow_v2":
@@ -201,10 +200,7 @@ class DynamoDbStorage(Storage):
  {"AttributeName": "created_at", "KeyType": "RANGE"},
  ],
  "Projection": {"ProjectionType": "ALL"},
- "ProvisionedThroughput": {
- "ReadCapacityUnits": 5,
- "WriteCapacityUnits": 5,
- },
+ "ProvisionedThroughput": provisioned_throughput,
  }
  )
  # Create the table
@@ -213,7 +209,7 @@ class DynamoDbStorage(Storage):
  KeySchema=[{"AttributeName": "session_id", "KeyType": "HASH"}],
  AttributeDefinitions=attribute_definitions,
  GlobalSecondaryIndexes=secondary_indexes,
- ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
+ ProvisionedThroughput=provisioned_throughput,
  )
  # Wait until the table exists.
  self.table.wait_until_exists()
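Read and write capacity for auto-created tables (and their GSIs) is now configurable instead of hard-coded to 5/5. A sketch; the table name and region are placeholders:

from agno.storage.dynamodb import DynamoDbStorage

storage = DynamoDbStorage(
    table_name="agent_sessions",          # placeholder
    region_name="us-east-1",              # placeholder
    create_table_if_not_exists=True,
    create_table_read_capacity_units=10,
    create_table_write_capacity_units=10,
)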
agno/team/team.py CHANGED
@@ -1,4 +1,5 @@
  import asyncio
+ import contextlib
  import json
  from collections import ChainMap, defaultdict, deque
  from copy import deepcopy
@@ -4503,7 +4504,11 @@ class Team:
  from agno.reasoning.openai import is_openai_reasoning_model

  reasoning_agent = self.reasoning_agent or get_reasoning_agent(
- reasoning_model=reasoning_model, monitoring=self.monitoring
+ reasoning_model=reasoning_model,
+ monitoring=self.monitoring,
+ session_state=self.session_state,
+ context=self.context,
+ extra_data=self.extra_data,
  )
  is_deepseek = is_deepseek_reasoning_model(reasoning_model)
  is_groq = is_groq_reasoning_model(reasoning_model)
@@ -4596,6 +4601,9 @@ class Team:
  debug_mode=self.debug_mode,
  debug_level=self.debug_level,
  use_json_mode=use_json_mode,
+ session_state=self.session_state,
+ context=self.context,
+ extra_data=self.extra_data,
  )

  # Validate reasoning agent
@@ -4726,7 +4734,11 @@ class Team:
  from agno.reasoning.openai import is_openai_reasoning_model

  reasoning_agent = self.reasoning_agent or get_reasoning_agent(
- reasoning_model=reasoning_model, monitoring=self.monitoring
+ reasoning_model=reasoning_model,
+ monitoring=self.monitoring,
+ session_state=self.session_state,
+ context=self.context,
+ extra_data=self.extra_data,
  )
  is_deepseek = is_deepseek_reasoning_model(reasoning_model)
  is_groq = is_groq_reasoning_model(reasoning_model)
@@ -4817,6 +4829,9 @@ class Team:
  debug_mode=self.debug_mode,
  debug_level=self.debug_level,
  use_json_mode=use_json_mode,
+ session_state=self.session_state,
+ context=self.context,
+ extra_data=self.extra_data,
  )

  # Validate reasoning agent
@@ -6223,7 +6238,7 @@ class Team:

  async def arun_member_agents(
  task_description: str, expected_output: Optional[str] = None
- ) -> AsyncIterator[str]:
+ ) -> AsyncIterator[Union[RunResponseEvent, TeamRunResponseEvent, str]]:
  """
  Send the same task to all the member agents and return the responses.

@@ -6243,92 +6258,177 @@ class Team:
  session_id, images, videos, audio
  )

- # Create tasks for all member agents
- tasks = []
- for member_agent_index, member_agent in enumerate(self.members):
- # We cannot stream responses with async gather
- current_agent = member_agent # Create a reference to the current agent
- current_index = member_agent_index # Create a reference to the current index
- self._initialize_member(current_agent, session_id=session_id)
+ if stream:
+ # Concurrent streaming: launch each member as a streaming worker and merge events
+ done_marker = object()
+ queue: "asyncio.Queue[Union[RunResponseEvent, TeamRunResponseEvent, str, object]]" = asyncio.Queue()

- # Don't override the expected output of a member agent
- if current_agent.expected_output is not None:
- expected_output = None
+ async def stream_member(agent: Union[Agent, "Team"], idx: int) -> None:
+ # Compute expected output per agent (do not mutate shared var)
+ local_expected_output = None if agent.expected_output is not None else expected_output

- member_agent_task = self._format_member_agent_task(
- task_description, expected_output, team_context_str, team_member_interactions_str
- )
+ member_agent_task = self._format_member_agent_task(
+ task_description, local_expected_output, team_context_str, team_member_interactions_str
+ )

- async def run_member_agent(agent=current_agent, idx=current_index) -> str:
- response = await agent.arun(
+ # Stream events from the member
+ member_stream = await agent.arun(
  member_agent_task,
  user_id=user_id,
- # All members have the same session_id
  session_id=session_id,
  images=images,
  videos=videos,
  audio=audio,
  files=files,
- stream=False,
+ stream=True,
+ stream_intermediate_steps=stream_intermediate_steps,
  refresh_session_before_write=True,
  )
- check_if_run_cancelled(response)

- member_name = agent.name if agent.name else f"agent_{idx}"
- self.memory = cast(TeamMemory, self.memory)
- if isinstance(self.memory, TeamMemory):
- self.memory = cast(TeamMemory, self.memory)
- self.memory.add_interaction_to_team_context(
- member_name=member_name, task=task_description, run_response=agent.run_response
- )
- else:
- self.memory = cast(Memory, self.memory)
- self.memory.add_interaction_to_team_context(
+ try:
+ async for event in member_stream:
+ check_if_run_cancelled(event)
+ await queue.put(event)
+ finally:
+ # After the stream completes, update memory and team state
+ member_name = agent.name if agent.name else f"agent_{idx}"
+ if isinstance(self.memory, TeamMemory):
+ self.memory = cast(TeamMemory, self.memory)
+ self.memory.add_interaction_to_team_context(
+ member_name=member_name,
+ task=task_description,
+ run_response=agent.run_response, # type: ignore
+ )
+ else:
+ self.memory = cast(Memory, self.memory)
+ self.memory.add_interaction_to_team_context(
+ session_id=session_id,
+ member_name=member_name,
+ task=task_description,
+ run_response=agent.run_response, # type: ignore
+ )
+
+ # Add the member run to the team run response
+ self.run_response = cast(TeamRunResponse, self.run_response)
+ self.run_response.add_member_run(agent.run_response) # type: ignore
+
+ # Update team session/workflow state and media
+ self._update_team_session_state(agent)
+ self._update_workflow_session_state(agent)
+ self._update_team_media(agent.run_response) # type: ignore
+
+ # Signal completion for this member
+ await queue.put(done_marker)
+
+ # Initialize and launch all members
+ tasks: List[asyncio.Task[None]] = []
+ for member_agent_index, member_agent in enumerate(self.members):
+ current_agent = member_agent
+ current_index = member_agent_index
+ self._initialize_member(current_agent, session_id=session_id)
+ tasks.append(asyncio.create_task(stream_member(current_agent, current_index)))
+
+ # Drain queue until all members reported done
+ completed = 0
+ try:
+ while completed < len(tasks):
+ item = await queue.get()
+ if item is done_marker:
+ completed += 1
+ else:
+ yield item # type: ignore
+ finally:
+ # Ensure tasks do not leak on cancellation
+ for t in tasks:
+ if not t.done():
+ t.cancel()
+ # Await cancellation to suppress warnings
+ for t in tasks:
+ with contextlib.suppress(Exception):
+ await t
+ else:
+ # Non-streaming concurrent run of members; collect results when done
+ tasks = []
+ for member_agent_index, member_agent in enumerate(self.members):
+ current_agent = member_agent
+ current_index = member_agent_index
+ self._initialize_member(current_agent, session_id=session_id)
+
+ # Don't override the expected output of a member agent
+ if current_agent.expected_output is not None:
+ expected_output = None
+
+ member_agent_task = self._format_member_agent_task(
+ task_description, expected_output, team_context_str, team_member_interactions_str
+ )
+
+ async def run_member_agent(agent=current_agent, idx=current_index) -> str:
+ response = await agent.arun(
+ member_agent_task,
+ user_id=user_id,
+ # All members have the same session_id
  session_id=session_id,
- member_name=member_name,
- task=task_description,
- run_response=agent.run_response,
+ images=images,
+ videos=videos,
+ audio=audio,
+ files=files,
+ stream=False,
+ refresh_session_before_write=True,
  )
+ check_if_run_cancelled(response)
+
+ member_name = agent.name if agent.name else f"agent_{idx}"
+ self.memory = cast(TeamMemory, self.memory)
+ if isinstance(self.memory, TeamMemory):
+ self.memory = cast(TeamMemory, self.memory)
+ self.memory.add_interaction_to_team_context(
+ member_name=member_name, task=task_description, run_response=agent.run_response
+ )
+ else:
+ self.memory = cast(Memory, self.memory)
+ self.memory.add_interaction_to_team_context(
+ session_id=session_id,
+ member_name=member_name,
+ task=task_description,
+ run_response=agent.run_response,
+ )

- # Add the member run to the team run response
- self.run_response = cast(TeamRunResponse, self.run_response)
- self.run_response.add_member_run(agent.run_response)
+ # Add the member run to the team run response
+ self.run_response = cast(TeamRunResponse, self.run_response)
+ self.run_response.add_member_run(agent.run_response)

- # Update team session state
- self._update_team_session_state(current_agent)
+ # Update team session state
+ self._update_team_session_state(current_agent)

- self._update_workflow_session_state(current_agent)
+ self._update_workflow_session_state(current_agent)

- # Update the team media
- self._update_team_media(agent.run_response)
+ # Update the team media
+ self._update_team_media(agent.run_response)

- try:
- if response.content is None and (response.tools is None or len(response.tools) == 0):
- return f"Agent {member_name}: No response from the member agent."
- elif isinstance(response.content, str):
- if len(response.content.strip()) > 0:
- return f"Agent {member_name}: {response.content}"
- elif response.tools is not None and len(response.tools) > 0:
- return (
- f"Agent {member_name}: {','.join([tool.get('content') for tool in response.tools])}"
- )
- elif issubclass(type(response.content), BaseModel):
- return f"Agent {member_name}: {response.content.model_dump_json(indent=2)}" # type: ignore
- else:
- import json
+ try:
+ if response.content is None and (response.tools is None or len(response.tools) == 0):
+ return f"Agent {member_name}: No response from the member agent."
+ elif isinstance(response.content, str):
+ if len(response.content.strip()) > 0:
+ return f"Agent {member_name}: {response.content}"
+ elif response.tools is not None and len(response.tools) > 0:
+ return f"Agent {member_name}: {','.join([tool.get('content') for tool in response.tools])}"
+ elif issubclass(type(response.content), BaseModel):
+ return f"Agent {member_name}: {response.content.model_dump_json(indent=2)}" # type: ignore
+ else:
+ import json

- return f"Agent {member_name}: {json.dumps(response.content, indent=2)}"
- except Exception as e:
- return f"Agent {member_name}: Error - {str(e)}"
+ return f"Agent {member_name}: {json.dumps(response.content, indent=2)}"
+ except Exception as e:
+ return f"Agent {member_name}: Error - {str(e)}"

- return f"Agent {member_name}: No Response"
+ return f"Agent {member_name}: No Response"

- tasks.append(run_member_agent)
+ tasks.append(run_member_agent) # type: ignore

- # Need to collect and process yielded values from each task
- results = await asyncio.gather(*[task() for task in tasks])
- for result in results:
- yield result
+ results = await asyncio.gather(*[task() for task in tasks]) # type: ignore
+ for result in results:
+ yield result

  # Afterward, switch back to the team logger
  use_team_logger()
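With this change, arun_member_agents streams member events live through an asyncio.Queue when stream=True, instead of only gathering final strings. A consumer-side sketch, under the assumption that Team.arun(stream=True) yields events the same way Agent.arun does in the diff above; the team composition and model ids are illustrative:

import asyncio

from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.team import Team

team = Team(
    members=[
        Agent(name="Researcher", model=OpenAIChat(id="gpt-4o-mini")),
        Agent(name="Writer", model=OpenAIChat(id="gpt-4o-mini")),
    ],
    model=OpenAIChat(id="gpt-4o"),
)

async def main() -> None:
    # Events from concurrently running members arrive interleaved, as emitted.
    async for event in await team.arun("Draft a short brief on topic X", stream=True):
        print(type(event).__name__)

asyncio.run(main())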
agno/tools/bravesearch.py CHANGED
@@ -49,9 +49,9 @@ class BraveSearchTools(Toolkit):
  def brave_search(
  self,
  query: str,
- max_results: Optional[int] = None,
- country: Optional[str] = None,
- search_lang: Optional[str] = None,
+ max_results: int = 5,
+ country: str = "US",
+ search_lang: str = "en",
  ) -> str:
  """
  Search Brave for the specified query and return the results.
@@ -64,8 +64,8 @@ class BraveSearchTools(Toolkit):
  Returns:
  str: A JSON formatted string containing the search results.
  """
- max_results = self.fixed_max_results or max_results
- search_lang = self.fixed_language or search_lang
+ final_max_results = self.fixed_max_results if self.fixed_max_results is not None else max_results
+ final_search_lang = self.fixed_language if self.fixed_language is not None else search_lang

  if not query:
  return json.dumps({"error": "Please provide a query to search for"})
@@ -74,9 +74,9 @@ class BraveSearchTools(Toolkit):

  search_params = {
  "q": query,
- "count": max_results,
+ "count": final_max_results,
  "country": country,
- "search_lang": search_lang,
+ "search_lang": final_search_lang,
  "result_filter": "web",
  }

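brave_search now has concrete defaults (5 results, country "US", language "en"), and constructor-level fixed_max_results / fixed_language only override a call when they are explicitly set. A sketch, assuming the API key comes from the environment:

from agno.tools.bravesearch import BraveSearchTools

tools = BraveSearchTools()  # assumes BRAVE_API_KEY is set in the environment
print(tools.brave_search("agno framework"))  # uses the new 5 / "US" / "en" defaults
print(tools.brave_search("agno framework", max_results=3, country="DE", search_lang="de"))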
agno/tools/calculator.py CHANGED
@@ -3,7 +3,7 @@ import math
  from typing import Callable, List

  from agno.tools import Toolkit
- from agno.utils.log import log_info, logger
+ from agno.utils.log import log_debug, logger


  class CalculatorTools(Toolkit):
@@ -53,7 +53,7 @@ class CalculatorTools(Toolkit):
  str: JSON string of the result.
  """
  result = a + b
- log_info(f"Adding {a} and {b} to get {result}")
+ log_debug(f"Adding {a} and {b} to get {result}")
  return json.dumps({"operation": "addition", "result": result})

  def subtract(self, a: float, b: float) -> str:
@@ -67,7 +67,7 @@ class CalculatorTools(Toolkit):
  str: JSON string of the result.
  """
  result = a - b
- log_info(f"Subtracting {b} from {a} to get {result}")
+ log_debug(f"Subtracting {b} from {a} to get {result}")
  return json.dumps({"operation": "subtraction", "result": result})

  def multiply(self, a: float, b: float) -> str:
@@ -81,7 +81,7 @@ class CalculatorTools(Toolkit):
  str: JSON string of the result.
  """
  result = a * b
- log_info(f"Multiplying {a} and {b} to get {result}")
+ log_debug(f"Multiplying {a} and {b} to get {result}")
  return json.dumps({"operation": "multiplication", "result": result})

  def divide(self, a: float, b: float) -> str:
@@ -101,7 +101,7 @@ class CalculatorTools(Toolkit):
  result = a / b
  except Exception as e:
  return json.dumps({"operation": "division", "error": str(e), "result": "Error"})
- log_info(f"Dividing {a} by {b} to get {result}")
+ log_debug(f"Dividing {a} by {b} to get {result}")
  return json.dumps({"operation": "division", "result": result})

  def exponentiate(self, a: float, b: float) -> str:
@@ -115,7 +115,7 @@ class CalculatorTools(Toolkit):
  str: JSON string of the result.
  """
  result = math.pow(a, b)
- log_info(f"Raising {a} to the power of {b} to get {result}")
+ log_debug(f"Raising {a} to the power of {b} to get {result}")
  return json.dumps({"operation": "exponentiation", "result": result})

  def factorial(self, n: int) -> str:
@@ -131,7 +131,7 @@ class CalculatorTools(Toolkit):
  logger.error("Attempt to calculate factorial of a negative number")
  return json.dumps({"operation": "factorial", "error": "Factorial of a negative number is undefined"})
  result = math.factorial(n)
- log_info(f"Calculating factorial of {n} to get {result}")
+ log_debug(f"Calculating factorial of {n} to get {result}")
  return json.dumps({"operation": "factorial", "result": result})

  def is_prime(self, n: int) -> str:
@@ -164,5 +164,5 @@ class CalculatorTools(Toolkit):
  return json.dumps({"operation": "square_root", "error": "Square root of a negative number is undefined"})

  result = math.sqrt(n)
- log_info(f"Calculating square root of {n} to get {result}")
+ log_debug(f"Calculating square root of {n} to get {result}")
  return json.dumps({"operation": "square_root", "result": result})
agno/tools/discord.py CHANGED
@@ -51,12 +51,12 @@ class DiscordTools(Toolkit):
  response.raise_for_status()
  return response.json() if response.text else {}

- def send_message(self, channel_id: int, message: str) -> str:
+ def send_message(self, channel_id: str, message: str) -> str:
  """
  Send a message to a Discord channel.

  Args:
- channel_id (int): The ID of the channel to send the message to.
+ channel_id (str): The ID of the channel to send the message to.
  message (str): The text of the message to send.

  Returns:
@@ -70,12 +70,12 @@ class DiscordTools(Toolkit):
  logger.error(f"Error sending message: {e}")
  return f"Error sending message: {str(e)}"

- def get_channel_info(self, channel_id: int) -> str:
+ def get_channel_info(self, channel_id: str) -> str:
  """
  Get information about a Discord channel.

  Args:
- channel_id (int): The ID of the channel to get information about.
+ channel_id (str): The ID of the channel to get information about.

  Returns:
  str: A JSON string containing the channel information.
@@ -87,12 +87,12 @@ class DiscordTools(Toolkit):
  logger.error(f"Error getting channel info: {e}")
  return f"Error getting channel info: {str(e)}"

- def list_channels(self, guild_id: int) -> str:
+ def list_channels(self, guild_id: str) -> str:
  """
  List all channels in a Discord server.

  Args:
- guild_id (int): The ID of the server to list channels from.
+ guild_id (str): The ID of the server to list channels from.

  Returns:
  str: A JSON string containing the list of channels.
@@ -104,12 +104,12 @@ class DiscordTools(Toolkit):
  logger.error(f"Error listing channels: {e}")
  return f"Error listing channels: {str(e)}"

- def get_channel_messages(self, channel_id: int, limit: int = 100) -> str:
+ def get_channel_messages(self, channel_id: str, limit: int = 100) -> str:
  """
  Get the message history of a Discord channel.

  Args:
- channel_id (int): The ID of the channel to fetch messages from.
+ channel_id (str): The ID of the channel to fetch messages from.
  limit (int): The maximum number of messages to fetch. Defaults to 100.

  Returns:
@@ -122,13 +122,13 @@ class DiscordTools(Toolkit):
  logger.error(f"Error getting messages: {e}")
  return f"Error getting messages: {str(e)}"

- def delete_message(self, channel_id: int, message_id: int) -> str:
+ def delete_message(self, channel_id: str, message_id: str) -> str:
  """
  Delete a message from a Discord channel.

  Args:
- channel_id (int): The ID of the channel containing the message.
- message_id (int): The ID of the message to delete.
+ channel_id (str): The ID of the channel containing the message.
+ message_id (str): The ID of the message to delete.

  Returns:
  str: A success message or error message.
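Channel, guild, and message IDs are now strings, which matches Discord's snowflake IDs and avoids precision issues when IDs round-trip through tool calls. A sketch with placeholder token and IDs (the bot_token parameter name is assumed, not shown in this diff):

from agno.tools.discord import DiscordTools

discord = DiscordTools(bot_token="YOUR_BOT_TOKEN")  # placeholder token
print(discord.send_message(channel_id="1234567890123456789", message="agno 1.7.12 is out"))
print(discord.get_channel_messages(channel_id="1234567890123456789", limit=10))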
agno/tools/youtube.py CHANGED
@@ -126,18 +126,19 @@ class YouTubeTools(Toolkit):
  return "Error getting video ID from URL, please provide a valid YouTube url"

  try:
- captions = None
- kwargs: Dict = {}
- if self.languages:
- kwargs["languages"] = self.languages or ["en"]
- if self.proxies:
- kwargs["proxies"] = self.proxies
- captions = YouTubeTranscriptApi.get_transcript(video_id, **kwargs)
- # log_debug(f"Captions for video {video_id}: {captions}")
- if captions:
- return " ".join(line["text"] for line in captions)
- return "No captions found for video"
+ ytt_api = YouTubeTranscriptApi()
+ captions_data = ytt_api.fetch(video_id)
+
+ # log_info(f"Captions for video {video_id}: {captions_data}")
+
+ transcript_text = ""
+
+ for segment in captions_data:
+ transcript_text += f"{segment.text} "
+
+ return transcript_text.strip() if transcript_text else "No captions found for video"
  except Exception as e:
+ # log_info(f"Error getting captions for video {video_id}: {e}")
  return f"Error getting captions for video: {e}"

  def get_video_timestamps(self, url: str) -> str:
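The new caption path relies on the instance-based fetch() API of youtube-transcript-api 1.x, which returns snippet objects exposing a .text attribute; note the old languages/proxies kwargs are no longer forwarded here. A standalone sketch of that API with a placeholder video id:

from youtube_transcript_api import YouTubeTranscriptApi

ytt_api = YouTubeTranscriptApi()
segments = ytt_api.fetch("dQw4w9WgXcQ")  # placeholder video id
text = " ".join(segment.text for segment in segments).strip()
print(text[:200] if text else "No captions found for video")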
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agno
- Version: 1.7.11
+ Version: 1.7.12
  Summary: Agno: a lightweight library for building Multi-Agent Systems
  Author-email: Ashpreet Bedi <ashpreet@agno.com>
  License: Copyright (c) Agno, Inc.
@@ -5,7 +5,7 @@ agno/exceptions.py,sha256=HWuuNFS5J0l1RYJsdUrSx51M22aFEoh9ltoeonXBoBw,2891
  agno/media.py,sha256=Lw4MXcMiwf_ynA1_ns7exR3gCmalUXRcvB0-17wt5OE,12823
  agno/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/agent/__init__.py,sha256=Ai6GVyw-0rkA2eYAfoEQIvbi_mrWQUxuPFaFbSDJYCQ,1306
- agno/agent/agent.py,sha256=9qp01o6lX72-fflCY74J7XIeFpz4jm7mnvpvF6m7n_0,381409
+ agno/agent/agent.py,sha256=Ely8Zz-h0LQwiv8ifOHfFBJsGDetVDAJ_nSRoXK4xLA,382017
  agno/agent/metrics.py,sha256=Lf7JYgPPdqRCyPfCDVUjnmUZ1SkWXrJClL80aW2ffEw,4379
  agno/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/api/agent.py,sha256=J-Y4HI-J0Bu6r9gxRYCM3U7SnVBGwLIouDy806KSIIw,2821
@@ -109,7 +109,7 @@ agno/embedder/base.py,sha256=z935B7YFp0lbbG7tLnY0zuLLk9WMR57ASf5vyjTPFME,405
  agno/embedder/cohere.py,sha256=aW-Aq5wNG8IgCVlKpB6poSgy9NMVHUvnTh0y-e5_xAk,3007
  agno/embedder/fastembed.py,sha256=YyHvdKOJFSxMxAx5OIfCqU9RKsQddUFHHBdCh4Q7O78,1361
  agno/embedder/fireworks.py,sha256=fdctBT34yYbfHb49yPHaPttEOvz4XE0u_kTHe9ifhnU,377
- agno/embedder/google.py,sha256=Qw4WSzrlUGpAcyy_0MeLv2f9OuQd1NgEleRCOGTOsBc,3586
+ agno/embedder/google.py,sha256=45wo5XnRYjV-i912-V66rp8GXH_ij3eKnb7Gy2iKd68,4207
  agno/embedder/huggingface.py,sha256=3oyUdRM-4haD_gtbqcuPXzcHFh7s__KLxpVZX6UZq3c,1913
  agno/embedder/jina.py,sha256=gFJ36YnSRzWWKZYumFBbqSBkDEbt0QtqDN6KW2d3hG0,2561
  agno/embedder/langdb.py,sha256=gn4N_cqzUJBuwzWdZihcuEzeYsOpBqtwijLc9KFHppg,2845
@@ -161,7 +161,7 @@ agno/knowledge/wikipedia.py,sha256=DcclE8O1mgXUJgjcxzklntk80dnaF-GYab5apaOD408,9
  agno/knowledge/youtube.py,sha256=8HZeqlOluimYkab9usNts0pemTK-E4WkcVrvesvmqss,1262
  agno/knowledge/gcs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/knowledge/gcs/base.py,sha256=IsKg2nZ4nA6MHBOna-RK36hA-bTNsYsnwP26AgYC6Dc,1463
- agno/knowledge/gcs/pdf.py,sha256=Zz3jG4H2hj2TRjUgAx1zVqi9rbk08fB2xx0hXWjvdG4,721
+ agno/knowledge/gcs/pdf.py,sha256=I-cEqyq6yLnjGKR6Py_7m6Z67E-Rk5JKl0rXMJgXOOY,5482
  agno/knowledge/s3/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/knowledge/s3/base.py,sha256=RQ3aPU_IoHknLYVycBowDbyyCctxmyMJsdhuFR1WDBk,2267
  agno/knowledge/s3/pdf.py,sha256=4lgdmP23CuIqm_8UwyeSeGd6KisGsATIHCkloUtBYqU,1273
@@ -223,7 +223,7 @@ agno/models/deepseek/deepseek.py,sha256=IsLAGroVdWgaw1FAab3ZYuqqAlIRFYTUrrheJI9a
  agno/models/fireworks/__init__.py,sha256=qIDjKUnwmrnwfa9B2Y3ybRyuUsF7Pzw6_bVq4N6M0Cg,86
  agno/models/fireworks/fireworks.py,sha256=Oh9gQeSBN223xUoc0WDKeHEzB8da1x9EnVvohXqB62U,905
  agno/models/google/__init__.py,sha256=bEOSroFJ4__38XaCgBUWiOe_Qga66ZRm_gis__yIMmc,74
- agno/models/google/gemini.py,sha256=-hf-daXNItLhgG0oI4nzaVtyLiEoeRWniQyK5fbq4So,41402
+ agno/models/google/gemini.py,sha256=lhkE-G19HnSCCHCb-A4kDgcGXF8Ywo4GFKKqWjxrbfY,43313
  agno/models/groq/__init__.py,sha256=gODf5IA4yJKlwTEYsUywmA-dsiQVyL2_yWMc8VncdVU,66
  agno/models/groq/groq.py,sha256=MR_zzSQiaqzQUhLsxgfbD0UzBBFf-R8H8vmiBMUH1zE,19880
  agno/models/huggingface/__init__.py,sha256=VgdYkgSHqsFLhvJ9lSUCyEZfest8hbCAUpWU6WCk-_c,94
@@ -252,9 +252,9 @@ agno/models/ollama/__init__.py,sha256=wZD1kXYL5PWz5h3CUj1kn1wLfECEKr9fEvJwbvg8A-
  agno/models/ollama/chat.py,sha256=g-U0XeR4S1s8LrVUHaXx_84sBb3ljwl67BeuPWPaGUM,13138
  agno/models/ollama/tools.py,sha256=PLYT9VSCGSwKAHNDEgOtyKg0HuUlYUxzGzvhoK19Vr0,19297
  agno/models/openai/__init__.py,sha256=OssVgQRpsriU6aJZ3lIp_jFuqvX6y78L4Fd3uTlmI3E,225
- agno/models/openai/chat.py,sha256=5Ve-kUXtePOiLpaq8uF2o7uPEaVz7ZuEbHDx8NGPFZQ,30230
+ agno/models/openai/chat.py,sha256=yLsLcaBYHZXNh43zrYmNYWCHbJpKPWK1OG3RwjBPrGw,30449
  agno/models/openai/like.py,sha256=wmw9PfAVqluBs4MMY73dgjelKn1yl5JDKyCRvaNFjFw,745
- agno/models/openai/responses.py,sha256=mTTJJQ7xQkvKCwi-XhNiACFQJSLGTT56Ki15CcwtX8A,38991
+ agno/models/openai/responses.py,sha256=v8eyMGRCjU3XPrKC02PCOfXXJokEDXCYFOO-3DHt7xo,39652
  agno/models/openrouter/__init__.py,sha256=ZpZhNyy_EGSXp58uC9e2iyjnxBctql7GaY8rUG-599I,90
  agno/models/openrouter/openrouter.py,sha256=Ng-_ztpq_lghGI3tM94nsC8minKhiZ6d265c6IYXtg4,869
  agno/models/perplexity/__init__.py,sha256=JNmOElDLwcZ9_Lk5owkEdgwmAhaH3YJ-VJqOI8rgp5c,90
@@ -280,9 +280,9 @@ agno/playground/settings.py,sha256=RrHkO61zWog3vs6LEEzblzevTu2jIKKbWRAyK9TDinA,9
  agno/reasoning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/reasoning/azure_ai_foundry.py,sha256=ipfuUFobFysxpwqQGaMHMWfLC5009g-8oLlSBh8uX6s,2614
  agno/reasoning/deepseek.py,sha256=UfVgLDDfKYvGmAiVbKWK4JvzSuwWcH0pzuHFV2IaXzA,2251
- agno/reasoning/default.py,sha256=wT-0uNteAFODjNlxYOYBKDXDhB1sDGX9c4B08enULp0,5140
+ agno/reasoning/default.py,sha256=TwAZWpPz9KcjEKAKuipTDh_xIz7nhrHO8WaW2jctrgE,5385
  agno/reasoning/groq.py,sha256=FYS7aouuirNAs8RxRb6FzuT14DsbgnX7mcWAvCY_9rg,2658
- agno/reasoning/helpers.py,sha256=SaTZAjv8prXUTq65hPUercJc1sSzx0MsqPHmJPrLnIY,1944
+ agno/reasoning/helpers.py,sha256=UqQyuFTEZG-DnZr46hm2uqWDuq97i-_m07wMk9teZEY,2205
  agno/reasoning/ollama.py,sha256=i2qFrHAYuoF2Gryk7Cuv07TrUlMqKYB3zr5mIskgfN4,2578
  agno/reasoning/openai.py,sha256=WSSPvyP99nOuSrkBfDAJb2q0Lmiie5lRYar_SdUu3K0,3103
  agno/reasoning/step.py,sha256=6DaOb_0DJRz9Yh1w_mxcRaOSVzIQDrj3lQ6rzHLdIwA,1220
@@ -301,7 +301,7 @@ agno/run/v2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/run/v2/workflow.py,sha256=FiZY5eWl6oEJCEfBUIX2q1WDHLI8mqVmaWWwKBA1PL8,18903
  agno/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/storage/base.py,sha256=pe1tNwzNFDtmaA3gJE7X8aGh0zPJOZsAgtkWQfzdmbY,1940
- agno/storage/dynamodb.py,sha256=1uoIXTSCWZJ4811Mh1CU4OOjF_luYMxrn5SwxuADglY,32670
+ agno/storage/dynamodb.py,sha256=DKOpeYic458WOhI6APVDjJkgVu578KWPvo0fKzFqgbE,32730
  agno/storage/firestore.py,sha256=OzAf7rnzH4Ffp2GxpDUEMKQBAw84rhogaR9WmEWytF4,11417
  agno/storage/gcs_json.py,sha256=zoS6xbUDuSjnmu30I-7xUnHByMLFI7cHNz72hDAZk-s,10753
  agno/storage/in_memory.py,sha256=YwEaVGyScsXSdXd8Vq6lA2HxhyX68Dyy9g2dtUl9bZs,9836
@@ -332,7 +332,7 @@ agno/storage/workflow/mongodb.py,sha256=x-0Jl2WovupTfwuVNOSndE9-7V4U7BBIjejtJ1Wa
  agno/storage/workflow/postgres.py,sha256=66bvx6eT7PtFvd4EtTCfI2smynAyvpjvAPYtPo-PCNg,91
  agno/storage/workflow/sqlite.py,sha256=PLqEA1YC8AtIklINr6wy8lzK6KABEqvlJW-nz5KacWM,85
  agno/team/__init__.py,sha256=OSkwJhm4uSoOwpHLeDdcH4q2R_BmfS-7a9_aPxB-Skw,967
- agno/team/team.py,sha256=LnaZTX1p19uxd-4h8HH1B9dUteztsas0opVT_FcLsMQ,378343
+ agno/team/team.py,sha256=vnIwYSKZnCBJYM03rvb-OjmbjjvKXMe44LBd2okPj3s,383585
  agno/tools/__init__.py,sha256=jNll2sELhPPbqm5nPeT4_uyzRO2_KRTW-8Or60kioS0,210
  agno/tools/agentql.py,sha256=w6FlCfhuS0cc2BHa9K6dZjqO1ycA66fSZbR_nvXiVSo,3813
  agno/tools/airflow.py,sha256=2ZCwx65w_tSXm4xEzZQR_teOiXJlnEgIqU9AgQTQemI,2493
@@ -344,11 +344,11 @@ agno/tools/aws_ses.py,sha256=fl5NTRWcljzxp4WxTg2gIAjLMNcuEWs_vnjeRtEKRHY,2090
  agno/tools/baidusearch.py,sha256=HBdhLz1HUtKXJjIQru21jKiSonG9jEjNB_W6FPjklew,2883
  agno/tools/bitbucket.py,sha256=CLhYdB_HTriy44VHtsRtqCY-BhnHpZMBHQ9vgL4vpWg,11207
  agno/tools/brandfetch.py,sha256=s0fjQ3biokp7XO-Hgt4JzOaW9Kw-3zZRfdGlT5M1hEo,7947
- agno/tools/bravesearch.py,sha256=b939wSHEn2bB64TqqW7wJUh5Fkyul5OuZgvQeFkHho0,3359
+ agno/tools/bravesearch.py,sha256=4WwCtWiJYwDoGfCay5S2wpBdRIx-7sn3hWQErdl11qY,3427
  agno/tools/brightdata.py,sha256=JuBgosT3BBoD3bBHByQgy34zg3tm-dUrKk1fP84yg_M,12979
  agno/tools/browserbase.py,sha256=xD4hMwUR446xV3NuqR9tHepuQtnd0p6NV17_0zK-Djo,7394
  agno/tools/calcom.py,sha256=HnPemDdChx3L7G5Br-TY4jqed_ZcxOTLfB-Ysg5g2LQ,9549
- agno/tools/calculator.py,sha256=kbRWoMmoVjrBuXx-91EyS2szUb27sxRbvJkQakGMh3I,5709
+ agno/tools/calculator.py,sha256=JNIQj0EY2LVYRg6MkVX43cc3yJfF1DDKxAN_wSUiH14,5717
  agno/tools/cartesia.py,sha256=obAgm8BUK_VRb7sLhT2pU2tHkTb0cWlw7tuAojvjhec,7121
  agno/tools/clickup_tool.py,sha256=MievuuoRGKcnLgwH_g0Btatg8Xy_vdU9P-YfPGu5f6I,8781
  agno/tools/confluence.py,sha256=RUlc9QwHx6W52f0-6x7M_VPs2cEHWL1ATkPIZar8VJk,7435
@@ -358,7 +358,7 @@ agno/tools/dalle.py,sha256=ECoc7S22WrnOoR_LbMcT9IJlk-Uk_1WaUyjxoOAuioU,3910
  agno/tools/daytona.py,sha256=OZlXBE10BUWywbxpKZfo86Bx8ErT1fBoGLOWJzNni-c,18458
  agno/tools/decorator.py,sha256=bPyTfvjZ8nYe9o033np-EPMwAlMuVDsNx3ZRqGInHNA,9519
  agno/tools/desi_vocal.py,sha256=c0HnwjqgOyU-zooOZiykTRaWvz4af1c0U-Ax4JhwHNs,3255
- agno/tools/discord.py,sha256=k01fF4mtdmSerXb3gsHZpdWYV68D7u1jiqNxxQzxlcY,5661
+ agno/tools/discord.py,sha256=4g43nZ2fzLkNIj7uQYHdN4hvqeZPh_kMlsYJCYFG328,5661
  agno/tools/docker.py,sha256=vLbllXQZTyxZyusLNali_V5dwuRierLRBkoC-9QP38o,26408
  agno/tools/duckdb.py,sha256=XHgZRQ0gkwKmik5eOpQXon53Elz39zKd_WYEn6VjvI8,15384
  agno/tools/duckduckgo.py,sha256=S92JD7sPRS4umOI8ax4Pey3yR74fsdNn0rUO4DN8DqI,3515
@@ -437,7 +437,7 @@ agno/tools/whatsapp.py,sha256=VRtUv-PVgGJOGAHmu1bNlJVXg6Jr3_x4fbbbuKnK4Rs,11248
  agno/tools/wikipedia.py,sha256=ytcqNaEgxmc2SAztp5Vlw27tP21PGx_CwY74bG5MiT4,2275
  agno/tools/x.py,sha256=_FsdNXE8HvAzg5xzeKQNjlOTE1K_aAk_JJzX4-C_Djw,14543
  agno/tools/yfinance.py,sha256=IxcuBD4XFL9WOe51b6Q2uVIHyRqAR_WdrO2fyd-ePRs,12765
- agno/tools/youtube.py,sha256=0LxSuMlGRXXgXdwuHlewfR3XhvRF4zYBJQ0TCT57KwI,6423
+ agno/tools/youtube.py,sha256=r0HpBtYscZQmnxRUwH9rqROnDP52gLzNwr24s2Pc2ZE,6380
  agno/tools/zendesk.py,sha256=yzng5wtUdoSB1QTJ8mPVK6HHvUfYt5TklBCM1hmdrkU,2838
  agno/tools/zep.py,sha256=k-tfw22N8fhdiQt_mYv5W718Nm3IjRo4tWPfcJK0wP4,19135
  agno/tools/zoom.py,sha256=eJQH77SxGjDGQy5-xoQSzT4tYVXSASCr8uyvoS6K3e0,15982
@@ -556,9 +556,9 @@ agno/workspace/enums.py,sha256=MxF1CUMXBaZMTKLEfiR-7kEhTki2Gfz6W7u49RdYYaE,123
  agno/workspace/helpers.py,sha256=Mp-VlRsPVhW10CfDWYVhc9ANLk9RjNurDfCgXmycZCg,2066
  agno/workspace/operator.py,sha256=CNLwVR45eE5dSRjto2o0c9NgCi2xD-JZR5uLt9kfIt8,30758
  agno/workspace/settings.py,sha256=bcyHHN7lH1LPSMt4i_20XpTjZLoNXdzwyW-G9nHYV40,5703
- agno-1.7.11.dist-info/licenses/LICENSE,sha256=m2rfTWFUfIwCaQqgT2WeBjuKzMKEJRwnaiofg9n8MsQ,16751
- agno-1.7.11.dist-info/METADATA,sha256=zCQm49XMAyYKSxj3qADN7yf4Gtt81WihG1y5mWVTrj0,44433
- agno-1.7.11.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- agno-1.7.11.dist-info/entry_points.txt,sha256=Be-iPnPVabMohESsuUdV5w6IAYEIlpc2emJZbyNnfGI,88
- agno-1.7.11.dist-info/top_level.txt,sha256=MKyeuVesTyOKIXUhc-d_tPa2Hrh0oTA4LM0izowpx70,5
- agno-1.7.11.dist-info/RECORD,,
+ agno-1.7.12.dist-info/licenses/LICENSE,sha256=m2rfTWFUfIwCaQqgT2WeBjuKzMKEJRwnaiofg9n8MsQ,16751
+ agno-1.7.12.dist-info/METADATA,sha256=eQa-B4AnuoCBW9ON0Epdz519VQU9EY9G88EeW5Nezmo,44433
+ agno-1.7.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ agno-1.7.12.dist-info/entry_points.txt,sha256=Be-iPnPVabMohESsuUdV5w6IAYEIlpc2emJZbyNnfGI,88
+ agno-1.7.12.dist-info/top_level.txt,sha256=MKyeuVesTyOKIXUhc-d_tPa2Hrh0oTA4LM0izowpx70,5
+ agno-1.7.12.dist-info/RECORD,,